diff --git a/.github/.markdownlint.json b/.github/.markdownlint.json new file mode 100644 index 000000000..bea483d98 --- /dev/null +++ b/.github/.markdownlint.json @@ -0,0 +1,7 @@ +{ + "MD013": false, + "MD010": false, + "MD024": { + "siblings_only": true + } +} diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 000000000..25ec8498b --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1 @@ +* @microsoft/retina diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml new file mode 100644 index 000000000..312ecb223 --- /dev/null +++ b/.github/dependabot.yaml @@ -0,0 +1,33 @@ +version: 2 +updates: + - package-ecosystem: "docker" + directory: "/" + schedule: + interval: "daily" + reviewers: + - "microsoft/retina" + commit-message: + prefix: "ci" + labels: [ "ci", "dependencies" ] + open-pull-requests-limit: 10 + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "daily" + reviewers: + - "microsoft/retina" + commit-message: + prefix: "ci" + labels: [ "ci", "dependencies" ] + open-pull-requests-limit: 10 + - package-ecosystem: "gomod" + directory: "/" + schedule: + interval: "daily" + reviewers: + - "microsoft/retina" + commit-message: + prefix: "deps" + ignore: + - dependency-name: "github.com/inspektor-gadget/inspektor-gadget" + open-pull-requests-limit: 10 diff --git a/.github/workflows/codeql.yaml b/.github/workflows/codeql.yaml new file mode 100644 index 000000000..c2c16d6c1 --- /dev/null +++ b/.github/workflows/codeql.yaml @@ -0,0 +1,39 @@ +name: "CodeQL" +on: + workflow_dispatch: + push: + branches: [ main ] + pull_request: + branches: [ main ] +jobs: + analyze: + name: Analyze + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, windows-latest] + language: [go] + go-version: ["1.21"] + runs-on: ${{ matrix.os }} + timeout-minutes: 60 + permissions: + actions: read + contents: read + security-events: write + steps: + - name: Setup go + uses: actions/setup-go@v5 + with: + go-version: ${{ matrix.go-version }} + - name: Checkout repository + uses: actions/checkout@v4 + - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: ${{ matrix.language }} + - name: Autobuild + uses: github/codeql-action/autobuild@v3 + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v3 + with: + category: "/language:${{matrix.language}}" diff --git a/.github/workflows/commit-message.yaml b/.github/workflows/commit-message.yaml new file mode 100644 index 000000000..eeabc3395 --- /dev/null +++ b/.github/workflows/commit-message.yaml @@ -0,0 +1,45 @@ +name: commit-message +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + types: + - opened + - synchronize + - edited + - reopened +jobs: + commit-message: + runs-on: ubuntu-20.04 + steps: + - name: verify_commit_message + run: | + if [[ "${{ github.event_name }}" == pull_request ]]; then + commit_msg_header="${{ github.event.pull_request.title }}" + else + # get first line of commit message + commit_msg_header=`echo "${{ github.event.head_commit.message }}" | head -n 1` + fi + + commit_msg_type_regex='feat|fix|refactor|style|test|docs|build|tool|chore|deps' + commit_msg_scope_regex='.{1,20}' + commit_msg_subject_regex='.{1,150}' + commit_msg_regex="^(${commit_msg_type_regex})(\(${commit_msg_scope_regex}\))?: (${commit_msg_subject_regex})\$" + merge_msg_regex="^Merge branch '.+' into .+\$" + full_regex="(${commit_msg_regex})|(${merge_msg_regex})" + + echo $commit_msg_header | grep -qP "$full_regex" || { + echo "ERROR: Invalid commit 
message header. Please fix format of your PR title or the commit pushed to main." + echo "Current value:" + echo "$commit_msg_header" + echo + echo "Examples of valid commits:" + echo 'example 1: "feat(cli): new feature"' + echo 'example 2: "fix(advanced-metrics): bug fix"' + echo 'example 3: "docs: update readme"' + echo + echo "Valid types are: $commit_msg_type_regex" + echo "For more details, see .github/workflows/commit-message.yaml" + exit 1 + } diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml new file mode 100644 index 000000000..e979375cd --- /dev/null +++ b/.github/workflows/docs.yaml @@ -0,0 +1,38 @@ +# Simple workflow for deploying static content to GitHub Pages +name: Build and Deploy Retina.sh +on: + push: + branches: ["docs", "main", "ghpages"] + workflow_dispatch: +permissions: + contents: read + pages: write + id-token: write +concurrency: + group: "pages" + cancel-in-progress: false +jobs: + deploy: + environment: + name: retina.sh + url: ${{ steps.deployment.outputs.page_url }} + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Setup Pages + uses: actions/configure-pages@v3 + - uses: actions/setup-node@v3 + with: + node-version: 20 + - name: build + run: | + npm install --prefix site/ + npm run build --prefix site/ + - name: Upload artifact + uses: actions/upload-pages-artifact@v1 + with: + path: "./site/build" + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v1 diff --git a/.github/workflows/golangci-lint.yaml b/.github/workflows/golangci-lint.yaml new file mode 100644 index 000000000..16dab9064 --- /dev/null +++ b/.github/workflows/golangci-lint.yaml @@ -0,0 +1,30 @@ +name: golangci-lint +on: + workflow_dispatch: + push: + branches: [ main ] + pull_request: + branches: [ main ] +jobs: + golangci: + strategy: + fail-fast: false + matrix: + go-version: ['1.21.x'] + os: [ubuntu-latest, windows-latest] + name: Lint + runs-on: ${{ matrix.os }} + steps: + - uses: actions/setup-go@v5 + with: + go-version: ${{ matrix.go-version }} + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: golangci-lint + uses: golangci/golangci-lint-action@v4 + with: + version: v1.55 + args: --concurrency 4 --verbose --new-from-rev=origin/master --config=.golangci.yml --timeout=25m + only-new-issues: true + skip-cache: true diff --git a/.github/workflows/images.yaml b/.github/workflows/images.yaml new file mode 100644 index 000000000..3ae845f84 --- /dev/null +++ b/.github/workflows/images.yaml @@ -0,0 +1,86 @@ +name: Build and Publish Retina Container Images + +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + +jobs: + images: + name: Build Images + runs-on: ubuntu-latest + + strategy: + matrix: + platform: ["linux"] + arch: ["amd64", "arm64"] + component: ["agent", "operator"] + + # required for AZ login/SP + permissions: + id-token: write + contents: read + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - uses: actions/setup-go@v4 + with: + go-version: ">=1.21.0" + - run: go version + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Az CLI login + uses: azure/login@v1 + with: + client-id: ${{ secrets.AZURE_CLIENT_ID }} + tenant-id: ${{ secrets.AZURE_TENANT_ID }} + subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }} + + - name: Build/Push Images + shell: bash + run: | + set -euo pipefail + az acr login -n ${{ secrets.ACR_NAME }} + echo "TAG=$(make version)" >> $GITHUB_ENV + make build PLATFORMS=${{ matrix.platform }}/${{ matrix.arch }} 
COMPONENT=${{ matrix.component }} + + + manifests: + name: Generate Manifests + runs-on: ubuntu-latest + needs: images + + strategy: + matrix: + component: ["agent", "operator"] + + # required for AZ login/SP + permissions: + id-token: write + contents: read + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Az CLI login + uses: azure/login@v1 + with: + client-id: ${{ secrets.AZURE_CLIENT_ID }} + tenant-id: ${{ secrets.AZURE_TENANT_ID }} + subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }} + + - name: Generate Manifests + shell: bash + run: | + set -euo pipefail + az acr login -n ${{ secrets.ACR_NAME }} + make manifest COMPONENT=${{ matrix.component }} diff --git a/.github/workflows/integration.yaml b/.github/workflows/integration.yaml new file mode 100644 index 000000000..5f2600e21 --- /dev/null +++ b/.github/workflows/integration.yaml @@ -0,0 +1,74 @@ +name: Integration Tests For Retina +on: + workflow_dispatch: +jobs: + integ-test: + runs-on: ubuntu-latest + steps: + - name: Free up disk space + run: | + # https://github.com/actions/runner-images/issues/2840#issuecomment-790492173 + # du -sh /* 2> /dev/null | sort -rh 2> /dev/null | head + # du -h -d2 /usr 2> /dev/null | sort -rh 2> /dev/null | head + echo "Check free disk space before cleanup." + df -h + echo "Removing non-essential tools and libraries." + sudo rm -rf "$AGENT_TOOLSDIRECTORY" + sudo rm -rf /opt/ghc + sudo rm -rf /usr/share/dotnet + sudo rm -rf /usr/local/share/boost + # delete libraries for Android (12G), PowerShell (1.3G), Swift (1.7G) + sudo rm -rf /usr/local/lib/android + sudo rm -rf /usr/local/share/powershell + sudo rm -rf /usr/share/swift + echo "Check free disk space after cleanup." + df -h + + - name: Checkout + uses: actions/checkout@v3 + + - uses: actions/setup-go@v2 + with: + go-version: "^1.18" + + - name: Make retina image + run: | + export CONTAINER_BUILDER=docker + export CONTAINER_RUNTIME=docker + make all-images-local + make install-kubectl-retina + make base-images-remove + curl -LO https://github.com/kvaps/kubectl-node-shell/raw/master/kubectl-node_shell + chmod +x ./kubectl-node_shell + sudo mv ./kubectl-node_shell /usr/local/bin/kubectl-node_shell + docker system prune -f + + - name: Deploy Kind + run: make kind-setup + + - name: Install retina + run: make kind-install + + - name: Run tests + run: make retina-integration + + - name: Export Kubernetes logs + if: failure() + run: make retina-export-logs + + - name: Archive Kubernetes logs + if: failure() + uses: actions/upload-artifact@v3 + with: + name: kubernetes-node-logs + path: kubernetes-logs + + - name: Archive iptable and ipset + if: failure() + uses: actions/upload-artifact@v3 + with: + name: iptable-ipset-snapshot + path: ./test/integration/plugin-simulations/npm-iptables + - name: Cleanup + if: always() + run: make kind-clean diff --git a/.github/workflows/markdownlint.yaml b/.github/workflows/markdownlint.yaml new file mode 100644 index 000000000..74543c05d --- /dev/null +++ b/.github/workflows/markdownlint.yaml @@ -0,0 +1,19 @@ +name: Markdown Lint +on: + push: + branches: [main] + pull_request: + branches: [main] +jobs: + markdownlint: + name: markdownlint + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v2 + - uses: DavidAnson/markdownlint-cli2-action@v9 + with: + command: config + globs: | + .github/.markdownlint.json + **/*.md diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml new file 
mode 100644 index 000000000..fbaa1f2ba --- /dev/null +++ b/.github/workflows/stale.yaml @@ -0,0 +1,29 @@ +name: 'Stale PR and Issue Handler' +on: + workflow_dispatch: + schedule: + - cron: '0 0 * * *' + +jobs: + stale: + runs-on: ubuntu-latest + permissions: + contents: write + issues: write + pull-requests: write + steps: + - uses: actions/stale@main + id: stale + with: + ascending: true + close-issue-message: 'Issue closed due to inactivity.' + close-pr-message: 'Pull request closed due to inactivity.' + days-before-close: 7 + days-before-stale: 14 + delete-branch: true + exempt-issue-labels: 'exempt-stale' + operations-per-run: 100 + stale-issue-message: 'This issue is stale because it has been open for 2 weeks with no activity. Remove stale label or comment or this will be closed in 7 days' + stale-pr-message: 'This pull request is stale because it has been open for 2 weeks with no activity. Remove stale label or comment or this will be closed in 7 days' + - name: Print outputs + run: echo ${{ join(steps.stale.outputs.*, ',') }} diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml new file mode 100644 index 000000000..767f0ba3e --- /dev/null +++ b/.github/workflows/test.yaml @@ -0,0 +1,37 @@ +name: Test Retina Image +on: + push: + branches: [main] + pull_request: + branches: [main] + workflow_dispatch: +permissions: + actions: read + contents: read + deployments: read + packages: none + pull-requests: write + security-events: write + issues: write +jobs: + test-image: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + + - uses: actions/setup-go@v2 + with: + go-version: "^1.20" + + - name: Make Retina Test image + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PULL_REQUEST_NUMBER: ${{ github.event.pull_request.number }} + run: | + make retina-test-image PLATFORM=linux/amd64 + - name: Upload Artifacts + uses: actions/upload-artifact@v3 + with: + name: coverage-files + path: ./coverage* diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..d9cfd93bd --- /dev/null +++ b/.gitignore @@ -0,0 +1,28 @@ +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work + +# docusaurus +site/yarn.lock +site/.docusaurus/ +site/node_modules/ + +hack/tools/bin diff --git a/.golangci.yaml b/.golangci.yaml new file mode 100644 index 000000000..b0d9e59c0 --- /dev/null +++ b/.golangci.yaml @@ -0,0 +1,40 @@ +issues: + max-same-issues: 0 + max-issues-per-linter: 0 + new-from-rev: origin/master +linters: + presets: + - bugs + - error + - format + - performance + - unused + disable: + - maligned + - scopelint + enable: + - exportloopref + - goconst + - gocritic + - gocyclo + - gofmt + - gomnd + - goprintffuncname + - gosimple + - lll + - misspell + - nakedret + - promlinter + - revive +linters-settings: + gocritic: + enabled-tags: + - "diagnostic" + - "style" + - "performance" + disabled-checks: + - "hugeParam" + govet: + check-shadowing: true + lll: + line-length: 200 diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..f9ba8cf65 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ 
-0,0 +1,9 @@ +# Microsoft Open Source Code of Conduct + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). + +Resources: + +- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) +- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +- Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..9e841e7a2 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..a3cfd7714 --- /dev/null +++ b/Makefile @@ -0,0 +1,804 @@ +.DEFAULT_GOAL := help + +# Default platform commands +RMDIR := rm -rf + +## Globals + +REPO_ROOT = $(shell git rev-parse --show-toplevel) +ifndef TAG + TAG ?= $(shell git describe --tags --always) +endif +GOOS ?= $(shell go env GOOS) +GOARCH ?= $(shell go env GOARCH) +OUTPUT_DIR = $(REPO_ROOT)/output +BUILD_DIR = $(OUTPUT_DIR)/$(GOOS)_$(GOARCH) +RETINA_BUILD_DIR = $(BUILD_DIR)/retina +RETINA_DIR = $(REPO_ROOT)/controller +KUBECTL_RETINA_BUILD_DIR = $(OUTPUT_DIR)/kubectl-retina +OPERATOR_DIR=$(REPO_ROOT)/operator +CAPTURE_WORKLOAD_DIR = $(REPO_ROOT)/captureworkload + +KIND = /usr/local/bin/kind +KIND_CLUSTER = retina-cluster +WINVER2022 ?= "10.0.20348.1906" +WINVER2019 ?= "10.0.17763.4737" +APP_INSIGHTS_ID ?= "" +GENERATE_TARGET_DIRS = \ + ./pkg/plugin/linuxutil + +IMAGE_REGISTRY ?= acnpublic.azurecr.io +OS ?= $(GOOS) +ARCH ?= $(GOARCH) +PLATFORM ?= $(OS)/$(ARCH) + +CONTAINER_BUILDER ?= buildah +CONTAINER_RUNTIME ?= podman +YEAR ?=2022 + +ALL_ARCH.linux = amd64 arm64 +ALL_ARCH.windows = amd64 + +# prefer buildah, if available, but fall back to docker if that binary is not in the path. +ifeq (, $(shell which $(CONTAINER_BUILDER))) +CONTAINER_BUILDER = docker +endif +# use docker if platform is windows +ifeq ($(OS),windows) +CONTAINER_BUILDER = docker +endif +# prefer podman, if available, but fall back to docker if that binary is not in the path. +ifeq (, $(shell which $(CONTAINER_RUNTIME))) +CONTAINER_RUNTIME = docker +endif + +# TAG is OS and platform agonstic, which can be used for binary version and image manifest tag, +# while RETINA_PLATFORM_TAG is platform specific, which can be used for image built for specific platforms. 
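+# For example, with an illustrative TAG=v0.1.0: PLATFORM=linux/amd64 yields RETINA_PLATFORM_TAG=linux-amd64-v0.1.0,
+# while a windows build with YEAR=2022 yields windows-ltsc2022-amd64-v0.1.0 (see the override below).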
+RETINA_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))-$(TAG) + +# for windows os, add year to the platform tag +ifeq ($(OS),windows) +RETINA_PLATFORM_TAG = windows-ltsc$(YEAR)-amd64-$(TAG) +endif + +qemu-user-static: ## Set up the host to run qemu multiplatform container builds. + sudo $(CONTAINER_RUNTIME) run --rm --privileged multiarch/qemu-user-static --reset -p yes + +version: ## prints the root version + @echo $(TAG) + +##@ Help + +help: ## Display this help + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + + +##@ Tools + +TOOLS_DIR = $(REPO_ROOT)/hack/tools +TOOLS_BIN_DIR = $(TOOLS_DIR)/bin +GOFUMPT := $(TOOLS_BIN_DIR)/gofumpt +GOLANGCI_LINT := $(TOOLS_BIN_DIR)/golangci-lint +CONTROLLER_GEN := $(TOOLS_BIN_DIR)/controller-gen +GINKGO := $(TOOLS_BIN_DIR)/ginkgo +MOCKGEN := $(TOOLS_BIN_DIR)/mockgen +ENVTEST := $(TOOLS_BIN_DIR)/setup-envtest +GIT_CURRENT_BRANCH_NAME := $(shell git rev-parse --abbrev-ref HEAD) + +$(TOOLS_DIR)/go.mod: + cd $(TOOLS_DIR); go mod init && go mod tidy + +$(GOFUMPT): $(TOOLS_DIR)/go.mod + cd $(TOOLS_DIR); go mod download; go build -tags=tools -o bin/gofumpt mvdan.cc/gofumpt + +gofumpt: $(GOFUMPT) ## Build gofumpt + +$(GOLANGCI_LINT): $(TOOLS_DIR)/go.mod + cd $(TOOLS_DIR); go mod download; go build -tags=tools -o bin/golangci-lint github.com/golangci/golangci-lint/cmd/golangci-lint + +golangci-lint: $(GOLANGCI_LINT) ## Build golangci-lint + +$(CONTROLLER_GEN): $(TOOLS_DIR)/go.mod + cd $(TOOLS_DIR); go mod download; go build -tags=tools -o bin/controller-gen sigs.k8s.io/controller-tools/cmd/controller-gen + +controller-gen: $(CONTROLLER_GEN) ## Build controller-gen + +$(GINKGO): $(TOOLS_DIR)/go.mod + cd $(TOOLS_DIR); go mod download; go build -tags=tools -o bin/ginkgo github.com/onsi/ginkgo/ginkgo + +ginkgo: $(GINKGO) ## Build ginkgo + +$(MOCKGEN): $(TOOLS_DIR)/go.mod + cd $(TOOLS_DIR); go mod download; go build -tags=$(TOOL_TAG) -o bin/mockgen github.com/golang/mock/mockgen + +mockgen: $(MOCKGEN) ## Build mockgen + +$(ENVTEST): $(TOOLS_DIR)/go.mod + cd $(TOOLS_DIR); go mod download; go build -tags=$(TOOL_TAG) -o bin/setup-envtest sigs.k8s.io/controller-runtime/tools/setup-envtest + +setup-envtest: $(ENVTEST) + +all: generate + +generate: generate-bpf-go + CGO_ENABLED=0 go generate ./... + for dir in $(GENERATE_TARGET_DIRS); do \ + make -C $$dir $@; \ + done + +generate-bpf-go: ## generate ebpf wrappers for plugins for all archs + for arch in $(ALL_ARCH.linux); do \ + CGO_ENABLED=0 GOARCH=$$arch go generate ./pkg/plugin/...; \ + done + +.PHONY: all generate generate-bpf-go + +##@ Utils + +FMT_PKG ?= . +LINT_PKG ?= . + +fmt: $(GOFUMPT) ## run gofumpt on $FMT_PKG (default "retina"). + $(GOFUMPT) -w $(FMT_PKG) + +lint: $(GOLANGCI_LINT) ## Fast lint vs default branch showing only new issues. + $(GOLANGCI_LINT) run --new-from-rev main --timeout 10m -v $(LINT_PKG)/... + +lint-existing: $(GOLANGCI_LINT) ## Lint the current branch in entirety. + $(GOLANGCI_LINT) run -v $(LINT_PKG)/... + +clean: ## clean build artifacts + $(RMDIR) $(OUTPUT_DIR) + +##@ Build Binaries + +retina: ## builds both retina and kapctl binaries + $(MAKE) retina-binary kubectl-retina + +retina-binary: ## build the Retina binary + go generate ./... 
&&\ + cd $(RETINA_DIR) &&\ + CGO_ENABLED=0 &&\ + go build -v -o $(RETINA_BUILD_DIR)/retina$(EXE_EXT) -gcflags="-dwarflocationlists=true" -ldflags "-X main.version=$(TAG) -X main.applicationInsightsID=$(APP_INSIGHTS_ID)" + +all-kubectl-retina: $(addprefix kubectl-retina-linux-,${ALL_ARCH.linux}) $(addprefix kubectl-retina-windows-,${ALL_ARCH.windows}) ## build kubectl plugin for all platforms + +kubectl-retina: # build kubectl plugin for current platform + if [ "$(BUILD_LOCALLY)" = "true" ]; then \ + $(MAKE) kubectl-retina-binary-$(GOOS)-$(GOARCH); \ + else \ + $(MAKE) kubectl-retina-$(GOOS)-$(GOARCH); \ + fi + cp $(KUBECTL_RETINA_BUILD_DIR)/kubectl-retina-$(GOOS)-$(GOARCH) $(KUBECTL_RETINA_BUILD_DIR)/kubectl-gadget + +kubectl-retina-binary-%: ## build kubectl plugin locally. + export CGO_ENABLED=0 && \ + export GOOS=$(shell echo $* |cut -f1 -d-) GOARCH=$(shell echo $* |cut -f2 -d-) && \ + go build -v \ + -o $(KUBECTL_RETINA_BUILD_DIR)/kubectl-retina-$${GOOS}-$${GOARCH} \ + -gcflags="-dwarflocationlists=true" \ + -ldflags "-X github.com/microsoft/retina/cli/cmd.Version=$(TAG)" \ + github.com/microsoft/retina/cli + +kubectl-retina-%: ## build kubectl plugin + CONTAINER_BUILDER=docker $(MAKE) kubectl-retina-image +# copy the binary from the container image. + mkdir -p output/kubectl-retina + docker run --rm --platform=linux/amd64 \ + --user $(shell id -u):$(shell id -g) \ + -v $(PWD):/app \ + $(IMAGE_REGISTRY)/$(KUBECTL_RETINA_IMAGE):$(RETINA_PLATFORM_TAG) \ + sh -c "cp /bin/kubectl-retina /app/output/kubectl-retina/kubectl-gadget && cp /bin/kubectl-retina /app/output/kubectl-retina/kubectl-retina-$(GOOS)-$(GOARCH)" + +install-kubectl-retina: kubectl-retina ## install kubectl plugin + chmod +x $(KUBECTL_RETINA_BUILD_DIR)/kubectl-gadget + sudo cp $(KUBECTL_RETINA_BUILD_DIR)/kubectl-gadget /usr/local/bin/kubectl-retina + kubectl retina --help + +retina-capture-workload: ## build the Retina capture workload + cd $(CAPTURE_WORKLOAD_DIR) && CGO_ENABLED=0 go build -v -o $(RETINA_BUILD_DIR)/captureworkload$(EXE_EXT) -gcflags="-dwarflocationlists=true" -ldflags "-X main.version=$(TAG)" + +##@ Containers + +RETINA_BUILDER_IMAGE = retina-builder +RETINA_TOOLS_IMAGE = retina-tools +RETINA_IMAGE = retina-agent +RETINA_INIT_IMAGE = retina-init +KUBECTL_RETINA_IMAGE=kubectl-retina +RETINA_OPERATOR_IMAGE=retina-operator +RETINA_INTEGRATION_TEST_IMAGE=retina-integration-test +RETINA_PROTO_IMAGE=retina-proto-gen +RETINA_GO_GEN_IMAGE=retina-go-gen +KAPINGER_IMAGE = kapinger + +skopeo-export: # util target to copy a container from containers-storage to the docker daemon. + skopeo copy \ + containers-storage:$(REF) \ + docker-daemon:$(REF) + + +container-push: # util target to publish container image. do not invoke directly. + $(CONTAINER_BUILDER) push \ + $(IMAGE_REGISTRY)/$(IMAGE):$(TAG) + +container-pull: # util target to pull container image. + $(CONTAINER_BUILDER) pull \ + $(IMAGE_REGISTRY)/$(IMAGE):$(TAG) + +retina-skopeo-export: + $(MAKE) skopeo-export \ + REF=$(IMAGE_REGISTRY)/$(RETINA_IMAGE):$(RETINA_PLATFORM_TAG) \ + IMG=$(RETINA_IMAGE) + TAG=$(RETINA_PLATFORM_TAG) + +# VERSION vs TAG: VERSION is the version of the binary, TAG is the version of the container image +# which may contain OS and ARCH information. +container-buildah: # util target to build container images using buildah. do not invoke directly. 
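+# Wrapper targets below (retina-image, retina-init-image, retina-operator-image, ...) call this as container-$(CONTAINER_BUILDER),
+# supplying PLATFORM, DOCKERFILE, IMAGE, VERSION, TAG and CONTEXT_DIR; see retina-image for a representative argument set.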
+ buildah bud \ + --jobs 16 \ + --platform $(PLATFORM) \ + -f $(DOCKERFILE) \ + --build-arg VERSION=$(VERSION) $(EXTRA_BUILD_ARGS) \ + --build-arg GOOS=$(GOOS) \ + --build-arg GOARCH=$(GOARCH) \ + --build-arg APP_INSIGHTS_ID=$(APP_INSIGHTS_ID) \ + --build-arg builderImage=$(IMAGE_REGISTRY)/$(RETINA_BUILDER_IMAGE):$(TAG) \ + --build-arg toolsImage=$(IMAGE_REGISTRY)/$(RETINA_TOOLS_IMAGE):$(TAG) \ + -t $(IMAGE_REGISTRY)/$(IMAGE):$(TAG) \ + $(CONTEXT_DIR) + +container-docker: # util target to build container images using docker buildx. do not invoke directly. + docker buildx build \ + $(ACTION) \ + --platform $(PLATFORM) \ + -f $(DOCKERFILE) \ + --build-arg VERSION=$(VERSION) $(EXTRA_BUILD_ARGS) \ + --build-arg GOOS=$(GOOS) \ + --build-arg GOARCH=$(GOARCH) \ + --build-arg APP_INSIGHTS_ID=$(APP_INSIGHTS_ID) \ + --build-arg builderImage=$(IMAGE_REGISTRY)/$(RETINA_BUILDER_IMAGE):$(TAG) \ + --build-arg toolsImage=$(IMAGE_REGISTRY)/$(RETINA_TOOLS_IMAGE):$(TAG) \ + -t $(IMAGE_REGISTRY)/$(IMAGE):$(TAG) \ + $(CONTEXT_DIR) + +retina-builder-image: ## build the retina builder container image. + echo "Building for $(PLATFORM)" + $(MAKE) container-$(CONTAINER_BUILDER) \ + PLATFORM=$(PLATFORM) \ + DOCKERFILE=controller/Dockerfile.builder \ + REGISTRY=$(IMAGE_REGISTRY) \ + IMAGE=$(RETINA_BUILDER_IMAGE) \ + VERSION=$(TAG) \ + TAG=$(RETINA_PLATFORM_TAG) \ + APP_INSIGHTS_ID=$(APP_INSIGHTS_ID) \ + CONTEXT_DIR=$(REPO_ROOT) \ + ACTION=--load + +retina-builder-image-remove: + $(CONTAINER_BUILDER) rmi $(IMAGE_REGISTRY)/$(RETINA_BUILDER_IMAGE):$(RETINA_PLATFORM_TAG) + +retina-tools-image: ## build the retina container image. + echo "Building for $(PLATFORM)" + $(MAKE) container-$(CONTAINER_BUILDER) \ + PLATFORM=$(PLATFORM) \ + DOCKERFILE=controller/Dockerfile.tools \ + REGISTRY=$(IMAGE_REGISTRY) \ + IMAGE=$(RETINA_TOOLS_IMAGE) \ + VERSION=$(TAG) \ + TAG=$(RETINA_PLATFORM_TAG) \ + APP_INSIGHTS_ID=$(APP_INSIGHTS_ID) \ + CONTEXT_DIR=$(REPO_ROOT) \ + ACTION=--load + +retina-tools-image-remove: + $(CONTAINER_BUILDER) rmi $(IMAGE_REGISTRY)/$(RETINA_TOOLS_IMAGE):$(RETINA_PLATFORM_TAG) + +retina-image: ## This pulls dependecies from registry. Use retina-image-local to build dependencies first locally. + echo "Building for $(PLATFORM)" + $(MAKE) container-$(CONTAINER_BUILDER) \ + PLATFORM=$(PLATFORM) \ + DOCKERFILE=controller/Dockerfile.controller \ + REGISTRY=$(IMAGE_REGISTRY) \ + IMAGE=$(RETINA_IMAGE) \ + VERSION=$(TAG) \ + TAG=$(RETINA_PLATFORM_TAG) \ + APP_INSIGHTS_ID=$(APP_INSIGHTS_ID) \ + CONTEXT_DIR=$(REPO_ROOT) \ + ACTION=--load + +retina-init-image: ## build the retina container image. + echo "Building for $(PLATFORM)" + $(MAKE) container-$(CONTAINER_BUILDER) \ + PLATFORM=$(PLATFORM) \ + DOCKERFILE=controller/Dockerfile.init \ + REGISTRY=$(IMAGE_REGISTRY) \ + IMAGE=$(RETINA_INIT_IMAGE) \ + VERSION=$(TAG) \ + TAG=$(RETINA_PLATFORM_TAG) \ + APP_INSIGHTS_ID=$(APP_INSIGHTS_ID) \ + CONTEXT_DIR=$(REPO_ROOT) \ + ACTION=--load + +kubectl-retina-image: ## build the kubectl-retina image. + echo "Building for $(PLATFORM)" + $(MAKE) container-$(CONTAINER_BUILDER) \ + PLATFORM=$(PLATFORM) \ + DOCKERFILE=cli/Dockerfile.kubectl-retina \ + REGISTRY=$(IMAGE_REGISTRY) \ + IMAGE=$(KUBECTL_RETINA_IMAGE) \ + VERSION=$(TAG) \ + TAG=$(RETINA_PLATFORM_TAG) \ + APP_INSIGHTS_ID=$(APP_INSIGHTS_ID) \ + CONTEXT_DIR=$(REPO_ROOT) \ + ACTION=--load + +retina-operator-image: ## build the retina operator image. 
+ echo "Building for $(PLATFORM)" + $(MAKE) container-$(CONTAINER_BUILDER) \ + PLATFORM=$(PLATFORM) \ + DOCKERFILE=operator/Dockerfile \ + REGISTRY=$(IMAGE_REGISTRY) \ + IMAGE=$(RETINA_OPERATOR_IMAGE) \ + VERSION=$(TAG) \ + TAG=$(RETINA_PLATFORM_TAG) \ + APP_INSIGHTS_ID=$(APP_INSIGHTS_ID) \ + CONTEXT_DIR=$(REPO_ROOT) \ + ACTION=--load + +kubectl-retina-image-push: ## push kubectl-retina container image. + $(MAKE) container-push \ + IMAGE=$(KUBECTL_RETINA_IMAGE) \ + TAG=$(RETINA_PLATFORM_TAG) + +retina-image-push: ## push the retina container image. + $(MAKE) container-push \ + IMAGE=$(RETINA_IMAGE) \ + TAG=$(RETINA_PLATFORM_TAG) + +retina-builder-image-push: ## push the retina builder container image. + $(MAKE) container-push \ + IMAGE=$(RETINA_BUILDER_IMAGE) \ + TAG=$(RETINA_PLATFORM_TAG) + +retina-tools-image-push: ## push the retina tools container image. + $(MAKE) container-push \ + IMAGE=$(RETINA_TOOLS_IMAGE) \ + TAG=$(RETINA_PLATFORM_TAG) + +retina-init-image-push: ## push the retina container image. + $(MAKE) container-push \ + IMAGE=$(RETINA_INIT_IMAGE) \ + TAG=$(RETINA_PLATFORM_TAG) + +retina-operator-image-push: ## push the retina container image. + $(MAKE) container-push \ + IMAGE=$(RETINA_OPERATOR_IMAGE) \ + TAG=$(RETINA_PLATFORM_TAG) + +retina-image-win: ## build the retina Windows container image. + $(MAKE) container-$(CONTAINER_BUILDER) \ + PLATFORM=windows/amd64 \ + DOCKERFILE=controller/Dockerfile.windows-$(YEAR) \ + REGISTRY=$(IMAGE_REGISTRY) \ + IMAGE=$(RETINA_IMAGE) \ + VERSION=$(TAG) \ + TAG=$(RETINA_PLATFORM_TAG) \ + CONTEXT_DIR=$(REPO_ROOT) \ + ACTION=--load + +retina-image-win-push: ## push the retina Windows container image. + $(MAKE) container-$(CONTAINER_BUILDER) \ + PLATFORM=windows/amd64 \ + DOCKERFILE=controller/Dockerfile.windows-$(YEAR) \ + REGISTRY=$(IMAGE_REGISTRY) \ + IMAGE=$(RETINA_IMAGE) \ + VERSION=$(TAG) \ + TAG=$(RETINA_PLATFORM_TAG) \ + CONTEXT_DIR=$(REPO_ROOT) \ + ACTION=--push + +proto-gen: ## generate protobuf code + docker build --platform=linux/amd64 \ + -t $(IMAGE_REGISTRY)/$(RETINA_PROTO_IMAGE):$(RETINA_PLATFORM_TAG) \ + -f controller/Dockerfile.proto . + docker run --rm --platform=linux/amd64 \ + --user $(shell id -u):$(shell id -g) \ + -v $(PWD):/app $(IMAGE_REGISTRY)/$(RETINA_PROTO_IMAGE):$(RETINA_PLATFORM_TAG) + +go-gen: ## run go generate at the repository root + docker build -t $(IMAGE_REGISTRY)/$(RETINA_GO_GEN_IMAGE):$(RETINA_PLATFORM_TAG) \ + --build-arg GOOS=$(GOOS) \ + --build-arg GOARCH=$(GOARCH) \ + -f controller/Dockerfile.gogen . + docker run --rm --user $(shell id -u):$(shell id -g) -v $(PWD):/app $(IMAGE_REGISTRY)/$(RETINA_GO_GEN_IMAGE):$(RETINA_PLATFORM_TAG) + +all-gen: ## generate all code + $(MAKE) proto-gen + $(MAKE) go-gen + +all-images-local: + $(MAKE) -j4 retina-builder-image retina-tools-image retina-operator-image kubectl-retina-image + $(MAKE) -j2 retina-image retina-init-image + +all-images-local-push: + $(MAKE) -j3 retina-builder-image-push retina-tools-image-push retina-operator-image-push + $(MAKE) -j3 retina-image-push retina-init-image-push kubectl-retina-image-push + +base-images-remove: + $(MAKE) -j2 retina-builder-image-remove retina-tools-image-remove + +# Build images locally. +# Don't use this in pipeline, we want to pull images from registry. 
+retina-image-local: + $(MAKE) -j2 retina-builder-image retina-tools-image + $(MAKE) retina-image + +retina-init-image-local: + $(MAKE) -j2 retina-builder-image retina-tools-image + $(MAKE) retina-init-image + +##@ Tests +# Make sure the layer has only one directory. +# the test DockerFile needs to build the scratch stage with all the output files +# and we will untar the archive and copy the files from scratch stage +retina-test-image: ## build the retina container image for testing. + $(MAKE) container-docker \ + PLATFORM=$(PLATFORM) \ + DOCKERFILE=./test/image/Dockerfile \ + REGISTRY=$(IMAGE_REGISTRY) \ + IMAGE=$(RETINA_IMAGE) \ + CONTEXT_DIR=$(REPO_ROOT) \ + TAG=$(RETINA_PLATFORM_TAG) \ + + docker save -o archives.tar $(IMAGE_REGISTRY)/$(RETINA_IMAGE):$(RETINA_PLATFORM_TAG) && \ + mkdir -p archivelayers && \ + cp archives.tar archivelayers/archives.tar && \ + cd archivelayers/ && \ + pwd && \ + tar -xvf archives.tar && \ + cd `ls -d */` && \ + pwd && \ + tar -xvf layer.tar && \ + cp coverage.out ../../ + $(MAKE) retina-cc + +COVER_PKG ?= . + +retina-integration-test-image: # Build the retina container image for integration testing. + docker build \ + -t $(IMAGE_REGISTRY)/$(RETINA_INTEGRATION_TEST_IMAGE):$(RETINA_PLATFORM_TAG) \ + -f test/integration/Dockerfile.integration \ + --build-arg kubeconfig=$(HOME)/.kube/config \ + --build-arg ENABLE_POD_LEVEL=$(ENABLE_POD_LEVEL) \ + . + +retina-integration-docker-deploy: + docker rm -f retina-integ-container 2> /dev/null + docker run \ + -e RETINA_AGENT_IMAGE=$(IMAGE_REGISTRY)/$(RETINA_IMAGE):$(TAG) \ + -e ENABLE_POD_LEVEL=$(ENABLE_POD_LEVEL) \ + -v $(HOME)/.kube/config:/root/.kube/config \ + --name retina-integ-container \ + $(IMAGE_REGISTRY)/$(RETINA_INTEGRATION_TEST_IMAGE):$(RETINA_PLATFORM_TAG) \ + || true + docker cp retina-integ-container:/tmp/retina-integration-logs . + +retina-ut: $(ENVTEST) # Run unit tests. + go build -o test-summary ./test/utsummary/main.go + CGO_ENABLED=0 KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use -p path)" go test -tags=unit -coverprofile=coverage.out -v -json ./... | ./test-summary --progress --verbose + +retina-cc: # Code coverage. +# go generate ./... && go test -tags=unit -coverprofile=coverage.out.tmp ./... + cat coverage.out | grep -v "_bpf.go\|_bpfel_x86.go\|_bpfel_arm64.go|_generated.go|mock_" | grep -v mock > coveragenew.out + go tool cover -html coveragenew.out -o coverage.html + go tool cover -func=coveragenew.out -o coverageexpanded.out + ls -al + rm coverage.out + mv coveragenew.out coverage.out + if [ "$(GIT_CURRENT_BRANCH_NAME)" != "main" ]; then \ + python3 scripts/coverage/get_coverage.py; \ + go tool cover -func=mainbranchcoverage/coverage.out -o maincoverageexpanded.out; \ + python3 scripts/coverage/compare_cov.py; \ + fi; + +retina-integration: $(GINKGO) # Integration tests. + export ACK_GINKGO_RC=true && $(GINKGO) -keepGoing -tags=integration ./test/integration/... -v -progress -trace -cover -coverprofile=coverage.out + +retina-export-logs: # Export worker node logs. + mkdir kubernetes-logs + kubectl get pods -A -o wide > kubernetes-logs/pods.txt + docker cp retina-cluster-worker:/var/log kubernetes-logs + +##@ kind + +kind-setup: ## Deploy kind cluster. 
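+# Typical local flow (as exercised by .github/workflows/integration.yaml): make kind-setup, make kind-install,
+# then make retina-integration; tear the cluster down afterwards with make kind-clean.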
+ $(KIND) create cluster --name $(KIND_CLUSTER) --config ./test/kind/kind.yaml +# Install NPM + kubectl apply -f https://raw.githubusercontent.com/Azure/azure-container-networking/master/npm/azure-npm.yaml + sleep 5s + kubectl -n kube-system wait --for=condition=ready --timeout=120s pod -l k8s-app=azure-npm + +kind-clean: ## Delete kind cluster. + $(KIND) delete cluster --name $(KIND_CLUSTER) + +kind-load-image: ## Load local image to kind nodes. +# $(MAKE) retina-image + $(KIND) load docker-image --name $(KIND_CLUSTER) $(IMAGE_REGISTRY)/$(RETINA_IMAGE):$(RETINA_PLATFORM_TAG) + $(KIND) load docker-image --name $(KIND_CLUSTER) $(IMAGE_REGISTRY)/$(RETINA_OPERATOR_IMAGE):$(RETINA_PLATFORM_TAG) + $(KIND) load docker-image --name $(KIND_CLUSTER) $(IMAGE_REGISTRY)/$(RETINA_INIT_IMAGE):$(RETINA_PLATFORM_TAG) + +kind-install: kind-load-image # Install Retina in kind cluster. + helm install retina ./deploy/manifests/controller/helm/retina/ \ + --set image.repository=$(IMAGE_REGISTRY)/$(RETINA_IMAGE) \ + --set image.tag=$(RETINA_PLATFORM_TAG) \ + --set operator.repository=$(IMAGE_REGISTRY)/$(RETINA_OPERATOR_IMAGE) \ + --set operator.tag=$(RETINA_PLATFORM_TAG) \ + --set image.initRepository=$(IMAGE_REGISTRY)/$(RETINA_INIT_IMAGE) \ + --set image.pullPolicy=Never \ + --set logLevel=debug \ + --set os.windows=false \ + --namespace kube-system --dependency-update + sleep 5s +# Wait for retina agent to be ready. + kubectl -n kube-system wait --for=condition=ready --timeout=120s pod -l app=retina +# Port forward the retina api server. + kubectl -n kube-system port-forward svc/retina-svc 8889:10093 2>&1 >/dev/null & + +kind-uninstall: # Uninstall Retina from kind cluster. + helm uninstall retina -n kube-system + +## Reusable targets for building multiplat container image manifests. + +IMAGE_ARCHIVE_DIR ?= $(shell pwd) + +manifest-create: # util target to compose multiarch container manifests from platform specific images. + cat /usr/share/containers/containers.conf + $(CONTAINER_BUILDER) manifest create $(IMAGE_REGISTRY)/$(IMAGE):$(TAG) + for PLATFORM in $(PLATFORMS); do $(MAKE) manifest-add PLATFORM=$$PLATFORM IMAGE=$(IMAGE) TAG=$(TAG); done + +manifest-add: + if [ "$(PLATFORM)" = "windows/amd64/2022" ]; then \ + echo "Adding windows/amd64/2022"; \ + $(CONTAINER_BUILDER) manifest add --os-version=$(WINVER2022) --os=windows $(IMAGE_REGISTRY)/$(IMAGE):$(TAG) docker://$(IMAGE_REGISTRY)/$(IMAGE):windows-ltsc2022-amd64-$(TAG); \ + elif [ "$(PLATFORM)" = "windows/amd64/2019" ]; then \ + echo "Adding windows/amd64/2019"; \ + $(CONTAINER_BUILDER) manifest add --os-version=$(WINVER2019) --os=windows $(IMAGE_REGISTRY)/$(IMAGE):$(TAG) docker://$(IMAGE_REGISTRY)/$(IMAGE):windows-ltsc2019-amd64-$(TAG); \ + else \ + echo "Adding $(PLATFORM)"; \ + $(CONTAINER_BUILDER) manifest add $(IMAGE_REGISTRY)/$(IMAGE):$(TAG) docker://$(IMAGE_REGISTRY)/$(IMAGE):$(subst /,-,$(PLATFORM))-$(TAG); \ + fi; + +manifest-push: # util target to push multiarch container manifest. + $(CONTAINER_BUILDER) manifest inspect $(IMAGE_REGISTRY)/$(IMAGE):$(TAG) + $(CONTAINER_BUILDER) manifest push --all $(IMAGE_REGISTRY)/$(IMAGE):$(TAG) docker://$(IMAGE_REGISTRY)/$(IMAGE):$(TAG) + +manifest-skopeo-archive: # util target to export tar archive of multiarch container manifest. + skopeo copy --all docker://$(IMAGE_REGISTRY)/$(IMAGE):$(TAG) oci-archive:$(IMAGE_ARCHIVE_DIR)/$(IMAGE)-$(TAG).tar + +## Build specific multiplat images. + +retina-builder-manifest-create: ## build retina multiplat container manifest. 
+ $(MAKE) manifest-create \ + PLATFORMS="$(PLATFORMS)" \ + IMAGE=$(RETINA_BUILDER_IMAGE) \ + TAG=$(TAG) + +retina-builder-manifest-push: ## push retina multiplat container manifest + $(MAKE) manifest-push \ + IMAGE=$(RETINA_BUILDER_IMAGE) \ + TAG=$(TAG) + +retina-builder-skopeo-archive: ## export tar archive of retina multiplat container manifest. + $(MAKE) manifest-skopeo-archive \ + IMAGE=$(RETINA_BUILDER_IMAGE) \ + TAG=$(TAG) + +retina-tools-manifest-create: ## build retina multiplat container manifest. + $(MAKE) manifest-create \ + PLATFORMS="$(PLATFORMS)" \ + IMAGE=$(RETINA_TOOLS_IMAGE) \ + TAG=$(TAG) + +retina-tools-manifest-push: ## push retina multiplat container manifest + $(MAKE) manifest-push \ + IMAGE=$(RETINA_TOOLS_IMAGE) \ + TAG=$(TAG) + +retina-tools-skopeo-archive: ## export tar archive of retina multiplat container manifest. + $(MAKE) manifest-skopeo-archive \ + IMAGE=$(RETINA_TOOLS_IMAGE) \ + TAG=$(TAG) + +retina-init-manifest-create: ## build retina multiplat container manifest. + $(MAKE) manifest-create \ + PLATFORMS="$(PLATFORMS)" \ + IMAGE=$(RETINA_INIT_IMAGE) \ + TAG=$(TAG) + +retina-init-manifest-push: ## push retina multiplat container manifest + $(MAKE) manifest-push \ + IMAGE=$(RETINA_INIT_IMAGE) \ + TAG=$(TAG) + +retina-init-skopeo-archive: ## export tar archive of retina multiplat container manifest. + $(MAKE) manifest-skopeo-archive \ + IMAGE=$(RETINA_INIT_IMAGE) \ + TAG=$(TAG) + +retina-agent-manifest-create: ## build retina multiplat container manifest. + $(MAKE) manifest-create \ + PLATFORMS="$(PLATFORMS)" \ + IMAGE=$(RETINA_IMAGE) \ + TAG=$(TAG) + +retina-agent-manifest-push: ## push retina multiplat container manifest + $(MAKE) manifest-push \ + IMAGE=$(RETINA_IMAGE) \ + TAG=$(TAG) + +retina-agent-skopeo-archive: ## export tar archive of retina multiplat container manifest. + $(MAKE) manifest-skopeo-archive \ + IMAGE=$(RETINA_IMAGE) \ + TAG=$(TAG) + +retina-operator-manifest-create: ## build retina multiplat container manifest. + $(MAKE) manifest-create \ + PLATFORMS="$(PLATFORMS)" \ + IMAGE=$(RETINA_OPERATOR_IMAGE) \ + TAG=$(TAG) + +retina-operator-manifest-push: ## push retina multiplat container manifest + $(MAKE) manifest-push \ + IMAGE=$(RETINA_OPERATOR_IMAGE) \ + TAG=$(TAG) + +retina-operator-skopeo-archive: ## export tar archive of retina multiplat container manifest. + $(MAKE) manifest-skopeo-archive \ + IMAGE=$(RETINA_OPERATOR_IMAGE) \ + TAG=$(TAG) + +kubectl-retina-manifest-create: ## build kubectl plugin multiplat container manifest. + $(MAKE) manifest-create \ + PLATFORMS="$(PLATFORMS)" \ + IMAGE=$(KUBECTL_RETINA_IMAGE) \ + TAG=$(TAG) + +kubectl-retina-manifest-push: ## push kubectl plugin multiplat container manifest. + $(MAKE) manifest-push \ + IMAGE=$(KUBECTL_RETINA_IMAGE) \ + TAG=$(TAG) + +kubectl-retina-skopeo-archive: ## export tar archive of retina kubectl plugin container manifest. 
+ $(MAKE) manifest-skopeo-archive \ + IMAGE=$(KUBECTL_RETINA_IMAGE) \ + TAG=$(TAG) + + +.PHONY: manifests +manifests: + cd crd && make manifests && make generate + +# basic/node-level mode +helm-install: manifests + helm install retina ./deploy/manifests/controller/helm/retina/ \ + --namespace kube-system \ + --set image.repository=$(IMAGE_REGISTRY)/$(RETINA_IMAGE) \ + --set image.tag=$(RETINA_PLATFORM_TAG) \ + --set image.initRepository=$(IMAGE_REGISTRY)/$(RETINA_INIT_IMAGE) \ + --set image.pullPolicy=Always \ + --set logLevel=info \ + --set os.windows=true \ + --set operator.enabled=false \ + --set enabledPlugin_linux="[\"dropreason\"\,\"packetforward\"\,\"linuxutil\"\,\"dns\"]" + +helm-install-with-operator: manifests + helm install retina ./deploy/manifests/controller/helm/retina/ \ + --namespace kube-system \ + --set image.repository=$(IMAGE_REGISTRY)/$(RETINA_IMAGE) \ + --set image.tag=$(RETINA_PLATFORM_TAG) \ + --set image.initRepository=$(IMAGE_REGISTRY)/$(RETINA_INIT_IMAGE) \ + --set image.pullPolicy=Always \ + --set logLevel=info \ + --set os.windows=true \ + --set operator.enabled=true \ + --set operator.enableRetinaEndpoint=true \ + --set operator.tag=$(RETINA_PLATFORM_TAG) \ + --set operator.repository=$(IMAGE_REGISTRY)/$(RETINA_OPERATOR_IMAGE) \ + --skip-crds \ + --set enabledPlugin_linux="[\"dropreason\"\,\"packetforward\"\,\"linuxutil\"\,\"dns\"]" + +# advanced/pod-level mode with scale limitations, where metrics are aggregated by source and destination Pod +helm-install-advanced-remote-context: manifests + helm install retina ./deploy/manifests/controller/helm/retina/ \ + --namespace kube-system \ + --set image.repository=$(IMAGE_REGISTRY)/$(RETINA_IMAGE) \ + --set image.tag=$(RETINA_PLATFORM_TAG) \ + --set image.initRepository=$(IMAGE_REGISTRY)/$(RETINA_INIT_IMAGE) \ + --set image.pullPolicy=Always \ + --set logLevel=info \ + --set os.windows=true \ + --set operator.enabled=true \ + --set operator.enableRetinaEndpoint=true \ + --set operator.tag=$(RETINA_PLATFORM_TAG) \ + --set operator.repository=$(IMAGE_REGISTRY)/$(RETINA_OPERATOR_IMAGE) \ + --skip-crds \ + --set enabledPlugin_linux="[\"dropreason\"\,\"packetforward\"\,\"linuxutil\"\,\"dns\",\"packetparser\"\]" \ + --set enablePodLevel=true \ + --set remoteContext=true + +# advanced/pod-level mode designed for scale, where metrics are aggregated by "local" Pod (source for outgoing traffic, destination for incoming traffic) +helm-install-advanced-local-context: manifests + helm install retina ./deploy/manifests/controller/helm/retina/ \ + --namespace kube-system \ + --set image.repository=$(IMAGE_REGISTRY)/$(RETINA_IMAGE) \ + --set image.tag=$(RETINA_PLATFORM_TAG) \ + --set image.initRepository=$(IMAGE_REGISTRY)/$(RETINA_INIT_IMAGE) \ + --set image.pullPolicy=Always \ + --set logLevel=info \ + --set os.windows=true \ + --set operator.enabled=true \ + --set operator.enableRetinaEndpoint=true \ + --set operator.tag=$(RETINA_PLATFORM_TAG) \ + --set operator.repository=$(IMAGE_REGISTRY)/$(RETINA_OPERATOR_IMAGE) \ + --skip-crds \ + --set enabledPlugin_linux="[\"dropreason\"\,\"packetforward\"\,\"linuxutil\"\,\"dns\",\"packetparser\"\]" \ + --set enablePodLevel=true \ + --set enableAnnotations=true \ + --set bypassLookupIPOfInterest=false + +helm-uninstall: + helm uninstall retina -n kube-system + +.PHONY: docs +docs: + echo $(PWD) + docker run -it -p 3000:3000 -v $(PWD):/retina -w /retina/ node:20-alpine ./site/start-dev.sh + +.PHONY: docs-pod +docs-prod: + docker run -i -p 3000:3000 -v $(PWD):/retina -w /retina/ 
node:20-alpine npm install --prefix site && npm run build --prefix site + +kapinger-image: ## build the retina container image. + echo "Building for $(PLATFORM)" + $(MAKE) container-$(CONTAINER_BUILDER) \ + PLATFORM=$(PLATFORM) \ + DOCKERFILE=hack/tools/kapinger/Dockerfile \ + REGISTRY=$(IMAGE_REGISTRY) \ + IMAGE=$(KAPINGER_IMAGE) \ + VERSION=$(TAG) \ + TAG=$(RETINA_PLATFORM_TAG) \ + APP_INSIGHTS_ID=$(APP_INSIGHTS_ID) \ + CONTEXT_DIR=$(REPO_ROOT) \ + ACTION=--load + +kapinger-image-push: + $(MAKE) container-push \ + IMAGE=$(KAPINGER_IMAGE) \ + TAG=$(RETINA_PLATFORM_TAG) + +kapinger-manifest-create: + $(MAKE) manifest-create \ + PLATFORMS="$(PLATFORMS)" \ + IMAGE=$(KAPINGER_IMAGE) \ + TAG=$(TAG) + +kapinger-manifest-push: + $(MAKE) manifest-push \ + IMAGE=$(KAPINGER_IMAGE) \ + TAG=$(TAG) + +kapinger-image-win-push: + $(MAKE) container-$(CONTAINER_BUILDER) \ + PLATFORM=windows/amd64 \ + DOCKERFILE=hack/tools/kapinger/Dockerfile.windows \ + REGISTRY=$(IMAGE_REGISTRY) \ + IMAGE=$(KAPINGER_IMAGE) \ + VERSION=$(TAG) \ + TAG=$(RETINA_PLATFORM_TAG) \ + CONTEXT_DIR=$(REPO_ROOT) \ + ACTION=--push + +kapinger-skopeo-archive: + $(MAKE) manifest-skopeo-archive \ + IMAGE=$(KAPINGER_IMAGE) \ + TAG=$(TAG) diff --git a/README.md b/README.md new file mode 100644 index 000000000..3fb46341c --- /dev/null +++ b/README.md @@ -0,0 +1,61 @@ +# Retina + +## Overview + +Retina is a cloud and vendor agnostic container workload observability platform which helps customers with enterprise grade DevOps, SecOps and compliance use cases. It is designed to cater to cluster network administrators, cluster security administrators and DevOps engineers by providing a centralized platform for monitoring application and network health, and security. Retina is capable of collecting telemetry data from multiple sources and aggregating it into a single time-series database. Retina is also capable of sending data to multiple destinations, such as Prometheus, Azure Monitor, and other vendors, and visualizing the data in a variety of ways, like Grafana, Azure Monitor, Azure log analytics, and more. + +## Documentation + +See [retina.sh](http://retina.sh) for more information and examples. + +## Capabilities + +Retina is currently supported in AKS. It has two major features: + +### Metrics + +[Read more](docs/metrics/intro.md) + +### Quick Install Guide + +1. Create a Kubernetes cluster with a minimum of 2 nodes. Retina supports Linux (Ubuntu) and Windows (2019 and 2022) nodes. +2. Follow steps in [Using Managed Prometheus and Grafana](https://retina.sh/docs/metrics/managed-retina) + +### Captures + +[Read more](docs/captures/readme.md) + +## Contributing + +[Read more](docs/contributing/readme.md) + +This project welcomes contributions and suggestions. Most contributions require you to agree to a +Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us +the rights to use your contribution. For details, visit . + +When you submit a pull request, a CLA bot will automatically determine whether you need to provide +a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions +provided by the bot. You will only need to do this once across all repos using our CLA. + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or +contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. + +## Trademarks + +This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft +trademarks or logos is subject to and must follow [Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general). +Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. +Any use of third-party trademarks or logos are subject to those third-party's policies. + +## License + +See [LICENSE](LICENSE). + +## Code of Conduct + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. + +## Contact + +"Retina Devs" diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 000000000..b3c89efc8 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,41 @@ + + +## Security + +Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet) and [Xamarin](https://github.com/xamarin). + +If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/security.md/definition), please report it to us as described below. + +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/security.md/msrc/create-report). + +If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/security.md/msrc/pgp). + +You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc). + +Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: + + * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) + * Full paths of source file(s) related to the manifestation of the issue + * The location of the affected source code (tag/branch/commit or direct URL) + * Any special configuration required to reproduce the issue + * Step-by-step instructions to reproduce the issue + * Proof-of-concept or exploit code (if possible) + * Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. 
+ +If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/security.md/msrc/bounty) page for more details about our active programs. + +## Preferred Languages + +We prefer all communications to be in English. + +## Policy + +Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/security.md/cvd). + + diff --git a/SUPPORT.md b/SUPPORT.md new file mode 100644 index 000000000..2b2b9180b --- /dev/null +++ b/SUPPORT.md @@ -0,0 +1,11 @@ +# Support + +## How to file issues and get help + +This project uses GitHub Issues to track bugs and feature requests. Please search the existing +issues before filing new issues to avoid duplicates. For new issues, file your bug or +feature request as a new Issue. + +## Microsoft Support Policy + +Support for this project is limited to the resources listed above. diff --git a/captureworkload/README.md b/captureworkload/README.md new file mode 100644 index 000000000..5b2b3c2ff --- /dev/null +++ b/captureworkload/README.md @@ -0,0 +1,22 @@ +# Retina Capture + +This directory serves as the entrypoint to Retina Capture: a wrapper to handle lifecyle of packet capture workload + +## Build + +```bash + make retina-capture-workload +``` + +## Test + +```bash +HOSTPATH="/tmp/" CAPTURE_NAME="out" CAPTURE_DURATION="10s" output/linux_amd64/retina/captureworkload +``` + +## Release + +```bash + make retina-image + make retina-image-push +``` diff --git a/captureworkload/main.go b/captureworkload/main.go new file mode 100644 index 000000000..52e22a378 --- /dev/null +++ b/captureworkload/main.go @@ -0,0 +1,60 @@ +package main + +import ( + "os" + "os/signal" + "syscall" + + "go.uber.org/zap" + + "github.com/microsoft/retina/pkg/capture" + captureConstants "github.com/microsoft/retina/pkg/capture/constants" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/telemetry" +) + +var ( + applicationInsightsID string //nolint // aiMetadata is set in Makefile + version string //nolint +) + +func main() { + lOpts := log.GetDefaultLogOpts() + lOpts.ApplicationInsightsID = applicationInsightsID + log.SetupZapLogger(lOpts) + l := log.Logger().Named("captureworkload") + l.Info("Start to capture network traffic") + l.Info("Version: ", zap.String("version", version)) + l.Info("Start telemetry with App Insights ID: ", zap.String("applicationInsightsID", applicationInsightsID)) + + telemetry.InitAppInsights(applicationInsightsID, version) + defer telemetry.ShutdownAppInsights() + + // Create channel to listen for signals. + sigChan := make(chan os.Signal, 1) + // Notify sigChan for SIGTERM. 
+ signal.Notify(sigChan, syscall.SIGTERM) + + tel := telemetry.NewAppInsightsTelemetryClient("retina-capture", map[string]string{ + "version": version, + telemetry.PropertyApiserver: os.Getenv(captureConstants.ApiserverEnvKey), + }) + + cm := capture.NewCaptureManager(l, tel) + + defer func() { + if err := cm.Cleanup(); err != nil { + l.Error("Failed to cleanup network capture", zap.Error(err)) + } + }() + srcDir, err := cm.CaptureNetwork(sigChan) + if err != nil { + l.Error("Failed to capture network traffic", zap.Error(err)) + os.Exit(1) + } + if err := cm.OutputCapture(srcDir); err != nil { + l.Error("Failed to output network traffic", zap.Error(err)) + } + l.Info("Done for capturing network traffic") +} diff --git a/cli/Dockerfile.kubectl-retina b/cli/Dockerfile.kubectl-retina new file mode 100644 index 000000000..74a2935c8 --- /dev/null +++ b/cli/Dockerfile.kubectl-retina @@ -0,0 +1,20 @@ +FROM --platform=linux/amd64 mcr.microsoft.com/oss/go/microsoft/golang:1.21 as builder + +# Cache go modules so they won't be downloaded at each build +COPY go.mod go.sum /retina/ +RUN cd /retina && go mod download + +# Build args +ARG VERSION +ARG APP_INSIGHTS_ID + +ENV GOOS=linux +ENV GOARCH=amd64 + +# This COPY is limited by .dockerignore +COPY ./ /retina +RUN cd /retina && make kubectl-retina-binary-${GOOS}-${GOARCH} +RUN mv /retina/output/kubectl-retina/kubectl-retina-${GOOS}-${GOARCH} /retina/output/kubectl-retina/kubectl-retina + +FROM --platform=linux/amd64 mcr.microsoft.com/mirror/docker/library/alpine:3.14 +COPY --from=builder /retina/output/kubectl-retina/kubectl-retina /bin/kubectl-retina diff --git a/cli/README.md b/cli/README.md new file mode 100644 index 000000000..d638fa6ad --- /dev/null +++ b/cli/README.md @@ -0,0 +1,17 @@ +# Retina CLI + +This directory serves as the entrypoint to Retina CLI + +## Tools + +### trace + +Trace subcommand retrieve status or results from Retina. + +### Config + +Config subcommand configures retina CLI. + +### Capture + +Capture subcommand allows the user to collect networking resources on a Kubernetes cluster. diff --git a/cli/cmd/ai/analyze.go b/cli/cmd/ai/analyze.go new file mode 100644 index 000000000..f9b65d8c6 --- /dev/null +++ b/cli/cmd/ai/analyze.go @@ -0,0 +1,144 @@ +package ai + +import ( + "fmt" + "strings" + + "github.com/microsoft/retina/cli/cmd/parse" + "github.com/spf13/cobra" +) + +const ( + defaultPcapQuestion = "Can you summarize these connections?" + defaultTraceQuestion = "which pods are dropping packets?" 
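+	// Request tuning for the analyze command: analyzeTemp and analyzeMaxTokens are passed to NewGPT below
+	// as the sampling temperature and response token limit (assumed from the NewGPT argument names/positions).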
+ analyzeTemp = 0.3 + analyzeMaxTokens = 800 +) + +var ( + ErrMustSpecifyFile = fmt.Errorf("must specify either --pcap-file or --trace-file") + ErrCantSupportBothFiles = fmt.Errorf("can't support both --pcap-file and --trace-file") + ErrUnsupportedTraceFile = fmt.Errorf("trace files are not yet supported") +) + +func Analyze() *cobra.Command { + cmd := &cobra.Command{ + Use: "analyze", + Short: "answers any question about a capture file", + RunE: func(cmd *cobra.Command, args []string) error { + gpt := NewGPT(analyzeTemp, 0.95, 0, 0, analyzeMaxTokens) + if err := gpt.Init(); err != nil { + return err + } + + prompt, err := analyzePrompt(cmd, args) + if err != nil { + return err + } + + debug, _ := cmd.Flags().GetBool("debug") + if debug { + fmt.Println("Prompt:", prompt) + } + + resp, err := gpt.Ask(prompt) + if err != nil { + return err + } + + fmt.Printf("Answer: ") + fmt.Println(resp) + return nil + }, + } + + cmd.Flags().StringP("question", "q", "", "question to ask") + cmd.Flags().String("pcap-file", "", "pcap file to parse") + cmd.Flags().String("trace-file", "", "trace file to parse") + cmd.Flags().Bool("json", false, "use json format for the parsed result sent to GPT") + cmd.Flags().Bool("numeric", false, "when true, will NOT enrich IPs with Node/Pod/Service information") + cmd.Flags().Bool("show-all", false, "show result of parsing the pcap or trace file") + cmd.Flags().Bool("debug", false, "show verbose logging") + return cmd +} + +func analyzePrompt(cmd *cobra.Command, args []string) (string, error) { + question, _ := cmd.Flags().GetString("question") + pcapFile, _ := cmd.Flags().GetString("pcap-file") + traceFile, _ := cmd.Flags().GetString("trace-file") + isJson, _ := cmd.Flags().GetBool("json") + numeric, _ := cmd.Flags().GetBool("numeric") + showAll, _ := cmd.Flags().GetBool("show-all") + debug, _ := cmd.Flags().GetBool("debug") + + if pcapFile == "" && traceFile == "" { + return "", ErrMustSpecifyFile + } + + if pcapFile != "" && traceFile != "" { + return "", ErrCantSupportBothFiles + } + + if question == "" { + if pcapFile != "" { + question = defaultPcapQuestion + } + + if traceFile != "" { + question = defaultTraceQuestion + } + + fmt.Println("no question provided, using default question:", question) + fmt.Println() + + } else { + fmt.Println("Question:", question) + } + + cfg := &parse.ParserConfig{ + Format: "pretty", + Enrich: !numeric, + Debug: false, + IPIncludeFilter: []string{ + // "10.224.0.252", + // "10.224.1.122", + // "10.224.0.216", + }, + } + + if isJson { + cfg.Format = "json" + } + + var data string + if pcapFile != "" { + p := parse.NewPcapParser() + if err := p.Parse(pcapFile, cfg); err != nil { + return "", err + } + + data = p.Result(cfg) + } else { + t := parse.NewTraceParser() + if err := t.Parse(traceFile, cfg); err != nil { + return "", fmt.Errorf("error parsing trace file: %v", err) + } + + data = t.Result(cfg) + } + + if debug { + fmt.Println("Parsed data:", data) + } + + if showAll { + fmt.Println(data) + } + + sb := strings.Builder{} + sb.WriteString("You are an expert at troubleshooting network issues based on 5-tuple connection data on UDP and TCP connections. Read the following summary of all connections:\n") + sb.WriteString(data) + sb.WriteString("\nBased on the above summary of all connections, provide insights to the customer's question. 
Your response should be as brief as possible while still providing both a clear explanation and potential reasons.\n\n") + sb.WriteString(fmt.Sprintf("Customer's Question: %s\nInsights: ", question)) + return sb.String(), nil +} diff --git a/cli/cmd/ai/capture.go b/cli/cmd/ai/capture.go new file mode 100644 index 000000000..027cc407c --- /dev/null +++ b/cli/cmd/ai/capture.go @@ -0,0 +1,133 @@ +package ai + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" +) + +func Capture() *cobra.Command { + cmd := &cobra.Command{ + Use: "capture", + Short: "converts natural language into a \"kubectl-retina capture create\" command", + RunE: func(cmd *cobra.Command, args []string) error { + gpt := NewGPT(0.3, 0.95, 0, 0, 100) + if err := gpt.Init(); err != nil { + return err + } + + prompt, err := capturePrompt(cmd, args) + if err != nil { + return err + } + + resp, err := gpt.Ask(prompt) + if err != nil { + return err + } + + fmt.Println(resp) + return nil + }, + } + + return cmd +} + +/* +Example +User Input: capture traffic on Windows nodes with a maximum file size of 10MB +GPT Answer: kubectl retina capture --node-selectors=kubernetes.io/os=windows --max-size=10MB +*/ +func capturePrompt(cmd *cobra.Command, args []string) (string, error) { + sb := strings.Builder{} + sb.WriteString("you are a kubernetes expert, use the following documentation to learn about the kubectl retina capture command:") + // err := cmd.Root().GenBashCompletion() + // err := cmd.Root().GenBashCompletion(io.Writer(&sb)) + // if err != nil { + // return "", err + // } + // err = doc.GenMarkdownTree(cmd.Root(), "./") + // if err != nil { + // return "", err + // } + + sb.WriteString(`capture create --help + create a Retina Capture + + Usage: + capture create [flags] + + Examples: + # Capture network packets on the node selected by node names and copy the artifacts to the node host path /mnt/capture + kubectl retina capture create --host-path /mnt/capture --namespace capture --node-names "aks-nodepool1-41844487-vmss000000,aks-nodepool1-41844487-vmss000001" + + # Capture network packets on the coredns pods determined by pod-selectors and namespace-selectors + kubectl retina capture create --host-path /mnt/capture --namespace capture --pod-selectors="k8s-app=kube-dns" --namespace-selectors="kubernetes.io/metadata.name=kube-system" + + # Capture network packets on nodes with label "agentpool=agentpool" and "version:v20" + kubectl retina capture create --host-path /mnt/capture --node-selectors="agentpool=agentpool,version:v20" + + # Capture network packets on nodes using node-selector with duration 10s + kubectl retina capture create --host-path=/mnt/capture --node-selectors="agentpool=agentpool" --duration=10s + + # Capture network packets on nodes using node-selector and upload the artifacts to blob storage with SAS URL https://testaccount.blob.core.windows.net/ + kubectl retina capture create --node-selectors="agentpool=agentpool" --blob-upload=https://testaccount.blob.core.windows.net/ + + Flags: + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation. 
+ --blob-upload string Blob SAS URL with write permission to upload capture files + --cache-dir string Default cache directory (default "/home/azureuser/.kube/cache") + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --debug When debug is true, a customized retina-agent image, determined by the environment variable RETINA_AGENT_IMAGE, is set + --disable-compression If true, opt-out of response compression for all requests to the server + --duration duration Duration of capturing packets (default 1m0s) + --exclude-filter string A comma-separated list of IP:Port pairs that are excluded from capturing network packets. Supported formats are IP:Port, IP, Port, *:Port, IP:* + -h, --help help for create + --host-path string HostPath of the node to store the capture files + --include-filter string A comma-separated list of IP:Port pairs that are used to filter capture network packets. Supported formats are IP:Port, IP, Port, *:Port, IP:* + --include-metadata If true, collect static network metadata into capture file (default true) + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --job-num-limit int The maximum number of jobs can be created for each capture. 0 means no limit + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --max-size int Limit the capture file to MB in size which works only for Linux (default 100) + -n, --namespace string Namespace to host capture job (default "default") + --namespace-selectors string A comma-separated list of namespace labels together with pod-selector to select pods on which the network capture will be performed. By default, the pod namespace is specified by the flag namespace + --no-wait Do not wait for the long-running capture job to finish (default true) + --node-names string A comma-separated list of node names to select nodes on which the network capture will be performed + --node-selectors string A comma-separated list of node labels to select nodes on which the network capture will be performed + --packet-size int Limits the each packet to bytes in size which works only for Linux + --pod-selectors string A comma-separated list of pod labels together with namespace-selector to select pods on which the network capture will be performed + --pvc string PersistentVolumeClaim under the specified or default namespace to store capture files + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --tcpdump-filter string Raw tcpdump flags which works only for Linux + --tls-server-name string Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use`) + + // sb.WriteString(". 
Now generate a kubectl-retina capture command for the following question, and only give the command without any description:") + sb.WriteString("\nGiven the above auto-completion script, provide the command for the following task. Your response should contain only the command.") + sb.WriteString("\n\nTASK: all windows Pods\n") + sb.WriteString("RESPONSE: kubectl retina capture create --node-selectors=kubernetes.io/os=windows") + sb.WriteString("\n\nTASK: pods with label key1=val1 or key2=val2 in the default namespace\n") + sb.WriteString("RESPONSE: kubectl retina capture create --namespace-selectors=kubernetes.io/metadata.name=default --pod-selectors=pod=a,pod=b") + + sb.WriteString("\n\nTASK: ") + + for _, str := range args { + sb.WriteString(str) + sb.WriteString(" ") + } + + sb.WriteString("\nRESPONSE: ") + + return sb.String(), nil +} diff --git a/cli/cmd/ai/chat.go b/cli/cmd/ai/chat.go new file mode 100644 index 000000000..92364d0f0 --- /dev/null +++ b/cli/cmd/ai/chat.go @@ -0,0 +1,870 @@ +package ai + +import ( + "bufio" + "fmt" + "net/url" + "os" + "os/exec" + "strings" + "time" + + "github.com/microsoft/retina/cli/cmd/capture" + retinav1alpha1 "github.com/microsoft/retina/crd/api/v1alpha1" + "github.com/spf13/cobra" + "golang.org/x/term" + "k8s.io/apimachinery/pkg/util/wait" + utilexec "k8s.io/utils/exec" +) + +const ( + debug = true + skipCapture = false + captureIsActuallyTrace = true + + // e.g. 5 --> "#####" + + Retina = "RETINA🔍" + User = "USER🕵️" +) + +var ( + allowedToPipeToCommands = []string{ + "wc", + "grep", + "sort", + "uniq", + "head", + "tail", + "awk", + "sed", + "tr", + "jq", + } + + // don't allow these characters in the command + badChars = []string{ + "&", + "||", + ";", + ">", + "<", + } +) + +func chatDivider() string { + width, _, err := term.GetSize(0) + if err != nil { + fmt.Println(err) + } + return strings.Repeat("#", width) +} + +func responseDivider() string { + width, _, err := term.GetSize(0) + if err != nil { + fmt.Println(err) + } + return strings.Repeat("-", width) +} + +type ChatAgent struct { + AutoRun bool + Captured bool + Traced bool + CaptureFile string + TraceFile string + Exec utilexec.Interface + *Chat +} + +// func chatDivider() string { + +// width, _, err := term.GetSize(0) + +// if err != nil { + +// fmt.Println(err) + +// } + +// return strings.Repeat("#", width) + +// } + +// func responseDivider() string { + +// width, _, err := term.GetSize(0) + +// if err != nil { + +// fmt.Println(err) + +// } + +// return strings.Repeat("-", width) + +// } + +func NewChatAgent(gpt *GPT) *ChatAgent { + return &ChatAgent{ + Exec: utilexec.New(), + Chat: &Chat{ + GPT: gpt, + }, + } +} + +func (c *ChatAgent) Loop(cmd *cobra.Command) error { + fmt.Println(chatDivider()) + fmt.Println() + fmt.Printf("%s: Hello!👋 I'm an AI tool that can help you explore your AKS Cluster and answer your questions.\n\n", Retina) + // fmt.Println("NOTE: This agent will be able to run \"kubectl get\" commands as well as all Retina commands. It will also be able to pipe \"kubectl get\" commands into the following commands:") + // for _, command := range allowedToPipeToCommands { + // fmt.Println(command) + // } + fmt.Println() + fmt.Println("There will be a prompt before running each command.") + fmt.Print("Would you like to auto-run commands instead? 
") + val, err := c.ReadExpectedValues("y", "n") + if err != nil { + return err + } + + c.AutoRun = val == "y" + fmt.Println() + fmt.Println("Okay, let's get started!") + fmt.Println(chatDivider()) + fmt.Println() + + c.PrintAndStoreMessage(Retina, "What would you like to know?") + fmt.Println(responseDivider()) + + // chat loop + var userInput string + for { + // prompt user input + userInput, err = c.ReadAndStoreUserInput() + if err != nil { + return err + } + fmt.Println(responseDivider()) + + // triage the ask + prompt := c.chatTriagePrompt(userInput) + + b := wait.Backoff{ + Steps: 5, + Duration: 1 * time.Second, + Factor: 2, + Jitter: 0, + } + + var resp string + err = wait.ExponentialBackoff(b, func() (bool, error) { + resp, err = c.GPT.Ask(prompt) + if err != nil { + fmt.Println("Retrying ChatGPT prompt...") + return false, err + } + if resp == "" { + fmt.Println("Retrying ChatGPT prompt...") + return false, nil + } + return true, nil + }) + // resp, err := c.GPT.Ask(prompt) + // if err != nil { + // return err + // } + + if debug { + fmt.Println("[DEBUG] [TRIAGE]", resp) + } + + // execute and respond to triaged task + if strings.Contains(resp, "UNKNOWN-TASK") { + c.PrintAndStoreMessage(Retina, "Cannot perform this action.") + fmt.Println(responseDivider()) + continue + } + + // 1. Kubectl get command + if strings.Contains(resp, "KUBECTL-BASH-COMMAND") { + prompt := c.kubectlBashPrompt(userInput) + resp, err := c.GPT.Ask(prompt) + if err != nil { + return err + } + + // execute kubectl command + resp = strings.TrimSpace(resp) + c.PrintAndStoreMessage(Retina, fmt.Sprintf("Will execute below command:\n%s", resp)) + splitResp := strings.Split(resp, " ") + isBadChar := false + for _, s := range badChars { + if strings.Contains(resp, s) { + isBadChar = true + fmt.Println("bad char:", s) + break + } + } + + if isBadChar || len(splitResp) < 2 || splitResp[0] != "kubectl" { + return fmt.Errorf("invalid kubectl/bash command: %s", resp) + } + + jqIndex := strings.Index(resp, "| jq ") + jqIndexEnd := jqIndex + 1 + jqExists := jqIndex != -1 + jqCount := 0 + for i := 5; i < len(resp); i++ { + if resp[i-5:i] == "| jq " { + jqCount++ + } + } + if jqCount > 1 { + return fmt.Errorf("only one jq command is allowed") + } + if jqExists { + // jq strings are delineated by apostrophes + j := jqIndex + 5 + if resp[j] != '\'' { + return fmt.Errorf("unsure how to parse jq string") + } + success := false + j++ + for ; j < len(resp); j++ { + if resp[j] == '\'' { + jqIndexEnd = j + success = true + break + } + } + if !success { + return fmt.Errorf("reached end of jq string. unsure how to parse") + } + } + + pipeIndices := []int{} + for i := 0; i < len(resp); i++ { + if resp[i] == '|' { + pipeIndices = append(pipeIndices, i) + } + } + commandsWithArgs := strings.Split(resp, "|") + for i, commandWithArgs := range commandsWithArgs { + if i == 0 { + // kubectl get ... | + continue + } + + if jqExists && jqIndex <= pipeIndices[i-1] && pipeIndices[i-1] <= jqIndexEnd { + // don't check jq command + continue + } + + command := strings.Split(strings.TrimSpace(commandWithArgs), " ")[0] + + allowed := false + for _, allowedCommand := range allowedToPipeToCommands { + if command == allowedCommand { + allowed = true + break + } + } + if !allowed { + return fmt.Errorf("command from GPT is not allowed: %s", command) + } + } + + if !c.AutoRun { + fmt.Print("Execute? 
") + val, err = c.ReadExpectedValues("y", "n") + if err != nil { + return err + } + + if val == "n" { + c.PrintAndStoreMessage(Retina, "Aborting.") + fmt.Println(responseDivider()) + continue + } + } + + bashArgs := []string{"-c", resp} + fmt.Println(chatDivider()) + fmt.Println() + cmd := c.Exec.Command("bash", bashArgs...) + // cmd.SetEnv([]string{"RETINA_AGENT_IMAGE=acnpublic.azurecr.io/retina-agent:linux-amd64-v0.0.11-33-g34dfb20"}) + output, err := cmd.CombinedOutput() + if err != nil { + fmt.Println("failed:", err.Error()) + return fmt.Errorf("error executing command: %w", err) + } + c.PrintAndStoreMessage("OUTPUT", string(output)) + // grep returns exit code 1 when it doesn't find anything + // if err != nil { + // return fmt.Errorf("error executing command: %w", err) + // } + fmt.Println(chatDivider()) + fmt.Println() + continue + } + + // 2. packet capture or trace + if strings.Contains(resp, "PACKET-CAPTURE-OR-TRACE") { + c.PrintAndStoreMessage(Retina, "Let's gather some network data with a packet capture or packet trace.") + fmt.Println() + fmt.Println("A packet capture is a file that contains network traffic. From it we can analyze e.g.:") + fmt.Println("- number of connections per Pod.") + fmt.Println("- state of TCP/UDP connections.") + fmt.Println("- which network entities are communicating.") + fmt.Println("- long-running connections.") + fmt.Println() + fmt.Println("A packet trace installs an eBPF program to collect Flows. From it we can analyze e.g.:") + fmt.Println("- packet drops in iptables (firewall).") + fmt.Println("- etc.") + fmt.Println() + // fmt.Print("Would you like to perform a capture or trace? ") + // val, err = c.ReadExpectedValues("c", "t", "neither") + // if err != nil { + // return err + // } + + // if val == "neither" { + // c.PrintAndStoreMessage(Retina, "Aborting.") + // fmt.Println(responseDivider()) + // continue + // } + + fmt.Println(responseDivider()) + c.PrintAndStoreMessage(Retina, "Describe which nodes to examine, or which pods and namespaces to examine.") + fmt.Println(responseDivider()) + + // prompt user input + userInput, err = c.ReadAndStoreUserInput() + if err != nil { + return err + } + fmt.Println(responseDivider()) + + // if val == "c" { + // capture + c.Traced = false + + for { + c.PrintAndStoreMessage(Retina, "Will perform packet capture...") + + prompt, err := capturePrompt(cmd, strings.Split(userInput, " ")) + if err != nil { + return err + } + + if c.Memory[len(c.Memory)-3].Text == "Aborting Capture." && strings.Contains(c.Memory[len(c.Memory)-4].Text, "Will execute below command:\n") { + sb := strings.Builder{} + sb.WriteString("This is the previous command you suggested: \"") + sb.WriteString(c.Memory[len(c.Memory)-4].Text[len("Will execute below command:\n"):]) + sb.WriteString("\". 
Modify this previous command based on the new TASK below.\n") + for i, msg := range c.Memory { + if i == len(c.Memory)-1 { + // skip last user message so we can display it later + break + } + sb.WriteString(fmt.Sprintf("%s: %s\n", msg.Role, msg.Text)) + } + sb.WriteString("\n") + sb.WriteString("Given the above chat history as context, consider this new ask from USER: ") + sb.WriteString(userInput) + sb.WriteString("\n") + + prompt = sb.String() + prompt + } + + resp, err := c.GPT.Ask(prompt) + if err != nil { + return err + } + + // execute capture command + resp = strings.TrimSpace(resp) + if !strings.Contains(resp, "--no-wait") { + resp += " --no-wait=false" + } + + //if !strings.Contains(resp, "--blob-upload") { + // resp += " --blob-upload=\"\"" + //} + + c.PrintAndStoreMessage(Retina, fmt.Sprintf("Will execute below command:\n%s", resp)) + + splitResp := strings.Split(resp, " ") + if len(splitResp) < 4 || splitResp[0] != "kubectl" || splitResp[1] != "retina" || splitResp[2] != "capture" || splitResp[3] != "create" { + return fmt.Errorf("invalid capture command: %s", resp) + } + + if !c.AutoRun { + fmt.Print("Execute? ") + val, err = c.ReadExpectedValues("y", "n") + if err != nil { + return err + } + + if val == "n" { + c.PrintAndStoreMessage(Retina, "Aborting Capture.") + fmt.Println(responseDivider()) + + // prompt user input + fmt.Println("hint: to quit out of capture loop, say \"quit\"") + userInput, err = c.ReadAndStoreUserInput() + if err != nil { + return err + } + fmt.Println(responseDivider()) + + if userInput == "quit" { + break + } + + continue + } + } + + // splitResp[len(splitResp)-1] = strings.Replace(splitResp[len(splitResp)-1], "", c.BlobURL, -1) + splitResp = append(splitResp, "--namespace=kube-system", "--debug") + // fmt.Printf("splitresp: %+v", splitResp) + + fmt.Println("Running...") + fmt.Println(chatDivider()) + fmt.Println() + + var cap *retinav1alpha1.Capture + if !skipCapture { + // very hacky way to get capture name! + + image := "acnpublic.azurecr.io/retina-agent:linux-amd64-v0.0.11-44-g25bc9fa" + args := []string{} + args = append(args, splitResp[2:]...) + args = append(args, fmt.Sprintf("--blob-upload=%s", c.BlobURL)) + os.Setenv("RETINA_AGENT_IMAGE", image) + fmt.Println("passing args", args) + + capture.CaptureCmd(true) + createCmd := capture.CaptureCmdCreate() + createCmd.Flags().Parse(args) + + cap, err = capture.CaptureCreate(cmd, args) + + cmd := exec.Command("kubectl-retina", args...) 
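+					// Run the installed kubectl-retina binary with the generated capture arguments;
+					// RETINA_AGENT_IMAGE is added to the child environment so the customized agent
+					// image is used (see the --debug flag in create.go).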
+ cmd.Env = os.Environ() + cmd.Env = append(cmd.Env, fmt.Sprintf("RETINA_AGENT_IMAGE=%s", image)) + + output, _ := cmd.CombinedOutput() + fmt.Println("OUTPUT: ") + fmt.Println(string(output)) + } + + if err != nil { + fmt.Println("failed:", err.Error()) + } else { + fmt.Println("success") + } + fmt.Println(chatDivider()) + fmt.Println() + + if err != nil { + // prompt user input + fmt.Println("hint: to quit out of capture loop, say \"quit\"") + userInput, err = c.ReadAndStoreUserInput() + if err != nil { + return err + } + fmt.Println(responseDivider()) + + if userInput == "quit" { + break + } + + continue + } + + captureName := "retina-capture-jgggm" + if !skipCapture { + captureName = cap.Name + } + c.PrintAndStoreMessage(Retina, fmt.Sprintf("Downloading capture %s from blob...", captureName)) + fmt.Println(responseDivider()) + + // TODO(optimization) allow user [y/n] option unless there's AutoRun + fmt.Println(chatDivider()) + fmt.Println() + blobName, err := capture.Download(cmd, &captureName) + if err != nil { + fmt.Println("failed:", err.Error()) + } else { + fmt.Println("success") + } + fmt.Println(chatDivider()) + fmt.Println() + + if err != nil { + fmt.Print("Retry download? ") + val, err = c.ReadExpectedValues("y", "n") + if err != nil { + return err + } + + if val == "n" { + break + } + + continue + } + + c.Captured = true + + // unzip + // TODO(optimization) allow user [y/n] option unless there's AutoRun + fmt.Println(chatDivider()) + fmt.Println() + index := strings.Index(blobName, ".tar.gz") + if index == -1 { + fmt.Println("failed: unable to find .tar.gz in blob name") + break + } + + folder := "captures/" + blobName[:index] + _, _ = c.Exec.Command("mkdir", "-p", folder).CombinedOutput() + _, err = c.Exec.Command("tar", "-xvf", blobName, "-C", folder).CombinedOutput() + if err != nil { + fmt.Println("failed to unzip (maybe it was already unzipped):", err.Error()) + // c.PrintAndStoreMessage("OUTPUT", string(output)) + } else { + c.PrintAndStoreMessage("OUTPUT", "success") + } + + // keep track of capture file + // use "grep flow" for trace + keyword := ".pcap" + if captureIsActuallyTrace { + keyword = "flow" + } + output, err := c.Exec.Command("bash", "-c", fmt.Sprintf("ls %s | grep %s", folder, keyword)).CombinedOutput() + if err != nil { + return fmt.Errorf("error finding file: %w", err) + } + c.CaptureFile = folder + "/" + strings.TrimSpace(string(output)) + fmt.Println("capture file:", c.CaptureFile) + output, _ = c.Exec.Command("pwd").CombinedOutput() + fmt.Println(string(output)) + fmt.Println(chatDivider()) + fmt.Println() + + c.PrintAndStoreMessage(Retina, "Let's analyze the result. What should we look into?") + fmt.Println(responseDivider()) + + if captureIsActuallyTrace { + c.Traced = true + c.TraceFile = c.CaptureFile + c.Captured = false + c.CaptureFile = "" + } + + break + } + + continue + // } + + // if val == "t" { + // //trace + // c.Captured = false + // for { + // fmt.Println("TRACE UNIMPLEMENTED. Need to copy/paste from capture logic") + // // TODO copy paste and make two modifications: + // // use trace prompt/command instead + // // search for "flow" instead of ".pcap" and save that into the trace file + + // break + + // // would need to set these before breaking + // c.Traced = true + // c.TraceFile = "TODO" + // } + // continue + // } + + // neither + continue + } + + // 3. 
parse + // if strings.Contains(resp, "PARSE") { + // if (c.CaptureFile == "" && c.TraceFile == "") || (!c.Captured && !c.Traced) { + // c.PrintAndStoreMessage(Retina, "No capture or trace file to parse.") + // fmt.Println(responseDivider()) + // continue + // } + + // c.PrintAndStoreMessage(Retina, "Parsing file...") + // fmt.Println(chatDivider()) + // if c.Captured && c.CaptureFile != "" { + // output, err := c.Exec.Command("kubectl-retina", "parse", "--pcap-file", c.CaptureFile, "--enrich").CombinedOutput() + // fmt.Println(string(output)) + // if err != nil { + // return fmt.Errorf("error executing command: %w", err) + // } + // } else if c.Traced && c.TraceFile != "" { + // output, err := c.Exec.Command("kubectl-retina", "parse", "--trace-file", c.TraceFile, "--enrich").CombinedOutput() + // fmt.Println(string(output)) + // if err != nil { + // return fmt.Errorf("error executing command: %w", err) + // } + // } + + // fmt.Println(chatDivider()) + // continue + // } + + // 4. analyze + if strings.Contains(resp, "ANALYZE") || strings.Contains(resp, "PARSE") { + if (c.CaptureFile == "" && c.TraceFile == "") || (!c.Captured && !c.Traced) { + c.PrintAndStoreMessage(Retina, "No capture or trace file to analyze.") + continue + } + + c.PrintAndStoreMessage(Retina, "Parsing file...") + fmt.Println(chatDivider()) + fmt.Println() + var cmd utilexec.Cmd + if c.Captured && c.CaptureFile != "" { + // output, err := c.Exec.Command("kubectl-retina", "ai", "analyze", "--pcap-file", c.CaptureFile, "--question", userInput).CombinedOutput() + // fmt.Println(string(output)) + // if err != nil { + // return fmt.Errorf("error executing command: %w", err) + // } + cmd = c.Exec.Command("kubectl-retina", "ai", "analyze", "--pcap-file", c.CaptureFile, "--question", userInput) + } else if c.Traced && c.TraceFile != "" { + // output, err := c.Exec.Command("kubectl-retina", "ai", "analyze", "--trace-file", c.TraceFile, "--question", userInput).CombinedOutput() + // fmt.Println(string(output)) + // if err != nil { + // return fmt.Errorf("error executing command: %w", err) + // } + cmd = c.Exec.Command("kubectl-retina", "ai", "analyze", "--trace-file", c.TraceFile, "--question", userInput) + } + out := retry(cmd) + fmt.Println(out) + + fmt.Println(chatDivider()) + fmt.Println() + continue + } + + // bad response from GPT + c.PrintAndStoreMessage(Retina, "Unable to triage request.") + fmt.Println(responseDivider()) + } +} + +func retry(cmd utilexec.Cmd) (res string) { + b := wait.Backoff{ + Steps: 5, + Duration: 1 * time.Second, + Factor: 2, + Jitter: 0, + } + wait.ExponentialBackoff(b, func() (bool, error) { + output, err := cmd.CombinedOutput() + if err != nil { + fmt.Println("Retrying...") + return false, err + } + if string(output) == "" { + fmt.Println("Retrying...") + return false, nil + } + res = string(output) + return true, nil + }) + return +} + +func (c *ChatAgent) ReadAndStoreUserInput() (string, error) { + reader := bufio.NewReader(os.Stdin) + fmt.Printf("%s : ", User) + for { + // must hit enter to continue + userInput, err := reader.ReadString('\n') + if err != nil { + return "", fmt.Errorf("error reading input: %w", err) + } + + userInput = strings.TrimSpace(userInput) + if userInput == "" { + continue + } + + c.Memory = append(c.Memory, &ChatMessage{ + Role: User, + Text: userInput, + }) + return userInput, nil + } +} + +func (c *ChatAgent) ReadExpectedValues(vals ...string) (string, error) { + if len(vals) == 0 { + return "", fmt.Errorf("no expected values given") + } + + reader := 
bufio.NewReader(os.Stdin) + for { + fmt.Printf("[%s]: ", strings.Join(vals, "/")) + // must hit enter to continue + userInput, err := reader.ReadString('\n') + if err != nil { + return "", fmt.Errorf("error reading input: %w", err) + } + + userInput = strings.TrimSpace(userInput) + for _, val := range vals { + if userInput == val { + return userInput, nil + } + } + + // retry + reader.Reset(os.Stdin) + fmt.Printf("invalid choice, please try again\n") + } +} + +func (c *ChatAgent) chatTriagePrompt(resp string) string { + sb := strings.Builder{} + sb.WriteString("You are RETINA. You are an expert at Kubernetes, bash commands, and network diagnostics.") + sb.WriteString("Determine which task RETINA should perform.") + + if !c.Captured && !c.Traced { + sb.WriteString("There are three kinds of tasks: 1) UNKNOWN-TASK 2) KUBECTL-BASH-COMMAND, 3) PACKET-CAPTURE-OR-TRACE.") + sb.WriteString("Follow this logic:") + sb.WriteString("- If the USER's problem/question is related to trace, capture, packets or communication between network entities, then perform \"PACKET-CAPTURE-OR-TRACE\".") + sb.WriteString("- If the USER's problem/question requires a kubectl command, then perform \"KUBECTL-BASH-COMMAND\".") + sb.WriteString("- Lastly, if you think the USER's statement is not relevant to the numbered tasks above, then you should choose \"1) UNKNOWN-TASK\".") + } else { + sb.WriteString("There are five kinds of tasks: 1) UNKNOWN-TASK 2) KUBECTL-BASH-COMMAND, 3) PARSE, 4) ANALYZE, OR 5) NEW-PACKET-CAPTURE-OR-TRACE.") + sb.WriteString("Follow this logic:") + sb.WriteString("- If the USER is asking for a new packet capture or a new trace, then perform \"NEW-PACKET-CAPTURE-OR-TRACE\".") + sb.WriteString("- If the USER's problem/question is related to packets or communication between network entities, then perform \"ANALYZE\".") + sb.WriteString("- If the USER's asks to parse the file, then perform \"PARSE\".") + sb.WriteString("- If the USER's problem/question requires a kubectl command, then perform \"KUBECTL-BASH-COMMAND\".") + sb.WriteString("- Lastly, if you think the USER's statement is not relevant to the numbered tasks above, then you should choose \"1) UNKNOWN-TASK\".") + } + + sb.WriteString("\n") + sb.WriteString("Chat History:") + for _, msg := range c.Memory { + sb.WriteString(fmt.Sprintf("%s: %s\n", msg.Role, msg.Text)) + } + sb.WriteString("\n") + sb.WriteString("Read the bottom message in the chat again. Which of the capitalized tasks above should RETINA perform for the bottom message? To arrive at your answer, include your reasoning in a very short sentence.") + return sb.String() +} + +func (c *ChatAgent) kubectlBashPrompt(userInput string) string { + sb := strings.Builder{} + sb.WriteString("You are RETINA. You are an expert at Kubernetes and bash commands.") + + sb.WriteString("Provide a \"kubectl get\" command that will help answer the bottom message.") + sb.WriteString("You may pipe outputs into other commands. 
In all, you may only use the following commands:\n") + for _, command := range allowedToPipeToCommands { + sb.WriteString(command) + sb.WriteString("\n") + } + sb.WriteString("Your answer should be only the bash command (piped to other commands as necessary).\n") + sb.WriteString("QUESTION: How many Pods are in the test namespace?\n") + sb.WriteString("ANSWER: kubectl get pods -n test -o name | wc -l\n") + sb.WriteString("QUESTION: Which Pods are not Running?\n") + sb.WriteString("ANSWER: kubectl get pods --all-namespaces -o json | jq '.items[] | select(.status.phase != \"Running\") | .metadata.namespace + \"/\" + .metadata.name'\n") + sb.WriteString("QUESTION: NetPols selecting key1=val1 in default namespace\n") + sb.WriteString("ANSWER: kubectl get netpol -n default -o json | jq '.items[] | select(.spec.podSelector.matchLabels.key1 == \"val1\") | .metadata.namespace + \"/\" + .metadata.name'\n") + sb.WriteString("QUESTION: Linux nodes\n") + sb.WriteString("ANSWER: kubectl get nodes --show-labels | grep linux\n") + + sb.WriteString("\n") + sb.WriteString("Chat History:") + for i, msg := range c.Memory { + if i == len(c.Memory)-1 { + // skip last user message so we can display it later + break + } + sb.WriteString(fmt.Sprintf("%s: %s\n", msg.Role, msg.Text)) + } + sb.WriteString("\n") + + sb.WriteString("Given the context in the Chat History, provide a command for the USER that will help answer the below QUESTION, responding in the same format as the above examples.\n") + sb.WriteString(fmt.Sprintf("QUESTION: %s\n", userInput)) + sb.WriteString("ANSWER: ") + + return sb.String() +} + +type Chat struct { + GPT *GPT + BlobURL string + Memory []*ChatMessage +} + +func (c *Chat) Init() error { + s := os.Getenv(capture.BLOB_URL) + // fmt.Println("blob url:", s) + purl, err := url.Parse(s) + if err != nil { + return fmt.Errorf("error parsing blob url: %w", err) + } + c.BlobURL = purl.String() + if c.BlobURL == "" { + return capture.ErrEmptyBlobURL + } + + return nil +} + +func (c *Chat) PrintAndStoreMessage(role, text string) { + c.Memory = append(c.Memory, &ChatMessage{ + Role: role, + Text: text, + }) + + fmt.Printf("%s: %s\n", role, text) +} + +type ChatMessage struct { + Role string + Text string +} + +func ChatCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "chat", + Short: "chat bot providing insights/automation for debugging network issues and cluster state", + RunE: func(cmd *cobra.Command, args []string) error { + gpt := NewGPT(analyzeTemp, 0.95, 0, 0, analyzeMaxTokens) + if err := gpt.Init(); err != nil { + return err + } + + chatAgent := NewChatAgent(gpt) + if err := chatAgent.Init(); err != nil { + return err + } + + if err := chatAgent.Loop(cmd); err != nil { + return err + } + + return nil + }, + } + + return cmd +} diff --git a/cli/cmd/capture/capture.go b/cli/cmd/capture/capture.go new file mode 100644 index 000000000..363d4e8fa --- /dev/null +++ b/cli/cmd/capture/capture.go @@ -0,0 +1,24 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
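+
+// Package capture implements the kubectl-retina capture subcommands for creating, listing,
+// deleting, and downloading packet captures, for example:
+//
+//	kubectl retina capture create --host-path=/mnt/capture --node-selectors="agentpool=agentpool" --duration=10s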
+ +package capture + +import ( + "github.com/spf13/cobra" +) + +func CaptureCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "capture", + Short: "Retina Capture - capture network traffic", + Run: func(cmd *cobra.Command, args []string) { + cmd.Help() //nolint:errcheck + }, + } + + cmd.AddCommand(CaptureCmdCreate()) + cmd.AddCommand(CaptureCmdList()) + cmd.AddCommand(CaptureCmdDelete()) + + return cmd +} diff --git a/cli/cmd/capture/create.go b/cli/cmd/capture/create.go new file mode 100644 index 000000000..e3605324f --- /dev/null +++ b/cli/cmd/capture/create.go @@ -0,0 +1,405 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package capture + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/spf13/cobra" + "go.uber.org/zap" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + utilrand "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/client-go/kubernetes" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" + + retinacmd "github.com/microsoft/retina/cli/cmd" + retinav1alpha1 "github.com/microsoft/retina/crd/api/v1alpha1" + pkgcapture "github.com/microsoft/retina/pkg/capture" + captureConstants "github.com/microsoft/retina/pkg/capture/constants" + captureUtils "github.com/microsoft/retina/pkg/capture/utils" + "github.com/microsoft/retina/pkg/config" +) + +var ( + configFlags *genericclioptions.ConfigFlags + + duration time.Duration + maxSize int + packetSize int + nodeSelectors string + podSelectors string + namespaceSelectors string + nodeNames string + hostPath string + pvc string + blobUpload string + tcpdumpFilter string + excludeFilter string + includeFilter string + includeMetadata bool + namespace string + jobNumLimit int + + nowait bool + + debug bool +) + +var createExample = templates.Examples(i18n.T(` + # Capture network packets on the node selected by node names and copy the artifacts to the node host path /mnt/capture + kubectl retina capture create --host-path /mnt/capture --namespace capture --node-names "aks-nodepool1-41844487-vmss000000,aks-nodepool1-41844487-vmss000001" + + # Capture network packets on the coredns pods determined by pod-selectors and namespace-selectors + kubectl retina capture create --host-path /mnt/capture --namespace capture --pod-selectors="k8s-app=kube-dns" --namespace-selectors="kubernetes.io/metadata.name=kube-system" + + # Capture network packets on nodes with label "agentpool=agentpool" and "version:v20" + kubectl retina capture create --host-path /mnt/capture --node-selectors="agentpool=agentpool,version:v20" + + # Capture network packets on nodes using node-selector with duration 10s + kubectl retina capture create --host-path=/mnt/capture --node-selectors="agentpool=agentpool" --duration=10s + + # Capture network packets on nodes using node-selector and upload the artifacts to blob storage with SAS URL https://testaccount.blob.core.windows.net/ + kubectl retina capture create --node-selectors="agentpool=agentpool" --blob-upload=https://testaccount.blob.core.windows.net/ + `)) + +const ( + defaultWaitTimeout time.Duration = 5 * time.Minute + defaultWaitPeriod time.Duration = 1 * time.Minute +) + +func CaptureCmdCreate() *cobra.Command { + cmd := &cobra.Command{ + Use: "create", + Short: "create a Retina Capture", + Example: createExample, + RunE: func(cmd *cobra.Command, args []string) error { + 
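+			// The create flow: build a Kubernetes client from the kubeconfig flags, construct a
+			// Capture spec from the CLI flags, translate it into per-node capture Jobs, and
+			// (unless --no-wait) wait for the Jobs to complete before cleaning them up.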
kubeConfig, err := configFlags.ToRESTConfig()
+			if err != nil {
+				return err
+			}
+
+			kubeClient, err := kubernetes.NewForConfig(kubeConfig)
+			if err != nil {
+				return err
+			}
+
+			capture, err := createCapture(kubeClient)
+			if err != nil {
+				return err
+			}
+
+			jobsCreated, err := createJobs(kubeClient, capture)
+			if err != nil {
+				retinacmd.Logger.Error("Failed to create job", zap.Error(err))
+				return err
+			}
+			if nowait {
+				retinacmd.Logger.Info("Please manually delete all capture jobs")
+				if capture.Spec.OutputConfiguration.BlobUpload != nil {
+					retinacmd.Logger.Info("Please manually delete the capture secret", zap.String("namespace", namespace), zap.String("secret name", *capture.Spec.OutputConfiguration.BlobUpload))
+				}
+				printCaptureResult(jobsCreated)
+				return nil
+			}
+
+			// Wait until all jobs finish and then delete them before the timeout; otherwise, print the
+			// jobs created so the customer can recycle them.
+			retinacmd.Logger.Info("Waiting for capture jobs to finish")
+
+			allJobsCompleted := waitUntilJobsComplete(kubeClient, jobsCreated)
+
+			// Delete all jobs created only if they all completed, otherwise keep the jobs for debugging.
+			if allJobsCompleted {
+				retinacmd.Logger.Info("Deleting jobs as all jobs are completed")
+				jobsFailedToDelete := deleteJobs(kubeClient, jobsCreated)
+				if len(jobsFailedToDelete) != 0 {
+					retinacmd.Logger.Info("Please manually delete the capture jobs that failed to delete", zap.String("namespace", namespace), zap.String("job list", strings.Join(jobsFailedToDelete, ",")))
+				}
+
+				err = deleteSecret(kubeClient, capture.Spec.OutputConfiguration.BlobUpload)
+				if err != nil {
+					retinacmd.Logger.Error("Failed to delete capture secret, please manually delete it", zap.String("namespace", namespace), zap.String("secret name", *capture.Spec.OutputConfiguration.BlobUpload), zap.Error(err))
+				}
+
+				if len(jobsFailedToDelete) == 0 && err == nil {
+					retinacmd.Logger.Info("Done deleting jobs")
+				}
+				return nil
+			}
+
+			retinacmd.Logger.Info("Not all jobs completed in the given time")
+			retinacmd.Logger.Info("Please manually delete the Capture")
+			return getCaptureAndPrintCaptureResult(kubeClient, capture.Name, namespace)
+		},
+	}
+
+	configFlags = genericclioptions.NewConfigFlags(true)
+	configFlags.AddFlags(cmd.PersistentFlags())
+	cmd.Flags().DurationVar(&duration, "duration", 60*time.Second, "Duration of capturing packets")
+	cmd.Flags().IntVar(&maxSize, "max-size", 100, "Limit the capture file size in MB; works only on Linux")
+	cmd.Flags().IntVar(&packetSize, "packet-size", 0, "Limit each packet to this size in bytes; works only on Linux")
+	cmd.Flags().StringVar(&nodeNames, "node-names", "", "A comma-separated list of node names to select nodes on which the network capture will be performed")
+	cmd.Flags().StringVar(&nodeSelectors, "node-selectors", "", "A comma-separated list of node labels to select nodes on which the network capture will be performed")
+	cmd.Flags().StringVar(&podSelectors, "pod-selectors", "", "A comma-separated list of pod labels together with namespace-selector to select pods on which the network capture will be performed")
+	cmd.Flags().StringVar(&namespaceSelectors, "namespace-selectors", "", "A comma-separated list of namespace labels together with pod-selector to select pods on which the network capture will be performed.
By default, the pod namespace is specified by the flag namespace") + cmd.Flags().StringVar(&hostPath, "host-path", "", "HostPath of the node to store the capture files") + cmd.Flags().StringVar(&pvc, "pvc", "", "PersistentVolumeClaim under the specified or default namespace to store capture files") + cmd.Flags().StringVar(&blobUpload, "blob-upload", "", "Blob SAS URL with write permission to upload capture files") + cmd.Flags().StringVar(&tcpdumpFilter, "tcpdump-filter", "", "Raw tcpdump flags which works only for Linux") + cmd.Flags().StringVar(&excludeFilter, "exclude-filter", "", "A comma-separated list of IP:Port pairs that are "+ + "excluded from capturing network packets. Supported formats are IP:Port, IP, Port, *:Port, IP:*") + cmd.Flags().StringVar(&includeFilter, "include-filter", "", "A comma-separated list of IP:Port pairs that are "+ + "used to filter capture network packets. Supported formats are IP:Port, IP, Port, *:Port, IP:*") + cmd.Flags().BoolVar(&includeMetadata, "include-metadata", true, "If true, collect static network metadata into capture file") + cmd.Flags().StringVarP(&namespace, "namespace", "n", "default", "Namespace to host capture job") + cmd.Flags().IntVar(&jobNumLimit, "job-num-limit", 0, "The maximum number of jobs can be created for each capture. 0 means no limit") + cmd.Flags().BoolVar(&nowait, "no-wait", true, "Do not wait for the long-running capture job to finish") + cmd.Flags().BoolVar(&debug, "debug", false, "When debug is true, a customized retina-agent image, determined by the environment variable RETINA_AGENT_IMAGE, is set") + + return cmd +} + +func createSecretFromBlobUpload(kubeClient kubernetes.Interface, blobUpload, captureName string) (string, error) { + if blobUpload == "" { + return "", nil + } + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: captureConstants.CaptureOutputLocationBlobUploadSecretName, + Labels: captureUtils.GetSerectLabelsFromCaptureName(captureName), + }, + Type: corev1.SecretTypeOpaque, + Data: map[string][]byte{ + captureConstants.CaptureOutputLocationBlobUploadSecretKey: []byte(blobUpload), + }, + } + secret, err := kubeClient.CoreV1().Secrets(namespace).Create(context.TODO(), secret, metav1.CreateOptions{}) + if err != nil { + return "", err + } + return secret.Name, nil +} + +func deleteSecret(kubeClient kubernetes.Interface, secretName *string) error { + if secretName == nil { + return nil + } + + return kubeClient.CoreV1().Secrets(namespace).Delete(context.TODO(), *secretName, metav1.DeleteOptions{}) +} + +func createCapture(kubeClient kubernetes.Interface) (*retinav1alpha1.Capture, error) { + captureName := fmt.Sprintf("retina-capture-%s", utilrand.String(5)) + capture := &retinav1alpha1.Capture{ + ObjectMeta: metav1.ObjectMeta{ + Name: captureName, + Namespace: namespace, + }, + Spec: retinav1alpha1.CaptureSpec{ + CaptureConfiguration: retinav1alpha1.CaptureConfiguration{ + TcpdumpFilter: &tcpdumpFilter, + CaptureTarget: retinav1alpha1.CaptureTarget{}, + IncludeMetadata: includeMetadata, + CaptureOption: retinav1alpha1.CaptureOption{}, + }, + }, + } + + if duration != 0 { + retinacmd.Logger.Info(fmt.Sprintf("The capture duration is set to %s", duration)) + capture.Spec.CaptureConfiguration.CaptureOption.Duration = &metav1.Duration{Duration: duration} + } + + nodeSelectorLabelsMap, err := labels.ConvertSelectorToLabelsMap(nodeSelectors) + if err != nil { + return nil, err + } + podSelectorLabelsMap, err := labels.ConvertSelectorToLabelsMap(podSelectors) + if err != nil { + return nil, 
err + } + namespaceSelectorLabelsMap, err := labels.ConvertSelectorToLabelsMap(namespaceSelectors) + if err != nil { + return nil, err + } + + if len(nodeSelectorLabelsMap) != 0 || len(nodeNames) != 0 { + capture.Spec.CaptureConfiguration.CaptureTarget.NodeSelector = &metav1.LabelSelector{} + } + if len(nodeSelectorLabelsMap) != 0 { + capture.Spec.CaptureConfiguration.CaptureTarget.NodeSelector.MatchLabels = nodeSelectorLabelsMap + } + if len(nodeNames) != 0 { + nodeNameSlice := strings.Split(nodeNames, ",") + if len(nodeNameSlice) != 0 { + capture.Spec.CaptureConfiguration.CaptureTarget.NodeSelector.MatchExpressions = []metav1.LabelSelectorRequirement{{ + Key: corev1.LabelHostname, + Operator: metav1.LabelSelectorOpIn, + Values: nodeNameSlice, + }} + } + } + + if len(namespaceSelectorLabelsMap) != 0 { + capture.Spec.CaptureConfiguration.CaptureTarget.NamespaceSelector = &metav1.LabelSelector{ + MatchLabels: namespaceSelectorLabelsMap, + } + } + if len(podSelectorLabelsMap) != 0 { + capture.Spec.CaptureConfiguration.CaptureTarget.PodSelector = &metav1.LabelSelector{ + MatchLabels: podSelectorLabelsMap, + } + } + + if maxSize != 0 { + retinacmd.Logger.Info(fmt.Sprintf("The capture file max size is set to %dMB", maxSize)) + capture.Spec.CaptureConfiguration.CaptureOption.MaxCaptureSize = &maxSize + } + + if packetSize != 0 { + retinacmd.Logger.Info(fmt.Sprintf("The capture packet size is set to %d bytes", packetSize)) + capture.Spec.CaptureConfiguration.CaptureOption.PacketSize = &packetSize + } + + if len(hostPath) != 0 { + capture.Spec.OutputConfiguration.HostPath = &hostPath + } + if len(pvc) != 0 { + capture.Spec.OutputConfiguration.PersistentVolumeClaim = &pvc + } + + if len(blobUpload) != 0 { + // Mount blob url as secret onto the capture pod for security concern if blob url is not empty. 
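+		// Only the generated secret name is stored on the Capture spec (OutputConfiguration.BlobUpload);
+		// the SAS URL itself stays inside the secret.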
+ secretName, err := createSecretFromBlobUpload(kubeClient, blobUpload, captureName) + if err != nil { + return nil, err + } + capture.Spec.OutputConfiguration.BlobUpload = &secretName + } + + if len(excludeFilter) != 0 { + if capture.Spec.CaptureConfiguration.Filters == nil { + capture.Spec.CaptureConfiguration.Filters = &retinav1alpha1.CaptureConfigurationFilters{} + } + excludeFilterSlice := strings.Split(excludeFilter, ",") + capture.Spec.CaptureConfiguration.Filters.Exclude = excludeFilterSlice + } + + if len(includeFilter) != 0 { + if capture.Spec.CaptureConfiguration.Filters == nil { + capture.Spec.CaptureConfiguration.Filters = &retinav1alpha1.CaptureConfigurationFilters{} + } + includeFilterSlice := strings.Split(includeFilter, ",") + capture.Spec.CaptureConfiguration.Filters.Include = includeFilterSlice + } + return capture, nil +} + +func getCLICaptureConfig() config.CaptureConfig { + return config.CaptureConfig{ + CaptureImageVersion: retinacmd.Version, + CaptureDebug: debug, + CaptureImageVersionSource: captureUtils.VersionSourceCLIVersion, + CaptureJobNumLimit: jobNumLimit, + } +} + +func createJobs(kubeClient kubernetes.Interface, capture *retinav1alpha1.Capture) ([]batchv1.Job, error) { + translator := pkgcapture.NewCaptureToPodTranslator(kubeClient, retinacmd.Logger, getCLICaptureConfig()) + jobs, err := translator.TranslateCaptureToJobs(capture) + if err != nil { + return nil, err + } + + jobsCreated := []batchv1.Job{} + for _, job := range jobs { + jobCreated, err := kubeClient.BatchV1().Jobs(namespace).Create(context.TODO(), job, metav1.CreateOptions{}) + if err != nil { + return nil, err + } + jobsCreated = append(jobsCreated, *jobCreated) + retinacmd.Logger.Info("Packet capture job is created", zap.String("namespace", namespace), zap.String("capture job", jobCreated.Name)) + } + return jobsCreated, nil +} + +func waitUntilJobsComplete(kubeClient kubernetes.Interface, jobs []batchv1.Job) bool { + allJobsCompleted := false + + // TODO: let's make the timeout and period to wait for all job to finish configurable. + var deadline time.Duration = defaultWaitTimeout + if duration != 0 { + deadline = duration * 2 + } + + var period time.Duration = defaultWaitPeriod + // To print less noisy messages, we rely on duration to decide the wait period. + if period < duration/10 { + period = duration / 10 + } + retinacmd.Logger.Info(fmt.Sprintf("Waiting timeout is set to %s", deadline)) + + ctx, cancel := context.WithTimeout(context.TODO(), deadline) + defer cancel() + + wait.JitterUntil(func() { + jobsCompleted := []string{} + jobsIncompleted := []string{} + + for _, job := range jobs { + jobRet, err := kubeClient.BatchV1().Jobs(job.Namespace).Get(context.TODO(), job.Name, metav1.GetOptions{}) + if err != nil { + retinacmd.Logger.Error("Failed to get job", zap.String("namespace", job.Namespace), zap.String("job name", job.Name), zap.Error(err)) + jobsIncompleted = append(jobsIncompleted, job.Name) + continue + } + if jobRet.Status.CompletionTime != nil { + jobsCompleted = append(jobsCompleted, job.Name) + } else { + jobsIncompleted = append(jobsIncompleted, job.Name) + } + } + + if len(jobsIncompleted) != 0 { + retinacmd.Logger.Info("Not all jobs are completed", + zap.String("namespace", namespace), + zap.String("Completed jobs", strings.Join(jobsCompleted, ",")), + zap.String("Uncompleted packet capture jobs", strings.Join(jobsIncompleted, ",")), + ) + // Return to have another try after an interval. 
+ return + } + allJobsCompleted = true + cancel() + }, period, 0.2, true, ctx.Done()) + + return allJobsCompleted +} + +func deleteJobs(kubeClient kubernetes.Interface, jobs []batchv1.Job) []string { + jobsFailedtoDelete := []string{} + for _, job := range jobs { + // Child pods are preserved by default when jobs are deleted, we need to set propagationPolicy=Background to + // remove them. + deletePropagationBackground := metav1.DeletePropagationBackground + err := kubeClient.BatchV1().Jobs(job.Namespace).Delete(context.TODO(), job.Name, metav1.DeleteOptions{ + PropagationPolicy: &deletePropagationBackground, + }) + if err != nil { + jobsFailedtoDelete = append(jobsFailedtoDelete, job.Name) + retinacmd.Logger.Error("Failed to delete job", zap.String("namespace", job.Namespace), zap.String("job name", job.Name), zap.Error(err)) + } + } + return jobsFailedtoDelete +} diff --git a/cli/cmd/capture/delete.go b/cli/cmd/capture/delete.go new file mode 100644 index 000000000..1d10f3e6a --- /dev/null +++ b/cli/cmd/capture/delete.go @@ -0,0 +1,97 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package capture + +import ( + "context" + "fmt" + + retinacmd "github.com/microsoft/retina/cli/cmd" + captureConstants "github.com/microsoft/retina/pkg/capture/constants" + "github.com/microsoft/retina/pkg/label" + "github.com/spf13/cobra" + "go.uber.org/zap" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/client-go/kubernetes" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +var name string + +var deleteExample = templates.Examples(i18n.T(` + # Delete the Retina Capture "retina-capture-8v6wd" in namespace "capture" + kubectl retina capture delete --name retina-capture-8v6wd --namespace capture + `)) + +func CaptureCmdDelete() *cobra.Command { + cmd := &cobra.Command{ + Use: "delete", + Short: "Delete a Retina capture", + Example: deleteExample, + RunE: func(cmd *cobra.Command, args []string) error { + kubeConfig, err := configFlags.ToRESTConfig() + if err != nil { + return err + } + + kubeClient, err := kubernetes.NewForConfig(kubeConfig) + if err != nil { + return err + } + + if len(name) == 0 { + return fmt.Errorf("Capture name is not specified") + } + + captureJobSelector := &metav1.LabelSelector{ + MatchLabels: map[string]string{ + label.CaptureNameLabel: name, + label.AppLabel: captureConstants.CaptureAppname, + }, + } + labelSelector, _ := labels.Parse(metav1.FormatLabelSelector(captureJobSelector)) + jobListOpt := metav1.ListOptions{ + LabelSelector: labelSelector.String(), + } + + jobList, err := kubeClient.BatchV1().Jobs(namespace).List(context.TODO(), jobListOpt) + if err != nil { + return err + } + if len(jobList.Items) == 0 { + return fmt.Errorf("Capture %s in namespace %s is not found", name, namespace) + } + + for _, job := range jobList.Items { + deletePropagationBackground := metav1.DeletePropagationBackground + if err := kubeClient.BatchV1().Jobs(job.Namespace).Delete(context.TODO(), job.Name, metav1.DeleteOptions{ + PropagationPolicy: &deletePropagationBackground, + }); err != nil { + retinacmd.Logger.Info("Failed to delete job", zap.String("job name", job.Name), zap.Error(err)) + } + } + + for _, volume := range jobList.Items[0].Spec.Template.Spec.Volumes { + if volume.Secret != nil { + if err := kubeClient.CoreV1().Secrets(namespace).Delete(context.TODO(), volume.Secret.SecretName, metav1.DeleteOptions{}); err != nil { + return err + } + 
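+				// Each capture mounts at most one secret (the blob-upload SAS URL), so stop after
+				// deleting the first secret volume found.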
break + } + } + retinacmd.Logger.Info(fmt.Sprintf("Retina Capture %q delete", name)) + + return nil + }, + } + + configFlags = genericclioptions.NewConfigFlags(true) + configFlags.AddFlags(cmd.PersistentFlags()) + cmd.Flags().StringVar(&name, "name", "", "name of the Retina Capture") + cmd.Flags().StringVarP(&namespace, "namespace", "n", "default", "Namespace to host capture job") + return cmd +} diff --git a/cli/cmd/capture/download.go b/cli/cmd/capture/download.go new file mode 100644 index 000000000..0e830fff4 --- /dev/null +++ b/cli/cmd/capture/download.go @@ -0,0 +1,101 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package capture + +import ( + "fmt" + "io" + "net/url" + "os" + "strings" + + "github.com/Azure/azure-sdk-for-go/storage" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +const ( + BLOB_URL = "BLOB_URL" +) + +var ErrEmptyBlobURL = fmt.Errorf("BLOB_URL must be set/exported") + +func CaptureCmdDownload() *cobra.Command { + var captureName string + cmd := &cobra.Command{ + Use: "download", + Short: "Download Retina Captures", + RunE: func(cmd *cobra.Command, args []string) error { + _, err := Download(cmd, &captureName) + return err + }, + } + + cmd.Flags().StringVarP(&captureName, "capture-name", "n", "", "name of capture to download") + + return cmd +} + +func Download(cmd *cobra.Command, captureName *string) (string, error) { + blobURL := viper.GetString(BLOB_URL) + if blobURL == "" { + return "", ErrEmptyBlobURL + } + + bloburl := viper.GetString(BLOB_URL) + if bloburl == "" { + return "", ErrEmptyBlobURL + } + + u, err := url.Parse(bloburl) + if err != nil { + return "", fmt.Errorf("failed to parse SAS URL: %+v", err) + } + + // blobService, err := storage.NewAccountSASClientFromEndpointToken(u.String(), u.Query().Encode()).GetBlobService() + b, err := storage.NewAccountSASClientFromEndpointToken(u.String(), u.Query().Encode()) + if err != nil { + return "", fmt.Errorf("failed to create storage account client: %+v", err) + } + + blobService := b.GetBlobService() + containerPath := strings.TrimLeft(u.Path, "/") + splitPath := strings.SplitN(containerPath, "/", 2) + containerName := splitPath[0] + + params := storage.ListBlobsParameters{Prefix: *captureName} + blobList, err := blobService.GetContainerReference(containerName).ListBlobs(params) + if err != nil { + return "", fmt.Errorf("failed to list blobstore with: %+v", err) + } + + if len(blobList.Blobs) == 0 { + return "", fmt.Errorf("no blobs found with prefix: %s", *captureName) + } + + blobName := "" + for _, v := range blobList.Blobs { + blob := blobService.GetContainerReference(containerName).GetBlobReference(v.Name) + readCloser, err := blob.Get(&storage.GetBlobOptions{}) + if err != nil { + return "", fmt.Errorf("failed to read from blobstore with: %+v", err) + } + + defer readCloser.Close() + + blobData, err := io.ReadAll(readCloser) + if err != nil { + return "", fmt.Errorf("failed to obtain blob from blobstore with: %+v", err) + } + + err = os.WriteFile(v.Name, blobData, 0o644) + if err != nil { + return "", fmt.Errorf("failed to write file with: %+v", err) + } + + blobName = v.Name + fmt.Println("Downloaded blob: ", v.Name) + } + return blobName, nil +} diff --git a/cli/cmd/capture/list.go b/cli/cmd/capture/list.go new file mode 100644 index 000000000..758e607f6 --- /dev/null +++ b/cli/cmd/capture/list.go @@ -0,0 +1,53 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package capture + +import ( + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/client-go/kubernetes" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +var allNamespaces bool + +var listExample = templates.Examples(i18n.T(` + # List Retina Capture in namespace "capture" + kubectl retina capture list -n capture + + # List Retina Capture in all namespaces + kubectl retina capture list --all-namespaces + `)) + +func CaptureCmdList() *cobra.Command { + cmd := &cobra.Command{ + Use: "list", + Short: "List Retina Captures", + Example: listExample, + RunE: func(cmd *cobra.Command, args []string) error { + kubeConfig, err := configFlags.ToRESTConfig() + if err != nil { + return err + } + + kubeClient, err := kubernetes.NewForConfig(kubeConfig) + if err != nil { + return err + } + + captureNamespace := namespace + if allNamespaces { + captureNamespace = "" + } + return listCapturesInNamespaceAndPrintCaptureResults(kubeClient, captureNamespace) + }, + } + + configFlags = genericclioptions.NewConfigFlags(true) + configFlags.AddFlags(cmd.PersistentFlags()) + cmd.Flags().StringVarP(&namespace, "namespace", "n", "default", "Namespace to host capture job") + cmd.Flags().BoolVarP(&allNamespaces, "all-namespaces", "A", allNamespaces, "If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace.") + return cmd +} diff --git a/cli/cmd/capture/table_util.go b/cli/cmd/capture/table_util.go new file mode 100644 index 000000000..8d94ef184 --- /dev/null +++ b/cli/cmd/capture/table_util.go @@ -0,0 +1,106 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package capture + +import ( + "context" + "fmt" + "os" + "sort" + "strings" + "text/tabwriter" + "time" + + captureConstants "github.com/microsoft/retina/pkg/capture/constants" + "github.com/microsoft/retina/pkg/label" + batchv1 "k8s.io/api/batch/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + durationUtil "k8s.io/apimachinery/pkg/util/duration" + "k8s.io/client-go/kubernetes" +) + +func getCaptureAndPrintCaptureResult(kubeClient *kubernetes.Clientset, name, namespace string) error { + return listCapturesAndPrintCaptureResults(kubeClient, name, namespace) +} + +func listCapturesInNamespaceAndPrintCaptureResults(kubeClient *kubernetes.Clientset, namespace string) error { + return listCapturesAndPrintCaptureResults(kubeClient, "", namespace) +} + +// listCapturesAndPrintCaptureResults list captures and print the running jobs into properly aligned text. 
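+// Jobs are grouped by their capture-name label and printed as one row per capture with
+// NAMESPACE, CAPTURE NAME, JOBS, COMPLETIONS and AGE columns.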
+func listCapturesAndPrintCaptureResults(kubeClient *kubernetes.Clientset, name, namespace string) error { + jobListOpt := metav1.ListOptions{} + if len(name) != 0 { + captureJobSelector := &metav1.LabelSelector{ + MatchLabels: map[string]string{ + label.CaptureNameLabel: name, + label.AppLabel: captureConstants.CaptureAppname, + }, + } + labelSelector, _ := labels.Parse(metav1.FormatLabelSelector(captureJobSelector)) + jobListOpt = metav1.ListOptions{ + LabelSelector: labelSelector.String(), + } + } + + jobList, err := kubeClient.BatchV1().Jobs(namespace).List(context.TODO(), jobListOpt) + if err != nil { + return err + } + if len(jobList.Items) == 0 { + fmt.Printf("No Capture found in %s namespace.\n", namespace) + return nil + } + printCaptureResult(jobList.Items) + return nil +} + +func printCaptureResult(captureJobs []batchv1.Job) { + if len(captureJobs) == 0 { + return + } + captureToJobs := map[string][]batchv1.Job{} + + for _, job := range captureJobs { + captureName, ok := job.Labels[label.CaptureNameLabel] + if !ok { + continue + } + captureRef := fmt.Sprintf("%s/%s", job.Namespace, captureName) + captureToJobs[captureRef] = append(captureToJobs[captureRef], job) + } + + w := new(tabwriter.Writer) + w.Init(os.Stdout, 0, 8, 3, ' ', 0) + fmt.Fprintln(w, "NAMESPACE\tCAPTURE NAME\tJOBS\tCOMPLETIONS\tAGE") + for captureRef, jobs := range captureToJobs { + captureRef := strings.Split(captureRef, "/") + captureNamespace, captureName := captureRef[0], captureRef[1] + jobNames := []string{} + completedJobNum := 0 + age := "" + totalJobNum := len(jobs) + for _, job := range jobs { + jobNames = append(jobNames, job.Name) + if job.Status.CompletionTime != nil { + completedJobNum += 1 + } + } + sort.SliceStable(jobNames, func(i, j int) bool { + return jobNames[i] < jobNames[j] + }) + if len(jobs) > 0 { + age = durationUtil.HumanDuration(time.Since(jobs[0].CreationTimestamp.Time)) + } + + jobsNameJoined := strings.Join(jobNames, ",") + + completions := fmt.Sprintf("%d/%d", completedJobNum, totalJobNum) + rr := fmt.Sprintf("%s\t%s\t%s\t%s\t%s\t", captureNamespace, captureName, jobsNameJoined, completions, age) + fmt.Fprintln(w, rr) + } + w.Flush() + fmt.Println() +} diff --git a/cli/cmd/config.go b/cli/cmd/config.go new file mode 100644 index 000000000..a3575f082 --- /dev/null +++ b/cli/cmd/config.go @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+package cmd + +import ( + "encoding/json" + "fmt" + "io/ioutil" + + "github.com/spf13/cobra" +) + +// ConfigCmd :- Configure retina CLI +func ConfigCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "config", + Short: "Configure retina CLI", + } + cmd.AddCommand(configView()) + cmd.AddCommand(configSet()) + return cmd +} + +func configSet() *cobra.Command { + cmd := &cobra.Command{ + Use: "set", + Short: "Configure Retina client", + RunE: func(cmd *cobra.Command, args []string) error { + endpoint, _ := cmd.Flags().GetString("endpoint") + config := Config{RetinaEndpoint: endpoint} + b, _ := json.MarshalIndent(config, "", " ") + err := ioutil.WriteFile(ClientConfigPath, b, 0o644) + if err == nil { + fmt.Print(string(b)) + } + + return err + }, + } + cmd.Flags().String("endpoint", "", "Set Retina server") + cmd.MarkFlagRequired("endpoint") //nolint:errcheck + + return cmd +} + +func configView() *cobra.Command { + return &cobra.Command{ + Use: "view", + Short: "View Retina client config", + Run: func(cmd *cobra.Command, args []string) { + // print client config + }, + } +} diff --git a/cli/cmd/root.go b/cli/cmd/root.go new file mode 100644 index 000000000..801477dcc --- /dev/null +++ b/cli/cmd/root.go @@ -0,0 +1,42 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package cmd + +import ( + "encoding/json" + "io/ioutil" + + "github.com/microsoft/retina/pkg/client" + "github.com/microsoft/retina/pkg/log" + "github.com/spf13/cobra" +) + +var Logger *log.ZapLogger + +func init() { + log.SetupZapLogger(log.GetDefaultLogOpts()) + Logger = log.Logger().Named("retina-cli") +} + +// RetinaClient for customer consume +var RetinaClient *client.Retina + +// ClientConfigPath used +var ClientConfigPath = ".retinactl.json" + +type Config struct { + RetinaEndpoint string `json:"retina_endpoint"` +} + +// NewRootCmd returns a root +func NewRootCmd() *cobra.Command { + rootCmd := &cobra.Command{ + PersistentPreRun: func(cmd *cobra.Command, args []string) { + var config Config + file, _ := ioutil.ReadFile(ClientConfigPath) + _ = json.Unmarshal([]byte(file), &config) + RetinaClient = client.NewRetinaClient(config.RetinaEndpoint) + }, + } + return rootCmd +} diff --git a/cli/cmd/trace.go b/cli/cmd/trace.go new file mode 100644 index 000000000..d63680475 --- /dev/null +++ b/cli/cmd/trace.go @@ -0,0 +1,30 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package cmd + +import ( + "github.com/spf13/cobra" +) + +func TraceCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "trace", + Short: "retrieve status or results from Retina", + } + cmd.AddCommand(getTrace()) + return cmd +} + +func getTrace() *cobra.Command { + cmd := &cobra.Command{ + Use: "get", + Short: "Retrieve network trace results with operation ID", + RunE: func(cmd *cobra.Command, args []string) error { + operationID, _ := cmd.Flags().GetString("operationID") + return RetinaClient.GetTrace(operationID) + }, + } + cmd.Flags().String("operationID", "", "Network Trace Operation ID") + _ = cmd.MarkFlagRequired("operationID") + return cmd +} diff --git a/cli/cmd/version.go b/cli/cmd/version.go new file mode 100644 index 000000000..5b4cca501 --- /dev/null +++ b/cli/cmd/version.go @@ -0,0 +1,24 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package cmd + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +// This variable is used by the "version" command and is set during build. 
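// A typical way to set it is the Go linker's -X flag at build time; the version string below is
// illustrative and the exact Makefile invocation may differ:
//
//	go build -ldflags "-X github.com/microsoft/retina/cli/cmd.Version=v0.1.0" ./cli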
+var Version = "undefined" + +func VersionCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "version", + Short: "Show version", + Run: func(cmd *cobra.Command, args []string) { + fmt.Println(Version) + }, + } + return cmd +} diff --git a/cli/main.go b/cli/main.go new file mode 100644 index 000000000..5a2cc24a5 --- /dev/null +++ b/cli/main.go @@ -0,0 +1,15 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package main + +import ( + "github.com/microsoft/retina/cli/cmd" + "github.com/microsoft/retina/cli/cmd/capture" +) + +func main() { + rootCmd := cmd.NewRootCmd() + rootCmd.AddCommand(capture.CaptureCmd()) + rootCmd.AddCommand(cmd.VersionCmd()) + rootCmd.Execute() //nolint:errcheck +} diff --git a/controller/Dockerfile.builder b/controller/Dockerfile.builder new file mode 100644 index 000000000..e7c24467a --- /dev/null +++ b/controller/Dockerfile.builder @@ -0,0 +1,37 @@ +# Stage: Build binary +FROM mcr.microsoft.com/oss/go/microsoft/golang:1.21 AS builder +LABEL Name=retina-builder Version=0.0.1 + +RUN apt-get update &&\ + apt-get -y install lsb-release wget software-properties-common gnupg file git make + +RUN wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - +RUN add-apt-repository "deb http://apt.llvm.org/bullseye/ llvm-toolchain-bullseye-14 main" +RUN apt-get update + +RUN apt-get install -y clang-14 lldb-14 lld-14 clangd-14 man-db +RUN apt-get install -y bpftool libbpf-dev + +RUN ln -s /usr/bin/clang-14 /usr/bin/clang + +COPY . /go/src/github.com/microsoft/retina +WORKDIR /go/src/github.com/microsoft/retina + +# RUN go mod edit -module retina +# RUN go generate ./... + +# Default linux/architecture. +ARG GOOS=linux +ENV GOOS=${GOOS} + +ARG GOARCH=amd64 +ENV GOARCH=${GOARCH} + +ENV CGO_ENABLED=0 + +ARG VERSION +ARG APP_INSIGHTS_ID + +RUN go build -v -o /go/bin/retina/controller -ldflags "-X main.version="$VERSION" -X "main.applicationInsightsID"="$APP_INSIGHTS_ID"" controller/main.go +RUN go build -v -o /go/bin/retina/captureworkload -ldflags "-X main.version="$VERSION" -X "main.applicationInsightsID"="$APP_INSIGHTS_ID"" captureworkload/main.go +RUN go build -v -o /go/bin/retina/initretina -ldflags "-X main.version="$VERSION" -X "main.applicationInsightsID"="$APP_INSIGHTS_ID"" ././init/retina/main.go diff --git a/controller/Dockerfile.controller b/controller/Dockerfile.controller new file mode 100644 index 000000000..917aeb475 --- /dev/null +++ b/controller/Dockerfile.controller @@ -0,0 +1,37 @@ +ARG builderImage="acnpublic.azurecr.io/retina-builder:0.0.1" +ARG toolsImage="acnpublic.azurecr.io/retina-tools:0.0.1" + +# Stage: Builder +FROM ${builderImage} as builder + +# ----------------------------------------------------------------------------------- # + +# Stage: Tools +FROM ${toolsImage} as clang + +# ----------------------------------------------------------------------------------- # + +# Stage: Base distroless image +FROM mcr.microsoft.com/mirror/gcr/distroless/cc-debian11:latest@sha256:b53fbf5f81f4a120a489fedff2092e6fcbeacf7863fce3e45d99cc58dc230ccc as base + +LABEL Name=retina-controller Version=0.0.1 + +# Copy dependencies for clang and tools. +COPY --from=clang /lib/ /lib +COPY --from=clang /usr/lib/ /usr/lib + +# Copy clang+llvm. +COPY --from=clang /usr/local/clang+llvm/bin/clang /bin + +# Copy tools. +COPY --from=clang /tmp/bin/ /bin + +# Copy the Retina binary. 
+COPY --from=builder /go/bin/retina/controller /retina/controller +COPY --from=builder /go/bin/retina/captureworkload /retina/captureworkload + +# Copy the plugin eBPF code and headers. +COPY --from=builder /go/src/github.com/microsoft/retina/pkg/plugin /go/src/github.com/microsoft/retina/pkg/plugin + +EXPOSE 80 +ENTRYPOINT ["./retina/controller"] diff --git a/controller/Dockerfile.gogen b/controller/Dockerfile.gogen new file mode 100644 index 000000000..ffc35dc99 --- /dev/null +++ b/controller/Dockerfile.gogen @@ -0,0 +1,24 @@ +FROM mcr.microsoft.com/oss/go/microsoft/golang:1.21 + +# Default linux/architecture. +ARG GOOS=linux +ENV GOOS=${GOOS} + +ARG GOARCH=amd64 +ENV GOARCH=${GOARCH} + +RUN apt-get update &&\ + apt-get -y install lsb-release wget software-properties-common gnupg file git make + +RUN wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - +RUN add-apt-repository "deb http://apt.llvm.org/bullseye/ llvm-toolchain-bullseye-14 main" +RUN apt-get update + +RUN apt-get install -y clang-14 lldb-14 lld-14 clangd-14 man-db +RUN apt-get install -y bpftool libbpf-dev + +RUN ln -s /usr/bin/clang-14 /usr/bin/clang + +WORKDIR /app + +ENTRYPOINT mkdir /tmp/.cache && export GOCACHE=/tmp/.cache && go generate ./... diff --git a/controller/Dockerfile.init b/controller/Dockerfile.init new file mode 100644 index 000000000..2509ec9c0 --- /dev/null +++ b/controller/Dockerfile.init @@ -0,0 +1,25 @@ +ARG builderImage="acnpublic.azurecr.io/retina-builder:0.0.1" +ARG toolsImage="acnpublic.azurecr.io/retina-tools:0.0.1" + +# Stage: Builder +FROM ${builderImage} as builder + +# ----------------------------------------------------------------------------------- # + +# Stage: Tools +FROM ${toolsImage} as tools + +# ----------------------------------------------------------------------------------- # + +# Stage: Base distroless image +FROM mcr.microsoft.com/mirror/gcr/distroless/cc-debian11:latest@sha256:b53fbf5f81f4a120a489fedff2092e6fcbeacf7863fce3e45d99cc58dc230ccc as base +LABEL Name=retina-filtermap-init Version=0.0.1 + +COPY --from=builder /go/bin/retina/initretina /retina/initretina + +# Copy dependencies for mount. Needed for initretina. +COPY --from=tools /lib/ /lib +COPY --from=tools /usr/lib/ /usr/lib +COPY --from=tools /bin/mount /bin/mount + +ENTRYPOINT ["./retina/initretina"] diff --git a/controller/Dockerfile.proto b/controller/Dockerfile.proto new file mode 100644 index 000000000..68d6fb015 --- /dev/null +++ b/controller/Dockerfile.proto @@ -0,0 +1,15 @@ +FROM --platform=linux/amd64 mcr.microsoft.com/oss/go/microsoft/golang:1.21 AS builder +LABEL Name=retina-builder Version=0.0.1 + +RUN apt-get update &&\ + apt-get -y install lsb-release wget software-properties-common gnupg file git make unzip + +WORKDIR /tmp + +RUN go install google.golang.org/protobuf/cmd/protoc-gen-go@latest +RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v24.2/protoc-24.2-linux-x86_64.zip +RUN unzip protoc-24.2-linux-x86_64.zip -d protoc +RUN mv protoc/bin/protoc /usr/bin/protoc + +WORKDIR /app +ENTRYPOINT protoc -I=. --go_out=paths=source_relative:. 
./pkg/utils/metadata.proto diff --git a/controller/Dockerfile.tools b/controller/Dockerfile.tools new file mode 100644 index 000000000..094eb1575 --- /dev/null +++ b/controller/Dockerfile.tools @@ -0,0 +1,41 @@ +# Stage: Prepare clang and tools +# Bullseye -> debian11 +FROM mcr.microsoft.com/mirror/docker/library/debian:bullseye@sha256:a648e10e02af129706b1fb89e1ac9694ae3db7f2b8439aa906321e68cc281bc0 +LABEL Name=retina-tools Version=0.0.1 + +WORKDIR /tmp +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + curl xz-utils binutils wget gnupg2 \ + ca-certificates tcpdump iproute2 iptables + +RUN mkdir -p /usr/local + +ARG GOARCH=amd64 +ENV GOARCH=${GOARCH} + +RUN if [ "$GOARCH" = "amd64" ] ; then \ + # Download clang and llvm. + wget https://releases.llvm.org/release-keys.asc; \ + gpg2 --import release-keys.asc; \ + wget -O clang+llvm.tar.xz https://github.com/llvm/llvm-project/releases/download/llvmorg-14.0.0/clang+llvm-14.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz; \ + wget -O clang+llvm.tar.xz.sig https://github.com/llvm/llvm-project/releases/download/llvmorg-14.0.0/clang+llvm-14.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz.sig; \ + gpg2 --verify clang+llvm.tar.xz.sig clang+llvm.tar.xz; \ + tar -C /usr/local -xJf ./clang+llvm.tar.xz --no-same-owner; \ + mv /usr/local/clang+llvm-14.0.0-x86_64-linux-gnu-ubuntu-18.04 /usr/local/clang+llvm; \ + else \ + # GOARCH=Arm64. + # Download clang and llvm. + wget -O clang+llvm.tar.xz https://github.com/llvm/llvm-project/releases/download/llvmorg-14.0.0/clang+llvm-14.0.0-aarch64-linux-gnu.tar.xz; \ + tar -C /usr/local -xJf ./clang+llvm.tar.xz --no-same-owner; \ + mv /usr/local/clang+llvm-14.0.0-aarch64-linux-gnu /usr/local/clang+llvm; \ + # Prepare dependencies for clang to be copied to base. + apt-get install -y --no-install-recommends libncurses5; \ + fi + +# Copy tools. +RUN mkdir -p /tmp/bin +RUN arr="tcpdump ip ss iptables-legacy iptables-legacy-save iptables-nft iptables-nft-save cp uname" ;\ + for i in $arr; do \ + cp $(which $i) /tmp/bin; \ + done diff --git a/controller/Dockerfile.windows-2019 b/controller/Dockerfile.windows-2019 new file mode 100644 index 000000000..490ed2209 --- /dev/null +++ b/controller/Dockerfile.windows-2019 @@ -0,0 +1,25 @@ +FROM --platform=linux/amd64 mcr.microsoft.com/oss/go/microsoft/golang:1.21 as builder +# Build args +ARG VERSION +ARG APP_INSIGHTS_ID + +ENV GOOS=windows +ENV GOARCH=amd64 + +WORKDIR /usr/src/retina +# Copy the source +COPY . . 
+ +RUN go build -v -o /usr/bin/controller.exe -ldflags "-X main.version="$VERSION" -X "main.applicationInsightsID"="$APP_INSIGHTS_ID"" ./controller/ +RUN go build -v -o /usr/bin/captureworkload.exe ./captureworkload/ + +# Copy into final image +FROM mcr.microsoft.com/windows/servercore:ltsc2019 +COPY --from=builder /usr/src/retina/windows/kubeconfigtemplate.yaml kubeconfigtemplate.yaml +COPY --from=builder /usr/src/retina/windows/setkubeconfigpath.ps1 setkubeconfigpath.ps1 +COPY --from=builder /usr/bin/controller.exe controller.exe +COPY --from=builder /usr/bin/captureworkload.exe captureworkload.exe + +ADD https://github.com/microsoft/etl2pcapng/releases/download/v1.10.0/etl2pcapng.exe /etl2pcapng.exe + +CMD ["controller.exe", "start", "--kubeconfig=.\\kubeconfig"] diff --git a/controller/Dockerfile.windows-2022 b/controller/Dockerfile.windows-2022 new file mode 100644 index 000000000..70cd27258 --- /dev/null +++ b/controller/Dockerfile.windows-2022 @@ -0,0 +1,26 @@ +FROM --platform=linux/amd64 mcr.microsoft.com/oss/go/microsoft/golang:1.21 as builder + +# Build args +ARG VERSION +ARG APP_INSIGHTS_ID + +ENV GOOS=windows +ENV GOARCH=amd64 + +WORKDIR /usr/src/retina +# Copy the source +COPY . . + +RUN go build -v -o /usr/bin/controller.exe -ldflags "-X main.version="$VERSION" -X "main.applicationInsightsID"="$APP_INSIGHTS_ID"" ./controller/ +RUN go build -v -o /usr/bin/captureworkload.exe ./captureworkload/ + +# Copy into final image +FROM mcr.microsoft.com/windows/servercore:ltsc2022 +COPY --from=builder /usr/src/retina/windows/kubeconfigtemplate.yaml kubeconfigtemplate.yaml +COPY --from=builder /usr/src/retina/windows/setkubeconfigpath.ps1 setkubeconfigpath.ps1 +COPY --from=builder /usr/bin/controller.exe controller.exe +COPY --from=builder /usr/bin/captureworkload.exe captureworkload.exe + +ADD https://github.com/microsoft/etl2pcapng/releases/download/v1.10.0/etl2pcapng.exe /etl2pcapng.exe + +CMD ["controller.exe", "start", "--kubeconfig=.\\kubeconfig"] diff --git a/controller/main.go b/controller/main.go new file mode 100644 index 000000000..1666fb610 --- /dev/null +++ b/controller/main.go @@ -0,0 +1,311 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+package main + +import ( + "flag" + "os" + "strings" + "time" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/fields" + k8sruntime "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/kubernetes" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + crcache "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + kcfg "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/healthz" + crmgr "sigs.k8s.io/controller-runtime/pkg/manager" + + retinav1alpha1 "github.com/microsoft/retina/crd/api/v1alpha1" + "github.com/microsoft/retina/pkg/config" + controllercache "github.com/microsoft/retina/pkg/controllers/cache" + mcc "github.com/microsoft/retina/pkg/controllers/daemon/metricsconfiguration" + namespacecontroller "github.com/microsoft/retina/pkg/controllers/daemon/namespace" + nc "github.com/microsoft/retina/pkg/controllers/daemon/node" + pc "github.com/microsoft/retina/pkg/controllers/daemon/pod" + kec "github.com/microsoft/retina/pkg/controllers/daemon/retinaendpoint" + sc "github.com/microsoft/retina/pkg/controllers/daemon/service" + + "github.com/microsoft/retina/pkg/enricher" + "github.com/microsoft/retina/pkg/log" + cm "github.com/microsoft/retina/pkg/managers/controllermanager" + "github.com/microsoft/retina/pkg/managers/filtermanager" + "github.com/microsoft/retina/pkg/metrics" + mm "github.com/microsoft/retina/pkg/module/metrics" + "github.com/microsoft/retina/pkg/pubsub" + "github.com/microsoft/retina/pkg/telemetry" +) + +const ( + configFileName = "/retina/config/config.yaml" + logFileName = "retina.log" + heartbeatInterval = 5 * time.Minute + + nodeNameEnvKey = "NODE_NAME" + nodeIPEnvKey = "NODE_IP" +) + +var ( + scheme = k8sruntime.NewScheme() + + applicationInsightsID string //nolint // aiMetadata is set in Makefile + version string + + cfgFile string +) + +func init() { + //+kubebuilder:scaffold:scheme + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(retinav1alpha1.AddToScheme(scheme)) +} + +func main() { + var metricsAddr string + var enableLeaderElection bool + var probeAddr string + flag.StringVar(&metricsAddr, "metrics-bind-address", ":18080", "The address the metric endpoint binds to.") + flag.StringVar(&probeAddr, "health-probe-bind-address", ":18081", "The address the probe endpoint binds to.") + flag.BoolVar(&enableLeaderElection, "leader-elect", false, + "Enable leader election for controller manager. 
"+ + "Enabling this will ensure there is only one active controller manager.") + + // initialize Application Insights + telemetry.InitAppInsights(applicationInsightsID, version) + + defer telemetry.TrackPanic() + + flag.StringVar(&cfgFile, "config", configFileName, "config file") + flag.Parse() + + config, err := config.GetConfig(cfgFile) + if err != nil { + panic(err) + } + + err = initLogging(config, applicationInsightsID) + if err != nil { + panic(err) + } + + mainLogger := log.Logger().Named("main").Sugar() + + mainLogger.Infof("starting Retina version: %s", version) + mainLogger.Info("Reading config ...") + + mainLogger.Info("Initializing metrics") + metrics.InitializeMetrics() + + mainLogger.Info("Initializing Kubernetes client-go ...") + cfg, err := kcfg.GetConfig() + if err != nil { + panic(err) + } + additionalLoggerFields := []zap.Field{ + zap.String("version", version), + zap.String("apiserver", cfg.Host), + zap.String("plugins", strings.Join(config.EnabledPlugin, `,`)), + } + + // Setup the logger to log the version, apiserver and enabled plugin. + log.Logger().AddFields(additionalLoggerFields...) + mainLogger = log.Logger().Named("main").Sugar() + + var tel telemetry.Telemetry + if config.EnableTelemetry { + mainLogger.Infof("telemetry enabled, using Application Insights ID: %s", applicationInsightsID) + tel = telemetry.NewAppInsightsTelemetryClient("retina-agent", map[string]string{ + "version": version, + "apiserver": cfg.Host, + "plugins": strings.Join(config.EnabledPlugin, `,`), + }) + } else { + mainLogger.Info("telemetry disabled") + tel = telemetry.NewNoopTelemetry() + } + + // Create a manager for controller-runtime + + mgrOption := crmgr.Options{ + Scheme: scheme, + MetricsBindAddress: metricsAddr, + // Port: 9443, // retina-agent is host-networked, we don't want to abuse the port for conflicts. + HealthProbeBindAddress: probeAddr, + LeaderElection: enableLeaderElection, + LeaderElectionID: "ecaf1259.retina.io", + } + + // Local context has its meaning only when pod level(advanced) metrics is enabled. + if config.EnablePodLevel && !config.RemoteContext { + mainLogger.Info("Remote context is disabled, only pods deployed on the same node as retina-agent will be monitored") + // the new cache sets Selector options on the Manager cache which are used + // to perform *server-side* filtering of the cached objects. This is very important + // for high node/pod count clusters, as it keeps us from watching objects at the + // whole cluster scope when we are only interested in the Node's scope. + nodeName := os.Getenv(nodeNameEnvKey) + if len(nodeName) == 0 { + mainLogger.Error("failed to get node name from environment variable", zap.String("node name env key", nodeNameEnvKey)) + os.Exit(1) + } + podNodeNameSelector := fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName}) + // Ignore hostnetwork pods which share the same IP with the node and pods on the same node. + // Unlike spec.nodeName, field label "spec.hostNetwork" is not supported, and as a workaround, + // We use status.podIP to filter out hostnetwork pods. 
+ // https://github.com/kubernetes/kubernetes/blob/41da26dbe15207cbe5b6c36b48a31d2cd3344123/pkg/apis/core/v1/conversion.go#L36 + nodeIP := os.Getenv(nodeIPEnvKey) + if len(nodeIP) == 0 { + mainLogger.Error("failed to get node IP from environment variable", zap.String("node IP env key", nodeIPEnvKey)) + os.Exit(1) + } + podNodeIPNotMatchSelector := fields.OneTermNotEqualSelector("status.podIP", nodeIP) + podSelector := fields.AndSelectors(podNodeNameSelector, podNodeIPNotMatchSelector) + + mainLogger.Info("pod selector when remote context is disabled", zap.String("pod selector", podSelector.String())) + mgrOption.NewCache = crcache.BuilderWithOptions(crcache.Options{ + ByObject: map[client.Object]crcache.ByObject{ + &corev1.Pod{}: { + Field: podSelector, + }, + }, + }) + } + + mgr, err := crmgr.New(cfg, mgrOption) + if err != nil { + mainLogger.Error("Unable to start manager", zap.Error(err)) + os.Exit(1) + } + + //+kubebuilder:scaffold:builder + + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + mainLogger.Error("Unable to set up health check", zap.Error(err)) + os.Exit(1) + } + if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + mainLogger.Error("Unable to set up ready check", zap.Error(err)) + os.Exit(1) + } + + // k8s Client used for informers + cl := kubernetes.NewForConfigOrDie(mgr.GetConfig()) + + serverVersion, err := cl.Discovery().ServerVersion() + if err != nil { + mainLogger.Error("failed to get Kubernetes server version: ", zap.Error(err)) + } else { + mainLogger.Infof("Kubernetes server version: %v", serverVersion) + } + + // Setup RetinaEndpoint controller. + // TODO(mainred): This is to temporarily create a cache and pubsub for RetinaEndpoint, need to refactor this. + ctx := ctrl.SetupSignalHandler() + + if config.EnablePodLevel { + pubSub := pubsub.New() + controllerCache := controllercache.New(pubSub) + enrich := enricher.New(ctx, controllerCache) + fm, err := filtermanager.Init(5) + if err != nil { + mainLogger.Error("unable to create filter manager", zap.Error(err)) + os.Exit(1) + } + defer fm.Stop() + enrich.Run() + metricsModule := mm.InitModule(ctx, config, pubSub, enrich, fm, controllerCache) + + if !config.RemoteContext { + mainLogger.Info("Initializing Pod controller") + + podController := pc.New(mgr.GetClient(), controllerCache) + if err := podController.SetupWithManager(mgr); err != nil { + mainLogger.Fatal("unable to create PodController", zap.Error(err)) + } + } else { + if config.EnableRetinaEndpoint { + mainLogger.Info("RetinaEndpoint is enabled") + mainLogger.Info("Initializing RetinaEndpoint controller") + + retinaEndpointController := kec.New(mgr.GetClient(), controllerCache) + if err := retinaEndpointController.SetupWithManager(mgr); err != nil { + mainLogger.Fatal("unable to create retinaEndpointController", zap.Error(err)) + } + } + } + + mainLogger.Info("Initializing Node controller") + nodeController := nc.New(mgr.GetClient(), controllerCache) + if err := nodeController.SetupWithManager(mgr); err != nil { + mainLogger.Fatal("unable to create nodeController", zap.Error(err)) + } + + mainLogger.Info("Initializing Service controller") + svcController := sc.New(mgr.GetClient(), controllerCache) + if err := svcController.SetupWithManager(mgr); err != nil { + mainLogger.Fatal("unable to create svcController", zap.Error(err)) + } + + if config.EnableAnnotations { + mainLogger.Info("Initializing MetricsConfig namespaceController") + namespaceController := namespacecontroller.New(mgr.GetClient(), controllerCache, 
metricsModule) + if err := namespaceController.SetupWithManager(mgr); err != nil { + mainLogger.Fatal("unable to create namespaceController", zap.Error(err)) + } + go namespaceController.Start(ctx) + } else { + mainLogger.Info("Initializing MetricsConfig controller") + metricsConfigController := mcc.New(mgr.GetClient(), mgr.GetScheme(), metricsModule) + if err := metricsConfigController.SetupWithManager(mgr); err != nil { + mainLogger.Fatal("unable to create metricsConfigController", zap.Error(err)) + } + } + } + + controllerMgr, err := cm.NewControllerManager(config, cl, tel) + if err != nil { + mainLogger.Fatal("Failed to create controller manager", zap.Error(err)) + } + if err := controllerMgr.Init(ctx); err != nil { + mainLogger.Fatal("Failed to initialize controller manager", zap.Error(err)) + } + // Stop is best effort. If it fails, we still want to stop the main process. + // This is needed for graceful shutdown of Retina plugins. + // Do it in the main thread as graceful shutdown is important. + defer controllerMgr.Stop(ctx) + + // start heartbeat goroutine for application insights + go tel.Heartbeat(ctx, heartbeatInterval) + + // Start controller manager, which will start http server and plugin manager. + go controllerMgr.Start(ctx) + mainLogger.Info("Started controller manager") + + // Start all registered controllers. This will block until container receives SIGTERM. + if err := mgr.Start(ctx); err != nil { + mainLogger.Fatal("unable to start manager", zap.Error(err)) + } + + mainLogger.Info("Network observability exiting. Till next time!") +} + +func initLogging(config *config.Config, applicationInsightsID string) error { + logOpts := &log.LogOpts{ + Level: config.LogLevel, + File: false, + FileName: logFileName, + MaxFileSizeMB: 100, + MaxBackups: 3, + MaxAgeDays: 30, + ApplicationInsightsID: applicationInsightsID, + EnableTelemetry: config.EnableTelemetry, + } + + log.SetupZapLogger(logOpts) + return nil +} diff --git a/crd/Makefile b/crd/Makefile new file mode 100644 index 000000000..9a1e3caab --- /dev/null +++ b/crd/Makefile @@ -0,0 +1,21 @@ +.DEFAULT_GOAL = all + +REPO_ROOT = $(shell git rev-parse --show-toplevel) +TOOLS_DIR = $(REPO_ROOT)/hack/tools +TOOLS_BIN_DIR = $(TOOLS_DIR)/bin +CONTROLLER_GEN = $(TOOLS_BIN_DIR)/controller-gen +HELM_CRD_DIR = $(REPO_ROOT)/deploy/manifests/controller/helm/retina/crds + +.PHONY: generate manifests + +all: generate manifests + +generate: $(CONTROLLER_GEN) + $(CONTROLLER_GEN) object paths="./api/..." + +manifests: $(CONTROLLER_GEN) + rm -rf $(HELM_CRD_DIR) + $(CONTROLLER_GEN) crd paths="./api/..." output:crd:artifacts:config=$(HELM_CRD_DIR) + +$(CONTROLLER_GEN): + @make -C $(REPO_ROOT) $(CONTROLLER_GEN) diff --git a/crd/README.md b/crd/README.md new file mode 100644 index 000000000..b7bab82f8 --- /dev/null +++ b/crd/README.md @@ -0,0 +1,3 @@ +# Retina CRDs + +This package contains the CRD definitions for Retina diff --git a/crd/api/v1alpha1/capture_types.go b/crd/api/v1alpha1/capture_types.go new file mode 100644 index 000000000..68cd4578b --- /dev/null +++ b/crd/api/v1alpha1/capture_types.go @@ -0,0 +1,180 @@ +/* +Copyright (c) Microsoft Corporation. +Licensed under the MIT license. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type CaptureConditionType string + +const ( + // CaptureComplete indicates the capture is completed. + CaptureComplete CaptureConditionType = "complete" + // CaptureError indicates the capture is errored. 
+ CaptureError CaptureConditionType = "error" +) + +// CaptureStatus describes the status of the capture. +type CaptureStatus struct { + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + + // Represents time when the Capture controller started processing a job. + // +optional + StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"` + + // Represents time when the Capture was completed, and it is determined by the last completed capture job. + // The completion time is only set when the Capture finishes successfully. + // +optional + CompletionTime *metav1.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"` + + // The number of pending and running jobs. + // +optional + Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"` + + // The number of completed jobs. + // +optional + Succeeded int32 `json:"succeeded,omitempty" protobuf:"varint,5,opt,name=succeeded"` + + // The number of failed jobs. + // +optional + Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"` +} + +// CaptureOption lists the options of the capture. +type CaptureOption struct { + // Duration indicates the length of time that the capture should continue for. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$" + // +optional + Duration *metav1.Duration `json:"duration,omitempty"` + + // PacketSize limits each packet to the given number of bytes; packets longer than PacketSize will be truncated. + // +optional + PacketSize *int `json:"packetSize,omitempty"` + + // MaxCaptureSize limits the capture file size, in MB. + // +kubebuilder:default=100 + // +optional + MaxCaptureSize *int `json:"maxCaptureSize,omitempty"` +} + +// CaptureTarget indicates the target on which the network packets capture will be performed. +type CaptureTarget struct { + // NodeSelector is a selector which selects the nodes on which to capture network packets. + // The selector must match a node's labels. + // NodeSelector is incompatible with the NamespaceSelector/PodSelector pair. + // +optional + NodeSelector *metav1.LabelSelector `json:"nodeSelector,omitempty"` + + // NamespaceSelector selects Namespaces using cluster-scoped labels. This field follows + // standard label selector semantics. + // The NamespaceSelector and PodSelector pair selects pods whose network namespace traffic will be captured. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"` + + // This is a label selector which selects Pods. This field follows standard label + // selector semantics. + // +optional + PodSelector *metav1.LabelSelector `json:"podSelector,omitempty"` +} + +// CaptureConfiguration indicates the configuration of the network capture. +type CaptureConfiguration struct { + CaptureTarget CaptureTarget `json:"captureTarget"` + // Filters represent a range of filters to be included/excluded in the capture. + // +optional + Filters *CaptureConfigurationFilters `json:"filters,omitempty"` + + // TcpdumpFilter is a raw tcpdump filter string. + // +optional + TcpdumpFilter *string `json:"tcpdumpFilter,omitempty"` + + // IncludeMetadata represents whether or not networking metadata should be captured.
+ // Networking metadata will consist of the following info, but is expected to grow: + // - IP address configuration + // - IP neighbor status + // - IPtables rule dumps + // - Network statistics information + // +optional + // +kubebuilder:default=true + IncludeMetadata bool `json:"includeMetadata"` + + // +optional + CaptureOption CaptureOption `json:"captureOption,omitempty"` +} + +// CaptureConfigurationFilters presents the filters of a capture. +type CaptureConfigurationFilters struct { + // Include specifies what IP or IP:port is included in the capture with wildcard support. + // If a port is not specified or is *, the port filter is excluded. + // If an IP is specified as *, the host filter should be included. + // Include and Exclude arguments will finally be translated into a logic like: + // (include1 or include2) and not (exclude1 or exclude2) + // +optional + Include []string `json:"include,omitempty"` + // Exclude specifies what IP or IP:port is excluded in the capture with wildcard support. + // See Include for a detailed explanation. + // +optional + Exclude []string `json:"exclude,omitempty"` +} + +// OutputConfiguration indicates where the capture will be stored. +type OutputConfiguration struct { + // HostPath stores the capture files into the specified host filesystem. + // If nothing exists at the given path of the host, an empty directory will be created there. + // +optional + HostPath *string `json:"hostPath,omitempty"` + // PersistentVolumeClaim mounts the supplied PVC into the pod on `/capture` and writes the capture files there. + // +optional + PersistentVolumeClaim *string `json:"persistentVolumeClaim,omitempty"` + // BlobUpload is a secret containing the blob SAS URL to the given blob container. + // +optional + BlobUpload *string `json:"blobUpload,omitempty"` +} + +// CaptureSpec indicates the specification of Capture. +type CaptureSpec struct { + // +kubebuilder:validation:Required + CaptureConfiguration CaptureConfiguration `json:"captureConfiguration"` + // +kubebuilder:validation:Required + OutputConfiguration OutputConfiguration `json:"outputConfiguration,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:categories={retina},scope=Namespaced +// +kubebuilder:subresource:status + +// Capture indicates the settings of a network trace. +type Capture struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:Required + Spec CaptureSpec `json:"spec"` + // +optional + Status CaptureStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CaptureList contains a list of Capture.
+type CaptureList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + // +listType=set + Items []Capture `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Capture{}, &CaptureList{}) +} diff --git a/crd/api/v1alpha1/groupversion_info.go b/crd/api/v1alpha1/groupversion_info.go new file mode 100644 index 000000000..5ddc970e6 --- /dev/null +++ b/crd/api/v1alpha1/groupversion_info.go @@ -0,0 +1,23 @@ +//go:build !ignore_uncovered +// +build !ignore_uncovered + +// Package v1alpha1 contains API Schema definitions for the retina v1alpha1 API group +// +kubebuilder:object:generate=true +// +groupName=retina.io +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "retina.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/crd/api/v1alpha1/metricsconfiguration_types.go b/crd/api/v1alpha1/metricsconfiguration_types.go new file mode 100644 index 000000000..18902c6fa --- /dev/null +++ b/crd/api/v1alpha1/metricsconfiguration_types.go @@ -0,0 +1,121 @@ +//go:build !ignore_uncovered +// +build !ignore_uncovered + +/* +Copyright (c) Microsoft Corporation. +Licensed under the MIT license. +*/ + +package v1alpha1 + +import ( + "reflect" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. +// Important: Run "make" to regenerate code after modifying this file + +const ( + StateInitialized string = "Initialized" + StateAccepted string = "Accepted" + StateErrored string = "Errored" + StateWarning string = "Warning" +) + +// MetricsContextOptions indicates the configuration for retina plugin metrics +type MetricsContextOptions struct { + // MetricName indicates the name of the metric + MetricName string `json:"metricName"` + // SourceLabels represents the source context of the metrics collected + // Such as IP, pod, port + // +listType=set + SourceLabels []string `json:"sourceLabels,omitempty"` + // DestinationLabels represents the destination context of the metrics collected + // Such as IP, pod, port, workload (deployment/replicaset/statefulset/daemonset) + // +listType=set + DestinationLabels []string `json:"destinationLabels,omitempty"` + // AdditionalContext represents the additional context of the metrics collected + // Such as Direction (ingress/egress) + // +optional + // +listType=set + AdditionalLabels []string `json:"additionalLabels,omitempty"` +} + +// MetricsNamespaces indicates the namespaces to include or exclude in metric collection +type MetricsNamespaces struct { + // +listType=set + Include []string `json:"include,omitempty"` + // +listType=set + Exclude []string `json:"exclude,omitempty"` +} + +// Specification of the desired behavior of the RetinaMetrics. Can be omitted because this is for advanced metrics. 
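// A minimal illustrative spec (the metric and label names mirror those used in the validation tests
// added in this change; they are examples, not defaults):
//
//	MetricsSpec{
//		ContextOptions: []MetricsContextOptions{{MetricName: "drop_count", SourceLabels: []string{"ip", "port"}}},
//		Namespaces:     MetricsNamespaces{Include: []string{"default"}},
//	}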
+type MetricsSpec struct { + ContextOptions []MetricsContextOptions `json:"contextOptions"` + Namespaces MetricsNamespaces `json:"namespaces"` +} + +type MetricsStatus struct { + // +kubebuilder:validation:Enum=Initialized;Accepted;Errored;Warning + // +kubebuilder:default:="Initialized" + State string `json:"state"` + Reason string `json:"reason"` + LastKnownSpec *MetricsSpec `json:"lastKnownSpec,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:categories={retina},scope=Cluster +// +kubebuilder:subresource:status + +// MetricsConfiguration contains the specification for the retina plugin metrics +type MetricsConfiguration struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec MetricsSpec `json:"spec"` + // +optional + Status MetricsStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// MetricsConfigurationList contains a list of MetricsConfiguration +type MetricsConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MetricsConfiguration `json:"items"` +} + +func init() { + SchemeBuilder.Register(&MetricsConfiguration{}, &MetricsConfigurationList{}) +} + +func (m *MetricsContextOptions) IsAdvanced() bool { + return m != nil && + m.MetricName != "" && (len(m.SourceLabels) > 0 || + len(m.DestinationLabels) > 0) +} + +func (m *MetricsSpec) WithIncludedNamespaces(namespaces []string) *MetricsSpec { + m.Namespaces.Include = namespaces + return m +} + +func (m *MetricsSpec) WithMetricsContextOptions(metrics, srcLabels, dstLabels []string) *MetricsSpec { + for _, metric := range metrics { + m.ContextOptions = append(m.ContextOptions, MetricsContextOptions{ + MetricName: metric, + SourceLabels: srcLabels, + DestinationLabels: dstLabels, + }) + } + return m +} + +func (m *MetricsSpec) Equals(other *MetricsSpec) bool { + return reflect.DeepEqual(m, other) +} diff --git a/crd/api/v1alpha1/retinaendpoint_types.go b/crd/api/v1alpha1/retinaendpoint_types.go new file mode 100644 index 000000000..1454cd387 --- /dev/null +++ b/crd/api/v1alpha1/retinaendpoint_types.go @@ -0,0 +1,72 @@ +/* +Copyright (c) Microsoft Corporation. +Licensed under the MIT license. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
+ +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=ke +// +kubebuilder:printcolumn:name="Pod IP",type=string,priority=0,JSONPath=`.spec.podIP` +// +kubebuilder:printcolumn:name="Referenced By",type=string,priority=1,JSONPath=`.spec.ownerReferences` +// RetinaEndpoint is the Schema for the retinaendpoints API +type RetinaEndpoint struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec RetinaEndpointSpec `json:"spec,omitempty"` + Status RetinaEndpointStatus `json:"status,omitempty"` +} + +// RetinaEndpointSpec defines the desired state of RetinaEndpoint +type RetinaEndpointSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + Containers []RetinaEndpointStatusContainers `json:"containers,omitempty"` + OwnerReferences []OwnerReference `json:"ownerReferences,omitempty"` + NodeIP string `json:"nodeIP,omitempty"` + PodIP string `json:"podIP,omitempty"` + PodIPs []string `json:"podIPs,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` +} + +type Containers struct { + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` +} + +type RetinaEndpointStatusContainers struct { + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` +} + +type OwnerReference struct { + APIVersion string `json:"apiVersion,omitempty"` + Kind string `json:"kind,omitempty"` + Name string `json:"name,omitempty"` +} + +// RetinaEndpointStatus defines the observed state of RetinaEndpoint +type RetinaEndpointStatus struct{} + +//+kubebuilder:object:root=true + +// RetinaEndpointList contains a list of RetinaEndpoint +type RetinaEndpointList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []RetinaEndpoint `json:"items"` +} + +func init() { + SchemeBuilder.Register(&RetinaEndpoint{}, &RetinaEndpointList{}) +} diff --git a/crd/api/v1alpha1/tracesconfiguration_types.go b/crd/api/v1alpha1/tracesconfiguration_types.go new file mode 100644 index 000000000..cc6714e8a --- /dev/null +++ b/crd/api/v1alpha1/tracesconfiguration_types.go @@ -0,0 +1,283 @@ +//go:build !ignore_uncovered +// +build !ignore_uncovered + +/* +Copyright (c) Microsoft Corporation. +Licensed under the MIT license. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
+// Important: Run "make" to regenerate code after modifying this file + +const ( + AllPacketsCapture string = "AllPackets" + FirstPacketCapture string = "FirstPacket" + PodToNode string = "PodToNode" + NodeToPod string = "NodeToPod" + NodeToNetwork string = "NodeToNetwork" + NetworkToNode string = "NetworkToNode" +) + +type TracePoints []string + +type TracePorts struct { + // +kubebuilder:validation:XIntOrString + Port string `json:"port"` + // +kubebuilder:default=TCP + Protocol string `json:"protocol"` + // +optional + // +kubebuilder:validation:XIntOrString + EndPort string `json:"endPort"` +} + +type IPBlock struct { + // +optional + CIDR string `json:"cidr"` + // +optional + Except []string `json:"except"` +} + +type TraceTarget struct { + // +optional + IPBlock IPBlock `json:"ipBlock"` + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector"` + // +optional + PodSelector *metav1.LabelSelector `json:"podSelector"` + // +optional + NodeSelector *metav1.LabelSelector `json:"nodeSelector"` + // +optional + ServiceSelector *metav1.LabelSelector `json:"serviceSelector"` +} + +type TraceTargets struct { + // +optional + Source *TraceTarget `json:"from"` + // +optional + Destination *TraceTarget `json:"to"` + // +optional + Ports []*TracePorts `json:"ports"` + // +optional + TracePoints TracePoints `json:"tracePoints"` +} + +// TraceConfiguration indicates the configuration for retina traces +type TraceConfiguration struct { + // +kubebuilder:validation:Enum=AllPackets;FirstPacket + TraceCaptureLevel string `json:"captureLevel"` + IncludeLayer7Data bool `json:"includeLayer7Data"` + TraceTargets []*TraceTargets `json:"traceTargets"` +} + +// TraceOutputConfiguration indicates the configuration for retina traces outputs +type TraceOutputConfiguration struct { + // +kubebuilder:validation:Enum=stdout;azuretable;loganalytics;opentelemetry + TraceOutputDestination string `json:"destination"` + ConnectionConfiguration string `json:"connectionConfiguration"` +} + +// Specification of the desired behavior of the RetinaTraces. Can be omitted because this is for advanced Traces. 
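// A minimal illustrative spec (field values are hypothetical; see the enums and constants above for
// the allowed values):
//
//	TracesSpec{
//		TraceConfiguration: []*TraceConfiguration{{
//			TraceCaptureLevel: FirstPacketCapture,
//			TraceTargets:      []*TraceTargets{{TracePoints: TracePoints{PodToNode}}},
//		}},
//		TraceOutputConfiguration: &TraceOutputConfiguration{TraceOutputDestination: "stdout"},
//	}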
+type TracesSpec struct { + TraceConfiguration []*TraceConfiguration `json:"traceConfiguration"` + TraceOutputConfiguration *TraceOutputConfiguration `json:"outputConfiguration"` +} + +type TracesStatus struct { + // +kubebuilder:validation:Enum=Initialized;Accepted;Errored;Warning + // +kubebuilder:default:="Initialized" + State string `json:"state"` + Reason string `json:"reason"` + LastKnownSpec *TracesSpec `json:"lastKnownSpec"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:categories={retina},scope=Cluster +// +kubebuilder:subresource:status + +// TracesConfiguration contains the specification for the retina plugin Traces +type TracesConfiguration struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec *TracesSpec `json:"spec"` + + Status *TracesStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// TracesConfigurationList contains a list of TracesConfiguration +type TracesConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []TracesConfiguration `json:"items"` +} + +func init() { + SchemeBuilder.Register(&TracesConfiguration{}, &TracesConfigurationList{}) +} + +func (tc *TraceConfiguration) Equal(new *TraceConfiguration) bool { + if tc == nil && new == nil { + return true + } + + if tc == nil || new == nil { + return false + } + + if tc.TraceCaptureLevel != new.TraceCaptureLevel { + return false + } + + if tc.IncludeLayer7Data != new.IncludeLayer7Data { + return false + } + + if len(tc.TraceTargets) != len(new.TraceTargets) { + return false + } + + for i := range tc.TraceTargets { + if !tc.TraceTargets[i].Equal(new.TraceTargets[i]) { + return false + } + } + + return true +} + +func (tt *TraceTargets) Equal(new *TraceTargets) bool { + if tt == nil && new == nil { + return true + } + + if tt == nil || new == nil { + return false + } + + if !tt.Source.Equal(new.Source) { + return false + } + + if !tt.Destination.Equal(new.Destination) { + return false + } + + if len(tt.Ports) != len(new.Ports) { + return false + } + + for i := range tt.Ports { + if !tt.Ports[i].Equal(new.Ports[i]) { + return false + } + } + + if len(tt.TracePoints) != len(new.TracePoints) { + return false + } + + for i := range tt.TracePoints { + if tt.TracePoints[i] != new.TracePoints[i] { + return false + } + } + + return true +} + +func (tp *TracePorts) Equal(new *TracePorts) bool { + if tp == nil && new == nil { + return true + } + + if tp == nil || new == nil { + return false + } + + if tp.Port != new.Port { + return false + } + + if tp.Protocol != new.Protocol { + return false + } + + if tp.EndPort != new.EndPort { + return false + } + + return true +} + +func (tt *TraceTarget) Equal(new *TraceTarget) bool { + if tt == nil && new == nil { + return true + } + + if tt == nil || new == nil { + return false + } + + if !tt.IPBlock.Equal(&new.IPBlock) { + return false + } + + if tt.NamespaceSelector.String() != new.NamespaceSelector.String() { + return false + } + + if tt.PodSelector.String() != new.PodSelector.String() { + return false + } + + if tt.NodeSelector.String() != new.NodeSelector.String() { + return false + } + + if tt.ServiceSelector.String() != new.ServiceSelector.String() { + return false + } + + return true +} + +func (ip *IPBlock) Equal(new *IPBlock) bool { + if ip == nil && new == nil { + return true + } + + if ip == nil || new == nil { + return false + } + + if ip.CIDR != new.CIDR { + return false + } + + if len(ip.Except) !=
len(new.Except) { + return false + } + + for i := range ip.Except { + if ip.Except[i] != new.Except[i] { + return false + } + } + + return true +} + +func (ip *IPBlock) IsEmpty() bool { + // if ip is nil or CIDR is empty, then it is empty + // even if except is populated with CIDR being empty that is invalid + return ip == nil || ip.CIDR == "" +} diff --git a/crd/api/v1alpha1/validations/validate_metricconfiguration.go b/crd/api/v1alpha1/validations/validate_metricconfiguration.go new file mode 100644 index 000000000..0a7a456d4 --- /dev/null +++ b/crd/api/v1alpha1/validations/validate_metricconfiguration.go @@ -0,0 +1,162 @@ +/* +Copyright (c) Microsoft Corporation. +Licensed under the MIT license. +*/ + +package validations + +import ( + "fmt" + + "github.com/microsoft/retina/crd/api/v1alpha1" + "github.com/microsoft/retina/pkg/utils" +) + +// MetricsConfiguration validates the metrics configuration +func MetricsCRD(metricsConfig *v1alpha1.MetricsConfiguration) error { + if metricsConfig == nil { + return fmt.Errorf("metrics configuration is nil") + } + + if metricsConfig.Name == "" { + return fmt.Errorf("metrics configuration name is empty") + } + + err := MetricsSpec(metricsConfig.Spec) + if err != nil { + return err + } + + return nil +} + +// MetricsSpec validates the metrics spec +func MetricsSpec(metricsSpec v1alpha1.MetricsSpec) error { + if len(metricsSpec.ContextOptions) == 0 { + return fmt.Errorf("metrics spec context options is empty") + } + + for _, contextOption := range metricsSpec.ContextOptions { + if !utils.IsAdvancedMetric(contextOption.MetricName) { + return fmt.Errorf("%s is not a valid metric", contextOption.MetricName) + } + } + + err := MetricsNamespaces(metricsSpec.Namespaces) + if err != nil { + return err + } + + return nil +} + +// MetricsNamespaces validates the metrics namespaces +func MetricsNamespaces(mn v1alpha1.MetricsNamespaces) error { + if mn.Include == nil && mn.Exclude == nil { + return fmt.Errorf("metrics namespaces include and exclude are both nil") + } + + if mn.Include != nil && mn.Exclude != nil { + return fmt.Errorf("metrics namespaces include and exclude are both not nil") + } + + return nil +} + +// CompareMetricsConfig compares two metrics configurations +func CompareMetricsConfig(old, new *v1alpha1.MetricsConfiguration) bool { + if old == nil && new == nil { + return true + } + + if old == nil || new == nil { + return false + } + + if old.Name != new.Name { + return false + } + + if !MetricsSpecCompare(old.Spec, new.Spec) { + return false + } + + return true +} + +// MetricsSpecCompare compares two metrics specs +func MetricsSpecCompare(old, new v1alpha1.MetricsSpec) bool { + if len(old.ContextOptions) != len(new.ContextOptions) { + return false + } + + if !MetricsNamespacesCompare(old.Namespaces, new.Namespaces) { + return false + } + + if !MetricsContextOptionsCompare(old.ContextOptions, new.ContextOptions) { + return false + } + + return true +} + +// MetricsNamespacesCompare compares two metrics namespaces +func MetricsNamespacesCompare(old, new v1alpha1.MetricsNamespaces) bool { + if !utils.CompareStringSlice(old.Include, new.Include) { + return false + } + + if !utils.CompareStringSlice(old.Exclude, new.Exclude) { + return false + } + + return true +} + +// MetricsContextOptionsCompare compares two metrics context options +func MetricsContextOptionsCompare(old, new []v1alpha1.MetricsContextOptions) bool { + if len(old) != len(new) { + return false + } + + oldMap := make(map[string]v1alpha1.MetricsContextOptions) + for _, contextOption 
:= range old { + oldMap[contextOption.MetricName] = contextOption + } + + newMap := make(map[string]v1alpha1.MetricsContextOptions) + for _, contextOption := range new { + newMap[contextOption.MetricName] = contextOption + } + + if len(oldMap) != len(newMap) { + return false + } + + for key, oldContextOption := range oldMap { + newContextOption, ok := newMap[key] + if !ok { + return false + } + + if !utils.CompareStringSlice(oldContextOption.SourceLabels, newContextOption.SourceLabels) { + return false + } + + if !utils.CompareStringSlice(oldContextOption.DestinationLabels, newContextOption.DestinationLabels) { + return false + } + + if oldContextOption.MetricName != newContextOption.MetricName { + return false + } + + if !utils.CompareStringSlice(oldContextOption.AdditionalLabels, newContextOption.AdditionalLabels) { + return false + } + + } + + return true +} diff --git a/crd/api/v1alpha1/validations/validate_metricconfiguration_test.go b/crd/api/v1alpha1/validations/validate_metricconfiguration_test.go new file mode 100644 index 000000000..d5197facb --- /dev/null +++ b/crd/api/v1alpha1/validations/validate_metricconfiguration_test.go @@ -0,0 +1,359 @@ +/* +Copyright (c) Microsoft Corporation. +Licensed under the MIT license. +*/ + +package validations + +import ( + "testing" + + "github.com/microsoft/retina/crd/api/v1alpha1" + "gotest.tools/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// TestMetricsConfiguration tests the metrics configuration validation +func TestMetricsConfiguration(t *testing.T) { + tests := []struct { + name string + obj *v1alpha1.MetricsConfiguration + wantErr bool + }{ + { + name: "invalid test 1", + obj: &v1alpha1.MetricsConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "metricsconfig", + }, + Spec: v1alpha1.MetricsSpec{}, + }, + wantErr: true, + }, + { + name: "Test 1", + obj: &v1alpha1.MetricsConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "metricsconfig", + }, + Spec: v1alpha1.MetricsSpec{}, + }, + wantErr: true, + }, + { + name: "valid metrics crd", + obj: &v1alpha1.MetricsConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "metricsconfig", + }, + Spec: v1alpha1.MetricsSpec{ + ContextOptions: []v1alpha1.MetricsContextOptions{ + { + MetricName: "drop_count", + }, + }, + Namespaces: v1alpha1.MetricsNamespaces{ + Include: []string{"default"}, + }, + }, + }, + wantErr: false, + }, + { + name: "invalid metrics crd", + obj: &v1alpha1.MetricsConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "metricsconfig", + }, + Spec: v1alpha1.MetricsSpec{ + ContextOptions: []v1alpha1.MetricsContextOptions{ + { + MetricName: "drop_count", + }, + }, + Namespaces: v1alpha1.MetricsNamespaces{ + Include: []string{"default"}, + Exclude: []string{"kube-system"}, + }, + }, + }, + wantErr: true, + }, + { + name: "valid metrics crd with exclude", + obj: &v1alpha1.MetricsConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "metricsconfig", + }, + Spec: v1alpha1.MetricsSpec{ + ContextOptions: []v1alpha1.MetricsContextOptions{ + { + MetricName: "drop_count", + }, + }, + Namespaces: v1alpha1.MetricsNamespaces{ + Exclude: []string{"kube-system"}, + }, + }, + }, + wantErr: false, + }, + { + name: "invalid metrics crd with random metric name", + obj: &v1alpha1.MetricsConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "metricsconfig", + }, + Spec: v1alpha1.MetricsSpec{ + ContextOptions: []v1alpha1.MetricsContextOptions{ + { + MetricName: "test-wrong-metric", + }, + }, + Namespaces: v1alpha1.MetricsNamespaces{ + Exclude: []string{"kube-system"}, + }, 
+ }, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := MetricsCRD(tt.obj) + if (err != nil) != tt.wantErr { + t.Errorf("MetricsConfiguration() error = %v, wantErr %v", err, tt.wantErr) + return + } + }) + } +} + +// TestMetricsSpec tests the metrics spec validation +func TestCompare(t *testing.T) { + tests := []struct { + name string + old *v1alpha1.MetricsConfiguration + new *v1alpha1.MetricsConfiguration + equal bool + }{ + { + name: "invalid test 1", + old: &v1alpha1.MetricsConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "metricsconfig", + }, + Spec: v1alpha1.MetricsSpec{}, + }, + new: &v1alpha1.MetricsConfiguration{}, + equal: false, + }, + { + name: "valid test 1", + old: &v1alpha1.MetricsConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "metricsconfig", + }, + Spec: v1alpha1.MetricsSpec{}, + }, + new: &v1alpha1.MetricsConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "metricsconfig", + }, + Spec: v1alpha1.MetricsSpec{}, + }, + equal: true, + }, + { + name: "valid test 2", + old: &v1alpha1.MetricsConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "metricsconfig", + }, + Spec: v1alpha1.MetricsSpec{ + ContextOptions: []v1alpha1.MetricsContextOptions{ + { + MetricName: "drop_count", + }, + }, + Namespaces: v1alpha1.MetricsNamespaces{ + Include: []string{"default"}, + }, + }, + }, + new: &v1alpha1.MetricsConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "metricsconfig", + }, + Spec: v1alpha1.MetricsSpec{ + ContextOptions: []v1alpha1.MetricsContextOptions{ + { + MetricName: "drop_count", + }, + }, + Namespaces: v1alpha1.MetricsNamespaces{ + Include: []string{"default"}, + }, + }, + }, + equal: true, + }, + { + name: "invalied test 3", + old: &v1alpha1.MetricsConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "metricsconfig", + }, + Spec: v1alpha1.MetricsSpec{ + ContextOptions: []v1alpha1.MetricsContextOptions{ + { + MetricName: "drop_count", + SourceLabels: []string{"ip", "port"}, + }, + }, + Namespaces: v1alpha1.MetricsNamespaces{ + Include: []string{"default"}, + }, + }, + }, + new: &v1alpha1.MetricsConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "metricsconfig", + }, + Spec: v1alpha1.MetricsSpec{ + ContextOptions: []v1alpha1.MetricsContextOptions{ + { + MetricName: "drop_count", + }, + }, + Namespaces: v1alpha1.MetricsNamespaces{ + Include: []string{"default"}, + }, + }, + }, + equal: false, + }, + { + name: "valid test 3", + old: &v1alpha1.MetricsConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "metricsconfig", + }, + Spec: v1alpha1.MetricsSpec{ + ContextOptions: []v1alpha1.MetricsContextOptions{ + { + MetricName: "drop_count", + SourceLabels: []string{"ip", "port"}, + }, + }, + Namespaces: v1alpha1.MetricsNamespaces{ + Include: []string{"default", "test"}, + Exclude: []string{"kube-system"}, + }, + }, + }, + new: &v1alpha1.MetricsConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "metricsconfig", + }, + Spec: v1alpha1.MetricsSpec{ + ContextOptions: []v1alpha1.MetricsContextOptions{ + { + MetricName: "drop_count", + SourceLabels: []string{"ip", "port"}, + }, + }, + Namespaces: v1alpha1.MetricsNamespaces{ + Include: []string{"default"}, + Exclude: []string{"kube-system"}, + }, + }, + }, + equal: false, + }, + { + name: "valid test 3", + old: &v1alpha1.MetricsConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "metricsconfig", + }, + Spec: v1alpha1.MetricsSpec{ + ContextOptions: []v1alpha1.MetricsContextOptions{ + { + MetricName: "drop_count", + SourceLabels: 
[]string{"ip", "port"}, + }, + }, + Namespaces: v1alpha1.MetricsNamespaces{ + Include: []string{"default", "test"}, + Exclude: []string{"kube-system"}, + }, + }, + }, + new: &v1alpha1.MetricsConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "metricsconfig", + }, + Spec: v1alpha1.MetricsSpec{ + ContextOptions: []v1alpha1.MetricsContextOptions{ + { + MetricName: "drop_count", + SourceLabels: []string{"ip", "port"}, + }, + }, + Namespaces: v1alpha1.MetricsNamespaces{ + Include: []string{"default", "test"}, + Exclude: []string{"kube-system"}, + }, + }, + }, + equal: true, + }, + { + name: "valid test 5", + old: &v1alpha1.MetricsConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "metricsconfig", + }, + Spec: v1alpha1.MetricsSpec{ + ContextOptions: []v1alpha1.MetricsContextOptions{ + { + MetricName: "drop_count", + SourceLabels: []string{"ns", "ip", "port"}, + }, + }, + Namespaces: v1alpha1.MetricsNamespaces{ + Include: []string{"default", "test"}, + Exclude: []string{"kube-system"}, + }, + }, + }, + new: &v1alpha1.MetricsConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "metricsconfig", + }, + Spec: v1alpha1.MetricsSpec{ + ContextOptions: []v1alpha1.MetricsContextOptions{ + { + MetricName: "drop_count", + SourceLabels: []string{"ip", "port", "ns"}, + }, + }, + Namespaces: v1alpha1.MetricsNamespaces{ + Include: []string{"default", "test"}, + Exclude: []string{"kube-system"}, + }, + }, + }, + equal: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + eq := CompareMetricsConfig(tt.old, tt.new) + assert.Equal(t, tt.equal, eq, "Compare() error = %v, wantErr %v", eq, tt.equal) + }) + } +} diff --git a/crd/api/v1alpha1/validations/validate_traceconfiguration.go b/crd/api/v1alpha1/validations/validate_traceconfiguration.go new file mode 100644 index 000000000..b132c4215 --- /dev/null +++ b/crd/api/v1alpha1/validations/validate_traceconfiguration.go @@ -0,0 +1,218 @@ +/* +Copyright (c) Microsoft Corporation. +Licensed under the MIT license. 
+*/ + +package validations + +import ( + "fmt" + "strconv" + + "github.com/microsoft/retina/crd/api/v1alpha1" +) + +func TracesCRD(tracesConfig *v1alpha1.TracesConfiguration) error { + if tracesConfig == nil { + return fmt.Errorf("trace configuration is nil") + } + + if tracesConfig.Name == "" { + return fmt.Errorf("trace configuration name is empty") + } + + if tracesConfig.Spec == nil { + return fmt.Errorf("trace configuration spec is nil") + } + + if tracesConfig.Spec.TraceConfiguration == nil { + return fmt.Errorf("trace configuration spec is nil") + } + + if len(tracesConfig.Spec.TraceConfiguration) == 0 { + return fmt.Errorf("trace configuration spec is empty") + } + + err := TraceConfiguration(tracesConfig.Spec.TraceConfiguration) + if err != nil { + return err + } + + err = TraceOutputConfiguration(tracesConfig.Spec.TraceOutputConfiguration) + if err != nil { + return err + } + + if tracesConfig.Status == nil || tracesConfig.Status.LastKnownSpec == nil { + // TODO add status validation + return nil + } + + err = TraceConfiguration(tracesConfig.Status.LastKnownSpec.TraceConfiguration) + if err != nil { + return err + } + + return nil +} + +// TraceConfiguration validates the trace configuration +func TraceConfiguration(traceConfig []*v1alpha1.TraceConfiguration) error { + for _, trace := range traceConfig { + if (trace.TraceCaptureLevel != v1alpha1.AllPacketsCapture) && (trace.TraceCaptureLevel != v1alpha1.FirstPacketCapture) { + return fmt.Errorf("%s trace capture level is invalid", trace.TraceCaptureLevel) + } + + if trace.TraceTargets == nil { + return fmt.Errorf("trace targets is nil") + } + + if len(trace.TraceTargets) == 0 { + return fmt.Errorf("trace targets is empty") + } + + for _, targets := range trace.TraceTargets { + err := TraceTargets(targets) + if err != nil { + return err + } + } + + } + + return nil +} + +func TraceOutputConfiguration(traceOutputConfig *v1alpha1.TraceOutputConfiguration) error { + if traceOutputConfig == nil { + return fmt.Errorf("trace output configuration is nil") + } + + if traceOutputConfig.TraceOutputDestination == "" { + return fmt.Errorf("trace output type is invalid") + } + + // TODO add actual output type validation + + return nil +} + +func TraceTargets(target *v1alpha1.TraceTargets) error { + if target == nil { + return fmt.Errorf("trace target is nil") + } + + if target.Source == nil && target.Destination == nil && len(target.Ports) == 0 { + return fmt.Errorf("trace target is empty") + } + + err := TracePoints(target.TracePoints) + if err != nil { + return err + } + + err = TraceTarget(target.Source) + if err != nil { + return err + } + + err = TraceTarget(target.Destination) + if err != nil { + return err + } + + for _, port := range target.Ports { + if port == nil { + continue + } + err = TracePort(port) + if err != nil { + return err + } + } + + return nil +} + +func TracePoints(tp v1alpha1.TracePoints) error { + for _, tracePoint := range tp { + if tracePoint != v1alpha1.PodToNode && + tracePoint != v1alpha1.NodeToPod && + tracePoint != v1alpha1.NodeToNetwork && + tracePoint != v1alpha1.NetworkToNode { + return fmt.Errorf("invalid trace point %s", tracePoint) + } + } + + return nil +} + +func TraceTarget(tt *v1alpha1.TraceTarget) error { + if tt == nil { + return nil + } + + // Atleast one of the selector must be present + // Only one of the followin group of selectors can be present + // 1. IPBlock + // 2. NamespaceSelector, PodSelector // NamespaceSelector is mandatory if PodSelector is present + // 3. NodeSelector + // 4. 
ServiceSelector + if tt.IPBlock.IsEmpty() && + tt.NamespaceSelector == nil && + tt.PodSelector == nil && + tt.NodeSelector == nil && + tt.ServiceSelector == nil { + return fmt.Errorf("trace target must have at least one selector") + } + + if !tt.IPBlock.IsEmpty() && + (tt.NamespaceSelector != nil || tt.PodSelector != nil || tt.NodeSelector != nil || tt.ServiceSelector != nil) { + return fmt.Errorf("trace target cannot have both IPBlock and other selectors") + } + + if tt.NamespaceSelector == nil && tt.PodSelector != nil { + return fmt.Errorf("trace target must have namespace selector if pod selector is present") + } + + if tt.NamespaceSelector != nil && + (tt.NodeSelector != nil || tt.ServiceSelector != nil) { + return fmt.Errorf("trace target cannot have node or service selector if namespace and pod selector is present") + } + + if tt.NamespaceSelector == nil && tt.PodSelector == nil && + (tt.NodeSelector != nil || tt.ServiceSelector != nil) && + (tt.NodeSelector != nil && tt.ServiceSelector != nil) { + return fmt.Errorf("trace target cannot have both node and service selector") + } + + return nil +} + +func TracePort(port *v1alpha1.TracePorts) error { + startPort, err := strconv.Atoi(port.Port) + if err != nil { + return fmt.Errorf("invalid port %s", port.Port) + } + + if startPort < 0 || startPort > 65535 { + return fmt.Errorf("invalid port %s", port.Port) + } + + if port.EndPort != "" && port.EndPort != "0" { + endPort, err := strconv.Atoi(port.EndPort) + if err != nil { + return fmt.Errorf("invalid end port %s", port.EndPort) + } + + if endPort < 0 || endPort > 65535 { + return fmt.Errorf("invalid end port %s", port.EndPort) + } + + if endPort < startPort { + return fmt.Errorf("invalid end port %s compared to starting port %s", port.EndPort, port.Port) + } + } + + return nil +} diff --git a/crd/api/v1alpha1/validations/validate_traceconfiguration_test.go b/crd/api/v1alpha1/validations/validate_traceconfiguration_test.go new file mode 100644 index 000000000..1c14b6e40 --- /dev/null +++ b/crd/api/v1alpha1/validations/validate_traceconfiguration_test.go @@ -0,0 +1,693 @@ +/* +Copyright (c) Microsoft Corporation. +Licensed under the MIT license. 
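Again not part of the diff — a hedged sketch of exercising the trace validators above (TracesCRD, TraceTargets, TracePort), shaped after the "valid trace configuration" and "valid trace ports 2" test cases. The `main` wrapper, the CIDR, the port range, and the output destination string are illustrative assumptions.

```go
package main

import (
	"fmt"

	"github.com/microsoft/retina/crd/api/v1alpha1"
	"github.com/microsoft/retina/crd/api/v1alpha1/validations"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// One trace with an IPBlock source, a port range, and an output destination.
	// A target may use an IPBlock, namespace/pod selectors, a node selector, or a
	// service selector, but the validation above rejects mixing those groups.
	cfg := &v1alpha1.TracesConfiguration{
		ObjectMeta: metav1.ObjectMeta{Name: "traceconfig"},
		Spec: &v1alpha1.TracesSpec{
			TraceConfiguration: []*v1alpha1.TraceConfiguration{
				{
					TraceCaptureLevel: v1alpha1.AllPacketsCapture,
					TraceTargets: []*v1alpha1.TraceTargets{
						{
							Source: &v1alpha1.TraceTarget{
								IPBlock: v1alpha1.IPBlock{CIDR: "10.0.0.0/8"},
							},
							// Port and EndPort are strings; TracePort checks both
							// parse as 0-65535 and that EndPort >= Port.
							Ports: []*v1alpha1.TracePorts{
								{Port: "80", EndPort: "89", Protocol: "TCP"},
							},
						},
					},
				},
			},
			TraceOutputConfiguration: &v1alpha1.TraceOutputConfiguration{
				TraceOutputDestination: "Test",
			},
		},
	}

	if err := validations.TracesCRD(cfg); err != nil {
		fmt.Println("invalid trace configuration:", err)
		return
	}
	fmt.Println("trace configuration is valid")
}
```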
+*/ + +package validations + +import ( + "testing" + + "github.com/microsoft/retina/crd/api/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// TestTraceConfiguration tests the validation of trace configuration +func TestTraceConfiguration(t *testing.T) { + tests := []struct { + name string + trace *v1alpha1.TracesConfiguration + wantErr bool + }{ + { + name: "nil trace configuration", + trace: nil, + wantErr: true, + }, + { + name: "empty trace configuration", + trace: &v1alpha1.TracesConfiguration{}, + wantErr: true, + }, + { + name: "nil trace spec configuration", + trace: &v1alpha1.TracesConfiguration{ + Spec: nil, + }, + wantErr: true, + }, + { + name: "empty trace spec configuration", + trace: &v1alpha1.TracesConfiguration{ + Spec: &v1alpha1.TracesSpec{}, + }, + wantErr: true, + }, + { + name: "empty trace targets", + trace: &v1alpha1.TracesConfiguration{ + Spec: &v1alpha1.TracesSpec{ + TraceConfiguration: []*v1alpha1.TraceConfiguration{ + { + TraceCaptureLevel: v1alpha1.AllPacketsCapture, + TraceTargets: []*v1alpha1.TraceTargets{}, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "invalid trace capture level", + trace: &v1alpha1.TracesConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "traceconfig", + }, + Spec: &v1alpha1.TracesSpec{ + TraceConfiguration: []*v1alpha1.TraceConfiguration{ + { + TraceCaptureLevel: "invalid", + TraceTargets: []*v1alpha1.TraceTargets{ + { + Source: &v1alpha1.TraceTarget{ + IPBlock: v1alpha1.IPBlock{ + CIDR: "10.0.0.0/8", + }, + }, + }, + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "invalid trace configuration name", + trace: &v1alpha1.TracesConfiguration{ + Spec: &v1alpha1.TracesSpec{ + TraceConfiguration: []*v1alpha1.TraceConfiguration{ + { + TraceCaptureLevel: v1alpha1.AllPacketsCapture, + TraceTargets: []*v1alpha1.TraceTargets{ + { + Source: &v1alpha1.TraceTarget{ + IPBlock: v1alpha1.IPBlock{ + CIDR: "10.0.0.0/8", + }, + }, + }, + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "invalid trace targets", + trace: &v1alpha1.TracesConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "traceconfig", + }, + Spec: &v1alpha1.TracesSpec{ + TraceConfiguration: []*v1alpha1.TraceConfiguration{ + { + TraceCaptureLevel: v1alpha1.AllPacketsCapture, + TraceTargets: []*v1alpha1.TraceTargets{ + {}, + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "valid trace configuration", + trace: &v1alpha1.TracesConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "traceconfig", + }, + Spec: &v1alpha1.TracesSpec{ + TraceConfiguration: []*v1alpha1.TraceConfiguration{ + { + TraceCaptureLevel: v1alpha1.AllPacketsCapture, + TraceTargets: []*v1alpha1.TraceTargets{ + { + Source: &v1alpha1.TraceTarget{ + IPBlock: v1alpha1.IPBlock{ + CIDR: "10.0.0.0/8", + }, + }, + }, + }, + }, + }, + TraceOutputConfiguration: &v1alpha1.TraceOutputConfiguration{ + TraceOutputDestination: "Test", + }, + }, + }, + wantErr: false, + }, + { + name: "invalid trace configuration with pod selector", + trace: &v1alpha1.TracesConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "traceconfig", + }, + Spec: &v1alpha1.TracesSpec{ + TraceConfiguration: []*v1alpha1.TraceConfiguration{ + { + TraceCaptureLevel: v1alpha1.AllPacketsCapture, + TraceTargets: []*v1alpha1.TraceTargets{ + { + Source: &v1alpha1.TraceTarget{ + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "nginx", + }, + }, + }, + }, + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "valid trace configuration with ns and pod selector", + trace: 
&v1alpha1.TracesConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "traceconfig", + }, + Spec: &v1alpha1.TracesSpec{ + TraceConfiguration: []*v1alpha1.TraceConfiguration{ + { + TraceCaptureLevel: v1alpha1.AllPacketsCapture, + TraceTargets: []*v1alpha1.TraceTargets{ + { + Source: &v1alpha1.TraceTarget{ + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "nginx", + }, + }, + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "ns": "nginx", + }, + }, + }, + }, + }, + }, + }, + TraceOutputConfiguration: &v1alpha1.TraceOutputConfiguration{ + TraceOutputDestination: "Test", + }, + }, + }, + wantErr: false, + }, + { + name: "invalid trace configuration with ns and node selector", + trace: &v1alpha1.TracesConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "traceconfig", + }, + Spec: &v1alpha1.TracesSpec{ + TraceConfiguration: []*v1alpha1.TraceConfiguration{ + { + TraceCaptureLevel: v1alpha1.AllPacketsCapture, + TraceTargets: []*v1alpha1.TraceTargets{ + { + Source: &v1alpha1.TraceTarget{ + NodeSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "nginx", + }, + }, + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "ns": "nginx", + }, + }, + }, + }, + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "invalid trace configuration with ns , pod and node selector", + trace: &v1alpha1.TracesConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "traceconfig", + }, + Spec: &v1alpha1.TracesSpec{ + TraceConfiguration: []*v1alpha1.TraceConfiguration{ + { + TraceCaptureLevel: v1alpha1.AllPacketsCapture, + TraceTargets: []*v1alpha1.TraceTargets{ + { + Source: &v1alpha1.TraceTarget{ + NodeSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "nginx", + }, + }, + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "ns": "nginx", + }, + }, + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "ns": "nginx", + }, + }, + }, + }, + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "invalid trace configuration with ns, pod and service selector", + trace: &v1alpha1.TracesConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "traceconfig", + }, + Spec: &v1alpha1.TracesSpec{ + TraceConfiguration: []*v1alpha1.TraceConfiguration{ + { + TraceCaptureLevel: v1alpha1.AllPacketsCapture, + TraceTargets: []*v1alpha1.TraceTargets{ + { + Source: &v1alpha1.TraceTarget{ + ServiceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "nginx", + }, + }, + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "ns": "nginx", + }, + }, + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "ns": "nginx", + }, + }, + }, + }, + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "invalid trace configuration with node and service selector", + trace: &v1alpha1.TracesConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "traceconfig", + }, + Spec: &v1alpha1.TracesSpec{ + TraceConfiguration: []*v1alpha1.TraceConfiguration{ + { + TraceCaptureLevel: v1alpha1.AllPacketsCapture, + TraceTargets: []*v1alpha1.TraceTargets{ + { + Source: &v1alpha1.TraceTarget{ + ServiceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "nginx", + }, + }, + NodeSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "ns": "nginx", + }, + }, + }, + }, + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "valid trace configuration with service selector and 
port", + trace: &v1alpha1.TracesConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "traceconfig", + }, + Spec: &v1alpha1.TracesSpec{ + TraceConfiguration: []*v1alpha1.TraceConfiguration{ + { + TraceCaptureLevel: v1alpha1.AllPacketsCapture, + TraceTargets: []*v1alpha1.TraceTargets{ + { + Source: &v1alpha1.TraceTarget{ + ServiceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "nginx", + }, + }, + }, + Ports: []*v1alpha1.TracePorts{ + { + Port: "80", + }, + }, + TracePoints: v1alpha1.TracePoints{ + v1alpha1.NetworkToNode, + v1alpha1.NodeToPod, + }, + }, + }, + IncludeLayer7Data: true, + }, + }, + TraceOutputConfiguration: &v1alpha1.TraceOutputConfiguration{ + TraceOutputDestination: "Test", + }, + }, + }, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := TracesCRD(tt.trace); (err != nil) != tt.wantErr { + t.Errorf("TraceConfiguration() name = %s error = %v, wantErr %v", tt.name, err, tt.wantErr) + } + }) + } +} + +func TestTraceTarget(t *testing.T) { + tests := []struct { + name string + target *v1alpha1.TraceTarget + wantErr bool + }{ + { + name: "valid trace target", + target: &v1alpha1.TraceTarget{ + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "ns": "nginx", + }, + }, + }, + wantErr: false, + }, + { + name: "invalid trace target with ns and service selector", + target: &v1alpha1.TraceTarget{ + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "ns": "nginx", + }, + }, + ServiceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "ns": "nginx", + }, + }, + }, + wantErr: true, + }, + { + name: "valid trace target with ns and pod selector", + target: &v1alpha1.TraceTarget{ + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "ns": "nginx", + }, + }, + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "ns": "nginx", + }, + }, + }, + wantErr: false, + }, + { + name: "invalid trace target with ns and node selector", + target: &v1alpha1.TraceTarget{ + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "ns": "nginx", + }, + }, + NodeSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "ns": "nginx", + }, + }, + }, + wantErr: true, + }, + { + name: "invalid trace target with pod and service selector", + target: &v1alpha1.TraceTarget{ + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "ns": "nginx", + }, + }, + ServiceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "ns": "nginx", + }, + }, + }, + wantErr: true, + }, + { + name: "invalid trace target with pod and node selector", + target: &v1alpha1.TraceTarget{ + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "ns": "nginx", + }, + }, + NodeSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "ns": "nginx", + }, + }, + }, + wantErr: true, + }, + { + name: "invalid trace target with service and node selector", + target: &v1alpha1.TraceTarget{ + ServiceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "ns": "nginx", + }, + }, + NodeSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "ns": "nginx", + }, + }, + }, + wantErr: true, + }, + { + name: "invalid trace target with pod and service selector", + target: &v1alpha1.TraceTarget{ + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "ns": "nginx", + }, + }, + ServiceSelector: &metav1.LabelSelector{ + 
MatchLabels: map[string]string{ + "ns": "nginx", + }, + }, + }, + wantErr: true, + }, + { + name: "invalid trace target with ipblock and namespace selector", + target: &v1alpha1.TraceTarget{ + IPBlock: v1alpha1.IPBlock{ + CIDR: "0.0.0.0", + }, + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "ns": "nginx", + }, + }, + }, + wantErr: true, + }, + { + name: "invalid trace target with ipblock and pod selector", + target: &v1alpha1.TraceTarget{ + IPBlock: v1alpha1.IPBlock{ + CIDR: "0.0.0.0", + }, + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "ns": "nginx", + }, + }, + }, + wantErr: true, + }, + { + name: "invalid trace target with ipblock and service selector", + target: &v1alpha1.TraceTarget{ + IPBlock: v1alpha1.IPBlock{ + CIDR: "0.0.0.0", + }, + ServiceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "ns": "nginx", + }, + }, + }, + wantErr: true, + }, + { + name: "invalid trace target with ipblock and node selector", + target: &v1alpha1.TraceTarget{ + IPBlock: v1alpha1.IPBlock{ + CIDR: "0.0.0.0", + }, + NodeSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "ns": "nginx", + }, + }, + }, + wantErr: true, + }, + { + name: "invalid trace target with ipblock", + target: &v1alpha1.TraceTarget{ + IPBlock: v1alpha1.IPBlock{ + Except: []string{"0.0.0.0"}, + }, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := TraceTarget(tt.target); (err != nil) != tt.wantErr { + t.Errorf("TraceConfiguration() name = %s error = %v, wantErr %v", tt.name, err, tt.wantErr) + } + }) + } +} + +func TestTracePorts(t *testing.T) { + tests := []struct { + name string + trace *v1alpha1.TracePorts + wantErr bool + }{ + { + name: "valid trace ports", + trace: &v1alpha1.TracePorts{ + Port: "80", + Protocol: "TCP", + }, + wantErr: false, + }, + { + name: "udp valid trace ports", + trace: &v1alpha1.TracePorts{ + Port: "80", + Protocol: "UDP", + }, + wantErr: false, + }, + { + name: "invalid trace ports", + trace: &v1alpha1.TracePorts{ + Port: "80", + Protocol: "UDP", + EndPort: "79", + }, + wantErr: true, + }, + { + name: "invalid trace ports 2", + trace: &v1alpha1.TracePorts{ + Port: "-1", + Protocol: "UDP", + EndPort: "79", + }, + wantErr: true, + }, + { + name: "valid trace ports 2", + trace: &v1alpha1.TracePorts{ + Port: "80", + Protocol: "UDP", + EndPort: "89", + }, + wantErr: false, + }, + { + name: "invalid trace ports 3", + trace: &v1alpha1.TracePorts{ + Port: "aaa", + }, + wantErr: true, + }, + { + name: "invalid trace ports 4", + trace: &v1alpha1.TracePorts{ + Port: "80", + Protocol: "aaa", + EndPort: "4444444444", + }, + wantErr: true, + }, + { + name: "invalid trace ports 5", + trace: &v1alpha1.TracePorts{ + Port: "80", + Protocol: "aaa", + EndPort: "aa", + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := TracePort(tt.trace); (err != nil) != tt.wantErr { + t.Errorf("TracePorts() name = %s error = %v, wantErr %v", tt.name, err, tt.wantErr) + } + }) + } +} diff --git a/crd/api/v1alpha1/zz_generated.deepcopy.go b/crd/api/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..d8f8893c0 --- /dev/null +++ b/crd/api/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,579 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Capture) DeepCopyInto(out *Capture) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Capture. +func (in *Capture) DeepCopy() *Capture { + if in == nil { + return nil + } + out := new(Capture) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Capture) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CaptureConfiguration) DeepCopyInto(out *CaptureConfiguration) { + *out = *in + in.CaptureTarget.DeepCopyInto(&out.CaptureTarget) + if in.Filters != nil { + in, out := &in.Filters, &out.Filters + *out = new(CaptureConfigurationFilters) + (*in).DeepCopyInto(*out) + } + if in.TcpdumpFilter != nil { + in, out := &in.TcpdumpFilter, &out.TcpdumpFilter + *out = new(string) + **out = **in + } + in.CaptureOption.DeepCopyInto(&out.CaptureOption) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptureConfiguration. +func (in *CaptureConfiguration) DeepCopy() *CaptureConfiguration { + if in == nil { + return nil + } + out := new(CaptureConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CaptureConfigurationFilters) DeepCopyInto(out *CaptureConfigurationFilters) { + *out = *in + if in.Include != nil { + in, out := &in.Include, &out.Include + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Exclude != nil { + in, out := &in.Exclude, &out.Exclude + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptureConfigurationFilters. +func (in *CaptureConfigurationFilters) DeepCopy() *CaptureConfigurationFilters { + if in == nil { + return nil + } + out := new(CaptureConfigurationFilters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CaptureList) DeepCopyInto(out *CaptureList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Capture, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptureList. +func (in *CaptureList) DeepCopy() *CaptureList { + if in == nil { + return nil + } + out := new(CaptureList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CaptureList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CaptureOption) DeepCopyInto(out *CaptureOption) { + *out = *in + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(v1.Duration) + **out = **in + } + if in.PacketSize != nil { + in, out := &in.PacketSize, &out.PacketSize + *out = new(int) + **out = **in + } + if in.MaxCaptureSize != nil { + in, out := &in.MaxCaptureSize, &out.MaxCaptureSize + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptureOption. +func (in *CaptureOption) DeepCopy() *CaptureOption { + if in == nil { + return nil + } + out := new(CaptureOption) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CaptureSpec) DeepCopyInto(out *CaptureSpec) { + *out = *in + in.CaptureConfiguration.DeepCopyInto(&out.CaptureConfiguration) + in.OutputConfiguration.DeepCopyInto(&out.OutputConfiguration) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptureSpec. +func (in *CaptureSpec) DeepCopy() *CaptureSpec { + if in == nil { + return nil + } + out := new(CaptureSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CaptureStatus) DeepCopyInto(out *CaptureStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = (*in).DeepCopy() + } + if in.CompletionTime != nil { + in, out := &in.CompletionTime, &out.CompletionTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptureStatus. +func (in *CaptureStatus) DeepCopy() *CaptureStatus { + if in == nil { + return nil + } + out := new(CaptureStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CaptureTarget) DeepCopyInto(out *CaptureTarget) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.PodSelector != nil { + in, out := &in.PodSelector, &out.PodSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CaptureTarget. +func (in *CaptureTarget) DeepCopy() *CaptureTarget { + if in == nil { + return nil + } + out := new(CaptureTarget) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Containers) DeepCopyInto(out *Containers) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Containers. 
+func (in *Containers) DeepCopy() *Containers { + if in == nil { + return nil + } + out := new(Containers) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetinaEndpoint) DeepCopyInto(out *RetinaEndpoint) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetinaEndpoint. +func (in *RetinaEndpoint) DeepCopy() *RetinaEndpoint { + if in == nil { + return nil + } + out := new(RetinaEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RetinaEndpoint) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetinaEndpointList) DeepCopyInto(out *RetinaEndpointList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RetinaEndpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetinaEndpointList. +func (in *RetinaEndpointList) DeepCopy() *RetinaEndpointList { + if in == nil { + return nil + } + out := new(RetinaEndpointList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RetinaEndpointList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetinaEndpointSpec) DeepCopyInto(out *RetinaEndpointSpec) { + *out = *in + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]RetinaEndpointStatusContainers, len(*in)) + copy(*out, *in) + } + if in.OwnerReferences != nil { + in, out := &in.OwnerReferences, &out.OwnerReferences + *out = make([]OwnerReference, len(*in)) + copy(*out, *in) + } + if in.PodIPs != nil { + in, out := &in.PodIPs, &out.PodIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetinaEndpointSpec. +func (in *RetinaEndpointSpec) DeepCopy() *RetinaEndpointSpec { + if in == nil { + return nil + } + out := new(RetinaEndpointSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetinaEndpointStatus) DeepCopyInto(out *RetinaEndpointStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetinaEndpointStatus. 
+func (in *RetinaEndpointStatus) DeepCopy() *RetinaEndpointStatus { + if in == nil { + return nil + } + out := new(RetinaEndpointStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetinaEndpointStatusContainers) DeepCopyInto(out *RetinaEndpointStatusContainers) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetinaEndpointStatusContainers. +func (in *RetinaEndpointStatusContainers) DeepCopy() *RetinaEndpointStatusContainers { + if in == nil { + return nil + } + out := new(RetinaEndpointStatusContainers) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricsConfiguration) DeepCopyInto(out *MetricsConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsConfiguration. +func (in *MetricsConfiguration) DeepCopy() *MetricsConfiguration { + if in == nil { + return nil + } + out := new(MetricsConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MetricsConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricsConfigurationList) DeepCopyInto(out *MetricsConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MetricsConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsConfigurationList. +func (in *MetricsConfigurationList) DeepCopy() *MetricsConfigurationList { + if in == nil { + return nil + } + out := new(MetricsConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MetricsConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricsContextOptions) DeepCopyInto(out *MetricsContextOptions) { + *out = *in + if in.SourceLabels != nil { + in, out := &in.SourceLabels, &out.SourceLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DestinationLabels != nil { + in, out := &in.DestinationLabels, &out.DestinationLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AdditionalLabels != nil { + in, out := &in.AdditionalLabels, &out.AdditionalLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsContextOptions. 
+func (in *MetricsContextOptions) DeepCopy() *MetricsContextOptions { + if in == nil { + return nil + } + out := new(MetricsContextOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricsNamespaces) DeepCopyInto(out *MetricsNamespaces) { + *out = *in + if in.Include != nil { + in, out := &in.Include, &out.Include + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Exclude != nil { + in, out := &in.Exclude, &out.Exclude + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsNamespaces. +func (in *MetricsNamespaces) DeepCopy() *MetricsNamespaces { + if in == nil { + return nil + } + out := new(MetricsNamespaces) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricsSpec) DeepCopyInto(out *MetricsSpec) { + *out = *in + if in.ContextOptions != nil { + in, out := &in.ContextOptions, &out.ContextOptions + *out = make([]MetricsContextOptions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Namespaces.DeepCopyInto(&out.Namespaces) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsSpec. +func (in *MetricsSpec) DeepCopy() *MetricsSpec { + if in == nil { + return nil + } + out := new(MetricsSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricsStatus) DeepCopyInto(out *MetricsStatus) { + *out = *in + if in.LastKnownSpec != nil { + in, out := &in.LastKnownSpec, &out.LastKnownSpec + *out = new(MetricsSpec) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsStatus. +func (in *MetricsStatus) DeepCopy() *MetricsStatus { + if in == nil { + return nil + } + out := new(MetricsStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutputConfiguration) DeepCopyInto(out *OutputConfiguration) { + *out = *in + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(string) + **out = **in + } + if in.PersistentVolumeClaim != nil { + in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim + *out = new(string) + **out = **in + } + if in.BlobUpload != nil { + in, out := &in.BlobUpload, &out.BlobUpload + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputConfiguration. +func (in *OutputConfiguration) DeepCopy() *OutputConfiguration { + if in == nil { + return nil + } + out := new(OutputConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OwnerReference) DeepCopyInto(out *OwnerReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OwnerReference. 
+func (in *OwnerReference) DeepCopy() *OwnerReference { + if in == nil { + return nil + } + out := new(OwnerReference) + in.DeepCopyInto(out) + return out +} diff --git a/deploy/grafana/dashboards/README.md b/deploy/grafana/dashboards/README.md new file mode 100644 index 000000000..529e2ffe3 --- /dev/null +++ b/deploy/grafana/dashboards/README.md @@ -0,0 +1 @@ +Dashboards here are a copy of dashboards published in Retina organization on grafana.com diff --git a/deploy/grafana/dashboards/clusters.json b/deploy/grafana/dashboards/clusters.json new file mode 100644 index 000000000..8dd0108ec --- /dev/null +++ b/deploy/grafana/dashboards/clusters.json @@ -0,0 +1,3763 @@ +{ + "__inputs": [], + "__elements": {}, + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "9.5.16" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "stat", + "name": "Stat", + "version": "" + }, + { + "type": "panel", + "id": "table", + "name": "Table", + "version": "" + }, + { + "type": "panel", + "id": "text", + "name": "Text", + "version": "" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], + "editable": true, + "id": null, + "links": [ + { + "asDropdown": true, + "icon": "external link", + "includeVars": true, + "keepTime": true, + "tags": [ + "k8s:network-observability" + ], + "targetBlank": false, + "title": "Dashboards: Network Observability", + "tooltip": "", + "type": "dashboards", + "url": "" + }, + { + "asDropdown": false, + "icon": "info", + "includeVars": false, + "keepTime": false, + "tags": [], + "targetBlank": true, + "title": "Documentation", + "tooltip": "", + "type": "link", + "url": "https://www.retina.sh" + } + ], + "liveNow": true, + "panels": [ + { + "description": "", + "gridPos": { + "h": 3, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 35, + "options": { + "code": { + "language": "plaintext", + "showLineNumbers": false, + "showMiniMap": false + }, + "content": "# Retina Network Observability\n\nVisualize metrics for any Kubernetes cluster using [Retina](https://retina.sh/) (any cloud provider, any CNI, any OS). 
Cycle between all dashboards in the dropdown above.", + "mode": "markdown" + }, + "type": "text" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 3 + }, + "id": 155, + "panels": [], + "title": "Fleet View", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "left", + "cellOptions": { + "type": "auto" + }, + "filterable": false, + "inspect": false + }, + "decimals": 1, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "Bps" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 6, + "x": 0, + "y": 4 + }, + "id": 156, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "enablePagination": false, + "fields": "", + "reducer": [ + "max" + ], + "show": false + }, + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Bytes Forwarded" + } + ] + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum by (cluster) (rate(networkobservability_forward_bytes[$__rate_interval]))", + "format": "table", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "Current Traffic by Cluster", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": {}, + "renameByName": { + "Value": "Bytes Forwarded", + "cluster": "Cluster" + } + } + } + ], + "transparent": true, + "type": "table" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "semi-dark-blue", + "mode": "continuous-BlYlRd" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "Bps" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 18, + "x": 6, + "y": 4 + }, + "id": 123, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum by (cluster) (rate(networkobservability_forward_bytes[$__rate_interval]))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Bytes Forwarded by Cluster", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "left", + "cellOptions": { + "type": "auto" + }, + "filterable": false, + "inspect": false + }, + "decimals": 1, + "mappings": [], + "thresholds": { + "mode": 
"absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "Bps" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 6, + "x": 0, + "y": 11 + }, + "id": 158, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "enablePagination": false, + "fields": "", + "reducer": [ + "max" + ], + "show": false + }, + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Bytes Dropped" + } + ] + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum by (cluster) (rate(networkobservability_drop_bytes[$__rate_interval]))", + "format": "table", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "Current Dropped Traffic by Cluster", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": {}, + "renameByName": { + "Value": "Bytes Dropped", + "cluster": "Cluster" + } + } + } + ], + "transparent": true, + "type": "table" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "Bps" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 9, + "x": 6, + "y": 11 + }, + "id": 129, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum by (cluster) (rate(networkobservability_drop_bytes[$__rate_interval]))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Bytes Dropped by Cluster", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 9, + "x": 15, + "y": 11 + }, + "id": 130, + "options": { 
+ "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum by (cluster) (rate(networkobservability_drop_count[$__rate_interval]))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Packets Dropped By Cluster", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 18 + }, + "id": 57, + "panels": [], + "title": "Traffic (on $cluster)", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 1, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "blue", + "value": null + } + ] + }, + "unit": "Bps" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 0, + "y": 19 + }, + "id": 141, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "max" + ], + "fields": "", + "values": false + }, + "textMode": "value_and_name" + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum(rate(networkobservability_forward_bytes{direction=\"egress\", cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval]))", + "legendFormat": "Max Egress Bytes", + "range": true, + "refId": "A" + } + ], + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 1, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "super-light-blue", + "value": null + } + ] + }, + "unit": "Bps" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 3, + "y": 19 + }, + "id": 145, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "min" + ], + "fields": "", + "values": false + }, + "textMode": "value_and_name" + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum(rate(networkobservability_forward_bytes{direction=\"egress\", cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval]))", + "format": "time_series", + "instant": false, + "legendFormat": "Min Egress Bytes", + "range": true, + "refId": "A" + } + ], + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "blue", + "value": null + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 6, + "y": 19 + }, + "id": 120, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "max" + ], + "fields": "", + "values": false + }, + "textMode": "value_and_name" + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + 
"expr": "sum(rate(networkobservability_forward_count{direction=\"egress\", cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval]))", + "legendFormat": "Max Egress Packets", + "range": true, + "refId": "A" + } + ], + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "super-light-blue", + "value": null + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 9, + "y": 19 + }, + "id": 146, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "min" + ], + "fields": "", + "values": false + }, + "textMode": "value_and_name" + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum(rate(networkobservability_forward_count{direction=\"egress\", cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval]))", + "legendFormat": "Min Egress Packets", + "range": true, + "refId": "A" + } + ], + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 1, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "blue", + "value": null + } + ] + }, + "unit": "Bps" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 12, + "y": 19 + }, + "id": 147, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "max" + ], + "fields": "", + "values": false + }, + "textMode": "value_and_name" + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum(rate(networkobservability_forward_bytes{direction=\"ingress\", cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval]))", + "legendFormat": "Max Ingress Bytes", + "range": true, + "refId": "A" + } + ], + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 1, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "super-light-blue", + "value": null + } + ] + }, + "unit": "Bps" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 15, + "y": 19 + }, + "id": 148, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "min" + ], + "fields": "", + "values": false + }, + "textMode": "value_and_name" + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum(rate(networkobservability_forward_bytes{direction=\"ingress\", cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval]))", + "legendFormat": "Min Ingress Bytes", + "range": true, + "refId": "A" + } + ], + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": 
"absolute", + "steps": [ + { + "color": "blue", + "value": null + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 18, + "y": 19 + }, + "id": 149, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "max" + ], + "fields": "", + "values": false + }, + "textMode": "value_and_name" + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum(rate(networkobservability_forward_count{direction=\"ingress\", cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval]))", + "legendFormat": "Max Ingress Packets", + "range": true, + "refId": "A" + } + ], + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "super-light-blue", + "value": null + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 21, + "y": 19 + }, + "id": 150, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "min" + ], + "fields": "", + "values": false + }, + "textMode": "value_and_name" + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum(rate(networkobservability_forward_count{direction=\"ingress\", cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval]))", + "legendFormat": "Min Ingress Packets", + "range": true, + "refId": "A" + } + ], + "transparent": true, + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "super-light-blue", + "mode": "fixed" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "Bps" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 0, + "y": 23 + }, + "id": 119, + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum(rate(networkobservability_forward_bytes{direction=\"egress\", cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval]))", + "legendFormat": "Egress Bytes", + "range": true, + "refId": "A" + } + ], + "title": "Egress Bytes", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "super-light-blue", + "mode": 
"fixed" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 6, + "y": 23 + }, + "id": 143, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum(rate(networkobservability_forward_count{direction=\"egress\", cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval]))", + "legendFormat": "Egress Packets", + "range": true, + "refId": "A" + } + ], + "title": "Egress Packets", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "super-light-blue", + "mode": "fixed" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "Bps" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 12, + "y": 23 + }, + "id": 142, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum(rate(networkobservability_forward_bytes{direction=\"ingress\", cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval]))", + "legendFormat": "Ingress Bytes", + "range": true, + "refId": "A" + } + ], + "title": "Ingress Bytes", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "super-light-blue", + "mode": "fixed" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, 
+ "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 18, + "y": 23 + }, + "id": 144, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum(rate(networkobservability_forward_count{direction=\"ingress\", cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval]))", + "legendFormat": "Ingress Packets", + "range": true, + "refId": "A" + } + ], + "title": "Ingress Packets", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 29 + }, + "id": 55, + "panels": [], + "title": "Drops (on $cluster)", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "red", + "mode": "fixed" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 30 + }, + "id": 124, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum(rate(networkobservability_drop_count{direction=\"egress\", cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval]))", + "legendFormat": "Dropped Packets (Egress)", + "range": true, + "refId": "A" + } + ], + "title": "Packets Dropped - Egress", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "red", + "mode": "fixed" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + 
"w": 8, + "x": 8, + "y": 30 + }, + "id": 125, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum(rate(networkobservability_drop_count{direction=\"ingress\", cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval]))", + "legendFormat": "Dropped Packets (Ingress)", + "range": true, + "refId": "A" + } + ], + "title": "Packets Dropped - Ingress", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "red", + "mode": "fixed" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 30 + }, + "id": 114, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum(rate(networkobservability_drop_count{direction=\"unknown\", instance=~\"$Nodes\", cluster=\"$cluster\"}[$__rate_interval]))", + "legendFormat": "Dropped Packets (Unknown)", + "range": true, + "refId": "A" + } + ], + "title": "Packets Dropped - Unknown", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "Linux only. 
Values:\n- iptable_rule_drop: packet dropped in iptables, e.g., because of a NetworkPolicy if using --network-policy=azure\n- iptable_nat_drop: packet dropped in iptables during NAT (Network Address Translation)\n- tcp_connect_basic: packet dropped by tcp connect\n- tcp_accept_basic: packet dropped by tcp accept\n- tcp_close_basic: packet dropped by tcp close\n- conntrack_add_drop: packet dropped while conntrack (connection tracking) was adding the connection", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-YlRd" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "Bps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 37 + }, + "id": 86, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum by (reason) (rate(networkobservability_drop_bytes{cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval])) > 0", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Bytes Dropped by Reason", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "Values:\n- Linux:\n - iptable_rule_drop: packet dropped in iptables, e.g., because of a NetworkPolicy if using --network-policy=azure\n - iptable_nat_drop: packet dropped in iptables during NAT (Network Address Translation)\n - tcp_connect_basic: packet dropped by tcp connect\n - tcp_accept_basic: packet dropped by tcp accept\n - tcp_close_basic: packet dropped by tcp close\n - conntrack_add_drop: packet dropped while conntrack (connection tracking) was adding the connection\n - rx_dropped: an interface dropped a received packet\n - tx_dropped: an interface dropped a transmitted packet\n- Windows:\n - aclrule: dropped by an ACL rule in VFP, e.g., because of a NetworkPolicy\n - endpoint: dropped by an HNS Pod Endpoint ", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-YlRd" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 
12, + "x": 12, + "y": 37 + }, + "id": 88, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum by (reason) (rate(networkobservability_drop_count{cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval])) > 0\r\nor\r\nsum by (reason) (\r\n label_replace(\r\n rate(networkobservability_interface_stats{statistic_name=\"rx_dropped\", cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval]),\r\n \"reason\",\r\n \"$1\",\r\n \"statistic_name\",\r\n \"(.*)\"\r\n )\r\n)\r\nor # cannot combine these interface_stats expressions into one using regex, since regex would capture anything with rx_dropped or tx_dropped in it\r\nsum by (reason) (\r\n label_replace(\r\n rate(networkobservability_interface_stats{statistic_name=\"tx_dropped\", cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval]),\r\n \"reason\",\r\n \"$1\",\r\n \"statistic_name\",\r\n \"(.*)\"\r\n )\r\n)", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Packets Dropped by Reason", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-BlYlRd" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "Bps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 45 + }, + "id": 131, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum by(instance) (rate(networkobservability_drop_bytes{cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval])) > 0", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Bytes Dropped by Node", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-YlRd" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + 
} + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 45 + }, + "id": 132, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum by(instance) (rate(networkobservability_drop_count{cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval])) > 0", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Packets Dropped by Node", + "type": "timeseries" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 53 + }, + "id": 59, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 54 + }, + "id": 64, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum by(state) (networkobservability_tcp_state{cluster=\"$cluster\", instance=~\"$Nodes\"})", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "TCP Active Connections by State", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 54 + }, + "id": 68, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + 
"mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum by(address) (networkobservability_tcp_connection_remote{cluster=\"$cluster\", instance=~\"$Nodes\", address!~\"127.0.0.1|0.0.0.0\"})", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "TCP Active Connections by Remote Addr", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-BlPu" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 10, + "x": 0, + "y": 62 + }, + "id": 65, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum(rate(networkobservability_tcp_connection_stats{statistic_name=\"tcptimeouts\", cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval]))", + "legendFormat": "TCP Timeouts", + "range": true, + "refId": "A" + } + ], + "title": "TCP Connection Timeouts", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-BlPu" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 7, + "x": 10, + "y": 62 + }, + "id": 66, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum(rate(networkobservability_tcp_connection_stats{statistic_name=\"resetcount\", cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval]))", + "legendFormat": "TCP Resets", + "range": true, + "refId": "A" + } + ], + "title": "TCP Connection Resets", + "type": "timeseries" + }, + { + 
"datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-BlPu" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 7, + "x": 17, + "y": 62 + }, + "id": 69, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum(rate(networkobservability_tcp_connection_stats{statistic_name=\"tcptsreorder\", cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval]))", + "legendFormat": "TCP Reorders", + "range": true, + "refId": "A" + } + ], + "title": "TCP Connection Reorders", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "Windows only", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-BlPu" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 70 + }, + "id": 61, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum(rate(networkobservability_tcp_flag_counters{cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval]))", + "legendFormat": "TCP packets", + "range": true, + "refId": "A" + } + ], + "title": "Windows TCP Packets", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "Windows only", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-BlPu" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + 
"lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 70 + }, + "id": 62, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum(rate(networkobservability_tcp_flag_counters{flag=\"rst\", cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval]))", + "legendFormat": "TCP Reset Packets", + "range": true, + "refId": "A" + } + ], + "title": "Windows TCP RST Packets", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "Windows only", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-BlPu" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 70 + }, + "id": 70, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum(rate(networkobservability_tcp_flag_counters{flag=\"synack\", cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval]))", + "legendFormat": "TCP SYN-ACK packets", + "range": true, + "refId": "A" + } + ], + "title": "Windows TCP SYN-ACK Packets", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-BlPu" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + 
"h": 7, + "w": 24, + "x": 0, + "y": 78 + }, + "id": 72, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum(networkobservability_udp_connection_stats{statistic_name=\"active\", cluster=\"$cluster\", instance=~\"$Nodes\"})", + "legendFormat": "Total UDP Connections", + "range": true, + "refId": "A" + } + ], + "title": "UDP Connections", + "type": "timeseries" + } + ], + "title": "Connections (on $cluster)", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 54 + }, + "id": 74, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "Linux only", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 0, + "y": 55 + }, + "id": 76, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum by (interface_name) (rate(networkobservability_interface_stats{statistic_name=\"rx_packets\", cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval]))", + "legendFormat": "{{interface_name}}", + "range": true, + "refId": "A" + } + ], + "title": "RX Packets by Interface", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "Linux only", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 6, + "y": 55 + }, + "id": 77, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + 
"showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum by (interface_name) (rate(networkobservability_interface_stats{statistic_name=\"tx_packets\", cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval]))", + "legendFormat": "{{interface_name}}", + "range": true, + "refId": "A" + } + ], + "title": "TX Packets by Interface", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "Linux only", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 12, + "y": 55 + }, + "id": 94, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum by (instance) (rate(networkobservability_interface_stats{statistic_name=~\"rx[0-9]+_cache_full\", cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval])) > 0", + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Rx Cache Full error", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "Linux only", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 18, + "y": 55 + }, + "id": 93, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum by (instance) (rate(networkobservability_interface_stats{statistic_name=~\"tx[0-9]+_nop\", 
cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval])) > 0", + "hide": false, + "legendFormat": "{{instance}}", + "range": true, + "refId": "B" + } + ], + "title": "Tx No Op Errors", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "Linux only", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 0, + "y": 63 + }, + "id": 161, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum by (instance) (rate(networkobservability_interface_stats{statistic_name=\"rx_dropped\", cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval])) > 0", + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Dropped Rx Packets", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "Linux only", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 6, + "y": 63 + }, + "id": 162, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum by (instance) (rate(networkobservability_interface_stats{statistic_name=\"tx_dropped\", cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval])) > 0", + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Dropped Tx Packets", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + 
"description": "Linux only", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 12, + "y": 63 + }, + "id": 160, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum by (instance) (rate(networkobservability_interface_stats{statistic_name=\"rx_comp_full\", cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval])) > 0", + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Rx Comp Full Errors", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "Linux only", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 18, + "y": 63 + }, + "id": 159, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum by (instance) (rate(networkobservability_interface_stats{statistic_name=\"tx_send_full\", cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval])) > 0", + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Tx Send Full Errors", + "type": "timeseries" + } + ], + "title": "Interfaces/Nodes (on $cluster)", + "type": "row" + } + ], + "refresh": "", + "revision": 1, + "schemaVersion": 38, + "style": "dark", + "tags": [ + "k8s:network-observability" + ], + "templating": { + "list": [ + { + "current": {}, + "hide": 0, + "includeAll": false, + "label": "Data Source", + "multi": false, + "name": "datasource", + "options": [], + 
"query": "prometheus", + "queryValue": "", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": "(.*)", + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "definition": "label_values(kube_node_info, cluster)", + "hide": 0, + "includeAll": false, + "label": "Cluster", + "multi": false, + "name": "cluster", + "options": [], + "query": { + "query": "label_values(kube_node_info, cluster)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "type": "query" + }, + { + "allValue": "(.*)", + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "definition": "label_values(kube_node_info{cluster=\"$cluster\"},node)", + "hide": 0, + "includeAll": true, + "label": "Nodes", + "multi": true, + "name": "Nodes", + "options": [], + "query": { + "query": "label_values(kube_node_info{cluster=\"$cluster\"},node)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "type": "query" + } + ] + }, + "time": { + "from": "now-30m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Kubernetes / Networking / Clusters", + "uid": "Retina11", + "version": 1 +} diff --git a/deploy/grafana/dashboards/dns.json b/deploy/grafana/dashboards/dns.json new file mode 100644 index 000000000..81d1f65c3 --- /dev/null +++ b/deploy/grafana/dashboards/dns.json @@ -0,0 +1,928 @@ +{ + "__inputs": [], + "__elements": {}, + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "9.5.15" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "table", + "name": "Table", + "version": "" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], + "editable": true, + "id": null, + "links": [ + { + "asDropdown": true, + "icon": "external link", + "includeVars": true, + "keepTime": true, + "tags": [ + "k8s:network-observability" + ], + "targetBlank": false, + "title": "Dashboards: Network Observability", + "tooltip": "", + "type": "dashboards", + "url": "" + }, + { + "asDropdown": false, + "icon": "info", + "includeVars": false, + "keepTime": false, + "tags": [], + "targetBlank": true, + "title": "Documentation", + "tooltip": "", + "type": "link", + "url": "https://www.retina.sh" + } + ], + "liveNow": true, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "req/s" + }, + "overrides": [] + 
}, + "gridPos": { + "h": 5, + "w": 8, + "x": 0, + "y": 0 + }, + "id": 135, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum (rate(networkobservability_dns_request_count{cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval])) by (query_type) > 0", + "legendFormat": "{{query_type}}", + "range": true, + "refId": "A" + } + ], + "title": "DNS Requests", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "resp/s" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 8, + "y": 0 + }, + "id": 136, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum (rate(networkobservability_dns_response_count{cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval])) by (query_type) > 0", + "legendFormat": "{{query_type}}", + "range": true, + "refId": "A" + } + ], + "title": "DNS Responses", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 16, + "y": 0 + }, + "id": 137, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "(\r\n 1 - (\r\n sum 
(networkobservability_dns_response_count{cluster=\"$cluster\", instance=~\"$Nodes\"}) by (query_type) / sum (networkobservability_dns_request_count{cluster=\"$cluster\", instance=~\"$Nodes\"}) by (query_type)\r\n )\r\n) * 100 * (\r\n (\r\n sum (rate(networkobservability_dns_response_count{cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval])) by (query_type)\r\n ) > bool 0\r\n) > 0", + "legendFormat": "{{query_type}}", + "range": true, + "refId": "A" + } + ], + "title": "DNS Missing Response", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "resp/s" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 5 + }, + "id": 139, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum(rate(networkobservability_dns_response_count{cluster=\"$cluster\", instance=~\"$Nodes\", return_code=\"NoError\"}[$__rate_interval])) by (num_response) > 0", + "legendFormat": "{{num_response}}", + "range": true, + "refId": "A" + } + ], + "title": "DNS Response IPs Returned", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "resp/s" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 5 + }, + "id": 138, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "label_replace(\r\n label_replace(\r\n sum by (return_code, query_type) (\r\n 
rate(networkobservability_dns_response_count{cluster=\"$cluster\", instance=~\"$Nodes\", return_code!=\"NoError\"}[$__rate_interval])\r\n ) > 0, \"return_code\", \"non-existent domain\", \"return_code\", \"nxdomain\"\r\n ), \"return_code\", \"server failure\", \"return_code\", \"servfail\"\r\n)", + "legendFormat": "{{return_code}} ({{query_type}})", + "range": true, + "refId": "A" + } + ], + "title": "DNS Errors", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "req/min" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 12 + }, + "id": 140, + "options": { + "legend": { + "calcs": [ + "max", + "last" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true, + "sortBy": "Max", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "round(topk(10, sum (60*rate(networkobservability_dns_request_count{cluster=\"$cluster\", instance=~\"$Nodes\"}[$__rate_interval])) by (query, query_type)), 1)", + "interval": "", + "legendFormat": "{{query}} ({{query_type}})", + "range": true, + "refId": "A" + } + ], + "title": "Top DNS Queries (Requests)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "resp/min" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 20 + }, + "id": 144, + "options": { + "legend": { + "calcs": [ + "max", + "last" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true, + "sortBy": "Max", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "round(topk(10, sum (60*rate(networkobservability_dns_response_count{cluster=\"$cluster\", 
instance=~\"$Nodes\"}[$__rate_interval])) by (query, query_type)), 1)", + "interval": "", + "legendFormat": "{{query}} ({{query_type}})", + "range": true, + "refId": "A" + } + ], + "title": "Top DNS Queries (Responses)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "cellOptions": { + "type": "auto" + }, + "inspect": false + }, + "decimals": 0, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 28 + }, + "id": 143, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "enablePagination": false, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "frameIndex": 22, + "showHeader": true, + "sortBy": [ + { + "desc": false, + "displayName": "Query" + } + ] + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "label_replace(\r\n label_replace(\r\n sum by (query, query_type, response, return_code) (\r\n 60*rate(networkobservability_dns_response_count{cluster=\"$cluster\", instance=~\"$Nodes\", num_response!=\"0\", response!=\"\"}[$__rate_interval])\r\n ), \"return_code\", \"non-existent domain\", \"return_code\", \"nxdomain\"\r\n ), \"return_code\", \"success\", \"return_code\", \"noerror\"\r\n)", + "instant": true, + "interval": "", + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "DNS Response Table", + "transformations": [ + { + "id": "labelsToFields", + "options": { + "keepLabels": [ + "num_response", + "query", + "query_type", + "response", + "return_code" + ] + } + }, + { + "id": "merge", + "options": {} + }, + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true, + "Value": true + }, + "indexByName": { + "Time": 0, + "Value": 1, + "num_response": 5, + "query": 2, + "query_type": 3, + "response": 6, + "return_code": 4 + }, + "renameByName": { + "Value": "Responses/Min", + "num_response": "IPs in Response", + "query": "Query", + "query_type": "Type", + "response": "Response", + "return_code": "Return Code" + } + } + } + ], + "type": "table" + } + ], + "refresh": "", + "revision": 1, + "schemaVersion": 38, + "style": "dark", + "tags": [ + "k8s:network-observability" + ], + "templating": { + "list": [ + { + "current": {}, + "hide": 0, + "includeAll": false, + "label": "Data Source", + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "queryValue": "", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "definition": "label_values(kube_node_info, cluster)", + "hide": 0, + "includeAll": true, + "label": "Cluster", + "multi": false, + "name": "cluster", + "options": [], + "query": { + "query": "label_values(kube_node_info, cluster)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "type": "query" + }, + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "definition": "label_values(kube_node_info, node)", + "hide": 0, + "includeAll": true, + "label": "Nodes", + "multi": true, + 
"name": "Nodes", + "options": [], + "query": { + "query": "label_values(kube_node_info, node)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "type": "query" + } + ] + }, + "time": { + "from": "now-30m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Kubernetes / Networking / DNS", + "uid": "Retina55", + "version": 1 +} diff --git a/deploy/grafana/dashboards/pod-level.json b/deploy/grafana/dashboards/pod-level.json new file mode 100755 index 000000000..8abda1d33 --- /dev/null +++ b/deploy/grafana/dashboards/pod-level.json @@ -0,0 +1,1106 @@ +{ + "__inputs": [], + "__elements": {}, + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "9.5.6" + }, + { + "type": "panel", + "id": "heatmap", + "name": "Heatmap", + "version": "" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], + "editable": true, + "id": null, + "links": [ + { + "asDropdown": true, + "icon": "external link", + "includeVars": true, + "keepTime": true, + "tags": [ + "k8s:network-observability" + ], + "targetBlank": false, + "title": "Dashboards: Network Observability", + "tooltip": "", + "type": "dashboards", + "url": "" + }, + { + "asDropdown": false, + "icon": "info", + "includeVars": false, + "keepTime": false, + "tags": [], + "targetBlank": true, + "title": "Documentation", + "tooltip": "", + "type": "link", + "url": "https://www.retina.sh" + } + ], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 6, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "sum(irate(retina_adv_forward_count{source_podname=~\"$pod\"}[1m]))", + "legendFormat": "Total PPS", + "range": true, + "refId": "A" + } + ], + "title": "Outgoing Traffic", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + 
"drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 10, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "sum(irate(retina_adv_forward_count{destination_podname=~\"$pod\"}[1m]))", + "legendFormat": "Total PPS", + "range": true, + "refId": "A" + } + ], + "title": "Incoming Traffic", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 8 + }, + "id": 4, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "sum by(source_podname) (irate(retina_adv_forward_count{destination_podname!=\"unknown\", source_podname!=\"unknown\"}[1m]))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "All Outgoing pps per pod", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + 
"steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 8 + }, + "id": 11, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "sum by(destination_podname) (irate(retina_adv_forward_count{destination_podname!=\"unknown\", source_podname!=\"unknown\"}[1m]))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "All Incoming pps per pod", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "smooth", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 16 + }, + "id": 18, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum by(source_podname) (irate(retina_adv_drop_count{destination_podname!=\"unknown\", source_podname!=\"unknown\"}[1m]))", + "hide": false, + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Dropped pps per source pod", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 16 + }, + "id": 19, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + 
"datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "sum by(destination_podname) (irate(retina_adv_forward_count{destination_podname!=\"unknown\", source_podname!=\"unknown\"}[1m]))", + "hide": false, + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Dropped pps per Destination pod", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 38, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 24 + }, + "id": 13, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "sum by(source_ip) (irate(retina_adv_forward_count{source_podname=\"unknown\"}[1m]))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "All Outgoing pps per pod", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 30, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 24 + }, + "id": 14, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "sum by(destination_ip) (irate(retina_adv_forward_count{destination_podname=\"unknown\"}[1m]))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "All Incoming pps per pod", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + 
"defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + }, + "overrides": [] + }, + "gridPos": { + "h": 11, + "w": 24, + "x": 0, + "y": 32 + }, + "id": 12, + "options": { + "calculate": false, + "cellGap": 1, + "color": { + "exponent": 0.5, + "fill": "dark-orange", + "mode": "scheme", + "reverse": false, + "scale": "exponential", + "scheme": "Oranges", + "steps": 64 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto" + }, + "tooltip": { + "show": true, + "yHistogram": false + }, + "yAxis": { + "axisPlacement": "left", + "reverse": false + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "topk(10, sum by(source_podname) (irate(retina_adv_forward_count{source_podname!=\"unknown\"}[5m])))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Top 10 Pods Outgoing traffic", + "type": "heatmap" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + }, + "overrides": [] + }, + "gridPos": { + "h": 11, + "w": 24, + "x": 0, + "y": 43 + }, + "id": 15, + "options": { + "calculate": false, + "cellGap": 1, + "color": { + "exponent": 0.5, + "fill": "dark-orange", + "mode": "scheme", + "reverse": false, + "scale": "exponential", + "scheme": "Oranges", + "steps": 64 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto" + }, + "tooltip": { + "show": true, + "yHistogram": false + }, + "yAxis": { + "axisPlacement": "left", + "reverse": false + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "topk(10, sum by(destination_podname) (irate(retina_adv_forward_count{destination_podname!=\"unknown\"}[5m])))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Top 10 Pods Incoming traffic", + "type": "heatmap" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 24, + "x": 0, + "y": 54 + }, + "id": 17, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + 
"datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "avg by(instance) (sum by(le, instance) (rate(retina_node_apiserver_handshake_latency_ms_bucket[1m])))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "API server latency", + "type": "timeseries" + } + ], + "refresh": "", + "revision": 1, + "schemaVersion": 38, + "style": "dark", + "tags": [ + "k8s:network-observability" + ], + "templating": { + "list": [ + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "definition": "query_result(retina_adv_forward_count)", + "hide": 1, + "includeAll": true, + "label": "Pod Name ", + "multi": false, + "name": "pod", + "options": [], + "query": { + "query": "query_result(retina_adv_forward_count)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "/.*_podname=\"([^\"]*).*/", + "skipUrlSync": false, + "sort": 0, + "type": "query" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", +<<<<<<< HEAD:deploy/grafana/dashboards/pod-level.json + "title": "Kubernetes / Networking / Pod Traffic", + "uid": "Retina22", + "version": 1 +======= + "title": "Kappie Linux and Windows", + "uid": "UCFLVEOVk", + "version": 26, + "weekStart": "" +>>>>>>> a6373ce (remove matmerr):deploy/grafana/dashboards/kappie-linux.json +} diff --git a/deploy/manifests/controller/helm/retina/.helmignore b/deploy/manifests/controller/helm/retina/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/deploy/manifests/controller/helm/retina/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deploy/manifests/controller/helm/retina/Chart.yaml b/deploy/manifests/controller/helm/retina/Chart.yaml new file mode 100644 index 000000000..a017aa63e --- /dev/null +++ b/deploy/manifests/controller/helm/retina/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: retina +description: A Helm chart for retina Kubernetes with dependencies + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
+appVersion: "0.1.0" diff --git a/deploy/manifests/controller/helm/retina/crds/retina.io_captures.yaml b/deploy/manifests/controller/helm/retina/crds/retina.io_captures.yaml new file mode 100644 index 000000000..3f919f5cf --- /dev/null +++ b/deploy/manifests/controller/helm/retina/crds/retina.io_captures.yaml @@ -0,0 +1,372 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + name: captures.retina.io +spec: + group: retina.io + names: + categories: + - retina + kind: Capture + listKind: CaptureList + plural: captures + singular: capture + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Capture indicates the settings of a network trace. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CaptureSpec indicates the specification of Capture. + properties: + captureConfiguration: + description: CaptureConfiguration indicates the configurations of + the network capture. + properties: + captureOption: + description: CaptureOption lists the options of the capture. + properties: + duration: + description: Duration indicates length of time that the capture + should continue for. + pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: string + maxCaptureSize: + default: 100 + description: MaxCaptureSize limits the capture file to MB + in size. + type: integer + packetSize: + description: PacketSize limits the each packet to bytes in + size and packets longer than PacketSize will be truncated. + type: integer + type: object + captureTarget: + description: CaptureTarget indicates the target on which the network + packets capture will be performed. + properties: + namespaceSelector: + description: NamespaceSelector selects Namespaces using cluster-scoped + labels. This field follows standard label selector semantics. + NamespaceSelector and PodSelector pair selects a pod to + capture pod network namespace traffic. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists or + DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + nodeSelector: + description: NodeSelector is a selector which select the node + to capture network packets. Selector which must match a + node's labels. NodeSelector is incompatible with NamespaceSelector/PodSelector + pair. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists or + DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + podSelector: + description: This is a label selector which selects Pods. + This field follows standard label selector semantics. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists or + DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + type: object + filters: + description: Filters represent a range of filters to be included/excluded + in the capture. + properties: + exclude: + description: Exclude specifies what IP or IP:port is excluded + in the capture with wildcard support. See Include for detailed + explanation. + items: + type: string + type: array + include: + description: 'Include specifies what IP or IP:port is included + in the capture with wildcard support. If a port not specified + or is *, the port filter is excluded. If an IP is specified + as *, the host filter should be included. Include and Exclude + arguments will finally be translated into a logic like: + (include1 or include2) and not (exclude1 or exclude2)' + items: + type: string + type: array + type: object + includeMetadata: + default: true + description: 'IncludeMetadata represents whether or not networking + metadata should be captured. Networking metadata will consists + of the following info, but is expected to grow: - IP address + configuration - IP neighbor status - IPtables rule dumps - Network + statistics information' + type: boolean + tcpdumpFilter: + description: TcpdumpFilter is a raw tcpdump filter string. + type: string + required: + - captureTarget + type: object + outputConfiguration: + description: OutputConfiguration indicates the location capture will + be stored. + properties: + blobUpload: + description: BlobUpload is a secret containing the blob SAS URL + to the given blob container. + type: string + hostPath: + description: HostPath stores the capture files into the specified + host filesystem. If nothing exists at the given path of the + host, an empty directory will be created there. + type: string + persistentVolumeClaim: + description: PersistentVolumeClaim mounts the supplied PVC into + the pod on `/capture` and write the capture files there. + type: string + type: object + required: + - captureConfiguration + type: object + status: + description: CaptureStatus describes the status of the capture. + properties: + active: + description: The number of pending and running jobs. + format: int32 + type: integer + completionTime: + description: Represents time when the Capture was completed, and it + is determined by the last completed capture job. The completion + time is only set when the Capture finishes successfully. + format: date-time + type: string + conditions: + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. 
+ maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + failed: + description: The number of failed jobs. + format: int32 + type: integer + startTime: + description: Represents time when the Capture controller started processing + a job. + format: date-time + type: string + succeeded: + description: The number of completed jobs. + format: int32 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/deploy/manifests/controller/helm/retina/crds/retina.io_metricsconfigurations.yaml b/deploy/manifests/controller/helm/retina/crds/retina.io_metricsconfigurations.yaml new file mode 100644 index 000000000..bc32c59dc --- /dev/null +++ b/deploy/manifests/controller/helm/retina/crds/retina.io_metricsconfigurations.yaml @@ -0,0 +1,170 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + name: metricsconfigurations.retina.io +spec: + group: retina.io + names: + categories: + - retina + kind: MetricsConfiguration + listKind: MetricsConfigurationList + plural: metricsconfigurations + singular: metricsconfiguration + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: MetricsConfiguration contains the specification for the retina + plugin metrics + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. 
Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of the desired behavior of the RetinaMetrics. + Can be omitted because this is for advanced metrics. + properties: + contextOptions: + items: + description: MetricsContextOptions indicates the configuration for + retina plugin metrics + properties: + additionalLabels: + description: AdditionalContext represents the additional context + of the metrics collected Such as Direction (ingress/egress) + items: + type: string + type: array + x-kubernetes-list-type: set + destinationLabels: + description: DestinationLabels represents the destination context + of the metrics collected Such as IP, pod, port, workload (deployment/replicaset/statefulset/daemonset) + items: + type: string + type: array + x-kubernetes-list-type: set + metricName: + description: MetricName indicates the name of the metric + type: string + sourceLabels: + description: SourceLabels represents the source context of the + metrics collected Such as IP, pod, port + items: + type: string + type: array + x-kubernetes-list-type: set + required: + - metricName + type: object + type: array + namespaces: + description: MetricsNamespaces indicates the namespaces to include + or exclude in metric collection + properties: + exclude: + items: + type: string + type: array + x-kubernetes-list-type: set + include: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + required: + - contextOptions + - namespaces + type: object + status: + properties: + lastKnownSpec: + description: Specification of the desired behavior of the RetinaMetrics. + Can be omitted because this is for advanced metrics. 
+ properties: + contextOptions: + items: + description: MetricsContextOptions indicates the configuration + for retina plugin metrics + properties: + additionalLabels: + description: AdditionalContext represents the additional + context of the metrics collected Such as Direction (ingress/egress) + items: + type: string + type: array + x-kubernetes-list-type: set + destinationLabels: + description: DestinationLabels represents the destination + context of the metrics collected Such as IP, pod, port, + workload (deployment/replicaset/statefulset/daemonset) + items: + type: string + type: array + x-kubernetes-list-type: set + metricName: + description: MetricName indicates the name of the metric + type: string + sourceLabels: + description: SourceLabels represents the source context + of the metrics collected Such as IP, pod, port + items: + type: string + type: array + x-kubernetes-list-type: set + required: + - metricName + type: object + type: array + namespaces: + description: MetricsNamespaces indicates the namespaces to include + or exclude in metric collection + properties: + exclude: + items: + type: string + type: array + x-kubernetes-list-type: set + include: + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + required: + - contextOptions + - namespaces + type: object + reason: + type: string + state: + default: Initialized + enum: + - Initialized + - Accepted + - Errored + - Warning + type: string + required: + - reason + - state + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/deploy/manifests/controller/helm/retina/crds/retina.io_retinaendpoints.yaml b/deploy/manifests/controller/helm/retina/crds/retina.io_retinaendpoints.yaml new file mode 100644 index 000000000..3bedb38c1 --- /dev/null +++ b/deploy/manifests/controller/helm/retina/crds/retina.io_retinaendpoints.yaml @@ -0,0 +1,91 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + name: retinaendpoints.retina.io +spec: + group: retina.io + names: + kind: RetinaEndpoint + listKind: RetinaEndpointList + plural: retinaendpoints + shortNames: + - ke + singular: retinaendpoint + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.podIP + name: Pod IP + type: string + - jsonPath: .spec.ownerReferences + name: Referenced By + priority: 1 + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: RetinaEndpoint is the Schema for the retinaendpoints API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: RetinaEndpointSpec defines the desired state of RetinaEndpoint + properties: + annotations: + additionalProperties: + type: string + type: object + containers: + items: + properties: + id: + type: string + name: + type: string + type: object + type: array + labels: + additionalProperties: + type: string + type: object + nodeIP: + type: string + ownerReferences: + items: + properties: + apiVersion: + type: string + kind: + type: string + name: + type: string + type: object + type: array + podIP: + type: string + podIPs: + items: + type: string + type: array + type: object + status: + description: RetinaEndpointStatus defines the observed state of RetinaEndpoint + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/deploy/manifests/controller/helm/retina/crds/retina.io_tracesconfigurations.yaml b/deploy/manifests/controller/helm/retina/crds/retina.io_tracesconfigurations.yaml new file mode 100644 index 000000000..91bcdf43c --- /dev/null +++ b/deploy/manifests/controller/helm/retina/crds/retina.io_tracesconfigurations.yaml @@ -0,0 +1,1102 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + name: tracesconfigurations.retina.io +spec: + group: retina.io + names: + categories: + - retina + kind: TracesConfiguration + listKind: TracesConfigurationList + plural: tracesconfigurations + singular: tracesconfiguration + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: TracesConfiguration contains the specification for the retina + plugin Traces + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of the desired behavior of the RetinaTraces. + Can be omitted because this is for advanced Traces. + properties: + outputConfiguration: + description: TraceOutputConfiguration indicates the configuration + for retina traces outputs + properties: + connectionConfiguration: + type: string + destination: + enum: + - stdout + - azuretable + - loganalytics + - opentelemetry + type: string + required: + - connectionConfiguration + - destination + type: object + traceConfiguration: + items: + description: TraceConfiguration indicates the configuration for + retina traces + properties: + captureLevel: + enum: + - AllPackets + - FirstPacket + type: string + includeLayer7Data: + type: boolean + traceTargets: + items: + properties: + from: + properties: + ipBlock: + properties: + cidr: + type: string + except: + items: + type: string + type: array + type: object + namespaceSelector: + description: A label selector is a label query over + a set of resources. 
The result of matchLabels and + matchExpressions are ANDed. An empty label selector + matches all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + nodeSelector: + description: A label selector is a label query over + a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector + matches all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + podSelector: + description: A label selector is a label query over + a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector + matches all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. 
+ items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + serviceSelector: + description: A label selector is a label query over + a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector + matches all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: object + ports: + items: + properties: + endPort: + type: string + x-kubernetes-int-or-string: true + port: + type: string + x-kubernetes-int-or-string: true + protocol: + default: TCP + type: string + required: + - port + - protocol + type: object + type: array + to: + properties: + ipBlock: + properties: + cidr: + type: string + except: + items: + type: string + type: array + type: object + namespaceSelector: + description: A label selector is a label query over + a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector + matches all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. 
+ items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + nodeSelector: + description: A label selector is a label query over + a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector + matches all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + podSelector: + description: A label selector is a label query over + a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector + matches all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + serviceSelector: + description: A label selector is a label query over + a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector + matches all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: object + tracePoints: + items: + type: string + type: array + type: object + type: array + required: + - captureLevel + - includeLayer7Data + - traceTargets + type: object + type: array + required: + - outputConfiguration + - traceConfiguration + type: object + status: + properties: + lastKnownSpec: + description: Specification of the desired behavior of the RetinaTraces. + Can be omitted because this is for advanced Traces. 
+ properties: + outputConfiguration: + description: TraceOutputConfiguration indicates the configuration + for retina traces outputs + properties: + connectionConfiguration: + type: string + destination: + enum: + - stdout + - azuretable + - loganalytics + - opentelemetry + type: string + required: + - connectionConfiguration + - destination + type: object + traceConfiguration: + items: + description: TraceConfiguration indicates the configuration + for retina traces + properties: + captureLevel: + enum: + - AllPackets + - FirstPacket + type: string + includeLayer7Data: + type: boolean + traceTargets: + items: + properties: + from: + properties: + ipBlock: + properties: + cidr: + type: string + except: + items: + type: string + type: array + type: object + namespaceSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + nodeSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + podSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + serviceSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: object + ports: + items: + properties: + endPort: + type: string + x-kubernetes-int-or-string: true + port: + type: string + x-kubernetes-int-or-string: true + protocol: + default: TCP + type: string + required: + - port + - protocol + type: object + type: array + to: + properties: + ipBlock: + properties: + cidr: + type: string + except: + items: + type: string + type: array + type: object + namespaceSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + nodeSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + podSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + serviceSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + type: object + tracePoints: + items: + type: string + type: array + type: object + type: array + required: + - captureLevel + - includeLayer7Data + - traceTargets + type: object + type: array + required: + - outputConfiguration + - traceConfiguration + type: object + reason: + type: string + state: + default: Initialized + enum: + - Initialized + - Accepted + - Errored + - Warning + type: string + required: + - lastKnownSpec + - reason + - state + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/deploy/manifests/controller/helm/retina/templates/NOTES.txt b/deploy/manifests/controller/helm/retina/templates/NOTES.txt new file mode 100644 index 000000000..8d0773b91 --- /dev/null +++ b/deploy/manifests/controller/helm/retina/templates/NOTES.txt @@ -0,0 +1,3 @@ +1. Installing retina service using helm: helm install retina ./deploy/manifests/controller/helm/retina/ --namespace kube-system --dependency-update +2. Cleaning up/uninstalling/deleting retina and dependencies related: + helm uninstall retina -n kube-system diff --git a/deploy/manifests/controller/helm/retina/templates/_helpers.tpl b/deploy/manifests/controller/helm/retina/templates/_helpers.tpl new file mode 100644 index 000000000..bd0ce0036 --- /dev/null +++ b/deploy/manifests/controller/helm/retina/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "retina.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "retina.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "retina.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "retina.labels" -}} +helm.sh/chart: {{ include "retina.chart" . }} +{{ include "retina.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "retina.selectorLabels" -}} +app.kubernetes.io/name: {{ include "retina.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +{{- define "retina.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "retina.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} +*/}} diff --git a/deploy/manifests/controller/helm/retina/templates/configmap.yaml b/deploy/manifests/controller/helm/retina/templates/configmap.yaml new file mode 100644 index 000000000..129b38853 --- /dev/null +++ b/deploy/manifests/controller/helm/retina/templates/configmap.yaml @@ -0,0 +1,41 @@ +{{- if .Values.os.linux}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "retina.name" . }}-config + namespace: {{ .Values.namespace }} +data: + config.yaml: |- + apiServer: + host: {{ .Values.apiServer.host }} + port: {{ .Values.retinaPort }} + logLevel: {{ .Values.logLevel }} + enabledPlugin: {{ .Values.enabledPlugin_linux }} + metricsInterval: {{ .Values.metricsInterval }} + enableTelemetry: {{ .Values.enableTelemetry }} + enablePodLevel: {{ .Values.enablePodLevel }} + remoteContext: {{ .Values.remoteContext }} + enableAnnotations: {{ .Values.enableAnnotations }} +{{- end}} +--- +{{- if .Values.os.windows}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "retina.name" . }}-config-win + namespace: {{ .Values.namespace }} +data: + config.yaml: |- + apiServer: + host: {{ .Values.apiServer.host }} + port: {{ .Values.retinaPort }} + logLevel: {{ .Values.logLevel }} + enabledPlugin: {{ .Values.enabledPlugin_win }} + metricsInterval: {{ .Values.metricsInterval }} + enableTelemetry: {{ .Values.enableTelemetry }} + enablePodLevel: {{ .Values.enablePodLevel }} + remoteContext: {{ .Values.remoteContext }} +{{- end}} + + + diff --git a/deploy/manifests/controller/helm/retina/templates/daemonset.yaml b/deploy/manifests/controller/helm/retina/templates/daemonset.yaml new file mode 100644 index 000000000..766259773 --- /dev/null +++ b/deploy/manifests/controller/helm/retina/templates/daemonset.yaml @@ -0,0 +1,194 @@ +{{- if .Values.os.linux}} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ .Values.agent.name }} + namespace: {{ .Values.namespace }} + labels: + k8s-app: {{ include "retina.name" . }} +spec: + selector: + matchLabels: + app: {{ include "retina.name" . }} + template: + metadata: + labels: + app: {{ include "retina.name" . }} + k8s-app: {{ include "retina.name" . }} + annotations: + prometheus.io/port: "{{ .Values.retinaPort }}" + prometheus.io/scrape: "true" + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + spec: + hostNetwork: true + serviceAccountName: {{ .Values.serviceAccount.name }} + nodeSelector: + kubernetes.io/os: linux + initContainers: + - name: init-retina + image: {{ .Values.image.initRepository }}:{{ .Values.image.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + terminationMessagePolicy: FallbackToLogsOnError + securityContext: + privileged: true + volumeMounts: + - name: bpf + mountPath: /sys/fs/bpf + mountPropagation: Bidirectional + containers: + - name: {{ include "retina.name" . }} + livenessProbe: + httpGet: + path: /metrics + port: {{ .Values.retinaPort }} + initialDelaySeconds: 30 + periodSeconds: 30 + image: {{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.daemonset.container.retina.command }} + command: + {{- range .Values.daemonset.container.retina.command }} + - {{ . 
}} + {{- end }} + {{- end }} + {{- if .Values.daemonset.container.retina.args}} + args: + - --health-probe-bind-address={{ .Values.daemonset.container.retina.healthProbeBindAddress }} + - --metrics-bind-address={{ .Values.daemonset.container.retina.metricsBindAddress }} + {{- range $.Values.daemonset.container.retina.args}} + - {{ . | quote }} + {{- end}} + {{- end}} + ports: + - containerPort: {{ .Values.retinaPort }} + resources: + limits: + memory: {{ .Values.resources.limits.memory | quote }} + cpu: {{ .Values.resources.limits.cpu | quote }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: NODE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + securityContext: + capabilities: + add: + {{- range .Values.securityContext.capabilities.add }} + - {{ . }} + {{- end }} + privileged: {{ .Values.securityContext.privileged }} + {{- if .Values.volumeMounts }} + volumeMounts: + {{- range $name, $mountPath := .Values.volumeMounts }} + - name: {{ $name }} + mountPath: {{ $mountPath }} + {{- end }} + {{- end }} + terminationGracePeriodSeconds: 90 # Allow for retina to cleanup plugin resources. + volumes: + {{- range $name, $hostPath := .Values.volumeMounts}} + - name: {{ $name }} + {{ if eq $name "config" }} + configMap: + name: {{ $.Values.nameOverride }}-config + {{ else if eq $name "tmp"}} + emptyDir: {} + {{ else }} + hostPath: + path: {{ $hostPath }} + {{ end }} + {{- end }} + nodeSelector: + kubernetes.io/os: linux +{{- end }} +--- +{{- if .Values.os.windows}} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: {{ include "retina.name" . }} + name: {{ .Values.agent_win.name }} + namespace: {{ .Values.namespace }} + annotations: + prometheus.io/port: "{{ .Values.retinaPort }}" + prometheus.io/scrape: "true" + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} +spec: + selector: + matchLabels: + k8s-app: {{ include "retina.name" . }} + template: + metadata: + labels: + app: {{ include "retina.name" . }} + k8s-app: {{ include "retina.name" . }} + name: {{ include "retina.name" . }} + namespace: {{ .Values.namespace }} + spec: + serviceAccountName: {{ .Values.serviceAccount.name }} + securityContext: + windowsOptions: + hostProcess: true + runAsUserName: {{ .Values.securityContext.windowsOptions.runAsUserName}} + runAsNonRoot: false + hostNetwork: true + containers: + - name: retinawin + image: {{ .Values.image.repository }}:{{ .Values.image.tag }} + ports: + - containerPort: {{ .Values.retinaPort }} + command: + - powershell.exe + - -command + - .\setkubeconfigpath.ps1; ./controller.exe --config ./retina/config.yaml --kubeconfig ./kubeconfig + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: NODE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + securityContext: + capabilities: + add: + {{- range .Values.securityContext.capabilities.add }} + - {{ . 
}} + {{- end }} + privileged: {{ .Values.securityContext.privileged }} + {{- if .Values.volumeMounts_win }} + volumeMounts: + {{- range $name, $mountPath := .Values.volumeMounts_win }} + - name: {{ $name }} + mountPath: {{ $mountPath }} + {{- end }} + {{- end }} + nodeSelector: + kubernetes.io/os: windows + volumes: + {{- range $name, $mountPath := .Values.volumeMounts_win }} + - name: {{ $name }} + configMap: + name: {{ $name }} + {{- end }} +{{- end }} diff --git a/deploy/manifests/controller/helm/retina/templates/networkobserver.yaml b/deploy/manifests/controller/helm/retina/templates/networkobserver.yaml new file mode 100644 index 000000000..858d9f71a --- /dev/null +++ b/deploy/manifests/controller/helm/retina/templates/networkobserver.yaml @@ -0,0 +1,20 @@ +# apiVersion: networking.azure.com/v1alpha1 +# kind: ClusterObserver +# metadata: +# name: retina +# namespace: retina +# spec: +# ignoreAllowed: true +# exportList: +# - otelAgent +# observeList: +# namespaceSelector: +# matchLabels: +# podSelector: +# matchLabels: +# direction: +# ignoreList: +# namespaceSelector: +# matchLabels: +# podSelector: +# matchLabels: diff --git a/deploy/manifests/controller/helm/retina/templates/operator.yaml b/deploy/manifests/controller/helm/retina/templates/operator.yaml new file mode 100644 index 000000000..4cc9a5ba3 --- /dev/null +++ b/deploy/manifests/controller/helm/retina/templates/operator.yaml @@ -0,0 +1,276 @@ +{{- if .Values.operator.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: retina-operator + namespace: kube-system + labels: + app: retina-operator + control-plane: retina-operator + app.kubernetes.io/name: deployment + app.kubernetes.io/instance: retina-operator + app.kubernetes.io/component: retina-operator + app.kubernetes.io/created-by: operator + app.kubernetes.io/part-of: operator + app.kubernetes.io/managed-by: kustomize +spec: + selector: + matchLabels: + control-plane: retina-operator + replicas: 1 + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: retina-operator + labels: + app: retina-operator + control-plane: retina-operator + spec: + # TODO(user): Uncomment the following code to configure the nodeAffinity expression + # according to the platforms which are supported by your solution. + # It is considered best practice to support multiple architectures. You can + # build your manager image using the makefile target docker-buildx. 
+ affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + #- arm64 + #- ppc64le + #- s390x + - key: kubernetes.io/os + operator: In + values: + - linux + #- windows + securityContext: + runAsNonRoot: true + containers: + - command: + - /retina-operator + image: {{ .Values.operator.repository }}:{{ .Values.operator.tag }} + name: retina-operator + volumeMounts: + - name: retina-operator-config + mountPath: /retina/ + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + serviceAccountName: retina-operator + terminationGracePeriodSeconds: 10 + volumes: + - name: retina-operator-config + configMap: + name: retina-operator-config + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: serviceaccount + app.kubernetes.io/instance: retina-operator + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: operator + app.kubernetes.io/part-of: operator + app.kubernetes.io/managed-by: kustomize + name: retina-operator + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: retina-operator-role +rules: + - apiGroups: + - "apiextensions.k8s.io" + resources: + - "customresourcedefinitions" + verbs: + - "create" + - "get" + - "update" + - "delete" + - "patch" + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - retina.io + resources: + - retinaendpoints + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - retina.io + resources: + - metricsconfigurations + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - retina.io + resources: + - metricsconfigurations/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - retina.io + resources: + - retinaendpoints/finalizers + verbs: + - update + - apiGroups: + - retina.io + resources: + - retinaendpoints/status + verbs: + - get + - patch + - update + - apiGroups: + - "" + resources: + - namespaces + - pods + - nodes + - secrets + verbs: + - get + - list + - apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - apiGroups: + - batch + resources: + - jobs/status + verbs: + - get + - apiGroups: + - retina.io + resources: + - captures + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - retina.io + resources: + - captures/finalizers + verbs: + - update + - apiGroups: + - retina.io + resources: + - captures/status + verbs: + - get + - patch + - update + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/instance: retina-operator-rolebinding + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: operator + app.kubernetes.io/part-of: operator + app.kubernetes.io/managed-by: 
kustomize + name: retina-operator-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: retina-operator-role +subjects: +- kind: ServiceAccount + name: retina-operator + namespace: kube-system +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: retina-operator-config + namespace: {{ .Values.namespace }} +data: + operator-config.yaml: |- + installCRDs: {{ .Values.operator.installCRDs }} + enableTelemetry: {{ .Values.enableTelemetry }} + remoteContext: {{ .Values.remoteContext }} + captureDebug: {{ .Values.operator.capture.debug }} + captureJobNumLimit: {{ .Values.operator.capture.jobNumLimit }} +{{- end }} diff --git a/deploy/manifests/controller/helm/retina/templates/rbac.yaml b/deploy/manifests/controller/helm/retina/templates/rbac.yaml new file mode 100644 index 000000000..da0fab1aa --- /dev/null +++ b/deploy/manifests/controller/helm/retina/templates/rbac.yaml @@ -0,0 +1,85 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + namespace: {{ .Values.namespace }} + name: retina-cluster-reader +rules: + - apiGroups: [""] # "" indicates the core API group + resources: ["pods", "services", "replicationcontrollers", "nodes", "namespaces"] + verbs: ["get", "watch", "list"] + - apiGroups: ["apps"] + resources: ["deployments", "replicasets"] + verbs: ["get", "watch", "list"] + - apiGroups: ["networking.azure.com"] + resources: ["clusterobservers"] + verbs: ["get", "list", "watch"] + - apiGroups: + - retina.io + resources: + - retinaendpoints + verbs: + - get + - list + - watch + {{- if .Values.operator.enabled }} + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - retina.io + resources: + - retinaendpoints + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - retina.io + resources: + - metricsconfigurations + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - retina.io + resources: + - retinaendpoints/finalizers + verbs: + - update + - apiGroups: + - retina.io + resources: + - retinaendpoints/status + verbs: + - get + - patch + - update + {{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: retina-cluster-reader-binding + namespace: {{ .Values.namespace }} +subjects: + - kind: ServiceAccount + name: retina-agent + namespace: {{ .Values.namespace }} +roleRef: + kind: ClusterRole + name: retina-cluster-reader + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/manifests/controller/helm/retina/templates/service.yaml b/deploy/manifests/controller/helm/retina/templates/service.yaml new file mode 100644 index 000000000..e1da9a464 --- /dev/null +++ b/deploy/manifests/controller/helm/retina/templates/service.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "retina.fullname" . }} + namespace: {{ .Values.namespace }} + labels: + app: {{ include "retina.name" . }} +spec: + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} + selector: + app: {{ include "retina.name" . 
}} diff --git a/deploy/manifests/controller/helm/retina/templates/serviceaccount.yaml b/deploy/manifests/controller/helm/retina/templates/serviceaccount.yaml new file mode 100644 index 000000000..ad7392770 --- /dev/null +++ b/deploy/manifests/controller/helm/retina/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.name }} + namespace: {{ .Values.namespace }} diff --git a/deploy/manifests/controller/helm/retina/templates/tests/test-connection.yaml b/deploy/manifests/controller/helm/retina/templates/tests/test-connection.yaml new file mode 100644 index 000000000..86bc25930 --- /dev/null +++ b/deploy/manifests/controller/helm/retina/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "retina.fullname" . }}-test-connection" + labels: + {{- include "retina.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "retina.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/deploy/manifests/controller/helm/retina/values.yaml b/deploy/manifests/controller/helm/retina/values.yaml new file mode 100644 index 000000000..a4403c0a3 --- /dev/null +++ b/deploy/manifests/controller/helm/retina/values.yaml @@ -0,0 +1,115 @@ +# Default values for retina. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# Support linux and windows by default. +os: + linux: true + windows: true + +operator: + enabled: false + repository: mcr.microsoft.com/containernetworking/retina-operator + tag: "v0.1.3" + installCRDs: true + enableRetinaEndpoint: false + capture: + debug: "true" + jobNumLimit: 0 + +image: + repository: mcr.microsoft.com/containernetworking/retina-agent + initRepository: mcr.microsoft.com/containernetworking/retina-init + pullPolicy: Always + # Overrides the image tag whose default is the chart appVersion. + tag: "v0.1.3" + +enablePodLevel: false +remoteContext: false +enableAnnotations: false +bypassLookupIPOfInterest: false + +imagePullSecrets: [] +nameOverride: "retina" +fullnameOverride: "retina-svc" + +namespace: kube-system + +agent: + name: retina-agent + +agent_win: + name: retina-agent-win + +retinaPort: 10093 + +apiServer: + host: "0.0.0.0" + port: 10093 + +# Supported - debug, info, error, warn, panic, fatal. +logLevel: debug + +enabledPlugin_linux: '["dropreason","packetforward","linuxutil", "dns"]' +enabledPlugin_win: '["hnsstats"]' + +enableTelemetry: true + +# Interval, in seconds, to scrape/publish metrics. 
+metricsInterval: 10 + +azure: + appinsights: + instrumentation_key: "app-insights-instrumentation-key" + +daemonset: + container: + retina: + command: + - "/retina/controller" + args: + - "--config" + - "/retina/config/config.yaml" + healthProbeBindAddress: ":18081" + metricsBindAddress: ":18080" + ports: + containerPort: 10093 + +# volume mounts with name and mountPath +volumeMounts: + debug: /sys/kernel/debug + trace: /sys/kernel/tracing + bpf: /sys/fs/bpf + cgroup: /sys/fs/cgroup + tmp: /tmp + config: /retina/config + +#volume mounts for indows +volumeMounts_win: + retina-config-win: retina + +securityContext: + privileged: false + capabilities: + add: + - SYS_ADMIN + - SYS_RESOURCE + - NET_ADMIN # for packetparser plugin + - IPC_LOCK # for mmap() calls made by NewReader(), ref: https://man7.org/linux/man-pages/man2/mmap.2.html + windowsOptions: + runAsUserName: "NT AUTHORITY\\SYSTEM" + +service: + type: ClusterIP + port: 10093 + targetPort: 10093 + name: retina + +serviceAccount: + annotations: {} + name: "retina-agent" + +resources: + limits: + memory: "300Mi" + cpu: "500m" diff --git a/deploy/prometheus/ama-metrics-settings-configmap.yaml b/deploy/prometheus/ama-metrics-settings-configmap.yaml new file mode 100644 index 000000000..c145dba2a --- /dev/null +++ b/deploy/prometheus/ama-metrics-settings-configmap.yaml @@ -0,0 +1,52 @@ +kind: ConfigMap +apiVersion: v1 +data: + schema-version: + #string.used by agent to parse config. supported versions are {v1}. Configs with other schema versions will be rejected by the agent. + v1 + config-version: + #string.used by customer to keep track of this config file's version in their source control/repository (max allowed 10 chars, other chars will be truncated) + ver1 + prometheus-collector-settings: |- + cluster_alias = "" + default-scrape-settings-enabled: |- + kubelet = true + coredns = false + cadvisor = true + kubeproxy = false + apiserver = false + kubestate = true + nodeexporter = true + windowsexporter = false + windowskubeproxy = false + retinabasic = true + prometheuscollectorhealth = false + default-targets-metrics-keep-list: |- + kubelet = "" + coredns = "" + cadvisor = "" + kubeproxy = "" + apiserver = "" + kubestate = "" + nodeexporter = "" + windowsexporter = "" + windowskubeproxy = "" + retinabasic = "" + minimalingestionprofile = true + default-targets-scrape-interval-settings: |- + kubelet = "30s" + coredns = "30s" + cadvisor = "30s" + kubeproxy = "30s" + apiserver = "30s" + kubestate = "30s" + nodeexporter = "30s" + windowsexporter = "30s" + windowskubeproxy = "30s" + retinabasic = "30s" + prometheusCollectorHealth = "30s" + debug-mode: |- + enabled = false +metadata: + name: ama-metrics-settings-configmap + namespace: kube-system diff --git a/deploy/prometheus/collector-config-template.yml b/deploy/prometheus/collector-config-template.yml new file mode 100644 index 000000000..7276ee8c1 --- /dev/null +++ b/deploy/prometheus/collector-config-template.yml @@ -0,0 +1,46 @@ +exporters: + prometheus: + endpoint: "127.0.0.1:9091" + const_labels: + cluster: $AZMON_CLUSTER_LABEL + file: + path: /opt/microsoft/otelcollector/promotel.json + otlp: + endpoint: 127.0.0.1:55680 + tls: + insecure: true + compression: "gzip" + retry_on_failure: + enabled: false + timeout: 12s +processors: + batch: + send_batch_size: 7000 + timeout: 200ms + send_batch_max_size: 7000 + resource: + attributes: + - key: cluster + value: "$AZMON_CLUSTER_LABEL" + action: "upsert" + - key: job + from_attribute: service.name + action: insert + - key: instance + 
from_attribute: service.instance.id + action: insert +receivers: + prometheus: + config: +service: + pipelines: + metrics: + receivers: [prometheus] + exporters: [otlp] + processors: [batch, resource] + telemetry: + logs: + level: warn + encoding: json + metrics: + level: detailed diff --git a/deploy/prometheus/deploy-retina-clusters.sh b/deploy/prometheus/deploy-retina-clusters.sh new file mode 100755 index 000000000..1c74a989c --- /dev/null +++ b/deploy/prometheus/deploy-retina-clusters.sh @@ -0,0 +1,110 @@ +#!/bin/bash + +set -o xtrace +export SUBSCRIPTION=9b8218f9-902a-4d20-a65c-e98acec5362f +export LOCATION=eastus +export VERSION=4 +export BASE_NAME=$USER-aks +SSH_KEY_FILE_PATH="~/.ssh/$USER-ssh.id_rsa.pub" # Depends on you. Or you can use --generate-ssh-keys +WINDOWS_USERNAME="azureuser" # Recommend azureuser +export PASSWORD=randOmPassword123@ + +# upgrade az cli to the latest version +az upgrade -y + +# Select the oldest patch version in the latest minor version from AKS support version list. +K8S_VERSION=$(az aks get-versions -l $LOCATION -o json | jq '.values |sort_by(.version) | first.patchVersions ' | jq 'keys|.[0]' --sort-keys | tr -d '"') +echo "$K8S_VERSION is selected from region $LOCATION" + +echo "creating azmon+grafana workspace" + +# deploy azure monitor workspace +AZMON_NAME=$BASE_NAME-azmon-$VERSION +AZMON_RESOURCE_GROUP=$AZMON_NAME +az group create --location $LOCATION --name $AZMON_RESOURCE_GROUP +az resource create --resource-group $AZMON_RESOURCE_GROUP --namespace microsoft.monitor --resource-type accounts --name $AZMON_NAME --location $LOCATION --properties {} + +GRAFANA_NAME=$BASE_NAME-grafana-$VERSION +az grafana create --name $GRAFANA_NAME --resource-group $AZMON_RESOURCE_GROUP + +echo "creating ws22 cluster" + +# Windows Server 2022 +NAME=$BASE_NAME-grafana-$VERSION-retina-ws22 +RESOURCE_GROUP=$NAME +az group create --location $LOCATION --name $RESOURCE_GROUP + +# Update aks-preview to the latest version +az extension add --name aks-preview +az extension update --name aks-preview + +# Enable Microsoft.ContainerService/AKSWindows2022Preview +az feature register --namespace Microsoft.ContainerService --name AKSWindows2022Preview +az provider register -n Microsoft.ContainerService + +az group create --name $RESOURCE_GROUP --location $LOCATION + +az aks create \ + --resource-group $RESOURCE_GROUP \ + --name $NAME \ + --generate-ssh-keys \ + --windows-admin-username $WINDOWS_USERNAME \ + --windows-admin-password $PASSWORD \ + --kubernetes-version $K8S_VERSION \ + --network-plugin azure \ + --vm-set-type VirtualMachineScaleSets \ + --node-count 1 + +# Set variables for Windows 2022 node pool +myWindowsNodePool="nwin22" # Length <= 6 +az aks nodepool add \ + --resource-group $RESOURCE_GROUP \ + --cluster-name $NAME \ + --name $myWindowsNodePool \ + --os-type Windows \ + --os-sku Windows2022 \ + --node-count 1 + +# Set variables for Windows 2019 node pool +myWindowsNodePool="nwin19" # Length <= 6 +az aks nodepool add \ + --resource-group $RESOURCE_GROUP \ + --cluster-name $NAME \ + --name $myWindowsNodePool \ + --os-type Windows \ + --os-sku Windows2019 \ + --node-count 1 + +az aks get-credentials -g $RESOURCE_GROUP -n $NAME --overwrite-existing + +kubectl apply -f ama-metrics-settings-configmap.yaml + +az aks update --enable-azuremonitormetrics -n $NAME -g $RESOURCE_GROUP --azure-monitor-workspace-resource-id /subscriptions/$SUBSCRIPTION/resourcegroups/$AZMON_RESOURCE_GROUP/providers/microsoft.monitor/accounts/$AZMON_NAME --grafana-resource-id 
/subscriptions/9b8218f9-902a-4d20-a65c-e98acec5362f/resourceGroups/$AZMON_RESOURCE_GROUP/providers/Microsoft.Dashboard/grafana/$GRAFANA_NAME + +echo "creating azcni overlay cluster" + +# create azcni overlay cluster +NAME=$BASE_NAME-grafana-$VERSION-retina-linux +RESOURCE_GROUP=$NAME +az group create --location $LOCATION --name $RESOURCE_GROUP + +# Create a VNet with a subnet for nodes and a subnet for pods +az network vnet create -g $RESOURCE_GROUP --location $LOCATION --name $NAME-vnet --address-prefixes 10.0.0.0/8 -o none +az network vnet subnet create -g $RESOURCE_GROUP --vnet-name $NAME-vnet --name nodesubnet --address-prefixes 10.240.0.0/16 -o none + +az aks create -n $NAME -g $RESOURCE_GROUP -l $LOCATION \ + --max-pods 250 \ + --node-count 2 \ + --network-plugin azure \ + --network-plugin-mode overlay \ + --kubernetes-version $K8S_VERSION \ + --pod-cidr 192.168.0.0/16 \ + --vnet-subnet-id /subscriptions/$SUBSCRIPTION/resourceGroups/$RESOURCE_GROUP/providers/Microsoft.Network/virtualNetworks/$NAME-vnet/subnets/nodesubnet + +az aks get-credentials -n $NAME -g $RESOURCE_GROUP --subscription $SUBSCRIPTION + +kubectl apply -f ama-metrics-settings-configmap.yaml + +az aks update --enable-azuremonitormetrics -n $NAME -g $RESOURCE_GROUP --azure-monitor-workspace-resource-id /subscriptions/$SUBSCRIPTION/resourcegroups/$AZMON_RESOURCE_GROUP/providers/microsoft.monitor/accounts/$AZMON_NAME --grafana-resource-id /subscriptions/9b8218f9-902a-4d20-a65c-e98acec5362f/resourceGroups/$AZMON_RESOURCE_GROUP/providers/Microsoft.Dashboard/grafana/$GRAFANA_NAME + +echo "complete, todo: install unified retina linux/windows helm chart, and follow windows installation steps in /windows/readme.md" diff --git a/deploy/prometheus/kappie-windows/create-cm.sh b/deploy/prometheus/kappie-windows/create-cm.sh new file mode 100755 index 000000000..05d9f6657 --- /dev/null +++ b/deploy/prometheus/kappie-windows/create-cm.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +kubectl delete cm ama-metrics-prometheus-config-node -n kube-system + +kubectl create configmap ama-metrics-prometheus-config-node --from-file=./deploy/prometheus/retina-windows/prometheus-config -n kube-system diff --git a/deploy/prometheus/kappie-windows/prometheus-config b/deploy/prometheus/kappie-windows/prometheus-config new file mode 100644 index 000000000..dd86cb40c --- /dev/null +++ b/deploy/prometheus/kappie-windows/prometheus-config @@ -0,0 +1,24 @@ +global: + scrape_interval: 60s +scrape_configs: + - job_name: "retina-pods" + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: [__meta_kubernetes_pod_container_name] + action: keep + regex: retina(.*) + - source_labels: + [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] + separator: ":" + regex: ([^:]+)(?::\d+)? 
+ target_label: __address__ + replacement: ${1}:${2} + action: replace + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: instance + metric_relabel_configs: + - source_labels: [__name__] + action: keep + regex: (.*) diff --git a/deploy/prometheus/kappie/create-cm.sh b/deploy/prometheus/kappie/create-cm.sh new file mode 100755 index 000000000..8b5444bd4 --- /dev/null +++ b/deploy/prometheus/kappie/create-cm.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +kubectl create configmap ama-metrics-prometheus-config-node --from-file=./deploy/prometheus/retina/prometheus-config -n kube-system diff --git a/deploy/prometheus/kappie/prometheus-config b/deploy/prometheus/kappie/prometheus-config new file mode 100644 index 000000000..dd86cb40c --- /dev/null +++ b/deploy/prometheus/kappie/prometheus-config @@ -0,0 +1,24 @@ +global: + scrape_interval: 60s +scrape_configs: + - job_name: "retina-pods" + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: [__meta_kubernetes_pod_container_name] + action: keep + regex: retina(.*) + - source_labels: + [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] + separator: ":" + regex: ([^:]+)(?::\d+)? + target_label: __address__ + replacement: ${1}:${2} + action: replace + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: instance + metric_relabel_configs: + - source_labels: [__name__] + action: keep + regex: (.*) diff --git a/deploy/prometheus/network-observability/create-cm.sh b/deploy/prometheus/network-observability/create-cm.sh new file mode 100755 index 000000000..3573f794f --- /dev/null +++ b/deploy/prometheus/network-observability/create-cm.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +kubectl delete cm ama-metrics-prometheus-config-node -n kube-system +kubectl create configmap ama-metrics-prometheus-config-node --from-file=./deploy/prometheus/cilium/prometheus-config -n kube-system +k rollout restart ds ama-metrics-node -n kube-system diff --git a/deploy/prometheus/network-observability/network-observability-svc.yaml b/deploy/prometheus/network-observability/network-observability-svc.yaml new file mode 100644 index 000000000..e49f4dbc9 --- /dev/null +++ b/deploy/prometheus/network-observability/network-observability-svc.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: network-observability +spec: + ports: + # control which ports we enable via which ports we define here + - name: hubble + protocol: TCP + port: 9965 + targetPort: 9965 + - name: retina + protocol: TCP + port: 10093 + targetPort: 10093 + \ No newline at end of file diff --git a/deploy/prometheus/network-observability/prometheus-config b/deploy/prometheus/network-observability/prometheus-config new file mode 100644 index 000000000..3c46bd172 --- /dev/null +++ b/deploy/prometheus/network-observability/prometheus-config @@ -0,0 +1,58 @@ +scrape_configs: + - job_name: "network-observability-hubble" + kubernetes_sd_configs: + - role: service + scheme: http + relabel_configs: + - source_labels: + [ + __meta_kubernetes_namespace, + __meta_kubernetes_service_name, + __meta_kubernetes_service_port_name, + ] + action: keep + regex: kube-system;network-observability;hubble + + - source_labels: [__address__] + target_label: __address__ + replacement: $NODE_IP + action: replace + + - source_labels: [__address__, __meta_kubernetes_service_port_number] + action: replace + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + target_label: __address__ + + - source_labels: [__address__] + replacement: 
"$NODE_NAME" + target_label: instance + + - job_name: "network-observability-retina" + kubernetes_sd_configs: + - role: service + scheme: http + relabel_configs: + - source_labels: + [ + __meta_kubernetes_namespace, + __meta_kubernetes_service_name, + __meta_kubernetes_service_port_name, + ] + action: keep + regex: kube-system;network-observability;retina + + - source_labels: [__address__] + target_label: __address__ + replacement: $NODE_IP + action: replace + + - source_labels: [__address__, __meta_kubernetes_service_port_number] + action: replace + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + target_label: __address__ + + - source_labels: [__address__] + replacement: "$NODE_NAME" + target_label: instance diff --git a/deploy/prometheus/values.yaml b/deploy/prometheus/values.yaml new file mode 100644 index 000000000..1acefdf52 --- /dev/null +++ b/deploy/prometheus/values.yaml @@ -0,0 +1,24 @@ +prometheus: + prometheusSpec: + additionalScrapeConfigs: | + - job_name: "retina-pods" + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: [__meta_kubernetes_pod_container_name] + action: keep + regex: retina(.*) + - source_labels: + [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] + separator: ":" + regex: ([^:]+)(?::\d+)? + target_label: __address__ + replacement: ${1}:${2} + action: replace + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: instance + metric_relabel_configs: + - source_labels: [__name__] + action: keep + regex: (.*) diff --git a/deploy/registercrd.go b/deploy/registercrd.go new file mode 100644 index 000000000..70f1244e3 --- /dev/null +++ b/deploy/registercrd.go @@ -0,0 +1,100 @@ +package deploy + +import ( + "context" + _ "embed" + "fmt" + "reflect" + + "github.com/pkg/errors" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/yaml" +) + +const ( + RetinaCapturesYAMLpath = "retina.io_captures.yaml" + RetinaEndpointsYAMLpath = "retina.io_retinaendpoints.yaml" + MetricsConfigurationYAMLpath = "retina.io_metricsconfigurations.yaml" +) + +//go:embed manifests/controller/helm/retina/crds/retina.io_captures.yaml +var RetinaCapturesYAML []byte + +//go:embed manifests/controller/helm/retina/crds/retina.io_retinaendpoints.yaml +var RetinaEndpointsYAML []byte + +//go:embed manifests/controller/helm/retina/crds/retina.io_metricsconfigurations.yaml +var MetricsConfgurationYAML []byte + +func GetRetinaCapturesCRD() (*apiextensionsv1.CustomResourceDefinition, error) { + retinaCapturesCRD := &apiextensionsv1.CustomResourceDefinition{} + if err := yaml.Unmarshal(RetinaCapturesYAML, &retinaCapturesCRD); err != nil { + fmt.Println("error unmarshalling embedded retinacaptures") + fmt.Println(err.Error()) + return nil, errors.Wrap(err, "error unmarshalling embedded retinacaptures") + } + return retinaCapturesCRD, nil +} + +func GetRetinaEndpointCRD() (*apiextensionsv1.CustomResourceDefinition, error) { + retinaEndpointCRD := &apiextensionsv1.CustomResourceDefinition{} + if err := yaml.Unmarshal(RetinaEndpointsYAML, &retinaEndpointCRD); err != nil { + return nil, errors.Wrap(err, "error unmarshalling embedded retinaendpoints") + } + return retinaEndpointCRD, nil +} + +func GetRetinaMetricsConfigurationCRD() (*apiextensionsv1.CustomResourceDefinition, error) { + 
retinaMetricsConfigurationCRD := &apiextensionsv1.CustomResourceDefinition{} + if err := yaml.Unmarshal(MetricsConfgurationYAML, &retinaMetricsConfigurationCRD); err != nil { + return nil, errors.Wrap(err, "error unmarshalling embedded metricsconfiguration") + } + return retinaMetricsConfigurationCRD, nil +} + +func InstallOrUpdateCRDs(ctx context.Context, enableRetinaEndpoint bool, apiExtensionsClient apiextv1.ApiextensionsV1Interface) (map[string]*apiextensionsv1.CustomResourceDefinition, error) { + crds := make(map[string]*apiextensionsv1.CustomResourceDefinition, 4) + + retinaCapture, err := GetRetinaCapturesCRD() + if err != nil { + return nil, err + } + crds[retinaCapture.GetObjectMeta().GetName()] = retinaCapture + + if enableRetinaEndpoint { + retinaEndpoint, err := GetRetinaEndpointCRD() + if err != nil { + return nil, err + } + crds[retinaEndpoint.GetObjectMeta().GetName()] = retinaEndpoint + } + + retinaMetricsConfiguration, err := GetRetinaMetricsConfigurationCRD() + if err != nil { + return nil, err + } + crds[retinaMetricsConfiguration.GetObjectMeta().GetName()] = retinaMetricsConfiguration + + for name, crd := range crds { + current, err := apiExtensionsClient.CustomResourceDefinitions().Create(ctx, crd, v1.CreateOptions{}) + if apierrors.IsAlreadyExists(err) { + crds[name] = current + continue + } + + if !reflect.DeepEqual(crd.Spec.Versions, current.Spec.Versions) { + crd.SetResourceVersion(current.GetResourceVersion()) + current, err = apiExtensionsClient.CustomResourceDefinitions().Update(ctx, crd, v1.UpdateOptions{}) + if err != nil { + // on error, return the failed CRD + return crds, errors.Wrapf(err, "failed to update %s CRD", name) + } + crds[name] = current + } + } + + return crds, nil +} diff --git a/deploy/registercrd_test.go b/deploy/registercrd_test.go new file mode 100644 index 000000000..a837b77dd --- /dev/null +++ b/deploy/registercrd_test.go @@ -0,0 +1,86 @@ +package deploy + +import ( + "context" + _ "embed" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/require" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + + apiextv1fake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/fake" + kubernetesfake "k8s.io/client-go/kubernetes/fake" +) + +// This test exists to ensure that the YAML files are present in the manifests directory, +// as the dockerfile build and copy depends on them, and matching the names +func TestGetCRD(t *testing.T) { + base := "/manifests/controller/helm/retina/crds/%s" + pwd, _ := os.Getwd() + full := pwd + base + require.FileExists(t, fmt.Sprintf(full, RetinaCapturesYAMLpath)) + require.FileExists(t, fmt.Sprintf(full, RetinaEndpointsYAMLpath)) + require.FileExists(t, fmt.Sprintf(full, MetricsConfigurationYAMLpath)) + + capture, err := GetRetinaCapturesCRD() + require.NoError(t, err) + require.NotNil(t, capture) + require.NotEmpty(t, capture.TypeMeta.Kind) + + endpoint, err := GetRetinaEndpointCRD() + require.NoError(t, err) + require.NotNil(t, endpoint) + require.NotEmpty(t, endpoint.TypeMeta.Kind) + + metrics, err := GetRetinaMetricsConfigurationCRD() + require.NoError(t, err) + require.NotNil(t, metrics) + require.NotEmpty(t, metrics.TypeMeta.Kind) +} + +func TestInstallOrUpdateCRDs(t *testing.T) { + capture, _ := GetRetinaCapturesCRD() + endpoint, _ := GetRetinaEndpointCRD() + metrics, _ := GetRetinaMetricsConfigurationCRD() + + tests := []struct { + name string + enableRetinaEndpoint bool + want map[string]*apiextensionsv1.CustomResourceDefinition + wantErr 
bool + }{ + { + name: "install all CRDs", + enableRetinaEndpoint: true, + want: map[string]*apiextensionsv1.CustomResourceDefinition{ + "captures.retina.io": capture, + "retinaendpoints.retina.io": endpoint, + "metricsconfigurations.retina.io": metrics, + }, + }, + { + name: "disable retina endpoint", + enableRetinaEndpoint: false, + want: map[string]*apiextensionsv1.CustomResourceDefinition{ + "captures.retina.io": capture, + "metricsconfigurations.retina.io": metrics, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + kubeClient := kubernetesfake.NewSimpleClientset() + apiExtensionsClient := &apiextv1fake.FakeApiextensionsV1{ + Fake: &kubeClient.Fake, + } + got, err := InstallOrUpdateCRDs(context.Background(), tt.enableRetinaEndpoint, apiExtensionsClient) + if (err != nil) != tt.wantErr { + require.NoError(t, err) + return + } + require.Exactly(t, tt.want, got) + }) + } +} diff --git a/docs/CRDs/Capture.md b/docs/CRDs/Capture.md new file mode 100644 index 000000000..641a3ff99 --- /dev/null +++ b/docs/CRDs/Capture.md @@ -0,0 +1,110 @@ +# Capture CRD + +## Overview + +The `Capture` CustomResourceDefinition (CRD) defines a custom resource called `Capture`, which represents the settings of a network trace. +This CRD allows users to specify the configurations for capturing network packets and storing the captured data. + +To use the `Capture` CRD, [install Retina](../installation/setup.md) with capture support. + +## CRD Specification + +The full specification for the `Capture` CRD can be found in the [Capture CRD](https://github.com/microsoft/retina/blob/main/deploy/manifests/controller/helm/retina/crds/retina.io_captures.yaml) file. + +The `Capture` CRD is defined with the following specifications: + +- **API Group:** retina.io +- **API Version:** v1alpha1 +- **Kind:** Capture +- **Plural:** captures +- **Singular:** capture +- **Scope:** Namespaced + +### Fields + +- **spec.captureConfiguration:** Specifies the configuration for capturing network packets. It includes the following properties: + - `captureOption`: Lists options for the capture, such as duration, maximum capture size, and packet size. + - `captureTarget`: Defines the target on which the network packets will be captured. It includes namespace, node, and pod selectors. + - `filters`: Specifies filters for including or excluding network packets based on IP or port. + - `includeMetadata`: Indicates whether networking metadata should be captured. + - `tcpdumpFilter`: Allows specifying a raw tcpdump filter string. + +- **spec.outputConfiguration:** Indicates where the captured data will be stored. It includes the following properties: + - `blobUpload`: Specifies a secret containing the blob SAS URL for storing the capture data. + - `hostPath`: Stores the capture files into the specified host filesystem. + - `persistentVolumeClaim`: Mounts a PersistentVolumeClaim into the Pod to store capture files. + +- **status:** Describes the status of the capture, including the number of active, failed, and completed jobs, completion time, conditions, and more. Check [capture lifecycle](#capture-lifecycle) for more details. 
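+
+Once a Capture has been applied, these status fields can be inspected with standard kubectl commands, for example (assuming a Capture named `example-capture` in the `default` namespace, as in the example below):
+
+```shell
+# List Captures in the namespace, then show the detailed status of one of them.
+kubectl get captures -n default
+kubectl describe capture example-capture -n default
+```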
+ +## Usage + +### Creating a Capture + +To create a `Capture`, create a YAML manifest file with the desired specifications and apply it to the cluster using `kubectl apply`: + +```yaml +apiVersion: retina.io/v1alpha1 +kind: Capture +metadata: + name: example-capture +spec: + captureConfiguration: + captureOption: + duration: "30s" + maxCaptureSize: 100 + packetSize: 1500 + captureTarget: + namespaceSelector: + matchLabels: + app: target-app + outputConfiguration: + hostPath: /captures + blobUpload: blob-sas-url +``` + +### Capture Lifecycle + +Once a Capture is created, the capture controller inside retina-operator is responsible for managing the lifecycle of the Capture. +A Capture can be turned into error when errors happens like no required selector is specified, or InProgress when created workload are running, or completed when all workloads are completed. +In implementation, the complete status is defined by setting complete status condition to true, and InProgress is defined as a false complete status condition. + +#### Examples of Capture status + +- No allowed selectors are specified + +```yaml +Status: + Conditions: + Last Transition Time: 2023-10-23T06:17:39Z + Message: Neither NodeSelector nor NamespaceSelector&PodSelector is set. + Reason: otherError + Status: True + Type: error +``` + +- Capture is running in progress + +```yaml +Status: + Active: 2 + Conditions: + Last Transition Time: 2023-10-23T06:33:56Z + Message: 2/2 Capture jobs are in progress, waiting for completion + Reason: JobsInProgress + Status: False + Type: complete +``` + +- Capture is completed + +```yaml +Status: + Completion Time: 2023-10-23T06:34:40Z + Conditions: + Last Transition Time: 2023-10-23T06:34:40Z + Message: All 2 Capture jobs are completed + Reason: JobsCompleted + Status: True + Type: complete + Succeeded: 2 +``` diff --git a/docs/CRDs/MetricsConfiguration.md b/docs/CRDs/MetricsConfiguration.md new file mode 100644 index 000000000..23c737140 --- /dev/null +++ b/docs/CRDs/MetricsConfiguration.md @@ -0,0 +1,92 @@ +# MetricsConfiguration CRD + +## Overview + +Retina by default emits node level metrics, however, customers can apply `MetricsConfiguration` custom resource definition (CRD) to enable advanced metrics and they can define in which namespaces the advanced metrics to be enabled. + +## CRD Specification + +The full specification for the `MetricsConfiguration` CRD can be found in the [MetricsConfiguration CRD](https://github.com/microsoft/retina/blob/main/deploy/manifests/controller/helm/retina/crds/retina.io_metricsconfigurations.yaml) file. + +The `MetricsConfiguration` CRD is defined with the following specifications: + +- **API Group:** retina.io +- **API Version:** v1alpha1 +- **Kind:** MetricsConfiguration +- **Plural:** metricsconfigurations +- **Singular:** metricsconfiguration +- **Scope:** Cluster + +### Fields + +- **spec.contextOptions:** Specifies the configuration for retina plugin metrics context. It includes the following properties: + - `additionalLabels`: Represents additional context labels to be collected, such as Direction (ingress/egress). + - `destinationLabels`: Represents the destination context labels, such as IP, Pod, port, workload (deployment/replicaset/statefulset/daemonset). + - `metricName`: Indicates the name of the metric. + - `sourceLabels`: Represents the source context labels, such as IP, Pod, port. + +- **spec.namespaces:** Specifies the namespaces to include or exclude in metric collection. 
It includes the following properties: + - `exclude`: Specifies namespaces to be excluded from metric collection. + - `include`: Specifies namespaces to be included in metric collection. + +- **status:** Describes the status of the metrics configuration, including the last known specification, reason, and state. + +## Usage + +### Creating a MetricsConfiguration + +To create a `MetricsConfiguration`, create a YAML manifest file with the desired specifications and apply it to the cluster using `kubectl apply`: + +```yaml +apiVersion: retina.io/v1alpha1 +kind: MetricsConfiguration +metadata: + name: metricsconfigcrd +spec: + contextOptions: + - metricName: drop_count + sourceLabels: + - ip + - podname + - port + additionalLabels: + - direction + - metricName: forward_count + sourceLabels: + - ip + - podname + - port + additionalLabels: + - direction + namespaces: + include: + - default + - kube-system +``` + +## Validation of MetricsConfiguration CRD + +The **Operator Pod** acts as a validator for customer-applied CRDs. It reads metrics and/or traces CRDs, validates options, and updates the status of the applied CRDs accordingly. + +### Status States + +The status of the CRD can be one of the following: + +1. **Initiated (optional)**: The initial state upon CRD application. The Operator may set it to "Initiated" when the CRD is first applied. This state can be skipped for simplicity. + +2. **Accepted**: After validating the CRD options, the Operator transitions the status to "Accepted" if everything is valid. + +3. **Error**: In case of validation issues, the Operator updates the status to "Error" along with a reason for the CRD's invalidity. + +### Interaction with Daemon Pods + +Daemon Pods wait for the "Accepted" status before applying configurations. This ensures only validated configurations are processed, reducing errors. + +After validation, the following section is added to the CRD: + +```yaml +status: + state: Initialized/Accepted/Errorred + reason: + acceptedSpec: +``` diff --git a/docs/CRDs/RetinaEndpoint.md b/docs/CRDs/RetinaEndpoint.md new file mode 100644 index 000000000..0711fec09 --- /dev/null +++ b/docs/CRDs/RetinaEndpoint.md @@ -0,0 +1,78 @@ +# RetinaEndpoint CRD + +## Overview + +The `RetinaEndpoint` CustomResourceDefinition (CRD) defines a custom resource called `RetinaEndpoint`, which is a representation of a Kubernetes Pod. This CRD allows users to define and manage additional information about Pod beyond the default Kubernetes metadata as well as reduce the complexity. + +In large-scale API servers, each Retina Pod needs to learn about cluster state, which requires the API server to push a lot of data for each list and watch event. To address this issue, we need to reduce the Pod object and convert it to a more manageable `RetinaEndpoint` object. + +## CRD Specification + +The full specification for the `RetinaEndpoint` CRD can be found in the [RetinaEndpoint CRD]( https://github.com/microsoft/retina/blob/main/deploy/manifests/controller/helm/retina/crds/retina.io_retinaendpoints.yaml) file. + +The `RetinaEndpoint` CRD is defined with the following specifications: + +- **API Group:** retina.io +- **API Version:** v1alpha1 +- **Kind:** RetinaEndpoint +- **Plural:** retinaendpoints +- **Singular:** retinaendpoint +- **Scope:** Namespaced + +### Fields + +- **spec.containers:** An array of container objects associated with the endpoint. Each container object has an `id` and a `name`. + +- **spec.labels:** A set of labels to apply to the endpoint. 
+ +- **spec.nodeIP:** The IP address of the node associated with the endpoint. + +- **spec.ownerReferences:** An array of owner references indicating the resources that own the endpoint. + +- **spec.podIP:** The IP address of the Pod associated with the endpoint. + +## Usage + +### Creating a RetinaEndpoint + +To create a `RetinaEndpoint`, create a YAML manifest file with the desired specifications and apply it to the cluster using `kubectl apply`: + +```yaml +apiVersion: retina.io/v1alpha1 +kind: RetinaEndpoint +metadata: + name: example-retinaendpoint + namespace: test + labels: + app: example-app +spec: + containers: + - id: container-1 + name: web + nodeIP: 192.168.1.10 + ownerReferences: + - apiVersion: v1 + kind: Pod + name: example-pod + podIP: 10.1.2.3 + +``` + +## RetinaEndpoint Workflow + +In high-scale Kubernetes environments, the scenario where each DaemonSet Pod monitors all Pods within a cluster can lead to a significant volume of data propagation. This situation can potentially strain the API server and cause performance issues. +To address this challenge, an **Operator Pod** is introduced to streamline data flow. + +The **Operator** plays a pivotal role in transforming extensive Pod objects into succinct `RetinaEndpoints`. These `RetinaEndpoints` contain only the crucial fields required for Retina functionality. This approach efficiently reduces data size, enabling rapid propagation across all nodes. The subsequent sections delve into the intricacies of this process. + +### Workflow Steps + +1. **Operator's Monitoring**: When advanced features are enabled, the **Operator** component will monitor the Kubernetes cluster for Pods. It is the only one that can watch and update the `RetinaEndpoint` Resource. + +2. **Data Extraction and Transformation**: The **Operator** extracts relevant Pod information and uses this data to generate a corresponding `RetinaEndpoint` CustomResource (CR) object. + +3. **Data Backup**: The newly created `RetinaEndpoint` CR object is backed up to the Kubernetes API server by the **Operator**. + +4. **Daemon's Role**: The K8s watchers in daemon set Pods will list and read `RetinaEndpoints` and Service objects from the API server. + +5. **Data Enrichment**: Subsequently, these K8s watchers convert these objects into an internal cache, enriching kernel data for efficient usage. diff --git a/docs/captures/cli.md b/docs/captures/cli.md new file mode 100644 index 000000000..e82fefd54 --- /dev/null +++ b/docs/captures/cli.md @@ -0,0 +1,280 @@ +# Capture with Retina CLI + +Retina capture command allows the user to capture network traffic and metadata for the capture target, and then send the capture file to the location by Output Configuration. + +Note: captures can also be performed with a [Capture CRD](../CRDs/Capture.md) after [installing Retina](../installation/setup.md) with capture support. + +## Retina capture create + +`retina capture create` creates a Capture with underlying Kubernetes jobs. + +### No wait(default true) + +Do not wait for the long-running capture job to finish. +By default, Retina capture CLI will exit before the jobs are completed. With `--no-wait=false`, the CLI will wait until the jobs are completed and clean up the Kubernetes resources created. 
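+
+For instance, to block until the capture jobs finish and have the created Kubernetes resources cleaned up automatically, the same create command shown in the example below can be run with the flag flipped:
+
+`kubectl retina capture create --host-path /mnt/capture --node-selectors "kubernetes.io/os=linux" --no-wait=false`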
+
+#### Example
+
+- do not wait for the long-running capture job to finish
+
+`kubectl retina capture create --host-path /mnt/capture --node-selectors "kubernetes.io/os=linux" --no-wait=true`
+
+### Namespace(default "default")
+
+Namespace to host the capture job and the other Kubernetes resources for a network capture. Please make sure the namespace exists.
+
+#### Example
+
+- deploy the capture job in namespace `capture`
+
+`kubectl retina capture create --host-path /mnt/capture --namespace capture --node-selectors "kubernetes.io/os=linux"`
+
+### Capture Target(required)
+
+Capture target indicates the target on which the network packet capture will be performed. The user can select either Nodes, by node selectors or node names, or Pods, by pod-selector and namespace-selector pairs.
+
+#### Examples
+
+- capture network packets on the nodes selected by node selectors
+
+`kubectl retina capture create --host-path /mnt/capture --namespace capture --node-selectors "kubernetes.io/os=linux"`
+
+- capture network packets on the nodes selected by node names
+
+`kubectl retina capture create --host-path /mnt/capture --namespace capture --node-names "aks-nodepool1-41844487-vmss000000,aks-nodepool1-41844487-vmss000001"`
+
+- capture network packets on the Pods selected by pod-selector and namespace-selector pairs
+
+`kubectl retina capture create --host-path /mnt/capture --namespace capture --pod-selectors="k8s-app=kube-dns" --namespace-selectors="kubernetes.io/metadata.name=kube-system"`
+
+### Stop Capture(optional with default values)
+
+The Capture can be stopped in either of the following ways:
+
+- After a given time, via the `duration` flag, or once the capture file reaches the maximum allowed size, via the `max-size` flag. When both are specified, the capture stops as soon as either condition is met.
+- On demand, by deleting the capture before the specified conditions are met.
+
+The network traffic will be uploaded to the specified output location.
+
+#### Capture Duration
+
+Duration of capturing packets (default 1m).
+
+##### Example
+
+- stop the capture in 2 minutes
+
+`kubectl retina capture create --host-path /mnt/capture --namespace capture --node-selectors "kubernetes.io/os=linux" --duration=2m`
+
+#### Maximum Capture Size
+
+Limits the capture file size in MB (default 100MB).
+
+##### Example
+
+- stop the capture when the capture file size reaches 50MB
+
+`kubectl retina capture create --host-path /mnt/capture --namespace capture --node-selectors "kubernetes.io/os=linux" --max-size=50`
+
+#### Packet Size(optional)
+
+Limits each packet to the given number of bytes; packets longer than this size will be truncated.
+This is useful when the user wants to reduce the capture file size or hide customer data for security reasons.
+
+##### Example
+
+- limit each packet size to 96 bytes
+
+`kubectl retina capture create --host-path /mnt/capture --namespace capture --node-selectors "kubernetes.io/os=linux" --packet-size=96`
+
+### Capture Configuration(optional)
+
+Capture configuration indicates the configuration of the network capture.
+
+#### Packet capture filters
+
+Packet capture filters represent a range of filters to be included/excluded in the capture. This is not available on Windows.
+
+##### Example
+
+`kubectl retina capture create --host-path /mnt/capture --namespace capture --node-selectors "kubernetes.io/os=linux" --exclude-filter="10.224.0.26:80,10.224.0.33:8080" --include-filter="10.224.0.42:80,10.224.0.33:8080"`
+
+#### Tcpdump Filter
+
+Raw tcpdump flags, which work only on Linux. Available tcpdump filters can be found in the [TCPDUMP MAN PAGE](https://www.tcpdump.org/manpages/tcpdump.1.html).
+NOTE: this includes only tcpdump flags; for the expression part, please use [Packet capture filters
+](#packet-capture-filters).
+
+##### Example
+
+- filter DNS queries
+
+`kubectl retina capture create --host-path /mnt/capture --namespace capture --node-selectors "kubernetes.io/os=linux" --tcpdump-filter="udp port 53"`
+
+#### Include Metadata
+
+If true, collects static network metadata into the capture file (default true).
+
+##### Example
+
+- disable collecting network metadata
+
+`kubectl retina capture create --host-path /mnt/capture --namespace capture --node-selectors "kubernetes.io/os=linux" --include-metadata=false`
+
+### Job Number Limit(optional)
+
+The maximum number of jobs that can be created for each capture. The default value 0 indicates no limit.
+This can be configured via CLI flags for each CLI command, or via the config map consumed by retina-operator.
+When creating a capture would exceed this limit, it fails with an error like
+`Error: the number of capture jobs 3 exceeds the limit 2`
+
+#### Example
+
+`kubectl retina capture create --job-num-limit=10 --host-path /mnt/capture --namespace capture --node-selectors "kubernetes.io/os=linux"`
+
+### Output Configuration(required)
+
+OutputConfiguration indicates where the capture will be stored; at least one location must be specified.
+
+Blob upload requires a Blob Shared Access Signature (SAS) with write permission to the storage account container. To create SAS tokens in the Azure portal, please read [Create SAS Tokens in the Azure Portal](https://learn.microsoft.com/en-us/azure/cognitive-services/translator/document-translation/how-to-guides/create-sas-tokens?tabs=Containers#create-sas-tokens-in-the-azure-portal)
+
+#### Examples
+
+- store the capture file in the node host path `/mnt/capture`
+
+`kubectl retina capture create --host-path /mnt/capture --namespace capture --node-selectors "kubernetes.io/os=linux"`
+
+- store the capture file in a PVC `mypvc` with access mode ReadWriteMany in namespace `capture`
+
+`kubectl retina capture create --pvc mypvc --namespace capture --node-selectors "kubernetes.io/os=linux"`
+
+- store the capture file to a storage account
+
+`kubectl retina capture create --blob-upload --node-selectors "kubernetes.io/os=linux"`
+
+### Debug mode
+
+In debug mode, when `--debug` is specified, the capture job Pod image can be overridden from the default official `MCR` one.
+
+#### Examples
+
+- use `acnpublic` ACR in default debug mode
+
+`kubectl retina capture create --host-path /mnt/test --namespace capture --node-selectors "kubernetes.io/os=linux" --debug`
+
+- use a customized retina-agent image
+
+`RETINA_AGENT_IMAGE= kubectl retina capture create --host-path /mnt/test --namespace capture --node-selectors "kubernetes.io/os=linux" --debug`
+
+## Retina capture delete
+
+`retina capture delete` deletes the Kubernetes Jobs associated with the specified Capture name.
+
+### Examples
+
+`kubectl retina capture delete --name retina-capture-zlx5v`
+
+## Retina capture list
+
+`retina capture list` lists Captures in a namespace or all namespaces.
+ +### Examples + +- list Captures under namespace `capture` + +`kubectl retina capture list --namespace capture` + +- list Captures under in all namespaces + +`kubectl retina capture list --all-namespaces` + +## Obtain the output + +After downloading or copying the tarball from the location specified, extract the tarball through the `tar` command in either Linux shell or Windows Powershell, for example, + +```shell +tar -xvf retina-capture-aks-nodepool1-41844487-vmss000000-20230320013600UTC.tar.gz +``` + +### Name pattern of the tarball + +the tarball take such name pattern, `$(capturename)-$(hostname)-$(date +%Y%m%d%H%M%S%Z).tar.gz`, for example, `retina-capture-aks-nodepool1-41844487-vmss000000-20230313101436UTC.tar.gz`. + +### File and directory structure inside the tarball + +- Linux + +```text +├── ip-resources.txt +├── iptables-rules.txt +├── retina-capture-aks-nodepool1-41844487-vmss000000-20230320013600UTC.pcap +├── proc-net +│ ├── anycast6 +│ ├── arp +... ... +├── proc-sys-net +│ ├── bridge +... ... +|-- socket-stats.txt +`-- tcpdump.log +``` + +- Windows + +```text +│ retina-capture-akswin000002-20230322010252UTC.etl +│ retina-capture-akswin000002-20230322010252UTC.pcap +│ netsh.log +│ +└───metadata + │ arp.txt + ... ... + │ + ├───adapters + │ 6to4 Adapter_int.txt + ... ... + │ + ├───logs + │ ├───cbs + │ │ CBS.log + │ │ + │ ├───dism + │ │ dism.log + │ │ + │ └───NetSetup + │ service.0.etl + ... ... + │ + ├───wfp + │ filters.xml + │ netevents.xml + │ wfpstate.xml + │ + └───winevt + Application.evtx + ... ... +``` + +### Network metadata + +- Linux + - IP address configuration (ip -d -j addr show) + - IP neighbor status (ip -d -j neighbor show) + - IPtables rule dumps + - iptables-save + - iptables -vnx -L + - iptables -vnx -L -t nat + - iptables -vnx -L -t mangle + - Network statistics information + - ss -s (summary) + - ss -tapionume (socket information) + - networking stats (/proc/net) + - kernel networking configuration (/proc/sys/net) + +- Windows + - reference: [Microsoft SDN Debug tool +](https://github.com/microsoft/SDN/blob/master/Kubernetes/windows/debug/collectlogs.ps1) + +## Cleanup + +When creating a capture, we can specify `--no-wait` to clean up the jobs after the Capture is completed. Otherwise, after creating a Capture, a random Capture name is returned, with which we can delete the jobs by `kubectl retina capture delete` command. diff --git a/docs/captures/img/capture-architecture-with-operator.png b/docs/captures/img/capture-architecture-with-operator.png new file mode 100644 index 000000000..d0e6b8ddd Binary files /dev/null and b/docs/captures/img/capture-architecture-with-operator.png differ diff --git a/docs/captures/img/capture-architecture-without-operator.png b/docs/captures/img/capture-architecture-without-operator.png new file mode 100644 index 000000000..400391f34 Binary files /dev/null and b/docs/captures/img/capture-architecture-without-operator.png differ diff --git a/docs/captures/img/retina-release.png b/docs/captures/img/retina-release.png new file mode 100644 index 000000000..bb2523546 Binary files /dev/null and b/docs/captures/img/retina-release.png differ diff --git a/docs/captures/readme.md b/docs/captures/readme.md new file mode 100644 index 000000000..04a699fff --- /dev/null +++ b/docs/captures/readme.md @@ -0,0 +1,89 @@ +# Captures + +## Overview + +Retina Capture allows users to capture network traffic/metadata for the specified Nodes/Pods. + +Captures are on-demand and can be output to the host filesystem, a storage blob, etc. 
+ +## Usage + +There are two methods for triggering a Capture: +- [CLI command](#option-1-retina-cli) or +- [CRD/YAML configuration](#option-2-capture-crd-custom-resource-definition). + +### Option 1: Retina CLI + +Available after [Installing Retina CLI](../installation/cli.md). + +See [Capture Command](../captures/cli.md) for more details. + +#### Example + +This example captures network traffic for all Linux Nodes, storing the output in the folder */mnt/capture* on each Node. + +```shell +kubectl-retina capture create --host-path /mnt/capture --node-selectors "kubernetes.io/os=linux" +``` + +#### Architecture + +For each Capture, a Kubernetes Job is created for each relevant Node (the Node could be selected and/or could be hosting a selected Pod). +The Job's worker Pod runs for the specified duration, captures and wraps the network information into a tarball, and copies the tarball to the specified output location(s). +As a special case, a Kubernetes secret will be created containing a storage blob SAS for security concerns, then mounted to the Pod. + +A random hashed name is assigned to each Retina Capture to uniquely label it. + +![Overview of Retina Capture without operator](img/capture-architecture-without-operator.png "Overview of Retina Capture without operator") + +### Option 2: Capture CRD (Custom Resource Definition) + +Available after after [installing Retina](../installation/setup.md) with capture support. + +See [Capture CRD](../CRDs/Capture.md) for more details. + +#### Example + +This example creates a Capture and stores the Capture artifacts into a storage account specified by Blob SAS URL. + +Create a secret to store blob SAS URL: + +```yaml +apiVersion: v1 +data: + ## Data key is required to be "blob-upload-url" + blob-upload-url: +kind: Secret +metadata: + name: blob-sas-url + namespace: default +type: Opaque +``` + +Create a Capture specifying the secret created as blobUpload, this example will also store the artifact on the node host path + +```yaml +apiVersion: retina.io/v1alpha1 +kind: Capture +metadata: + name: capture-test +spec: + captureConfiguration: + captureOption: + duration: 30s + captureTarget: + nodeSelector: + matchLabels: + kubernetes.io/hostname: aks-nodepool1-11396069-vmss000000 + outputConfiguration: + hostPath: "/tmp/retina" + blobUpload: blob-sas-url +``` + +More Retina Capture samples can be found [here](https://github.com/microsoft/retina/tree/main/samples/capture) + +#### Architecture + +Similarly to Option 1, a Kubernetes Job is created for each relevant Node. + +![Overview of Retina Capture with operator](img/capture-architecture-with-operator.png "Overview of Retina Capture with operator") diff --git a/docs/contributing/readme.md b/docs/contributing/readme.md new file mode 100644 index 000000000..0f5efad1a --- /dev/null +++ b/docs/contributing/readme.md @@ -0,0 +1,174 @@ +# Contributing + +This project welcomes contributions and suggestions. Most contributions require you to agree to a +Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us +the rights to use your contribution. For details, visit [https://cla.opensource.microsoft.com](https://cla.opensource.microsoft.com). + +When you submit a pull request, a CLA bot will automatically determine whether you need to provide +a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions +provided by the bot. You will only need to do this once across all repos using our CLA. 
+ +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or +contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. + +## Development + +### Configurations + +FIXME just link the Config page + +Configurations are passed through `retina-config` configmap in `retina` namespace. Following configurations are currently supported: + +- `apiserver.port` : the port for `retina-agent` Pod +- `logLevel` : supported - `debug`, `info`, `error`, `warn`, `panic`, `fatal` +- `enabledPlugin` : eBPF plugins to be installed in worker node. Currently supported plugins are `dropreason` and `packetforward` +- `metricsInterval` : interval, in seconds, to scrape/publish metrics + +Note: Changes to configmap after retina is deployed would require re-deployment of `retina-agent`. + +### Supported Metrics Plugins + +FIXME just link metrics page + +### Pre-Requisites + +```bash +export LLVM_VERSION=14 +curl -sL https://apt.llvm.org/llvm.sh | sudo bash -s "$LLVM_VERSION" +``` + +Download [Helm](https://helm.sh/) as well. + +### Test + +```bash +make retina-ut # run unit-test locally +make retina-test-image # run tests in docker container +``` + +### Build + +Generate all mocks and BPF programs: + +```bash +make all +``` + +Build the Retina binary: + +```bash +make retina +``` + +To build a `retina-agent` container image with specific tag: + +```bash +TAG= make retina-image +``` + +To build binary of a plugin and test it + +```bash +# Test packetforward. +$ cd /test/plugin/packetforward +$ +$ go build . && sudo ./packetforward +info metrics Metrics initialized +info packetforward Packet forwarding metric initialized +info packetforward Start collecting packet forward metrics +info test-packetforward Started packetforward logger +error packetforward Error reading hash map {"error": "lookup: key does not exist"} +debug packetforward Received PacketForward data {"Data": "IngressBytes:302 IngressPackets:4 EgressBytes:11062 EgressPackets:33"} +debug packetforward Received PacketForward data {"Data": "IngressBytes:898 IngressPackets:12 EgressBytes:11658 EgressPackets:41"} +debug packetforward Received PacketForward data {"Data": "IngressBytes:898 IngressPackets:12 EgressBytes:23808 EgressPackets:70"} +... +``` + +### Deploying Locally on Kind + +```bash +make kind-setup # This deploys a Kind cluster and installs azure NPM +make retina-image && make kind-install # Skip building image if already done +make kind-clean # Delete Kind cluster +``` + +### Deploying on Other Kubernetes Cluster + +1. Create Kubernetes cluster. +2. Build and push the docker image for Retina: `make retina-image-push IMAGE_REGISTRY=` +3. Install Retina: `make helm-install IMAGE_REGISTRY=` + +### Verify Deployment + +Check `Retina` deployment and logs + +```bash +$ kubectl -n kube-system get po +NAME READY STATUS RESTARTS AGE +retina-agent-kq54d 1/1 Running 0 88s +... +$ +$ k -n kube-system logs retina-agent-kq54d -f +info main Reading config ... +info main Initializing metrics +info metrics Metrics initialized +info main Initializing Kubernetes client-go ... +info controller-manager Initializing controller manager ... +info plugin-manager Initializing plugin manager ... +info packetforward Packet forwarding metric initialized +... +info dropreason Start listening for drop reason events... 
+info packetforward Start collecting packet forward metrics +debug packetforward Received PacketForward data {"Data": "IngressBytes:24688994 IngressPackets:6786 EgressBytes:370647 EgressPackets:4153"} +... +``` + +### Metrics + +Retina generates `Prometheus` metrics and exposes them on port `10093` with path `/metrics`. + +```bash +$ kubectl port-forward retina-agent-wzjld 9090:10093 -n kube-system 2>&1 >/dev/null & +$ +$ ps aux | grep '[p]ort-forward' +anubhab 614516 0.0 0.1 759000 41796 pts/3 Sl+ 14:34 0:00 kubectl port-forward retina-agent-wzjld 9090:10093 -n kube-system +$ +$ curl http://localhost:9090/metrics | grep retina +... +# HELP retina_drop_bytes Total dropped bytes +# TYPE retina_drop_bytes gauge +retina_drop_bytes{direction="unknown",reason="IPTABLE_RULE_DROP"} 480 +# HELP retina_drop_count Total dropped packets +# TYPE retina_drop_count gauge +retina_drop_count{direction="unknown",reason="IPTABLE_RULE_DROP"} 12 +# HELP retina_forward_bytes Total forwarded bytes +# TYPE retina_forward_bytes gauge +retina_forward_bytes{direction="egress"} 1.28357355e+08 +retina_forward_bytes{direction="ingress"} 3.9520696e+08 +# HELP retina_forward_count Total forwarded packets +# TYPE retina_forward_count gauge +retina_forward_count{direction="egress"} 126462 +retina_forward_count{direction="ingress"} 156793 +``` + +### Dashboard/Prometheus/Grafana + +Install `Prometheus` and `Grafana` onto the cluster to visualize metrics. + +Documentation for these technologies: + +- [Prometheus](https://prometheus.io/docs/introduction/overview/) +- [Grafana](https://grafana.com/grafana/) + +### Cleanup + +Uninstall `Retina`: + +```bash +helm uninstall retina -n kube-system +``` + +## Contact + +[Retina Devs](mailto:retina@microsoft.com) diff --git a/docs/installation/cli.md b/docs/installation/cli.md new file mode 100644 index 000000000..44ed47750 --- /dev/null +++ b/docs/installation/cli.md @@ -0,0 +1,23 @@ +# CLI Setup + +Currently, Retina CLI only supports Linux. + +For CLI usage, see [Capture with Retina CLI](../captures/cli.md). + +## Option 1: Download from Release + +Download `kubectl-retina` from the latest [Retina release](https://github.com/microsoft/retina/releases). +Feel free to move the binary to `/usr/local/bin/`, or add it to your `PATH` otherwise. + +## Option 2: Build from source + +Clone the Retina repo and execute: + +```shell +make install-kubectl-retina +``` + +Requirements: + +- go 1.18 or newer +- GNU make diff --git a/docs/installation/config.md b/docs/installation/config.md new file mode 100644 index 000000000..a63f0b173 --- /dev/null +++ b/docs/installation/config.md @@ -0,0 +1,20 @@ +# Retina Config + +## Overview + +To customize metrics and other options, you can modify Retina's ConfigMap called `retina-config`. +Defaults are specified for each component in *deploy/manifests/controller/helm/retina/values.yaml*. + +## Agent Config + +* `enablePodLevel`: When this toggle is set to true, Retina will gather Advanced/Pod-Level metrics. Advanced metrics can attach Pod metadata to Retina's metrics. +* `remoteContext`: When this toggle is set to true, retina will watch Pods on the cluster. +* `enableAnnotations`: When this toggle is set to true, retina will gather metrics for the annotated resources. Namespaces or Pods can be annotated with "retina.io/v1alpha=observe". The operator and enableRetinaEndpoint for the operator should be enabled. +* `enabledPlugin_linux`: Array of enabled plugins for linux. +* `enabledPlugin_win`: Array of enabled plugins for windows. 
+* `metricsInterval`: the interval for which metrics will be gathered. + +## Operator Config + +* `installCRDs`: When this toggle is set, the operator will handle installing Retina-related CRDs. +* `enableRetinaEndpoint`: When this toggle is set, the operator will watch and update the cache with Pod metadata. diff --git a/docs/installation/grafana.md b/docs/installation/grafana.md new file mode 100644 index 000000000..da3fecfec --- /dev/null +++ b/docs/installation/grafana.md @@ -0,0 +1,34 @@ +# Configuring Grafana + +## Pre-Requisites + +Follow either: + +- [Unmanaged Prometheus/Grafana](./prometheus-unmanaged.md) or +- [Azure-Hosted Prometheus/Grafana](prometheus-azure-managed.md). + +Make sure that you're still port-forwarding your server to localhost:9090, or configure your server for some other HTTP endpoint. + +## Configuration + +1. Check Grafana to make sure the managed Prometheus datasource exists: + + ![alt text](img/portal-grafana.png) + +2. Go to the dashboard page and select "import": + + ![alt text](img/grafana-dashboard-import.png) + +3. Import the [published dashboards](https://grafana.com/grafana/dashboards/) by ID, or import the dashboards by JSON at *deploy/grafana/dashboards/*. + +4. The Grafana dashboard should now be visible. + + ![alt text](img/grafana-dashboard.png) + +FIXME add published dashboard links + +## Pre-Installed Dashboards + +If you're using [Azure-Hosted Prometheus/Grafana](prometheus-azure-managed.md), versions of these dashbaords are pre-installed under: +- Dashboards > Managed Prometheus > Kubernetes / Networking / Clusters +- Dashboards > Managed Prometheus > Kubernetes / Networking / DNS diff --git a/docs/installation/img/grafana-dashboard-import.png b/docs/installation/img/grafana-dashboard-import.png new file mode 100644 index 000000000..0217bcb1d Binary files /dev/null and b/docs/installation/img/grafana-dashboard-import.png differ diff --git a/docs/installation/img/grafana-dashboard.png b/docs/installation/img/grafana-dashboard.png new file mode 100644 index 000000000..c5e87e830 Binary files /dev/null and b/docs/installation/img/grafana-dashboard.png differ diff --git a/docs/installation/img/portal-grafana.png b/docs/installation/img/portal-grafana.png new file mode 100644 index 000000000..9ce5355da Binary files /dev/null and b/docs/installation/img/portal-grafana.png differ diff --git a/docs/installation/img/prom-targets.png b/docs/installation/img/prom-targets.png new file mode 100644 index 000000000..b7ab4eeb9 Binary files /dev/null and b/docs/installation/img/prom-targets.png differ diff --git a/docs/installation/img/prometheus-retina-pods.png b/docs/installation/img/prometheus-retina-pods.png new file mode 100644 index 000000000..153188ac9 Binary files /dev/null and b/docs/installation/img/prometheus-retina-pods.png differ diff --git a/docs/installation/prometheus-azure-managed.md b/docs/installation/prometheus-azure-managed.md new file mode 100644 index 000000000..214e07b39 --- /dev/null +++ b/docs/installation/prometheus-azure-managed.md @@ -0,0 +1,81 @@ +# Azure Managed Prometheus/Grafana + +## Pre-Requisites + +1. Install [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli). +2. [Create an AKS cluster](https://learn.microsoft.com/en-us/azure/aks/learn/quick-kubernetes-deploy-cli#create-a-resource-group). +3. Install Retina DaemonSet (see [Quick Installation](./setup.md)). + +## Deploying Prometheus and Grafana + +1. 
Create an Azure Monitor resource: + + ```shell + az resource create \ + --resource-group $RESOURCE_GROUP \ + --namespace microsoft.monitor \ + --resource-type accounts \ + --name $AZURE_MONITOR_NAME \ + --location $REGION \ + --properties '{}' + ``` + +2. Create a Grafana instance: + + ```shell + az grafana create \ + --name $GRAFANA_NAME \ + --resource-group $RESOURCE_GROUP + ``` + +3. Get the Azure Monitor and Grafana resource IDs + + ```bash + export AZMON_RESOURCE_ID=$(az resource show --resource-group $RESOURCE_GROUP --name $AZURE_MONITOR_NAME --resource-type "Microsoft.Monitor/accounts" --query id -o tsv) + export GRAFANA_RESOURCE_ID=$(az resource show --resource-group $RESOURCE_GROUP --name $GRAFANA_NAME --resource-type "microsoft.dashboard/grafana" --query id -o tsv) + ``` + +4. Link both the Azure Monitor Workspace and Grafana instance to your cluster: + + ```shell + az aks update --enable-azure-monitor-metrics \ + -n $NAME \ + -g $RESOURCE_GROUP \ + --azure-monitor-workspace-resource-id $AZMON_RESOURCE_ID \ + --grafana-resource-id $GRAFANA_RESOURCE_ID + ``` + +5. Verify that the Azure Monitor Pods are running. For example: + + ```shell + kubectl get pod -n kube-system + ``` + + ```shell + NAME READY STATUS RESTARTS AGE + ama-metrics-5bc6c6d948-zkgc9 2/2 Running 0 26h + ama-metrics-ksm-556d86b5dc-2ndkv 1/1 Running 0 26h + ama-metrics-node-lbwcj 2/2 Running 0 26h + ama-metrics-node-rzkzn 2/2 Running 0 26h + ama-metrics-win-node-gqnkw 2/2 Running 0 26h + ama-metrics-win-node-tkrm8 2/2 Running 0 26h + ``` + +6. Verify that the Retina Pods are discovered by port-forwarding an AMA node Pod: + + ```bash + kubectl port-forward -n kube-system $(kubectl get pod -n kube-system -l dsName=ama-metrics-node -o name | head -n 1) 9090:9090 + ``` + + ```bash + Forwarding from 127.0.0.1:9090 -> 9090 + Forwarding from [::1]:9090 -> 9090 + ``` + +7. Then go to [http://localhost:9090/targets](http://localhost:9090/targets) to see the Retina Pods being discovered and scraped: + + ![alt text](img/prometheus-retina-pods.png) + +## Configuring Grafana + +In the Azure Portal, find your Grafana instance. Click on the Grafana Endpoint URL, then follow [Configuring Grafana](./configuring-grafana.md). diff --git a/docs/installation/prometheus-unmanaged.md b/docs/installation/prometheus-unmanaged.md new file mode 100644 index 000000000..490dc97d1 --- /dev/null +++ b/docs/installation/prometheus-unmanaged.md @@ -0,0 +1,77 @@ +# Unmanaged Prometheus/Grafana + + +## Pre-Requisites + +1. Create a Kubernetes cluster. +2. Install Retina DaemonSet (see [Quick Installation](./setup.md)). + +## Configuring Prometheus + +1. In this example, we will install Prometheus via the community supported helm chart. If you already have this chart deployed, skip to step 3. + + ```shell + helm repo add prometheus-community https://prometheus-community.github.io/helm-charts + helm repo update + ``` + +2. Install the Prometheus chart + + ```shell + helm install prometheus -n kube-system -f deploy/prometheus/values.yaml prometheus-community/kube-prometheus-stack + ``` + +3. Save the Prometheus values below to `deploy/prometheus/values.yaml` + + ```yaml + prometheus: + prometheusSpec: + additionalScrapeConfigs: | + - job_name: "retina-pods" + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: [__meta_kubernetes_pod_container_name] + action: keep + regex: retina(.*) + - source_labels: + [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] + separator: ":" + regex: ([^:]+)(?::\d+)? 
+ target_label: __address__ + replacement: ${1}:${2} + action: replace + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: instance + metric_relabel_configs: + - source_labels: [__name__] + action: keep + regex: (.*) + ``` + +4. Upgrade deployment with + + ```shell + helm upgrade prometheus -n kube-system -f deploy/prometheus/values.yaml prometheus-community/kube-prometheus-stack + ``` + +5. Install Retina + + ```shell + helm install retina ./deploy/manifests/controller/helm/retina/ --namespace kube-system --dependency-update + ``` + +6. Verify that the Retina Pods are being scraped by port-forwarding the Prometheus server: + + ```shell + kubectl port-forward --namespace kube-system svc/prometheus-operated 9090 + ``` + +7. Then go to [http://localhost:9090/targets](http://localhost:9090/targets) to see the Retina Pods being discovered and scraped: + +![alt text](img/prometheus-retina-pods.png) + +## Configuring Grafana + +Create a Grafana instance at [grafana.com](https://www.grafana.com) and follow [Configuring Grafana](./configuring-grafana.md). diff --git a/docs/installation/setup.md b/docs/installation/setup.md new file mode 100644 index 000000000..f7fccbc70 --- /dev/null +++ b/docs/installation/setup.md @@ -0,0 +1,47 @@ +# Setup + +Clone the [Retina repo](https://github.com/microsoft/retina), then run a `make` command below for your scenario. + +Note: you can also run captures with just the [CLI](./cli.md). + +## Installation + +### Basic Mode + +```shell +make helm-install +``` + +### Basic Mode (with Capture support) + +```shell +make helm-install-with-operator +``` + +### Advanced Mode with Remote Context (with Capture support) + +See [Metric Modes](../metrics/modes.md). + +```shell +make helm-install-advanced-remote-context +``` + +### Advanced Mode with Local Context (with Capture support) + +See [Metric Modes](../metrics/modes.md). + +```shell +make helm-install-advanced-local-context +``` + +## Next Steps: Configuring Prometheus/Grafana + +Follow the guide relevant to your setup: +- [Unmanaged Prometheus/Grafana](./prometheus-unmanaged.md) +- [Azure-Hosted Prometheus/Grafana](./prometheus-azure-managed.md) + +## Managed Solutions + +For a managed experience, eliminating the need to manage helm charts, see these options: + +- [Azure-Managed Installation](https://learn.microsoft.com/en-us/azure/aks/network-observability-managed-cli?tabs=non-cilium) diff --git a/docs/intro.md b/docs/intro.md new file mode 100644 index 000000000..2d9521ec7 --- /dev/null +++ b/docs/intro.md @@ -0,0 +1,71 @@ +# Introduction to Retina + +## What is Retina? + +Retina is a cloud-agnostic, open-source **Kubernetes Network Observability platform** which helps with DevOps, SecOps and compliance use cases. +It provides a **centralized hub for monitoring application and network health and security**, catering to Cluster Network Administrators, Cluster Security Administrators and DevOps Engineers. + +Retina **collects customizable telemetry**, which can be exported to **multiple storage options** (such as Prometheus, Azure Monitor, and other vendors) and **visualized in a variety of ways** (like Grafana, Azure Log Analytics, and other vendors). + +## Features + +- **[eBPF](https://ebpf.io/what-is-ebpf#what-is-ebpf)-based** Network Observability platform for Kubernetes workloads. +- **On-Demand** and **Configurable**. +- Actionable, industry-standard **Prometheus metrics**. +- Streamlined **Packet Captures** for deep dives. 
+- **Cloud-agnostic**, supporting multiple OS (like Linux, Windows, Azure Linux). + +## Why Retina? + +Retina lets you **investigate network issues on-demand** and **continuously monitor your clusters**. Here are a couple scenarios where Retina shines, minimizing pain points and investigation time. + +### Investigations: Debugging Network Connectivity + +*Why can't my Pods connect to each other any more?* **Typical investigation is time-intensive** and involves performing packet captures, where one must identify the Nodes involved, gain access to each Node, run `tcpdump` commands, and export the results off of each Node. + +With Retina, you can **automate this process** with a **single CLI command** or CRD/YAML that can: +- Run captures on all Nodes hosting the Pods of interest. +- Upload each Node's results to a storage blob. + +To begin using the CLI, see [Quick Start Installation](./installation/cli.md). + +### Monitoring Network Health + +Retina supports actionable insights through **Prometheus** alerting, **Grafana** dashboards, and more. For instance, you can: + +- Monitor dropped traffic in a namespace. +- Alert on a spike in production DNS errors. +- Watch changes in API Server latency while testing your application's scale. +- Notify your Security team if a Pod starts sending too much traffic. + +## Telemetry + +Retina uses two types of telemetry: metrics and captures. + +### Metrics + +Retina metrics provide **continuous observability** into: +- Incoming/outcoming traffic +- Dropped packets +- TCP/UDP +- DNS +- API Server latency +- Node/interface statistics + +Retina provides both: +- **Basic metrics** (default, Node-Level metrics) and +- **Advanced/Pod-Level metrics** (if enabled). + +For more info and a list of metrics, see [Metrics](metrics/modes.md). + +### Captures + +A Retina capture **logs network traffic** and metadata **for the specified Nodes/Pods**. + +Captures are **on-demand** and can be output to multiple destinations. For more info, see [Captures](captures/readme.md). + +## Extendable Architecture + +Retina supports **several options for storage and insights/visualization**. Below is the high-level architecture for Retina, conveying some of these options. + +![Overview of Retina](retina-components.png "Overview of Retina") diff --git a/docs/metrics/_category_.json b/docs/metrics/_category_.json new file mode 100644 index 000000000..16a6dd251 --- /dev/null +++ b/docs/metrics/_category_.json @@ -0,0 +1,8 @@ +{ + "label": "Metrics", + "position": 2, + "link": { + "type": "generated-index", + "description": "Currently supported Retina plugins." + } +} diff --git a/docs/metrics/advanced.md b/docs/metrics/advanced.md new file mode 100644 index 000000000..db4fb6f85 --- /dev/null +++ b/docs/metrics/advanced.md @@ -0,0 +1,152 @@ +# Advanced Metrics + +There are two Advanced modes (see [Metric Modes](./modes.md)) which include all [Basic Metrics](./basic.md) plus extra metrics providing Pod-Level context. + +The two Advanced modes are *remote context* and *local context*. The difference lies in the [Context Labels](#context-labels). +Additionally, *local context* supports [Annotations](./annotations.md). + +## Prefix + +All metrics have the prefix `networkobservability_`. + +## Universal Labels + +Node and Cluster metadata are included with the labels: +- `cluster` +- `instance` (Node name) + +## Context Labels + +There are Pod-Level context labels for metrics prepended with `adv_`. 
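+
+One quick way to see these labels on a live cluster is to scrape a retina-agent Pod directly and filter for the `adv_` metrics. The Pod name below is a placeholder, and this assumes the agent is serving metrics on its default port 10093 at `/metrics`:
+
+```shell
+# Forward the agent's metrics port locally, then grep for advanced metrics.
+kubectl port-forward -n kube-system retina-agent-xxxxx 10093:10093 &
+curl -s http://localhost:10093/metrics | grep networkobservability_adv_
+```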
+ +To customize context labels, see [MetricsConfiguration CRD](../CRDs/MetricsConfiguration.md). + +### Remote Context + +For Advanced mode with *remote context*, default context labels are the following: +- `source_ip` +- `source_namespace` +- `source_pod` +- `source_workload` +- `destination_ip` +- `destination_namespace` +- `destination_pod` +- `destination_workload` + +### Local Context + +For Advanced mode with *local context*, default context labels are the following for *outgoing* traffic: +- `source_ip` +- `source_namespace` +- `source_pod` +- `source_workload` + +For *incoming* traffic: +- `destination_ip` +- `destination_namespace` +- `destination_pod` +- `destination_workload` + +## List of Metrics + +### Plugin: `packetforward` (Linux) + +[Same metrics](./basic.md#plugin-packetforward-linux) as Basic mode. + +### Plugin: `dropreason` (Linux) + +Metrics enabled when `dropreason` plugin is enabled (see [Metrics Configuration](./configuration.md)). + +| Metric Name | Description | Extra Labels | +| ----------------------- | ---------------------------------------------- | --------------------- | +| `drop_count` | *Basic*: dropped packet count | `direction`, `reason` | +| `drop_bytes` | *Basic*: dropped byte count | `direction`, `reason` | +| `adv_drop_count` | ***Advanced/Pod-Level***: dropped packet count | `direction`, `reason`, context labels | +| `adv_drop_bytes` | ***Advanced/Pod-Level***: dropped byte count | `direction`, `reason`, context labels | + +#### Label Values +See [Context Labels](#context-labels). + +Possible values for `direction`: +- `ingress` (incoming traffic) +- `egress` (outgoing traffic) + +Possible values for `reason`: +- `IPTABLE_RULE_DROP` +- `IPTABLE_NAT_DROP` +- `TCP_CONNECT_BASIC` +- `TCP_ACCEPT_BASIC` +- `TCP_CLOSE_BASIC` +- `CONNTRACK_ADD_DROP` +- `UNKNOWN_DROP` + +### Plugin: `linuxutil` (Linux) + +[Same metrics](./basic.md#plugin-linuxutil-linux) as Basic mode. + +### Plugin: `dns` (Linux) + +Metrics enabled when `dns` plugin is enabled (see [Metrics Configuration](./configuration.md)). + +| Metric Name | Description | Extra Labels | +| ---------------------------- | ---------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `dns_request_count` | *Basic*: number of DNS requests by query | `query_type`, `query` | +| `dns_response_count` | *Basic*: number of DNS responses by query, error code, and response value | `query_type`, `query`, `return_code`, `response`, `num_response` | +| `adv_dns_request_count` | ***Advanced/Pod-Level***: number of DNS requests by query | `query_type`, `query`, context labels | +| `adv_dns_response_count` | ***Advanced/Pod-Level***: number of DNS responses by query, error code, and response value | `query_type`, `query`, `return_code`, `response`, `num_response`, context labels | + +### Plugin: `hnsstats` (Windows) + +[Same metrics](./basic.md#plugin-hnsstats-windows) as Basic mode. + +### Plugin: `packetparser` (Linux) + +Metrics enabled when `packetparser` plugin is enabled (see [Metrics Configuration](./configuration.md)). 
+ +| Metric Name | Description | Extra Labels | +| ------------------------------------------ | ----------------------------------------------------------------------------- | --------------------------- | +| `adv_forward_count` | ***Advanced/Pod-Level***: forwarded packet count | `direction`, context labels | +| `adv_forward_bytes` | ***Advanced/Pod-Level***: forwarded byte count | `direction`, context labels | +| `adv_tcpflags_count` | ***Advanced/Pod-Level***: TCP packet count by flag | `flag`, context labels | +| `adv_node_apiserver_latency` | ***Advanced***: API Server round trip time for SYN-ACK (histogram) | `le` (histogram bucket) | +| `adv_node_apiserver_no_response` | ***Advanced***: number of packets that did not get a response from API server | | +| `adv_node_apiserver_tcp_handshake_latency` | ***Advanced***: API Server latency in establishing connection (histogram) | `le` (histogram bucket) | + +Note: API Server metrics help identify degradation of Node-to-API-server connection. +The metrics were born out of a real-life incident, where Node-to-API-server latency was the root cause. + +#### Label Values + +See [Context Labels](#context-labels). + +Possible values for `direction`: +- `ingress` (incoming traffic) +- `egress` (outgoing traffic) + +Possible values for `flag`: +- `FIN` +- `SYN` +- `RST` +- `PSH` +- `ACK` +- `URG` +- `ECE` +- `CWR` +- `NS` + +Possible values for `le` (for API server metrics). Units are in *milliseconds*. `le` stands for "less than or equal". See [Prometheus histogram documentation](https://prometheus.io/docs/concepts/metric_types/#histogram) for more info. +- `0` +- `0.5` +- `1` through `4.5` in increments of 0.5 +- `inf` + +### Plugin: `tcpretrans` (Linux) + +Metrics enabled when `tcpretrans` plugin is enabled (see [Metrics Configuration](./configuration.md)). + +| Metric Name | Description | Extra Labels | +| ---------------------- | -------------------------------------------------------- | -------------- | +| `adv_tcpretrans_count` | ***Advanced/Pod-Level***: TCP retransmitted packet count | context labels | + +#### Label Values +See [Context Labels](#context-labels). diff --git a/docs/metrics/annotations.md b/docs/metrics/annotations.md new file mode 100644 index 000000000..a8d41a4f8 --- /dev/null +++ b/docs/metrics/annotations.md @@ -0,0 +1,10 @@ +# Annotations + +Annotations let you specify which Pods to observe (create metrics for). +To configure this, specify `enableAnnotations=true` in Retina's [helm installation](../installation/setup.md) or [ConfigMap](../installation/config.md). + +You can then add the annotation `retina.sh/v1alpha1: observe` to either: +- individual Pods +- Namespaces (to observe all the Pods in the namespace). + +An exception: currently all Pods in `kube-system` are always monitored. diff --git a/docs/metrics/basic.md b/docs/metrics/basic.md new file mode 100644 index 000000000..9c99a7dfc --- /dev/null +++ b/docs/metrics/basic.md @@ -0,0 +1,156 @@ +# Basic Metrics + +These metrics are provided in all three modes (see [Metric Modes](./modes.md)). + +## Prefix + +All metrics have the prefix `networkobservability_`. + +## Universal Labels + +All metrics include node and Cluster metadata with the labels: +- `cluster` +- `instance` (Node name) + +## List of Metrics + +### Plugin: `packetforward` (Linux) + +Metrics enabled when `packetforward` plugin is enabled (see [Metrics Configuration](./configuration.md)). 
+ +| Name | Description | Extra Labels | +| ----------------------- | ----------------------- | ------------- | +| `forward_count` | forwarded packet count | `direction` | +| `forward_bytes` | forwarded byte count | `direction` | + +#### Label Values + +Possible values for `direction`: +- `ingress` (incoming traffic) +- `egress` (outgoing traffic) + +### Plugin: `dropreason` (Linux) + +Metrics enabled when `dropreason` plugin is enabled (see [Metrics Configuration](./configuration.md)). + +| Metric Name | Description | Extra Labels | +| ----------------------- | ------------------------ | --------------------- | +| `drop_count` | dropped packet count | `direction`, `reason` | +| `drop_bytes` | dropped byte count | `direction`, `reason` | + +#### Label Values + +Possible values for `direction`: +- `ingress` (incoming traffic) +- `egress` (outgoing traffic) + +Possible values for `reason`: +- `IPTABLE_RULE_DROP` +- `IPTABLE_NAT_DROP` +- `TCP_CONNECT_BASIC` +- `TCP_ACCEPT_BASIC` +- `TCP_CLOSE_BASIC` +- `CONNTRACK_ADD_DROP` +- `UNKNOWN_DROP` + +### Plugin: `linuxutil` (Linux) + +Metrics enabled when `linuxutil` plugin is enabled (see [Metrics Configuration](./configuration.md)). + +| Metric Name | Description | Extra Labels | +| ----------------------- | ------------------------------------------------------------------------------- | ---------------------------------- | +| `tcp_state` | TCP currently active socket count by TCP state (from `netstats` utility) | `state` | +| `tcp_connection_remote` | TCP currently active socket count by remote IP/port (from `netstats` utility) | `address` (IP), `port` | +| `tcp_connection_stats` | TCP connection statistics (from `netstats` utility) | `statistic_name` | +| `ip_connection_stats` | IP connection statistics (from `netstats` utility) | `statistic_name` | +| `udp_connection_stats` | UDP connection statistics (from `netstats` utility) | `statistic_name` | +| `interface_stats` | interface statistics (from `ethtool` utility) | `interface_name`, `statistic_name` | + +#### Label Values + +Possible values for TCP `state`: +- `UNKNOWN` +- `ESTABLISHED` +- `SYN_SENT` +- `SYN_RECV` +- `FIN_WAIT1` +- `FIN_WAIT2` +- `TIME_WAIT` +- `""` (represents CLOSE) +- `CLOSE_WAIT` +- `LAST_ACK` +- `LISTEN` +- `CLOSING` + +Possible values for `statistic_name` (for metric `tcp_connection_stats`): +- `TCPTimeouts` +- `TCPTSReorder` +- `ResetCount` +- and many others (full list [here](./plugins/linuxutil.md#label-values-for-tcp_connection_stats)) + +Possible values for `statistic_name` (for metric `ip_connection_stats`): +- `InNoECTPkts` +- `InNoRoutes` +- `InOctets` +- `OutOctets` + +Possible values for `statistic_name` (for metric `udp_connection_stats`): +- `ACTIVE` (currently active socket count) + +Possible values for `statistic_name` (for metric `interface_stats`): +- `tx_packets` +- `rx_packets` +- `rx0_cache_full` (or `rx1_`, etc.) +- `tx0_nop` (or `tx1_`, etc.) +- `rx_dropped` +- `tx_dropped` +- `rx_comp_full` +- `tx_send_full` +- and many others (as seen by running `ethtool -S ` on the Node) + +### Plugin: `dns` (Linux) + +Metrics enabled when `dns` plugin is enabled (see [Metrics Configuration](./configuration.md)). 
+ +| Metric Name | Description | Extra Labels | +| ---------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `dns_request_count` | number of DNS requests by query | `query_type`, `query` | +| `dns_response_count` | number of DNS responses by query, error code, and response value | `query_type`, `query`, `return_code`, `response`, `num_response` | + +### Plugin: `hnsstats` (Windows) + +Metrics enabled when `hnsstats` plugin is enabled (see [Metrics Configuration](./configuration.md)). + +| Name | Description | Extra Labels | +| ----------------------- | ----------------------------------------------| --------------------- | +| `forward_count` | forwarded packet count (from HNS) | `direction` | +| `forward_bytes` | forwarded byte count (from HNS) | `direction` | +| `drop_count` | dropped packet count (from HNS or VFP) | `direction`, `reason` | +| `drop_bytes` | dropped byte count (from HNS or VFP) | `direction`, `reason` | +| `tcp_connection_stats` | several TCP connection statistics (from VFP) | `statistic_name` | +| `tcp_flag_counters` | TCP packet count by flag (from VFP) | `direction`, `flag` | + +#### Label Values + +Possible values for `direction`: +- `ingress` (incoming traffic) +- `egress` (outgoing traffic) + +Possible values for `reason`: +- `endpoint` (dropped by HNS endpoint) +- `aclrule` (dropped by ACL firewall rule in VFP) + +Possible values for `statistic_name` (for metric `tcp_connection_stats`): +- `ResetCount` +- `ClosedFin` +- `ResetSyn` +- `TcpHalfOpenTimeouts` +- `Verified` +- `TimedOutCount` +- `TimeWaitExpiredCount` + +Possible values for TCP `flag`: +- `SYN` +- `SYNACK` +- `FIN` +- `RST` diff --git a/docs/metrics/configuration.md b/docs/metrics/configuration.md new file mode 100644 index 000000000..5574a574b --- /dev/null +++ b/docs/metrics/configuration.md @@ -0,0 +1,7 @@ +# Metrics Configuration + +You can enable/disable metrics by including/omitting their Plugin from `enabledPlugins` in either Retina's [helm installation](../installation/setup.md) or [ConfigMap](../installation/config.md). + +Via [MetricsConfiguration CRD](../CRDs/MetricsConfiguration.md), you can further customize the following for your enabled plugins: +- Which metrics to include +- Which metadata to include for a metric. diff --git a/docs/metrics/modes.md b/docs/metrics/modes.md new file mode 100644 index 000000000..87e16f637 --- /dev/null +++ b/docs/metrics/modes.md @@ -0,0 +1,19 @@ +# Metric Modes + +Retina provides **three modes** with their own metrics and scale capabilities. +Each mode is **fully customizable** (only create the metrics/labels you need). + +Note that below, "metric cardinality" refers to the number of time series (metric values with a unique set of labels). +The larger the cardinality, the more load induced on a Prometheus server for instance. 
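+
+For rough intuition about how cardinality differs between modes, the small Go snippet below estimates time-series counts; every number in it is hypothetical (neither a measurement nor a Retina default). The table that follows summarizes the actual modes.
+
+```go
+package main
+
+import "fmt"
+
+func main() {
+    // Hypothetical cluster shape, for illustration only.
+    nodes := 100
+    observedPods := 500   // Pods annotated for observation (local context)
+    srcDstPairs := 50000  // source/destination pairs, incl. external IPs (remote context)
+    labelCombos := 4      // e.g. direction/reason combinations per entity, illustrative
+
+    fmt.Println("basic (per-Node):        ", nodes*labelCombos)
+    fmt.Println("advanced, local context: ", observedPods*labelCombos)
+    fmt.Println("advanced, remote context:", srcDstPairs*labelCombos)
+}
+```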
+
+| Mode | Description | Scale | Metrics | Configuration |
+| --- | --- | --- | --- | --- |
+| *Basic* | Metrics aggregated by Node. | Metric cardinality proportional to number of nodes. | [Link to Metrics](./basic.md) | [Link to Installation](../installation/setup.md#basic-mode) |
+| *Advanced/Pod-Level with remote context* | Basic metrics plus extra metrics aggregated by source and destination Pod. | Has scale limitations. Metric cardinality is unbounded (proportional to number of source/destination pairs, including external IPs). | [Link to Metrics](./advanced.md) | [Link to Installation](../installation/setup.md#advanced-mode-with-remote-context-with-support-for-captures) |
+| *Advanced/Pod-Level with local context* | Basic metrics plus extra metrics aggregated by "local" Pod (source for outgoing traffic, destination for incoming traffic). Also lets you specify which Pods to observe (create metrics for) with [Annotations](./annotations.md). | Designed for scale. Metric cardinality proportional to number of Pods observed. | [Link to Metrics](./advanced.md) | [Link to Installation](../installation/setup.md#advanced-mode-with-local-context-with-support-for-captures) |
+
+## Where Do Metrics Come From?
+
+A Retina agent generates metrics on each Node.
+The Linux agent inserts eBPF programs and gathers data from Linux utilities.
+The Windows agent gathers data from HNS and VFP.
diff --git a/docs/metrics/plugins/_category_.json b/docs/metrics/plugins/_category_.json
new file mode 100644
index 000000000..862399be1
--- /dev/null
+++ b/docs/metrics/plugins/_category_.json
@@ -0,0 +1,8 @@
+{
+    "label": "Plugins",
+    "position": 4,
+    "link": {
+        "type": "generated-index",
+        "description": "Currently supported Retina plugins."
+    }
+}
diff --git a/docs/metrics/plugins/dns.md b/docs/metrics/plugins/dns.md
new file mode 100644
index 000000000..31ac7abe7
--- /dev/null
+++ b/docs/metrics/plugins/dns.md
@@ -0,0 +1,18 @@
+# `dns` (Linux)
+
+Counts DNS requests/responses by query, including error codes, response IPs, and other metadata.
+
+## Metrics
+
+See metrics for [Basic Mode](../basic.md#plugin-dns-linux) or [Advanced Mode](../advanced.md#plugin-dns-linux).
+
+## Architecture
+
+The plugin utilizes eBPF to gather data.
+The plugin generates Basic metrics from an eBPF result.
+In Advanced mode (see [Metric Modes](../modes.md)), the plugin turns this eBPF result into an enriched `Flow` (adding Pod information based on IP), then sends the `Flow` to an external channel so that a dns module can create extra Pod-Level metrics.
+
+### Code locations
+
+- Plugin and eBPF code: *pkg/plugin/dns/*
+- Module for extra Advanced metrics: *pkg/module/metrics/dns.go*
diff --git a/docs/metrics/plugins/dropreason.md b/docs/metrics/plugins/dropreason.md
new file mode 100644
index 000000000..f175a2c42
--- /dev/null
+++ b/docs/metrics/plugins/dropreason.md
@@ -0,0 +1,62 @@
+# `dropreason` (Linux)
+
+Counts number of packets/bytes dropped on a Node, along with the direction and reason for drop.
+ +## Metrics + +See metrics for [Basic Mode](../basic.md#plugin-dropreason-linux) or [Advanced Mode](../advanced.md#plugin-dropreason-linux). + +## Architecture + +The plugin utilizes eBPF to gather data. +The plugin generates Basic metrics from an eBPF result. +In Advanced mode (see [Metric Modes](../modes.md)), the plugin turns this eBPF result into an enriched `Flow` (adding Pod information based on IP), then sends the `Flow` to an external channel so that a drops module can create extra Pod-Level metrics. + +### Code locations + +- Plugin and eBPF code: *pkg/plugin/dropreason/* +- Module for extra Advanced metrics: *pkg/module/metrics/drops.go* + +### Data sources + +This plugin reads data from variable eBPF progs writing into the same eBPF map called `metrics_map`. +It aggregates data for basic metrics at a node level and then control plane of the plugin converts into Prometheus metrics. + +See *pkg/plugin/dropreason/drop_reason.c*: + +```c +struct +{ + __uint(type, BPF_MAP_TYPE_PERCPU_HASH); + __uint(max_entries, 512); + __type(key, struct metrics_map_key); + __type(value, struct metrics_map_value); +} metrics_map SEC(".maps"); + +struct metrics_map_key +{ + __u32 drop_type; + __u32 return_val; +}; + +struct metrics_map_value +{ + __u64 count; + __u64 bytes; +}; + +``` + +### eBPF Hook Points for Drop Reason + +| Reason | Description | eBPF Hook Point | +|--|--| -- | +| IPTABLE_RULE_DROP | Packets dropped by iptables rule | `kprobe/nf_hook_slow` | +| IPTABLE_NAT_DROP | Packets dropped by iptables NAT rule | `kretprobe/nf_nat_inet_fn` | +| TCP_CONNECT_BASIC | Packets dropped by TCP connect | `kretprobe/tcp_v4_connect` | +| TCP_ACCEPT_BASIC | Packets dropped by TCP accept | `kretprobe/inet_csk_accept` | +| TCP_CLOSE_BASIC | Packets dropped by TCP close | TBD | +| CONNTRACK_ADD_DROP | Packets dropped by conntrack add | `kretprobe/__nf_conntrack_confirm` | +| UNKNOWN_DROP | Packets dropped by unknown reason | NA | + +This list will keep on growing as we add support for more reasons. diff --git a/docs/metrics/plugins/hnsstats.md b/docs/metrics/plugins/hnsstats.md new file mode 100644 index 000000000..fe4e1a66c --- /dev/null +++ b/docs/metrics/plugins/hnsstats.md @@ -0,0 +1,15 @@ +# `hnsstats` (Windows) + +Gathers TCP statistics and counts number of packets/bytes forwarded or dropped in HNS and VFP. + +## Metrics + +See metrics for [Basic Mode](../basic.md#plugin-hnsstats-windows) (Advanced modes have identical metrics). + +## Architecture + +Interfaces with a Windows Node's HNS (Host Networking System) and VFP (Virtual Filtering Platform). + +### Code Locations + +- Plugin code interfacing with HNS/VFP: *pkg/plugin/windows/hnsstats* diff --git a/docs/metrics/plugins/intro.md b/docs/metrics/plugins/intro.md new file mode 100644 index 000000000..13de55178 --- /dev/null +++ b/docs/metrics/plugins/intro.md @@ -0,0 +1,14 @@ +# Plugins + +Each metric is associated with a Plugin. +Associated metrics are linked below. +See [Metrics Configuration](../configuration.md) for info on configuration. 
+ +| Name | Description | Metrics in Basic Mode | Metrics in Advanced Mode | Development Guide | +| ----------------------- | ---------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------ | --------------------------------------------------------- | ------------------------------- | +| `packetforward` (Linux) | Counts number of packets/bytes passing through the `eth0` interface of a Node, along with the direction of the packets. | [Basic Mode](../basic.md#plugin-packetforward-linux) | Same metrics as Basic mode | [Dev Guide](./packetforward.md) | +| `dropreason` (Linux) | Counts number of packets/bytes dropped on a Node, along with the direction and reason for drop. | [Basic Mode](../basic.md#plugin-dropreason-linux) | [Advanced Mode](../advanced.md#plugin-dropreason-linux) | [Dev Guide](./dropreason.md) | +| `linuxutil` (Linux) | Gathers TCP/UDP statistics and network interface statistics from the `netstats` and `ethtool` Node utilities (respectively). | [Basic Mode](../basic.md#plugin-linuxutil-linux) | Same metrics as Basic mode | [Dev Guide](./linuxutil.md) | +| `dns` (Linux) | Counts DNS requests/responses by query, including error codes, response IPs, and other metadata. | [Basic Mode](../basic.md#plugin-dns-linux) | [Advanced Mode](../advanced.md#plugin-dns-linux) | [Dev Guide](./dns.md) | +| `hnstats` (Windows) | Gathers TCP statistics and counts number of packets/bytes forwarded or dropped in HNS and VFP. | [Basic Mode](../basic.md#plugin-hnsstats-windows) | Same metrics as Basic mode | [Dev Guide](./hnsstats.md) | +| `packetparser` (Linux) | Measures TCP packets passing through `eth0`, providing the ability to calculate TCP-handshake latencies, etc. | No basic metrics | [Advanced Mode](../advanced.md#plugin-packetparser-linux) | [Dev Guide](./packetparser.md) | diff --git a/docs/metrics/plugins/linuxutil.md b/docs/metrics/plugins/linuxutil.md new file mode 100644 index 000000000..296487ee1 --- /dev/null +++ b/docs/metrics/plugins/linuxutil.md @@ -0,0 +1,105 @@ +# `linuxutil` (Linux) + +Gathers TCP/UDP statistics and network interface statistics from the `netstats` and `ethtool` Node utilities (respectively). + +## Metrics + +See metrics for [Basic Mode](../basic.md#plugin-linuxutil-linux) (Advanced modes have identical metrics). + +## Architecture + +The plugin uses the following utilities as data sources: + +1. `netstat` + - TCP Socket information + - UDP Socket information + - "/proc/net/netstat" for IP and UDP statistics +2. `ethtool` + - Interface statistics + +### Code Locations + +- Plugin code interfacing with the Node utilities: *pkg/plugin/linuxutil/* + +### Configuration (in Code) + +Both `ethtool` and `netstat` data can be curated to remove unwanted data. Below options in a struct in *linuxutil.go* can be used to configure the same. + +```go +type EthtoolOpts struct { + // when true will only include keys with err or drop in its name + errOrDropKeysOnly bool + + // when true will include all keys with value 0 + addZeroVal bool +} +``` + +```go +type NetstatOpts struct { + // when true only includes curated list of keys + CuratedKeys bool + + // when true will include all keys with value 0 + AddZeroVal bool + + // get only listening sockets + ListenSock bool +} +``` + +These are initialized in the linuxutil.go file. 
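+
+As a minimal sketch of how these options might be wired up, the hypothetical helper below constructs both structs; the field values are assumptions chosen for illustration, not the defaults actually used in *linuxutil.go*.
+
+```go
+// newCuratedOpts is a hypothetical helper showing how the curation options
+// above could be initialized. Values are illustrative, not Retina defaults.
+func newCuratedOpts() (*EthtoolOpts, *NetstatOpts) {
+    eth := &EthtoolOpts{
+        errOrDropKeysOnly: true,  // keep only counters with "err" or "drop" in the name
+        addZeroVal:        false, // skip zero-valued counters to reduce noise
+    }
+    net := &NetstatOpts{
+        CuratedKeys: true,  // emit only the curated list of netstat keys
+        AddZeroVal:  false, // skip zero-valued counters
+        ListenSock:  false, // include all sockets, not only listening ones
+    }
+    return eth, net
+}
+```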
+ +## Label Values for `tcp_connection_stats` + +Below is a running list of all statistics for the metric `tcp_connection_stats`, captured from the `netstats` utility: +- `DelayedACKLocked` +- `DelayedACKLost` +- `DelayedACKs` +- `IPReversePathFilter` +- `PAWSEstab` +- `TCPACKSkippedPAWS` +- `TCPACKSkippedSeq` +- `TCPAbortOnClose` +- `TCPAbortOnData` +- `TCPAckCompressed` +- `TCPAutoCorking` +- `TCPBacklogCoalesce` +- `TCPChallengeACK` +- `TCPDSACKIgnoredNoUndo` +- `TCPDSACKOldSent` +- `TCPDSACKRecv` +- `TCPDSACKRecvSegs` +- `TCPDSACKUndo` +- `TCPDelivered` +- `TCPFastRetrans` +- `TCPFromZeroWindowAdv` +- `TCPFullUndo` +- `TCPHPAcks` +- `TCPHPHits` +- `TCPHystartTrainCwnd` +- `TCPHystartTrainDetect` +- `TCPKeepAlive` +- `TCPLossProbeRecovery` +- `TCPLossProbes` +- `TCPLossUndo` +- `TCPOFOQueue` +- `TCPOrigDataSent` +- `TCPPartialUndo` +- `TCPPureAcks` +- `TCPRcvCoalesce` +- `TCPSACKReorder` +- `TCPSackMerged` +- `TCPSackRecovery` +- `TCPSackShiftFallback` +- `TCPSackShifted` +- `TCPSpuriousRtxHostQueues` +- `TCPSynRetrans` +- `TCPTSReorder` +- `TCPTimeouts` +- `TCPToZeroWindowAdv` +- `TCPWantZeroWindowAdv` +- `TW` +- `TWRecycled` +- `TcpDuplicateDataRehash` +- `TcpTimeoutRehash` diff --git a/docs/metrics/plugins/packetforward.md b/docs/metrics/plugins/packetforward.md new file mode 100644 index 000000000..f8e6d07a2 --- /dev/null +++ b/docs/metrics/plugins/packetforward.md @@ -0,0 +1,19 @@ +# `packetforward` (Linux) + +Counts number of packets/bytes passing through the `eth0` interface of a Node, along with the direction of the packets. + +## Metrics + +See metrics for [Basic Mode](../basic.md#plugin-packetforward-linux) (Advanced modes have identical metrics). + +Note: `adv_forward_count` and `adv_forward_bytes` metrics are NOT associated with `packetforward` plugin despite similarities in name. +These metrics are associated with [`packetparser` plugin](./packetparser.md). + +## Architecture + +The plugin utilizes eBPF to gather data. +The plugin generates Basic metrics from an eBPF result. + +### Code locations + +- Plugin and eBPF code: *pkg/plugin/packetforward/* diff --git a/docs/metrics/plugins/packetparser.md b/docs/metrics/plugins/packetparser.md new file mode 100644 index 000000000..3e35c58eb --- /dev/null +++ b/docs/metrics/plugins/packetparser.md @@ -0,0 +1,45 @@ +# `packetparser` (Linux) + +Measures TCP packets passing through `eth0`, providing the ability to calculate TCP-handshake latencies, etc. + +## Metrics + +See metrics for [Advanced Mode](../advanced.md#plugin-packetparser-linux). For module information, see [below](#modules). + +## Architecture + +The plugin utilizes eBPF to gather data. +The plugin does not generate Basic metrics. +In Advanced mode (see [Metric Modes](../modes.md)), the plugin turns an eBPF result into an enriched `Flow` (adding Pod information based on IP), then sends the `Flow` to an external channel so that *several modules* can create Pod-Level metrics. + +### Code locations + +- Plugin and eBPF code: *pkg/plugin/tcpretrans/* +- Modules for extra Advanced metrics: see section below. 
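+
+Before the per-module breakdown below, here is a minimal, self-contained sketch of the enrich-and-publish pattern described above. All type and function names are hypothetical; the real implementation lives in the packages listed under Code locations.
+
+```go
+package main
+
+import "fmt"
+
+// Flow is a simplified stand-in for the enriched flow record described above.
+type Flow struct {
+    SrcIP, DstIP   string
+    SrcPod, DstPod string
+    Flags          string
+}
+
+// lookupPod is a hypothetical IP-to-Pod resolver (the real plugin enriches from Pod metadata).
+func lookupPod(ip string) string {
+    cache := map[string]string{"10.0.0.5": "default/client", "10.0.0.9": "default/server"}
+    return cache[ip]
+}
+
+// enrich adds Pod information to a raw flow and publishes it for metric modules.
+func enrich(raw Flow, out chan<- Flow) {
+    raw.SrcPod = lookupPod(raw.SrcIP)
+    raw.DstPod = lookupPod(raw.DstIP)
+    out <- raw
+}
+
+func main() {
+    flows := make(chan Flow, 1)
+    enrich(Flow{SrcIP: "10.0.0.5", DstIP: "10.0.0.9", Flags: "SYN"}, flows)
+    // A metrics module (e.g. forward or tcpflags) would consume the channel
+    // and update Pod-level counters from each enriched Flow.
+    fmt.Printf("%+v\n", <-flows)
+}
+```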
+ +### Modules + +#### Module: forward + +Code path: *pkg/module/metrics/forward.go* + +Metrics produced: +- `adv_forward_count` +- `adv_forward_bytes` + +#### Module: tcpflags + +Code path: *pkg/module/metrics/tcpflags.go* + +Metrics produced: +- `adv_forward_count` +- `adv_forward_bytes` + +#### Module: latency (API Server) + +Code path: *pkg/module/metrics/latency.go* + +Metrics produced: +- `adv_node_apiserver_latency` +- `adv_node_apiserver_no_response` +- `adv_node_apiserver_tcp_handshake_latency` diff --git a/docs/metrics/plugins/tcpretrans.md b/docs/metrics/plugins/tcpretrans.md new file mode 100644 index 000000000..d409a310b --- /dev/null +++ b/docs/metrics/plugins/tcpretrans.md @@ -0,0 +1,18 @@ +# `tcpretrans` (Linux) + +Measures retransmitted TCP packets. + +## Metrics + +See metrics for [Advanced Mode](../advanced.md#plugin-tcpretrans-linux). + +## Architecture + +The plugin utilizes eBPF to gather data. +The plugin does not generate Basic metrics. +In Advanced mode (see [Metric Modes](../modes.md)), the plugin turns an eBPF result into an enriched `Flow` (adding Pod information based on IP), then sends the `Flow` to an external channel so that a tcpretrans module can create Pod-Level metrics. + +### Code locations + +- Plugin and eBPF code: *pkg/plugin/tcpretrans/* +- Module for extra Advanced metrics: *pkg/module/metrics/tcpretrans.go* diff --git a/docs/retina-components.png b/docs/retina-components.png new file mode 100644 index 000000000..498d4179a Binary files /dev/null and b/docs/retina-components.png differ diff --git a/docs/troubleshooting/_category_.json b/docs/troubleshooting/_category_.json new file mode 100644 index 000000000..0c201161a --- /dev/null +++ b/docs/troubleshooting/_category_.json @@ -0,0 +1,8 @@ +{ + "label": "Troubleshooting", + "position": 4, + "link": { + "type": "generated-index", + "description": "Troubleshooting for Retina." + } +} diff --git a/docs/troubleshooting/basic-metrics.md b/docs/troubleshooting/basic-metrics.md new file mode 100644 index 000000000..10a1b2742 --- /dev/null +++ b/docs/troubleshooting/basic-metrics.md @@ -0,0 +1,171 @@ +# Basic Metrics TSG + +## Overview + +Basic metrics is covered by the [Metrics](../metrics/intro.md) section of the Retina documentation. This guide is intended to help you troubleshoot issues with basic metrics. + +## Metrics are not being generated or showing up in Grafana dashboards + +If you are not seeing metrics in Grafana, it is likely that the metrics are not being generated. To troubleshoot this issue, you can check the following: + +### Check Retina Pods are running + +Check that all Retina Pods are running. You can do this by running the following command: + +```shell +kubectl get pods -n kube-system -l k8s-app=retina +``` + +If any of the pods are not running, you can describe the Pod to see what the issue is. You can do this by running the following command: + +```shell +kubectl describe pod -n kube-system +``` + +### Check Retina Pod Logs + +If retina Pods are running, you can check the logs for the Pod to see what the issue is. You can do this by running the following command: + +```shell +kubectl logs -n kube-system +``` + +you should see log lines like the following: + +```shell +2023-04-15T23:52:08.046Z info dropreason dropreason/dropreason_linux.go:208 DropReason data collection complete +2023-04-16T00:10:55.598Z info linuxutil linuxutil/linuxutil.go:100 Updated ethTool metrics +``` + +### Check Retina ConfigMap + +If not then the plugin is not running correctly. 
Please check retina config map and make sure the plugins is enabled. + +```shell +kubectl get configmap retina-config -n kube-system -oyaml +``` + +```yaml +apiVersion: v1 +data: + config.yaml: |- + apiServer: + host: 0.0.0.0 + port: 10093 + logLevel: info + enabledPlugin: ["dropreason","packetforward","linuxutil"] + metricsInterval: 10 + enableTelemetry: true +kind: ConfigMap +metadata: + annotations: + meta.helm.sh/release-name: retina + meta.helm.sh/release-namespace: kube-system + creationTimestamp: "2023-04-13T21:56:08Z" + labels: + app.kubernetes.io/managed-by: Helm + name: retina-config + namespace: kube-system + resourceVersion: "427462" + uid: 1d45f43a-2db5-4f93-a467-aca106ac3fcb + ``` + +### Check Retina Metrics are being generated + +You can check that metrics are being generated by calling the metrics endpoint. You can do this by running the following command from inside one of the nodes: + +```shell +curl http://:10093/metrics | grep -i retina +``` + +You should see metrics like the following: + +```shell +# HELP retina_forward_bytes Total forwarded bytes +# TYPE retina_forward_bytes gauge +retina_forward_bytes{direction="egress"} 1.9064666952e+10 +retina_forward_bytes{direction="ingress"} 2.3619602627e+10 +# HELP retina_forward_count Total forwarded packets +# TYPE retina_forward_count gauge +retina_forward_count{direction="egress"} 4.3139614e+07 +retina_forward_count{direction="ingress"} 3.7254085e+07 +# HELP retina_interface_stats Interface Statistics +# TYPE retina_interface_stats gauge +retina_interface_stats{interface_name="eth0",statistic_name="vf_rx_bytes"} 1.2679472174e+10 +retina_interface_stats{interface_name="eth0",statistic_name="vf_rx_packets"} 1.2948929e+07 +# HELP retina_ip_connection_stats IP connections Statistics +# TYPE retina_ip_connection_stats gauge +retina_ip_connection_stats{statistic_name="InECT0Pkts"} 34713 +retina_ip_connection_stats{statistic_name="InNoECTPkts"} 3.8893357e+07 +retina_ip_connection_stats{statistic_name="InOctets"} 1.6718610902e+10 +retina_ip_connection_stats{statistic_name="OutOctets"} 2.7768258214e+10 +# HELP retina_tcp_connection_remote number of active TCP connections by remote address +# TYPE retina_tcp_connection_remote gauge +retina_tcp_connection_remote{address="0.0.0.0",port="0"} 8 +retina_tcp_connection_remote{address="10.0.0.1",port="443"} 1 +retina_tcp_connection_remote{address="10.224.0.105",port="7070"} 1 +# HELP retina_tcp_connection_stats TCP connections Statistics +# TYPE retina_tcp_connection_stats gauge +retina_tcp_connection_stats{statistic_name="DelayedACKLocked"} 107 +# HELP retina_tcp_state number of active TCP connections by state +# TYPE retina_tcp_state gauge +retina_tcp_state{state="CLOSE_WAIT"} 1 +retina_tcp_state{state="ESTABLISHED"} 16 +retina_tcp_state{state="FIN_WAIT1"} 1 +retina_tcp_state{state="FIN_WAIT2"} 1 +retina_tcp_state{state="LAST_ACK"} 1 +retina_tcp_state{state="LISTEN"} 8 +retina_tcp_state{state="SYN_SENT"} 1 +retina_tcp_state{state="TIME_WAIT"} 89 +# HELP retina_udp_connection_stats UDP connections Statistics +# TYPE retina_udp_connection_stats gauge +retina_udp_connection_stats{statistic_name="ACTIVE"} 5 +``` + +If you are not seeing metrics like the above, then the plugin is not running correctly. Please check retina config map and make sure the plugins is enabled. + +### Check if Managed Prometheus is scraping Retina metrics + +If you are using managed Prometheus, you can check if it is scraping the retina metrics by following below steps. + +1. 
Check if AMA-Metrics Pods are running + + ```shell + kubectl get pods -n kube-system -l dsName=ama-metrics-node + ``` + +2. If they are running, then port forward one of the AMA metrics Pods to see if it can identify retina Pods + + ```shell + kubectl port-forward -n kube-system 9090:9090 + ``` + +3. Open the following URL in your browser to see if retina Pods are being discovered + + ```shell + http://localhost:9090/targets + ``` + + ![alt text](imgs/prometheus-retina-targets.png) + + a. check if service discovery is recognizing retina Pods + + ```shell + http://localhost:9090/service-discovery#retina-pods + ``` + + ![alt text](imgs/prom-retina-service-discovery.png) + +4. If Retina Pods are not discovered then please check prometheus configuration and make sure the Retina Pod's job is added to the scrape config. + + ```shell + http://localhost:9090/config + ``` + + ![alt text](imgs/prom-retina-config.png) + +5. If the Retina Pod's scrape job is not present in prom config, please check ama-metrics-node config map for the presence of this job or the setting called + + ```shell + kubectl get configmap ama-metrics-prometheus-config-node -n kube-system -oyaml + ``` diff --git a/docs/troubleshooting/capture.md b/docs/troubleshooting/capture.md new file mode 100644 index 000000000..cf8d34f24 --- /dev/null +++ b/docs/troubleshooting/capture.md @@ -0,0 +1,55 @@ +# Capture TSG + +Retina Capture create Kubernetes Jobs and translate the flags into the specification of the Pod. Locating the Pods by the Capture is essential to troubleshooting Retina Capture issues. + +## Find the Pod created for each Capture + +Run the following command to list captures: + +```shell +kubectl retina capture list -n +``` + +or get jobs through the capture name: + +```shell +kubectl get job --selector capture-name= -n +``` + +and then get the Pod through the job name by: + +```shell +kubectl get pod -n --seletor job-name= +``` + +## Capture Pod ImagePullBackOff + +By default, kubectl retina plugin will eventually create Capture Pods from the [MCR](https://mcr.microsoft.com/) image with the same version as the kubectl plugin. If the kubectl plugin is built from a local environment, the MCR image cannot be found. Check [capture CLI Debug Mode](../captures/cli.md#Debug_mode) for local development and testing. + +## Windows node allows only one capture job running at one time + +Retina Capture utilizes [`netsh`](https://learn.microsoft.com/en-us/windows-server/networking/technologies/netsh/netsh-contexts) to capture network traffic on Windows, and as only one netsh trace session is allowed on one Windows node, when a Windows node contains a pending or running Capture Pod, creating a Capture targeting the node will raise an error. +To continue creating a capture on this windows node, you may delete the Capture per + +- List Capture Pods running on the Windows node + +```shell +kubectl get pod -A -o wide |grep +``` + +- Get Capture name from the Pod + +```shell +kubectl describe pod -n |grep capture-name| awk -F "=" '{print $2}' +``` + +- Delete the Capture + +```shelll +kubectl retina capture delete --namespace --name +``` + +## Network packets are not correctly captured + +In case you want to confirm how the network capture is performed, please check tcpdump.log/netsh.log in Capture bundle, or get the pod container log if the Capture is not deleted. +The pcap file name includes UTC time, which records the network capture start time, which might also be helpful. 
diff --git a/docs/troubleshooting/imgs/prom-retina-config.png b/docs/troubleshooting/imgs/prom-retina-config.png new file mode 100644 index 000000000..5ac318155 Binary files /dev/null and b/docs/troubleshooting/imgs/prom-retina-config.png differ diff --git a/docs/troubleshooting/imgs/prom-retina-service-discovery.png b/docs/troubleshooting/imgs/prom-retina-service-discovery.png new file mode 100644 index 000000000..25c7439af Binary files /dev/null and b/docs/troubleshooting/imgs/prom-retina-service-discovery.png differ diff --git a/docs/troubleshooting/imgs/prometheus-retina-targets.png b/docs/troubleshooting/imgs/prometheus-retina-targets.png new file mode 100644 index 000000000..70d53dec0 Binary files /dev/null and b/docs/troubleshooting/imgs/prometheus-retina-targets.png differ diff --git a/docs/unsorted/README.md b/docs/unsorted/README.md new file mode 100644 index 000000000..d972012f3 --- /dev/null +++ b/docs/unsorted/README.md @@ -0,0 +1 @@ +# Unsorted diff --git a/docs/unsorted/aks-setup.md b/docs/unsorted/aks-setup.md new file mode 100644 index 000000000..8ee13f6bc --- /dev/null +++ b/docs/unsorted/aks-setup.md @@ -0,0 +1,18 @@ +# Setup and Deploy Retina on AKS + +1. Create a resource group +`az group create --name --location ` +2. Create a cluster for the resource group and use your ssh key +`az aks create -g -n --node-count 2 --ssh-key-value ~/.ssh/id_rsa.pub --network-plugin azure` +3. Set credentials +`az aks get-credentials --name --resource-group --overwrite-existing` +4. Have a container registry setup and attach to the cluster (Create your own or login to an existing one) +`az acr login --name ` +5. Additional flags for acr login if using an existing container registry: `--username --password ` +6. Attach credentials to your cluster/resource-group to the container registry +`az aks update -n -g --attach-acr ` +7. Build the docker image for retina `make retina-image` +8. Push to container registry: +`docker push /retina:` +9. 
Installing retina onto the cluster - +`helm install retina /deploy/manifests/controller/helm/retina/ --create-namespace --namespace retina --dependency-update` diff --git a/go.mod b/go.mod new file mode 100644 index 000000000..38fd5bd6c --- /dev/null +++ b/go.mod @@ -0,0 +1,219 @@ +module github.com/microsoft/retina + +go 1.21.1 + +require ( + github.com/go-chi/chi/v5 v5.0.11 + github.com/google/uuid v1.6.0 + github.com/prometheus/client_golang v1.18.0 + github.com/spf13/cobra v1.8.0 + go.uber.org/zap v1.26.0 + k8s.io/client-go v0.29.2 +) + +require ( + code.cloudfoundry.org/clock v1.0.0 // indirect + github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect + github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect + github.com/MakeNowJust/heredoc v1.0.0 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/chai2010/gettext-go v1.0.2 // indirect + github.com/containerd/cgroups/v3 v3.0.2 // indirect + github.com/containerd/containerd v1.7.13 // indirect + github.com/containerd/continuity v0.4.2 // indirect + github.com/containerd/errdefs v0.1.0 // indirect + github.com/containerd/fifo v1.1.0 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/containerd/ttrpc v1.2.3-0.20231030150553-baadfd8e7956 // indirect + github.com/containerd/typeurl/v2 v2.1.1 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/distribution/reference v0.5.0 // indirect + github.com/docker/docker v25.0.3+incompatible // indirect + github.com/docker/go-connections v0.5.0 // indirect + github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch v5.6.0+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.6.0 // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-errors/errors v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-logr/zapr v1.2.3 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/go-openapi/analysis v0.21.4 // indirect + github.com/go-openapi/errors v0.20.4 // indirect + github.com/go-openapi/jsonpointer v0.20.0 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/loads v0.21.2 // indirect + github.com/go-openapi/spec v0.20.9 // indirect + github.com/go-openapi/strfmt v0.21.7 // indirect + github.com/go-openapi/swag v0.22.4 // indirect + github.com/go-openapi/validate v0.22.1 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/godbus/dbus/v5 v5.1.0 // indirect + github.com/gofrs/uuid v4.2.0+incompatible // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/google/btree v1.1.2 // indirect + github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // 
indirect + github.com/google/gopacket v1.1.19 // indirect + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect + github.com/hashicorp/hcl v1.0.1-vault-5 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/josharian/native v1.1.0 // indirect + github.com/klauspost/compress v1.17.3 // indirect + github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect + github.com/mdlayher/netlink v1.7.2 // indirect + github.com/mdlayher/socket v0.4.1 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/moby/locker v1.0.1 // indirect + github.com/moby/moby v25.0.3+incompatible // indirect + github.com/moby/spdystream v0.2.0 // indirect + github.com/moby/sys/mountinfo v0.7.1 // indirect + github.com/moby/sys/sequential v0.5.0 // indirect + github.com/moby/sys/signal v0.7.0 // indirect + github.com/moby/sys/user v0.1.0 // indirect + github.com/moby/term v0.5.0 // indirect + github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/nxadm/tail v1.4.11 // indirect + github.com/oklog/ulid v1.3.1 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0 // indirect + github.com/opencontainers/runtime-spec v1.2.0 // indirect + github.com/opencontainers/selinux v1.11.0 // indirect + github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect + github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/prometheus/common v0.45.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/s3rj1k/go-fanotify/fanotify v0.0.0-20210917134616-9c00a300bb7a // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sasha-s/go-deadlock v0.3.1 // indirect + github.com/shirou/gopsutil/v3 v3.23.2 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect + github.com/tklauser/go-sysconf v0.3.11 // indirect + github.com/tklauser/numcpus v0.7.0 // indirect + github.com/vishvananda/netns v0.0.4 // indirect + github.com/xlab/treeprint v1.2.0 // indirect + github.com/yusufpapurcu/wmi v1.2.3 // indirect + go.mongodb.org/mongo-driver 
v1.11.3 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect + go.starlark.net v0.0.0-20230814145427-12f4cb8177e4 // indirect + golang.org/x/exp v0.0.0-20231108232855-2478ac86f678 // indirect + golang.org/x/mod v0.14.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.16.1 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect + google.golang.org/grpc v1.62.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect + k8s.io/component-base v0.29.2 // indirect + k8s.io/cri-api v0.29.2 // indirect + sigs.k8s.io/kustomize/api v0.14.0 // indirect + sigs.k8s.io/kustomize/kyaml v0.14.3 // indirect +) + +require ( + github.com/autom8ter/dagger v1.0.1 + github.com/go-chi/chi v4.1.2+incompatible + github.com/go-logr/logr v1.4.1 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/google/go-cmp v0.6.0 + github.com/google/gofuzz v1.2.0 // indirect + github.com/imdario/mergo v0.3.16 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/stretchr/testify v1.8.4 + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/net v0.20.0 // indirect + golang.org/x/oauth2 v0.16.0 // indirect + golang.org/x/sync v0.6.0 + golang.org/x/sys v0.17.0 + golang.org/x/term v0.17.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/protobuf v1.32.0 + gopkg.in/yaml.v2 v2.4.0 + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/api v0.29.2 + k8s.io/apimachinery v0.29.2 + k8s.io/klog/v2 v2.110.1 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/utils v0.0.0-20231127182322-b307cd553661 + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.4.0 +) + +require ( + github.com/Azure/azure-container-networking/zapai v0.0.3 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 + github.com/Microsoft/hcsshim v0.12.0-rc.3 + github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5 + github.com/florianl/go-tc v0.4.3 + github.com/golang/mock v1.6.0 + github.com/inspektor-gadget/inspektor-gadget v0.25.1-0.20240223044605-4ac24c3e3b7f + github.com/jellydator/ttlcache/v3 v3.1.1 + github.com/jsternberg/zap-logfmt v1.3.0 + github.com/microsoft/ApplicationInsights-Go v0.4.4 + github.com/onsi/ginkgo v1.16.5 + github.com/onsi/ginkgo/v2 v2.15.0 + github.com/onsi/gomega v1.31.1 + github.com/pkg/errors v0.9.1 + github.com/prometheus/client_model v0.5.0 + github.com/safchain/ethtool v0.3.0 + github.com/spf13/viper v1.18.2 + github.com/vishvananda/netlink v1.2.1-beta.2.0.20230807190133-6afddb37c1f0 + go.opentelemetry.io/otel v1.23.1 + go.opentelemetry.io/otel/metric v1.23.1 + go.opentelemetry.io/otel/trace v1.23.1 + gopkg.in/natefinch/lumberjack.v2 v2.2.1 + gotest.tools v2.2.0+incompatible + gotest.tools/v3 v3.5.1 + k8s.io/apiextensions-apiserver v0.29.2 + k8s.io/cli-runtime v0.29.2 + k8s.io/kubectl v0.29.1 + 
sigs.k8s.io/controller-runtime v0.16.3 +) + +// Required for inspektor-gadget DNS gadget. +replace sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.13.1-0.20230315234915-a26de2d610c3 + +replace github.com/vishvananda/netns => github.com/inspektor-gadget/netns v0.0.5-0.20230524185006-155d84c555d6 + diff --git a/go.sum b/go.sum new file mode 100644 index 000000000..4005e4c9e --- /dev/null +++ b/go.sum @@ -0,0 +1,892 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= +code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o= +code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 h1:59MxjQVfjXsBpLy+dbd2/ELV5ofnUkUZBvWSC85sheA= +github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0/go.mod h1:OahwfttHWG6eJ0clwcfBAHoDI6X/LV/15hx/wlMZSrU= +github.com/Azure/azure-container-networking/zapai v0.0.3 h1:73druF1cnne5Ign/ztiXP99Ss5D+UJ80EL2mzPgNRhk= +github.com/Azure/azure-container-networking/zapai v0.0.3/go.mod h1:XV/aKJQAV6KqV4HQtZlDyxg2z7LaY9rsX8dqwyWFmUI= +github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 h1:AMf7YbZOZIW5b66cXNHMWWT/zkjhz5+a+k/3x40EO7E= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1/go.mod h1:uwfk06ZBcvL/g4VHNjurPfVln9NMbsk2XIZxJ+hu81k= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 h1:WpB/QDNLpMw72xHJc34BNNykqSOeEJDAWkhf0u12/Jk= +github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= +github.com/Microsoft/go-winio v0.6.1 
h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/hcsshim v0.12.0-rc.3 h1:5GNGrobGs/sN/0nFO21W9k4lFn+iXXZAE8fCZbmdRak= +github.com/Microsoft/hcsshim v0.12.0-rc.3/go.mod h1:WuNfcaYNaw+KpCEsZCIM6HCEmu0c5HfXpi+dDSmveP0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/autom8ter/dagger v1.0.1 h1:z6SZwfH/dFxPniOukgeCir3a4ki93OPL1DrnuFTmtJA= +github.com/autom8ter/dagger v1.0.1/go.mod h1:asg7HPk+oMpvi9lu9I+IHGlz8FKSxjf61PwKe+CfVeA= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5 h1:BjkPE3785EwPhhyuFkbINB+2a1xATwk8SNDWnJiD41g= +github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5/go.mod h1:jtAfVaU/2cu1+wdSRPWE2c1N2qeAA3K4RH9pYgqwets= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= +github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/checkmate v1.0.3 h1:CQC5eOmlAZeEjPrVZY3ZwEBH64lHlx9mXYdUehEwI5w= +github.com/cilium/checkmate v1.0.3/go.mod h1:KiBTasf39/F2hf2yAmHw21YFl3hcEyP4Yk6filxc12A= +github.com/cilium/cilium v1.15.0-pre.0 h1:PfPAKZbaOxBOkxb1YdB93ZZDCwVO1OWQwJyKBBDecwU= +github.com/cilium/cilium v1.15.0-pre.0/go.mod h1:w91O6q21E3HWocIELRHHvbb+k7Aie/YatFfGTK9gA9Y= +github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= +github.com/cilium/ebpf v0.8.1/go.mod 
h1:f5zLIM0FSNuAkSyLAN7X+Hy6yznlF1mNiWUMfxMtrgk= +github.com/cilium/ebpf v0.12.3 h1:8ht6F9MquybnY97at+VDZb3eQQr8ev79RueWeVaEcG4= +github.com/cilium/ebpf v0.12.3/go.mod h1:TctK1ivibvI3znr66ljgi4hqOT8EYQjz1KWBfb1UVgM= +github.com/cilium/ebpf v0.13.2/go.mod h1:DHp1WyrLeiBh19Cf/tfiSMhqheEiK8fXFZ4No0P1Hso= +github.com/cilium/proxy v0.0.0-20230825101411-5a76016dde9a h1:meOI52EXgI3fY/9zYTaL2FPVh8Zh3bIvDrxrrcu9TDU= +github.com/cilium/proxy v0.0.0-20230825101411-5a76016dde9a/go.mod h1:z8nwlTdLi7qTWTS6Ccq7S0+A63QNob+efxn7Kh6EY90= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0= +github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE= +github.com/containerd/containerd v1.7.13 h1:wPYKIeGMN8vaggSKuV1X0wZulpMz4CrgEsZdaCyB6Is= +github.com/containerd/containerd v1.7.13/go.mod h1:zT3up6yTRfEUa6+GsITYIJNgSVL9NQ4x4h1RPzk0Wu4= +github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM= +github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= +github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM= +github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0= +github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY= +github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/ttrpc v1.2.2 h1:9vqZr0pxwOF5koz6N0N3kJ0zDHokrcPxIR/ZR2YFtOs= +github.com/containerd/ttrpc v1.2.2/go.mod h1:sIT6l32Ph/H9cvnJsfXM5drIVzTr5A2flTf1G5tYZak= +github.com/containerd/ttrpc v1.2.3-0.20231030150553-baadfd8e7956/go.mod h1:ieWsXucbb8Mj9PH0rXCw1i8IunRbbAiDkpXkbfflWBM= +github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4= +github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= +github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/dnaeon/go-vcr v1.2.0/go.mod 
h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/docker/docker v25.0.2+incompatible h1:/OaKeauroa10K4Nqavw4zlhcDq/WBcPMc5DbjOGgozY= +github.com/docker/docker v25.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= +github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/florianl/go-tc v0.4.3 h1:xpobG2gFNvEqbclU07zjddALSjqTQTWJkxg5/kRYDpw= +github.com/florianl/go-tc v0.4.3/go.mod h1:uvp6pIlOw7Z8hhfnT5M4+V1hHVgZWRZwwMS8Z0JsRxc= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec= +github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= +github.com/go-chi/chi/v5 v5.0.11 h1:BnpYbFZ3T3S1WMpD79r7R5ThWX40TaFB7L31Y8xqSwA= +github.com/go-chi/chi/v5 v5.0.11/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= +github.com/go-errors/errors v1.4.2 
h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= +github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= +github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= +github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= +github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M= +github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ= +github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA= +github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= +github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= +github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= +github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= +github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc= +github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ= +github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= +github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8= +github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= +github.com/go-openapi/strfmt v0.21.1/go.mod 
h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= +github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= +github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= +github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= +github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= +github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= +github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= +github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= +github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= +github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= +github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= +github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= +github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.1.0/go.mod 
h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= +github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= +github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= +github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= +github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= 
+github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU= +github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod 
h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= +github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= +github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/inspektor-gadget/inspektor-gadget v0.20.0 h1:U6Xz5ujbA8liZmPJoBju/cWls5Ms23weDTtaDaLc9qI= +github.com/inspektor-gadget/inspektor-gadget v0.20.0/go.mod h1:GAPTHBn7HEeFLJy61E/+HULVLcmNald6oWW0O9I7mD0= +github.com/inspektor-gadget/inspektor-gadget v0.25.1-0.20240223044605-4ac24c3e3b7f h1:RHfZsRHMjl+nv5ZVE2iV26wKhsNNnVFxQMvIuJsXsBU= +github.com/inspektor-gadget/inspektor-gadget v0.25.1-0.20240223044605-4ac24c3e3b7f/go.mod h1:cC5Noa1sI020aQXOGSyWjjUJT/CdeGjr18hqqw6l8mM= +github.com/inspektor-gadget/netns v0.0.5-0.20230524185006-155d84c555d6 h1:fQqkJ+WkYfzy6BoUh32fr9uYrXfOGtsfw0skMQkfOic= +github.com/inspektor-gadget/netns v0.0.5-0.20230524185006-155d84c555d6/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= +github.com/jellydator/ttlcache/v3 v3.1.1 h1:RCgYJqo3jgvhl+fEWvjNW8thxGWsgxi+TPhRir1Y9y8= +github.com/jellydator/ttlcache/v3 v3.1.1/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/josharian/native v0.0.0-20200817173448-b6b71def0850/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/josharian/native v1.0.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA= +github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= +github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= +github.com/jsimonetti/rtnetlink v0.0.0-20201009170750-9c6f07d100c1/go.mod h1:hqoO/u39cqLeBLebZ8fWdE96O7FxrAsRYhnVOdgHxok= +github.com/jsimonetti/rtnetlink v0.0.0-20201216134343-bde56ed16391/go.mod h1:cR77jAZG3Y3bsb8hF6fHJbFoyFukLFOkQ98S0pQz3xw= +github.com/jsimonetti/rtnetlink v0.0.0-20201220180245-69540ac93943/go.mod h1:z4c53zj6Eex712ROyh8WI0ihysb5j2ROyV42iNogmAs= +github.com/jsimonetti/rtnetlink 
v0.0.0-20210122163228-8d122574c736/go.mod h1:ZXpIyOK59ZnN7J0BV99cZUPmsqDRZ3eq5X+st7u/oSA= +github.com/jsimonetti/rtnetlink v0.0.0-20210212075122-66c871082f2b/go.mod h1:8w9Rh8m+aHZIG69YPGGem1i5VzoyRC8nw2kA8B+ik5U= +github.com/jsimonetti/rtnetlink v0.0.0-20210525051524-4cc836578190/go.mod h1:NmKSdU4VGSiv1bMsdqNALI4RSvvjtz65tTMCnD05qLo= +github.com/jsimonetti/rtnetlink v0.0.0-20211022192332-93da33804786 h1:N527AHMa793TP5z5GNAn/VLPzlc0ewzWdeP/25gDfgQ= +github.com/jsimonetti/rtnetlink v0.0.0-20211022192332-93da33804786/go.mod h1:v4hqbTdfQngbVSZJVWUhGE/lbTFf9jb+ygmNUDQMuOs= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jsternberg/zap-logfmt v1.3.0 h1:z1n1AOHVVydOOVuyphbOKyR4NICDQFiJMn1IK5hVQ5Y= +github.com/jsternberg/zap-logfmt v1.3.0/go.mod h1:N3DENp9WNmCZxvkBD/eReWwz1149BK6jEN9cQ4fNwZE= +github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= +github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA= +github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod 
h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= +github.com/mdlayher/ethtool v0.0.0-20210210192532-2b88debcdd43/go.mod h1:+t7E0lkKfbBsebllff1xdTmyJt8lH37niI6kwFk9OTo= +github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= +github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= +github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= +github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= +github.com/mdlayher/netlink v1.1.1/go.mod h1:WTYpFb/WTvlRJAyKhZL5/uy69TDDpHHu2VZmb2XgV7o= +github.com/mdlayher/netlink v1.2.0/go.mod h1:kwVW1io0AZy9A1E2YYgaD4Cj+C+GPkU6klXCMzIJ9p8= +github.com/mdlayher/netlink v1.2.1/go.mod h1:bacnNlfhqHqqLo4WsYeXSqfyXkInQ9JneWI68v1KwSU= +github.com/mdlayher/netlink v1.2.2-0.20210123213345-5cc92139ae3e/go.mod h1:bacnNlfhqHqqLo4WsYeXSqfyXkInQ9JneWI68v1KwSU= +github.com/mdlayher/netlink v1.3.0/go.mod h1:xK/BssKuwcRXHrtN04UBkwQ6dY9VviGGuriDdoPSWys= +github.com/mdlayher/netlink v1.4.0/go.mod h1:dRJi5IABcZpBD2A3D0Mv/AiX8I9uDEu5oGkAVrekmf8= +github.com/mdlayher/netlink v1.4.1/go.mod h1:e4/KuJ+s8UhfUpO9z00/fDZZmhSrs+oxyqAS9cNgn6Q= +github.com/mdlayher/netlink v1.6.0/go.mod h1:0o3PlBmGst1xve7wQ7j/hwpNaFaH4qCRyWCdcZk8/vA= +github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= +github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= +github.com/mdlayher/socket v0.0.0-20210307095302-262dc9984e00/go.mod h1:GAFlyu4/XV68LkQKYzKhIo/WW7j3Zi0YRAz/BOoanUc= +github.com/mdlayher/socket v0.1.1/go.mod h1:mYV5YIZAfHh4dzDVzI8x8tWLWCliuX8Mon5Awbj+qDs= +github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= +github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= +github.com/microsoft/ApplicationInsights-Go v0.4.4 h1:G4+H9WNs6ygSCe6sUyxRc2U81TI5Es90b2t/MwX5KqY= +github.com/microsoft/ApplicationInsights-Go v0.4.4/go.mod h1:fKRUseBqkw6bDiXTs3ESTiU/4YTIHsQS4W3fP2ieF4U= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/moby v25.0.2+incompatible 
h1:g2oKRI7vgWkiPHZbBghaPbcV/SuKP1g/YLx0I2nxFT4= +github.com/moby/moby v25.0.2+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= +github.com/moby/moby v25.0.3+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g= +github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/sys/signal v0.7.0 h1:25RW3d5TnQEoKvRbEKUGay6DCQ46IxAVTT9CUMgmsSI= +github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= +github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg= +github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= +github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo 
v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= +github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.31.1 h1:KYppCUK+bUgAZwHOu7EXVBKyQA6ILvOESHkn/tgoqvo= +github.com/onsi/gomega v1.31.1/go.mod h1:y40C95dwAD1Nz36SsEnxvfFe8FFfNxzI5eJ0EYGyAy0= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc6 h1:XDqvyKsJEbRtATzkgItUqBA7QHk58yxX1Ov9HERHNqU= +github.com/opencontainers/image-spec v1.1.0-rc6/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= +github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= +github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= +github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b h1:FfH+VrHHk6Lxt9HdVS0PXzSXFyS2NbZKXv33FYPol0A= +github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b/go.mod h1:AC62GU6hc0BrNm+9RK9VSiwa/EUe1bkIeFORAMcHvJU= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib 
v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/s3rj1k/go-fanotify/fanotify v0.0.0-20210917134616-9c00a300bb7a h1:np2nR32/A/VcOG9Hn+IOPA8kMk1gbBzK5LpSsgq5pJI= +github.com/s3rj1k/go-fanotify/fanotify v0.0.0-20210917134616-9c00a300bb7a/go.mod h1:wiP6GQ2T378F+YIyuNw7yXtBxJZR+fqrrn1Z6UHZi0Q= +github.com/safchain/ethtool v0.3.0 h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0= +github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= +github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= +github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shirou/gopsutil/v3 v3.23.2 h1:PAWSuiAszn7IhPMBtXsbSCafej7PqUOvY6YywlQUExU= +github.com/shirou/gopsutil/v3 v3.23.2/go.mod h1:gv0aQw33GLo3pG8SiWKiQrbDzbRY1K80RyZJ7V4Th1M= +github.com/sirupsen/logrus v1.4.0/go.mod 
h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= +github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod 
h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= +github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= +github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= +github.com/tklauser/numcpus v0.7.0 h1:yjuerZP127QG9m5Zh/mSO4wqurYil27tHrqwRoRjpr4= +github.com/tklauser/numcpus v0.7.0/go.mod h1:bb6dMVcj8A42tSE7i32fsIUCbQNllK5iDguyOZRUzAY= +github.com/vishvananda/netlink v1.2.1-beta.2.0.20230807190133-6afddb37c1f0 h1:CLsXiDYQjYqJVntHkQZL2AW0R8BrvJu1K/hbs+2Q+EQ= +github.com/vishvananda/netlink v1.2.1-beta.2.0.20230807190133-6afddb37c1f0/go.mod h1:whJevzBpTrid75eZy99s3DqCmy05NfibNaF2Ol5Ox5A= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= +github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= +github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= +github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= +github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= +go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= +go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= +go.mongodb.org/mongo-driver v1.11.3 h1:Ql6K6qYHEzB6xvu4+AU0BoRoqf9vFPcc4o7MUIdPW8Y= +go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= +go.opentelemetry.io/otel v1.23.1 h1:Za4UzOqJYS+MUczKI320AtqZHZb7EqxO00jAHE0jmQY= +go.opentelemetry.io/otel v1.23.1/go.mod h1:Td0134eafDLcTS4y+zQ26GE8u3dEuRBiBCTUIRHaikA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace 
v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= +go.opentelemetry.io/otel/metric v1.23.1 h1:PQJmqJ9u2QaJLBOELl1cxIdPcpbwzbkjfEyelTl2rlo= +go.opentelemetry.io/otel/metric v1.23.1/go.mod h1:mpG2QPlAfnK8yNhNJAxDZruU9Y1/HubbC+KyH8FaCWI= +go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/trace v1.23.1 h1:4LrmmEd8AU2rFvU1zegmvqW7+kWarxtNOPyeL6HmYY8= +go.opentelemetry.io/otel/trace v1.23.1/go.mod h1:4IpnpJFwr1mo/6HL8XIPJaE9y0+u1KcVmuW7dwFSVrI= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.starlark.net v0.0.0-20230814145427-12f4cb8177e4 h1:Ydko8M6UfXgvSpGOnbAjRMQDIvBheUsjBjkm6Azcpf4= +go.starlark.net v0.0.0-20230814145427-12f4cb8177e4/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI= +go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= +golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20231108232855-2478ac86f678 
h1:mchzmB1XO2pMaKFRqk/+MV3mgGG96aqaPXaMifQU47w= +golang.org/x/exp v0.0.0-20231108232855-2478ac86f678/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201216054612-986b41b23924/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod 
h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210928044308-7d9f5e0b762b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= +golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= +golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201118182958-a01c418693c7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210110051926-789bb1bd4061/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210123111255-9b0068b26619/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210216163648-f7da38b97c65/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= +golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ= +google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY= +google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= +google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo= +google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4= +google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= +google.golang.org/genproto/googleapis/rpc 
v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0= +google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 
h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.29.1 h1:DAjwWX/9YT7NQD4INu49ROJuZAAAP/Ijki48GUPzxqw= +k8s.io/api v0.29.1/go.mod h1:7Kl10vBRUXhnQQI8YR/R327zXC8eJ7887/+Ybta+RoQ= +k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0= +k8s.io/apiextensions-apiserver v0.29.1 h1:S9xOtyk9M3Sk1tIpQMu9wXHm5O2MX6Y1kIpPMimZBZw= +k8s.io/apiextensions-apiserver v0.29.1/go.mod h1:zZECpujY5yTW58co8V2EQR4BD6A9pktVgHhvc0uLfeU= +k8s.io/apiextensions-apiserver v0.29.2/go.mod h1:aLfYjpA5p3OwtqNXQFkhJ56TB+spV8Gc4wfMhUA3/b8= +k8s.io/apimachinery v0.29.1 h1:KY4/E6km/wLBguvCZv8cKTeOwwOBqFNjwJIdMkMbbRc= +k8s.io/apimachinery v0.29.1/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= +k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= +k8s.io/cli-runtime v0.29.1 h1:By3WVOlEWYfyxhGko0f/IuAOLQcbBSMzwSaDren2JUs= +k8s.io/cli-runtime v0.29.1/go.mod h1:vjEY9slFp8j8UoMhV5AlO8uulX9xk6ogfIesHobyBDU= +k8s.io/cli-runtime v0.29.2/go.mod h1:KLisYYfoqeNfO+MkTWvpqIyb1wpJmmFJhioA0xd4MW8= +k8s.io/client-go v0.29.1 h1:19B/+2NGEwnFLzt0uB5kNJnfTsbV8w6TgQRz9l7ti7A= +k8s.io/client-go v0.29.1/go.mod h1:TDG/psL9hdet0TI9mGyHJSgRkW3H9JZk2dNEUS7bRks= +k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA= +k8s.io/component-base v0.29.1 h1:MUimqJPCRnnHsskTTjKD+IC1EHBbRCVyi37IoFBrkYw= +k8s.io/component-base v0.29.1/go.mod h1:fP9GFjxYrLERq1GcWWZAE3bqbNcDKDytn2srWuHTtKc= +k8s.io/component-base v0.29.2/go.mod h1:BfB3SLrefbZXiBfbM+2H1dlat21Uewg/5qtKOl8degM= +k8s.io/cri-api v0.29.1 h1:pQwYDahnAX9K8KtdV8PD1eeNexMJojEj1t/5kAMX61E= +k8s.io/cri-api v0.29.1/go.mod h1:9fQTFm+wi4FLyqrkVUoMJiUB3mE74XrVvHz8uFY/sSw= +k8s.io/cri-api v0.29.2/go.mod h1:9fQTFm+wi4FLyqrkVUoMJiUB3mE74XrVvHz8uFY/sSw= +k8s.io/klog/v2 
v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= +k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/kubectl v0.29.1 h1:rWnW3hi/rEUvvg7jp4iYB68qW5un/urKbv7fu3Vj0/s= +k8s.io/kubectl v0.29.1/go.mod h1:SZzvLqtuOJYSvZzPZR9weSuP0wDQ+N37CENJf0FhDF4= +k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI= +k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.13.1-0.20230315234915-a26de2d610c3 h1:fic0YtUGSr79nv8vn3ziNZJrPZsm64KT/Fd/bc7Q6xY= +sigs.k8s.io/controller-runtime v0.13.1-0.20230315234915-a26de2d610c3/go.mod h1:Qox07m8Gh7skSeOfppEWllPxNMhA7+b93D8Qjj6rBlQ= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/kustomize/api v0.14.0 h1:6+QLmXXA8X4eDM7ejeaNUyruA1DDB3PVIjbpVhDOJRA= +sigs.k8s.io/kustomize/api v0.14.0/go.mod h1:vmOXlC8BcmcUJQjiceUbcyQ75JBP6eg8sgoyzc+eLpQ= +sigs.k8s.io/kustomize/kyaml v0.14.3 h1:WpabVAKZe2YEp/irTSHwD6bfjwZnTtSDewd2BVJGMZs= +sigs.k8s.io/kustomize/kyaml v0.14.3/go.mod h1:npvh9epWysfQ689Rtt/U+dpOJDTBn8kUnF1O6VzvmZA= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/hack/tools/go.mod b/hack/tools/go.mod new file mode 100644 index 000000000..04ed91e63 --- /dev/null +++ b/hack/tools/go.mod @@ -0,0 +1,201 @@ +module github.com/microsoft/retina/hack/tools + +go 1.21 + +require ( + github.com/golang/mock v1.6.0 + github.com/golangci/golangci-lint v1.45.2 + github.com/onsi/ginkgo v1.16.5 + k8s.io/api v0.26.1 + k8s.io/apimachinery v0.26.1 + k8s.io/client-go v0.26.1 + mvdan.cc/gofumpt v0.3.1 + sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20211110210527-619e6b92dab9 + sigs.k8s.io/controller-tools v0.11.3 +) + +require ( + 4d63.com/gochecknoglobals v0.1.0 // indirect + github.com/Antonboom/errname v0.1.5 // indirect + github.com/Antonboom/nilnil v0.1.0 // indirect + github.com/BurntSushi/toml v1.0.0 // indirect + github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect + github.com/Masterminds/semver v1.5.0 // indirect + github.com/OpenPeeDeeP/depguard v1.1.0 // indirect + github.com/alexkohler/prealloc v1.0.0 // indirect + github.com/ashanbrown/forbidigo v1.3.0 // indirect + github.com/ashanbrown/makezero v1.1.1 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bkielbasa/cyclop v1.2.0 // indirect + github.com/blizzy78/varnamelen v0.6.1 // indirect + github.com/bombsimon/wsl/v3 v3.3.0 // indirect + github.com/breml/bidichk v0.2.2 // indirect + github.com/breml/errchkjson v0.2.3 // indirect + github.com/butuzov/ireturn v0.1.1 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/charithe/durationcheck v0.0.9 // indirect + github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af // indirect + github.com/daixiang0/gci v0.3.3 // indirect + 
github.com/davecgh/go-spew v1.1.1 // indirect + github.com/denis-tingaikin/go-header v0.4.3 // indirect + github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/esimonov/ifshort v1.0.4 // indirect + github.com/ettle/strcase v0.1.1 // indirect + github.com/fatih/color v1.13.0 // indirect + github.com/fatih/structtag v1.2.0 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/fzipp/gocyclo v0.4.0 // indirect + github.com/go-critic/go-critic v0.6.2 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/zapr v1.2.0 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.20.0 // indirect + github.com/go-openapi/swag v0.19.14 // indirect + github.com/go-toolsmith/astcast v1.0.0 // indirect + github.com/go-toolsmith/astcopy v1.0.0 // indirect + github.com/go-toolsmith/astequal v1.0.1 // indirect + github.com/go-toolsmith/astfmt v1.0.0 // indirect + github.com/go-toolsmith/astp v1.0.0 // indirect + github.com/go-toolsmith/strparse v1.0.0 // indirect + github.com/go-toolsmith/typep v1.0.2 // indirect + github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b // indirect + github.com/gobuffalo/flect v0.3.0 // indirect + github.com/gobwas/glob v0.2.3 // indirect + github.com/gofrs/flock v0.8.1 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect + github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect + github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 // indirect + github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a // indirect + github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect + github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect + github.com/golangci/misspell v0.3.5 // indirect + github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2 // indirect + github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 // indirect + github.com/gostaticanalysis/analysisutil v0.7.1 // indirect + github.com/gostaticanalysis/comment v1.4.2 // indirect + github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect + github.com/gostaticanalysis/nilerr v0.1.1 // indirect + github.com/hashicorp/errwrap v1.0.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-version v1.4.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hexops/gotextdiff v1.0.3 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/jgautheron/goconst v1.5.1 // indirect + github.com/jingyugao/rowserrcheck v1.1.1 // indirect + github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/julz/importas v0.1.0 // indirect + github.com/kisielk/errcheck v1.6.0 // indirect + github.com/kisielk/gotool v1.0.0 // indirect + github.com/kulti/thelper v0.5.1 // indirect + github.com/kunwardeep/paralleltest v1.0.3 // indirect + github.com/kyoh86/exportloopref v0.1.8 // indirect + github.com/ldez/gomoddirectives v0.2.2 // indirect + github.com/ldez/tagliatelle v0.3.1 // 
indirect + github.com/leonklingele/grouper v1.1.0 // indirect + github.com/magiconair/properties v1.8.5 // indirect + github.com/mailru/easyjson v0.7.6 // indirect + github.com/maratori/testpackage v1.0.1 // indirect + github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 // indirect + github.com/mattn/go-colorable v0.1.12 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect + github.com/mattn/go-runewidth v0.0.9 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect + github.com/mbilski/exhaustivestruct v1.2.0 // indirect + github.com/mgechev/revive v1.1.4 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/mapstructure v1.4.3 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/moricho/tparallel v0.2.1 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/nakabonne/nestif v0.3.1 // indirect + github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect + github.com/nishanths/exhaustive v0.7.11 // indirect + github.com/nishanths/predeclared v0.2.1 // indirect + github.com/nxadm/tail v1.4.8 // indirect + github.com/olekukonko/tablewriter v0.0.5 // indirect + github.com/onsi/ginkgo/v2 v2.6.1 // indirect + github.com/pelletier/go-toml v1.9.4 // indirect + github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/polyfloyd/go-errorlint v0.0.0-20211125173453-6d6d39c5bb8b // indirect + github.com/prometheus/client_golang v1.14.0 // indirect + github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/common v0.37.0 // indirect + github.com/prometheus/procfs v0.8.0 // indirect + github.com/quasilyte/go-ruleguard v0.3.15 // indirect + github.com/quasilyte/gogrep v0.0.0-20220103110004-ffaa07af02e3 // indirect + github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 // indirect + github.com/ryancurrah/gomodguard v1.2.3 // indirect + github.com/ryanrolds/sqlclosecheck v0.3.0 // indirect + github.com/sanposhiho/wastedassign/v2 v2.0.6 // indirect + github.com/securego/gosec/v2 v2.10.0 // indirect + github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect + github.com/sirupsen/logrus v1.8.1 // indirect + github.com/sivchari/containedctx v1.0.2 // indirect + github.com/sivchari/tenv v1.4.7 // indirect + github.com/sonatard/noctx v0.0.1 // indirect + github.com/sourcegraph/go-diff v0.6.1 // indirect + github.com/spf13/afero v1.6.0 // indirect + github.com/spf13/cast v1.4.1 // indirect + github.com/spf13/cobra v1.6.1 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/viper v1.10.1 // indirect + github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect + github.com/stretchr/objx v0.4.0 // indirect + github.com/stretchr/testify v1.8.0 // indirect + github.com/subosito/gotenv v1.2.0 // indirect + github.com/sylvia7788/contextcheck v1.0.4 // indirect + github.com/tdakkota/asciicheck v0.1.1 // indirect + github.com/tetafro/godot v1.4.11 // indirect + github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 // indirect + github.com/tomarrell/wrapcheck/v2 v2.5.0 // indirect + github.com/tommy-muehle/go-mnd/v2 v2.5.0 // indirect + github.com/ultraware/funlen v0.0.3 // indirect + github.com/ultraware/whitespace v0.0.5 // 
indirect + github.com/uudashr/gocognit v1.0.5 // indirect + github.com/yagipy/maintidx v1.0.0 // indirect + github.com/yeya24/promlinter v0.1.1-0.20210918184747-d757024714a1 // indirect + gitlab.com/bosi/decorder v0.2.1 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + go.uber.org/zap v1.19.1 // indirect + golang.org/x/mod v0.8.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect + golang.org/x/sync v0.1.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect + golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect + golang.org/x/tools v0.6.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/protobuf v1.28.1 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.66.2 // indirect + gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + honnef.co/go/tools v0.2.2 // indirect + k8s.io/apiextensions-apiserver v0.26.1 // indirect + k8s.io/klog/v2 v2.80.1 // indirect + k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect + k8s.io/utils v0.0.0-20221107191617-1a15be271d1d // indirect + mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect + mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect + mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5 // indirect + sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect +) diff --git a/hack/tools/go.sum b/hack/tools/go.sum new file mode 100644 index 000000000..931f2579e --- /dev/null +++ b/hack/tools/go.sum @@ -0,0 +1,1417 @@ +4d63.com/gochecknoglobals v0.1.0 h1:zeZSRqj5yCg28tCkIV/z/lWbwvNm5qnKVS15PI8nhD0= +4d63.com/gochecknoglobals v0.1.0/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= +bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.60.0/go.mod h1:yw2G51M9IfRboUH61Us8GqCeF1PzPblB823Mn2q2eAU= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= 
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.5.0/go.mod h1:ZEwJccE3z93Z2HWvstpri00jOg7oO4UZDtKhwDwqF0w= +cloud.google.com/go/spanner v1.7.0/go.mod h1:sd3K2gZ9Fd0vMPLXzeCrF6fq4i63Q7aTLW/lBIfBkIk= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Antonboom/errname v0.1.5 h1:IM+A/gz0pDhKmlt5KSNTVAvfLMb+65RxavBXpRtCUEg= +github.com/Antonboom/errname v0.1.5/go.mod h1:DugbBstvPFQbv/5uLcRRzfrNqKE9tVdVCqWCLp6Cifo= +github.com/Antonboom/nilnil v0.1.0 h1:DLDavmg0a6G/F4Lt9t7Enrbgb3Oph6LnDE6YVsmTt74= +github.com/Antonboom/nilnil v0.1.0/go.mod h1:PhHLvRPSghY5Y7mX4TW+BHZQYo1A8flE5H20D3IPZBo= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU= +github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= 
+github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= +github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= +github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= +github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/OpenPeeDeeP/depguard v1.1.0 h1:pjK9nLPS1FwQYGGpPxoMYpe7qACHOhAWQMQzV71i49o= +github.com/OpenPeeDeeP/depguard v1.1.0/go.mod h1:JtAMzWkmFEzDPyAd+W0NHl1lvpQKTvT9jnRVsohBKpc= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= +github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/ashanbrown/forbidigo v1.3.0 h1:VkYIwb/xxdireGAdJNZoo24O4lmnEWkactplBlWTShc= +github.com/ashanbrown/forbidigo v1.3.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI= +github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s= +github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= +github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= 
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bkielbasa/cyclop v1.2.0 h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A= +github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= +github.com/blizzy78/varnamelen v0.6.1 h1:kttPCLzXFa+0nt++Cw9fb7GrSSM4KkyIAoX/vXsbuqA= +github.com/blizzy78/varnamelen v0.6.1/go.mod h1:zy2Eic4qWqjrxa60jG34cfL0VXcSwzUrIx68eJPb4Q8= +github.com/bombsimon/wsl/v3 v3.3.0 h1:Mka/+kRLoQJq7g2rggtgQsjuI/K5Efd87WX96EWFxjM= +github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= +github.com/breml/bidichk v0.2.2 h1:w7QXnpH0eCBJm55zGCTJveZEkQBt6Fs5zThIdA6qQ9Y= +github.com/breml/bidichk v0.2.2/go.mod h1:zbfeitpevDUGI7V91Uzzuwrn4Vls8MoBMrwtt78jmso= +github.com/breml/errchkjson v0.2.3 h1:97eGTmR/w0paL2SwfRPI1jaAZHaH/fXnxWTw2eEIqE0= +github.com/breml/errchkjson v0.2.3/go.mod h1:jZEATw/jF69cL1iy7//Yih8yp/mXp2CBoBr9GJwCAsY= +github.com/butuzov/ireturn v0.1.1 h1:QvrO2QF2+/Cx1WA/vETCIYBKtRjc30vesdoPUNo1EbY= +github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/charithe/durationcheck v0.0.9 h1:mPP4ucLrf/rKZiIG/a9IPXHGlh8p4CzgpyTy6EEutYk= +github.com/charithe/durationcheck v0.0.9/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= +github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af h1:spmv8nSH9h5oCQf40jt/ufBCt9j0/58u4G+rkeMqXGI= +github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af/go.mod h1:Qjyv4H3//PWVzTeCezG2b9IRn6myJxJSr4TD/xo6ojU= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod 
h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/daixiang0/gci v0.3.3 h1:55xJKH7Gl9Vk6oQ1cMkwrDWjAkT1D+D1G9kNmRcAIY4= +github.com/daixiang0/gci v0.3.3/go.mod h1:1Xr2bxnQbDxCqqulUOv8qpGqkgRw9RSCGGjEC2LjF8o= +github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denis-tingaikin/go-header v0.4.3 h1:tEaZKAlqql6SKCY++utLmkPLd6K8IBM20Ha7UVm+mtU= +github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= +github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod 
h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/esimonov/ifshort v1.0.4 h1:6SID4yGWfRae/M7hkVDVVyppy8q/v9OuxNdmjLQStBA= +github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0= +github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw= +github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= +github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/frankban/quicktest v1.14.2 h1:SPb1KFFmM+ybpEjPUhCCkZOM5xlovT5UbrMvWnXyBns= +github.com/frankban/quicktest v1.14.2/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM= +github.com/fzipp/gocyclo v0.4.0 h1:IykTnjwh2YLyYkGa0y92iTTEQcnyAz0r9zOo15EbJ7k= +github.com/fzipp/gocyclo v0.4.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-critic/go-critic v0.6.2 h1:L5SDut1N4ZfsWZY0sH4DCrsHLHnhuuWak2wa165t9gs= +github.com/go-critic/go-critic v0.6.2/go.mod h1:td1s27kfmLpe5G/DPjlnFI7o1UCzePptwU7Az0V5iCM= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 
h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= +github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= +github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-toolsmith/astcast v1.0.0 h1:JojxlmI6STnFVG9yOImLeGREv8W2ocNUM+iOhR6jE7g= +github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= +github.com/go-toolsmith/astcopy v1.0.0 h1:OMgl1b1MEpjFQ1m5ztEO06rz5CUd3oBv9RF7+DyvdG8= +github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= +github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/astequal v1.0.1 h1:JbSszi42Jiqu36Gnf363HWS9MTEAz67vTQLponh3Moc= +github.com/go-toolsmith/astequal v1.0.1/go.mod h1:4oGA3EZXTVItV/ipGiOx7NWkY5veFfcsOJVS2YxltLw= +github.com/go-toolsmith/astfmt v1.0.0 h1:A0vDDXt+vsvLEdbMFJAUBI/uTbRw1ffOPnxsILnFL6k= +github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= +github.com/go-toolsmith/astp v1.0.0 h1:alXE75TXgcmupDsMK1fRAy0YUzLzqPVvBKoyWV+KPXg= +github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= +github.com/go-toolsmith/pkgload v1.0.2-0.20220101231613-e814995d17c5 h1:eD9POs68PHkwrx7hAB78z1cb6PfGq/jyWn3wJywsH1o= +github.com/go-toolsmith/pkgload v1.0.2-0.20220101231613-e814995d17c5/go.mod h1:3NAwwmD4uY/yggRxoEjk/S00MIV3A+H7rrE3i87eYxM= +github.com/go-toolsmith/strparse v1.0.0 h1:Vcw78DnpCAKlM20kSbAyO4mPfJn/lyYA4BJUDxe2Jb4= +github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= +github.com/go-toolsmith/typep v1.0.2 h1:8xdsa1+FSIH/RhEkgnD1j2CJOy5mNllW1Q9tRiYwvlk= +github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= +github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b h1:khEcpUM4yFcxg4/FHQWkvVRmgijNXRfzkIDHh23ggEo= +github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/gobuffalo/flect v0.3.0 h1:erfPWM+K1rFNIQeRPdeEXxo8yFr/PO17lhRnS8FUrtk= +github.com/gobuffalo/flect v0.3.0/go.mod 
h1:5pf3aGnsvqvCj50AVni7mJJF8ICxGZ8HomberC3pXLE= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod 
h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0= +github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= +github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 h1:9kfjN3AdxcbsZBf8NjltjWihK2QfBBBZuv91cMFfDHw= +github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= +github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks= +github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= +github.com/golangci/golangci-lint v1.45.2 h1:9I3PzkvscJkFAQpTQi5Ga0V4qWdJERajX1UZ7QqkW+I= +github.com/golangci/golangci-lint v1.45.2/go.mod h1:f20dpzMmUTRp+oYnX0OGjV1Au3Jm2JeI9yLqHq1/xsI= +github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= +github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= +github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= +github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= +github.com/golangci/misspell v0.3.5 h1:pLzmVdl3VxTOncgzHcvLOKirdvcx/TydsClUQXTehjo= +github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= +github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2 h1:SgM7GDZTxtTTQPU84heOxy34iG5Du7F2jcoZnvp+fXI= +github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= +github.com/google/certificate-transparency-go v1.1.1/go.mod h1:FDKqPvSXawb2ecErVRrD+nfy23RCzyl7eqVCEmlT1Zs= +github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= +github.com/google/gnostic v0.5.7-v3refs/go.mod 
h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof 
v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw= +github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/gookit/color v1.5.0/go.mod h1:43aQb+Zerm/BWh2GnrgOQm7ffz7tvQXEKV6BFMl7wAo= +github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= +github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 h1:PVRE9d4AQKmbelZ7emNig1+NT27DUmKZn5qXxfio54U= +github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= +github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= +github.com/gostaticanalysis/analysisutil v0.4.1/go.mod h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0= +github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= +github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= +github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI= +github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= +github.com/gostaticanalysis/comment v1.4.2 h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m/s1JiCd6Q= +github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= +github.com/gostaticanalysis/forcetypeassert v0.1.0 h1:6eUflI3DiGusXGK6X7cCcIgVCpZ2CiZ1Q7jl6ZxNV70= +github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= +github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk= +github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= +github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod 
h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= +github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY= +github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= +github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.4.0 h1:aAQzgqIrRKRa7w75CKpbBxYsmUoPjzVm1W59ca1L0J4= +github.com/hashicorp/go-version v1.4.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= 
+github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= +github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= +github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM= +github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= +github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4= +github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= +github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= +github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48= +github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod 
h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY= +github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/errcheck v1.6.0 h1:YTDO4pNy7AUN/021p+JGHycQyYNIyMoenM1YDVK6RlY= +github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kulti/thelper v0.5.1 h1:Uf4CUekH0OvzQTFPrWkstJvXgm6pnNEtQu3HiqEkpB0= +github.com/kulti/thelper v0.5.1/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U= +github.com/kunwardeep/paralleltest v1.0.3 h1:UdKIkImEAXjR1chUWLn+PNXqWUGs//7tzMeWuP7NhmI= +github.com/kunwardeep/paralleltest v1.0.3/go.mod h1:vLydzomDFpk7yu5UX02RmP0H8QfRPOV/oFhWN85Mjb4= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/kyoh86/exportloopref v0.1.8 h1:5Ry/at+eFdkX9Vsdw3qU4YkvGtzuVfzT4X7S77LoN/M= +github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= +github.com/ldez/gomoddirectives v0.2.2 h1:p9/sXuNFArS2RLc+UpYZSI4KQwGMEDWC/LbtF5OPFVg= +github.com/ldez/gomoddirectives v0.2.2/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= +github.com/ldez/tagliatelle v0.3.1 h1:3BqVVlReVUZwafJUwQ+oxbx2BEX2vUG4Yu/NOfMiKiM= +github.com/ldez/tagliatelle v0.3.1/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88= +github.com/leonklingele/grouper 
v1.1.0 h1:tC2y/ygPbMFSBOs3DcyaEMKnnwH7eYKzohOtRrf0SAg= +github.com/leonklingele/grouper v1.1.0/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY= +github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/maratori/testpackage v1.0.1 h1:QtJ5ZjqapShm0w5DosRjg0PRlSdAdlx+W6cCKoALdbQ= +github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= +github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 h1:pWxk9e//NbPwfxat7RXkts09K+dEBJWakUWwICVqYbA= +github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= +github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= +github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.6/go.mod 
h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM= +github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo= +github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= +github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= +github.com/mgechev/revive v1.1.4 h1:sZOjY6GU35Kr9jKa/wsKSHgrFz8eASIB5i3tqWZMp0A= +github.com/mgechev/revive v1.1.4/go.mod h1:ZZq2bmyssGh8MSPz3VVziqRNIMYTJXzP8MUKG90vZ9A= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= +github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod 
h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/moricho/tparallel v0.2.1 h1:95FytivzT6rYzdJLdtfn6m1bfFJylOJK41+lgv/EHf4= +github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= +github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinKo8VaLxe6PWTPEXRXDIHz2QAwiaBaP5/4a8= +github.com/mozilla/tls-observatory v0.0.0-20210609171429-7bc42856d2e5/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= +github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc= +github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U= +github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= +github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA= +github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nishanths/exhaustive v0.7.11 h1:xV/WU3Vdwh5BUH4N06JNUznb6d5zhRPOnlgCrpNYNKA= +github.com/nishanths/exhaustive v0.7.11/go.mod h1:gX+MP7DWMKJmNa1HfMozK+u04hQd3na9i0hyqf3/dOI= +github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= +github.com/nishanths/predeclared v0.2.1 h1:1TXtjmy4f3YCFjTxRd8zcFHOmoUir+gp0ESzjFzG2sw= +github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod 
h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.6.1 h1:1xQPCjcqYw/J5LchOcp4/2q/jzJFjiAOc25chhnDw+Q= +github.com/onsi/ginkgo/v2 v2.6.1/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= +github.com/onsi/gomega v1.24.2 h1:J/tulyYK6JwBldPViHJReihxxZ+22FHs0piGjQAvoUE= +github.com/onsi/gomega v1.24.2/go.mod h1:gs3J10IS7Z7r7eXRoNJIrNqU4ToQukCJhFtKrWgHWnk= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k= +github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= +github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= +github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= +github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= +github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= +github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA= +github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polyfloyd/go-errorlint v0.0.0-20211125173453-6d6d39c5bb8b h1:/BDyEJWLnDUYKGWdlNx/82qSaVu2bUok/EvPUtIGuvw= +github.com/polyfloyd/go-errorlint v0.0.0-20211125173453-6d6d39c5bb8b/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= 
+github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= +github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA= +github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= +github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30= +github.com/quasilyte/go-ruleguard v0.3.15 h1:iWYzp1z72IlXTioET0+XI6SjQdPfMGfuAiZiKznOt7g= +github.com/quasilyte/go-ruleguard v0.3.15/go.mod h1:NhuWhnlVEM1gT1A4VJHYfy9MuYSxxwHgxWoPsn9llB4= +github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/dsl v0.3.12-0.20220101150716-969a394a9451/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/dsl v0.3.12/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/rules 
v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc= +github.com/quasilyte/go-ruleguard/rules v0.0.0-20211022131956-028d6511ab71/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= +github.com/quasilyte/gogrep v0.0.0-20220103110004-ffaa07af02e3 h1:P4QPNn+TK49zJjXKERt/vyPbv/mCHB/zQ4flDYOMN+M= +github.com/quasilyte/gogrep v0.0.0-20220103110004-ffaa07af02e3/go.mod h1:wSEyW6O61xRV6zb6My3HxrQ5/8ke7NE2OayqCHa3xRM= +github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 h1:L8QM9bvf68pVdQ3bCFZMDmnt9yqcMBro1pC7F+IPYMY= +github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryancurrah/gomodguard v1.2.3 h1:ww2fsjqocGCAFamzvv/b8IsRduuHHeK2MHTcTxZTQX8= +github.com/ryancurrah/gomodguard v1.2.3/go.mod h1:rYbA/4Tg5c54mV1sv4sQTP5WOPBcoLtnBZ7/TEhXAbg= +github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8OUZI9xFw= +github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE= +github.com/sanposhiho/wastedassign/v2 v2.0.6 h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3335lYTyxRoA= +github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/securego/gosec/v2 v2.10.0 h1:l6BET4EzWtyUXCpY2v7N92v0DDCas0L7ngg3bpqbr8g= +github.com/securego/gosec/v2 v2.10.0/go.mod h1:PVq8Ewh/nCN8l/kKC6zrGXSr7m2NmEK6ITIAWMtIaA0= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= +github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod 
h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sivchari/containedctx v1.0.2 h1:0hLQKpgC53OVF1VT7CeoFHk9YKstur1XOgfYIc1yrHI= +github.com/sivchari/containedctx v1.0.2/go.mod h1:PwZOeqm4/DLoJOqMSIJs3aKqXRX4YO+uXww087KZ7Bw= +github.com/sivchari/tenv v1.4.7 h1:FdTpgRlTue5eb5nXIYgS/lyVXSjugU8UUVDwhP1NLU8= +github.com/sivchari/tenv v1.4.7/go.mod h1:5nF+bITvkebQVanjU6IuMbvIot/7ReNsUV7I5NbprB0= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sonatard/noctx v0.0.1 h1:VC1Qhl6Oxx9vvWo3UDgrGXYCeKCe3Wbw7qAWL6FrmTY= +github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI= +github.com/sourcegraph/go-diff v0.6.1 h1:hmA1LzxW0n1c3Q4YbrFgg4P99GSnebYa3x8gr0HZqLQ= +github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= +github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= +github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4= +github.com/spf13/viper v1.10.1 h1:nuJZuYpG7gTj/XqiUwg8bA0cp1+M2mC3J4g5luUYBKk= +github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU= +github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= +github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= 
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/sylvia7788/contextcheck v1.0.4 h1:MsiVqROAdr0efZc/fOCt0c235qm9XJqHtWwM+2h2B04= +github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ= +github.com/tdakkota/asciicheck v0.1.1 h1:PKzG7JUTUmVspQTDqtkX9eSiLGossXTybutHwTXuO0A= +github.com/tdakkota/asciicheck v0.1.1/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= +github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA= +github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= +github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= +github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= +github.com/tetafro/godot v1.4.11 h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw= +github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= +github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 h1:kl4KhGNsJIbDHS9/4U9yQo1UcPQM0kOMJHn29EoH/Ro= +github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tomarrell/wrapcheck/v2 v2.5.0 h1:g27SGGHNoQdvHz4KZA9o4v09RcWzylR+b1yueE5ECiw= +github.com/tomarrell/wrapcheck/v2 v2.5.0/go.mod h1:68bQ/eJg55BROaRTbMjC7vuhL2OgfoG8bLp9ZyoBfyY= +github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= +github.com/tommy-muehle/go-mnd/v2 v2.5.0 h1:iAj0a8e6+dXSL7Liq0aXPox36FiN1dBbjA6lt9fl65s= +github.com/tommy-muehle/go-mnd/v2 v2.5.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ultraware/funlen v0.0.3 h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA= 
+github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +github.com/ultraware/whitespace v0.0.5 h1:hh+/cpIcopyMYbZNVov9iSxvJU3OYQg78Sfaqzi/CzI= +github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/uudashr/gocognit v1.0.5 h1:rrSex7oHr3/pPLQ0xoWq108XMU8s678FJcQ+aSfOHa4= +github.com/uudashr/gocognit v1.0.5/go.mod h1:wgYz0mitoKOTysqxTDMOUXg+Jb5SvtihkfmugIZYpEA= +github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM= +github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk= +github.com/yeya24/promlinter v0.1.1-0.20210918184747-d757024714a1 h1:YAaOqqMTstELMMGblt6yJ/fcOt4owSYuw3IttMnKfAM= +github.com/yeya24/promlinter v0.1.1-0.20210918184747-d757024714a1/go.mod h1:rs5vtZzeBHqqMwXqFScncpCF6u06lezhZepno9AB1Oc= +github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= +github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +gitlab.com/bosi/decorder v0.2.1 h1:ehqZe8hI4w7O4b1vgsDZw1YU1PE7iJXrQWFMsocbQ1w= +gitlab.com/bosi/decorder v0.2.1/go.mod h1:6C/nhLSbF6qZbYD8bRmISBwc6vcWdNsiIBkRvjJFrH0= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.mozilla.org/mozlog v0.0.0-20170222151521-4bb13139d403/go.mod h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod 
h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 h1:sHOAIxRGBp443oHZIPB+HsUGaksVCXVQENPxwTfQdH4= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= +golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto 
v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= 
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 
v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b h1:clP8eMhB30EHdc0bd2Twtq6kgU7yl5ub2cQLSdrv1Dg= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211213223007-03aa0b5f6827/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= +golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod 
h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190916130336-e45ffcd953cc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200630154851-b2d8b0336632/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200706234117-b22de6825cf7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod 
h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= +golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= +golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.9-0.20211228192929-ee1ca4ffc4da/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api 
v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181107211654-5fc9ac540362/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= 
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200707001353-8e8330bf89df/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc 
v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI= +gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.6/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.2.2 h1:MNh1AVMyVX23VUHE2O27jm6lNj3vjO5DexS4A1xvnzk= +honnef.co/go/tools v0.2.2/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= +k8s.io/api v0.26.1 h1:f+SWYiPd/GsiWwVRz+NbFyCgvv75Pk9NK6dlkZgpCRQ= +k8s.io/api v0.26.1/go.mod h1:xd/GBNgR0f707+ATNyPmQ1oyKSgndzXij81FzWGsejg= +k8s.io/apiextensions-apiserver v0.26.1 h1:cB8h1SRk6e/+i3NOrQgSFij1B2S0Y0wDoNl66bn8RMI= +k8s.io/apiextensions-apiserver v0.26.1/go.mod h1:AptjOSXDGuE0JICx/Em15PaoO7buLwTs0dGleIHixSM= +k8s.io/apimachinery v0.26.1 h1:8EZ/eGJL+hY/MYCNwhmDzVqq2lPl3N3Bo8rvweJwXUQ= +k8s.io/apimachinery v0.26.1/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= +k8s.io/client-go v0.26.1 h1:87CXzYJnAMGaa/IDDfRdhTzxk/wzGZ+/HUQpqgVSZXU= +k8s.io/client-go v0.26.1/go.mod h1:IWNSglg+rQ3OcvDkhY6+QLeasV4OYHDjdqeWkDQZwGE= +k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= +k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= +k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= +k8s.io/utils v0.0.0-20221107191617-1a15be271d1d h1:0Smp/HP1OH4Rvhe+4B8nWGERtlqAGSftbSbbmm45oFs= +k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +mvdan.cc/gofumpt v0.3.1 h1:avhhrOmv0IuvQVK7fvwV91oFSGAk5/6Po8GXTzICeu8= +mvdan.cc/gofumpt v0.3.1/go.mod h1:w3ymliuxvzVx8DAutBnVyDqYb1Niy/yCJt/lk821YCE= +mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= +mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= +mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo= +mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= +mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5 h1:Jh3LAeMt1eGpxomyu3jVkmVZWW2MxZ1qIIV2TZ/nRio= +mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5/go.mod h1:b8RRCBm0eeiWR8cfN88xeq2G5SG3VKGO+5UPWi5FSOY= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20211110210527-619e6b92dab9 h1:ylYUI5uaq/guUFerFRVG81FHSA5/3+fERCE1RQbQUZ4= +sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20211110210527-619e6b92dab9/go.mod h1:+sJcI1F0QI0Cv+8fp5rH5B2fK1LxzrAQqYnaPx9nY8I= +sigs.k8s.io/controller-tools v0.11.3 h1:T1xzLkog9saiyQSLz1XOImu4OcbdXWytc5cmYsBeBiE= +sigs.k8s.io/controller-tools v0.11.3/go.mod h1:qcfX7jfcfYD/b7lAhvqAyTbt/px4GpvN88WKLFFv7p8= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod 
h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/hack/tools/kapinger/Dockerfile b/hack/tools/kapinger/Dockerfile new file mode 100644 index 000000000..5230c6d29 --- /dev/null +++ b/hack/tools/kapinger/Dockerfile @@ -0,0 +1,12 @@ +FROM mcr.microsoft.com/oss/go/microsoft/golang:1.21 AS builder + +WORKDIR /app +COPY ./hack/tools . +RUN go mod download + +RUN CGO_ENABLED=0 GOOS=linux go build -o app ./kapinger + +FROM scratch +WORKDIR /app +COPY --from=builder /app/app . +CMD ["./app"] diff --git a/hack/tools/kapinger/Dockerfile.windows b/hack/tools/kapinger/Dockerfile.windows new file mode 100644 index 000000000..692e687f1 --- /dev/null +++ b/hack/tools/kapinger/Dockerfile.windows @@ -0,0 +1,16 @@ +FROM --platform=linux/amd64 mcr.microsoft.com/oss/go/microsoft/golang:1.21 AS builder + +WORKDIR /app +COPY ./hack/tools . +ENV GOOS=windows +ENV GOARCH=amd64 + +RUN echo "building kapinger for OS: $GOOS, ARCH: $GOARCH" +RUN go mod download + +RUN CGO_ENABLED=0 go build -o app.exe ./kapinger + +FROM mcr.microsoft.com/windows/nanoserver:ltsc2022 +WORKDIR /app +COPY --from=builder /app/app.exe . +CMD ["app.exe"] diff --git a/hack/tools/kapinger/clients/http.go b/hack/tools/kapinger/clients/http.go new file mode 100644 index 000000000..fd2f2f74f --- /dev/null +++ b/hack/tools/kapinger/clients/http.go @@ -0,0 +1,161 @@ +package clients + +import ( + "context" + "fmt" + "io" + "log" + "net/http" + "os" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" +) + +type TargetType string + +const ( + Service TargetType = "service" + Pod TargetType = "pod" + + envTargetType = "TARGET_TYPE" +) + +type KapingerHTTPClient struct { + client http.Client + clientset *kubernetes.Clientset + labelselector string + ips []string + port int + targettype TargetType +} + +func NewKapingerHTTPClient(clientset *kubernetes.Clientset, labelselector string, httpPort int) (*KapingerHTTPClient, error) { + k := KapingerHTTPClient{ + client: http.Client{ + Transport: &http.Transport{ + DisableKeepAlives: true, + }, + Timeout: 3 * time.Second, + }, + labelselector: labelselector, + clientset: clientset, + port: httpPort, + } + + targettype := os.Getenv(envTargetType) + if targettype != "" { + k.targettype = TargetType(targettype) + } else { + k.targettype = Service + } + + err := k.getIPS() + if err != nil { + return nil, fmt.Errorf("error getting IPs: %w", err) + } + + return &k, nil +} + +func (k *KapingerHTTPClient) MakeRequest() error { + for _, ip := range k.ips { + url := fmt.Sprintf("http://%s:%d", ip, k.port) + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return err + } + + // Set the "Connection" header to "close" + req.Header.Set("Connection", "close") + + // Send the request + resp, err := k.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + log.Fatalf("Error reading response body from %s: %v", url, err) + return err + } + log.Printf("Response from %s: %s\n", url, string(body)) + } + return nil +} + +func (k *KapingerHTTPClient) getIPS() error { + ips := []string{} + + switch k.targettype { + case Service: + services, err := 
k.clientset.CoreV1().Services(corev1.NamespaceAll).List(context.Background(), metav1.ListOptions{ + LabelSelector: k.labelselector, + }) + if err != nil { + return fmt.Errorf("error getting services: %w", err) + } + + // Extract the Service cluster IP addresses + + for _, svc := range services.Items { + ips = append(ips, svc.Spec.ClusterIP) + } + log.Println("using service IPs:", ips) + + case Pod: + err := waitForPodsRunning(k.clientset, k.labelselector) + if err != nil { + return fmt.Errorf("error waiting for pods to be in Running state: %w", err) + } + + // Get all pods in the cluster with label app=agnhost + pods, err := k.clientset.CoreV1().Pods(corev1.NamespaceAll).List(context.Background(), metav1.ListOptions{ + LabelSelector: k.labelselector, + }) + if err != nil { + return fmt.Errorf("error getting pods: %w", err) + } + + for _, pod := range pods.Items { + ips = append(ips, pod.Status.PodIP) + } + + log.Printf("using pod IPs: %v", ips) + default: + return fmt.Errorf("env TARGET_TYPE must be \"service\" or \"pod\"") + } + + k.ips = ips + return nil +} + +// waitForPodsRunning waits for all pods with the specified label to be in the Running phase +func waitForPodsRunning(clientset *kubernetes.Clientset, labelSelector string) error { + return wait.ExponentialBackoff(wait.Backoff{ + Duration: 5 * time.Second, + Factor: 1.5, + }, func() (bool, error) { + pods, err := clientset.CoreV1().Pods(corev1.NamespaceAll).List(context.Background(), metav1.ListOptions{ + LabelSelector: labelSelector, + }) + if err != nil { + log.Printf("error getting pods: %v", err) + return false, nil + } + + for _, pod := range pods.Items { + if pod.Status.Phase != corev1.PodRunning { + log.Printf("waiting for pod %s to be in Running state (currently %s)", pod.Name, pod.Status.Phase) + return false, nil + } + } + + return true, nil + }) +} diff --git a/hack/tools/kapinger/main.go b/hack/tools/kapinger/main.go new file mode 100644 index 000000000..2f1a9d6da --- /dev/null +++ b/hack/tools/kapinger/main.go @@ -0,0 +1,69 @@ +package main + +import ( + "log" + "math/rand" + "os" + "strconv" + "time" + + "github.com/microsoft/retina/hack/tools/kapinger/clients" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +const ( + delay = 500 * time.Millisecond +) + +func main() { + log.Printf("starting kapinger...") + clientset, err := getKubernetesClientSet() + if err != nil { + log.Fatal(err) + } + + httpPort, err := strconv.Atoi(os.Getenv(envHTTPPort)) + if err != nil { + httpPort = httpport + log.Printf("HTTP_PORT not set, defaulting to port %d\n", httpport) + } + + go StartServers() + + // Create an HTTP client with the custom Transport + client, err := clients.NewKapingerHTTPClient(clientset, "app=kapinger", httpPort) + if err != nil { + log.Fatal(err) + } + + // Initialize the random number generator with a seed based on the current time + rand.New(rand.NewSource(time.Now().UnixNano())) + + // Generate a random number between 1 and 1000 for delay jitter + jitter := rand.Intn(100) + 1 + time.Sleep(time.Duration(jitter) * time.Millisecond) + + for { + err := client.MakeRequest() + if err != nil { + log.Printf("error making request: %v", err) + } + time.Sleep(delay) + } +} + +func getKubernetesClientSet() (*kubernetes.Clientset, error) { + // Use the in-cluster configuration + config, err := rest.InClusterConfig() + if err != nil { + log.Printf("error getting in-cluster config: %v", err) + } + + // Create a Kubernetes clientset using the in-cluster configuration + clientset, err := 
kubernetes.NewForConfig(config) + if err != nil { + log.Printf("error creating clientset: %v", err) + } + return clientset, err +} diff --git a/hack/tools/kapinger/manifests/deploy.yaml b/hack/tools/kapinger/manifests/deploy.yaml new file mode 100644 index 000000000..b022a2011 --- /dev/null +++ b/hack/tools/kapinger/manifests/deploy.yaml @@ -0,0 +1,232 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kapinger-sa + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kapinger-role + namespace: default +rules: + - apiGroups: [""] + resources: ["services", "pods"] + verbs: ["get", "list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kapinger-rolebinding + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kapinger-role +subjects: + - kind: ServiceAccount + name: kapinger-sa + namespace: default +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kapinger + namespace: default +spec: + replicas: 2 + selector: + matchLabels: + app: kapinger + template: + metadata: + labels: + app: kapinger + spec: + serviceAccountName: kapinger-sa + nodeSelector: + "kubernetes.io/os": linux + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 50 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - kapinger + topologyKey: "kubernetes.io/hostname" + containers: + - name: kapinger + image: acnpublic.azurecr.io/kapinger:latest + resources: + limits: + memory: 20Mi + requests: + memory: 20Mi + env: + - name: TARGET_TYPE + value: "service" + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: HTTP_PORT + value: "8080" + - name: TCP_PORT + value: "8085" + - name: UDP_PORT + value: "8086" + ports: + - containerPort: 8080 + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kapinger-win + namespace: default +spec: + replicas: 5 + selector: + matchLabels: + app: kapinger + template: + metadata: + labels: + app: kapinger + spec: + serviceAccountName: kapinger-sa + nodeSelector: + "kubernetes.io/os": windows + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - kapinger + topologyKey: "kubernetes.io/hostname" + containers: + - name: kapinger + image: acnpublic.azurecr.io/kapinger:windows-ltsc2022-amd64-v35 + env: + - name: TARGET_TYPE + value: "service" + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: HTTP_PORT + value: "8080" + - name: TCP_PORT + value: "8085" + - name: UDP_PORT + value: "8086" + ports: + - containerPort: 8080 + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kapinger-drop + namespace: default +spec: + replicas: 0 + selector: + matchLabels: + app: kapinger-drop + template: + metadata: + labels: + app: kapinger-drop + spec: + serviceAccountName: kapinger-sa + nodeSelector: + "kubernetes.io/os": linux + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - kapinger + topologyKey: "kubernetes.io/hostname" + containers: + - name: kapinger + image: 
acnpublic.azurecr.io/kapinger:latest + resources: + limits: + memory: 20Mi + requests: + memory: 20Mi + env: + - name: TARGET_TYPE + value: "service" + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: HTTP_PORT + value: "8080" + - name: TCP_PORT + value: "8085" + - name: UDP_PORT + value: "8086" + ports: + - containerPort: 8080 + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: deny-traffic-to-kapinger-drop + namespace: default +spec: + podSelector: + matchLabels: + app: kapinger-drop + policyTypes: + - Ingress + - Egress + ingress: [] + egress: [] +--- +apiVersion: v1 +kind: Service +metadata: + name: kapinger-service + namespace: default + labels: + app: kapinger +spec: + selector: + app: kapinger + ports: + - protocol: TCP + port: 8080 + targetPort: 8080 + type: ClusterIP diff --git a/hack/tools/kapinger/server.go b/hack/tools/kapinger/server.go new file mode 100644 index 000000000..99bc586b7 --- /dev/null +++ b/hack/tools/kapinger/server.go @@ -0,0 +1,72 @@ +package main + +import ( + "context" + "log" + "os" + "strconv" + + "github.com/microsoft/retina/hack/tools/kapinger/servers" +) + +const ( + httpport = 8080 + tcpport = 8085 + udpport = 8086 + + envHTTPPort = "HTTP_PORT" + envTCPPort = "TCP_PORT" + envUDPPort = "UDP_PORT" +) + +type Server interface { + Start(ctx context.Context) error +} + +type Kapinger struct { + servers []Server +} + +func (k *Kapinger) Start(ctx context.Context) { + for i := range k.servers { + go func(i int) { + err := k.servers[i].Start(ctx) + if err != nil { + log.Printf("Error starting server: %s\n", err) + } + }(i) + } + <-ctx.Done() +} + +func StartServers() { + tcpPort, err := strconv.Atoi(os.Getenv(envTCPPort)) + if err != nil { + tcpPort = tcpport + log.Printf("TCP_PORT not set, defaulting to port %d\n", tcpport) + } + + udpPort, err := strconv.Atoi(os.Getenv(envUDPPort)) + if err != nil { + udpPort = udpport + log.Printf("UDP_PORT not set, defaulting to port %d\n", udpport) + } + + httpPort, err := strconv.Atoi(os.Getenv(envHTTPPort)) + if err != nil { + httpPort = httpport + log.Printf("HTTP_PORT not set, defaulting to port %d\n", httpport) + } + + k := &Kapinger{ + servers: []Server{ + servers.NewKapingerTCPServer(tcpPort), + servers.NewKapingerUDPServer(udpPort), + servers.NewKapingerHTTPServer(httpPort), + }, + } + + // cancel + ctx := context.Background() + k.Start(ctx) +} diff --git a/hack/tools/kapinger/servers/http.go b/hack/tools/kapinger/servers/http.go new file mode 100644 index 000000000..5810c5e4d --- /dev/null +++ b/hack/tools/kapinger/servers/http.go @@ -0,0 +1,52 @@ +package servers + +import ( + "context" + "fmt" + "log" + "net/http" + "strconv" +) + +type KapingerHTTPServer struct { + port int +} + +func NewKapingerHTTPServer(port int) *KapingerHTTPServer { + return &KapingerHTTPServer{ + port: port, + } +} + +func (k *KapingerHTTPServer) Start(ctx context.Context) error { + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, err := w.Write(getResponse(r.RemoteAddr, "http")) + if err != nil { + fmt.Println(err) + } + }) + + addr := ":" + strconv.Itoa(k.port) + + log.Printf("[HTTP] Listening on %+v\n", addr) + + server := &http.Server{ + Addr: addr, + Handler: http.HandlerFunc(handler), + } + + go func() { + err := server.ListenAndServe() + if err != nil { + panic(err) + } + }() + + <-ctx.Done() + err := server.Shutdown(ctx) + if err != nil { + return err + } + + return nil +} 
diff --git a/hack/tools/kapinger/servers/tcp.go b/hack/tools/kapinger/servers/tcp.go new file mode 100644 index 000000000..7f9f200aa --- /dev/null +++ b/hack/tools/kapinger/servers/tcp.go @@ -0,0 +1,61 @@ +package servers + +import ( + "context" + "fmt" + "log" + "net" +) + +const ( + tcp = "tcp" +) + +type KapingerTCPServer struct { + port int +} + +func NewKapingerTCPServer(port int) *KapingerTCPServer { + return &KapingerTCPServer{ + port: port, + } +} + +func (k *KapingerTCPServer) Start(ctx context.Context) error { + listener, err := net.ListenTCP(tcp, &net.TCPAddr{Port: k.port}) + if err != nil { + fmt.Println(err) + return nil + } + defer listener.Close() + + log.Printf("[TCP] Listening on %+v\n", listener.Addr().String()) + + for { + select { + case <-ctx.Done(): + fmt.Println("Exiting TCP server") + return nil + default: + connection, err := listener.Accept() + if err != nil { + fmt.Println(err) + return err + } + handleConnection(connection) + } + } +} + +func handleConnection(connection net.Conn) { + addressString := fmt.Sprintf("%+v", connection.RemoteAddr()) + _, err := connection.Write(getResponse(addressString, tcp)) + if err != nil { + fmt.Println(err) + } + + err = connection.Close() + if err != nil { + fmt.Println(err) + } +} diff --git a/hack/tools/kapinger/servers/udp.go b/hack/tools/kapinger/servers/udp.go new file mode 100644 index 000000000..19152beb1 --- /dev/null +++ b/hack/tools/kapinger/servers/udp.go @@ -0,0 +1,62 @@ +package servers + +import ( + "context" + "fmt" + "log" + "net" + "strings" +) + +const ( + udp = "udp" +) + +type KapingerUDPServer struct { + buffersize int + port int +} + +func NewKapingerUDPServer(port int) *KapingerUDPServer { + return &KapingerUDPServer{ + buffersize: 1024, + port: port, + } +} + +func (k *KapingerUDPServer) Start(ctx context.Context) error { + connection, err := net.ListenUDP(udp, &net.UDPAddr{Port: k.port}) + if err != nil { + fmt.Println(err) + return err + } + log.Printf("[UDP] Listening on %+v\n", connection.LocalAddr().String()) + + defer connection.Close() + buffer := make([]byte, k.buffersize) + + for { + select { + case <-ctx.Done(): + fmt.Println("Exiting UDP server") + return nil + default: + n, addr, err := connection.ReadFromUDP(buffer) + if err != nil { + fmt.Println(err) + } + payload := strings.TrimSpace(string(buffer[0 : n-1])) + + if payload == "STOP" { + fmt.Println("Exiting UDP server") + return nil + } + + addressString := fmt.Sprintf("%+v", addr) + _, err = connection.WriteToUDP(getResponse(addressString, udp), addr) + if err != nil { + return fmt.Errorf("error writing to UDP connection: %w", err) + } + } + } +} diff --git a/hack/tools/kapinger/servers/util.go b/hack/tools/kapinger/servers/util.go new file mode 100644 index 000000000..497c90071 --- /dev/null +++ b/hack/tools/kapinger/servers/util.go @@ -0,0 +1,11 @@ +package servers + +import ( + "fmt" + "os" +) + +func getResponse(addressString, protocol string) []byte { + podname := os.Getenv("POD_NAME") + return []byte(fmt.Sprintf("connected to: %s via %s, connected from: %v", podname, protocol, addressString)) +} diff --git a/hack/tools/tools.go b/hack/tools/tools.go new file mode 100644 index 000000000..e45e96292 --- /dev/null +++ b/hack/tools/tools.go @@ -0,0 +1,22 @@ +//go:build tools +// +build tools + +/* +How to add new tool: + +$ cd hack/tools +$ export GOBIN=$PWD/bin +$ export PATH=$GOBIN:$PATH +$ go install ... 
+*/ + +package tools + +import ( + _ "github.com/golang/mock/mockgen" + _ "github.com/golangci/golangci-lint/cmd/golangci-lint" + _ "github.com/onsi/ginkgo" + _ "mvdan.cc/gofumpt" + _ "sigs.k8s.io/controller-runtime/tools/setup-envtest" + _ "sigs.k8s.io/controller-tools/cmd/controller-gen" +) diff --git a/init/retina/main.go new file mode 100644 index 000000000..203351e5b --- /dev/null +++ b/init/retina/main.go @@ -0,0 +1,34 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package main + +import ( + "github.com/microsoft/retina/pkg/bpf" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/telemetry" + "go.uber.org/zap" +) + +var ( + applicationInsightsID string //nolint // aiMetadata is set in Makefile + version string +) + +func main() { + // Initialize application insights + telemetry.InitAppInsights(applicationInsightsID, version) + defer telemetry.TrackPanic() + + // Initialize logger + opts := log.GetDefaultLogOpts() + opts.ApplicationInsightsID = applicationInsightsID + opts.EnableTelemetry = true + + log.SetupZapLogger(opts) + log.Logger().AddFields(zap.String("version", version)) + l := log.Logger().Named("init-retina") + + // Setup BPF + bpf.Setup(l) +} diff --git a/operator/Dockerfile new file mode 100644 index 000000000..2c9c65ac2 --- /dev/null +++ b/operator/Dockerfile @@ -0,0 +1,17 @@ +FROM mcr.microsoft.com/oss/go/microsoft/golang:1.21 AS builder + +ARG VERSION +ARG APP_INSIGHTS_ID + +WORKDIR /workspace +COPY . . + +RUN make manifests +RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags "-X main.version="$VERSION" -X "main.applicationInsightsID"="$APP_INSIGHTS_ID"" -a -o retina-operator operator/main.go + +FROM scratch +WORKDIR / +COPY --from=builder /workspace/retina-operator . +USER 65532:65532 + +ENTRYPOINT ["/retina-operator"] diff --git a/operator/Dockerfile.windows-2019 new file mode 100644 index 000000000..a007d8c09 --- /dev/null +++ b/operator/Dockerfile.windows-2019 @@ -0,0 +1,23 @@ +FROM --platform=linux/amd64 mcr.microsoft.com/oss/go/microsoft/golang:1.20 as builder + +# Build args +ARG VERSION +ARG APP_INSIGHTS_ID + +ENV GOOS=windows +ENV GOARCH=amd64 + +WORKDIR /usr/src/retina +# Copy the source +COPY . . +RUN go build -v -ldflags "-X main.version="$VERSION" -X "main.applicationInsightsID"="$APP_INSIGHTS_ID"" -o /usr/bin/retina-operator.exe operator/main.go + +# Copy into final image +FROM mcr.microsoft.com/windows/nanoserver:ltsc2019 +COPY --from=builder /usr/src/retina/windows/kubeconfigtemplate.yaml kubeconfigtemplate.yaml +COPY --from=builder /usr/src/retina/windows/setkubeconfigpath.ps1 setkubeconfigpath.ps1 + +COPY --from=builder /usr/bin/retina-operator.exe retina-operator.exe + +CMD ["retina-operator.exe", "start", "--kubeconfig=.\\kubeconfig"] diff --git a/operator/Dockerfile.windows-2022 new file mode 100644 index 000000000..390244e4d --- /dev/null +++ b/operator/Dockerfile.windows-2022 @@ -0,0 +1,23 @@ +FROM --platform=linux/amd64 mcr.microsoft.com/oss/go/microsoft/golang:1.20 as builder + +# Build args +ARG VERSION +ARG APP_INSIGHTS_ID + +ENV GOOS=windows +ENV GOARCH=amd64 + +WORKDIR /usr/src/retina +# Copy the source +COPY . .
+ +RUN go build -v -ldflags "-X main.version="$VERSION" -X "main.applicationInsightsID"="$APP_INSIGHTS_ID"" -o -o /usr/bin/retina-operator.exe retina-operator operator/main.go + +# Copy into final image +FROM mcr.microsoft.com/windows/nanoserver:ltsc2022 +COPY --from=builder /usr/src/retina/windows/kubeconfigtemplate.yaml kubeconfigtemplate.yaml +COPY --from=builder /usr/src/retina/windows/setkubeconfigpath.ps1 setkubeconfigpath.ps1 + +COPY --from=builder /usr/bin/retina-operator.exe retina-operator.exe + +CMD ["controller.exe", "start", "--kubeconfig=.\\kubeconfig"] diff --git a/operator/cache/types.go b/operator/cache/types.go new file mode 100644 index 000000000..8edc6bfc0 --- /dev/null +++ b/operator/cache/types.go @@ -0,0 +1,49 @@ +package cache + +import ( + "sync" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + retinav1alpha1 "github.com/microsoft/retina/crd/api/v1alpha1" +) + +type PodCacheObject struct { + Key types.NamespacedName // cache.MetaNamespaceKeyFunc + Pod *corev1.Pod +} + +type MetricsConfigurationCacheObject struct { + Key string // cache.MetaNamespaceKeyFunc + MetricsConfiguration *retinav1alpha1.MetricsConfiguration +} + +type RetinaEndpoint struct { + sync.RWMutex + Name string + Namespace string + OwnerRefs []metav1.OwnerReference + IPv4 string + IPv6 string + OtherIPv4s []string + OtherIPv6s []string + Containers []RetinaContainer + Labels map[string]string +} + +type RetinaContainer struct { + Name string + ID string +} + +type RetinaSvc struct { + sync.RWMutex + Name string + Namespace string + ClusterIP string + ClusterIPs []string + LBIP string + Selector map[string]string +} diff --git a/operator/config/config.go b/operator/config/config.go new file mode 100644 index 000000000..cc6863174 --- /dev/null +++ b/operator/config/config.go @@ -0,0 +1,14 @@ +package config + +import "github.com/microsoft/retina/pkg/config" + +type OperatorConfig struct { + config.CaptureConfig `mapstructure:",squash"` + + InstallCRDs bool `yaml:"installCRDs"` + EnableTelemetry bool `yaml:"enableTelemetry"` + LogLevel string `yaml:"logLevel"` + // EnableRetinaEndpoint indicates whether to enable RetinaEndpoint + EnableRetinaEndpoint bool `yaml:"enableRetinaEndpoint"` + RemoteContext bool `yaml:"remoteContext"` +} diff --git a/operator/main.go b/operator/main.go new file mode 100644 index 000000000..86e41e100 --- /dev/null +++ b/operator/main.go @@ -0,0 +1,293 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + "flag" + "fmt" + "net/http" + "net/http/pprof" + "os" + "time" + + "go.uber.org/zap/zapcore" + + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // to ensure that exec-entrypoint and run can make use of them. 
+ + "github.com/spf13/viper" + "go.uber.org/zap" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" + k8sruntime "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/kubernetes" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + _ "k8s.io/client-go/plugin/pkg/client/auth" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/healthz" + crzap "sigs.k8s.io/controller-runtime/pkg/log/zap" + + retinav1alpha1 "github.com/microsoft/retina/crd/api/v1alpha1" + deploy "github.com/microsoft/retina/deploy" + "github.com/microsoft/retina/operator/cache" + config "github.com/microsoft/retina/operator/config" + captureUtils "github.com/microsoft/retina/pkg/capture/utils" + captureController "github.com/microsoft/retina/pkg/controllers/operator/capture" + metricsconfiguration "github.com/microsoft/retina/pkg/controllers/operator/metricsconfiguration" + podcontroller "github.com/microsoft/retina/pkg/controllers/operator/pod" + retinaendpointcontroller "github.com/microsoft/retina/pkg/controllers/operator/retinaendpoint" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/telemetry" +) + +var ( + scheme = k8sruntime.NewScheme() + mainLogger *log.ZapLogger + oconfig *config.OperatorConfig + + MAX_POD_CHANNEL_BUFFER = 250 + MAX_METRICS_CONFIGURATION_CHANNEL_BUFFER = 50 + MAX_TRACES_CONFIGURATION_CHANNEL_BUFFER = 50 + MAX_RETINA_ENDPOINT_CHANNEL_BUFFER = 250 + + version = "undefined" + + applicationInsightsID string //nolint // aiMetadata is set in Makefile +) + +func init() { + //+kubebuilder:scaffold:scheme + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(retinav1alpha1.AddToScheme(scheme)) + + var err error + oconfig, err = LoadConfig() + if err != nil { + fmt.Printf("failed to load config with err %s", err.Error()) + os.Exit(1) + } + + err = initLogging(oconfig, applicationInsightsID) + if err != nil { + fmt.Printf("failed to initialize logging with err %s", err.Error()) + os.Exit(1) + } + mainLogger = log.Logger().Named("main") +} + +func main() { + go EnablePProf() + var metricsAddr string + var enableLeaderElection bool + var probeAddr string + flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") + flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + flag.BoolVar(&enableLeaderElection, "leader-elect", false, + "Enable leader election for controller manager. "+ + "Enabling this will ensure there is only one active controller manager.") + + mainLogger.Sugar().Infof("Operator configuration", zap.Any("configuration", oconfig)) + + opts := &crzap.Options{ + Development: false, + } + + ctrl.SetLogger(crzap.New(crzap.UseFlagOptions(opts), crzap.Encoder(zapcore.NewConsoleEncoder(log.EncoderConfig())))) + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ + Scheme: scheme, + MetricsBindAddress: metricsAddr, + Port: 9443, + HealthProbeBindAddress: probeAddr, + LeaderElection: enableLeaderElection, + LeaderElectionID: "16937e39.retina.io", + + // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily + // when the Manager ends. This requires the binary to immediately end when the + // Manager is stopped, otherwise, this setting is unsafe. Setting this significantly + // speeds up voluntary leader transitions as the new leader don't have to wait + // LeaseDuration time first. 
+ // + // In the default scaffold provided, the program ends immediately after + // the manager stops, so would be fine to enable this option. However, + // if you are doing or is intended to do any operation such as perform cleanups + // after the manager stops then its usage might be unsafe. + // LeaderElectionReleaseOnCancel: true, + }) + if err != nil { + mainLogger.Error("Unable to start manager", zap.Error(err)) + os.Exit(1) + } + + ctx := context.Background() + clientset, err := apiextv1.NewForConfig(mgr.GetConfig()) + if err != nil { + mainLogger.Error("Failed to get apiextension clientset", zap.Error(err)) + os.Exit(1) + } + + if oconfig.InstallCRDs { + mainLogger.Sugar().Infof("Installing CRDs") + crds, err := deploy.InstallOrUpdateCRDs(ctx, oconfig.EnableRetinaEndpoint, clientset) + if err != nil { + mainLogger.Error("unable to register CRDs", zap.Error(err)) + os.Exit(1) + } + for name := range crds { + mainLogger.Info("CRD registered", zap.String("name", name)) + } + } + + apiserverURL, err := telemetry.GetK8SApiserverURLFromKubeConfig() + if err != nil { + mainLogger.Error("Apiserver URL is cannot be found", zap.Error(err)) + os.Exit(1) + } + + var tel telemetry.Telemetry + if oconfig.EnableTelemetry { + mainLogger.Info("telemetry enabled", zap.String("applicationInsightsID", applicationInsightsID)) + properties := map[string]string{ + "version": version, + telemetry.PropertyApiserver: apiserverURL, + } + tel = telemetry.NewAppInsightsTelemetryClient("retina-agent", properties) + } else { + mainLogger.Info("telemetry disabled for:", zap.String("apiserver", apiserverURL)) + tel = telemetry.NewNoopTelemetry() + } + + kubeClient, err := kubernetes.NewForConfig(mgr.GetConfig()) + if err != nil { + mainLogger.Error("Failed to get clientset", zap.Error(err)) + os.Exit(1) + } + + if err = captureController.NewCaptureReconciler( + mgr.GetClient(), mgr.GetScheme(), kubeClient, oconfig.CaptureConfig, + ).SetupWithManager(mgr); err != nil { + mainLogger.Error("Unable to setup retina capture controller with manager", zap.Error(err)) + os.Exit(1) + } + + ctrlCtx := ctrl.SetupSignalHandler() + + //+kubebuilder:scaffold:builder + + // TODO(mainred): retina-operater is responsible for recycling created retinaendpoints if remotecontext is switched off. + // Tracked by https://github.com/microsoft/retina/issues/522 + if oconfig.RemoteContext { + // Create RetinaEndpoint out of Pod to extract only the necessary fields of Pods to reduce the pressure of APIServer + // when RetinaEndpoint is enabled. + // TODO(mainred): An alternative of RetinaEndpoint, and possible long term solution, is to use CiliumEndpoint + // created for Cilium unmanged Pods. 
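	// Pod events flow to the RetinaEndpoint reconciler through a buffered channel of
	// cache.PodCacheObject (see operator/cache/types.go). A minimal sketch of how a
	// producer enqueues a Pod (hypothetical helper, shown for illustration only):
	//
	//	func enqueuePod(ch chan<- cache.PodCacheObject, pod *corev1.Pod) {
	//		ch <- cache.PodCacheObject{
	//			Key: types.NamespacedName{Namespace: pod.Namespace, Name: pod.Name},
	//			Pod: pod,
	//		}
	//	}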
+ if oconfig.EnableRetinaEndpoint { + mainLogger.Info("RetinaEndpoint is enabled") + + retinaendpointchannel := make(chan cache.PodCacheObject, MAX_RETINA_ENDPOINT_CHANNEL_BUFFER) + ke := retinaendpointcontroller.New(mgr.GetClient(), retinaendpointchannel) + // start reconcile the cached Pod before manager starts to not miss any events + go ke.ReconcilePod(ctrlCtx) + + pc := podcontroller.New(mgr.GetClient(), mgr.GetScheme(), retinaendpointchannel) + if err = (pc).SetupWithManager(mgr); err != nil { + mainLogger.Error("Unable to create controller", zap.String("controller", "podcontroller"), zap.Error(err)) + os.Exit(1) + } + } + } + + mc := metricsconfiguration.New(mgr.GetClient(), mgr.GetScheme()) + if err = (mc).SetupWithManager(mgr); err != nil { + mainLogger.Error("Unable to create controller", zap.String("controller", "metricsconfiguration"), zap.Error(err)) + os.Exit(1) + } + + //+kubebuilder:scaffold:builder + + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + mainLogger.Error("Unable to set up health check", zap.Error(err)) + os.Exit(1) + } + if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + mainLogger.Error("Unable to set up ready check", zap.Error(err)) + os.Exit(1) + } + + mainLogger.Info("Starting manager") + if err := mgr.Start(ctrlCtx); err != nil { + mainLogger.Error("Problem running manager", zap.Error(err)) + os.Exit(1) + } + + // start heartbeat goroutine for application insights + go tel.Heartbeat(ctx, 5*time.Minute) +} + +func LoadConfig() (*config.OperatorConfig, error) { + viper.SetConfigType("yaml") + viper.SetConfigFile("retina/operator-config.yaml") + err := viper.ReadInConfig() + if err != nil { + return nil, err + } + + viper.AutomaticEnv() + + var config config.OperatorConfig + + // Check pkg/config/config.go for the explanation of setting EnableRetinaEndpoint defaults to true. 
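	// A minimal retina/operator-config.yaml consumed by this function might look like
	// the following (illustrative values; keys follow the yaml tags in operator/config):
	//
	//	installCRDs: true
	//	enableTelemetry: false
	//	logLevel: info
	//	enableRetinaEndpoint: true
	//	remoteContext: false
	//
	// Values from the config file (and, via AutomaticEnv above, matching environment
	// variables) take precedence over the default registered below.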
+ viper.SetDefault("EnableRetinaEndpoint", true) + err = viper.Unmarshal(&config) + + // Set Capture config + config.CaptureConfig.CaptureImageVersion = version + config.CaptureConfig.CaptureImageVersionSource = captureUtils.VersionSourceOperatorImageVersion + + return &config, err +} + +func EnablePProf() { + pprofmux := http.NewServeMux() + pprofmux.HandleFunc("/debug/pprof/", pprof.Index) + pprofmux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) + pprofmux.HandleFunc("/debug/pprof/profile", pprof.Profile) + pprofmux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + pprofmux.HandleFunc("/debug/pprof/trace", pprof.Trace) + pprofmux.Handle("/debug/pprof/goroutine", pprof.Handler("goroutine")) + + if err := http.ListenAndServe(":8082", pprofmux); err != nil { + panic(err) + } +} + +func initLogging(config *config.OperatorConfig, applicationInsightsID string) error { + logOpts := &log.LogOpts{ + Level: config.LogLevel, + File: false, + MaxFileSizeMB: 100, + MaxBackups: 3, + MaxAgeDays: 30, + ApplicationInsightsID: applicationInsightsID, + EnableTelemetry: config.EnableTelemetry, + } + + log.SetupZapLogger(logOpts) + + return nil +} diff --git a/pkg/api/trace.yml b/pkg/api/trace.yml new file mode 100644 index 000000000..4406544b4 --- /dev/null +++ b/pkg/api/trace.yml @@ -0,0 +1,134 @@ +openapi: "3.0.0" +info: + version: 0.0.1 + title: Trace + description: Retina Trace API + contact: + name: Azure Container Networking + email: acn@microsoft.com + license: + name: MIT License + url: https://github.com/microsoft/retina/blob/main/LICENSE +servers: + - url: http://{host}:{port} +paths: + /trace: + post: + description: Start network trace + operationId: startTrace + requestBody: + description: operation {start}, filter, duration? + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/TraceOperation' + responses: + '202': + description: operation ID + content: + application/json: + schema: + $ref: '#/components/schemas/Trace' + '200': + description: operation completed + content: + application/json: + schema: + $ref: '#/components/schemas/Trace' + '500': + description: plugin/gadget not set/init + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + default: + description: unexpected error + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + /trace/{operationid}: + post: + description: Stop network trace + operationId: stop trace + parameters: + - name: operationid + in: path + description: ID of network trace to fetch + required: true + schema: + type: string + responses: + '200': + description: trace stopped + content: + application/json: + schema: + $ref: '#/components/schemas/Trace' + '404': + description: trace operation not found + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + default: + description: unexpected error + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + get: + description: Returns trace output correlating to operation ID + operationId: get trace + parameters: + - name: operationid + in: path + description: ID of network trace to fetch + required: true + schema: + type: string + responses: + '200': + description: trace response + content: + application/json: + schema: + $ref: '#/components/schemas/Trace' + '404': + description: trace operation not found + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + default: + description: unexpected error + content: + application/json: + schema: + $ref: 
'#/components/schemas/Error' +components: + schemas: + Trace: + type: object + properties: + operation: + type: string + operationID: + type: string + filter: + type: string + result: + type: string + + Error: + type: object + required: + - code + - message + properties: + code: + type: integer + format: int32 + message: + type: string diff --git a/pkg/bpf/setup.go b/pkg/bpf/setup.go new file mode 100644 index 000000000..350c1e00a --- /dev/null +++ b/pkg/bpf/setup.go @@ -0,0 +1,78 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package bpf + +import ( + "fmt" + "os" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/plugin/constants" + "github.com/microsoft/retina/pkg/plugin/filter" + "github.com/cilium/cilium/pkg/mountinfo" + "go.uber.org/zap" + "golang.org/x/sys/unix" +) + +func __mount() error { + // Check if the path exists. + _, err := os.Stat(constants.FilterMapPath) + if err != nil { + if os.IsNotExist(err) { + // Path does not exist. Create it. + err = os.MkdirAll(constants.FilterMapPath, 0o755) + if err != nil { + return err + } + } else { + // Some other error. Return. + return err + } + } + err = unix.Mount(constants.FilterMapPath, constants.FilterMapPath, "bpf", 0, "") + return err +} + +func mountBpfFs() error { + // Check if /sys/fs/bpf is already mounted. + mounted, bpfMount, err := mountinfo.IsMountFS(mountinfo.FilesystemTypeBPFFS, constants.FilterMapPath) + if err != nil { + return err + } + if !mounted { + if err := __mount(); err != nil { + return err + } + return nil + } + // Else mounted. Check the type of mount. + if !bpfMount { + // Custom mount of /sys/fs/bpf. Unknown setup. Exit. + return fmt.Errorf("%+s is already mounted but not as bpf. Not supported", constants.FilterMapPath) + } + return nil +} + +func Setup(l *log.ZapLogger) { + err := mountBpfFs() + if err != nil { + l.Panic("Failed to mount bpf filesystem", zap.Error(err)) + } + l.Info("BPF filesystem mounted successfully", zap.String("path", constants.FilterMapPath)) + + // Delete existing filter map file. + err = os.Remove(constants.FilterMapPath + "/" + constants.FilterMapName) + if err != nil && !os.IsNotExist(err) { + l.Panic("Failed to delete existing filter map file", zap.Error(err)) + } + l.Info("Deleted existing filter map file", zap.String("path", constants.FilterMapPath), zap.String("Map name", constants.FilterMapName)) + + // Initialize the filter map. + // This will create the filter map in kernel and pin it to /sys/fs/bpf. + _, err = filter.Init() + if err != nil { + l.Panic("Failed to initialize filter map", zap.Error(err)) + } + l.Info("Filter map initialized successfully", zap.String("path", constants.FilterMapPath), zap.String("Map name", constants.FilterMapName)) +} diff --git a/pkg/capture/capture_manager.go b/pkg/capture/capture_manager.go new file mode 100644 index 000000000..3ae56f18a --- /dev/null +++ b/pkg/capture/capture_manager.go @@ -0,0 +1,238 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
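// CaptureManager is configured entirely through environment variables set on the
// capture Pod (see pkg/capture/constants). A sketch of the variables read below:
//
//	CAPTURE_NAME      - name of the capture, used when setting up the output location
//	NODE_HOST_NAME    - node the capture runs on
//	TCPDUMP_FILTER    - packet filter on Linux (NETSH_FILTER is used on Windows)
//	CAPTURE_DURATION  - duration string such as "10s", parsed with time.ParseDuration
//	CAPTURE_MAX_SIZE  - maximum capture size in MB
//	INCLUDE_METADATA  - whether to collect network metadata alongside the packets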
+ +package capture + +import ( + "archive/tar" + "compress/gzip" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strconv" + "time" + + "go.uber.org/zap" + + captureConstants "github.com/microsoft/retina/pkg/capture/constants" + captureOutput "github.com/microsoft/retina/pkg/capture/outputlocation" + captureProvider "github.com/microsoft/retina/pkg/capture/provider" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/telemetry" +) + +// CaptureManager captures network packets and metadata into tar ball, then send the tar ball to the location(s) +// specified by users. +type CaptureManager struct { + l *log.ZapLogger + networkCaptureProvider captureProvider.NetworkCaptureProviderInterface + tel telemetry.Telemetry +} + +func NewCaptureManager(logger *log.ZapLogger, tel telemetry.Telemetry) *CaptureManager { + return &CaptureManager{ + l: logger, + networkCaptureProvider: captureProvider.NewNetworkCaptureProvider(logger), + tel: tel, + } +} + +func (cm *CaptureManager) CaptureNetwork(sigChan <-chan os.Signal) (string, error) { + tmpLocation, err := cm.networkCaptureProvider.Setup(cm.captureName(), cm.captureNodeHostName()) + if err != nil { + return "", err + } + + captureFilter := cm.captureFilter() + + captureDuration, err := cm.captureDuration() + if err != nil { + return "", err + } + + captureMaxSizeMB, err := cm.captureMaxSizeMB() + if err != nil { + return "", err + } + + if err := cm.networkCaptureProvider.CaptureNetworkPacket(captureFilter, captureDuration, captureMaxSizeMB, sigChan); err != nil { + return "", err + } + + if includeMetadata := cm.includeMetadata(); includeMetadata { + if err := cm.networkCaptureProvider.CollectMetadata(); err != nil { + return "", err + } + } + + cm.tel.TrackEvent("capturenetwork", map[string]string{ + "captureName": cm.captureName(), + "nodeName": cm.captureNodeHostName(), + "filter": captureFilter, + "duration": strconv.Itoa(captureDuration), + "maxSizeMB": strconv.Itoa(captureMaxSizeMB), + }) + + return tmpLocation, nil +} + +func (cm *CaptureManager) Cleanup() error { + if err := cm.networkCaptureProvider.Cleanup(); err != nil { + cm.l.Error("Failed to cleanup capture job", zap.String("capture name", cm.captureName()), zap.Error(err)) + return err + } + + return nil +} + +func (cm *CaptureManager) captureName() string { + return os.Getenv(captureConstants.CaptureNameEnvKey) +} + +func (cm *CaptureManager) captureNodeHostName() string { + return os.Getenv(captureConstants.NodeHostNameEnvKey) +} + +func (cm *CaptureManager) captureFilter() string { + captureFilter := os.Getenv(captureConstants.TcpdumpFilterEnvKey) + if runtime.GOOS == "windows" { + captureFilter = os.Getenv(captureConstants.NetshFilterEnvKey) + } + return captureFilter +} + +func (cm *CaptureManager) captureDuration() (int, error) { + captureDurationStr := os.Getenv(captureConstants.CaptureDurationEnvKey) + duration, err := time.ParseDuration(captureDurationStr) + if err != nil { + return 0, err + } + return int(duration.Seconds()), nil +} + +func (cm *CaptureManager) includeMetadata() bool { + includeMetadataStr := os.Getenv(captureConstants.IncludeMetadataEnvKey) + if len(includeMetadataStr) == 0 { + return false + } + + // As strconv.ParseBool returns false when hitting error, considering we enable includemetadata by default, + // we return false and log the error. 
+ includeMetadata, err := strconv.ParseBool(includeMetadataStr) + if err != nil { + cm.l.Error("Failed to parse string to boolean", zap.String("includeMetadata", includeMetadataStr), zap.Error(err)) + } + + return includeMetadata +} + +func (cm *CaptureManager) captureMaxSizeMB() (int, error) { + captureMaxSizeMBStr := os.Getenv(captureConstants.CaptureMaxSizeEnvKey) + if len(captureMaxSizeMBStr) == 0 { + return 0, nil + } + return strconv.Atoi(captureMaxSizeMBStr) +} + +func (cm *CaptureManager) OutputCapture(srcDir string) error { + var errStr string + + if _, err := os.Stat(srcDir); os.IsNotExist(err) { + return fmt.Errorf("capture source directory %s does not exist", srcDir) + } + + dstTarGz := srcDir + ".tar.gz" + if err := compressFolderToTarGz(srcDir, dstTarGz); err != nil { + return err + } + + for _, location := range cm.enabledOutputLocations() { + if err := location.Output(dstTarGz); err != nil { + errStr = errStr + fmt.Sprintf("location %q output error: %s\n", location.Name(), err) + } + } + + if len(errStr) != 0 { + return fmt.Errorf(errStr) + } + + // Remove tarball created inside this function. + if err := os.Remove(dstTarGz); err != nil { + cm.l.Error("Failed to delete tarball", zap.String("tarball name", dstTarGz), zap.Error(err)) + } + + return nil +} + +func (cm *CaptureManager) enabledOutputLocations() []captureOutput.Location { + locations := []captureOutput.Location{} + if hostPath := captureOutput.NewHostPath(cm.l); hostPath.Enabled() { + locations = append(locations, hostPath) + } + if bu := captureOutput.NewBlobUpload(cm.l); bu.Enabled() { + locations = append(locations, bu) + } + if pvc := captureOutput.NewPersistentVolumeClaim(cm.l); pvc.Enabled() { + locations = append(locations, pvc) + } + return locations +} + +func compressFolderToTarGz(src string, dst string) error { + // Create the output file + out, err := os.Create(dst) + if err != nil { + return err + } + defer out.Close() + + // Create the gzip writer + gz := gzip.NewWriter(out) + defer gz.Close() + + // Create the tar writer + tarWriter := tar.NewWriter(gz) + defer tarWriter.Close() + + // Walk the source directory and add files to the tar archive + err = filepath.Walk(src, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Create a new tar header + header, err := tar.FileInfoHeader(info, info.Name()) + if err != nil { + return err + } + + // Set the header name to the relative path + relPath, err := filepath.Rel(src, path) + if err != nil { + return err + } + header.Name = relPath + + // Write the header and file contents to the tar archive + if err = tarWriter.WriteHeader(header); err != nil { + return err + } + if !info.Mode().IsRegular() { + return nil + } + file, err := os.Open(path) + if err != nil { + return err + } + defer file.Close() + if _, err = io.Copy(tarWriter, file); err != nil { + return err + } + + return nil + }) + + return err +} diff --git a/pkg/capture/capture_manager_test.go b/pkg/capture/capture_manager_test.go new file mode 100644 index 000000000..b075f73ae --- /dev/null +++ b/pkg/capture/capture_manager_test.go @@ -0,0 +1,140 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package capture + +import ( + "fmt" + "os" + "strconv" + "testing" + + "github.com/golang/mock/gomock" + "github.com/google/go-cmp/cmp" + + captureConstants "github.com/microsoft/retina/pkg/capture/constants" + "github.com/microsoft/retina/pkg/capture/provider" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/telemetry" +) + +func TestCaptureNetwork(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + networkCaptureProvider := provider.NewMockNetworkCaptureProviderInterface(ctrl) + cm := &CaptureManager{ + networkCaptureProvider: networkCaptureProvider, + tel: telemetry.NewNoopTelemetry(), + } + + captureName := "capture-name" + nodeHostName := "node-host-name" + filter := "-i any" + duration := 10 + maxSize := 100 + os.Setenv(captureConstants.CaptureNameEnvKey, captureName) + os.Setenv(captureConstants.NodeHostNameEnvKey, nodeHostName) + os.Setenv(captureConstants.TcpdumpFilterEnvKey, filter) + os.Setenv(captureConstants.CaptureDurationEnvKey, "10s") + os.Setenv(captureConstants.CaptureMaxSizeEnvKey, strconv.Itoa(maxSize)) + + defer func() { + os.Unsetenv(captureConstants.CaptureNameEnvKey) + os.Unsetenv(captureConstants.NodeHostNameEnvKey) + os.Unsetenv(captureConstants.TcpdumpFilterEnvKey) + os.Unsetenv(captureConstants.CaptureDurationEnvKey) + os.Unsetenv(captureConstants.CaptureMaxSizeEnvKey) + }() + + sigChanel := make(chan os.Signal, 1) + + networkCaptureProvider.EXPECT().Setup(captureName, nodeHostName).Return(fmt.Sprintf("%s-%s", captureName, nodeHostName), nil).Times(1) + networkCaptureProvider.EXPECT().CaptureNetworkPacket(filter, duration, maxSize, sigChanel).Return(nil).Times(1) + + _, err := cm.CaptureNetwork(sigChanel) + if err != nil { + t.Errorf("CaptureNetwork should have not fail with error %s", err) + } +} + +func TestEnabledOutputLocation(t *testing.T) { + cases := []struct { + name string + env map[string]string + wantEnabledOutputLocation []string + }{ + { + name: "HostPath output location is enabled", + env: map[string]string{ + string(captureConstants.CaptureOutputLocationEnvKeyHostPath): "/tmp/capture", + }, + wantEnabledOutputLocation: []string{"HostPath"}, + }, + { + name: "PVC output location is enabled", + env: map[string]string{ + string(captureConstants.CaptureOutputLocationEnvKeyPersistentVolumeClaim): "mypvc", + }, + wantEnabledOutputLocation: []string{"PersistentVolumeClaim"}, + }, + { + name: "PVC and HostPath output location is enabled", + env: map[string]string{ + string(captureConstants.CaptureOutputLocationEnvKeyHostPath): "/tmp/capture", + string(captureConstants.CaptureOutputLocationEnvKeyPersistentVolumeClaim): "mypvc", + }, + wantEnabledOutputLocation: []string{"HostPath", "PersistentVolumeClaim"}, + }, + } + + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + for k, v := range tt.env { + os.Setenv(k, v) + } + + defer func() { + for k := range tt.env { + os.Unsetenv(k) + } + }() + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + networkCaptureProvider := provider.NewMockNetworkCaptureProviderInterface(ctrl) + + log.SetupZapLogger(log.GetDefaultLogOpts()) + cm := &CaptureManager{ + networkCaptureProvider: networkCaptureProvider, + l: log.Logger().Named("test"), + } + enabledOutputLocations := cm.enabledOutputLocations() + enabledOutputLocationNames := []string{} + for _, enabledOutputLocation := range enabledOutputLocations { + enabledOutputLocationNames = append(enabledOutputLocationNames, enabledOutputLocation.Name()) + } + + if diff := 
cmp.Diff(tt.wantEnabledOutputLocation, enabledOutputLocationNames); diff != "" { + t.Errorf("CalculateCaptureTargetsOnNode() mismatch (-want, +got):\n%s", diff) + } + }) + } +} + +func TestCleanup(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + networkCaptureProvider := provider.NewMockNetworkCaptureProviderInterface(ctrl) + cm := &CaptureManager{ + networkCaptureProvider: networkCaptureProvider, + } + + networkCaptureProvider.EXPECT().Cleanup().Return(nil).Times(1) + + if err := cm.Cleanup(); err != nil { + t.Errorf("Cleanup should have not fail with error %s", err) + } +} diff --git a/pkg/capture/constants/job_env.go b/pkg/capture/constants/job_env.go new file mode 100644 index 000000000..d579866e8 --- /dev/null +++ b/pkg/capture/constants/job_env.go @@ -0,0 +1,29 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package constants + +type ( + CaptureOutputLocationEnvKey string +) + +// Environment variables +const ( + CaptureOutputLocationEnvKeyHostPath CaptureOutputLocationEnvKey = "HOSTPATH" + CaptureOutputLocationEnvKeyPersistentVolumeClaim CaptureOutputLocationEnvKey = "PERSISTENT_VOLUME_CLAIM" + + CaptureNameEnvKey string = "CAPTURE_NAME" + NodeHostNameEnvKey string = "NODE_HOST_NAME" + + CaptureFilterEnvKey string = "CAPTURE_FILTER" + CaptureDurationEnvKey string = "CAPTURE_DURATION" + CaptureMaxSizeEnvKey string = "CAPTURE_MAX_SIZE" + IncludeMetadataEnvKey string = "INCLUDE_METADATA" + PacketSizeEnvKey string = "CAPTURE_PACKET_SIZE" + + TcpdumpFilterEnvKey string = "TCPDUMP_FILTER" + TcpdumpRawFilterEnvKey string = "TCPDUMP_RAW_FILTER" + NetshFilterEnvKey string = "NETSH_FILTER" + + ApiserverEnvKey = "APISERVER" +) diff --git a/pkg/capture/constants/job_specification.go b/pkg/capture/constants/job_specification.go new file mode 100644 index 000000000..4031f768a --- /dev/null +++ b/pkg/capture/constants/job_specification.go @@ -0,0 +1,34 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package constants + +// Capture job specification +const ( + CaptureHostPathVolumeName string = "hostpath" + CapturePVCVolumeName string = "pvc" + + // PersistentVolumeClaimVolumeMountPathLinux is the PVC volume mount path of container hosted on Linux node. + PersistentVolumeClaimVolumeMountPathLinux string = "/mnt/azure" + // PersistentVolumeClaimVolumeMountPathWin is the PVC volume mount path of container hosted on Windows node. + PersistentVolumeClaimVolumeMountPathWin string = "D:" + + CaptureContainerEntrypoint string = "./retina/captureworkload" + CaptureContainerEntrypointWin string = "captureworkload.exe" + + CaptureAppname string = "capture" + CaptureContainername string = "capture" + + // CaptureOutputLocationBlobUploadSecretName is the name of the secret that stores the blob upload url. + CaptureOutputLocationBlobUploadSecretName string = "capture-blob-upload-secret" + // CaptureOutputLocationBlobUploadSecretPath is the path of the secret that stores the blob upload url. + CaptureOutputLocationBlobUploadSecretPath string = "/etc/blob-upload-secret" + // CaptureOutputLocationBlobUploadSecretKey is the key of the secret that stores the blob upload url. 
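	// A matching secret could be created along these lines (illustrative; the Capture's
	// outputConfiguration.blobUpload field references the secret by name):
	//
	//	kubectl create secret generic capture-blob-upload-secret \
	//	  --from-literal=blob-upload-url="<blob upload URL>"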
+ CaptureOutputLocationBlobUploadSecretKey string = "blob-upload-url" + + // CaptureWorkloadImageName defines the official capture workload image repo and image name + CaptureWorkloadImageName string = "mcr.microsoft.com/containernetworking/retina-agent" + + // DebugCaptureWorkloadImageName defines the capture workload image for testing and debugging + DebugCaptureWorkloadImageName string = "acnpublic.azurecr.io/retina-agent" +) diff --git a/pkg/capture/constants/windows.go b/pkg/capture/constants/windows.go new file mode 100644 index 000000000..f9b625468 --- /dev/null +++ b/pkg/capture/constants/windows.go @@ -0,0 +1,7 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package constants + +// ContainerSandboxMountPointEnvKey provides the absolute host path to the container volume. +const ContainerSandboxMountPointEnvKey = "CONTAINER_SANDBOX_MOUNT_POINT" diff --git a/pkg/capture/crd_to_job.go b/pkg/capture/crd_to_job.go new file mode 100644 index 000000000..85c57512a --- /dev/null +++ b/pkg/capture/crd_to_job.go @@ -0,0 +1,916 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package capture + +import ( + "context" + "fmt" + "net" + "sort" + "strconv" + "strings" + + "go.uber.org/zap" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/kubernetes" + + retinav1alpha1 "github.com/microsoft/retina/crd/api/v1alpha1" + captureConstants "github.com/microsoft/retina/pkg/capture/constants" + captureUtils "github.com/microsoft/retina/pkg/capture/utils" + "github.com/microsoft/retina/pkg/config" + "github.com/microsoft/retina/pkg/label" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/telemetry" + apierrors "k8s.io/apimachinery/pkg/api/errors" +) + +const anyIPOrPort = "" + +// CaptureTarget indicates on which the network capture will be performed on a given node. +type CaptureTarget struct { + // PodIpAddresses indicates the capture is performed on the Pods per their IP addresses. + PodIpAddresses []string + // CaptureNodeInterface indicates the capture is performed on the host node interface. + CaptureNodeInterface bool + + // OS indicates the operating system of the node hosting the target. + OS string +} + +// CaptureTargetsOnNode maps nodes to the targets network capture will be performed. +// On each node, the network capture can be done on either Pod hosted in the node, or the node interface itself. +// When multiple nodes are selected per capture configuration, there'll be multiple jobs created per node. +type CaptureTargetsOnNode map[string]CaptureTarget + +func (cton CaptureTargetsOnNode) AddPod(hostname string, ipAddresses []string) { + if captureTarget, ok := cton[hostname]; !ok { + cton[hostname] = CaptureTarget{ + PodIpAddresses: ipAddresses, + } + } else { + captureTarget.PodIpAddresses = append(captureTarget.PodIpAddresses, ipAddresses...) + cton[hostname] = captureTarget + } +} + +func (cton CaptureTargetsOnNode) AddNodeInterface(hostname string) { + cton[hostname] = CaptureTarget{ + CaptureNodeInterface: true, + } +} + +func (cton CaptureTargetsOnNode) UpdateNodeOS(hostname string, os string) { + if captureTarget, ok := cton[hostname]; ok { + captureTarget.OS = os + cton[hostname] = captureTarget + } +} + +// CaptureToPodTranslator translate the Capture object to a Job. 
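// The translation fans out per node: CalculateCaptureTargetsOnNode resolves the
// Capture's node/pod selectors into a CaptureTargetsOnNode map, and renderJob then
// stamps one Job per selected node from jobTemplate, pinning it with a node-affinity
// term and applying OS-specific command, filter and security settings.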
+type CaptureToPodTranslator struct { + kubeClient kubernetes.Interface + jobTemplate *batchv1.Job + captureWorkloadImage string + + config config.CaptureConfig + + // Apiserver is unique identifier to identify the cluster in the trace capture. + Apiserver string + + l *log.ZapLogger +} + +// NewCaptureToPodTranslator initializes a CaptureToPodTranslator. +func NewCaptureToPodTranslator(kubeClient kubernetes.Interface, logger *log.ZapLogger, config config.CaptureConfig) *CaptureToPodTranslator { + captureWorkloadImage := captureUtils.CaptureWorkloadImage(logger, config.CaptureImageVersion, config.CaptureDebug, config.CaptureImageVersionSource) + + captureToPodTranslator := &CaptureToPodTranslator{ + kubeClient: kubeClient, + captureWorkloadImage: captureWorkloadImage, + config: config, + l: logger, + } + + apierverURL, err := telemetry.GetK8SApiserverURLFromKubeConfig() + if err != nil { + captureToPodTranslator.l.Error("Failed to get apiserver URL", zap.Error(err)) + // TODO(mainred): should we return error here? + captureToPodTranslator.Apiserver = "" + } else { + captureToPodTranslator.Apiserver = apierverURL + } + + return captureToPodTranslator +} + +func (translator *CaptureToPodTranslator) initJobTemplate(capture *retinav1alpha1.Capture) error { + backoffLimit := int32(0) + // NOTE(mainred): We allow the capture pod to run for at most 30 minutes before being deleted to ensure the output is + // uploaded, and this happens when the user want to stop a capture on demand by deleting the capture. + captureTerminationGracePeriodSeconds := int64(1800) + translator.jobTemplate = &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: fmt.Sprintf("%s-", capture.Name), + Namespace: capture.Namespace, + Labels: captureUtils.GetJobLabelsFromCaptureName(capture.Name), + }, + Spec: batchv1.JobSpec{ + BackoffLimit: &backoffLimit, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: captureUtils.GetContainerLabelsFromCaptureName(capture.Name), + Namespace: capture.Namespace, + }, + Spec: corev1.PodSpec{ + HostNetwork: true, + HostIPC: true, + TerminationGracePeriodSeconds: &captureTerminationGracePeriodSeconds, + Containers: []corev1.Container{ + { + Name: captureConstants.CaptureContainername, + Image: translator.captureWorkloadImage, + ImagePullPolicy: corev1.PullIfNotPresent, + SecurityContext: &corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{ + Add: []corev1.Capability{ + "NET_ADMIN", "SYS_ADMIN", + }, + }, + }, + Env: []corev1.EnvVar{ + { + Name: telemetry.EnvPodName, + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + { + Name: captureConstants.ApiserverEnvKey, + Value: translator.Apiserver, + }, + }, + // Usually, the Capture Pod takes no more than 10m CPU + // and 10Mi memory. And considering the Capture Pod + // does not consumes much resources as requried by + // the workload, it's safe to hard-code the resource + // requests and limits. + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10m"), + corev1.ResourceMemory: resource.MustParse("64Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + // Initial test 128Mi does not always work on Capture on Windows node + // for OutOfMemoryException. 
+ corev1.ResourceMemory: resource.MustParse("300Mi"), + }, + }, + }, + }, + + RestartPolicy: corev1.RestartPolicyNever, + + Tolerations: []corev1.Toleration{ + { + Key: "CriticalAddonsOnly", + Operator: "Exists", + }, + { + Effect: "NoExecute", + Operator: "Exists", + }, + { + Effect: "NoSchedule", + Operator: "Exists", + }, + }, + }, + }, + }, + } + + if capture.Spec.OutputConfiguration.HostPath != nil && *capture.Spec.OutputConfiguration.HostPath != "" { + translator.l.Info("HostPath is not empty", zap.String("HostPath", *capture.Spec.OutputConfiguration.HostPath)) + + captureFolderHostPathType := corev1.HostPathDirectoryOrCreate + hostPath := *capture.Spec.OutputConfiguration.HostPath + hostPathVolume := corev1.Volume{ + Name: captureConstants.CaptureHostPathVolumeName, + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: hostPath, + Type: &captureFolderHostPathType, + }, + }, + } + translator.jobTemplate.Spec.Template.Spec.Volumes = append(translator.jobTemplate.Spec.Template.Spec.Volumes, hostPathVolume) + + hostPathVolumeMount := corev1.VolumeMount{ + Name: captureConstants.CaptureHostPathVolumeName, + MountPath: hostPath, + } + translator.jobTemplate.Spec.Template.Spec.Containers[0].VolumeMounts = append(translator.jobTemplate.Spec.Template.Spec.Containers[0].VolumeMounts, hostPathVolumeMount) + } + + if capture.Spec.OutputConfiguration.BlobUpload != nil && *capture.Spec.OutputConfiguration.BlobUpload != "" { + translator.l.Info("BlobUpload is not empty") + secret, err := translator.kubeClient.CoreV1().Secrets(capture.Namespace).Get(context.Background(), *capture.Spec.OutputConfiguration.BlobUpload, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + err := SecretNotFoundError{SecretName: *capture.Spec.OutputConfiguration.BlobUpload, Namespace: capture.Namespace} + translator.l.Error(err.Error()) + return err + } + if err != nil { + translator.l.Error("Failed to get secrets for Capture", zap.Error(err), zap.String("CaptureName", capture.Name), zap.String("secretName", *capture.Spec.OutputConfiguration.BlobUpload)) + return err + } + + secretVolume := corev1.Volume{ + Name: *capture.Spec.OutputConfiguration.BlobUpload, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secret.Name, + }, + }, + } + translator.jobTemplate.Spec.Template.Spec.Volumes = append(translator.jobTemplate.Spec.Template.Spec.Volumes, secretVolume) + + secretVolumeMount := corev1.VolumeMount{ + Name: *capture.Spec.OutputConfiguration.BlobUpload, + ReadOnly: true, + MountPath: captureConstants.CaptureOutputLocationBlobUploadSecretPath, + } + + translator.jobTemplate.Spec.Template.Spec.Containers[0].VolumeMounts = append(translator.jobTemplate.Spec.Template.Spec.Containers[0].VolumeMounts, secretVolumeMount) + } + + if capture.Spec.OutputConfiguration.PersistentVolumeClaim != nil && *capture.Spec.OutputConfiguration.PersistentVolumeClaim != "" { + translator.l.Info("PersistentVolumeClaim is not empty", zap.String("PersistentVolumeClaim", *capture.Spec.OutputConfiguration.PersistentVolumeClaim)) + + _, err := translator.kubeClient.CoreV1().PersistentVolumeClaims(capture.Namespace).Get(context.TODO(), *capture.Spec.OutputConfiguration.PersistentVolumeClaim, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get pvc %s/%s", capture.Namespace, *capture.Spec.OutputConfiguration.PersistentVolumeClaim) + } + pvcVolume := corev1.Volume{ + Name: captureConstants.CapturePVCVolumeName, + VolumeSource: corev1.VolumeSource{ + 
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: *capture.Spec.OutputConfiguration.PersistentVolumeClaim, + }, + }, + } + translator.jobTemplate.Spec.Template.Spec.Volumes = append(translator.jobTemplate.Spec.Template.Spec.Volumes, pvcVolume) + + pvcVolumeMountPath := captureConstants.PersistentVolumeClaimVolumeMountPathLinux + pvcVolumeMount := corev1.VolumeMount{ + Name: captureConstants.CapturePVCVolumeName, + MountPath: pvcVolumeMountPath, + } + translator.jobTemplate.Spec.Template.Spec.Containers[0].VolumeMounts = append(translator.jobTemplate.Spec.Template.Spec.Containers[0].VolumeMounts, pvcVolumeMount) + } + return nil +} + +// validateNoRunningWindowsCapture checks if there's any running capture jobs on the Windows to deploy capture. +// Windows node allows only one capture job running for only one tracing session is allowed at one time. +func (translator *CaptureToPodTranslator) validateNoRunningWindowsCapture(captureTargetOnNode *CaptureTargetsOnNode) error { + // map is easy for removing duplication. + windowsNodesRunningCapture := map[string]struct{}{} + capturePodSelector := &metav1.LabelSelector{ + MatchLabels: map[string]string{label.AppLabel: captureConstants.CaptureAppname}, + } + labelSelector, _ := labels.Parse(metav1.FormatLabelSelector(capturePodSelector)) + podListOpt := metav1.ListOptions{ + LabelSelector: labelSelector.String(), + } + podList, err := translator.kubeClient.CoreV1().Pods("").List(context.TODO(), podListOpt) + if err != nil { + translator.l.Error("Failed to list capture Pods", zap.String("podSelector", capturePodSelector.String()), zap.Error(err)) + return err + } + + for _, pod := range podList.Items { + if pod.Status.Phase != corev1.PodPending && pod.Status.Phase != corev1.PodRunning { + continue + } + if (*captureTargetOnNode)[pod.Spec.NodeName].OS == "windows" { + windowsNodesRunningCapture[pod.Spec.NodeName] = struct{}{} + } + } + + if len(windowsNodesRunningCapture) != 0 { + windowsNodeToPrint := []string{} + for windowsNode := range windowsNodesRunningCapture { + windowsNodeToPrint = append(windowsNodeToPrint, windowsNode) + } + return fmt.Errorf("Windows node allows only one capture job running at one time, but there is already in-progress capture job on Windows nodes %s", windowsNodeToPrint) + } + return nil +} + +func (translator *CaptureToPodTranslator) TranslateCaptureToJobs(capture *retinav1alpha1.Capture) ([]*batchv1.Job, error) { + if err := translator.validateCapture(capture); err != nil { + return nil, err + } + + if err := translator.initJobTemplate(capture); err != nil { + return nil, err + } + captureTargetOnNode, err := translator.CalculateCaptureTargetsOnNode(capture.Spec.CaptureConfiguration.CaptureTarget) + if err != nil { + return nil, err + } + + jobPodEnv, err := translator.ObtainCaptureJobPodEnv(*capture) + if err != nil { + return nil, err + } + + jobs, err := translator.renderJob(captureTargetOnNode, jobPodEnv) + if err != nil { + return nil, err + } + + if translator.config.CaptureJobNumLimit != 0 && len(jobs) > translator.config.CaptureJobNumLimit { + return nil, CaptureJobNumExceedLimitError{CurrentNum: len(jobs), Limit: translator.config.CaptureJobNumLimit} + } + + return jobs, nil +} + +func (translator *CaptureToPodTranslator) renderJob(captureTargetOnNode *CaptureTargetsOnNode, envCommon map[string]string) ([]*batchv1.Job, error) { + if len(*captureTargetOnNode) == 0 { + return nil, fmt.Errorf("no nodes are selected") + } + + jobs := make([]*batchv1.Job, 0, len(*captureTargetOnNode)) + 
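	// Each node gets its own copy of the common environment so that the per-OS
	// adjustments below (dropping NETSH_FILTER on Linux, TCPDUMP_FILTER on Windows) do
	// not leak between the generated jobs.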
for nodeName, target := range *captureTargetOnNode { + jobEnv := make(map[string]string, len(envCommon)) + for k, v := range envCommon { + jobEnv[k] = v + } + job := translator.jobTemplate.DeepCopy() + + job.Spec.Template.Spec.Affinity = &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: corev1.LabelHostname, + Operator: corev1.NodeSelectorOpIn, + Values: []string{nodeName}, + }, + }, + }, + }, + }, + }, + } + + if target.OS == "linux" { + // tcpdump requires run as a root for Linux, while for pods deployed on Windows node, there's an ongoing + // issue that causes pods to not start on Windows. + // ref: https://github.com/kubernetes/kubernetes/issues/102849 + rootUser := int64(0) + job.Spec.Template.Spec.Containers[0].SecurityContext.RunAsUser = &rootUser + job.Spec.Template.Spec.Containers[0].Command = []string{captureConstants.CaptureContainerEntrypoint} + + delete(jobEnv, captureConstants.NetshFilterEnvKey) + // Update tcpdumpfilter to include target POD IP address. + if updatedTcpdumpFilter := updateTcpdumpFilterWithPodIPAddress(target.PodIpAddresses, jobEnv[captureConstants.TcpdumpFilterEnvKey]); len(updatedTcpdumpFilter) != 0 { + jobEnv[captureConstants.TcpdumpFilterEnvKey] = updatedTcpdumpFilter + } + } else { + containerAdministrator := "NT AUTHORITY\\SYSTEM" + useHostProcess := true + job.Spec.Template.Spec.Containers[0].SecurityContext.WindowsOptions = &corev1.WindowsSecurityContextOptions{ + HostProcess: &useHostProcess, + RunAsUserName: &containerAdministrator, + } + job.Spec.Template.Spec.Containers[0].Command = []string{captureConstants.CaptureContainerEntrypointWin} + + // Update to pvc mount path for Windows pods. + for i, volumeMount := range job.Spec.Template.Spec.Containers[0].VolumeMounts { + if volumeMount.MountPath == captureConstants.PersistentVolumeClaimVolumeMountPathLinux { + job.Spec.Template.Spec.Containers[0].VolumeMounts[i].MountPath = captureConstants.PersistentVolumeClaimVolumeMountPathWin + break + } + } + + delete(jobEnv, captureConstants.TcpdumpFilterEnvKey) + if netshFilter := getNetshFilterWithPodIPAddress(target.PodIpAddresses); len(netshFilter) != 0 { + jobEnv[captureConstants.NetshFilterEnvKey] = netshFilter + } + } + + for k, v := range jobEnv { + job.Spec.Template.Spec.Containers[0].Env = append(job.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{Name: k, Value: v}) + } + job.Spec.Template.Spec.Containers[0].Env = append(job.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{Name: captureConstants.NodeHostNameEnvKey, Value: nodeName}) + + jobs = append(jobs, job) + } + + return jobs, nil +} + +func updateTcpdumpFilterWithPodIPAddress(podIPAddresses []string, tcpdumpFilter string) string { + if len(podIPAddresses) == 0 { + return tcpdumpFilter + } + podIPFilterArray := []string{} + for _, podIPAddress := range podIPAddresses { + podIPFilterArray = append(podIPFilterArray, fmt.Sprintf("host %s", podIPAddress)) + } + + // Use logic OR to capture network traffic when any of the Pod IP Address meets. 
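	// For example, with Pod IPs 10.0.0.1 and 10.0.0.2 and an existing filter "port 80",
	// the returned expression is: port 80 or (host 10.0.0.1 or host 10.0.0.2)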
+ filterGroup := fmt.Sprintf("(%s)", strings.Join(podIPFilterArray, " or ")) + + if len(tcpdumpFilter) == 0 { + return filterGroup + } + return fmt.Sprintf("%s or %s", tcpdumpFilter, filterGroup) +} + +func getNetshFilterWithPodIPAddress(podIPAddresses []string) string { + if len(podIPAddresses) == 0 { + return "" + } + + // netsh accepts multiple IP address as filters for the trace capture. + // Example: IPv4.Address=(157.59.136.1,157.59.136.11) + // Please check `netsh trace show capturefilterhelp` for the detail. + podIPv4Addresses := []string{} + podIPv6Addresses := []string{} + for _, podIPAddress := range podIPAddresses { + // check ipv4 or ipv6 + parsedIP := net.ParseIP(podIPAddress) + if parsedIP == nil { + continue + } + if parsedIP.To4() != nil { + podIPv4Addresses = append(podIPv4Addresses, podIPAddress) + } else { + podIPv6Addresses = append(podIPv6Addresses, podIPAddress) + } + } + + var podIPv4FilterGroup string + if len(podIPv4Addresses) != 0 { + podIPv4FilterArray := strings.Join(podIPv4Addresses, ",") + podIPv4FilterGroup = fmt.Sprintf("IPv4.Address=(%s)", podIPv4FilterArray) + } + + var podIPv6FilterGroup string + if len(podIPv6Addresses) != 0 { + podIPv6FilterArray := strings.Join(podIPv6Addresses, ",") + podIPv6FilterGroup = fmt.Sprintf("IPv6.Address=(%s)", podIPv6FilterArray) + } + + JoinedFilterGroups := strings.Join([]string{podIPv4FilterGroup, podIPv6FilterGroup}, " ") + return strings.Trim(JoinedFilterGroups, " ") +} + +// validateTargetSelector validate target selectors defined in the capture. +func (translator *CaptureToPodTranslator) validateTargetSelector(captureTarget retinav1alpha1.CaptureTarget) error { + // When NamespaceSelector is nil while PodSelector is specified, the namespace will be determined by capture.Namespace. + if captureTarget.NodeSelector == nil && captureTarget.PodSelector == nil { + return fmt.Errorf("Neither NodeSelector nor NamespaceSelector&PodSelector is set.") + } + if captureTarget.NodeSelector != nil && (captureTarget.NamespaceSelector != nil || captureTarget.PodSelector != nil) { + return fmt.Errorf("NodeSelector is not compatible with NamespaceSelector&PodSelector.") + } + return nil +} + +func (translator *CaptureToPodTranslator) validateCapture(capture *retinav1alpha1.Capture) error { + if err := translator.validateTargetSelector(capture.Spec.CaptureConfiguration.CaptureTarget); err != nil { + return err + } + + // TODO(mainred): do we need to set a limitation to the capture file size anyway? 
+ if capture.Spec.CaptureConfiguration.CaptureOption.Duration == nil && capture.Spec.CaptureConfiguration.CaptureOption.MaxCaptureSize == nil { + return fmt.Errorf("Neither duration nor maxCaptureSize is set to stop the capture") + } + + if capture.Spec.OutputConfiguration.BlobUpload == nil && capture.Spec.OutputConfiguration.HostPath == nil && capture.Spec.OutputConfiguration.PersistentVolumeClaim == nil { + return fmt.Errorf("At least one output configuration should be set") + } + return nil +} + +func (translator *CaptureToPodTranslator) getCaptureTargetsOnNode(captureTarget retinav1alpha1.CaptureTarget) (*CaptureTargetsOnNode, error) { + var err error + captureTargetsOnNode := &CaptureTargetsOnNode{} + if captureTarget.NodeSelector != nil { + if captureTargetsOnNode, err = translator.calculateCaptureTargetsByNodeSelector(captureTarget); err != nil { + return nil, err + } + } + if captureTarget.PodSelector != nil { + if captureTargetsOnNode, err = translator.calculateCaptureTargetsByPodSelector(captureTarget); err != nil { + return nil, err + } + } + + if len(*captureTargetsOnNode) == 0 { + return nil, fmt.Errorf("no targets are selected by node selector or pod selector") + } + return captureTargetsOnNode, nil +} + +func (translator *CaptureToPodTranslator) updateCaptureTargetsOSOnNode(captureTargetsOnNode *CaptureTargetsOnNode) error { + nodeNames := []string{} + for nodeName := range *captureTargetsOnNode { + nodeNames = append(nodeNames, nodeName) + } + nodeLabelSelector := metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: corev1.LabelHostname, + Operator: metav1.LabelSelectorOpIn, + Values: nodeNames, + }}, + } + labelSelector, _ := labels.Parse(metav1.FormatLabelSelector(&nodeLabelSelector)) + + nodeListOpt := metav1.ListOptions{ + LabelSelector: labelSelector.String(), + } + nodeList, err := translator.kubeClient.CoreV1().Nodes().List(context.TODO(), nodeListOpt) + if err != nil { + return err + } + for _, node := range nodeList.Items { + captureTargetsOnNode.UpdateNodeOS(node.Name, node.Labels[corev1.LabelOSStable]) + } + + return nil +} + +// CalculateCaptureTargetsOnNode returns capture target on each node. 
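// It validates the target selectors, resolves the targets from the node or pod
// selector, annotates each entry with the node's operating system, and finally rejects
// the request if a capture job is already running on any selected Windows node.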
+func (translator *CaptureToPodTranslator) CalculateCaptureTargetsOnNode(captureTarget retinav1alpha1.CaptureTarget) (*CaptureTargetsOnNode, error) { + if err := translator.validateTargetSelector(captureTarget); err != nil { + return nil, err + } + + captureTargetsOnNode, err := translator.getCaptureTargetsOnNode(captureTarget) + if err != nil { + return nil, err + } + + if err := translator.updateCaptureTargetsOSOnNode(captureTargetsOnNode); err != nil { + return nil, err + } + + if err := translator.validateNoRunningWindowsCapture(captureTargetsOnNode); err != nil { + return nil, err + } + return captureTargetsOnNode, nil +} + +func (translator *CaptureToPodTranslator) calculateCaptureTargetsByNodeSelector(captureTarget retinav1alpha1.CaptureTarget) (*CaptureTargetsOnNode, error) { + captureTargetOnNode := &CaptureTargetsOnNode{} + labelSelector, err := labels.Parse(metav1.FormatLabelSelector(captureTarget.NodeSelector)) + if err != nil { + translator.l.Error("Failed to parse node selector to label", zap.String("nodeSelector", captureTarget.NodeSelector.String()), zap.Error(err)) + return nil, err + } + nodeListOpt := metav1.ListOptions{ + LabelSelector: labelSelector.String(), + } + nodeList, err := translator.kubeClient.CoreV1().Nodes().List(context.TODO(), nodeListOpt) + if err != nil { + translator.l.Error("Failed to list node", zap.String("nodeSelector", fmt.Sprint(captureTarget.NodeSelector.String())), zap.Error(err)) + return nil, err + } + for _, node := range nodeList.Items { + captureTargetOnNode.AddNodeInterface(node.Name) + } + return captureTargetOnNode, nil +} + +func (translator *CaptureToPodTranslator) calculateCaptureTargetsByPodSelector(captureTarget retinav1alpha1.CaptureTarget) (*CaptureTargetsOnNode, error) { + captureTargetOnNode := &CaptureTargetsOnNode{} + nsList := &corev1.NamespaceList{Items: []corev1.Namespace{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: corev1.NamespaceDefault, + }, + }, + }} + + if captureTarget.NamespaceSelector != nil { + var err error + labelSelector, err := labels.Parse(metav1.FormatLabelSelector(captureTarget.NamespaceSelector)) + if err != nil { + translator.l.Error("PersistentVolumeClaim is not empty", zap.String("namespaceSelector", captureTarget.NamespaceSelector.String()), zap.Error(err)) + return nil, err + } + nsListOpt := metav1.ListOptions{ + LabelSelector: labelSelector.String(), + } + nsList, err = translator.kubeClient.CoreV1().Namespaces().List(context.TODO(), nsListOpt) + if err != nil { + translator.l.Error("Failed to list Namespace", zap.String("namespaceSelector", captureTarget.NamespaceSelector.String()), zap.Error(err)) + return nil, err + } + } + + for _, ns := range nsList.Items { + labelSelector, _ := labels.Parse(metav1.FormatLabelSelector(captureTarget.PodSelector)) + podListOpt := metav1.ListOptions{ + LabelSelector: labelSelector.String(), + } + podList, err := translator.kubeClient.CoreV1().Pods(ns.Name).List(context.TODO(), podListOpt) + if err != nil { + translator.l.Error("Failed to list Pod.", zap.String("podSelector", captureTarget.PodSelector.String()), zap.Error(err)) + return nil, err + } + for _, pod := range podList.Items { + // TODO: Need to consider the status when pod has no ip address assigned. + // We want to include all the ip addresses assigned to the Pod in case the Pod is selected. + // This may happen when a pod is allocated with IPv4 and IPv6 addresses. + // And Pod.Status.PodIPs must include pod.Status.PodIP. 
+ podIPs := []string{} + for _, podIP := range pod.Status.PodIPs { + podIPs = append(podIPs, podIP.IP) + } + captureTargetOnNode.AddPod(pod.Spec.NodeName, podIPs) + } + } + + return captureTargetOnNode, nil +} + +// For tcpdump, we put each filter(ip:port) into parentheses, and all include filters will be grouped in parentheses, +// same case for exclude filters, finally the filters overall will be like: +// ((include1) or (include2)) and not ((exclude1) or (exclude2)) +func tcpdumpFiltersFromIncludeAndExcludeFilters(includeIPPortsFilters, excludeIPPortsFilters map[string][]string) string { + getFilterGroupFunc := func(ipPortsFilters map[string][]string) string { + if len(ipPortsFilters) == 0 { + return "" + } + ips := make([]string, 0) + + for ip := range ipPortsFilters { + ips = append(ips, ip) + } + sort.Strings(ips) + var filterArray []string + for _, ip := range ips { + ports := ipPortsFilters[ip] + sort.Strings(ports) + var filter string + if ip == anyIPOrPort { + for _, port := range ports { + filter = fmt.Sprintf("(port %s)", port) + filterArray = append(filterArray, filter) + } + continue + } + if len(ports) == 1 && ports[0] == anyIPOrPort { + filter = fmt.Sprintf("(host %s)", ip) + filterArray = append(filterArray, filter) + continue + } + for _, port := range ports { + filter = fmt.Sprintf("(host %s and port %s)", ip, port) + filterArray = append(filterArray, filter) + } + } + if len(filterArray) == 0 { + return "" + } + filterGroup := fmt.Sprintf("(%s)", strings.Join(filterArray, " or ")) + return filterGroup + } + + includeFilterGroup := getFilterGroupFunc(includeIPPortsFilters) + excludeFilterGroup := getFilterGroupFunc(excludeIPPortsFilters) + + if len(includeFilterGroup) == 0 && len(excludeFilterGroup) == 0 { + return "" + } + if len(includeFilterGroup) != 0 && len(excludeFilterGroup) != 0 { + return fmt.Sprintf("%s and not %s", includeFilterGroup, excludeFilterGroup) + } + if len(includeFilterGroup) != 0 && len(excludeFilterGroup) == 0 { + return includeFilterGroup + } + if len(includeFilterGroup) == 0 && len(excludeFilterGroup) != 0 { + return fmt.Sprintf("not %s", excludeFilterGroup) + } + return "" +} + +// parseIncludeAndExcludeFilters returns organized include and exclude IP:Port filters. +func parseIncludeAndExcludeFilters(filters *retinav1alpha1.CaptureConfigurationFilters) (map[string][]string, map[string][]string, error) { + if filters == nil { + return nil, nil, nil + } + var includeIPPortFilters, excludeIPPortFilters map[string][]string + var err error + if len(filters.Include) != 0 { + if includeIPPortFilters, err = filterToIPPortsMap(filters.Include); err != nil { + return nil, nil, err + } + } + if len(filters.Exclude) != 0 { + if excludeIPPortFilters, err = filterToIPPortsMap(filters.Exclude); err != nil { + return nil, nil, err + } + } + return includeIPPortFilters, excludeIPPortFilters, nil +} + +// filterToIPPortsMap maps ip address to a list of port sharing this ip address. +// If the key(ip address) is empty, ports, in combination with any ip address, in the list will be included/excluded. +// If the values(ports) contains only one empty string, all the ports together with that key(ip address) will be included/excluded. +func filterToIPPortsMap(filters []string) (map[string][]string, error) { + // map[string]struct{}{} simplifies duplication removal and seek. 
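	// For example, the filters ["10.0.0.1:80", "10.0.0.1:443", "*:53", "192.168.0.5"]
	// resolve to {"10.0.0.1": ["80", "443"], "": ["53"], "192.168.0.5": [""]}, where an
	// empty key means "any IP address" and an empty port means "any port".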
+	filterIPPorts := map[string]map[string]struct{}{}
+	for _, filter := range filters {
+		if len(filter) == 0 {
+			continue
+		}
+		var ipAddressFilter, portFilter string
+		// The filter may include both ip and port in the format ip:port with wildcard supported, or just a port or
+		// an ip address.
+		if strings.Contains(filter, ":") {
+			ipPort := strings.Split(filter, ":")
+			ipAddressFilter, portFilter = ipPort[0], ipPort[1]
+			if ipPort[0] == "*" {
+				ipAddressFilter = anyIPOrPort
+			}
+			if ipPort[1] == "*" {
+				portFilter = anyIPOrPort
+			}
+		} else {
+			// For simplicity, we treat a string containing "." as an IP address and anything else as a port; both
+			// are validated against their formats below.
+			if strings.Contains(filter, ".") {
+				ipAddressFilter = filter
+			} else {
+				portFilter = filter
+			}
+		}
+
+		// Validate the format of ip address and port.
+		if len(ipAddressFilter) != 0 {
+			if addr := net.ParseIP(ipAddressFilter); len(addr) == 0 {
+				return nil, fmt.Errorf("invalid filter %s", filter)
+			}
+		}
+		if len(portFilter) != 0 {
+			portInt, err := strconv.Atoi(portFilter)
+			if err != nil || (portInt > 65535 || portInt < 0) {
+				return nil, fmt.Errorf("invalid filter %s", filter)
+			}
+		}
+
+		// If any(* or empty) ip address is specified together with a port, we should ignore the other ip:this_port
+		// combinations and keep only *:this_port.
+		if ipAddressFilter == anyIPOrPort {
+			for _, port := range filterIPPorts {
+				delete(port, portFilter)
+			}
+			if _, ok := filterIPPorts[anyIPOrPort]; ok {
+				filterIPPorts[anyIPOrPort][portFilter] = struct{}{}
+			} else {
+				filterIPPorts[anyIPOrPort] = map[string]struct{}{portFilter: {}}
+			}
+			continue
+		}
+		// If any(* or empty) port is specified with an ip address, we should ignore the other ports in combination
+		// with this ip address, and the final combination for this ip address will be this_ipaddress:*.
+		if portFilter == anyIPOrPort {
+			filterIPPorts[ipAddressFilter] = map[string]struct{}{
+				anyIPOrPort: {},
+			}
+			continue
+		}
+		// Otherwise, we'll initialize the port list for this ip address or append the port to the list.
+		_, ok := filterIPPorts[ipAddressFilter]
+		if !ok {
+			filterIPPorts[ipAddressFilter] = map[string]struct{}{portFilter: {}}
+		} else {
+			filterIPPorts[ipAddressFilter][portFilter] = struct{}{}
+		}
+
+	}
+
+	filterIPPortsMap := map[string][]string{}
+	for ip, portList := range filterIPPorts {
+		filterIPPortsMap[ip] = []string{}
+		for port := range portList {
+			filterIPPortsMap[ip] = append(filterIPPortsMap[ip], port)
+		}
+	}
+
+	return filterIPPortsMap, nil
+}
+
+func (translator *CaptureToPodTranslator) obtainTcpdumpFilters(captureConfig retinav1alpha1.CaptureConfiguration) (string, error) {
+	if captureConfig.Filters == nil && captureConfig.TcpdumpFilter == nil && captureConfig.CaptureOption.PacketSize == nil {
+		return "", nil
+	}
+	includeIPPortFilters, excludeIPPortFilters, err := parseIncludeAndExcludeFilters(captureConfig.Filters)
+	if err != nil {
+		return "", err
+	}
+
+	tcpdumpFilter := tcpdumpFiltersFromIncludeAndExcludeFilters(includeIPPortFilters, excludeIPPortFilters)
+	if len(tcpdumpFilter) != 0 {
+		translator.l.Info("Got the parsed filter from include and exclude filters",
+			zap.String("parsed filter", tcpdumpFilter),
+			zap.String("Include filters", strings.Join(captureConfig.Filters.Include, ",")),
+			zap.String("Exclude filters", strings.Join(captureConfig.Filters.Exclude, ",")),
+		)
+	}
+
+	translator.l.Info(fmt.Sprintf("The parsed tcpdump filter is %q", tcpdumpFilter))
+	return tcpdumpFilter, nil
+}
+
+func (translator *CaptureToPodTranslator) obtainCaptureOutputEnv(outputConfiguration retinav1alpha1.OutputConfiguration) (map[captureConstants.CaptureOutputLocationEnvKey]string, error) {
+	outputEnv := map[captureConstants.CaptureOutputLocationEnvKey]string{}
+	if outputConfiguration.HostPath != nil {
+		outputEnv[captureConstants.CaptureOutputLocationEnvKeyHostPath] = *outputConfiguration.HostPath
+	}
+	if outputConfiguration.PersistentVolumeClaim != nil {
+		outputEnv[captureConstants.CaptureOutputLocationEnvKeyPersistentVolumeClaim] = *outputConfiguration.PersistentVolumeClaim
+	}
+
+	if len(outputEnv) == 0 && (outputConfiguration.BlobUpload == nil || *outputConfiguration.BlobUpload == "") {
+		return nil, fmt.Errorf("need to specify at least one outputConfiguration")
+	}
+
+	return outputEnv, nil
+}
+
+// obtainCaptureOptionEnv translates CaptureOption to environment variables for the capture job Pod.
+func (translator *CaptureToPodTranslator) obtainCaptureOptionEnv(option retinav1alpha1.CaptureOption) (map[string]string, error) {
+	outputEnv := map[string]string{}
+	if option.Duration != nil {
+		outputEnv[captureConstants.CaptureDurationEnvKey] = option.Duration.Duration.String()
+	}
+	if option.MaxCaptureSize != nil {
+		outputEnv[captureConstants.CaptureMaxSizeEnvKey] = strconv.Itoa(*option.MaxCaptureSize)
+	}
+	return outputEnv, nil
+}
+
+// ObtainCaptureJobPodEnv translates a Capture object to environment variables for the capture job Pod.
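+// The returned map aggregates the output-location and capture-option entries with the tcpdump filter,
+// raw tcpdump filter, packet size, capture name, and include-metadata keys defined in captureConstants.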
+func (translator *CaptureToPodTranslator) ObtainCaptureJobPodEnv(capture retinav1alpha1.Capture) (map[string]string, error) { + jobPodEnv := map[string]string{} + + captureOutputEnv, err := translator.obtainCaptureOutputEnv(capture.Spec.OutputConfiguration) + if err != nil { + return nil, err + } + for key, val := range captureOutputEnv { + jobPodEnv[string(key)] = val + } + + captureOptionEnv, err := translator.obtainCaptureOptionEnv(capture.Spec.CaptureConfiguration.CaptureOption) + if err != nil { + return nil, err + } + for key, val := range captureOptionEnv { + jobPodEnv[string(key)] = val + } + + tcpdumpFilter, err := translator.obtainTcpdumpFilters(capture.Spec.CaptureConfiguration) + if err != nil { + return nil, err + } + if len(tcpdumpFilter) != 0 { + jobPodEnv[captureConstants.TcpdumpFilterEnvKey] = tcpdumpFilter + } + + if capture.Spec.CaptureConfiguration.CaptureOption.PacketSize != nil { + jobPodEnv[captureConstants.PacketSizeEnvKey] = strconv.Itoa(*capture.Spec.CaptureConfiguration.CaptureOption.PacketSize) + } + + if capture.Spec.CaptureConfiguration.TcpdumpFilter != nil { + jobPodEnv[captureConstants.TcpdumpRawFilterEnvKey] = *capture.Spec.CaptureConfiguration.TcpdumpFilter + } + + if len(capture.Name) != 0 { + jobPodEnv[captureConstants.CaptureNameEnvKey] = capture.Name + } + jobPodEnv[captureConstants.IncludeMetadataEnvKey] = strconv.FormatBool(capture.Spec.CaptureConfiguration.IncludeMetadata) + + // TODO: more env to be added + return jobPodEnv, nil +} diff --git a/pkg/capture/crd_to_job_test.go b/pkg/capture/crd_to_job_test.go new file mode 100644 index 000000000..96234dd7a --- /dev/null +++ b/pkg/capture/crd_to_job_test.go @@ -0,0 +1,2159 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package capture + +import ( + "fmt" + "strconv" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/stretchr/testify/require" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" + fakeclientset "k8s.io/client-go/kubernetes/fake" + pointerUtil "k8s.io/utils/pointer" + + retinav1alpha1 "github.com/microsoft/retina/crd/api/v1alpha1" + captureConstants "github.com/microsoft/retina/pkg/capture/constants" + captureUtils "github.com/microsoft/retina/pkg/capture/utils" + "github.com/microsoft/retina/pkg/config" + "github.com/microsoft/retina/pkg/label" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/telemetry" +) + +const ( + retinaAgentImageForTest = "acnpublic.azurecr.io/retina-agent:v0.0.6-52-g07caaaf" + apiServerURL = "https://retina-test-c4528d-zn0ugsna.hcp.southeastasia.azmk8s.io:443" +) + +func NewCaptureToPodTranslatorForTest(kubeClient kubernetes.Interface) *CaptureToPodTranslator { + log.SetupZapLogger(log.GetDefaultLogOpts()) + config := config.CaptureConfig{ + CaptureDebug: true, + CaptureImageVersion: "v0.0.6-52-g07caaaf", + CaptureImageVersionSource: captureUtils.VersionSourceOperatorImageVersion, + CaptureJobNumLimit: 10, + } + + captureToPodTranslator := NewCaptureToPodTranslator(kubeClient, log.Logger().Named("test"), config) + captureToPodTranslator.Apiserver = apiServerURL + return captureToPodTranslator +} + +func Test_CaptureTargetsOnNode_AddPod(t *testing.T) { + cases := []struct { + existingTargets map[string]CaptureTarget + newTargetNode string + 
newTargetPodIPs []string + wantCaptureTargetsOnNode CaptureTargetsOnNode + }{ + { + existingTargets: map[string]CaptureTarget{}, + newTargetNode: "node1", + newTargetPodIPs: []string{"10.225.0.4"}, + wantCaptureTargetsOnNode: CaptureTargetsOnNode{ + "node1": {PodIpAddresses: []string{"10.225.0.4"}}, + }, + }, + { + existingTargets: map[string]CaptureTarget{ + "node1": {PodIpAddresses: []string{"10.225.0.4"}}, + }, + newTargetNode: "node1", + newTargetPodIPs: []string{"10.225.0.5"}, + wantCaptureTargetsOnNode: CaptureTargetsOnNode{ + "node1": {PodIpAddresses: []string{"10.225.0.4", "10.225.0.5"}}, + }, + }, + { + existingTargets: map[string]CaptureTarget{ + "node1": {PodIpAddresses: []string{"10.225.0.4"}}, + }, + newTargetNode: "node2", + newTargetPodIPs: []string{"10.225.0.5"}, + wantCaptureTargetsOnNode: CaptureTargetsOnNode{ + "node1": {PodIpAddresses: []string{"10.225.0.4"}}, + "node2": {PodIpAddresses: []string{"10.225.0.5"}}, + }, + }, + } + for _, c := range cases { + gotCaptureTargetsOnNode := CaptureTargetsOnNode{} + for k, v := range c.existingTargets { + gotCaptureTargetsOnNode[k] = v + } + gotCaptureTargetsOnNode.AddPod(c.newTargetNode, c.newTargetPodIPs) + if diff := cmp.Diff(c.wantCaptureTargetsOnNode, gotCaptureTargetsOnNode); diff != "" { + t.Errorf("AddPod() mismatch (-want, +got):\n%s", diff) + } + } +} + +func Test_CaptureTargetsOnNode_AddNodeInterface(t *testing.T) { + cases := []struct { + existingTargets map[string]CaptureTarget + newTargetNode string + wantCaptureTargetsOnNode CaptureTargetsOnNode + }{ + { + existingTargets: map[string]CaptureTarget{}, + newTargetNode: "node1", + wantCaptureTargetsOnNode: CaptureTargetsOnNode{ + "node1": {CaptureNodeInterface: true}, + }, + }, + { + existingTargets: map[string]CaptureTarget{"node1": {CaptureNodeInterface: true}}, + newTargetNode: "node1", + wantCaptureTargetsOnNode: CaptureTargetsOnNode{ + "node1": {CaptureNodeInterface: true}, + }, + }, + } + for _, c := range cases { + gotCaptureTargetsOnNode := CaptureTargetsOnNode{} + for k, v := range c.existingTargets { + gotCaptureTargetsOnNode[k] = v + } + gotCaptureTargetsOnNode.AddNodeInterface(c.newTargetNode) + + if diff := cmp.Diff(c.wantCaptureTargetsOnNode, gotCaptureTargetsOnNode); diff != "" { + t.Errorf("AddPod() mismatch (-want, +got):\n%s", diff) + } + } +} + +func Test_CaptureToPodTranslator_GetCaptureTargetsOnNode(t *testing.T) { + cases := []struct { + name string + captureTarget retinav1alpha1.CaptureTarget + nodeList *corev1.NodeList + namespaceList *corev1.NamespaceList + podList *corev1.PodList + wantCaptureTargetsOnNode *CaptureTargetsOnNode + wantErr bool + }{ + { + name: "No selector is specified", + captureTarget: retinav1alpha1.CaptureTarget{}, + wantCaptureTargetsOnNode: nil, + wantErr: true, + }, + { + name: "either node or pod selector is specified", + captureTarget: retinav1alpha1.CaptureTarget{ + NodeSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{corev1.LabelHostname: "node1"}, + }, + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"type": "server"}, + }, + }, + wantCaptureTargetsOnNode: nil, + wantErr: true, + }, + { + name: "Get target through node selector when MatchExpressions is set", + captureTarget: retinav1alpha1.CaptureTarget{ + NodeSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: corev1.LabelHostname, + Operator: metav1.LabelSelectorOpIn, + Values: []string{"node1"}, + }}, + }, + }, + nodeList: &corev1.NodeList{ + Items: []corev1.Node{{ObjectMeta: 
metav1.ObjectMeta{Name: "node1", Labels: map[string]string{corev1.LabelHostname: "node1"}}}}, + }, + wantCaptureTargetsOnNode: &CaptureTargetsOnNode{ + "node1": CaptureTarget{ + CaptureNodeInterface: true, + }, + }, + wantErr: false, + }, + { + name: "Get target through node selector when MatchLabels is set", + captureTarget: retinav1alpha1.CaptureTarget{ + NodeSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{corev1.LabelOSStable: "linux"}, + }, + }, + nodeList: &corev1.NodeList{ + Items: []corev1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{corev1.LabelOSStable: "linux"}}}}, + }, + wantCaptureTargetsOnNode: &CaptureTargetsOnNode{ + "node1": CaptureTarget{ + CaptureNodeInterface: true, + }, + }, + wantErr: false, + }, + { + name: "Get target through node selector when MatchLabels and MatchExpressions are both set and met", + captureTarget: retinav1alpha1.CaptureTarget{ + NodeSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{corev1.LabelOSStable: "linux"}, + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: corev1.LabelHostname, + Operator: metav1.LabelSelectorOpIn, + Values: []string{"node1"}, + }}, + }, + }, + nodeList: &corev1.NodeList{ + Items: []corev1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{corev1.LabelOSStable: "linux", corev1.LabelHostname: "node1"}}}}, + }, + wantCaptureTargetsOnNode: &CaptureTargetsOnNode{ + "node1": CaptureTarget{ + CaptureNodeInterface: true, + }, + }, + wantErr: false, + }, + { + name: "Get target through node selector and raise error when MatchLabels and MatchExpressions are both set but not met", + captureTarget: retinav1alpha1.CaptureTarget{ + NodeSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{corev1.LabelOSStable: "linux"}, + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: corev1.LabelHostname, + Operator: metav1.LabelSelectorOpIn, + Values: []string{"node1"}, + }}, + }, + }, + nodeList: &corev1.NodeList{ + Items: []corev1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{corev1.LabelOSStable: "windows", corev1.LabelHostname: "node1"}}}}, + }, + wantCaptureTargetsOnNode: nil, + wantErr: true, + }, + { + name: "Get target through pod selector", + captureTarget: retinav1alpha1.CaptureTarget{ + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"name": "test-capture-ns"}, + }, + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"type": "test-capture-pod"}, + }, + }, + namespaceList: &corev1.NamespaceList{ + Items: []corev1.Namespace{{ObjectMeta: metav1.ObjectMeta{Name: "test-capture-ns", Labels: map[string]string{"name": "test-capture-ns"}}}}, + }, + podList: &corev1.PodList{ + Items: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Name: "test-capture-pod", Namespace: "test-capture-ns", Labels: map[string]string{"type": "test-capture-pod"}}, + Spec: corev1.PodSpec{NodeName: "node1"}, + Status: corev1.PodStatus{ + PodIP: "10.225.0.4", + PodIPs: []corev1.PodIP{{IP: "10.225.0.4"}}, + }, + }, + }, + }, + wantCaptureTargetsOnNode: &CaptureTargetsOnNode{ + "node1": CaptureTarget{ + PodIpAddresses: []string{"10.225.0.4"}, + }, + }, + wantErr: false, + }, + { + name: "Get target through pod selector for dual-stack cluster", + captureTarget: retinav1alpha1.CaptureTarget{ + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"name": "test-capture-ns"}, + }, + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"type": 
"test-capture-pod"}, + }, + }, + namespaceList: &corev1.NamespaceList{ + Items: []corev1.Namespace{{ObjectMeta: metav1.ObjectMeta{Name: "test-capture-ns", Labels: map[string]string{"name": "test-capture-ns"}}}}, + }, + podList: &corev1.PodList{ + Items: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Name: "test-capture-pod", Namespace: "test-capture-ns", Labels: map[string]string{"type": "test-capture-pod"}}, + Spec: corev1.PodSpec{NodeName: "node1"}, + Status: corev1.PodStatus{ + PodIP: "10.225.0.4", + PodIPs: []corev1.PodIP{ + {IP: "10.225.0.4"}, + {IP: "fd5c:d9f1:79c5:fd83::21e"}, + }, + }, + }, + }, + }, + wantCaptureTargetsOnNode: &CaptureTargetsOnNode{ + "node1": CaptureTarget{ + PodIpAddresses: []string{"10.225.0.4", "fd5c:d9f1:79c5:fd83::21e"}, + }, + }, + wantErr: false, + }, + { + name: "Get target through pod selector when multiple pods in one node", + captureTarget: retinav1alpha1.CaptureTarget{ + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"name": "test-capture-ns"}, + }, + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"type": "test-capture-pod"}, + }, + }, + namespaceList: &corev1.NamespaceList{ + Items: []corev1.Namespace{{ObjectMeta: metav1.ObjectMeta{Name: "test-capture-ns", Labels: map[string]string{"name": "test-capture-ns"}}}}, + }, + podList: &corev1.PodList{ + Items: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Name: "test-capture-pod1", Namespace: "test-capture-ns", Labels: map[string]string{"type": "test-capture-pod"}}, + Spec: corev1.PodSpec{NodeName: "node1"}, + Status: corev1.PodStatus{ + PodIP: "10.225.0.4", + PodIPs: []corev1.PodIP{{IP: "10.225.0.4"}}, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "test-capture-pod2", Namespace: "test-capture-ns", Labels: map[string]string{"type": "test-capture-pod"}}, + Spec: corev1.PodSpec{NodeName: "node1"}, + Status: corev1.PodStatus{ + PodIP: "10.225.0.5", + PodIPs: []corev1.PodIP{{IP: "10.225.0.5"}}, + }, + }, + }, + }, + wantCaptureTargetsOnNode: &CaptureTargetsOnNode{ + "node1": CaptureTarget{ + PodIpAddresses: []string{"10.225.0.4", "10.225.0.5"}, + }, + }, + wantErr: false, + }, + { + name: "Get target through pod selector and raise error when no targets are selected", + captureTarget: retinav1alpha1.CaptureTarget{ + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"name": "test-capture-ns"}, + }, + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"type": "test-capture-pod"}, + }, + }, + namespaceList: &corev1.NamespaceList{ + Items: []corev1.Namespace{{ObjectMeta: metav1.ObjectMeta{Name: "test-capture-ns", Labels: map[string]string{"name": "test-capture-ns"}}}}, + }, + podList: &corev1.PodList{ + Items: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Name: "test-capture-pod", Namespace: "test-capture-ns-different", Labels: map[string]string{"type": "test-capture-pod"}}, + Spec: corev1.PodSpec{NodeName: "node1"}, + Status: corev1.PodStatus{ + PodIP: "10.225.0.4", + PodIPs: []corev1.PodIP{{IP: "10.225.0.4"}}, + }, + }, + }, + }, + wantCaptureTargetsOnNode: nil, + wantErr: true, + }, + } + + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + objects := []runtime.Object{} + if tt.nodeList != nil { + objects = append(objects, tt.nodeList) + } + if tt.namespaceList != nil { + objects = append(objects, tt.namespaceList) + } + if tt.podList != nil { + objects = append(objects, tt.podList) + } + + k8sClient := fakeclientset.NewSimpleClientset(objects...) 
+ captureToPodTranslator := NewCaptureToPodTranslatorForTest(k8sClient) + gotCaptureTargetsOnNode, err := captureToPodTranslator.getCaptureTargetsOnNode(tt.captureTarget) + if tt.wantErr != (err != nil) { + t.Errorf("getCaptureTargetsOnNode() want(%t) error, got error %s", tt.wantErr, err) + } + if tt.wantErr { + return + } + if diff := cmp.Diff(tt.wantCaptureTargetsOnNode, gotCaptureTargetsOnNode); diff != "" { + t.Errorf("getCaptureTargetsOnNode() mismatch (-want, +got):\n%s", diff) + } + }) + } +} + +func Test_CaptureToPodTranslator_updateCaptureTargetsOSOnNode(t *testing.T) { + cases := []struct { + name string + captureTargetsOnNode *CaptureTargetsOnNode + nodeList *corev1.NodeList + wantCaptureTargetsOnNode *CaptureTargetsOnNode + wantErr bool + }{ + { + name: "update target node OS", + captureTargetsOnNode: &CaptureTargetsOnNode{ + "node1": CaptureTarget{ + PodIpAddresses: []string{"10.225.0.4", "10.225.0.5"}, + }, + "node2": CaptureTarget{ + PodIpAddresses: []string{"10.225.1.4", "10.225.1.5"}, + }, + }, + nodeList: &corev1.NodeList{ + Items: []corev1.Node{ + { + ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{corev1.LabelOSStable: "windows", corev1.LabelHostname: "node1"}}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{corev1.LabelOSStable: "linux", corev1.LabelHostname: "node1"}}, + }, + }, + }, + wantCaptureTargetsOnNode: &CaptureTargetsOnNode{ + "node1": CaptureTarget{ + PodIpAddresses: []string{"10.225.0.4", "10.225.0.5"}, + OS: "windows", + }, + "node2": CaptureTarget{ + PodIpAddresses: []string{"10.225.1.4", "10.225.1.5"}, + OS: "linux", + }, + }, + }, + } + + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + objects := []runtime.Object{} + if tt.nodeList != nil { + objects = append(objects, tt.nodeList) + } + + k8sClient := fakeclientset.NewSimpleClientset(objects...) 
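+			// updateCaptureTargetsOSOnNode is expected to fill each target's OS from the node's
+			// corev1.LabelOSStable label, as encoded in wantCaptureTargetsOnNode above.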
+ captureToPodTranslator := NewCaptureToPodTranslatorForTest(k8sClient) + err := captureToPodTranslator.updateCaptureTargetsOSOnNode(tt.captureTargetsOnNode) + if tt.wantErr != (err != nil) { + t.Errorf("updateCaptureTargetsOSOnNode() want(%t) error, got error %s", tt.wantErr, err) + } + if tt.wantErr { + return + } + if diff := cmp.Diff(tt.wantCaptureTargetsOnNode, tt.captureTargetsOnNode); diff != "" { + t.Errorf("updateCaptureTargetsOSOnNode() mismatch (-want, +got):\n%s", diff) + } + }) + } +} + +func Test_CaptureToPodTranslator_ObtainCaptureJobPodEnv(t *testing.T) { + var packetSize int = 96 + cases := []struct { + name string + capture retinav1alpha1.Capture + wantJobEnv map[string]string + wantErr bool + }{ + { + name: "output configuration is empty", + capture: retinav1alpha1.Capture{}, + wantErr: true, + }, + { + name: "use hostpath", + capture: retinav1alpha1.Capture{ + Spec: retinav1alpha1.CaptureSpec{ + OutputConfiguration: retinav1alpha1.OutputConfiguration{ + HostPath: pointerUtil.String("/tmp/capture"), + }, + }, + }, + wantJobEnv: map[string]string{ + string(captureConstants.CaptureOutputLocationEnvKeyHostPath): "/tmp/capture", + captureConstants.IncludeMetadataEnvKey: "false", + }, + }, + { + name: "use pvc", + capture: retinav1alpha1.Capture{ + Spec: retinav1alpha1.CaptureSpec{ + OutputConfiguration: retinav1alpha1.OutputConfiguration{ + PersistentVolumeClaim: pointerUtil.String("capture-pvc"), + }, + }, + }, + wantJobEnv: map[string]string{ + string(captureConstants.CaptureOutputLocationEnvKeyPersistentVolumeClaim): "capture-pvc", + captureConstants.IncludeMetadataEnvKey: "false", + }, + }, + { + name: "use blob", + capture: retinav1alpha1.Capture{ + Spec: retinav1alpha1.CaptureSpec{ + OutputConfiguration: retinav1alpha1.OutputConfiguration{ + BlobUpload: pointerUtil.String("sasURL"), + }, + }, + }, + wantJobEnv: map[string]string{ + captureConstants.IncludeMetadataEnvKey: "false", + }, + }, + { + name: "include metadata", + capture: retinav1alpha1.Capture{ + Spec: retinav1alpha1.CaptureSpec{ + OutputConfiguration: retinav1alpha1.OutputConfiguration{ + PersistentVolumeClaim: pointerUtil.String("capture-pvc"), + }, + CaptureConfiguration: retinav1alpha1.CaptureConfiguration{ + IncludeMetadata: true, + }, + }, + }, + wantJobEnv: map[string]string{ + captureConstants.IncludeMetadataEnvKey: "true", + string(captureConstants.CaptureOutputLocationEnvKeyPersistentVolumeClaim): "capture-pvc", + }, + }, + { + name: "packet size", + capture: retinav1alpha1.Capture{ + Spec: retinav1alpha1.CaptureSpec{ + OutputConfiguration: retinav1alpha1.OutputConfiguration{ + PersistentVolumeClaim: pointerUtil.String("capture-pvc"), + }, + CaptureConfiguration: retinav1alpha1.CaptureConfiguration{ + IncludeMetadata: true, + CaptureOption: retinav1alpha1.CaptureOption{ + PacketSize: &packetSize, + }, + }, + }, + }, + wantJobEnv: map[string]string{ + captureConstants.IncludeMetadataEnvKey: "true", + string(captureConstants.CaptureOutputLocationEnvKeyPersistentVolumeClaim): "capture-pvc", + captureConstants.PacketSizeEnvKey: strconv.Itoa(packetSize), + }, + }, + } + + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + k8sClient := fakeclientset.NewSimpleClientset() + log.SetupZapLogger(log.GetDefaultLogOpts()) + captureToPodTranslator := NewCaptureToPodTranslatorForTest(k8sClient) + jobEnv, err := captureToPodTranslator.ObtainCaptureJobPodEnv(tt.capture) + + if tt.wantErr != (err != nil) { + t.Errorf("ObtainCaptureJobPodEnv() want(%t) error, got error %s", tt.wantErr, err) + } + + if 
diff := cmp.Diff(tt.wantJobEnv, jobEnv); diff != "" { + t.Errorf("ObtainCaptureJobPodEnv() mismatch (-want, +got):\n%s", diff) + } + }) + } +} + +func Test_CaptureToPodTranslator_RenderJob_NodeSelected(t *testing.T) { + cases := []struct { + name string + captureTargetOnNode *CaptureTargetsOnNode + env map[string]string + wantAffinities []*corev1.Affinity + wantErr bool + }{ + { + name: "no nodes are selected", + captureTargetOnNode: &CaptureTargetsOnNode{}, + env: map[string]string{}, + wantErr: true, + }, + { + name: "single node is selected", + captureTargetOnNode: &CaptureTargetsOnNode{"node1": {}}, + env: map[string]string{}, + wantAffinities: []*corev1.Affinity{ + { + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: corev1.LabelHostname, + Operator: corev1.NodeSelectorOpIn, + Values: []string{"node1"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "multiple nodes are selected", + captureTargetOnNode: &CaptureTargetsOnNode{"node1": {}, "node2": {}}, + env: map[string]string{}, + wantAffinities: []*corev1.Affinity{ + { + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: corev1.LabelHostname, + Operator: corev1.NodeSelectorOpIn, + Values: []string{"node1"}, + }, + }, + }, + }, + }, + }, + }, + { + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: corev1.LabelHostname, + Operator: corev1.NodeSelectorOpIn, + Values: []string{"node2"}, + }, + }, + }, + }, + }, + }, + }, + }, + wantErr: false, + }, + } + + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + k8sClient := fakeclientset.NewSimpleClientset() + log.SetupZapLogger(log.GetDefaultLogOpts()) + captureToPodTranslator := NewCaptureToPodTranslatorForTest(k8sClient) + err := captureToPodTranslator.initJobTemplate(&retinav1alpha1.Capture{ + Spec: retinav1alpha1.CaptureSpec{ + CaptureConfiguration: retinav1alpha1.CaptureConfiguration{}, + OutputConfiguration: retinav1alpha1.OutputConfiguration{}, + }, + }) + if err != nil { + t.Errorf("initJobTemplate() want no error, got error %s", err) + } + jobs, err := captureToPodTranslator.renderJob(tt.captureTargetOnNode, tt.env) + + if tt.wantErr != (err != nil) { + t.Errorf("renderJob() want(%t) error, got error %s", tt.wantErr, err) + } + var affinities []*corev1.Affinity + for _, job := range jobs { + affinities = append(affinities, job.Spec.Template.Spec.Affinity) + } + + cmpOption := cmpopts.SortSlices( + func(affinity1, affinity2 *corev1.Affinity) bool { + nodeName1 := affinity1.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Values[0] + nodeName2 := affinity2.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Values[0] + return nodeName1 < nodeName2 + }, + ) + + if diff := cmp.Diff(tt.wantAffinities, affinities, cmpOption); diff != "" { + t.Errorf("renderJob() mismatch (-want, +got):\n%s", diff) + } + }) + } +} + +func Test_CaptureToPodTranslator_ValidateCapture(t *testing.T) { + captureName := "capture-test" + hostPath := "/tmp/capture" + nodeName 
:= "node-name" + cases := []struct { + name string + capture retinav1alpha1.Capture + wantErr bool + }{ + { + name: "raise error when target is not specified", + capture: retinav1alpha1.Capture{ + ObjectMeta: metav1.ObjectMeta{ + Name: captureName, + }, + Spec: retinav1alpha1.CaptureSpec{ + CaptureConfiguration: retinav1alpha1.CaptureConfiguration{ + CaptureOption: retinav1alpha1.CaptureOption{ + Duration: &metav1.Duration{Duration: 10 * time.Second}, + }, + }, + OutputConfiguration: retinav1alpha1.OutputConfiguration{ + HostPath: &hostPath, + }, + }, + }, + wantErr: true, + }, + { + name: "raise error when duration and maxcaptureSize is not specified", + capture: retinav1alpha1.Capture{ + ObjectMeta: metav1.ObjectMeta{ + Name: captureName, + }, + Spec: retinav1alpha1.CaptureSpec{ + CaptureConfiguration: retinav1alpha1.CaptureConfiguration{ + CaptureTarget: retinav1alpha1.CaptureTarget{ + NodeSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "nodename": nodeName, + }, + }, + }, + }, + OutputConfiguration: retinav1alpha1.OutputConfiguration{ + HostPath: &hostPath, + }, + }, + }, + wantErr: true, + }, + { + name: "raise error when output configuration is not specified", + capture: retinav1alpha1.Capture{ + ObjectMeta: metav1.ObjectMeta{ + Name: captureName, + }, + Spec: retinav1alpha1.CaptureSpec{ + CaptureConfiguration: retinav1alpha1.CaptureConfiguration{ + CaptureTarget: retinav1alpha1.CaptureTarget{ + NodeSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "nodename": nodeName, + }, + }, + }, + CaptureOption: retinav1alpha1.CaptureOption{ + Duration: &metav1.Duration{Duration: 10 * time.Second}, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "validation is ok when all are set", + capture: retinav1alpha1.Capture{ + ObjectMeta: metav1.ObjectMeta{ + Name: captureName, + }, + Spec: retinav1alpha1.CaptureSpec{ + CaptureConfiguration: retinav1alpha1.CaptureConfiguration{ + CaptureTarget: retinav1alpha1.CaptureTarget{ + NodeSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "nodename": nodeName, + }, + }, + }, + CaptureOption: retinav1alpha1.CaptureOption{ + Duration: &metav1.Duration{Duration: 10 * time.Second}, + }, + }, + OutputConfiguration: retinav1alpha1.OutputConfiguration{ + HostPath: &hostPath, + }, + }, + }, + }, + } + + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + k8sClient := fakeclientset.NewSimpleClientset() + captureToPodTranslator := NewCaptureToPodTranslatorForTest(k8sClient) + err := captureToPodTranslator.validateCapture(&tt.capture) + if tt.wantErr != (err != nil) { + t.Errorf("validateCapture() want(%t) error, got error %s", tt.wantErr, err) + } + if tt.wantErr { + return + } + }) + } +} + +func Test_CaptureToPodTranslator_TranslateCaptureToJobs(t *testing.T) { + captureName := "capture-test" + hostPath := "/tmp/capture" + pvc := "capture-pvc" + backoffLimit := int32(0) + rootUser := int64(0) + tcpdumpFilter := "-i eth0" + captureFolderHostPathType := corev1.HostPathDirectoryOrCreate + commonJob := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: fmt.Sprintf("%s-", captureName), + Labels: map[string]string{ + label.CaptureNameLabel: captureName, + label.AppLabel: captureConstants.CaptureContainername, + }, + }, + Spec: batchv1.JobSpec{ + BackoffLimit: &backoffLimit, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + label.CaptureNameLabel: captureName, + label.AppLabel: captureConstants.CaptureContainername, + }, + }, + Spec: 
corev1.PodSpec{ + HostNetwork: true, + HostIPC: true, + TerminationGracePeriodSeconds: pointerUtil.Int64(1800), + Containers: []corev1.Container{ + { + Name: captureConstants.CaptureContainername, + Image: retinaAgentImageForTest, + ImagePullPolicy: corev1.PullIfNotPresent, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: &rootUser, + Capabilities: &corev1.Capabilities{ + Add: []corev1.Capability{ + "NET_ADMIN", "SYS_ADMIN", + }, + }, + }, + Command: []string{captureConstants.CaptureContainerEntrypoint}, + Env: []corev1.EnvVar{ + { + Name: telemetry.EnvPodName, + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + { + Name: captureConstants.ApiserverEnvKey, + Value: apiServerURL, + }, + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10m"), + corev1.ResourceMemory: resource.MustParse("64Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("300Mi"), + }, + }, + }, + }, + RestartPolicy: corev1.RestartPolicyNever, + Affinity: &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: corev1.LabelHostname, + Operator: corev1.NodeSelectorOpIn, + Values: []string{"node1"}, + }, + }, + }, + }, + }, + }, + }, + Tolerations: []corev1.Toleration{ + { + Key: "CriticalAddonsOnly", + Operator: "Exists", + }, + { + Effect: "NoExecute", + Operator: "Exists", + }, + { + Effect: "NoSchedule", + Operator: "Exists", + }, + }, + }, + }, + }, + } + cases := []struct { + name string + capture retinav1alpha1.Capture + podList *corev1.PodList + nodeList *corev1.NodeList + secret *corev1.Secret + volumeMounts []corev1.VolumeMount + volumes []corev1.Volume + podEnv []v1.EnvVar + existingPVC *corev1.PersistentVolumeClaim + isWindows bool + wantErr bool + }{ + { + name: "configurations: hostpath as output configurations", + capture: retinav1alpha1.Capture{ + ObjectMeta: metav1.ObjectMeta{ + Name: captureName, + }, + Spec: retinav1alpha1.CaptureSpec{ + CaptureConfiguration: retinav1alpha1.CaptureConfiguration{ + CaptureTarget: retinav1alpha1.CaptureTarget{ + NodeSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: corev1.LabelHostname, + Operator: metav1.LabelSelectorOpIn, + Values: []string{"node1"}, + }}, + }, + }, + CaptureOption: retinav1alpha1.CaptureOption{ + Duration: &metav1.Duration{Duration: 1 * time.Minute}, + }, + }, + OutputConfiguration: retinav1alpha1.OutputConfiguration{ + HostPath: &hostPath, + }, + }, + }, + nodeList: &corev1.NodeList{ + Items: []corev1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{corev1.LabelHostname: "node1", corev1.LabelOSStable: "linux"}}}}, + }, + volumeMounts: []corev1.VolumeMount{ + { + Name: captureConstants.CaptureHostPathVolumeName, + MountPath: hostPath, + }, + }, + volumes: []corev1.Volume{ + { + Name: captureConstants.CaptureHostPathVolumeName, + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: hostPath, + Type: &captureFolderHostPathType, + }, + }, + }, + }, + podEnv: []v1.EnvVar{ + {Name: captureConstants.CaptureDurationEnvKey, Value: "1m0s"}, + {Name: string(captureConstants.CaptureOutputLocationEnvKeyHostPath), Value: hostPath}, + {Name: 
captureConstants.CaptureNameEnvKey, Value: captureName}, + {Name: captureConstants.IncludeMetadataEnvKey, Value: "false"}, + {Name: captureConstants.NodeHostNameEnvKey, Value: "node1"}, + { + Name: telemetry.EnvPodName, + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + { + Name: captureConstants.ApiserverEnvKey, + Value: apiServerURL, + }, + }, + }, + { + name: "configurations: secret is not found", + capture: retinav1alpha1.Capture{ + ObjectMeta: metav1.ObjectMeta{ + Name: captureName, + }, + Spec: retinav1alpha1.CaptureSpec{ + CaptureConfiguration: retinav1alpha1.CaptureConfiguration{ + CaptureTarget: retinav1alpha1.CaptureTarget{ + NodeSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: corev1.LabelHostname, + Operator: metav1.LabelSelectorOpIn, + Values: []string{"node1"}, + }}, + }, + }, + CaptureOption: retinav1alpha1.CaptureOption{ + Duration: &metav1.Duration{Duration: 1 * time.Second}, + }, + }, + OutputConfiguration: retinav1alpha1.OutputConfiguration{ + BlobUpload: pointerUtil.String("secretName"), + }, + }, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secretName", + }, + }, + nodeList: &corev1.NodeList{ + Items: []corev1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{corev1.LabelHostname: "node1", corev1.LabelOSStable: "linux"}}}}, + }, + wantErr: true, + }, + { + name: "configurations: pvc is not found", + capture: retinav1alpha1.Capture{ + ObjectMeta: metav1.ObjectMeta{ + Name: captureName, + }, + Spec: retinav1alpha1.CaptureSpec{ + CaptureConfiguration: retinav1alpha1.CaptureConfiguration{ + CaptureTarget: retinav1alpha1.CaptureTarget{ + NodeSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: corev1.LabelHostname, + Operator: metav1.LabelSelectorOpIn, + Values: []string{"node1"}, + }}, + }, + }, + CaptureOption: retinav1alpha1.CaptureOption{ + Duration: &metav1.Duration{Duration: 1 * time.Second}, + }, + }, + OutputConfiguration: retinav1alpha1.OutputConfiguration{ + PersistentVolumeClaim: &pvc, + }, + }, + }, + wantErr: true, + }, + { + name: "configurations: pvc as output configurations", + capture: retinav1alpha1.Capture{ + ObjectMeta: metav1.ObjectMeta{ + Name: captureName, + }, + Spec: retinav1alpha1.CaptureSpec{ + CaptureConfiguration: retinav1alpha1.CaptureConfiguration{ + CaptureTarget: retinav1alpha1.CaptureTarget{ + NodeSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: corev1.LabelHostname, + Operator: metav1.LabelSelectorOpIn, + Values: []string{"node1"}, + }}, + }, + }, + CaptureOption: retinav1alpha1.CaptureOption{ + Duration: &metav1.Duration{Duration: 1 * time.Minute}, + }, + }, + OutputConfiguration: retinav1alpha1.OutputConfiguration{ + PersistentVolumeClaim: &pvc, + }, + }, + }, + nodeList: &corev1.NodeList{ + Items: []corev1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{corev1.LabelHostname: "node1", corev1.LabelOSStable: "linux"}}}}, + }, + volumeMounts: []corev1.VolumeMount{ + { + Name: captureConstants.CapturePVCVolumeName, + MountPath: captureConstants.PersistentVolumeClaimVolumeMountPathLinux, + }, + }, + volumes: []corev1.Volume{ + { + Name: captureConstants.CapturePVCVolumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvc, + }, + }, + }, + }, + podEnv: []v1.EnvVar{ + {Name: 
captureConstants.CaptureDurationEnvKey, Value: "1m0s"}, + {Name: captureConstants.CaptureNameEnvKey, Value: captureName}, + {Name: captureConstants.IncludeMetadataEnvKey, Value: "false"}, + {Name: string(captureConstants.CaptureOutputLocationEnvKeyPersistentVolumeClaim), Value: pvc}, + {Name: captureConstants.NodeHostNameEnvKey, Value: "node1"}, + { + Name: telemetry.EnvPodName, + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + { + Name: captureConstants.ApiserverEnvKey, + Value: apiServerURL, + }, + }, + existingPVC: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: pvc, + }, + }, + }, + { + name: "configurations: pvc as output configurations for Windows", + isWindows: true, + capture: retinav1alpha1.Capture{ + ObjectMeta: metav1.ObjectMeta{ + Name: captureName, + }, + Spec: retinav1alpha1.CaptureSpec{ + CaptureConfiguration: retinav1alpha1.CaptureConfiguration{ + CaptureTarget: retinav1alpha1.CaptureTarget{ + NodeSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: corev1.LabelHostname, + Operator: metav1.LabelSelectorOpIn, + Values: []string{"node1"}, + }}, + }, + }, + CaptureOption: retinav1alpha1.CaptureOption{ + Duration: &metav1.Duration{Duration: 1 * time.Minute}, + }, + }, + OutputConfiguration: retinav1alpha1.OutputConfiguration{ + PersistentVolumeClaim: &pvc, + }, + }, + }, + nodeList: &corev1.NodeList{ + Items: []corev1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{corev1.LabelHostname: "node1", corev1.LabelOSStable: "windows"}}}}, + }, + volumeMounts: []corev1.VolumeMount{ + { + Name: captureConstants.CapturePVCVolumeName, + MountPath: captureConstants.PersistentVolumeClaimVolumeMountPathWin, + }, + }, + volumes: []corev1.Volume{ + { + Name: captureConstants.CapturePVCVolumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvc, + }, + }, + }, + }, + podEnv: []v1.EnvVar{ + {Name: captureConstants.CaptureDurationEnvKey, Value: "1m0s"}, + {Name: captureConstants.CaptureNameEnvKey, Value: captureName}, + {Name: captureConstants.IncludeMetadataEnvKey, Value: "false"}, + {Name: string(captureConstants.CaptureOutputLocationEnvKeyPersistentVolumeClaim), Value: pvc}, + {Name: captureConstants.NodeHostNameEnvKey, Value: "node1"}, + { + Name: telemetry.EnvPodName, + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + { + Name: captureConstants.ApiserverEnvKey, + Value: apiServerURL, + }, + }, + existingPVC: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: pvc, + }, + }, + }, + { + name: "configurations: pvc and hostpath as output configurations", + capture: retinav1alpha1.Capture{ + ObjectMeta: metav1.ObjectMeta{ + Name: captureName, + }, + Spec: retinav1alpha1.CaptureSpec{ + CaptureConfiguration: retinav1alpha1.CaptureConfiguration{ + CaptureTarget: retinav1alpha1.CaptureTarget{ + NodeSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: corev1.LabelHostname, + Operator: metav1.LabelSelectorOpIn, + Values: []string{"node1"}, + }}, + }, + }, + CaptureOption: retinav1alpha1.CaptureOption{ + Duration: &metav1.Duration{Duration: 1 * time.Minute}, + }, + }, + OutputConfiguration: retinav1alpha1.OutputConfiguration{ + HostPath: &hostPath, + PersistentVolumeClaim: &pvc, + }, + }, + }, + nodeList: &corev1.NodeList{ + Items: 
[]corev1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{corev1.LabelHostname: "node1", corev1.LabelOSStable: "linux"}}}}, + }, + volumeMounts: []corev1.VolumeMount{ + { + Name: captureConstants.CaptureHostPathVolumeName, + MountPath: hostPath, + }, + { + Name: captureConstants.CapturePVCVolumeName, + MountPath: captureConstants.PersistentVolumeClaimVolumeMountPathLinux, + }, + }, + volumes: []corev1.Volume{ + { + Name: captureConstants.CaptureHostPathVolumeName, + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: hostPath, + Type: &captureFolderHostPathType, + }, + }, + }, + { + Name: captureConstants.CapturePVCVolumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvc, + }, + }, + }, + }, + podEnv: []v1.EnvVar{ + {Name: captureConstants.CaptureDurationEnvKey, Value: "1m0s"}, + {Name: string(captureConstants.CaptureOutputLocationEnvKeyHostPath), Value: hostPath}, + {Name: captureConstants.CaptureNameEnvKey, Value: captureName}, + {Name: captureConstants.IncludeMetadataEnvKey, Value: "false"}, + {Name: string(captureConstants.CaptureOutputLocationEnvKeyPersistentVolumeClaim), Value: pvc}, + {Name: captureConstants.NodeHostNameEnvKey, Value: "node1"}, + { + Name: telemetry.EnvPodName, + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + { + Name: captureConstants.ApiserverEnvKey, + Value: apiServerURL, + }, + }, + existingPVC: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: pvc, + }, + }, + }, + { + name: "tcpdumpfilter: pod ip adddress and tcpdumpfilter coexist", + capture: retinav1alpha1.Capture{ + ObjectMeta: metav1.ObjectMeta{ + Name: captureName, + }, + Spec: retinav1alpha1.CaptureSpec{ + CaptureConfiguration: retinav1alpha1.CaptureConfiguration{ + CaptureTarget: retinav1alpha1.CaptureTarget{ + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"type": "test-capture-pod"}, + }, + }, + TcpdumpFilter: &tcpdumpFilter, + CaptureOption: retinav1alpha1.CaptureOption{ + Duration: &metav1.Duration{Duration: 1 * time.Minute}, + }, + }, + OutputConfiguration: retinav1alpha1.OutputConfiguration{ + HostPath: &hostPath, + }, + }, + }, + podList: &corev1.PodList{ + Items: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Name: "test-capture-pod", Namespace: "default", Labels: map[string]string{"type": "test-capture-pod"}}, + Spec: corev1.PodSpec{NodeName: "node1"}, + Status: corev1.PodStatus{ + PodIP: "10.225.0.4", + PodIPs: []corev1.PodIP{{IP: "10.225.0.4"}}, + }, + }, + }, + }, + nodeList: &corev1.NodeList{ + Items: []corev1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{corev1.LabelHostname: "node1", corev1.LabelOSStable: "linux"}}}}, + }, + volumeMounts: []corev1.VolumeMount{ + { + Name: captureConstants.CaptureHostPathVolumeName, + MountPath: hostPath, + }, + }, + volumes: []corev1.Volume{ + { + Name: captureConstants.CaptureHostPathVolumeName, + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: hostPath, + Type: &captureFolderHostPathType, + }, + }, + }, + }, + podEnv: []v1.EnvVar{ + {Name: captureConstants.CaptureDurationEnvKey, Value: "1m0s"}, + {Name: string(captureConstants.CaptureOutputLocationEnvKeyHostPath), Value: hostPath}, + {Name: captureConstants.CaptureNameEnvKey, Value: captureName}, + {Name: captureConstants.IncludeMetadataEnvKey, Value: "false"}, + {Name: 
captureConstants.NodeHostNameEnvKey, Value: "node1"}, + {Name: captureConstants.TcpdumpRawFilterEnvKey, Value: "-i eth0"}, + {Name: captureConstants.TcpdumpFilterEnvKey, Value: "(host 10.225.0.4)"}, + { + Name: telemetry.EnvPodName, + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + { + Name: captureConstants.ApiserverEnvKey, + Value: apiServerURL, + }, + }, + }, + { + name: "tcpdumpfilter: pod ip adddress exits", + capture: retinav1alpha1.Capture{ + ObjectMeta: metav1.ObjectMeta{ + Name: captureName, + }, + Spec: retinav1alpha1.CaptureSpec{ + CaptureConfiguration: retinav1alpha1.CaptureConfiguration{ + CaptureTarget: retinav1alpha1.CaptureTarget{ + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"type": "test-capture-pod"}, + }, + }, + CaptureOption: retinav1alpha1.CaptureOption{ + Duration: &metav1.Duration{Duration: 1 * time.Minute}, + }, + }, + OutputConfiguration: retinav1alpha1.OutputConfiguration{ + HostPath: &hostPath, + }, + }, + }, + podList: &corev1.PodList{ + Items: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Name: "test-capture-pod", Namespace: "default", Labels: map[string]string{"type": "test-capture-pod"}}, + Spec: corev1.PodSpec{NodeName: "node1"}, + Status: corev1.PodStatus{ + PodIP: "10.225.0.4", + PodIPs: []corev1.PodIP{{IP: "10.225.0.4"}}, + }, + }, + }, + }, + nodeList: &corev1.NodeList{ + Items: []corev1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{corev1.LabelHostname: "node1", corev1.LabelOSStable: "linux"}}}}, + }, + volumeMounts: []corev1.VolumeMount{ + { + Name: captureConstants.CaptureHostPathVolumeName, + MountPath: hostPath, + }, + }, + volumes: []corev1.Volume{ + { + Name: captureConstants.CaptureHostPathVolumeName, + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: hostPath, + Type: &captureFolderHostPathType, + }, + }, + }, + }, + podEnv: []v1.EnvVar{ + {Name: captureConstants.CaptureDurationEnvKey, Value: "1m0s"}, + {Name: string(captureConstants.CaptureOutputLocationEnvKeyHostPath), Value: hostPath}, + {Name: captureConstants.CaptureNameEnvKey, Value: captureName}, + {Name: captureConstants.IncludeMetadataEnvKey, Value: "false"}, + {Name: captureConstants.NodeHostNameEnvKey, Value: "node1"}, + {Name: captureConstants.TcpdumpFilterEnvKey, Value: "(host 10.225.0.4)"}, + { + Name: telemetry.EnvPodName, + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + { + Name: captureConstants.ApiserverEnvKey, + Value: apiServerURL, + }, + }, + }, + { + name: "tcpdumpfilter: dual-stack Pod", + capture: retinav1alpha1.Capture{ + ObjectMeta: metav1.ObjectMeta{ + Name: captureName, + }, + Spec: retinav1alpha1.CaptureSpec{ + CaptureConfiguration: retinav1alpha1.CaptureConfiguration{ + CaptureTarget: retinav1alpha1.CaptureTarget{ + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"type": "test-capture-pod"}, + }, + }, + CaptureOption: retinav1alpha1.CaptureOption{ + Duration: &metav1.Duration{Duration: 1 * time.Minute}, + }, + }, + OutputConfiguration: retinav1alpha1.OutputConfiguration{ + HostPath: &hostPath, + }, + }, + }, + podList: &corev1.PodList{ + Items: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Name: "test-capture-pod", Namespace: "default", Labels: map[string]string{"type": "test-capture-pod"}}, + Spec: corev1.PodSpec{NodeName: "node1"}, + Status: corev1.PodStatus{ + PodIP: "10.225.0.4", + PodIPs: 
[]corev1.PodIP{ + {IP: "10.225.0.4"}, + {IP: "fd5c:d9f1:79c5:fd83::21e"}, + }, + }, + }, + }, + }, + nodeList: &corev1.NodeList{ + Items: []corev1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{corev1.LabelHostname: "node1", corev1.LabelOSStable: "linux"}}}}, + }, + volumeMounts: []corev1.VolumeMount{ + { + Name: captureConstants.CaptureHostPathVolumeName, + MountPath: hostPath, + }, + }, + volumes: []corev1.Volume{ + { + Name: captureConstants.CaptureHostPathVolumeName, + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: hostPath, + Type: &captureFolderHostPathType, + }, + }, + }, + }, + podEnv: []v1.EnvVar{ + {Name: captureConstants.CaptureDurationEnvKey, Value: "1m0s"}, + {Name: string(captureConstants.CaptureOutputLocationEnvKeyHostPath), Value: hostPath}, + {Name: captureConstants.CaptureNameEnvKey, Value: captureName}, + {Name: captureConstants.IncludeMetadataEnvKey, Value: "false"}, + {Name: captureConstants.NodeHostNameEnvKey, Value: "node1"}, + {Name: captureConstants.TcpdumpFilterEnvKey, Value: "(host 10.225.0.4 or host fd5c:d9f1:79c5:fd83::21e)"}, + { + Name: telemetry.EnvPodName, + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + { + Name: captureConstants.ApiserverEnvKey, + Value: apiServerURL, + }, + }, + }, + { + name: "netshfilter: dual-stack Pod", + capture: retinav1alpha1.Capture{ + ObjectMeta: metav1.ObjectMeta{ + Name: captureName, + }, + Spec: retinav1alpha1.CaptureSpec{ + CaptureConfiguration: retinav1alpha1.CaptureConfiguration{ + CaptureTarget: retinav1alpha1.CaptureTarget{ + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"type": "test-capture-pod"}, + }, + }, + CaptureOption: retinav1alpha1.CaptureOption{ + Duration: &metav1.Duration{Duration: 1 * time.Minute}, + }, + }, + OutputConfiguration: retinav1alpha1.OutputConfiguration{ + HostPath: &hostPath, + }, + }, + }, + podList: &corev1.PodList{ + Items: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Name: "test-capture-pod", Namespace: "default", Labels: map[string]string{"type": "test-capture-pod"}}, + Spec: corev1.PodSpec{NodeName: "node1"}, + Status: corev1.PodStatus{ + PodIP: "10.225.0.4", + PodIPs: []corev1.PodIP{ + {IP: "10.225.0.4"}, + {IP: "fd5c:d9f1:79c5:fd83::21e"}, + }, + }, + }, + }, + }, + nodeList: &corev1.NodeList{ + Items: []corev1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{corev1.LabelHostname: "node1", corev1.LabelOSStable: "windows"}}}}, + }, + volumeMounts: []corev1.VolumeMount{ + { + Name: captureConstants.CaptureHostPathVolumeName, + MountPath: hostPath, + }, + }, + volumes: []corev1.Volume{ + { + Name: captureConstants.CaptureHostPathVolumeName, + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: hostPath, + Type: &captureFolderHostPathType, + }, + }, + }, + }, + podEnv: []v1.EnvVar{ + {Name: captureConstants.CaptureDurationEnvKey, Value: "1m0s"}, + {Name: string(captureConstants.CaptureOutputLocationEnvKeyHostPath), Value: hostPath}, + {Name: captureConstants.CaptureNameEnvKey, Value: captureName}, + {Name: captureConstants.IncludeMetadataEnvKey, Value: "false"}, + {Name: captureConstants.NodeHostNameEnvKey, Value: "node1"}, + {Name: captureConstants.NetshFilterEnvKey, Value: "IPv4.Address=(10.225.0.4) IPv6.Address=(fd5c:d9f1:79c5:fd83::21e)"}, + { + Name: telemetry.EnvPodName, + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: 
"metadata.name", + }, + }, + }, + { + Name: captureConstants.ApiserverEnvKey, + Value: apiServerURL, + }, + }, + isWindows: true, + }, + { + name: "netshfilter: pod ip adddress and tcpdumpfilter coexist", + capture: retinav1alpha1.Capture{ + ObjectMeta: metav1.ObjectMeta{ + Name: captureName, + }, + Spec: retinav1alpha1.CaptureSpec{ + CaptureConfiguration: retinav1alpha1.CaptureConfiguration{ + CaptureTarget: retinav1alpha1.CaptureTarget{ + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"type": "test-capture-pod"}, + }, + }, + TcpdumpFilter: &tcpdumpFilter, + CaptureOption: retinav1alpha1.CaptureOption{ + Duration: &metav1.Duration{Duration: 1 * time.Minute}, + }, + }, + OutputConfiguration: retinav1alpha1.OutputConfiguration{ + HostPath: &hostPath, + }, + }, + }, + podList: &corev1.PodList{ + Items: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Name: "test-capture-pod", Namespace: "default", Labels: map[string]string{"type": "test-capture-pod"}}, + Spec: corev1.PodSpec{NodeName: "node1"}, + Status: corev1.PodStatus{ + PodIP: "10.225.0.4", + PodIPs: []corev1.PodIP{{IP: "10.225.0.4"}}, + }, + }, + }, + }, + nodeList: &corev1.NodeList{ + Items: []corev1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{corev1.LabelHostname: "node1", corev1.LabelOSStable: "windows"}}}}, + }, + volumeMounts: []corev1.VolumeMount{ + { + Name: captureConstants.CaptureHostPathVolumeName, + MountPath: hostPath, + }, + }, + volumes: []corev1.Volume{ + { + Name: captureConstants.CaptureHostPathVolumeName, + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: hostPath, + Type: &captureFolderHostPathType, + }, + }, + }, + }, + podEnv: []v1.EnvVar{ + {Name: captureConstants.CaptureDurationEnvKey, Value: "1m0s"}, + {Name: string(captureConstants.CaptureOutputLocationEnvKeyHostPath), Value: hostPath}, + {Name: captureConstants.CaptureNameEnvKey, Value: captureName}, + {Name: captureConstants.IncludeMetadataEnvKey, Value: "false"}, + {Name: captureConstants.NodeHostNameEnvKey, Value: "node1"}, + {Name: captureConstants.TcpdumpRawFilterEnvKey, Value: "-i eth0"}, + {Name: captureConstants.NetshFilterEnvKey, Value: "IPv4.Address=(10.225.0.4)"}, + { + Name: telemetry.EnvPodName, + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + { + Name: captureConstants.ApiserverEnvKey, + Value: apiServerURL, + }, + }, + isWindows: true, + }, + } + + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + objects := []runtime.Object{} + if tt.podList != nil { + objects = append(objects, tt.podList) + } + if tt.nodeList != nil { + objects = append(objects, tt.nodeList) + } + if tt.existingPVC != nil { + objects = append(objects, tt.existingPVC) + } + k8sClient := fakeclientset.NewSimpleClientset(objects...) 
+ captureToPodTranslator := NewCaptureToPodTranslatorForTest(k8sClient) + jobs, err := captureToPodTranslator.TranslateCaptureToJobs(&tt.capture) + if tt.wantErr != (err != nil) { + t.Errorf("TranslateCaptureToJobs() want(%t) error, got error %s", tt.wantErr, err) + } + if tt.wantErr { + return + } + job := commonJob.DeepCopy() + job.Spec.Template.Spec.Containers[0].Env = tt.podEnv + job.Spec.Template.Spec.Containers[0].VolumeMounts = tt.volumeMounts + job.Spec.Template.Spec.Volumes = tt.volumes + + if tt.isWindows { + containerAdministrator := "NT AUTHORITY\\SYSTEM" + useHostProcess := true + job.Spec.Template.Spec.Containers[0].SecurityContext = &corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{ + Add: []corev1.Capability{ + "NET_ADMIN", "SYS_ADMIN", + }, + }, + WindowsOptions: &corev1.WindowsSecurityContextOptions{ + HostProcess: &useHostProcess, + RunAsUserName: &containerAdministrator, + }, + } + job.Spec.Template.Spec.Containers[0].Command = []string{captureConstants.CaptureContainerEntrypointWin} + } + + cmpOption := cmpopts.SortSlices(func(enVar1, enVar2 corev1.EnvVar) bool { return enVar1.Name < enVar2.Name }) + + if diff := cmp.Diff([]*batchv1.Job{job}, jobs, cmpOption); diff != "" { + t.Errorf("TranslateCaptureToJobs() mismatch (-want, +got):\n%s", diff) + } + }) + } +} + +func Test_CaptureToPodTranslator_TranslateCaptureToJobs_JobNumLimit(t *testing.T) { + captureName := "capture-test" + hostPath := "/tmp/capture" + cases := []struct { + name string + capture retinav1alpha1.Capture + nodeList *corev1.NodeList + jobNumLimit int + wantErr bool + }{ + { + name: "job number limit does not reach", + capture: retinav1alpha1.Capture{ + ObjectMeta: metav1.ObjectMeta{ + Name: captureName, + }, + Spec: retinav1alpha1.CaptureSpec{ + CaptureConfiguration: retinav1alpha1.CaptureConfiguration{ + CaptureTarget: retinav1alpha1.CaptureTarget{ + NodeSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: corev1.LabelHostname, + Operator: metav1.LabelSelectorOpIn, + Values: []string{"node1", "node2"}, + }}, + }, + }, + CaptureOption: retinav1alpha1.CaptureOption{ + Duration: &metav1.Duration{Duration: 1 * time.Minute}, + }, + }, + OutputConfiguration: retinav1alpha1.OutputConfiguration{ + HostPath: &hostPath, + }, + }, + }, + nodeList: &corev1.NodeList{ + Items: []corev1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{corev1.LabelHostname: "node1", corev1.LabelOSStable: "linux"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{corev1.LabelHostname: "node2", corev1.LabelOSStable: "linux"}}}, + }, + }, + jobNumLimit: 2, + }, + { + name: "job number limit reach", + capture: retinav1alpha1.Capture{ + ObjectMeta: metav1.ObjectMeta{ + Name: captureName, + }, + Spec: retinav1alpha1.CaptureSpec{ + CaptureConfiguration: retinav1alpha1.CaptureConfiguration{ + CaptureTarget: retinav1alpha1.CaptureTarget{ + NodeSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: corev1.LabelHostname, + Operator: metav1.LabelSelectorOpIn, + Values: []string{"node1", "node2", "node3"}, + }}, + }, + }, + CaptureOption: retinav1alpha1.CaptureOption{ + Duration: &metav1.Duration{Duration: 1 * time.Minute}, + }, + }, + OutputConfiguration: retinav1alpha1.OutputConfiguration{ + HostPath: &hostPath, + }, + }, + }, + nodeList: &corev1.NodeList{ + Items: []corev1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{corev1.LabelHostname: "node1", 
corev1.LabelOSStable: "linux"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{corev1.LabelHostname: "node2", corev1.LabelOSStable: "linux"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: map[string]string{corev1.LabelHostname: "node3", corev1.LabelOSStable: "linux"}}}, + }, + }, + jobNumLimit: 2, + wantErr: true, + }, + } + + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + objects := []runtime.Object{} + if tt.nodeList != nil { + objects = append(objects, tt.nodeList) + } + + k8sClient := fakeclientset.NewSimpleClientset(objects...) + captureToPodTranslator := NewCaptureToPodTranslatorForTest(k8sClient) + captureToPodTranslator.config.CaptureJobNumLimit = tt.jobNumLimit + _, err := captureToPodTranslator.TranslateCaptureToJobs(&tt.capture) + if tt.wantErr != (err != nil) { + t.Errorf("TranslateCaptureToJobs() want(%t) error, got error %s", tt.wantErr, err) + } + if !tt.wantErr { + return + } + + require.IsType(t, err, CaptureJobNumExceedLimitError{}, "TranslateCaptureToJobs() does not raise CaptureJobNumExceedLimitError") + }) + } +} + +func Test_CaptureToPodTranslator_ValidateTargetSelector(t *testing.T) { + nodeSelector := map[string]string{"agent-pool": "agent-pool"} + namespaceSelector := map[string]string{"kubernetes.io/cluster-service": "true"} + podSelector := map[string]string{"kubernetes.io/os": "linux"} + + cases := []struct { + name string + captureTarget retinav1alpha1.CaptureTarget + wantErr bool + }{ + { + name: "Provided: nodeSelector", + captureTarget: retinav1alpha1.CaptureTarget{ + NodeSelector: &metav1.LabelSelector{ + MatchLabels: nodeSelector, + }, + }, + wantErr: false, + }, + { + name: "Provided: namespaceSelector, podSelector", + captureTarget: retinav1alpha1.CaptureTarget{ + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: namespaceSelector, + }, + PodSelector: &metav1.LabelSelector{ + MatchLabels: podSelector, + }, + }, + wantErr: false, + }, + { + name: "Provided: nodeSelector, namespaceSelector, podSelector", + captureTarget: retinav1alpha1.CaptureTarget{ + NodeSelector: &metav1.LabelSelector{ + MatchLabels: nodeSelector, + }, + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: namespaceSelector, + }, + PodSelector: &metav1.LabelSelector{ + MatchLabels: podSelector, + }, + }, + wantErr: true, + }, + { + name: "Provided: nodeSelector, namespaceSelector", + captureTarget: retinav1alpha1.CaptureTarget{ + NodeSelector: &metav1.LabelSelector{ + MatchLabels: nodeSelector, + }, + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: namespaceSelector, + }, + }, + wantErr: true, + }, + { + name: "Provided: nodeSelector, podSelector", + captureTarget: retinav1alpha1.CaptureTarget{ + NodeSelector: &metav1.LabelSelector{ + MatchLabels: nodeSelector, + }, + PodSelector: &metav1.LabelSelector{ + MatchLabels: podSelector, + }, + }, + wantErr: true, + }, + { + name: "Provided: namespaceSelector", + captureTarget: retinav1alpha1.CaptureTarget{ + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: namespaceSelector, + }, + }, + wantErr: true, + }, + { + name: "Provided: podSelector", + captureTarget: retinav1alpha1.CaptureTarget{ + PodSelector: &metav1.LabelSelector{ + MatchLabels: podSelector, + }, + }, + wantErr: false, + }, + { + name: "Provided: (none)", + captureTarget: retinav1alpha1.CaptureTarget{}, + wantErr: true, + }, + } + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + objects := []runtime.Object{} + k8sClient := fakeclientset.NewSimpleClientset(objects...) 
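+			// The cases above encode the supported target combinations: a capture selects either
+			// nodes (NodeSelector alone) or pods (PodSelector, optionally scoped by a NamespaceSelector);
+			// mixing NodeSelector with pod/namespace selectors, or providing a NamespaceSelector alone,
+			// is expected to be rejected.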
+ captureToPodTranslator := NewCaptureToPodTranslatorForTest(k8sClient) + err := captureToPodTranslator.validateTargetSelector(tt.captureTarget) + if tt.wantErr != (err != nil) { + t.Errorf("%s validateTargetSelector() want(%t) error, got error %s", tt.name, tt.wantErr, err) + } + }) + } +} + +func Test_CaptureToPodTranslator_obtainTcpdumpFilters(t *testing.T) { + cases := []struct { + name string + captureConfig retinav1alpha1.CaptureConfiguration + wantedTcpdumpFilter string + wantErr bool + }{ + { + name: "empty filter", + captureConfig: retinav1alpha1.CaptureConfiguration{}, + wantedTcpdumpFilter: "", + }, + { + name: "include filter is empty", + captureConfig: retinav1alpha1.CaptureConfiguration{ + Filters: &retinav1alpha1.CaptureConfigurationFilters{ + Exclude: []string{"192.168.1.1:80", "192.168.1.2:90", "*:101", "192.168.1.3"}, + }, + }, + wantedTcpdumpFilter: "not ((port 101) or (host 192.168.1.1 and port 80) or (host 192.168.1.2 and port 90) or (host 192.168.1.3))", + }, + { + name: "exclude filter is empty", + captureConfig: retinav1alpha1.CaptureConfiguration{ + Filters: &retinav1alpha1.CaptureConfigurationFilters{ + Include: []string{"192.168.0.1:80", "192.168.0.2:90", "*:100", "192.168.0.3"}, + }, + }, + wantedTcpdumpFilter: "((port 100) or (host 192.168.0.1 and port 80) or (host 192.168.0.2 and port 90) or (host 192.168.0.3))", + }, + { + name: "combine any ip and limited ports", + captureConfig: retinav1alpha1.CaptureConfiguration{ + Filters: &retinav1alpha1.CaptureConfigurationFilters{ + Include: []string{"*:100", "200"}, + }, + }, + wantedTcpdumpFilter: "((port 100) or (port 200))", + }, + { + name: "combine limited ip with any port", + captureConfig: retinav1alpha1.CaptureConfiguration{ + Filters: &retinav1alpha1.CaptureConfigurationFilters{ + Include: []string{"192.168.0.1", "192.168.0.2"}, + }, + }, + wantedTcpdumpFilter: "((host 192.168.0.1) or (host 192.168.0.2))", + }, + { + name: "include filter and exclude filter", + captureConfig: retinav1alpha1.CaptureConfiguration{ + Filters: &retinav1alpha1.CaptureConfigurationFilters{ + Include: []string{"192.168.0.1:80", "192.168.0.2:90", "*:100", "192.168.0.3"}, + Exclude: []string{"192.168.1.1:80", "192.168.1.2:90", "*:101", "192.168.1.3"}, + }, + }, + wantedTcpdumpFilter: "((port 100) or (host 192.168.0.1 and port 80) or (host 192.168.0.2 and port 90) or (host 192.168.0.3)) and not ((port 101) or (host 192.168.1.1 and port 80) or (host 192.168.1.2 and port 90) or (host 192.168.1.3))", + }, + { + name: "include and exclude filters", + captureConfig: retinav1alpha1.CaptureConfiguration{ + Filters: &retinav1alpha1.CaptureConfigurationFilters{ + Include: []string{"192.168.0.1:80"}, + Exclude: []string{"192.168.1.1:80"}, + }, + }, + wantedTcpdumpFilter: "((host 192.168.0.1 and port 80)) and not ((host 192.168.1.1 and port 80))", + }, + { + name: "raise error when when the port filter exceeds lower limit", + captureConfig: retinav1alpha1.CaptureConfiguration{ + Filters: &retinav1alpha1.CaptureConfigurationFilters{ + Include: []string{"-1"}, + }, + }, + wantErr: true, + }, + { + name: "raise error when the port filter exceeds upper limit", + captureConfig: retinav1alpha1.CaptureConfiguration{ + Filters: &retinav1alpha1.CaptureConfigurationFilters{ + Include: []string{"65536"}, + }, + }, + wantErr: true, + }, + { + name: "raise error when specifying bad ip address", + captureConfig: retinav1alpha1.CaptureConfiguration{ + Filters: &retinav1alpha1.CaptureConfigurationFilters{ + Include: []string{"192.168.0.1:80", "192.168.0.256"}, 
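+					// 192.168.0.256 is not a valid IPv4 address (the last octet is out of range),
+					// so obtainTcpdumpFilters is expected to return an error.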
+ }, + }, + wantErr: true, + }, + { + name: "return empty filter when filter is empty", + captureConfig: retinav1alpha1.CaptureConfiguration{ + Filters: &retinav1alpha1.CaptureConfigurationFilters{ + Include: []string{}, + }, + }, + wantedTcpdumpFilter: "", + }, + } + + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + k8sClient := fakeclientset.NewSimpleClientset() + captureToPodTranslator := NewCaptureToPodTranslatorForTest(k8sClient) + tcpdumpFilter, err := captureToPodTranslator.obtainTcpdumpFilters(tt.captureConfig) + if tt.wantErr != (err != nil) { + t.Errorf("obtainTcpdumpFilters() want(%t) error, got error %s", tt.wantErr, err) + } + + if diff := cmp.Diff(tt.wantedTcpdumpFilter, tcpdumpFilter); diff != "" { + t.Errorf("TranslateCaptureToJobs() mismatch (-want, +got):\n%s", diff) + } + }) + } +} + +func TestUpdateTcpdumpFilterWithPodIPAddress(t *testing.T) { + cases := []struct { + name string + podIPAddresses []string + tcpdumpFilter string + wantedUpdatedTcpdumpFilter string + }{ + { + name: "empty pod ip address and filter", + podIPAddresses: []string{}, + tcpdumpFilter: "", + wantedUpdatedTcpdumpFilter: "", + }, + { + name: "empty pod ip address", + podIPAddresses: []string{}, + tcpdumpFilter: "((host 192.168.1.1) or (host 192.168.1.2))", + wantedUpdatedTcpdumpFilter: "((host 192.168.1.1) or (host 192.168.1.2))", + }, + { + name: "one pod ip address and empty filter", + podIPAddresses: []string{"192.168.0.1"}, + tcpdumpFilter: "", + wantedUpdatedTcpdumpFilter: "(host 192.168.0.1)", + }, + { + name: "multiple pod ip address and empty filter", + podIPAddresses: []string{"192.168.0.1", "192.168.0.2"}, + tcpdumpFilter: "", + wantedUpdatedTcpdumpFilter: "(host 192.168.0.1 or host 192.168.0.2)", + }, + { + name: "pod ip address and filter", + podIPAddresses: []string{"192.168.1.1", "192.168.1.2"}, + tcpdumpFilter: "((host 192.168.0.1) or (host 192.168.0.2))", + wantedUpdatedTcpdumpFilter: "((host 192.168.0.1) or (host 192.168.0.2)) or (host 192.168.1.1 or host 192.168.1.2)", + }, + } + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + gotUpdatedTcpdumpFilter := updateTcpdumpFilterWithPodIPAddress(tt.podIPAddresses, tt.tcpdumpFilter) + if diff := cmp.Diff(tt.wantedUpdatedTcpdumpFilter, gotUpdatedTcpdumpFilter); diff != "" { + t.Errorf("updateTcpdumpFilterWithPodIPAddress() mismatch (-want, +got):\n%s", diff) + } + }) + } +} + +func TestGetNetshFilterWithPodIPAddress(t *testing.T) { + cases := []struct { + name string + podIPAddresses []string + wantedNetshFilter string + }{ + { + name: "empty pod ip address", + podIPAddresses: []string{}, + wantedNetshFilter: "", + }, + { + name: "one pod ip address", + podIPAddresses: []string{"192.168.1.1"}, + wantedNetshFilter: "IPv4.Address=(192.168.1.1)", + }, + { + name: "multiple pod ip addresses", + podIPAddresses: []string{"192.168.1.1", "192.168.1.2"}, + wantedNetshFilter: "IPv4.Address=(192.168.1.1,192.168.1.2)", + }, + { + name: "one ipv4 and ipv6 address", + podIPAddresses: []string{"192.168.1.1", "2001:1234:5678:9abc::5"}, + wantedNetshFilter: "IPv4.Address=(192.168.1.1) IPv6.Address=(2001:1234:5678:9abc::5)", + }, + { + name: "multiple ipv4 and ipv6 addresses", + podIPAddresses: []string{"192.168.1.1", "192.168.1.2", "2001:1234:5678:9abc::5", "2001:1234:5678:9abc::6"}, + wantedNetshFilter: "IPv4.Address=(192.168.1.1,192.168.1.2) IPv6.Address=(2001:1234:5678:9abc::5,2001:1234:5678:9abc::6)", + }, + } + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + gotNetshFilter := 
getNetshFilterWithPodIPAddress(tt.podIPAddresses) + if diff := cmp.Diff(tt.wantedNetshFilter, gotNetshFilter); diff != "" { + t.Errorf("getNetshFilterWithPodIPAddress() mismatch (-want, +got):\n%s", diff) + } + }) + } +} diff --git a/pkg/capture/doc.go b/pkg/capture/doc.go new file mode 100644 index 000000000..5eb865fc8 --- /dev/null +++ b/pkg/capture/doc.go @@ -0,0 +1,5 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package capture contains functions related to handling Retina/Capture. +package capture // import "github.com/microsoft/retina/pkg/capture" diff --git a/pkg/capture/error.go b/pkg/capture/error.go new file mode 100644 index 000000000..7df8c8bc0 --- /dev/null +++ b/pkg/capture/error.go @@ -0,0 +1,29 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package capture + +import "fmt" + +var _ error = SecretNotFoundError{} + +type SecretNotFoundError struct { + SecretName string + Namespace string +} + +func (err SecretNotFoundError) Error() string { + return fmt.Sprintf("secret of Capture is not found when blobupload is enabled, secretName: %s/%s", err.SecretName, err.Namespace) +} + +// define a error struct for capture job number limit +var _ error = CaptureJobNumExceedLimitError{} + +type CaptureJobNumExceedLimitError struct { + CurrentNum int + Limit int +} + +func (err CaptureJobNumExceedLimitError) Error() string { + return fmt.Sprintf("the number of capture jobs %d exceeds the limit %d", err.CurrentNum, err.Limit) +} diff --git a/pkg/capture/outputlocation/blob.go b/pkg/capture/outputlocation/blob.go new file mode 100644 index 000000000..fb31560b8 --- /dev/null +++ b/pkg/capture/outputlocation/blob.go @@ -0,0 +1,115 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package outputlocation + +import ( + "context" + "fmt" + "io/ioutil" + "net/url" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "go.uber.org/zap" + + captureConstants "github.com/microsoft/retina/pkg/capture/constants" + "github.com/microsoft/retina/pkg/log" +) + +type BlobUpload struct { + l *log.ZapLogger +} + +var _ Location = &BlobUpload{} + +func NewBlobUpload(logger *log.ZapLogger) Location { + return &BlobUpload{l: logger} +} + +func (bu *BlobUpload) Name() string { + return "BlobUpload" +} + +func (bu *BlobUpload) Enabled() bool { + _, err := readBlobURL() + if err != nil { + bu.l.Debug("Output location is not enabled", zap.String("location", bu.Name())) + return false + } + return true +} + +func (bu *BlobUpload) Output(srcFilePath string) error { + bu.l.Info("Upload capture file to blob.", zap.String("location", bu.Name())) + blobURL, err := readBlobURL() + if err != nil { + bu.l.Error("Failed to read blob url", zap.Error(err)) + return err + } + + if err = validateBlobURL(blobURL); err != nil { + bu.l.Error("Failed to validate blob url", zap.Error(err)) + return err + } + + // TODO: add retry policy + azClient, err := azblob.NewClientWithNoCredential(blobURL, nil) + if err != nil { + bu.l.Error("Failed to create blob client", zap.String("location", bu.Name()), zap.Error(err)) + return err + } + + blobFile, err := os.Open(srcFilePath) + if err != nil { + bu.l.Error("Failed to open capture file", zap.Error(err)) + return err + } + defer blobFile.Close() + + blobName := filepath.Base(srcFilePath) + _, err = azClient.UploadFile( + context.TODO(), + "", + blobName, + blobFile, + // TODO: add metadata + &azblob.UploadFileOptions{}) + if err != nil { + bu.l.Error("Failed to upload file to storage account", zap.String("location", bu.Name()), zap.Error(err)) + return err + } + bu.l.Info("Done for uploading capture file to storage account", zap.String("location", bu.Name())) + return nil +} + +func readBlobURL() (string, error) { + secretPath := filepath.Join(captureConstants.CaptureOutputLocationBlobUploadSecretPath, captureConstants.CaptureOutputLocationBlobUploadSecretKey) + if runtime.GOOS == "windows" { + containerSandboxMountPoint := os.Getenv(captureConstants.ContainerSandboxMountPointEnvKey) + if len(containerSandboxMountPoint) == 0 { + return "", fmt.Errorf("failed to find sandbox mount path through env %s", captureConstants.ContainerSandboxMountPointEnvKey) + } + secretPath = filepath.Join(containerSandboxMountPoint, captureConstants.CaptureOutputLocationBlobUploadSecretPath, captureConstants.CaptureOutputLocationBlobUploadSecretKey) + } + secretBytes, err := ioutil.ReadFile(secretPath) + return string(secretBytes), err +} + +func validateBlobURL(blobURL string) error { + u, err := url.Parse(blobURL) + if err != nil { + return err + } + + // Split the path into storage account container and blob + path := strings.TrimPrefix(u.Path, "/") + if path == "" { + return fmt.Errorf("invalid blob URL") + } + + return nil +} diff --git a/pkg/capture/outputlocation/blob_test.go b/pkg/capture/outputlocation/blob_test.go new file mode 100644 index 000000000..ce8f982c8 --- /dev/null +++ b/pkg/capture/outputlocation/blob_test.go @@ -0,0 +1,58 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package outputlocation + +import ( + "fmt" + "net/url" + "testing" +) + +func TestValidateBlobURL(t *testing.T) { + tests := []struct { + name string + inputURL string + expectedError error + }{ + { + name: "valid input URL with sas token", + inputURL: "https://retina.blob.core.windows.net/container/blob?sp=r&st=2023-02-17T19:13:30Z&se=2023-02-18T03:13:30Z&spr=https&sv=2021-06-08&sr=c&sig=NtSxlRK5Vs4kVs1dIOfr%2FMdLKBVTA4t3uJ0gqLZ9exk%3D", + expectedError: nil, + }, + { + name: "valid input URL without sas token", + inputURL: "https://retina.blob.core.windows.net/container/blob", + expectedError: nil, + }, + { + name: "missing container name", + inputURL: "https://retina.blob.core.windows.net", + expectedError: fmt.Errorf("invalid blob url"), + }, + { + name: "missing blob name", + inputURL: "https://retina.blob.core.windows.net/container", + expectedError: nil, + }, + { + name: "empty input URL", + inputURL: "", + expectedError: &url.Error{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := validateBlobURL(tt.inputURL) + + if err != nil && tt.expectedError == nil { + t.Errorf("Unexpected error: %v", err) + } + + if err == nil && tt.expectedError != nil { + t.Errorf("Expected error: %v, but got none", tt.expectedError) + } + }) + } +} diff --git a/pkg/capture/outputlocation/hostpath.go b/pkg/capture/outputlocation/hostpath.go new file mode 100644 index 000000000..bff6d52c2 --- /dev/null +++ b/pkg/capture/outputlocation/hostpath.go @@ -0,0 +1,64 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package outputlocation + +import ( + "io" + "os" + "path/filepath" + + "go.uber.org/zap" + + captureConstants "github.com/microsoft/retina/pkg/capture/constants" + "github.com/microsoft/retina/pkg/log" +) + +type HostPath struct { + l *log.ZapLogger +} + +var _ Location = &HostPath{} + +func NewHostPath(logger *log.ZapLogger) Location { + return &HostPath{l: logger} +} + +func (hp *HostPath) Name() string { + return "HostPath" +} + +func (hp *HostPath) Enabled() bool { + hostPath := os.Getenv(string(captureConstants.CaptureOutputLocationEnvKeyHostPath)) + if len(hostPath) == 0 { + hp.l.Debug("Output location is not enabled", zap.String("location", hp.Name())) + return false + } + return true +} + +func (hp *HostPath) Output(srcFilePath string) error { + hostPath := os.Getenv(string(captureConstants.CaptureOutputLocationEnvKeyHostPath)) + hp.l.Info("Copy file", + zap.String("location", hp.Name()), + zap.String("source file path", srcFilePath), + zap.String("destination file path", hostPath), + ) + + srcFile, err := os.Open(srcFilePath) + if err != nil { + return err + } + defer srcFile.Close() + + fileName := filepath.Base(srcFilePath) + fileHostPath := filepath.Join(hostPath, fileName) + destFile, err := os.Create(fileHostPath) + if err != nil { + return err + } + defer destFile.Close() + + _, err = io.Copy(destFile, srcFile) + return err +} diff --git a/pkg/capture/outputlocation/output.go b/pkg/capture/outputlocation/output.go new file mode 100644 index 000000000..f7e01a168 --- /dev/null +++ b/pkg/capture/outputlocation/output.go @@ -0,0 +1,13 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package outputlocation + +type Location interface { + // Name returns the name of the output location. + Name() string + // Enabled checks whether a output location is enabled. + Enabled() bool + // Output outputs source file to the location specified by the users. 
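+	//
+	// An illustrative caller writes the capture file to every enabled location, where
+	// locations is assumed to be a slice of Location values:
+	//
+	//	for _, loc := range locations {
+	//		if !loc.Enabled() {
+	//			continue
+	//		}
+	//		if err := loc.Output(srcFilePath); err != nil {
+	//			// handle or log the per-location failure
+	//		}
+	//	}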
+ Output(srcFilePath string) error +} diff --git a/pkg/capture/outputlocation/output_test.go b/pkg/capture/outputlocation/output_test.go new file mode 100644 index 000000000..40fb519f1 --- /dev/null +++ b/pkg/capture/outputlocation/output_test.go @@ -0,0 +1,123 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package outputlocation + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" + + captureConstants "github.com/microsoft/retina/pkg/capture/constants" + "github.com/microsoft/retina/pkg/log" +) + +func TestEnabled(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + cases := []struct { + name string + env map[string]string + enabled bool + outputLocation Location + }{ + { + name: "HostPath output location is enabled", + env: map[string]string{ + string(captureConstants.CaptureOutputLocationEnvKeyHostPath): "/tmp/capture", + }, + enabled: true, + outputLocation: NewHostPath(log.Logger().Named(string(captureConstants.CaptureOutputLocationEnvKeyHostPath))), + }, + { + name: "HostPath output location is disabled", + env: map[string]string{}, + enabled: false, + outputLocation: NewHostPath(log.Logger().Named(string(captureConstants.CaptureOutputLocationEnvKeyHostPath))), + }, + { + name: "PVC output location is enabled", + env: map[string]string{ + string(captureConstants.CaptureOutputLocationEnvKeyPersistentVolumeClaim): "mypvc", + }, + enabled: true, + outputLocation: NewPersistentVolumeClaim(log.Logger().Named(string(captureConstants.CaptureOutputLocationEnvKeyPersistentVolumeClaim))), + }, + { + name: "PVC output location is disabled", + env: map[string]string{}, + enabled: false, + outputLocation: NewPersistentVolumeClaim(log.Logger().Named(string(captureConstants.CaptureOutputLocationEnvKeyPersistentVolumeClaim))), + }, + { + name: "Blob output location is disabled", + env: map[string]string{}, + enabled: false, + outputLocation: NewBlobUpload(log.Logger().Named("blob")), + }, + } + + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + for k, v := range tt.env { + os.Setenv(k, v) + } + + defer func() { + for k := range tt.env { + os.Unsetenv(k) + } + }() + + isEnabled := tt.outputLocation.Enabled() + assert.Equal(t, isEnabled, tt.enabled, "Failed enablement check for output location") + }) + } +} + +func TestOutput(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + cases := []struct { + name string + env map[string]string + outputLocation Location + srcPath string + hasError bool + }{ + { + name: "HostPath output failed on no src file", + env: map[string]string{ + string(captureConstants.CaptureOutputLocationEnvKeyHostPath): "out", + }, + outputLocation: NewHostPath(log.Logger().Named(string(captureConstants.CaptureOutputLocationEnvKeyHostPath))), + srcPath: "src.out", + hasError: true, + }, + { + name: "PVC output failed on no src file", + env: map[string]string{ + string(captureConstants.CaptureOutputLocationEnvKeyPersistentVolumeClaim): "out", + }, + outputLocation: NewPersistentVolumeClaim(log.Logger().Named(string(captureConstants.CaptureOutputLocationEnvKeyPersistentVolumeClaim))), + srcPath: "src.out", + hasError: true, + }, + } + + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + for k, v := range tt.env { + os.Setenv(k, v) + } + + defer func() { + for k := range tt.env { + os.Unsetenv(k) + } + }() + + err := tt.outputLocation.Output(tt.srcPath) + assert.Equal(t, tt.hasError, err != nil, "Output check failed on source file open") + }) + } +} diff --git 
a/pkg/capture/outputlocation/pvc.go b/pkg/capture/outputlocation/pvc.go new file mode 100644 index 000000000..f56d507f5 --- /dev/null +++ b/pkg/capture/outputlocation/pvc.go @@ -0,0 +1,63 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package outputlocation + +import ( + "io" + "os" + "path/filepath" + + "go.uber.org/zap" + + captureConstants "github.com/microsoft/retina/pkg/capture/constants" + "github.com/microsoft/retina/pkg/log" +) + +type PersistentVolumeClaim struct { + l *log.ZapLogger +} + +var _ Location = &PersistentVolumeClaim{} + +func NewPersistentVolumeClaim(logger *log.ZapLogger) Location { + return &PersistentVolumeClaim{l: logger} +} + +func (pvc *PersistentVolumeClaim) Name() string { + return "PersistentVolumeClaim" +} + +func (pvc *PersistentVolumeClaim) Enabled() bool { + pvcOutput := os.Getenv(string(captureConstants.CaptureOutputLocationEnvKeyPersistentVolumeClaim)) + if len(pvcOutput) == 0 { + pvc.l.Debug("Output location is not enabled", zap.String("location", pvc.Name())) + return false + } + return true +} + +func (pvc *PersistentVolumeClaim) Output(srcFilePath string) error { + dstDir := captureConstants.PersistentVolumeClaimVolumeMountPathLinux + pvc.l.Info("Copy file", + zap.String("location", pvc.Name()), + zap.String("source file path", srcFilePath), + zap.String("destination file path", dstDir), + ) + srcFile, err := os.Open(srcFilePath) + if err != nil { + return err + } + defer srcFile.Close() + + fileName := filepath.Base(srcFilePath) + fileHostPath := filepath.Join(dstDir, fileName) + destFile, err := os.Create(fileHostPath) + if err != nil { + return err + } + defer destFile.Close() + + _, err = io.Copy(destFile, srcFile) + return err +} diff --git a/pkg/capture/provider/interface.go b/pkg/capture/provider/interface.go new file mode 100644 index 000000000..2fe12fbd5 --- /dev/null +++ b/pkg/capture/provider/interface.go @@ -0,0 +1,18 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package provider + +import "os" + +//go:generate go run github.com/golang/mock/mockgen@v1.6.0 -source=interface.go -destination=mock_network_capture.go -package=provider Interface +type NetworkCaptureProviderInterface interface { + // Setup prepares the provider with folder to store network capture for temporary. + Setup(captureJobName, nodeHostname string) (string, error) + // CaptureNetworkPacket capture network traffic per user input and store the captured network packets in local directory. + CaptureNetworkPacket(filter string, duration, maxSize int, sigChan <-chan os.Signal) error + // CollectMetadata collects network metadata and store network metadata info in local directory. + CollectMetadata() error + // Cleanup removes created resources. + Cleanup() error +} diff --git a/pkg/capture/provider/mock_network_capture.go b/pkg/capture/provider/mock_network_capture.go new file mode 100644 index 000000000..ec4c5d7c9 --- /dev/null +++ b/pkg/capture/provider/mock_network_capture.go @@ -0,0 +1,92 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: interface.go + +// Package provider is a generated GoMock package. +package provider + +import ( + os "os" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockNetworkCaptureProviderInterface is a mock of NetworkCaptureProviderInterface interface. 
+type MockNetworkCaptureProviderInterface struct { + ctrl *gomock.Controller + recorder *MockNetworkCaptureProviderInterfaceMockRecorder +} + +// MockNetworkCaptureProviderInterfaceMockRecorder is the mock recorder for MockNetworkCaptureProviderInterface. +type MockNetworkCaptureProviderInterfaceMockRecorder struct { + mock *MockNetworkCaptureProviderInterface +} + +// NewMockNetworkCaptureProviderInterface creates a new mock instance. +func NewMockNetworkCaptureProviderInterface(ctrl *gomock.Controller) *MockNetworkCaptureProviderInterface { + mock := &MockNetworkCaptureProviderInterface{ctrl: ctrl} + mock.recorder = &MockNetworkCaptureProviderInterfaceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockNetworkCaptureProviderInterface) EXPECT() *MockNetworkCaptureProviderInterfaceMockRecorder { + return m.recorder +} + +// CaptureNetworkPacket mocks base method. +func (m *MockNetworkCaptureProviderInterface) CaptureNetworkPacket(filter string, duration, maxSize int, sigChan <-chan os.Signal) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CaptureNetworkPacket", filter, duration, maxSize, sigChan) + ret0, _ := ret[0].(error) + return ret0 +} + +// CaptureNetworkPacket indicates an expected call of CaptureNetworkPacket. +func (mr *MockNetworkCaptureProviderInterfaceMockRecorder) CaptureNetworkPacket(filter, duration, maxSize, sigChan interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CaptureNetworkPacket", reflect.TypeOf((*MockNetworkCaptureProviderInterface)(nil).CaptureNetworkPacket), filter, duration, maxSize, sigChan) +} + +// Cleanup mocks base method. +func (m *MockNetworkCaptureProviderInterface) Cleanup() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Cleanup") + ret0, _ := ret[0].(error) + return ret0 +} + +// Cleanup indicates an expected call of Cleanup. +func (mr *MockNetworkCaptureProviderInterfaceMockRecorder) Cleanup() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Cleanup", reflect.TypeOf((*MockNetworkCaptureProviderInterface)(nil).Cleanup)) +} + +// CollectMetadata mocks base method. +func (m *MockNetworkCaptureProviderInterface) CollectMetadata() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CollectMetadata") + ret0, _ := ret[0].(error) + return ret0 +} + +// CollectMetadata indicates an expected call of CollectMetadata. +func (mr *MockNetworkCaptureProviderInterfaceMockRecorder) CollectMetadata() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CollectMetadata", reflect.TypeOf((*MockNetworkCaptureProviderInterface)(nil).CollectMetadata)) +} + +// Setup mocks base method. +func (m *MockNetworkCaptureProviderInterface) Setup(captureJobName, nodeHostname string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Setup", captureJobName, nodeHostname) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Setup indicates an expected call of Setup. 
+func (mr *MockNetworkCaptureProviderInterfaceMockRecorder) Setup(captureJobName, nodeHostname interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Setup", reflect.TypeOf((*MockNetworkCaptureProviderInterface)(nil).Setup), captureJobName, nodeHostname) +} diff --git a/pkg/capture/provider/network_capture_common.go b/pkg/capture/provider/network_capture_common.go new file mode 100644 index 000000000..7534bdf54 --- /dev/null +++ b/pkg/capture/provider/network_capture_common.go @@ -0,0 +1,65 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package provider + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/microsoft/retina/pkg/log" + "go.uber.org/zap" +) + +// captureLabelFolderName is the folder name to label the path as created by Kapppie. +const captureLabelFolderName = "retina-capture" //nolint:unused + +// captureNodetimestampName returns a unique name with the current UTC timestamp and provided variables. +func (ncpc *NetworkCaptureProviderCommon) CaptureNodetimestampName(captureName, nodeHostname string) string { + formatedUTCTime := strings.Replace(time.Now().UTC().Format("2006#01#02#03#04#05UTC"), "#", "", -1) + uniqueName := fmt.Sprintf("%s-%s-%s", captureName, nodeHostname, formatedUTCTime) + return uniqueName +} + +type NetworkCaptureProviderCommon struct { + TmpCaptureDir string + l *log.ZapLogger +} + +func (ncpc *NetworkCaptureProviderCommon) Setup(captureName, nodeHostname string) (string, error) { + captureFolderName := ncpc.CaptureNodetimestampName(captureName, nodeHostname) + captureFolderDir := filepath.Join(os.TempDir(), captureFolderName) + err := os.MkdirAll(captureFolderDir, 0o750) + if err != nil { + return "", err + } + ncpc.TmpCaptureDir = captureFolderDir + return ncpc.TmpCaptureDir, nil +} + +func (ncpc *NetworkCaptureProviderCommon) Cleanup() { + if err := os.RemoveAll(ncpc.TmpCaptureDir); err != nil { + ncpc.l.Error("Failed to delete folder", zap.String("folder name", ncpc.TmpCaptureDir), zap.Error(err)) + } +} + +func (ncpc *NetworkCaptureProviderCommon) networkCaptureCommandLog(logFileName string, captureCommand *exec.Cmd) (*os.File, error) { + captureCommandLogFilePath := filepath.Join(ncpc.TmpCaptureDir, logFileName) + captureCommandLogFile, err := os.OpenFile(captureCommandLogFilePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644) + if err != nil { + ncpc.l.Error("Failed to create network capture command log file", zap.String("network capture command log file path", captureCommandLogFilePath), zap.Error(err)) + return nil, err + } + + if _, err := captureCommandLogFile.WriteString(fmt.Sprintf("%s\n\n", captureCommand.String())); err != nil { + ncpc.l.Error("Failed to write capture command to file", zap.String("file", captureCommandLogFile.Name()), zap.Error(err)) + } + + captureCommand.Stdout = captureCommandLogFile + captureCommand.Stderr = captureCommandLogFile + return captureCommandLogFile, nil +} diff --git a/pkg/capture/provider/network_capture_linux.go b/pkg/capture/provider/network_capture_linux.go new file mode 100644 index 000000000..0bf7e3d14 --- /dev/null +++ b/pkg/capture/provider/network_capture_linux.go @@ -0,0 +1,378 @@ +//go:build linux +// +build linux + +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package provider + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "syscall" + "time" + + "go.uber.org/zap" + + captureConstants "github.com/microsoft/retina/pkg/capture/constants" + "github.com/microsoft/retina/pkg/log" +) + +type NetworkCaptureProvider struct { + NetworkCaptureProviderCommon + TmpCaptureDir string + CaptureName string + NodeHostName string + + l *log.ZapLogger +} + +var _ NetworkCaptureProviderInterface = &NetworkCaptureProvider{} + +func NewNetworkCaptureProvider(logger *log.ZapLogger) NetworkCaptureProviderInterface { + return &NetworkCaptureProvider{ + NetworkCaptureProviderCommon: NetworkCaptureProviderCommon{l: logger}, + l: logger, + } +} + +func (ncp *NetworkCaptureProvider) Setup(captureName, nodeHostname string) (string, error) { + captureFolderDir, err := ncp.NetworkCaptureProviderCommon.Setup(captureName, nodeHostname) + if err != nil { + return "", err + } + ncp.l.Info("Created temporary folder for network capture", zap.String("capture temporary folder", captureFolderDir)) + + ncp.TmpCaptureDir = captureFolderDir + ncp.CaptureName = captureName + ncp.NodeHostName = nodeHostname + return ncp.TmpCaptureDir, nil +} + +func (ncp *NetworkCaptureProvider) CaptureNetworkPacket(filter string, duration, maxSizeMB int, sigChan <-chan os.Signal) error { + captureFileName := ncp.NetworkCaptureProviderCommon.CaptureNodetimestampName(ncp.CaptureName, ncp.NodeHostName) + captureFileName = fmt.Sprintf("%s.pcap", captureFileName) + captureFilePath := filepath.Join(ncp.TmpCaptureDir, captureFileName) + + // Remove the folder in case it already exists to mislead the file size check. + os.Remove(captureFilePath) //nolint:errcheck + + // NOTE(mainred): The tcpdump release of debian:bullseye image, which is for preparing clang and tools, runs as + // tcpdump user by default for savefiles for output, but when the binary and library are copied to the distroless + // base image, we lost tcpdump user, and the following error will be raised when running tcpdump in our capture pod. + // tcpdump: Couldn't find user 'tcpdump' + // To disable this behavior, we use `--relinquish-privileges=root` same as `-Z root`. + // ref: https://manpages.debian.org/bullseye/tcpdump/tcpdump.8.en.html#Z + captureStartCmd := exec.Command( + "tcpdump", + "-w", captureFilePath, + "--relinquish-privileges=root", + ) + + if packetSize := os.Getenv(captureConstants.PacketSizeEnvKey); len(packetSize) != 0 { + captureStartCmd.Args = append( + captureStartCmd.Args, + "-s", packetSize, + ) + } + + // If we set flag and value into the arg item of args, the space between flag and value will not treated as part of + // value, for example, "-i eth0" will be treated as "-i" and " eth0", thus brings a tcpdump unknown interface error. + if tcpdumpRawFilter := os.Getenv(captureConstants.TcpdumpRawFilterEnvKey); len(tcpdumpRawFilter) != 0 { + tcpdumpRawFilterSlice := strings.Split(tcpdumpRawFilter, " ") + captureStartCmd.Args = append(captureStartCmd.Args, tcpdumpRawFilterSlice...) + } + + if len(filter) != 0 { + captureStartCmd.Args = append( + captureStartCmd.Args, + filter, + ) + } + + ncp.l.Info("Running tcpdump with args", zap.String("tcpdump command", captureStartCmd.String()), zap.Any("tcpdump args", captureStartCmd.Args)) + + tcpdumpLogFile, err := ncp.NetworkCaptureProviderCommon.networkCaptureCommandLog("tcpdump.log", captureStartCmd) + if err != nil { + return err + } + + // Store tcpdpump log as part of capture artifacts. 
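+	// The deferred read below also echoes tcpdump's own output into the pod log, so it stays
+	// visible after the temporary capture directory is cleaned up.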
+	defer func() {
+		if tcpdumpLog, err := os.ReadFile(tcpdumpLogFile.Name()); err != nil {
+			ncp.l.Warn("Failed to read tcpdump log", zap.Error(err))
+		} else {
+			ncp.l.Info(fmt.Sprintf("Tcpdump command output: %s", string(tcpdumpLog)))
+		}
+		tcpdumpLogFile.Close()
+	}()
+
+	err = captureStartCmd.Start()
+
+	if err != nil {
+		ncp.l.Error("Failed to start tcpdump", zap.Error(err))
+		return err
+	}
+
+	doneChan := make(chan bool)
+	errChan := make(chan error)
+
+	// NOTE(mainred): We tried to use `-W=1` plus `-G=$Duration` to exit tcpdump, but when the rotation interval
+	// specified by `-G` is reached, tcpdump does not stop and the capture file is rotated instead.
+	if duration != 0 {
+		ncp.l.Info(fmt.Sprintf("Tcpdump will stop after %v seconds", duration))
+		go func() {
+			time.Sleep(time.Second * time.Duration(duration))
+			doneChan <- true
+		}()
+	}
+
+	// TODO(mainred): make check interval configurable.
+	fileSizeCheckIntervalInSecond := 5
+	// Tcpdump cannot stop itself once a given file size is reached, so we check the capture file size at a constant
+	// interval and stop the tcpdump process when the file size meets the requirement.
+	if maxSizeMB != 0 {
+		ncp.l.Info(fmt.Sprintf("Tcpdump will stop when the capture file size reaches %dMB.", maxSizeMB))
+		go func() {
+			// Chances are that the capture file has not been created yet when we first check the file size.
+			time.Sleep(time.Second * time.Duration(fileSizeCheckIntervalInSecond))
+			captureFile, err := os.Open(captureFilePath)
+			if err != nil {
+				ncp.l.Error("Failed to open capture file", zap.String("capture file path", captureFilePath), zap.Error(err))
+				ncp.l.Error("Please make sure tcpdump command is constructed with expected arguments", zap.String("tcpdump args", fmt.Sprintf("%+q", captureStartCmd.Args)))
+				errChan <- fmt.Errorf("tcpdump command is not constructed with expected arguments")
+				return
+			}
+
+			for {
+				fileStat, err := captureFile.Stat()
+				if err != nil {
+					ncp.l.Error("Failed to get capture file info", zap.String("capture file path", captureFilePath), zap.Error(err))
+					continue
+				}
+				fileSizeBytes := fileStat.Size()
+				if int(fileSizeBytes) > maxSizeMB*1024*1024 {
+					break
+				}
+
+				time.Sleep(time.Second * time.Duration(fileSizeCheckIntervalInSecond))
+			}
+			doneChan <- true
+		}()
+	}
+
+	select {
+	case <-doneChan:
+	case sig := <-sigChan:
+		ncp.l.Info("Got OS signal, tcpdump will be stopped", zap.String("signal", sig.String()))
+	case err := <-errChan:
+		return err
+	}
+	ncp.l.Info("Stop tcpdump")
+	// A kill signal does not wait for the process to actually exit, so the captured network packets may not be
+	// flushed to the capture file. Instead, we send SIGTERM and wait for the process to exit.
+ if err := captureStartCmd.Process.Signal(syscall.SIGTERM); err != nil { + ncp.l.Error("Failed to signal terminate to process, will kill the process", zap.Error(err)) + if killErr := captureStartCmd.Process.Kill(); err != nil { + return fmt.Errorf("tcpdump stop failed, error: %s", killErr) + } + return err + } + if _, err = captureStartCmd.Process.Wait(); err != nil { + ncp.l.Error("Failed to wait for the process to exit", zap.Error(err)) + return err + } + + return nil +} + +type command struct { + name string + args []string + description string +} + +func (ncp *NetworkCaptureProvider) CollectMetadata() error { + ncp.l.Info("Start to collect network metadata") + + iptablesMode := obtainIptablesMode() + ncp.l.Info(fmt.Sprintf("Iptables mode %s is used", iptablesMode)) + iptablesSaveCmdName := fmt.Sprintf("iptables-%s-save", iptablesMode) + iptablesCmdName := fmt.Sprintf("iptables-%s", iptablesMode) + + metadataList := []struct { + commands []command + fileName string + }{ + { + commands: []command{ + { + name: "ip", + args: []string{"-d", "-j", "addr", "show"}, + description: "IP address configuration", + }, + { + name: "ip", + args: []string{"-d", "-j", "neighbor", "show"}, + description: "IP neighbor status", + }, + { + name: "ip", + args: []string{"rule", "list"}, + description: "Policy routing list", + }, + { + name: "ip", + args: []string{"route", "show", "table", "all"}, + description: "Routes of all route tables", + }, + }, + fileName: "ip-resources.txt", + }, + { + commands: []command{ + { + name: iptablesSaveCmdName, + description: "IPtables rules", + }, + { + name: iptablesCmdName, + args: []string{"-vnx", "-L"}, + description: "IPtables rules and stats in filter table", + }, + { + name: iptablesCmdName, + args: []string{"-vnx", "-L", "-t", "nat"}, + description: "IPtables rules and stats in nat table", + }, + { + name: iptablesCmdName, + args: []string{"-vnx", "-L", "-t", "mangle"}, + description: "IPtables rules and stats in mangle table", + }, + }, + fileName: "iptables-rules.txt", + }, + { + commands: []command{ + { + name: "ss", + args: []string{"-s"}, + description: "Socket statistics summary", + }, + { + name: "ss", + args: []string{"-tapionume"}, + description: "Socket statistics details", + }, + }, + fileName: "socket-stats.txt", + }, + { + commands: []command{ + { + name: "cp", + // '/proc/net' is a symbolic link to /proc/self/net since Linux 2.6.25 to honor network stack + // virtualization as the advent of network namespaces. + // https://man7.org/linux/man-pages/man5/proc.5.html + // NOTE(qinhao): We now clone only node host network net(self) stats here even when the capture + // target is container(s), for simplicity. 
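+					// Both copies land under the temporary capture directory, as proc-net and proc-sys-net.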
+ args: []string{"-r", "/proc/self/net", filepath.Join(ncp.TmpCaptureDir, "proc-net")}, + description: "networking stats", + }, + { + name: "cp", + args: []string{"-r", "/proc/sys/net", filepath.Join(ncp.TmpCaptureDir, "proc-sys-net")}, + description: "kernel networking configuration", + }, + }, + }, + } + + for _, metadata := range metadataList { + if len(metadata.fileName) != 0 { + captureMetadataFilePath := filepath.Join(ncp.TmpCaptureDir, metadata.fileName) + outfile, err := os.OpenFile(captureMetadataFilePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644) + if err != nil { + ncp.l.Error("Failed to create metadata file", zap.String("metadata file path", captureMetadataFilePath), zap.Error(err)) + continue + } + defer outfile.Close() + + if _, err := outfile.WriteString("Summary:\n\n"); err != nil { + ncp.l.Error("Failed to write summary to file", zap.String("file", outfile.Name()), zap.Error(err)) + } + + // Print headlines for all commands in output file. + cmds := []*exec.Cmd{} + for _, command := range metadata.commands { + cmd := exec.Command(command.name, command.args...) + cmds = append(cmds, cmd) + commandSummary := fmt.Sprintf("%s(%s)\n", cmd.String(), command.description) + if _, err := outfile.WriteString(commandSummary); err != nil { + ncp.l.Error("Failed to write command description to file", zap.String("file", outfile.Name()), zap.Error(err)) + } + } + + if _, err := outfile.WriteString("\nExecute:\n\n"); err != nil { + ncp.l.Error("Failed to write command output to file", zap.String("file", outfile.Name()), zap.Error(err)) + } + + // Write command stdout and stderr to output file + for _, cmd := range cmds { + if _, err := outfile.WriteString(fmt.Sprintf("%s\n\n", cmd.String())); err != nil { + ncp.l.Error("Failed to write string to file", zap.String("file", outfile.Name()), zap.Error(err)) + } + + cmd.Stdout = outfile + cmd.Stderr = outfile + if err := cmd.Run(); err != nil { + // Don't return for error to continue capturing following network metadata. + ncp.l.Error("Failed to execute command", zap.String("command", cmd.String()), zap.Error(err)) + // Log the error in output file because this error does not stop capture job pod from finishing, + // and the job can be recycled automatically leaving no info to debug. + if _, err = outfile.WriteString(fmt.Sprintf("Failed to run %q, error: %s)", cmd.String(), err.Error())); err != nil { + ncp.l.Error("Failed to write command run failure", zap.String("command", cmd.String()), zap.Error(err)) + } + } + } + } else { + for _, command := range metadata.commands { + cmd := exec.Command(command.name, command.args...) + // Errors will when copying kernel networking configuration for not all files under /proc/sys/net are + // readable, like '/proc/sys/net/ipv4/route/flush', which doesn't implement the read function. + if output, err := cmd.CombinedOutput(); err != nil { + // Don't return for error to continue capturing following network metadata. 
+ ncp.l.Error("Failed to execute command", zap.String("command", cmd.String()), zap.String("output", string(output)), zap.Error(err)) + } + } + } + } + + ncp.l.Info("Done for collecting network metadata") + + return nil +} + +func (ncp *NetworkCaptureProvider) Cleanup() error { + ncp.l.Info("Cleanup network capture", zap.String("capture name", ncp.CaptureName), zap.String("temporary dir", ncp.TmpCaptureDir)) + ncp.NetworkCaptureProviderCommon.Cleanup() + return nil +} + +func obtainIptablesMode() string { + // Since iptables v1.8, nf_tables are introduced as an improvement of legacy iptables, but provides the same user + // interface as legacy iptables through iptables-nft command. + // based on: https://github.com/kubernetes-sigs/iptables-wrappers/blob/97b01f43a8e8db07840fc4b95e833a37c0d36b12/iptables-wrapper-installer.sh + legacySaveOut, _ := exec.Command("iptables-legacy-save").CombinedOutput() + legacySaveLineNum := len(strings.Split(string(legacySaveOut), "\n")) + nftSaveOut, _ := exec.Command("iptables-nft-save").CombinedOutput() + nftSaveLineNum := len(strings.Split(string(nftSaveOut), "\n")) + if legacySaveLineNum > nftSaveLineNum { + return "legacy" + } + return "nft" +} diff --git a/pkg/capture/provider/network_capture_test.go b/pkg/capture/provider/network_capture_test.go new file mode 100644 index 000000000..7bbb773ca --- /dev/null +++ b/pkg/capture/provider/network_capture_test.go @@ -0,0 +1,46 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package provider + +import ( + "os" + "strings" + "testing" + + "github.com/microsoft/retina/pkg/log" +) + +func TestSetupAndCleanup(t *testing.T) { + captureName := "capture-test" + nodeHostName := "node1" + log.SetupZapLogger(log.GetDefaultLogOpts()) + networkCaptureprovider := &NetworkCaptureProvider{l: log.Logger().Named("test")} + tmpCaptureLocation, err := networkCaptureprovider.Setup(captureName, nodeHostName) + + // remove temporary capture dir anyway in case Cleanup() fails. + defer os.RemoveAll(tmpCaptureLocation) + + if err != nil { + t.Errorf("Setup should have not fail with error %s", err) + } + if !strings.Contains(tmpCaptureLocation, captureName) { + t.Errorf("Temporary capture dir name %s should contains capture name %s", tmpCaptureLocation, captureName) + } + if !strings.Contains(tmpCaptureLocation, nodeHostName) { + t.Errorf("Temporary capture dir name %s should contains node host name %s", tmpCaptureLocation, nodeHostName) + } + + if _, err := os.Stat(tmpCaptureLocation); os.IsNotExist(err) { + t.Errorf("Temporary capture dir %s should be created", tmpCaptureLocation) + } + + err = networkCaptureprovider.Cleanup() + if err != nil { + t.Errorf("Cleanup should have not fail with error %s", err) + } + + if _, err := os.Stat(tmpCaptureLocation); !os.IsNotExist(err) { + t.Errorf("Temporary capture dir %s should be deleted", tmpCaptureLocation) + } +} diff --git a/pkg/capture/provider/network_capture_win.go b/pkg/capture/provider/network_capture_win.go new file mode 100644 index 000000000..ea3e5b97e --- /dev/null +++ b/pkg/capture/provider/network_capture_win.go @@ -0,0 +1,299 @@ +//go:build windows +// +build windows + +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package provider + +import ( + "fmt" + "io" + "net/http" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "time" + + "go.uber.org/zap" + + captureConstants "github.com/microsoft/retina/pkg/capture/constants" + "github.com/microsoft/retina/pkg/log" +) + +type NetworkCaptureProvider struct { + NetworkCaptureProviderCommon + TmpCaptureDir string + CaptureName string + NodeHostName string + + l *log.ZapLogger +} + +var _ NetworkCaptureProviderInterface = &NetworkCaptureProvider{} + +func NewNetworkCaptureProvider(logger *log.ZapLogger) NetworkCaptureProviderInterface { + return &NetworkCaptureProvider{ + NetworkCaptureProviderCommon: NetworkCaptureProviderCommon{l: logger}, + l: logger, + } +} + +func (ncp *NetworkCaptureProvider) Setup(captureName, nodeHostname string) (string, error) { + captureFolderDir, err := ncp.NetworkCaptureProviderCommon.Setup(captureName, nodeHostname) + if err != nil { + return "", err + } + ncp.l.Info("Created temporary folder for network capture", zap.String("capture temporary folder", captureFolderDir)) + + ncp.TmpCaptureDir = captureFolderDir + ncp.CaptureName = captureName + ncp.NodeHostName = nodeHostname + return ncp.TmpCaptureDir, nil +} + +func (ncp *NetworkCaptureProvider) CaptureNetworkPacket(filter string, duration, maxSizeMB int, sigChan <-chan os.Signal) error { + stopTrace, err := ncp.needToStopTraceSession() + if err != nil { + return err + } + if stopTrace { + ncp.l.Info("Stopping netsh trace session before starting a new one") + _ = ncp.stopNetworkCapture() + } + + captureFileName := ncp.NetworkCaptureProviderCommon.CaptureNodetimestampName(ncp.CaptureName, ncp.NodeHostName) + captureFileName = captureFileName + ".etl" + captureFilePath := filepath.Join(ncp.TmpCaptureDir, captureFileName) + + captureStartCmd := exec.Command( + "cmd", "/C", + fmt.Sprintf("netsh trace start capture=yes report=disabled overwrite=yes"), + fmt.Sprintf("tracefile=%s", captureFilePath), + ) + + // We should split arguments organized in a string delimited by spaces as + // seperate ones, otherwise the whole string will be treated as one argument. + // For example, given the following filter, exec lib will treat IPv4.Address + // as the argument and the rest as the value of IPv4.Address. + // "IPv4.Address=(10.244.1.85,10.244.1.235) IPv6.Address=(fd5c:d9f1:79c5:fd83::1bc,fd5c:d9f1:79c5:fd83::11b)" + if len(filter) != 0 { + netshFilterSlice := strings.Split(filter, " ") + captureStartCmd.Args = append(captureStartCmd.Args, netshFilterSlice...) + } + + // NOTE: netsh cannot stop when the given max size of reach reaches, but we can use maxSizeMB to limit the size of + // trace file. 
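+	// netsh trace uses a circular trace file by default, so once maxSize is reached the oldest
+	// data is overwritten instead of the session stopping.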
+ if maxSizeMB != 0 { + captureStartCmd.Args = append(captureStartCmd.Args, fmt.Sprintf("maxSize=%d", maxSizeMB)) + } + ncp.l.Info("Running netsh with args", zap.String("netsh command", captureStartCmd.String()), zap.Any("netsh args", captureStartCmd.Args)) + + netshLogFile, err := ncp.NetworkCaptureProviderCommon.networkCaptureCommandLog("netsh.log", captureStartCmd) + if err != nil { + return err + } + + defer func() { + if netshLog, err := os.ReadFile(netshLogFile.Name()); err != nil { + ncp.l.Warn("Failed to read netsh log", zap.Error(err)) + } else { + ncp.l.Info(fmt.Sprintf("Netsh command output: %s", string(netshLog))) + } + netshLogFile.Close() + }() + + err = captureStartCmd.Start() + if err != nil { + ncp.l.Error("Failed to start netsh", zap.Error(err)) + return err + } + + doneChan := make(chan bool, 1) + if duration != 0 { + go func() { + time.Sleep(time.Second * time.Duration(duration)) + doneChan <- true + }() + } + + select { + case <-doneChan: + case sig := <-sigChan: + ncp.l.Info("Got OS signal, netsh will be stopped", zap.String("signal", sig.String())) + } + + ncp.l.Info("Stop netsh") + if err := ncp.stopNetworkCapture(); err != nil { + ncp.l.Error("Failed to stop netsh trace by 'netsh trace stop', will kill the process", zap.Error(err)) + _ = captureStartCmd.Process.Kill() + return fmt.Errorf("netsh stop failed: Output: %s", err) + } + + if err := etl2pcapng(captureFilePath); err != nil { + return err + } + + return nil +} + +// needToStopTraceSession returns true when a running trace session started by Retina capture exists, otherwise returns +// false. Specially, when the trace session is not started by Retina capture, determined from the capture file path, an +// error will be raised. +func (ncp *NetworkCaptureProvider) needToStopTraceSession() (bool, error) { + command := exec.Command("cmd", "/C", "netsh trace show status") + output, err := command.CombinedOutput() + + // When there's no running trace session, `netsh trace show status` will exist with error code 1, in which case we + // should not raise error. + if strings.Contains(string(output), "There is no trace session currently in progress") { + ncp.l.Info("There is no running trace session") + return false, nil + } + + if err != nil { + ncp.l.Error("Failed to get netsh trace status", zap.String("command", command.String()), zap.String("command output", string(output)), zap.Error(err)) + return false, err + } + + if strings.Contains(string(output), captureLabelFolderName) { + ncp.l.Info("There is a running trace session created by Retina capture") + return true, nil + } + ncp.l.Info("There is a running trace session NOT created by Retina capture", zap.String("command output", string(output))) + return false, fmt.Errorf("cannot stop trace session because it's not created by Retina capture") +} + +func (ncp *NetworkCaptureProvider) stopNetworkCapture() error { + ncp.l.Info("Stopping netsh trace session") + + command := exec.Command("cmd", "/C", "netsh trace stop") + output, err := command.CombinedOutput() + // ignore the error when stop the trace when no live trace session exists. 
+ if strings.Contains(string(output), "There is no trace session currently in progress") { + return nil + } + if err != nil { + ncp.l.Error("Failed to stop netsh trace by 'netsh trace stop'", zap.Error(err), zap.String("command output", string(output))) + return err + } + ncp.l.Info("Done for stopping netsh trace session") + return nil +} + +func etl2pcapng(etlCaptureFilePath string) error { + captureFileDir := filepath.Dir(etlCaptureFilePath) + captureFilePathWithoutExt := strings.TrimSuffix(filepath.Base(etlCaptureFilePath), filepath.Ext(etlCaptureFilePath)) + pcapCaptureFileName := captureFilePathWithoutExt + ".pcap" + pcapCaptureFilePath := filepath.Join(captureFileDir, pcapCaptureFileName) + containerSandboxMountPoint := os.Getenv(captureConstants.ContainerSandboxMountPointEnvKey) + if len(containerSandboxMountPoint) == 0 { + return fmt.Errorf("failed to find sandbox mount path through env %s", captureConstants.ContainerSandboxMountPointEnvKey) + } + etl2pcapngBinPath := filepath.Join(containerSandboxMountPoint, "etl2pcapng.exe") + etl2pcapngCmd := exec.Command( + "cmd", "/C", + fmt.Sprintf("%s %s %s", etl2pcapngBinPath, etlCaptureFilePath, pcapCaptureFilePath), + ) + if output, err := etl2pcapngCmd.CombinedOutput(); err != nil { + return fmt.Errorf("etl2pcapng failed to convert %s to pcap, Cmd: %s, Error: %s, Output: %s", etlCaptureFilePath, etl2pcapngCmd.String(), err, string(output)) + } + // Keep etl trace file as well to enrich networking debugging info. + return nil +} + +func (ncp *NetworkCaptureProvider) CollectMetadata() error { + ncp.l.Info("Start to collect network metadata") + + // Download collectlogs.ps1 if not exists. + // Platforms like Azure Kubernetes Service caches collectlogs.ps1 for debug and it's beneficial for air-gapped + // environment in the hard-coded path `c:\\k\\debug` as required by the script. + // ref: https://github.com/Azure/AgentBaker/tree/master/staging/cse/windows/debug + collectLogFilePath := "c:\\k\\debug\\collectlogs.ps1" + if _, err := os.Stat(collectLogFilePath); os.IsNotExist(err) { + // TODO(mainred): Should we delete collectlogs.ps1 if we download it? Besides, collectlogs.ps1 will download a + // series of tool scripts under folder c:\\k\\debug. + out, err := os.Create(collectLogFilePath) + if err != nil { + return err + } + defer out.Close() + + // NOTE(mainred): Scripts downloaded by collectlogs.ps1 always use master branch, so we here use master branch + // as well to keep version compatible. And unfortunately, there's no tag/release in microsoft/SDN for us to + // pin a specific version. + url := "https://raw.githubusercontent.com/microsoft/SDN/master/Kubernetes/windows/debug/collectlogs.ps1" + resp, err := http.Get(url) + if err != nil { + return err + } + defer resp.Body.Close() + + // Write the body to file + _, err = io.Copy(out, resp.Body) + if err != nil { + return err + } + } + + // NOTE(mainred): Windows image built lost the windows powershell in PATH environment variable, tracked by https://github.com/microsoft/retina/issues/307 + // Before the issue is resolved, we hard-code powershell path as a workaround for this issue. This workaround will + // not work if powershell path changes in the future. 
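+	// If PATH cannot be extended, fall back to invoking powershell by its absolute path.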
+ pathEnv := os.Getenv("PATH") + powershellCommand := "powershell" + powershellPath := "C:\\WINDOWS\\System32\\WindowsPowerShell\\v1.0" + if !strings.Contains(pathEnv, powershellPath) { + pathEnvWithPowershell := fmt.Sprintf("%s;%s", pathEnv, powershellPath) + if err := os.Setenv("PATH", pathEnvWithPowershell); err != nil { + ncp.l.Warn("Failed to set PATH environment variable", zap.Error(err)) + powershellCommand = powershellPath + "\\powershell" + } + } + + out, err := exec.Command(powershellCommand, "-file", collectLogFilePath).CombinedOutput() + if err != nil { + ncp.l.Error("Failed to collect windows metadata through collectlogs.ps1", zap.String("collect log script path", collectLogFilePath), zap.String("output", string(out))) + return err + } + + ncp.l.Info("Succeeded in running collectlogs.ps1", zap.String("output", string(out))) + + // Normally, the log path will be printed at the end of the output of collectlogs.ps1, with the following pattern. + // Logs are available at C:\k\debug\y5iyszva.4o4\n\r\n\r\n + re := regexp.MustCompile(`Logs are available at ([^\r\n]+)`) + matches := re.FindStringSubmatch(string(out)) + if len(matches) == 0 { + return fmt.Errorf("failed to get diagnostic log file") + } + logFilePath := matches[1] + logFilePath = strings.Trim(logFilePath, " ") + defer os.RemoveAll(logFilePath) //nolint:errcheck + + // Create a separate folder for the long list of metadata files. + captureMetadataFolderDir := filepath.Join(ncp.TmpCaptureDir, "metadata") + if err := os.Mkdir(captureMetadataFolderDir, 0o750); err != nil { + return err + } + + if output, err := exec.Command( + "cmd", "/C", + "xcopy", + logFilePath, + captureMetadataFolderDir, + "/e", // `/e` copies directories and subdirectories, including empty ones. + ).CombinedOutput(); err != nil { + ncp.l.Error("Failed to copy log file", zap.String("output", string(output))) + return err + } + + ncp.l.Info("Done for collecting network metadata") + return nil +} + +func (ncp *NetworkCaptureProvider) Cleanup() error { + ncp.l.Info("Cleanup network capture", zap.String("capture name", ncp.CaptureName), zap.String("temporary dir", ncp.TmpCaptureDir)) + ncp.NetworkCaptureProviderCommon.Cleanup() + return nil +} diff --git a/pkg/capture/utils/capture_image.go b/pkg/capture/utils/capture_image.go new file mode 100644 index 000000000..9d5a5512a --- /dev/null +++ b/pkg/capture/utils/capture_image.go @@ -0,0 +1,59 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package utils + +import ( + "fmt" + "os" + + "go.uber.org/zap" + + captureConstants "github.com/microsoft/retina/pkg/capture/constants" + "github.com/microsoft/retina/pkg/log" +) + +const ( + // captureWorkloadImageEnvKey defines the environment variable for retina-agent(capture workload) image. + // Normally, retina-agent image version is determined by the CLI version but we allow specifying the image through + // environment variables for testing. + captureWorkloadImageEnvKey string = "RETINA_AGENT_IMAGE" + + DebugModeEnvKey string = "DEBUG" +) + +type VersionSource string + +const ( + // VersionSourceCLI defines the version source as CLI version. + VersionSourceCLIVersion VersionSource = "CLI version" + // VersionSourceImage defines the version source as image version. 
+ VersionSourceOperatorImageVersion VersionSource = "Operator Image" +) + +// TODO: currently, we return only the default capture workload image for official release in the phase of preview, and +// using the same version for CLI and capture workload image makes sure there's no compatibility issue. +// We can consider exposing the image name and version through CLI flags and adding version compatibility validation for +// CLI and capture workload image. +func CaptureWorkloadImage(logger *log.ZapLogger, imageVersion string, debug bool, vs VersionSource) string { + defaultCaptureWorkloadImageVersion := imageVersion + + // For testing. + if debug { + if captureWorkloadImageFromEnv := os.Getenv(captureWorkloadImageEnvKey); len(captureWorkloadImageFromEnv) != 0 { + logger.Info("Debug mode: obtained capture workload image from environment variable", zap.String("environment variable key", captureWorkloadImageEnvKey), zap.String("image", captureWorkloadImageFromEnv)) + return captureWorkloadImageFromEnv + } + + debugCaptureWorkloadImageName := captureConstants.DebugCaptureWorkloadImageName + debugCaptureWorkloadImage := debugCaptureWorkloadImageName + ":" + defaultCaptureWorkloadImageVersion + logger.Info(fmt.Sprintf("Debug mode: Using capture workload image %s with version determined by %s", debugCaptureWorkloadImage, vs)) + return debugCaptureWorkloadImage + } + + defaultCaptureWorkloadImageName := captureConstants.CaptureWorkloadImageName + captureWorkloadImage := defaultCaptureWorkloadImageName + ":" + defaultCaptureWorkloadImageVersion + logger.Info(fmt.Sprintf("Using capture workload image %s with version determined by %s", captureWorkloadImage, vs)) + + return captureWorkloadImage +} diff --git a/pkg/capture/utils/capture_image_test.go b/pkg/capture/utils/capture_image_test.go new file mode 100644 index 000000000..b96c8815a --- /dev/null +++ b/pkg/capture/utils/capture_image_test.go @@ -0,0 +1,74 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
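As a rough sketch of the selection logic above, a fragment in the spirit of the package's own tests (the registries shown are the values the tests below expect for the underlying constants; the version string is an arbitrary example):

log.SetupZapLogger(log.GetDefaultLogOpts())
logger := log.Logger().Named("example")

img := CaptureWorkloadImage(logger, "v1.0.0", false, VersionSourceCLIVersion)
// -> "mcr.microsoft.com/containernetworking/retina-agent:v1.0.0"

img = CaptureWorkloadImage(logger, "v1.0.0", true, VersionSourceCLIVersion)
// -> "acnpublic.azurecr.io/retina-agent:v1.0.0", unless RETINA_AGENT_IMAGE is set,
//    in which case that value is returned as-is.
_ = img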
+ +package utils + +import ( + "os" + "testing" + + "github.com/microsoft/retina/pkg/log" +) + +func TestCaptureWorkloadImage(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + logger := log.Logger().Named("TestCaptureWorkloadImage") + cases := []struct { + name string + env map[string]string + sourceVersion string + versionSource VersionSource + expectedImage string + }{ + { + name: "Official image", + env: map[string]string{}, + sourceVersion: "v1.0.0", + versionSource: VersionSourceCLIVersion, + expectedImage: "mcr.microsoft.com/containernetworking/retina-agent:v1.0.0", + }, + { + name: "Debug mode: image determined by CLI version", + env: map[string]string{ + "DEBUG": "true", + }, + sourceVersion: "v1.0.0", + versionSource: VersionSourceCLIVersion, + + expectedImage: "acnpublic.azurecr.io/retina-agent:v1.0.0", + }, + { + name: "Debug mode: image determined by environment variable RETINA_AGENT_IMAGE", + sourceVersion: "v1.0.0", + env: map[string]string{ + "DEBUG": "true", + "RETINA_AGENT_IMAGE": "test.com/retina-agent:v1.0.1", + }, + versionSource: VersionSourceCLIVersion, + expectedImage: "test.com/retina-agent:v1.0.1", + }, + } + + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + for k, v := range tt.env { + os.Setenv(k, v) + } + + defer func() { + for k := range tt.env { + os.Unsetenv(k) + } + }() + debug := false + if _, ok := tt.env["DEBUG"]; ok { + debug = true + } + + actualImage := CaptureWorkloadImage(logger, tt.sourceVersion, debug, VersionSourceCLIVersion) + if actualImage != tt.expectedImage { + t.Errorf("Expected image %s, but got %s", tt.expectedImage, actualImage) + } + }) + } +} diff --git a/pkg/capture/utils/label.go b/pkg/capture/utils/label.go new file mode 100644 index 000000000..4882f1cf2 --- /dev/null +++ b/pkg/capture/utils/label.go @@ -0,0 +1,30 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package utils + +import ( + captureConstants "github.com/microsoft/retina/pkg/capture/constants" + "github.com/microsoft/retina/pkg/label" +) + +func GetSerectLabelsFromCaptureName(captureName string) map[string]string { + return map[string]string{ + label.AppLabel: captureConstants.CaptureAppname, + label.CaptureNameLabel: captureName, + } +} + +func GetJobLabelsFromCaptureName(captureName string) map[string]string { + return map[string]string{ + label.AppLabel: captureConstants.CaptureAppname, + label.CaptureNameLabel: captureName, + } +} + +func GetContainerLabelsFromCaptureName(captureName string) map[string]string { + return map[string]string{ + label.AppLabel: captureConstants.CaptureAppname, + label.CaptureNameLabel: captureName, + } +} diff --git a/pkg/capture/utils/label_test.go b/pkg/capture/utils/label_test.go new file mode 100644 index 000000000..f977bef45 --- /dev/null +++ b/pkg/capture/utils/label_test.go @@ -0,0 +1,34 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package utils + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + captureConstants "github.com/microsoft/retina/pkg/capture/constants" + "github.com/microsoft/retina/pkg/label" +) + +func TestGetSerectLabelsFromCaptureName(t *testing.T) { + captureName := "test" + labels := GetSerectLabelsFromCaptureName(captureName) + assert.Equal(t, labels[label.AppLabel], captureConstants.CaptureAppname) + assert.Equal(t, labels[label.CaptureNameLabel], captureName) +} + +func TestGetJobLabelsFromCaptureName(t *testing.T) { + captureName := "test" + labels := GetJobLabelsFromCaptureName(captureName) + assert.Equal(t, labels[label.AppLabel], captureConstants.CaptureAppname) + assert.Equal(t, labels[label.CaptureNameLabel], captureName) +} + +func TestGetContainerLabelsFromCaptureName(t *testing.T) { + captureName := "test" + labels := GetContainerLabelsFromCaptureName(captureName) + assert.Equal(t, labels[label.AppLabel], captureConstants.CaptureAppname) + assert.Equal(t, labels[label.CaptureNameLabel], captureName) +} diff --git a/pkg/client/client.go b/pkg/client/client.go new file mode 100644 index 000000000..cd6e85aee --- /dev/null +++ b/pkg/client/client.go @@ -0,0 +1,49 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package client + +import ( + "fmt" + "net/http" + "time" +) + +// Retina API +const ( + startTrace = "%s/trace" + trace = "%s/trace/%s" +) + +type Retina struct { + RetinaEndpoint string + Client *http.Client +} + +func NewRetinaClient(endpoint string) *Retina { + return &Retina{ + RetinaEndpoint: endpoint, + Client: &http.Client{Timeout: time.Second * 30}, + } +} + +func (c *Retina) StartTrace(operationID, filter string) error { + startTraceURL := fmt.Sprintf(startTrace, c.RetinaEndpoint) + response, err := c.Client.Post(startTraceURL, "application/json", nil) + if err != nil { + return err + } + + response.Body.Close() + return nil +} + +func (c *Retina) GetTrace(operationID string) error { + getTraceURL := fmt.Sprintf(trace, c.RetinaEndpoint, operationID) + response, err := c.Client.Get(getTraceURL) + if err != nil { + return err + } + + response.Body.Close() + return nil +} diff --git a/pkg/common/apiretry/apiretry.go b/pkg/common/apiretry/apiretry.go new file mode 100644 index 000000000..032310933 --- /dev/null +++ b/pkg/common/apiretry/apiretry.go @@ -0,0 +1,31 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// package apiretry provides the retry logic for API calls. +package apiretry + +import ( + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/client-go/util/retry" +) + +// Do will retry the do func only when the error is transient. +func Do(do func() error) error { + backOffPeriod := retry.DefaultBackoff + backOffPeriod.Cap = time.Second * 1 + + return retry.OnError(backOffPeriod, func(err error) bool { + return retriable(err) + }, do) +} + +func retriable(err error) bool { + if apierrors.IsTimeout(err) || + apierrors.IsServerTimeout(err) || + apierrors.IsTooManyRequests(err) { + return true + } + return false +} diff --git a/pkg/common/baseobject.go b/pkg/common/baseobject.go new file mode 100644 index 000000000..45f5995cb --- /dev/null +++ b/pkg/common/baseobject.go @@ -0,0 +1,63 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
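A minimal sketch of how apiretry.Do above might wrap a Kubernetes API call so that only transient errors (timeouts, server timeouts, throttling) are retried; the clientset setup, namespace, and pod name are assumptions for illustration:

package main

import (
	"context"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"

	"github.com/microsoft/retina/pkg/common/apiretry"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		log.Fatal(err)
	}
	clientset := kubernetes.NewForConfigOrDie(cfg)

	// Retries use the default backoff capped at 1 second; non-transient errors are returned immediately.
	err = apiretry.Do(func() error {
		_, getErr := clientset.CoreV1().Pods("default").Get(context.TODO(), "example-pod", metav1.GetOptions{})
		return getErr
	})
	if err != nil {
		log.Fatal(err)
	}
}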
+package common + +import ( + "fmt" + "sync" +) + +func GetBaseObject(name, namespace string, ips *IPAddresses) BaseObject { + return BaseObject{ + name: name, + namespace: namespace, + ips: ips, + RWMutex: &sync.RWMutex{}, + } +} + +func (b *BaseObject) Key() string { + return fmt.Sprintf("%s/%s", b.namespace, b.name) +} + +func (b *BaseObject) DeepCopy() BaseObject { + b.RLock() + defer b.RUnlock() + + newB := BaseObject{ + name: b.name, + namespace: b.namespace, + ips: b.ips.DeepCopy(), + RWMutex: &sync.RWMutex{}, + } + + return newB +} + +func (b *BaseObject) Name() string { + b.RLock() + defer b.RUnlock() + + return b.name +} + +func (b *BaseObject) Namespace() string { + b.RLock() + defer b.RUnlock() + + return b.namespace +} + +func (b *BaseObject) NamespacedName() string { + b.RLock() + defer b.RUnlock() + + return fmt.Sprintf("%s/%s", b.namespace, b.name) +} + +func (b *BaseObject) IPs() *IPAddresses { + b.RLock() + defer b.RUnlock() + + return b.ips +} diff --git a/pkg/common/dirtycache.go b/pkg/common/dirtycache.go new file mode 100644 index 000000000..86f87e861 --- /dev/null +++ b/pkg/common/dirtycache.go @@ -0,0 +1,70 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package common + +import "sync" + +type DirtyCache struct { + // dirty is a map of dirty objects. + toAdd map[string]interface{} + // toDelete is a map of dirty objects. + toDelete map[string]interface{} + + sync.RWMutex +} + +func NewDirtyCache() *DirtyCache { + return &DirtyCache{ + toAdd: make(map[string]interface{}), + toDelete: make(map[string]interface{}), + RWMutex: sync.RWMutex{}, + } +} + +func (d *DirtyCache) ToAdd(key string, obj interface{}) { + d.Lock() + defer d.Unlock() + delete(d.toDelete, key) + d.toAdd[key] = obj +} + +func (d *DirtyCache) ToDelete(key string, obj interface{}) { + d.Lock() + defer d.Unlock() + delete(d.toAdd, key) + d.toDelete[key] = obj +} + +func (d *DirtyCache) GetAddList() []interface{} { + d.RLock() + defer d.RUnlock() + + al := make([]interface{}, 0, len(d.toAdd)) + for _, v := range d.toAdd { + al = append(al, v) + } + return al +} + +func (d *DirtyCache) GetDeleteList() []interface{} { + d.RLock() + defer d.RUnlock() + + dl := make([]interface{}, 0, len(d.toDelete)) + for _, v := range d.toDelete { + dl = append(dl, v) + } + return dl +} + +func (d *DirtyCache) ClearAdd() { + d.Lock() + defer d.Unlock() + d.toAdd = make(map[string]interface{}) +} + +func (d *DirtyCache) ClearDelete() { + d.Lock() + defer d.Unlock() + d.toDelete = make(map[string]interface{}) +} diff --git a/pkg/common/dirtycache_test.go b/pkg/common/dirtycache_test.go new file mode 100644 index 000000000..a686cedd8 --- /dev/null +++ b/pkg/common/dirtycache_test.go @@ -0,0 +1,98 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+package common + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDirtyCache(t *testing.T) { + dc := NewDirtyCache() + assert.NotNil(t, dc) + + tests := []struct { + name string + toAdd map[string]interface{} + toDel map[string]interface{} + expectedAddList []interface{} + expectedDelList []interface{} + }{ + { + name: "empty", + toAdd: map[string]interface{}{}, + toDel: map[string]interface{}{}, + expectedAddList: []interface{}{}, + expectedDelList: []interface{}{}, + }, + { + name: "add", + toAdd: map[string]interface{}{ + "key1": "value1", + }, + toDel: map[string]interface{}{}, + expectedAddList: []interface{}{ + "value1", + }, + expectedDelList: []interface{}{}, + }, + { + name: "delete", + toAdd: map[string]interface{}{}, + toDel: map[string]interface{}{ + "key1": "value1", + }, + expectedAddList: []interface{}{}, + expectedDelList: []interface{}{ + "value1", + }, + }, + { + name: "add and delete", + toAdd: map[string]interface{}{ + "key1": "value1", + }, + toDel: map[string]interface{}{ + "key2": "value2", + }, + expectedAddList: []interface{}{ + "value1", + }, + expectedDelList: []interface{}{ + "value2", + }, + }, + { + name: "add and delete same key", + toAdd: map[string]interface{}{ + "key1": "value1", + }, + toDel: map[string]interface{}{ + "key1": "value2", + }, + expectedAddList: []interface{}{}, + expectedDelList: []interface{}{ + "value2", + }, + }, + } + + for _, test := range tests { + fmt.Printf("Running test %s\n", test.name) + for k, v := range test.toAdd { + dc.ToAdd(k, v) + } + for k, v := range test.toDel { + dc.ToDelete(k, v) + } + assert.Equal(t, test.expectedAddList, dc.GetAddList()) + assert.Equal(t, test.expectedDelList, dc.GetDeleteList()) + dc.ClearAdd() + assert.Equal(t, []interface{}{}, dc.GetAddList()) + + dc.ClearDelete() + assert.Equal(t, []interface{}{}, dc.GetDeleteList()) + } +} diff --git a/pkg/common/endpoint.go b/pkg/common/endpoint.go new file mode 100644 index 000000000..2fc7c66cc --- /dev/null +++ b/pkg/common/endpoint.go @@ -0,0 +1,174 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+package common + +import ( + "fmt" + "net" +) + +func NewRetinaEndpoint(name, namespace string, ips *IPAddresses) *RetinaEndpoint { + return &RetinaEndpoint{ + BaseObject: GetBaseObject(name, namespace, ips), + } +} + +func (ep *RetinaEndpoint) DeepCopy() interface{} { + ep.RLock() + defer ep.RUnlock() + + newEp := &RetinaEndpoint{ + BaseObject: ep.BaseObject.DeepCopy(), + } + + if ep.ownerRefs != nil { + newEp.ownerRefs = make([]*OwnerReference, len(ep.ownerRefs)) + for i, ownerRef := range ep.ownerRefs { + newEp.ownerRefs[i] = ownerRef.DeepCopy() + } + } + + if ep.containers != nil { + newEp.containers = make([]*RetinaContainer, len(ep.containers)) + for i, container := range ep.containers { + newEp.containers[i] = container.DeepCopy() + } + } + + if ep.labels != nil { + newEp.labels = make(map[string]string) + for k, v := range ep.labels { + newEp.labels[k] = v + } + } + + if ep.annotations != nil { + newEp.annotations = make(map[string]string) + for k, v := range ep.annotations { + newEp.annotations[k] = v + } + } + + return newEp +} + +func (ep *RetinaEndpoint) IPs() *IPAddresses { + ep.RLock() + defer ep.RUnlock() + + return ep.ips +} + +func (ep *RetinaEndpoint) SetIPs(ips *IPAddresses) { + ep.Lock() + defer ep.Unlock() + ep.ips = ips +} + +func (ep *RetinaEndpoint) OwnerRefs() []*OwnerReference { + ep.RLock() + defer ep.RUnlock() + return ep.ownerRefs +} + +func (ep *RetinaEndpoint) SetOwnerRefs(ownerRefs []*OwnerReference) { + ep.Lock() + defer ep.Unlock() + ep.ownerRefs = ownerRefs +} + +func (ep *RetinaEndpoint) Containers() []*RetinaContainer { + ep.RLock() + defer ep.RUnlock() + return ep.containers +} + +func (ep *RetinaEndpoint) SetContainers(containers []*RetinaContainer) { + ep.Lock() + defer ep.Unlock() + ep.containers = containers +} + +func (ep *RetinaEndpoint) Labels() map[string]string { + ep.RLock() + defer ep.RUnlock() + return ep.labels +} + +func (ep *RetinaEndpoint) FormattedLabels() []string { + ep.RLock() + defer ep.RUnlock() + labels := make([]string, 0) + for k, v := range ep.labels { + labels = append(labels, fmt.Sprintf("%s=%s", k, v)) + } + + return labels +} + +func (ep *RetinaEndpoint) SetLabels(labels map[string]string) { + ep.Lock() + defer ep.Unlock() + ep.labels = labels +} + +func (ep *RetinaEndpoint) Annotations() map[string]string { + ep.RLock() + defer ep.RUnlock() + return ep.annotations +} + +func (ep *RetinaEndpoint) SetAnnotations(annotations map[string]string) { + ep.Lock() + defer ep.Unlock() + if annotations != nil { + ep.annotations = make(map[string]string) + if _, ok := annotations[RetinaPodAnnotation]; ok { + ep.annotations[RetinaPodAnnotation] = annotations[RetinaPodAnnotation] + } + } +} + +func (e *RetinaEndpoint) PrimaryIP() (string, error) { + e.RLock() + defer e.RUnlock() + + if e.ips != nil { + pip := e.ips.PrimaryIP() + if pip != "" { + return pip, nil + } + } + + return "", fmt.Errorf("no primary IP found for endpoint %s", e.Key()) +} + +func (e *RetinaEndpoint) PrimaryNetIP() (net.IP, error) { + e.RLock() + defer e.RUnlock() + + if e.ips != nil { + pip := e.ips.PrimaryNetIP() + if pip != nil { + return pip, nil + } + } + + return nil, fmt.Errorf("no primary IP found for endpoint %s", e.Key()) +} + +func (o *OwnerReference) DeepCopy() *OwnerReference { + return &OwnerReference{ + Kind: o.Kind, + Name: o.Name, + UID: o.UID, + Controller: o.Controller, + } +} + +func (c *RetinaContainer) DeepCopy() *RetinaContainer { + return &RetinaContainer{ + Name: c.Name, + ID: c.ID, + } +} diff --git a/pkg/common/ipaddr.go 
b/pkg/common/ipaddr.go new file mode 100644 index 000000000..bc220c0a9 --- /dev/null +++ b/pkg/common/ipaddr.go @@ -0,0 +1,93 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package common + +import "net" + +func NewIPAddress(ipv4, ipv6 net.IP) *IPAddresses { + return &IPAddresses{ + IPv4: ipv4, + IPv6: ipv6, + } +} + +func (i *IPAddresses) AddIPv4(ip net.IP) { + i.OtherIPv4s = append(i.OtherIPv4s, ip) +} + +func (i *IPAddresses) AddIPv6(ip net.IP) { + i.OtherIPv6s = append(i.OtherIPv6s, ip) +} + +func (i *IPAddresses) GetIPs() []net.IP { + ips := []net.IP{} + if i.IPv4 != nil { + ips = append(ips, i.IPv4) + } + if i.IPv6 != nil { + ips = append(ips, i.IPv6) + } + ips = append(ips, i.OtherIPv4s...) + ips = append(ips, i.OtherIPv6s...) + return ips +} + +func (i *IPAddresses) GetIPv4s() []net.IP { + ips := []net.IP{} + if i.IPv4 != nil { + ips = append(ips, i.IPv4) + } + ips = append(ips, i.OtherIPv4s...) + return ips +} + +func (i *IPAddresses) GetIPv6s() []net.IP { + ips := []net.IP{} + if i.IPv6 != nil { + ips = append(ips, i.IPv6) + } + ips = append(ips, i.OtherIPv6s...) + return ips +} + +func (i *IPAddresses) PrimaryIP() string { + if i.IPv4 != nil { + return i.IPv4.String() + } + if i.IPv6 != nil { + return i.IPv6.String() + } + return "" +} + +func (i *IPAddresses) PrimaryNetIP() net.IP { + if i.IPv4 != nil { + return i.IPv4 + } + if i.IPv6 != nil { + return i.IPv6 + } + return nil +} + +func (i *IPAddresses) DeepCopy() *IPAddresses { + if i == nil { + return nil + } + + newObj := &IPAddresses{ + IPv4: i.IPv4, + IPv6: i.IPv6, + } + if len(i.OtherIPv4s) > 0 { + newObj.OtherIPv4s = make([]net.IP, len(i.OtherIPv4s)) + copy(newObj.OtherIPv4s, i.OtherIPv4s) + } + + if len(i.OtherIPv6s) > 0 { + newObj.OtherIPv6s = make([]net.IP, len(i.OtherIPv6s)) + copy(newObj.OtherIPv6s, i.OtherIPv6s) + } + + return newObj +} diff --git a/pkg/common/node.go b/pkg/common/node.go new file mode 100644 index 000000000..46735bbdf --- /dev/null +++ b/pkg/common/node.go @@ -0,0 +1,33 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package common + +import "net" + +func NewRetinaNode(name string, ip net.IP) *RetinaNode { + return &RetinaNode{ + name: name, + ip: ip, + } +} + +func (n *RetinaNode) DeepCopy() interface{} { + newN := &RetinaNode{ + name: n.name, + } + + if n.ip != nil { + newN.ip = make(net.IP, len(n.ip)) + copy(newN.ip, n.ip) + } + + return newN +} + +func (n *RetinaNode) IPString() string { + return n.ip.String() +} + +func (n *RetinaNode) Name() string { + return n.name +} diff --git a/pkg/common/pubsubtopics.go b/pkg/common/pubsubtopics.go new file mode 100644 index 000000000..756a46db3 --- /dev/null +++ b/pkg/common/pubsubtopics.go @@ -0,0 +1,24 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
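A small sketch of the IPv4-over-IPv6 preference encoded in PrimaryIP above (the addresses are arbitrary documentation examples):

package main

import (
	"fmt"
	"net"

	"github.com/microsoft/retina/pkg/common"
)

func main() {
	dual := common.NewIPAddress(net.ParseIP("10.0.0.1"), net.ParseIP("2001:db8::1"))
	fmt.Println(dual.PrimaryIP()) // "10.0.0.1": IPv4 is preferred when both families are set

	v6Only := common.NewIPAddress(nil, net.ParseIP("2001:db8::1"))
	fmt.Println(v6Only.PrimaryIP()) // "2001:db8::1": falls back to IPv6
}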
+package common + +import "github.com/microsoft/retina/pkg/pubsub" + +const ( + // PubSubPods topic + PubSubPods pubsub.PubSubTopic = "pods" + // PubSubEndpoints topic + PubSubEndpoints pubsub.PubSubTopic = "endpoints" + // PubSubSvc topic + PubSubSvc pubsub.PubSubTopic = "svc" + // PubSubNode topic + PubSubNode pubsub.PubSubTopic = "node" + // PubSubVeth topic + PubSubVeth pubsub.PubSubTopic = "veth" + // PubSubNamespace topic + PubSubNamespace pubsub.PubSubTopic = "namespace" + // PubSubFilterRule topic + PubSubFilterRule pubsub.PubSubTopic = "filterrule" + // PubSubAPIServer topic + PubSubAPIServer pubsub.PubSubTopic = "apiserver" +) diff --git a/pkg/common/service.go b/pkg/common/service.go new file mode 100644 index 000000000..a3aa85dfe --- /dev/null +++ b/pkg/common/service.go @@ -0,0 +1,92 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package common + +import ( + "fmt" + "net" +) + +func NewRetinaSvc(name, namespace string, ips *IPAddresses, lbIP net.IP, selector map[string]string) *RetinaSvc { + return &RetinaSvc{ + BaseObject: GetBaseObject(name, namespace, ips), + lbIP: lbIP, + selector: selector, + } +} + +func (s *RetinaSvc) DeepCopy() interface{} { + s.RLock() + defer s.RUnlock() + + newS := &RetinaSvc{ + BaseObject: s.BaseObject.DeepCopy(), + } + + if s.lbIP != nil { + newS.lbIP = make(net.IP, len(s.lbIP)) + copy(newS.lbIP, s.lbIP) + } + + if s.selector != nil { + newS.selector = make(map[string]string) + for k, v := range s.selector { + newS.selector[k] = v + } + } + + return newS +} + +func (s *RetinaSvc) GetPrimaryIP() (string, error) { + s.RLock() + defer s.RUnlock() + + if s.ips != nil { + pip := s.ips.PrimaryIP() + if pip != "" { + return pip, nil + } + } + + return "", fmt.Errorf("no primary IP found for service %s", s.Key()) +} + +func (s *RetinaSvc) LBIP() net.IP { + s.RLock() + defer s.RUnlock() + + return s.lbIP +} + +func (s *RetinaSvc) SetLBIP(lbIP net.IP) { + s.Lock() + defer s.Unlock() + s.lbIP = lbIP +} + +func (s *RetinaSvc) Selector() map[string]string { + s.RLock() + defer s.RUnlock() + + return s.selector +} + +func (s *RetinaSvc) SetSelector(selector map[string]string) { + s.Lock() + defer s.Unlock() + s.selector = selector +} + +func (s *RetinaSvc) SetIPs(ips *IPAddresses) { + s.Lock() + defer s.Unlock() + s.ips = ips +} + +func (s *RetinaSvc) IPs() *IPAddresses { + s.RLock() + defer s.RUnlock() + + return s.ips +} diff --git a/pkg/common/types.go b/pkg/common/types.go new file mode 100644 index 000000000..5905dc6ce --- /dev/null +++ b/pkg/common/types.go @@ -0,0 +1,260 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package common + +import ( + "net" + "sync" + + corev1 "k8s.io/api/core/v1" + + retinav1alpha1 "github.com/microsoft/retina/crd/api/v1alpha1" +) + +const ( + APIServerEndpointName = "kubernetes-apiserver" + // default value for annotations + RetinaPodAnnotation = "retina.sh" + RetinaPodAnnotationValue = "observe" +) + +// Important note: any changes to these structs must be reflected in the DeepCopy() method. + +// PublishObj is an interface that all objects that are published +type PublishObj interface { + DeepCopy() interface{} +} + +// BaseObject is a common struct that is embedded in all objects that are published. +type BaseObject struct { + *sync.RWMutex + name string + namespace string + ips *IPAddresses +} + +// RetinaEndpoint represents a Kubernetes endpoint. 
+type RetinaEndpoint struct { + BaseObject + ownerRefs []*OwnerReference + containers []*RetinaContainer + labels map[string]string + annotations map[string]string +} + +func isIPV4(ipAddress string) bool { + parsedIP := net.ParseIP(ipAddress) + return parsedIP.To4() != nil +} + +func RetinaEndpointCommonFromAPI(retinaEndpoint *retinav1alpha1.RetinaEndpoint) *RetinaEndpoint { + retinaEndpointCommon := &RetinaEndpoint{ + BaseObject: BaseObject{ + name: retinaEndpoint.Name, + namespace: retinaEndpoint.Namespace, + ips: &IPAddresses{}, + RWMutex: &sync.RWMutex{}, + }, + ownerRefs: []*OwnerReference{}, + containers: []*RetinaContainer{}, + labels: retinaEndpoint.Labels, + annotations: make(map[string]string), + } + + for _, ownerRef := range retinaEndpoint.Spec.OwnerReferences { + retinaEndpointCommon.ownerRefs = append(retinaEndpointCommon.ownerRefs, &OwnerReference{ + APIVersion: ownerRef.APIVersion, + Kind: ownerRef.Kind, + Name: ownerRef.Name, + }) + } + + for _, container := range retinaEndpoint.Spec.Containers { + retinaEndpointCommon.containers = append(retinaEndpointCommon.containers, &RetinaContainer{ + Name: container.Name, + ID: container.ID, + }) + } + + podIP := net.ParseIP(retinaEndpoint.Spec.PodIP) + if isIPV4(retinaEndpoint.Spec.PodIP) { + retinaEndpointCommon.BaseObject.ips.IPv4 = podIP + } else { + retinaEndpointCommon.BaseObject.ips.IPv6 = podIP + } + + OtherIPv4s := []net.IP{} + OtherIPv6s := []net.IP{} + for _, podIP := range retinaEndpoint.Spec.PodIPs { + if podIP == retinaEndpoint.Spec.PodIP { + continue + } + if isIPV4(podIP) { + OtherIPv4s = append(OtherIPv4s, net.ParseIP(podIP)) + continue + } + OtherIPv6s = append(OtherIPv6s, net.ParseIP(podIP)) + } + retinaEndpointCommon.ips.OtherIPv4s = OtherIPv4s + retinaEndpointCommon.ips.OtherIPv6s = OtherIPv6s + + for k, v := range retinaEndpoint.Spec.Annotations { + if k == RetinaPodAnnotation { + retinaEndpointCommon.annotations[k] = v + } + } + + return retinaEndpointCommon +} + +func RetinaEndpointCommonFromPod(pod *corev1.Pod) *RetinaEndpoint { + retinaEndpointCommon := &RetinaEndpoint{ + BaseObject: BaseObject{ + name: pod.Name, + namespace: pod.Namespace, + ips: &IPAddresses{}, + RWMutex: &sync.RWMutex{}, + }, + ownerRefs: []*OwnerReference{}, + containers: []*RetinaContainer{}, + labels: pod.Labels, + annotations: make(map[string]string), + } + + for _, ownerRef := range pod.ObjectMeta.OwnerReferences { + retinaEndpointCommon.ownerRefs = append(retinaEndpointCommon.ownerRefs, &OwnerReference{ + APIVersion: ownerRef.APIVersion, + Kind: ownerRef.Kind, + Name: ownerRef.Name, + }) + } + + for _, container := range pod.Status.ContainerStatuses { + retinaEndpointCommon.containers = append(retinaEndpointCommon.containers, &RetinaContainer{ + Name: container.Name, + ID: container.ContainerID, + }) + } + + podIP := net.ParseIP(pod.Status.PodIP) + if isIPV4(pod.Status.PodIP) { + retinaEndpointCommon.BaseObject.ips.IPv4 = podIP + } else { + retinaEndpointCommon.BaseObject.ips.IPv6 = podIP + } + + OtherIPv4s := []net.IP{} + OtherIPv6s := []net.IP{} + for _, podIP := range pod.Status.PodIPs { + if podIP.IP == pod.Status.PodIP { + continue + } + if isIPV4(podIP.IP) { + OtherIPv4s = append(OtherIPv4s, net.ParseIP(podIP.IP)) + continue + } + OtherIPv6s = append(OtherIPv6s, net.ParseIP(podIP.IP)) + } + retinaEndpointCommon.ips.OtherIPv4s = OtherIPv4s + retinaEndpointCommon.ips.OtherIPv6s = OtherIPv6s + + for k, v := range pod.GetAnnotations() { + if k == RetinaPodAnnotation { + retinaEndpointCommon.annotations[k] = v + } + } + + return 
retinaEndpointCommon +} + +// IPAddresses represents a set of IP addresses. +type IPAddresses struct { + IPv4 net.IP + IPv6 net.IP + OtherIPv4s []net.IP + OtherIPv6s []net.IP +} + +// RetinaContainer represents a container in a Kubernetes endpoint. +type RetinaContainer struct { + Name string + ID string +} + +// RetinaSvc represents a Kubernetes service. +// ClusterIPs are saved as IPs in base object. +type RetinaSvc struct { + BaseObject + lbIP net.IP + selector map[string]string +} + +// OwnerReference contains enough information to let you identify an owning object. +// Reference: https://github.com/kubernetes/apimachinery/blob/v0.26.4/pkg/apis/meta/v1/types.go#L291 +type OwnerReference struct { + APIVersion string + Kind string + Name string + UID string + Controller bool +} + +// RetinaNode represents a Kubernetes node. +type RetinaNode struct { + name string + ip net.IP +} + +type APIServerObject struct { + EP *RetinaEndpoint +} + +func (a *APIServerObject) IPs() []net.IP { + a.EP.RLock() + defer a.EP.RUnlock() + + ips := []net.IP{} + if a.EP.ips.IPv4 != nil { + ips = append(ips, a.EP.ips.IPv4) + } + if a.EP.ips.OtherIPv4s != nil { + ips = append(ips, a.EP.ips.OtherIPv4s...) + } + return ips +} + +func NewAPIServerObject(stringIPs []string) *APIServerObject { + ips := []net.IP{} + for _, stringIP := range stringIPs { + ip := net.ParseIP(stringIP) + if ip != nil { + ips = append(ips, ip) + } + } + + if len(ips) == 0 { + return &APIServerObject{} + } + + primaryIP := ips[0] + ips = ips[1:] + return &APIServerObject{ + EP: &RetinaEndpoint{ + BaseObject: BaseObject{ + name: APIServerEndpointName, + namespace: APIServerEndpointName, + ips: &IPAddresses{ + IPv4: primaryIP, + OtherIPv4s: ips, + }, + RWMutex: &sync.RWMutex{}, + }, + }, + } +} + +func (a *APIServerObject) DeepCopy() interface{} { + return &APIServerObject{ + EP: a.EP.DeepCopy().(*RetinaEndpoint), + } +} diff --git a/pkg/common/types_test.go b/pkg/common/types_test.go new file mode 100644 index 000000000..5eb0574dc --- /dev/null +++ b/pkg/common/types_test.go @@ -0,0 +1,250 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
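A short sketch of NewAPIServerObject above: the first parseable address becomes the primary IPv4 and the remaining ones are kept in OtherIPv4s (addresses are arbitrary examples):

package main

import (
	"fmt"

	"github.com/microsoft/retina/pkg/common"
)

func main() {
	apiServer := common.NewAPIServerObject([]string{"10.0.0.1", "10.0.0.2", "not-an-ip"})
	// The unparseable entry is dropped; 10.0.0.1 becomes the primary IP and 10.0.0.2 lands in OtherIPv4s.
	fmt.Println(apiServer.IPs()) // [10.0.0.1 10.0.0.2]
}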
+package common + +import ( + "net" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + retinav1alpha1 "github.com/microsoft/retina/crd/api/v1alpha1" +) + +func TestRetinaEndpointCommonFromAPI(t *testing.T) { + ownerReference := retinav1alpha1.OwnerReference{ + APIVersion: "apps/v1", + Kind: "DaemonSet", + Name: "retina-agent", + } + + hostIP := "10.224.0.253" + podIP := "10.0.0.1" + podIPv6 := "2001:1234:5678:9abc::4" + parsedPodIP := net.ParseIP(podIP) + parsedPodIPV6 := net.ParseIP(podIPv6) + + container := retinav1alpha1.RetinaEndpointStatusContainers{ + Name: "test", + ID: "5d4afda7-490b-43c3-bcfa-ed8f5d23720e", + } + + tests := []struct { + name string + retinaendpoint *retinav1alpha1.RetinaEndpoint + wantRetinaendpointCommon *RetinaEndpoint + }{ + { + name: "pod has one ip address", + retinaendpoint: &retinav1alpha1.RetinaEndpoint{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Spec: retinav1alpha1.RetinaEndpointSpec{ + Containers: []retinav1alpha1.RetinaEndpointStatusContainers{ + container, + }, + Labels: map[string]string{"test": "test"}, + OwnerReferences: []retinav1alpha1.OwnerReference{ + ownerReference, + }, + NodeIP: hostIP, + PodIP: podIP, + PodIPs: []string{podIP}, + Annotations: map[string]string{ + RetinaPodAnnotation: RetinaPodAnnotationValue, + }, + }, + }, + wantRetinaendpointCommon: &RetinaEndpoint{ + BaseObject: BaseObject{ + name: "test", + namespace: "test", + ips: &IPAddresses{ + IPv4: parsedPodIP, + IPv6: net.IP(""), + OtherIPv4s: []net.IP{}, + OtherIPv6s: []net.IP{}, + }, + }, + ownerRefs: []*OwnerReference{ + { + APIVersion: ownerReference.APIVersion, + Kind: ownerReference.Kind, + Name: ownerReference.Name, + }, + }, + containers: []*RetinaContainer{ + { + Name: container.Name, + ID: container.ID, + }, + }, + annotations: map[string]string{ + RetinaPodAnnotation: RetinaPodAnnotationValue, + }, + }, + }, + { + name: "pod has dual-stack ip addresses", + retinaendpoint: &retinav1alpha1.RetinaEndpoint{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Spec: retinav1alpha1.RetinaEndpointSpec{ + Containers: []retinav1alpha1.RetinaEndpointStatusContainers{ + container, + }, + Labels: map[string]string{"test": "test"}, + OwnerReferences: []retinav1alpha1.OwnerReference{ + ownerReference, + }, + NodeIP: hostIP, + PodIP: podIP, + PodIPs: []string{podIP, podIPv6}, + Annotations: map[string]string{ + "test": "test", + RetinaPodAnnotation: RetinaPodAnnotationValue, + }, + }, + }, + wantRetinaendpointCommon: &RetinaEndpoint{ + BaseObject: BaseObject{ + name: "test", + namespace: "test", + ips: &IPAddresses{ + IPv4: parsedPodIP, + IPv6: net.IP(""), + OtherIPv4s: []net.IP{}, + OtherIPv6s: []net.IP{parsedPodIPV6}, + }, + }, + ownerRefs: []*OwnerReference{ + { + APIVersion: ownerReference.APIVersion, + Kind: ownerReference.Kind, + Name: ownerReference.Name, + }, + }, + containers: []*RetinaContainer{ + { + Name: container.Name, + ID: container.ID, + }, + }, + annotations: map[string]string{ + RetinaPodAnnotation: RetinaPodAnnotationValue, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotRetinaendpointCommon := RetinaEndpointCommonFromAPI(tt.retinaendpoint) + + if diff := cmp.Diff(gotRetinaendpointCommon, tt.wantRetinaendpointCommon, cmpopts.IgnoreFields(BaseObject{}, "RWMutex"), cmp.AllowUnexported(BaseObject{}, RetinaEndpoint{})); diff != "" { + 
t.Fatalf("RetinaEndpointCommonFromAPI mismatch (-got, +want)\n%s", diff) + } + }) + } +} + +func TestRetinaEndpointCommonFromPod(t *testing.T) { + ownerReference := metav1.OwnerReference{ + APIVersion: "apps/v1", + Kind: "DaemonSet", + Name: "retina-agent", + } + + podIP := "10.0.0.1" + parsedPodIP := net.ParseIP(podIP) + + container := corev1.ContainerStatus{ + Name: "test", + ContainerID: "5d4afda7-490b-43c3-bcfa-ed8f5d23720e", + } + + tests := []struct { + name string + pod *corev1.Pod + wantRetinaendpointCommon *RetinaEndpoint + }{ + { + name: "pod retina endpoint", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + OwnerReferences: []metav1.OwnerReference{ + ownerReference, + }, + Annotations: map[string]string{ + RetinaPodAnnotation: RetinaPodAnnotationValue, + "test": "test", + }, + Labels: map[string]string{ + "test": "test", + }, + }, + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{ + container, + }, + PodIP: podIP, + PodIPs: []corev1.PodIP{ + { + IP: podIP, + }, + }, + }, + }, + wantRetinaendpointCommon: &RetinaEndpoint{ + BaseObject: BaseObject{ + name: "test", + namespace: "test", + ips: &IPAddresses{ + IPv4: parsedPodIP, + IPv6: net.IP(""), + OtherIPv4s: []net.IP{}, + OtherIPv6s: []net.IP{}, + }, + }, + ownerRefs: []*OwnerReference{ + { + APIVersion: ownerReference.APIVersion, + Kind: ownerReference.Kind, + Name: ownerReference.Name, + }, + }, + containers: []*RetinaContainer{ + { + Name: container.Name, + ID: container.ContainerID, + }, + }, + annotations: map[string]string{ + RetinaPodAnnotation: RetinaPodAnnotationValue, + }, + labels: map[string]string{ + "test": "test", + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotRetinaendpointCommon := RetinaEndpointCommonFromPod(tt.pod) + + if diff := cmp.Diff(gotRetinaendpointCommon, tt.wantRetinaendpointCommon, cmpopts.IgnoreFields(BaseObject{}, "RWMutex"), cmp.AllowUnexported(BaseObject{}, RetinaEndpoint{})); diff != "" { + t.Fatalf("RetinaEndpointCommonFromPod mismatch (-got, +want)\n%s", diff) + } + }) + } +} diff --git a/pkg/config/capture.go b/pkg/config/capture.go new file mode 100644 index 000000000..f34f680bb --- /dev/null +++ b/pkg/config/capture.go @@ -0,0 +1,25 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package config + +import ( + captureUtils "github.com/microsoft/retina/pkg/capture/utils" +) + +// CaptureConfig defines the configuration for capture controller in the operator. +type CaptureConfig struct { + // Configurations to determine the capture workload image. + // + // Debug indicates whether to enable debug mode. + // If true, the operator will pick the image from the test container registry for the capture workload. + // Check pkg/capture/utils/capture_image.go for the detailed explanation of how debug capture image version is picked. + // NOTE: CaptureImageVersion and CaptureImageVersionSource are used internally and not visible to the user. + CaptureDebug bool `yaml:"captureDebug"` + // ImageVersion defines the image version of the capture workload. + CaptureImageVersion string `yaml:"-"` + // VersionSource defines the source of the image version. + CaptureImageVersionSource captureUtils.VersionSource `yaml:"-"` + + // JobNumLimit indicates the maximum number of jobs that can be created for each Capture. 
+ CaptureJobNumLimit int `yaml:"captureJobNumLimit"` +} diff --git a/pkg/config/config.go b/pkg/config/config.go new file mode 100644 index 000000000..fa06341ff --- /dev/null +++ b/pkg/config/config.go @@ -0,0 +1,57 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package config + +import ( + "fmt" + "time" + + "github.com/spf13/viper" +) + +type Server struct { + Host string `yaml:"host"` + Port int `yaml:"port"` +} + +type Config struct { + ApiServer Server `yaml:"apiServer"` + LogLevel string `yaml:"logLevel"` + EnabledPlugin []string `yaml:"enabledPlugin"` + MetricsInterval time.Duration `yaml:"metricsInterval"` + EnableTelemetry bool `yaml:"enableTelemetry"` + EnableRetinaEndpoint bool `yaml:"enableRetinaEndpoint"` + EnablePodLevel bool `yaml:"enablePodLevel"` + RemoteContext bool `yaml:"remoteContext"` + EnableAnnotations bool `yaml:"enableAnnotations"` + BypassLookupIPOfInterest bool `yaml:"bypassLookupIPOfInterest"` +} + +func GetConfig(cfgFilename string) (*Config, error) { + if cfgFilename != "" { + viper.SetConfigFile(cfgFilename) + } else { + viper.SetConfigName("config") + viper.AddConfigPath("/retina/config") + } + + viper.SetEnvPrefix("retina") + viper.AutomaticEnv() + // NOTE(mainred): RetinaEndpoint is currently the only supported solution to cache Pod, and before an alternative is implemented, + // we make EnableRetinaEndpoint true and cannot be configurable. + viper.SetDefault("EnableRetinaEndpoint", true) + + err := viper.ReadInConfig() + if err != nil { + return nil, fmt.Errorf("fatal error config file: %s", err) + } + var config Config + err = viper.Unmarshal(&config) + if err != nil { + return nil, fmt.Errorf("fatal error config file: %s", err) + } + // Convert to second. + config.MetricsInterval = config.MetricsInterval * time.Second + + return &config, nil +} diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go new file mode 100644 index 000000000..25b0d38bb --- /dev/null +++ b/pkg/config/config_test.go @@ -0,0 +1,27 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package config + +import ( + "testing" + "time" +) + +func TestGetConfig(t *testing.T) { + c, err := GetConfig("./testwith/config.yaml") + if err != nil { + t.Fatalf("Expected no error, instead got %+v", err) + } + if c.ApiServer.Host != "0.0.0.0" || + c.ApiServer.Port != 10093 || + c.LogLevel != "info" || + c.MetricsInterval != 10*time.Second || + len(c.EnabledPlugin) != 3 || + c.EnablePodLevel || + !c.EnableRetinaEndpoint || + c.RemoteContext || + c.EnableAnnotations { + t.Fatalf("Expeted config should be same as ./testwith/config.yaml; instead got %+v", c) + } +} diff --git a/pkg/config/testwith/config-mock.yaml b/pkg/config/testwith/config-mock.yaml new file mode 100644 index 000000000..ce656b195 --- /dev/null +++ b/pkg/config/testwith/config-mock.yaml @@ -0,0 +1,10 @@ +apiServer: + host: "0.0.0.0" + port: 10093 +# Supported - debug, info, error, warn, panic, fatal. +logLevel: info +enabledPlugin: ["mockplugin"] +# Interval, in seconds, to scrape/publish metrics. +metricsInterval: 10 +# used to export telemetry to AppInsights +telemetryEnabled: true diff --git a/pkg/config/testwith/config-win.yaml b/pkg/config/testwith/config-win.yaml new file mode 100644 index 000000000..2bf7438fe --- /dev/null +++ b/pkg/config/testwith/config-win.yaml @@ -0,0 +1,10 @@ +apiServer: + host: "0.0.0.0" + port: 10093 +# Supported - debug, info, error, warn, panic, fatal. 
+logLevel: info +enabledPlugin: ["hnsstats"] +# Interval, in seconds, to scrape/publish metrics. +metricsInterval: 10 +# used to export telemetry to AppInsights +telemetryEnabled: true diff --git a/pkg/config/testwith/config.yaml b/pkg/config/testwith/config.yaml new file mode 100644 index 000000000..8c55954d5 --- /dev/null +++ b/pkg/config/testwith/config.yaml @@ -0,0 +1,10 @@ +apiServer: + host: "0.0.0.0" + port: 10093 +# Supported - debug, info, error, warn, panic, fatal. +logLevel: info +enabledPlugin: ["dropreason", "packetforward", "linuxutil"] +# Interval, in seconds, to scrape/publish metrics. +metricsInterval: 10 +# used to export telemetry to AppInsights +telemetryEnabled: true diff --git a/pkg/controllers/cache/cache.go b/pkg/controllers/cache/cache.go new file mode 100644 index 000000000..0b1da6a97 --- /dev/null +++ b/pkg/controllers/cache/cache.go @@ -0,0 +1,487 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package cache + +import ( + "fmt" + "net" + "sync" + + "github.com/microsoft/retina/pkg/common" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/pubsub" + "go.uber.org/zap" +) + +type Cache struct { + sync.RWMutex + l *log.ZapLogger + // endpointMap is a map of pod key (namespace/name) to RetinaEndpoint + epMap map[string]*common.RetinaEndpoint + + // svcMap is a map of service key (namespace/name) to RetinaSvc + svcMap map[string]*common.RetinaSvc + + // ipToEpKey is a map of pod IP to list of pod keys (namespace/name) + ipToEpKey map[string]string + + // ipToSvcKey is a map of service IP to service key (namespace/name) + ipToSvcKey map[string]string + + // nodeMap is a map of node name to Node + nodeMap map[string]*common.RetinaNode + + // ipToNodeName is a map of node IP to node name + ipToNodeName map[string]string + + // nsMap is a map of namespace to no of pods in the namespace + nsMap map[string]int + + // nsAnnotated is a map of annotated namespaces to watch + nsAnnotated map[string]bool + + pubsub pubsub.PubSubInterface +} + +// NewCache returns a new instance of Cache. +func New(p pubsub.PubSubInterface) *Cache { + c := &Cache{ + l: log.Logger().Named(string("Cache")), + epMap: make(map[string]*common.RetinaEndpoint), + svcMap: make(map[string]*common.RetinaSvc), + ipToEpKey: make(map[string]string), + ipToSvcKey: make(map[string]string), + nodeMap: make(map[string]*common.RetinaNode), + ipToNodeName: make(map[string]string), + nsMap: make(map[string]int), + nsAnnotated: make(map[string]bool), + pubsub: p, + } + + cbFunc := pubsub.CallBackFunc(c.SubscribeAPIServerFn) + c.pubsub.Subscribe(common.PubSubAPIServer, &cbFunc) + return c +} + +// GetPodByIP returns the retina endpoint for the given IP. +func (c *Cache) GetPodByIP(ip string) *common.RetinaEndpoint { + c.RLock() + defer c.RUnlock() + + obj := c.getObjByIPType(ip, TypeEndpoint) + switch obj := obj.(type) { + case *common.RetinaEndpoint: + return obj + default: + return nil + } +} + +// GetSvcByIP returns the retina service for the given IP. +func (c *Cache) GetSvcByIP(ip string) *common.RetinaSvc { + c.RLock() + defer c.RUnlock() + + obj := c.getObjByIPType(ip, TypeSvc) + switch obj := obj.(type) { + case *common.RetinaSvc: + return obj + default: + return nil + } +} + +// GetNodeByIP returns the retina node for the given IP. 
+func (c *Cache) GetNodeByIP(ip string) *common.RetinaNode { + c.RLock() + defer c.RUnlock() + + obj := c.getObjByIPType(ip, TypeNode) + switch obj := obj.(type) { + case *common.RetinaNode: + return obj + default: + return nil + } +} + +// getObjByIPType returns the retina endpoint for the given IP. +func (c *Cache) getObjByIPType(ip string, t objectType) interface{} { + switch t { + case TypeEndpoint: + podKey, ok := c.ipToEpKey[ip] + if !ok { + c.l.Debug("pod not found for IP", zap.String("ip", ip)) + return nil + } + + ep, ok := c.epMap[podKey] + if ok { + c.l.Debug("pod found for IP", zap.String("ip", ip), zap.String("pod", podKey)) + return ep + } + case TypeSvc: + svcKey, ok := c.ipToSvcKey[ip] + if !ok { + c.l.Debug("service not found for IP", zap.String("ip", ip)) + return nil + } + + svc, ok := c.svcMap[svcKey] + if ok { + c.l.Debug("service found for IP", zap.String("ip", ip), zap.String("svc", svcKey)) + return svc + } + case TypeNode: + nodeName, ok := c.ipToNodeName[ip] + if !ok { + c.l.Debug("node not found for IP", zap.String("ip", ip)) + return nil + } + + node, ok := c.nodeMap[nodeName] + if ok { + c.l.Debug("node found for IP", zap.String("ip", ip), zap.String("node", nodeName)) + return node + } + } + + return nil +} + +// GetObjByIP returns the retina object for the given IP. +func (c *Cache) GetObjByIP(ip string) interface{} { + if ep := c.GetPodByIP(ip); ep != nil { + c.l.Debug("pod found for IP", zap.String("ip", ip), zap.String("pod Name", ep.Key())) + return ep + } + + if svc := c.GetSvcByIP(ip); svc != nil { + return svc + } + + if node := c.GetNodeByIP(ip); node != nil { + return node + } + + return nil +} + +func (c *Cache) GetIPsByNamespace(ns string) []net.IP { + c.RLock() + defer c.RUnlock() + + var ips []net.IP + unique := make(map[string]struct{}) + for _, ep := range c.epMap { + if ep.Namespace() == ns { + ip, err := ep.PrimaryNetIP() + if err != nil { + c.l.Error("GetIPsByNamespace: error getting primary IP for pod", zap.String("pod", ep.Key()), zap.Error(err)) + continue + } + + if _, ok := unique[ip.String()]; !ok { + ips = append(ips, ip) + unique[ip.String()] = struct{}{} + } + } + } + + return ips +} + +// UpdateRetinaEndpoint updates the cache with the given retina endpoint. +func (c *Cache) UpdateRetinaEndpoint(ep *common.RetinaEndpoint) error { + c.Lock() + defer c.Unlock() + + return c.updateEndpoint(ep) +} + +// updateEndpoint updates the cache with the given retina endpoint. +func (c *Cache) updateEndpoint(ep *common.RetinaEndpoint) error { + ip, err := ep.PrimaryIP() + if err != nil { + c.l.Error("updateEndpoint: error getting primary IP for pod", zap.String("pod", ep.Key()), zap.Error(err)) + return err + } + // delete if any existing object is using this IP + // send a delete event for the existing object + err = c.deleteByIP(ip, ep.Key()) + if err != nil { + c.l.Error("updateEndpoint: error deleting existing object for IP", + zap.String("pod", ep.Key()), + zap.String("ip", ip), + zap.Error(err), + ) + return err + } + + c.epMap[ep.Key()] = ep + c.ipToEpKey[ip] = ep.Key() + + // notify pubsub that the endpoint has been updated + c.publish(EventTypePodAdded, ep) + + return nil +} + +// UpdateRetinaSvc updates the cache with the given retina service. +func (c *Cache) UpdateRetinaSvc(svc *common.RetinaSvc) error { + c.Lock() + defer c.Unlock() + + return c.updateSvc(svc) +} + +// updateSvc updates the cache with the given retina service. 
+func (c *Cache) updateSvc(svc *common.RetinaSvc) error { + // TODO check if a service already exists here + ip, err := svc.GetPrimaryIP() + if err != nil { + c.l.Error("updateSvc: error getting primary IP for service", zap.String("service", svc.Key()), zap.Error(err)) + return err + } + + // delete if any existing object is using this IP + // send a delete event for the existing object + err = c.deleteByIP(ip, svc.Key()) + if err != nil { + c.l.Error("updateSvc: error deleting existing object for IP", + zap.String("svc", svc.Key()), + zap.String("ip", ip), + zap.Error(err), + ) + return err + } + + c.ipToSvcKey[ip] = svc.Key() + c.svcMap[svc.Key()] = svc + + // notify pubsub + c.publish(EventTypeSvcAdded, svc) + + return nil +} + +// UpdateRetinaNode updates the cache with the given retina node. +func (c *Cache) UpdateRetinaNode(node *common.RetinaNode) error { + c.Lock() + defer c.Unlock() + + return c.updateNode(node) +} + +// updateNode updates the cache with the given retina node. +func (c *Cache) updateNode(node *common.RetinaNode) error { + ip := node.IPString() + + // delete if any existing object is using this IP + // send a delete event for the existing object + err := c.deleteByIP(ip, node.Name()) + if err != nil { + c.l.Error("updateNode: error deleting existing object for IP", + zap.String("node", node.Name()), + zap.String("ip", ip), + zap.Error(err), + ) + return err + } + + c.nodeMap[node.Name()] = node + c.ipToNodeName[node.IPString()] = node.Name() + + // notify pubsub + c.publish(EventTypeNodeAdded, node) + + return nil +} + +// DeleteRetinaEndpoint deletes the given retina endpoint from the cache. +func (c *Cache) DeleteRetinaEndpoint(epKey string) error { + c.Lock() + defer c.Unlock() + + return c.deleteEndpoint(epKey) +} + +// deleteEndpoint deletes the given retina endpoint from the cache. +func (c *Cache) deleteEndpoint(epKey string) error { + ep, ok := c.epMap[epKey] + if !ok { + c.l.Debug("endpoint not found in cache", zap.String("endpoint", epKey)) + // ignore the error if the endpoint is not found + return nil + } + + ip, err := ep.PrimaryIP() + if err != nil { + c.l.Error("error getting primary IP for pod", zap.String("pod", ep.Key()), zap.Error(err)) + return err + } + + delete(c.epMap, epKey) + delete(c.ipToEpKey, ip) + + c.publish(EventTypePodDeleted, ep) + + return nil +} + +// DeleteRetinaSvc deletes the given retina service from the cache. +func (c *Cache) DeleteRetinaSvc(svcKey string) error { + c.Lock() + defer c.Unlock() + + return c.deleteSvc(svcKey) +} + +// deleteScv deletes the given retina service from the cache. +func (c *Cache) deleteSvc(svcKey string) error { + svc, ok := c.svcMap[svcKey] + if !ok { + c.l.Debug("service not found in cache", zap.String("service", svcKey)) + return fmt.Errorf("service not found in cache: %s", svcKey) + } + + ip, err := svc.GetPrimaryIP() + if err != nil { + c.l.Error("error getting primary IP for service", zap.String("service", svc.Key()), zap.Error(err)) + return err + } + + delete(c.svcMap, svcKey) + delete(c.ipToSvcKey, ip) + + // notify pubsub + c.publish(EventTypeSvcDeleted, svc) + + return nil +} + +// DeleteRetinaNode deletes the given retina node from the cache. +func (c *Cache) DeleteRetinaNode(nodeName string) error { + c.Lock() + defer c.Unlock() + + return c.deleteNode(nodeName) +} + +// deleteNode deletes the given retina node from the cache. 
+func (c *Cache) deleteNode(nodeName string) error { + node, ok := c.nodeMap[nodeName] + if !ok { + c.l.Debug("node not found in cache", zap.String("node", nodeName)) + return fmt.Errorf("node not found in cache: %s", nodeName) + } + + delete(c.nodeMap, nodeName) + delete(c.ipToNodeName, node.IPString()) + + c.publish(EventTypeNodeDeleted, node) + + return nil +} + +// deleteByIP deletes the given IP from the cache. +func (c *Cache) deleteByIP(ip, key string) error { + if svcKey, ok := c.ipToSvcKey[ip]; ok { + if svcKey == key { + return nil + } + c.l.Debug("deleting service by IP", zap.String("ip", ip), zap.String("service", svcKey)) + return c.deleteSvc(svcKey) + } + if epKey, ok := c.ipToEpKey[ip]; ok { + if epKey == key { + return nil + } + c.l.Debug("deleting pod by IP", zap.String("ip", ip), zap.String("pod", epKey)) + return c.deleteEndpoint(epKey) + } + if nodeName, ok := c.ipToNodeName[ip]; ok { + if nodeName == key { + return nil + } + c.l.Debug("deleting node by IP", zap.String("ip", ip), zap.String("node", nodeName)) + return c.deleteNode(nodeName) + } + + c.l.Debug("IP not found in cache", zap.String("ip", ip)) + return nil +} + +func (c *Cache) publish(t EventType, obj common.PublishObj) { + if c.pubsub == nil { + c.l.Warn("pubsub not initialized, skipping publish", zap.String("event", t.String())) + return + } + cev := NewCacheEvent(t, obj) + topic := common.PubSubPods + if t == EventTypeSvcAdded || t == EventTypeSvcDeleted { + topic = common.PubSubSvc + } else if t == EventTypeNodeAdded || t == EventTypeNodeDeleted { + topic = common.PubSubNode + } + + go func() { + c.pubsub.Publish(topic, cev) + }() +} + +func (c *Cache) SubscribeAPIServerFn(obj interface{}) { + event := obj.(*CacheEvent) + if event == nil { + return + } + + apiServer := event.Obj.(*common.APIServerObject) + if apiServer == nil || apiServer.EP == nil { + c.l.Debug("invalid or nil APIServer object in callback function") + return + } + + c.Lock() + defer c.Unlock() + + switch event.Type { + case EventTypeAddAPIServerIPs: + err := c.updateEndpoint(apiServer.EP) + if err != nil { + c.l.Error("error updating APIServer endpoint in callback function", zap.Error(err)) + } + case EventTypeDeleteAPIServerIPs: + err := c.deleteEndpoint(apiServer.EP.Key()) + if err != nil { + c.l.Error("error deleting APIServer endpoint in callback function", zap.Error(err)) + } + default: + c.l.Warn("invalid event type in callback function", zap.String("event", event.Type.String())) + + } +} + +func (c *Cache) DeleteAnnotatedNamespace(ns string) { + c.Lock() + defer c.Unlock() + + delete(c.nsAnnotated, ns) +} + +func (c *Cache) AddAnnotatedNamespace(ns string) { + c.Lock() + defer c.Unlock() + + c.nsAnnotated[ns] = true +} + +func (c *Cache) GetAnnotatedNamespaces() []string { + c.RLock() + defer c.RUnlock() + ns := make([]string, 0, len(c.nsAnnotated)) + for k := range c.nsAnnotated { + ns = append(ns, k) + } + return ns +} diff --git a/pkg/controllers/cache/cache_test.go b/pkg/controllers/cache/cache_test.go new file mode 100644 index 000000000..d94472f03 --- /dev/null +++ b/pkg/controllers/cache/cache_test.go @@ -0,0 +1,350 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
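A minimal sketch of the typical flow through the cache implemented above: construct it with a pubsub implementation, insert an endpoint, and resolve it by IP. The pod name and address are illustrative, and pubsub.New() is assumed here to return a PubSubInterface implementation; the tests that follow exercise the same paths with a mocked pubsub.

package main

import (
	"fmt"
	"net"

	"github.com/microsoft/retina/pkg/common"
	"github.com/microsoft/retina/pkg/controllers/cache"
	"github.com/microsoft/retina/pkg/log"
	"github.com/microsoft/retina/pkg/pubsub"
)

func main() {
	log.SetupZapLogger(log.GetDefaultLogOpts())
	c := cache.New(pubsub.New()) // assumed constructor for a concrete PubSubInterface

	ep := common.NewRetinaEndpoint("example-pod", "default", &common.IPAddresses{IPv4: net.IPv4(10, 0, 0, 1)})
	if err := c.UpdateRetinaEndpoint(ep); err != nil {
		panic(err)
	}

	if got := c.GetPodByIP("10.0.0.1"); got != nil {
		fmt.Println(got.NamespacedName()) // "default/example-pod"
	}
}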
+package cache + +import ( + "net" + "testing" + "time" + + "github.com/microsoft/retina/pkg/common" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/pubsub" + gomock "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +const ( + until = 1 * time.Millisecond +) + +func TestNewCache(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + p := pubsub.NewMockPubSubInterface(ctrl) + p.EXPECT().Subscribe(common.PubSubAPIServer, gomock.Any()).Times(1) + c := New(p) + assert.NotNil(t, c) +} + +func TestCacheEndpoints(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + p := pubsub.NewMockPubSubInterface(ctrl) + p.EXPECT().Publish(common.PubSubPods, gomock.Any()).Times(2) + p.EXPECT().Subscribe(common.PubSubAPIServer, gomock.Any()).Times(1) + c := New(p) + assert.NotNil(t, c) + + addEndpoints := common.NewRetinaEndpoint("pod1", "ns1", nil) + addEndpoints.SetLabels(map[string]string{ + "app": "app1", + }) + addEndpoints.SetAnnotations(map[string]string{ + common.RetinaPodAnnotation: common.RetinaPodAnnotationValue, + }) + + err := c.UpdateRetinaEndpoint(addEndpoints) + assert.Error(t, err) + + addEndpoints.SetIPs(&common.IPAddresses{ + IPv4: net.IPv4(1, 2, 3, 4), + }) + + err = c.UpdateRetinaEndpoint(addEndpoints) + assert.NoError(t, err) + + obj := c.GetObjByIP("1.2.3.4") + assert.NotNil(t, obj) + ep := obj.(*common.RetinaEndpoint) + assert.Equal(t, addEndpoints.Name(), ep.Name()) + assert.Equal(t, addEndpoints.Namespace(), ep.Namespace()) + assert.Equal(t, addEndpoints.Labels()["app"], ep.Labels()["app"]) + assert.Equal(t, addEndpoints.Annotations()[common.RetinaPodAnnotation], ep.Annotations()[common.RetinaPodAnnotation]) + + // normal get + ep = c.GetPodByIP("1.2.3.4") + assert.Equal(t, addEndpoints.Name(), ep.Name()) + assert.Equal(t, addEndpoints.Namespace(), ep.Namespace()) + assert.Equal(t, addEndpoints.Labels()["app"], ep.Labels()["app"]) + assert.Equal(t, addEndpoints.Annotations()[common.RetinaPodAnnotation], ep.Annotations()[common.RetinaPodAnnotation]) + + // delete + err = c.DeleteRetinaEndpoint(addEndpoints.Key()) + assert.NoError(t, err) + + time.Sleep(until) +} + +func TestCacheServices(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + p := pubsub.NewMockPubSubInterface(ctrl) + p.EXPECT().Subscribe(common.PubSubAPIServer, gomock.Any()).Times(1) + c := New(p) + assert.NotNil(t, c) + + addSvc := common.NewRetinaSvc("svc1", "ns1", nil, nil, nil) + + p.EXPECT().Publish(gomock.Any(), gomock.Any()).Times(2) + err := c.UpdateRetinaSvc(addSvc) + assert.Error(t, err) + + addSvc.SetIPs(&common.IPAddresses{ + IPv4: net.IPv4(1, 2, 3, 4), + }) + + err = c.UpdateRetinaSvc(addSvc) + assert.NoError(t, err) + + obj := c.GetObjByIP("1.2.3.4") + assert.NotNil(t, obj) + svc := obj.(*common.RetinaSvc) + assert.Equal(t, addSvc.Name(), svc.Name()) + assert.Equal(t, addSvc.Namespace(), svc.Namespace()) + assert.Equal(t, addSvc.Selector(), svc.Selector()) + assert.Equal(t, addSvc.LBIP(), svc.LBIP()) + + // normal get + svc = c.GetSvcByIP("1.2.3.4") + assert.Equal(t, addSvc.Name(), svc.Name()) + assert.Equal(t, addSvc.Namespace(), svc.Namespace()) + assert.Equal(t, addSvc.Selector(), svc.Selector()) + assert.Equal(t, addSvc.LBIP(), svc.LBIP()) + + // delete + err = c.DeleteRetinaSvc(addSvc.Key()) + assert.NoError(t, err) + + time.Sleep(until) +} + +func 
TestCacheNodes(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + p := pubsub.NewMockPubSubInterface(ctrl) + p.EXPECT().Subscribe(common.PubSubAPIServer, gomock.Any()).Times(1) + c := New(p) + assert.NotNil(t, c) + + addNode := common.NewRetinaNode("node1", net.IPv4(1, 2, 3, 4)) + + p.EXPECT().Publish(gomock.Any(), gomock.Any()).Times(2) + err := c.UpdateRetinaNode(addNode) + assert.NoError(t, err) + + obj := c.GetObjByIP("1.2.3.4") + assert.NotNil(t, obj) + node := obj.(*common.RetinaNode) + assert.Equal(t, addNode.Name(), node.Name()) + assert.Equal(t, addNode.IPString(), node.IPString()) + + // normal get + node = c.GetNodeByIP("1.2.3.4") + assert.Equal(t, addNode.Name(), node.Name()) + assert.Equal(t, addNode.IPString(), node.IPString()) + + // delete + err = c.DeleteRetinaNode(addNode.Name()) + assert.NoError(t, err) + + time.Sleep(until) +} + +func TestAddPodSvcNodeSameIP(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + p := pubsub.NewMockPubSubInterface(ctrl) + p.EXPECT().Publish(common.PubSubPods, gomock.Any()).Times(2) + p.EXPECT().Publish(common.PubSubSvc, gomock.Any()).Times(2) + p.EXPECT().Publish(common.PubSubNode, gomock.Any()).Times(1) + p.EXPECT().Subscribe(common.PubSubAPIServer, gomock.Any()).Times(1) + c := New(p) + assert.NotNil(t, c) + + addEndpoints := common.NewRetinaEndpoint("pod1", "ns1", nil) + addEndpoints.SetLabels(map[string]string{ + "app": "app1", + }) + + addEndpoints.SetIPs(&common.IPAddresses{ + IPv4: net.IPv4(1, 2, 3, 4), + }) + + err := c.UpdateRetinaEndpoint(addEndpoints) + assert.NoError(t, err) + + addSvc := common.NewRetinaSvc("svc1", "ns1", + &common.IPAddresses{ + IPv4: net.IPv4(1, 2, 3, 4), + }, nil, nil) + + err = c.UpdateRetinaSvc(addSvc) + assert.NoError(t, err) + + obj := c.GetObjByIP("1.2.3.4") + assert.NotNil(t, obj) + svc := obj.(*common.RetinaSvc) + assert.Equal(t, addSvc.Name(), svc.Name()) + assert.Equal(t, addSvc.Namespace(), svc.Namespace()) + + addNode := common.NewRetinaNode("node1", net.IPv4(1, 2, 3, 4)) + + err = c.UpdateRetinaNode(addNode) + assert.NoError(t, err) + + obj = c.GetObjByIP("1.2.3.4") + assert.NotNil(t, obj) + node := obj.(*common.RetinaNode) + assert.Equal(t, addNode.Name(), node.Name()) + assert.Equal(t, addNode.IPString(), node.IPString()) + + time.Sleep(until) +} + +func TestAddPodSvcNodeSameIPDiffNS(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + p := pubsub.NewMockPubSubInterface(ctrl) + p.EXPECT().Publish(common.PubSubPods, gomock.Any()).Times(2) + p.EXPECT().Publish(common.PubSubSvc, gomock.Any()).Times(2) + p.EXPECT().Publish(common.PubSubNode, gomock.Any()).Times(1) + p.EXPECT().Subscribe(common.PubSubAPIServer, gomock.Any()).Times(1) + c := New(p) + assert.NotNil(t, c) + + addEndpoints := common.NewRetinaEndpoint("pod1", "ns1", nil) + addEndpoints.SetLabels(map[string]string{ + "app": "app1", + }) + + addEndpoints.SetIPs(&common.IPAddresses{ + IPv4: net.IPv4(1, 2, 3, 4), + }) + + err := c.UpdateRetinaEndpoint(addEndpoints) + assert.NoError(t, err) + + addSvc := common.NewRetinaSvc("svc1", "ns2", + &common.IPAddresses{ + IPv4: net.IPv4(1, 2, 3, 4), + }, nil, nil) + + err = c.UpdateRetinaSvc(addSvc) + assert.NoError(t, err) + + obj := c.GetObjByIP("1.2.3.4") + assert.NotNil(t, obj) + svc := obj.(*common.RetinaSvc) + assert.Equal(t, addSvc.Name(), svc.Name()) + assert.Equal(t, addSvc.Namespace(), 
svc.Namespace()) + + addNode := common.NewRetinaNode("node1", net.IPv4(1, 2, 3, 4)) + + err = c.UpdateRetinaNode(addNode) + assert.NoError(t, err) + + obj = c.GetObjByIP("1.2.3.4") + + assert.NotNil(t, obj) + node := obj.(*common.RetinaNode) + assert.Equal(t, addNode.Name(), node.Name()) + assert.Equal(t, addNode.IPString(), node.IPString()) + + time.Sleep(until) +} + +func TestAddPodDiffNs(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + p := pubsub.NewMockPubSubInterface(ctrl) + p.EXPECT().Publish(common.PubSubPods, gomock.Any()).Times(3) + p.EXPECT().Subscribe(common.PubSubAPIServer, gomock.Any()).Times(1) + c := New(p) + assert.NotNil(t, c) + + addEndpoints := common.NewRetinaEndpoint("pod1", "ns1", nil) + addEndpoints.SetLabels(map[string]string{ + "app": "app1", + }) + + addEndpoints.SetIPs(&common.IPAddresses{ + IPv4: net.IPv4(1, 2, 3, 4), + }) + + err := c.UpdateRetinaEndpoint(addEndpoints) + assert.NoError(t, err) + + addEndpoints = common.NewRetinaEndpoint("pod1", "ns2", nil) + addEndpoints.SetLabels(map[string]string{ + "app": "app1", + }) + + addEndpoints.SetIPs(&common.IPAddresses{ + IPv4: net.IPv4(1, 2, 3, 4), + }) + + err = c.UpdateRetinaEndpoint(addEndpoints) + assert.NoError(t, err) + + obj := c.GetObjByIP("1.2.3.4") + + assert.NotNil(t, obj) + ep := obj.(*common.RetinaEndpoint) + assert.Equal(t, addEndpoints.Name(), ep.Name()) + assert.Equal(t, addEndpoints.Namespace(), ep.Namespace()) + + time.Sleep(until) +} + +func TestFailDelete(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + p := pubsub.NewMockPubSubInterface(ctrl) + p.EXPECT().Subscribe(common.PubSubAPIServer, gomock.Any()).Times(1) + c := New(p) + assert.NotNil(t, c) + + addEndpoints := common.NewRetinaEndpoint("pod1", "ns1", nil) + + // Delete non-existing retina endpoint returns no error. + err := c.DeleteRetinaEndpoint(addEndpoints.Key()) + assert.NoError(t, err) + + svc := common.NewRetinaSvc("svc1", "ns1", nil, nil, nil) + err = c.DeleteRetinaSvc(svc.Key()) + assert.Error(t, err) + + node := common.NewRetinaNode("node1", net.IPv4(1, 2, 3, 4)) + + err = c.DeleteRetinaNode(node.Name()) + assert.Error(t, err) + + time.Sleep(until) +} + +func TestCachingNamespace(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + p := pubsub.NewMockPubSubInterface(ctrl) + p.EXPECT().Subscribe(common.PubSubAPIServer, gomock.Any()).Times(1) + c := New(p) + ns := "test-ns" + + c.AddAnnotatedNamespace(ns) + namespaces := c.GetAnnotatedNamespaces() + assert.Equal(t, 1, len(namespaces)) + assert.Equal(t, ns, namespaces[0]) + c.DeleteAnnotatedNamespace(ns) + namespaces = c.GetAnnotatedNamespaces() + assert.Equal(t, 0, len(namespaces)) +} diff --git a/pkg/controllers/cache/mock_cacheinterface.go b/pkg/controllers/cache/mock_cacheinterface.go new file mode 100644 index 000000000..42dad18b7 --- /dev/null +++ b/pkg/controllers/cache/mock_cacheinterface.go @@ -0,0 +1,234 @@ +// autogenerated +// +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/microsoft/retina/pkg/controllers/cache (interfaces: CacheInterface) + +// Package cache is a generated GoMock package. 
+package cache + +import ( + net "net" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + common "github.com/microsoft/retina/pkg/common" +) + +// MockCacheInterface is a mock of CacheInterface interface. +type MockCacheInterface struct { + ctrl *gomock.Controller + recorder *MockCacheInterfaceMockRecorder +} + +// MockCacheInterfaceMockRecorder is the mock recorder for MockCacheInterface. +type MockCacheInterfaceMockRecorder struct { + mock *MockCacheInterface +} + +// NewMockCacheInterface creates a new mock instance. +func NewMockCacheInterface(ctrl *gomock.Controller) *MockCacheInterface { + mock := &MockCacheInterface{ctrl: ctrl} + mock.recorder = &MockCacheInterfaceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockCacheInterface) EXPECT() *MockCacheInterfaceMockRecorder { + return m.recorder +} + +// AddAnnotatedNamespace mocks base method. +func (m *MockCacheInterface) AddAnnotatedNamespace(arg0 string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddAnnotatedNamespace", arg0) +} + +// AddAnnotatedNamespace indicates an expected call of AddAnnotatedNamespace. +func (mr *MockCacheInterfaceMockRecorder) AddAnnotatedNamespace(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddAnnotatedNamespace", reflect.TypeOf((*MockCacheInterface)(nil).AddAnnotatedNamespace), arg0) +} + +// DeleteAnnotatedNamespace mocks base method. +func (m *MockCacheInterface) DeleteAnnotatedNamespace(arg0 string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeleteAnnotatedNamespace", arg0) +} + +// DeleteAnnotatedNamespace indicates an expected call of DeleteAnnotatedNamespace. +func (mr *MockCacheInterfaceMockRecorder) DeleteAnnotatedNamespace(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAnnotatedNamespace", reflect.TypeOf((*MockCacheInterface)(nil).DeleteAnnotatedNamespace), arg0) +} + +// DeleteRetinaEndpoint mocks base method. +func (m *MockCacheInterface) DeleteRetinaEndpoint(arg0 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteRetinaEndpoint", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteRetinaEndpoint indicates an expected call of DeleteRetinaEndpoint. +func (mr *MockCacheInterfaceMockRecorder) DeleteRetinaEndpoint(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRetinaEndpoint", reflect.TypeOf((*MockCacheInterface)(nil).DeleteRetinaEndpoint), arg0) +} + +// DeleteRetinaNode mocks base method. +func (m *MockCacheInterface) DeleteRetinaNode(arg0 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteRetinaNode", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteRetinaNode indicates an expected call of DeleteRetinaNode. +func (mr *MockCacheInterfaceMockRecorder) DeleteRetinaNode(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRetinaNode", reflect.TypeOf((*MockCacheInterface)(nil).DeleteRetinaNode), arg0) +} + +// DeleteRetinaSvc mocks base method. +func (m *MockCacheInterface) DeleteRetinaSvc(arg0 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteRetinaSvc", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteRetinaSvc indicates an expected call of DeleteRetinaSvc. 
+func (mr *MockCacheInterfaceMockRecorder) DeleteRetinaSvc(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRetinaSvc", reflect.TypeOf((*MockCacheInterface)(nil).DeleteRetinaSvc), arg0) +} + +// GetAnnotatedNamespaces mocks base method. +func (m *MockCacheInterface) GetAnnotatedNamespaces() []string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAnnotatedNamespaces") + ret0, _ := ret[0].([]string) + return ret0 +} + +// GetAnnotatedNamespaces indicates an expected call of GetAnnotatedNamespaces. +func (mr *MockCacheInterfaceMockRecorder) GetAnnotatedNamespaces() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAnnotatedNamespaces", reflect.TypeOf((*MockCacheInterface)(nil).GetAnnotatedNamespaces)) +} + +// GetIPsByNamespace mocks base method. +func (m *MockCacheInterface) GetIPsByNamespace(arg0 string) []net.IP { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetIPsByNamespace", arg0) + ret0, _ := ret[0].([]net.IP) + return ret0 +} + +// GetIPsByNamespace indicates an expected call of GetIPsByNamespace. +func (mr *MockCacheInterfaceMockRecorder) GetIPsByNamespace(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIPsByNamespace", reflect.TypeOf((*MockCacheInterface)(nil).GetIPsByNamespace), arg0) +} + +// GetNodeByIP mocks base method. +func (m *MockCacheInterface) GetNodeByIP(arg0 string) *common.RetinaNode { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNodeByIP", arg0) + ret0, _ := ret[0].(*common.RetinaNode) + return ret0 +} + +// GetNodeByIP indicates an expected call of GetNodeByIP. +func (mr *MockCacheInterfaceMockRecorder) GetNodeByIP(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeByIP", reflect.TypeOf((*MockCacheInterface)(nil).GetNodeByIP), arg0) +} + +// GetObjByIP mocks base method. +func (m *MockCacheInterface) GetObjByIP(arg0 string) interface{} { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjByIP", arg0) + ret0, _ := ret[0].(interface{}) + return ret0 +} + +// GetObjByIP indicates an expected call of GetObjByIP. +func (mr *MockCacheInterfaceMockRecorder) GetObjByIP(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjByIP", reflect.TypeOf((*MockCacheInterface)(nil).GetObjByIP), arg0) +} + +// GetPodByIP mocks base method. +func (m *MockCacheInterface) GetPodByIP(arg0 string) *common.RetinaEndpoint { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPodByIP", arg0) + ret0, _ := ret[0].(*common.RetinaEndpoint) + return ret0 +} + +// GetPodByIP indicates an expected call of GetPodByIP. +func (mr *MockCacheInterfaceMockRecorder) GetPodByIP(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPodByIP", reflect.TypeOf((*MockCacheInterface)(nil).GetPodByIP), arg0) +} + +// GetSvcByIP mocks base method. +func (m *MockCacheInterface) GetSvcByIP(arg0 string) *common.RetinaSvc { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSvcByIP", arg0) + ret0, _ := ret[0].(*common.RetinaSvc) + return ret0 +} + +// GetSvcByIP indicates an expected call of GetSvcByIP. 
+func (mr *MockCacheInterfaceMockRecorder) GetSvcByIP(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSvcByIP", reflect.TypeOf((*MockCacheInterface)(nil).GetSvcByIP), arg0) +} + +// UpdateRetinaEndpoint mocks base method. +func (m *MockCacheInterface) UpdateRetinaEndpoint(arg0 *common.RetinaEndpoint) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateRetinaEndpoint", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateRetinaEndpoint indicates an expected call of UpdateRetinaEndpoint. +func (mr *MockCacheInterfaceMockRecorder) UpdateRetinaEndpoint(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateRetinaEndpoint", reflect.TypeOf((*MockCacheInterface)(nil).UpdateRetinaEndpoint), arg0) +} + +// UpdateRetinaNode mocks base method. +func (m *MockCacheInterface) UpdateRetinaNode(arg0 *common.RetinaNode) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateRetinaNode", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateRetinaNode indicates an expected call of UpdateRetinaNode. +func (mr *MockCacheInterfaceMockRecorder) UpdateRetinaNode(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateRetinaNode", reflect.TypeOf((*MockCacheInterface)(nil).UpdateRetinaNode), arg0) +} + +// UpdateRetinaSvc mocks base method. +func (m *MockCacheInterface) UpdateRetinaSvc(arg0 *common.RetinaSvc) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateRetinaSvc", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateRetinaSvc indicates an expected call of UpdateRetinaSvc. +func (mr *MockCacheInterfaceMockRecorder) UpdateRetinaSvc(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateRetinaSvc", reflect.TypeOf((*MockCacheInterface)(nil).UpdateRetinaSvc), arg0) +} diff --git a/pkg/controllers/cache/types.go b/pkg/controllers/cache/types.go new file mode 100644 index 000000000..5e32a9f2a --- /dev/null +++ b/pkg/controllers/cache/types.go @@ -0,0 +1,120 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package cache + +import ( + "net" + + "github.com/microsoft/retina/pkg/common" +) + +//go:generate go run github.com/golang/mock/mockgen@v1.6.0 -destination=mock_cacheinterface.go -copyright_file=../../lib/ignore_headers.txt -package=cache github.com/microsoft/retina/pkg/controllers/cache CacheInterface +type CacheInterface interface { + // GetPodByIP returns the retina endpoint for the given IP. + GetPodByIP(ip string) *common.RetinaEndpoint + // GetSvcByIP returns the retina service for the given IP. + GetSvcByIP(ip string) *common.RetinaSvc + // GetNodeByIP returns the retina node for the given IP. + GetNodeByIP(ip string) *common.RetinaNode + // GetObjByIP returns the object for the given IP. + GetObjByIP(ip string) interface{} + // GetIPsByNamespace returns the net.IPs for a given namespace. + GetIPsByNamespace(ns string) []net.IP + // GetAnnotatedNamespaces returns list of namespaces that are annotated with retina to observe. + GetAnnotatedNamespaces() []string + + // UpdateRetinaEndpoint updates the retina endpoint in the cache. + UpdateRetinaEndpoint(ep *common.RetinaEndpoint) error + // UpdateRetinaSvc updates the retina service in the cache. + UpdateRetinaSvc(svc *common.RetinaSvc) error + // UpdateRetinaNode updates the retina node in the cache. 
+ UpdateRetinaNode(node *common.RetinaNode) error + AddAnnotatedNamespace(ns string) + + // DeleteRetinaEndpoint deletes the retina endpoint from the cache. + DeleteRetinaEndpoint(epKey string) error + // DeleteRetinaSvc deletes the retina service from the cache. + DeleteRetinaSvc(svcKey string) error + // DeleteRetinaNode deletes the retina node from the cache. + DeleteRetinaNode(nodeName string) error + DeleteAnnotatedNamespace(ns string) +} + +type CacheEvent struct { + // Type is the type of the event. + Type EventType + // Obj is the object that the event is about. + Obj interface{} +} + +func NewCacheEvent(t EventType, obj common.PublishObj) *CacheEvent { + return &CacheEvent{ + Type: t, + Obj: obj.DeepCopy(), + } +} + +type EventType int + +const ( + // EventTypePodAdded is the event type for a pod add event. + EventTypePodAdded EventType = iota + // EventTypePodDeleted is the event type for a pod delete event. + EventTypePodDeleted + // EventTypeSvcAdded is the event type for a service add event. + EventTypeSvcAdded + // EventTypeSvcDeleted is the event type for a service delete event. + EventTypeSvcDeleted + // EventTypeNodeAdded is the event type for a node add event. + EventTypeNodeAdded + // EventTypeNodeDeleted is the event type for a node delete event. + EventTypeNodeDeleted + // EventTypeAddAPIServerIPs is the event type for adding API server IPs. + EventTypeAddAPIServerIPs + // EventTypeDeleteAPIServerIPs is the event type for deleting API server IPs. + EventTypeDeleteAPIServerIPs +) + +func (e EventType) String() string { + switch e { + case EventTypePodAdded: + return "pod added" + case EventTypePodDeleted: + return "pod deleted" + case EventTypeSvcAdded: + return "service added" + case EventTypeSvcDeleted: + return "service deleted" + case EventTypeNodeAdded: + return "node added" + case EventTypeNodeDeleted: + return "node deleted" + case EventTypeAddAPIServerIPs: + return "add API server IPs" + case EventTypeDeleteAPIServerIPs: + return "delete API server IPs" + default: + return "unknown" + } +} + +type objectType int + +const ( + TypeEndpoint objectType = iota + TypeSvc + TypeNode +) + +func (o objectType) String() string { + switch o { + case TypeEndpoint: + return "endpoint" + case TypeSvc: + return "service" + case TypeNode: + return "node" + default: + return "unknown" + } +} diff --git a/pkg/controllers/daemon/kappieendpoint/controller.go b/pkg/controllers/daemon/kappieendpoint/controller.go new file mode 100644 index 000000000..0f909ebfc --- /dev/null +++ b/pkg/controllers/daemon/kappieendpoint/controller.go @@ -0,0 +1,90 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
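+// This controller reconciles RetinaEndpoint custom resources into the shared controller cache, deleting cache entries when the CR is removed.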
+ +package retinaendpoint + +import ( + "context" + "time" + + "github.com/microsoft/retina/pkg/common/apiretry" + "github.com/microsoft/retina/pkg/log" + "go.uber.org/zap" + errors "k8s.io/apimachinery/pkg/api/errors" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + operatorv1 "github.com/microsoft/retina/crd/api/v1alpha1" + retinaCommon "github.com/microsoft/retina/pkg/common" + "github.com/microsoft/retina/pkg/controllers/cache" +) + +// RetinaEndpointReconciler reconciles a RetinaEndpoint object +type RetinaEndpointReconciler struct { + client.Client + + cache *cache.Cache + l *log.ZapLogger +} + +func New(client client.Client, cache *cache.Cache) *RetinaEndpointReconciler { + return &RetinaEndpointReconciler{ + Client: client, + cache: cache, + l: log.Logger().Named(string("RetinaEndpointReconciler")), + } +} + +// +kubebuilder:rbac:groups=retina.io,resources=retinaendpoints,verbs=get;list;watch +func (r *RetinaEndpointReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + startTime := time.Now() + r.l.Info("Reconciling RetinaEndpoint", zap.String("RetinaEndpoint", req.NamespacedName.String())) + + defer func() { + latency := time.Since(startTime).String() + r.l.Info("Reconciliation ends", zap.String("RetinaEndpoint", req.NamespacedName.String()), zap.String("latency", latency)) + }() + + retinaEndpoint := &operatorv1.RetinaEndpoint{} + if err := apiretry.Do( + func() error { + return r.Get(ctx, req.NamespacedName, retinaEndpoint) + }, + ); err != nil { + if errors.IsNotFound(err) { + // RetinaEndpoint deleted since reconcile request received. + r.l.Debug("RetinaEndpoint is not found", zap.String("RetinaEndpoint", req.NamespacedName.String())) + // It's safe to use req.NamespacedName as the Pod and RetinaEndpoint share the same namespace and name. + if err := r.cache.DeleteRetinaEndpoint(req.NamespacedName.String()); err != nil { + r.l.Warn("Failed to delete RetinaEndpoint in Cache", zap.Error(err), zap.String("RetinaEndpoint", req.NamespacedName.String())) + } + return ctrl.Result{}, nil + } + r.l.Error("Failed to fetch RetinaEndpoint", zap.Error(err), zap.String("RetinaEndpoint", req.NamespacedName.String())) + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + if !retinaEndpoint.ObjectMeta.DeletionTimestamp.IsZero() { + r.l.Info("RetinaEndpoint is being deleted", zap.String("RetinaEndpoint", req.NamespacedName.String())) + if err := r.cache.DeleteRetinaEndpoint(req.NamespacedName.String()); err != nil { + r.l.Warn("Failed to delete RetinaEndpoint in Cache", zap.Error(err), zap.String("RetinaEndpoint", req.NamespacedName.String())) + } + return ctrl.Result{}, nil + } + + retinaEndpointCommon := retinaCommon.RetinaEndpointCommonFromAPI(retinaEndpoint) + if err := r.cache.UpdateRetinaEndpoint(retinaEndpointCommon); err != nil { + r.l.Error("Failed to update RetinaEndpoint in Cache", zap.Error(err), zap.String("RetinaEndpoint", req.NamespacedName.String())) + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *RetinaEndpointReconciler) SetupWithManager(mgr ctrl.Manager) error { + r.l.Info("Setting up RetinaEndpoint controller") + return ctrl.NewControllerManagedBy(mgr). + For(&operatorv1.RetinaEndpoint{}). 
+ Complete(r) +} diff --git a/pkg/controllers/daemon/kappieendpoint/controller_test.go b/pkg/controllers/daemon/kappieendpoint/controller_test.go new file mode 100644 index 000000000..2170689bb --- /dev/null +++ b/pkg/controllers/daemon/kappieendpoint/controller_test.go @@ -0,0 +1,158 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package retinaendpoint + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + utilrand "k8s.io/apimachinery/pkg/util/rand" + + retinav1alpha1 "github.com/microsoft/retina/crd/api/v1alpha1" + retinaCommon "github.com/microsoft/retina/pkg/common" +) + +func retinaEndpoitCmpComparer() cmp.Option { + return cmp.Comparer(func(x, y *retinaCommon.RetinaEndpoint) bool { + if (x != nil && y == nil) || (x == nil && y != nil) { + return false + } + if x == nil && y == nil { + return true + } + if !cmp.Equal(x.BaseObject, y.BaseObject, cmpopts.IgnoreTypes(sync.RWMutex{}), cmp.AllowUnexported(retinaCommon.BaseObject{})) { + return false + } + if !cmp.Equal(x.Containers(), y.Containers()) { + return false + } + if !cmp.Equal(x.OwnerRefs(), y.OwnerRefs()) { + return false + } + if !cmp.Equal(x.Labels(), y.Labels()) { + return false + } + return true + }) +} + +var _ = Describe("Test Retina Capture Controller", func() { + const ( + timeout = time.Second * 10 + interval = time.Second * 1 + ) + var ( + node *corev1.Node + testNamespace string + captureName string + captureRef types.NamespacedName + ns corev1.Namespace + ) + BeforeEach(func() { + By("Creating a node with label 'kubernetes.io/role: agent'") + node = &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node1", + Labels: map[string]string{ + "kubernetes.io/role": "agent", + }, + }, + } + Expect(k8sClient.Create(context.Background(), node)).Should(Succeed()) + }) + + AfterEach(func() { + By("Deleting the node") + Expect(k8sClient.Delete(ctx, node)).Should(Succeed()) + }) + + Context("RetinaEndpoint is created successfully", func() { + BeforeEach(func() { + testNamespace = fmt.Sprintf("test-capture-%s", utilrand.String(5)) + By("Creating test namespace") + ns = corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: testNamespace, + }, + } + Expect(k8sClient.Create(ctx, &ns)).Should(Succeed()) + + captureName = "test-capture" + captureRef = types.NamespacedName{Name: captureName, Namespace: testNamespace} + }) + + AfterEach(func() { + By("Deleting test namespace") + Expect(k8sClient.Delete(ctx, &ns)).Should(Succeed()) + }) + + It("Should create capture successfully", func() { + By("Creating a new RetinaEndpoint") + ctx := context.Background() + retinaEndpoint := &retinav1alpha1.RetinaEndpoint{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-capture", + Namespace: testNamespace, + }, + Spec: retinav1alpha1.RetinaEndpointSpec{ + NodeIP: "10.10.10.10", + PodIP: "10.0.0.1", + PodIPs: []string{"10.0.0.1"}, + Containers: []retinav1alpha1.RetinaEndpointStatusContainers{ + { + Name: "testcontainer", + ID: "docker://1234567890", + }, + }, + }, + } + Expect(k8sClient.Create(ctx, retinaEndpoint)).Should(Succeed()) + + createdRetinaEndpoint := &retinav1alpha1.RetinaEndpoint{} + By("Checking if the RetinaEndpoint was successfully created") + Eventually(func() bool { + err := k8sClient.Get(ctx, captureRef, createdRetinaEndpoint) + return err == nil + 
}, timeout, interval).Should(BeTrue()) + + By("Checking the common RetinaEndpoint is created in Cache") + expectedRetinaEndpointCommon := retinaCommon.RetinaEndpointCommonFromAPI(createdRetinaEndpoint) + Eventually(func() string { + retinaEndpointCommon := retinaEndpointReconciler.cache.GetPodByIP(createdRetinaEndpoint.Spec.PodIP) + return cmp.Diff(retinaEndpointCommon, expectedRetinaEndpointCommon, retinaEndpoitCmpComparer()) + }, timeout, interval).Should(BeEmpty()) + + By("Updating RetinaEndpoint") + createdRetinaEndpoint.Spec.PodIP = "10.0.0.3" + createdRetinaEndpoint.Spec.PodIPs = []string{"10.0.0.3"} + Expect(k8sClient.Update(ctx, createdRetinaEndpoint)).Should(Succeed()) + + By("Checking the common RetinaEndpoint is updated in Cache") + expectedRetinaEndpointCommon = retinaCommon.RetinaEndpointCommonFromAPI(createdRetinaEndpoint) + Eventually(func() string { + retinaEndpointCommon := retinaEndpointReconciler.cache.GetPodByIP(retinaEndpoint.Spec.PodIP) + return cmp.Diff(retinaEndpointCommon, expectedRetinaEndpointCommon, retinaEndpoitCmpComparer()) + }, timeout, interval).Should(BeEmpty()) + + By("Deleting RetinaEndpoint") + Expect(k8sClient.Delete(ctx, createdRetinaEndpoint)).Should(Succeed()) + + By("Checking the common RetinaEndpoint is not in Cache") + expectedRetinaEndpointCommon = retinaCommon.RetinaEndpointCommonFromAPI(createdRetinaEndpoint) + Eventually(func() bool { + retinaEndpointCommon := retinaEndpointReconciler.cache.GetPodByIP(retinaEndpoint.Spec.PodIP) + return retinaEndpointCommon == nil + }, timeout, interval).Should(BeTrue()) + }) + }) +}) diff --git a/pkg/controllers/daemon/kappieendpoint/suite_test.go b/pkg/controllers/daemon/kappieendpoint/suite_test.go new file mode 100644 index 000000000..859c6b2f7 --- /dev/null +++ b/pkg/controllers/daemon/kappieendpoint/suite_test.go @@ -0,0 +1,94 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package retinaendpoint + +import ( + "context" + "path/filepath" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "k8s.io/client-go/rest" + "k8s.io/kubectl/pkg/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + retinav1alpha1 "github.com/microsoft/retina/crd/api/v1alpha1" + controllercache "github.com/microsoft/retina/pkg/controllers/cache" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/pubsub" +) + +var ( + cfg *rest.Config + k8sClient client.Client + testEnv *envtest.Environment + ctx context.Context + cancel context.CancelFunc + + retinaEndpointReconciler *RetinaEndpointReconciler +) + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Capture Controller Suite") +} + +var _ = BeforeSuite(func() { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + ctx, cancel = context.WithCancel(context.TODO()) + + By("Bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("../../../..", "deploy/manifests/controller/helm/retina/crds")}, + ErrorIfCRDPathMissing: true, + } + + var err error + cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + err = retinav1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + + // Check pkg/controllers/operator/capture/suite_test.go for more details to why metric is disabled. + k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme.Scheme, + MetricsBindAddress: "0", + }) + Expect(err).ToNot(HaveOccurred()) + + pubSub := pubsub.New() + controllerCache := controllercache.New(pubSub) + retinaEndpointReconciler = New(k8sManager.GetClient(), controllerCache) + + err = retinaEndpointReconciler.SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + go func() { + defer GinkgoRecover() + err = k8sManager.Start(ctx) + Expect(err).ToNot(HaveOccurred(), "failed to run manager") + }() +}) + +var _ = AfterSuite(func() { + cancel() + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/pkg/controllers/daemon/metricsconfiguration/metricsconfiguration_controller.go b/pkg/controllers/daemon/metricsconfiguration/metricsconfiguration_controller.go new file mode 100644 index 000000000..78d58dc85 --- /dev/null +++ b/pkg/controllers/daemon/metricsconfiguration/metricsconfiguration_controller.go @@ -0,0 +1,109 @@ +/* +Copyright 2023. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metricsconfigurationcontroller + +import ( + "context" + "sync" + + "go.uber.org/zap" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + retinav1alpha1 "github.com/microsoft/retina/crd/api/v1alpha1" + "github.com/microsoft/retina/crd/api/v1alpha1/validations" + "github.com/microsoft/retina/pkg/log" + mm "github.com/microsoft/retina/pkg/module/metrics" +) + +// MetricsConfigurationReconciler reconciles a MetricsConfiguration object +type MetricsConfigurationReconciler struct { + *sync.Mutex + client.Client + Scheme *runtime.Scheme + mcCache map[string]*retinav1alpha1.MetricsConfiguration + metricsModule *mm.Module + l *log.ZapLogger +} + +func New(client client.Client, scheme *runtime.Scheme, metricsModule *mm.Module) *MetricsConfigurationReconciler { + return &MetricsConfigurationReconciler{ + Mutex: &sync.Mutex{}, + l: log.Logger().Named(string("metricsconfiguration-controller")), + Client: client, + Scheme: scheme, + mcCache: make(map[string]*retinav1alpha1.MetricsConfiguration), + metricsModule: metricsModule, + } +} + +//+kubebuilder:rbac:groups=operator.retina.io,resources=metricsconfiguration,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=operator.retina.io,resources=metricsconfiguration/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=operator.retina.io,resources=metricsconfiguration/finalizers,verbs=update + +func (r *MetricsConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + mcc := &retinav1alpha1.MetricsConfiguration{} + if err := r.Client.Get(ctx, req.NamespacedName, mcc); err != nil { + if apierrors.IsNotFound(err) { + // Object not found, it has probably been deleted + r.l.Info("deleted", zap.String("name", req.NamespacedName.String())) + r.Lock() + if _, ok := r.mcCache[req.NamespacedName.String()]; ok { + delete(r.mcCache, req.NamespacedName.String()) + r.l.Info("deleted from cache", zap.String("name", req.NamespacedName.String())) + } + r.Unlock() + return ctrl.Result{}, nil + } else { + r.l.Info("error getting metricsconfiguration", zap.String("name", req.NamespacedName.String())) + return ctrl.Result{}, err + } + } + + r.l.Info("reconciled", zap.String("name", req.NamespacedName.String())) + r.Lock() + defer r.Unlock() + if len(r.mcCache) == 0 || r.mcCache[req.NamespacedName.String()] != nil { + if mcc.Status.State != retinav1alpha1.StateAccepted { + r.l.Info("ignoring this CRD as it is not configured and accepted by operator") + } else { + r.l.Info("adding to cache", zap.String("name", req.NamespacedName.String())) + currentMcc := r.mcCache[req.NamespacedName.String()] + + if validations.CompareMetricsConfig(currentMcc, mcc) { + r.l.Info("no change in metrics configuration, skipping reconcile", zap.String("name", req.NamespacedName.String())) + return ctrl.Result{}, nil + } + + err := r.metricsModule.Reconcile(&mcc.Spec) + if err != nil { + r.l.Info("error reconciling metrics configurations", zap.String("name", req.NamespacedName.String())) + } + r.mcCache[req.NamespacedName.String()] = mcc + } + } else { + r.l.Error("Metrics Configuration is already configured on this cluster, cannot reconcile this new config.", zap.String("name", req.NamespacedName.String())) + } + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. 
+func (r *MetricsConfigurationReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&retinav1alpha1.MetricsConfiguration{}). + Complete(r) +} diff --git a/pkg/controllers/daemon/metricsconfiguration/metricsconfiguration_controller_test.go b/pkg/controllers/daemon/metricsconfiguration/metricsconfiguration_controller_test.go new file mode 100644 index 000000000..cd2ffda1b --- /dev/null +++ b/pkg/controllers/daemon/metricsconfiguration/metricsconfiguration_controller_test.go @@ -0,0 +1,129 @@ +/* +Copyright 2023. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metricsconfigurationcontroller + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + retinav1alpha1 "github.com/microsoft/retina/crd/api/v1alpha1" + "github.com/microsoft/retina/pkg/log" +) + +var fakescheme = runtime.NewScheme() + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(fakescheme)) + utilruntime.Must(retinav1alpha1.AddToScheme(fakescheme)) + + _ = clientgoscheme.AddToScheme(fakescheme) + fakescheme.AddKnownTypes(retinav1alpha1.GroupVersion, &retinav1alpha1.RetinaEndpoint{}) +} + +func TestMetricsConfigurationReconciler_Reconcile(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + type fields struct { + existingObjects []client.Object + } + type args struct { + ctx context.Context + req ctrl.Request + } + tests := []struct { + name string + fields fields + args args + want ctrl.Result + wantErr bool + wantNil bool + }{ + { + name: "should create a new metrics configuration", + fields: fields{ + existingObjects: []client.Object{ + &retinav1alpha1.MetricsConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + }, + }, + }, + args: args{ + ctx: context.TODO(), + req: ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test", + Namespace: "default", + }, + }, + }, + wantNil: false, + }, + { + name: "should send nil to channel when metrics configuration is deleted", + fields: fields{ + existingObjects: []client.Object{}, + }, + args: args{ + ctx: context.TODO(), + req: ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test", + Namespace: "default", + }, + }, + }, + wantNil: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client := fake.NewClientBuilder().WithScheme(fakescheme).WithObjects(tt.fields.existingObjects...).Build() + + r := New(client, fakescheme, nil) + + _, err := r.Reconcile(tt.args.ctx, tt.args.req) + if (err != nil) != tt.wantErr { + require.Error(t, err) + } + + // reactivate when this channel is used again + // verify that the pod is deleted + /* + select { + case config := 
<-metricsconfigchannel: + if tt.wantNil { + require.Nil(t, config.MetricsConfiguration) + } else { + require.NotNil(t, config.MetricsConfiguration) + } + + case <-time.After(5 * time.Second): + require.Errorf(t, nil, "controller didn't write to channel after allotted time") + } + */ + }) + } +} diff --git a/pkg/controllers/daemon/namespace/namespace_controller.go b/pkg/controllers/daemon/namespace/namespace_controller.go new file mode 100644 index 000000000..4664c03f0 --- /dev/null +++ b/pkg/controllers/daemon/namespace/namespace_controller.go @@ -0,0 +1,110 @@ +package namespacecontroller + +import ( + "context" + "time" + + api "github.com/microsoft/retina/crd/api/v1alpha1" + "github.com/microsoft/retina/pkg/common" + "github.com/microsoft/retina/pkg/controllers/cache" + "github.com/microsoft/retina/pkg/log" + mm "github.com/microsoft/retina/pkg/module/metrics" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +const ( + interval = 1 * time.Second +) + +type NamespaceReconciler struct { + client.Client + + cache cache.CacheInterface + metricsModule mm.IModule + l *log.ZapLogger +} + +func New(client client.Client, cache cache.CacheInterface, metricsModule mm.IModule) *NamespaceReconciler { + return &NamespaceReconciler{ + Client: client, + cache: cache, + metricsModule: metricsModule, + l: log.Logger().Named(string("NamespaceReconciler")), + } +} + +// +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list +func (r *NamespaceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + // Get Namespace and check if it has the annotation + r.l.Info("Reconciling Namespace", zap.String("namespace", req.NamespacedName.String())) + namespace := &corev1.Namespace{} + // If namespace is deleted, we ignore the error and continue + if err := r.Client.Get(ctx, req.NamespacedName, namespace); err != nil { + if client.IgnoreNotFound(err) != nil { + return ctrl.Result{}, err + } + namespace.Name = req.Name + namespace.Namespace = req.Namespace + } + // if the namespace has an annotation and was not deleted + if namespace.GetAnnotations()[common.RetinaPodAnnotation] == common.RetinaPodAnnotationValue && namespace.DeletionTimestamp.IsZero() { + r.l.Info("Namespace has annotation", zap.String("namespace", namespace.Name)) + r.cache.AddAnnotatedNamespace(namespace.Name) + } else { + // namespace updated with annotation removed or the namespace was deleted + r.l.Info("Namespace does not have annotation", zap.String("namespace", namespace.Name), zap.Any("annotations", namespace.GetAnnotations())) + r.cache.DeleteAnnotatedNamespace(namespace.Name) + } + return ctrl.Result{}, nil +} + +// Reconciles the metrics module with included namespaces from annotation on namespaces +func (r *NamespaceReconciler) Start(ctx context.Context) { + ticker := time.NewTicker(interval) + defer ticker.Stop() + for { + select { + case <-ticker.C: + ns := r.cache.GetAnnotatedNamespaces() + r.l.Debug("Reconciling metrics module", zap.Any("namespaces", ns)) + spec := (&api.MetricsSpec{}). + WithIncludedNamespaces(ns). + WithMetricsContextOptions(mm.DefaultMetrics(), mm.DefaultCtxOptions(), mm.DefaultCtxOptions()) + // MetricsModule will check the diff between namespaces and spec when reconciling. 
+ r.metricsModule.Reconcile(spec) + case <-ctx.Done(): + return + } + } +} + +func getPredicateFuncs() predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + _, hasAnnotation := e.Object.GetAnnotations()[common.RetinaPodAnnotation] + return hasAnnotation + }, + UpdateFunc: func(e event.UpdateEvent) bool { + _, annotOld := e.ObjectNew.GetAnnotations()[common.RetinaPodAnnotation] + _, annotNew := e.ObjectOld.GetAnnotations()[common.RetinaPodAnnotation] + return annotOld || annotNew + }, + DeleteFunc: func(e event.DeleteEvent) bool { + _, hasAnnotation := e.Object.GetAnnotations()[common.RetinaPodAnnotation] + return hasAnnotation + }, + } +} + +func (r *NamespaceReconciler) SetupWithManager(mgr ctrl.Manager) error { + r.l.Info("Setting up Namespace controller") + return ctrl.NewControllerManagedBy(mgr). + For(&corev1.Namespace{}). + WithEventFilter(getPredicateFuncs()). + Complete(r) +} diff --git a/pkg/controllers/daemon/namespace/namespace_controller_test.go b/pkg/controllers/daemon/namespace/namespace_controller_test.go new file mode 100644 index 000000000..de42b6076 --- /dev/null +++ b/pkg/controllers/daemon/namespace/namespace_controller_test.go @@ -0,0 +1,204 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package namespacecontroller + +import ( + "context" + "testing" + "time" + + retinav1alpha1 "github.com/microsoft/retina/crd/api/v1alpha1" + "github.com/microsoft/retina/pkg/common" + "github.com/microsoft/retina/pkg/controllers/cache" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/module/metrics" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/event" +) + +var fakescheme = runtime.NewScheme() + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(fakescheme)) + utilruntime.Must(retinav1alpha1.AddToScheme(fakescheme)) + + _ = clientgoscheme.AddToScheme(fakescheme) + fakescheme.AddKnownTypes(retinav1alpha1.GroupVersion, &retinav1alpha1.RetinaEndpoint{}) +} + +func TestNamespaceControllerReconcile(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + type fields struct { + existingObjects []client.Object + } + type args struct { + ctx context.Context + req ctrl.Request + } + tests := []struct { + name string + fields fields + args args + want ctrl.Result + wantErr bool + deleteCalls int + addCalls int + }{ + { + name: "Test Namespace Controller Reconcile namespace not found (deleted)", + fields: fields{ + existingObjects: []client.Object{}, + }, + args: args{ + req: ctrl.Request{ + NamespacedName: client.ObjectKey{ + Name: "test", + }, + }, + }, + wantErr: false, + deleteCalls: 1, + addCalls: 0, + }, + { + name: "Test Namespace Controller Reconcile with annotation (namespace updated/added with annotation)", + fields: fields{ + existingObjects: []client.Object{ + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Annotations: map[string]string{ + common.RetinaPodAnnotation: common.RetinaPodAnnotationValue, + }, + }, + }, + }, + }, + args: args{ + req: ctrl.Request{ + NamespacedName: client.ObjectKey{ + Name: "test", + }, + }, + }, + 
wantErr: false, + deleteCalls: 0, + addCalls: 1, + }, + { + name: "Test Namespace Controller Reconcile without annotation delete", + fields: fields{ + existingObjects: []client.Object{ + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Annotations: map[string]string{}, + }, + }, + }, + }, + args: args{ + req: ctrl.Request{ + NamespacedName: client.ObjectKey{ + Name: "test", + }, + }, + }, + wantErr: false, + deleteCalls: 1, + addCalls: 0, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client := fake.NewClientBuilder().WithScheme(fakescheme).WithObjects(tt.fields.existingObjects...).Build() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + cache := cache.NewMockCacheInterface(ctrl) //nolint:typecheck + cache.EXPECT().AddAnnotatedNamespace(gomock.Any()).Return().Times(tt.addCalls) + cache.EXPECT().DeleteAnnotatedNamespace(gomock.Any()).Return().Times(tt.deleteCalls) + r := New(client, cache, &metrics.Module{}) + _, err := r.Reconcile(tt.args.ctx, tt.args.req) + if (err != nil) != tt.wantErr { + t.Errorf("NamespaceControllerReconciler.Reconcile() error = %v, wantErr %v", err, tt.wantErr) + return + } + }) + } +} + +func TestPredicateFuncs(t *testing.T) { + funcs := getPredicateFuncs() + assert.True(t, funcs.Create(event.CreateEvent{ + Object: &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Annotations: map[string]string{ + common.RetinaPodAnnotation: common.RetinaPodAnnotationValue, + }, + }, + }, + })) + assert.True(t, funcs.Update(event.UpdateEvent{ + ObjectOld: &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Annotations: map[string]string{ + common.RetinaPodAnnotation: common.RetinaPodAnnotationValue, + }, + }, + }, + ObjectNew: &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Annotations: map[string]string{}, + }, + }, + })) + assert.True(t, funcs.Delete(event.DeleteEvent{ + Object: &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Annotations: map[string]string{ + common.RetinaPodAnnotation: common.RetinaPodAnnotationValue, + }, + }, + }, + })) +} + +func TestStart(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + existingObjects := []client.Object{ + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Annotations: map[string]string{}, + }, + }, + } + client := fake.NewClientBuilder().WithScheme(fakescheme).WithObjects(existingObjects...).Build() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + cache := cache.NewMockCacheInterface(ctrl) //nolint:typecheck + cache.EXPECT().GetAnnotatedNamespaces().Return([]string{"test"}).MinTimes(1) + mm := metrics.NewMockIModule(ctrl) //nolint:typecheck + // metrics_module Reconcile only called 1 time since namespaces is not dirty after first call. + mm.EXPECT().Reconcile(gomock.Any()).Return(nil).MinTimes(2) + r := New(client, cache, mm) + // add multiple reocncile calls to the channel to confirm reconcile is only called once + ctx, cancel := context.WithCancel(context.Background()) + go r.Start(ctx) + time.Sleep(15 * time.Second) + cancel() +} diff --git a/pkg/controllers/daemon/node/controller.go b/pkg/controllers/daemon/node/controller.go new file mode 100644 index 000000000..60221106c --- /dev/null +++ b/pkg/controllers/daemon/node/controller.go @@ -0,0 +1,93 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
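+// This controller mirrors Kubernetes Node objects into the controller cache as RetinaNodes, removing entries when a Node is deleted.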
+ +package node + +import ( + "context" + "net" + + "github.com/microsoft/retina/pkg/common/apiretry" + "github.com/microsoft/retina/pkg/log" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + errors "k8s.io/apimachinery/pkg/api/errors" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + retinaCommon "github.com/microsoft/retina/pkg/common" + "github.com/microsoft/retina/pkg/controllers/cache" +) + +// NodeReconciler reconciles a Node object +type NodeReconciler struct { + client.Client + + cache *cache.Cache + l *log.ZapLogger +} + +func New(client client.Client, cache *cache.Cache) *NodeReconciler { + return &NodeReconciler{ + Client: client, + cache: cache, + l: log.Logger().Named(string("NodeReconciler")), + } +} + +// +kubebuilder:rbac:groups="",resources=nodes,verbs=get;list +func (r *NodeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + // startTime := time.Now() + r.l.Debug("Reconciling Node", zap.String("Node", req.NamespacedName.String())) + + /* + // commenting out the latency calculation for now + // we will need to create these as metrics + defer func() { + latency := time.Since(startTime).String() + r.l.Info("Reconciliation ends", zap.String("Node", req.NamespacedName.String()), zap.String("latency", latency)) + }() + */ + node := &corev1.Node{} + if err := apiretry.Do( + func() error { + return r.Client.Get(ctx, req.NamespacedName, node) + }, + ); err != nil { + if errors.IsNotFound(err) { + // Node deleted since reconcile request received. + r.l.Debug("Node is not found", zap.String("Node", req.NamespacedName.String())) + // It's safe to use req.NamespacedName as the Node and RetinaNode share the same namespace and name. + if err := r.cache.DeleteRetinaNode(req.NamespacedName.String()); err != nil { + r.l.Warn("Failed to delete RetinaNode in Cache from Node", zap.Error(err), zap.String("RetinaNode", req.NamespacedName.String())) + } + return ctrl.Result{}, nil + } + r.l.Error("Failed to fetch Node", zap.Error(err), zap.String("Node", req.NamespacedName.String())) + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + if !node.ObjectMeta.DeletionTimestamp.IsZero() { + r.l.Info("Node is being deleted", zap.String("Node", req.Name)) + if err := r.cache.DeleteRetinaNode(req.NamespacedName.String()); err != nil { + r.l.Warn("Failed to delete RetinaNode in Cache from Node", zap.Error(err), zap.String("Node", req.NamespacedName.String())) + } + return ctrl.Result{}, nil + } + + retinaNodeCommon := retinaCommon.NewRetinaNode(node.Name, net.ParseIP(node.Status.Addresses[0].Address)) + if err := r.cache.UpdateRetinaNode(retinaNodeCommon); err != nil { + r.l.Error("Failed to update RetinaNode in Cache", zap.Error(err), zap.String("Node", req.NamespacedName.String())) + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *NodeReconciler) SetupWithManager(mgr ctrl.Manager) error { + r.l.Info("Setting up Node controller") + return ctrl.NewControllerManagedBy(mgr). + For(&corev1.Node{}). + Complete(r) +} diff --git a/pkg/controllers/daemon/pod/controller.go b/pkg/controllers/daemon/pod/controller.go new file mode 100644 index 000000000..3b86a2ed8 --- /dev/null +++ b/pkg/controllers/daemon/pod/controller.go @@ -0,0 +1,93 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
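+// This controller converts Pod objects into RetinaEndpoints in the controller cache, skipping host-network pods and pods without an assigned IP.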
+ +package pod + +import ( + "context" + + "github.com/microsoft/retina/pkg/common/apiretry" + "github.com/microsoft/retina/pkg/log" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + errors "k8s.io/apimachinery/pkg/api/errors" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + retinaCommon "github.com/microsoft/retina/pkg/common" + "github.com/microsoft/retina/pkg/controllers/cache" +) + +// PodReconciler reconciles a Pod object +type PodReconciler struct { + client.Client + + cache *cache.Cache + l *log.ZapLogger +} + +func New(client client.Client, cache *cache.Cache) *PodReconciler { + return &PodReconciler{ + Client: client, + cache: cache, + l: log.Logger().Named(string("PodReconciler")), + } +} + +// +kubebuilder:rbac:groups="",resources=pods,verbs=get;list +func (r *PodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + r.l.Info("Reconciling Pod", zap.String("Pod", req.NamespacedName.String())) + + pod := &corev1.Pod{} + if err := apiretry.Do( + func() error { + return r.Client.Get(ctx, req.NamespacedName, pod) + }, + ); err != nil { + if errors.IsNotFound(err) { + // RetinaEndpoint deleted since reconcile request received. + r.l.Debug("Pod is not found", zap.String("Pod", req.NamespacedName.String())) + // It's safe to use req.NamespacedName as the Pod and RetinaEndpoint share the same namespace and name. + if err := r.cache.DeleteRetinaEndpoint(req.NamespacedName.String()); err != nil { + r.l.Warn("Failed to delete RetinaEndpoint in Cache from Pod", zap.Error(err), zap.String("RetinaEndpoint", req.NamespacedName.String())) + } + return ctrl.Result{}, nil + } + r.l.Error("Failed to fetch Pod", zap.Error(err), zap.String("Pod", req.NamespacedName.String())) + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + if pod.Spec.HostNetwork { + r.l.Debug("Ignoring host network pod", zap.String("Pod", req.NamespacedName.String())) + return ctrl.Result{}, nil + } + + if !pod.ObjectMeta.DeletionTimestamp.IsZero() { + r.l.Info("Pod is being deleted", zap.String("Pod", req.Name)) + if err := r.cache.DeleteRetinaEndpoint(req.NamespacedName.String()); err != nil { + r.l.Warn("Failed to delete RetinaEndpoint in Cache from Pod", zap.Error(err), zap.String("Pod", req.NamespacedName.String())) + } + return ctrl.Result{}, nil + } + + if pod.Status.PodIP == "" { + r.l.Debug("Pod has no IP address", zap.String("Pod", req.NamespacedName.String())) + return ctrl.Result{}, nil + } + + retinaEndpointCommon := retinaCommon.RetinaEndpointCommonFromPod(pod) + if err := r.cache.UpdateRetinaEndpoint(retinaEndpointCommon); err != nil { + r.l.Error("Failed to update RetinaEndpoint in Cache", zap.Error(err), zap.String("Pod", req.NamespacedName.String())) + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *PodReconciler) SetupWithManager(mgr ctrl.Manager) error { + r.l.Info("Setting up Pod controller") + return ctrl.NewControllerManagedBy(mgr). + For(&corev1.Pod{}). + Complete(r) +} diff --git a/pkg/controllers/daemon/service/controller.go b/pkg/controllers/daemon/service/controller.go new file mode 100644 index 000000000..297e3c60e --- /dev/null +++ b/pkg/controllers/daemon/service/controller.go @@ -0,0 +1,99 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
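+// This controller converts Service objects into RetinaSvc entries in the controller cache, including the cluster IP and any load-balancer ingress IP.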
+
+package service
+
+import (
+	"context"
+	"net"
+
+	"github.com/microsoft/retina/pkg/common/apiretry"
+	"github.com/microsoft/retina/pkg/log"
+	"go.uber.org/zap"
+	corev1 "k8s.io/api/core/v1"
+	errors "k8s.io/apimachinery/pkg/api/errors"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	retinaCommon "github.com/microsoft/retina/pkg/common"
+	"github.com/microsoft/retina/pkg/controllers/cache"
+)
+
+// ServiceReconciler reconciles a Service object
+type ServiceReconciler struct {
+	client.Client
+
+	cache *cache.Cache
+	l     *log.ZapLogger
+}
+
+func New(client client.Client, cache *cache.Cache) *ServiceReconciler {
+	return &ServiceReconciler{
+		Client: client,
+		cache:  cache,
+		l:      log.Logger().Named(string("ServiceReconciler")),
+	}
+}
+
+// +kubebuilder:rbac:groups="",resources=services,verbs=get;list
+func (r *ServiceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	// startTime := time.Now()
+	r.l.Debug("Reconciling Service", zap.String("Service", req.NamespacedName.String()))
+	/*
+		// commenting out the latency calculation for now
+		// we will need to create these as metrics
+		defer func() {
+			latency := time.Since(startTime).String()
+			r.l.Info("Reconciliation ends", zap.String("Service", req.NamespacedName.String()), zap.String("latency", latency))
+		}()
+	*/
+
+	service := &corev1.Service{}
+	if err := apiretry.Do(
+		func() error {
+			return r.Client.Get(ctx, req.NamespacedName, service)
+		},
+	); err != nil {
+		if errors.IsNotFound(err) {
+			// RetinaService deleted since reconcile request received.
+			r.l.Debug("Service is not found", zap.String("Service", req.NamespacedName.String()))
+			// It's safe to use req.NamespacedName as the Service and RetinaService share the same namespace and name.
+			if err := r.cache.DeleteRetinaSvc(req.NamespacedName.String()); err != nil {
+				r.l.Warn("Failed to delete RetinaService in Cache from Service", zap.Error(err), zap.String("RetinaService", req.NamespacedName.String()))
+			}
+			return ctrl.Result{}, nil
+		}
+		r.l.Error("Failed to fetch Service", zap.Error(err), zap.String("Service", req.NamespacedName.String()))
+		return ctrl.Result{}, client.IgnoreNotFound(err)
+	}
+
+	if !service.ObjectMeta.DeletionTimestamp.IsZero() {
+		r.l.Info("Service is being deleted", zap.String("Service", req.Name))
+		if err := r.cache.DeleteRetinaSvc(req.NamespacedName.String()); err != nil {
+			r.l.Warn("Failed to delete RetinaService in Cache from Service", zap.Error(err), zap.String("Service", req.NamespacedName.String()))
+		}
+		return ctrl.Result{}, nil
+	}
+	ips := retinaCommon.IPAddresses{}
+	ips.IPv4 = net.ParseIP(service.Spec.ClusterIP)
+	var lbIP net.IP
+	if service.Status.LoadBalancer.Ingress != nil && len(service.Status.LoadBalancer.Ingress) > 0 {
+		lbIP = net.ParseIP(service.Status.LoadBalancer.Ingress[0].IP)
+	}
+	retinaSvcCommon := retinaCommon.NewRetinaSvc(service.Name, service.Namespace, &ips, lbIP, service.Spec.Selector)
+	if err := r.cache.UpdateRetinaSvc(retinaSvcCommon); err != nil {
+		r.l.Error("Failed to update RetinaService in Cache", zap.Error(err), zap.String("Service", req.NamespacedName.String()))
+		return ctrl.Result{}, err
+	}
+
+	return ctrl.Result{}, nil
+}
+
+// SetupWithManager sets up the controller with the Manager.
+func (r *ServiceReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	r.l.Info("Setting up Service controller")
+	return ctrl.NewControllerManagedBy(mgr).
+		For(&corev1.Service{}).
+ Complete(r) +} diff --git a/pkg/controllers/operator/capture/controller.go b/pkg/controllers/operator/capture/controller.go new file mode 100644 index 000000000..89d1552c0 --- /dev/null +++ b/pkg/controllers/operator/capture/controller.go @@ -0,0 +1,367 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// package capture features the retina capture controller. +package capture + +import ( + "context" + "fmt" + "reflect" + "time" + + "go.uber.org/zap" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + retinav1alpha1 "github.com/microsoft/retina/crd/api/v1alpha1" + pkgcapture "github.com/microsoft/retina/pkg/capture" + captureUtils "github.com/microsoft/retina/pkg/capture/utils" + "github.com/microsoft/retina/pkg/common/apiretry" + "github.com/microsoft/retina/pkg/config" + "github.com/microsoft/retina/pkg/log" +) + +const ( + captureFinalizer = "kappio.io/capture-cleanup" + + captureErrorReasonExceedJobNumLimit = "ExceedJobNumLimit" + captureErrorReasonFindSecretFailed = "FindSecretFailed" + captureErrorReasonOthers = "OtherError" + captureErrorReasonCreateJobFailed = "CreateSecretFailed" + captureErrorReasonRunJobFailed = "RunJobFailed" + + captureInPogressReason = "JobsInProgress" + captureCompleteReason = "JobsCompleted" + captureInPogressMessage = "%d/%d Capture jobs are in progress, waiting for completion" + captureFailedJobFailedMessage = "%d Capture jobs are in failed state" + captureCompleteMessage = "All %d Capture jobs are completed" +) + +// CaptureReconciler reconciles a Capture object +type CaptureReconciler struct { + client.Client + scheme *runtime.Scheme + + logger *log.ZapLogger + + captureToPodTranslator *pkgcapture.CaptureToPodTranslator +} + +func NewCaptureReconciler(client client.Client, scheme *runtime.Scheme, kubeClient kubernetes.Interface, config config.CaptureConfig) *CaptureReconciler { + cr := &CaptureReconciler{ + Client: client, + scheme: scheme, + logger: log.Logger().Named("Capture"), + } + + cr.captureToPodTranslator = pkgcapture.NewCaptureToPodTranslator(kubeClient, cr.logger, config) + + return cr +} + +//+kubebuilder:rbac:groups=retina.io,resources=captures,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=retina.io,resources=captures/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=retina.io,resources=captures/finalizers,verbs=update +//+kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=batch,resources=jobs/status,verbs=get +//+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list +//+kubebuilder:rbac:groups="",resources=nodes,verbs=get;list +//+kubebuilder:rbac:groups="",resources=pods,verbs=get;list +//+kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. 
+// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.1/pkg/reconcile +func (cr *CaptureReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + capture := retinav1alpha1.Capture{} + captureRef := types.NamespacedName{ + Namespace: req.Namespace, + Name: req.Name, + } + + startTime := time.Now() + cr.logger.Info("Reconciliation starts", zap.String("Capture", captureRef.String())) + + defer func() { + latency := time.Since(startTime).String() + cr.logger.Info("Reconciliation ends", zap.String("Capture", captureRef.String()), zap.String("latency", latency)) + }() + + if err := cr.Get(ctx, captureRef, &capture); err != nil { + cr.logger.Error("Failed to get Capture", zap.Error(err), zap.String("Capture", captureRef.String())) + // We'll ignore not-found errors, since they can't be fixed by an immediate + // requeue (we'll need to wait for a new notification), and we can get them + // on deleted requests. + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + if capture.ObjectMeta.DeletionTimestamp != nil { + return cr.handleDelete(ctx, &capture) + } + + // Register finalizer + if !controllerutil.ContainsFinalizer(&capture, captureFinalizer) { + controllerutil.AddFinalizer(&capture, captureFinalizer) + if err := cr.Client.Update(ctx, &capture); err != nil { + cr.logger.Error("Failed to add capture finalizer", zap.Error(err), zap.String("Capture", captureRef.String())) + return ctrl.Result{Requeue: true}, err + } + } + + return cr.handleUpdate(ctx, &capture) +} + +// Capture status condition types are mutually exclusive, and there can be only one condition in given time. +func (cr *CaptureReconciler) updateCaptureStatusFromJobs(ctx context.Context, capture *retinav1alpha1.Capture, captureJobs []batchv1.Job) (ctrl.Result, error) { + captureRef := types.NamespacedName{ + Namespace: capture.Namespace, + Name: capture.Name, + } + + isJobFinished := func(job *batchv1.Job) (bool, batchv1.JobConditionType) { + for _, c := range job.Status.Conditions { + if (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed) && c.Status == corev1.ConditionTrue { + return true, c.Type + } + } + + return false, "" + } + + // update status of the capture depending on the status of the jobs + // if any job failed, the capture is failed + // if all jobs succeeded, the capture is succeeded + var activeJobs []*batchv1.Job + var successfulJobs []*batchv1.Job + var failedJobs []*batchv1.Job + for i, job := range captureJobs { + _, finishedType := isJobFinished(&job) + switch finishedType { + case "": // ongoing + activeJobs = append(activeJobs, &captureJobs[i]) + case batchv1.JobFailed: + failedJobs = append(failedJobs, &captureJobs[i]) + case batchv1.JobComplete: + successfulJobs = append(successfulJobs, &captureJobs[i]) + } + } + + capture.Status.Active = int32(len(activeJobs)) + capture.Status.Failed = int32(len(failedJobs)) + capture.Status.Succeeded = int32(len(successfulJobs)) + // Once we detect jobs are in failed state, we'll update the status of the Capture to error, meanwhile we keep + // updating the status of the Capture to inProgress if there are still active jobs. 
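+ // In other words, a failed Job flips the CaptureError condition to true, while CaptureComplete only becomes true once every Job has succeeded.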
+ if len(failedJobs) != 0 { + cr.logger.Error("Failed to run the Capture job", zap.String("Capture", captureRef.String())) + + meta.SetStatusCondition(&capture.Status.Conditions, metav1.Condition{ + Type: string(retinav1alpha1.CaptureError), + Status: metav1.ConditionTrue, + Reason: captureErrorReasonRunJobFailed, + Message: fmt.Sprintf(captureFailedJobFailedMessage, len(failedJobs)), + }) + } + // Update Capture inProgress status if there are still active jobs. + if len(successfulJobs) != len(captureJobs) { + meta.SetStatusCondition(&capture.Status.Conditions, metav1.Condition{ + Type: string(retinav1alpha1.CaptureComplete), + Status: metav1.ConditionFalse, + Reason: captureInPogressReason, + Message: fmt.Sprintf(captureInPogressMessage, len(activeJobs), len(captureJobs)), + }) + + return cr.updateStatus(ctx, capture) + } + + // Update status of Capture to complete when all jobs are completed. + meta.SetStatusCondition(&capture.Status.Conditions, metav1.Condition{ + Type: string(retinav1alpha1.CaptureComplete), + Status: metav1.ConditionTrue, + Reason: captureCompleteReason, + Message: fmt.Sprintf(captureCompleteMessage, len(successfulJobs)), + }) + + lastCompleteTime := metav1.Now() + for _, job := range captureJobs { + if job.Status.CompletionTime != nil && job.Status.CompletionTime.After(lastCompleteTime.Time) { + lastCompleteTime = *job.Status.CompletionTime + } + } + capture.Status.CompletionTime = &lastCompleteTime + return cr.updateStatus(ctx, capture) +} + +func (cr *CaptureReconciler) createJobsFromCapture(ctx context.Context, capture *retinav1alpha1.Capture) (ctrl.Result, error) { + captureRef := types.NamespacedName{ + Namespace: capture.Namespace, + Name: capture.Name, + } + + jobs, err := cr.captureToPodTranslator.TranslateCaptureToJobs(capture) + if err != nil { + cr.logger.Error("Failed to translate Capture to jobs", zap.Error(err), zap.String("Capture", captureRef.String())) + var errorReason string + switch err.(type) { + case pkgcapture.CaptureJobNumExceedLimitError: + errorReason = captureErrorReasonExceedJobNumLimit + cr.logger.Error("Job number exceed limited", zap.Error(err), zap.String("Capture", captureRef.String())) + case pkgcapture.SecretNotFoundError: + errorReason = captureErrorReasonFindSecretFailed + cr.logger.Error("Failed to find Capture secret", zap.Error(err), zap.String("Capture", captureRef.String())) + default: + errorReason = captureErrorReasonOthers + cr.logger.Error("Failed to translate Capture to jobs", zap.Error(err), zap.String("Capture", captureRef.String())) + } + // Update status of capture to error + meta.SetStatusCondition(&capture.Status.Conditions, metav1.Condition{ + Type: string(retinav1alpha1.CaptureError), + Status: metav1.ConditionTrue, + Reason: errorReason, + Message: err.Error(), + }) + + return cr.updateStatus(ctx, capture) + } + + for _, job := range jobs { + if opRet, err := controllerutil.CreateOrUpdate(ctx, cr.Client, job, func() error { + // Set capture as the owner of the above job + if err := controllerutil.SetControllerReference(capture, job, cr.scheme); err != nil { + return err + } + return nil + }); err != nil { + // TODO(mainred): should we delete the created jobs when creating job failed? 
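+ // For now the failure is surfaced through the Capture's error condition rather than by rolling back jobs that were already created.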
+ cr.logger.Error("Failed to create Capture job", zap.Error(err), zap.String("Capture", captureRef.String()), zap.String("operation result", string(opRet))) + // Update status of Capture to error + meta.SetStatusCondition(&capture.Status.Conditions, metav1.Condition{ + Type: string(retinav1alpha1.CaptureError), + Status: metav1.ConditionTrue, + Reason: captureErrorReasonCreateJobFailed, + Message: fmt.Sprintf("Failed to create Capture job %s/%s", job.Name, job.Namespace), + }) + + return cr.updateStatus(ctx, capture) + } + cr.logger.Info("Capture job is created", zap.String("namespace", capture.Namespace), zap.String("Capture job", job.Name)) + } + + // Update the status of Capture to inProgress after all jobs are created successfully. + meta.SetStatusCondition(&capture.Status.Conditions, metav1.Condition{ + Type: string(retinav1alpha1.CaptureComplete), + Status: metav1.ConditionFalse, + Reason: captureInPogressReason, + Message: fmt.Sprintf(captureInPogressMessage, len(jobs), len(jobs)), + }) + + return cr.updateStatus(ctx, capture) +} + +// handleUpdate creates the capture jobs if not found, otherwise update the status of the capture when jobs' status change +func (cr *CaptureReconciler) handleUpdate(ctx context.Context, capture *retinav1alpha1.Capture) (ctrl.Result, error) { + captureRef := types.NamespacedName{ + Namespace: capture.Namespace, + Name: capture.Name, + } + + // create resources if not found + captureJobList := &batchv1.JobList{} + if err := apiretry.Do( + func() error { + return cr.Client.List(ctx, captureJobList, client.InNamespace(capture.Namespace), client.MatchingLabels(captureUtils.GetJobLabelsFromCaptureName(capture.Name))) + }, + ); err != nil { + cr.logger.Error("Failed to list Capture jobs", zap.Error(err), zap.String("Capture", captureRef.String())) + return ctrl.Result{}, err + } + + // Once the jobs are created, we'll update the status of the Capture according to the status of the jobs. 
+ if len(captureJobList.Items) != 0 { + return cr.updateCaptureStatusFromJobs(ctx, capture, captureJobList.Items) + } + + return cr.createJobsFromCapture(ctx, capture) +} + +func (cr *CaptureReconciler) handleDelete(ctx context.Context, capture *retinav1alpha1.Capture) (ctrl.Result, error) { + captureRef := types.NamespacedName{ + Namespace: capture.Namespace, + Name: capture.Name, + } + // The capture is being deleted + if !controllerutil.ContainsFinalizer(capture, captureFinalizer) { + cr.logger.Info("Capture is being deleted", zap.String("Capture", captureRef.String())) + return ctrl.Result{}, nil + } + + cr.logger.Info("Removing Capture", zap.String("Capture", captureRef.String())) + + deletePropagationBackground := metav1.DeletePropagationBackground + if err := apiretry.Do( + func() error { + return cr.Client.DeleteAllOf(ctx, &batchv1.Job{}, client.InNamespace(capture.Namespace), &client.DeleteAllOfOptions{ + ListOptions: client.ListOptions{ + LabelSelector: labels.SelectorFromSet(labels.Set(captureUtils.GetJobLabelsFromCaptureName(capture.Name))), + }, + DeleteOptions: client.DeleteOptions{ + PropagationPolicy: &deletePropagationBackground, + }, + }) + }, + ); err != nil { + cr.logger.Error("Failed to delete Capture jobs", zap.Error(err), zap.String("Capture", captureRef.String())) + return ctrl.Result{}, err + } + + controllerutil.RemoveFinalizer(capture, captureFinalizer) + if err := cr.Client.Update(ctx, capture); err != nil { + cr.logger.Error("Failed to remove Capture finalizer", zap.Error(err), zap.String("Capture", captureRef.String())) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (cr *CaptureReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&retinav1alpha1.Capture{}). + Owns(&batchv1.Job{}). // Once the job owned by capture is created /deleted/updated, the capture will be reconciled. + Complete(cr) +} + +// get latest version of the capture before updating its status to avoid update conflicts +// reference: https://github.com/operator-framework/operator-sdk/issues/3968 +func (cr *CaptureReconciler) updateStatus(ctx context.Context, capture *retinav1alpha1.Capture) (ctrl.Result, error) { + captureRef := types.NamespacedName{ + Namespace: capture.Namespace, + Name: capture.Name, + } + + latestCapture := &retinav1alpha1.Capture{} + if err := cr.Client.Get(ctx, captureRef, latestCapture); err != nil { + cr.logger.Error("Failed to get Capture", zap.Error(err), zap.String("Capture", captureRef.String())) + return ctrl.Result{}, err + } + if reflect.DeepEqual(capture.Status, latestCapture.Status) { + return ctrl.Result{}, nil + } + latestCapture.Status = capture.Status + capture = latestCapture + if err := cr.Client.Status().Update(ctx, latestCapture); err != nil { + cr.logger.Error("Failed to update status of Capture", zap.Error(err), zap.String("Capture", captureRef.String())) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil +} diff --git a/pkg/controllers/operator/capture/controller_test.go b/pkg/controllers/operator/capture/controller_test.go new file mode 100644 index 000000000..310b62c0c --- /dev/null +++ b/pkg/controllers/operator/capture/controller_test.go @@ -0,0 +1,367 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +//go:build integration +// +build integration + +package capture + +import ( + "context" + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + utilrand "k8s.io/apimachinery/pkg/util/rand" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + retinav1alpha1 "github.com/microsoft/retina/crd/api/v1alpha1" + captureConstants "github.com/microsoft/retina/pkg/capture/constants" + captureUtils "github.com/microsoft/retina/pkg/capture/utils" +) + +var _ = Describe("Test Retina Capture Controller", func() { + const ( + timeout = time.Second * 10 + interval = time.Second * 1 + ) + var ( + node *corev1.Node + testNamespace string + captureName string + captureRef types.NamespacedName + ns corev1.Namespace + ) + BeforeEach(func() { + By("Creating a node with label 'kubernetes.io/role: agent'") + node = &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node1", + Labels: map[string]string{ + "kubernetes.io/role": "agent", + }, + }, + } + Expect(k8sClient.Create(context.Background(), node)).Should(Succeed()) + }) + + AfterEach(func() { + By("Deleting the node") + Expect(k8sClient.Delete(ctx, node)).Should(Succeed()) + }) + + Context("Capture is created successfully", func() { + BeforeEach(func() { + testNamespace = fmt.Sprintf("test-capture-%s", utilrand.String(5)) + By("Creating test namespace") + ns = corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: testNamespace, + }, + } + Expect(k8sClient.Create(ctx, &ns)).Should(Succeed()) + + captureName = "test-capture" + captureRef = types.NamespacedName{Name: captureName, Namespace: testNamespace} + }) + + AfterEach(func() { + By("Deleting test namespace") + Expect(k8sClient.Delete(ctx, &ns)).Should(Succeed()) + }) + + It("Should create capture successfully when using Blob SAS URL", func() { + By("Creating a Capture secret") + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: testNamespace, + }, + Type: corev1.SecretTypeOpaque, + Data: map[string][]byte{ + captureConstants.CaptureOutputLocationBlobUploadSecretKey: []byte("blob sas url"), + }, + } + Expect(k8sClient.Create(ctx, secret)).Should(Succeed()) + + By("Creating a new Capture") + hostPath := "/mnt/azure" + ctx := context.Background() + capture := &retinav1alpha1.Capture{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-capture", + Namespace: testNamespace, + }, + Spec: retinav1alpha1.CaptureSpec{ + CaptureConfiguration: retinav1alpha1.CaptureConfiguration{ + CaptureTarget: retinav1alpha1.CaptureTarget{ + NodeSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "kubernetes.io/role": "agent", + }, + }, + }, + CaptureOption: retinav1alpha1.CaptureOption{ + Duration: time.Minute * 1, + MaxCaptureSize: 100, + }, + }, + OutputConfiguration: retinav1alpha1.OutputConfiguration{ + HostPath: &hostPath, + BlobUpload: &secret.Name, + }, + }, + } + Expect(k8sClient.Create(ctx, capture)).Should(Succeed()) + + createdCapture := &retinav1alpha1.Capture{} + + By("Checking if the Capture was successfully created") + Eventually(func() bool { + if err := k8sClient.Get(ctx, captureRef, createdCapture); err != nil { + return false + } + return controllerutil.ContainsFinalizer(createdCapture, captureFinalizer) + }, timeout, interval).Should(BeTrue()) + + By("Checking the Capture is inProgress") + Eventually(func() bool { + Expect(k8sClient.Get(ctx, captureRef, createdCapture)).Should(Succeed()) + for _, condition := 
range createdCapture.Status.Conditions { + if condition.Type == string(retinav1alpha1.CaptureComplete) && condition.Status == metav1.ConditionFalse { + return true + } + } + return false + }, timeout, interval).Should(BeTrue()) + Expect(createdCapture.Status.Active).Should(Equal(int32(1))) + Expect(createdCapture.Status.Failed).Should(Equal(int32(0))) + Expect(createdCapture.Status.Succeeded).Should(Equal(int32(0))) + + By("Updating job status to completed") + jobList := &batchv1.JobList{} + Expect(k8sClient.List(ctx, jobList, client.InNamespace(testNamespace), client.MatchingLabels(captureUtils.GetJobLabelsFromCaptureName(capture.Name)))).Should(Succeed()) + Expect(len(jobList.Items) > 0).Should(BeTrue()) + for _, job := range jobList.Items { + job.Status.Conditions = []batchv1.JobCondition{ + { + Type: batchv1.JobComplete, + Status: corev1.ConditionTrue, + }, + } + Expect(k8sClient.Status().Update(ctx, &job)).Should(Succeed()) + } + + By("Waiting for capture's status to complete") + Eventually(func() bool { + if err := k8sClient.Get(ctx, captureRef, createdCapture); err != nil { + return false + } + for _, condition := range createdCapture.Status.Conditions { + if condition.Type == string(retinav1alpha1.CaptureComplete) && condition.Status == metav1.ConditionTrue { + return true + } + } + return false + }, timeout, interval).Should(BeTrue()) + Expect(createdCapture.Status.Active).Should(Equal(int32(0))) + Expect(createdCapture.Status.Failed).Should(Equal(int32(0))) + Expect(createdCapture.Status.Succeeded).Should(Equal(int32(1))) + Expect(createdCapture.Status.CompletionTime).ShouldNot(BeNil()) + + By("Deleting the Capture") + Expect(k8sClient.Delete(ctx, createdCapture)).Should(Succeed()) + + By("Checking if jobs created has been deleted") + Eventually(func() bool { + Expect(k8sClient.List(ctx, jobList, client.InNamespace(testNamespace), client.MatchingLabels(captureUtils.GetJobLabelsFromCaptureName(capture.Name)))).Should(Succeed()) + return len(jobList.Items) == 0 + }, timeout, interval).Should(BeTrue()) + + By("Checking if the Capture was successfully deleted") + Eventually(func() bool { + return apierrors.IsNotFound(k8sClient.Get(ctx, captureRef, createdCapture)) + }, timeout, interval).Should(BeTrue()) + }) + }) + Context("Capture is created with error", func() { + var createdCapture *retinav1alpha1.Capture + + BeforeEach(func() { + createdCapture = &retinav1alpha1.Capture{} + + testNamespace = fmt.Sprintf("test-capture-%s", utilrand.String(5)) + By("Creating test namespace") + ns = corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: testNamespace, + }, + } + Expect(k8sClient.Create(ctx, &ns)).Should(Succeed()) + + captureName = "test-capture" + captureRef = types.NamespacedName{Name: captureName, Namespace: testNamespace} + }) + + AfterEach(func() { + By("Deleting the Capture") + Expect(k8sClient.Delete(ctx, createdCapture)).Should(Succeed()) + + jobList := &batchv1.JobList{} + By("Checking if jobs created has been deleted") + Eventually(func() bool { + Expect(k8sClient.List(ctx, jobList, client.InNamespace(testNamespace), client.MatchingLabels(captureUtils.GetJobLabelsFromCaptureName(captureName)))).Should(Succeed()) + return len(jobList.Items) == 0 + }, timeout, interval).Should(BeTrue()) + + By("Checking if the Capture was successfully deleted") + Eventually(func() bool { + return apierrors.IsNotFound(k8sClient.Get(ctx, captureRef, createdCapture)) + }, timeout, interval).Should(BeTrue()) + + By("Deleting test namespace") + Expect(k8sClient.Delete(ctx, 
&ns)).Should(Succeed()) + }) + It("Should create capture with error when running job failed", func() { + By("creating a new Capture") + hostPath := "/mnt/azure" + ctx := context.Background() + capture := &retinav1alpha1.Capture{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-capture", + Namespace: testNamespace, + }, + Spec: retinav1alpha1.CaptureSpec{ + CaptureConfiguration: retinav1alpha1.CaptureConfiguration{ + CaptureTarget: retinav1alpha1.CaptureTarget{ + NodeSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "kubernetes.io/role": "agent", + }, + }, + }, + CaptureOption: retinav1alpha1.CaptureOption{ + Duration: time.Minute * 1, + MaxCaptureSize: 100, + }, + }, + OutputConfiguration: retinav1alpha1.OutputConfiguration{ + HostPath: &hostPath, + }, + }, + } + Expect(k8sClient.Create(ctx, capture)).Should(Succeed()) + + By("Checking if the Capture was successfully created") + Eventually(func() bool { + if err := k8sClient.Get(ctx, captureRef, createdCapture); err != nil { + return false + } + return controllerutil.ContainsFinalizer(createdCapture, captureFinalizer) + }, timeout, interval).Should(BeTrue()) + + By("Checking if the Capture was inProgress") + Eventually(func() bool { + if err := k8sClient.Get(ctx, captureRef, createdCapture); err != nil { + return false + } + completeCondition := metav1.Condition{} + for _, condition := range createdCapture.Status.Conditions { + if condition.Type == string(retinav1alpha1.CaptureComplete) { + completeCondition = condition + break + } + } + return completeCondition.Status == metav1.ConditionFalse + }, timeout, interval).Should(BeTrue()) + + Expect(createdCapture.Status.Active).Should(Equal(int32(1))) + Expect(createdCapture.Status.Failed).Should(Equal(int32(0))) + Expect(createdCapture.Status.Succeeded).Should(Equal(int32(0))) + + By("Updating job status to failed") + jobList := &batchv1.JobList{} + Expect(k8sClient.List(ctx, jobList, client.InNamespace(testNamespace), client.MatchingLabels(captureUtils.GetJobLabelsFromCaptureName(capture.Name)))).Should(Succeed()) + Expect(len(jobList.Items) > 0).Should(BeTrue()) + for _, job := range jobList.Items { + job.Status.Conditions = []batchv1.JobCondition{ + { + Type: batchv1.JobFailed, + Status: corev1.ConditionTrue, + }, + } + Expect(k8sClient.Status().Update(ctx, &job)).Should(Succeed()) + } + + By("Waiting for capture's status to error") + Eventually(func() bool { + if err := k8sClient.Get(ctx, captureRef, createdCapture); err != nil { + return false + } + for _, condition := range createdCapture.Status.Conditions { + if condition.Type == string(retinav1alpha1.CaptureError) && condition.Status == metav1.ConditionTrue { + return true + } + } + return false + }, timeout, interval).Should(BeTrue()) + + Expect(createdCapture.Status.Active).Should(Equal(int32(0))) + Expect(createdCapture.Status.Failed).Should(Equal(int32(1))) + Expect(createdCapture.Status.Succeeded).Should(Equal(int32(0))) + }) + + It("Should create capture with error when secret is not found", func() { + secretName := "test-secret" + + By("Creating a new Capture") + hostPath := "/mnt/azure" + ctx := context.Background() + capture := &retinav1alpha1.Capture{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-capture", + Namespace: testNamespace, + }, + Spec: retinav1alpha1.CaptureSpec{ + CaptureConfiguration: retinav1alpha1.CaptureConfiguration{ + CaptureTarget: retinav1alpha1.CaptureTarget{ + NodeSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "kubernetes.io/role": "agent", + }, + }, + }, + 
CaptureOption: retinav1alpha1.CaptureOption{ + Duration: time.Minute * 1, + MaxCaptureSize: 100, + }, + }, + OutputConfiguration: retinav1alpha1.OutputConfiguration{ + HostPath: &hostPath, + BlobUpload: &secretName, + }, + }, + } + Expect(k8sClient.Create(ctx, capture)).Should(Succeed()) + + By("Checking if the Capture was created with error") + Eventually(func() bool { + if err := k8sClient.Get(ctx, captureRef, createdCapture); err != nil { + return false + } + for _, condition := range createdCapture.Status.Conditions { + if condition.Type == string(retinav1alpha1.CaptureError) && condition.Status == metav1.ConditionTrue && condition.Reason == captureErrorReasonFindSecretFailed { + return true + } + } + return false + }, timeout, interval).Should(BeTrue()) + }) + }) +}) diff --git a/pkg/controllers/operator/capture/suite_test.go b/pkg/controllers/operator/capture/suite_test.go new file mode 100644 index 000000000..a0d81d57e --- /dev/null +++ b/pkg/controllers/operator/capture/suite_test.go @@ -0,0 +1,94 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +//go:build integration +// +build integration + +package capture + +import ( + "context" + "path/filepath" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/kubectl/pkg/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + retinav1alpha1 "github.com/microsoft/retina/crd/api/v1alpha1" + "github.com/microsoft/retina/pkg/log" +) + +var ( + cfg *rest.Config + k8sClient client.Client + testEnv *envtest.Environment + ctx context.Context + cancel context.CancelFunc +) + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Capture Controller Suite") +} + +var _ = BeforeSuite(func() { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + ctx, cancel = context.WithCancel(context.TODO()) + + By("Bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("../../../..", "deploy/manifests/controller/helm/retina/crds")}, + ErrorIfCRDPathMissing: true, + } + + var err error + cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + err = retinav1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + + // Disable metrics serving because chances are that the metric port is already in use when a new test starts to + // start metrics serving before the previous test has stopped it. 
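+ // Binding the metrics endpoint to "0" disables the listener entirely.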
+ k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme.Scheme, + MetricsBindAddress: "0", + }) + Expect(err).ToNot(HaveOccurred()) + + kubeClient, err := kubernetes.NewForConfig(k8sManager.GetConfig()) + Expect(err).ToNot(HaveOccurred()) + + captureReconciler := NewCaptureReconciler(k8sManager.GetClient(), k8sManager.GetScheme(), kubeClient, "test") + err = captureReconciler.SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + go func() { + defer GinkgoRecover() + err = k8sManager.Start(ctx) + Expect(err).ToNot(HaveOccurred(), "failed to run manager") + }() +}) + +var _ = AfterSuite(func() { + cancel() + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/pkg/controllers/operator/metricsconfiguration/metricsconfiguration_controller.go b/pkg/controllers/operator/metricsconfiguration/metricsconfiguration_controller.go new file mode 100644 index 000000000..785f83af2 --- /dev/null +++ b/pkg/controllers/operator/metricsconfiguration/metricsconfiguration_controller.go @@ -0,0 +1,123 @@ +/* +Copyright 2023. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metricsconfigurationcontroller + +import ( + "context" + "fmt" + "sync" + + "go.uber.org/zap" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + retinav1alpha1 "github.com/microsoft/retina/crd/api/v1alpha1" + validate "github.com/microsoft/retina/crd/api/v1alpha1/validations" + "github.com/microsoft/retina/pkg/log" +) + +// MetricsConfigurationReconciler reconciles a MetricsConfiguration object +type MetricsConfigurationReconciler struct { + *sync.Mutex + client.Client + Scheme *runtime.Scheme + mcCache map[string]*retinav1alpha1.MetricsConfiguration + l *log.ZapLogger +} + +func init() { + log.SetupZapLogger(&log.LogOpts{ + File: false, + }) +} + +func New(client client.Client, scheme *runtime.Scheme) *MetricsConfigurationReconciler { + return &MetricsConfigurationReconciler{ + Mutex: &sync.Mutex{}, + l: log.Logger().Named(string("metricsconfiguration-controller")), + Client: client, + Scheme: scheme, + mcCache: make(map[string]*retinav1alpha1.MetricsConfiguration), + } +} + +//+kubebuilder:rbac:groups=operator.retina.io,resources=metricsconfigurations,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=operator.retina.io,resources=metricsconfigurations/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=operator.retina.io,resources=metricsconfigurations/finalizers,verbs=update + +func (r *MetricsConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + mcc := &retinav1alpha1.MetricsConfiguration{} + if err := r.Client.Get(ctx, req.NamespacedName, mcc); err != nil { + if apierrors.IsNotFound(err) { + // Object not found, it has probably been deleted + r.l.Info("deleted", zap.String("name", req.NamespacedName.String())) + r.Lock() + _, ok := 
r.mcCache[req.NamespacedName.String()] + if ok { + delete(r.mcCache, req.NamespacedName.String()) + r.l.Info("deleted from cache", zap.String("name", req.NamespacedName.String())) + } + r.Unlock() + return ctrl.Result{}, nil + } else { + r.l.Info("error getting metricsconfiguration", zap.String("name", req.NamespacedName.String())) + return ctrl.Result{}, err + } + } + + r.l.Info("reconciled", zap.String("name", req.NamespacedName.String())) + r.Lock() + defer r.Unlock() + if len(r.mcCache) == 0 || r.mcCache[req.NamespacedName.String()] != nil { + err := validate.MetricsCRD(mcc) + if err != nil { + r.l.Error("Error validating metrics configuration", zap.Error(err)) + + mcc.Status = retinav1alpha1.MetricsStatus{ + State: retinav1alpha1.StateErrored, + Reason: fmt.Sprintf("Validation of CRD failed with: %s", err.Error()), + } + } else { + r.l.Info("metrics configuration is valid", zap.String("crd Name", mcc.Name)) + mcc.Status = retinav1alpha1.MetricsStatus{ + State: retinav1alpha1.StateAccepted, + Reason: "CRD is Accepted", + } + r.mcCache[req.NamespacedName.String()] = mcc + } + } else { + r.l.Info("Metrics Configuration is already configured on this cluster, cannot reconcile this new config.") + + mcc.Status = retinav1alpha1.MetricsStatus{ + State: retinav1alpha1.StateErrored, + Reason: "Metrics Configuration is already configured on this cluster, cannot reconcile this new config.", + } + } + + if err := r.Client.Status().Update(ctx, mcc); err != nil { + r.l.Error("Error updating metrics configuration", zap.Error(err)) + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *MetricsConfigurationReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&retinav1alpha1.MetricsConfiguration{}). + Complete(r) +} diff --git a/pkg/controllers/operator/metricsconfiguration/metricsconfiguration_controller_test.go b/pkg/controllers/operator/metricsconfiguration/metricsconfiguration_controller_test.go new file mode 100644 index 000000000..8c25c891d --- /dev/null +++ b/pkg/controllers/operator/metricsconfiguration/metricsconfiguration_controller_test.go @@ -0,0 +1,127 @@ +/* +Copyright 2023. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package metricsconfigurationcontroller + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + retinav1alpha1 "github.com/microsoft/retina/crd/api/v1alpha1" +) + +var fakescheme = runtime.NewScheme() + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(fakescheme)) + utilruntime.Must(retinav1alpha1.AddToScheme(fakescheme)) + + _ = clientgoscheme.AddToScheme(fakescheme) + fakescheme.AddKnownTypes(retinav1alpha1.GroupVersion, &retinav1alpha1.RetinaEndpoint{}) +} + +func TestMetricsConfigurationReconciler_Reconcile(t *testing.T) { + type fields struct { + existingObjects []client.Object + } + type args struct { + ctx context.Context + req ctrl.Request + } + tests := []struct { + name string + fields fields + args args + want ctrl.Result + wantErr bool + wantNil bool + }{ + { + name: "should create a new metrics configuration", + fields: fields{ + existingObjects: []client.Object{ + &retinav1alpha1.MetricsConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + }, + }, + }, + args: args{ + ctx: context.TODO(), + req: ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test", + Namespace: "default", + }, + }, + }, + wantNil: false, + }, + { + name: "should send nil to channel when metrics configuration is deleted", + fields: fields{ + existingObjects: []client.Object{}, + }, + args: args{ + ctx: context.TODO(), + req: ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test", + Namespace: "default", + }, + }, + }, + wantNil: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client := fake.NewClientBuilder().WithScheme(fakescheme).WithObjects(tt.fields.existingObjects...).Build() + + r := New(client, fakescheme) + + _, err := r.Reconcile(tt.args.ctx, tt.args.req) + if (err != nil) != tt.wantErr { + require.Error(t, err) + } + + // reactivate when this channel is used again + // verify that the pod is deleted + /* + select { + case config := <-metricsconfigchannel: + if tt.wantNil { + require.Nil(t, config.MetricsConfiguration) + } else { + require.NotNil(t, config.MetricsConfiguration) + } + + case <-time.After(5 * time.Second): + require.Errorf(t, nil, "controller didn't write to channel after allotted time") + } + */ + }) + } +} diff --git a/pkg/controllers/operator/pod/pod_controller.go b/pkg/controllers/operator/pod/pod_controller.go new file mode 100644 index 000000000..05ba45278 --- /dev/null +++ b/pkg/controllers/operator/pod/pod_controller.go @@ -0,0 +1,179 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package podcontroller + +import ( + "context" + "reflect" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8sruntime "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + controller "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + retinav1alpha1 "github.com/microsoft/retina/crd/api/v1alpha1" + "github.com/microsoft/retina/operator/cache" + "github.com/microsoft/retina/pkg/log" +) + +// arbitrary selection, but no issues so far +const MAX_RECONCILERS = 5 + +// PodReconciler reconciles a pod object +type PodReconciler struct { + client.Client + Scheme *k8sruntime.Scheme + podchannel chan<- cache.PodCacheObject + l *log.ZapLogger +} + +func init() { + log.SetupZapLogger(&log.LogOpts{ + File: false, + }) +} + +func New(client client.Client, scheme *k8sruntime.Scheme, podchannel chan<- cache.PodCacheObject) *PodReconciler { + return &PodReconciler{ + l: log.Logger().Named(string("pod-controller")), + Client: client, + Scheme: scheme, + podchannel: podchannel, + } +} + +//+kubebuilder:rbac:groups=operator.retina.io,resources=pods,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=operator.retina.io,resources=pods/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=operator.retina.io,resources=pods/finalizers,verbs=update + +func (r *PodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + pod := &corev1.Pod{} + if err := r.Client.Get(ctx, req.NamespacedName, pod); err != nil { + if apierrors.IsNotFound(err) { + + // Object not found, it has probably been deleted, forward to pod channel with nil pod + r.l.Debug("pod deleted", zap.String("name", req.NamespacedName.String())) + r.podchannel <- cache.PodCacheObject{Key: req.NamespacedName, Pod: nil} + return ctrl.Result{}, nil + } + + // Error getting object, return error + return ctrl.Result{}, err + } + + r.podchannel <- cache.PodCacheObject{Key: req.NamespacedName, Pod: pod} + r.l.Debug("pod reconciled", zap.String("name", req.NamespacedName.String())) + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *PodReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&corev1.Pod{}). + WithEventFilter(onlyRunningOrDeletedPredicate()). + WithOptions(controller.Options{ + // TODO: Remove comment when CR is upgraded. + // MaxConcurrentReconciles: MAX_RECONCILERS, + }). 
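+ // The event filter above drops host-network pods and any create/update that does not touch the fields mirrored into RetinaEndpoints (IPs, labels, annotations, owner references, containers), which keeps reconcile volume down.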
+ Complete(r) +} + +// Ignore noisy pod create pod events, only filter if created event and pod is running +func filterPodCreateEvents(objMeta metav1.Object) bool { + if pod, ok := objMeta.(*v1.Pod); ok { + if pod.Spec.HostNetwork { + return false + } + + if pod.Status.Phase == v1.PodRunning || pod.DeletionTimestamp != nil { + return true + } + + if pod.Status.PodIP != "" { + return false + } + } + return false +} + +// Ignore noisy pod update events, only filter if pod ip, pod ips, pod labels, or owner references update +func filterPodUpdateEvents(old metav1.Object, new metav1.Object) bool { + oldpod, oldok := old.(*v1.Pod) + newpod, newok := new.(*v1.Pod) + + // we only care if update is for pod ip or pod ips, and pod labels update + if oldok && newok { + if newpod.Spec.HostNetwork { + return false + } + + if newpod.Status.PodIP != oldpod.Status.PodIP { + return true + } + + if !reflect.DeepEqual(newpod.Status.PodIPs, oldpod.Status.PodIPs) { + return true + } + + if !reflect.DeepEqual(newpod.OwnerReferences, oldpod.OwnerReferences) { + return true + } + + if !reflect.DeepEqual(newpod.Labels, oldpod.Labels) { + return true + } + + if !reflect.DeepEqual(newpod.Annotations, oldpod.Annotations) { + return true + } + + if newpod.Status.PodIP != "" && len(newpod.Status.PodIPs) > 0 && !reflect.DeepEqual(getContainers(oldpod), getContainers(newpod)) { + return true + } + } + return false +} + +func getContainers(pod *v1.Pod) []retinav1alpha1.RetinaEndpointStatusContainers { + containers := []retinav1alpha1.RetinaEndpointStatusContainers{} + for _, container := range pod.Status.ContainerStatuses { + containers = append(containers, retinav1alpha1.RetinaEndpointStatusContainers{ + Name: container.Name, + ID: container.ContainerID, + }) + } + return containers +} + +func onlyRunningOrDeletedPredicate() predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return filterPodCreateEvents(e.Object) + }, + UpdateFunc: func(e event.UpdateEvent) bool { + return filterPodUpdateEvents(e.ObjectOld, e.ObjectNew) + }, + } +} diff --git a/pkg/controllers/operator/pod/pod_controller_test.go b/pkg/controllers/operator/pod/pod_controller_test.go new file mode 100644 index 000000000..211877976 --- /dev/null +++ b/pkg/controllers/operator/pod/pod_controller_test.go @@ -0,0 +1,441 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package podcontroller + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + "gotest.tools/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + retinav1alpha1 "github.com/microsoft/retina/crd/api/v1alpha1" + "github.com/microsoft/retina/operator/cache" +) + +var fakescheme = runtime.NewScheme() + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(fakescheme)) + utilruntime.Must(retinav1alpha1.AddToScheme(fakescheme)) + + _ = clientgoscheme.AddToScheme(fakescheme) + fakescheme.AddKnownTypes(retinav1alpha1.GroupVersion, &retinav1alpha1.RetinaEndpoint{}) +} + +func TestPodReconciler_Reconcile(t *testing.T) { + type fields struct { + existingObjects []client.Object + } + type args struct { + ctx context.Context + req ctrl.Request + } + tests := []struct { + name string + fields fields + args args + want ctrl.Result + wantErr bool + wantNil bool + }{ + { + name: "test if pod is created", + fields: fields{ + existingObjects: []client.Object{ + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-ns", + }, + }, + }, + }, + args: args{ + ctx: context.TODO(), + req: ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test-pod", + Namespace: "test-ns", + }, + }, + }, + wantNil: false, + }, + { + name: "test if pod is deleted", + fields: fields{ + existingObjects: []client.Object{}, + }, + args: args{ + ctx: context.TODO(), + req: ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test-pod", + Namespace: "test-ns", + }, + }, + }, + wantNil: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client := fake.NewClientBuilder().WithScheme(fakescheme).WithObjects(tt.fields.existingObjects...).Build() + podchannel := make(chan cache.PodCacheObject, 10) + + r := New(client, fakescheme, podchannel) + + got, err := r.Reconcile(tt.args.ctx, tt.args.req) + if (err != nil) != tt.wantErr { + require.Error(t, err) + } + require.Equal(t, got, tt.want) + + // verify that the pod is deleted + select { + case pod := <-podchannel: + if tt.wantNil { + require.Nil(t, pod.Pod) + } else { + require.NotNil(t, pod.Pod) + } + + case <-time.After(5 * time.Second): + require.Errorf(t, nil, "pod controller didn't write to channel after allotted time") + } + }) + } +} + +func Test_isUpdatedPod(t *testing.T) { + type args struct { + old metav1.Object + new metav1.Object + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "test if pod not is updated", + args: args{ + old: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-ns", + }, + Status: corev1.PodStatus{ + PodIP: "10.0.0.1", + }, + }, + new: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-ns", + }, + Status: corev1.PodStatus{ + PodIP: "10.0.0.1", + }, + }, + }, + want: false, + }, + { + name: "test if pod is updated with single IP", + args: args{ + old: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-ns", + }, + Status: corev1.PodStatus{ + PodIP: "10.0.0.1", + }, + }, + new: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: 
"test-ns", + }, + Status: corev1.PodStatus{ + PodIP: "10.0.0.2", + }, + }, + }, + want: true, + }, + { + name: "test if pod is updated with IP slice", + args: args{ + old: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-ns", + }, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + { + IP: "10.0.0.1", + }, + }, + }, + }, + new: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-ns", + }, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{ + { + IP: "10.0.0.2", + }, + }, + }, + }, + }, + want: true, + }, + { + name: "test if pod is updated with labels", + args: args{ + old: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-ns", + Labels: map[string]string{ + "test": "test", + }, + }, + }, + new: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-ns", + Labels: map[string]string{ + "test": "test2", + }, + }, + }, + }, + want: true, + }, + { + name: "test if pod is updated with annotations", + args: args{ + old: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-ns", + Annotations: map[string]string{ + "test": "test", + }, + }, + }, + new: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-ns", + Annotations: map[string]string{ + "test": "test2", + }, + }, + }, + }, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := filterPodUpdateEvents(tt.args.old, tt.args.new); got != tt.want { + t.Errorf("isUpdatedPod() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestFilterPodCreateEvents(t *testing.T) { + type args struct { + objMeta metav1.Object + } + tests := []struct { + name string + args args + want bool + }{ + // TODO: Add test cases. 
+ { + name: "test if pod is scheduled but not running", + args: args{ + objMeta: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-ns", + }, + Status: corev1.PodStatus{ + Phase: corev1.PodPhase(corev1.PodScheduled), + }, + }, + }, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := filterPodCreateEvents(tt.args.objMeta); got != tt.want { + t.Errorf("filterPodCreateEvents() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestFilterPodUpdateEvents(t *testing.T) { + type args struct { + old metav1.Object + new metav1.Object + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "test host network pod", + args: args{ + old: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-ns", + }, + Spec: corev1.PodSpec{ + HostNetwork: true, + }, + }, + new: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-ns", + }, + }, + }, + want: false, + }, + { + name: "test if pod not is updated", + args: args{ + old: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-ns", + }, + Status: corev1.PodStatus{ + PodIP: "10.0.0.41", + }, + }, + new: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-ns", + }, + Status: corev1.PodStatus{ + PodIP: "10.0.0.4", + }, + }, + }, + want: true, + }, + { + name: "test if pod is updated with single IP", + args: args{ + old: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-ns", + }, + Status: corev1.PodStatus{ + PodIP: "10.0.0.0", + }, + }, + new: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-ns", + }, + Status: corev1.PodStatus{ + PodIP: "10.0.0.1", + }, + }, + }, + want: true, + }, + } + for _, tt := range tests { + if got := filterPodUpdateEvents(tt.args.old, tt.args.new); got != tt.want { + t.Errorf("filterPodUpdateEvents() = %v, want %v", got, tt.want) + } + } +} + +func TestGetContainers(t *testing.T) { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "test-ns", + }, + Status: corev1.PodStatus{ + PodIP: "10.0.0.0", + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: "test-container", + ContainerID: "123", + }, + { + Name: "test-container2", + ContainerID: "1234", + }, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test-container", + }, + }, + }, + } + + kesc := getContainers(pod) + + assert.Equal(t, 2, len(kesc), "should have 2 containers") + assert.Equal(t, "test-container", kesc[0].Name, "should have test-container") + + assert.Equal(t, "123", kesc[0].ID, "should have test-container") + assert.Equal(t, "test-container2", kesc[1].Name, "should have test-container2") + assert.Equal(t, "1234", kesc[1].ID, "should have test-container") +} diff --git a/pkg/controllers/operator/retinaendpoint/retinaendpoint_controller.go b/pkg/controllers/operator/retinaendpoint/retinaendpoint_controller.go new file mode 100644 index 000000000..d8b08724b --- /dev/null +++ b/pkg/controllers/operator/retinaendpoint/retinaendpoint_controller.go @@ -0,0 +1,228 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package retinaendpointcontroller + +import ( + "context" + "fmt" + "sync" + "time" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + errors "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + retinav1alpha1 "github.com/microsoft/retina/crd/api/v1alpha1" + "github.com/microsoft/retina/operator/cache" + "github.com/microsoft/retina/pkg/log" +) + +const ( + REQUEST_TIMEOUT = 15 * time.Second + MAX_RETRIES = 5 +) + +// RetinaEndpointReconciler managed the lifecycle of RetinaEndpoints from Pods. +type RetinaEndpointReconciler struct { + client.Client + podchannel chan cache.PodCacheObject + l *log.ZapLogger + rtmu sync.RWMutex + retries map[types.NamespacedName]int +} + +func init() { + log.SetupZapLogger(&log.LogOpts{ + File: false, + }) +} + +func New(client client.Client, podchannel chan cache.PodCacheObject) *RetinaEndpointReconciler { + return &RetinaEndpointReconciler{ + Client: client, + podchannel: podchannel, + retries: make(map[types.NamespacedName]int), + l: log.Logger().Named(string("retina-endpoint-controller")), + } +} + +// NOTE(mainrerd): Chances are that pod cache channel lost pods events during controller manager restart, we need to +// have full-set reconciliation to make sure all RetinaEndpoints are reconciled to Pods. +// when a pod reaches here, it indicates that there is a metricsconfiguration that references it, +// or doesn't and a RetinaEndpoint needs to be created or updated. +// This is a blocking function, and will wait on the pod channel until a pod is received. +func (r *RetinaEndpointReconciler) ReconcilePod(pctx context.Context) { + r.l.Info("Start to reconcile Pods for RetinaEndpoints") + for { + select { + case pod := <-r.podchannel: + if pod.Pod != nil && pod.Pod.Spec.HostNetwork { + r.l.Debug("pod is host networked, skipping", zap.String("name", pod.Key.String())) + continue + } + r.l.Debug("reconcileRetinaEndpointFromPod", zap.String("name", pod.Key.String())) + ctx, cancel := context.WithTimeout(pctx, REQUEST_TIMEOUT) + err := r.reconcileRetinaEndpointFromPod(ctx, pod) + if err != nil { + r.l.Error("error creating retina endpoint", zap.Error(err), zap.String("name", pod.Key.String())) + r.l.Info("requeuing pod", zap.String("name", pod.Key.String())) + r.requeuePodToRetinaEndpoint(ctx, pod) + } + cancel() + case <-pctx.Done(): + r.l.Info("Stop reconciling Pods for RetinaEndpoints") + return + } + } +} + +// requeuePodToRetinaEndpoint is called when a pod is received from the pod channel, and it will writeback to the +// pod channel if there is an error +func (r *RetinaEndpointReconciler) requeuePodToRetinaEndpoint(ctx context.Context, pod cache.PodCacheObject) { + r.rtmu.RLock() + value, exists := r.retries[pod.Key] + r.rtmu.RUnlock() + if exists && value >= MAX_RETRIES { + r.l.Error("max retries reached, not retrying", zap.String("name", pod.Key.String())) + delete(r.retries, pod.Key) + return + } + r.rtmu.Lock() + r.retries[pod.Key]++ + value = r.retries[pod.Key] + r.rtmu.Unlock() + select { + case r.podchannel <- pod: + r.l.Debug(fmt.Sprintf("starting retry %d", value), zap.String("name", pod.Key.String())) + case <-ctx.Done(): + r.l.Error(ctx.Err().Error(), zap.String("name", pod.Key.String())) + } +} + +// reconcileRetinaEndpointFromPod create/update or delete a RetinaEndpoint based on the existence of its corresponding Pod. 
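+// A nil Pod deletes any existing RetinaEndpoint, a Pod that is not yet running is skipped, and a running Pod results in the RetinaEndpoint being created or patched with the Pod's current IPs, containers, labels, annotations and owner references.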
+func (r *RetinaEndpointReconciler) reconcileRetinaEndpointFromPod(ctx context.Context, pod cache.PodCacheObject) error { + // Delete the RetinaEndpoint if the Pod from the cache is nil. + // it means that the pod either has been deleted, or no longer matches a configuration, + if pod.Pod == nil { + // first check if a retina endpoint exists with the same key + got := &retinav1alpha1.RetinaEndpoint{} + err := r.Client.Get(ctx, pod.Key, got, &client.GetOptions{}) + if err != nil && !errors.IsNotFound(err) { + r.l.Error("Failed to get RetinaEndpoint from the Pod", zap.Error(err), zap.String("name", pod.Key.String())) + return err + } else if errors.IsNotFound(err) { + // we don't want a retina endpoint to exist if there is no pod, and in this case there is no pod, + // so we don't need to do anything + r.l.Debug("RetinaEndpoint is not found, no delete is required", zap.String("name", pod.Key.String())) + return nil + } else { + r.l.Info("deleting RetinaEndpoint", zap.String("name", pod.Key.String())) + // if there is a retina endpoint, then we need to delete it + err = r.Client.Delete(ctx, got) + if err != nil { + r.l.Error("error deleting RetinaEndpoint", zap.Error(err), zap.String("name", pod.Key.String())) + return err + } + } + return nil + } + + // Create Or Update the RetinaEndpoint from a running Pod. + + if pod.Pod.Status.Phase != corev1.PodRunning { + r.l.Debug("pod is not running, skipping", zap.String("name", pod.Key.String()), zap.String("pod phase", string(pod.Pod.Status.Phase))) + return nil + } + + containers := []retinav1alpha1.RetinaEndpointStatusContainers{} + for _, container := range pod.Pod.Status.ContainerStatuses { + containers = append(containers, retinav1alpha1.RetinaEndpointStatusContainers{ + Name: container.Name, + ID: container.ContainerID, + }) + } + + ips := []string{} + for _, ip := range pod.Pod.Status.PodIPs { + ips = append(ips, ip.IP) + } + + refs := []retinav1alpha1.OwnerReference{} + for _, ref := range pod.Pod.OwnerReferences { + refs = append(refs, retinav1alpha1.OwnerReference{ + APIVersion: ref.APIVersion, + Kind: ref.Kind, + Name: ref.Name, + }) + } + + // check to see if a RetinaEndpoint exists with the same key + got := &retinav1alpha1.RetinaEndpoint{} + err := r.Client.Get(ctx, pod.Key, got, &client.GetOptions{}) + + // if there is an error getting the retina endpoint, and the error is not that the retina endpoint can't be found + if err != nil && !errors.IsNotFound(err) { + r.l.Error("Failed to get RetinaEndpoint from the Pod", zap.Error(err), zap.String("name", pod.Key.String())) + return err + } + + if errors.IsNotFound(err) { + if pod.Pod.Status.PodIP == "" || pod.Pod.Status.HostIP == "" { + r.l.Debug("pod has no podIP or hostIP, skipping", zap.String("name", pod.Key.String())) + return nil + } + + // since the retina endpoint doesn't exist, then we need to create it + new := &retinav1alpha1.RetinaEndpoint{ + ObjectMeta: v1.ObjectMeta{ + Name: pod.Key.Name, + Namespace: pod.Key.Namespace, + }, + TypeMeta: v1.TypeMeta{ + Kind: "RetinaEndpoint", + APIVersion: "v1alpha1", + }, + Spec: retinav1alpha1.RetinaEndpointSpec{ + PodIP: pod.Pod.Status.PodIP, + PodIPs: ips, + Containers: containers, + NodeIP: pod.Pod.Status.HostIP, + Labels: pod.Pod.Labels, + Annotations: pod.Pod.Annotations, + OwnerReferences: refs, + }, + } + + r.l.Info("creating RetinaEndpoint", zap.String("name", pod.Key.String())) + if err := r.Client.Create(ctx, new); err != nil { + r.l.Error(err.Error(), zap.String("name", pod.Key.String())) + return err + } + return nil + } + + // 
if the retina endpoint exists, then we need to update it, but only on running pods. + // If terminating or pending, we don't care. On delete we'll recnil pods, and we'll delete the retina endpoint + // by creating a deep copy of the retinaendpoint we got, and patching the spec + new := got.DeepCopy() + new.Spec = retinav1alpha1.RetinaEndpointSpec{ + PodIP: pod.Pod.Status.PodIP, + PodIPs: ips, + Containers: containers, + NodeIP: pod.Pod.Status.HostIP, + Labels: pod.Pod.Labels, + Annotations: pod.Pod.Annotations, + OwnerReferences: refs, + } + + r.l.Info("updating RetinaEndpoint", zap.String("name", pod.Key.String())) + if err = r.Client.Patch(ctx, new, client.MergeFrom(got)); err != nil { + r.l.Error("failed to patch RetinaEndpoint", zap.Error(err), zap.String("name", pod.Key.String())) + return err + } + return nil +} diff --git a/pkg/controllers/operator/retinaendpoint/retinaendpoint_controller_test.go b/pkg/controllers/operator/retinaendpoint/retinaendpoint_controller_test.go new file mode 100644 index 000000000..59e38abf0 --- /dev/null +++ b/pkg/controllers/operator/retinaendpoint/retinaendpoint_controller_test.go @@ -0,0 +1,278 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package retinaendpointcontroller + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + retinav1alpha1 "github.com/microsoft/retina/crd/api/v1alpha1" + "github.com/microsoft/retina/operator/cache" +) + +var fakescheme = runtime.NewScheme() + +func TestRetinaEndpointReconciler_ReconcilePod(t *testing.T) { + _ = clientgoscheme.AddToScheme(fakescheme) + fakescheme.AddKnownTypes(retinav1alpha1.GroupVersion, &retinav1alpha1.RetinaEndpoint{}) + + type fields struct { + newlyCachedPod cache.PodCacheObject + existingObjects []client.Object + } + tests := []struct { + name string + fields fields + wantedRetinaEndpoint *retinav1alpha1.RetinaEndpoint + }{ + { + name: "update existing retina endpoint", + fields: fields{ + existingObjects: []client.Object{ + &retinav1alpha1.RetinaEndpoint{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + Namespace: "default", + }, + Spec: retinav1alpha1.RetinaEndpointSpec{ + PodIP: "10.0.0.1", + }, + }, + }, + newlyCachedPod: cache.PodCacheObject{ + Key: types.NamespacedName{ + Name: "pod", + Namespace: "default", + }, + Pod: &corev1.Pod{ + Status: corev1.PodStatus{ + PodIP: "10.0.0.2", + Phase: corev1.PodRunning, + }, + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + Name: "pods", + Kind: "Daemonset", + }, + }, + }, + }, + }, + }, + wantedRetinaEndpoint: &retinav1alpha1.RetinaEndpoint{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + Namespace: "default", + ResourceVersion: "1000", + }, + Spec: retinav1alpha1.RetinaEndpointSpec{ + PodIP: "10.0.0.2", + OwnerReferences: []retinav1alpha1.OwnerReference{ + { + Name: "pods", + Kind: "Daemonset", + }, + }, + }, + TypeMeta: metav1.TypeMeta{ + Kind: "RetinaEndpoint", + APIVersion: "retina.io/v1alpha1", + }, + }, + }, + { + name: "delete existing retina endpoint", + fields: fields{ + existingObjects: []client.Object{ + &retinav1alpha1.RetinaEndpoint{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + 
Namespace: "default", + }, + }, + }, + newlyCachedPod: cache.PodCacheObject{ + Key: types.NamespacedName{ + Name: "pod", + Namespace: "default", + }, + Pod: nil, + }, + }, + wantedRetinaEndpoint: nil, + }, + { + name: "create retina endpoint from pod", + fields: fields{ + newlyCachedPod: cache.PodCacheObject{ + Key: types.NamespacedName{ + Name: "pod", + Namespace: "default", + }, + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + Namespace: "default", + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + HostIP: "10.10.10.10", + PodIPs: []corev1.PodIP{ + { + IP: "10.0.0.2", + }, + }, + PodIP: "10.0.0.1", + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: "testcontainer", + ContainerID: "docker://1234567890", + }, + }, + }, + }, + }, + }, + wantedRetinaEndpoint: &retinav1alpha1.RetinaEndpoint{ + TypeMeta: metav1.TypeMeta{ + Kind: "RetinaEndpoint", + APIVersion: "retina.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + Namespace: "default", + ResourceVersion: "1", + }, + Spec: retinav1alpha1.RetinaEndpointSpec{ + NodeIP: "10.10.10.10", + PodIP: "10.0.0.1", + PodIPs: []string{"10.0.0.2"}, + Containers: []retinav1alpha1.RetinaEndpointStatusContainers{ + { + Name: "testcontainer", + ID: "docker://1234567890", + }, + }, + }, + }, + }, + { + name: "create retina endpoint from non-running pod", + fields: fields{ + newlyCachedPod: cache.PodCacheObject{ + Key: types.NamespacedName{ + Name: "pod", + Namespace: "default", + }, + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + Namespace: "default", + }, + Status: corev1.PodStatus{ + Phase: corev1.PodPending, + }, + }, + }, + }, + wantedRetinaEndpoint: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client := fake.NewClientBuilder().WithScheme(fakescheme).WithObjects(tt.fields.existingObjects...).Build() + podchannel := make(chan cache.PodCacheObject, 10) + r := New(client, podchannel) + ctx, cancel := context.WithCancel(context.Background()) + go r.ReconcilePod(ctx) + defer cancel() + + got := retinav1alpha1.RetinaEndpoint{} + + podchannel <- tt.fields.newlyCachedPod + + // Nil wantedRetinaEndpoint indicates no RetinaEndpoint is created from the newlyCachedPod. + if tt.wantedRetinaEndpoint == nil { + // No retinaEndpoint should be created consistently within timeout. 
+ require.Eventually(t, func() bool { + err := client.Get(context.Background(), tt.fields.newlyCachedPod.Key, &got) + return apierrors.IsNotFound(err) + }, 5*time.Second, 1*time.Second, "RetinaEndpoint should not exist") + } else { + require.Eventually(t, func() bool { + err := client.Get(context.Background(), tt.fields.newlyCachedPod.Key, &got) + fmt.Println(err) + return !apierrors.IsNotFound(err) + }, 5*time.Second, 1*time.Second, "RetinaEndpoint should be created") + require.Equal(t, *tt.wantedRetinaEndpoint, got) + } + }) + } +} + +func TestRetinaEndpointReconciler_reqeuePodToRetinaEndpoint(t *testing.T) { + type args struct { + pod cache.PodCacheObject + } + tests := []struct { + name string + args args + attempts int + }{ + { + name: "requeue pod", + attempts: 1, + args: args{ + pod: cache.PodCacheObject{}, + }, + }, + + { + name: "requeue pod twice", + attempts: 2, + args: args{ + pod: cache.PodCacheObject{}, + }, + }, + { + name: "more than max retries", + attempts: MAX_RETRIES + 1, + args: args{ + pod: cache.PodCacheObject{}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client := fake.NewClientBuilder().WithScheme(fakescheme).Build() + podchannel := make(chan cache.PodCacheObject, 10) + + r := New(client, podchannel) + for i := 0; i < tt.attempts; i++ { + r.requeuePodToRetinaEndpoint(context.Background(), tt.args.pod) + } + + if tt.attempts >= MAX_RETRIES { + require.Exactlyf(t, MAX_RETRIES, len(podchannel), fmt.Sprintf("podchannel length %d should be 0", len(podchannel))) + require.Exactly(t, 0, len(r.retries), "retries should be empty") + } else { + require.Exactlyf(t, tt.attempts, len(podchannel), fmt.Sprintf("podchannel length %d should be %d", len(podchannel), tt.attempts)) + require.Exactlyf(t, tt.attempts, r.retries[tt.args.pod.Key], "retries should have %d entry", tt.attempts) + } + }) + } +} diff --git a/pkg/controllers/operator/tracesconfiguration/tracesconfiguration_controller.go b/pkg/controllers/operator/tracesconfiguration/tracesconfiguration_controller.go new file mode 100644 index 000000000..ef3157a78 --- /dev/null +++ b/pkg/controllers/operator/tracesconfiguration/tracesconfiguration_controller.go @@ -0,0 +1,55 @@ +/* +Copyright 2023. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package tracesconfigurationcontroller + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + retinav1alpha1 "github.com/microsoft/retina/crd/api/v1alpha1" +) + +// TracesConfigurationReconciler reconciles a TracesConfiguration object +type TracesConfigurationReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +//+kubebuilder:rbac:groups=operator.retina.io,resources=tracesconfiguration,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=operator.retina.io,resources=tracesconfiguration/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=operator.retina.io,resources=tracesconfiguration/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.1/pkg/reconcile +func (r *TracesConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + _ = log.FromContext(ctx) + + // TODO: implement the reconcile logic + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *TracesConfigurationReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&retinav1alpha1.TracesConfiguration{}). + Complete(r) +} diff --git a/pkg/deprecated/cache/cache.go b/pkg/deprecated/cache/cache.go new file mode 100644 index 000000000..eccaf3ba2 --- /dev/null +++ b/pkg/deprecated/cache/cache.go @@ -0,0 +1,245 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
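As an aside, the TracesConfiguration reconciler above is registered like any other controller-runtime controller. A minimal sketch, assuming it is wired up from the operator's manager setup (the helper name, import alias, and the `mgr` value are illustrative, not part of this change):

import (
	ctrl "sigs.k8s.io/controller-runtime"

	tracesconfigurationcontroller "github.com/microsoft/retina/pkg/controllers/operator/tracesconfiguration"
)

// setupTracesController registers the reconciler with the manager so it is
// invoked whenever a TracesConfiguration object changes.
func setupTracesController(mgr ctrl.Manager) error {
	r := &tracesconfigurationcontroller.TracesConfigurationReconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
	}
	return r.SetupWithManager(mgr)
}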
+package cache + +import ( + "context" + "errors" + "time" + + "github.com/microsoft/retina/pkg/log" + "go.uber.org/zap" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/informers" + v1 "k8s.io/client-go/informers/core/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + crmgr "sigs.k8s.io/controller-runtime/pkg/manager" +) + +type Cache struct { + // local cache containing services + pods + objCache map[string]client.Object + // informer to track pod events + podInformer v1.PodInformer + // informer to track service events + svcInformer v1.ServiceInformer + // mgr mostly used to start the cache in this pkg + mgr crmgr.Manager + informerFactory informers.SharedInformerFactory + logger *log.ZapLogger +} + +const ( + ResyncTime time.Duration = 5 * time.Minute +) + +func (c *Cache) add(obj interface{}) { + cObj, ok := obj.(client.Object) + if !ok { + c.logger.Error("Failed to cast to Kubernetes client object") + return + } + objIP, err := GetObjectIP(cObj) + if err != nil { + c.logger.Error("Failed to get IP", zap.Error(err)) + return + } + if len(objIP) > 0 { + c.objCache[objIP] = cObj + } +} + +func (c *Cache) update(old, new interface{}) { + oObj, ook := old.(client.Object) + nObj, nok := new.(client.Object) + if !ook || !nok { + c.logger.Error("Failed to cast to Kubernetes client object") + return + } + oObjIP, oerr := GetObjectIP(oObj) + nObjIP, nerr := GetObjectIP(nObj) + if oerr != nil || nerr != nil { + c.logger.Error("Failed to get IP", zap.Error(oerr), zap.Error(nerr)) + return + } + if len(nObjIP) > 0 { + if len(oObjIP) > 0 && oObjIP != nObjIP { + delete(c.objCache, oObjIP) + } + c.objCache[nObjIP] = nObj + } +} + +func (c *Cache) delete(obj interface{}) { + cObj, ok := obj.(client.Object) + if !ok { + c.logger.Error("Failed to cast to Kubernetes client object") + return + } + objIP, err := GetObjectIP(cObj) + if err != nil { + c.logger.Error("Failed to get IP", zap.Error(err)) + return + } + delete(c.objCache, objIP) +} + +func New(logger *log.ZapLogger, mgr crmgr.Manager, cl kubernetes.Interface, factory informers.SharedInformerFactory) *Cache { + cacheMap := make(map[string]client.Object) + return &Cache{ + objCache: cacheMap, + mgr: mgr, + informerFactory: factory, + logger: logger, + } +} + +func (lc *Cache) Start(ctx context.Context) error { + lc.logger.Info("Starting cache...") + lc.SetInformers(lc.informerFactory) + err := lc.mgr.GetCache().Start(ctx) + return err +} + +func (lc *Cache) SetInformers(informerFactory informers.SharedInformerFactory) { + lc.SetPodInformer(informerFactory) + lc.SetServiceInformer(informerFactory) +} + +func (lc *Cache) SetPodInformer(informerFactory informers.SharedInformerFactory) { + podInformer := informerFactory.Core().V1().Pods() + _, err := podInformer.Informer().AddEventHandler( + &cache.ResourceEventHandlerFuncs{ + AddFunc: lc.add, + UpdateFunc: lc.update, + DeleteFunc: lc.delete, + }, + ) + if err != nil { + lc.logger.Error("Failed to add pod event handler", zap.Error(err)) + } + lc.podInformer = podInformer +} + +func (lc *Cache) SetServiceInformer(informerFactory informers.SharedInformerFactory) { + svcInformer := informerFactory.Core().V1().Services() + lc.svcInformer = svcInformer + _, err := svcInformer.Informer().AddEventHandler( + &cache.ResourceEventHandlerFuncs{ + AddFunc: lc.add, + UpdateFunc: lc.update, + DeleteFunc: lc.delete, + }, + ) + if err != nil { + lc.logger.Error("Failed to add service 
event handler", zap.Error(err)) + } +} + +func (c *Cache) LookupObjectByIP(ip string) (client.Object, error) { + obj, ok := c.objCache[ip] + if ok { + return obj, nil + } + // check cache podlist + var podList corev1.PodList + obj, _ = c.CacheLookupObjectByIP(ip, &podList) + if obj != nil { + return obj, nil + } + // check cache service list + var svcList corev1.ServiceList + obj, _ = c.CacheLookupObjectByIP(ip, &svcList) + if obj != nil { + return obj, nil + } + return nil, errors.New("Pod or service not found") +} + +func (c *Cache) CacheLookupObjectByIP(ip string, list client.ObjectList) (client.Object, error) { + err := c.mgr.GetCache().List(context.TODO(), list) + if err != nil { + return nil, err + } + switch list.(type) { + case *corev1.PodList: + return c.LookupPodByIP(list, ip) + case *corev1.ServiceList: + return c.LookupServiceByIP(list, ip) + default: + return nil, errors.New("List type not supported") + } +} + +func (c *Cache) LookupServiceByIP(list client.ObjectList, ip string) (client.Object, error) { + v, ok := list.(*corev1.ServiceList) + if !ok { + return nil, errors.New("Object is not a Service List") + } + for _, obj := range v.Items { + if oip, err := GetObjectIP(&obj); err == nil && (oip == ip) { + c.add(&obj) + return c.objCache[ip], nil + } + } + return nil, errors.New("Service not found") +} + +func (c *Cache) LookupPodByIP(list client.ObjectList, ip string) (client.Object, error) { + v, ok := list.(*corev1.PodList) + if !ok { + return nil, errors.New("Object is not a PodList") + } + for _, obj := range v.Items { + if oip, err := GetObjectIP(&obj); err == nil && (oip == ip) { + c.add(&obj) + return c.objCache[ip], nil + } + } + return nil, errors.New("Pod not found") +} + +func (c *Cache) GetPodOwner(obj interface{}) (string, string) { + var name, kind string + switch p := obj.(type) { + case *corev1.Pod: + if len(p.OwnerReferences) == 0 { + return name, kind + } + name = p.OwnerReferences[0].Name + switch v := p.OwnerReferences[0].Kind; v { + case "DaemonSet", "StatefulSet": + kind = v + case "ReplicaSet": + kind = v + rs := &appsv1.ReplicaSet{} + rk := types.NamespacedName{ + Namespace: p.Namespace, + Name: name, + } + err := c.mgr.GetClient().Get(context.TODO(), rk, rs) + if err != nil { + c.logger.Warn("Error finding replicaset", zap.Error(err)) + } + if len(rs.OwnerReferences) > 0 { + kind = "Deployment" + name = rs.OwnerReferences[0].Name + } + } + } + return name, kind +} + +func GetObjectIP(obj interface{}) (string, error) { + switch v := obj.(type) { + case *corev1.Service: + return v.Spec.ClusterIP, nil + case *corev1.Pod: + return v.Status.PodIP, nil + } + return "", errors.New("Non supported type") +} diff --git a/pkg/deprecated/cache/cache_test.go b/pkg/deprecated/cache/cache_test.go new file mode 100644 index 000000000..05c23ca7e --- /dev/null +++ b/pkg/deprecated/cache/cache_test.go @@ -0,0 +1,368 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+//go:build unit +// +build unit + +package cache + +import ( + "context" + "testing" + + "github.com/microsoft/retina/pkg/log" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/informers" + v1 "k8s.io/client-go/informers/core/v1" + k8sfake "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/rest" + crc "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + crmgr "sigs.k8s.io/controller-runtime/pkg/manager" +) + +type mockMgr struct { + crmgr.Manager +} + +type mockCache struct { + crc.Cache +} + +type mockClient struct { + client.Client +} + +func (mm *mockMgr) GetCache() crc.Cache { + var mc crc.Cache = &mockCache{} + return mc +} + +func (mm *mockMgr) GetClient() client.Client { + var mc client.Client = &mockClient{} + return mc +} + +func (mc *mockClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + obj.SetOwnerReferences([]metav1.OwnerReference{ + { + Name: "Test", + }, + }) + return nil +} + +func (mm *mockMgr) GetConfig() *rest.Config { + return &rest.Config{} +} + +func (mc *mockCache) Start(ctx context.Context) error { + return nil +} + +func (mc *mockCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + podItems := make([]corev1.Pod, 1) + podItems[0] = corev1.Pod{ + Status: corev1.PodStatus{ + PodIP: "0.0.0.1", + }, + } + svcItems := make([]corev1.Service, 1) + svcItems[0] = corev1.Service{ + Spec: corev1.ServiceSpec{ + ClusterIP: "0.0.0.2", + }, + } + switch list.(type) { + case *corev1.PodList: + listPtr := (list).(*corev1.PodList) + listPtr.Items = podItems + case *corev1.ServiceList: + listPtr := (list).(*corev1.ServiceList) + listPtr.Items = svcItems + } + return nil +} + +func TestCache_LookupObjectByIP(t *testing.T) { + type fields struct { + objCache map[string]client.Object + podInformer v1.PodInformer + svcInformer v1.ServiceInformer + } + type args struct { + ip string + } + var mm crmgr.Manager = &mockMgr{} + tests := []struct { + name string + fields fields + args args + wantIP string + wantErr bool + }{ + { + name: "Pod in local cache", + fields: fields{ + objCache: map[string]client.Object{ + "0.0.0.01": &corev1.Pod{ + Status: corev1.PodStatus{ + PodIP: "0.0.0.01", + }, + }, + }, + }, + args: args{ + ip: "0.0.0.01", + }, + wantIP: "0.0.0.01", + wantErr: false, + }, + { + name: "Pod in cache", + fields: fields{ + objCache: map[string]client.Object{}, + }, + args: args{ + ip: "0.0.0.1", + }, + wantIP: "0.0.0.1", + wantErr: false, + }, + { + name: "Service in cache", + fields: fields{ + objCache: map[string]client.Object{}, + }, + args: args{ + ip: "0.0.0.2", + }, + wantIP: "0.0.0.2", + wantErr: false, + }, + { + name: "Pod/Svc not found", + fields: fields{ + objCache: map[string]client.Object{}, + }, + args: args{ + ip: "0.0.0.3", + }, + wantIP: "0.0.0.3", + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &Cache{ + objCache: tt.fields.objCache, + podInformer: tt.fields.podInformer, + svcInformer: tt.fields.svcInformer, + mgr: mm, + } + c.SetInformers(informers.NewSharedInformerFactory(nil, 0)) + got, err := c.LookupObjectByIP(tt.args.ip) + if err != nil && tt.wantErr { + return + } + if (err != nil) != tt.wantErr { + t.Errorf("Cache.LookupObjectByIP() error = %v, wantErr %v", err, tt.wantErr) + return + } + switch v := got.(type) { + case *corev1.Pod: + if v.Status.PodIP != tt.wantIP { + t.Errorf("Cache.LookupObjectByIP() = %v, want %v", 
got, tt.wantIP) + } + case *corev1.Service: + if v.Spec.ClusterIP != tt.wantIP { + t.Errorf("Cache.LookupObjectByIP() = %v, want %v", got, tt.wantIP) + } + } + }) + } +} + +func TestCache_update(t *testing.T) { + type fields struct { + objCache map[string]client.Object + podInformer v1.PodInformer + svcInformer v1.ServiceInformer + } + type args struct { + old interface{} + new interface{} + } + cObj := corev1.Pod{ + Status: corev1.PodStatus{ + PodIP: "0.0.0.01", + }, + } + tests := []struct { + name string + fields fields + args args + }{ + { + name: "Test update for object cache where ip > 0", + fields: fields{ + objCache: map[string]client.Object{ + "0.0.0.01": &cObj, + }, + }, + args: args{ + old: &cObj, + new: &cObj, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &Cache{ + objCache: tt.fields.objCache, + podInformer: tt.fields.podInformer, + svcInformer: tt.fields.svcInformer, + } + c.update(tt.args.old, tt.args.new) + }) + } +} + +func TestCache_delete(t *testing.T) { + type fields struct { + objCache map[string]client.Object + podInformer v1.PodInformer + svcInformer v1.ServiceInformer + } + type args struct { + obj interface{} + } + cObj := corev1.Pod{ + Status: corev1.PodStatus{ + PodIP: "0.0.0.01", + }, + } + tests := []struct { + name string + fields fields + args args + }{ + { + name: "Test update for object cache where ip > 0", + fields: fields{ + objCache: map[string]client.Object{ + "0.0.0.01": &cObj, + }, + }, + args: args{ + obj: &cObj, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &Cache{ + objCache: tt.fields.objCache, + podInformer: tt.fields.podInformer, + svcInformer: tt.fields.svcInformer, + } + c.delete(tt.args.obj) + if c.objCache[cObj.Status.PodIP] != nil { + t.Errorf("Cache.delete() = cache entry was not deleted %v", cObj.Status.PodIP) + } + }) + } +} + +func TestStart(t *testing.T) { + tests := []struct { + name string + wantErr bool + }{ + { + name: "Test new cache", + wantErr: false, + }, + } + cl := k8sfake.NewSimpleClientset() + factory := informers.NewSharedInformerFactory(cl, ResyncTime) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + c := New(log.Logger(), &mockMgr{}, cl, factory) + err := c.Start(context.TODO()) + if err != nil { + t.Errorf("Error starting the cache %v", err) + } + }) + } +} + +func TestGetPodOwner(t *testing.T) { + type args struct { + obj interface{} + } + tests := []struct { + name string + args args + ownerName string + ownerKind string + }{ + { + name: "Test daemonset", + args: args{ + obj: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "DaemonSet", + Name: "Test", + }, + }, + }, + }, + }, + ownerName: "Test", + ownerKind: "DaemonSet", + }, + { + name: "Test deployment", + args: args{ + obj: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "ReplicaSet", + Name: "Test", + }, + }, + }, + }, + }, + ownerName: "Test", + ownerKind: "Deployment", + }, + { + name: "Test unkown owner", + args: args{ + obj: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{}, + }, + }, + }, + }, + } + mm := &mockMgr{} + c := &Cache{ + mgr: mm, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got1, got2 := c.GetPodOwner(tt.args.obj) + if got1 != tt.ownerName || got2 != tt.ownerKind { + t.Errorf("GetPodOwner() got1 = %v, want1 %v, got2 = %v, 
want2: %v", got1, tt.ownerName, got2, tt.ownerKind) + } + }) + } +} diff --git a/pkg/enricher/enricher.go b/pkg/enricher/enricher.go new file mode 100644 index 000000000..fd7ff9a38 --- /dev/null +++ b/pkg/enricher/enricher.go @@ -0,0 +1,190 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package enricher + +import ( + "context" + "reflect" + "sync" + + "github.com/microsoft/retina/pkg/common" + "github.com/microsoft/retina/pkg/controllers/cache" + "github.com/microsoft/retina/pkg/log" + "github.com/cilium/cilium/api/v1/flow" + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" + "github.com/cilium/cilium/pkg/hubble/container" + "go.uber.org/zap" +) + +var ( + e *Enricher + once sync.Once +) + +type Enricher struct { + // ctx is the context of enricher + ctx context.Context + + // l is the logger + l *log.ZapLogger + + // cache is the cache of all the objects + cache cache.CacheInterface + + inputRing *container.Ring + + Reader *container.RingReader + + outputRing *container.Ring +} + +func New(ctx context.Context, cache cache.CacheInterface) *Enricher { + once.Do(func() { + ir := container.NewRing(container.Capacity1023) + e = &Enricher{ + ctx: ctx, + l: log.Logger().Named("enricher"), + cache: cache, + inputRing: ir, + Reader: container.NewRingReader(ir, ir.OldestWrite()), + outputRing: container.NewRing(container.Capacity1023), + } + e.init() + }) + + return e +} + +func Instance() *Enricher { + return e +} + +func (e *Enricher) init() { + // todo +} + +func (e *Enricher) Run() { + go func() { + for { + select { + case <-e.ctx.Done(): + e.l.Debug("context is done for enricher") + return + default: + ev := e.Reader.NextFollow(e.ctx) + //if err != nil { + //e.l.Error("error while reading from input channel for enricher", zap.Error(err)) + // continue + //} + if ev == nil { + e.l.Debug("received nil from input channel for enricher") + continue + } + // todo + switch ev.Event.(type) { + case *flow.Flow: + e.l.Debug("Enriching flow", zap.Any("flow", ev.Event.(*flow.Flow))) + e.enrich(ev) + default: + e.l.Debug("received unknown type from input channel for enricher", + zap.Any("obj", ev), + zap.Any("type", reflect.TypeOf(ev)), + ) + } + } + } + }() +} + +// enrich takes the flow and enriches it with the information from the cache +func (e *Enricher) enrich(ev *v1.Event) { + flow := ev.Event.(*flow.Flow) + + // IPversion is a enum in the flow proto + // 0: IPVersion_IP_NOT_USED + // 1: IPVersion_IPv4 + // 2: IPVersion_IPv6 + if flow.IP.IpVersion > 1 { + e.l.Error("IP version is not supported", zap.Any("IPVersion", flow.IP.IpVersion)) + return + } + if flow.IP.Source == "" { + e.l.Debug("source IP is empty") + return + } + srcObj := e.cache.GetObjByIP(flow.IP.Source) + if srcObj != nil { + flow.Source = e.getEndpoint(srcObj) + } + + if flow.IP.Destination == "" { + e.l.Debug("destination IP is empty") + return + } + + dstObj := e.cache.GetObjByIP(flow.IP.Destination) + if dstObj != nil { + flow.Destination = e.getEndpoint(dstObj) + } + + ev.Event = flow + e.l.Debug("enriched flow", zap.Any("flow", flow)) + e.export(ev) +} + +// export forwards the flow to other modules +func (e *Enricher) export(ev *v1.Event) { + e.outputRing.Write(ev) +} + +func (e *Enricher) getEndpoint(obj interface{}) *flow.Endpoint { + if obj == nil { + return nil + } + + switch o := obj.(type) { + case *common.RetinaEndpoint: + // TODO add service type + return &flow.Endpoint{ + Namespace: o.Namespace(), + PodName: o.Name(), + Labels: o.FormattedLabels(), + Workloads: 
e.getWorkloads(o.OwnerRefs()), + } + + case *common.RetinaSvc: + // todo + return nil + + default: + e.l.Debug("received unknown type from cache", zap.Any("obj", obj), zap.Any("type", reflect.TypeOf(obj))) + return nil + } +} + +func (e *Enricher) getWorkloads(ownerRefs []*common.OwnerReference) []*flow.Workload { + if ownerRefs == nil { + return nil + } + workloads := make([]*flow.Workload, 0) + + for _, ownerRef := range ownerRefs { + w := &flow.Workload{ + Name: ownerRef.Name, + Kind: ownerRef.Kind, + } + + workloads = append(workloads, w) + } + + return workloads +} + +func (e *Enricher) Write(ev *v1.Event) { + e.inputRing.Write(ev) +} + +func (e *Enricher) ExportReader() *container.RingReader { + return container.NewRingReader(e.outputRing, e.outputRing.OldestWrite()) +} diff --git a/pkg/enricher/enricher_test.go b/pkg/enricher/enricher_test.go new file mode 100644 index 000000000..95ff53b24 --- /dev/null +++ b/pkg/enricher/enricher_test.go @@ -0,0 +1,123 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package enricher + +import ( + "context" + "net" + "sync" + "testing" + "time" + + "github.com/microsoft/retina/pkg/common" + "github.com/microsoft/retina/pkg/controllers/cache" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/pubsub" + "github.com/cilium/cilium/api/v1/flow" + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + "google.golang.org/protobuf/types/known/timestamppb" +) + +func TestNewEnricher(t *testing.T) { + evCount := 20 + // by design per ring, the last written item is not readable + // since we have two rings here, there will be a diff of 2 items + // between the input and output ring + expectedOutputCount := 18 + + opts := log.GetDefaultLogOpts() + opts.Level = "debug" + log.SetupZapLogger(opts) + l := log.Logger().Named("test-enricher") + + ctx, cancel := context.WithCancel(context.Background()) + c := cache.New(pubsub.New()) + + addEndpoints := common.NewRetinaEndpoint("pod1", "ns1", nil) + addEndpoints.SetLabels(map[string]string{ + "app": "app1", + }) + + addEndpoints.SetOwnerRefs([]*common.OwnerReference{ + { + Kind: "Pod", + Name: "pod1-deployment", + }, + }) + + addEndpoints.SetIPs(&common.IPAddresses{ + IPv4: net.IPv4(1, 1, 1, 1), + }) + err := c.UpdateRetinaEndpoint(addEndpoints) + assert.NoError(t, err) + + e := New(ctx, c) + var wg sync.WaitGroup + + wg.Add(1) + go func() { + for i := 0; i < evCount; i++ { + addEvent(e) + } + wg.Done() + }() + + e.Run() + + wg.Add(1) + go func() { + count := 0 + oreader := e.ExportReader() + for { + ev := oreader.NextFollow(ctx) + if ev == nil { + break + } + + l.Info("One Received event", zap.Any("event", ev)) + count++ + } + assert.Equal(t, expectedOutputCount, count, "one") + wg.Done() + }() + + wg.Add(1) + go func() { + count := 0 + oreader := e.ExportReader() + for { + ev := oreader.NextFollow(ctx) + if ev == nil { + break + } + count++ + } + assert.Equal(t, expectedOutputCount, count, "two") + wg.Done() + }() + + time.Sleep(3 * time.Second) + cancel() + wg.Wait() +} + +func addEvent(e *Enricher) { + l := log.Logger().Named("addev") + ev := &v1.Event{ + Timestamp: timestamppb.Now(), + Event: &flow.Flow{ + IP: &flow.IP{ + IpVersion: 1, + Source: "1.1.1.1", + Destination: "2.2.2.2", + }, + }, + } + + l.Info("Adding event", zap.Any("event", ev)) + time.Sleep(100 * time.Millisecond) + e.Write(ev) +} diff --git a/pkg/enricher/mock_enricherinterface.go b/pkg/enricher/mock_enricherinterface.go new file mode 
100644 index 000000000..837eae01c --- /dev/null +++ b/pkg/enricher/mock_enricherinterface.go @@ -0,0 +1,80 @@ +// autogenerated +// +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/microsoft/retina/pkg/enricher (interfaces: EnricherInterface) + +// Package enricher is a generated GoMock package. +package enricher + +import ( + reflect "reflect" + + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" + container "github.com/cilium/cilium/pkg/hubble/container" + gomock "github.com/golang/mock/gomock" +) + +// MockEnricherInterface is a mock of EnricherInterface interface. +type MockEnricherInterface struct { + ctrl *gomock.Controller + recorder *MockEnricherInterfaceMockRecorder +} + +// MockEnricherInterfaceMockRecorder is the mock recorder for MockEnricherInterface. +type MockEnricherInterfaceMockRecorder struct { + mock *MockEnricherInterface +} + +// NewMockEnricherInterface creates a new mock instance. +func NewMockEnricherInterface(ctrl *gomock.Controller) *MockEnricherInterface { + mock := &MockEnricherInterface{ctrl: ctrl} + mock.recorder = &MockEnricherInterfaceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockEnricherInterface) EXPECT() *MockEnricherInterfaceMockRecorder { + return m.recorder +} + +// ExportReader mocks base method. +func (m *MockEnricherInterface) ExportReader() *container.RingReader { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExportReader") + ret0, _ := ret[0].(*container.RingReader) + return ret0 +} + +// ExportReader indicates an expected call of ExportReader. +func (mr *MockEnricherInterfaceMockRecorder) ExportReader() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportReader", reflect.TypeOf((*MockEnricherInterface)(nil).ExportReader)) +} + +// Run mocks base method. +func (m *MockEnricherInterface) Run() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Run") +} + +// Run indicates an expected call of Run. +func (mr *MockEnricherInterfaceMockRecorder) Run() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockEnricherInterface)(nil).Run)) +} + +// Write mocks base method. +func (m *MockEnricherInterface) Write(arg0 *v1.Event) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Write", arg0) +} + +// Write indicates an expected call of Write. +func (mr *MockEnricherInterfaceMockRecorder) Write(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockEnricherInterface)(nil).Write), arg0) +} diff --git a/pkg/enricher/types.go b/pkg/enricher/types.go new file mode 100644 index 000000000..97d88202d --- /dev/null +++ b/pkg/enricher/types.go @@ -0,0 +1,16 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
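Putting the enricher pieces together: producers Write raw flow events into the input ring, Run enriches them with pod metadata from the cache, and consumers pull enriched events through ExportReader, exactly as the test above does. A minimal consumer sketch (the function name is illustrative):

import (
	"context"

	"github.com/cilium/cilium/api/v1/flow"

	"github.com/microsoft/retina/pkg/enricher"
)

// consumeEnriched follows the enricher's output ring until the context is cancelled.
func consumeEnriched(ctx context.Context, e *enricher.Enricher) {
	reader := e.ExportReader()
	for {
		ev := reader.NextFollow(ctx)
		if ev == nil {
			// nil is returned once the context is done
			return
		}
		if f, ok := ev.Event.(*flow.Flow); ok {
			// Source/Destination now carry namespace, pod name, labels and
			// workloads for IPs that were found in the cache.
			_ = f
		}
	}
}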
+package enricher + +import ( + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" + "github.com/cilium/cilium/pkg/hubble/container" +) + +//go:generate go run github.com/golang/mock/mockgen@v1.6.0 -destination=mock_enricherinterface.go -copyright_file=../lib/ignore_headers.txt -package=enricher github.com/microsoft/retina/pkg/enricher EnricherInterface + +type EnricherInterface interface { + Run() + Write(ev *v1.Event) + ExportReader() *container.RingReader +} diff --git a/pkg/exporter/otel_agent.go b/pkg/exporter/otel_agent.go new file mode 100644 index 000000000..fbfa2fff9 --- /dev/null +++ b/pkg/exporter/otel_agent.go @@ -0,0 +1,119 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package exporter + +// import ( +// "context" +// "time" + +// "github.com/microsoft/retina/pkg/config" +// "github.com/microsoft/retina/pkg/log" +// "github.com/microsoft/retina/pkg/plugin/api" +// "go.opentelemetry.io/otel" +// "go.opentelemetry.io/otel/exporters/otlp/otlpmetric" +// "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" +// "go.opentelemetry.io/otel/exporters/otlp/otlptrace" +// "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" +// "go.opentelemetry.io/otel/metric/global" +// "go.opentelemetry.io/otel/propagation" +// controller "go.opentelemetry.io/otel/sdk/metric/controller/basic" +// processor "go.opentelemetry.io/otel/sdk/metric/processor/basic" +// "go.opentelemetry.io/otel/sdk/metric/selector/simple" +// "go.opentelemetry.io/otel/sdk/resource" +// sdktrace "go.opentelemetry.io/otel/sdk/trace" +// semconv "go.opentelemetry.io/otel/semconv/v1.12.0" +// "go.uber.org/zap" +// "google.golang.org/grpc" +// ) + +// type OtelAgent struct { +// l *log.ZapLogger +// agentAddress string +// collectPeriodSeconds time.Duration +// } + +// func NewOtelAgent(l *log.ZapLogger, c *config.Config) *OtelAgent { +// return &OtelAgent{ +// l: l, +// agentAddress: c.OtelAgent.AgentAddress, +// collectPeriodSeconds: time.Duration(c.OtelAgent.CollectPeriodSeconds) * time.Second, +// } +// } + +// func (o *OtelAgent) Start(ctx context.Context) func() { +// metricPusher := setUpMetricPusher(ctx, o.agentAddress, o.collectPeriodSeconds, o.l) +// traceExp := setUpTracerExporter(ctx, o.agentAddress, o.l) + +// return func() { +// cxt, cancel := context.WithTimeout(ctx, time.Second) +// defer cancel() +// // pushes any last exports to the receiver +// if err := metricPusher.Stop(cxt); err != nil { +// otel.Handle(err) +// } +// // shutdown will flush any remaining spans and shutdown the exporter +// if err := traceExp.Shutdown(cxt); err != nil { +// otel.Handle(err) +// } +// } +// } + +// func setUpTracerExporter(ctx context.Context, addr string, l *log.ZapLogger) *otlptrace.Exporter { +// traceClient := otlptracegrpc.NewClient( +// otlptracegrpc.WithInsecure(), +// otlptracegrpc.WithEndpoint(addr), +// otlptracegrpc.WithDialOption(grpc.WithBlock()), +// ) +// traceExp, err := otlptrace.New(ctx, traceClient) +// handleError(err, "Failed to create the collector trace exporter", l) +// res, err := resource.New(ctx, +// resource.WithFromEnv(), +// resource.WithProcess(), +// resource.WithTelemetrySDK(), +// resource.WithHost(), +// resource.WithAttributes( +// // the service name used to display traces in backends +// semconv.ServiceNameKey.String(api.ServiceName), +// ), +// ) +// handleError(err, "Failed to create tracer resource", l) +// // Register the trace exporter with a TracerProvider, using a batch +// // span processor to aggregate spans before 
export. +// bsp := sdktrace.NewBatchSpanProcessor(traceExp) +// tracerProvider := sdktrace.NewTracerProvider( +// sdktrace.WithSampler(sdktrace.AlwaysSample()), +// sdktrace.WithResource(res), +// sdktrace.WithSpanProcessor(bsp), +// ) +// // set global propagator to tracecontext (the default is no-op). +// otel.SetTextMapPropagator(propagation.TraceContext{}) +// otel.SetTracerProvider(tracerProvider) +// return traceExp +// } + +// func setUpMetricPusher(ctx context.Context, addr string, collectPeriod time.Duration, l *log.ZapLogger) *controller.Controller { +// metricClient := otlpmetricgrpc.NewClient( +// otlpmetricgrpc.WithInsecure(), +// otlpmetricgrpc.WithEndpoint(addr)) +// metricExp, err := otlpmetric.New(ctx, metricClient) +// handleError(err, "Failed to create the collector metric exporter", l) + +// pusher := controller.New( +// processor.NewFactory( +// simple.NewWithHistogramDistribution(), +// metricExp, +// ), +// controller.WithExporter(metricExp), +// controller.WithCollectPeriod(collectPeriod), +// ) +// global.SetMeterProvider(pusher) +// err = pusher.Start(ctx) +// handleError(err, "Failed to start otel meter pusher", l) +// return pusher +// } + +// func handleError(err error, message string, l *log.ZapLogger) { +// if err != nil { +// l.Error(message, zap.Error(err)) +// } +// } diff --git a/pkg/exporter/prometheusexporter.go b/pkg/exporter/prometheusexporter.go new file mode 100644 index 000000000..dde9e1ea3 --- /dev/null +++ b/pkg/exporter/prometheusexporter.go @@ -0,0 +1,93 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package exporter + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +const ( + RetinaNamespace = "networkobservability" + retinaControlPlaneNamespace = "controlplane_networkobservability" +) + +type CallBackFunc func() + +var ( + // CombinedGatherer is the combined registry for all metrics to be exposed by promhttp + CombinedGatherer *prometheus.Registry + // AdvancedRegistry is used for advanced metrics. 
This registry can be reset upon metrics config reconciliation + AdvancedRegistry *prometheus.Registry + DefaultRegistry *prometheus.Registry + MetricsServeCallback CallBackFunc +) + +func init() { + DefaultRegistry = prometheus.DefaultRegisterer.(*prometheus.Registry) + AdvancedRegistry = prometheus.NewRegistry() + CombinedGatherer = prometheus.NewRegistry() + CombinedGatherer.MustRegister(AdvancedRegistry) + CombinedGatherer.MustRegister(DefaultRegistry) + MetricsServeCallback = func() {} +} + +func ResetAdvancedMetricsRegistry() { + CombinedGatherer.Unregister(AdvancedRegistry) + AdvancedRegistry = prometheus.NewRegistry() + CombinedGatherer.MustRegister(AdvancedRegistry) + MetricsServeCallback() +} + +func RegisterMetricsServeCallback(callback CallBackFunc) { + MetricsServeCallback = callback +} + +func CreatePrometheusCounterVecForMetric(r prometheus.Registerer, name, desc string, labels ...string) *prometheus.CounterVec { + return promauto.With(r).NewCounterVec( + prometheus.CounterOpts{ + Namespace: RetinaNamespace, + Name: name, + Help: desc, + }, + labels, + ) +} + +func CreatePrometheusGaugeVecForMetric(r prometheus.Registerer, name, desc string, labels ...string) *prometheus.GaugeVec { + return promauto.With(r).NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: RetinaNamespace, + Name: name, + Help: desc, + }, + labels, + ) +} + +func CreatePrometheusCounterVecForControlPlaneMetric(r prometheus.Registerer, name, desc string, labels ...string) *prometheus.CounterVec { + return promauto.With(r).NewCounterVec( + prometheus.CounterOpts{ + Namespace: retinaControlPlaneNamespace, + Name: name, + Help: desc, + }, + labels, + ) +} + +func CreatePrometheusHistogramWithLinearBucketsForMetric(r prometheus.Registerer, name, desc string, start, width float64, count int) prometheus.Histogram { + opts := prometheus.HistogramOpts{ + Namespace: RetinaNamespace, + Name: name, + Help: desc, + Buckets: prometheus.LinearBuckets(start, width, count), + } + return promauto.With(r).NewHistogram(opts) +} + +func UnregisterMetric(r prometheus.Registerer, metric prometheus.Collector) { + if metric != nil { + r.Unregister(metric) + } +} diff --git a/pkg/label/k8s.go b/pkg/label/k8s.go new file mode 100644 index 000000000..d4bd144a5 --- /dev/null +++ b/pkg/label/k8s.go @@ -0,0 +1,15 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package label + +const ( + + // LabelPrefix is the prefix for all Retina owned labels. + LabelPrefix = "retina.io" + + // AppLabel indicates the Retina-owned application + AppLabel = LabelPrefix + "/app" + + CaptureNameLabel = "capture-name" +) diff --git a/pkg/lib/ignore_headers.txt b/pkg/lib/ignore_headers.txt new file mode 100644 index 000000000..6985c8775 --- /dev/null +++ b/pkg/lib/ignore_headers.txt @@ -0,0 +1,4 @@ +autogenerated + +Copyright (c) Microsoft Corporation. +Licensed under the MIT license. 
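To illustrate how the exporter package above is meant to be consumed, here is a hedged sketch: a counter is registered against AdvancedRegistry and everything is served from CombinedGatherer. The metric name, labels, values, and port are made up for the example; the real metric names live in the plugin and metrics modules.

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"

	"github.com/microsoft/retina/pkg/exporter"
)

func main() {
	// Registered under the "networkobservability" namespace, so this is exposed
	// as networkobservability_example_drop_count. Name and labels are illustrative.
	drops := exporter.CreatePrometheusCounterVecForMetric(
		exporter.AdvancedRegistry, "example_drop_count", "example drop counter", "namespace", "podname")
	drops.WithLabelValues("default", "nginx-0").Inc()

	// CombinedGatherer aggregates the default and advanced registries, so one
	// handler exposes both; ResetAdvancedMetricsRegistry would drop the counter above.
	http.Handle("/metrics", promhttp.HandlerFor(exporter.CombinedGatherer, promhttp.HandlerOpts{}))
	_ = http.ListenAndServe(":9090", nil)
}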
diff --git a/pkg/loader/compile.go b/pkg/loader/compile.go new file mode 100644 index 000000000..006de633b --- /dev/null +++ b/pkg/loader/compile.go @@ -0,0 +1,40 @@ +package loader + +import ( + "bytes" + "context" + "os/exec" + + "github.com/microsoft/retina/pkg/log" + "go.uber.org/zap" +) + +var compiler string = "clang" + +func runCmd(cmd *exec.Cmd) error { + l := log.Logger().Named(string("run-command")) + + var out, stderr bytes.Buffer + + cmd.Stdout = &out + cmd.Stderr = &stderr + + l.Debug("Running", zap.String("command", cmd.String())) + + err := cmd.Run() + if err != nil { + l.Debug("Error running command", zap.String("command", cmd.String()), zap.String("stderr", stderr.String()), zap.Error(err)) + return err + } + l.Debug("Output running command", zap.String("command", cmd.String()), zap.String("stdout", out.String())) + return nil +} + +func CompileEbpf(ctx context.Context, args ...string) error { + cmd := exec.CommandContext(ctx, compiler, args...) + err := runCmd(cmd) + if err != nil { + return err + } + return nil +} diff --git a/pkg/loader/generate.go b/pkg/loader/generate.go new file mode 100644 index 000000000..bbf9a9c6b --- /dev/null +++ b/pkg/loader/generate.go @@ -0,0 +1,27 @@ +package loader + +import ( + "context" + "fmt" + "os" +) + +func WriteFile(ctx context.Context, destFile, text string) error { + tmpFile := fmt.Sprintf("%s.tmp", destFile) + outputFile, err := os.Create(tmpFile) + if err != nil { + return err + } + defer outputFile.Close() + + _, err = outputFile.WriteString(text) + if err != nil { + return err + } + + err = os.Rename(tmpFile, destFile) + if err != nil { + return err + } + return nil +} diff --git a/pkg/log/zap.go b/pkg/log/zap.go new file mode 100644 index 000000000..e1356ebff --- /dev/null +++ b/pkg/log/zap.go @@ -0,0 +1,249 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package log + +import ( + "net/http" + "os" + "runtime" + "time" + + "github.com/Azure/azure-container-networking/zapai" + "github.com/go-chi/chi/middleware" + logfmt "github.com/jsternberg/zap-logfmt" + "github.com/microsoft/ApplicationInsights-Go/appinsights" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "gopkg.in/natefinch/lumberjack.v2" +) + +var l *ZapLogger + +const ( + defaultFileName = "retina.log" + defaultMaxSize = 50 // MB + defaultMaxAge = 30 // days + defaultMaxBackups = 3 +) + +type LogOpts struct { + Level string + File bool + FileName string + MaxFileSizeMB int + MaxBackups int + MaxAgeDays int + ApplicationInsightsID string + EnableTelemetry bool +} + +func GetDefaultLogOpts() *LogOpts { + return &LogOpts{ + Level: "info", + File: false, + } +} + +func Logger() *ZapLogger { + return l +} + +type ZapLogger struct { + *zap.Logger + lvl zapcore.Level + opts *LogOpts + closeAISink func() +} + +func toLevel(lvl string) zapcore.Level { + switch lvl { + case "info": + return zapcore.InfoLevel + case "warn": + return zapcore.WarnLevel + case "error": + return zapcore.ErrorLevel + case "panic": + return zapcore.PanicLevel + case "fatal": + return zapcore.FatalLevel + case "debug": + return zapcore.DebugLevel + default: + panic("Log level not supported") + } +} + +func EncoderConfig() zapcore.EncoderConfig { + encoderCfg := zap.NewProductionEncoderConfig() + encoderCfg.EncodeTime = zapcore.ISO8601TimeEncoder + return encoderCfg +} + +func SetupZapLogger(lOpts *LogOpts) { + if l != nil { + return + } + + lOpts.validate() + // Setup logger level. 
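// Note: the atomic level created below is shared by every core this function builds.
// The logfmt stdout core is always enabled; an Application Insights core is added when
// an instrumentation key is configured and telemetry is enabled; a JSON file core
// (rotated by lumberjack) is added when file logging is on. The enabled cores are
// then combined with zapcore.NewTee further down.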
+ atom := zap.NewAtomicLevel() + atom.SetLevel(toLevel(lOpts.Level)) + encoderCfg := EncoderConfig() + var core, logFmtCore, fileCore, aiCore zapcore.Core + + // Setup a default stdout logger + logFmtCore = zapcore.NewCore( + logfmt.NewEncoder(encoderCfg), + zapcore.Lock(os.Stdout), + atom, + ) + l = &ZapLogger{ + Logger: zap.New(logFmtCore, zap.AddCaller()), + lvl: atom.Level(), + opts: lOpts, + } + l.Logger.Debug("Stdout logger enabled") + + if lOpts.ApplicationInsightsID != "" && lOpts.EnableTelemetry { + // build the AI config + aiTelemetryConfig := appinsights.NewTelemetryConfiguration(lOpts.ApplicationInsightsID) + sinkcfg := zapai.SinkConfig{ + GracePeriod: 30 * time.Second, //nolint:gomnd // ignore + TelemetryConfiguration: *aiTelemetryConfig, + } + sinkcfg.MaxBatchSize = 10000 + sinkcfg.MaxBatchInterval = 10 * time.Second //nolint:gomnd // ignore + + // open the AI zap sink + aisink, aiclose, err := zap.Open(sinkcfg.URI()) //nolint:govet // intentional shadow + if err != nil { + l.Logger.Error("failed to open AI sink", zap.Error(err)) + return + } + // set the AI sink closer + l.closeAISink = aiclose + // build the AI core + aiCore = zapai.NewCore(toLevel(lOpts.Level), aisink).WithFieldMappers(zapai.DefaultMappers) + l.Logger.Debug("Application Insights logger enabled") + } + + if lOpts.File { + // Setup a file logger if it is enabled + // lumberjack is Zap endorsed logger rotation library + fw := zapcore.AddSync(&lumberjack.Logger{ + Filename: lOpts.FileName, + MaxSize: lOpts.MaxFileSizeMB, // megabytes + MaxBackups: l.opts.MaxBackups, + MaxAge: l.opts.MaxAgeDays, // days + }) + fileCore = zapcore.NewCore( + zapcore.NewJSONEncoder(encoderCfg), + fw, + atom, + ) + l.Logger.Debug("File logger enabled") + } + // Create a new teecore including all the enabled zap cores + if fileCore != nil && aiCore != nil { + core = zapcore.NewTee(logFmtCore, fileCore, aiCore) + } else if fileCore != nil { + core = zapcore.NewTee(logFmtCore, fileCore) + } else if aiCore != nil { + core = zapcore.NewTee(logFmtCore, aiCore) + } else { + core = zapcore.NewTee(logFmtCore) + } + l.Logger = zap.New(core, zap.AddCaller()) + l.Logger = l.Logger.With( + zap.String("goversion", runtime.Version()), + zap.String("os", runtime.GOOS), + zap.String("arch", runtime.GOARCH), + zap.Int("numcores", runtime.NumCPU()), + zap.String("hostname", os.Getenv("HOSTNAME")), + zap.String("podname", os.Getenv("POD_NAME")), + ) +} + +func (l *ZapLogger) SetLevel(lvl string) { + l.lvl = toLevel(lvl) + _ = l.Logger.Sync() +} + +func (l *ZapLogger) Level() int { + return int(l.lvl) +} + +func (l *ZapLogger) Close() { + if l.closeAISink != nil { + l.closeAISink() + } + _ = l.Logger.Sync() +} + +func (l *ZapLogger) Named(name string) *ZapLogger { + return &ZapLogger{ + Logger: l.Logger.Named(name), + lvl: l.lvl, + } +} + +func (l *ZapLogger) GetZappedMiddleware() func(next http.Handler) http.Handler { + return l.zappedMiddleware +} + +func (l *ZapLogger) zappedMiddleware(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + start := time.Now() + var requestID string + if reqID := r.Context().Value(middleware.RequestIDKey); reqID != nil { + requestID = reqID.(string) + } + ww := middleware.NewWrapResponseWriter(w, r.ProtoMajor) + next.ServeHTTP(ww, r) + + latency := time.Since(start) + + if l.Logger != nil { + fields := []zapcore.Field{ + zap.Int("status", ww.Status()), + zap.Duration("took", latency), + zap.Int64("latency", latency.Nanoseconds()), + zap.String("remote", r.RemoteAddr), + 
zap.String("request", r.RequestURI), + zap.String("method", r.Method), + } + if requestID != "" { + fields = append(fields, zap.String("request-id", requestID)) + } + l.Logger.Info("request completed", fields...) + } + } + + return http.HandlerFunc(fn) +} + +func (lOpts *LogOpts) validate() { + if lOpts.Level == "" { + lOpts.Level = "info" + } + if lOpts.File { + if lOpts.FileName == "" { + lOpts.FileName = defaultFileName + } + if lOpts.MaxFileSizeMB == 0 { + lOpts.MaxFileSizeMB = defaultMaxSize + } + if lOpts.MaxBackups == 0 { + lOpts.MaxBackups = defaultMaxBackups + } + if lOpts.MaxAgeDays == 0 { + lOpts.MaxAgeDays = defaultMaxAge + } + } +} + +// AddFields adds custom fields to the logger +func (l *ZapLogger) AddFields(fields ...zap.Field) { + l.Logger = l.Logger.With(fields...) +} diff --git a/pkg/log/zap_test.go b/pkg/log/zap_test.go new file mode 100644 index 000000000..02d855700 --- /dev/null +++ b/pkg/log/zap_test.go @@ -0,0 +1,53 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package log + +import ( + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" +) + +func TestLogFileRotation(t *testing.T) { + lOpts := &LogOpts{ + Level: "info", + File: true, + FileName: "test.log", + MaxFileSizeMB: 1, + MaxBackups: 3, + MaxAgeDays: 1, + } + + SetupZapLogger(lOpts) + + logsToPrint := 10 + for i := 0; i < logsToPrint; i++ { + l.Info("test", zap.Int("i", i)) + } + l.Close() + + _, err := os.Stat(lOpts.FileName) + assert.NoError(t, err, "Test log file is not found") + + // change this to 4 if using logsToPrint as 100000 + expectedReplicas := 1 + curReplicas := 0 + p, err := os.Getwd() + assert.NoError(t, err, "Getwd failed with err") + + err = filepath.Walk(p, func(path string, info os.FileInfo, err error) error { + l.Info("Filename: ", zap.String("path", path), zap.String("name", info.Name())) + if !info.IsDir() { + if strings.HasPrefix(info.Name(), "test") && strings.HasSuffix(info.Name(), ".log") { + curReplicas++ + } + } + return nil + }) + assert.NoError(t, err, "Test log file walk through failed with err") + assert.Equal(t, expectedReplicas, curReplicas, "Test log file replicas are not as expected on 2nd try") +} diff --git a/pkg/managers/controllermanager/controllermanager.go b/pkg/managers/controllermanager/controllermanager.go new file mode 100644 index 000000000..d5268f783 --- /dev/null +++ b/pkg/managers/controllermanager/controllermanager.go @@ -0,0 +1,126 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+package controllermanager + +import ( + "context" + "time" + + kcfg "github.com/microsoft/retina/pkg/config" + "github.com/microsoft/retina/pkg/controllers/cache" + "github.com/microsoft/retina/pkg/enricher" + "github.com/microsoft/retina/pkg/log" + pm "github.com/microsoft/retina/pkg/managers/pluginmanager" + sm "github.com/microsoft/retina/pkg/managers/servermanager" + "github.com/microsoft/retina/pkg/plugin/api" + "github.com/microsoft/retina/pkg/pubsub" + "github.com/microsoft/retina/pkg/telemetry" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" +) + +const ( + ResyncTime time.Duration = 5 * time.Minute +) + +type Controller struct { + l *log.ZapLogger + httpServer *sm.HTTPServer + pluginManager *pm.PluginManager + tel telemetry.Telemetry + conf *kcfg.Config + pubsub *pubsub.PubSub + cache *cache.Cache + enricher *enricher.Enricher +} + +func NewControllerManager(conf *kcfg.Config, kubeclient kubernetes.Interface, tel telemetry.Telemetry) (*Controller, error) { + cmLogger := log.Logger().Named("controller-manager") + + if conf.EnablePodLevel { + // informer factory for pods/services + factory := informers.NewSharedInformerFactory(kubeclient, ResyncTime) + factory.WaitForCacheSync(wait.NeverStop) + } + + // enabledPlugins := {api.PluginName(conf.EnabledPlugin[])} + enabledPlugins := []api.PluginName{} + for _, pluginName := range conf.EnabledPlugin { + enabledPlugins = append(enabledPlugins, api.PluginName(pluginName)) + } + pMgr, err := pm.NewPluginManager( + conf, + tel, + enabledPlugins..., + ) + if err != nil { + return nil, err + } + + // create HTTP server for API server + httpServer := sm.NewHTTPServer( + conf.ApiServer.Host, + conf.ApiServer.Port, + ) + + return &Controller{ + l: cmLogger, + httpServer: httpServer, + pluginManager: pMgr, + tel: tel, + conf: conf, + }, nil +} + +func (m *Controller) Init(ctx context.Context) error { + m.l.Info("Initializing controller manager ...") + + if err := m.httpServer.Init(); err != nil { + return err + } + + if m.conf.EnablePodLevel { + // create pubsub instance + m.pubsub = pubsub.New() + + // create cache instance + m.cache = cache.New(m.pubsub) + + // create enricher instance + m.enricher = enricher.New(ctx, m.cache) + } + + return nil +} + +func (m *Controller) Start(ctx context.Context) { + defer telemetry.TrackPanic() + + var g *errgroup.Group + + g, ctx = errgroup.WithContext(ctx) + + // defer m.otelAgent.Start(ctx)() + g.Go(func() error { + return m.pluginManager.Start(ctx) + }) + g.Go(func() error { + return m.httpServer.Start(ctx) + }) + // g.Go(func() error { + // return m.clusterObsCl.Start() + // }) + + if err := g.Wait(); err != nil { + m.l.Panic("Error running controller manager", zap.Error(err)) + } +} + +func (m *Controller) Stop(ctx context.Context) { + // Stop the plugin manager. This will help clean up the plugin resources. + m.pluginManager.Stop() + m.l.Info("Stopped controller manager") +} diff --git a/pkg/managers/controllermanager/controllermanager_test.go b/pkg/managers/controllermanager/controllermanager_test.go new file mode 100644 index 000000000..97b3ff4f7 --- /dev/null +++ b/pkg/managers/controllermanager/controllermanager_test.go @@ -0,0 +1,107 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
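The controller manager above is driven in three steps: construct it with a parsed config, a Kubernetes client, and a telemetry sink; Init it; then Start it, which blocks and panics if the plugin manager or HTTP server returns an error. A minimal sketch of that lifecycle, assuming the caller already has a config and client (the no-op telemetry sink mirrors the tests; the agent would pass a real one):

import (
	"context"

	"k8s.io/client-go/kubernetes"

	kcfg "github.com/microsoft/retina/pkg/config"
	cm "github.com/microsoft/retina/pkg/managers/controllermanager"
	"github.com/microsoft/retina/pkg/telemetry"
)

func runRetina(ctx context.Context, conf *kcfg.Config, kubeclient kubernetes.Interface) error {
	ctrl, err := cm.NewControllerManager(conf, kubeclient, telemetry.NewNoopTelemetry())
	if err != nil {
		return err
	}
	if err := ctrl.Init(ctx); err != nil {
		return err
	}
	// Blocks until a component fails or the context is cancelled.
	ctrl.Start(ctx)
	ctrl.Stop(ctx)
	return nil
}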
+package controllermanager + +import ( + "context" + "errors" + "testing" + "time" + + kcfg "github.com/microsoft/retina/pkg/config" + "github.com/microsoft/retina/pkg/log" + pm "github.com/microsoft/retina/pkg/managers/pluginmanager" + "github.com/microsoft/retina/pkg/plugin/api" + "github.com/microsoft/retina/pkg/telemetry" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + k8sfake "k8s.io/client-go/kubernetes/fake" +) + +const ( + testCfgFile = "../../config/testwith/config.yaml" + testMockCfgFile = "../../config/testwith/config-mock.yaml" + testCfgFileWin = "../../config/testwith/config-win.yaml" + timeInter = time.Second * 10 +) + +func TestNewControllerManager(t *testing.T) { + c, err := kcfg.GetConfig(testCfgFile) + assert.NoError(t, err, "Expected no error, instead got %+v", err) + assert.NotNil(t, c) + + log.SetupZapLogger(log.GetDefaultLogOpts()) + kubeclient := k8sfake.NewSimpleClientset() + cm, err := NewControllerManager(c, kubeclient, telemetry.NewNoopTelemetry()) + assert.NoError(t, err, "Expected no error, instead got %+v", err) + assert.NotNil(t, cm) +} + +func TestNewControllerManagerWin(t *testing.T) { + c, err := kcfg.GetConfig(testCfgFileWin) + assert.NoError(t, err, "Expected no error, instead got %+v", err) + assert.NotNil(t, c) + + log.SetupZapLogger(log.GetDefaultLogOpts()) + kubeclient := k8sfake.NewSimpleClientset() + cm, err := NewControllerManager(c, kubeclient, telemetry.NewNoopTelemetry()) + assert.Error(t, err, "Expected error of not recognising windows plugins in linux, instead got no error") + assert.Nil(t, cm) +} + +func TestNewControllerManagerInit(t *testing.T) { + c, err := kcfg.GetConfig(testMockCfgFile) + assert.NoError(t, err, "Expected no error, instead got %+v", err) + assert.NotNil(t, c) + + log.SetupZapLogger(log.GetDefaultLogOpts()) + kubeclient := k8sfake.NewSimpleClientset() + cm, err := NewControllerManager(c, kubeclient, telemetry.NewNoopTelemetry()) + assert.NoError(t, err, "Expected no error, instead got %+v", err) + assert.NotNil(t, cm) + + err = cm.Init(context.Background()) + assert.NoError(t, err, "Expected no error, instead got %+v", err) +} + +func TestControllerPluginManagerStartFail(t *testing.T) { + c, err := kcfg.GetConfig(testMockCfgFile) + assert.NoError(t, err, "Expected no error, instead got %+v", err) + assert.NotNil(t, c) + + log.SetupZapLogger(log.GetDefaultLogOpts()) + kubeclient := k8sfake.NewSimpleClientset() + cm, err := NewControllerManager(c, kubeclient, telemetry.NewNoopTelemetry()) + assert.NoError(t, err, "Expected no error, instead got %+v", err) + assert.NotNil(t, cm) + + ctl := gomock.NewController(t) + defer ctl.Finish() + log.SetupZapLogger(log.GetDefaultLogOpts()) + + pluginName := "mockplugin" + + cfg := &kcfg.Config{ + MetricsInterval: timeInter, + EnablePodLevel: true, + } + mgr, err := pm.NewPluginManager(cfg, telemetry.NewNoopTelemetry(), api.PluginName(pluginName)) + require.NoError(t, err, "Expected no error, instead got %+v", err) + + mockPlugin := api.NewMockPlugin(ctl) + mockPlugin.EXPECT().Generate(gomock.Any()).Return(nil).AnyTimes() + mockPlugin.EXPECT().Compile(gomock.Any()).Return(nil).AnyTimes() + mockPlugin.EXPECT().Stop().Return(nil).AnyTimes() + mockPlugin.EXPECT().Init().Return(nil).AnyTimes() + mockPlugin.EXPECT().Name().Return(pluginName).AnyTimes() + mockPlugin.EXPECT().Start(gomock.Any()).Return(errors.New("test error")).AnyTimes() + + mgr.SetPlugin(api.PluginName(pluginName), mockPlugin) + cm.pluginManager = mgr + + err 
= cm.Init(context.Background()) + require.NoError(t, err, "Expected no error, instead got %+v", err) + + require.Panics(t, func() { cm.Start(context.Background()) }) +} diff --git a/pkg/managers/filtermanager/cache.go b/pkg/managers/filtermanager/cache.go new file mode 100644 index 000000000..0063d2aff --- /dev/null +++ b/pkg/managers/filtermanager/cache.go @@ -0,0 +1,100 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package filtermanager + +import ( + "net" + "sync" +) + +var fc *filterCache + +// requests maps a requestor to a list of request metadata. +// Nested maps +type requests map[Requestor]map[RequestMetadata]bool + +type filterCache struct { + mu sync.Mutex + // Map of IPs --> Requestor --> RequestMetadata. + // Each IP can be requested by multiple requestors, + // for various tasks. For ex: + // trace1 can request IP1 for rule1 and rule2, while + // trace2 can request IP1 for rule3. + data map[string]requests +} + +func newCache() *filterCache { + if fc == nil { + fc = &filterCache{data: make(map[string]requests)} + } + return fc +} + +func (f *filterCache) ips() []net.IP { + f.mu.Lock() + defer f.mu.Unlock() + + ips := []net.IP{} + for ip := range f.data { + ips = append(ips, net.ParseIP(ip)) + } + return ips +} + +func (f *filterCache) reset() { + f.mu.Lock() + defer f.mu.Unlock() + + f.data = make(map[string]requests) +} + +func (f *filterCache) hasKey(ip net.IP) bool { + f.mu.Lock() + defer f.mu.Unlock() + + if _, ok := f.data[ip.String()]; ok { + return true + } + return false +} + +func (f *filterCache) addIP(ip net.IP, r Requestor, m RequestMetadata) { + f.mu.Lock() + defer f.mu.Unlock() + + key := ip.String() + if _, ok := f.data[key]; !ok { + f.data[key] = make(requests) + } + if _, ok := f.data[key][r]; !ok { + f.data[key][r] = make(map[RequestMetadata]bool) + } + f.data[key][r][m] = true +} + +// Return true if the IP was deleted from the cache. +// If more that one requestor has requested the IP, only the requestor +// and the metadata is deleted, and deleteIP returns false. +func (f *filterCache) deleteIP(ip net.IP, r Requestor, m RequestMetadata) bool { + f.mu.Lock() + defer f.mu.Unlock() + + ipDeleted := false + key := ip.String() + if _, ok := f.data[key]; ok { + if _, ok := f.data[key][r]; ok { + delete(f.data[key][r], m) + // If requestor has no more requests for this IP, delete the requestor. + if len(f.data[key][r]) == 0 { + delete(f.data[key], r) + } + // If IP has no more requests, delete the IP. + if len(f.data[key]) == 0 { + delete(f.data, key) + ipDeleted = true + } + } + } + return ipDeleted +} diff --git a/pkg/managers/filtermanager/cache_test.go b/pkg/managers/filtermanager/cache_test.go new file mode 100644 index 000000000..14f12bff4 --- /dev/null +++ b/pkg/managers/filtermanager/cache_test.go @@ -0,0 +1,224 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
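The filter cache above effectively reference-counts an IP by (requestor, metadata) pair: the IP only leaves the cache when its last request is deleted. A minimal in-package sketch of that behaviour, in the same style as the tests that follow (requestor names, rule IDs, and the IP are illustrative; the "net" import is already available in the package):

func exampleRefCounting() {
	f := newCache()
	ip := net.ParseIP("10.0.0.7")

	// two independent requests keep the IP in the filter
	f.addIP(ip, "trace-a", RequestMetadata{RuleID: "rule-1"})
	f.addIP(ip, "trace-b", RequestMetadata{RuleID: "rule-2"})

	// removing one request leaves the IP in place, so deleteIP reports false
	_ = f.deleteIP(ip, "trace-a", RequestMetadata{RuleID: "rule-1"}) // false
	// removing the last request drops the IP itself, so deleteIP reports true
	_ = f.deleteIP(ip, "trace-b", RequestMetadata{RuleID: "rule-2"}) // true
}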
+ +package filtermanager + +import ( + "fmt" + "net" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func Test_newCache(t *testing.T) { + f := newCache() + assert.NotNil(t, f) + + f.data["1.1.1.1"] = requests{ + "test": map[RequestMetadata]bool{ + {RuleID: "test"}: true, + }, + } + + f2 := newCache() + assert.Equal(t, f, f2) +} + +func Test_IPs(t *testing.T) { + f := newCache() + f.reset() + + ips := f.ips() + assert.Equal(t, 0, len(ips)) + + f.data["1.1.1.1"] = requests{} + f.data["2.2.2.2"] = requests{} + + ips = f.ips() + assert.Equal(t, 2, len(ips)) + + delete(f.data, "1.1.1.1") + ips = f.ips() + assert.Equal(t, 1, len(ips)) +} + +func Test_reset(t *testing.T) { + f := newCache() + assert.NotNil(t, f) + + f.data["1.1.1.1"] = requests{} + f.reset() + assert.Equal(t, 0, len(f.data)) +} + +func Test_hasKey(t *testing.T) { + f := newCache() + f.data["1.1.1.1"] = requests{} + assert.True(t, f.hasKey(net.ParseIP("1.1.1.1"))) + assert.False(t, f.hasKey(net.ParseIP("2.2.2.2"))) +} + +func addIPsHelper() { + f := newCache() + ip1 := net.ParseIP("1.1.1.1") + ip2 := net.ParseIP("2.2.2.2") + + go f.addIP(ip1, "trace1", RequestMetadata{RuleID: "task1"}) + go f.addIP(ip1, "trace1", RequestMetadata{RuleID: "task2"}) + go f.addIP(ip1, "trace2", RequestMetadata{RuleID: "task3"}) + go f.addIP(ip2, "trace1", RequestMetadata{RuleID: "task1"}) + + // Wait for goroutines to finish. + time.Sleep(1 * time.Second) +} + +func Test_addIP(t *testing.T) { + addIPsHelper() + + f := newCache() + assert.Equal(t, 2, len(f.data)) + + expectedData := map[string]requests{ + "1.1.1.1": { + "trace1": map[RequestMetadata]bool{ + {RuleID: "task1"}: true, + {RuleID: "task2"}: true, + }, + "trace2": map[RequestMetadata]bool{ + {RuleID: "task3"}: true, + }, + }, + "2.2.2.2": { + "trace1": map[RequestMetadata]bool{ + {RuleID: "task1"}: true, + }, + }, + } + assert.Equal(t, expectedData, f.data) + + // Add an IP that already exists. + addIPsHelper() + assert.Equal(t, expectedData, f.data) +} + +func Test_deleteIP(t *testing.T) { + addIPsHelper() + + f := newCache() + assert.Equal(t, 2, len(f.data)) + + ip1 := net.ParseIP("1.1.1.1") + ip2 := net.ParseIP("2.2.2.2") + ip3 := net.ParseIP("3.3.3.3") + + // Try deleting an IP that doesn't exist. 
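+	// deleteIP should return false and leave the cached data unchanged.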
+ res := f.deleteIP(ip3, "trace1", RequestMetadata{RuleID: "task1"}) + expectedData := map[string]requests{ + "1.1.1.1": { + "trace1": map[RequestMetadata]bool{ + {RuleID: "task1"}: true, + {RuleID: "task2"}: true, + }, + "trace2": map[RequestMetadata]bool{ + {RuleID: "task3"}: true, + }, + }, + "2.2.2.2": { + "trace1": map[RequestMetadata]bool{ + {RuleID: "task1"}: true, + }, + }, + } + assert.False(t, res) + assert.Equal(t, expectedData, f.data) + + res = f.deleteIP(ip1, "trace1", RequestMetadata{RuleID: "task1"}) + time.Sleep(10 * time.Millisecond) + expectedData = map[string]requests{ + "1.1.1.1": { + "trace1": map[RequestMetadata]bool{ + {RuleID: "task2"}: true, + }, + "trace2": map[RequestMetadata]bool{ + {RuleID: "task3"}: true, + }, + }, + "2.2.2.2": { + "trace1": map[RequestMetadata]bool{ + {RuleID: "task1"}: true, + }, + }, + } + assert.False(t, res) + assert.Equal(t, expectedData, f.data) + + res = f.deleteIP(ip2, "trace1", RequestMetadata{RuleID: "task1"}) + time.Sleep(1 * time.Millisecond) + expectedData = map[string]requests{ + "1.1.1.1": { + "trace1": map[RequestMetadata]bool{ + {RuleID: "task2"}: true, + }, + "trace2": map[RequestMetadata]bool{ + {RuleID: "task3"}: true, + }, + }, + } + assert.True(t, res) + assert.Equal(t, expectedData, f.data) + + res = f.deleteIP(ip1, "trace1", RequestMetadata{RuleID: "task2"}) + time.Sleep(1 * time.Millisecond) + expectedData = map[string]requests{ + "1.1.1.1": { + "trace2": map[RequestMetadata]bool{ + {RuleID: "task3"}: true, + }, + }, + } + assert.False(t, res) + assert.Equal(t, expectedData, f.data) + + // Try deleting a task that doesn't exist. + res = f.deleteIP(ip1, "trace2", RequestMetadata{RuleID: "task2"}) + time.Sleep(1 * time.Millisecond) + expectedData = map[string]requests{ + "1.1.1.1": { + "trace2": map[RequestMetadata]bool{ + {RuleID: "task3"}: true, + }, + }, + } + assert.False(t, res) + assert.Equal(t, expectedData, f.data) + + res = f.deleteIP(ip1, "trace2", RequestMetadata{RuleID: "task3"}) + time.Sleep(1 * time.Millisecond) + expectedData = map[string]requests{} + assert.True(t, res) + assert.Equal(t, expectedData, f.data) +} + +func Test_multiOp(t *testing.T) { + f := newCache() + + for i := 0; i < 100; i++ { + ip := net.ParseIP(fmt.Sprintf("%d.%d.%d.%d", i, i, i, i)) + go f.addIP(ip, Requestor(fmt.Sprintf("trace-%d", i)), RequestMetadata{RuleID: "task1"}) + } + time.Sleep(1 * time.Second) + assert.Equal(t, 100, len(f.data)) + + fn := func(i int) { + ip := net.ParseIP(fmt.Sprintf("%d.%d.%d.%d", i, i, i, i)) + res := f.deleteIP(ip, Requestor(fmt.Sprintf("trace-%d", i)), RequestMetadata{RuleID: "task1"}) + assert.True(t, res) + } + for i := 0; i < 100; i++ { + go fn(i) + } + time.Sleep(1 * time.Second) + assert.Equal(t, 0, len(f.data)) +} diff --git a/pkg/managers/filtermanager/manager.go b/pkg/managers/filtermanager/manager.go new file mode 100644 index 000000000..524884dd7 --- /dev/null +++ b/pkg/managers/filtermanager/manager.go @@ -0,0 +1,161 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package filtermanager + +import ( + "errors" + "net" + "sync" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/plugin/filter" + "github.com/microsoft/retina/pkg/utils" + "go.uber.org/zap" +) + +var ( + f *FilterManager + once sync.Once +) + +type FilterManager struct { + mu sync.Mutex + l *log.ZapLogger + c ICache + fm filter.IFilterMap + // retry is the number of times to retry adding/deleting IPs to/from the filter map. 
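+	// Retries use exponential backoff with a base of 2; see Init for details.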
+ retry int +} + +// Init returns a new instance of FilterManager. +// It is a singleton. +// retry is the number of times to retry adding/deleting IPs to/from the filter map. +// Retry is exponential backoff with a base of 2. For example, retry=3 means: +// 1st try: 1 second +// 2nd try: 2 seconds +// 3rd try: 4 seconds +// Total time: 7 seconds +// The manager locks the cache during retry. +// Suggest to keep retry to a small number (not more than 3). +func Init(retry int) (*FilterManager, error) { + var err error + if retry < 1 { + return nil, errors.New("retry should be greater than 0") + } + once.Do(func() { + f = &FilterManager{ + retry: retry, + } + }) + + if f.l == nil { + f.l = log.Logger().Named("filter-manager") + } + if f.c == nil { + f.c = newCache() + } + f.fm, err = filter.Init() + return f, err +} + +func (f *FilterManager) AddIPs(ips []net.IP, r Requestor, m RequestMetadata) error { + f.mu.Lock() + defer f.mu.Unlock() + + fn := func() error { + ipsToAdd := []net.IP{} + for _, ip := range ips { + if f.c.hasKey(ip) { + // IP already in filter map, add the request to the cache. + f.c.addIP(ip, r, m) + } else { + ipsToAdd = append(ipsToAdd, ip) + } + } + if len(ipsToAdd) > 0 { + // Bulk add the IPs to the filter map. + // This is more efficient than adding one IP at a time. + err := f.fm.Add(ipsToAdd) + if err != nil { + f.l.Error("AddIPs failed", zap.Error(err)) + return err + } + // Add the requests to the cache. + for _, ip := range ipsToAdd { + f.c.addIP(ip, r, m) + } + } + return nil + } + // Exponential backoff retry. + err := utils.Retry(fn, f.retry) + return err +} + +func (f *FilterManager) DeleteIPs(ips []net.IP, r Requestor, m RequestMetadata) error { + f.mu.Lock() + defer f.mu.Unlock() + + fn := func() error { + ipsToDelete := []net.IP{} + for _, ip := range ips { + if f.c.deleteIP(ip, r, m) { + // No more requests for this IP. Delete it from the filter map. + ipsToDelete = append(ipsToDelete, ip) + } + } + if len(ipsToDelete) > 0 { + err := f.fm.Delete(ipsToDelete) + if err != nil { + f.l.Error("DeleteIPs failed", zap.Error(err)) + // IPs were not deleted from the filter map. Add them back to the cache. + for _, ip := range ipsToDelete { + f.c.addIP(ip, r, m) + } + return err + } + } + return nil + } + // Exponential backoff retry. + err := utils.Retry(fn, f.retry) + return err +} + +func (f *FilterManager) HasIP(ip net.IP) bool { + f.mu.Lock() + defer f.mu.Unlock() + + return f.c.hasKey(ip) +} + +func (f *FilterManager) Reset() error { + f.mu.Lock() + defer f.mu.Unlock() + + ipsToDelete := f.c.ips() + if len(ipsToDelete) > 0 { + err := f.fm.Delete(ipsToDelete) + if err != nil { + f.l.Error("Reset failed", zap.Error(err)) + return err + } + f.c.reset() + } + return nil +} + +func (f *FilterManager) Stop() error { + // Delete all the IPs from the filter map. + err := f.Reset() + if err != nil { + f.l.Error("Reset failed", zap.Error(err)) + return err + } + // Close the filter map. + f.fm.Close() + // Clear the cache. + f.c = nil + return nil +} diff --git a/pkg/managers/filtermanager/manager_test.go b/pkg/managers/filtermanager/manager_test.go new file mode 100644 index 000000000..682758c7c --- /dev/null +++ b/pkg/managers/filtermanager/manager_test.go @@ -0,0 +1,294 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package filtermanager + +import ( + "errors" + "net" + "testing" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/plugin/filter/mocks" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +func Test_AddIPs_EmptyCache(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ips := []net.IP{net.ParseIP("1.1.1.1").To4(), net.ParseIP("2.2.2.2").To4()} + r := Requestor("test") + m := RequestMetadata{RuleID: "rule-1"} + + mockCache := NewMockICache(ctrl) // nolint:typecheck + for _, ip := range ips { + mockCache.EXPECT().hasKey(ip).Return(false).Times(1) + mockCache.EXPECT().addIP(ip, r, m).Times(1) + } + + mockUpdateFn := mocks.NewMockIFilterMap(ctrl) + mockUpdateFn.EXPECT().Add(ips).Return(nil).Times(1) + + f := &FilterManager{ + fm: mockUpdateFn, + c: mockCache, + l: log.Logger().Named("filter-manager"), + retry: 1, + } + err := f.AddIPs(ips, r, m) + assert.Nil(t, err) + mockCache.EXPECT().hasKey(ips[0]).Return(true).Times(1) + mockCache.EXPECT().hasKey(ips[1]).Return(true).Times(1) + assert.True(t, f.HasIP(ips[0])) + assert.True(t, f.HasIP(ips[1])) +} + +func Test_AddIPs_CacheHit(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ips := []net.IP{net.ParseIP("1.1.1.1").To4(), net.ParseIP("2.2.2.2").To4()} + r := Requestor("test") + m := RequestMetadata{RuleID: "rule-1"} + + mockCache := NewMockICache(ctrl) // nolint:typecheck + for _, ip := range ips { + mockCache.EXPECT().hasKey(ip).Return(true).Times(1) + mockCache.EXPECT().addIP(ip, r, m).Times(1) + } + f := &FilterManager{ + c: mockCache, + l: log.Logger().Named("filter-manager"), + retry: 1, + } + err := f.AddIPs(ips, r, m) + assert.Nil(t, err) +} + +func Test_AddIPs_MixedCacheHit(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ips := []net.IP{net.ParseIP("1.1.1.1").To4(), net.ParseIP("2.2.2.2").To4()} + r := Requestor("test") + m := RequestMetadata{RuleID: "rule-1"} + + mockCache := NewMockICache(ctrl) // nolint:typecheck + // First IP is a cache hit. + mockCache.EXPECT().hasKey(ips[0]).Return(true).Times(1) + mockCache.EXPECT().addIP(ips[0], r, m).Times(1) + // Second IP is a cache miss. 
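+	// Only the cache-miss IP should be bulk-added to the kernel filter map.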
+ mockCache.EXPECT().hasKey(ips[1]).Return(false).Times(1) + mockCache.EXPECT().addIP(ips[1], r, m).Times(1) + + mockUpdateFn := mocks.NewMockIFilterMap(ctrl) + mockUpdateFn.EXPECT().Add([]net.IP{ips[1]}).Return(nil).Times(1) + + f := &FilterManager{ + fm: mockUpdateFn, + c: mockCache, + l: log.Logger().Named("filter-manager"), + retry: 1, + } + err := f.AddIPs(ips, r, m) + assert.Nil(t, err) +} + +func Test_AddIPs_Retry(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ips := []net.IP{net.ParseIP("1.1.1.1").To4()} + r := Requestor("test") + m := RequestMetadata{RuleID: "rule-1"} + retry := 3 + expectedErr := errors.New("test error") + + mockCache := NewMockICache(ctrl) // nolint:typecheck + for _, ip := range ips { + mockCache.EXPECT().hasKey(ip).Return(false).Times(retry) + } + + mockUpdateFn := mocks.NewMockIFilterMap(ctrl) + mockUpdateFn.EXPECT().Add(ips).Return(expectedErr).Times(retry) + + f := &FilterManager{ + fm: mockUpdateFn, + c: mockCache, + l: log.Logger().Named("filter-manager"), + retry: retry, + } + err := f.AddIPs(ips, r, m) + assert.EqualError(t, err, expectedErr.Error()) +} + +func Test_DeleteIPs_IpNotDeletedFromCache(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ips := []net.IP{net.ParseIP("1.1.1.1").To4()} + r := Requestor("test") + m := RequestMetadata{RuleID: "rule-1"} + + mockCache := NewMockICache(ctrl) // nolint:typecheck + for _, ip := range ips { + mockCache.EXPECT().deleteIP(ip, r, m).Return(false).Times(1) + } + + f := &FilterManager{ + c: mockCache, + l: log.Logger().Named("filter-manager"), + retry: 1, + } + err := f.DeleteIPs(ips, r, m) + assert.Nil(t, err) +} + +func Test_DeleteIPs_IPsDeletedFromCache(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ips := []net.IP{net.ParseIP("1.1.1.1").To4(), net.ParseIP("2.2.2.2").To4()} + r := Requestor("test") + m := RequestMetadata{RuleID: "rule-1"} + + mockCache := NewMockICache(ctrl) // nolint:typecheck + for _, ip := range ips { + mockCache.EXPECT().deleteIP(ip, r, m).Return(true).Times(1) + } + + mockUpdateFn := mocks.NewMockIFilterMap(ctrl) + mockUpdateFn.EXPECT().Delete(ips).Return(nil).Times(1) + + f := &FilterManager{ + fm: mockUpdateFn, + c: mockCache, + l: log.Logger().Named("filter-manager"), + retry: 1, + } + err := f.DeleteIPs(ips, r, m) + assert.Nil(t, err) +} + +func Test_DeleteIPs_HalfIPsDeletedFromCache(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ips := []net.IP{net.ParseIP("1.1.1.1").To4(), net.ParseIP("2.2.2.2").To4()} + r := Requestor("test") + m := RequestMetadata{RuleID: "rule-1"} + + mockCache := NewMockICache(ctrl) // nolint:typecheck + // First IP is a cache hit. + mockCache.EXPECT().deleteIP(ips[0], r, m).Return(true).Times(1) + // Second IP is a cache miss. 
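+	// Only the IP whose last request was removed should be deleted from the kernel filter map.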
+ mockCache.EXPECT().deleteIP(ips[1], r, m).Return(false).Times(1) + + mockUpdateFn := mocks.NewMockIFilterMap(ctrl) + mockUpdateFn.EXPECT().Delete([]net.IP{ips[0]}).Return(nil).Times(1) + + f := &FilterManager{ + fm: mockUpdateFn, + c: mockCache, + l: log.Logger().Named("filter-manager"), + retry: 1, + } + err := f.DeleteIPs(ips, r, m) + assert.Nil(t, err) +} + +func Test_DeleteIPs_Error(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ips := []net.IP{net.ParseIP("1.1.1.1").To4()} + r := Requestor("test") + m := RequestMetadata{RuleID: "rule-1"} + expectedErr := errors.New("test error") + retry := 3 + + mockCache := NewMockICache(ctrl) // nolint:typecheck + for _, ip := range ips { + mockCache.EXPECT().deleteIP(ip, r, m).Return(true).Times(retry) + mockCache.EXPECT().addIP(ip, r, m).Times(retry) + } + + mockUpdateFn := mocks.NewMockIFilterMap(ctrl) + mockUpdateFn.EXPECT().Delete(ips).Return(expectedErr).Times(retry) + + f := &FilterManager{ + fm: mockUpdateFn, + c: mockCache, + l: log.Logger().Named("filter-manager"), + retry: retry, + } + err := f.DeleteIPs(ips, r, m) + assert.EqualError(t, err, expectedErr.Error()) +} + +func Test_Reset(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ips := []net.IP{net.ParseIP("1.1.1.1").To4()} + expectedErr := errors.New("test error") + + mockCache := NewMockICache(ctrl) // nolint:typecheck + mockCache.EXPECT().ips().Return(ips).Times(1) + mockCache.EXPECT().reset().Times(1) + + mockUpdateFn := mocks.NewMockIFilterMap(ctrl) + mockUpdateFn.EXPECT().Delete(ips).Return(nil).Times(1) + + f := &FilterManager{ + fm: mockUpdateFn, + c: mockCache, + l: log.Logger().Named("filter-manager"), + } + err := f.Reset() + assert.Nil(t, err) + + // Test filter-map error. + mockCache.EXPECT().ips().Return(ips).Times(1) + mockUpdateFn.EXPECT().Delete(ips).Return(expectedErr).Times(1) + mockCache.EXPECT().reset().Times(0) + err = f.Reset() + assert.EqualError(t, err, expectedErr.Error()) +} + +func Test_Reset_EmptyCache(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockCache := NewMockICache(ctrl) // nolint:typecheck + mockCache.EXPECT().ips().Return([]net.IP{}).Times(1) + + f := &FilterManager{ + c: mockCache, + l: log.Logger().Named("filter-manager"), + } + err := f.Reset() + assert.Nil(t, err) +} diff --git a/pkg/managers/filtermanager/mock_types.go b/pkg/managers/filtermanager/mock_types.go new file mode 100644 index 000000000..dfb57c173 --- /dev/null +++ b/pkg/managers/filtermanager/mock_types.go @@ -0,0 +1,194 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: types.go + +// Package filtermanager is a generated GoMock package. +package filtermanager + +import ( + net "net" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockICache is a mock of ICache interface. +type MockICache struct { + ctrl *gomock.Controller + recorder *MockICacheMockRecorder +} + +// MockICacheMockRecorder is the mock recorder for MockICache. +type MockICacheMockRecorder struct { + mock *MockICache +} + +// NewMockICache creates a new mock instance. +func NewMockICache(ctrl *gomock.Controller) *MockICache { + mock := &MockICache{ctrl: ctrl} + mock.recorder = &MockICacheMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockICache) EXPECT() *MockICacheMockRecorder { + return m.recorder +} + +// addIP mocks base method. +func (m *MockICache) addIP(arg0 net.IP, arg1 Requestor, arg2 RequestMetadata) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "addIP", arg0, arg1, arg2) +} + +// addIP indicates an expected call of addIP. +func (mr *MockICacheMockRecorder) addIP(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "addIP", reflect.TypeOf((*MockICache)(nil).addIP), arg0, arg1, arg2) +} + +// deleteIP mocks base method. +func (m *MockICache) deleteIP(arg0 net.IP, arg1 Requestor, arg2 RequestMetadata) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "deleteIP", arg0, arg1, arg2) + ret0, _ := ret[0].(bool) + return ret0 +} + +// deleteIP indicates an expected call of deleteIP. +func (mr *MockICacheMockRecorder) deleteIP(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "deleteIP", reflect.TypeOf((*MockICache)(nil).deleteIP), arg0, arg1, arg2) +} + +// hasKey mocks base method. +func (m *MockICache) hasKey(arg0 net.IP) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "hasKey", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// hasKey indicates an expected call of hasKey. +func (mr *MockICacheMockRecorder) hasKey(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "hasKey", reflect.TypeOf((*MockICache)(nil).hasKey), arg0) +} + +// ips mocks base method. +func (m *MockICache) ips() []net.IP { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ips") + ret0, _ := ret[0].([]net.IP) + return ret0 +} + +// ips indicates an expected call of ips. +func (mr *MockICacheMockRecorder) ips() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ips", reflect.TypeOf((*MockICache)(nil).ips)) +} + +// reset mocks base method. +func (m *MockICache) reset() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "reset") +} + +// reset indicates an expected call of reset. +func (mr *MockICacheMockRecorder) reset() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "reset", reflect.TypeOf((*MockICache)(nil).reset)) +} + +// MockIFilterManager is a mock of IFilterManager interface. +type MockIFilterManager struct { + ctrl *gomock.Controller + recorder *MockIFilterManagerMockRecorder +} + +// MockIFilterManagerMockRecorder is the mock recorder for MockIFilterManager. +type MockIFilterManagerMockRecorder struct { + mock *MockIFilterManager +} + +// NewMockIFilterManager creates a new mock instance. +func NewMockIFilterManager(ctrl *gomock.Controller) *MockIFilterManager { + mock := &MockIFilterManager{ctrl: ctrl} + mock.recorder = &MockIFilterManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIFilterManager) EXPECT() *MockIFilterManagerMockRecorder { + return m.recorder +} + +// AddIPs mocks base method. +func (m *MockIFilterManager) AddIPs(arg0 []net.IP, arg1 Requestor, arg2 RequestMetadata) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddIPs", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddIPs indicates an expected call of AddIPs. 
+func (mr *MockIFilterManagerMockRecorder) AddIPs(arg0, arg1, arg2 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddIPs", reflect.TypeOf((*MockIFilterManager)(nil).AddIPs), arg0, arg1, arg2)
+}
+
+// DeleteIPs mocks base method.
+func (m *MockIFilterManager) DeleteIPs(arg0 []net.IP, arg1 Requestor, arg2 RequestMetadata) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "DeleteIPs", arg0, arg1, arg2)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// DeleteIPs indicates an expected call of DeleteIPs.
+func (mr *MockIFilterManagerMockRecorder) DeleteIPs(arg0, arg1, arg2 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteIPs", reflect.TypeOf((*MockIFilterManager)(nil).DeleteIPs), arg0, arg1, arg2)
+}
+
+// HasIP mocks base method.
+func (m *MockIFilterManager) HasIP(arg0 net.IP) bool {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "HasIP", arg0)
+	ret0, _ := ret[0].(bool)
+	return ret0
+}
+
+// HasIP indicates an expected call of HasIP.
+func (mr *MockIFilterManagerMockRecorder) HasIP(arg0 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasIP", reflect.TypeOf((*MockIFilterManager)(nil).HasIP), arg0)
+}
+
+// Reset mocks base method.
+func (m *MockIFilterManager) Reset() error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Reset")
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// Reset indicates an expected call of Reset.
+func (mr *MockIFilterManagerMockRecorder) Reset() *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockIFilterManager)(nil).Reset))
+}
+
+// Stop mocks base method.
+func (m *MockIFilterManager) Stop() error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Stop")
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// Stop indicates an expected call of Stop.
+func (mr *MockIFilterManagerMockRecorder) Stop() *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockIFilterManager)(nil).Stop))
+}
diff --git a/pkg/managers/filtermanager/types.go b/pkg/managers/filtermanager/types.go
new file mode 100644
index 000000000..f42010a56
--- /dev/null
+++ b/pkg/managers/filtermanager/types.go
@@ -0,0 +1,51 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package filtermanager
+
+import (
+	"net"
+)
+
+//go:generate go run github.com/golang/mock/mockgen@v1.6.0 -source=types.go -destination=mock_types.go -package=filtermanager
+type ICache interface {
+	ips() []net.IP
+	reset()
+	hasKey(net.IP) bool
+	addIP(net.IP, Requestor, RequestMetadata)
+	deleteIP(net.IP, Requestor, RequestMetadata) bool
+}
+
+type IFilterManager interface {
+	// AddIPs adds the given IPs to the filter map (map in kernel space) and the filterCache.
+	// If any of the IPs cannot be added to the filter map,
+	// no entries will be added to the cache.
+	// Note: An error returned doesn't guarantee that no IPs
+	// were added to the filter map. The caller should retry adding
+	// all the IPs again.
+	AddIPs([]net.IP, Requestor, RequestMetadata) error
+	// DeleteIPs deletes the given IPs from the filter map (map in kernel space) and the filterCache.
+	// If any of the IPs cannot be deleted from the filter map,
+	// no entries will be deleted from the cache.
+	// Note: An error returned doesn't guarantee that no IPs
+	// were deleted from the filter map. The caller should retry deleting
+	// all the IPs again.
+	DeleteIPs([]net.IP, Requestor, RequestMetadata) error
+	// HasIP returns true if the given IP is in the filterCache.
+	HasIP(net.IP) bool
+	// Reset clears the cache and the filter map.
+	// Note: An error returned doesn't guarantee that no IPs
+	// were deleted from the filter map. The caller should retry Reset
+	// again.
+	Reset() error
+	// Stop resets the filterManager and closes the kernel map.
+	// An error indicates issues deleting all entries in the kernel map.
+	Stop() error
+}
+
+type Requestor string
+
+type RequestMetadata struct {
+	// Each trace can have multiple rules.
+	RuleID string
+}
diff --git a/pkg/managers/pluginmanager/pluginmanager.go b/pkg/managers/pluginmanager/pluginmanager.go
new file mode 100644
index 000000000..fcfc0feea
--- /dev/null
+++ b/pkg/managers/pluginmanager/pluginmanager.go
@@ -0,0 +1,220 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+package pluginmanager
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"time"
+
+	kcfg "github.com/microsoft/retina/pkg/config"
+	"github.com/microsoft/retina/pkg/log"
+	"github.com/microsoft/retina/pkg/managers/watchermanager"
+	"github.com/microsoft/retina/pkg/metrics"
+	"github.com/microsoft/retina/pkg/plugin/api"
+	"github.com/microsoft/retina/pkg/plugin/registry"
+	"github.com/microsoft/retina/pkg/telemetry"
+	v1 "github.com/cilium/cilium/pkg/hubble/api/v1"
+	"github.com/pkg/errors"
+	"go.uber.org/zap"
+	"golang.org/x/sync/errgroup"
+)
+
+const (
+
+	// Reconcile has not been observed to take longer than 5 seconds in any run;
+	// 10 seconds is a reasonable SLA for reconciliation to complete.
+	MAX_RECONCILE_TIME = 10 * time.Second
+)
+
+const (
+	MAX_STARTUP_TIME = 10 * time.Second
+)
+
+var (
+	ErrNilCfg = errors.New("pluginmanager requires a non-nil config")
+	ErrZeroInterval = errors.New("pluginmanager requires a positive MetricsInterval in its config")
+)
+
+type PluginManager struct {
+	cfg *kcfg.Config
+	l *log.ZapLogger
+	plugins map[api.PluginName]api.Plugin
+	tel telemetry.Telemetry
+
+	watcherManager watchermanager.IWatcherManager
+}
+
+func init() {
+	registry.RegisterPlugins()
+}
+
+func NewPluginManager(
+	cfg *kcfg.Config,
+	tel telemetry.Telemetry,
+	pluginNames ...api.PluginName,
+) (*PluginManager, error) {
+	logger := log.Logger().Named("plugin-manager")
+	mgr := &PluginManager{
+		cfg: cfg,
+		l: logger,
+		tel: tel,
+		plugins: map[api.PluginName]api.Plugin{},
+	}
+
+	if mgr.cfg.EnablePodLevel {
+		mgr.l.Info("plugin manager has pod level enabled")
+		mgr.watcherManager = watchermanager.NewWatcherManager()
+	} else {
+		mgr.l.Info("plugin manager has pod level disabled")
+	}
+
+	for _, name := range pluginNames {
+		newPluginFn, ok := registry.PluginHandler[name]
+		if !ok {
+			return nil, fmt.Errorf("plugin %s not found in registry", name)
+		}
+		mgr.plugins[name] = newPluginFn(mgr.cfg)
+	}
+
+	return mgr, nil
+}
+
+func (p *PluginManager) Stop() {
+	var wg sync.WaitGroup
+	for _, plugin := range p.plugins {
+		wg.Add(1)
+		go func(plugin api.Plugin) {
+			defer wg.Done()
+			if err := plugin.Stop(); err != nil {
+				p.l.Error("failed to stop plugin", zap.Error(err))
+				// Continue stopping other plugins.
+				// This allows us to stop as many plugins as possible,
+				// even if some plugins fail to stop.
+			}
+			p.l.Info("Cleaned up resource for plugin", zap.String("name", plugin.Name()))
+		}(plugin)
+	}
+	wg.Wait()
+}
+
+// Reconcile reconciles a particular plugin.
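+// It regenerates and recompiles the plugin's eBPF artifacts (a no-op for plugins that don't use eBPF),
+// then restarts the plugin by calling Stop followed by Init.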
+func (p *PluginManager) Reconcile(ctx context.Context, plugin api.Plugin) error {
+	defer p.tel.StopPerf(p.tel.StartPerf(fmt.Sprintf("reconcile-%s", plugin.Name())))
+	// Regenerate eBPF code and bpf object.
+	// This may be a no-op for plugins that don't use eBPF.
+	if err := plugin.Generate(ctx); err != nil {
+		return err
+	}
+	if err := plugin.Compile(ctx); err != nil {
+		return err
+	}
+
+	// Re-start plugin.
+	if err := plugin.Stop(); err != nil {
+		return err
+	}
+	if err := plugin.Init(); err != nil {
+		return err
+	}
+
+	p.l.Info("Reconciled plugin", zap.String("name", plugin.Name()))
+	return nil
+}
+
+// Start plugin manager.
+// Note: This blocks for as long as the main thread is running.
+func (p *PluginManager) Start(ctx context.Context) error {
+	counter := p.tel.StartPerf("start-plugin-manager")
+	// Stagger plugin starts evenly across the startup window.
+	// For example, with 2 plugins and a 10 second window,
+	// 10 / 2 = 5, so sleep 5s after each start:
+	// plugin 1 starts at 0s, plugin 2 at 5s.
+	// That way the plugins don't wake at the same time and get even execution time.
+
+	delay := float32(MAX_STARTUP_TIME) / float32(len(p.plugins))
+	p.l.Info("Starting plugin manager ...")
+	var err error
+
+	if p.cfg == nil {
+		return ErrNilCfg
+	}
+
+	if p.cfg.MetricsInterval == 0 {
+		return ErrZeroInterval
+	}
+
+	if p.cfg.EnablePodLevel {
+		p.l.Info("starting watchers")
+
+		// Start watcher manager
+		if err := p.watcherManager.Start(ctx); err != nil {
+			return errors.Wrap(err, "failed to start watcher manager")
+		}
+	}
+
+	// start all plugins
+	g, ctx := errgroup.WithContext(ctx)
+	for _, plugin := range p.plugins {
+		plug := plugin
+
+		reconcilectx, cancel := context.WithTimeout(ctx, time.Duration(MAX_RECONCILE_TIME))
+		defer cancel()
+		err = p.Reconcile(reconcilectx, plug)
+		if err != nil {
+			// Update control plane metrics counter
+			metrics.PluginManagerFailedToReconcileCounter.WithLabelValues(plugin.Name()).Inc()
+			return errors.Wrapf(err, "failed to reconcile plugin %s", plugin.Name())
+		}
+
+		g.Go(func() error {
+			p.l.Info(fmt.Sprintf("starting plugin %s", plug.Name()))
+			return errors.Wrapf(plug.Start(ctx), "failed to start plugin %s", plug.Name())
+		})
+
+		time.Sleep(time.Duration(delay))
+	}
+
+	p.tel.StopPerf(counter)
+	p.l.Info("successfully started pluginmanager")
+	// on cancel context, wait for all plugins to exit
+	err = g.Wait()
+	if err != nil {
+		p.l.Error("plugin manager exited with error", zap.Error(err))
+		return errors.Wrapf(err, "failed to start plugin manager, plugin exited")
+	}
+
+	if p.cfg.EnablePodLevel {
+		p.l.Info("stopping watcher manager")
+
+		// Stop watcher manager.
+ if err := p.watcherManager.Stop(ctx); err != nil { + return errors.Wrap(err, "failed to stop watcher manager") + } + } + + p.l.Info("stopping pluginmanager...") + return nil +} + +func (p *PluginManager) SetPlugin(name api.PluginName, plugin api.Plugin) { + if p == nil { + return + } + + if p.plugins == nil { + p.plugins = map[api.PluginName]api.Plugin{} + } + p.plugins[name] = plugin +} + +func (p *PluginManager) SetupChannel(c chan *v1.Event) { + for name, plugin := range p.plugins { + err := plugin.SetupChannel(c) + if err != nil { + p.l.Error("failed to setup channel for plugin", zap.String("plugin name", string(name)), zap.Error(err)) + } + } +} diff --git a/pkg/managers/pluginmanager/pluginmanager_test.go b/pkg/managers/pluginmanager/pluginmanager_test.go new file mode 100644 index 000000000..acecccbe7 --- /dev/null +++ b/pkg/managers/pluginmanager/pluginmanager_test.go @@ -0,0 +1,454 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package pluginmanager + +import ( + "context" + "errors" + "testing" + "time" + + kcfg "github.com/microsoft/retina/pkg/config" + "github.com/microsoft/retina/pkg/log" + watchermock "github.com/microsoft/retina/pkg/managers/watchermanager/mocks" + "github.com/microsoft/retina/pkg/metrics" + "github.com/microsoft/retina/pkg/plugin/api" + "github.com/microsoft/retina/pkg/telemetry" + "github.com/golang/mock/gomock" + dto "github.com/prometheus/client_model/go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" +) + +const ( + timeInter = time.Second * 10 +) + +var ( + cfgPodLevelEnabled = &kcfg.Config{ + MetricsInterval: timeInter, + EnablePodLevel: true, + } + cfgPodLevelDisabled = &kcfg.Config{ + MetricsInterval: timeInter, + EnablePodLevel: false, + } +) + +func setupWatcherManagerMock(ctl *gomock.Controller) (m *watchermock.MockIWatcherManager) { + m = watchermock.NewMockIWatcherManager(ctl) + m.EXPECT().Start(gomock.Any()).Return(nil).AnyTimes() + m.EXPECT().Stop(gomock.Any()).Return(nil).AnyTimes() + return +} + +func TestNewManager(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + tel := telemetry.NewNoopTelemetry() + tests := []struct { + name string + cfg *kcfg.Config + pluginName string + wantErr bool + }{ + { + name: "New Manager success Pod Level Disabled", + cfg: cfgPodLevelDisabled, + pluginName: "mockplugin", + wantErr: false, + }, + { + name: "New Manager success Pod Level Enabled", + cfg: cfgPodLevelEnabled, + pluginName: "mockplugin", + wantErr: false, + }, + { + name: "Fail with plugin not found Pod Level Disabled", + cfg: cfgPodLevelDisabled, + pluginName: "fakeplugin", + wantErr: true, + }, + { + name: "Fail with plugin not found Pod Level Enabled", + cfg: cfgPodLevelEnabled, + pluginName: "fakeplugin", + wantErr: true, + }, + } + + for _, tt := range tests { + mgr, err := NewPluginManager(tt.cfg, tel, api.PluginName(tt.pluginName)) + if tt.wantErr { + require.NotNil(t, err, "Expected error but got nil") + require.Nil(t, mgr, "Expected mgr to be nil but it isn't") + } else { + require.Nil(t, err, "Expected nil but got error:%w", err) + require.NotNil(t, mgr, "Expected mgr to be intialized but found nil") + require.Condition(t, assert.Comparison(func() bool { + _, ok := mgr.plugins[api.PluginName(tt.pluginName)] + return ok + }), "plugin not found in mgr map") + } + } +} + +func TestNewManagerStart(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + tel := telemetry.NewNoopTelemetry() + tests := []struct { + name string 
+ cfg *kcfg.Config + pluginName string + wantErr bool + }{ + { + name: "New Manager success Pod Level Disabled", + cfg: cfgPodLevelDisabled, + pluginName: "mockplugin", + wantErr: false, + }, + { + name: "New Manager success Pod Level Enabled", + cfg: cfgPodLevelEnabled, + pluginName: "mockplugin", + wantErr: false, + }, + } + + for _, tt := range tests { + mgr, err := NewPluginManager(tt.cfg, tel, api.PluginName(tt.pluginName)) + require.Nil(t, err, "Expected nil but got error:%w", err) + require.NotNil(t, mgr, "Expected mgr to be intialized but found nil") + require.Condition(t, assert.Comparison(func() bool { + _, ok := mgr.plugins[api.PluginName(tt.pluginName)] + return ok + }), "plugin not found in mgr map") + + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + err = mgr.Start(ctx) + require.Nil(t, err, "Expected nil but got error:%w", err) + }() + + time.Sleep(1 * time.Second) + cancel() + } +} + +func TestNewManagerWithPluginStartFailure(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + log.SetupZapLogger(log.GetDefaultLogOpts()) + + pluginName := "mockplugin" + + mgr := &PluginManager{ + cfg: cfgPodLevelEnabled, + l: log.Logger().Named("plugin-manager"), + plugins: make(map[api.PluginName]api.Plugin), + tel: telemetry.NewNoopTelemetry(), + watcherManager: setupWatcherManagerMock(ctl), + } + + mockPlugin := api.NewMockPlugin(ctl) + mockPlugin.EXPECT().Generate(gomock.Any()).Return(nil).AnyTimes() + mockPlugin.EXPECT().Compile(gomock.Any()).Return(nil).AnyTimes() + mockPlugin.EXPECT().Stop().Return(nil).AnyTimes() + mockPlugin.EXPECT().Init().Return(nil).AnyTimes() + mockPlugin.EXPECT().Start(gomock.Any()).Return(errors.New("Plugin failed to start")).AnyTimes() + mockPlugin.EXPECT().Name().Return(pluginName).AnyTimes() + + mgr.plugins[api.PluginName(pluginName)] = mockPlugin + + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + err := mgr.Start(ctx) + require.NotNil(t, err, "Expected Error but got nil:%w", err) + require.ErrorContains(t, err, "Plugin failed to start", err, "Expected error to contain 'Plugin failed to start' but got %s", err.Error()) + }() + + time.Sleep(1 * time.Second) + cancel() +} + +func TestNewManagerWithPluginReconcileFailure(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + log.SetupZapLogger(log.GetDefaultLogOpts()) + metrics.InitializeMetrics() + + pluginName := "mockplugin" + + mgr := &PluginManager{ + cfg: cfgPodLevelEnabled, + l: log.Logger().Named("plugin-manager"), + plugins: make(map[api.PluginName]api.Plugin), + tel: telemetry.NewNoopTelemetry(), + watcherManager: setupWatcherManagerMock(ctl), + } + + mockPlugin := api.NewMockPlugin(ctl) + mockPlugin.EXPECT().Generate(gomock.Any()).Return(nil).AnyTimes() + mockPlugin.EXPECT().Compile(gomock.Any()).Return(nil).AnyTimes() + mockPlugin.EXPECT().Stop().Return(errors.New("Plugin failed to stop")).AnyTimes() + mockPlugin.EXPECT().Init().Return(nil).AnyTimes() + mockPlugin.EXPECT().Start(gomock.Any()).Return(nil).AnyTimes() + mockPlugin.EXPECT().Name().Return(pluginName).AnyTimes() + + mgr.plugins[api.PluginName(pluginName)] = mockPlugin + + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + err := mgr.Start(ctx) + require.NotNil(t, err, "Expected Error but got nil:%w", err) + require.ErrorContains(t, err, "Plugin failed to stop", err, "Expected error to contain 'Plugin failed to start' but got %s", err.Error()) + count, error := 
metrics.PluginManagerFailedToReconcileCounter.GetMetricWithLabelValues(pluginName) + out := &dto.Metric{} + count.Write(out) + require.Nil(t, error, "Expected nil but got error:%w", error) + require.Equal(t, float64(1), *out.Counter.Value, "Expected 1 but got %f", *out.Counter.Value) + }() + + time.Sleep(1 * time.Second) + cancel() +} + +func TestPluginInit(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + tel := telemetry.NewNoopTelemetry() + tests := []struct { + name string + cfg *kcfg.Config + pluginName string + wantErr bool + }{ + { + name: "Plugin init successs Pod Level Disabled", + cfg: cfgPodLevelDisabled, + pluginName: "mockplugin", + wantErr: false, + }, + { + name: "Plugin init successs Pod Level Enabled", + cfg: cfgPodLevelEnabled, + pluginName: "mockplugin", + wantErr: false, + }, + } + for _, tt := range tests { + mgr, err := NewPluginManager(tt.cfg, tel, api.PluginName(tt.pluginName)) + require.Nil(t, err, "Expected nil but got error:%w", err) + for _, plugin := range mgr.plugins { + if tt.wantErr { + err := plugin.Init() + require.NotNil(t, err, "Expected Init err but got nil") + require.ErrorContains(t, err, "event channel is nil", "Expected event channel nil but got:%w", err) + } else { + err := plugin.Init() + require.Nil(t, err, "Expected nil but got init error:%w", err) + } + } + } +} + +func TestPluginStartWithoutInit(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + tel := telemetry.NewNoopTelemetry() + tests := []struct { + name string + cfg *kcfg.Config + pluginName string + wantErr bool + initPlugin bool + }{ + { + name: "Plugin start successs Pod Level Disabled", + cfg: cfgPodLevelDisabled, + pluginName: "mockplugin", + wantErr: true, + initPlugin: false, + }, + { + name: "Plugin start successs Pod Level Enabled", + cfg: cfgPodLevelEnabled, + pluginName: "mockplugin", + wantErr: false, + initPlugin: true, + }, + } + for _, tt := range tests { + mgr, err := NewPluginManager(tt.cfg, tel, api.PluginName(tt.pluginName)) + require.Nil(t, err, "Expected nil but got error:%w", err) + for _, plugin := range mgr.plugins { + if tt.initPlugin { + err := plugin.Init() + require.Nil(t, err, "Expected nil but got init error:%w", err) + } + if tt.wantErr { + err := plugin.Start(context.Background()) + require.NotNil(t, err, "Expected Start err but got nil") + require.ErrorContains(t, err, "plugin not initialized", "Expected event channel nil but got:%w", err) + } else { + err = plugin.Start(context.Background()) + require.Nil(t, err, "Expected nil but got start error:%w", err) + } + } + } +} + +func TestPluginStop(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + tel := telemetry.NewNoopTelemetry() + tests := []struct { + name string + cfg *kcfg.Config + pluginName string + wantStartErr bool + wantStopErr bool + initPlugin bool + startPlugin bool + }{ + { + name: "Plugin stop successs Pod Level Disabled", + cfg: cfgPodLevelDisabled, + pluginName: "mockplugin", + wantStartErr: false, + wantStopErr: false, + initPlugin: true, + startPlugin: true, + }, + { + name: "Plugin stop failure Pod Level Disabled", + cfg: cfgPodLevelDisabled, + pluginName: "mockplugin", + wantStartErr: true, + wantStopErr: false, + initPlugin: false, + startPlugin: false, + }, + { + name: "Plugin stop failure Pod Level Disabled", + cfg: cfgPodLevelDisabled, + pluginName: "mockplugin", + wantStartErr: false, + wantStopErr: false, + initPlugin: true, + startPlugin: true, + }, + { + name: "Plugin stop successs Pod Level Enabled", + cfg: cfgPodLevelEnabled, + 
pluginName: "mockplugin", + wantStartErr: false, + wantStopErr: false, + initPlugin: true, + startPlugin: true, + }, + { + name: "Plugin stop failure Pod Level Enabled", + cfg: cfgPodLevelEnabled, + pluginName: "mockplugin", + wantStartErr: true, + wantStopErr: false, + initPlugin: false, + startPlugin: false, + }, + { + name: "Plugin stop failure Pod Level Enabled", + cfg: cfgPodLevelEnabled, + pluginName: "mockplugin", + wantStartErr: false, + wantStopErr: false, + initPlugin: true, + startPlugin: true, + }, + } + for _, tt := range tests { + mgr, err := NewPluginManager(tt.cfg, tel, api.PluginName(tt.pluginName)) + require.Nil(t, err, "Expected nil but got error:%w", err) + for _, plugin := range mgr.plugins { + if tt.initPlugin { + err := plugin.Init() + require.Nil(t, err, "Expected nil but got init error:%w", err) + } + if tt.startPlugin { + err := plugin.Start(context.Background()) + if tt.wantStartErr { + require.NotNil(t, err, "Expected nil but got start error:%w", err) + } else { + require.Nil(t, err, "Expected nil but got stop error:%w", err) + } + } + err := plugin.Stop() + if tt.wantStopErr { + require.NotNil(t, err, "Expected stop err but got nil") + } else { + require.Nil(t, err, "Expected nil but got stop error:%w", err) + } + } + } +} + +func TestStopPluginManagerGracefully(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + log.SetupZapLogger(log.GetDefaultLogOpts()) + + pluginName := "mockplugin" + + mgr := &PluginManager{ + cfg: cfgPodLevelEnabled, + l: log.Logger().Named("plugin-manager"), + plugins: make(map[api.PluginName]api.Plugin), + tel: telemetry.NewNoopTelemetry(), + watcherManager: setupWatcherManagerMock(ctl), + } + + mockPlugin := api.NewMockPlugin(ctl) + mockPlugin.EXPECT().Generate(gomock.Any()).Return(nil).AnyTimes() + mockPlugin.EXPECT().Compile(gomock.Any()).Return(nil).AnyTimes() + mockPlugin.EXPECT().Stop().Return(nil).AnyTimes() + mockPlugin.EXPECT().Init().Return(nil).AnyTimes() + mockPlugin.EXPECT().Start(gomock.Any()).Return(nil).AnyTimes() + mockPlugin.EXPECT().Name().Return(pluginName).AnyTimes() + + mgr.plugins[api.PluginName(pluginName)] = mockPlugin + + ctx, cancel := context.WithCancel(context.Background()) + g, errctx := errgroup.WithContext(ctx) + + g.Go(func() error { + return mgr.Start(errctx) + }) + + time.Sleep(1 * time.Second) + cancel() + err := g.Wait() + require.NoError(t, err) +} + +func TestWatcherManagerFailure(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + log.SetupZapLogger(log.GetDefaultLogOpts()) + + m := watchermock.NewMockIWatcherManager(ctl) + m.EXPECT().Start(gomock.Any()).Return(errors.New("error")).AnyTimes() + + mgr := &PluginManager{ + cfg: cfgPodLevelEnabled, + l: log.Logger().Named("plugin-manager"), + plugins: make(map[api.PluginName]api.Plugin), + tel: telemetry.NewNoopTelemetry(), + watcherManager: m, + } + + err := mgr.Start(context.Background()) + require.NotNil(t, err, "Expected Start err but got nil") + require.ErrorContains(t, err, "failed to start watcher manager", "Expected watcher manager , but got:%w", err) +} diff --git a/pkg/managers/servermanager/servermanager.go b/pkg/managers/servermanager/servermanager.go new file mode 100644 index 000000000..7ae8c6c4d --- /dev/null +++ b/pkg/managers/servermanager/servermanager.go @@ -0,0 +1,44 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+package servermanager + +import ( + "context" + "fmt" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/server" + "go.uber.org/zap" +) + +type HTTPServer struct { + l *log.ZapLogger + host string + port int + router *server.Server +} + +func NewHTTPServer( + host string, port int, +) *HTTPServer { + logger := log.Logger().Named("http-server") + return &HTTPServer{ + l: logger, + host: host, + port: port, + } +} + +func (s *HTTPServer) Init() error { + s.l.Info("Initializing HTTP server ...") + rt := server.New(s.l) + rt.SetupHandlers() + s.router = rt + s.l.Info("HTTP server initialized...") + return nil +} + +func (s *HTTPServer) Start(ctx context.Context) error { + s.l.Info("Starting HTTP server ...", zap.String("host", s.host), zap.Int("port", s.port)) + return s.router.Start(ctx, fmt.Sprintf("%s:%d", s.host, s.port)) +} diff --git a/pkg/managers/watchermanager/mocks/mock_types.go b/pkg/managers/watchermanager/mocks/mock_types.go new file mode 100644 index 000000000..652a8d8e1 --- /dev/null +++ b/pkg/managers/watchermanager/mocks/mock_types.go @@ -0,0 +1,128 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: types.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockIWatcher is a mock of IWatcher interface. +type MockIWatcher struct { + ctrl *gomock.Controller + recorder *MockIWatcherMockRecorder +} + +// MockIWatcherMockRecorder is the mock recorder for MockIWatcher. +type MockIWatcherMockRecorder struct { + mock *MockIWatcher +} + +// NewMockIWatcher creates a new mock instance. +func NewMockIWatcher(ctrl *gomock.Controller) *MockIWatcher { + mock := &MockIWatcher{ctrl: ctrl} + mock.recorder = &MockIWatcherMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIWatcher) EXPECT() *MockIWatcherMockRecorder { + return m.recorder +} + +// Init mocks base method. +func (m *MockIWatcher) Init(ctx context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Init", ctx) + ret0, _ := ret[0].(error) + return ret0 +} + +// Init indicates an expected call of Init. +func (mr *MockIWatcherMockRecorder) Init(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Init", reflect.TypeOf((*MockIWatcher)(nil).Init), ctx) +} + +// Refresh mocks base method. +func (m *MockIWatcher) Refresh(ctx context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Refresh", ctx) + ret0, _ := ret[0].(error) + return ret0 +} + +// Refresh indicates an expected call of Refresh. +func (mr *MockIWatcherMockRecorder) Refresh(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Refresh", reflect.TypeOf((*MockIWatcher)(nil).Refresh), ctx) +} + +// Stop mocks base method. +func (m *MockIWatcher) Stop(ctx context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Stop", ctx) + ret0, _ := ret[0].(error) + return ret0 +} + +// Stop indicates an expected call of Stop. +func (mr *MockIWatcherMockRecorder) Stop(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockIWatcher)(nil).Stop), ctx) +} + +// MockIWatcherManager is a mock of IWatcherManager interface. 
+type MockIWatcherManager struct { + ctrl *gomock.Controller + recorder *MockIWatcherManagerMockRecorder +} + +// MockIWatcherManagerMockRecorder is the mock recorder for MockIWatcherManager. +type MockIWatcherManagerMockRecorder struct { + mock *MockIWatcherManager +} + +// NewMockIWatcherManager creates a new mock instance. +func NewMockIWatcherManager(ctrl *gomock.Controller) *MockIWatcherManager { + mock := &MockIWatcherManager{ctrl: ctrl} + mock.recorder = &MockIWatcherManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIWatcherManager) EXPECT() *MockIWatcherManagerMockRecorder { + return m.recorder +} + +// Start mocks base method. +func (m *MockIWatcherManager) Start(ctx context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Start", ctx) + ret0, _ := ret[0].(error) + return ret0 +} + +// Start indicates an expected call of Start. +func (mr *MockIWatcherManagerMockRecorder) Start(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockIWatcherManager)(nil).Start), ctx) +} + +// Stop mocks base method. +func (m *MockIWatcherManager) Stop(ctx context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Stop", ctx) + ret0, _ := ret[0].(error) + return ret0 +} + +// Stop indicates an expected call of Stop. +func (mr *MockIWatcherManagerMockRecorder) Stop(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockIWatcherManager)(nil).Stop), ctx) +} diff --git a/pkg/managers/watchermanager/types.go b/pkg/managers/watchermanager/types.go new file mode 100644 index 000000000..0e78cf315 --- /dev/null +++ b/pkg/managers/watchermanager/types.go @@ -0,0 +1,33 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package watchermanager + +import ( + "context" + "sync" + "time" + + "github.com/microsoft/retina/pkg/log" +) + +//go:generate go run github.com/golang/mock/mockgen@v1.6.0 -source=types.go -destination=mocks/mock_types.go -package=mocks . +type IWatcher interface { + // Init, Stop, and Refresh should only be called by watchermanager. + Init(ctx context.Context) error + Stop(ctx context.Context) error + Refresh(ctx context.Context) error +} + +type IWatcherManager interface { + Start(ctx context.Context) error + Stop(ctx context.Context) error +} + +type WatcherManager struct { + Watchers []IWatcher + l *log.ZapLogger + refreshRate time.Duration + cancel context.CancelFunc + wg sync.WaitGroup +} diff --git a/pkg/managers/watchermanager/watchermanager.go b/pkg/managers/watchermanager/watchermanager.go new file mode 100644 index 000000000..af6a69c88 --- /dev/null +++ b/pkg/managers/watchermanager/watchermanager.go @@ -0,0 +1,81 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package watchermanager + +import ( + "context" + "fmt" + "time" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/watchers/apiserver" + "github.com/microsoft/retina/pkg/watchers/endpoint" + "go.uber.org/zap" +) + +const ( + // DefaultRefreshRate is the default refresh rate for watchers. 
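+	// runWatcher invokes each watcher's Refresh on this interval.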
+ DefaultRefreshRate = 30 * time.Second +) + +func NewWatcherManager() *WatcherManager { + return &WatcherManager{ + Watchers: []IWatcher{ + endpoint.Watcher(), + apiserver.Watcher(), + }, + l: log.Logger().Named("watcher-manager"), + refreshRate: DefaultRefreshRate, + } +} + +func (wm *WatcherManager) Start(ctx context.Context) error { + newCtx, cancelCtx := context.WithCancel(ctx) + wm.cancel = cancelCtx + + for _, w := range wm.Watchers { + if err := w.Init(ctx); err != nil { + wm.l.Error("init failed", zap.String("watcher_type", fmt.Sprintf("%T", w))) + return err + } + wm.wg.Add(1) + go wm.runWatcher(newCtx, w) + wm.l.Info("watcher started", zap.String("watcher_type", fmt.Sprintf("%T", w))) + } + return nil +} + +func (wm *WatcherManager) Stop(ctx context.Context) error { + if wm.cancel != nil { + wm.cancel() // cancel all runWatcher + } + for _, w := range wm.Watchers { + if err := w.Stop(ctx); err != nil { + wm.l.Error("failed to stop", zap.String("watcher_type", fmt.Sprintf("%T", w)), zap.Error(err)) + return err + } + } + wm.wg.Wait() // wait for all runWatcher to stop + wm.l.Info("watcher manager stopped") + return nil +} + +func (wm *WatcherManager) runWatcher(ctx context.Context, w IWatcher) error { + defer wm.wg.Done() // signal that this runWatcher is done + ticker := time.NewTicker(wm.refreshRate) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + wm.l.Info("watcher stopping...", zap.String("watcher_type", fmt.Sprintf("%T", w))) + return nil + case <-ticker.C: + err := w.Refresh(ctx) + if err != nil { + wm.l.Error("refresh failed", zap.Error(err)) + return err + } + } + } +} diff --git a/pkg/managers/watchermanager/watchermanager_test.go b/pkg/managers/watchermanager/watchermanager_test.go new file mode 100644 index 000000000..1b45ae186 --- /dev/null +++ b/pkg/managers/watchermanager/watchermanager_test.go @@ -0,0 +1,65 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+package watchermanager + +import ( + "context" + "errors" + "testing" + + "github.com/microsoft/retina/pkg/log" + mock "github.com/microsoft/retina/pkg/managers/watchermanager/mocks" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" +) + +func TestStopWatcherManagerGracefully(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + log.SetupZapLogger(log.GetDefaultLogOpts()) + mgr := NewWatcherManager() + + ctx, _ := context.WithCancel(context.Background()) + g, errctx := errgroup.WithContext(ctx) + + g.Go(func() error { + return mgr.Start(errctx) + }) + err := g.Wait() + + mgr.Stop(errctx) + require.NoError(t, err) +} + +func TestWatcherInitFailsGracefully(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + log.SetupZapLogger(log.GetDefaultLogOpts()) + + mockApiServerWatcher := mock.NewMockIWatcher(ctl) + mockEndpointWatcher := mock.NewMockIWatcher(ctl) + + mgr := NewWatcherManager() + mgr.Watchers = []IWatcher{ + mockApiServerWatcher, + mockEndpointWatcher, + } + + mockApiServerWatcher.EXPECT().Init(gomock.Any()).Return(errors.New("error")).AnyTimes() + mockEndpointWatcher.EXPECT().Init(gomock.Any()).Return(errors.New("error")).AnyTimes() + + err := mgr.Start(context.Background()) + require.NotNil(t, err, "Expected error when starting watcher manager") +} + +func TestWatcherStopWithoutStart(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + log.SetupZapLogger(log.GetDefaultLogOpts()) + + mgr := NewWatcherManager() + + err := mgr.Stop(context.Background()) + require.Nil(t, err, "Expected no error when stopping watcher manager without starting it") +} diff --git a/pkg/metrics/interfaces.go b/pkg/metrics/interfaces.go new file mode 100644 index 000000000..7e3306ec6 --- /dev/null +++ b/pkg/metrics/interfaces.go @@ -0,0 +1,26 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" +) + +//go:generate go run github.com/golang/mock/mockgen@v1.6.0 -source=interfaces.go -destination=mock_types.go -package=metrics + +type ICounterVec interface { + WithLabelValues(lvs ...string) prometheus.Counter + GetMetricWithLabelValues(lvs ...string) (prometheus.Counter, error) +} + +type IGaugeVec interface { + WithLabelValues(lvs ...string) prometheus.Gauge + GetMetricWithLabelValues(lvs ...string) (prometheus.Gauge, error) +} + +type IHistogramVec interface { + Observe(float64) + // Keep the Write method for testing purposes. + Write(*dto.Metric) error +} diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go new file mode 100644 index 000000000..44e19f861 --- /dev/null +++ b/pkg/metrics/metrics.go @@ -0,0 +1,163 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package metrics + +import ( + "github.com/microsoft/retina/pkg/exporter" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/utils" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" +) + +// Initiates and creates the common metrics +func InitializeMetrics() { + metricsLogger = log.Logger().Named("metrics") + + if isInitialized { + metricsLogger.Warn("Metrics already initialized. 
Exiting.") + return + } + DropCounter = exporter.CreatePrometheusGaugeVecForMetric( + exporter.DefaultRegistry, + utils.DropCountTotalName, + dropCountTotalDescription, + utils.Reason, + utils.Direction) + DropBytesCounter = exporter.CreatePrometheusGaugeVecForMetric( + exporter.DefaultRegistry, + utils.DropBytesTotalName, + dropBytesTotalDescription, + utils.Reason, + utils.Direction) + ForwardCounter = exporter.CreatePrometheusGaugeVecForMetric( + exporter.DefaultRegistry, + utils.ForwardCountTotalName, + forwardCountTotalDescription, + utils.Direction) + ForwardBytesCounter = exporter.CreatePrometheusGaugeVecForMetric( + exporter.DefaultRegistry, + utils.ForwardBytesTotalName, + forwardBytesTotalDescription, + utils.Direction) + WindowsCounter = exporter.CreatePrometheusGaugeVecForMetric( + exporter.DefaultRegistry, + hnsStats, + hnsStatsDescription, + utils.Direction, + ) + NodeConnectivityStatusGauge = exporter.CreatePrometheusGaugeVecForMetric( + exporter.DefaultRegistry, + utils.NodeConnectivityStatusName, + nodeConnectivityStatusDescription, + utils.SourceNodeName, + utils.TargetNodeName) + NodeConnectivityLatencyGauge = exporter.CreatePrometheusGaugeVecForMetric( + exporter.DefaultRegistry, + utils.NodeConnectivityLatencySecondsName, + nodeConnectivityLatencySecondsDescription, + utils.SourceNodeName, + utils.TargetNodeName) + + TCPStateGauge = exporter.CreatePrometheusGaugeVecForMetric( + exporter.DefaultRegistry, + utils.TcpStateGaugeName, + tcpStateGaugeDescription, + utils.State, + ) + TCPConnectionRemoteGauge = exporter.CreatePrometheusGaugeVecForMetric( + exporter.DefaultRegistry, + utils.TcpConnectionRemoteGaugeName, + tcpConnectionRemoteGaugeDescription, + utils.Address, + utils.Port, + ) + TCPConnectionStats = exporter.CreatePrometheusGaugeVecForMetric( + exporter.DefaultRegistry, + utils.TcpConnectionStatsName, + tcpConnectionStatsDescription, + utils.StatName, + ) + TCPFlagCounters = exporter.CreatePrometheusGaugeVecForMetric( + exporter.DefaultRegistry, + utils.TcpFlagCounters, + tcpFlagCountersDescription, + utils.Direction, + utils.Flag, + ) + + // IP States + IPConnectionStats = exporter.CreatePrometheusGaugeVecForMetric( + exporter.DefaultRegistry, + utils.IpConnectionStatsName, + ipConnectionStatsDescription, + utils.StatName, + ) + + // UDP Stats + UDPConnectionStats = exporter.CreatePrometheusGaugeVecForMetric( + exporter.DefaultRegistry, + utils.UdpConnectionStatsName, + udpConnectionStatsDescription, + utils.StatName, + ) + UDPActiveSocketsCounter = exporter.CreatePrometheusGaugeVecForMetric( + exporter.DefaultRegistry, + utils.UdpActiveSocketsCounterName, + udpActiveSocketsCounterDescription, + ) + + // Interface Stats + InterfaceStats = exporter.CreatePrometheusGaugeVecForMetric( + exporter.DefaultRegistry, + utils.InterfaceStatsName, + interfaceStatsDescription, + utils.InterfaceName, + utils.StatName, + ) + + // Control Plane Metrics + PluginManagerFailedToReconcileCounter = exporter.CreatePrometheusCounterVecForControlPlaneMetric( + exporter.DefaultRegistry, + pluginManagerFailedToReconcileCounterName, + pluginManagerFailedToReconcileCounterDescription, + utils.Reason, + ) + + // Lost Events defines the number of packets lost from reading eBPF maps + LostEventsCounter = exporter.CreatePrometheusCounterVecForControlPlaneMetric( + exporter.DefaultRegistry, + lostEventsCounterName, + lostEventsCounterDescription, + utils.Type, + utils.Reason, + ) + + // DNS Metrics. 
+ DNSRequestCounter = exporter.CreatePrometheusCounterVecForMetric( + exporter.DefaultRegistry, + utils.DNSRequestCounterName, + dnsRequestCounterDescription, + utils.DNSLabels..., + ) + DNSResponseCounter = exporter.CreatePrometheusCounterVecForMetric( + exporter.DefaultRegistry, + utils.DNSResponseCounterName, + dnsResponseCounterDescription, + utils.DNSLabels..., + ) + + isInitialized = true + metricsLogger.Info("Metrics initialized") +} + +// GetCounterValue returns the current value +// stored for the counter +func GetCounterValue(m prometheus.Counter) float64 { + var pm dto.Metric + err := m.Write(&pm) + if err == nil { + return *pm.Counter.Value + } + return 0 +} diff --git a/pkg/metrics/metrics_test.go b/pkg/metrics/metrics_test.go new file mode 100644 index 000000000..60a3317b7 --- /dev/null +++ b/pkg/metrics/metrics_test.go @@ -0,0 +1,37 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package metrics + +import ( + "testing" + + "github.com/microsoft/retina/pkg/log" +) + +func TestInitialization_FirstInit(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + InitializeMetrics() + + // All metrics should be initialized. + objs := []interface{}{DropCounter, DropBytesCounter, ForwardBytesCounter, ForwardCounter, NodeConnectivityStatusGauge, NodeConnectivityLatencyGauge, PluginManagerFailedToReconcileCounter} + for _, obj := range objs { + if obj == nil { + t.Fatalf("Expected all metrics to be initialized") + } + } +} + +func TestInitialization_MultipleInit(t *testing.T) { + defer func() { + if r := recover(); r != nil { + t.Fatalf("Did not expect InitializeMetrics to panic on reinitialization") + } + }() + log.SetupZapLogger(log.GetDefaultLogOpts()) + + InitializeMetrics() + // Should not panic when reinitializing. + InitializeMetrics() +} diff --git a/pkg/metrics/mock_types.go b/pkg/metrics/mock_types.go new file mode 100644 index 000000000..58a8e2180 --- /dev/null +++ b/pkg/metrics/mock_types.go @@ -0,0 +1,182 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: interfaces.go + +// Package metrics is a generated GoMock package. +package metrics + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + prometheus "github.com/prometheus/client_golang/prometheus" + io_prometheus_client "github.com/prometheus/client_model/go" +) + +// MockICounterVec is a mock of ICounterVec interface. +type MockICounterVec struct { + ctrl *gomock.Controller + recorder *MockICounterVecMockRecorder +} + +// MockICounterVecMockRecorder is the mock recorder for MockICounterVec. +type MockICounterVecMockRecorder struct { + mock *MockICounterVec +} + +// NewMockICounterVec creates a new mock instance. +func NewMockICounterVec(ctrl *gomock.Controller) *MockICounterVec { + mock := &MockICounterVec{ctrl: ctrl} + mock.recorder = &MockICounterVecMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockICounterVec) EXPECT() *MockICounterVecMockRecorder { + return m.recorder +} + +// GetMetricWithLabelValues mocks base method. +func (m *MockICounterVec) GetMetricWithLabelValues(lvs ...string) (prometheus.Counter, error) { + m.ctrl.T.Helper() + varargs := []interface{}{} + for _, a := range lvs { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetMetricWithLabelValues", varargs...) + ret0, _ := ret[0].(prometheus.Counter) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMetricWithLabelValues indicates an expected call of GetMetricWithLabelValues. 
+func (mr *MockICounterVecMockRecorder) GetMetricWithLabelValues(lvs ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMetricWithLabelValues", reflect.TypeOf((*MockICounterVec)(nil).GetMetricWithLabelValues), lvs...) +} + +// WithLabelValues mocks base method. +func (m *MockICounterVec) WithLabelValues(lvs ...string) prometheus.Counter { + m.ctrl.T.Helper() + varargs := []interface{}{} + for _, a := range lvs { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "WithLabelValues", varargs...) + ret0, _ := ret[0].(prometheus.Counter) + return ret0 +} + +// WithLabelValues indicates an expected call of WithLabelValues. +func (mr *MockICounterVecMockRecorder) WithLabelValues(lvs ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithLabelValues", reflect.TypeOf((*MockICounterVec)(nil).WithLabelValues), lvs...) +} + +// MockIGaugeVec is a mock of IGaugeVec interface. +type MockIGaugeVec struct { + ctrl *gomock.Controller + recorder *MockIGaugeVecMockRecorder +} + +// MockIGaugeVecMockRecorder is the mock recorder for MockIGaugeVec. +type MockIGaugeVecMockRecorder struct { + mock *MockIGaugeVec +} + +// NewMockIGaugeVec creates a new mock instance. +func NewMockIGaugeVec(ctrl *gomock.Controller) *MockIGaugeVec { + mock := &MockIGaugeVec{ctrl: ctrl} + mock.recorder = &MockIGaugeVecMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIGaugeVec) EXPECT() *MockIGaugeVecMockRecorder { + return m.recorder +} + +// GetMetricWithLabelValues mocks base method. +func (m *MockIGaugeVec) GetMetricWithLabelValues(lvs ...string) (prometheus.Gauge, error) { + m.ctrl.T.Helper() + varargs := []interface{}{} + for _, a := range lvs { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetMetricWithLabelValues", varargs...) + ret0, _ := ret[0].(prometheus.Gauge) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMetricWithLabelValues indicates an expected call of GetMetricWithLabelValues. +func (mr *MockIGaugeVecMockRecorder) GetMetricWithLabelValues(lvs ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMetricWithLabelValues", reflect.TypeOf((*MockIGaugeVec)(nil).GetMetricWithLabelValues), lvs...) +} + +// WithLabelValues mocks base method. +func (m *MockIGaugeVec) WithLabelValues(lvs ...string) prometheus.Gauge { + m.ctrl.T.Helper() + varargs := []interface{}{} + for _, a := range lvs { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "WithLabelValues", varargs...) + ret0, _ := ret[0].(prometheus.Gauge) + return ret0 +} + +// WithLabelValues indicates an expected call of WithLabelValues. +func (mr *MockIGaugeVecMockRecorder) WithLabelValues(lvs ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithLabelValues", reflect.TypeOf((*MockIGaugeVec)(nil).WithLabelValues), lvs...) +} + +// MockIHistogramVec is a mock of IHistogramVec interface. +type MockIHistogramVec struct { + ctrl *gomock.Controller + recorder *MockIHistogramVecMockRecorder +} + +// MockIHistogramVecMockRecorder is the mock recorder for MockIHistogramVec. +type MockIHistogramVecMockRecorder struct { + mock *MockIHistogramVec +} + +// NewMockIHistogramVec creates a new mock instance. 
+func NewMockIHistogramVec(ctrl *gomock.Controller) *MockIHistogramVec { + mock := &MockIHistogramVec{ctrl: ctrl} + mock.recorder = &MockIHistogramVecMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIHistogramVec) EXPECT() *MockIHistogramVecMockRecorder { + return m.recorder +} + +// Observe mocks base method. +func (m *MockIHistogramVec) Observe(arg0 float64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Observe", arg0) +} + +// Observe indicates an expected call of Observe. +func (mr *MockIHistogramVecMockRecorder) Observe(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Observe", reflect.TypeOf((*MockIHistogramVec)(nil).Observe), arg0) +} + +// Write mocks base method. +func (m *MockIHistogramVec) Write(arg0 *io_prometheus_client.Metric) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Write", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Write indicates an expected call of Write. +func (mr *MockIHistogramVecMockRecorder) Write(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockIHistogramVec)(nil).Write), arg0) +} diff --git a/pkg/metrics/types.go b/pkg/metrics/types.go new file mode 100644 index 000000000..8e8ce5e6e --- /dev/null +++ b/pkg/metrics/types.go @@ -0,0 +1,163 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package metrics + +import ( + "github.com/microsoft/retina/pkg/log" + "github.com/cilium/cilium/api/v1/flow" + "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" +) + +const ( + // Control plane metrics + pluginManagerFailedToReconcileCounterName = "plugin_manager_failed_to_reconcile" + lostEventsCounterName = "lost_events_counter" + + // Windows + hnsStats = "windows_hns_stats" + hnsStatsDescription = "Include many different metrics from packets sent/received to closed connections" + + // Linux only metrics (for now). 
+ nodeApiServerHandshakeLatencyHistName = "node_apiserver_handshake_latency_ms" + + // Metric Descriptions + dropCountTotalDescription = "Total dropped packets" + dropBytesTotalDescription = "Total dropped bytes" + forwardCountTotalDescription = "Total forwarded packets" + forwardBytesTotalDescription = "Total forwarded bytes" + nodeConnectivityStatusDescription = "The last observed status of both ICMP and HTTP connectivity between the current Cilium agent and other Cilium nodes" + nodeConnectivityLatencySecondsDescription = "The last observed latency between the current Cilium agent and other Cilium nodes in seconds" + tcpStateGaugeDescription = "number of active TCP connections by state" + tcpConnectionRemoteGaugeDescription = "number of active TCP connections by remote address" + tcpConnectionStatsDescription = "TCP connections Statistics" + tcpFlagCountersDescription = "TCP counters by flag" + ipConnectionStatsDescription = "IP connections Statistics" + udpConnectionStatsDescription = "UDP connections Statistics" + udpActiveSocketsCounterDescription = "number of active UDP sockets" + interfaceStatsDescription = "Interface Statistics" + nodeApiServerHandshakeLatencyDesc = "Histogram depicting latency of the TCP handshake between nodes and Kubernetes API server measured in milliseconds" + dnsRequestCounterDescription = "DNS requests by statistics" + dnsResponseCounterDescription = "DNS responses by statistics" + + // Control plane metrics + pluginManagerFailedToReconcileCounterDescription = "Number of times the plugin manager failed to reconcile the plugins" + lostEventsCounterDescription = "Number of events lost in control plane" +) + +// Metric Counters +var ( + // Prevent re-initialization + isInitialized bool + + // Common counters across os distributions + DropCounter IGaugeVec + DropBytesCounter IGaugeVec + ForwardCounter IGaugeVec + ForwardBytesCounter IGaugeVec + + WindowsCounter IGaugeVec + + // Common gauges across os distributions + NodeConnectivityStatusGauge IGaugeVec + NodeConnectivityLatencyGauge IGaugeVec + + // TCP Stats + TCPStateGauge IGaugeVec + TCPConnectionRemoteGauge IGaugeVec + TCPConnectionStats IGaugeVec + TCPFlagCounters IGaugeVec + + // IP States + IPConnectionStats IGaugeVec + + // UDP Stats + UDPConnectionStats IGaugeVec + UDPActiveSocketsCounter IGaugeVec + + // Interface Stats + InterfaceStats IGaugeVec + + metricsLogger *log.ZapLogger + + // Control Plane Metrics + PluginManagerFailedToReconcileCounter ICounterVec + LostEventsCounter ICounterVec + + // DNS Metrics. 
+	DNSRequestCounter  ICounterVec
+	DNSResponseCounter ICounterVec
+)
+
+func ToPrometheusType(metric interface{}) prometheus.Collector {
+	if metric == nil {
+		return nil
+	}
+	switch m := metric.(type) {
+	case IGaugeVec:
+		return m.(*prometheus.GaugeVec)
+	case ICounterVec:
+		return m.(*prometheus.CounterVec)
+	case IHistogramVec:
+		return m.(prometheus.Histogram)
+	default:
+		metricsLogger.Error("error converting unknown metric type", zap.Any("metric", m))
+		return nil
+	}
+}
+
+type DropReasonType uint32
+
+// Alert: this ordering should match the drop_reason_t enum ordering
+// in dropreason.h of DropReason plugin
+const (
+	IPTABLE_RULE_DROP DropReasonType = iota
+	IPTABLE_NAT_DROP
+	TCP_CONNECT_BASIC
+	TCP_ACCEPT_BASIC
+	TCP_CLOSE_BASIC
+	CONNTRACK_ADD_DROP
+	UNKNOWN_DROP
+)
+
+func GetDropType(value uint32) DropReasonType {
+	switch value {
+	case 0:
+		return IPTABLE_RULE_DROP
+	case 1:
+		return IPTABLE_NAT_DROP
+	case 2:
+		return TCP_CONNECT_BASIC
+	case 3:
+		return TCP_ACCEPT_BASIC
+	case 4:
+		return TCP_CLOSE_BASIC
+	case 5:
+		return CONNTRACK_ADD_DROP
+	default:
+		return UNKNOWN_DROP
+	}
+}
+
+func GetDropTypeFlowDropReason(dr flow.DropReason) DropReasonType {
+	return GetDropType(uint32(dr))
+}
+
+func (d DropReasonType) String() string {
+	switch d {
+	case IPTABLE_RULE_DROP:
+		return "IPTABLE_RULE_DROP"
+	case IPTABLE_NAT_DROP:
+		return "IPTABLE_NAT_DROP"
+	case TCP_CONNECT_BASIC:
+		return "TCP_CONNECT_BASIC"
+	case TCP_ACCEPT_BASIC:
+		return "TCP_ACCEPT_BASIC"
+	case TCP_CLOSE_BASIC:
+		return "TCP_CLOSE_BASIC"
+	case CONNTRACK_ADD_DROP:
+		return "CONNTRACK_ADD_DROP"
+	default:
+		return "UNKNOWN_DROP"
+	}
+}
diff --git a/pkg/module/metrics/basemetricsobject.go b/pkg/module/metrics/basemetricsobject.go
new file mode 100644
index 000000000..19f0b9e67
--- /dev/null
+++ b/pkg/module/metrics/basemetricsobject.go
@@ -0,0 +1,53 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+package metrics
+
+import (
+	api "github.com/microsoft/retina/crd/api/v1alpha1"
+	"github.com/microsoft/retina/pkg/log"
+)
+
+type baseMetricObject struct {
+	advEnable   bool
+	contextMode enrichmentContext
+	ctxOptions  *api.MetricsContextOptions
+	srcCtx      ContextOptionsInterface
+	dstCtx      ContextOptionsInterface
+	l           *log.ZapLogger
+}
+
+func newBaseMetricsObject(ctxOptions *api.MetricsContextOptions, fl *log.ZapLogger, isLocalContext enrichmentContext) baseMetricObject {
+	b := baseMetricObject{
+		advEnable:   ctxOptions.IsAdvanced(),
+		ctxOptions:  ctxOptions,
+		l:           fl,
+		contextMode: isLocalContext,
+	}
+
+	b.populateCtxOptions(ctxOptions)
+	return b
+}
+
+func (b *baseMetricObject) populateCtxOptions(ctxOptions *api.MetricsContextOptions) {
+	if b.isLocalContext() {
+		// when local context is enabled, we do not need the context options for both src and dst:
+		// metrics aggregation will be on a single-pod basis and not the src/dst pod combination basis.
+		// So we can get away with just one context type. For this reason we only use srcCtx and
+		// can ignore adding a destination context.
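+		// For example, a local-context series would carry the single pod's labels
+		// (ip, namespace, podname, plus workload/service/port when configured)
+		// instead of separate source_* and destination_* label sets.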
+ if ctxOptions.SourceLabels != nil { + b.srcCtx = NewCtxOption(ctxOptions.SourceLabels, localCtx) + } + } else { + if ctxOptions.SourceLabels != nil { + b.srcCtx = NewCtxOption(ctxOptions.SourceLabels, source) + } + + if ctxOptions.DestinationLabels != nil { + b.dstCtx = NewCtxOption(ctxOptions.DestinationLabels, destination) + } + } +} + +func (b *baseMetricObject) isLocalContext() bool { + return b.contextMode == localContext +} diff --git a/pkg/module/metrics/dns.go b/pkg/module/metrics/dns.go new file mode 100644 index 000000000..00b2d2467 --- /dev/null +++ b/pkg/module/metrics/dns.go @@ -0,0 +1,179 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package metrics + +import ( + "fmt" + "strings" + + api "github.com/microsoft/retina/crd/api/v1alpha1" + "github.com/microsoft/retina/pkg/exporter" + "github.com/microsoft/retina/pkg/log" + metricsinit "github.com/microsoft/retina/pkg/metrics" + "github.com/microsoft/retina/pkg/utils" + "github.com/cilium/cilium/api/v1/flow" + v1 "github.com/cilium/cilium/api/v1/flow" + "go.uber.org/zap" +) + +const ( + // Metric descriptions + DNSRequestCountDesc = "Total number of DNS query packets" + DNSResponseCountDesc = "Total number of DNS response packets" +) + +var ( + DNSRequestCountName = fmt.Sprintf("adv_%s", utils.DNSRequestCounterName) + DNSResponseCountName = fmt.Sprintf("adv_%s", utils.DNSResponseCounterName) +) + +type DNSMetrics struct { + baseMetricObject + dnsMetrics metricsinit.ICounterVec + metricName string +} + +func NewDNSMetrics(ctxOptions *api.MetricsContextOptions, fl *log.ZapLogger, isLocalContext enrichmentContext) *DNSMetrics { + if ctxOptions == nil || !strings.Contains(strings.ToLower(ctxOptions.MetricName), "dns") { + return nil + } + + fl = fl.Named("dns-metricsmodule") + fl.Info("Creating DNS count metrics", zap.Any("options", ctxOptions)) + return &DNSMetrics{ + baseMetricObject: newBaseMetricsObject(ctxOptions, fl, isLocalContext), + } +} + +func (d *DNSMetrics) Init(metricName string) { + // TODO: Remove metricName from Init(). This makes the implementation of metrics + // convoluted and difficult to understand. + d.metricName = metricName + switch metricName { + case utils.DNSRequestCounterName: + d.dnsMetrics = exporter.CreatePrometheusCounterVecForMetric( + exporter.AdvancedRegistry, + DNSRequestCountName, + DNSRequestCountDesc, + d.getLabels()..., + ) + case utils.DNSResponseCounterName: + d.dnsMetrics = exporter.CreatePrometheusCounterVecForMetric( + exporter.AdvancedRegistry, + DNSResponseCountName, + DNSResponseCountDesc, + d.getLabels()..., + ) + } +} + +func (d *DNSMetrics) getLabels() []string { + labels := utils.DNSLabels + if d.srcCtx != nil { + labels = append(labels, d.srcCtx.getLabels()...) + d.l.Info("src labels", zap.Any("labels", labels)) + } + + if d.dstCtx != nil { + labels = append(labels, d.dstCtx.getLabels()...) + d.l.Info("dst labels", zap.Any("labels", labels)) + } + + return labels +} + +func (d *DNSMetrics) values(flow *flow.Flow) []string { + flowDns, dnsType, numResponses := utils.GetDns(flow) + if flowDns == nil { + return nil + } + if dnsType == utils.DNSType_UNKNOWN || + (d.metricName == utils.DNSRequestCounterName && dnsType != utils.DNSType_QUERY) || + (d.metricName == utils.DNSResponseCounterName && dnsType != utils.DNSType_RESPONSE) { + return nil + } + + // Ref: DNSLabels {"return_code", "query_type", "query", "response", "num_response"} + // "Response" label for DNS maybe empty. 
This is to avoid + // https://github.com/inspektor-gadget/inspektor-gadget/issues/2008 . + // Also ref: https://github.com/inspektor-gadget/inspektor-gadget/blob/main/docs/gadgets/trace/dns.md#limitations . + labels := []string{ + utils.DnsRcodeToString(flow), + strings.Join(flowDns.Qtypes, ","), flowDns.Query, strings.Join(flowDns.Ips, ","), fmt.Sprintf("%d", numResponses), + } + return labels +} + +func (d *DNSMetrics) ProcessFlow(flow *v1.Flow) { + if flow == nil { + return + } + + if flow.Verdict != utils.Verdict_DNS { + return + } + + if d.isLocalContext() { + // when localcontext is enabled, we do not need the context options for both src and dst + // metrics aggregation will be on a single pod basis and not the src/dst pod combination basis. + d.processLocalCtxFlow(flow) + return + } + + labels := d.values(flow) + if labels == nil { + return + } + if d.srcCtx != nil { + srcLabels := d.srcCtx.getValues(flow) + if len(srcLabels) > 0 { + labels = append(labels, srcLabels...) + } + } + + if d.dstCtx != nil { + dstLabels := d.dstCtx.getValues(flow) + if len(dstLabels) > 0 { + labels = append(labels, dstLabels...) + } + } + + d.dnsMetrics.WithLabelValues(labels...).Inc() + d.l.Debug("Update dns metric in remote ctx", zap.Any("metric", d.dnsMetrics), zap.Any("labels", labels)) +} + +func (d *DNSMetrics) processLocalCtxFlow(flow *v1.Flow) { + labelValuesMap := d.srcCtx.getLocalCtxValues(flow) + if labelValuesMap == nil { + return + } + + labels := d.values(flow) + if labels == nil { + return + } + + if len(labelValuesMap[ingress]) > 0 && len(labelValuesMap[egress]) > 0 { + // Check flow direction. + if flow.TrafficDirection == v1.TrafficDirection_INGRESS { + // For ingress flows, we add destination labels. + labels = append(labels, labelValuesMap[ingress]...) + } else { + // For egress flows, we add source labels. + labels = append(labels, labelValuesMap[egress]...) + } + } else if len(labelValuesMap[ingress]) > 0 { + labels = append(labels, labelValuesMap[ingress]...) + } else if len(labelValuesMap[egress]) > 0 { + labels = append(labels, labelValuesMap[egress]...) + } else { + return + } + d.dnsMetrics.WithLabelValues(labels...).Inc() + d.l.Debug("Update dns metric in local ctx", zap.Any("metric", d.dnsMetrics), zap.Any("labels", labels)) +} + +func (d *DNSMetrics) Clean() { + exporter.UnregisterMetric(exporter.AdvancedRegistry, metricsinit.ToPrometheusType(d.dnsMetrics)) +} diff --git a/pkg/module/metrics/dns_test.go b/pkg/module/metrics/dns_test.go new file mode 100644 index 000000000..e8b010c8e --- /dev/null +++ b/pkg/module/metrics/dns_test.go @@ -0,0 +1,262 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package metrics + +import ( + "reflect" + "testing" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/metrics" + "github.com/cilium/cilium/api/v1/flow" + "github.com/golang/mock/gomock" + "github.com/prometheus/client_golang/prometheus" + "gotest.tools/v3/assert" + + "github.com/microsoft/retina/pkg/utils" +) + +func TestGetLabels(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + l := log.Logger().Named("testGetLabels") + + tests := []struct { + name string + want []string + d *DNSMetrics + }{ + { + name: "basic context", + want: utils.DNSLabels, + d: &DNSMetrics{ + baseMetricObject: baseMetricObject{ + srcCtx: nil, + dstCtx: nil, + }, + }, + }, + { + name: "local context", + want: append(utils.DNSLabels, "ip", "namespace", "podname", "workloadKind", "workloadName", "service", "port"), + d: &DNSMetrics{ + baseMetricObject: baseMetricObject{ + srcCtx: &ContextOptions{ + option: localCtx, + IP: true, + Namespace: true, + Podname: true, + Service: true, + Port: true, + Workload: true, + }, + dstCtx: nil, + l: l, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.d.getLabels(); !reflect.DeepEqual(got, tt.want) { + t.Errorf("GetLabels() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestValues(t *testing.T) { + testR := &flow.Flow{} + utils.AddDnsInfo(testR, "R", 0, "bing.com", []string{"A"}, 1, []string{"1.1.1.1"}) + + testQ := &flow.Flow{} + utils.AddDnsInfo(testQ, "Q", 0, "bing.com", []string{"A"}, 0, []string{}) + + testU := &flow.Flow{} + utils.AddDnsInfo(testU, "U", 0, "bing.com", []string{"A"}, 0, []string{}) + + tests := []struct { + name string + want []string + d *DNSMetrics + input *flow.Flow + l7Type flow.L7FlowType + }{ + { + name: "basic context", + want: nil, + d: &DNSMetrics{}, + input: nil, + l7Type: 0, + }, + { + name: "Query", + want: []string{"NOERROR", "A", "bing.com", "", "0"}, + d: &DNSMetrics{metricName: utils.DNSRequestCounterName}, + input: testQ, + l7Type: flow.L7FlowType_REQUEST, + }, + { + name: "Response", + want: []string{"NOERROR", "A", "bing.com", "1.1.1.1", "1"}, + d: &DNSMetrics{metricName: utils.DNSResponseCounterName}, + input: testR, + l7Type: flow.L7FlowType_RESPONSE, + }, + { + name: "UnknownType/DNSRequest", + want: nil, + d: &DNSMetrics{metricName: utils.DNSRequestCounterName}, + input: testU, + l7Type: flow.L7FlowType_UNKNOWN_L7_TYPE, + }, + { + name: "UnknownType/DNSResponse", + want: nil, + d: &DNSMetrics{metricName: utils.DNSResponseCounterName}, + input: testU, + l7Type: flow.L7FlowType_UNKNOWN_L7_TYPE, + }, + { + name: "Query/ResponseMetric", + want: nil, + d: &DNSMetrics{metricName: utils.DNSResponseCounterName}, + input: testQ, + l7Type: flow.L7FlowType_REQUEST, + }, + { + name: "Response/RequestMetric", + want: nil, + d: &DNSMetrics{metricName: utils.DNSRequestCounterName}, + input: testR, + l7Type: flow.L7FlowType_RESPONSE, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.d.values(tt.input); !reflect.DeepEqual(got, tt.want) { + t.Errorf("Values() = %v, want %v", got, tt.want) + } + if tt.input == nil { + return + } + assert.Equal(t, tt.input.Type, flow.FlowType_L7) + assert.Equal(t, tt.input.GetL7().GetType(), tt.l7Type) + }) + } +} + +func TestProcessLocalCtx(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + l := log.Logger().Named("testValues") + + c := prometheus.NewCounter(prometheus.CounterOpts{}) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + testR := &flow.Flow{} + 
utils.AddDnsInfo(testR, "R", 0, "bing.com", []string{"A"}, 1, []string{"1.1.1.1"}) + + testIngress := &flow.Flow{TrafficDirection: flow.TrafficDirection_INGRESS} + utils.AddDnsInfo(testIngress, "R", 0, "bing.com", []string{"A"}, 1, []string{"1.1.1.1"}) + + testEgress := &flow.Flow{TrafficDirection: flow.TrafficDirection_EGRESS} + utils.AddDnsInfo(testEgress, "R", 0, "bing.com", []string{"A"}, 1, []string{"1.1.1.1"}) + + tests := []struct { + name string + d *DNSMetrics + input *flow.Flow + output map[string][]string + expectedLabels []string + metricsUpdate bool + }{ + { + name: "No context labels", + input: nil, + output: nil, + d: &DNSMetrics{}, + metricsUpdate: false, + }, + { + name: "Only ingress labels", + input: testR, + output: map[string][]string{ + ingress: {"PodA", "NamespaceA"}, + egress: nil, + }, + d: &DNSMetrics{ + metricName: utils.DNSResponseCounterName, + baseMetricObject: baseMetricObject{ + l: l, + }, + }, + expectedLabels: []string{"NOERROR", "A", "bing.com", "1.1.1.1", "1", "PodA", "NamespaceA"}, + metricsUpdate: true, + }, + { + name: "Only egress labels", + input: testR, + output: map[string][]string{ + ingress: nil, + egress: {"PodA", "NamespaceA"}, + }, + d: &DNSMetrics{ + metricName: utils.DNSResponseCounterName, + baseMetricObject: baseMetricObject{ + l: l, + }, + }, + expectedLabels: []string{"NOERROR", "A", "bing.com", "1.1.1.1", "1", "PodA", "NamespaceA"}, + metricsUpdate: true, + }, + { + name: "Both ingress and egress labels with ingress flow", + input: testIngress, + output: map[string][]string{ + ingress: {"PodA", "NamespaceA"}, + egress: {"PodB", "NamespaceB"}, + }, + d: &DNSMetrics{ + metricName: utils.DNSResponseCounterName, + baseMetricObject: baseMetricObject{ + l: l, + }, + }, + expectedLabels: []string{"NOERROR", "A", "bing.com", "1.1.1.1", "1", "PodA", "NamespaceA"}, + metricsUpdate: true, + }, + { + name: "Both ingress and egress labels with egress flow", + input: testEgress, + output: map[string][]string{ + ingress: {"PodA", "NamespaceA"}, + egress: {"PodB", "NamespaceB"}, + }, + d: &DNSMetrics{ + metricName: utils.DNSResponseCounterName, + baseMetricObject: baseMetricObject{ + l: l, + }, + }, + expectedLabels: []string{"NOERROR", "A", "bing.com", "1.1.1.1", "1", "PodB", "NamespaceB"}, + metricsUpdate: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := NewMockContextOptionsInterface(ctrl) //nolint:typecheck + m.EXPECT().getLocalCtxValues(tt.input).Return(tt.output).Times(1) + + mockCV := metrics.NewMockICounterVec(ctrl) + if tt.metricsUpdate { + mockCV.EXPECT().WithLabelValues(tt.expectedLabels).Return(c).Times(1) + } + + tt.d.dnsMetrics = mockCV + tt.d.srcCtx = m + tt.d.processLocalCtxFlow(tt.input) + }) + } +} diff --git a/pkg/module/metrics/drops.go b/pkg/module/metrics/drops.go new file mode 100644 index 000000000..52f929dd5 --- /dev/null +++ b/pkg/module/metrics/drops.go @@ -0,0 +1,171 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package metrics + +import ( + "strings" + + api "github.com/microsoft/retina/crd/api/v1alpha1" + "github.com/microsoft/retina/pkg/exporter" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/metrics" + "github.com/microsoft/retina/pkg/utils" + v1 "github.com/cilium/cilium/api/v1/flow" + "go.uber.org/zap" +) + +const ( + TotalDropCountName = "adv_drop_count" + TotalDropBytesName = "adv_drop_bytes" + + TotalDropCountDesc = "Total number of dropped packets" + TotalDropBytesDesc = "Total number of dropped bytes" +) + +type DropCountMetrics struct { + baseMetricObject + dropMetric metrics.IGaugeVec + metricName string +} + +func NewDropCountMetrics(ctxOptions *api.MetricsContextOptions, fl *log.ZapLogger, isLocalContext enrichmentContext) *DropCountMetrics { + if ctxOptions == nil || !strings.Contains(strings.ToLower(ctxOptions.MetricName), "drop") { + return nil + } + + fl = fl.Named("dropreason-metricsmodule") + fl.Info("Creating drop count metrics", zap.Any("options", ctxOptions)) + return &DropCountMetrics{ + baseMetricObject: newBaseMetricsObject(ctxOptions, fl, isLocalContext), + } +} + +func (d *DropCountMetrics) Init(metricName string) { + switch metricName { + case utils.DropCountTotalName: + d.dropMetric = exporter.CreatePrometheusGaugeVecForMetric( + exporter.AdvancedRegistry, + TotalDropCountName, + TotalDropCountDesc, + d.getLabels()...) + case utils.DropBytesTotalName: + d.dropMetric = exporter.CreatePrometheusGaugeVecForMetric( + exporter.AdvancedRegistry, + TotalDropBytesName, + TotalDropBytesDesc, + d.getLabels()...) + default: + d.l.Error("unknown metric name", zap.String("metricName", metricName)) + } + d.metricName = metricName +} + +func (d *DropCountMetrics) getLabels() []string { + labels := []string{ + utils.Reason, + utils.Direction, + } + + if d.srcCtx != nil { + labels = append(labels, d.srcCtx.getLabels()...) + d.l.Info("src labels", zap.Any("labels", labels)) + } + + if d.dstCtx != nil { + labels = append(labels, d.dstCtx.getLabels()...) + d.l.Info("dst labels", zap.Any("labels", labels)) + } + + // No additional context options + + return labels +} + +func (d *DropCountMetrics) Clean() { + exporter.UnregisterMetric(exporter.AdvancedRegistry, metrics.ToPrometheusType(d.dropMetric)) +} + +// TODO: update ProcessFlow with bytes metrics. We are only accounting for count. +// bytes metrics needs some additional work in ebpf and in this func to get the skb length +func (d *DropCountMetrics) ProcessFlow(flow *v1.Flow) { + // Flow does not have bytes section at the moment, + // so we will update only packet count + if flow == nil { + return + } + + if flow.Verdict != v1.Verdict_DROPPED { + return + } + + if d.isLocalContext() { + // when localcontext is enabled, we do not need the context options for both src and dst + // metrics aggregation will be on a single pod basis and not the src/dst pod combination basis. + d.processLocalCtxFlow(flow) + return + } + + labels := []string{ + metrics.GetDropTypeFlowDropReason(flow.DropReasonDesc).String(), + flow.TrafficDirection.String(), + } + + if !d.advEnable { + d.update(flow, labels) + return + } + + if d.srcCtx != nil { + srcLabels := d.srcCtx.getValues(flow) + if len(srcLabels) > 0 { + labels = append(labels, srcLabels...) + } + } + + if d.dstCtx != nil { + dstLabel := d.dstCtx.getValues(flow) + if len(dstLabel) > 0 { + labels = append(labels, dstLabel...) 
+ } + } + + // No additional context options + + d.update(flow, labels) + d.l.Debug("drop count metric is added", zap.Any("labels", labels)) +} + +func (d *DropCountMetrics) processLocalCtxFlow(flow *v1.Flow) { + labelValuesMap := d.srcCtx.getLocalCtxValues(flow) + if labelValuesMap == nil { + return + } + dropReason := metrics.GetDropTypeFlowDropReason(flow.DropReasonDesc).String() + + // Ingress values + if l := len(labelValuesMap[ingress]); l > 0 { + labels := make([]string, 0, l+2) + labels = append(labels, dropReason, ingress) + labels = append(labels, labelValuesMap[ingress]...) + d.update(flow, labels) + d.l.Debug("drop count metric is added in INGRESS in local ctx", zap.Any("labels", labels)) + } + + if l := len(labelValuesMap[egress]); l > 0 { + labels := make([]string, 0, l+2) + labels = append(labels, dropReason, egress) + labels = append(labels, labelValuesMap[egress]...) + d.update(flow, labels) + d.l.Debug("drop count metric is added in EGRESS in local ctx", zap.Any("labels", labels)) + } +} + +func (d *DropCountMetrics) update(fl *v1.Flow, labels []string) { + switch d.metricName { + case utils.DropCountTotalName: + d.dropMetric.WithLabelValues(labels...).Inc() + case utils.DropBytesTotalName: + d.dropMetric.WithLabelValues(labels...).Add(float64(utils.PacketSize(fl))) + } +} diff --git a/pkg/module/metrics/drops_test.go b/pkg/module/metrics/drops_test.go new file mode 100644 index 000000000..94d9f9ff3 --- /dev/null +++ b/pkg/module/metrics/drops_test.go @@ -0,0 +1,299 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +// +//nolint:all +package metrics + +import ( + "testing" + + "github.com/microsoft/retina/crd/api/v1alpha1" + "github.com/microsoft/retina/pkg/log" + metricsinit "github.com/microsoft/retina/pkg/metrics" + "github.com/cilium/cilium/api/v1/flow" + "github.com/golang/mock/gomock" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" +) + +func TestNewDrop(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + tt := []TestMetrics{ + { + name: "empty opts", + opts: &v1alpha1.MetricsContextOptions{}, + checkIsAdvance: false, + f: &flow.Flow{ + Verdict: flow.Verdict_FORWARDED, + }, + exepectedLabels: []string{"reason"}, + metricCall: 0, + nilObj: true, + }, + { + name: "empty opts", + opts: &v1alpha1.MetricsContextOptions{}, + checkIsAdvance: false, + f: &flow.Flow{ + Verdict: flow.Verdict_DROPPED, + }, + exepectedLabels: []string{"reason"}, + metricCall: 0, + nilObj: true, + }, + { + name: "plain opts", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "drop", + }, + checkIsAdvance: false, + f: &flow.Flow{}, + exepectedLabels: []string{"reason", "direction"}, + metricCall: 0, + }, + { + name: "plain opts dropped verdict", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "drop", + }, + checkIsAdvance: false, + f: &flow.Flow{ + Verdict: flow.Verdict_DROPPED, + }, + exepectedLabels: []string{"reason", "direction"}, + metricCall: 1, + }, + { + name: "plain opts dropped verdict nil flow", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "drop", + }, + checkIsAdvance: false, + f: nil, + exepectedLabels: []string{"reason", "direction"}, + metricCall: 0, + }, + { + name: "source opts 1 without metric name", + opts: &v1alpha1.MetricsContextOptions{ + SourceLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + }, + f: &flow.Flow{ + Verdict: flow.Verdict_DROPPED, + }, + exepectedLabels: []string{ + "reason", + "direction", + }, + 
metricCall: 1, + nilObj: true, + }, + { + name: "source opts 1", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "drop", + SourceLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + }, + f: &flow.Flow{ + Verdict: flow.Verdict_DROPPED, + }, + checkIsAdvance: true, + exepectedLabels: []string{ + "reason", + "direction", + "source_ip", + "source_namespace", + "source_podname", + "source_workloadKind", + "source_workloadName", + "source_service", + "source_port", + }, + metricCall: 1, + }, + { + name: "dest opts 1", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "DROP", + DestinationLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + }, + f: &flow.Flow{ + Verdict: flow.Verdict_DROPPED, + }, + checkIsAdvance: true, + exepectedLabels: []string{ + "reason", + "direction", + "destination_ip", + "destination_namespace", + "destination_podname", + "destination_workloadKind", + "destination_workloadName", + "destination_service", + "destination_port", + }, + metricCall: 1, + }, + { + name: "source opts with flow", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "drop", + SourceLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + }, + f: &flow.Flow{ + Source: &flow.Endpoint{}, + Verdict: flow.Verdict_DROPPED, + }, + checkIsAdvance: true, + exepectedLabels: []string{ + "reason", + "direction", + "source_ip", + "source_namespace", + "source_podname", + "source_workloadKind", + "source_workloadName", + "source_service", + "source_port", + }, + metricCall: 1, + }, + { + name: "forward source opts with flow", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "forward", + SourceLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + }, + f: &flow.Flow{ + Source: &flow.Endpoint{}, + Verdict: flow.Verdict_DROPPED, + }, + checkIsAdvance: true, + exepectedLabels: []string{ + "reason", + "direction", + "source_ip", + "source_namespace", + "source_podname", + "source_workloadKind", + "source_workloadName", + "source_service", + "source_port", + }, + metricCall: 1, + nilObj: true, + }, + { + name: "drop source opts with flow in localcontext", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "drop", + SourceLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + }, + f: &flow.Flow{ + Source: &flow.Endpoint{}, + Verdict: flow.Verdict_DROPPED, + }, + checkIsAdvance: true, + exepectedLabels: []string{ + "reason", + "direction", + "ip", + "namespace", + "podname", + "workloadKind", + "workloadName", + "service", + "port", + }, + metricCall: 1, + nilObj: false, + localContext: localContext, + }, + { + name: "drop source opts with destination flow in localcontext", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "drop", + SourceLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + }, + f: &flow.Flow{ + Destination: &flow.Endpoint{}, + Verdict: flow.Verdict_DROPPED, + }, + checkIsAdvance: true, + exepectedLabels: []string{ + "reason", + "direction", + "ip", + "namespace", + "podname", + "workloadKind", + "workloadName", + "service", + "port", + }, + metricCall: 1, + nilObj: false, + localContext: localContext, + }, + { + name: "drop source opts with source and destination flow in localcontext", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "drop", + SourceLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + }, + f: &flow.Flow{ + Destination: &flow.Endpoint{}, + Source: &flow.Endpoint{}, 
+ Verdict: flow.Verdict_DROPPED, + }, + checkIsAdvance: true, + exepectedLabels: []string{ + "reason", + "direction", + "ip", + "namespace", + "podname", + "workloadKind", + "workloadName", + "service", + "port", + }, + metricCall: 2, + nilObj: false, + localContext: localContext, + }, + } + + for _, tc := range tt { + for _, metricName := range []string{"drop_count", "drop_bytes"} { + log.Logger().Info("Running test name", zap.String("name", tc.name), zap.String("metricName", metricName)) + ctrl := gomock.NewController(t) + f := NewDropCountMetrics(tc.opts, log.Logger(), tc.localContext) + if tc.nilObj { + assert.Nil(t, f, "drop metrics should be nil Test Name: %s", tc.name) + continue + } else { + assert.NotNil(t, f, "drp[] metrics should not be nil Test Name: %s", tc.name) + } + dropMock := metricsinit.NewMockIGaugeVec(ctrl) //nolint:typecheck + + f.dropMetric = dropMock + + testmetric := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "testmetric", + Help: "testmetric", + }) + + dropMock.EXPECT().WithLabelValues(gomock.Any()).Return(testmetric).Times(tc.metricCall) + + assert.Equal(t, f.advEnable, tc.checkIsAdvance, "advance metrics options should be equal Test Name: %s", tc.name) + assert.Equal(t, tc.exepectedLabels, f.getLabels(), "labels should be equal Test Name: %s", tc.name) + + f.metricName = metricName + f.ProcessFlow(tc.f) + ctrl.Finish() + } + } +} diff --git a/pkg/module/metrics/forward.go b/pkg/module/metrics/forward.go new file mode 100644 index 000000000..f9e3d2a7c --- /dev/null +++ b/pkg/module/metrics/forward.go @@ -0,0 +1,171 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package metrics + +import ( + "strings" + + api "github.com/microsoft/retina/crd/api/v1alpha1" + "github.com/microsoft/retina/pkg/exporter" + "github.com/microsoft/retina/pkg/log" + metricsinit "github.com/microsoft/retina/pkg/metrics" + "github.com/microsoft/retina/pkg/utils" + v1 "github.com/cilium/cilium/api/v1/flow" + "go.uber.org/zap" +) + +const ( + TotalCountName = "adv_forward_count" + + // TODO remove bytes as it is not being populated + TotalBytesName = "adv_forward_bytes" + + TotalCountDesc = "Total number of forwarded packets" + TotalBytesDesc = "Total number of forwarded bytes" +) + +type ForwardMetrics struct { + baseMetricObject + forwardMetric metricsinit.IGaugeVec + // bytesMetric metricsinit.IGaugeVec + metricName string +} + +func NewForwardCountMetrics(ctxOptions *api.MetricsContextOptions, fl *log.ZapLogger, isLocalContext enrichmentContext) *ForwardMetrics { + if ctxOptions == nil || !strings.Contains(strings.ToLower(ctxOptions.MetricName), "forward") { + return nil + } + + l := fl.Named("forward-metricsmodule") + l.Info("Creating forward count metrics", zap.Any("options", ctxOptions)) + return &ForwardMetrics{ + baseMetricObject: newBaseMetricsObject(ctxOptions, fl, isLocalContext), + } +} + +func (f *ForwardMetrics) Init(metricName string) { + switch metricName { + case utils.ForwardCountTotalName: + f.forwardMetric = exporter.CreatePrometheusGaugeVecForMetric( + exporter.AdvancedRegistry, + TotalCountName, + TotalCountDesc, + f.getLabels()...) + case utils.ForwardBytesTotalName: + f.forwardMetric = exporter.CreatePrometheusGaugeVecForMetric( + exporter.AdvancedRegistry, + TotalBytesName, + TotalBytesDesc, + f.getLabels()...) 
+ default: + f.l.Error("unknown metric name", zap.String("name", metricName)) + } + f.metricName = metricName +} + +func (f *ForwardMetrics) getLabels() []string { + labels := []string{ + utils.Direction, + } + + if !f.advEnable { + return labels + } + + if f.srcCtx != nil { + labels = append(labels, f.srcCtx.getLabels()...) + f.l.Info("src labels", zap.Any("labels", labels)) + } + + if f.dstCtx != nil { + labels = append(labels, f.dstCtx.getLabels()...) + f.l.Info("dst labels", zap.Any("labels", labels)) + } + + // No additional context options + + return labels +} + +func (f *ForwardMetrics) Clean() { + exporter.UnregisterMetric(exporter.AdvancedRegistry, metricsinit.ToPrometheusType(f.forwardMetric)) +} + +// TODO: update ProcessFlow with bytes metrics. We are only accounting for count. +// bytes metrics needs some additional work in ebpf and in this func to get the skb length +func (f *ForwardMetrics) ProcessFlow(flow *v1.Flow) { + // Flow does not have bytes section at the moment, + // so we will update only packet count + if flow == nil { + return + } + + if flow.Verdict != v1.Verdict_FORWARDED { + return + } + + if f.isLocalContext() { + // when localcontext is enabled, we do not need the context options for both src and dst + // metrics aggregation will be on a single pod basis and not the src/dst pod combination basis. + f.processLocalCtxFlow(flow) + return + } + + labels := []string{ + flow.TrafficDirection.String(), + } + + if !f.advEnable { + f.update(flow, labels) + return + } + + if f.srcCtx != nil { + srcLabels := f.srcCtx.getValues(flow) + if len(srcLabels) > 0 { + labels = append(labels, srcLabels...) + } + } + + if f.dstCtx != nil { + dstLabel := f.dstCtx.getValues(flow) + if len(dstLabel) > 0 { + labels = append(labels, dstLabel...) + } + } + + // No additional context options + + f.update(flow, labels) + f.l.Debug("forward count metric is added", zap.Any("labels", labels)) +} + +func (f *ForwardMetrics) processLocalCtxFlow(flow *v1.Flow) { + labelValuesMap := f.srcCtx.getLocalCtxValues(flow) + if labelValuesMap == nil { + return + } + // Ingress values. + if len(labelValuesMap[ingress]) > 0 { + labels := append([]string{ingress}, labelValuesMap[ingress]...) + f.update(flow, labels) + f.l.Debug("forward count metric in INGRESS in local ctx", zap.Any("labels", labels)) + } + + // Egress values. + if len(labelValuesMap[egress]) > 0 { + labels := append([]string{egress}, labelValuesMap[egress]...) + f.update(flow, labels) + f.l.Debug("forward count metric in EGRESS in local ctx", zap.Any("labels", labels)) + } +} + +func (f *ForwardMetrics) update(fl *v1.Flow, labels []string) { + switch f.metricName { + case utils.ForwardCountTotalName: + f.forwardMetric.WithLabelValues(labels...).Inc() + case utils.ForwardBytesTotalName: + f.forwardMetric.WithLabelValues(labels...).Add(float64(utils.PacketSize(fl))) + } +} diff --git a/pkg/module/metrics/forward_test.go b/pkg/module/metrics/forward_test.go new file mode 100644 index 000000000..0ec12df49 --- /dev/null +++ b/pkg/module/metrics/forward_test.go @@ -0,0 +1,310 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+// +//nolint:all +package metrics + +import ( + "testing" + + "github.com/microsoft/retina/crd/api/v1alpha1" + "github.com/microsoft/retina/pkg/log" + metricsinit "github.com/microsoft/retina/pkg/metrics" + "github.com/cilium/cilium/api/v1/flow" + "github.com/golang/mock/gomock" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" +) + +type TestMetrics struct { + name string + opts *v1alpha1.MetricsContextOptions + f *flow.Flow + checkIsAdvance bool + exepectedLabels []string + metricCall int + nilObj bool + localContext enrichmentContext +} + +func TestNewForward(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + l := log.Logger().Named("TestNewForward") + + tt := []TestMetrics{ + { + name: "empty opts", + opts: &v1alpha1.MetricsContextOptions{}, + checkIsAdvance: false, + f: &flow.Flow{}, + exepectedLabels: []string{"direction"}, + metricCall: 0, + nilObj: true, + }, + { + name: "plain opts", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "forward", + }, + checkIsAdvance: false, + f: &flow.Flow{ + Verdict: flow.Verdict_FORWARDED, + }, + exepectedLabels: []string{"direction"}, + metricCall: 1, + }, + { + name: "plain opts with nil flow", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "forward", + }, + checkIsAdvance: false, + f: nil, + exepectedLabels: []string{"direction"}, + metricCall: 0, + }, + { + name: "plain opts dropped verdict", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "forward", + }, + checkIsAdvance: false, + f: &flow.Flow{ + Verdict: flow.Verdict_DROPPED, + }, + exepectedLabels: []string{"direction"}, + metricCall: 0, + }, + { + name: "source opts 1 without metric name", + opts: &v1alpha1.MetricsContextOptions{ + SourceLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + }, + f: &flow.Flow{}, + exepectedLabels: []string{ + "direction", + }, + metricCall: 0, + nilObj: true, + }, + { + name: "source opts 1", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "forward", + SourceLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + }, + f: &flow.Flow{ + Verdict: flow.Verdict_FORWARDED, + }, + checkIsAdvance: true, + exepectedLabels: []string{ + "direction", + "source_ip", + "source_namespace", + "source_podname", + "source_workloadKind", + "source_workloadName", + "source_service", + "source_port", + }, + metricCall: 1, + }, + { + name: "dest opts 1", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "FORWARD", + DestinationLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + }, + f: &flow.Flow{ + Verdict: flow.Verdict_FORWARDED, + }, + checkIsAdvance: true, + exepectedLabels: []string{ + "direction", + "destination_ip", + "destination_namespace", + "destination_podname", + "destination_workloadKind", + "destination_workloadName", + "destination_service", + "destination_port", + }, + metricCall: 1, + }, + { + name: "source opts with flow", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "forward", + SourceLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + }, + f: &flow.Flow{ + Source: &flow.Endpoint{}, + Verdict: flow.Verdict_FORWARDED, + }, + checkIsAdvance: true, + exepectedLabels: []string{ + "direction", + "source_ip", + "source_namespace", + "source_podname", + "source_workloadKind", + "source_workloadName", + "source_service", + "source_port", + }, + metricCall: 1, + }, + { + name: "drop source opts expect nil", + opts: 
&v1alpha1.MetricsContextOptions{ + MetricName: "drop", + SourceLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + }, + f: &flow.Flow{ + Source: &flow.Endpoint{}, + Verdict: flow.Verdict_FORWARDED, + }, + nilObj: true, + checkIsAdvance: true, + exepectedLabels: []string{ + "direction", + "source_ip", + "source_namespace", + "source_podname", + "source_workloadKind", + "source_workloadName", + "source_service", + "source_port", + }, + metricCall: 1, + }, + { + name: "source opts with flow dropped verdict", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "forward", + SourceLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + }, + f: &flow.Flow{ + Source: &flow.Endpoint{}, + Verdict: flow.Verdict_DROPPED, + }, + checkIsAdvance: true, + exepectedLabels: []string{ + "direction", + "source_ip", + "source_namespace", + "source_podname", + "source_workloadKind", + "source_workloadName", + "source_service", + "source_port", + }, + metricCall: 0, + }, + { + name: "source opts with flow in local context", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "forward", + SourceLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + }, + f: &flow.Flow{ + Source: &flow.Endpoint{}, + Verdict: flow.Verdict_FORWARDED, + }, + checkIsAdvance: true, + exepectedLabels: []string{ + "direction", + "ip", + "namespace", + "podname", + "workloadKind", + "workloadName", + "service", + "port", + }, + metricCall: 1, + localContext: localContext, + }, + { + name: "dest opts 1 with flow in local context", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "FORWARD", + SourceLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + }, + f: &flow.Flow{ + Verdict: flow.Verdict_FORWARDED, + Destination: &flow.Endpoint{}, + }, + checkIsAdvance: true, + exepectedLabels: []string{ + "direction", + "ip", + "namespace", + "podname", + "workloadKind", + "workloadName", + "service", + "port", + }, + metricCall: 1, + localContext: localContext, + }, + { + name: "src and dest opts 1 with flow in local context", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "FORWARD", + SourceLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + }, + f: &flow.Flow{ + Verdict: flow.Verdict_FORWARDED, + Destination: &flow.Endpoint{}, + Source: &flow.Endpoint{}, + }, + checkIsAdvance: true, + exepectedLabels: []string{ + "direction", + "ip", + "namespace", + "podname", + "workloadKind", + "workloadName", + "service", + "port", + }, + metricCall: 2, + localContext: localContext, + }, + } + + for _, tc := range tt { + for _, metricName := range []string{"forward_count", "forward_bytes"} { + l.Info("Running test", zap.String("name", tc.name), zap.String("metricName", metricName)) + ctrl := gomock.NewController(t) + + f := NewForwardCountMetrics(tc.opts, log.Logger(), tc.localContext) + if tc.nilObj { + assert.Nil(t, f, "forward metrics should be nil Test Name: %s", tc.name) + continue + } else { + assert.NotNil(t, f, "forward metrics should not be nil Test Name: %s", tc.name) + } + + forwardMock := metricsinit.NewMockIGaugeVec(ctrl) //nolint:typecheck + + f.forwardMetric = forwardMock + + testmetric := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "testmetric", + Help: "testmetric", + }) + forwardMock.EXPECT().WithLabelValues(gomock.Any()).Return(testmetric).Times(tc.metricCall) + assert.Equal(t, f.advEnable, tc.checkIsAdvance, "advance metrics options should be equal Test Name: %s", tc.name) + 
assert.Equal(t, tc.exepectedLabels, f.getLabels(), "labels should be equal Test Name: %s", tc.name) + + f.metricName = metricName + f.ProcessFlow(tc.f) + ctrl.Finish() + } + } +} diff --git a/pkg/module/metrics/latency.go b/pkg/module/metrics/latency.go new file mode 100644 index 000000000..319bc3b56 --- /dev/null +++ b/pkg/module/metrics/latency.go @@ -0,0 +1,348 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package metrics + +import ( + "context" + "math" + "net" + "strings" + "sync" + "time" + + "github.com/cilium/cilium/api/v1/flow" + v1 "github.com/cilium/cilium/api/v1/flow" + ttlcache "github.com/jellydator/ttlcache/v3" + api "github.com/microsoft/retina/crd/api/v1alpha1" + "github.com/microsoft/retina/pkg/common" + cc "github.com/microsoft/retina/pkg/controllers/cache" + "github.com/microsoft/retina/pkg/exporter" + "github.com/microsoft/retina/pkg/log" + metricsinit "github.com/microsoft/retina/pkg/metrics" + "github.com/microsoft/retina/pkg/pubsub" + "github.com/microsoft/retina/pkg/utils" + "go.uber.org/zap" +) + +const ( + apiserverLatencyName = "adv_node_apiserver_latency" + apiserverLatencyDesc = "Latency of node apiserver in ms" + noResponseFromNodeAPIServerName = "adv_node_apiserver_no_response" + noResponseFromNodeAPIServerDesc = "Number of packets that did not get a response from node apiserver" + apiServerHandshakeLatencyName = "adv_node_apiserver_tcp_handshake_latency" + apiServerHandshakeLatencyDesc = "Latency of node apiserver tcp handshake in ms" + TTL time.Duration = 500 * time.Millisecond + LIMIT uint64 = 100000 + // Bucket size. + start = 0 + width = 0.5 + count = 10 +) + +type key struct { + // Flow + TCP ID uniquely identifies a TCP connection. + // Source IP, Source Port, Destination IP, Destination Port and TCP ID. + // TCP ID is the 32 bit number that uniquely identifies a TCP connection. + // We are using TSval/TSecr to identify a TCP connection. + // Ref for direction: cprog/packetparser.h . + srcIP string + dstIP string + srcP uint32 + dstP uint32 + id uint64 +} + +type val struct { + // Time in nanoseconds. + t int32 + // TCP flags. Required for handshake latency. + flags *flow.TCPFlags +} + +type LatencyMetrics struct { + l *log.ZapLogger + apiServerIps map[string]struct{} + cache *ttlcache.Cache[key, *val] + nodeApiServerLatency metricsinit.IHistogramVec + nodeApiServerHandshakeLatency metricsinit.IHistogramVec + noResponseMetric metricsinit.ICounterVec + callbackId string + mu sync.RWMutex +} + +var lm *LatencyMetrics + +func NewLatencyMetrics(ctxOptions *api.MetricsContextOptions, fl *log.ZapLogger, isLocalContext enrichmentContext) *LatencyMetrics { + if ctxOptions == nil || !strings.Contains(strings.ToLower(ctxOptions.MetricName), nodeApiserver) { + return nil + } + + if lm == nil { + lm = &LatencyMetrics{l: fl.Named("latency-metricsmodule")} + // Ignore isLocalContext for now. 
+	}
+
+	switch ctxOptions.MetricName {
+	case utils.NodeApiServerLatencyName:
+		lm.nodeApiServerLatency = exporter.CreatePrometheusHistogramWithLinearBucketsForMetric(
+			exporter.AdvancedRegistry,
+			apiserverLatencyName,
+			apiserverLatencyDesc,
+			start,
+			width,
+			count,
+		)
+	case utils.NodeApiServerTcpHandshakeLatencyName:
+		lm.nodeApiServerHandshakeLatency = exporter.CreatePrometheusHistogramWithLinearBucketsForMetric(
+			exporter.AdvancedRegistry,
+			apiServerHandshakeLatencyName,
+			apiServerHandshakeLatencyDesc,
+			start,
+			width,
+			count,
+		)
+	case utils.NoResponseFromApiServerName:
+		lm.noResponseMetric = exporter.CreatePrometheusCounterVecForMetric(
+			exporter.AdvancedRegistry,
+			noResponseFromNodeAPIServerName,
+			noResponseFromNodeAPIServerDesc,
+			"count",
+		)
+	}
+
+	return lm
+}
+
+func (lm *LatencyMetrics) Init(metricName string) {
+	// Create cache.
+	// Cache is key value store for key -> timestamp.
+	lm.cache = ttlcache.New(
+		ttlcache.WithTTL[key, *val](TTL),
+		ttlcache.WithCapacity[key, *val](LIMIT),
+	)
+	lm.cache.OnEviction(func(ctx context.Context, reason ttlcache.EvictionReason, item *ttlcache.Item[key, *val]) {
+		if reason == ttlcache.EvictionReasonExpired {
+			// Didn't get the corresponding packet.
+			lm.l.Debug("Evicted item", zap.Any("item", item))
+			if lm.noResponseMetric != nil {
+				lm.noResponseMetric.WithLabelValues("no_response").Inc()
+				lm.l.Debug("Incremented no response metric", zap.Any("metric", lm.noResponseMetric))
+			}
+		}
+	})
+
+	lm.mu.Lock()
+	// initialize the list of apiserver ips
+	lm.apiServerIps = make(map[string]struct{})
+	lm.mu.Unlock()
+
+	ps := pubsub.New()
+
+	// Register callback.
+	// Every time the apiserver object is updated, the callback function is called and it updates the ip variable.
+	fn := pubsub.CallBackFunc(lm.apiserverWatcherCallbackFn)
+	// Check if callback is already registered.
+	if lm.callbackId == "" {
+		lm.callbackId = ps.Subscribe(common.PubSubAPIServer, &fn)
+	}
+
+	go lm.cache.Start()
+}
+
+func (lm *LatencyMetrics) Clean() {
+	if lm.nodeApiServerLatency != nil {
+		exporter.UnregisterMetric(exporter.AdvancedRegistry, metricsinit.ToPrometheusType(lm.nodeApiServerLatency))
+	}
+	if lm.nodeApiServerHandshakeLatency != nil {
+		exporter.UnregisterMetric(exporter.AdvancedRegistry, metricsinit.ToPrometheusType(lm.nodeApiServerHandshakeLatency))
+	}
+	if lm.noResponseMetric != nil {
+		exporter.UnregisterMetric(exporter.AdvancedRegistry, metricsinit.ToPrometheusType(lm.noResponseMetric))
+	}
+	if lm.cache != nil {
+		lm.cache.Stop()
+	}
+
+	// Get pubsub instance.
+	ps := pubsub.New()
+
+	// Unsubscribe callback.
+	if lm.callbackId != "" {
+		if err := ps.Unsubscribe(common.PubSubAPIServer, lm.callbackId); err != nil {
+			lm.l.Error("failed to unsubscribe callback", zap.Error(err))
+			return
+		}
+		// Reset callback id after unsubscribing.
+ lm.callbackId = "" + } +} + +func (lm *LatencyMetrics) ProcessFlow(f *v1.Flow) { + if f == nil || f.L4 == nil || f.L4.GetTCP() == nil || utils.GetTcpID(f) == 0 || f.IP == nil { + return + } + + // Safely read lm.apiServerIps value + lm.mu.RLock() + apiServerIps := lm.apiServerIps + lm.mu.RUnlock() + + // Convert f.IP.Source and f.IP.Destination to net.IP type + sourceIP := net.ParseIP(f.IP.Source).String() + destinationIP := net.ParseIP(f.IP.Destination).String() + + if apiServerIps != nil { + // Check if source or destination IP is in the apiServerIps set + _, ipInSource := apiServerIps[sourceIP] + _, ipInDestination := apiServerIps[destinationIP] + + if ipInSource || ipInDestination { + lm.calculateLatency(f) + } + } +} + +/* ++-------------------------------------------------+ +| | +| TCP Packet Leaving Host | +| | ++-------------------------------------------------+ + + | + | + v + ++-------------------------------------------------+ +| | +| Add Entry to TTL Cache | +| | +| Key: {source IP, destination IP, | +| source port, destination port, | +| tcp timestamp value} | +| | +| Value: Time in Nanoseconds | +| | ++-------------------------------------------------+ + + | + | + v + ++-------------------------------------------------+ +| | +| TCP Packet Entering Host | +| | ++-------------------------------------------------+ + + | + | + v + ++-------------------------------------------------+ +| | +| Check if Packet is in TTL Cache | +| | +| Key: {destination IP, source IP, | +| destination port, source port, | +| tcp timestamp echo reply value} | +| | +| If Yes, Calculate RTT: | +| RTT = Current Time - Time Packet Observed | +| | +| Remove Entry from TTL Cache | +| | ++-------------------------------------------------+ +*/ +func (lm *LatencyMetrics) calculateLatency(f *v1.Flow) { + // Ignore all packets observed at endpoint. + // We only care about node-apiserver packets observed at eth0. + // TO_NETWORK: Packets leaving node via eth0. + // FROM_NETWORK: Packets entering node via eth0. + if f.TraceObservationPoint == v1.TraceObservationPoint_TO_NETWORK { + k := key{ + srcIP: f.IP.Source, + dstIP: f.IP.Destination, + srcP: f.L4.GetTCP().SourcePort, + dstP: f.L4.GetTCP().DestinationPort, + id: utils.GetTcpID(f), + } + // There will be multiple identical packets with same ID. Store only the first one. + if item := lm.cache.Get(k); item == nil { + lm.cache.Set(k, &val{ + t: f.Time.Nanos, + flags: f.L4.GetTCP().Flags, + }, TTL) + } + } else if f.TraceObservationPoint == v1.TraceObservationPoint_FROM_NETWORK { + k := key{ + srcIP: f.IP.Destination, + dstIP: f.IP.Source, + srcP: f.L4.GetTCP().DestinationPort, + dstP: f.L4.GetTCP().SourcePort, + id: utils.GetTcpID(f), + } + if item := lm.cache.Get(k); item != nil { + // Calculate latency in milliseconds. + latency := math.Round(float64(f.Time.Nanos-item.Value().t) / float64(1000000)) + + // Log continuous latency. + if lm.nodeApiServerLatency != nil { + lm.nodeApiServerLatency.Observe(latency) + } + + // Determine if this is the first reply packet, and if so, log handshake latency. + prevFlowflags := item.Value().flags + curFlowflags := f.L4.GetTCP().Flags + if lm.nodeApiServerHandshakeLatency != nil && prevFlowflags != nil && prevFlowflags.SYN && curFlowflags != nil && curFlowflags.SYN && curFlowflags.ACK { + // This is the first reply packet. + lm.nodeApiServerHandshakeLatency.Observe(latency) + } + // Delete the entry from cache. Calculate latency for the first reply packet only. 
+ lm.cache.Delete(k) + } + } +} + +func (lm *LatencyMetrics) apiserverWatcherCallbackFn(obj interface{}) { + event := obj.(*cc.CacheEvent) + if event == nil { + return + } + + apiServer := event.Obj.(*common.APIServerObject) + if apiServer == nil { + lm.l.Warn("invalid or nil APIServer object in callback function") + return + } + + // Locking before modifying the ip variable + lm.mu.Lock() + defer lm.mu.Unlock() + apiServerIPs := apiServer.IPs() + + switch event.Type { + case cc.EventTypeAddAPIServerIPs: + lm.l.Debug("Add apiserver ips", zap.Any("ips", apiServerIPs)) + lm.addIps(apiServerIPs) + case cc.EventTypeDeleteAPIServerIPs: + lm.l.Debug("Delete apiserver ips", zap.Any("ips", apiServerIPs)) + lm.removeIps(apiServerIPs) + + default: + lm.l.Debug("Unknown event type", zap.Any("event", event)) + } +} + +func (lm *LatencyMetrics) addIps(ips []net.IP) { + for _, ip := range ips { + lm.apiServerIps[ip.String()] = struct{}{} + } +} + +func (lm *LatencyMetrics) removeIps(ips []net.IP) { + for _, ip := range ips { + delete(lm.apiServerIps, ip.String()) + } +} diff --git a/pkg/module/metrics/latency_test.go b/pkg/module/metrics/latency_test.go new file mode 100644 index 000000000..d0bf3c8bd --- /dev/null +++ b/pkg/module/metrics/latency_test.go @@ -0,0 +1,170 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package metrics + +import ( + "net" + "testing" + "time" + + api "github.com/microsoft/retina/crd/api/v1alpha1" + "github.com/microsoft/retina/pkg/exporter" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/metrics" + "github.com/microsoft/retina/pkg/utils" + "github.com/cilium/cilium/api/v1/flow" + "github.com/golang/mock/gomock" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + "gotest.tools/v3/assert" +) + +func TestNewLatencyMetrics(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + l := log.Logger().Named("test") + + options := []*api.MetricsContextOptions{ + nil, + { + MetricName: "forward", + }, + { + MetricName: "node_apiserver_latency", + }, + { + MetricName: "node_apiserver_handshake_latency", + }, + { + MetricName: "node_apiserver_no_response", + }, + } + for _, option := range options[:2] { + if NewLatencyMetrics(option, l, "") != nil { + t.Errorf("NewLatencyMetrics(%v) should return nil", option) + } + } + + var lm *LatencyMetrics + for _, option := range options[2:] { + lm = NewLatencyMetrics(option, l, "") + } + if lm.nodeApiServerLatency == nil { + t.Errorf("LatencyMetrics.nodeApiServerLatency should be initialized") + } + if lm.noResponseMetric == nil { + t.Errorf("LatencyMetrics.noResponseMetric should be initialized") + } + if lm.nodeApiServerHandshakeLatency == nil { + t.Errorf("LatencyMetrics.nodeApiServerHandshakeLatency should be initialized") + } + + // Stop. + lm.Clean() +} + +func TestInit(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + l := log.Logger().Named("test") + + exporter.AdvancedRegistry = prometheus.NewRegistry() + lm := &LatencyMetrics{l: l} + + lm.Init("latency") + + if lm.cache == nil { + t.Errorf("LatencyMetrics.cache should be initialized") + } + + // Stop. 
+ lm.Clean() +} + +func TestProcessFlow(t *testing.T) { + apiSeverIp := net.IPv4(1, 1, 1, 1) + nodeIp := net.IPv4(2, 2, 2, 2) + + log.SetupZapLogger(log.GetDefaultLogOpts()) + l := log.Logger().Named("test") + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + exporter.AdvancedRegistry = prometheus.NewRegistry() + lm := &LatencyMetrics{l: l} + + lm.Init("latency") + + // Set mock apiServerIps. + lm.apiServerIps[apiSeverIp.String()] = struct{}{} + + // Set mock nodeApiServerLatency. + mHist := metrics.NewMockIHistogramVec(ctrl) + mHist.EXPECT().Observe(float64(1)).Return().Times(2) + lm.nodeApiServerLatency = mHist + + // Set mock nodeApiServerHandshakeLatency. + mHist2 := metrics.NewMockIHistogramVec(ctrl) + mHist2.EXPECT().Observe(float64(1)).Return().Times(1) + lm.nodeApiServerHandshakeLatency = mHist2 + + // Test No response metric. + c := prometheus.NewCounter(prometheus.CounterOpts{}) + mNoResponse := metrics.NewMockICounterVec(ctrl) + mNoResponse.EXPECT().WithLabelValues("no_response").Return(c).Times(1) + lm.noResponseMetric = mNoResponse + + t1 := time.Now().UnixNano() + t2 := t1 + 1*time.Millisecond.Nanoseconds() + + /* + * Test case 1: TCP handshake. + */ + // Node -> Api server. + f1 := utils.ToFlow(t1, apiSeverIp, nodeIp, 80, 443, 6, 3, 0, 0) + utils.AddTcpID(f1, 1234) + utils.AddTcpFlags(f1, 1, 0, 0, 0, 0, 0) + f1.Destination = &flow.Endpoint{ + PodName: "kubernetes-apiserver", + } + // Api server -> Node. + f2 := utils.ToFlow(t2, nodeIp, apiSeverIp, 443, 80, 6, 2, 0, 0) + utils.AddTcpID(f2, 1234) + utils.AddTcpFlags(f2, 1, 1, 0, 0, 0, 0) + f2.Source = &flow.Endpoint{ + PodName: "kubernetes-apiserver", + } + // Process flow. + lm.ProcessFlow(f1) + lm.ProcessFlow(f2) + + /* + * Test case 2: Existing TCP connection. + */ + // Node -> Api server. + utils.AddTcpFlags(f1, 1, 0, 0, 0, 0, 0) + // Api server -> Node. + utils.AddTcpFlags(f2, 0, 1, 0, 0, 0, 0) + // Process flow. + lm.ProcessFlow(f1) + lm.ProcessFlow(f2) + + /* + * Test case 3: No reply from apiserver. + */ + lm.ProcessFlow(f1) + // Sleep for TTL. + time.Sleep(1 * time.Second) + // Check dropped packet. + assert.Equal(t, value(c), float64(1), "Expected no response metric to be 1") + + // Stop. + lm.Clean() +} + +func value(c prometheus.Counter) float64 { + m := &dto.Metric{} + c.Write(m) + + return m.Counter.GetValue() +} diff --git a/pkg/module/metrics/metrics_module.go b/pkg/module/metrics/metrics_module.go new file mode 100644 index 000000000..4724c8bbd --- /dev/null +++ b/pkg/module/metrics/metrics_module.go @@ -0,0 +1,584 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
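Before moving on to the metrics module, one detail of the latency code above is worth spelling out: a request that never receives a reply is detected purely by cache expiry, which is what test case 3 exercises. A self-contained sketch of that TTL-eviction pattern using the same `ttlcache` API (the key name and counter are illustrative, not Retina's code):

```go
package main

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"

	ttlcache "github.com/jellydator/ttlcache/v3"
)

func main() {
	// Entries that are never looked up simply age out; the eviction hook is
	// where a "no response" condition gets counted.
	var noResponse atomic.Int64

	cache := ttlcache.New(
		ttlcache.WithTTL[string, int64](500*time.Millisecond),
		ttlcache.WithCapacity[string, int64](1000),
	)
	cache.OnEviction(func(_ context.Context, reason ttlcache.EvictionReason, item *ttlcache.Item[string, int64]) {
		if reason == ttlcache.EvictionReasonExpired {
			noResponse.Add(1)
			fmt.Println("expired without a reply:", item.Key())
		}
	})
	go cache.Start() // expiration loop
	defer cache.Stop()

	// Outbound "request" observed; no matching reply ever deletes it.
	cache.Set("req-1", time.Now().UnixNano(), ttlcache.DefaultTTL)

	time.Sleep(1 * time.Second) // longer than the TTL
	fmt.Println("no-response count:", noResponse.Load())
}
```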
+package metrics + +import ( + "context" + "net" + "strings" + "sync" + "time" + + api "github.com/microsoft/retina/crd/api/v1alpha1" + "github.com/microsoft/retina/crd/api/v1alpha1/validations" + "github.com/microsoft/retina/pkg/common" + kcfg "github.com/microsoft/retina/pkg/config" + "github.com/microsoft/retina/pkg/controllers/cache" + "github.com/microsoft/retina/pkg/enricher" + "github.com/microsoft/retina/pkg/exporter" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/managers/filtermanager" + "github.com/microsoft/retina/pkg/metrics" + "github.com/microsoft/retina/pkg/pubsub" + "github.com/microsoft/retina/pkg/utils" + "github.com/cilium/cilium/api/v1/flow" + "go.uber.org/zap" +) + +const ( + forward string = "forward" + drop string = "drop" + tcp string = "tcp" + nodeApiserver string = "node_apiserver" + dns string = "dns" + + metricModuleReq filtermanager.Requestor = "metricModule" + interval time.Duration = 1 * time.Second +) + +var ( + m *Module + once sync.Once + // moduleReqMetadata is the metadata for the metric module. This metadata is used for namespaces + moduleReqMetadata filtermanager.RequestMetadata = filtermanager.RequestMetadata{ + RuleID: "namespace", + } + // modulePodReqMetadata is the metadata for the metric module pods. + // and the ips can be removed separately. Removing a namespace should remove pod ips in that namespace, but if the pod + // was added to the filtermanager separately, then the pod ip should still exist in the filtermap since it has another reference. + modulePodReqMetadata filtermanager.RequestMetadata = filtermanager.RequestMetadata{ + RuleID: "pod", + } +) + +type Module struct { + *sync.RWMutex + // ctx is the parent context + ctx context.Context + + ctxCancel context.CancelFunc + + // moduleCtx is the context of the metric module + moduleCtx context.Context + + // l is the logger + l *log.ZapLogger + + // daemon config + daemonConfig *kcfg.Config + + // metricConfigs is the list of metric configurations from CRD + configs []*api.MetricsConfiguration + + // current metrics spec for metrics module + currentSpec *api.MetricsSpec + + // pubsub is the pubsub client + pubsub pubsub.PubSubInterface + + // isRunning is the flag to indicate if the metric module is running + isRunning bool + + // enricher to read events from + enricher enricher.EnricherInterface + + // wg is the wait group for the metric module + wg sync.WaitGroup + + // includedNamespaces for metrics + includedNamespaces map[string]struct{} + + // excludedNamespaces for metrics + excludedNamespaces map[string]struct{} + + // metrics registry + registry map[string]AdvMetricsInterface + + // filterManager to add or delete ip address filters + filterManager filtermanager.IFilterManager + + // cache is the cache of all the objects + daemonCache cache.CacheInterface + + // dirtyPods is the map of pod IPs to add or delete + // from filtermanager + // todo need metadata , not just net.ip (need new struct) + dirtyPods *common.DirtyCache + + // pubsub subscription uuid + pubsubPodSub string +} + +func InitModule(ctx context.Context, + conf *kcfg.Config, + pubsub pubsub.PubSubInterface, + enricher enricher.EnricherInterface, + fm filtermanager.IFilterManager, + cache cache.CacheInterface, +) *Module { + // this is a thread-safe singleton instance of the metric module + once.Do(func() { + m = &Module{ + RWMutex: &sync.RWMutex{}, + l: log.Logger().Named(string("MetricModule")), + pubsub: pubsub, + configs: make([]*api.MetricsConfiguration, 0), + enricher: enricher, + 
wg: sync.WaitGroup{}, + registry: make(map[string]AdvMetricsInterface), + moduleCtx: ctx, + filterManager: fm, + daemonCache: cache, + dirtyPods: common.NewDirtyCache(), + pubsubPodSub: "", + daemonConfig: conf, + } + }) + + return m +} + +func (m *Module) Reconcile(spec *api.MetricsSpec) error { + // If the new spec has not changed, then do nothing. + if m.currentSpec != nil && m.currentSpec.Equals(spec) { + m.l.Debug("Spec has not changed. Not reconciling.") + return nil + } + + if m.isRunning { + m.l.Warn("Metric module is running. Cannot reconcile.") + // need to cancel the current context and create a new one + + m.ctxCancel() + m.wg.Wait() + } + + m.l.Info("Reconciling metric module", zap.Any("spec", spec)) + + m.Lock() + defer m.Unlock() + + m.updateNamespaceLists(spec) + + if m.currentSpec == nil || !validations.MetricsContextOptionsCompare(m.currentSpec.ContextOptions, spec.ContextOptions) { + m.updateMetricsContexts(spec) + } + + m.currentSpec = spec + + newCtx, cancel := context.WithCancel(m.moduleCtx) + m.ctxCancel = cancel + m.ctx = newCtx + m.run(newCtx) + return nil +} + +func (m *Module) updateNamespaceLists(spec *api.MetricsSpec) { + if len(spec.Namespaces.Include) > 0 && len(spec.Namespaces.Exclude) > 0 { + m.l.Error("Both included and excluded namespaces are specified. Cannot reconcile.") + } + + if len(spec.Namespaces.Include) > 0 { + m.l.Info("Including namespaces", zap.Strings("namespaces", spec.Namespaces.Include)) + m.appendIncludeList(spec.Namespaces.Include) + m.appendExcludeList([]string{}) + } + + if len(spec.Namespaces.Exclude) > 0 { + m.l.Info("Excluding namespaces", zap.Strings("namespaces", spec.Namespaces.Exclude)) + m.appendExcludeList(spec.Namespaces.Exclude) + m.appendIncludeList([]string{}) + } +} + +func (m *Module) updateMetricsContexts(spec *api.MetricsSpec) { + // clean old metrics from registry (remove prometheus collectors and remove map entry) + // reset the advanced metrics registry + for key, metricObj := range m.registry { + metricObj.Clean() + delete(m.registry, key) + } + + exporter.ResetAdvancedMetricsRegistry() + + ctxType := remoteContext + if m.daemonConfig != nil && !m.daemonConfig.RemoteContext { + // when localcontext is enabled, we do not need the context options for both src and dst + // metrics aggregation will be on a single pod basis and not the src/dst pod combination basis. + // so we can getaway with just one context type. For this reason we will only use srccontext + ctxType = localContext + } + + for _, ctxOption := range spec.ContextOptions { + switch { + case strings.Contains(ctxOption.MetricName, forward): + fm := NewForwardCountMetrics(&ctxOption, m.l, ctxType) + if fm != nil { + m.registry[ctxOption.MetricName] = fm + } + case strings.Contains(ctxOption.MetricName, drop): + dm := NewDropCountMetrics(&ctxOption, m.l, ctxType) + if dm != nil { + m.registry[ctxOption.MetricName] = dm + } + case strings.Contains(ctxOption.MetricName, tcp): + tm := NewTCPMetrics(&ctxOption, m.l, ctxType) + if tm != nil { + m.registry[ctxOption.MetricName] = tm + } + tr := NewTCPRetransMetrics(&ctxOption, m.l, ctxType) + if tr != nil { + m.registry[ctxOption.MetricName] = tr + } + case strings.Contains(ctxOption.MetricName, nodeApiserver): + // Uses the pattern we will follow in future where each base metric has one instance. + // Example - tcp, latency, dns, etc. 
+ lm := NewLatencyMetrics(&ctxOption, m.l, ctxType) + if lm != nil { + m.registry[nodeApiserver] = lm + } + case strings.Contains(ctxOption.MetricName, dns): + dm := NewDNSMetrics(&ctxOption, m.l, ctxType) + if dm != nil { + m.registry[ctxOption.MetricName] = dm + } + default: + m.l.Error("Invalid metric name", zap.String("metricName", ctxOption.MetricName)) + } + } + + for metricName, metricObj := range m.registry { + metricObj.Init(metricName) + } +} + +func (m *Module) run(newCtx context.Context) { + if m.isRunning { + m.l.Warn("Metric module is already running. Cannot start again.") + return + } + + cbFunc := pubsub.CallBackFunc(m.PodCallBackFn) + m.pubsubPodSub = m.pubsub.Subscribe(common.PubSubPods, &cbFunc) + + m.wg.Add(1) + go func() { + m.Lock() + m.isRunning = true + m.ctx = newCtx + m.Unlock() + + evReader := m.enricher.ExportReader() + for { + ev := evReader.NextFollow(newCtx) + if ev == nil { + break + } + + switch ev.Event.(type) { + case *flow.Flow: + m.RLock() + f := ev.Event.(*flow.Flow) + m.l.Debug("converted flow object", zap.Any("flow l4", f.IP)) + for _, metricObj := range m.registry { + metricObj.ProcessFlow(f) + } + m.RUnlock() + case *flow.LostEvent: + ev := ev.Event.(*flow.LostEvent) + // the number of lost events == the size of the ring buffer initialized. + metrics.LostEventsCounter.WithLabelValues(utils.EnricherRing, string(metricModuleReq)).Add(float64(ev.NumEventsLost)) + default: + m.l.Warn("Unknown event type", zap.Any("event", ev)) + } + } + + err := evReader.Close() + if err != nil { + m.l.Error("Error closing the event reader", zap.Error(err)) + } + m.Lock() + m.isRunning = false + m.ctx = nil + m.Unlock() + + m.wg.Done() + }() + + m.wg.Add(1) + go func() { + ticker := time.NewTicker(interval) + defer ticker.Stop() + for { + select { + case <-ticker.C: + m.l.Debug("Processing dirty pods") + m.applyDirtyPods() + case <-newCtx.Done(): + m.l.Info("Context cancelled. Exiting.") + err := m.pubsub.Unsubscribe(common.PubSubPods, m.pubsubPodSub) + if err != nil { + m.l.Error("Error unsubscribing from pubsub", zap.Error(err)) + } + m.wg.Done() + return + } + } + }() +} + +func (m *Module) appendIncludeList(namespaces []string) { + if m.includedNamespaces == nil { + m.includedNamespaces = make(map[string]struct{}) + } + + m.l.Info("Appending namespaces to include list", zap.Strings("namespaces", namespaces)) + + // TODO here we will need to check for IP which + // needs to be added to filter manager and which needs to be removed + // this logic is for temporary testing purposes. We will need to support multiple scenarios + // 1. Adding Ips initially when CRD is added + // 2. adding or deleting IPs when pods get created or deleted in namespace of interest + // 3. adding or deleting IPs when namespace is added or deleted + // 4. 
deleting ips when CRD is deleted + + tempNewNs := make(map[string]struct{}) + for _, ns := range namespaces { + tempNewNs[ns] = struct{}{} + } + + m.l.Info("Current included namespaces", zap.Any("namespaces", m.includedNamespaces)) + toAdd, toRemove := make([]string, 0), make([]string, 0) + for _, ns := range namespaces { + if _, ok := m.includedNamespaces[ns]; !ok { + toAdd = append(toAdd, ns) + } + } + + for ns := range m.includedNamespaces { + if _, ok := tempNewNs[ns]; !ok { + toRemove = append(toRemove, ns) + delete(m.includedNamespaces, ns) + } + } + + if len(m.includedNamespaces) != len(tempNewNs) { + m.includedNamespaces = tempNewNs + } + + m.l.Info("Namespaces to add", zap.Strings("namespaces", toAdd)) + m.l.Info("Namespaces to remove", zap.Strings("namespaces", toRemove)) + + // toAdd namespace IPs to filter manager + for _, ns := range toAdd { + ips := m.daemonCache.GetIPsByNamespace(ns) + m.l.Info("Adding IPs to filter manager", zap.String("namespace", ns), zap.Any("ips", ips)) + + err := m.filterManager.AddIPs(ips, metricModuleReq, moduleReqMetadata) + if err != nil { + m.l.Error("Error adding IPs to filter manager", zap.Error(err)) + } + } + + // toRemove namespace IPs from filter manager + for _, ns := range toRemove { + ips := m.daemonCache.GetIPsByNamespace(ns) + m.l.Info("Removing IPs from filter manager", zap.String("namespace", ns), zap.Any("ips", ips)) + + err := m.filterManager.DeleteIPs(ips, metricModuleReq, moduleReqMetadata) + if err != nil { + m.l.Error("Error removing IPs from filter manager", zap.Error(err)) + } + } +} + +func (m *Module) appendExcludeList(ns []string) { + if m.excludedNamespaces == nil { + m.excludedNamespaces = make(map[string]struct{}) + } + + // TODO here we will need to check for IP which + // needs to be added to filter manager and which needs to be removed +} + +func (m *Module) PodCallBackFn(obj interface{}) { + event := obj.(*cache.CacheEvent) + if event == nil { + return + } + + pod := event.Obj.(*common.RetinaEndpoint) + if pod == nil { + return + } + + ip, err := pod.PrimaryNetIP() + if err != nil || ip == nil { + m.l.Error("Error getting primary net IP", zap.Any("pod obj", pod), zap.Error(err)) + return + } + + m.Lock() + if !m.nsOfInterest(pod.Namespace()) && !m.podOfInterest(ip, pod.Annotations()) { + m.Unlock() + return + } + m.Unlock() + + handlePodEvent(event, m, pod, ip) +} + +func handlePodEvent(event *cache.CacheEvent, m *Module, pod *common.RetinaEndpoint, ip net.IP) { + if pod.Name() == common.APIServerEndpointName && pod.Namespace() == common.APIServerEndpointName { + m.l.Debug("Ignoring apiserver endpoint") + return + } + podCacheEntry := DirtyCachePod{ + IP: ip, + Annotated: m.podAnnotated(pod.Annotations()), + Namespaced: m.nsOfInterest(pod.Namespace()), + } + switch event.Type { + case cache.EventTypePodAdded: + // Pod is not annotated NOR is it namespaced (in crd or annotated). + // This means that we have the stale pod ip in the filtermap so we should remove it. + // This case should only occur when the pod annotation is removed since this is an EventTypePodAdded (also accounts for pod update) + if !podCacheEntry.Annotated && !podCacheEntry.Namespaced { + m.l.Info("Adding pod IP to DELETE dirty pods cache. 
Pod not annotated or in namespace of interest.", zap.String("pod name", pod.NamespacedName())) + podCacheEntry.Annotated = true + m.dirtyPods.ToDelete(ip.String(), podCacheEntry) + return + } + m.l.Info("Adding pod IP to ADD dirty pods cache", zap.String("pod name", pod.NamespacedName())) + m.dirtyPods.ToAdd(podCacheEntry.IP.String(), podCacheEntry) + case cache.EventTypePodDeleted: + m.l.Info("Adding pod IP to DELETE dirty pods cache", zap.String("pod name", pod.NamespacedName())) + m.dirtyPods.ToDelete(ip.String(), podCacheEntry) + default: + m.l.Warn("Unknown cache event type", zap.Any("event", event)) + return + } +} + +// Adds or removes pod ips from filtermanager +func (m *Module) applyDirtyPods() { + m.Lock() + defer m.Unlock() + + m.applyDirtyPodsAdd() + m.applyDirtyPodsDelete() +} + +// applyDirtyPodsAdd adds pod ips to filtermanager +// if the pod is annotated, then it should be added to the filtermanager with pod request metadata +// if the pod is a namespace of interest, then it should be added to the filtermanager with default request metadata +// there can be overlap here, since if a pod is annotated, and the namespace is annotated, we do not want to remove +// the pod ip from the filtermanager if only one of the annotations is removed. +func (m *Module) applyDirtyPodsAdd() { + adds := m.dirtyPods.GetAddList() + if len(adds) > 0 { + podsToAdd := make([]net.IP, 0) + podsToAddNamespaced := make([]net.IP, 0) + for _, entry := range adds { + podEntry := entry.(DirtyCachePod) + if podEntry.Annotated { + podsToAdd = append(podsToAdd, podEntry.IP) + } + if podEntry.Namespaced { + podsToAddNamespaced = append(podsToAddNamespaced, podEntry.IP) + } + } + if len(podsToAdd) > 0 { + m.l.Debug("Adding annotated pod IPs to filtermap", zap.Any("IPs", podsToAdd)) + err := m.filterManager.AddIPs(podsToAdd, metricModuleReq, modulePodReqMetadata) + if err != nil { + m.l.Error("Error adding pod IP to filter manager", zap.Error(err)) + } + } + if len(podsToAddNamespaced) > 0 { + m.l.Debug("Adding namespaced pod IPs to filtermap", zap.Any("IPs", podsToAddNamespaced)) + err := m.filterManager.AddIPs(podsToAddNamespaced, metricModuleReq, moduleReqMetadata) + if err != nil { + m.l.Error("Error adding pod IP to filter manager", zap.Error(err)) + } + } + } + m.dirtyPods.ClearAdd() +} + +// applyDirtyPodsDelete deletes pod ips from filtermanager +func (m *Module) applyDirtyPodsDelete() { + deletes := m.dirtyPods.GetDeleteList() + if len(deletes) > 0 { + podOfInterestDeleteList := make([]net.IP, 0) + namespaceOfInterestDeleteList := make([]net.IP, 0) + for _, entry := range deletes { + podEntry := entry.(DirtyCachePod) + if podEntry.Annotated { + podOfInterestDeleteList = append(podOfInterestDeleteList, podEntry.IP) + } + if podEntry.Namespaced { + namespaceOfInterestDeleteList = append(namespaceOfInterestDeleteList, podEntry.IP) + } + } + + if len(podOfInterestDeleteList) > 0 { + m.l.Debug("Deleting Ips in dirty pods from filtermap", zap.Any("IPs", podOfInterestDeleteList)) + err := m.filterManager.DeleteIPs(podOfInterestDeleteList, metricModuleReq, modulePodReqMetadata) + if err != nil { + m.l.Error("Error deleting pod IP from filter manager", zap.Error(err)) + } + } + if len(namespaceOfInterestDeleteList) > 0 { + m.l.Debug("Deleting Ips in dirty pods from filtermap", zap.Any("IPs", namespaceOfInterestDeleteList)) + err := m.filterManager.DeleteIPs(namespaceOfInterestDeleteList, metricModuleReq, moduleReqMetadata) + if err != nil { + m.l.Error("Error deleting pod IP from filter manager", zap.Error(err)) 
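The dirty-pod handling above follows a common batching shape: event callbacks only mark IPs as pending, and a ticker-driven loop applies the accumulated set against the filter manager in one call. A stripped-down sketch of just that batching idea (the `dirtySet` type and its methods are illustrative stand-ins, not Retina's `DirtyCache`):

```go
package main

import (
	"fmt"
	"net"
	"sync"
	"time"
)

// dirtySet collects IPs marked by callbacks until a periodic flush applies them.
type dirtySet struct {
	mu      sync.Mutex
	pending map[string]net.IP
}

func (d *dirtySet) mark(ip net.IP) {
	d.mu.Lock()
	defer d.mu.Unlock()
	d.pending[ip.String()] = ip
}

// flush drains the pending set and returns the batch to apply.
func (d *dirtySet) flush() []net.IP {
	d.mu.Lock()
	defer d.mu.Unlock()
	batch := make([]net.IP, 0, len(d.pending))
	for _, ip := range d.pending {
		batch = append(batch, ip)
	}
	d.pending = make(map[string]net.IP)
	return batch
}

func main() {
	d := &dirtySet{pending: make(map[string]net.IP)}
	d.mark(net.IPv4(10, 0, 0, 1))
	d.mark(net.IPv4(10, 0, 0, 1)) // duplicates collapse into one entry
	d.mark(net.IPv4(10, 0, 0, 2))

	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	<-ticker.C
	fmt.Println("applying batch of", len(d.flush()), "IPs") // 2
}
```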
+ } + } + } + + m.dirtyPods.ClearDelete() +} + +// nsOfInterest checks if the namespace is in the included or excluded list +// included namespaces can be defined by crd or automatically applied by annotated namespaces. +func (m *Module) nsOfInterest(ns string) bool { + if len(m.includedNamespaces) > 0 { + if _, ok := m.includedNamespaces[ns]; ok { + return true + } + return false + } + + if len(m.excludedNamespaces) > 0 { + if _, ok := m.excludedNamespaces[ns]; ok { + return false + } + return true + } + + return false +} + +func (m *Module) podAnnotated(annotations map[string]string) bool { + if m.daemonConfig == nil || !m.daemonConfig.EnableAnnotations { + return false + } + + if len(annotations) == 0 { + return false + } + + if val, ok := annotations[common.RetinaPodAnnotation]; ok && val == common.RetinaPodAnnotationValue { + m.l.Debug("Pod is annotated with retina observe annotation", zap.Any("annotations", annotations)) + return true + } + + return false +} + +func (m *Module) podOfInterest(ip net.IP, annotations map[string]string) bool { + fmHasIP := m.filterManager.HasIP(ip) + return fmHasIP || m.podAnnotated(annotations) +} diff --git a/pkg/module/metrics/metrics_module_test.go b/pkg/module/metrics/metrics_module_test.go new file mode 100644 index 000000000..6896055f5 --- /dev/null +++ b/pkg/module/metrics/metrics_module_test.go @@ -0,0 +1,583 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package metrics + +import ( + "context" + "net" + "sync" + "testing" + + api "github.com/microsoft/retina/crd/api/v1alpha1" + "github.com/microsoft/retina/pkg/common" + kcfg "github.com/microsoft/retina/pkg/config" + "github.com/microsoft/retina/pkg/controllers/cache" + "github.com/microsoft/retina/pkg/enricher" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/managers/filtermanager" + "github.com/microsoft/retina/pkg/pubsub" + "github.com/cilium/cilium/pkg/hubble/container" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" +) + +const ( + testCfgFile = "../../config/testwith/config.yaml" +) + +func TestAppendIncludeList(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + cfg, err := kcfg.GetConfig(testCfgFile) + assert.NotNil(t, cfg) + assert.Nil(t, err) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + p := pubsub.NewMockPubSubInterface(ctrl) //nolint:typecheck + e := enricher.NewMockEnricherInterface(ctrl) //nolint:typecheck + fm := filtermanager.NewMockIFilterManager(ctrl) //nolint:typecheck + c := cache.NewMockCacheInterface(ctrl) //nolint:typecheck + c.EXPECT().GetIPsByNamespace(gomock.Any()).Return([]net.IP{}).AnyTimes() + fm.EXPECT().AddIPs(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + + me := InitModule( + context.Background(), + cfg, + p, + e, + fm, + c, + ) + assert.NotNil(t, me) + + me.appendIncludeList([]string{"test"}) +} + +func TestPodCallBack(t *testing.T) { + cfg, err := kcfg.GetConfig(testCfgFile) + cfg.EnableAnnotations = true + assert.NotNil(t, cfg) + assert.Nil(t, err) + log.SetupZapLogger(log.GetDefaultLogOpts()) + + tests := []struct { + name string + addObjs []*common.RetinaEndpoint + deleteObjs []*common.RetinaEndpoint + includeNslist []string + excludeNslist []string + deleteExpected []net.IP + addExpected []net.IP + fmHasIP bool + }{ + { + name: "test", + addObjs: []*common.RetinaEndpoint{ + common.NewRetinaEndpoint("pod1", "ns1", &common.IPAddresses{IPv4: net.IPv4(10, 0, 0, 1)}), + }, + includeNslist: []string{"ns1"}, + 
deleteExpected: nil, + addExpected: []net.IP{net.IPv4(10, 0, 0, 1)}, + }, + { + name: "test2", + addObjs: []*common.RetinaEndpoint{ + common.NewRetinaEndpoint("pod1", "ns1", &common.IPAddresses{IPv4: net.IPv4(10, 0, 0, 1)}), + common.NewRetinaEndpoint("pod2", "ns1", &common.IPAddresses{IPv4: net.IPv4(10, 0, 0, 1)}), + }, + includeNslist: []string{"ns1"}, + deleteExpected: nil, + addExpected: []net.IP{net.IPv4(10, 0, 0, 1)}, + }, + { + name: "test3", + addObjs: []*common.RetinaEndpoint{ + common.NewRetinaEndpoint("pod1", "ns1", &common.IPAddresses{IPv4: net.IPv4(10, 0, 0, 1)}), + common.NewRetinaEndpoint("pod2", "ns1", &common.IPAddresses{IPv4: net.IPv4(10, 0, 0, 1)}), + }, + deleteObjs: []*common.RetinaEndpoint{ + common.NewRetinaEndpoint("pod1", "ns1", &common.IPAddresses{IPv4: net.IPv4(10, 0, 0, 2)}), + }, + includeNslist: []string{"ns1"}, + deleteExpected: []net.IP{net.IPv4(10, 0, 0, 2)}, + addExpected: []net.IP{net.IPv4(10, 0, 0, 1)}, + }, + { + name: "test4", + addObjs: []*common.RetinaEndpoint{ + common.NewRetinaEndpoint("pod1", "ns1", &common.IPAddresses{IPv4: net.IPv4(10, 0, 0, 1)}), + common.NewRetinaEndpoint("pod2", "ns1", &common.IPAddresses{IPv4: net.IPv4(10, 0, 0, 1)}), + common.NewRetinaEndpoint("pod2", "ns3", &common.IPAddresses{IPv4: net.IPv4(10, 0, 0, 4)}), + }, + deleteObjs: []*common.RetinaEndpoint{ + common.NewRetinaEndpoint("pod1", "ns1", &common.IPAddresses{IPv4: net.IPv4(10, 0, 0, 2)}), + }, + includeNslist: []string{"ns1"}, + deleteExpected: []net.IP{net.IPv4(10, 0, 0, 2)}, + addExpected: []net.IP{net.IPv4(10, 0, 0, 1)}, + }, + { + name: "pod of interest namespace not of interest", + addObjs: []*common.RetinaEndpoint{ + func() *common.RetinaEndpoint { + ep := common.NewRetinaEndpoint("pod1", "ns1", &common.IPAddresses{IPv4: net.IPv4(10, 0, 0, 1)}) + ep.SetAnnotations(map[string]string{common.RetinaPodAnnotation: common.RetinaPodAnnotationValue}) + return ep + }(), + }, + includeNslist: []string{"ns2"}, + deleteExpected: []net.IP{}, + addExpected: []net.IP{net.IPv4(10, 0, 0, 1)}, + }, + { + name: "pod not of interest and 1 namespace of interest", + addObjs: []*common.RetinaEndpoint{ + common.NewRetinaEndpoint("pod1", "ns1", &common.IPAddresses{IPv4: net.IPv4(10, 0, 0, 1)}), + common.NewRetinaEndpoint("pod1", "ns2", &common.IPAddresses{IPv4: net.IPv4(10, 0, 0, 2)}), + }, + includeNslist: []string{"ns2"}, + deleteExpected: []net.IP{}, + addExpected: []net.IP{net.IPv4(10, 0, 0, 2)}, + }, + { + name: "ns not of interest, pod not annotated, pod ip in filtermap", + addObjs: []*common.RetinaEndpoint{ + common.NewRetinaEndpoint("pod1", "ns1", &common.IPAddresses{IPv4: net.IPv4(10, 0, 0, 1)}), + common.NewRetinaEndpoint("pod1", "ns2", &common.IPAddresses{IPv4: net.IPv4(10, 0, 0, 2)}), + }, + includeNslist: []string{"ns2"}, + deleteExpected: []net.IP{net.IPv4(10, 0, 0, 1)}, + addExpected: []net.IP{net.IPv4(10, 0, 0, 2)}, + fmHasIP: true, + }, + { + name: "pod and ns of interest", + addObjs: []*common.RetinaEndpoint{ + common.NewRetinaEndpoint("pod1", "ns1", &common.IPAddresses{IPv4: net.IPv4(10, 0, 0, 1)}), + }, + includeNslist: []string{"ns1"}, + deleteExpected: []net.IP{}, + addExpected: []net.IP{net.IPv4(10, 0, 0, 1)}, + fmHasIP: true, + }, + { + name: "pod and ns not of interest, but fm has ip (annotation removed)", + addObjs: []*common.RetinaEndpoint{ + common.NewRetinaEndpoint("pod1", "ns1", &common.IPAddresses{IPv4: net.IPv4(10, 0, 0, 1)}), + }, + includeNslist: []string{"ns2"}, + deleteExpected: []net.IP{net.IPv4(10, 0, 0, 1)}, + addExpected: []net.IP{}, + fmHasIP: 
true, + }, + } + + for _, tt := range tests { + log.Logger().Info("***** Running test *****", zap.String("name", tt.name)) + ctrl := gomock.NewController(t) + + p := pubsub.NewMockPubSubInterface(ctrl) //nolint:typecheck + e := enricher.NewMockEnricherInterface(ctrl) //nolint:typecheck + fm := filtermanager.NewMockIFilterManager(ctrl) //nolint:typecheck + c := cache.NewMockCacheInterface(ctrl) //nolint:typecheck + c.EXPECT().GetIPsByNamespace(gomock.Any()).Return([]net.IP{}).AnyTimes() + fm.EXPECT().AddIPs([]net.IP{}, gomock.Any(), gomock.Any()).Return(nil).Times(1) + fm.EXPECT().HasIP(gomock.Any()).Return(tt.fmHasIP).AnyTimes() + if len(tt.addExpected) > 0 { + fm.EXPECT().AddIPs(tt.addExpected, gomock.Any(), gomock.Any()).Return(nil).Times(1) + } + if len(tt.deleteExpected) > 0 { + fm.EXPECT().DeleteIPs(tt.deleteExpected, gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + } + + me := &Module{ + RWMutex: &sync.RWMutex{}, + l: log.Logger().Named(string("MetricModule")), + pubsub: p, + configs: make([]*api.MetricsConfiguration, 0), + enricher: e, + wg: sync.WaitGroup{}, + registry: make(map[string]AdvMetricsInterface), + moduleCtx: context.Background(), + filterManager: fm, + daemonCache: c, + dirtyPods: common.NewDirtyCache(), + pubsubPodSub: "", + daemonConfig: cfg, + } + assert.NotNil(t, me) + me.appendIncludeList(tt.includeNslist) + for _, obj := range tt.addObjs { + ev := cache.NewCacheEvent(cache.EventTypePodAdded, obj) + me.PodCallBackFn(ev) + } + + for _, obj := range tt.deleteObjs { + ev := cache.NewCacheEvent(cache.EventTypePodDeleted, obj) + me.PodCallBackFn(ev) + } + + me.applyDirtyPods() + + ctrl.Finish() + } +} + +// Deletes pod ips for specific namespace, but confirms that delete is called for namespace request metadata +func TestModule_NamespaceAndPodUpdates(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + ctrl := gomock.NewController(t) + cfg, _ := kcfg.GetConfig(testCfgFile) + cfg.EnableAnnotations = true + p := pubsub.NewMockPubSubInterface(ctrl) //nolint:typecheck + e := enricher.NewMockEnricherInterface(ctrl) //nolint:typecheck + fm := filtermanager.NewMockIFilterManager(ctrl) //nolint:typecheck + c := cache.NewMockCacheInterface(ctrl) //nolint:typecheck + ns1 := "ns1" + ns2 := "ns2" + + includedNs := make(map[string]struct{}) + includedNs[ns1] = struct{}{} + + // adding new namespace with new pod ips + c.EXPECT().GetIPsByNamespace(ns2).Times(1).Return([]net.IP{net.IPv4(10, 0, 0, 2)}) + fm.EXPECT().AddIPs([]net.IP{net.IPv4(10, 0, 0, 2)}, gomock.Any(), moduleReqMetadata).Times(1) + c.EXPECT().GetIPsByNamespace(ns1).Times(1).Return([]net.IP{net.IPv4(10, 0, 0, 1)}) + fm.EXPECT().HasIP(gomock.Any()).Return(true).AnyTimes() + + // confirm specific calls to filter manager and cache with correct request metadata + fm.EXPECT().DeleteIPs([]net.IP{net.IPv4(10, 0, 0, 1)}, gomock.Any(), moduleReqMetadata).Times(1) + // fm.EXPECT().DeleteIPs([]net.IP{net.IPv4(10, 0, 0, 2)}, gomock.Any(), modulePodReqMetadata).Times(1) + + me := &Module{ + RWMutex: &sync.RWMutex{}, + l: log.Logger().Named(string("MetricModule")), + pubsub: p, + configs: make([]*api.MetricsConfiguration, 0), + enricher: e, + wg: sync.WaitGroup{}, + registry: make(map[string]AdvMetricsInterface), + moduleCtx: context.Background(), + filterManager: fm, + daemonCache: c, + dirtyPods: common.NewDirtyCache(), + pubsubPodSub: "", + daemonConfig: cfg, + includedNamespaces: includedNs, + } + podobj := common.NewRetinaEndpoint("pod1", ns1, &common.IPAddresses{IPv4: net.IPv4(10, 0, 0, 1)}) + 
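These cases hinge on the include/exclude precedence implemented by `nsOfInterest` earlier in this patch: a non-empty include list is authoritative, an exclude list only applies when no include list is set, and with neither configured nothing is selected. A self-contained sketch of that precedence (function name is illustrative):

```go
package main

import "fmt"

// namespaceOfInterest sketches the selection precedence: an explicit include
// list wins; otherwise an exclude list acts as a deny list; with neither
// configured, no namespace is selected.
func namespaceOfInterest(ns string, include, exclude map[string]struct{}) bool {
	if len(include) > 0 {
		_, ok := include[ns]
		return ok
	}
	if len(exclude) > 0 {
		_, ok := exclude[ns]
		return !ok
	}
	return false
}

func main() {
	include := map[string]struct{}{"ns1": {}}
	fmt.Println(namespaceOfInterest("ns1", include, nil)) // true
	fmt.Println(namespaceOfInterest("ns2", include, nil)) // false: include list wins

	exclude := map[string]struct{}{"kube-system": {}}
	fmt.Println(namespaceOfInterest("default", nil, exclude))     // true
	fmt.Println(namespaceOfInterest("kube-system", nil, exclude)) // false
}
```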
podobj.SetAnnotations(map[string]string{common.RetinaPodAnnotation: common.RetinaPodAnnotationValue}) + podobj2 := common.NewRetinaEndpoint("pod2", ns2, &common.IPAddresses{IPv4: net.IPv4(10, 0, 0, 2)}) + podobj2.SetAnnotations(map[string]string{common.RetinaPodAnnotation: common.RetinaPodAnnotationValue}) + + ev := cache.NewCacheEvent(cache.EventTypePodAdded, podobj) + ev2 := cache.NewCacheEvent(cache.EventTypePodAdded, podobj2) + + // add pod1 and pod2 to fm (annotations) + me.PodCallBackFn(ev) + me.PodCallBackFn(ev2) + + // add ns2 to include list, but also remove ns1 from fm + me.appendIncludeList([]string{ns2}) + + // remove annotation + podobj2.SetAnnotations(map[string]string{}) + ev3 := cache.NewCacheEvent(cache.EventTypePodAdded, podobj2) + me.PodCallBackFn(ev3) + + ctrl.Finish() +} + +func TestModule_Reconcile(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + l := log.Logger().Named("test") + + testDropMetric := &DropCountMetrics{ + baseMetricObject: baseMetricObject{ + advEnable: true, + ctxOptions: &api.MetricsContextOptions{ + MetricName: "drop_count", + SourceLabels: []string{"ip"}, + DestinationLabels: []string{"pod"}, + AdditionalLabels: []string{"namespace"}, + }, + }, + } + testDropMetric.Init("drop_count") + testDropMetricBytes := &DropCountMetrics{ + baseMetricObject: baseMetricObject{ + advEnable: true, + ctxOptions: &api.MetricsContextOptions{ + MetricName: "drop_bytes", + SourceLabels: []string{"ip"}, + }, + }, + } + testDropMetricBytes.Init("drop_bytes") + testForwardMetric := &ForwardMetrics{ + baseMetricObject: baseMetricObject{ + advEnable: true, + ctxOptions: &api.MetricsContextOptions{ + MetricName: "forward_count", + SourceLabels: []string{"ip"}, + DestinationLabels: []string{"pod"}, + AdditionalLabels: []string{"namespace"}, + }, + }, + } + testForwardMetric.Init("forward_count") + testForwardMetricBytes := &ForwardMetrics{ + baseMetricObject: baseMetricObject{ + advEnable: true, + ctxOptions: &api.MetricsContextOptions{ + MetricName: "forward_bytes", + SourceLabels: []string{"ip"}, + }, + }, + } + testForwardMetricBytes.Init("forward_bytes") + + tests := []struct { + name string + spec *api.MetricsSpec + m *Module + expectErr bool + expectNoCalls bool + }{ + { + name: "Registry is empty and no error", + spec: &api.MetricsSpec{ + ContextOptions: []api.MetricsContextOptions{ + { + MetricName: "drop_count", + SourceLabels: []string{"ip", "pod"}, + DestinationLabels: []string{"pod"}, + AdditionalLabels: []string{"namespace"}, + }, + }, + }, + m: &Module{ + RWMutex: &sync.RWMutex{}, + registry: make(map[string]AdvMetricsInterface), + l: l, + moduleCtx: context.Background(), + }, + expectErr: false, + }, + { + name: "Registry is not empty and no error", + spec: &api.MetricsSpec{ + ContextOptions: []api.MetricsContextOptions{ + { + MetricName: "drop_count", + SourceLabels: []string{"ip", "pod"}, + DestinationLabels: []string{"pod"}, + AdditionalLabels: []string{"namespace"}, + }, + { + MetricName: "forward_count", + SourceLabels: []string{"ip", "pod"}, + DestinationLabels: []string{"pod"}, + AdditionalLabels: []string{"namespace"}, + }, + }, + }, + m: &Module{ + RWMutex: &sync.RWMutex{}, + registry: map[string]AdvMetricsInterface{ + "drop_count": testDropMetric, + "forward_count": testForwardMetric, + }, + moduleCtx: context.Background(), + l: l, + }, + expectErr: false, + }, + { + name: "Registry is not empty and no error for bytes", + spec: &api.MetricsSpec{ + ContextOptions: 
[]api.MetricsContextOptions{ + { + MetricName: "drop_count", + SourceLabels: []string{"ip", "pod"}, + DestinationLabels: []string{"pod"}, + AdditionalLabels: []string{"namespace"}, + }, + { + MetricName: "drop_bytes", + SourceLabels: []string{"ip"}, + }, + { + MetricName: "forward_count", + SourceLabels: []string{"ip", "pod"}, + DestinationLabels: []string{"pod"}, + AdditionalLabels: []string{"namespace"}, + }, + { + MetricName: "forward_bytes", + SourceLabels: []string{"ip"}, + }, + }, + }, + m: &Module{ + RWMutex: &sync.RWMutex{}, + registry: map[string]AdvMetricsInterface{ + "drop_count": testDropMetric, + "drop_bytes": testDropMetricBytes, + "forward_count": testForwardMetric, + "forward_bytes": testForwardMetricBytes, + }, + moduleCtx: context.Background(), + l: l, + }, + expectErr: false, + }, + { + name: "Registry is not empty and error for invalid name", + spec: &api.MetricsSpec{ + ContextOptions: []api.MetricsContextOptions{ + { + MetricName: "drop_hello", + SourceLabels: []string{"ip", "pod"}, + DestinationLabels: []string{"pod"}, + AdditionalLabels: []string{"namespace"}, + }, + { + MetricName: "drop_bytes", + SourceLabels: []string{"ip"}, + }, + { + MetricName: "forward_count", + SourceLabels: []string{"ip", "pod"}, + DestinationLabels: []string{"pod"}, + AdditionalLabels: []string{"namespace"}, + }, + { + MetricName: "forward_hi", + SourceLabels: []string{"ip"}, + }, + }, + }, + m: &Module{ + RWMutex: &sync.RWMutex{}, + registry: map[string]AdvMetricsInterface{ + "drop_count": testDropMetric, + "drop_bytes": testDropMetricBytes, + "forward_count": testForwardMetric, + "forward_bytes": testForwardMetricBytes, + }, + moduleCtx: context.Background(), + l: l, + }, + expectErr: true, + }, + { + name: "Expect no change for spec the same", + spec: &api.MetricsSpec{ + ContextOptions: []api.MetricsContextOptions{ + { + MetricName: "drop_count", + SourceLabels: []string{"ip"}, + DestinationLabels: []string{"pod"}, + AdditionalLabels: []string{"namespace"}, + }, + }, + }, + m: &Module{ + RWMutex: &sync.RWMutex{}, + registry: map[string]AdvMetricsInterface{ + "drop_count": testDropMetric, + }, + moduleCtx: context.Background(), + l: l, + currentSpec: &api.MetricsSpec{ + ContextOptions: []api.MetricsContextOptions{ + { + MetricName: "drop_count", + SourceLabels: []string{"ip"}, + DestinationLabels: []string{"pod"}, + AdditionalLabels: []string{"namespace"}, + }, + }, + }, + }, + expectErr: false, + expectNoCalls: true, + }, + } + + for _, tt := range tests { + log.Logger().Info("***** Running test *****", zap.String("name", tt.name)) + p := pubsub.NewMockPubSubInterface(ctrl) //nolint:typecheck + e := enricher.NewMockEnricherInterface(ctrl) //nolint:typecheck + fm := filtermanager.NewMockIFilterManager(ctrl) //nolint:typecheck + c := cache.NewMockCacheInterface(ctrl) //nolint:typecheck + tt.m.pubsub = p + tt.m.enricher = e + tt.m.filterManager = fm + tt.m.daemonCache = c + testRing := container.NewRing(container.Capacity1) + testRingReader := container.NewRingReader(testRing, 0) + p.EXPECT().Subscribe(gomock.Any(), gomock.Any()).Return("test").AnyTimes() + e.EXPECT().ExportReader().AnyTimes().Return(testRingReader) + + if tt.expectNoCalls { + p.EXPECT().Subscribe(gomock.Any(), gomock.Any()).Times(0) + e.EXPECT().ExportReader().Times(0) + } + + err := tt.m.Reconcile(tt.spec) + if err != nil && !tt.expectErr { + t.Errorf("unexpected error: %v", err) + } + } +} + +func TestPodAnnotated(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + l := log.Logger().Named("test") + // test 
podAnnotated function + tests := []struct { + name string + annotations map[string]string + m *Module + expected bool + }{ + { + name: "pod annotated disabled annotations", + annotations: map[string]string{ + common.RetinaPodAnnotation: common.RetinaPodAnnotationValue, + }, + m: &Module{}, + expected: false, + }, + { + name: "pod annotated", + annotations: map[string]string{ + common.RetinaPodAnnotation: common.RetinaPodAnnotationValue, + }, + m: &Module{ + daemonConfig: &kcfg.Config{ + EnableAnnotations: true, + }, + l: l, + }, + expected: true, + }, + { + name: "pod not annotated", + annotations: map[string]string{ + common.RetinaPodAnnotation: "test", + }, + m: &Module{}, + expected: false, + }, + } + for _, tt := range tests { + l.Info("***** Running test *****", zap.String("name", tt.name)) + assert.Equal(t, tt.expected, tt.m.podAnnotated(tt.annotations)) + } +} diff --git a/pkg/module/metrics/mock_types.go b/pkg/module/metrics/mock_types.go new file mode 100644 index 000000000..8da8b13c1 --- /dev/null +++ b/pkg/module/metrics/mock_types.go @@ -0,0 +1,174 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: types.go + +// Package metrics is a generated GoMock package. +package metrics + +import ( + reflect "reflect" + + flow "github.com/cilium/cilium/api/v1/flow" + gomock "github.com/golang/mock/gomock" + v1alpha1 "github.com/microsoft/retina/crd/api/v1alpha1" +) + +// MockIModule is a mock of IModule interface. +type MockIModule struct { + ctrl *gomock.Controller + recorder *MockIModuleMockRecorder +} + +// MockIModuleMockRecorder is the mock recorder for MockIModule. +type MockIModuleMockRecorder struct { + mock *MockIModule +} + +// NewMockIModule creates a new mock instance. +func NewMockIModule(ctrl *gomock.Controller) *MockIModule { + mock := &MockIModule{ctrl: ctrl} + mock.recorder = &MockIModuleMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIModule) EXPECT() *MockIModuleMockRecorder { + return m.recorder +} + +// Reconcile mocks base method. +func (m *MockIModule) Reconcile(spec *v1alpha1.MetricsSpec) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Reconcile", spec) + ret0, _ := ret[0].(error) + return ret0 +} + +// Reconcile indicates an expected call of Reconcile. +func (mr *MockIModuleMockRecorder) Reconcile(spec interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reconcile", reflect.TypeOf((*MockIModule)(nil).Reconcile), spec) +} + +// MockAdvMetricsInterface is a mock of AdvMetricsInterface interface. +type MockAdvMetricsInterface struct { + ctrl *gomock.Controller + recorder *MockAdvMetricsInterfaceMockRecorder +} + +// MockAdvMetricsInterfaceMockRecorder is the mock recorder for MockAdvMetricsInterface. +type MockAdvMetricsInterfaceMockRecorder struct { + mock *MockAdvMetricsInterface +} + +// NewMockAdvMetricsInterface creates a new mock instance. +func NewMockAdvMetricsInterface(ctrl *gomock.Controller) *MockAdvMetricsInterface { + mock := &MockAdvMetricsInterface{ctrl: ctrl} + mock.recorder = &MockAdvMetricsInterfaceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockAdvMetricsInterface) EXPECT() *MockAdvMetricsInterfaceMockRecorder { + return m.recorder +} + +// Clean mocks base method. +func (m *MockAdvMetricsInterface) Clean() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Clean") +} + +// Clean indicates an expected call of Clean. 
+func (mr *MockAdvMetricsInterfaceMockRecorder) Clean() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Clean", reflect.TypeOf((*MockAdvMetricsInterface)(nil).Clean)) +} + +// Init mocks base method. +func (m *MockAdvMetricsInterface) Init(metricName string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Init", metricName) +} + +// Init indicates an expected call of Init. +func (mr *MockAdvMetricsInterfaceMockRecorder) Init(metricName interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Init", reflect.TypeOf((*MockAdvMetricsInterface)(nil).Init), metricName) +} + +// ProcessFlow mocks base method. +func (m *MockAdvMetricsInterface) ProcessFlow(f *flow.Flow) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "ProcessFlow", f) +} + +// ProcessFlow indicates an expected call of ProcessFlow. +func (mr *MockAdvMetricsInterfaceMockRecorder) ProcessFlow(f interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProcessFlow", reflect.TypeOf((*MockAdvMetricsInterface)(nil).ProcessFlow), f) +} + +// MockContextOptionsInterface is a mock of ContextOptionsInterface interface. +type MockContextOptionsInterface struct { + ctrl *gomock.Controller + recorder *MockContextOptionsInterfaceMockRecorder +} + +// MockContextOptionsInterfaceMockRecorder is the mock recorder for MockContextOptionsInterface. +type MockContextOptionsInterfaceMockRecorder struct { + mock *MockContextOptionsInterface +} + +// NewMockContextOptionsInterface creates a new mock instance. +func NewMockContextOptionsInterface(ctrl *gomock.Controller) *MockContextOptionsInterface { + mock := &MockContextOptionsInterface{ctrl: ctrl} + mock.recorder = &MockContextOptionsInterfaceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockContextOptionsInterface) EXPECT() *MockContextOptionsInterfaceMockRecorder { + return m.recorder +} + +// getLabels mocks base method. +func (m *MockContextOptionsInterface) getLabels() []string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "getLabels") + ret0, _ := ret[0].([]string) + return ret0 +} + +// getLabels indicates an expected call of getLabels. +func (mr *MockContextOptionsInterfaceMockRecorder) getLabels() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getLabels", reflect.TypeOf((*MockContextOptionsInterface)(nil).getLabels)) +} + +// getLocalCtxValues mocks base method. +func (m *MockContextOptionsInterface) getLocalCtxValues(f *flow.Flow) map[string][]string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "getLocalCtxValues", f) + ret0, _ := ret[0].(map[string][]string) + return ret0 +} + +// getLocalCtxValues indicates an expected call of getLocalCtxValues. +func (mr *MockContextOptionsInterfaceMockRecorder) getLocalCtxValues(f interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getLocalCtxValues", reflect.TypeOf((*MockContextOptionsInterface)(nil).getLocalCtxValues), f) +} + +// getValues mocks base method. +func (m *MockContextOptionsInterface) getValues(f *flow.Flow) []string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "getValues", f) + ret0, _ := ret[0].([]string) + return ret0 +} + +// getValues indicates an expected call of getValues. 
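For completeness, the generated recorder methods in this file are consumed the same way as the other mocks in this patch: set expectations via `EXPECT()` and let `ctrl.Finish()` verify them. A hedged sketch of how `MockAdvMetricsInterface` might be driven in a test (the metric name, flow literal, and call counts are illustrative):

```go
package metrics

import (
	"testing"

	"github.com/cilium/cilium/api/v1/flow"
	"github.com/golang/mock/gomock"
)

func TestAdvMetricsMockUsage(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish() // verifies that every expectation below was met

	m := NewMockAdvMetricsInterface(ctrl)
	m.EXPECT().Init("drop_count").Times(1)
	m.EXPECT().ProcessFlow(gomock.Any()).Times(1)
	m.EXPECT().Clean().Times(1)

	// Exercise the mock as the metrics module would.
	m.Init("drop_count")
	m.ProcessFlow(&flow.Flow{Verdict: flow.Verdict_FORWARDED})
	m.Clean()
}
```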
+func (mr *MockContextOptionsInterfaceMockRecorder) getValues(f interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getValues", reflect.TypeOf((*MockContextOptionsInterface)(nil).getValues), f) +} diff --git a/pkg/module/metrics/tcpflags.go b/pkg/module/metrics/tcpflags.go new file mode 100644 index 000000000..fc4ca5243 --- /dev/null +++ b/pkg/module/metrics/tcpflags.go @@ -0,0 +1,179 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package metrics + +import ( + "strings" + + api "github.com/microsoft/retina/crd/api/v1alpha1" + "github.com/microsoft/retina/pkg/exporter" + "github.com/microsoft/retina/pkg/log" + metricsinit "github.com/microsoft/retina/pkg/metrics" + "github.com/microsoft/retina/pkg/utils" + v1 "github.com/cilium/cilium/api/v1/flow" + "go.uber.org/zap" +) + +const ( + // Metric names + TCPFlagsCountName = "adv_tcpflags_count" + + // Metric descriptions + TCPFlagsCountDesc = "Total number of packets by TCP flag" +) + +type TCPMetrics struct { + baseMetricObject + tcpFlagsMetrics metricsinit.IGaugeVec +} + +func NewTCPMetrics(ctxOptions *api.MetricsContextOptions, fl *log.ZapLogger, isLocalContext enrichmentContext) *TCPMetrics { + if ctxOptions == nil || !strings.Contains(strings.ToLower(ctxOptions.MetricName), "flag") { + return nil + } + + fl = fl.Named("tcpflags-metricsmodule") + fl.Info("Creating TCP Flags count metrics", zap.Any("options", ctxOptions)) + return &TCPMetrics{ + baseMetricObject: newBaseMetricsObject(ctxOptions, fl, isLocalContext), + } +} + +func (t *TCPMetrics) Init(metricName string) { + // only 1 metric. No need to check metric name which is already validated. + t.tcpFlagsMetrics = exporter.CreatePrometheusGaugeVecForMetric( + exporter.AdvancedRegistry, + TCPFlagsCountName, + TCPFlagsCountDesc, + t.getLabels()..., + ) +} + +func (t *TCPMetrics) getLabels() []string { + labels := []string{ + utils.Flag, + } + if t.srcCtx != nil { + labels = append(labels, t.srcCtx.getLabels()...) + } + + if t.dstCtx != nil { + labels = append(labels, t.dstCtx.getLabels()...) + } + + return labels +} + +func (t *TCPMetrics) ProcessFlow(flow *v1.Flow) { + if flow == nil { + return + } + + if flow.Verdict != v1.Verdict_FORWARDED { + return + } + + tcp := flow.L4.GetTCP() + if tcp == nil { + return + } + + flags := t.getFlagValues(tcp.GetFlags()) + if len(flags) == 0 { + return + } + + if t.isLocalContext() { + // when localcontext is enabled, we do not need the context options for both src and dst + // metrics aggregation will be on a single pod basis and not the src/dst pod combination basis. + t.processLocalCtxFlow(flow, flags) + return + } + + var srcLabels, dstLabels []string + if t.srcCtx != nil { + srcLabels = t.srcCtx.getValues(flow) + } + + if t.dstCtx != nil { + dstLabels = t.dstCtx.getValues(flow) + } + + for _, flag := range flags { + labels := append([]string{flag}, srcLabels...) + labels = append(labels, dstLabels...) + t.tcpFlagsMetrics.WithLabelValues(labels...).Inc() + t.l.Debug("TCP flag metric", zap.String("flag", flag), zap.Strings("labels", labels)) + } +} + +func (t *TCPMetrics) processLocalCtxFlow(flow *v1.Flow, flags []string) { + labelValuesMap := t.srcCtx.getLocalCtxValues(flow) + if labelValuesMap == nil { + return + } + // Ingress values + if l := len(labelValuesMap[ingress]); l > 0 { + for _, flag := range flags { + labels := append([]string{flag}, labelValuesMap[ingress]...) 
+ t.tcpFlagsMetrics.WithLabelValues(labels...).Inc() + t.l.Debug("TCP flag metric", zap.String("flag", flag), zap.Strings("labels", labels)) + } + } + + if l := len(labelValuesMap[egress]); l > 0 { + for _, flag := range flags { + labels := append([]string{flag}, labelValuesMap[egress]...) + t.tcpFlagsMetrics.WithLabelValues(labels...).Inc() + t.l.Debug("TCP flag metric", zap.String("flag", flag), zap.Strings("labels", labels)) + } + } +} + +func (t *TCPMetrics) getFlagValues(flags *v1.TCPFlags) []string { + f := make([]string, 0) + if flags == nil { + return f + } + + if flags.GetFIN() { + f = append(f, utils.FIN) + } + + if flags.GetSYN() && flags.GetACK() { + f = append(f, utils.SYNACK) + } else { + if flags.GetSYN() { + f = append(f, utils.SYN) + } + if flags.GetACK() { + f = append(f, utils.ACK) + } + } + if flags.GetRST() { + f = append(f, utils.RST) + } + + if flags.GetPSH() { + f = append(f, utils.PSH) + } + + if flags.GetURG() { + f = append(f, utils.URG) + } + + if flags.GetECE() { + f = append(f, utils.ECE) + } + + if flags.GetCWR() { + f = append(f, utils.CWR) + } + + return f +} + +func (t *TCPMetrics) Clean() { + exporter.UnregisterMetric(exporter.AdvancedRegistry, metricsinit.ToPrometheusType(t.tcpFlagsMetrics)) +} diff --git a/pkg/module/metrics/tcpflags_test.go b/pkg/module/metrics/tcpflags_test.go new file mode 100644 index 000000000..5ee1f8550 --- /dev/null +++ b/pkg/module/metrics/tcpflags_test.go @@ -0,0 +1,497 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +// +//nolint:all +package metrics + +import ( + "testing" + + "github.com/microsoft/retina/crd/api/v1alpha1" + "github.com/microsoft/retina/pkg/log" + metricsinit "github.com/microsoft/retina/pkg/metrics" + "github.com/cilium/cilium/api/v1/flow" + "github.com/golang/mock/gomock" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" +) + +func TestNewTCPMetrics(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + tt := []TestMetrics{ + { + name: "empty opts", + opts: &v1alpha1.MetricsContextOptions{}, + checkIsAdvance: false, + f: &flow.Flow{}, + exepectedLabels: []string{}, + metricCall: 1, + nilObj: true, + }, + { + name: "empty opts nil flow", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "tcpflags", + }, + checkIsAdvance: false, + f: nil, + exepectedLabels: []string{"flag"}, + metricCall: 0, + }, + { + name: "plain opts", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "tcpflags", + }, + checkIsAdvance: false, + f: &flow.Flow{}, + exepectedLabels: []string{"flag"}, + metricCall: 0, + }, + { + name: "source opts 1 without metric name", + opts: &v1alpha1.MetricsContextOptions{ + SourceLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + }, + f: &flow.Flow{ + Verdict: flow.Verdict_FORWARDED, + }, + exepectedLabels: []string{ + "flag", + }, + metricCall: 0, + nilObj: true, + }, + { + name: "source opts 1", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "flag", + SourceLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + }, + f: &flow.Flow{}, + checkIsAdvance: true, + exepectedLabels: []string{ + "flag", + "source_ip", + "source_namespace", + "source_podname", + "source_workloadKind", + "source_workloadName", + "source_service", + "source_port", + }, + metricCall: 0, + }, + { + name: "dest opts 1", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "flag", + DestinationLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", 
"serVICE"}, + }, + f: &flow.Flow{ + Verdict: flow.Verdict_FORWARDED, + }, + checkIsAdvance: true, + exepectedLabels: []string{ + "flag", + "destination_ip", + "destination_namespace", + "destination_podname", + "destination_workloadKind", + "destination_workloadName", + "destination_service", + "destination_port", + }, + metricCall: 0, + }, + { + name: "source opts with flow", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "flag", + SourceLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + }, + f: &flow.Flow{ + Source: &flow.Endpoint{}, + Verdict: flow.Verdict_FORWARDED, + }, + checkIsAdvance: true, + exepectedLabels: []string{ + "flag", + "source_ip", + "source_namespace", + "source_podname", + "source_workloadKind", + "source_workloadName", + "source_service", + "source_port", + }, + metricCall: 0, + }, + { + name: "source opts with flow with flags", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "flag", + SourceLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + }, + f: &flow.Flow{ + Source: &flow.Endpoint{}, + Verdict: flow.Verdict_FORWARDED, + L4: &flow.Layer4{ + Protocol: &flow.Layer4_TCP{ + TCP: &flow.TCP{ + Flags: &flow.TCPFlags{ + SYN: true, + }, + }, + }, + }, + }, + checkIsAdvance: true, + exepectedLabels: []string{ + "flag", + "source_ip", + "source_namespace", + "source_podname", + "source_workloadKind", + "source_workloadName", + "source_service", + "source_port", + }, + metricCall: 1, + }, + { + name: "source opts with nil flow", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "flag", + SourceLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + }, + f: &flow.Flow{ + Source: &flow.Endpoint{}, + Verdict: flow.Verdict_FORWARDED, + L4: &flow.Layer4{ + Protocol: &flow.Layer4_TCP{ + TCP: &flow.TCP{ + Flags: nil, + }, + }, + }, + }, + checkIsAdvance: true, + exepectedLabels: []string{ + "flag", + "source_ip", + "source_namespace", + "source_podname", + "source_workloadKind", + "source_workloadName", + "source_service", + "source_port", + }, + metricCall: 0, + }, + { + name: "source opts with flow with all flags except ack", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "flag", + SourceLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + }, + f: &flow.Flow{ + Source: &flow.Endpoint{}, + Verdict: flow.Verdict_FORWARDED, + L4: &flow.Layer4{ + Protocol: &flow.Layer4_TCP{ + TCP: &flow.TCP{ + Flags: &flow.TCPFlags{ + SYN: true, + FIN: true, + RST: true, + PSH: true, + URG: true, + ECE: true, + CWR: true, + }, + }, + }, + }, + }, + checkIsAdvance: true, + exepectedLabels: []string{ + "flag", + "source_ip", + "source_namespace", + "source_podname", + "source_workloadKind", + "source_workloadName", + "source_service", + "source_port", + }, + metricCall: 7, + }, + { + name: "dest opts with flow with all flags ", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "flag", + DestinationLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + }, + f: &flow.Flow{ + Source: &flow.Endpoint{}, + Verdict: flow.Verdict_FORWARDED, + L4: &flow.Layer4{ + Protocol: &flow.Layer4_TCP{ + TCP: &flow.TCP{ + Flags: &flow.TCPFlags{ + SYN: true, + FIN: true, + RST: true, + PSH: true, + URG: true, + ECE: true, + CWR: true, + ACK: true, + }, + }, + }, + }, + }, + checkIsAdvance: true, + exepectedLabels: []string{ + "flag", + "destination_ip", + "destination_namespace", + "destination_podname", + "destination_workloadKind", + 
"destination_workloadName", + "destination_service", + "destination_port", + }, + metricCall: 7, + }, + { + name: "dest opts with flow with all but syn flags ", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "flag", + DestinationLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + }, + f: &flow.Flow{ + Source: &flow.Endpoint{}, + Verdict: flow.Verdict_FORWARDED, + L4: &flow.Layer4{ + Protocol: &flow.Layer4_TCP{ + TCP: &flow.TCP{ + Flags: &flow.TCPFlags{ + FIN: true, + RST: true, + PSH: true, + URG: true, + ECE: true, + CWR: true, + ACK: true, + }, + }, + }, + }, + }, + checkIsAdvance: true, + exepectedLabels: []string{ + "flag", + "destination_ip", + "destination_namespace", + "destination_podname", + "destination_workloadKind", + "destination_workloadName", + "destination_service", + "destination_port", + }, + metricCall: 7, + }, + { + name: "dest opts with flow with all flags dropped verdict", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "flag", + DestinationLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + }, + f: &flow.Flow{ + Source: &flow.Endpoint{}, + Verdict: flow.Verdict_DROPPED, + L4: &flow.Layer4{ + Protocol: &flow.Layer4_TCP{ + TCP: &flow.TCP{ + Flags: &flow.TCPFlags{ + SYN: true, + FIN: true, + RST: true, + PSH: true, + URG: true, + ECE: true, + CWR: true, + ACK: true, + }, + }, + }, + }, + }, + checkIsAdvance: true, + exepectedLabels: []string{ + "flag", + "destination_ip", + "destination_namespace", + "destination_podname", + "destination_workloadKind", + "destination_workloadName", + "destination_service", + "destination_port", + }, + metricCall: 0, + }, + { + name: "local ctx dest opts with flow with all flags ", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "flag", + SourceLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + }, + f: &flow.Flow{ + Source: &flow.Endpoint{}, + Verdict: flow.Verdict_FORWARDED, + L4: &flow.Layer4{ + Protocol: &flow.Layer4_TCP{ + TCP: &flow.TCP{ + Flags: &flow.TCPFlags{ + SYN: true, + FIN: true, + RST: true, + PSH: true, + URG: true, + ECE: true, + CWR: true, + ACK: true, + }, + }, + }, + }, + }, + checkIsAdvance: true, + exepectedLabels: []string{ + "flag", + "ip", + "namespace", + "podname", + "workloadKind", + "workloadName", + "service", + "port", + }, + localContext: localContext, + metricCall: 7, + }, + { + name: "local ctx dest opts with flow with all flags ", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "flag", + SourceLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + }, + f: &flow.Flow{ + Verdict: flow.Verdict_FORWARDED, + L4: &flow.Layer4{ + Protocol: &flow.Layer4_TCP{ + TCP: &flow.TCP{ + Flags: &flow.TCPFlags{ + SYN: true, + FIN: true, + RST: true, + PSH: true, + URG: true, + ECE: true, + CWR: true, + ACK: true, + }, + }, + }, + }, + }, + checkIsAdvance: true, + exepectedLabels: []string{ + "flag", + "ip", + "namespace", + "podname", + "workloadKind", + "workloadName", + "service", + "port", + }, + localContext: localContext, + metricCall: 0, + }, + { + name: "local ctx ars and dest opts with flow with all flags ", + opts: &v1alpha1.MetricsContextOptions{ + MetricName: "flag", + SourceLabels: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + }, + f: &flow.Flow{ + Source: &flow.Endpoint{}, + Destination: &flow.Endpoint{}, + Verdict: flow.Verdict_FORWARDED, + L4: &flow.Layer4{ + Protocol: &flow.Layer4_TCP{ + TCP: &flow.TCP{ + Flags: &flow.TCPFlags{ + SYN: true, 
+ FIN: true, + RST: true, + PSH: true, + URG: true, + ECE: true, + CWR: true, + ACK: true, + }, + }, + }, + }, + }, + checkIsAdvance: true, + exepectedLabels: []string{ + "flag", + "ip", + "namespace", + "podname", + "workloadKind", + "workloadName", + "service", + "port", + }, + localContext: localContext, + metricCall: 14, + }, + } + + for _, tc := range tt { + log.Logger().Info("Running test name", zap.String("name", tc.name)) + ctrl := gomock.NewController(t) + + tcp := NewTCPMetrics(tc.opts, log.Logger(), tc.localContext) + if tc.nilObj { + assert.Nil(t, tcp, "forward metrics should be nil Test Name: %s", tc.name) + continue + } else { + assert.NotNil(t, tcp, "forward metrics should not be nil Test Name: %s", tc.name) + } + + tcpFlagMockMetrics := metricsinit.NewMockIGaugeVec(ctrl) //nolint:staticcheck + tcp.tcpFlagsMetrics = tcpFlagMockMetrics + + testmetric := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "testmetric", + Help: "testmetric", + }) + + tcpFlagMockMetrics.EXPECT().WithLabelValues(gomock.Any()).Return(testmetric).Times(tc.metricCall) + assert.Equal(t, tc.checkIsAdvance, tcp.advEnable, "IsAdvance should be %v Test Name: %s", tc.checkIsAdvance, tc.name) + assert.Equal(t, tc.exepectedLabels, tcp.getLabels(), "labels should be %v Test Name: %s", tc.exepectedLabels, tc.name) + + tcp.ProcessFlow(tc.f) + ctrl.Finish() + } +} diff --git a/pkg/module/metrics/tcpretrans.go b/pkg/module/metrics/tcpretrans.go new file mode 100644 index 000000000..efdcd5ec5 --- /dev/null +++ b/pkg/module/metrics/tcpretrans.go @@ -0,0 +1,123 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package metrics + +import ( + "strings" + + api "github.com/microsoft/retina/crd/api/v1alpha1" + "github.com/microsoft/retina/pkg/exporter" + "github.com/microsoft/retina/pkg/log" + metricsinit "github.com/microsoft/retina/pkg/metrics" + "github.com/microsoft/retina/pkg/utils" + v1 "github.com/cilium/cilium/api/v1/flow" + "go.uber.org/zap" +) + +const ( + // Metric names + TCPRetransCountName = "adv_tcpretrans_count" + + // Metric descriptions + TCPRetransCountDesc = "Total number of TCP retransmitted packets" +) + +type TCPRetransMetrics struct { + baseMetricObject + tcpRetransMetrics metricsinit.IGaugeVec +} + +func NewTCPRetransMetrics(ctxOptions *api.MetricsContextOptions, fl *log.ZapLogger, isLocalContext enrichmentContext) *TCPRetransMetrics { + if ctxOptions == nil || !strings.Contains(strings.ToLower(ctxOptions.MetricName), "retrans") { + return nil + } + + fl = fl.Named("tcpretrans-metricsmodule") + fl.Info("Creating TCP retransmit count metrics", zap.Any("options", ctxOptions)) + return &TCPRetransMetrics{ + baseMetricObject: newBaseMetricsObject(ctxOptions, fl, isLocalContext), + } +} + +func (t *TCPRetransMetrics) Init(metricName string) { + // only 1 metric. No need to check metric name which is already validated. + t.tcpRetransMetrics = exporter.CreatePrometheusGaugeVecForMetric( + exporter.AdvancedRegistry, + TCPRetransCountName, + TCPRetransCountDesc, + t.getLabels()..., + ) +} + +func (t *TCPRetransMetrics) getLabels() []string { + labels := []string{utils.Direction} + if t.srcCtx != nil { + labels = append(labels, t.srcCtx.getLabels()...) + t.l.Info("src labels", zap.Any("labels", labels)) + } + + if t.dstCtx != nil { + labels = append(labels, t.dstCtx.getLabels()...) 
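	// Note: label order here is direction first, then any source_* labels, then any
	// destination_* labels; ProcessFlow below appends values in exactly the same order
	// so that they line up with the label names registered in Init.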
+ t.l.Info("dst labels", zap.Any("labels", labels)) + } + + return labels +} + +func (t *TCPRetransMetrics) ProcessFlow(flow *v1.Flow) { + if flow == nil { + return + } + + if flow.Verdict != utils.Verdict_RETRANSMISSION { + return + } + + if t.isLocalContext() { + // when localcontext is enabled, we do not need the context options for both src and dst + // metrics aggregation will be on a single pod basis and not the src/dst pod combination basis. + t.processLocalCtxFlow(flow) + return + } + + labels := []string{flow.TrafficDirection.String()} + if t.srcCtx != nil { + srcLabels := t.srcCtx.getValues(flow) + if len(srcLabels) > 0 { + labels = append(labels, srcLabels...) + } + } + + if t.dstCtx != nil { + dstLabels := t.dstCtx.getValues(flow) + if len(dstLabels) > 0 { + labels = append(labels, dstLabels...) + } + } + + t.tcpRetransMetrics.WithLabelValues(labels...).Inc() +} + +func (t *TCPRetransMetrics) processLocalCtxFlow(flow *v1.Flow) { + labelValuesMap := t.srcCtx.getLocalCtxValues(flow) + if labelValuesMap == nil { + return + } + + if len(labelValuesMap[ingress]) > 0 { + labels := append([]string{ingress}, labelValuesMap[ingress]...) + t.tcpRetransMetrics.WithLabelValues(labels...).Inc() + t.l.Debug("tcp retransmission count metric in INGRESS in local ctx", zap.Any("labels", labels)) + } + + if len(labelValuesMap[egress]) > 0 { + labels := append([]string{egress}, labelValuesMap[egress]...) + t.tcpRetransMetrics.WithLabelValues(labels...).Inc() + t.l.Debug("tcp retransmission count metric in EGRESS in local ctx", zap.Any("labels", labels)) + } +} + +func (t *TCPRetransMetrics) Clean() { + exporter.UnregisterMetric(exporter.AdvancedRegistry, metricsinit.ToPrometheusType(t.tcpRetransMetrics)) +} diff --git a/pkg/module/metrics/types.go b/pkg/module/metrics/types.go new file mode 100644 index 000000000..5c669b933 --- /dev/null +++ b/pkg/module/metrics/types.go @@ -0,0 +1,368 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
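For orientation, here is a minimal in-package sketch of how the retransmission module above can be exercised end to end. It mirrors the pattern of the tcpflags test earlier in this change; the file name, test name and sample values are hypothetical, and it assumes the advanced Prometheus registry used by exporter.CreatePrometheusGaugeVecForMetric is usable directly from a test.

// pkg/module/metrics/tcpretrans_sketch_test.go (illustrative only, not part of this change)
package metrics

import (
	"testing"

	"github.com/cilium/cilium/api/v1/flow"
	"github.com/microsoft/retina/crd/api/v1alpha1"
	"github.com/microsoft/retina/pkg/log"
	"github.com/microsoft/retina/pkg/utils"
)

func TestTCPRetransSketch(t *testing.T) {
	log.SetupZapLogger(log.GetDefaultLogOpts())

	// The metric name must contain "retrans", otherwise the constructor returns nil.
	opts := &v1alpha1.MetricsContextOptions{
		MetricName:   "tcpretrans",
		SourceLabels: []string{"ip", "namespace", "podName"},
	}
	m := NewTCPRetransMetrics(opts, log.Logger(), remoteContext)
	if m == nil {
		t.Fatal("expected a TCPRetransMetrics instance")
	}
	m.Init(utils.TcpRetransCount)
	defer m.Clean()

	// Only flows carrying the retransmission verdict are counted.
	m.ProcessFlow(&flow.Flow{
		Verdict:          utils.Verdict_RETRANSMISSION,
		TrafficDirection: flow.TrafficDirection_EGRESS,
		IP:               &flow.IP{Source: "10.0.0.1"},
		Source:           &flow.Endpoint{Namespace: "ns", PodName: "client"},
	})
}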
+package metrics + +import ( + "fmt" + "net" + "strings" + + api "github.com/microsoft/retina/crd/api/v1alpha1" + "github.com/microsoft/retina/pkg/common" + "github.com/microsoft/retina/pkg/utils" + "github.com/cilium/cilium/api/v1/flow" +) + +const ( + // source context options prefix + sourceCtxPrefix = "source_" + + // destination context options prefix + destinationCtxPrefix = "destination_" + + // IP Context Option + ipCtxOption = "ip" + + // namespace context option + namespaceCtxOption = "namespace" + + // pod context option + podCtxOption = "podname" + + // service context option + serviceCtxOption = "service" + + // port context option + portCtxOption = "port" + + // workloads context option + workloadKindCtxOption = "workloadKind" + + // workload context option + workloadNameCtxOption = "workloadName" + + // workload context option + workloadCtxOption = "workload" + + // localContext means only the pods on this node will be watched + // and only these events will be enriched + localContext enrichmentContext = "local" + + // remoteContext means all pods on the cluster will be watched + // and events will be enriched + remoteContext enrichmentContext = "remote" + + // ingress means the direction of the flow is from outside the pod to inside + ingress = "ingress" + + // egress means the direction of the flow is from inside the pod to outside + egress = "egress" +) + +//go:generate go run github.com/golang/mock/mockgen@v1.6.0 -source=types.go -destination=mock_types.go -package=metrics +type IModule interface { + Reconcile(spec *api.MetricsSpec) error +} + +type enrichmentContext string + +type ctxOptionType int + +const ( + source ctxOptionType = iota + 1 + destination + localCtx +) + +type AdvMetricsInterface interface { + Init(metricName string) + // This func is used to clean up old metrics on reconcile. 
+ Clean() + ProcessFlow(f *flow.Flow) +} + +type ContextOptionsInterface interface { + getLabels() []string + getValues(f *flow.Flow) []string + getLocalCtxValues(f *flow.Flow) map[string][]string +} + +type ContextOptions struct { + option ctxOptionType + IP bool + Namespace bool + Podname bool + Workload bool + Service bool + Port bool +} + +type DirtyCachePod struct { + // IP is the IP of the endpoint + IP net.IP + // indicates if pod is annotated / of interest + Annotated bool + // indicates if pod namespace is annotated / of interest + Namespaced bool +} + +func NewCtxOption(opts []string, option ctxOptionType) *ContextOptions { + c := &ContextOptions{ + option: option, + } + + if opts == nil { + return c + } + // TODO check lower case here + for _, opt := range opts { + switch strings.ToLower(opt) { + case ipCtxOption: + c.IP = true + case namespaceCtxOption: + c.Namespace = true + case podCtxOption: + c.Podname = true + case workloadCtxOption: + c.Workload = true + case serviceCtxOption: + c.Service = true + case portCtxOption: + c.Port = true + } + } + + return c +} + +func (c *ContextOptions) getLabels() []string { + // Note: order of append here of labels should match the order of values + prefix := "" + switch c.option { + case source: + prefix = sourceCtxPrefix + case destination: + prefix = destinationCtxPrefix + case localCtx: + // no prefix for localcontext as the aggregation is at each pod level + prefix = "" + } + + labels := make([]string, 0) + if c.IP { + labels = append(labels, prefix+ipCtxOption) + } + if c.Namespace { + labels = append(labels, prefix+namespaceCtxOption) + } + if c.Podname { + labels = append(labels, prefix+podCtxOption) + } + if c.Workload { + labels = append(labels, prefix+workloadKindCtxOption, prefix+workloadNameCtxOption) + } + + if c.Service { + labels = append(labels, prefix+serviceCtxOption) + } + + if c.Port { + labels = append(labels, prefix+portCtxOption) + } + + return labels +} + +func (c *ContextOptions) getValues(f *flow.Flow) []string { + return c.getByDirectionValues(f, c.isDest()) +} + +func (c *ContextOptions) isDest() bool { + return c.option == destination +} + +func (c *ContextOptions) isSrc() bool { + return c.option == source +} + +func (c *ContextOptions) getLocalCtxValues(f *flow.Flow) map[string][]string { + values := map[string][]string{ + ingress: nil, + egress: nil, + } + if c.option != localCtx { + return nil + } + + // for local context, we only enrich the information of pods running on the same node + // as retina pods. So if a src/dst field is non empty then we will need to + // add a metric specific to that pod based on the direction. + // there will be a case where src and dst are both filled, in that case + // we will need to add two metrics, one for src and one for dst based on direction + // and location of observation i.e. 
tracepoint + // + // if only Source pod info is available: + // - return labels of this pod with direction as EGRESS + // if only Destination pod info is available: + // - return labels of this pod with direction as INGRESS + // if both Source and Destination pod info is available: + // - return labels of source pod with direction as EGRESS + // - return labels of destination pod with direction as INGRESS + + if f == nil { + return values + } + + if f.Source != nil && !isAPIServerPod(f.Source) { + values[egress] = c.getByDirectionValues(f, false) + } + + if f.Destination != nil && !isAPIServerPod(f.Destination) { + values[ingress] = c.getByDirectionValues(f, true) + } + + return values +} + +func (c *ContextOptions) getByDirectionValues(f *flow.Flow, dest bool) []string { + // Note: order of append here of values should match the order of labels + values := make([]string, 0) + if f == nil { + return values + } + + if c.IP { + ip := "unknown" + if f.IP != nil { + ip = f.IP.Source + if dest { + ip = f.IP.Destination + } + } + values = append(values, ip) + } + + ep := f.Source + if dest { + ep = f.Destination + } + + if c.Namespace { + if ep != nil { + values = append(values, ep.Namespace) + } else { + values = append(values, "unknown") + } + } + + if c.Podname { + if ep != nil { + values = append(values, ep.PodName) + } else { + values = append(values, "unknown") + } + } + + if c.Workload { + wk := ep.GetWorkloads() + if len(wk) > 0 { + values = append(values, wk[0].Kind, wk[0].Name) + } else { + values = append(values, "unknown", "unknown") + } + } + + if c.Service { + if dest { + if f.DestinationService != nil { + values = append(values, f.DestinationService.Name) + } else { + values = append(values, "unknown") + } + } else { + if f.SourceService != nil { + values = append(values, f.SourceService.Name) + } else { + values = append(values, "unknown") + } + } + } + + if c.Port { + l4 := f.GetL4() + if l4 != nil { + // check if TCP object is present + if tcp := l4.GetTCP(); tcp != nil { + if dest { + values = append(values, fmt.Sprintf("%d", tcp.GetDestinationPort())) + } else { + values = append(values, fmt.Sprintf("%d", tcp.GetSourcePort())) + } + } else if udp := l4.GetUDP(); udp != nil { + if dest { + values = append(values, fmt.Sprintf("%d", udp.GetDestinationPort())) + } else { + values = append(values, fmt.Sprintf("%d", udp.GetSourcePort())) + } + } + } else { + values = append(values, "unknown") + } + } + + return values +} + +// DefaultCtxOptions used for enableAnnotations where it sets the source and destination labels +// so users will not have to manually define. +func DefaultCtxOptions() []string { + return []string{ + ipCtxOption, + namespaceCtxOption, + podCtxOption, + workloadCtxOption, + // ignoring service option as we have not added right logic around it + // TODO add service specific enrichment and logic, #610 + // serviceCtxOption, + // Port adds a ton of extra dimention to metrics + // so not adding it as a default option + // portCtxOption, + } +} + +// DefaultMetrics used for enableAnnotations where it sets enabled advanced metrics +// so users will not have to manually define. +// For any new advanced metrics we want to have enabled by default for annotation based solution, add it here. 
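// As a rough, hypothetical illustration of how these two helpers fit together (the
// actual wiring lives in the reconciler, which is not part of this file), annotation-based
// enablement amounts to building one context-options entry per default metric:
//
//	for _, name := range DefaultMetrics() {
//		_ = &api.MetricsContextOptions{
//			MetricName:        name,
//			SourceLabels:      DefaultCtxOptions(),
//			DestinationLabels: DefaultCtxOptions(),
//		}
//	}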
+func DefaultMetrics() []string { + return []string{ + // forward + utils.ForwardCountTotalName, + utils.ForwardBytesTotalName, + // drop + utils.DropCountTotalName, + utils.DropBytesTotalName, + // tcp flags + utils.TcpFlagCounters, + // tcp retransmissions + utils.TcpRetransCount, + // latency + utils.NodeApiServerLatencyName, + utils.NodeApiServerTcpHandshakeLatencyName, + utils.NoResponseFromApiServerName, + // dns + utils.DNSRequestCounterName, + utils.DNSResponseCounterName, + } +} + +func isAPIServerPod(ep *flow.Endpoint) bool { + if ep == nil { + return false + } + + if ep.Namespace == common.APIServerEndpointName && ep.PodName == common.APIServerEndpointName { + return true + } + + return false +} diff --git a/pkg/module/metrics/types_test.go b/pkg/module/metrics/types_test.go new file mode 100644 index 000000000..db1730eda --- /dev/null +++ b/pkg/module/metrics/types_test.go @@ -0,0 +1,141 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package metrics + +import ( + "testing" + + "github.com/cilium/cilium/api/v1/flow" + "github.com/stretchr/testify/assert" +) + +type TestCtxOpts struct { + name string + opts []string + ctxType ctxOptionType + expected []string + f *flow.Flow + expectedVals []string +} + +func TestNewCtxOps(t *testing.T) { + tt := []TestCtxOpts{ + { + name: "empty opts", + opts: []string{}, + ctxType: source, + expected: []string{}, + f: &flow.Flow{}, + expectedVals: []string{}, + }, + { + name: "source opts 1", + opts: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + ctxType: source, + expected: []string{ + "source_ip", "source_namespace", "source_podname", + "source_workloadKind", "source_workloadName", "source_service", "source_port", + }, + f: &flow.Flow{}, + expectedVals: []string{"unknown", "unknown", "unknown", "unknown", "unknown", "unknown", "unknown"}, + }, + { + name: "dest opts 1", + opts: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + ctxType: destination, + expected: []string{ + "destination_ip", "destination_namespace", "destination_podname", + "destination_workloadKind", "destination_workloadName", "destination_service", "destination_port", + }, + f: &flow.Flow{}, + expectedVals: []string{"unknown", "unknown", "unknown", "unknown", "unknown", "unknown", "unknown"}, + }, + { + name: "source opts with flow", + opts: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + ctxType: source, + expected: []string{ + "source_ip", "source_namespace", "source_podname", + "source_workloadKind", "source_workloadName", "source_service", "source_port", + }, + f: &flow.Flow{ + Source: &flow.Endpoint{ + Namespace: "ns", + PodName: "test", + }, + }, + expectedVals: []string{"unknown", "ns", "test", "unknown", "unknown", "unknown", "unknown"}, + }, + { + name: "source opts with flow", + opts: []string{"ip", "namespace", "podName", "Workload", "PORT", "serVICE"}, + ctxType: destination, + expected: []string{ + "destination_ip", "destination_namespace", "destination_podname", + "destination_workloadKind", "destination_workloadName", "destination_service", "destination_port", + }, + f: &flow.Flow{ + Destination: &flow.Endpoint{ + Namespace: "ns", + PodName: "test", + }, + }, + expectedVals: []string{"unknown", "ns", "test", "unknown", "unknown", "unknown", "unknown"}, + }, + { + name: "source opts of ip", + opts: []string{"ip", "namespace", "podName"}, + ctxType: source, + expected: []string{"source_ip", "source_namespace", "source_podname"}, + f: &flow.Flow{ + Source: 
&flow.Endpoint{ + Namespace: "ns", + PodName: "test", + }, + IP: &flow.IP{ + Source: "10.0.0.1", + }, + }, + expectedVals: []string{"10.0.0.1", "ns", "test"}, + }, + { + name: "dest opts of ip", + opts: []string{"ip", "namespace", "podName"}, + ctxType: destination, + expected: []string{"destination_ip", "destination_namespace", "destination_podname"}, + f: &flow.Flow{ + Destination: &flow.Endpoint{ + Namespace: "ns", + PodName: "test", + }, + IP: &flow.IP{ + Destination: "10.0.0.1", + }, + }, + expectedVals: []string{"10.0.0.1", "ns", "test"}, + }, + { + name: "dest opts of ip with no destination info", + opts: []string{"ip", "namespace", "podName"}, + ctxType: destination, + expected: []string{"destination_ip", "destination_namespace", "destination_podname"}, + f: &flow.Flow{ + Source: &flow.Endpoint{ + Namespace: "ns", + PodName: "test", + }, + IP: &flow.IP{ + Source: "10.0.0.1", + }, + }, + expectedVals: []string{"", "unknown", "unknown"}, + }, + } + + for _, tc := range tt { + c := NewCtxOption(tc.opts, tc.ctxType) + assert.Equal(t, tc.expected, c.getLabels(), "labels should match %s", tc.name) + values := c.getValues(tc.f) + assert.Equal(t, tc.expectedVals, values, "values should match %s", tc.name) + } +} diff --git a/pkg/module/traces/mock_moduleinterface.go b/pkg/module/traces/mock_moduleinterface.go new file mode 100644 index 000000000..6763e938e --- /dev/null +++ b/pkg/module/traces/mock_moduleinterface.go @@ -0,0 +1,67 @@ +// autogenerated +// +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/microsoft/retina/pkg/module/traces (interfaces: ModuleInterface) + +// Package traces is a generated GoMock package. +package traces + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + v1alpha1 "github.com/microsoft/retina/crd/api/v1alpha1" +) + +// MockModuleInterface is a mock of ModuleInterface interface. +type MockModuleInterface struct { + ctrl *gomock.Controller + recorder *MockModuleInterfaceMockRecorder +} + +// MockModuleInterfaceMockRecorder is the mock recorder for MockModuleInterface. +type MockModuleInterfaceMockRecorder struct { + mock *MockModuleInterface +} + +// NewMockModuleInterface creates a new mock instance. +func NewMockModuleInterface(ctrl *gomock.Controller) *MockModuleInterface { + mock := &MockModuleInterface{ctrl: ctrl} + mock.recorder = &MockModuleInterfaceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockModuleInterface) EXPECT() *MockModuleInterfaceMockRecorder { + return m.recorder +} + +// Reconcile mocks base method. +func (m *MockModuleInterface) Reconcile(arg0 *v1alpha1.TracesSpec) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Reconcile", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Reconcile indicates an expected call of Reconcile. +func (mr *MockModuleInterfaceMockRecorder) Reconcile(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reconcile", reflect.TypeOf((*MockModuleInterface)(nil).Reconcile), arg0) +} + +// Run mocks base method. +func (m *MockModuleInterface) Run() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Run") +} + +// Run indicates an expected call of Run. 
+func (mr *MockModuleInterfaceMockRecorder) Run() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockModuleInterface)(nil).Run)) +} diff --git a/pkg/module/traces/traces_module.go b/pkg/module/traces/traces_module.go new file mode 100644 index 000000000..81328d9f4 --- /dev/null +++ b/pkg/module/traces/traces_module.go @@ -0,0 +1,99 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package traces + +import ( + "context" + "sync" + "time" + + api "github.com/microsoft/retina/crd/api/v1alpha1" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/pubsub" + "go.uber.org/zap" +) + +var ( + t *Module + once sync.Once +) + +const ( + moduleIntervalSecs = 1 * time.Second +) + +type Module struct { + *sync.RWMutex + // ctx is the context of the trace module + ctx context.Context + + // l is the logger + l *log.ZapLogger + + // traceConfigs is the list of trace configurations from CRD + configs []*api.TraceConfiguration + + // traceOutputCOnfigs is the list of trace output configurations from CRD + outputConfigs []*api.TraceOutputConfiguration + + // pubsub is the pubsub client + pubsub pubsub.PubSubInterface + + // isRunning is the flag to indicate if the trace module is running + isRunning bool + + // TODO add the filter manager here or add a pubsub topic for + // filter manager to subscribe to +} + +func NewModule(ctx context.Context, pubsub pubsub.PubSubInterface) *Module { + // this is a thread-safe singleton instance of the trace module + once.Do(func() { + t = &Module{ + RWMutex: &sync.RWMutex{}, + l: log.Logger().Named(string("TraceModule")), + ctx: ctx, + pubsub: pubsub, + configs: make([]*api.TraceConfiguration, 0), + outputConfigs: make([]*api.TraceOutputConfiguration, 0), + } + + t.init() + }) + + return t +} + +func (t *Module) init() { + // TODO +} + +func (t *Module) Run() { + if t.isRunning { + return + } + go func() { + t.isRunning = true + ticker := time.NewTicker(moduleIntervalSecs) + defer ticker.Stop() + + for { + select { + case <-t.ctx.Done(): + return + case <-ticker.C: + if err := t.run(); err != nil { + t.l.Error("error running trace module", zap.Error(err)) + } + } + } + }() +} + +func (t *Module) Reconcile(spec *api.TracesSpec) error { + return nil +} + +func (t *Module) run() error { + return nil +} diff --git a/pkg/module/traces/types.go b/pkg/module/traces/types.go new file mode 100644 index 000000000..8e028c473 --- /dev/null +++ b/pkg/module/traces/types.go @@ -0,0 +1,14 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package traces + +import api "github.com/microsoft/retina/crd/api/v1alpha1" + +//go:generate go run github.com/golang/mock/mockgen@v1.6.0 -destination=mock_moduleinterface.go -copyright_file=../../lib/ignore_headers.txt -package=traces github.com/microsoft/retina/pkg/module/traces ModuleInterface + +type ModuleInterface interface { + // Run starts the trace module. + Run() + + Reconcile(spec *api.TracesSpec) error +} diff --git a/pkg/plugin/api/mock_plugin.go b/pkg/plugin/api/mock_plugin.go new file mode 100644 index 000000000..79fa61db6 --- /dev/null +++ b/pkg/plugin/api/mock_plugin.go @@ -0,0 +1,140 @@ +// autogenerated +// +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/microsoft/retina/pkg/plugin/api (interfaces: Plugin) + +// Package api is a generated GoMock package. 
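The generated mock below is the usual way to stand in for a plugin in unit tests. A minimal sketch of how it is wired with gomock follows; the test package, test name and plugin name are hypothetical, and only the lifecycle calls actually exercised are given expectations.

// Illustrative only, not part of this change.
package manager_test

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/microsoft/retina/pkg/plugin/api"
)

func TestPluginLifecycleSketch(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	p := api.NewMockPlugin(ctrl)
	p.EXPECT().Name().Return("sample-plugin").AnyTimes()
	p.EXPECT().Init().Return(nil)
	p.EXPECT().Start(gomock.Any()).Return(nil)
	p.EXPECT().Stop().Return(nil)

	// Drive the mock through the lifecycle declared by the api.Plugin interface.
	var plugin api.Plugin = p
	_ = plugin.Name()
	_ = plugin.Init()
	_ = plugin.Start(context.Background())
	_ = plugin.Stop()
}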
+package api + +import ( + context "context" + reflect "reflect" + + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" + gomock "github.com/golang/mock/gomock" +) + +// MockPlugin is a mock of Plugin interface. +type MockPlugin struct { + ctrl *gomock.Controller + recorder *MockPluginMockRecorder +} + +// MockPluginMockRecorder is the mock recorder for MockPlugin. +type MockPluginMockRecorder struct { + mock *MockPlugin +} + +// NewMockPlugin creates a new mock instance. +func NewMockPlugin(ctrl *gomock.Controller) *MockPlugin { + mock := &MockPlugin{ctrl: ctrl} + mock.recorder = &MockPluginMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockPlugin) EXPECT() *MockPluginMockRecorder { + return m.recorder +} + +// Compile mocks base method. +func (m *MockPlugin) Compile(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Compile", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Compile indicates an expected call of Compile. +func (mr *MockPluginMockRecorder) Compile(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Compile", reflect.TypeOf((*MockPlugin)(nil).Compile), arg0) +} + +// Generate mocks base method. +func (m *MockPlugin) Generate(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Generate", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Generate indicates an expected call of Generate. +func (mr *MockPluginMockRecorder) Generate(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Generate", reflect.TypeOf((*MockPlugin)(nil).Generate), arg0) +} + +// Init mocks base method. +func (m *MockPlugin) Init() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Init") + ret0, _ := ret[0].(error) + return ret0 +} + +// Init indicates an expected call of Init. +func (mr *MockPluginMockRecorder) Init() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Init", reflect.TypeOf((*MockPlugin)(nil).Init)) +} + +// Name mocks base method. +func (m *MockPlugin) Name() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Name") + ret0, _ := ret[0].(string) + return ret0 +} + +// Name indicates an expected call of Name. +func (mr *MockPluginMockRecorder) Name() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Name", reflect.TypeOf((*MockPlugin)(nil).Name)) +} + +// SetupChannel mocks base method. +func (m *MockPlugin) SetupChannel(arg0 chan *v1.Event) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetupChannel", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetupChannel indicates an expected call of SetupChannel. +func (mr *MockPluginMockRecorder) SetupChannel(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetupChannel", reflect.TypeOf((*MockPlugin)(nil).SetupChannel), arg0) +} + +// Start mocks base method. +func (m *MockPlugin) Start(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Start", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Start indicates an expected call of Start. +func (mr *MockPluginMockRecorder) Start(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockPlugin)(nil).Start), arg0) +} + +// Stop mocks base method. 
+func (m *MockPlugin) Stop() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Stop") + ret0, _ := ret[0].(error) + return ret0 +} + +// Stop indicates an expected call of Stop. +func (mr *MockPluginMockRecorder) Stop() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockPlugin)(nil).Stop)) +} diff --git a/pkg/plugin/api/types.go b/pkg/plugin/api/types.go new file mode 100644 index 000000000..16b812c6a --- /dev/null +++ b/pkg/plugin/api/types.go @@ -0,0 +1,43 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package api + +import ( + "context" + + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" +) + +const ( + Meter string = "retina-meter" + ServiceName string = "retina" +) + +type PluginName string + +// PluginEvent - Generic plugin event structure to receive data from plugin +type PluginEvent struct { + Name PluginName +} + +//go:generate go run github.com/golang/mock/mockgen@v1.6.0 -destination=mock_plugin.go -copyright_file=../../lib/ignore_headers.txt -package=api github.com/microsoft/retina/pkg/plugin/api Plugin + +// All functions should be idempotent. +type Plugin interface { + Name() string + // Generate the plugin specific header files. + // This maybe no-op for plugins that don't use eBPF. + Generate(ctx context.Context) error + // Compile the ebpf to generate bpf object. + // This maybe no-op for plugins that don't use eBPF. + Compile(ctx context.Context) error + // Init initializes plugin specific objects. Plugin has to send data through the channel passed in arg. + Init() error + // Start The plugin has to start its execution in collecting traces + Start(ctx context.Context) error + // Stop The plugin has to stop its job + Stop() error + // Allow adding external channels that clients can use + // to get data from the plugin. This allows custom post processing of plugin data. + SetupChannel(chan *v1.Event) error +} diff --git a/pkg/plugin/common/common_linux.go b/pkg/plugin/common/common_linux.go new file mode 100644 index 000000000..2178e120d --- /dev/null +++ b/pkg/plugin/common/common_linux.go @@ -0,0 +1,86 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package common + +import ( + "errors" + "os" + "strings" + "syscall" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/perf" + "github.com/microsoft/retina/pkg/log" + "go.uber.org/zap" + "golang.org/x/sys/unix" +) + +//go:generate go run github.com/golang/mock/mockgen@v1.6.0 -destination=mocks/mock_types.go -package=mocks . ITracer + +// Interface for IG tracers. +// Ref: https://pkg.go.dev/github.com/inspektor-gadget/inspektor-gadget@v0.18.1/pkg/gadgets/trace/dns/tracer#Tracer +type ITracer interface { + SetEventHandler(interface{}) + Attach(pid uint32) error + Detach(pid uint32) error + Close() +} + +// Interface for IG event handlers. Maps to cilum Flow. 
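// ProtocolToFlow maps the protocol string reported by an Inspektor Gadget event
// ("tcp"/"udp") to the corresponding IPPROTO_* number used on flow objects; anything
// else falls back to 0. For example:
//
//	ProtocolToFlow("tcp")  // unix.IPPROTO_TCP (6)
//	ProtocolToFlow("udp")  // unix.IPPROTO_UDP (17)
//	ProtocolToFlow("icmp") // 0 (unhandled)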
+func ProtocolToFlow(protocol string) int { + switch protocol { + case "tcp": + return unix.IPPROTO_TCP + case "udp": + return unix.IPPROTO_UDP + default: + return 0 + } +} + +// Refer: https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml#dns-parameters-6 +func RCodeToFlow(rCode string) uint32 { + if strings.EqualFold(rCode, "NoError") { + return 0 + } else if strings.EqualFold(rCode, "FormErr") { + return 1 + } else if strings.EqualFold(rCode, "ServFail") { + return 2 + } else if strings.EqualFold(rCode, "NXDomain") { + return 3 + } else if strings.EqualFold(rCode, "NotImp") { + return 4 + } else if strings.EqualFold(rCode, "Refused") { + return 5 + } + return 24 +} + +// NewPerfReader creates a new perf reader with a buffer size that is a power of 2. +// It starts with the maximum buffer size and tries with smaller buffer sizes if it fails with ENOMEM. +// Returns an error if it fails to create a perf reader or encounters any other error. +// Not enforced, but max and min should be a power of 2. +// The allocated size will be a multiple of pagesize determined at runtime. +func NewPerfReader(l *log.ZapLogger, m *ebpf.Map, max, min int) (*perf.Reader, error) { + var enomem error = syscall.ENOMEM + for i := max; i >= min; i = i / 2 { + r, err := perf.NewReader(m, i*os.Getpagesize()) + if err == nil { + l.Info("perf reader created", zap.Any("Map", m.String()), zap.Int("PageSize", os.Getpagesize()), zap.Int("BufferSize", i*os.Getpagesize())) + return r, nil + } else if errors.Is(err, enomem) { + // If the error is ENOMEM, we can try with a smaller buffer size. + // Give plugins a chance to run even if they cannot allocate the maximum buffer size at the start. + // Can help with repeated OOM kills. + // Linux mainly uses lazy allocation, That is to say what the underlying physical pages are allocated + // when you touch them for the first time. + // So, you may be able to allocate a buffer of X sizes, but when using it later, if the memory is under pressure, + // the real allocation may fail and the OOM killer be triggered to free some memory, at the expense of killing a process. + l.Warn("perf reader creation failed with ENOMEM", zap.Any("Map", m.String()), zap.Error(err), zap.Int("BufferSize", i*os.Getpagesize())) + continue + } else { + return nil, err + } + } + return nil, errors.New("failed to create perf reader") +} diff --git a/pkg/plugin/common/mocks/mock_types.go b/pkg/plugin/common/mocks/mock_types.go new file mode 100644 index 000000000..6e047720f --- /dev/null +++ b/pkg/plugin/common/mocks/mock_types.go @@ -0,0 +1,86 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/microsoft/retina/pkg/plugin/common (interfaces: ITracer) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockITracer is a mock of ITracer interface. +type MockITracer struct { + ctrl *gomock.Controller + recorder *MockITracerMockRecorder +} + +// MockITracerMockRecorder is the mock recorder for MockITracer. +type MockITracerMockRecorder struct { + mock *MockITracer +} + +// NewMockITracer creates a new mock instance. +func NewMockITracer(ctrl *gomock.Controller) *MockITracer { + mock := &MockITracer{ctrl: ctrl} + mock.recorder = &MockITracerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockITracer) EXPECT() *MockITracerMockRecorder { + return m.recorder +} + +// Attach mocks base method. 
+func (m *MockITracer) Attach(arg0 uint32) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Attach", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Attach indicates an expected call of Attach. +func (mr *MockITracerMockRecorder) Attach(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Attach", reflect.TypeOf((*MockITracer)(nil).Attach), arg0) +} + +// Close mocks base method. +func (m *MockITracer) Close() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Close") +} + +// Close indicates an expected call of Close. +func (mr *MockITracerMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockITracer)(nil).Close)) +} + +// Detach mocks base method. +func (m *MockITracer) Detach(arg0 uint32) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Detach", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Detach indicates an expected call of Detach. +func (mr *MockITracerMockRecorder) Detach(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Detach", reflect.TypeOf((*MockITracer)(nil).Detach), arg0) +} + +// SetEventHandler mocks base method. +func (m *MockITracer) SetEventHandler(arg0 interface{}) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetEventHandler", arg0) +} + +// SetEventHandler indicates an expected call of SetEventHandler. +func (mr *MockITracerMockRecorder) SetEventHandler(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetEventHandler", reflect.TypeOf((*MockITracer)(nil).SetEventHandler), arg0) +} diff --git a/pkg/plugin/constants/constants.go b/pkg/plugin/constants/constants.go new file mode 100644 index 000000000..99f5dad52 --- /dev/null +++ b/pkg/plugin/constants/constants.go @@ -0,0 +1,13 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package constants + +const ( + TCP = "IPPROTO_TCP" + UDP = "IPPROTO_UDP" + LocalhostIP = "127.0.0.1" + ZeroIP = "0.0.0.0" + + FilterMapPath = "/sys/fs/bpf" + FilterMapName = "retina_filter_map" +) diff --git a/pkg/plugin/deprecated/iptablelogger/cprog/iptable_logger.c b/pkg/plugin/deprecated/iptablelogger/cprog/iptable_logger.c new file mode 100644 index 000000000..2bb239bf5 --- /dev/null +++ b/pkg/plugin/deprecated/iptablelogger/cprog/iptable_logger.c @@ -0,0 +1,157 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
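/*
 * Editorial summary of the program below: a kprobe on nf_hook_slow records the
 * IPv4/TCP 5-tuple and netfilter hook for the calling PID in the `ipflows` hash map;
 * the paired kretprobe looks that entry up again and, only when nf_hook_slow returns
 * a negative verdict, emits a `verdict` event carrying the tuple, timestamp, PID and
 * comm through the `verdicts` perf event array, then deletes the map entry.
 */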
+ +#include "vmlinux.h" +#include "bpf_helpers.h" +#include "bpf_core_read.h" +#include "bpf_tracing.h" +#include "bpf_endian.h" +// #include + +char __license[] SEC("license") = "Dual MIT/GPL"; + +#define ETH_P_IP 0x0800 +#define ETH_P_IPV6 0x86DD +#define ETH_P_8021Q 0x8100 +#define ETH_P_ARP 0x0806 +#define TASK_COMM_LEN 16 + +struct iptuple +{ + __u16 proto; + __u32 saddr; + __u32 daddr; + __u16 dport; + __u16 sport; + __u32 hook; + char devname[32]; + __u16 padding; +}; + +struct verdict +{ + struct iptuple flow; + __u64 ts; + __u32 pid; + unsigned char comm[TASK_COMM_LEN]; + int status; +}; + +struct +{ + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 16384); + __type(key, u32); + __type(value, struct iptuple); +} ipflows SEC(".maps"); + +struct +{ + __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); + __uint(max_entries, 16384); +} verdicts SEC(".maps"); + +const struct verdict *unused __attribute__((unused)); + +#define MAC_HEADER_SIZE 14; +#define member_address(source_struct, source_member) \ + ({ \ + void *__ret; \ + __ret = (void *)(((char *)source_struct) + offsetof(typeof(*source_struct), source_member)); \ + __ret; \ + }) +#define member_read(destination, source_struct, source_member) \ + do \ + { \ + bpf_probe_read( \ + destination, \ + sizeof(source_struct->source_member), \ + member_address(source_struct, source_member)); \ + } while (0) + +SEC("kprobe/nf_hook_slow") +int BPF_KPROBE(nf_hook_slow, struct sk_buff *skb, struct nf_hook_state *state) +{ + char *head; + __u16 mac_header, nw_header, tcp_header, eth_proto; + + if (skb) + { + member_read(&head, skb, head); + member_read(&mac_header, skb, mac_header); + member_read(&nw_header, skb, network_header); + member_read(ð_proto, skb, protocol); + char *ip_header_address = head + nw_header; + if (eth_proto == bpf_htons(ETH_P_IP)) + { + struct iphdr iphdr; + __u16 sport, dport; + + member_read(&tcp_header, skb, transport_header); + struct iptuple ipt; + __builtin_memset(&ipt, 0, sizeof(ipt)); + // event.ip_version = 4; + bpf_probe_read(&iphdr, sizeof(iphdr), ip_header_address); + __u8 l4proto = iphdr.protocol; + // Discard non UDP traffic + if (l4proto != IPPROTO_TCP) + { + return 0; + } + // event.addr.v4.saddr = iphdr.saddr; + // event.addr.v4.daddr = iphdr.daddr; + // struct tcphdr *tcphdr = (struct tcphdr *)(&ip_header_address[offset]); + struct tcphdr tcphdr; + char *tcphdraddr = head + tcp_header; + bpf_probe_read(&tcphdr, sizeof(tcphdr), tcphdraddr); + sport = bpf_htons(tcphdr.source); + dport = bpf_htons(tcphdr.dest); + + ipt.proto = iphdr.protocol; + ipt.saddr = iphdr.saddr; + ipt.daddr = iphdr.daddr; + ipt.sport = sport; + ipt.dport = dport; + member_read(&ipt.hook, state, hook); + // bpf_printk("devname:%s", ipt.devname); + // bpf_printk("kprobe:sport: %d dport:%d hook:%d\n", ipt.sport, ipt.dport, ipt.hook); + __u64 pid_tgid = bpf_get_current_pid_tgid(); + __u32 pid = pid_tgid >> 32; + // if (sport == 4567 || dport ==4567) + // { + bpf_map_update_elem(&ipflows, &pid, &ipt, BPF_ANY); + // } + } + } + return 0; +} + +SEC("kretprobe/nf_hook_slow") +int BPF_KRETPROBE(nf_hook_slow_ret, int verdict) +{ + __u64 pid_tgid = bpf_get_current_pid_tgid(); + __u32 pid = pid_tgid >> 32; + struct iptuple *ipflow = bpf_map_lookup_elem(&ipflows, &pid); + if (!ipflow) + { + return 0; + } + + if (verdict >= 0) + { + return 0; + } + + __u64 ts = bpf_ktime_get_ns(); + struct verdict v = { + .ts = ts, + .flow = *ipflow, + .status = verdict, + .pid = pid}; + + bpf_get_current_comm(v.comm, sizeof(v.comm)); + + bpf_perf_event_output(ctx, 
&verdicts, BPF_F_CURRENT_CPU, &v, sizeof(v)); + bpf_map_delete_elem(&ipflows, &pid); + return 0; +} diff --git a/pkg/plugin/deprecated/iptablelogger/iptablelogger.go b/pkg/plugin/deprecated/iptablelogger/iptablelogger.go new file mode 100644 index 000000000..94539fa7e --- /dev/null +++ b/pkg/plugin/deprecated/iptablelogger/iptablelogger.go @@ -0,0 +1,165 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +//go:build exclude + +package iptablelogger + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "net" + "os" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/plugin/api" + "golang.org/x/sys/unix" + + "github.com/cilium/ebpf/link" + "github.com/cilium/ebpf/perf" + "github.com/cilium/ebpf/rlimit" + "go.uber.org/zap" +) + +//go:generate go run github.com/cilium/ebpf/cmd/bpf2go@master -cc clang-14 -cflags "-g -O2 -Wall -D__TARGET_ARCH_x86 -Wall" -target bpf -type verdict kprobe ./cprog/iptable_logger.c -- -I../common/include -I../common/libbpf/src + +func readEvent(ipl *iptableLogger, event chan<- kprobeVerdict) { + var data kprobeVerdict + for { + record, err := ipl.verdictRd.Read() + if err != nil { + if errors.Is(err, perf.ErrClosed) { + return + } + ipl.l.Error("reading from perf event reader: %w", zap.Error(err)) + continue + } + + if record.LostSamples != 0 { + ipl.l.Warn("perf event ring buffer full:", zap.Uint64("dropped samples", record.LostSamples)) + continue + } + + // Parse the perf event entry into a bpfEvent structure. + if err := binary.Read(bytes.NewBuffer(record.RawSample), binary.LittleEndian, &data); err != nil { + ipl.l.Error("parsing perf event: %w", zap.Error(err)) + continue + } + + event <- data + } +} + +func notifyVerdictData(ipl *iptableLogger, event kprobeVerdict) { + saddrbuf := make([]byte, 4) + daddrbuf := make([]byte, 4) + + binary.LittleEndian.PutUint32(saddrbuf, uint32(event.Flow.Saddr)) + binary.LittleEndian.PutUint32(daddrbuf, uint32(event.Flow.Daddr)) + + srcip := net.IPv4(saddrbuf[0], saddrbuf[1], saddrbuf[2], saddrbuf[3]) + dstip := net.IPv4(daddrbuf[0], daddrbuf[1], daddrbuf[2], daddrbuf[3]) + + var b []byte + + for _, v := range event.Comm { + b = append(b, v) + } + + tcpData := &IPTableEvent{ + Timestamp: event.Ts, + Pid: event.Pid, + ProcessName: unix.ByteSliceToString(b), + SrcIP: srcip, + DstIP: dstip, + SrcPort: event.Flow.Sport, + DstPort: event.Flow.Dport, + Verdict: event.Status, + Hook: Chain(event.Flow.Hook), + } + + pluginEvent := api.PluginEvent{ + Name: Name, + Event: tcpData, + } + + // notify event channel about packet + ipl.event <- pluginEvent +} + +// NewTcpTracerPlugin Initialize logger object +func New(logger *log.ZapLogger) api.Plugin { + return &iptableLogger{ + l: logger, + } +} + +func NewPluginFn(l *log.ZapLogger) api.Plugin { + return New(l) +} + +func (ipl *iptableLogger) Init(pluginEvent chan<- api.PluginEvent) error { + nfhookslowfn := "nf_hook_slow" + + if err := rlimit.RemoveMemlock(); err != nil { + ipl.l.Error("RemoveMemLock failed:%w", zap.Error(err)) + return err + } + + objs := kprobeObjects{} + if err := loadKprobeObjects(&objs, nil); err != nil { + ipl.l.Error("loading objects: %w", zap.Error(err)) + return err + } + + defer objs.Close() + + var err error + ipl.Knfhook, err = link.Kprobe(nfhookslowfn, objs.NfHookSlow, nil) + if err != nil { + ipl.l.Error("opening kprobe: %w", zap.Error(err)) + return err + } + + ipl.Kretnfhook, err = link.Kretprobe(nfhookslowfn, objs.NfHookSlowRet, nil) + if err != nil { + ipl.l.Error("opening kretprobe: %w", 
zap.Error(err)) + return err + } + + ipl.verdictRd, err = perf.NewReader(objs.Verdicts, os.Getpagesize()) + if err != nil { + ipl.l.Error("creating perf event reader: %w", zap.Error(err)) + return err + } + + ipl.event = pluginEvent + return err +} + +func (ipl *iptableLogger) Start(ctx context.Context) error { + ipl.l.Info("Listening for events..") + go waitOnEvent(ipl) + return nil +} + +func waitOnEvent(ipl *iptableLogger) { + verdictEvent := make(chan kprobeVerdict, 500) + go readEvent(ipl, verdictEvent) + + for { + select { + case verdictData := <-verdictEvent: + notifyVerdictData(ipl, verdictData) + } + } +} + +func (ipl *iptableLogger) Stop() error { + ipl.l.Info("Closing probes...") + ipl.Knfhook.Close() + ipl.Kretnfhook.Close() + ipl.verdictRd.Close() + return nil +} diff --git a/pkg/plugin/deprecated/iptablelogger/iptablelogger_test.go b/pkg/plugin/deprecated/iptablelogger/iptablelogger_test.go new file mode 100644 index 000000000..b41a5eb11 --- /dev/null +++ b/pkg/plugin/deprecated/iptablelogger/iptablelogger_test.go @@ -0,0 +1,65 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +//go:build exclude + +package iptablelogger + +import ( + "encoding/binary" + "net" + "os" + "testing" + "time" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/plugin/api" + "github.com/stretchr/testify/require" +) + +func ip2int(ip net.IP) uint32 { + if len(ip) == 16 { + return binary.LittleEndian.Uint32(ip[12:16]) + } + return binary.LittleEndian.Uint32(ip) +} + +func TestIPTableLoggerEvent(t *testing.T) { + l := log.SetupZapLogger(log.GetDefaultLogOpts()) + tt := &iptableLogger{l: l} + srcIP := "192.168.0.4" + dstIP := "192.168.0.5" + flow := kprobeIptuple{ + Saddr: ip2int(net.ParseIP(srcIP)), + Daddr: ip2int(net.ParseIP(dstIP)), + Sport: 1234, + Dport: 2345, + Proto: 6, + } + e := kprobeVerdict{ + Flow: flow, + Ts: 1234, + Status: 1, + Pid: uint32(1234), + } + + event := make(chan api.PluginEvent, 500) + tt.event = event + notifyVerdictData(tt, e) + + var data api.PluginEvent + select { + case data = <-event: + var ipt *IPTableEvent + ipt = data.Event.(*IPTableEvent) + require.Exactly(t, Name, data.Name, "Expected:%s but got:%s", Name, data.Name) + require.Equal(t, srcIP, ipt.SrcIP.String(), "Expected:%s but got:%s", srcIP, ipt.SrcIP.String()) + require.Equal(t, e.Flow.Sport, ipt.SrcPort, "Expected:%d but got:%d", e.Flow.Sport, ipt.SrcPort) + require.Equal(t, dstIP, ipt.DstIP.String(), "Expected:%s but got:%s", dstIP, ipt.DstIP.String()) + require.Equal(t, e.Flow.Dport, ipt.DstPort, "Expected:%d but got:%d", e.Flow.Dport, ipt.DstPort) + require.Equal(t, e.Pid, ipt.Pid, "Expected:%d but got:%d", e.Pid, ipt.Pid) + require.Equal(t, e.Ts, ipt.Timestamp, "Expected:%d but got:%d", e.Ts, ipt.Timestamp) + require.Equal(t, e.Status, ipt.Verdict, "Expected:%d but got:%d", e.Status, ipt.Verdict) + case <-time.After(time.Second * 3): + t.Fatal("Timedout. Event not received") + } +} diff --git a/pkg/plugin/deprecated/iptablelogger/types.go b/pkg/plugin/deprecated/iptablelogger/types.go new file mode 100644 index 000000000..e6771d5b0 --- /dev/null +++ b/pkg/plugin/deprecated/iptablelogger/types.go @@ -0,0 +1,102 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
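// Editorial note: this deprecated plugin still follows the older plugin contract in
// which Init receives the event channel directly (Init(chan<- api.PluginEvent) error)
// and there is no SetupChannel, Generate or Compile; the current contract is the
// api.Plugin interface in pkg/plugin/api/types.go. The //go:build exclude tag below
// keeps the package out of normal builds.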
+//go:build exclude + +package iptablelogger + +import ( + "context" + "fmt" + "net" + "time" + + "github.com/microsoft/retina/pkg/deprecated/cache" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/plugin/api" + "github.com/microsoft/retina/pkg/utils" + "github.com/cilium/ebpf/link" + "github.com/cilium/ebpf/perf" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" +) + +const ( + Name api.PluginName = "iptablelogger" + counterName string = "iptablelogger_count" +) + +//nolint:unused +type iptableLogger struct { + l *log.ZapLogger + Knfhook link.Link + Kretnfhook link.Link + verdictRd *perf.Reader + event chan<- api.PluginEvent +} + +type Chain uint32 + +type IPTableEvent struct { + Timestamp uint64 + Pid uint32 + ProcessName string + SrcIP net.IP + DstIP net.IP + SrcPort uint16 + DstPort uint16 + Hook Chain + Verdict int32 +} + +func (ipe *IPTableEvent) HandlePluginEventSignals(attr []attribute.KeyValue, m metric.Meter, t trace.Tracer) { + // metric + cnter, err := m.Int64Counter(counterName) + if err == nil { + cnter.Add(context.TODO(), 1, attr...) + } + + // trace + _, span := t.Start(context.Background(), string(Name)) + span.SetAttributes(attr...) + span.End() +} + +func (ipe *IPTableEvent) GetPluginEventAttributes(pluginname string, c *cache.Cache) []attribute.KeyValue { + attrs := make([]attribute.KeyValue, 0) + attrs = utils.GetPluginEventAttributes(attrs, pluginname, ipe.Hook.String(), time.Now().String()) + attrs = utils.GetPluginEventLocalAttributes(attrs, c, ipe.SrcIP.String(), fmt.Sprint(ipe.SrcPort)) + attrs = utils.GetPluginEventRemoteAttributes(attrs, c, ipe.DstIP.String(), fmt.Sprint(ipe.DstPort)) + return attrs +} + +const ( + INPUT Chain = iota + PREROUTING + FORWARD + OUTPUT + POSTROUTING + UNKNOWN +) + +func (c Chain) String() string { + switch c { + case INPUT: + return "INPUT" + case PREROUTING: + return "PREROUTING" + case FORWARD: + return "FORWARD" + case OUTPUT: + return "OUTPUT" + case POSTROUTING: + return "POSTROUTING" + default: + return "UNKNOWN" + } +} + +func (t IPTableEvent) String() string { + return fmt.Sprintf("Timestamp:%d Pid:%d ProcessName:%s Hook:%s SrcAddr:%s SrcPort:%d DstAddr:%s DstPort:%d Verdict:%d", + t.Timestamp, t.Pid, t.ProcessName, t.Hook.String(), t.SrcIP.String(), t.SrcPort, t.DstIP.String(), t.DstPort, t.Verdict) +} diff --git a/pkg/plugin/deprecated/tcpreceive/cprog/tcpreceive.c b/pkg/plugin/deprecated/tcpreceive/cprog/tcpreceive.c new file mode 100644 index 000000000..b7e974ecf --- /dev/null +++ b/pkg/plugin/deprecated/tcpreceive/cprog/tcpreceive.c @@ -0,0 +1,95 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+#include "vmlinux.h" +#include "bpf_helpers.h" +#include "bpf_core_read.h" +#include "bpf_tracing.h" +#include "bpf_endian.h" + +char __license[] SEC("license") = "Dual MIT/GPL"; + +#define TASK_COMM_LEN 16 + +struct tcpreceiveEvent +{ + __u32 pid; + __u64 ts; + __u8 comm[TASK_COMM_LEN]; + __u32 saddr; + __u32 daddr; + __u16 dport; + __u16 sport; + __s32 recv_bytes; + __u16 operation; + __u8 padding[1]; +}; + +// mapkey is struct to define a 5-tuple as a key for mapevent +struct mapkey +{ + __u32 saddr; + __u32 daddr; + __u16 sport; + __u16 dport; + __u8 l4proto; +}; +struct mapkey *unused_k __attribute__((unused)); + +// mapevent adds all unique mapkeys to the hashtable +// value is the amount of bytes sent +struct +{ + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 4096); + __type(key, struct mapkey); + __type(value, int); + __uint(map_flags, BPF_F_NO_PREALLOC); +} mapevent SEC(".maps"); + +const struct tcpreceiveEvent *unusedv4 __attribute__((unused)); + +SEC("kprobe/tcp_cleanup_rbuf") +int BPF_KPROBE(tcp_cleanup_rbuf, struct sock *sk, int copied) +{ + if (sk == NULL || copied <= 0) + { + return 0; + } + + __u32 saddr; + __u32 daddr; + __u16 dport; + __u16 sport; + __u8 l4proto; + BPF_CORE_READ_INTO(&saddr, sk, __sk_common.skc_rcv_saddr); + BPF_CORE_READ_INTO(&daddr, sk, __sk_common.skc_daddr); + BPF_CORE_READ_INTO(&dport, sk, __sk_common.skc_dport); + BPF_CORE_READ_INTO(&sport, sk, __sk_common.skc_num); + BPF_CORE_READ_INTO(&l4proto, sk, sk_protocol); + + struct mapkey k; + __builtin_memset(&k, 0, sizeof(k)); + k.saddr = saddr; + k.daddr = daddr; + k.sport = sport; + k.dport = bpf_ntohs(dport); + k.l4proto = l4proto; + + int initval = copied, *val; + val = bpf_map_lookup_elem(&mapevent, &k); + if (!val) + { + int err = bpf_map_update_elem(&mapevent, &k, &initval, BPF_ANY); + if (err == -1) + { + bpf_printk("Could not update map: %d", err); + return 0; + } + } + else + { + // adding send bytes + __sync_fetch_and_add(val, copied); + } + return 0; +} diff --git a/pkg/plugin/deprecated/tcpreceive/tcpreceive.go b/pkg/plugin/deprecated/tcpreceive/tcpreceive.go new file mode 100644 index 000000000..8d88e8e2d --- /dev/null +++ b/pkg/plugin/deprecated/tcpreceive/tcpreceive.go @@ -0,0 +1,160 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
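// Editorial summary of this (deprecated, build-excluded) plugin: the eBPF program above
// attaches a kprobe to tcp_cleanup_rbuf and accumulates received byte counts per
// 5-tuple in the `mapevent` hash map; the Go side below iterates that map every
// 10 seconds, converts each entry into a TcpreceiveData event (skipping loopback,
// 0.0.0.0 and self-to-self tuples), publishes it on the plugin event channel and then
// clears the map.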
+ +//go:build exclude + +package tcpreceive + +import ( + "bytes" + "context" + "encoding/binary" + "net" + "time" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/plugin/api" + "github.com/microsoft/retina/pkg/plugin/constants" + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/link" + "github.com/cilium/ebpf/rlimit" + "go.uber.org/zap" +) + +//go:generate go run github.com/cilium/ebpf/cmd/bpf2go@master -cc clang-14 -cflags "-g -O2 -Wall -D__TARGET_ARCH_x86 -Wall" -target bpf -type tcpreceiveEvent kprobe ./cprog/tcpreceive.c -- -I../common/include -I../common/libbpf/src + +// pull from hashmap in 10 second intervals to collect send data +// clear map at the end of each map iteration +func pullMapData(r *tcpreceive, event chan<- kprobeTcpreceiveEvent, hmap *ebpf.Map) { + var data kprobeMapkey + ticker := time.NewTicker(10 * time.Second) + defer ticker.Stop() + for range ticker.C { + var key []byte + var value int32 + + iter := hmap.Iterate() + for iter.Next(&key, &value) { + if err := binary.Read(bytes.NewBuffer(key), binary.LittleEndian, &data); err != nil { + r.l.Error("parse map values error", zap.Error(err)) + } + + ByteCount := value + saddrbuf := make([]byte, 4) + daddrbuf := make([]byte, 4) + + binary.LittleEndian.PutUint32(saddrbuf, uint32(data.Saddr)) + binary.LittleEndian.PutUint32(daddrbuf, uint32(data.Daddr)) + srcip := net.IPv4(saddrbuf[0], saddrbuf[1], saddrbuf[2], saddrbuf[3]) + dstip := net.IPv4(daddrbuf[0], daddrbuf[1], daddrbuf[2], daddrbuf[3]) + l4proto := data.L4proto + var protocol string + if l4proto == 6 { + protocol = constants.TCP + } + + if l4proto == 17 { + protocol = constants.UDP + } + + if srcip.String() == constants.LocalhostIP || dstip.String() == constants.LocalhostIP { + r.l.Debug("srcip or dstip == 127.0.0.1") + continue + } + + if srcip.String() == constants.ZeroIP || dstip.String() == constants.ZeroIP { + r.l.Debug("skipping IP 0.0.0.0") + continue + } + + if srcip.String() == dstip.String() { + r.l.Debug("src and dst IP are the same") + continue + } + + recvData := &TcpreceiveData{ + LocalIP: srcip, + RemoteIP: dstip, + LocalPort: data.Sport, + RemotePort: data.Dport, + L4Proto: protocol, + Recv: ByteCount, + Op: "RECEIVE", + } + + notifyRecvData(r, recvData) + } + // clearing map + iterClear := hmap.Iterate() + for iterClear.Next(&key, &value) { + err := hmap.Delete(&key) + if err != nil { + r.l.Error("Deleting map key failed", zap.Error(err)) + } + } + } +} + +func notifyRecvData(s *tcpreceive, recvData *TcpreceiveData) { + pluginEvent := api.PluginEvent{ + Name: Name, + Event: recvData, + } + // notify event channel about packet + s.event <- pluginEvent +} + +// NewtcpreceivePlugin Initialize logger object +func New(logger *log.ZapLogger) api.Plugin { + return &tcpreceive{ + l: logger, + } +} + +func NewPluginFn(l *log.ZapLogger) api.Plugin { + return New(l) +} + +func (r *tcpreceive) Init(pluginEvent chan<- api.PluginEvent) error { + r.l.Info("Entering tcpreceive Init...") + recvfn := "tcp_cleanup_rbuf" + + if err := rlimit.RemoveMemlock(); err != nil { + r.l.Error("RemoveMemLock failed:%w", zap.Error(err)) + return err + } + + objs := kprobeObjects{} + if err := loadKprobeObjects(&objs, nil); err != nil { + r.l.Error("loading objects: %w", zap.Error(err)) + return err + } + + var err error + r.kRecv, err = link.Kprobe(recvfn, objs.TcpCleanupRbuf, nil) + if err != nil { + r.l.Error("opening kprobe: %w", zap.Error(err)) + return err + } + + r.hashmapData = objs.Mapevent + r.event = pluginEvent + r.l.Info("Exiting 
tcpreceive Init...") + return err +} + +func (r *tcpreceive) Start(ctx context.Context) error { + r.l.Info("Start listening for receive events...") + tcpreceiveEvent := make(chan kprobeTcpreceiveEvent, 500) + go pullMapData(r, tcpreceiveEvent, r.hashmapData) + r.l.Info("Exiting tcpreceive start...") + return nil +} + +func (r *tcpreceive) Stop() error { + r.l.Info("Closing probes...") + r.kRecv.Close() + r.hashmapData.Close() + r.l.Info("Exiting tcpreceive Stop...") + return nil +} diff --git a/pkg/plugin/deprecated/tcpreceive/tcpreceive_test.go b/pkg/plugin/deprecated/tcpreceive/tcpreceive_test.go new file mode 100644 index 000000000..62432cc2d --- /dev/null +++ b/pkg/plugin/deprecated/tcpreceive/tcpreceive_test.go @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +//go:build exclude + +package tcpreceive + +import ( + "net" + "os" + "testing" + "time" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/plugin/api" + "github.com/stretchr/testify/require" +) + +func TestTcpsend(t *testing.T) { + l := log.SetupZapLogger(log.GetDefaultLogOpts()) + tt := &tcpreceive{l: l} + srcIP := net.IPv4(192, 168, 0, 4) + dstIP := net.IPv4(192, 168, 0, 5) + + e := &TcpreceiveData{ + LocalIP: net.IP(srcIP), + RemoteIP: net.IP(dstIP), + LocalPort: 1234, + RemotePort: 3446, + L4Proto: "TCP", + Recv: 456, + } + + event := make(chan api.PluginEvent, 500) + tt.event = event + notifyRecvData(tt, e) + + var data api.PluginEvent + select { + case data = <-event: + var tcp *TcpreceiveData + tcp = data.Event.(*TcpreceiveData) + require.Exactly(t, Name, data.Name, "Expected:%s but got:%s", Name, data.Name) + require.Equal(t, srcIP, tcp.LocalIP, "Expected:%s but got:%s", srcIP, tcp.LocalIP) + require.Equal(t, e.LocalPort, tcp.LocalPort, "Expected:%d but got:%d", e.LocalPort, tcp.LocalPort) + require.Equal(t, dstIP, tcp.RemoteIP, "Expected:%s but got:%s", dstIP, tcp.RemoteIP) + require.Equal(t, e.RemotePort, tcp.RemotePort, "Expected:%d but got:%d", e.RemotePort, tcp.RemotePort) + require.Equal(t, e.L4Proto, tcp.L4Proto, "Expected:%d but got:%d", e.L4Proto, tcp.L4Proto) + require.Equal(t, e.Recv, tcp.Recv, "Expected:%d but got:%d", e.Recv, tcp.Recv) + + case <-time.After(time.Second * 3): + t.Fatal("Timedout. Event not received") + } +} diff --git a/pkg/plugin/deprecated/tcpreceive/types.go b/pkg/plugin/deprecated/tcpreceive/types.go new file mode 100644 index 000000000..0c88cdfd8 --- /dev/null +++ b/pkg/plugin/deprecated/tcpreceive/types.go @@ -0,0 +1,77 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
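tcpreceive, tcpsend and tcptracer in this change all follow the same lifecycle shown above: New wraps a logger, Init wires the write-only event channel and attaches the probes, Start launches the collection goroutine, and Stop detaches. A rough sketch of how a manager could drive any of them through the api.Plugin interface; this is illustrative wiring only, since these files carry a //go:build exclude tag, and it assumes the deprecated interface exposes exactly the Init/Start/Stop signatures implemented here.

    package pluginmanager // hypothetical package, not part of this diff

    import (
        "context"

        "github.com/microsoft/retina/pkg/log"
        "github.com/microsoft/retina/pkg/plugin/api"
        "go.uber.org/zap"
    )

    // runPlugin drives a plugin through Init/Start/Stop and logs every event it emits.
    func runPlugin(ctx context.Context, p api.Plugin, l *log.ZapLogger) error {
        events := make(chan api.PluginEvent, 500)
        if err := p.Init(events); err != nil {
            return err
        }
        defer p.Stop()

        go func() {
            if err := p.Start(ctx); err != nil {
                l.Error("plugin start failed", zap.Error(err))
            }
        }()

        for {
            select {
            case ev := <-events:
                l.Info("plugin event", zap.String("plugin", string(ev.Name)), zap.Any("event", ev.Event))
            case <-ctx.Done():
                return nil
            }
        }
    }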
+//go:build exclude + +package tcpreceive + +import ( + "context" + "fmt" + "net" + "time" + + "github.com/microsoft/retina/pkg/deprecated/cache" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/plugin/api" + "github.com/microsoft/retina/pkg/utils" + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/link" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" +) + +const ( + Name api.PluginName = "tcpreceive" + TCPBytesRecv string = "tcp_bytes_recv" + Proto string = "proto" + counterName string = "tcprecv_count" +) + +//nolint:unused +type tcpreceive struct { + l *log.ZapLogger + kRecv link.Link + hashmapData *ebpf.Map + event chan<- api.PluginEvent +} + +type TcpreceiveData struct { + LocalIP net.IP + RemoteIP net.IP + LocalPort uint16 + RemotePort uint16 + L4Proto string + Recv int32 + Op string +} + +func (r *TcpreceiveData) GetPluginEventAttributes(pluginname string, c *cache.Cache) []attribute.KeyValue { + attrs := make([]attribute.KeyValue, 0) + attrs = utils.GetPluginEventAttributes(attrs, pluginname, r.Op, time.Now().String()) + attrs = utils.GetPluginEventLocalAttributes(attrs, c, r.LocalIP.String(), fmt.Sprint(r.LocalPort)) + attrs = utils.GetPluginEventRemoteAttributes(attrs, c, r.RemoteIP.String(), fmt.Sprint(r.RemotePort)) + attrs = append(attrs, + attribute.Key(TCPBytesRecv).Int(int(r.Recv)), + attribute.Key(Proto).String(r.L4Proto), + ) + return attrs +} + +func (r *TcpreceiveData) HandlePluginEventSignals(attr []attribute.KeyValue, m metric.Meter, t trace.Tracer) { + // metric + cnter, err := m.Int64Counter(counterName) + if err == nil { + cnter.Add(context.TODO(), 1, attr...) + } + + // trace + _, span := t.Start(context.Background(), string(Name)) + span.SetAttributes(attr...) + span.End() +} + +func (t TcpreceiveData) String() string { + return fmt.Sprintf("LocalAddr:%s LocalPort:%d RemoteAddr:%s RemotePort:%d L4Proto:%s Operation:%s Recv:%d", + t.LocalIP.String(), t.LocalPort, t.RemoteIP.String(), t.RemotePort, t.L4Proto, t.Op, t.Recv) +} diff --git a/pkg/plugin/deprecated/tcpsend/cprog/tcpsend.c b/pkg/plugin/deprecated/tcpsend/cprog/tcpsend.c new file mode 100644 index 000000000..f8b531d84 --- /dev/null +++ b/pkg/plugin/deprecated/tcpsend/cprog/tcpsend.c @@ -0,0 +1,92 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+#include "vmlinux.h" +#include "bpf_helpers.h" +#include "bpf_core_read.h" +#include "bpf_tracing.h" +#include "bpf_endian.h" + +char __license[] SEC("license") = "Dual MIT/GPL"; + +#define TASK_COMM_LEN 16 + +struct tcpsendEvent +{ + __u32 pid; + __u64 ts; + __u8 comm[TASK_COMM_LEN]; + __u32 saddr; + __u32 daddr; + __u16 dport; + __u16 sport; + __u16 l4proto; + __u64 sent_bytes; + __u16 operation; + __u8 padding[1]; +}; + +// mapkey is struct to define a 5-tuple as a key for mapevent +struct mapkey +{ + __u32 saddr; + __u32 daddr; + __u16 sport; + __u16 dport; + __u16 l4proto; +}; +struct mapkey *unused_k __attribute__((unused)); + +// mapevent adds all unique mapkeys to the hashtable +// value is the amount of bytes sent +struct +{ + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 4096); + __type(key, struct mapkey); + __type(value, __u64); + __uint(map_flags, BPF_F_NO_PREALLOC); +} mapevent SEC(".maps"); + +const struct tcpsendEvent *unusedv4 __attribute__((unused)); + +SEC("kprobe/tcp_sendmsg") +int BPF_KPROBE(tcp_sendmsg, struct sock *sk, + struct msghdr *msg, size_t size) +{ + __u32 saddr; + __u32 daddr; + __u16 dport; + __u16 sport; + __u8 l4proto; + BPF_CORE_READ_INTO(&saddr, sk, __sk_common.skc_rcv_saddr); + BPF_CORE_READ_INTO(&daddr, sk, __sk_common.skc_daddr); + BPF_CORE_READ_INTO(&dport, sk, __sk_common.skc_dport); + BPF_CORE_READ_INTO(&sport, sk, __sk_common.skc_num); + BPF_CORE_READ_INTO(&l4proto, sk, sk_protocol); + + struct mapkey k; + __builtin_memset(&k, 0, sizeof(k)); + k.saddr = saddr; + k.daddr = daddr; + k.sport = sport; + k.dport = bpf_ntohs(dport); + k.l4proto = l4proto; + + __u64 initval = size, *val; + val = bpf_map_lookup_elem(&mapevent, &k); + if (!val) + { + int err = bpf_map_update_elem(&mapevent, &k, &initval, BPF_ANY); + if (err == -1) + { + bpf_printk("Could not update map: %d", err); + return 0; + } + } + else + { + // adding send bytes + __sync_fetch_and_add(val, size); + } + return 0; +} diff --git a/pkg/plugin/deprecated/tcpsend/tcpsend.go b/pkg/plugin/deprecated/tcpsend/tcpsend.go new file mode 100644 index 000000000..c34c4a445 --- /dev/null +++ b/pkg/plugin/deprecated/tcpsend/tcpsend.go @@ -0,0 +1,157 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+//go:build exclude + +package tcpsend + +import ( + "bytes" + "context" + "encoding/binary" + "net" + "time" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/plugin/api" + "github.com/microsoft/retina/pkg/plugin/constants" + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/link" + "github.com/cilium/ebpf/rlimit" + "go.uber.org/zap" +) + +//go:generate go run github.com/cilium/ebpf/cmd/bpf2go@master -cc clang-14 -cflags "-g -O2 -Wall -D__TARGET_ARCH_x86 -Wall" -target bpf -type tcpsendEvent kprobe ./cprog/tcpsend.c -- -I../common/include -I../common/libbpf/src + +// pull from hashmap in 10 second intervals to collect send data +// clear map at the end of each map iteration +func pullMapData(s *tcpsend, hmap *ebpf.Map) { + var data kprobeMapkey + ticker := time.NewTicker(10 * time.Second) + defer ticker.Stop() + for range ticker.C { + var key []byte + var value uint64 + + iter := hmap.Iterate() + for iter.Next(&key, &value) { + if err := binary.Read(bytes.NewBuffer(key), binary.LittleEndian, &data); err != nil { + s.l.Error("parse map values error", zap.Error(err)) + } + + ByteCount := value + saddrbuf := make([]byte, 4) + daddrbuf := make([]byte, 4) + + binary.LittleEndian.PutUint32(saddrbuf, uint32(data.Saddr)) + binary.LittleEndian.PutUint32(daddrbuf, uint32(data.Daddr)) + srcip := net.IPv4(saddrbuf[0], saddrbuf[1], saddrbuf[2], saddrbuf[3]) + dstip := net.IPv4(daddrbuf[0], daddrbuf[1], daddrbuf[2], daddrbuf[3]) + l4proto := data.L4proto + var protocol string + if l4proto == 6 { + protocol = constants.TCP + } + + if l4proto == 17 { + protocol = constants.UDP + } + + if srcip.String() == constants.LocalhostIP || dstip.String() == constants.LocalhostIP { + s.l.Debug("srcip or dstip == 127.0.0.1") + continue + } + + if srcip.String() == constants.ZeroIP || dstip.String() == constants.ZeroIP { + s.l.Debug("skipping IP 0.0.0.0") + continue + } + + if srcip.String() == dstip.String() { + s.l.Debug("src and dst IP are the same") + continue + } + + sendData := &TcpsendData{ + LocalIP: srcip, + RemoteIP: dstip, + LocalPort: data.Sport, + RemotePort: data.Dport, + L4Proto: protocol, + Sent: ByteCount, + Op: "SEND", + } + + notifySendData(s, sendData) + } + // clearing map + iterClear := hmap.Iterate() + for iterClear.Next(&key, &value) { + err := hmap.Delete(&key) + if err != nil { + s.l.Error("Deleting map key failed", zap.Error(err)) + } + } + } +} + +func notifySendData(s *tcpsend, sendData *TcpsendData) { + pluginEvent := api.PluginEvent{ + Name: Name, + Event: sendData, + } + // notify event channel about packet + s.event <- pluginEvent +} + +// NewtcpsendPlugin Initialize logger object +func New(logger *log.ZapLogger) api.Plugin { + return &tcpsend{ + l: logger, + } +} + +func NewPluginFn(l *log.ZapLogger) api.Plugin { + return New(l) +} + +func (s *tcpsend) Init(pluginEvent chan<- api.PluginEvent) error { + s.l.Info("Entering tcpsend Init...") + sendfn := "tcp_sendmsg" + + if err := rlimit.RemoveMemlock(); err != nil { + s.l.Error("RemoveMemLock failed:%w", zap.Error(err)) + return err + } + objs := kprobeObjects{} + if err := loadKprobeObjects(&objs, nil); err != nil { + s.l.Error("loading objects: %w", zap.Error(err)) + return err + } + + var err error + s.kSend, err = link.Kprobe(sendfn, objs.TcpSendmsg, nil) + if err != nil { + s.l.Error("opening kprobe: %w", zap.Error(err)) + return err + } + + s.hashmapData = objs.Mapevent + s.event = pluginEvent + s.l.Info("Exiting tcpsend Init...") + return err +} + +func (s *tcpsend) Start(ctx context.Context) error { 
+ s.l.Info("Start listening for send events...") + go pullMapData(s, s.hashmapData) + s.l.Info("Exiting tcpsend start...") + return nil +} + +func (s *tcpsend) Stop() error { + s.l.Info("Closing probes...") + s.kSend.Close() + s.hashmapData.Close() + s.l.Info("Exiting tcpsend Stop...") + return nil +} diff --git a/pkg/plugin/deprecated/tcpsend/tscpsend_test.go b/pkg/plugin/deprecated/tcpsend/tscpsend_test.go new file mode 100644 index 000000000..36966a530 --- /dev/null +++ b/pkg/plugin/deprecated/tcpsend/tscpsend_test.go @@ -0,0 +1,53 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +//go:build exclude + +package tcpsend + +import ( + "net" + "os" + "testing" + "time" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/plugin/api" + "github.com/stretchr/testify/require" +) + +func TestTcpsend(t *testing.T) { + l := log.SetupZapLogger(log.GetDefaultLogOpts()) + tt := &tcpsend{l: l} + srcIP := net.IPv4(192, 168, 0, 4) + dstIP := net.IPv4(192, 168, 0, 5) + + e := &TcpsendData{ + LocalIP: net.IP(srcIP), + RemoteIP: net.IP(dstIP), + LocalPort: 1234, + RemotePort: 3446, + L4Proto: "TCP", + Sent: 456, + } + + event := make(chan api.PluginEvent, 500) + tt.event = event + notifySendData(tt, e) + + var data api.PluginEvent + select { + case data = <-event: + var tcp *TcpsendData + tcp = data.Event.(*TcpsendData) + require.Exactly(t, Name, data.Name, "Expected:%s but got:%s", Name, data.Name) + require.Equal(t, srcIP, tcp.LocalIP, "Expected:%s but got:%s", srcIP, tcp.LocalIP) + require.Equal(t, e.LocalPort, tcp.LocalPort, "Expected:%d but got:%d", e.LocalPort, tcp.LocalPort) + require.Equal(t, dstIP, tcp.RemoteIP, "Expected:%s but got:%s", dstIP, tcp.RemoteIP) + require.Equal(t, e.RemotePort, tcp.RemotePort, "Expected:%d but got:%d", e.RemotePort, tcp.RemotePort) + require.Equal(t, e.L4Proto, tcp.L4Proto, "Expected:%d but got:%d", e.L4Proto, tcp.L4Proto) + require.Equal(t, e.Sent, tcp.Sent, "Expected:%d but got:%d", e.Sent, tcp.Sent) + + case <-time.After(time.Second * 3): + t.Fatal("Timedout. Event not received") + } +} diff --git a/pkg/plugin/deprecated/tcpsend/types.go b/pkg/plugin/deprecated/tcpsend/types.go new file mode 100644 index 000000000..92871eb48 --- /dev/null +++ b/pkg/plugin/deprecated/tcpsend/types.go @@ -0,0 +1,77 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+//go:build exclude + +package tcpsend + +import ( + "context" + "fmt" + "net" + "time" + + "github.com/microsoft/retina/pkg/deprecated/cache" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/plugin/api" + "github.com/microsoft/retina/pkg/utils" + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/link" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" +) + +const ( + Name api.PluginName = "tcpsend" + TCPBytesSend string = "tcp_bytes_send" + Proto string = "proto" + counterName string = "tcpsend_count" +) + +//nolint:unused +type tcpsend struct { + l *log.ZapLogger + kSend link.Link + hashmapData *ebpf.Map + event chan<- api.PluginEvent +} + +type TcpsendData struct { + LocalIP net.IP + RemoteIP net.IP + LocalPort uint16 + RemotePort uint16 + L4Proto string + Sent uint64 + Op string +} + +func (s *TcpsendData) GetPluginEventAttributes(pluginname string, c *cache.Cache) []attribute.KeyValue { + attrs := make([]attribute.KeyValue, 0) + attrs = utils.GetPluginEventAttributes(attrs, pluginname, s.Op, time.Now().String()) + attrs = utils.GetPluginEventLocalAttributes(attrs, c, s.LocalIP.String(), fmt.Sprint(s.LocalPort)) + attrs = utils.GetPluginEventRemoteAttributes(attrs, c, s.RemoteIP.String(), fmt.Sprint(s.RemotePort)) + attrs = append(attrs, + attribute.Key(TCPBytesSend).Int(int(s.Sent)), + attribute.Key(Proto).String(s.L4Proto), + ) + return attrs +} + +func (s *TcpsendData) HandlePluginEventSignals(attr []attribute.KeyValue, m metric.Meter, t trace.Tracer) { + // metric + cnter, err := m.Int64Counter(counterName) + if err == nil { + cnter.Add(context.TODO(), 1, attr...) + } + + // trace + _, span := t.Start(context.Background(), string(Name)) + span.SetAttributes(attr...) + span.End() +} + +func (t TcpsendData) String() string { + return fmt.Sprintf("LocalAddr:%s LocalPort:%d RemoteAddr:%s RemotePort:%d L4Proto:%s Operation:%s Sent:%d", + t.LocalIP.String(), t.LocalPort, t.RemoteIP.String(), t.RemotePort, t.L4Proto, t.Op, t.Sent) +} diff --git a/pkg/plugin/deprecated/tcptracer/cprog/tcptracer.c b/pkg/plugin/deprecated/tcptracer/cprog/tcptracer.c new file mode 100644 index 000000000..05c168baf --- /dev/null +++ b/pkg/plugin/deprecated/tcptracer/cprog/tcptracer.c @@ -0,0 +1,217 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+// Based on: https://github.com/iovisor/bcc/blob/master/libbpf-tools/tcpconnect.c +#include "vmlinux.h" +#include "bpf_helpers.h" +#include "bpf_core_read.h" +#include "bpf_tracing.h" +#include "bpf_endian.h" +// #include + +char __license[] SEC("license") = "Dual MIT/GPL"; + +#define CONNECT 1 +#define ACCEPT 2 +#define CLOSE 3 +#define TASK_COMM_LEN 16 + +struct tcpv4event +{ + __u32 pid; + __u64 ts; + __u8 comm[TASK_COMM_LEN]; + __u32 saddr; + __u32 daddr; + __u16 dport; + __u16 sport; + __u64 sent_bytes; + __s32 recv_bytes; + __u16 operation; + __u8 padding[1]; +}; + +struct +{ + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 16384); + __type(key, u32); + __type(value, struct sock *); + __uint(map_flags, BPF_F_NO_PREALLOC); +} sockets SEC(".maps"); + +struct +{ + __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); + __uint(max_entries, 16384); +} tcpv4connect SEC(".maps"); + +struct +{ + __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); + __uint(max_entries, 16384); +} tcpv4accept SEC(".maps"); + +struct +{ + __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); + __uint(max_entries, 16384); +} tcpv4close SEC(".maps"); + +// struct +// { +// __uint(type, BPF_MAP_TYPE_RINGBUF); +// } events_ring SEC(".maps"); + +const struct tcpv4event *unusedv4 __attribute__((unused)); + +// static __always_inline int +// enter_tcp_connect(struct pt_regs *ctx, struct sock *sk) +SEC("kprobe/tcp_v4_connect") +int BPF_KPROBE(tcp_v4_connect, struct sock *sk) +{ + __u64 pid_tgid = bpf_get_current_pid_tgid(); + __u32 pid = pid_tgid >> 32; + bpf_printk("enter: setting sk for tid..: %ld\n", pid); + bpf_map_update_elem(&sockets, &pid, &sk, BPF_ANY); + return 0; +} + +// static __always_inline int +// exit_tcp_connect(struct pt_regs *ctx, int ret) + +SEC("kretprobe/tcp_v4_connect") +int BPF_KRETPROBE(tcp_v4_connect_ret, int ret) +{ + __u64 pid_tgid = bpf_get_current_pid_tgid(); + __u32 pid = pid_tgid >> 32; + struct sock **skpp; + struct sock *sk; + + __u32 saddr; + __u32 daddr; + __u16 dport; + __u16 sport; + + skpp = bpf_map_lookup_elem(&sockets, &pid); + if (!skpp) + { + bpf_printk("exit: no pointer for tid, returning..: %ld", pid); + return 0; + } + + sk = *skpp; + + BPF_CORE_READ_INTO(&saddr, sk, __sk_common.skc_rcv_saddr); + BPF_CORE_READ_INTO(&daddr, sk, __sk_common.skc_daddr); + BPF_CORE_READ_INTO(&dport, sk, __sk_common.skc_dport); + BPF_CORE_READ_INTO(&sport, sk, __sk_common.skc_num); + + __u64 ts = bpf_ktime_get_ns(); + struct tcpv4event et; + __builtin_memset(&et, 0, sizeof(et)); + et.pid = pid; + et.ts = ts; + et.saddr = saddr; + et.daddr = daddr; + et.dport = bpf_ntohs(dport); + et.sport = sport; + et.operation = CONNECT; + + if (saddr != daddr) + { + bpf_get_current_comm(et.comm, sizeof(et.comm)); + bpf_perf_event_output(ctx, &tcpv4connect, BPF_F_CURRENT_CPU, &et, sizeof(et)); + } + + bpf_map_delete_elem(&sockets, &pid); + return 0; +} + +SEC("kretprobe/inet_csk_accept") +int BPF_KRETPROBE(inet_csk_accept_ret, struct sock *sk) +{ + __u64 pid_tgid = bpf_get_current_pid_tgid(); + __u32 pid = pid_tgid >> 32; + + if (sk == NULL) + return 0; + + __u32 saddr; + __u32 daddr; + __u16 dport; + __u16 sport; + + BPF_CORE_READ_INTO(&saddr, sk, __sk_common.skc_rcv_saddr); + BPF_CORE_READ_INTO(&daddr, sk, __sk_common.skc_daddr); + BPF_CORE_READ_INTO(&dport, sk, __sk_common.skc_dport); + BPF_CORE_READ_INTO(&sport, sk, __sk_common.skc_num); + + __u64 ts = bpf_ktime_get_ns(); + struct tcpv4event et; + __builtin_memset(&et, 0, sizeof(et)); + et.pid = pid; + et.ts = ts; + et.saddr = saddr; + et.daddr = daddr; + et.dport = 
bpf_ntohs(dport); + et.sport = sport; + et.operation = ACCEPT; + + if (saddr != daddr) + { + bpf_get_current_comm(et.comm, sizeof(et.comm)); + bpf_perf_event_output(ctx, &tcpv4accept, BPF_F_CURRENT_CPU, &et, sizeof(et)); + } + + return 0; +} + +SEC("kprobe/tcp_close") +int BPF_KPROBE(tcp_close, struct sock *sk, long timeout) +{ + __u64 pid_tgid = bpf_get_current_pid_tgid(); + __u32 pid = pid_tgid >> 32; + + if (sk == NULL) + return 0; + + u16 oldstate; //= sk->sk_state; + BPF_CORE_READ_INTO(&oldstate, sk, __sk_common.skc_state); + bpf_printk("close old state:%d\n", oldstate); + + // Don't generate close events for connections that were never + // established in the first place. + if (oldstate == TCP_SYN_SENT || + oldstate == TCP_SYN_RECV || + oldstate == TCP_NEW_SYN_RECV) + return 0; + + __u32 saddr; + __u32 daddr; + __u16 dport; + __u16 sport; + + BPF_CORE_READ_INTO(&saddr, sk, __sk_common.skc_rcv_saddr); + BPF_CORE_READ_INTO(&daddr, sk, __sk_common.skc_daddr); + BPF_CORE_READ_INTO(&dport, sk, __sk_common.skc_dport); + BPF_CORE_READ_INTO(&sport, sk, __sk_common.skc_num); + + __u64 ts = bpf_ktime_get_ns(); + struct tcpv4event et; + __builtin_memset(&et, 0, sizeof(et)); + et.pid = pid; + et.ts = ts; + et.saddr = saddr; + et.daddr = daddr; + et.dport = bpf_ntohs(dport); + et.sport = sport; + et.operation = CLOSE; + + if (saddr != daddr) + { + bpf_get_current_comm(et.comm, sizeof(et.comm)); + bpf_perf_event_output(ctx, &tcpv4close, BPF_F_CURRENT_CPU, &et, sizeof(et)); + } + + return 0; +} diff --git a/pkg/plugin/deprecated/tcptracer/tcptracer.go b/pkg/plugin/deprecated/tcptracer/tcptracer.go new file mode 100644 index 000000000..d15a15fb8 --- /dev/null +++ b/pkg/plugin/deprecated/tcptracer/tcptracer.go @@ -0,0 +1,211 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +//go:build exclude + +package tcptracer + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "net" + "os" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/plugin/api" + "github.com/cilium/ebpf/link" + "github.com/cilium/ebpf/perf" + "github.com/cilium/ebpf/rlimit" + "go.uber.org/zap" + "golang.org/x/sys/unix" +) + +//go:generate go run github.com/cilium/ebpf/cmd/bpf2go@master -cc clang-14 -cflags "-g -O2 -Wall -D__TARGET_ARCH_x86 -Wall" -target bpf -type tcpv4event kprobe ./cprog/tcptracer.c -- -I../common/include -I../common/libbpf/src + +func readEvent(t *tcpTracer, event chan<- kprobeTcpv4event, eventRD *perf.Reader) { + var data kprobeTcpv4event + for { + record, err := eventRD.Read() + if err != nil { + if errors.Is(err, perf.ErrClosed) { + return + } + t.l.Warn("reading from perf event reader: %w", zap.Error(err)) + continue + } + + if record.LostSamples != 0 { + t.l.Warn("perf event ring buffer full:", zap.Uint64("lostsamples", record.LostSamples)) + continue + } + + // Parse the perf event entry into a bpfEvent structure. 
+ if err := binary.Read(bytes.NewBuffer(record.RawSample), binary.LittleEndian, &data); err != nil { + t.l.Error("parsing perf event: %w", zap.Error(err)) + continue + } + + event <- data + } +} + +func notifyTcpV4Data(t *tcpTracer, event kprobeTcpv4event) { + saddrbuf := make([]byte, 4) + daddrbuf := make([]byte, 4) + + binary.LittleEndian.PutUint32(saddrbuf, uint32(event.Saddr)) + binary.LittleEndian.PutUint32(daddrbuf, uint32(event.Daddr)) + + srcip := net.IPv4(saddrbuf[0], saddrbuf[1], saddrbuf[2], saddrbuf[3]) + dstip := net.IPv4(daddrbuf[0], daddrbuf[1], daddrbuf[2], daddrbuf[3]) + + if srcip.String() == "127.0.0.1" && dstip.String() == "127.0.0.1" { + return + } + + if srcip.String() == "0.0.0.0" || dstip.String() == "0.0.0.0" { + t.l.Info("skipping IP 0.0.0.0") + return + } + + var b []byte + for _, v := range event.Comm { + b = append(b, v) + } + + tcpData := &TcpV4Data{ + Timestamp: event.Ts, + Pid: event.Pid, + ProcessName: unix.ByteSliceToString(b), + LocalIP: srcip, + RemoteIP: dstip, + LocalPort: event.Sport, + RemotePort: event.Dport, + Sent: event.SentBytes, + Recv: event.RecvBytes, + Op: Operation(event.Operation), + } + + pluginEvent := api.PluginEvent{ + Name: Name, + Event: tcpData, + } + + // notify event channel about packet + t.event <- pluginEvent +} + +// NewTcpTracerPlugin Initialize logger object +func New(logger *log.ZapLogger) api.Plugin { + return &tcpTracer{ + l: logger, + } +} + +func NewPluginFn(l *log.ZapLogger) api.Plugin { + return New(l) +} + +func (t *tcpTracer) Init(pluginEvent chan<- api.PluginEvent) error { + connectfn := "tcp_v4_connect" + acceptfn := "inet_csk_accept" + closefn := "tcp_close" + + if err := rlimit.RemoveMemlock(); err != nil { + t.l.Error("RemoveMemLock failed:%w", zap.Error(err)) + return err + } + + objs := kprobeObjects{} + if err := loadKprobeObjects(&objs, nil); err != nil { + t.l.Error("loading objects: %w", zap.Error(err)) + return err + } + + defer objs.Close() + + var err error + t.kConnect, err = link.Kprobe(connectfn, objs.TcpV4Connect, nil) + if err != nil { + t.l.Error("opening kprobe: %w", zap.Error(err)) + return err + } + + t.kretConnect, err = link.Kretprobe(connectfn, objs.TcpV4ConnectRet, nil) + if err != nil { + t.l.Error("opening kretprobe: %w", zap.Error(err)) + return err + } + + t.kretAccept, err = link.Kretprobe(acceptfn, objs.InetCskAcceptRet, nil) + if err != nil { + t.l.Error("opening kprobe: %w", zap.Error(err)) + return err + } + + t.kClose, err = link.Kprobe(closefn, objs.TcpClose, nil) + if err != nil { + t.l.Error("opening kprobe: %w", zap.Error(err)) + return err + } + + t.tcpv4Rdaccept, err = perf.NewReader(objs.Tcpv4accept, os.Getpagesize()) + if err != nil { + t.l.Error("creating perf event reader: %w", zap.Error(err)) + return err + } + + t.tcpv4Rdconnect, err = perf.NewReader(objs.Tcpv4connect, os.Getpagesize()) + if err != nil { + t.l.Error("creating perf event reader: %w", zap.Error(err)) + return err + } + + t.tcpv4Rdclose, err = perf.NewReader(objs.Tcpv4close, os.Getpagesize()) + if err != nil { + t.l.Error("creating perf event reader: %w", zap.Error(err)) + return err + } + + t.event = pluginEvent + return err +} + +func (t *tcpTracer) Start(ctx context.Context) error { + t.l.Info("Listening for tcp connect events...") + go waitOnEvent(t) + return nil +} + +func waitOnEvent(t *tcpTracer) { + tcpv4AcceptEvent := make(chan kprobeTcpv4event, 500) + tcpv4ConnectEvent := make(chan kprobeTcpv4event, 500) + tcpv4CloseEvent := make(chan kprobeTcpv4event, 500) + go readEvent(t, tcpv4AcceptEvent, 
t.tcpv4Rdaccept) + go readEvent(t, tcpv4ConnectEvent, t.tcpv4Rdconnect) + go readEvent(t, tcpv4CloseEvent, t.tcpv4Rdclose) + + for { + select { + case tcpv4AcceptData := <-tcpv4AcceptEvent: + notifyTcpV4Data(t, tcpv4AcceptData) + case tcpv4ConnectData := <-tcpv4ConnectEvent: + notifyTcpV4Data(t, tcpv4ConnectData) + case tcpv4CloseData := <-tcpv4CloseEvent: + notifyTcpV4Data(t, tcpv4CloseData) + } + } +} + +func (t *tcpTracer) Stop() error { + t.l.Info("Closing probes...") + t.kConnect.Close() + t.kretConnect.Close() + t.kretAccept.Close() + t.kClose.Close() + t.tcpv4Rdaccept.Close() + t.tcpv4Rdconnect.Close() + t.tcpv4Rdclose.Close() + return nil +} diff --git a/pkg/plugin/deprecated/tcptracer/tcptracer_test.go b/pkg/plugin/deprecated/tcptracer/tcptracer_test.go new file mode 100644 index 000000000..48c51e19b --- /dev/null +++ b/pkg/plugin/deprecated/tcptracer/tcptracer_test.go @@ -0,0 +1,62 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +//go:build exclude + +package tcptracer + +import ( + "encoding/binary" + "net" + "os" + "testing" + "time" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/plugin/api" + "github.com/stretchr/testify/require" +) + +func ip2int(ip net.IP) uint32 { + if len(ip) == 16 { + return binary.LittleEndian.Uint32(ip[12:16]) + } + return binary.LittleEndian.Uint32(ip) +} + +func TestTcpTracer(t *testing.T) { + l := log.SetupZapLogger(log.GetDefaultLogOpts()) + tt := &tcpTracer{l: l} + srcIP := "192.168.0.4" + dstIP := "192.168.0.5" + e := kprobeTcpv4event{ + Pid: 123, + Saddr: ip2int(net.ParseIP(srcIP)), + Daddr: ip2int(net.ParseIP(dstIP)), + Sport: 1234, + Dport: 3446, + Operation: uint16(CONNECT), + SentBytes: 456, + } + + event := make(chan api.PluginEvent, 500) + tt.event = event + notifyTcpV4Data(tt, e) + + var data api.PluginEvent + select { + case data = <-event: + var tcp *TcpV4Data + tcp = data.Event.(*TcpV4Data) + require.Exactly(t, Name, data.Name, "Expected:%s but got:%s", Name, data.Name) + require.Equal(t, e.Operation, uint16(tcp.Op), "Expected:%s but got:%s", Name, data.Name) + require.Equal(t, srcIP, tcp.LocalIP.String(), "Expected:%s but got:%s", srcIP, tcp.LocalIP.String()) + require.Equal(t, e.Sport, tcp.LocalPort, "Expected:%d but got:%d", e.Sport, tcp.LocalPort) + require.Equal(t, dstIP, tcp.RemoteIP.String(), "Expected:%s but got:%s", dstIP, tcp.RemoteIP.String()) + require.Equal(t, e.Dport, tcp.RemotePort, "Expected:%d but got:%d", e.Dport, tcp.RemotePort) + require.Equal(t, e.Pid, tcp.Pid, "Expected:%d but got:%d", e.Pid, tcp.Pid) + require.Equal(t, e.Ts, tcp.Timestamp, "Expected:%d but got:%d", e.Ts, tcp.Timestamp) + require.Equal(t, e.SentBytes, tcp.Sent, "Expected:%d but got:%d", e.SentBytes, tcp.Sent) + case <-time.After(time.Second * 3): + t.Fatal("Timedout. Event not received") + } +} diff --git a/pkg/plugin/deprecated/tcptracer/types.go b/pkg/plugin/deprecated/tcptracer/types.go new file mode 100644 index 000000000..fd6fd4fb1 --- /dev/null +++ b/pkg/plugin/deprecated/tcptracer/types.go @@ -0,0 +1,125 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
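notifyTcpV4Data and the test helper ip2int above both rely on the same convention: the kernel's __be32 addresses are read into little-endian uint32s, and writing those values back out little-endian restores the original network byte order. A small self-check of that assumption; it holds on little-endian hosts such as the x86 target these probes are compiled for.

    package main

    import (
        "encoding/binary"
        "fmt"
        "net"
    )

    func main() {
        // 192.168.0.4 as the kernel stores it: network byte order.
        wire := []byte{192, 168, 0, 4}

        // What BPF_CORE_READ_INTO leaves in a uint32 on a little-endian host.
        asU32 := binary.LittleEndian.Uint32(wire)

        // Re-emit the bytes the way notifyTcpV4Data does before calling net.IPv4.
        buf := make([]byte, 4)
        binary.LittleEndian.PutUint32(buf, asU32)
        fmt.Println(net.IPv4(buf[0], buf[1], buf[2], buf[3])) // 192.168.0.4
    }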
+//go:build exclude + +package tcptracer + +import ( + "context" + "fmt" + "net" + "strings" + "time" + + "github.com/microsoft/retina/pkg/deprecated/cache" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/plugin/api" + "github.com/microsoft/retina/pkg/utils" + "github.com/cilium/ebpf/link" + "github.com/cilium/ebpf/perf" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" +) + +const ( + Name api.PluginName = "tcptracer" + Proto string = "proto" + counterName string = "tcp%s_count" +) + +//nolint:unused +type tcpTracer struct { + l *log.ZapLogger + kConnect link.Link + kretConnect link.Link + kretAccept link.Link + kClose link.Link + tcpv4Rdconnect *perf.Reader + tcpv4Rdaccept *perf.Reader + tcpv4Rdclose *perf.Reader + event chan<- api.PluginEvent +} + +type Operation int + +type TcpV4Data struct { + Timestamp uint64 + Pid uint32 + ProcessName string + LocalIP net.IP + RemoteIP net.IP + LocalPort uint16 + RemotePort uint16 + Sent uint64 + Recv int32 + Op Operation +} + +const ( + CONNECT Operation = iota + 1 + ACCEPT + CLOSE + SEND + RECEIVE +) + +func (t *TcpV4Data) GetPluginEventAttributes(pluginname string, c *cache.Cache) []attribute.KeyValue { + attrs := make([]attribute.KeyValue, 0) + attrs = utils.GetPluginEventAttributes(attrs, pluginname, t.Op.EventString(), time.Now().String()) + attrs = utils.GetPluginEventLocalAttributes(attrs, c, t.LocalIP.String(), fmt.Sprint(t.LocalPort)) + attrs = utils.GetPluginEventRemoteAttributes(attrs, c, t.RemoteIP.String(), fmt.Sprint(t.RemotePort)) + attrs = append(attrs, + attribute.Key(Proto).String("TCP"), + ) + return attrs +} + +func (t *TcpV4Data) HandlePluginEventSignals(attr []attribute.KeyValue, m metric.Meter, tr trace.Tracer) { + cnter, err := m.Int64Counter(strings.ToLower(fmt.Sprintf(counterName, t.Op))) + if err == nil { + cnter.Add(context.TODO(), 1, attr...) + } + + // trace + _, span := tr.Start(context.Background(), string(Name)) + span.SetAttributes(attr...) + span.End() +} + +func (op Operation) String() string { + switch op { + case CONNECT: + return "CONNECT" + case ACCEPT: + return "ACCEPT" + case CLOSE: + return "CLOSE" + case SEND: + return "SEND" + case RECEIVE: + return "RECEIVE" + } + return "unknown" +} + +func (op Operation) EventString() string { + switch op { + case CONNECT: + return "tcpConnectEvent" + case ACCEPT: + return "tcpAcceptEvent" + case CLOSE: + return "tcpCloseEvent" + case SEND: + return "tcpSendEvent" + case RECEIVE: + return "tcpRecvEvent" + } + return "unknown" +} + +func (t TcpV4Data) String() string { + return fmt.Sprintf("Pid:%d PName: %s LocalAddr:%s LocalPort:%d RemoteAddr:%s RemotePort:%d Operation:%s Sent:%d Recv:%d", + t.Pid, t.ProcessName, t.LocalIP.String(), t.LocalPort, t.RemoteIP.String(), t.RemotePort, t.Op.String(), t.Sent, t.Recv) +} diff --git a/pkg/plugin/deprecated/weaveworkstracer/tcptracer.go b/pkg/plugin/deprecated/weaveworkstracer/tcptracer.go new file mode 100644 index 000000000..a6ee356b1 --- /dev/null +++ b/pkg/plugin/deprecated/weaveworkstracer/tcptracer.go @@ -0,0 +1,86 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
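The counterName constant in the file above is itself a format string, so HandlePluginEventSignals derives one counter per operation once the formatted name is lower-cased. A quick illustration of the names that produces; the snippet reuses the Operation type and counterName constant defined above and is not meant to be a standalone program.

    // Prints the counter name chosen for each operation:
    // tcpconnect_count, tcpaccept_count, tcpclose_count, tcpsend_count, tcpreceive_count
    for _, op := range []Operation{CONNECT, ACCEPT, CLOSE, SEND, RECEIVE} {
        fmt.Println(strings.ToLower(fmt.Sprintf(counterName, op)))
    }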
+//go:build exclude + +package tcptracer + +import ( + "context" + + "github.com/microsoft/retina/pkg/plugin/api" + "github.com/weaveworks/tcptracer-bpf/pkg/tracer" + "go.uber.org/zap" +) + +// TCPEventV4 handle tcpv4 packets +func (t *tracerInfo) TCPEventV4(e tracer.TcpV4) { + if e.Type == tracer.EventFdInstall { + return + } + + tcpEvent := &TcpData{ + SrcAddr: e.SAddr, + DstAddr: e.DAddr, + SrcPort: e.SPort, + DstPort: e.DPort, + ProcessName: e.Comm, + ProcessId: e.Pid, + Op: Operation(tracer.EventConnect.String()), + } + pluginEvent := api.PluginEvent{ + Name: Name, + Event: tcpEvent, + } + // notify event channel about packet + t.Event <- pluginEvent +} + +func (fl *tracerInfo) TCPEventV6(e tracer.TcpV6) { +} + +func (fl *tracerInfo) LostV4(count uint64) { +} + +func (fl *tracerInfo) LostV6(count uint64) { +} + +// NewTcpTracerPlugin Initialize logger object +func New(logger *zap.Logger) api.Plugin { + return &TcpTracerPlugin{ + l: logger, + } +} + +// Init intialize Tcp Tracer +func (ttp *TcpTracerPlugin) Init(pluginEvent chan<- api.PluginEvent) error { + var err error + ttp.l.Info("Initializing tcptracer plugin ...") + ttp.tracer = &tracerInfo{ + l: ttp.l, + } + ttp.tracer.t, err = tracer.NewTracer(ttp.tracer) + if err != nil { + ttp.l.Error("Failed to initialize tcp tracer:%w", zap.Error(err)) + return err + } + ttp.tracer.Event = pluginEvent + return nil +} + +func (ttp *TcpTracerPlugin) Start(ctx context.Context) error { + ttp.l.Info("Starting tcptracer plugin ...") + ttp.tracer.t.Start() + return nil +} + +func (ttp *TcpTracerPlugin) Stop() error { + ttp.l.Info("Stopping tcptracer plugin ...") + if ttp.tracer.t != nil { + ttp.tracer.t.Stop() + } + return nil +} + +func NewPluginFn(l *zap.Logger) api.Plugin { + return New(l) +} diff --git a/pkg/plugin/deprecated/weaveworkstracer/tcptracer_test.go b/pkg/plugin/deprecated/weaveworkstracer/tcptracer_test.go new file mode 100644 index 000000000..cc4d08529 --- /dev/null +++ b/pkg/plugin/deprecated/weaveworkstracer/tcptracer_test.go @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+//go:build exclude + +package tcptracer + +import ( + "net" + "testing" + "time" + + "github.com/microsoft/retina/pkg/plugin/api" + "github.com/stretchr/testify/require" + "github.com/weaveworks/tcptracer-bpf/pkg/tracer" + "go.uber.org/zap" +) + +func TestTcpTracerV4Event(t *testing.T) { + l, _ := zap.NewProduction() + tt := &tracerInfo{l: l} + e := tracer.TcpV4{ + Timestamp: 1234, + Type: tracer.EventConnect, + SAddr: net.ParseIP("192.168.1.4"), + DAddr: net.ParseIP("192.168.1.6"), + SPort: uint16(2345), + DPort: uint16(4567), + Pid: uint32(1234), + Comm: "UnitTest", + } + event := make(chan api.PluginEvent, 500) + tt.Event = event + tt.TCPEventV4(e) + + var data api.PluginEvent + select { + case data = <-event: + var tcpdata *TcpData + tcpdata = data.Event.(*TcpData) + require.Exactly(t, Name, data.Name, "Expected:%s but got:%s", Name, data.Name) + require.EqualValues(t, e.Type.String(), tcpdata.Op, "Expected:%s but got:%s", e.Type.String(), tcpdata.Op) + require.Equal(t, e.SAddr.String(), tcpdata.SrcAddr.String(), "Expected:%s but got:%s", e.SAddr.String(), tcpdata.SrcAddr.String()) + require.Equal(t, e.SPort, tcpdata.SrcPort, "Expected:%d but got:%d", e.SPort, tcpdata.SrcPort) + require.Equal(t, e.DAddr.String(), tcpdata.DstAddr.String(), "Expected:%s but got:%s", e.DAddr.String(), tcpdata.DstAddr.String()) + require.Equal(t, e.DPort, tcpdata.DstPort, "Expected:%d but got:%d", e.DPort, tcpdata.DstPort) + require.Equal(t, e.Pid, tcpdata.ProcessId, "Expected:%d but got:%d", e.Pid, tcpdata.ProcessId) + require.Equal(t, e.Comm, tcpdata.ProcessName, "Expected:%s but got:%s", e.Comm, tcpdata.ProcessName) + case <-time.After(time.Second * 3): + t.Fatal("Timedout. Event not received") + } +} diff --git a/pkg/plugin/deprecated/weaveworkstracer/types.go b/pkg/plugin/deprecated/weaveworkstracer/types.go new file mode 100644 index 000000000..d9c373b11 --- /dev/null +++ b/pkg/plugin/deprecated/weaveworkstracer/types.go @@ -0,0 +1,66 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+// This file is intended to define tcptracerplugin structure and interface +package tcptracer + +import ( + "fmt" + "net" + "time" + + "github.com/microsoft/retina/pkg/deprecated/cache" + "github.com/microsoft/retina/pkg/plugin/api" + "github.com/microsoft/retina/pkg/utils" + "github.com/weaveworks/tcptracer-bpf/pkg/tracer" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" +) + +const ( + Name api.PluginName = "tcptracer" +) + +type Operation string + +type TcpData struct { + ProcessName string + ProcessId uint32 + SrcAddr net.IP + DstAddr net.IP + SrcPort uint16 + DstPort uint16 + Op Operation +} + +func (t *TcpData) GetPluginEventAttributes(pluginname string, c *cache.Cache) []attribute.KeyValue { + attrs := make([]attribute.KeyValue, 0) + attrs = utils.GetPluginEventAttributes(attrs, pluginname, string(t.Op), time.Now().String()) + attrs = utils.GetPluginEventLocalAttributes(attrs, c, t.SrcAddr.String(), fmt.Sprint(t.SrcPort)) + attrs = utils.GetPluginEventRemoteAttributes(attrs, c, t.DstAddr.String(), fmt.Sprint(t.DstPort)) + return attrs +} + +func (t *TcpData) HandlePluginEventSignals(attr []attribute.KeyValue, m metric.Meter, tr trace.Tracer) { +} + +func (t TcpData) String() string { + return fmt.Sprintf("ProcessName:%s Pid:%d SrcAddr:%s SrcPort:%d DstAddr:%s DstPort:%d Operaiton:%s", + t.ProcessName, t.ProcessId, t.SrcAddr, t.SrcPort, t.DstAddr, t.DstPort, t.Op) +} + +//nolint:unused +type TcpTracerPlugin struct { + l *zap.Logger + tracer *tracerInfo +} + +//nolint:unused +type tracerInfo struct { + l *zap.Logger + // weaveworks tcp tracer + t *tracer.Tracer + // Plugin use this channel to send data to manager + Event chan<- api.PluginEvent +} diff --git a/pkg/plugin/dns/dns_linux.go b/pkg/plugin/dns/dns_linux.go new file mode 100644 index 000000000..c322bcc94 --- /dev/null +++ b/pkg/plugin/dns/dns_linux.go @@ -0,0 +1,151 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package dns + +import ( + "context" + "fmt" + "net" + "os" + "strings" + + kcfg "github.com/microsoft/retina/pkg/config" + "github.com/microsoft/retina/pkg/enricher" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/metrics" + "github.com/microsoft/retina/pkg/plugin/api" + "github.com/microsoft/retina/pkg/plugin/common" + "github.com/microsoft/retina/pkg/utils" + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" + "github.com/cilium/ebpf/rlimit" + "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/dns/tracer" + "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/dns/types" + "github.com/inspektor-gadget/inspektor-gadget/pkg/utils/host" + "go.uber.org/zap" +) + +func New(cfg *kcfg.Config) api.Plugin { + return &dns{ + cfg: cfg, + l: log.Logger().Named(string(Name)), + } +} + +func (d *dns) Name() string { + return string(Name) +} + +func (d *dns) Generate(ctx context.Context) error { + return nil +} + +func (d *dns) Compile(ctx context.Context) error { + return nil +} + +func (d *dns) Init() error { + if err := rlimit.RemoveMemlock(); err != nil { + d.l.Error("RemoveMemLock failed:%w", zap.Error(err)) + return err + } + + // Create tracer. In this case no parameters are passed. 
+ err := host.Init(host.Config{}) + tracer, err := tracer.NewTracer() + if err != nil { + d.l.Error("Failed to create tracer", zap.Error(err)) + return err + } + d.tracer = tracer + d.tracer.SetEventHandler(d.eventHandler) + d.pid = uint32(os.Getpid()) + d.l.Info("Initialized dns plugin") + return nil +} + +func (d *dns) Start(ctx context.Context) error { + if d.cfg.EnablePodLevel { + d.enricher = enricher.Instance() + if d.enricher == nil { + d.l.Warn("Failed to get enricher instance") + } + } + if err := d.tracer.Attach(d.pid); err != nil { + d.l.Error("Failed to attach tracer", zap.Error(err)) + return err + } + + <-ctx.Done() + return nil +} + +func (d *dns) Stop() error { + if d.tracer != nil { + d.tracer.Detach(d.pid) + d.tracer.Close() + } + d.l.Info("Stopped dns plugin") + return nil +} + +func (d *dns) SetupChannel(c chan *v1.Event) error { + d.externalChannel = c + return nil +} + +func (d *dns) eventHandler(event *types.Event) { + if event == nil { + return + } + d.l.Debug("Event received", zap.Any("event", event)) + + if event.Qr == types.DNSPktTypeQuery { + m = metrics.DNSRequestCounter + } else if event.Qr == types.DNSPktTypeResponse { + m = metrics.DNSResponseCounter + } else { + return + } + var dir uint32 + if event.PktType == "HOST" { + // Ingress. + dir = 2 + } else if event.PktType == "OUTGOING" { + // Egress. + dir = 3 + } else { + return + } + responses := strings.Join(event.Addresses, ",") + // Update basic metrics. + m.WithLabelValues(event.Rcode, event.QType, event.DNSName, responses, fmt.Sprintf("%d", event.NumAnswers)).Inc() + + if !d.cfg.EnablePodLevel { + return + } + + // Update advanced metrics. + f := utils.ToFlow(int64(event.Timestamp), net.ParseIP(event.SrcIP), + net.ParseIP(event.DstIP), uint32(event.SrcPort), uint32(event.DstPort), + uint8(common.ProtocolToFlow(event.Protocol)), + dir, utils.Verdict_DNS, 0) + utils.AddDnsInfo(f, string(event.Qr), common.RCodeToFlow(event.Rcode), event.DNSName, []string{event.QType}, event.NumAnswers, event.Addresses) + // d.l.Debug("DNS Flow", zap.Any("flow", f)) + + ev := (&v1.Event{ + Event: f, + Timestamp: f.Time, + }) + if d.enricher != nil { + d.enricher.Write(ev) + } + + // Send event to external channel. + if d.externalChannel != nil { + select { + case d.externalChannel <- ev: + default: + metrics.LostEventsCounter.WithLabelValues(utils.ExternalChannel, string(Name)).Inc() + } + } +} diff --git a/pkg/plugin/dns/dns_test.go b/pkg/plugin/dns/dns_test.go new file mode 100644 index 000000000..93a2d8915 --- /dev/null +++ b/pkg/plugin/dns/dns_test.go @@ -0,0 +1,253 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
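eventHandler above forwards enriched flows to the external channel with a non-blocking send, so a slow consumer is counted as a lost event instead of stalling the tracer callback. The pattern in isolation; the lost callback below is a stand-in for the LostEventsCounter increment, and the helper itself is hypothetical since the dns plugin inlines this logic.

    // trySend drops the event rather than blocking the eBPF event callback.
    // Assumes v1 is "github.com/cilium/cilium/pkg/hubble/api/v1".
    func trySend(out chan<- *v1.Event, ev *v1.Event, lost func()) {
        select {
        case out <- ev:
        default:
            lost() // e.g. metrics.LostEventsCounter.WithLabelValues(...).Inc()
        }
    }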
+ +//nolint:typecheck +package dns + +import ( + "context" + "errors" + "reflect" + "testing" + "time" + + "github.com/microsoft/retina/pkg/config" + "github.com/microsoft/retina/pkg/controllers/cache" + "github.com/microsoft/retina/pkg/enricher" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/metrics" + "github.com/microsoft/retina/pkg/plugin/common/mocks" + "github.com/microsoft/retina/pkg/pubsub" + "github.com/microsoft/retina/pkg/utils" + "github.com/cilium/cilium/api/v1/flow" + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" + "github.com/golang/mock/gomock" + "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/dns/types" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + "gotest.tools/v3/assert" +) + +func TestStop(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + d := &dns{ + l: log.Logger().Named(string(Name)), + pid: 1234, + } + // Check nil tracer. + d.Stop() + + // Check with tracer. + ctrl := gomock.NewController(t) + defer ctrl.Finish() + m := mocks.NewMockITracer(ctrl) + m.EXPECT().Detach(d.pid).Return(nil).Times(1) + m.EXPECT().Close().Times(1) + d.tracer = m + d.Stop() +} + +func TestStart(t *testing.T) { + ctxTimeout, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + + log.SetupZapLogger(log.GetDefaultLogOpts()) + + c := cache.New(pubsub.New()) + e := enricher.New(ctxTimeout, c) + e.Run() + defer e.Reader.Close() + + d := &dns{ + l: log.Logger().Named(string(Name)), + pid: 1234, + cfg: &config.Config{ + EnablePodLevel: true, + }, + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + m := mocks.NewMockITracer(ctrl) + m.EXPECT().Attach(d.pid).Return(nil).Times(1) + d.tracer = m + err := d.Start(ctxTimeout) + assert.Equal(t, err, nil) + if d.enricher == nil { + t.Fatal("enricher is nil") + } + + // Test error case. + expected := errors.New("Error") + m = mocks.NewMockITracer(ctrl) + m.EXPECT().Attach(d.pid).Return(expected).Times(1) + d.tracer = m + + err = d.Start(ctxTimeout) + assert.Error(t, err, expected.Error()) +} + +func TestMalformedEventHandler(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + d := &dns{ + l: log.Logger().Named(string(Name)), + } + + // Test nil event. + m = nil + d.eventHandler(nil) + assert.Equal(t, m, nil) + + // Test event with no Query type. + m = nil + event := &types.Event{ + Qr: "Z", + } + d.eventHandler(event) + assert.Equal(t, m, nil) +} + +func TestRequestEventHandler(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + metrics.InitializeMetrics() + + d := &dns{ + l: log.Logger().Named(string(Name)), + cfg: &config.Config{ + EnablePodLevel: true, + }, + } + + // Test event with Query type. + m = nil + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + event := &types.Event{ + Qr: "Q", + Rcode: "NOERROR", + QType: "A", + DNSName: "test.com", + Addresses: []string{}, + NumAnswers: 0, + PktType: "OUTGOING", + SrcIP: "1.1.1.1", + DstIP: "2.2.2.2", + SrcPort: 58, + DstPort: 8080, + Protocol: "TCP", + } + c := prometheus.NewCounter(prometheus.CounterOpts{}) + + // Basic metrics. + mockCV := metrics.NewMockICounterVec(ctrl) + mockCV.EXPECT().WithLabelValues(event.Rcode, event.QType, event.DNSName, "", "0").Return(c).Times(1) + before := value(c) + metrics.DNSRequestCounter = mockCV + + // Advanced metrics. 
+ mockEnricher := enricher.NewMockEnricherInterface(ctrl) + mockEnricher.EXPECT().Write(EventMatched( + utils.DNSType_QUERY, 0, event.DNSName, []string{event.QType}, 0, []string{}, + )).Times(1) + d.enricher = mockEnricher + + d.eventHandler(event) + after := value(c) + assert.Equal(t, after-before, float64(1)) +} + +func TestResponseEventHandler(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + metrics.InitializeMetrics() + + d := &dns{ + l: log.Logger().Named(string(Name)), + cfg: &config.Config{ + EnablePodLevel: true, + }, + } + + // Test event with Query type. + m = nil + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + event := &types.Event{ + Qr: "R", + Rcode: "NOERROR", + QType: "A", + DNSName: "test.com", + Addresses: []string{"1.1.1.1", "2.2.2.2"}, + NumAnswers: 2, + PktType: "HOST", + SrcIP: "1.1.1.1", + DstIP: "2.2.2.2", + SrcPort: 58, + DstPort: 8080, + Protocol: "TCP", + } + + // Basic metrics. + c := prometheus.NewCounter(prometheus.CounterOpts{}) + mockCV := metrics.NewMockICounterVec(ctrl) + mockCV.EXPECT().WithLabelValues(event.Rcode, event.QType, event.DNSName, "1.1.1.1,2.2.2.2", "2").Return(c).Times(1) + before := value(c) + metrics.DNSResponseCounter = mockCV + + // Advanced metrics. + mockEnricher := enricher.NewMockEnricherInterface(ctrl) + mockEnricher.EXPECT().Write(EventMatched( + utils.DNSType_RESPONSE, 0, event.DNSName, []string{event.QType}, 2, []string{"1.1.1.1", "2.2.2.2"}, + )).Times(1) + d.enricher = mockEnricher + + d.eventHandler(event) + after := value(c) + assert.Equal(t, after-before, float64(1)) +} + +func value(c prometheus.Counter) float64 { + m := &dto.Metric{} + c.Write(m) + + return m.Counter.GetValue() +} + +// Helpers. + +type EventMatcher struct { + qType utils.DNSType + rCode uint32 + query string + qTypes []string + numAnswers uint32 + ips []string +} + +func (m *EventMatcher) Matches(x interface{}) bool { + inputFlow := x.(*v1.Event).Event.(*flow.Flow) + expectedDns, expectedDnsType, expectedNumResponses := utils.GetDns(inputFlow) + return expectedDns != nil && + expectedDns.Rcode == m.rCode && + expectedDns.Query == m.query && + reflect.DeepEqual(expectedDns.Ips, m.ips) && + reflect.DeepEqual(expectedDns.Qtypes, m.qTypes) && + expectedDnsType == m.qType && + expectedNumResponses == m.numAnswers +} + +func (m *EventMatcher) String() string { + return "is anything" +} + +func EventMatched(qType utils.DNSType, rCode uint32, query string, qTypes []string, numAnswers uint32, ips []string) gomock.Matcher { + return &EventMatcher{ + qType: qType, + rCode: rCode, + query: query, + qTypes: qTypes, + numAnswers: numAnswers, + ips: ips, + } +} diff --git a/pkg/plugin/dns/types.go b/pkg/plugin/dns/types.go new file mode 100644 index 000000000..e2303bc9b --- /dev/null +++ b/pkg/plugin/dns/types.go @@ -0,0 +1,28 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+package dns + +import ( + kcfg "github.com/microsoft/retina/pkg/config" + "github.com/microsoft/retina/pkg/enricher" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/metrics" + "github.com/microsoft/retina/pkg/plugin/api" + "github.com/microsoft/retina/pkg/plugin/common" + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" +) + +const ( + Name api.PluginName = "dns" +) + +var m metrics.ICounterVec + +type dns struct { + cfg *kcfg.Config + l *log.ZapLogger + tracer common.ITracer + pid uint32 + enricher enricher.EnricherInterface + externalChannel chan *v1.Event +} diff --git a/pkg/plugin/dropreason/_cprog/drop_reason.c b/pkg/plugin/dropreason/_cprog/drop_reason.c new file mode 100644 index 000000000..cb7418c1e --- /dev/null +++ b/pkg/plugin/dropreason/_cprog/drop_reason.c @@ -0,0 +1,472 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +#include "vmlinux.h" +#include "bpf_helpers.h" +#include "bpf_core_read.h" +#include "bpf_tracing.h" +#include "bpf_endian.h" +#include "drop_reason.h" +#include "dynamic.h" +#include "retina_filter.c" + +char __license[] SEC("license") = "Dual MIT/GPL"; + +#define ETH_P_IP 0x0800 +#define ETH_P_IPV6 0x86DD +#define ETH_P_8021Q 0x8100 +#define ETH_P_ARP 0x0806 +#define TASK_COMM_LEN 16 +// TODO (Vamsi): Add top 100 dropped connections with LRU map + +// Ref: https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/if_packet.h#L26 +#define PACKET_HOST 0 // Incomming packets +#define PACKET_OUTGOING 4 // Outgoing packets + +typedef enum +{ + UNKNOWN_DIRECTION = 0, + INGRESS_KEY, + EGRESS_KEY, +} direction_type; + +struct metrics_map_key +{ + __u32 drop_type; + __u32 return_val; +}; +struct metrics_map_value +{ + __u64 count; + __u64 bytes; +}; + +struct packet +{ + __u32 src_ip; + __u32 dst_ip; + __u16 src_port; + __u16 dst_port; + __u8 proto; + __u64 skb_len; + direction_type direction; + struct metrics_map_key key; + __u64 ts; // timestamp in nanoseconds + // in_filtermap defines if a given packet is of interest to us + // and added to the filtermap. is this is set then dropreason + // will send a perf event along with the usual aggregation in metricsmap + bool in_filtermap; +}; +struct +{ + __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); + __uint(max_entries, 16384); +} dropreason_events SEC(".maps"); + +// Define const variables to avoid warnings. 
+const struct packet *unused __attribute__((unused)); + +struct +{ + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 16384); + __type(key, __u32); + __type(value, struct packet); +} natdrop_pids SEC(".maps"); + +struct +{ + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 16384); + __type(key, __u32); + __type(value, struct packet); +} drop_pids SEC(".maps"); + +struct +{ + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 16384); + __type(key, __u32); + __type(value, __u64); +} accept_pids SEC(".maps"); + +struct +{ + __uint(type, BPF_MAP_TYPE_PERCPU_HASH); + __uint(max_entries, 512); + __type(key, struct metrics_map_key); + __type(value, struct metrics_map_value); +} metrics_map SEC(".maps"); + +#define member_address(source_struct, source_member) \ + ({ \ + void *__ret; \ + __ret = (void *)(((char *)source_struct) + offsetof(typeof(*source_struct), source_member)); \ + __ret; \ + }) +#define member_read(destination, source_struct, source_member) \ + do \ + { \ + bpf_probe_read( \ + destination, \ + sizeof(source_struct->source_member), \ + member_address(source_struct, source_member)); \ + } while (0) + +void update_metrics_map(void *ctx, drop_reason_t drop_type, int ret_val, struct packet *p) +{ + struct metrics_map_value *entry, new_entry = {}; + struct metrics_map_key key = { + .drop_type = drop_type, + .return_val = ret_val}; + + entry = bpf_map_lookup_elem(&metrics_map, &key); + if (entry) + { + entry->count += 1; + entry->bytes += p->skb_len; + } + else + { + new_entry.count = 1; + new_entry.bytes = p->skb_len; + bpf_map_update_elem(&metrics_map, &key, &new_entry, 0); + } +// parse packet if advanced metrics are enabled +#ifdef ADVANCED_METRICS +#if ADVANCED_METRICS == 1 + if (p->in_filtermap) + { + struct metrics_map_key key2 = { + .drop_type = drop_type, + .return_val = ret_val}; + p->key = key2; + bpf_perf_event_output(ctx, &dropreason_events, BPF_F_CURRENT_CPU, p, sizeof(struct packet)); + }; +#endif +#endif +} + +static void get_packet_from_skb(struct packet *p, struct sk_buff *skb) +{ + if (!skb) + { + return; + } + // TODO parse direction like in packetforward + __u64 skb_len = 0; + member_read(&skb_len, skb, len); + p->skb_len = skb_len; + +#ifdef ADVANCED_METRICS +#if ADVANCED_METRICS == 1 + char *head; + __u16 nw_header, trans_header, eth_proto; + + member_read(&head, skb, head); + member_read(ð_proto, skb, protocol); + member_read(&nw_header, skb, network_header); + char *ip_header_address = head + nw_header; + + struct iphdr iphdr; + member_read(&trans_header, skb, transport_header); + bpf_probe_read(&iphdr, sizeof(iphdr), ip_header_address); + + // Check if the packet is of interest. 
+ #ifdef BYPASS_LOOKUP_IP_OF_INTEREST + #if BYPASS_LOOKUP_IP_OF_INTEREST == 0 + if (!lookup(iphdr.saddr) && !lookup(iphdr.daddr)) + return; + #endif + #endif + + p->in_filtermap = true; + p->src_ip = iphdr.saddr; + p->dst_ip = iphdr.daddr; + // get current timestamp in ns + p->ts = bpf_ktime_get_ns(); + + if (iphdr.protocol == IPPROTO_TCP) + { + struct tcphdr tcphdr; + char *tcphdraddr = head + trans_header; + bpf_probe_read(&tcphdr, sizeof(tcphdr), tcphdraddr); + p->src_port = bpf_htons(tcphdr.source); + p->dst_port = bpf_htons(tcphdr.dest); + p->proto = iphdr.protocol; + } + else if (iphdr.protocol == IPPROTO_UDP) + { + struct udphdr udphdr; + char *udphdraddr = head + trans_header; + bpf_probe_read(&udphdr, sizeof(udphdr), udphdraddr); + p->src_port = bpf_htons(udphdr.source); + p->dst_port = bpf_htons(udphdr.dest); + p->proto = iphdr.protocol; + } +#endif +#endif +} + +static void get_packet_from_sock(struct packet *p, struct sock *sk) +{ + // extract 5 tuple from pid + __u32 saddr; + __u32 daddr; + __u16 sport; + __u16 dport; + + BPF_CORE_READ_INTO(&saddr, sk, __sk_common.skc_rcv_saddr); + BPF_CORE_READ_INTO(&daddr, sk, __sk_common.skc_daddr); + BPF_CORE_READ_INTO(&dport, sk, __sk_common.skc_dport); + BPF_CORE_READ_INTO(&sport, sk, __sk_common.skc_num); + // Check if the packet is of interest. + #ifdef BYPASS_LOOKUP_IP_OF_INTEREST + #if BYPASS_LOOKUP_IP_OF_INTEREST == 0 + if (!lookup(saddr) && !lookup(daddr)) + return; + #endif + #endif + + // get current timestamp in ns + p->ts = bpf_ktime_get_ns(); + p->in_filtermap = true; + p->src_ip = saddr; + p->dst_ip = daddr; + p->dst_port = bpf_ntohs(dport); + p->src_port = bpf_ntohs(sport); +} + +/* + +This function will be saving the PID and length of skb it is working on. + +*/ + +SEC("kprobe/nf_hook_slow") +int BPF_KPROBE(nf_hook_slow, struct sk_buff *skb, struct nf_hook_state *state) +{ + if (!skb) + return 0; + + __u16 eth_proto; + + member_read(ð_proto, skb, protocol); + if (eth_proto != bpf_htons(ETH_P_IP)) + return 0; + + struct packet p; + __builtin_memset(&p, 0, sizeof(p)); + + p.in_filtermap = false; + p.skb_len = 0; + get_packet_from_skb(&p, skb); + + __u64 pid_tgid = bpf_get_current_pid_tgid(); + __u32 pid = pid_tgid >> 32; + bpf_map_update_elem(&drop_pids, &pid, &p, BPF_ANY); + return 0; +} + +/* +This function will look PID and the length of SKB it is working on. Then it checks +the return value of the function and update the metrics map accordingly. + +*/ + +SEC("kretprobe/nf_hook_slow") +int BPF_KRETPROBE(nf_hook_slow_ret, int retVal) +{ + __u64 pid_tgid = bpf_get_current_pid_tgid(); + __u32 pid = pid_tgid >> 32; + struct packet *p = bpf_map_lookup_elem(&drop_pids, &pid); + bpf_map_delete_elem(&drop_pids, &pid); + + if (!p) + { + return 0; + } + + if (retVal >= 0) + { + return 0; + } + + update_metrics_map(ctx, IPTABLE_RULE_DROP, 0, p); + return 0; +} + +// static __always_inline int +// exit_tcp_connect(struct pt_regs *ctx, int ret) + +/* +This function checks the return value of tcp_v4_connect and + update the metrics map accordingly. + + tcp_v4_connect does not have any lenth attached to it. 
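+ Since there is no skb here, the handler reports a zero-length packet: for
+ TCP_CONNECT_BASIC only the drop count is meaningful, not the byte count.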
+*/ + +SEC("kretprobe/tcp_v4_connect") +int BPF_KRETPROBE(tcp_v4_connect_ret, int retVal) +{ + if (retVal == 0) + { + return 0; + } + + struct packet p; + __builtin_memset(&p, 0, sizeof(p)); + + p.in_filtermap = false; + p.skb_len = 0; + + update_metrics_map(ctx, TCP_CONNECT_BASIC, retVal, &p); + return 0; +} + +SEC("kprobe/inet_csk_accept") +int BPF_KPROBE(inet_csk_accept, struct sock *sk, int flags, int *err, bool kern) +{ + /* + This function will save the reference value to error. + in kretprobe we look at the value we got back in that reference + */ + __u64 pid_tgid = bpf_get_current_pid_tgid(); + __u32 pid = pid_tgid >> 32; + __u64 err_ptr = (__u64)err; + bpf_map_update_elem(&accept_pids, &pid, &err_ptr, BPF_ANY); + return 0; +} + +SEC("kretprobe/inet_csk_accept") +int BPF_KRETPROBE(inet_csk_accept_ret, struct sock *sk) +{ + /* + //https://elixir.bootlin.com/linux/v5.4.156/source/net/ipv4/af_inet.c#L734 + //if accept returns empty sock, then this function errored. + + //errors: + //https://elixir.bootlin.com/linux/v5.4.156/source/include/uapi/asm-generic/errno-base.h#L26 + //TODO: use tracepoint + + */ + __u64 pid_tgid = bpf_get_current_pid_tgid(); + __u32 pid = pid_tgid >> 32; + __u64 *err_ptr = bpf_map_lookup_elem(&accept_pids, &pid); + bpf_map_delete_elem(&accept_pids, &pid); + + if (!err_ptr) + return 0; + + if (sk != NULL) + return 0; + + int err = (int)*err_ptr; + if (err >= 0) + return 0; + + struct packet p; + __builtin_memset(&p, 0, sizeof(p)); + + p.in_filtermap = false; + p.skb_len = 0; + +#ifdef ADVANCED_METRICS +#if ADVANCED_METRICS == 1 + get_packet_from_sock(&p, sk); +#endif +#endif + + update_metrics_map(ctx, TCP_ACCEPT_BASIC, err, &p); + return 0; +} + +SEC("kprobe/nf_nat_inet_fn") +int BPF_KPROBE(nf_nat_inet_fn, void *priv, struct sk_buff *skb, const struct nf_hook_state *state) +{ + if (!skb) + return 0; + + __u16 eth_proto; + + member_read(ð_proto, skb, protocol); + if (eth_proto != bpf_htons(ETH_P_IP)) + return 0; + + struct packet p; + __builtin_memset(&p, 0, sizeof(p)); + + p.in_filtermap = false; + p.skb_len = 0; + get_packet_from_skb(&p, skb); + + __u64 pid_tgid = bpf_get_current_pid_tgid(); + __u32 pid = pid_tgid >> 32; + bpf_map_update_elem(&natdrop_pids, &pid, &p, BPF_ANY); + return 0; +} + +SEC("kretprobe/nf_nat_inet_fn") +int BPF_KRETPROBE(nf_nat_inet_fn_ret, int retVal) +{ + __u64 pid_tgid = bpf_get_current_pid_tgid(); + __u32 pid = pid_tgid >> 32; + struct packet *p = bpf_map_lookup_elem(&natdrop_pids, &pid); + bpf_map_delete_elem(&natdrop_pids, &pid); + + if (!p) + return 0; + + if (retVal != NF_DROP) + { + return 0; + } + + update_metrics_map(ctx, IPTABLE_NAT_DROP, 0, p); + return 0; +} + +SEC("kprobe/__nf_conntrack_confirm") +int BPF_KPROBE(nf_conntrack_confirm, struct sk_buff *skb) +{ + if (!skb) + return 0; + + __u16 eth_proto; + + member_read(ð_proto, skb, protocol); + if (eth_proto != bpf_htons(ETH_P_IP)) + return 0; + + struct packet p; + __builtin_memset(&p, 0, sizeof(p)); + + p.in_filtermap = false; + p.skb_len = 0; + get_packet_from_skb(&p, skb); + + __u64 pid_tgid = bpf_get_current_pid_tgid(); + __u32 pid = pid_tgid >> 32; + bpf_map_update_elem(&natdrop_pids, &pid, &p, BPF_ANY); + return 0; +} + +SEC("kretprobe/__nf_conntrack_confirm") +int BPF_KRETPROBE(nf_conntrack_confirm_ret, int retVal) +{ + __u64 pid_tgid = bpf_get_current_pid_tgid(); + __u32 pid = pid_tgid >> 32; + struct packet *p = bpf_map_lookup_elem(&natdrop_pids, &pid); + bpf_map_delete_elem(&natdrop_pids, &pid); + + if (!p) + return 0; + + if (retVal != NF_DROP) + { + return 0; 
+ } + + update_metrics_map(ctx, CONNTRACK_ADD_DROP, retVal, p); + return 0; +} diff --git a/pkg/plugin/dropreason/_cprog/drop_reason.h b/pkg/plugin/dropreason/_cprog/drop_reason.h new file mode 100644 index 000000000..4a5f458ac --- /dev/null +++ b/pkg/plugin/dropreason/_cprog/drop_reason.h @@ -0,0 +1,19 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +#include "vmlinux.h" + +typedef enum +{ + IPTABLE_RULE_DROP = 0, + IPTABLE_NAT_DROP, + TCP_CONNECT_BASIC, + TCP_ACCEPT_BASIC, + TCP_CLOSE_BASIC, + CONNTRACK_ADD_DROP, + UNKNOWN_DROP, +} drop_reason_t; + +#define NF_DROP 0 + +// https://elixir.bootlin.com/linux/v5.4.156/source/include/uapi/asm-generic/errno-base.h#L26 diff --git a/pkg/plugin/dropreason/_cprog/dynamic.h b/pkg/plugin/dropreason/_cprog/dynamic.h new file mode 100644 index 000000000..80abbd931 --- /dev/null +++ b/pkg/plugin/dropreason/_cprog/dynamic.h @@ -0,0 +1,2 @@ +// Place holder header file that will be replaced by the actual header file during runtime +// DO NOT DELETE diff --git a/pkg/plugin/dropreason/_cprog/placeholder.go b/pkg/plugin/dropreason/_cprog/placeholder.go new file mode 100644 index 000000000..fd0225add --- /dev/null +++ b/pkg/plugin/dropreason/_cprog/placeholder.go @@ -0,0 +1,3 @@ +package cprog //nolint:all + +// This file is a placeholder to make Go include this directory when vendoring. diff --git a/pkg/plugin/dropreason/dropreason_linux.go b/pkg/plugin/dropreason/dropreason_linux.go new file mode 100644 index 000000000..271cf7cd9 --- /dev/null +++ b/pkg/plugin/dropreason/dropreason_linux.go @@ -0,0 +1,498 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package dropreason + +import ( + "context" + "errors" + "fmt" + "os" + "path" + "runtime" + "time" + "unsafe" + + kcfg "github.com/microsoft/retina/pkg/config" + "github.com/microsoft/retina/pkg/utils" + + "github.com/cilium/cilium/api/v1/flow" + hubblev1 "github.com/cilium/cilium/pkg/hubble/api/v1" + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/link" + "github.com/cilium/ebpf/perf" + "github.com/cilium/ebpf/rlimit" + "github.com/microsoft/retina/pkg/enricher" + "github.com/microsoft/retina/pkg/loader" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/metrics" + "github.com/microsoft/retina/pkg/plugin/api" + plugincommon "github.com/microsoft/retina/pkg/plugin/common" + "github.com/microsoft/retina/pkg/plugin/constants" + "go.uber.org/zap" + + _ "github.com/microsoft/retina/pkg/plugin/dropreason/_cprog" // nolint +) + +//go:generate go run github.com/cilium/ebpf/cmd/bpf2go@master -cc clang-14 -cflags "-g -O2 -Wall -D__TARGET_ARCH_${GOARCH} -Wall" -target ${GOARCH} -type metrics_map_value -type drop_reason_t -type packet kprobe ./_cprog/drop_reason.c -- -I../lib/_${GOARCH} -I../lib/common/libbpf/_src -I../filter/_cprog/ +const ( + nfHookSlowFn = "nf_hook_slow" + tcpConnectFn = "tcp_v4_connect" + intCskAcceptFn = "inet_csk_accept" + nfNatInetFn = "nf_nat_inet_fn" + nfConntrackConfirmFn = "__nf_conntrack_confirm" +) + +// New creates a new dropreason plugin. +// When opts.EnablePodLevel=false, the enricher will not be used. +func New(cfg *kcfg.Config) api.Plugin { + return &dropReason{ + cfg: cfg, + l: log.Logger().Named(string(Name)), + } +} + +// Plugin API implementation for packet forward. 
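+// Lifecycle: Generate writes dynamic.h, Compile builds the BPF object with
+// clang, Init loads the maps/programs and attaches the kprobes, Start runs the
+// metric loops, and Stop detaches and cleans up.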
+// Ref: github.com/microsoft/retina/pkg/plugin/api + +func (dr *dropReason) Name() string { + return string(Name) +} + +func (dr *dropReason) Generate(ctx context.Context) error { + // Get absolute path to this file during runtime. + _, filename, _, ok := runtime.Caller(0) + if !ok { + return errors.New("unable to get absolute path to this file") + } + dir := path.Dir(filename) + dynamicHeaderPath := fmt.Sprintf("%s/%s/%s", dir, bpfSourceDir, dynamicHeaderFileName) + i := 0 + if dr.cfg.EnablePodLevel { + i = 1 + } + + j := 0 + if dr.cfg.BypassLookupIPOfInterest { + dr.l.Logger.Info("Bypassing lookup IP of interest") + j = 1 + } + st := fmt.Sprintf("#define ADVANCED_METRICS %d \n#define BYPASS_LOOKUP_IP_OF_INTEREST %d \n", i, j) + err := loader.WriteFile(ctx, dynamicHeaderPath, st) + if err != nil { + dr.l.Error("Error writing dynamic header", zap.Error(err)) + return err + } + + dr.l.Debug("DropReason header generated at", zap.String("path", dynamicHeaderPath)) + return nil +} + +// Compile should be able to compile the eBPF source code during runtime. +func (dr *dropReason) Compile(ctx context.Context) error { + // Get the absolute path to this file during runtime. + dir, err := absPath() + if err != nil { + return err + } + + // Generate dynamic header file. + if err := dr.Generate(ctx); err != nil { + dr.l.Error("failed to generate DropReason dynamic header:%w", zap.Error(err)) + } + + bpfSourceFile := fmt.Sprintf("%s/%s/%s", dir, bpfSourceDir, bpfSourceFileName) + bpfOutputFile := fmt.Sprintf("%s/%s", dir, bpfObjectFileName) + + arch := runtime.GOARCH + includeDir := fmt.Sprintf("-I%s/../lib/_%s", dir, arch) + filterDir := fmt.Sprintf("-I%s/../filter/_cprog/", dir) + libbpfDir := fmt.Sprintf("-I%s/../lib/common/libbpf/_src", dir) + targetArch := "-D__TARGET_ARCH_x86" + if arch == "arm64" { + targetArch = "-D__TARGET_ARCH_arm64" + } + // Keep target as bpf, otherwise clang compilation yields bpf object that elf reader cannot load. + err = loader.CompileEbpf(ctx, "-target", "bpf", "-Wall", targetArch, "-g", "-O2", "-c", bpfSourceFile, "-o", bpfOutputFile, includeDir, libbpfDir, filterDir) + if err != nil { + return err + } + dr.l.Info("DropReason metric compiled") + return nil +} + +func (dr *dropReason) Init() error { + var err error + + if err = rlimit.RemoveMemlock(); err != nil { + dr.l.Error("RemoveMemLock failed:%w", zap.Error(err)) + return err + } + + // Get the absolute path to this file during runtime. 
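+	// The compiled BPF object (kprobe_bpf.o) is produced next to this source
+	// file by Compile, so resolve paths relative to this file rather than the
+	// process working directory.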
+ dir, err := absPath() + if err != nil { + return err + } + + bpfOutputFile := fmt.Sprintf("%s/%s", dir, bpfObjectFileName) + + objs := &kprobeObjects{} //nolint:typecheck + spec, err := ebpf.LoadCollectionSpec(bpfOutputFile) + if err != nil { + return err + } + + // TODO remove the opts + if err := spec.LoadAndAssign(objs, &ebpf.CollectionOptions{ + Programs: ebpf.ProgramOptions{ + LogLevel: 2, + }, + Maps: ebpf.MapOptions{ + PinPath: constants.FilterMapPath, + }, + }); err != nil { + dr.l.Error("Error loading objects: %w", zap.Error(err)) + return err + } + + // read perf map + dr.reader, err = plugincommon.NewPerfReader(dr.l, objs.DropreasonEvents, perCPUBuffer, 1) + if err != nil { + dr.l.Error("Error NewReader: %w", zap.Error(err)) + return err + } + + dr.KNfHook, err = link.Kprobe(nfHookSlowFn, objs.NfHookSlow, nil) + if err != nil { + dr.l.Error("opening kprobe: %w", zap.Error(err)) + return err + } + + dr.KRetnfhook, err = link.Kretprobe(nfHookSlowFn, objs.NfHookSlowRet, nil) + if err != nil { + dr.l.Error("opening kretprobe: %w", zap.Error(err)) + return err + } + + dr.KRetTCPConnect, err = link.Kretprobe(tcpConnectFn, objs.TcpV4ConnectRet, nil) + if err != nil { + dr.l.Error("opening kretprobe: %w", zap.Error(err)) + return err + } + + dr.KTCPAccept, err = link.Kretprobe(intCskAcceptFn, objs.InetCskAccept, nil) + if err != nil { + dr.l.Error("opening kretprobe: %w", zap.Error(err)) + return err + } + + dr.KRetTCPAccept, err = link.Kretprobe(intCskAcceptFn, objs.InetCskAcceptRet, nil) + if err != nil { + dr.l.Error("opening kretprobe: %w", zap.Error(err)) + return err + } + + dr.KNfNatInet, err = link.Kretprobe(nfNatInetFn, objs.NfNatInetFn, nil) + if err != nil { + // TODO: remove this check once we get this working on Mariner OS. + if errors.Is(err, os.ErrNotExist) { + dr.l.Warn("nf_nat_inet_fn not found, skipping attaching kretprobe to it. This may impact the drop reason metrics.") + } else { + dr.l.Error("opening kretprobe: %w", zap.Error(err)) + return err + } + } + + dr.KRetNfNatInet, err = link.Kretprobe(nfNatInetFn, objs.NfNatInetFnRet, nil) + if err != nil { + // TODO: remove this check once we get this working on Mariner OS. + if errors.Is(err, os.ErrNotExist) { + dr.l.Warn("nf_nat_inet_fn_ret not found, skipping attaching kretprobe to it. This may impact the drop reason metrics.") + } else { + dr.l.Error("opening kretprobe: %w", zap.Error(err)) + return err + } + } + + dr.KNfConntrackConfirm, err = link.Kprobe(nfConntrackConfirmFn, objs.NfConntrackConfirm, nil) + if err != nil { + // TODO: remove this check once we get this working on Mariner OS. + if errors.Is(err, os.ErrNotExist) { + dr.l.Warn("nf_conntrack_confirm not found, skipping attaching kprobe to it. This may impact the drop reason metrics.") + } else { + dr.l.Error("opening kprobe: %w", zap.Error(err)) + return err + } + } + + dr.KRetNfConntrackConfirm, err = link.Kretprobe(nfConntrackConfirmFn, objs.NfConntrackConfirmRet, nil) + if err != nil { + // TODO: remove this check once we get this working on Mariner OS. + if errors.Is(err, os.ErrNotExist) { + dr.l.Warn("nf_conntrack_confirm_ret not found, skipping attaching kretprobe to it. 
This may impact the drop reason metrics.") + } else { + dr.l.Error("opening kretprobe: %w", zap.Error(err)) + return err + } + } + + dr.metricsMapData = objs.MetricsMap + return nil +} + +func (dr *dropReason) Start(ctx context.Context) error { + dr.isRunning = true + dr.l.Info("Start listening for drop reason events...") + + if dr.cfg.EnablePodLevel { + // Setup records channel. + dr.recordsChannel = make(chan perf.Record, buffer) + + dr.l.Info("setting up enricher since pod level is enabled") + // Setup enricher. + dr.enricher = enricher.Instance() + if dr.enricher == nil { + dr.l.Warn("Retina enricher is nil") + } + } else { + dr.l.Info("will not set up enricher since pod level is disabled") + } + + return dr.run(ctx) +} + +func (dr *dropReason) SetupChannel(ch chan *hubblev1.Event) error { + dr.externalChannel = ch + return nil +} + +// Helper function that accepts an IMap interface and returns an *ebpf.MapIterator +// Added to facilitate unit tests +func convertIMapToIMapIterator(inMap IMap) IMapIterator { + return inMap.Iterate() +} + +// Assign the helper function to a variable, this will make it easier to mock in unit tests +var iMapIterator func(IMap) IMapIterator = convertIMapToIMapIterator + +func (dr *dropReason) run(ctx context.Context) error { + go dr.readBasicMetricsData(ctx) + + if dr.cfg.EnablePodLevel { + for i := 0; i < workers; i++ { + dr.wg.Add(1) + go dr.processRecord(ctx, i) + } + // run advanced group here. + // Don't add it to the wait group because we don't want to wait for it. + // The perf reader Read call blocks until there is data available in the perf buffer. + // That call is unblocked when Reader is closed. + go dr.readAdvancedMetricsData(ctx) + } + + <-ctx.Done() + // Wait for all workers to finish. + // Only relevant when pod level is enabled. 
+ dr.wg.Wait() + + dr.l.Info("Context is done, stopping DropReason plugin") + return nil +} + +func (dr *dropReason) readBasicMetricsData(ctx context.Context) { + var dataKey dropMetricKey + var dataValue dropMetricValues + ticker := time.NewTicker(dr.cfg.MetricsInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + dr.l.Info("Context is done, dropreason basic metrics loop will stop running") + return + case <-ticker.C: + var iter IMapIterator + iter = iMapIterator(dr.metricsMapData) + if err := iter.Err(); err != nil { + dr.l.Error("Error while reading metrics map...", zap.String("iter error", err.Error())) + } + for iter.Next(&dataKey, &dataValue) { + dr.processMapValue(dataKey, dataValue) + } + + // TODO manage deletiong of old entries + // If we start deleting keys, we need to change the metric add logic in DropMetricAdd + } + } +} + +func (dr *dropReason) readAdvancedMetricsData(ctx context.Context) { + for { + select { + case <-ctx.Done(): + dr.l.Info("Context is done, dropreason advanced metrics loop will stop running") + return + default: + if err := dr.readEventArrayData(); err != nil { + dr.l.Error("Error reading event array data", zap.Error(err)) + } + } + } +} + +func (dr *dropReason) processRecord(ctx context.Context, id int) { + dr.l.Debug("Starting worker", zap.Int("id", id)) + defer dr.wg.Done() + for { + select { + case <-ctx.Done(): + dr.l.Info("Context is done, dropreason worker will stop running", zap.Int("id", id)) + return + case record := <-dr.recordsChannel: + bpfEvent := (*kprobePacket)(unsafe.Pointer(&record.RawSample[0])) //nolint:typecheck + sourcePortShort := uint32(utils.HostToNetShort(bpfEvent.SrcPort)) + destinationPortShort := uint32(utils.HostToNetShort(bpfEvent.DstPort)) + dropKey := (dropMetricKey)(bpfEvent.Key) + + fl := utils.ToFlow( + int64(bpfEvent.Ts), + utils.Int2ip(bpfEvent.SrcIp).To4(), // Precautionary To4() call. + utils.Int2ip(bpfEvent.DstIp).To4(), // Precautionary To4() call. + sourcePortShort, + destinationPortShort, + bpfEvent.Proto, + 2, // drop reason packet doesn't have a direction yet, so we set it to 2 + flow.Verdict_DROPPED, + dropKey.DropType, + ) + if fl == nil { + dr.l.Warn("Could not convert bpfEvent to flow", zap.Any("bpfEvent", bpfEvent)) + continue + } + fl.DropReasonDesc = flow.DropReason(bpfEvent.Key.DropType) + utils.AddPacketSize(fl, bpfEvent.SkbLen) + + // This is only for development purposes. + // Removing this makes logs way too chatter-y. + dr.l.Debug("DropReason Packet Received", zap.Any("flow", fl), zap.Any("Raw Bpf Event", bpfEvent), zap.Uint32("drop type", bpfEvent.Key.DropType)) + + // Write the event to the enricher. + ev := &hubblev1.Event{ + Event: fl, + Timestamp: fl.Time, + } + if dr.enricher != nil { + dr.enricher.Write(ev) + } + + // Send event to external channel. 
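+			// Non-blocking send: when the external channel is full the event is
+			// dropped and counted in the lost-events metric rather than blocking
+			// this worker.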
+ if dr.externalChannel != nil { + select { + case dr.externalChannel <- ev: + default: + metrics.LostEventsCounter.WithLabelValues(utils.ExternalChannel, string(Name)).Inc() + } + } + } + } +} + +func (dr *dropReason) readEventArrayData() error { + record, err := dr.reader.Read() + if err != nil { + if errors.Is(err, perf.ErrClosed) { + dr.l.Warn("Perf array is empty") + // nothing to do, we're done + return nil + } else { + dr.l.Error("Error reading perf array", zap.Error(err)) + return fmt.Errorf("Error reading perf array") + } + } + + if record.LostSamples > 0 { + // dr.l.Warn("Lost samples in drop reason plugin", zap.Uint64("lost samples", record.LostSamples)) + metrics.LostEventsCounter.WithLabelValues(utils.Kernel, string(Name)).Add(float64(record.LostSamples)) + return nil + } + + select { + case dr.recordsChannel <- record: + dr.l.Debug("Record sent to channel", zap.Any("record", record)) + default: + // dr.l.Warn("Channel is full, dropping record", zap.Any("record", record)) + metrics.LostEventsCounter.WithLabelValues(utils.BufferedChannel, string(Name)).Inc() + } + + return nil +} + +func (dr *dropReason) processMapValue(dataKey dropMetricKey, dataValue dropMetricValues) { + pktCount, pktBytes := dataValue.getPktCountAndBytes() + + dr.l.Debug("DATA From the DropReason Map", zap.String("Droptype", dataKey.getType()), + zap.Uint32("Return Val", dataKey.ReturnVal), + zap.Int("DropCount", int(pktCount)), + zap.Int("DropBytes", int(pktBytes))) + + dr.dropMetricAdd(dataKey.getType(), dataKey.getDirection(), pktCount, pktBytes) +} + +func (dr *dropReason) Stop() error { + if !dr.isRunning { + return nil + } + dr.l.Info("Closing drop reason probes...") + if dr.KNfHook != nil { + dr.KNfHook.Close() + } + if dr.KRetnfhook != nil { + dr.KRetnfhook.Close() + } + if dr.KRetTCPConnect != nil { + dr.KRetTCPConnect.Close() + } + if dr.KRetTCPAccept != nil { + dr.KRetTCPAccept.Close() + } + if dr.KTCPAccept != nil { + dr.KTCPAccept.Close() + } + if dr.metricsMapData != nil { + dr.metricsMapData.Close() + } + + if dr.reader != nil { + if err := dr.reader.Close(); err != nil { + dr.l.Error("Error closing perf reader", zap.Error(err)) + } + } + + // Close records channel. + // At this point, all workers should have exited, + // as well as the producer of the records channel. + if dr.recordsChannel != nil { + close(dr.recordsChannel) + dr.l.Debug("Closed records channel") + } + + dr.l.Info("Exiting DropReason Stop...") + dr.isRunning = false + return nil +} + +func (dr *dropReason) dropMetricAdd(reason string, direction string, count float64, bytes float64) { + metrics.DropCounter.WithLabelValues(reason, direction).Set(float64(count)) + metrics.DropBytesCounter.WithLabelValues(reason, direction).Set(float64(bytes)) +} + +// Helper functions. + +// absPath returns the absolute path to the directory where this file resides. +func absPath() (string, error) { + _, filename, _, ok := runtime.Caller(0) + if !ok { + return "", errors.New("failed to determine current file path") + } + dir := path.Dir(filename) + return dir, nil +} diff --git a/pkg/plugin/dropreason/dropreason_test.go b/pkg/plugin/dropreason/dropreason_test.go new file mode 100644 index 000000000..ee2713678 --- /dev/null +++ b/pkg/plugin/dropreason/dropreason_test.go @@ -0,0 +1,477 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package dropreason + +import ( + "context" + "errors" + "fmt" + "os" + "path" + "runtime" + "testing" + "time" + + kcfg "github.com/microsoft/retina/pkg/config" + + "github.com/microsoft/retina/pkg/enricher" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/metrics" + mocks "github.com/microsoft/retina/pkg/plugin/dropreason/mocks" + "github.com/cilium/ebpf/perf" + "github.com/golang/mock/gomock" + dto "github.com/prometheus/client_model/go" + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" +) + +var ( + cfgPodLevelEnabled = &kcfg.Config{ + MetricsInterval: 1 * time.Second, + EnablePodLevel: true, + BypassLookupIPOfInterest: true, + } + cfgPodLevelDisabled = &kcfg.Config{ + MetricsInterval: 1 * time.Second, + EnablePodLevel: false, + } +) + +func TestStop(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + p := &dropReason{ + cfg: cfgPodLevelEnabled, + l: log.Logger().Named(string(Name)), + } + err := p.Stop() + if err != nil { + t.Fatalf("Expected no error") + } + if p.isRunning { + t.Fatalf("Expected isRunning to be false") + } + + p.isRunning = true + err = p.Stop() + if err != nil { + t.Fatalf("Expected no error") + } + if p.isRunning { + t.Fatalf("Expected isRunning to be false") + } +} + +func TestShutdown(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + p := &dropReason{ + cfg: &kcfg.Config{ + MetricsInterval: 100 * time.Second, + EnablePodLevel: false, + }, + l: log.Logger().Named(string(Name)), + } + + ctx, cancel := context.WithCancel(context.Background()) + g, errctx := errgroup.WithContext(ctx) + + g.Go(func() error { + return p.Start(errctx) + }) + + time.Sleep(1 * time.Second) + cancel() + err := g.Wait() + require.NoError(t, err) +} + +func TestCompile(t *testing.T) { + takeBackup() + defer restoreBackup() + + log.SetupZapLogger(log.GetDefaultLogOpts()) + p := &dropReason{ + cfg: cfgPodLevelEnabled, + l: log.Logger().Named(string(Name)), + } + dir, _ := absPath() + expectedOutputFile := fmt.Sprintf("%s/%s", dir, bpfObjectFileName) + + err := os.Remove(expectedOutputFile) + if err != nil && !errors.Is(err, os.ErrNotExist) { + t.Fatalf("Expected no error. Error: %+v", err) + } + + err = p.Compile(context.Background()) + if err != nil { + t.Fatalf("Expected no error. 
Error: %+v", err) + } + if _, err := os.Stat(expectedOutputFile); errors.Is(err, os.ErrNotExist) { + t.Fatalf("File %+v doesn't exist", expectedOutputFile) + } +} + +func TestProcessMapValue(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + metrics.InitializeMetrics() + dr := &dropReason{ + cfg: cfgPodLevelEnabled, + l: log.Logger().Named(string(Name)), + } + + testMetricKey := dropMetricKey{DropType: 1, ReturnVal: 2} + testMetricValues := dropMetricValues{{Count: 10, Bytes: 100}} + + dr.processMapValue(testMetricKey, testMetricValues) + + // check if the metrics are updated + reason := testMetricKey.getType() + direction := testMetricKey.getDirection() + + dropCount := &dto.Metric{} + err := metrics.DropCounter.WithLabelValues(reason, direction).Write(dropCount) + require.Nil(t, err, "Expected no error but got: %w", err) + + dropBytes := &dto.Metric{} + err = metrics.DropBytesCounter.WithLabelValues(reason, direction).Write(dropBytes) + require.Nil(t, err, "Expected no error but got: %w", err) + + dropCountValue := *dropCount.Gauge.Value + dropBytesValue := *dropBytes.Gauge.Value + + require.Equal(t, float64(testMetricValues[0].Count), dropCountValue, "Expected drop count to be %d but got %d", float64(testMetricValues[0].Count), dropCountValue) + require.Equal(t, float64(testMetricValues[0].Bytes), dropBytesValue, "Expected drop bytes to be %d but got %d", float64(testMetricValues[0].Bytes), dropBytesValue) +} + +func TestDropReasonRun_Error(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockedMap := mocks.NewMockIMap(ctrl) + mockedMapIterator := mocks.NewMockIMapIterator(ctrl) + + // reasign helper function so that it returns the mockedMapIterator + iMapIterator = func(x IMap) IMapIterator { + return mockedMapIterator + } + mockedMapIterator.EXPECT().Err().Return(errors.New("test error")).MinTimes(1) + mockedMapIterator.EXPECT().Next(gomock.Any(), gomock.Any()).Return(false).MinTimes(1) + + // Create drop reason instance + dr := &dropReason{ + cfg: cfgPodLevelDisabled, + l: log.Logger().Named(string(Name)), + metricsMapData: mockedMap, + } + + // create a ticker with a short interval for testing purposes + ticker := time.NewTicker(1 * time.Second) + + // Create a context with a short timeout for testing purposes + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + + // Start the drop reason routine in a goroutine + go func() { + if err := dr.run(ctx); err != nil { + t.Fatalf("unexpected error: %v", err) + } + }() + + // Wait for a short period of time for the routine to start + time.Sleep(2 * time.Second) + + cancel() + ticker.Stop() +} + +func TestDropReasonRun(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockedMap := mocks.NewMockIMap(ctrl) + mockedMapIterator := mocks.NewMockIMapIterator(ctrl) + mockedPerfReader := mocks.NewMockIPerfReader(ctrl) + menricher := enricher.NewMockEnricherInterface(ctrl) //nolint:typecheck + + // reasign helper function so that it returns the mockedMapIterator + iMapIterator = func(x IMap) IMapIterator { + return mockedMapIterator + } + mockedMapIterator.EXPECT().Err().Return(nil).MinTimes(1) + mockedMapIterator.EXPECT().Next(gomock.Any(), gomock.Any()).Return(false).MinTimes(1) + + mockedPerfRecord := perf.Record{ + CPU: 0, + RawSample: []byte{0x01, 0x02, 0x03}, + LostSamples: 0, + } + mockedPerfReader.EXPECT().Read().Return(mockedPerfRecord, nil).MinTimes(1) + + // 
Create drop reason instance + dr := &dropReason{ + cfg: cfgPodLevelEnabled, + l: log.Logger().Named(string(Name)), + metricsMapData: mockedMap, + reader: mockedPerfReader, + enricher: menricher, + recordsChannel: make(chan perf.Record, buffer), + } + menricher.EXPECT().Write(gomock.Any()).MinTimes(1) + + // Create a context with a short timeout for testing purposes + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + + // create a ticker with a short interval for testing purposes + ticker := time.NewTicker(2 * time.Second) + + // Start the drop reason routine in a goroutine + go func() { + if err := dr.run(ctx); err != nil { + t.Fatalf("unexpected error: %v", err) + } + }() + + // Wait for a short period of time for the routine to start + time.Sleep(2 * time.Second) + + cancel() + ticker.Stop() +} + +func TestDropReasonReadDataPodLevelEnabled(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockedMap := mocks.NewMockIMap(ctrl) + mockedPerfReader := mocks.NewMockIPerfReader(ctrl) + menricher := enricher.NewMockEnricherInterface(ctrl) //nolint:typecheck + + // mock perf reader record + mockedPerfRecord := perf.Record{ + CPU: 0, + RawSample: []byte{0x01, 0x02, 0x03}, + LostSamples: 0, + } + mockedPerfReader.EXPECT().Read().Return(mockedPerfRecord, nil).MinTimes(1) + menricher.EXPECT().Write(gomock.Any()).MinTimes(1) + + // Create drop reason instance + dr := &dropReason{ + cfg: cfgPodLevelEnabled, + l: log.Logger().Named(string(Name)), + metricsMapData: mockedMap, + reader: mockedPerfReader, + enricher: menricher, + recordsChannel: make(chan perf.Record, buffer), + } + + // Create a context with a short timeout for testing purposes + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + + // Start the drop reason routine in a goroutine + go func() { + dr.readEventArrayData() + }() + + go func() { + dr.wg.Add(1) + dr.processRecord(ctx, 0) + }() + + // Wait for a short period of time for the routine to start + // time.Sleep(2 * time.Second) + <-ctx.Done() +} + +func TestDropReasonReadData_WithEmptyPerfArray(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockedMap := mocks.NewMockIMap(ctrl) + mockedPerfReader := mocks.NewMockIPerfReader(ctrl) + + // mock perf reader record + mockedPerfRecord := perf.Record{ + CPU: 0, + RawSample: []byte{}, + LostSamples: 0, + } + mockedPerfReader.EXPECT().Read().Return(mockedPerfRecord, perf.ErrClosed).MinTimes(1) + + // Create drop reason instance + dr := &dropReason{ + cfg: cfgPodLevelEnabled, + l: log.Logger().Named(string(Name)), + metricsMapData: mockedMap, + reader: mockedPerfReader, + } + + // Create a context with a short timeout for testing purposes + _, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + + // Start the drop reason routine in a goroutine + go func() { + dr.readEventArrayData() + }() + + // Wait for a short period of time for the routine to start + time.Sleep(2 * time.Second) +} + +func TestDropReasonReadData_WithPerfArrayLostSamples(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockedMap := mocks.NewMockIMap(ctrl) + mockedPerfReader := mocks.NewMockIPerfReader(ctrl) + + // mock perf reader record + mockedPerfRecord := perf.Record{ + CPU: 0, + RawSample: []byte{0x01, 0x02, 0x03}, + LostSamples: 3, + } + 
mockedPerfReader.EXPECT().Read().Return(mockedPerfRecord, nil).MinTimes(1) + + // Create drop reason instance + dr := &dropReason{ + cfg: cfgPodLevelEnabled, + l: log.Logger().Named(string(Name)), + metricsMapData: mockedMap, + reader: mockedPerfReader, + } + + // Create a context with a short timeout for testing purposes + _, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + + // Start the drop reason routine in a goroutine + go func() { + dr.readEventArrayData() + }() + + // Wait for a short period of time for the routine to start + time.Sleep(2 * time.Second) +} + +func TestDropReasonReadData_WithUnknownError(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockedMap := mocks.NewMockIMap(ctrl) + mockedPerfReader := mocks.NewMockIPerfReader(ctrl) + + // mock perf reader record + mockedPerfRecord := perf.Record{ + CPU: 0, + RawSample: []byte{0x01, 0x02, 0x03}, + LostSamples: 3, + } + mockedPerfReader.EXPECT().Read().Return(mockedPerfRecord, errors.New("Unknown Error")).MinTimes(1) + + // Create drop reason instance + dr := &dropReason{ + cfg: cfgPodLevelEnabled, + l: log.Logger().Named(string(Name)), + metricsMapData: mockedMap, + reader: mockedPerfReader, + } + + // Create a context with a short timeout for testing purposes + _, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + + // Start the drop reason routine in a goroutine + go func() { + dr.readEventArrayData() + }() + + // Wait for a short period of time for the routine to start + time.Sleep(2 * time.Second) +} + +func TestDropReasonGenerate(t *testing.T) { + takeBackup() + defer restoreBackup() + + log.SetupZapLogger(log.GetDefaultLogOpts()) + // Get the directory of the current test file. + _, filename, _, ok := runtime.Caller(0) + if !ok { + t.Fatal("failed to determine test file path") + } + currDir := path.Dir(filename) + dynamicHeaderPath := fmt.Sprintf("%s/%s/%s", currDir, bpfSourceDir, dynamicHeaderFileName) + + // Instantiate the dropReason struct with a mocked logger and context. + dr := &dropReason{ + cfg: cfgPodLevelEnabled, + l: log.Logger().Named(string(Name)), + } + ctx := context.Background() + + // Call the Generate function and check if it returns an error. + if err := dr.Generate(ctx); err != nil { + t.Fatalf("failed to generate DropReason header: %v", err) + } + + // Verify that the dynamic header file was created in the expected location and contains the expected contents. + if _, err := os.Stat(dynamicHeaderPath); os.IsNotExist(err) { + t.Fatalf("dynamic header file does not exist: %v", err) + } + + expectedContents := "#define ADVANCED_METRICS 1 \n#define BYPASS_LOOKUP_IP_OF_INTEREST 1 \n" + actualContents, err := os.ReadFile(dynamicHeaderPath) + if err != nil { + t.Fatalf("failed to read dynamic header file: %v", err) + } + if string(actualContents) != expectedContents { + t.Errorf("unexpected dynamic header file contents: got %q, want %q", string(actualContents), expectedContents) + } +} + +// Helpers. +func takeBackup() { + // Get the directory of the current test file. + _, filename, _, ok := runtime.Caller(0) + if !ok { + panic("failed to determine test file path") + } + currDir := path.Dir(filename) + dynamicHeaderPath := fmt.Sprintf("%s/%s/%s", currDir, bpfSourceDir, dynamicHeaderFileName) + + // Rename the dynamic header file if it already exists. 
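+	// Tests regenerate dynamic.h, so any existing copy is moved to a .bak file
+	// here and put back by restoreBackup, leaving the working tree unchanged.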
+ if _, err := os.Stat(dynamicHeaderPath); err == nil { + if err := os.Rename(dynamicHeaderPath, fmt.Sprintf("%s.bak", dynamicHeaderPath)); err != nil { + panic(fmt.Sprintf("failed to rename existing dynamic header file: %v", err)) + } + } +} + +func restoreBackup() { + // Get the directory of the current test file. + _, filename, _, ok := runtime.Caller(0) + if !ok { + panic("failed to determine test file path") + } + currDir := path.Dir(filename) + dynamicHeaderPath := fmt.Sprintf("%s/%s/%s", currDir, bpfSourceDir, dynamicHeaderFileName) + + // Remove the dynamic header file generated during test. + os.RemoveAll(dynamicHeaderPath) + + // Restore the dynamic header file if it was renamed. + if _, err := os.Stat(fmt.Sprintf("%s.bak", dynamicHeaderPath)); err == nil { + if err := os.Rename(fmt.Sprintf("%s.bak", dynamicHeaderPath), dynamicHeaderPath); err != nil { + panic(fmt.Sprintf("failed to restore dynamic header file: %v", err)) + } + } +} diff --git a/pkg/plugin/dropreason/kprobe_bpfel_arm64.go b/pkg/plugin/dropreason/kprobe_bpfel_arm64.go new file mode 100644 index 000000000..54eff7e5e --- /dev/null +++ b/pkg/plugin/dropreason/kprobe_bpfel_arm64.go @@ -0,0 +1,191 @@ +// Code generated by bpf2go; DO NOT EDIT. +//go:build arm64 + +package dropreason + +import ( + "bytes" + _ "embed" + "fmt" + "io" + + "github.com/cilium/ebpf" +) + +type kprobeDropReasonT uint32 + +type kprobeMapKey struct { + Prefixlen uint32 + Data uint32 +} + +type kprobeMetricsMapKey struct { + DropType uint32 + ReturnVal uint32 +} + +type kprobeMetricsMapValue struct { + Count uint64 + Bytes uint64 +} + +type kprobePacket struct { + SrcIp uint32 + DstIp uint32 + SrcPort uint16 + DstPort uint16 + Proto uint8 + _ [3]byte + SkbLen uint64 + Direction uint32 + Key kprobeMetricsMapKey + _ [4]byte + Ts uint64 + InFiltermap bool + _ [7]byte +} + +// loadKprobe returns the embedded CollectionSpec for kprobe. +func loadKprobe() (*ebpf.CollectionSpec, error) { + reader := bytes.NewReader(_KprobeBytes) + spec, err := ebpf.LoadCollectionSpecFromReader(reader) + if err != nil { + return nil, fmt.Errorf("can't load kprobe: %w", err) + } + + return spec, err +} + +// loadKprobeObjects loads kprobe and converts it into a struct. +// +// The following types are suitable as obj argument: +// +// *kprobeObjects +// *kprobePrograms +// *kprobeMaps +// +// See ebpf.CollectionSpec.LoadAndAssign documentation for details. +func loadKprobeObjects(obj interface{}, opts *ebpf.CollectionOptions) error { + spec, err := loadKprobe() + if err != nil { + return err + } + + return spec.LoadAndAssign(obj, opts) +} + +// kprobeSpecs contains maps and programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type kprobeSpecs struct { + kprobeProgramSpecs + kprobeMapSpecs +} + +// kprobeSpecs contains programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. 
+type kprobeProgramSpecs struct { + InetCskAccept *ebpf.ProgramSpec `ebpf:"inet_csk_accept"` + InetCskAcceptRet *ebpf.ProgramSpec `ebpf:"inet_csk_accept_ret"` + NfConntrackConfirm *ebpf.ProgramSpec `ebpf:"nf_conntrack_confirm"` + NfConntrackConfirmRet *ebpf.ProgramSpec `ebpf:"nf_conntrack_confirm_ret"` + NfHookSlow *ebpf.ProgramSpec `ebpf:"nf_hook_slow"` + NfHookSlowRet *ebpf.ProgramSpec `ebpf:"nf_hook_slow_ret"` + NfNatInetFn *ebpf.ProgramSpec `ebpf:"nf_nat_inet_fn"` + NfNatInetFnRet *ebpf.ProgramSpec `ebpf:"nf_nat_inet_fn_ret"` + TcpV4ConnectRet *ebpf.ProgramSpec `ebpf:"tcp_v4_connect_ret"` +} + +// kprobeMapSpecs contains maps before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type kprobeMapSpecs struct { + AcceptPids *ebpf.MapSpec `ebpf:"accept_pids"` + DropPids *ebpf.MapSpec `ebpf:"drop_pids"` + DropreasonEvents *ebpf.MapSpec `ebpf:"dropreason_events"` + RetinaFilterMap *ebpf.MapSpec `ebpf:"retina_filter_map"` + MetricsMap *ebpf.MapSpec `ebpf:"metrics_map"` + NatdropPids *ebpf.MapSpec `ebpf:"natdrop_pids"` +} + +// kprobeObjects contains all objects after they have been loaded into the kernel. +// +// It can be passed to loadKprobeObjects or ebpf.CollectionSpec.LoadAndAssign. +type kprobeObjects struct { + kprobePrograms + kprobeMaps +} + +func (o *kprobeObjects) Close() error { + return _KprobeClose( + &o.kprobePrograms, + &o.kprobeMaps, + ) +} + +// kprobeMaps contains all maps after they have been loaded into the kernel. +// +// It can be passed to loadKprobeObjects or ebpf.CollectionSpec.LoadAndAssign. +type kprobeMaps struct { + AcceptPids *ebpf.Map `ebpf:"accept_pids"` + DropPids *ebpf.Map `ebpf:"drop_pids"` + DropreasonEvents *ebpf.Map `ebpf:"dropreason_events"` + RetinaFilterMap *ebpf.Map `ebpf:"retina_filter_map"` + MetricsMap *ebpf.Map `ebpf:"metrics_map"` + NatdropPids *ebpf.Map `ebpf:"natdrop_pids"` +} + +func (m *kprobeMaps) Close() error { + return _KprobeClose( + m.AcceptPids, + m.DropPids, + m.DropreasonEvents, + m.RetinaFilterMap, + m.MetricsMap, + m.NatdropPids, + ) +} + +// kprobePrograms contains all programs after they have been loaded into the kernel. +// +// It can be passed to loadKprobeObjects or ebpf.CollectionSpec.LoadAndAssign. +type kprobePrograms struct { + InetCskAccept *ebpf.Program `ebpf:"inet_csk_accept"` + InetCskAcceptRet *ebpf.Program `ebpf:"inet_csk_accept_ret"` + NfConntrackConfirm *ebpf.Program `ebpf:"nf_conntrack_confirm"` + NfConntrackConfirmRet *ebpf.Program `ebpf:"nf_conntrack_confirm_ret"` + NfHookSlow *ebpf.Program `ebpf:"nf_hook_slow"` + NfHookSlowRet *ebpf.Program `ebpf:"nf_hook_slow_ret"` + NfNatInetFn *ebpf.Program `ebpf:"nf_nat_inet_fn"` + NfNatInetFnRet *ebpf.Program `ebpf:"nf_nat_inet_fn_ret"` + TcpV4ConnectRet *ebpf.Program `ebpf:"tcp_v4_connect_ret"` +} + +func (p *kprobePrograms) Close() error { + return _KprobeClose( + p.InetCskAccept, + p.InetCskAcceptRet, + p.NfConntrackConfirm, + p.NfConntrackConfirmRet, + p.NfHookSlow, + p.NfHookSlowRet, + p.NfNatInetFn, + p.NfNatInetFnRet, + p.TcpV4ConnectRet, + ) +} + +func _KprobeClose(closers ...io.Closer) error { + for _, closer := range closers { + if err := closer.Close(); err != nil { + return err + } + } + return nil +} + +// Do not access this directly. 
+// +//go:embed kprobe_bpfel_arm64.o +var _KprobeBytes []byte diff --git a/pkg/plugin/dropreason/kprobe_bpfel_arm64.o b/pkg/plugin/dropreason/kprobe_bpfel_arm64.o new file mode 100644 index 000000000..23435a787 Binary files /dev/null and b/pkg/plugin/dropreason/kprobe_bpfel_arm64.o differ diff --git a/pkg/plugin/dropreason/kprobe_bpfel_x86.go b/pkg/plugin/dropreason/kprobe_bpfel_x86.go new file mode 100644 index 000000000..737352236 --- /dev/null +++ b/pkg/plugin/dropreason/kprobe_bpfel_x86.go @@ -0,0 +1,191 @@ +// Code generated by bpf2go; DO NOT EDIT. +//go:build 386 || amd64 + +package dropreason + +import ( + "bytes" + _ "embed" + "fmt" + "io" + + "github.com/cilium/ebpf" +) + +type kprobeDropReasonT uint32 + +type kprobeMapKey struct { + Prefixlen uint32 + Data uint32 +} + +type kprobeMetricsMapKey struct { + DropType uint32 + ReturnVal uint32 +} + +type kprobeMetricsMapValue struct { + Count uint64 + Bytes uint64 +} + +type kprobePacket struct { + SrcIp uint32 + DstIp uint32 + SrcPort uint16 + DstPort uint16 + Proto uint8 + _ [3]byte + SkbLen uint64 + Direction uint32 + Key kprobeMetricsMapKey + _ [4]byte + Ts uint64 + InFiltermap bool + _ [7]byte +} + +// loadKprobe returns the embedded CollectionSpec for kprobe. +func loadKprobe() (*ebpf.CollectionSpec, error) { + reader := bytes.NewReader(_KprobeBytes) + spec, err := ebpf.LoadCollectionSpecFromReader(reader) + if err != nil { + return nil, fmt.Errorf("can't load kprobe: %w", err) + } + + return spec, err +} + +// loadKprobeObjects loads kprobe and converts it into a struct. +// +// The following types are suitable as obj argument: +// +// *kprobeObjects +// *kprobePrograms +// *kprobeMaps +// +// See ebpf.CollectionSpec.LoadAndAssign documentation for details. +func loadKprobeObjects(obj interface{}, opts *ebpf.CollectionOptions) error { + spec, err := loadKprobe() + if err != nil { + return err + } + + return spec.LoadAndAssign(obj, opts) +} + +// kprobeSpecs contains maps and programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type kprobeSpecs struct { + kprobeProgramSpecs + kprobeMapSpecs +} + +// kprobeSpecs contains programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type kprobeProgramSpecs struct { + InetCskAccept *ebpf.ProgramSpec `ebpf:"inet_csk_accept"` + InetCskAcceptRet *ebpf.ProgramSpec `ebpf:"inet_csk_accept_ret"` + NfConntrackConfirm *ebpf.ProgramSpec `ebpf:"nf_conntrack_confirm"` + NfConntrackConfirmRet *ebpf.ProgramSpec `ebpf:"nf_conntrack_confirm_ret"` + NfHookSlow *ebpf.ProgramSpec `ebpf:"nf_hook_slow"` + NfHookSlowRet *ebpf.ProgramSpec `ebpf:"nf_hook_slow_ret"` + NfNatInetFn *ebpf.ProgramSpec `ebpf:"nf_nat_inet_fn"` + NfNatInetFnRet *ebpf.ProgramSpec `ebpf:"nf_nat_inet_fn_ret"` + TcpV4ConnectRet *ebpf.ProgramSpec `ebpf:"tcp_v4_connect_ret"` +} + +// kprobeMapSpecs contains maps before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type kprobeMapSpecs struct { + AcceptPids *ebpf.MapSpec `ebpf:"accept_pids"` + DropPids *ebpf.MapSpec `ebpf:"drop_pids"` + DropreasonEvents *ebpf.MapSpec `ebpf:"dropreason_events"` + RetinaFilterMap *ebpf.MapSpec `ebpf:"retina_filter_map"` + MetricsMap *ebpf.MapSpec `ebpf:"metrics_map"` + NatdropPids *ebpf.MapSpec `ebpf:"natdrop_pids"` +} + +// kprobeObjects contains all objects after they have been loaded into the kernel. +// +// It can be passed to loadKprobeObjects or ebpf.CollectionSpec.LoadAndAssign. 
+type kprobeObjects struct { + kprobePrograms + kprobeMaps +} + +func (o *kprobeObjects) Close() error { + return _KprobeClose( + &o.kprobePrograms, + &o.kprobeMaps, + ) +} + +// kprobeMaps contains all maps after they have been loaded into the kernel. +// +// It can be passed to loadKprobeObjects or ebpf.CollectionSpec.LoadAndAssign. +type kprobeMaps struct { + AcceptPids *ebpf.Map `ebpf:"accept_pids"` + DropPids *ebpf.Map `ebpf:"drop_pids"` + DropreasonEvents *ebpf.Map `ebpf:"dropreason_events"` + RetinaFilterMap *ebpf.Map `ebpf:"retina_filter_map"` + MetricsMap *ebpf.Map `ebpf:"metrics_map"` + NatdropPids *ebpf.Map `ebpf:"natdrop_pids"` +} + +func (m *kprobeMaps) Close() error { + return _KprobeClose( + m.AcceptPids, + m.DropPids, + m.DropreasonEvents, + m.RetinaFilterMap, + m.MetricsMap, + m.NatdropPids, + ) +} + +// kprobePrograms contains all programs after they have been loaded into the kernel. +// +// It can be passed to loadKprobeObjects or ebpf.CollectionSpec.LoadAndAssign. +type kprobePrograms struct { + InetCskAccept *ebpf.Program `ebpf:"inet_csk_accept"` + InetCskAcceptRet *ebpf.Program `ebpf:"inet_csk_accept_ret"` + NfConntrackConfirm *ebpf.Program `ebpf:"nf_conntrack_confirm"` + NfConntrackConfirmRet *ebpf.Program `ebpf:"nf_conntrack_confirm_ret"` + NfHookSlow *ebpf.Program `ebpf:"nf_hook_slow"` + NfHookSlowRet *ebpf.Program `ebpf:"nf_hook_slow_ret"` + NfNatInetFn *ebpf.Program `ebpf:"nf_nat_inet_fn"` + NfNatInetFnRet *ebpf.Program `ebpf:"nf_nat_inet_fn_ret"` + TcpV4ConnectRet *ebpf.Program `ebpf:"tcp_v4_connect_ret"` +} + +func (p *kprobePrograms) Close() error { + return _KprobeClose( + p.InetCskAccept, + p.InetCskAcceptRet, + p.NfConntrackConfirm, + p.NfConntrackConfirmRet, + p.NfHookSlow, + p.NfHookSlowRet, + p.NfNatInetFn, + p.NfNatInetFnRet, + p.TcpV4ConnectRet, + ) +} + +func _KprobeClose(closers ...io.Closer) error { + for _, closer := range closers { + if err := closer.Close(); err != nil { + return err + } + } + return nil +} + +// Do not access this directly. +// +//go:embed kprobe_bpfel_x86.o +var _KprobeBytes []byte diff --git a/pkg/plugin/dropreason/kprobe_bpfel_x86.o b/pkg/plugin/dropreason/kprobe_bpfel_x86.o new file mode 100644 index 000000000..26b7caca5 Binary files /dev/null and b/pkg/plugin/dropreason/kprobe_bpfel_x86.o differ diff --git a/pkg/plugin/dropreason/mocks/mock_types.go b/pkg/plugin/dropreason/mocks/mock_types.go new file mode 100644 index 000000000..61b86cc36 --- /dev/null +++ b/pkg/plugin/dropreason/mocks/mock_types.go @@ -0,0 +1,167 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: types_linux.go + +// Package dropreason is a generated GoMock package. +package dropreason + +import ( + reflect "reflect" + + ebpf "github.com/cilium/ebpf" + perf "github.com/cilium/ebpf/perf" + gomock "github.com/golang/mock/gomock" +) + +// MockIMapIterator is a mock of IMapIterator interface. +type MockIMapIterator struct { + ctrl *gomock.Controller + recorder *MockIMapIteratorMockRecorder +} + +// MockIMapIteratorMockRecorder is the mock recorder for MockIMapIterator. +type MockIMapIteratorMockRecorder struct { + mock *MockIMapIterator +} + +// NewMockIMapIterator creates a new mock instance. +func NewMockIMapIterator(ctrl *gomock.Controller) *MockIMapIterator { + mock := &MockIMapIterator{ctrl: ctrl} + mock.recorder = &MockIMapIteratorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockIMapIterator) EXPECT() *MockIMapIteratorMockRecorder { + return m.recorder +} + +// Err mocks base method. +func (m *MockIMapIterator) Err() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Err") + ret0, _ := ret[0].(error) + return ret0 +} + +// Err indicates an expected call of Err. +func (mr *MockIMapIteratorMockRecorder) Err() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Err", reflect.TypeOf((*MockIMapIterator)(nil).Err)) +} + +// Next mocks base method. +func (m *MockIMapIterator) Next(keyOut, valueOut interface{}) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Next", keyOut, valueOut) + ret0, _ := ret[0].(bool) + return ret0 +} + +// Next indicates an expected call of Next. +func (mr *MockIMapIteratorMockRecorder) Next(keyOut, valueOut interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockIMapIterator)(nil).Next), keyOut, valueOut) +} + +// MockIMap is a mock of IMap interface. +type MockIMap struct { + ctrl *gomock.Controller + recorder *MockIMapMockRecorder +} + +// MockIMapMockRecorder is the mock recorder for MockIMap. +type MockIMapMockRecorder struct { + mock *MockIMap +} + +// NewMockIMap creates a new mock instance. +func NewMockIMap(ctrl *gomock.Controller) *MockIMap { + mock := &MockIMap{ctrl: ctrl} + mock.recorder = &MockIMapMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIMap) EXPECT() *MockIMapMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockIMap) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockIMapMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockIMap)(nil).Close)) +} + +// Iterate mocks base method. +func (m *MockIMap) Iterate() *ebpf.MapIterator { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Iterate") + ret0, _ := ret[0].(*ebpf.MapIterator) + return ret0 +} + +// Iterate indicates an expected call of Iterate. +func (mr *MockIMapMockRecorder) Iterate() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Iterate", reflect.TypeOf((*MockIMap)(nil).Iterate)) +} + +// MockIPerfReader is a mock of IPerfReader interface. +type MockIPerfReader struct { + ctrl *gomock.Controller + recorder *MockIPerfReaderMockRecorder +} + +// MockIPerfReaderMockRecorder is the mock recorder for MockIPerfReader. +type MockIPerfReaderMockRecorder struct { + mock *MockIPerfReader +} + +// NewMockIPerfReader creates a new mock instance. +func NewMockIPerfReader(ctrl *gomock.Controller) *MockIPerfReader { + mock := &MockIPerfReader{ctrl: ctrl} + mock.recorder = &MockIPerfReaderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIPerfReader) EXPECT() *MockIPerfReaderMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockIPerfReader) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. 
+func (mr *MockIPerfReaderMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockIPerfReader)(nil).Close)) +} + +// Read mocks base method. +func (m *MockIPerfReader) Read() (perf.Record, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Read") + ret0, _ := ret[0].(perf.Record) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Read indicates an expected call of Read. +func (mr *MockIPerfReaderMockRecorder) Read() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Read", reflect.TypeOf((*MockIPerfReader)(nil).Read)) +} diff --git a/pkg/plugin/dropreason/types_linux.go b/pkg/plugin/dropreason/types_linux.go new file mode 100644 index 000000000..3a3b156a3 --- /dev/null +++ b/pkg/plugin/dropreason/types_linux.go @@ -0,0 +1,107 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package dropreason + +import ( + "sync" + + kcfg "github.com/microsoft/retina/pkg/config" + "github.com/microsoft/retina/pkg/enricher" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/metrics" + "github.com/microsoft/retina/pkg/plugin/api" + hubblev1 "github.com/cilium/cilium/pkg/hubble/api/v1" + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/link" + "github.com/cilium/ebpf/perf" +) + +const ( + Name api.PluginName = "dropreason" + bpfSourceDir string = "_cprog" + bpfSourceFileName string = "drop_reason.c" + bpfObjectFileName string = "kprobe_bpf.o" + dynamicHeaderFileName string = "dynamic.h" + buffer int = 10000 + workers int = 2 +) + +// Determined via testing on a large cluster. +// Actual buffer size will be 16 * pagesize. +var perCPUBuffer = 16 + +type dropReason struct { + cfg *kcfg.Config + l *log.ZapLogger + KNfHook link.Link + KRetnfhook link.Link + KTCPAccept link.Link + KRetTCPAccept link.Link + KRetTCPConnect link.Link + KNfNatInet link.Link + KRetNfNatInet link.Link + KNfConntrackConfirm link.Link + KRetNfConntrackConfirm link.Link + metricsMapData IMap + event chan<- api.PluginEvent + isRunning bool + reader IPerfReader + enricher enricher.EnricherInterface + recordsChannel chan perf.Record + wg sync.WaitGroup + externalChannel chan *hubblev1.Event +} + +type ( + returnValue uint32 +) + +// Interface to https://pkg.go.dev/github.com/cilium/ebpf#Map. +// Added for unit tests. +// +//go:generate go run github.com/golang/mock/mockgen@v1.6.0 -source=types_linux.go -destination=mocks/mock_types.go -package=dropreason . 
IMap IMapIterator IPerfReader +type IMapIterator interface { + Next(keyOut interface{}, valueOut interface{}) bool + Err() error +} + +type IMap interface { + Iterate() *ebpf.MapIterator + Close() error +} + +type IPerfReader interface { + Read() (perf.Record, error) + Close() error +} + +//lint:ignore +type dropMetricKey kprobeMetricsMapKey //nolint:typecheck + +//lint:ignore +type dropMetricValues []kprobeMetricsMapValue //nolint:typecheck + +func (dk *dropMetricKey) getType() string { + //nolint:typecheck + return metrics.GetDropType(dk.DropType).String() +} + +func (dk *dropMetricKey) getDirection() string { + switch dk.getType() { + case metrics.TCP_CONNECT_BASIC.String(): + return "egress" + case metrics.TCP_ACCEPT_BASIC.String(): + return "ingress" + } + return "unknown" +} + +func (dv *dropMetricValues) getPktCountAndBytes() (float64, float64) { + count := uint64(0) + bytes := uint64(0) + for _, v := range *dv { + count += v.Count + bytes += v.Bytes + } + return float64(count), float64(bytes) +} diff --git a/pkg/plugin/filter/_cprog/placeholder.go b/pkg/plugin/filter/_cprog/placeholder.go new file mode 100644 index 000000000..fd0225add --- /dev/null +++ b/pkg/plugin/filter/_cprog/placeholder.go @@ -0,0 +1,3 @@ +package cprog //nolint:all + +// This file is a placeholder to make Go include this directory when vendoring. diff --git a/pkg/plugin/filter/_cprog/retina_filter.c b/pkg/plugin/filter/_cprog/retina_filter.c new file mode 100644 index 000000000..f4a0c356d --- /dev/null +++ b/pkg/plugin/filter/_cprog/retina_filter.c @@ -0,0 +1,32 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +#include "vmlinux.h" +#include "bpf_helpers.h" + +struct mapKey { + __u32 prefixlen; + __u32 data; +}; + +struct { + __uint(type, BPF_MAP_TYPE_LPM_TRIE); + __type(key, struct mapKey); + __type(value, __u8); + __uint(map_flags, BPF_F_NO_PREALLOC); + __uint(max_entries, 255); + __uint(pinning, LIBBPF_PIN_BY_NAME); // Pinned to /sys/fs/bpf. +} retina_filter_map SEC(".maps"); + +// Returns 1 if the IP address is in the map, 0 otherwise. +bool lookup(__u32 ipaddr) +{ + struct mapKey key = { + .prefixlen = 32, + .data = ipaddr + }; + + if (bpf_map_lookup_elem(&retina_filter_map, &key)) + return true; + return false; +} diff --git a/pkg/plugin/filter/filter_bpfel_arm64.go b/pkg/plugin/filter/filter_bpfel_arm64.go new file mode 100644 index 000000000..7c8a69996 --- /dev/null +++ b/pkg/plugin/filter/filter_bpfel_arm64.go @@ -0,0 +1,120 @@ +// Code generated by bpf2go; DO NOT EDIT. +//go:build arm64 + +package filter + +import ( + "bytes" + _ "embed" + "fmt" + "io" + + "github.com/cilium/ebpf" +) + +type filterMapKey struct { + Prefixlen uint32 + Data uint32 +} + +// loadFilter returns the embedded CollectionSpec for filter. +func loadFilter() (*ebpf.CollectionSpec, error) { + reader := bytes.NewReader(_FilterBytes) + spec, err := ebpf.LoadCollectionSpecFromReader(reader) + if err != nil { + return nil, fmt.Errorf("can't load filter: %w", err) + } + + return spec, err +} + +// loadFilterObjects loads filter and converts it into a struct. +// +// The following types are suitable as obj argument: +// +// *filterObjects +// *filterPrograms +// *filterMaps +// +// See ebpf.CollectionSpec.LoadAndAssign documentation for details. 
+func loadFilterObjects(obj interface{}, opts *ebpf.CollectionOptions) error { + spec, err := loadFilter() + if err != nil { + return err + } + + return spec.LoadAndAssign(obj, opts) +} + +// filterSpecs contains maps and programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type filterSpecs struct { + filterProgramSpecs + filterMapSpecs +} + +// filterSpecs contains programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type filterProgramSpecs struct { +} + +// filterMapSpecs contains maps before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type filterMapSpecs struct { + RetinaFilterMap *ebpf.MapSpec `ebpf:"retina_filter_map"` +} + +// filterObjects contains all objects after they have been loaded into the kernel. +// +// It can be passed to loadFilterObjects or ebpf.CollectionSpec.LoadAndAssign. +type filterObjects struct { + filterPrograms + filterMaps +} + +func (o *filterObjects) Close() error { + return _FilterClose( + &o.filterPrograms, + &o.filterMaps, + ) +} + +// filterMaps contains all maps after they have been loaded into the kernel. +// +// It can be passed to loadFilterObjects or ebpf.CollectionSpec.LoadAndAssign. +type filterMaps struct { + RetinaFilterMap *ebpf.Map `ebpf:"retina_filter_map"` +} + +func (m *filterMaps) Close() error { + return _FilterClose( + m.RetinaFilterMap, + ) +} + +// filterPrograms contains all programs after they have been loaded into the kernel. +// +// It can be passed to loadFilterObjects or ebpf.CollectionSpec.LoadAndAssign. +type filterPrograms struct { +} + +func (p *filterPrograms) Close() error { + return _FilterClose() +} + +func _FilterClose(closers ...io.Closer) error { + for _, closer := range closers { + if err := closer.Close(); err != nil { + return err + } + } + return nil +} + +// Do not access this directly. +// +//go:embed filter_bpfel_arm64.o +var _FilterBytes []byte diff --git a/pkg/plugin/filter/filter_bpfel_arm64.o b/pkg/plugin/filter/filter_bpfel_arm64.o new file mode 100644 index 000000000..f0c05ce9c Binary files /dev/null and b/pkg/plugin/filter/filter_bpfel_arm64.o differ diff --git a/pkg/plugin/filter/filter_bpfel_x86.go b/pkg/plugin/filter/filter_bpfel_x86.go new file mode 100644 index 000000000..eb27128ba --- /dev/null +++ b/pkg/plugin/filter/filter_bpfel_x86.go @@ -0,0 +1,120 @@ +// Code generated by bpf2go; DO NOT EDIT. +//go:build 386 || amd64 + +package filter + +import ( + "bytes" + _ "embed" + "fmt" + "io" + + "github.com/cilium/ebpf" +) + +type filterMapKey struct { + Prefixlen uint32 + Data uint32 +} + +// loadFilter returns the embedded CollectionSpec for filter. +func loadFilter() (*ebpf.CollectionSpec, error) { + reader := bytes.NewReader(_FilterBytes) + spec, err := ebpf.LoadCollectionSpecFromReader(reader) + if err != nil { + return nil, fmt.Errorf("can't load filter: %w", err) + } + + return spec, err +} + +// loadFilterObjects loads filter and converts it into a struct. +// +// The following types are suitable as obj argument: +// +// *filterObjects +// *filterPrograms +// *filterMaps +// +// See ebpf.CollectionSpec.LoadAndAssign documentation for details. +func loadFilterObjects(obj interface{}, opts *ebpf.CollectionOptions) error { + spec, err := loadFilter() + if err != nil { + return err + } + + return spec.LoadAndAssign(obj, opts) +} + +// filterSpecs contains maps and programs before they are loaded into the kernel. 
+// +// It can be passed ebpf.CollectionSpec.Assign. +type filterSpecs struct { + filterProgramSpecs + filterMapSpecs +} + +// filterSpecs contains programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type filterProgramSpecs struct { +} + +// filterMapSpecs contains maps before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type filterMapSpecs struct { + RetinaFilterMap *ebpf.MapSpec `ebpf:"retina_filter_map"` +} + +// filterObjects contains all objects after they have been loaded into the kernel. +// +// It can be passed to loadFilterObjects or ebpf.CollectionSpec.LoadAndAssign. +type filterObjects struct { + filterPrograms + filterMaps +} + +func (o *filterObjects) Close() error { + return _FilterClose( + &o.filterPrograms, + &o.filterMaps, + ) +} + +// filterMaps contains all maps after they have been loaded into the kernel. +// +// It can be passed to loadFilterObjects or ebpf.CollectionSpec.LoadAndAssign. +type filterMaps struct { + RetinaFilterMap *ebpf.Map `ebpf:"retina_filter_map"` +} + +func (m *filterMaps) Close() error { + return _FilterClose( + m.RetinaFilterMap, + ) +} + +// filterPrograms contains all programs after they have been loaded into the kernel. +// +// It can be passed to loadFilterObjects or ebpf.CollectionSpec.LoadAndAssign. +type filterPrograms struct { +} + +func (p *filterPrograms) Close() error { + return _FilterClose() +} + +func _FilterClose(closers ...io.Closer) error { + for _, closer := range closers { + if err := closer.Close(); err != nil { + return err + } + } + return nil +} + +// Do not access this directly. +// +//go:embed filter_bpfel_x86.o +var _FilterBytes []byte diff --git a/pkg/plugin/filter/filter_bpfel_x86.o b/pkg/plugin/filter/filter_bpfel_x86.o new file mode 100644 index 000000000..f0c05ce9c Binary files /dev/null and b/pkg/plugin/filter/filter_bpfel_x86.o differ diff --git a/pkg/plugin/filter/filter_map_linux.go b/pkg/plugin/filter/filter_map_linux.go new file mode 100644 index 000000000..7371e5b80 --- /dev/null +++ b/pkg/plugin/filter/filter_map_linux.go @@ -0,0 +1,186 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package filter + +import ( + "errors" + "net" + "strings" + "sync" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/plugin/constants" + "github.com/microsoft/retina/pkg/utils" + "github.com/cilium/ebpf" + "go.uber.org/zap" + + _ "github.com/microsoft/retina/pkg/plugin/filter/_cprog" // nolint +) + +//go:generate go run github.com/cilium/ebpf/cmd/bpf2go@master -cc clang-14 -cflags "-g -O2 -Wall -D__TARGET_ARCH_${GOARCH} -Wall" -target ${GOARCH} -type mapKey filter ./_cprog/retina_filter.c -- -I../lib/_${GOARCH} -I../lib/common/libbpf/_src + +var ( + f *FilterMap + once sync.Once +) + +type FilterMap struct { + l *log.ZapLogger + obj *filterObjects //nolint:typecheck + kfm IEbpfMap + batchApiNotSupported bool +} + +func Init() (*FilterMap, error) { + once.Do(func() { + f = &FilterMap{} + }) + if f.l == nil { + f.l = log.Logger().Named("filter-map") + } + if f.obj != nil { + return f, nil + } + + obj := &filterObjects{} //nolint:typecheck + err := loadFilterObjects(obj, &ebpf.CollectionOptions{ //nolint:typecheck + Maps: ebpf.MapOptions{ + PinPath: constants.FilterMapPath, + }, + }) + if err != nil { + f.l.Error("loadFiltermanagerObjects failed", zap.Error(err)) + return f, err + } + f.obj = obj + f.kfm = obj.RetinaFilterMap + return f, nil +} + +func (f *FilterMap) Add(ips []net.IP) error { + keys := []filterMapKey{} //nolint:typecheck + values := make([]uint8, len(ips)) + for idx, ip := range ips { + key, err := mapKey(ip) + if err != nil { + return err + } + keys = append(keys, key) + values[idx] = 1 + } + + var updated int + var err error + if !f.batchApiNotSupported { + updated, err = f.kfm.BatchUpdate(keys, values, &ebpf.BatchOptions{ + Flags: uint64(ebpf.UpdateAny), + }) + + // if batch update not supported by kernel, perform single updates + if err != nil && strings.Contains(err.Error(), "not supported") { + f.l.Debug("Batch update not supported by kernel. Performing single updates instead.") + f.batchApiNotSupported = true + updated, err = f.performSingleUpdates(keys, values) + if err != nil { + return err + } + } + } else { + updated, err = f.performSingleUpdates(keys, values) + if err != nil { + return err + } + } + + f.l.Debug("Add", zap.Int("updated", updated)) + return err +} + +func (f *FilterMap) Delete(ips []net.IP) error { + keys := []filterMapKey{} //nolint:typecheck + for _, ip := range ips { + key, err := mapKey(ip) + if err != nil { + return err + } + keys = append(keys, key) + } + var deleted int + var err error + if !f.batchApiNotSupported { + deleted, err = f.kfm.BatchDelete(keys, &ebpf.BatchOptions{ + Flags: uint64(ebpf.UpdateAny), + }) + + // if batch delete not supported by kernel, perform single deletes + if err != nil && strings.Contains(err.Error(), "not supported") { + f.l.Debug("Batch delete not supported by kernel. 
Performing single deletes instead.") + f.batchApiNotSupported = true + deleted, err = f.performSingleDeletes(keys) + if err != nil { + return err + } + } + } else { + deleted, err = f.performSingleDeletes(keys) + if err != nil { + return err + } + } + + f.l.Debug("Delete", zap.Int("deleted", deleted)) + return err +} + +func (f *FilterMap) performSingleUpdates(keys []filterMapKey, values []uint8) (int, error) { //nolint:typecheck + var updated int + for idx, key := range keys { + err := f.kfm.Put(key, values[idx]) + if err != nil { + return updated, err + } + updated++ + } + return updated, nil +} + +func (f *FilterMap) performSingleDeletes(keys []filterMapKey) (int, error) { //nolint:typecheck + var deleted int + for _, key := range keys { + err := f.kfm.Delete(key) + if err != nil { + return deleted, err + } + deleted++ + } + return deleted, nil +} + +func (f *FilterMap) Close() { + if f.obj == nil { + return + } + if f.kfm != nil { + f.kfm.Close() + } + f.obj.Close() +} + +// Helper functions. +func mapKey(ip net.IP) (filterMapKey, error) { //nolint:typecheck + // Convert to 4 byte representation. + // Sometimes, IP.Net has 16 byte representation for IPV4 addresses. + ipv4 := ip.To4() + if ipv4 == nil { + return filterMapKey{}, errors.New("invalid IP address") //nolint:typecheck + } + ipv4Int, err := utils.Ip2int(ipv4) + if err != nil { + return filterMapKey{}, err //nolint:typecheck + } + return filterMapKey{ //nolint:typecheck + Prefixlen: uint32(32), + Data: ipv4Int, + }, nil +} diff --git a/pkg/plugin/filter/filter_map_test.go b/pkg/plugin/filter/filter_map_test.go new file mode 100644 index 000000000..2404d0c0d --- /dev/null +++ b/pkg/plugin/filter/filter_map_test.go @@ -0,0 +1,209 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package filter + +import ( + "errors" + "net" + "testing" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/plugin/filter/mocks" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +func Test_mapKey(t *testing.T) { + type args struct { + ip net.IP + } + tests := []struct { + name string + args args + want filterMapKey //nolint:typecheck + wantErr bool + }{ + { + name: "IPv4", + args: args{ + ip: net.ParseIP("1.1.1.1").To4(), + }, + want: filterMapKey{ //nolint:typecheck + Prefixlen: uint32(32), + Data: uint32(16843009), + }, + wantErr: false, + }, + { + name: "IPv6", + args: args{ + ip: net.ParseIP("2001:db8::68").To16(), + }, + want: filterMapKey{}, //nolint:typecheck + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + key, err := mapKey(tt.args.ip) + // If wantErr is true, we expect an err != nil. + // If wantErr is false, we expect an err == nil. 
+ if (tt.wantErr && err == nil) || (!tt.wantErr && err != nil) { + t.Errorf("mapKey() error = %v, wantErr %v", err, tt.wantErr) + return + } + assert.Equal(t, tt.want, key) + }) + } +} + +func TestFilterMap_Add(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + input := []net.IP{net.ParseIP("1.1.1.1"), net.ParseIP("2.2.2.2")} + expectedKeys := []filterMapKey{ //nolint:typecheck + { + Prefixlen: uint32(32), + Data: uint32(16843009), + }, + { + Prefixlen: uint32(32), + Data: uint32(33686018), + }, + } + expectedValues := []uint8{1, 1} + + mockKfm := mocks.NewMockIEbpfMap(ctrl) + mockKfm.EXPECT().BatchUpdate(expectedKeys, expectedValues, gomock.Any()).Return(2, nil) + + f := &FilterMap{ + l: log.Logger().Named("filter-map"), + kfm: mockKfm, + } + err := f.Add(input) + assert.NoError(t, err) + + // Test error case. + mockKfm.EXPECT().BatchUpdate(expectedKeys, expectedValues, gomock.Any()).Return(0, errors.New("test error")) + err = f.Add(input) + assert.Error(t, err) +} + +func TestFilterMap_Delete(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + input := []net.IP{net.ParseIP("1.1.1.1"), net.ParseIP("2.2.2.2")} + expectedKeys := []filterMapKey{ //nolint:typecheck + { + Prefixlen: uint32(32), + Data: uint32(16843009), + }, + { + Prefixlen: uint32(32), + Data: uint32(33686018), + }, + } + + mockKfm := mocks.NewMockIEbpfMap(ctrl) + mockKfm.EXPECT().BatchDelete(expectedKeys, gomock.Any()).Return(2, nil) + + f := &FilterMap{ + l: log.Logger().Named("filter-map"), + kfm: mockKfm, + } + err := f.Delete(input) + assert.NoError(t, err) + + // Test error case. + mockKfm.EXPECT().BatchDelete(expectedKeys, gomock.Any()).Return(0, errors.New("test error")) + err = f.Delete(input) + assert.Error(t, err) +} + +func TestFilterMap_Add_No_BatchApi(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + input := []net.IP{net.ParseIP("1.1.1.1"), net.ParseIP("2.2.2.2")} + expectedKeys := []filterMapKey{ //nolint:typecheck + { + Prefixlen: uint32(32), + Data: uint32(16843009), + }, + { + Prefixlen: uint32(32), + Data: uint32(33686018), + }, + } + expectedValues := []uint8{1, 1} + + mockKfm := mocks.NewMockIEbpfMap(ctrl) + // Test error case. + mockKfm.EXPECT().BatchUpdate(expectedKeys, expectedValues, gomock.Any()).Return(0, errors.New("map batch api not supported (requires >= v5.6)")) + mockKfm.EXPECT().Put(gomock.Any(), gomock.Any()).Return(nil).Times(2) + + f := &FilterMap{ + l: log.Logger().Named("filter-map"), + kfm: mockKfm, + } + err := f.Add(input) + assert.NoError(t, err) + + // Test error case. 
+ mockKfm.EXPECT().Put(gomock.Any(), gomock.Any()).Return(errors.New("test error")) + err = f.Add(input) + assert.Error(t, err) +} + +func TestFilterMap_Delete_No_BatchApi(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + input := []net.IP{net.ParseIP("1.1.1.1"), net.ParseIP("2.2.2.2")} + expectedKeys := []filterMapKey{ //nolint:typecheck + { + Prefixlen: uint32(32), + Data: uint32(16843009), + }, + { + Prefixlen: uint32(32), + Data: uint32(33686018), + }, + } + + mockKfm := mocks.NewMockIEbpfMap(ctrl) + mockKfm.EXPECT().BatchDelete(expectedKeys, gomock.Any()).Return(0, errors.New("map batch api not supported (requires >= v5.6)")) + mockKfm.EXPECT().Delete(gomock.Any()).Return(nil).Times(2) + + f := &FilterMap{ + l: log.Logger().Named("filter-map"), + kfm: mockKfm, + } + err := f.Delete(input) + assert.NoError(t, err) + + // Test error case. + mockKfm.EXPECT().Delete(gomock.Any()).Return(errors.New("test error")) + + err = f.Delete(input) + assert.Error(t, err) +} + +func TestFilterMap_Close(t *testing.T) { + f := &FilterMap{} + f.Close() // expect no panic when obj is nil. + + f.obj = &filterObjects{} //nolint:typecheck + f.Close() // expect no panic when kfm is nil. +} diff --git a/pkg/plugin/filter/filter_map_windows.go b/pkg/plugin/filter/filter_map_windows.go new file mode 100644 index 000000000..d7968bacc --- /dev/null +++ b/pkg/plugin/filter/filter_map_windows.go @@ -0,0 +1,23 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package filter + +import "net" + +type FilterMap struct{} + +func Init() (*FilterMap, error) { + return &FilterMap{}, nil +} + +func (f *FilterMap) Add(ips []net.IP) error { + return nil +} + +func (f *FilterMap) Delete(ips []net.IP) error { + return nil +} + +func (f *FilterMap) Close() { +} diff --git a/pkg/plugin/filter/mocks/mock_types.go b/pkg/plugin/filter/mocks/mock_types.go new file mode 100644 index 000000000..c9f354f0c --- /dev/null +++ b/pkg/plugin/filter/mocks/mock_types.go @@ -0,0 +1,171 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: types.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + net "net" + reflect "reflect" + + ebpf "github.com/cilium/ebpf" + gomock "github.com/golang/mock/gomock" +) + +// MockIFilterMap is a mock of IFilterMap interface. +type MockIFilterMap struct { + ctrl *gomock.Controller + recorder *MockIFilterMapMockRecorder +} + +// MockIFilterMapMockRecorder is the mock recorder for MockIFilterMap. +type MockIFilterMapMockRecorder struct { + mock *MockIFilterMap +} + +// NewMockIFilterMap creates a new mock instance. +func NewMockIFilterMap(ctrl *gomock.Controller) *MockIFilterMap { + mock := &MockIFilterMap{ctrl: ctrl} + mock.recorder = &MockIFilterMapMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIFilterMap) EXPECT() *MockIFilterMapMockRecorder { + return m.recorder +} + +// Add mocks base method. +func (m *MockIFilterMap) Add(arg0 []net.IP) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Add", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Add indicates an expected call of Add. +func (mr *MockIFilterMapMockRecorder) Add(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockIFilterMap)(nil).Add), arg0) +} + +// Close mocks base method. 
+func (m *MockIFilterMap) Close() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Close") +} + +// Close indicates an expected call of Close. +func (mr *MockIFilterMapMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockIFilterMap)(nil).Close)) +} + +// Delete mocks base method. +func (m *MockIFilterMap) Delete(arg0 []net.IP) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockIFilterMapMockRecorder) Delete(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockIFilterMap)(nil).Delete), arg0) +} + +// MockIEbpfMap is a mock of IEbpfMap interface. +type MockIEbpfMap struct { + ctrl *gomock.Controller + recorder *MockIEbpfMapMockRecorder +} + +// MockIEbpfMapMockRecorder is the mock recorder for MockIEbpfMap. +type MockIEbpfMapMockRecorder struct { + mock *MockIEbpfMap +} + +// NewMockIEbpfMap creates a new mock instance. +func NewMockIEbpfMap(ctrl *gomock.Controller) *MockIEbpfMap { + mock := &MockIEbpfMap{ctrl: ctrl} + mock.recorder = &MockIEbpfMapMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIEbpfMap) EXPECT() *MockIEbpfMapMockRecorder { + return m.recorder +} + +// BatchDelete mocks base method. +func (m *MockIEbpfMap) BatchDelete(keys interface{}, opts *ebpf.BatchOptions) (int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BatchDelete", keys, opts) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BatchDelete indicates an expected call of BatchDelete. +func (mr *MockIEbpfMapMockRecorder) BatchDelete(keys, opts interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchDelete", reflect.TypeOf((*MockIEbpfMap)(nil).BatchDelete), keys, opts) +} + +// BatchUpdate mocks base method. +func (m *MockIEbpfMap) BatchUpdate(keys, values interface{}, opts *ebpf.BatchOptions) (int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BatchUpdate", keys, values, opts) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BatchUpdate indicates an expected call of BatchUpdate. +func (mr *MockIEbpfMapMockRecorder) BatchUpdate(keys, values, opts interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchUpdate", reflect.TypeOf((*MockIEbpfMap)(nil).BatchUpdate), keys, values, opts) +} + +// Close mocks base method. +func (m *MockIEbpfMap) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockIEbpfMapMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockIEbpfMap)(nil).Close)) +} + +// Delete mocks base method. +func (m *MockIEbpfMap) Delete(key interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", key) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. 
+func (mr *MockIEbpfMapMockRecorder) Delete(key interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockIEbpfMap)(nil).Delete), key) +} + +// Put mocks base method. +func (m *MockIEbpfMap) Put(key, value interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Put", key, value) + ret0, _ := ret[0].(error) + return ret0 +} + +// Put indicates an expected call of Put. +func (mr *MockIEbpfMapMockRecorder) Put(key, value interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockIEbpfMap)(nil).Put), key, value) +} diff --git a/pkg/plugin/filter/types.go b/pkg/plugin/filter/types.go new file mode 100644 index 000000000..374df135d --- /dev/null +++ b/pkg/plugin/filter/types.go @@ -0,0 +1,34 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package filter + +import ( + "net" + + "github.com/cilium/ebpf" +) + +//go:generate go run github.com/golang/mock/mockgen@v1.6.0 -source=types.go -destination=mocks/mock_types.go -package=mocks + +/* +A thin wrapper around the eBPF map that allows adding and deleting IPv4 addresses. +Adding this separately here because: +- C code uses the lib for generation/compilation +- Plugins import retina_filter.c to use lookup function +*/ +type IFilterMap interface { + Add([]net.IP) error + Delete([]net.IP) error + Close() +} + +// Interface for eBPF map. +// Added for UTs. +type IEbpfMap interface { + BatchUpdate(keys, values interface{}, opts *ebpf.BatchOptions) (int, error) + BatchDelete(keys interface{}, opts *ebpf.BatchOptions) (int, error) + Put(key, value interface{}) error + Delete(key interface{}) error + Close() error +} diff --git a/pkg/plugin/lib/_amd64/placeholder.go b/pkg/plugin/lib/_amd64/placeholder.go new file mode 100644 index 000000000..28e1888e8 --- /dev/null +++ b/pkg/plugin/lib/_amd64/placeholder.go @@ -0,0 +1,3 @@ +package amd64 + +// This file is a placeholder to make Go include this directory when vendoring. 
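For orientation only, and not part of the change itself: a minimal sketch of how a Linux caller might exercise the FilterMap API introduced above. It assumes the zap logger is initialized the same way the unit tests in this diff do, and that the process has the privileges needed to load and pin eBPF maps; the example package and IP addresses are illustrative placeholders.

package main

import (
	"net"

	"github.com/microsoft/retina/pkg/log"
	"github.com/microsoft/retina/pkg/plugin/filter"
)

func main() {
	// The filter map tests set up the logger the same way before using FilterMap.
	log.SetupZapLogger(log.GetDefaultLogOpts())

	// Init loads the embedded filter objects and pins the retina filter map (see Init above).
	fm, err := filter.Init()
	if err != nil {
		panic(err)
	}
	defer fm.Close()

	// Add IPv4 addresses of interest; Add batch-updates the map and falls back to
	// single Put calls on kernels without the batch API.
	ips := []net.IP{net.ParseIP("10.0.0.1"), net.ParseIP("10.0.0.2")}
	if err := fm.Add(ips); err != nil {
		panic(err)
	}

	// Remove the same addresses once they are no longer of interest.
	if err := fm.Delete(ips); err != nil {
		panic(err)
	}
}

The same Add/Delete calls compile on Windows as well, where the stub in filter_map_windows.go turns them into no-ops.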
diff --git a/pkg/plugin/lib/_amd64/vmlinux.h b/pkg/plugin/lib/_amd64/vmlinux.h new file mode 100644 index 000000000..ff729be39 --- /dev/null +++ b/pkg/plugin/lib/_amd64/vmlinux.h @@ -0,0 +1,130355 @@ +#ifndef __VMLINUX_H__ +#define __VMLINUX_H__ + +#ifndef BPF_NO_PRESERVE_ACCESS_INDEX +#pragma clang attribute push (__attribute__((preserve_access_index)), apply_to = record) +#endif + +typedef unsigned char __u8; + +typedef short int __s16; + +typedef short unsigned int __u16; + +typedef int __s32; + +typedef unsigned int __u32; + +typedef long long int __s64; + +typedef long long unsigned int __u64; + +typedef __u8 u8; + +typedef __s16 s16; + +typedef __u16 u16; + +typedef __s32 s32; + +typedef __u32 u32; + +typedef __s64 s64; + +typedef __u64 u64; + +enum { + false = 0, + true = 1, +}; + +typedef long int __kernel_long_t; + +typedef long unsigned int __kernel_ulong_t; + +typedef int __kernel_pid_t; + +typedef unsigned int __kernel_uid32_t; + +typedef unsigned int __kernel_gid32_t; + +typedef __kernel_ulong_t __kernel_size_t; + +typedef __kernel_long_t __kernel_ssize_t; + +typedef long long int __kernel_loff_t; + +typedef long long int __kernel_time64_t; + +typedef __kernel_long_t __kernel_clock_t; + +typedef int __kernel_timer_t; + +typedef int __kernel_clockid_t; + +typedef unsigned int __poll_t; + +typedef u32 __kernel_dev_t; + +typedef __kernel_dev_t dev_t; + +typedef short unsigned int umode_t; + +typedef __kernel_pid_t pid_t; + +typedef __kernel_clockid_t clockid_t; + +typedef _Bool bool; + +typedef __kernel_uid32_t uid_t; + +typedef __kernel_gid32_t gid_t; + +typedef __kernel_loff_t loff_t; + +typedef __kernel_size_t size_t; + +typedef __kernel_ssize_t ssize_t; + +typedef s32 int32_t; + +typedef u32 uint32_t; + +typedef u64 sector_t; + +typedef u64 blkcnt_t; + +typedef unsigned int gfp_t; + +typedef unsigned int fmode_t; + +typedef u64 phys_addr_t; + +typedef struct { + int counter; +} atomic_t; + +typedef struct { + s64 counter; +} atomic64_t; + +struct list_head { + struct list_head *next; + struct list_head *prev; +}; + +struct hlist_node; + +struct hlist_head { + struct hlist_node *first; +}; + +struct hlist_node { + struct hlist_node *next; + struct hlist_node **pprev; +}; + +struct callback_head { + struct callback_head *next; + void (*func)(struct callback_head *); +}; + +struct lock_class_key {}; + +struct fs_context; + +struct fs_parameter_spec; + +struct dentry; + +struct super_block; + +struct module; + +struct file_system_type { + const char *name; + int fs_flags; + int (*init_fs_context)(struct fs_context *); + const struct fs_parameter_spec *parameters; + struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); + void (*kill_sb)(struct super_block *); + struct module *owner; + struct file_system_type *next; + struct hlist_head fs_supers; + struct lock_class_key s_lock_key; + struct lock_class_key s_umount_key; + struct lock_class_key s_vfs_rename_key; + struct lock_class_key s_writers_key[3]; + struct lock_class_key i_lock_key; + struct lock_class_key i_mutex_key; + struct lock_class_key i_mutex_dir_key; +}; + +struct qspinlock { + union { + atomic_t val; + struct { + u8 locked; + u8 pending; + }; + struct { + u16 locked_pending; + u16 tail; + }; + }; +}; + +typedef struct qspinlock arch_spinlock_t; + +struct qrwlock { + union { + atomic_t cnts; + struct { + u8 wlocked; + u8 __lstate[3]; + }; + }; + arch_spinlock_t wait_lock; +}; + +typedef struct qrwlock arch_rwlock_t; + +struct raw_spinlock { + arch_spinlock_t raw_lock; +}; + +typedef struct 
raw_spinlock raw_spinlock_t; + +struct spinlock { + union { + struct raw_spinlock rlock; + }; +}; + +typedef struct spinlock spinlock_t; + +typedef struct { + arch_rwlock_t raw_lock; +} rwlock_t; + +struct ratelimit_state { + raw_spinlock_t lock; + int interval; + int burst; + int printed; + int missed; + long unsigned int begin; + long unsigned int flags; +}; + +typedef void *fl_owner_t; + +struct file; + +struct kiocb; + +struct iov_iter; + +struct dir_context; + +struct poll_table_struct; + +struct vm_area_struct; + +struct inode; + +struct file_lock; + +struct page; + +struct pipe_inode_info; + +struct seq_file; + +struct file_operations { + struct module *owner; + loff_t (*llseek)(struct file *, loff_t, int); + ssize_t (*read)(struct file *, char *, size_t, loff_t *); + ssize_t (*write)(struct file *, const char *, size_t, loff_t *); + ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); + ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); + int (*iopoll)(struct kiocb *, bool); + int (*iterate)(struct file *, struct dir_context *); + int (*iterate_shared)(struct file *, struct dir_context *); + __poll_t (*poll)(struct file *, struct poll_table_struct *); + long int (*unlocked_ioctl)(struct file *, unsigned int, long unsigned int); + long int (*compat_ioctl)(struct file *, unsigned int, long unsigned int); + int (*mmap)(struct file *, struct vm_area_struct *); + long unsigned int mmap_supported_flags; + int (*open)(struct inode *, struct file *); + int (*flush)(struct file *, fl_owner_t); + int (*release)(struct inode *, struct file *); + int (*fsync)(struct file *, loff_t, loff_t, int); + int (*fasync)(int, struct file *, int); + int (*lock)(struct file *, int, struct file_lock *); + ssize_t (*sendpage)(struct file *, struct page *, int, size_t, loff_t *, int); + long unsigned int (*get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + int (*check_flags)(int); + int (*flock)(struct file *, int, struct file_lock *); + ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); + ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); + int (*setlease)(struct file *, long int, struct file_lock **, void **); + long int (*fallocate)(struct file *, int, loff_t, loff_t); + void (*show_fdinfo)(struct seq_file *, struct file *); + ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int); + loff_t (*remap_file_range)(struct file *, loff_t, struct file *, loff_t, loff_t, unsigned int); + int (*fadvise)(struct file *, loff_t, loff_t, int); +}; + +typedef __s64 time64_t; + +struct __kernel_timespec { + __kernel_time64_t tv_sec; + long long int tv_nsec; +}; + +struct timespec64 { + time64_t tv_sec; + long int tv_nsec; +}; + +enum timespec_type { + TT_NONE = 0, + TT_NATIVE = 1, + TT_COMPAT = 2, +}; + +typedef s32 old_time32_t; + +struct old_timespec32 { + old_time32_t tv_sec; + s32 tv_nsec; +}; + +struct pollfd; + +struct restart_block { + long unsigned int arch_data; + long int (*fn)(struct restart_block *); + union { + struct { + u32 *uaddr; + u32 val; + u32 flags; + u32 bitset; + u64 time; + u32 *uaddr2; + } futex; + struct { + clockid_t clockid; + enum timespec_type type; + union { + struct __kernel_timespec *rmtp; + struct old_timespec32 *compat_rmtp; + }; + u64 expires; + } nanosleep; + struct { + struct pollfd *ufds; + int nfds; + int has_timeout; + long unsigned int tv_sec; + long unsigned int tv_nsec; + } 
poll; + }; +}; + +struct thread_info { + long unsigned int flags; + long unsigned int syscall_work; + u32 status; +}; + +struct refcount_struct { + atomic_t refs; +}; + +typedef struct refcount_struct refcount_t; + +struct llist_node { + struct llist_node *next; +}; + +struct __call_single_node { + struct llist_node llist; + union { + unsigned int u_flags; + atomic_t a_flags; + }; + u16 src; + u16 dst; +}; + +struct load_weight { + long unsigned int weight; + u32 inv_weight; +}; + +struct rb_node { + long unsigned int __rb_parent_color; + struct rb_node *rb_right; + struct rb_node *rb_left; +}; + +struct sched_statistics { + u64 wait_start; + u64 wait_max; + u64 wait_count; + u64 wait_sum; + u64 iowait_count; + u64 iowait_sum; + u64 sleep_start; + u64 sleep_max; + s64 sum_sleep_runtime; + u64 block_start; + u64 block_max; + u64 exec_max; + u64 slice_max; + u64 nr_migrations_cold; + u64 nr_failed_migrations_affine; + u64 nr_failed_migrations_running; + u64 nr_failed_migrations_hot; + u64 nr_forced_migrations; + u64 nr_wakeups; + u64 nr_wakeups_sync; + u64 nr_wakeups_migrate; + u64 nr_wakeups_local; + u64 nr_wakeups_remote; + u64 nr_wakeups_affine; + u64 nr_wakeups_affine_attempts; + u64 nr_wakeups_passive; + u64 nr_wakeups_idle; +}; + +struct util_est { + unsigned int enqueued; + unsigned int ewma; +}; + +struct sched_avg { + u64 last_update_time; + u64 load_sum; + u64 runnable_sum; + u32 util_sum; + u32 period_contrib; + long unsigned int load_avg; + long unsigned int runnable_avg; + long unsigned int util_avg; + struct util_est util_est; +}; + +struct cfs_rq; + +struct sched_entity { + struct load_weight load; + struct rb_node run_node; + struct list_head group_node; + unsigned int on_rq; + u64 exec_start; + u64 sum_exec_runtime; + u64 vruntime; + u64 prev_sum_exec_runtime; + u64 nr_migrations; + struct sched_statistics statistics; + int depth; + struct sched_entity *parent; + struct cfs_rq *cfs_rq; + struct cfs_rq *my_q; + long unsigned int runnable_weight; + long: 64; + long: 64; + long: 64; + struct sched_avg avg; +}; + +struct sched_rt_entity { + struct list_head run_list; + long unsigned int timeout; + long unsigned int watchdog_stamp; + unsigned int time_slice; + short unsigned int on_rq; + short unsigned int on_list; + struct sched_rt_entity *back; +}; + +typedef s64 ktime_t; + +struct timerqueue_node { + struct rb_node node; + ktime_t expires; +}; + +enum hrtimer_restart { + HRTIMER_NORESTART = 0, + HRTIMER_RESTART = 1, +}; + +struct hrtimer_clock_base; + +struct hrtimer { + struct timerqueue_node node; + ktime_t _softexpires; + enum hrtimer_restart (*function)(struct hrtimer *); + struct hrtimer_clock_base *base; + u8 state; + u8 is_rel; + u8 is_soft; + u8 is_hard; +}; + +struct sched_dl_entity { + struct rb_node rb_node; + u64 dl_runtime; + u64 dl_deadline; + u64 dl_period; + u64 dl_bw; + u64 dl_density; + s64 runtime; + u64 deadline; + unsigned int flags; + unsigned int dl_throttled: 1; + unsigned int dl_yielded: 1; + unsigned int dl_non_contending: 1; + unsigned int dl_overrun: 1; + struct hrtimer dl_timer; + struct hrtimer inactive_timer; + struct sched_dl_entity *pi_se; +}; + +struct uclamp_se { + unsigned int value: 11; + unsigned int bucket_id: 3; + unsigned int active: 1; + unsigned int user_defined: 1; +}; + +struct cpumask { + long unsigned int bits[128]; +}; + +typedef struct cpumask cpumask_t; + +union rcu_special { + struct { + u8 blocked; + u8 need_qs; + u8 exp_hint; + u8 need_mb; + } b; + u32 s; +}; + +struct sched_info { + long unsigned int pcount; + long long 
unsigned int run_delay; + long long unsigned int last_arrival; + long long unsigned int last_queued; +}; + +struct plist_node { + int prio; + struct list_head prio_list; + struct list_head node_list; +}; + +struct vmacache { + u64 seqnum; + struct vm_area_struct *vmas[4]; +}; + +struct task_rss_stat { + int events; + int count[4]; +}; + +struct prev_cputime { + u64 utime; + u64 stime; + raw_spinlock_t lock; +}; + +struct rb_root { + struct rb_node *rb_node; +}; + +struct rb_root_cached { + struct rb_root rb_root; + struct rb_node *rb_leftmost; +}; + +struct timerqueue_head { + struct rb_root_cached rb_root; +}; + +struct posix_cputimer_base { + u64 nextevt; + struct timerqueue_head tqhead; +}; + +struct posix_cputimers { + struct posix_cputimer_base bases[3]; + unsigned int timers_active; + unsigned int expiry_active; +}; + +struct posix_cputimers_work { + struct callback_head work; + unsigned int scheduled; +}; + +struct sem_undo_list; + +struct sysv_sem { + struct sem_undo_list *undo_list; +}; + +struct sysv_shm { + struct list_head shm_clist; +}; + +typedef struct { + long unsigned int sig[1]; +} sigset_t; + +struct sigpending { + struct list_head list; + sigset_t signal; +}; + +typedef struct { + uid_t val; +} kuid_t; + +struct seccomp_filter; + +struct seccomp { + int mode; + atomic_t filter_count; + struct seccomp_filter *filter; +}; + +struct syscall_user_dispatch { + char *selector; + long unsigned int offset; + long unsigned int len; + bool on_dispatch; +}; + +struct wake_q_node { + struct wake_q_node *next; +}; + +struct task_io_accounting { + u64 rchar; + u64 wchar; + u64 syscr; + u64 syscw; + u64 read_bytes; + u64 write_bytes; + u64 cancelled_write_bytes; +}; + +typedef struct { + long unsigned int bits[16]; +} nodemask_t; + +struct seqcount { + unsigned int sequence; +}; + +typedef struct seqcount seqcount_t; + +struct seqcount_spinlock { + seqcount_t seqcount; +}; + +typedef struct seqcount_spinlock seqcount_spinlock_t; + +typedef atomic64_t atomic_long_t; + +struct optimistic_spin_queue { + atomic_t tail; +}; + +struct mutex { + atomic_long_t owner; + spinlock_t wait_lock; + struct optimistic_spin_queue osq; + struct list_head wait_list; +}; + +struct arch_tlbflush_unmap_batch { + struct cpumask cpumask; +}; + +struct tlbflush_unmap_batch { + struct arch_tlbflush_unmap_batch arch; + bool flush_required; + bool writable; +}; + +struct page_frag { + struct page *page; + __u32 offset; + __u32 size; +}; + +struct kmap_ctrl {}; + +struct llist_head { + struct llist_node *first; +}; + +struct desc_struct { + u16 limit0; + u16 base0; + u16 base1: 8; + u16 type: 4; + u16 s: 1; + u16 dpl: 2; + u16 p: 1; + u16 limit1: 4; + u16 avl: 1; + u16 l: 1; + u16 d: 1; + u16 g: 1; + u16 base2: 8; +}; + +struct fregs_state { + u32 cwd; + u32 swd; + u32 twd; + u32 fip; + u32 fcs; + u32 foo; + u32 fos; + u32 st_space[20]; + u32 status; +}; + +struct fxregs_state { + u16 cwd; + u16 swd; + u16 twd; + u16 fop; + union { + struct { + u64 rip; + u64 rdp; + }; + struct { + u32 fip; + u32 fcs; + u32 foo; + u32 fos; + }; + }; + u32 mxcsr; + u32 mxcsr_mask; + u32 st_space[32]; + u32 xmm_space[64]; + u32 padding[12]; + union { + u32 padding1[12]; + u32 sw_reserved[12]; + }; +}; + +struct math_emu_info; + +struct swregs_state { + u32 cwd; + u32 swd; + u32 twd; + u32 fip; + u32 fcs; + u32 foo; + u32 fos; + u32 st_space[20]; + u8 ftop; + u8 changed; + u8 lookahead; + u8 no_update; + u8 rm; + u8 alimit; + struct math_emu_info *info; + u32 entry_eip; +}; + +struct xstate_header { + u64 xfeatures; + u64 xcomp_bv; 
+ u64 reserved[6]; +}; + +struct xregs_state { + struct fxregs_state i387; + struct xstate_header header; + u8 extended_state_area[0]; +}; + +union fpregs_state { + struct fregs_state fsave; + struct fxregs_state fxsave; + struct swregs_state soft; + struct xregs_state xsave; + u8 __padding[4096]; +}; + +struct fpu { + unsigned int last_cpu; + long unsigned int avx512_timestamp; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + union fpregs_state state; +}; + +struct perf_event; + +struct io_bitmap; + +struct thread_struct { + struct desc_struct tls_array[3]; + long unsigned int sp; + short unsigned int es; + short unsigned int ds; + short unsigned int fsindex; + short unsigned int gsindex; + long unsigned int fsbase; + long unsigned int gsbase; + struct perf_event *ptrace_bps[4]; + long unsigned int virtual_dr6; + long unsigned int ptrace_dr7; + long unsigned int cr2; + long unsigned int trap_nr; + long unsigned int error_code; + struct io_bitmap *io_bitmap; + long unsigned int iopl_emul; + unsigned int sig_on_uaccess_err: 1; + long: 63; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct fpu fpu; +}; + +struct sched_class; + +struct task_group; + +struct mm_struct; + +struct pid; + +struct completion; + +struct cred; + +struct key; + +struct nameidata; + +struct fs_struct; + +struct files_struct; + +struct io_uring_task; + +struct nsproxy; + +struct signal_struct; + +struct sighand_struct; + +struct audit_context; + +struct rt_mutex_waiter; + +struct bio_list; + +struct blk_plug; + +struct reclaim_state; + +struct backing_dev_info; + +struct io_context; + +struct capture_control; + +struct kernel_siginfo; + +typedef struct kernel_siginfo kernel_siginfo_t; + +struct css_set; + +struct robust_list_head; + +struct compat_robust_list_head; + +struct futex_pi_state; + +struct perf_event_context; + +struct mempolicy; + +struct numa_group; + +struct rseq; + +struct task_delay_info; + +struct ftrace_ret_stack; + +struct mem_cgroup; + +struct request_queue; + +struct uprobe_task; + +struct vm_struct; + +struct bpf_local_storage; + +struct task_struct { + struct thread_info thread_info; + volatile long int state; + void *stack; + refcount_t usage; + unsigned int flags; + unsigned int ptrace; + int on_cpu; + struct __call_single_node wake_entry; + unsigned int cpu; + unsigned int wakee_flips; + long unsigned int wakee_flip_decay_ts; + struct task_struct *last_wakee; + int recent_used_cpu; + int wake_cpu; + int on_rq; + int prio; + int static_prio; + int normal_prio; + unsigned int rt_priority; + const struct sched_class *sched_class; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct sched_entity se; + struct sched_rt_entity rt; + struct task_group *sched_task_group; + struct sched_dl_entity dl; + struct uclamp_se uclamp_req[2]; + struct uclamp_se uclamp[2]; + struct hlist_head preempt_notifiers; + unsigned int btrace_seq; + unsigned int policy; + int nr_cpus_allowed; + const cpumask_t *cpus_ptr; + cpumask_t cpus_mask; + void *migration_pending; + short unsigned int migration_disabled; + short unsigned int migration_flags; + int trc_reader_nesting; + int trc_ipi_to_cpu; + union rcu_special trc_reader_special; + bool trc_reader_checked; + struct list_head trc_holdout_list; + struct sched_info sched_info; + struct list_head tasks; + struct plist_node pushable_tasks; + struct rb_node pushable_dl_tasks; + struct mm_struct *mm; + struct mm_struct *active_mm; + struct vmacache vmacache; + struct task_rss_stat rss_stat; + 
int exit_state; + int exit_code; + int exit_signal; + int pdeath_signal; + long unsigned int jobctl; + unsigned int personality; + unsigned int sched_reset_on_fork: 1; + unsigned int sched_contributes_to_load: 1; + unsigned int sched_migrated: 1; + unsigned int sched_psi_wake_requeue: 1; + int: 28; + unsigned int sched_remote_wakeup: 1; + unsigned int in_execve: 1; + unsigned int in_iowait: 1; + unsigned int restore_sigmask: 1; + unsigned int in_user_fault: 1; + unsigned int no_cgroup_migration: 1; + unsigned int frozen: 1; + unsigned int use_memdelay: 1; + unsigned int in_memstall: 1; + long unsigned int atomic_flags; + struct restart_block restart_block; + pid_t pid; + pid_t tgid; + long unsigned int stack_canary; + struct task_struct *real_parent; + struct task_struct *parent; + struct list_head children; + struct list_head sibling; + struct task_struct *group_leader; + struct list_head ptraced; + struct list_head ptrace_entry; + struct pid *thread_pid; + struct hlist_node pid_links[4]; + struct list_head thread_group; + struct list_head thread_node; + struct completion *vfork_done; + int *set_child_tid; + int *clear_child_tid; + void *pf_io_worker; + u64 utime; + u64 stime; + u64 gtime; + struct prev_cputime prev_cputime; + long unsigned int nvcsw; + long unsigned int nivcsw; + u64 start_time; + u64 start_boottime; + long unsigned int min_flt; + long unsigned int maj_flt; + struct posix_cputimers posix_cputimers; + struct posix_cputimers_work posix_cputimers_work; + const struct cred *ptracer_cred; + const struct cred *real_cred; + const struct cred *cred; + struct key *cached_requested_key; + char comm[16]; + struct nameidata *nameidata; + struct sysv_sem sysvsem; + struct sysv_shm sysvshm; + long unsigned int last_switch_count; + long unsigned int last_switch_time; + struct fs_struct *fs; + struct files_struct *files; + struct io_uring_task *io_uring; + struct nsproxy *nsproxy; + struct signal_struct *signal; + struct sighand_struct *sighand; + sigset_t blocked; + sigset_t real_blocked; + sigset_t saved_sigmask; + struct sigpending pending; + long unsigned int sas_ss_sp; + size_t sas_ss_size; + unsigned int sas_ss_flags; + struct callback_head *task_works; + struct audit_context *audit_context; + kuid_t loginuid; + unsigned int sessionid; + struct seccomp seccomp; + struct syscall_user_dispatch syscall_dispatch; + u64 parent_exec_id; + u64 self_exec_id; + spinlock_t alloc_lock; + raw_spinlock_t pi_lock; + struct wake_q_node wake_q; + struct rb_root_cached pi_waiters; + struct task_struct *pi_top_task; + struct rt_mutex_waiter *pi_blocked_on; + void *journal_info; + struct bio_list *bio_list; + struct blk_plug *plug; + struct reclaim_state *reclaim_state; + struct backing_dev_info *backing_dev_info; + struct io_context *io_context; + struct capture_control *capture_control; + long unsigned int ptrace_message; + kernel_siginfo_t *last_siginfo; + struct task_io_accounting ioac; + unsigned int psi_flags; + u64 acct_rss_mem1; + u64 acct_vm_mem1; + u64 acct_timexpd; + nodemask_t mems_allowed; + seqcount_spinlock_t mems_allowed_seq; + int cpuset_mem_spread_rotor; + int cpuset_slab_spread_rotor; + struct css_set *cgroups; + struct list_head cg_list; + u32 closid; + u32 rmid; + struct robust_list_head *robust_list; + struct compat_robust_list_head *compat_robust_list; + struct list_head pi_state_list; + struct futex_pi_state *pi_state_cache; + struct mutex futex_exit_mutex; + unsigned int futex_state; + struct perf_event_context *perf_event_ctxp[2]; + struct mutex perf_event_mutex; + struct 
list_head perf_event_list; + struct mempolicy *mempolicy; + short int il_prev; + short int pref_node_fork; + int numa_scan_seq; + unsigned int numa_scan_period; + unsigned int numa_scan_period_max; + int numa_preferred_nid; + long unsigned int numa_migrate_retry; + u64 node_stamp; + u64 last_task_numa_placement; + u64 last_sum_exec_runtime; + struct callback_head numa_work; + struct numa_group *numa_group; + long unsigned int *numa_faults; + long unsigned int total_numa_faults; + long unsigned int numa_faults_locality[3]; + long unsigned int numa_pages_migrated; + struct rseq *rseq; + u32 rseq_sig; + long unsigned int rseq_event_mask; + struct tlbflush_unmap_batch tlb_ubc; + union { + refcount_t rcu_users; + struct callback_head rcu; + }; + struct pipe_inode_info *splice_pipe; + struct page_frag task_frag; + struct task_delay_info *delays; + int nr_dirtied; + int nr_dirtied_pause; + long unsigned int dirty_paused_when; + u64 timer_slack_ns; + u64 default_timer_slack_ns; + int curr_ret_stack; + int curr_ret_depth; + struct ftrace_ret_stack *ret_stack; + long long unsigned int ftrace_timestamp; + atomic_t trace_overrun; + atomic_t tracing_graph_pause; + long unsigned int trace; + long unsigned int trace_recursion; + struct mem_cgroup *memcg_in_oom; + gfp_t memcg_oom_gfp_mask; + int memcg_oom_order; + unsigned int memcg_nr_pages_over_high; + struct mem_cgroup *active_memcg; + struct request_queue *throttle_queue; + struct uprobe_task *utask; + unsigned int sequential_io; + unsigned int sequential_io_avg; + struct kmap_ctrl kmap_ctrl; + int pagefault_disabled; + struct task_struct *oom_reaper_list; + struct vm_struct *stack_vm_area; + refcount_t stack_refcount; + int patch_state; + void *security; + struct bpf_local_storage *bpf_storage; + void *mce_vaddr; + __u64 mce_kflags; + u64 mce_addr; + __u64 mce_ripv: 1; + __u64 mce_whole_page: 1; + __u64 __mce_reserved: 62; + struct callback_head mce_kill_me; + struct llist_head kretprobe_instances; + long: 64; + long: 64; + long: 64; + long: 64; + struct thread_struct thread; +}; + +struct screen_info { + __u8 orig_x; + __u8 orig_y; + __u16 ext_mem_k; + __u16 orig_video_page; + __u8 orig_video_mode; + __u8 orig_video_cols; + __u8 flags; + __u8 unused2; + __u16 orig_video_ega_bx; + __u16 unused3; + __u8 orig_video_lines; + __u8 orig_video_isVGA; + __u16 orig_video_points; + __u16 lfb_width; + __u16 lfb_height; + __u16 lfb_depth; + __u32 lfb_base; + __u32 lfb_size; + __u16 cl_magic; + __u16 cl_offset; + __u16 lfb_linelength; + __u8 red_size; + __u8 red_pos; + __u8 green_size; + __u8 green_pos; + __u8 blue_size; + __u8 blue_pos; + __u8 rsvd_size; + __u8 rsvd_pos; + __u16 vesapm_seg; + __u16 vesapm_off; + __u16 pages; + __u16 vesa_attributes; + __u32 capabilities; + __u32 ext_lfb_base; + __u8 _reserved[2]; +} __attribute__((packed)); + +struct apm_bios_info { + __u16 version; + __u16 cseg; + __u32 offset; + __u16 cseg_16; + __u16 dseg; + __u16 flags; + __u16 cseg_len; + __u16 cseg_16_len; + __u16 dseg_len; +}; + +struct edd_device_params { + __u16 length; + __u16 info_flags; + __u32 num_default_cylinders; + __u32 num_default_heads; + __u32 sectors_per_track; + __u64 number_of_sectors; + __u16 bytes_per_sector; + __u32 dpte_ptr; + __u16 key; + __u8 device_path_info_length; + __u8 reserved2; + __u16 reserved3; + __u8 host_bus_type[4]; + __u8 interface_type[8]; + union { + struct { + __u16 base_address; + __u16 reserved1; + __u32 reserved2; + } isa; + struct { + __u8 bus; + __u8 slot; + __u8 function; + __u8 channel; + __u32 reserved; + } pci; + struct { + 
__u64 reserved; + } ibnd; + struct { + __u64 reserved; + } xprs; + struct { + __u64 reserved; + } htpt; + struct { + __u64 reserved; + } unknown; + } interface_path; + union { + struct { + __u8 device; + __u8 reserved1; + __u16 reserved2; + __u32 reserved3; + __u64 reserved4; + } ata; + struct { + __u8 device; + __u8 lun; + __u8 reserved1; + __u8 reserved2; + __u32 reserved3; + __u64 reserved4; + } atapi; + struct { + __u16 id; + __u64 lun; + __u16 reserved1; + __u32 reserved2; + } __attribute__((packed)) scsi; + struct { + __u64 serial_number; + __u64 reserved; + } usb; + struct { + __u64 eui; + __u64 reserved; + } i1394; + struct { + __u64 wwid; + __u64 lun; + } fibre; + struct { + __u64 identity_tag; + __u64 reserved; + } i2o; + struct { + __u32 array_number; + __u32 reserved1; + __u64 reserved2; + } raid; + struct { + __u8 device; + __u8 reserved1; + __u16 reserved2; + __u32 reserved3; + __u64 reserved4; + } sata; + struct { + __u64 reserved1; + __u64 reserved2; + } unknown; + } device_path; + __u8 reserved4; + __u8 checksum; +} __attribute__((packed)); + +struct edd_info { + __u8 device; + __u8 version; + __u16 interface_support; + __u16 legacy_max_cylinder; + __u8 legacy_max_head; + __u8 legacy_sectors_per_track; + struct edd_device_params params; +} __attribute__((packed)); + +struct ist_info { + __u32 signature; + __u32 command; + __u32 event; + __u32 perf_level; +}; + +struct edid_info { + unsigned char dummy[128]; +}; + +struct setup_header { + __u8 setup_sects; + __u16 root_flags; + __u32 syssize; + __u16 ram_size; + __u16 vid_mode; + __u16 root_dev; + __u16 boot_flag; + __u16 jump; + __u32 header; + __u16 version; + __u32 realmode_swtch; + __u16 start_sys_seg; + __u16 kernel_version; + __u8 type_of_loader; + __u8 loadflags; + __u16 setup_move_size; + __u32 code32_start; + __u32 ramdisk_image; + __u32 ramdisk_size; + __u32 bootsect_kludge; + __u16 heap_end_ptr; + __u8 ext_loader_ver; + __u8 ext_loader_type; + __u32 cmd_line_ptr; + __u32 initrd_addr_max; + __u32 kernel_alignment; + __u8 relocatable_kernel; + __u8 min_alignment; + __u16 xloadflags; + __u32 cmdline_size; + __u32 hardware_subarch; + __u64 hardware_subarch_data; + __u32 payload_offset; + __u32 payload_length; + __u64 setup_data; + __u64 pref_address; + __u32 init_size; + __u32 handover_offset; + __u32 kernel_info_offset; +} __attribute__((packed)); + +struct sys_desc_table { + __u16 length; + __u8 table[14]; +}; + +struct olpc_ofw_header { + __u32 ofw_magic; + __u32 ofw_version; + __u32 cif_handler; + __u32 irq_desc_table; +}; + +struct efi_info { + __u32 efi_loader_signature; + __u32 efi_systab; + __u32 efi_memdesc_size; + __u32 efi_memdesc_version; + __u32 efi_memmap; + __u32 efi_memmap_size; + __u32 efi_systab_hi; + __u32 efi_memmap_hi; +}; + +struct boot_e820_entry { + __u64 addr; + __u64 size; + __u32 type; +} __attribute__((packed)); + +struct boot_params { + struct screen_info screen_info; + struct apm_bios_info apm_bios_info; + __u8 _pad2[4]; + __u64 tboot_addr; + struct ist_info ist_info; + __u64 acpi_rsdp_addr; + __u8 _pad3[8]; + __u8 hd0_info[16]; + __u8 hd1_info[16]; + struct sys_desc_table sys_desc_table; + struct olpc_ofw_header olpc_ofw_header; + __u32 ext_ramdisk_image; + __u32 ext_ramdisk_size; + __u32 ext_cmd_line_ptr; + __u8 _pad4[116]; + struct edid_info edid_info; + struct efi_info efi_info; + __u32 alt_mem_k; + __u32 scratch; + __u8 e820_entries; + __u8 eddbuf_entries; + __u8 edd_mbr_sig_buf_entries; + __u8 kbd_status; + __u8 secure_boot; + __u8 _pad5[2]; + __u8 sentinel; + __u8 _pad6[1]; + 
struct setup_header hdr; + __u8 _pad7[36]; + __u32 edd_mbr_sig_buffer[16]; + struct boot_e820_entry e820_table[128]; + __u8 _pad8[48]; + struct edd_info eddbuf[6]; + __u8 _pad9[276]; +} __attribute__((packed)); + +enum x86_hardware_subarch { + X86_SUBARCH_PC = 0, + X86_SUBARCH_LGUEST = 1, + X86_SUBARCH_XEN = 2, + X86_SUBARCH_INTEL_MID = 3, + X86_SUBARCH_CE4100 = 4, + X86_NR_SUBARCHS = 5, +}; + +struct range { + u64 start; + u64 end; +}; + +struct pt_regs { + long unsigned int r15; + long unsigned int r14; + long unsigned int r13; + long unsigned int r12; + long unsigned int bp; + long unsigned int bx; + long unsigned int r11; + long unsigned int r10; + long unsigned int r9; + long unsigned int r8; + long unsigned int ax; + long unsigned int cx; + long unsigned int dx; + long unsigned int si; + long unsigned int di; + long unsigned int orig_ax; + long unsigned int ip; + long unsigned int cs; + long unsigned int flags; + long unsigned int sp; + long unsigned int ss; +}; + +enum { + GATE_INTERRUPT = 14, + GATE_TRAP = 15, + GATE_CALL = 12, + GATE_TASK = 5, +}; + +struct idt_bits { + u16 ist: 3; + u16 zero: 5; + u16 type: 5; + u16 dpl: 2; + u16 p: 1; +}; + +struct idt_data { + unsigned int vector; + unsigned int segment; + struct idt_bits bits; + const void *addr; +}; + +struct gate_struct { + u16 offset_low; + u16 segment; + struct idt_bits bits; + u16 offset_middle; + u32 offset_high; + u32 reserved; +}; + +typedef struct gate_struct gate_desc; + +struct desc_ptr { + short unsigned int size; + long unsigned int address; +} __attribute__((packed)); + +typedef long unsigned int pteval_t; + +typedef long unsigned int pmdval_t; + +typedef long unsigned int pudval_t; + +typedef long unsigned int p4dval_t; + +typedef long unsigned int pgdval_t; + +typedef long unsigned int pgprotval_t; + +typedef struct { + pteval_t pte; +} pte_t; + +struct pgprot { + pgprotval_t pgprot; +}; + +typedef struct pgprot pgprot_t; + +typedef struct { + pgdval_t pgd; +} pgd_t; + +typedef struct { + p4dval_t p4d; +} p4d_t; + +typedef struct { + pudval_t pud; +} pud_t; + +typedef struct { + pmdval_t pmd; +} pmd_t; + +typedef struct page *pgtable_t; + +struct address_space; + +struct kmem_cache; + +struct dev_pagemap; + +struct page { + long unsigned int flags; + union { + struct { + struct list_head lru; + struct address_space *mapping; + long unsigned int index; + long unsigned int private; + }; + struct { + long unsigned int dma_addr[2]; + }; + struct { + union { + struct list_head slab_list; + struct { + struct page *next; + int pages; + int pobjects; + }; + }; + struct kmem_cache *slab_cache; + void *freelist; + union { + void *s_mem; + long unsigned int counters; + struct { + unsigned int inuse: 16; + unsigned int objects: 15; + unsigned int frozen: 1; + }; + }; + }; + struct { + long unsigned int compound_head; + unsigned char compound_dtor; + unsigned char compound_order; + atomic_t compound_mapcount; + unsigned int compound_nr; + }; + struct { + long unsigned int _compound_pad_1; + atomic_t hpage_pinned_refcount; + struct list_head deferred_list; + }; + struct { + long unsigned int _pt_pad_1; + pgtable_t pmd_huge_pte; + long unsigned int _pt_pad_2; + union { + struct mm_struct *pt_mm; + atomic_t pt_frag_refcount; + }; + spinlock_t ptl; + }; + struct { + struct dev_pagemap *pgmap; + void *zone_device_data; + }; + struct callback_head callback_head; + }; + union { + atomic_t _mapcount; + unsigned int page_type; + unsigned int active; + int units; + }; + atomic_t _refcount; + long unsigned int memcg_data; +}; + 
+struct paravirt_callee_save { + void *func; +}; + +struct pv_lazy_ops { + void (*enter)(); + void (*leave)(); + void (*flush)(); +}; + +struct pv_cpu_ops { + void (*io_delay)(); + long unsigned int (*get_debugreg)(int); + void (*set_debugreg)(int, long unsigned int); + long unsigned int (*read_cr0)(); + void (*write_cr0)(long unsigned int); + void (*write_cr4)(long unsigned int); + void (*load_tr_desc)(); + void (*load_gdt)(const struct desc_ptr *); + void (*load_idt)(const struct desc_ptr *); + void (*set_ldt)(const void *, unsigned int); + long unsigned int (*store_tr)(); + void (*load_tls)(struct thread_struct *, unsigned int); + void (*load_gs_index)(unsigned int); + void (*write_ldt_entry)(struct desc_struct *, int, const void *); + void (*write_gdt_entry)(struct desc_struct *, int, const void *, int); + void (*write_idt_entry)(gate_desc *, int, const gate_desc *); + void (*alloc_ldt)(struct desc_struct *, unsigned int); + void (*free_ldt)(struct desc_struct *, unsigned int); + void (*load_sp0)(long unsigned int); + void (*invalidate_io_bitmap)(); + void (*update_io_bitmap)(); + void (*wbinvd)(); + void (*cpuid)(unsigned int *, unsigned int *, unsigned int *, unsigned int *); + u64 (*read_msr)(unsigned int); + void (*write_msr)(unsigned int, unsigned int, unsigned int); + u64 (*read_msr_safe)(unsigned int, int *); + int (*write_msr_safe)(unsigned int, unsigned int, unsigned int); + u64 (*read_pmc)(int); + void (*start_context_switch)(struct task_struct *); + void (*end_context_switch)(struct task_struct *); +}; + +struct pv_irq_ops { + struct paravirt_callee_save save_fl; + struct paravirt_callee_save irq_disable; + struct paravirt_callee_save irq_enable; + void (*safe_halt)(); + void (*halt)(); +}; + +struct flush_tlb_info; + +struct mmu_gather; + +struct pv_mmu_ops { + void (*flush_tlb_user)(); + void (*flush_tlb_kernel)(); + void (*flush_tlb_one_user)(long unsigned int); + void (*flush_tlb_multi)(const struct cpumask *, const struct flush_tlb_info *); + void (*tlb_remove_table)(struct mmu_gather *, void *); + void (*exit_mmap)(struct mm_struct *); + struct paravirt_callee_save read_cr2; + void (*write_cr2)(long unsigned int); + long unsigned int (*read_cr3)(); + void (*write_cr3)(long unsigned int); + void (*activate_mm)(struct mm_struct *, struct mm_struct *); + void (*dup_mmap)(struct mm_struct *, struct mm_struct *); + int (*pgd_alloc)(struct mm_struct *); + void (*pgd_free)(struct mm_struct *, pgd_t *); + void (*alloc_pte)(struct mm_struct *, long unsigned int); + void (*alloc_pmd)(struct mm_struct *, long unsigned int); + void (*alloc_pud)(struct mm_struct *, long unsigned int); + void (*alloc_p4d)(struct mm_struct *, long unsigned int); + void (*release_pte)(long unsigned int); + void (*release_pmd)(long unsigned int); + void (*release_pud)(long unsigned int); + void (*release_p4d)(long unsigned int); + void (*set_pte)(pte_t *, pte_t); + void (*set_pmd)(pmd_t *, pmd_t); + pte_t (*ptep_modify_prot_start)(struct vm_area_struct *, long unsigned int, pte_t *); + void (*ptep_modify_prot_commit)(struct vm_area_struct *, long unsigned int, pte_t *, pte_t); + struct paravirt_callee_save pte_val; + struct paravirt_callee_save make_pte; + struct paravirt_callee_save pgd_val; + struct paravirt_callee_save make_pgd; + void (*set_pud)(pud_t *, pud_t); + struct paravirt_callee_save pmd_val; + struct paravirt_callee_save make_pmd; + struct paravirt_callee_save pud_val; + struct paravirt_callee_save make_pud; + void (*set_p4d)(p4d_t *, p4d_t); + struct paravirt_callee_save p4d_val; + struct 
paravirt_callee_save make_p4d; + void (*set_pgd)(pgd_t *, pgd_t); + struct pv_lazy_ops lazy_mode; + void (*set_fixmap)(unsigned int, phys_addr_t, pgprot_t); +}; + +struct flush_tlb_info { + struct mm_struct *mm; + long unsigned int start; + long unsigned int end; + u64 new_tlb_gen; + unsigned int initiating_cpu; + u8 stride_shift; + u8 freed_tables; +}; + +struct rw_semaphore { + atomic_long_t count; + atomic_long_t owner; + struct optimistic_spin_queue osq; + raw_spinlock_t wait_lock; + struct list_head wait_list; +}; + +struct mm_rss_stat { + atomic_long_t count[4]; +}; + +struct ldt_struct; + +struct vdso_image; + +typedef struct { + u64 ctx_id; + atomic64_t tlb_gen; + struct rw_semaphore ldt_usr_sem; + struct ldt_struct *ldt; + short unsigned int flags; + struct mutex lock; + void *vdso; + const struct vdso_image *vdso_image; + atomic_t perf_rdpmc_allowed; + u16 pkey_allocation_map; + s16 execute_only_pkey; +} mm_context_t; + +struct xol_area; + +struct uprobes_state { + struct xol_area *xol_area; +}; + +struct work_struct; + +typedef void (*work_func_t)(struct work_struct *); + +struct work_struct { + atomic_long_t data; + struct list_head entry; + work_func_t func; +}; + +struct linux_binfmt; + +struct core_state; + +struct kioctx_table; + +struct user_namespace; + +struct mmu_notifier_subscriptions; + +struct mm_struct { + struct { + struct vm_area_struct *mmap; + struct rb_root mm_rb; + u64 vmacache_seqnum; + long unsigned int (*get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + long unsigned int mmap_base; + long unsigned int mmap_legacy_base; + long unsigned int mmap_compat_base; + long unsigned int mmap_compat_legacy_base; + long unsigned int task_size; + long unsigned int highest_vm_end; + pgd_t *pgd; + atomic_t membarrier_state; + atomic_t mm_users; + atomic_t mm_count; + atomic_t has_pinned; + atomic_long_t pgtables_bytes; + int map_count; + spinlock_t page_table_lock; + struct rw_semaphore mmap_lock; + struct list_head mmlist; + long unsigned int hiwater_rss; + long unsigned int hiwater_vm; + long unsigned int total_vm; + long unsigned int locked_vm; + atomic64_t pinned_vm; + long unsigned int data_vm; + long unsigned int exec_vm; + long unsigned int stack_vm; + long unsigned int def_flags; + seqcount_t write_protect_seq; + spinlock_t arg_lock; + long unsigned int start_code; + long unsigned int end_code; + long unsigned int start_data; + long unsigned int end_data; + long unsigned int start_brk; + long unsigned int brk; + long unsigned int start_stack; + long unsigned int arg_start; + long unsigned int arg_end; + long unsigned int env_start; + long unsigned int env_end; + long unsigned int saved_auxv[46]; + struct mm_rss_stat rss_stat; + struct linux_binfmt *binfmt; + mm_context_t context; + long unsigned int flags; + struct core_state *core_state; + spinlock_t ioctx_lock; + struct kioctx_table *ioctx_table; + struct task_struct *owner; + struct user_namespace *user_ns; + struct file *exe_file; + struct mmu_notifier_subscriptions *notifier_subscriptions; + long unsigned int numa_next_scan; + long unsigned int numa_scan_offset; + int numa_scan_seq; + atomic_t tlb_flush_pending; + bool tlb_flush_batched; + struct uprobes_state uprobes_state; + atomic_long_t hugetlb_usage; + struct work_struct async_put_work; + u32 pasid; + }; + long unsigned int cpu_bitmap[0]; +}; + +struct userfaultfd_ctx; + +struct vm_userfaultfd_ctx { + struct userfaultfd_ctx *ctx; +}; + +struct anon_vma; + +struct vm_operations_struct; + 
+struct vm_area_struct { + long unsigned int vm_start; + long unsigned int vm_end; + struct vm_area_struct *vm_next; + struct vm_area_struct *vm_prev; + struct rb_node vm_rb; + long unsigned int rb_subtree_gap; + struct mm_struct *vm_mm; + pgprot_t vm_page_prot; + long unsigned int vm_flags; + struct { + struct rb_node rb; + long unsigned int rb_subtree_last; + } shared; + struct list_head anon_vma_chain; + struct anon_vma *anon_vma; + const struct vm_operations_struct *vm_ops; + long unsigned int vm_pgoff; + struct file *vm_file; + void *vm_private_data; + atomic_long_t swap_readahead_info; + struct mempolicy *vm_policy; + struct vm_userfaultfd_ctx vm_userfaultfd_ctx; +}; + +struct pv_lock_ops { + void (*queued_spin_lock_slowpath)(struct qspinlock *, u32); + struct paravirt_callee_save queued_spin_unlock; + void (*wait)(u8 *, u8); + void (*kick)(int); + struct paravirt_callee_save vcpu_is_preempted; +}; + +struct paravirt_patch_template { + struct pv_cpu_ops cpu; + struct pv_irq_ops irq; + struct pv_mmu_ops mmu; + struct pv_lock_ops lock; +}; + +struct math_emu_info { + long int ___orig_eip; + struct pt_regs *regs; +}; + +enum { + UNAME26 = 131072, + ADDR_NO_RANDOMIZE = 262144, + FDPIC_FUNCPTRS = 524288, + MMAP_PAGE_ZERO = 1048576, + ADDR_COMPAT_LAYOUT = 2097152, + READ_IMPLIES_EXEC = 4194304, + ADDR_LIMIT_32BIT = 8388608, + SHORT_INODE = 16777216, + WHOLE_SECONDS = 33554432, + STICKY_TIMEOUTS = 67108864, + ADDR_LIMIT_3GB = 134217728, +}; + +enum tlb_infos { + ENTRIES = 0, + NR_INFO = 1, +}; + +enum pcpu_fc { + PCPU_FC_AUTO = 0, + PCPU_FC_EMBED = 1, + PCPU_FC_PAGE = 2, + PCPU_FC_NR = 3, +}; + +struct vm_struct { + struct vm_struct *next; + void *addr; + long unsigned int size; + long unsigned int flags; + struct page **pages; + unsigned int nr_pages; + phys_addr_t phys_addr; + const void *caller; +}; + +struct wait_queue_head { + spinlock_t lock; + struct list_head head; +}; + +typedef struct wait_queue_head wait_queue_head_t; + +struct seqcount_raw_spinlock { + seqcount_t seqcount; +}; + +typedef struct seqcount_raw_spinlock seqcount_raw_spinlock_t; + +typedef struct { + seqcount_spinlock_t seqcount; + spinlock_t lock; +} seqlock_t; + +enum node_states { + N_POSSIBLE = 0, + N_ONLINE = 1, + N_NORMAL_MEMORY = 2, + N_HIGH_MEMORY = 2, + N_MEMORY = 3, + N_CPU = 4, + N_GENERIC_INITIATOR = 5, + NR_NODE_STATES = 6, +}; + +enum { + MM_FILEPAGES = 0, + MM_ANONPAGES = 1, + MM_SWAPENTS = 2, + MM_SHMEMPAGES = 3, + NR_MM_COUNTERS = 4, +}; + +struct swait_queue_head { + raw_spinlock_t lock; + struct list_head task_list; +}; + +struct completion { + unsigned int done; + struct swait_queue_head wait; +}; + +struct arch_uprobe_task { + long unsigned int saved_scratch_register; + unsigned int saved_trap_nr; + unsigned int saved_tf; +}; + +enum uprobe_task_state { + UTASK_RUNNING = 0, + UTASK_SSTEP = 1, + UTASK_SSTEP_ACK = 2, + UTASK_SSTEP_TRAPPED = 3, +}; + +struct uprobe; + +struct return_instance; + +struct uprobe_task { + enum uprobe_task_state state; + union { + struct { + struct arch_uprobe_task autask; + long unsigned int vaddr; + }; + struct { + struct callback_head dup_xol_work; + long unsigned int dup_xol_addr; + }; + }; + struct uprobe *active_uprobe; + long unsigned int xol_vaddr; + struct return_instance *return_instances; + unsigned int depth; +}; + +struct return_instance { + struct uprobe *uprobe; + long unsigned int func; + long unsigned int stack; + long unsigned int orig_ret_vaddr; + bool chained; + struct return_instance *next; +}; + +struct vdso_image { + void *data; + long unsigned int 
size; + long unsigned int alt; + long unsigned int alt_len; + long unsigned int extable_base; + long unsigned int extable_len; + const void *extable; + long int sym_vvar_start; + long int sym_vvar_page; + long int sym_pvclock_page; + long int sym_hvclock_page; + long int sym_timens_page; + long int sym_VDSO32_NOTE_MASK; + long int sym___kernel_sigreturn; + long int sym___kernel_rt_sigreturn; + long int sym___kernel_vsyscall; + long int sym_int80_landing_pad; + long int sym_vdso32_sigreturn_landing_pad; + long int sym_vdso32_rt_sigreturn_landing_pad; +}; + +struct xarray { + spinlock_t xa_lock; + gfp_t xa_flags; + void *xa_head; +}; + +typedef u32 errseq_t; + +struct address_space_operations; + +struct address_space { + struct inode *host; + struct xarray i_pages; + gfp_t gfp_mask; + atomic_t i_mmap_writable; + struct rb_root_cached i_mmap; + struct rw_semaphore i_mmap_rwsem; + long unsigned int nrpages; + long unsigned int writeback_index; + const struct address_space_operations *a_ops; + long unsigned int flags; + errseq_t wb_err; + spinlock_t private_lock; + struct list_head private_list; + void *private_data; +}; + +struct vmem_altmap { + long unsigned int base_pfn; + const long unsigned int end_pfn; + const long unsigned int reserve; + long unsigned int free; + long unsigned int align; + long unsigned int alloc; +}; + +struct percpu_ref_data; + +struct percpu_ref { + long unsigned int percpu_count_ptr; + struct percpu_ref_data *data; +}; + +enum memory_type { + MEMORY_DEVICE_PRIVATE = 1, + MEMORY_DEVICE_FS_DAX = 2, + MEMORY_DEVICE_GENERIC = 3, + MEMORY_DEVICE_PCI_P2PDMA = 4, +}; + +struct dev_pagemap_ops; + +struct dev_pagemap { + struct vmem_altmap altmap; + struct percpu_ref *ref; + struct percpu_ref internal_ref; + struct completion done; + enum memory_type type; + unsigned int flags; + const struct dev_pagemap_ops *ops; + void *owner; + int nr_range; + union { + struct range range; + struct range ranges[0]; + }; +}; + +struct vfsmount; + +struct path { + struct vfsmount *mnt; + struct dentry *dentry; +}; + +enum rw_hint { + WRITE_LIFE_NOT_SET = 0, + WRITE_LIFE_NONE = 1, + WRITE_LIFE_SHORT = 2, + WRITE_LIFE_MEDIUM = 3, + WRITE_LIFE_LONG = 4, + WRITE_LIFE_EXTREME = 5, +}; + +enum pid_type { + PIDTYPE_PID = 0, + PIDTYPE_TGID = 1, + PIDTYPE_PGID = 2, + PIDTYPE_SID = 3, + PIDTYPE_MAX = 4, +}; + +struct fown_struct { + rwlock_t lock; + struct pid *pid; + enum pid_type pid_type; + kuid_t uid; + kuid_t euid; + int signum; +}; + +struct file_ra_state { + long unsigned int start; + unsigned int size; + unsigned int async_size; + unsigned int ra_pages; + unsigned int mmap_miss; + loff_t prev_pos; +}; + +struct file { + union { + struct llist_node fu_llist; + struct callback_head fu_rcuhead; + } f_u; + struct path f_path; + struct inode *f_inode; + const struct file_operations *f_op; + spinlock_t f_lock; + enum rw_hint f_write_hint; + atomic_long_t f_count; + unsigned int f_flags; + fmode_t f_mode; + struct mutex f_pos_lock; + loff_t f_pos; + struct fown_struct f_owner; + const struct cred *f_cred; + struct file_ra_state f_ra; + u64 f_version; + void *f_security; + void *private_data; + struct hlist_head *f_ep; + struct address_space *f_mapping; + errseq_t f_wb_err; + errseq_t f_sb_err; +}; + +typedef unsigned int vm_fault_t; + +enum page_entry_size { + PE_SIZE_PTE = 0, + PE_SIZE_PMD = 1, + PE_SIZE_PUD = 2, +}; + +struct vm_fault; + +struct vm_operations_struct { + void (*open)(struct vm_area_struct *); + void (*close)(struct vm_area_struct *); + int (*may_split)(struct vm_area_struct *, long 
unsigned int); + int (*mremap)(struct vm_area_struct *); + int (*mprotect)(struct vm_area_struct *, long unsigned int, long unsigned int, long unsigned int); + vm_fault_t (*fault)(struct vm_fault *); + vm_fault_t (*huge_fault)(struct vm_fault *, enum page_entry_size); + vm_fault_t (*map_pages)(struct vm_fault *, long unsigned int, long unsigned int); + long unsigned int (*pagesize)(struct vm_area_struct *); + vm_fault_t (*page_mkwrite)(struct vm_fault *); + vm_fault_t (*pfn_mkwrite)(struct vm_fault *); + int (*access)(struct vm_area_struct *, long unsigned int, void *, int, int); + const char * (*name)(struct vm_area_struct *); + int (*set_policy)(struct vm_area_struct *, struct mempolicy *); + struct mempolicy * (*get_policy)(struct vm_area_struct *, long unsigned int); + struct page * (*find_special_page)(struct vm_area_struct *, long unsigned int); +}; + +struct core_thread { + struct task_struct *task; + struct core_thread *next; +}; + +struct core_state { + atomic_t nr_threads; + struct core_thread dumper; + struct completion startup; +}; + +enum fault_flag { + FAULT_FLAG_WRITE = 1, + FAULT_FLAG_MKWRITE = 2, + FAULT_FLAG_ALLOW_RETRY = 4, + FAULT_FLAG_RETRY_NOWAIT = 8, + FAULT_FLAG_KILLABLE = 16, + FAULT_FLAG_TRIED = 32, + FAULT_FLAG_USER = 64, + FAULT_FLAG_REMOTE = 128, + FAULT_FLAG_INSTRUCTION = 256, + FAULT_FLAG_INTERRUPTIBLE = 512, +}; + +struct vm_fault { + const struct { + struct vm_area_struct *vma; + gfp_t gfp_mask; + long unsigned int pgoff; + long unsigned int address; + }; + enum fault_flag flags; + pmd_t *pmd; + pud_t *pud; + pte_t orig_pte; + struct page *cow_page; + struct page *page; + pte_t *pte; + spinlock_t *ptl; + pgtable_t prealloc_pte; +}; + +enum migratetype { + MIGRATE_UNMOVABLE = 0, + MIGRATE_MOVABLE = 1, + MIGRATE_RECLAIMABLE = 2, + MIGRATE_PCPTYPES = 3, + MIGRATE_HIGHATOMIC = 3, + MIGRATE_ISOLATE = 4, + MIGRATE_TYPES = 5, +}; + +enum numa_stat_item { + NUMA_HIT = 0, + NUMA_MISS = 1, + NUMA_FOREIGN = 2, + NUMA_INTERLEAVE_HIT = 3, + NUMA_LOCAL = 4, + NUMA_OTHER = 5, + NR_VM_NUMA_STAT_ITEMS = 6, +}; + +enum zone_stat_item { + NR_FREE_PAGES = 0, + NR_ZONE_LRU_BASE = 1, + NR_ZONE_INACTIVE_ANON = 1, + NR_ZONE_ACTIVE_ANON = 2, + NR_ZONE_INACTIVE_FILE = 3, + NR_ZONE_ACTIVE_FILE = 4, + NR_ZONE_UNEVICTABLE = 5, + NR_ZONE_WRITE_PENDING = 6, + NR_MLOCK = 7, + NR_BOUNCE = 8, + NR_ZSPAGES = 9, + NR_FREE_CMA_PAGES = 10, + NR_VM_ZONE_STAT_ITEMS = 11, +}; + +enum node_stat_item { + NR_LRU_BASE = 0, + NR_INACTIVE_ANON = 0, + NR_ACTIVE_ANON = 1, + NR_INACTIVE_FILE = 2, + NR_ACTIVE_FILE = 3, + NR_UNEVICTABLE = 4, + NR_SLAB_RECLAIMABLE_B = 5, + NR_SLAB_UNRECLAIMABLE_B = 6, + NR_ISOLATED_ANON = 7, + NR_ISOLATED_FILE = 8, + WORKINGSET_NODES = 9, + WORKINGSET_REFAULT_BASE = 10, + WORKINGSET_REFAULT_ANON = 10, + WORKINGSET_REFAULT_FILE = 11, + WORKINGSET_ACTIVATE_BASE = 12, + WORKINGSET_ACTIVATE_ANON = 12, + WORKINGSET_ACTIVATE_FILE = 13, + WORKINGSET_RESTORE_BASE = 14, + WORKINGSET_RESTORE_ANON = 14, + WORKINGSET_RESTORE_FILE = 15, + WORKINGSET_NODERECLAIM = 16, + NR_ANON_MAPPED = 17, + NR_FILE_MAPPED = 18, + NR_FILE_PAGES = 19, + NR_FILE_DIRTY = 20, + NR_WRITEBACK = 21, + NR_WRITEBACK_TEMP = 22, + NR_SHMEM = 23, + NR_SHMEM_THPS = 24, + NR_SHMEM_PMDMAPPED = 25, + NR_FILE_THPS = 26, + NR_FILE_PMDMAPPED = 27, + NR_ANON_THPS = 28, + NR_VMSCAN_WRITE = 29, + NR_VMSCAN_IMMEDIATE = 30, + NR_DIRTIED = 31, + NR_WRITTEN = 32, + NR_KERNEL_MISC_RECLAIMABLE = 33, + NR_FOLL_PIN_ACQUIRED = 34, + NR_FOLL_PIN_RELEASED = 35, + NR_KERNEL_STACK_KB = 36, + NR_PAGETABLE = 37, + NR_SWAPCACHE = 38, + 
NR_VM_NODE_STAT_ITEMS = 39, +}; + +enum lru_list { + LRU_INACTIVE_ANON = 0, + LRU_ACTIVE_ANON = 1, + LRU_INACTIVE_FILE = 2, + LRU_ACTIVE_FILE = 3, + LRU_UNEVICTABLE = 4, + NR_LRU_LISTS = 5, +}; + +typedef unsigned int isolate_mode_t; + +enum zone_watermarks { + WMARK_MIN = 0, + WMARK_LOW = 1, + WMARK_HIGH = 2, + NR_WMARK = 3, +}; + +enum { + ZONELIST_FALLBACK = 0, + ZONELIST_NOFALLBACK = 1, + MAX_ZONELISTS = 2, +}; + +typedef void percpu_ref_func_t(struct percpu_ref *); + +struct percpu_ref_data { + atomic_long_t count; + percpu_ref_func_t *release; + percpu_ref_func_t *confirm_switch; + bool force_atomic: 1; + bool allow_reinit: 1; + struct callback_head rcu; + struct percpu_ref *ref; +}; + +struct shrink_control { + gfp_t gfp_mask; + int nid; + long unsigned int nr_to_scan; + long unsigned int nr_scanned; + struct mem_cgroup *memcg; +}; + +struct shrinker { + long unsigned int (*count_objects)(struct shrinker *, struct shrink_control *); + long unsigned int (*scan_objects)(struct shrinker *, struct shrink_control *); + long int batch; + int seeks; + unsigned int flags; + struct list_head list; + int id; + atomic_long_t *nr_deferred; +}; + +struct rlimit { + __kernel_ulong_t rlim_cur; + __kernel_ulong_t rlim_max; +}; + +struct dev_pagemap_ops { + void (*page_free)(struct page *); + void (*kill)(struct dev_pagemap *); + void (*cleanup)(struct dev_pagemap *); + vm_fault_t (*migrate_to_ram)(struct vm_fault *); +}; + +struct pid_namespace; + +struct upid { + int nr; + struct pid_namespace *ns; +}; + +struct pid { + refcount_t count; + unsigned int level; + spinlock_t lock; + struct hlist_head tasks[4]; + struct hlist_head inodes; + wait_queue_head_t wait_pidfd; + struct callback_head rcu; + struct upid numbers[1]; +}; + +typedef struct { + gid_t val; +} kgid_t; + +struct hrtimer_cpu_base; + +struct hrtimer_clock_base { + struct hrtimer_cpu_base *cpu_base; + unsigned int index; + clockid_t clockid; + seqcount_raw_spinlock_t seq; + struct hrtimer *running; + struct timerqueue_head active; + ktime_t (*get_time)(); + ktime_t offset; +}; + +struct hrtimer_cpu_base { + raw_spinlock_t lock; + unsigned int cpu; + unsigned int active_bases; + unsigned int clock_was_set_seq; + unsigned int hres_active: 1; + unsigned int in_hrtirq: 1; + unsigned int hang_detected: 1; + unsigned int softirq_activated: 1; + unsigned int nr_events; + short unsigned int nr_retries; + short unsigned int nr_hangs; + unsigned int max_hang_time; + ktime_t expires_next; + struct hrtimer *next_timer; + ktime_t softirq_expires_next; + struct hrtimer *softirq_next_timer; + struct hrtimer_clock_base clock_base[8]; +}; + +enum hrtimer_base_type { + HRTIMER_BASE_MONOTONIC = 0, + HRTIMER_BASE_REALTIME = 1, + HRTIMER_BASE_BOOTTIME = 2, + HRTIMER_BASE_TAI = 3, + HRTIMER_BASE_MONOTONIC_SOFT = 4, + HRTIMER_BASE_REALTIME_SOFT = 5, + HRTIMER_BASE_BOOTTIME_SOFT = 6, + HRTIMER_BASE_TAI_SOFT = 7, + HRTIMER_MAX_CLOCK_BASES = 8, +}; + +typedef void __signalfn_t(int); + +typedef __signalfn_t *__sighandler_t; + +typedef void __restorefn_t(); + +typedef __restorefn_t *__sigrestore_t; + +union sigval { + int sival_int; + void *sival_ptr; +}; + +typedef union sigval sigval_t; + +union __sifields { + struct { + __kernel_pid_t _pid; + __kernel_uid32_t _uid; + } _kill; + struct { + __kernel_timer_t _tid; + int _overrun; + sigval_t _sigval; + int _sys_private; + } _timer; + struct { + __kernel_pid_t _pid; + __kernel_uid32_t _uid; + sigval_t _sigval; + } _rt; + struct { + __kernel_pid_t _pid; + __kernel_uid32_t _uid; + int _status; + __kernel_clock_t 
_utime; + __kernel_clock_t _stime; + } _sigchld; + struct { + void *_addr; + union { + int _trapno; + short int _addr_lsb; + struct { + char _dummy_bnd[8]; + void *_lower; + void *_upper; + } _addr_bnd; + struct { + char _dummy_pkey[8]; + __u32 _pkey; + } _addr_pkey; + struct { + long unsigned int _data; + __u32 _type; + } _perf; + }; + } _sigfault; + struct { + long int _band; + int _fd; + } _sigpoll; + struct { + void *_call_addr; + int _syscall; + unsigned int _arch; + } _sigsys; +}; + +struct kernel_siginfo { + struct { + int si_signo; + int si_errno; + int si_code; + union __sifields _sifields; + }; +}; + +struct user_struct { + refcount_t __count; + atomic_t processes; + atomic_t sigpending; + atomic_long_t epoll_watches; + long unsigned int mq_bytes; + long unsigned int locked_shm; + long unsigned int unix_inflight; + atomic_long_t pipe_bufs; + struct hlist_node uidhash_node; + kuid_t uid; + atomic_long_t locked_vm; + atomic_t nr_watches; + struct ratelimit_state ratelimit; +}; + +struct sigaction { + __sighandler_t sa_handler; + long unsigned int sa_flags; + __sigrestore_t sa_restorer; + sigset_t sa_mask; +}; + +struct k_sigaction { + struct sigaction sa; +}; + +struct cpu_itimer { + u64 expires; + u64 incr; +}; + +struct task_cputime_atomic { + atomic64_t utime; + atomic64_t stime; + atomic64_t sum_exec_runtime; +}; + +struct thread_group_cputimer { + struct task_cputime_atomic cputime_atomic; +}; + +struct pacct_struct { + int ac_flag; + long int ac_exitcode; + long unsigned int ac_mem; + u64 ac_utime; + u64 ac_stime; + long unsigned int ac_minflt; + long unsigned int ac_majflt; +}; + +struct tty_struct; + +struct autogroup; + +struct taskstats; + +struct tty_audit_buf; + +struct signal_struct { + refcount_t sigcnt; + atomic_t live; + int nr_threads; + struct list_head thread_head; + wait_queue_head_t wait_chldexit; + struct task_struct *curr_target; + struct sigpending shared_pending; + struct hlist_head multiprocess; + int group_exit_code; + int notify_count; + struct task_struct *group_exit_task; + int group_stop_count; + unsigned int flags; + unsigned int is_child_subreaper: 1; + unsigned int has_child_subreaper: 1; + int posix_timer_id; + struct list_head posix_timers; + struct hrtimer real_timer; + ktime_t it_real_incr; + struct cpu_itimer it[2]; + struct thread_group_cputimer cputimer; + struct posix_cputimers posix_cputimers; + struct pid *pids[4]; + struct pid *tty_old_pgrp; + int leader; + struct tty_struct *tty; + struct autogroup *autogroup; + seqlock_t stats_lock; + u64 utime; + u64 stime; + u64 cutime; + u64 cstime; + u64 gtime; + u64 cgtime; + struct prev_cputime prev_cputime; + long unsigned int nvcsw; + long unsigned int nivcsw; + long unsigned int cnvcsw; + long unsigned int cnivcsw; + long unsigned int min_flt; + long unsigned int maj_flt; + long unsigned int cmin_flt; + long unsigned int cmaj_flt; + long unsigned int inblock; + long unsigned int oublock; + long unsigned int cinblock; + long unsigned int coublock; + long unsigned int maxrss; + long unsigned int cmaxrss; + struct task_io_accounting ioac; + long long unsigned int sum_sched_runtime; + struct rlimit rlim[16]; + struct pacct_struct pacct; + struct taskstats *stats; + unsigned int audit_tty; + struct tty_audit_buf *tty_audit_buf; + bool oom_flag_origin; + short int oom_score_adj; + short int oom_score_adj_min; + struct mm_struct *oom_mm; + struct mutex cred_guard_mutex; + struct rw_semaphore exec_update_lock; +}; + +enum rseq_cs_flags_bit { + RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT = 0, + 
RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT = 1, + RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT = 2, +}; + +struct rseq { + __u32 cpu_id_start; + __u32 cpu_id; + union { + __u64 ptr64; + __u64 ptr; + } rseq_cs; + __u32 flags; + long: 32; + long: 64; +}; + +enum uclamp_id { + UCLAMP_MIN = 0, + UCLAMP_MAX = 1, + UCLAMP_CNT = 2, +}; + +enum perf_event_task_context { + perf_invalid_context = 4294967295, + perf_hw_context = 0, + perf_sw_context = 1, + perf_nr_task_contexts = 2, +}; + +struct rq; + +struct rq_flags; + +struct sched_class { + int uclamp_enabled; + void (*enqueue_task)(struct rq *, struct task_struct *, int); + void (*dequeue_task)(struct rq *, struct task_struct *, int); + void (*yield_task)(struct rq *); + bool (*yield_to_task)(struct rq *, struct task_struct *); + void (*check_preempt_curr)(struct rq *, struct task_struct *, int); + struct task_struct * (*pick_next_task)(struct rq *); + void (*put_prev_task)(struct rq *, struct task_struct *); + void (*set_next_task)(struct rq *, struct task_struct *, bool); + int (*balance)(struct rq *, struct task_struct *, struct rq_flags *); + int (*select_task_rq)(struct task_struct *, int, int); + void (*migrate_task_rq)(struct task_struct *, int); + void (*task_woken)(struct rq *, struct task_struct *); + void (*set_cpus_allowed)(struct task_struct *, const struct cpumask *, u32); + void (*rq_online)(struct rq *); + void (*rq_offline)(struct rq *); + struct rq * (*find_lock_rq)(struct task_struct *, struct rq *); + void (*task_tick)(struct rq *, struct task_struct *, int); + void (*task_fork)(struct task_struct *); + void (*task_dead)(struct task_struct *); + void (*switched_from)(struct rq *, struct task_struct *); + void (*switched_to)(struct rq *, struct task_struct *); + void (*prio_changed)(struct rq *, struct task_struct *, int); + unsigned int (*get_rr_interval)(struct rq *, struct task_struct *); + void (*update_curr)(struct rq *); + void (*task_change_group)(struct task_struct *, int); +}; + +struct kernel_cap_struct { + __u32 cap[2]; +}; + +typedef struct kernel_cap_struct kernel_cap_t; + +struct ucounts; + +struct group_info; + +struct cred { + atomic_t usage; + kuid_t uid; + kgid_t gid; + kuid_t suid; + kgid_t sgid; + kuid_t euid; + kgid_t egid; + kuid_t fsuid; + kgid_t fsgid; + unsigned int securebits; + kernel_cap_t cap_inheritable; + kernel_cap_t cap_permitted; + kernel_cap_t cap_effective; + kernel_cap_t cap_bset; + kernel_cap_t cap_ambient; + unsigned char jit_keyring; + struct key *session_keyring; + struct key *process_keyring; + struct key *thread_keyring; + struct key *request_key_auth; + void *security; + struct user_struct *user; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct group_info *group_info; + union { + int non_rcu; + struct callback_head rcu; + }; +}; + +typedef int32_t key_serial_t; + +typedef uint32_t key_perm_t; + +struct key_type; + +struct key_tag; + +struct keyring_index_key { + long unsigned int hash; + union { + struct { + u16 desc_len; + char desc[6]; + }; + long unsigned int x; + }; + struct key_type *type; + struct key_tag *domain_tag; + const char *description; +}; + +union key_payload { + void *rcu_data0; + void *data[4]; +}; + +struct assoc_array_ptr; + +struct assoc_array { + struct assoc_array_ptr *root; + long unsigned int nr_leaves_on_tree; +}; + +struct watch_list; + +struct key_user; + +struct key_restriction; + +struct key { + refcount_t usage; + key_serial_t serial; + union { + struct list_head graveyard_link; + struct rb_node serial_node; + }; + struct watch_list 
*watchers; + struct rw_semaphore sem; + struct key_user *user; + void *security; + union { + time64_t expiry; + time64_t revoked_at; + }; + time64_t last_used_at; + kuid_t uid; + kgid_t gid; + key_perm_t perm; + short unsigned int quotalen; + short unsigned int datalen; + short int state; + long unsigned int flags; + union { + struct keyring_index_key index_key; + struct { + long unsigned int hash; + long unsigned int len_desc; + struct key_type *type; + struct key_tag *domain_tag; + char *description; + }; + }; + union { + union key_payload payload; + struct { + struct list_head name_link; + struct assoc_array keys; + }; + }; + struct key_restriction *restrict_link; +}; + +struct sighand_struct { + spinlock_t siglock; + refcount_t count; + wait_queue_head_t signalfd_wqh; + struct k_sigaction action[64]; +}; + +struct io_cq; + +struct io_context { + atomic_long_t refcount; + atomic_t active_ref; + atomic_t nr_tasks; + spinlock_t lock; + short unsigned int ioprio; + struct xarray icq_tree; + struct io_cq *icq_hint; + struct hlist_head icq_list; + struct work_struct release_work; +}; + +enum rseq_event_mask_bits { + RSEQ_EVENT_PREEMPT_BIT = 0, + RSEQ_EVENT_SIGNAL_BIT = 1, + RSEQ_EVENT_MIGRATE_BIT = 2, +}; + +enum fixed_addresses { + VSYSCALL_PAGE = 511, + FIX_DBGP_BASE = 512, + FIX_EARLYCON_MEM_BASE = 513, + FIX_APIC_BASE = 514, + FIX_IO_APIC_BASE_0 = 515, + FIX_IO_APIC_BASE_END = 642, + FIX_PARAVIRT_BOOTMAP = 643, + FIX_APEI_GHES_IRQ = 644, + FIX_APEI_GHES_NMI = 645, + __end_of_permanent_fixed_addresses = 646, + FIX_BTMAP_END = 1024, + FIX_BTMAP_BEGIN = 1535, + FIX_TBOOT_BASE = 1536, + __end_of_fixed_addresses = 1537, +}; + +struct hlist_bl_node; + +struct hlist_bl_head { + struct hlist_bl_node *first; +}; + +struct hlist_bl_node { + struct hlist_bl_node *next; + struct hlist_bl_node **pprev; +}; + +struct lockref { + union { + __u64 lock_count; + struct { + spinlock_t lock; + int count; + }; + }; +}; + +struct qstr { + union { + struct { + u32 hash; + u32 len; + }; + u64 hash_len; + }; + const unsigned char *name; +}; + +struct dentry_operations; + +struct dentry { + unsigned int d_flags; + seqcount_spinlock_t d_seq; + struct hlist_bl_node d_hash; + struct dentry *d_parent; + struct qstr d_name; + struct inode *d_inode; + unsigned char d_iname[32]; + struct lockref d_lockref; + const struct dentry_operations *d_op; + struct super_block *d_sb; + long unsigned int d_time; + void *d_fsdata; + union { + struct list_head d_lru; + wait_queue_head_t *d_wait; + }; + struct list_head d_child; + struct list_head d_subdirs; + union { + struct hlist_node d_alias; + struct hlist_bl_node d_in_lookup_hash; + struct callback_head d_rcu; + } d_u; +}; + +struct posix_acl; + +struct inode_operations; + +struct bdi_writeback; + +struct file_lock_context; + +struct cdev; + +struct fsnotify_mark_connector; + +struct fscrypt_info; + +struct fsverity_info; + +struct inode { + umode_t i_mode; + short unsigned int i_opflags; + kuid_t i_uid; + kgid_t i_gid; + unsigned int i_flags; + struct posix_acl *i_acl; + struct posix_acl *i_default_acl; + const struct inode_operations *i_op; + struct super_block *i_sb; + struct address_space *i_mapping; + void *i_security; + long unsigned int i_ino; + union { + const unsigned int i_nlink; + unsigned int __i_nlink; + }; + dev_t i_rdev; + loff_t i_size; + struct timespec64 i_atime; + struct timespec64 i_mtime; + struct timespec64 i_ctime; + spinlock_t i_lock; + short unsigned int i_bytes; + u8 i_blkbits; + u8 i_write_hint; + blkcnt_t i_blocks; + long unsigned int i_state; + 
struct rw_semaphore i_rwsem; + long unsigned int dirtied_when; + long unsigned int dirtied_time_when; + struct hlist_node i_hash; + struct list_head i_io_list; + struct bdi_writeback *i_wb; + int i_wb_frn_winner; + u16 i_wb_frn_avg_time; + u16 i_wb_frn_history; + struct list_head i_lru; + struct list_head i_sb_list; + struct list_head i_wb_list; + union { + struct hlist_head i_dentry; + struct callback_head i_rcu; + }; + atomic64_t i_version; + atomic64_t i_sequence; + atomic_t i_count; + atomic_t i_dio_count; + atomic_t i_writecount; + atomic_t i_readcount; + union { + const struct file_operations *i_fop; + void (*free_inode)(struct inode *); + }; + struct file_lock_context *i_flctx; + struct address_space i_data; + struct list_head i_devices; + union { + struct pipe_inode_info *i_pipe; + struct cdev *i_cdev; + char *i_link; + unsigned int i_dir_seq; + }; + __u32 i_generation; + __u32 i_fsnotify_mask; + struct fsnotify_mark_connector *i_fsnotify_marks; + struct fscrypt_info *i_crypt_info; + struct fsverity_info *i_verity_info; + void *i_private; +}; + +struct dentry_operations { + int (*d_revalidate)(struct dentry *, unsigned int); + int (*d_weak_revalidate)(struct dentry *, unsigned int); + int (*d_hash)(const struct dentry *, struct qstr *); + int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *); + int (*d_delete)(const struct dentry *); + int (*d_init)(struct dentry *); + void (*d_release)(struct dentry *); + void (*d_prune)(struct dentry *); + void (*d_iput)(struct dentry *, struct inode *); + char * (*d_dname)(struct dentry *, char *, int); + struct vfsmount * (*d_automount)(struct path *); + int (*d_manage)(const struct path *, bool); + struct dentry * (*d_real)(struct dentry *, const struct inode *); + long: 64; + long: 64; + long: 64; +}; + +struct mtd_info; + +typedef long long int qsize_t; + +struct quota_format_type; + +struct mem_dqinfo { + struct quota_format_type *dqi_format; + int dqi_fmt_id; + struct list_head dqi_dirty_list; + long unsigned int dqi_flags; + unsigned int dqi_bgrace; + unsigned int dqi_igrace; + qsize_t dqi_max_spc_limit; + qsize_t dqi_max_ino_limit; + void *dqi_priv; +}; + +struct quota_format_ops; + +struct quota_info { + unsigned int flags; + struct rw_semaphore dqio_sem; + struct inode *files[3]; + struct mem_dqinfo info[3]; + const struct quota_format_ops *ops[3]; +}; + +struct rcu_sync { + int gp_state; + int gp_count; + wait_queue_head_t gp_wait; + struct callback_head cb_head; +}; + +struct rcuwait { + struct task_struct *task; +}; + +struct percpu_rw_semaphore { + struct rcu_sync rss; + unsigned int *read_count; + struct rcuwait writer; + wait_queue_head_t waiters; + atomic_t block; +}; + +struct sb_writers { + int frozen; + wait_queue_head_t wait_unfrozen; + struct percpu_rw_semaphore rw_sem[3]; +}; + +typedef struct { + __u8 b[16]; +} uuid_t; + +struct list_lru_node; + +struct list_lru { + struct list_lru_node *node; + struct list_head list; + int shrinker_id; + bool memcg_aware; +}; + +struct super_operations; + +struct dquot_operations; + +struct quotactl_ops; + +struct export_operations; + +struct xattr_handler; + +struct fscrypt_operations; + +struct fsverity_operations; + +struct unicode_map; + +struct block_device; + +struct workqueue_struct; + +struct super_block { + struct list_head s_list; + dev_t s_dev; + unsigned char s_blocksize_bits; + long unsigned int s_blocksize; + loff_t s_maxbytes; + struct file_system_type *s_type; + const struct super_operations *s_op; + const struct dquot_operations *dq_op; 
+ const struct quotactl_ops *s_qcop; + const struct export_operations *s_export_op; + long unsigned int s_flags; + long unsigned int s_iflags; + long unsigned int s_magic; + struct dentry *s_root; + struct rw_semaphore s_umount; + int s_count; + atomic_t s_active; + void *s_security; + const struct xattr_handler **s_xattr; + const struct fscrypt_operations *s_cop; + struct key *s_master_keys; + const struct fsverity_operations *s_vop; + struct unicode_map *s_encoding; + __u16 s_encoding_flags; + struct hlist_bl_head s_roots; + struct list_head s_mounts; + struct block_device *s_bdev; + struct backing_dev_info *s_bdi; + struct mtd_info *s_mtd; + struct hlist_node s_instances; + unsigned int s_quota_types; + struct quota_info s_dquot; + struct sb_writers s_writers; + void *s_fs_info; + u32 s_time_gran; + time64_t s_time_min; + time64_t s_time_max; + __u32 s_fsnotify_mask; + struct fsnotify_mark_connector *s_fsnotify_marks; + char s_id[32]; + uuid_t s_uuid; + unsigned int s_max_links; + fmode_t s_mode; + struct mutex s_vfs_rename_mutex; + const char *s_subtype; + const struct dentry_operations *s_d_op; + int cleancache_poolid; + struct shrinker s_shrink; + atomic_long_t s_remove_count; + atomic_long_t s_fsnotify_inode_refs; + int s_readonly_remount; + errseq_t s_wb_err; + struct workqueue_struct *s_dio_done_wq; + struct hlist_head s_pins; + struct user_namespace *s_user_ns; + struct list_lru s_dentry_lru; + struct list_lru s_inode_lru; + struct callback_head rcu; + struct work_struct destroy_work; + struct mutex s_sync_lock; + int s_stack_depth; + int: 32; + spinlock_t s_inode_list_lock; + struct list_head s_inodes; + spinlock_t s_inode_wblist_lock; + struct list_head s_inodes_wb; + long: 64; + long: 64; +}; + +struct vfsmount { + struct dentry *mnt_root; + struct super_block *mnt_sb; + int mnt_flags; + struct user_namespace *mnt_userns; +}; + +struct kstat { + u32 result_mask; + umode_t mode; + unsigned int nlink; + uint32_t blksize; + u64 attributes; + u64 attributes_mask; + u64 ino; + dev_t dev; + dev_t rdev; + kuid_t uid; + kgid_t gid; + loff_t size; + struct timespec64 atime; + struct timespec64 mtime; + struct timespec64 ctime; + struct timespec64 btime; + u64 blocks; + u64 mnt_id; +}; + +struct list_lru_one { + struct list_head list; + long int nr_items; +}; + +struct list_lru_memcg { + struct callback_head rcu; + struct list_lru_one *lru[0]; +}; + +struct list_lru_node { + spinlock_t lock; + struct list_lru_one lru; + struct list_lru_memcg *memcg_lrus; + long int nr_items; + long: 64; + long: 64; +}; + +enum migrate_mode { + MIGRATE_ASYNC = 0, + MIGRATE_SYNC_LIGHT = 1, + MIGRATE_SYNC = 2, + MIGRATE_SYNC_NO_COPY = 3, +}; + +struct key_tag { + struct callback_head rcu; + refcount_t usage; + bool removed; +}; + +typedef int (*request_key_actor_t)(struct key *, void *); + +struct key_preparsed_payload; + +struct key_match_data; + +struct kernel_pkey_params; + +struct kernel_pkey_query; + +struct key_type { + const char *name; + size_t def_datalen; + unsigned int flags; + int (*vet_description)(const char *); + int (*preparse)(struct key_preparsed_payload *); + void (*free_preparse)(struct key_preparsed_payload *); + int (*instantiate)(struct key *, struct key_preparsed_payload *); + int (*update)(struct key *, struct key_preparsed_payload *); + int (*match_preparse)(struct key_match_data *); + void (*match_free)(struct key_match_data *); + void (*revoke)(struct key *); + void (*destroy)(struct key *); + void (*describe)(const struct key *, struct seq_file *); + long int (*read)(const 
struct key *, char *, size_t); + request_key_actor_t request_key; + struct key_restriction * (*lookup_restriction)(const char *); + int (*asym_query)(const struct kernel_pkey_params *, struct kernel_pkey_query *); + int (*asym_eds_op)(struct kernel_pkey_params *, const void *, void *); + int (*asym_verify_signature)(struct kernel_pkey_params *, const void *, const void *); + struct list_head link; + struct lock_class_key lock_class; +}; + +typedef int (*key_restrict_link_func_t)(struct key *, const struct key_type *, const union key_payload *, struct key *); + +struct key_restriction { + key_restrict_link_func_t check; + struct key *key; + struct key_type *keytype; +}; + +struct group_info { + atomic_t usage; + int ngroups; + kgid_t gid[0]; +}; + +struct delayed_call { + void (*fn)(void *); + void *arg; +}; + +struct io_cq { + struct request_queue *q; + struct io_context *ioc; + union { + struct list_head q_node; + struct kmem_cache *__rcu_icq_cache; + }; + union { + struct hlist_node ioc_node; + struct callback_head __rcu_head; + }; + unsigned int flags; +}; + +struct wait_page_queue; + +struct kiocb { + struct file *ki_filp; + loff_t ki_pos; + void (*ki_complete)(struct kiocb *, long int, long int); + void *private; + int ki_flags; + u16 ki_hint; + u16 ki_ioprio; + union { + unsigned int ki_cookie; + struct wait_page_queue *ki_waitq; + }; +}; + +struct iattr { + unsigned int ia_valid; + umode_t ia_mode; + kuid_t ia_uid; + kgid_t ia_gid; + loff_t ia_size; + struct timespec64 ia_atime; + struct timespec64 ia_mtime; + struct timespec64 ia_ctime; + struct file *ia_file; +}; + +typedef __kernel_uid32_t projid_t; + +typedef struct { + projid_t val; +} kprojid_t; + +enum quota_type { + USRQUOTA = 0, + GRPQUOTA = 1, + PRJQUOTA = 2, +}; + +struct kqid { + union { + kuid_t uid; + kgid_t gid; + kprojid_t projid; + }; + enum quota_type type; +}; + +struct mem_dqblk { + qsize_t dqb_bhardlimit; + qsize_t dqb_bsoftlimit; + qsize_t dqb_curspace; + qsize_t dqb_rsvspace; + qsize_t dqb_ihardlimit; + qsize_t dqb_isoftlimit; + qsize_t dqb_curinodes; + time64_t dqb_btime; + time64_t dqb_itime; +}; + +struct dquot { + struct hlist_node dq_hash; + struct list_head dq_inuse; + struct list_head dq_free; + struct list_head dq_dirty; + struct mutex dq_lock; + spinlock_t dq_dqb_lock; + atomic_t dq_count; + struct super_block *dq_sb; + struct kqid dq_id; + loff_t dq_off; + long unsigned int dq_flags; + struct mem_dqblk dq_dqb; +}; + +enum { + DQF_ROOT_SQUASH_B = 0, + DQF_SYS_FILE_B = 16, + DQF_PRIVATE = 17, +}; + +struct quota_format_type { + int qf_fmt_id; + const struct quota_format_ops *qf_ops; + struct module *qf_owner; + struct quota_format_type *qf_next; +}; + +enum { + DQST_LOOKUPS = 0, + DQST_DROPS = 1, + DQST_READS = 2, + DQST_WRITES = 3, + DQST_CACHE_HITS = 4, + DQST_ALLOC_DQUOTS = 5, + DQST_FREE_DQUOTS = 6, + DQST_SYNCS = 7, + _DQST_DQSTAT_LAST = 8, +}; + +struct quota_format_ops { + int (*check_quota_file)(struct super_block *, int); + int (*read_file_info)(struct super_block *, int); + int (*write_file_info)(struct super_block *, int); + int (*free_file_info)(struct super_block *, int); + int (*read_dqblk)(struct dquot *); + int (*commit_dqblk)(struct dquot *); + int (*release_dqblk)(struct dquot *); + int (*get_next_id)(struct super_block *, struct kqid *); +}; + +struct dquot_operations { + int (*write_dquot)(struct dquot *); + struct dquot * (*alloc_dquot)(struct super_block *, int); + void (*destroy_dquot)(struct dquot *); + int (*acquire_dquot)(struct dquot *); + int (*release_dquot)(struct dquot 
*); + int (*mark_dirty)(struct dquot *); + int (*write_info)(struct super_block *, int); + qsize_t * (*get_reserved_space)(struct inode *); + int (*get_projid)(struct inode *, kprojid_t *); + int (*get_inode_usage)(struct inode *, qsize_t *); + int (*get_next_id)(struct super_block *, struct kqid *); +}; + +struct qc_dqblk { + int d_fieldmask; + u64 d_spc_hardlimit; + u64 d_spc_softlimit; + u64 d_ino_hardlimit; + u64 d_ino_softlimit; + u64 d_space; + u64 d_ino_count; + s64 d_ino_timer; + s64 d_spc_timer; + int d_ino_warns; + int d_spc_warns; + u64 d_rt_spc_hardlimit; + u64 d_rt_spc_softlimit; + u64 d_rt_space; + s64 d_rt_spc_timer; + int d_rt_spc_warns; +}; + +struct qc_type_state { + unsigned int flags; + unsigned int spc_timelimit; + unsigned int ino_timelimit; + unsigned int rt_spc_timelimit; + unsigned int spc_warnlimit; + unsigned int ino_warnlimit; + unsigned int rt_spc_warnlimit; + long long unsigned int ino; + blkcnt_t blocks; + blkcnt_t nextents; +}; + +struct qc_state { + unsigned int s_incoredqs; + struct qc_type_state s_state[3]; +}; + +struct qc_info { + int i_fieldmask; + unsigned int i_flags; + unsigned int i_spc_timelimit; + unsigned int i_ino_timelimit; + unsigned int i_rt_spc_timelimit; + unsigned int i_spc_warnlimit; + unsigned int i_ino_warnlimit; + unsigned int i_rt_spc_warnlimit; +}; + +struct quotactl_ops { + int (*quota_on)(struct super_block *, int, int, const struct path *); + int (*quota_off)(struct super_block *, int); + int (*quota_enable)(struct super_block *, unsigned int); + int (*quota_disable)(struct super_block *, unsigned int); + int (*quota_sync)(struct super_block *, int); + int (*set_info)(struct super_block *, int, struct qc_info *); + int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); + int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *); + int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); + int (*get_state)(struct super_block *, struct qc_state *); + int (*rm_xquota)(struct super_block *, unsigned int); +}; + +struct writeback_control; + +struct readahead_control; + +struct swap_info_struct; + +struct address_space_operations { + int (*writepage)(struct page *, struct writeback_control *); + int (*readpage)(struct file *, struct page *); + int (*writepages)(struct address_space *, struct writeback_control *); + int (*set_page_dirty)(struct page *); + int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); + void (*readahead)(struct readahead_control *); + int (*write_begin)(struct file *, struct address_space *, loff_t, unsigned int, unsigned int, struct page **, void **); + int (*write_end)(struct file *, struct address_space *, loff_t, unsigned int, unsigned int, struct page *, void *); + sector_t (*bmap)(struct address_space *, sector_t); + void (*invalidatepage)(struct page *, unsigned int, unsigned int); + int (*releasepage)(struct page *, gfp_t); + void (*freepage)(struct page *); + ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *); + int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode); + bool (*isolate_page)(struct page *, isolate_mode_t); + void (*putback_page)(struct page *); + int (*launder_page)(struct page *); + int (*is_partially_uptodate)(struct page *, long unsigned int, long unsigned int); + void (*is_dirty_writeback)(struct page *, bool *, bool *); + int (*error_remove_page)(struct address_space *, struct page *); + int (*swap_activate)(struct swap_info_struct *, struct file *, 
sector_t *); + void (*swap_deactivate)(struct file *); +}; + +struct fiemap_extent_info; + +struct fileattr; + +struct inode_operations { + struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); + const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *); + int (*permission)(struct user_namespace *, struct inode *, int); + struct posix_acl * (*get_acl)(struct inode *, int); + int (*readlink)(struct dentry *, char *, int); + int (*create)(struct user_namespace *, struct inode *, struct dentry *, umode_t, bool); + int (*link)(struct dentry *, struct inode *, struct dentry *); + int (*unlink)(struct inode *, struct dentry *); + int (*symlink)(struct user_namespace *, struct inode *, struct dentry *, const char *); + int (*mkdir)(struct user_namespace *, struct inode *, struct dentry *, umode_t); + int (*rmdir)(struct inode *, struct dentry *); + int (*mknod)(struct user_namespace *, struct inode *, struct dentry *, umode_t, dev_t); + int (*rename)(struct user_namespace *, struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); + int (*setattr)(struct user_namespace *, struct dentry *, struct iattr *); + int (*getattr)(struct user_namespace *, const struct path *, struct kstat *, u32, unsigned int); + ssize_t (*listxattr)(struct dentry *, char *, size_t); + int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64, u64); + int (*update_time)(struct inode *, struct timespec64 *, int); + int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t); + int (*tmpfile)(struct user_namespace *, struct inode *, struct dentry *, umode_t); + int (*set_acl)(struct user_namespace *, struct inode *, struct posix_acl *, int); + int (*fileattr_set)(struct user_namespace *, struct dentry *, struct fileattr *); + int (*fileattr_get)(struct dentry *, struct fileattr *); + long: 64; +}; + +struct file_lock_context { + spinlock_t flc_lock; + struct list_head flc_flock; + struct list_head flc_posix; + struct list_head flc_lease; +}; + +struct file_lock_operations { + void (*fl_copy_lock)(struct file_lock *, struct file_lock *); + void (*fl_release_private)(struct file_lock *); +}; + +struct nlm_lockowner; + +struct nfs_lock_info { + u32 state; + struct nlm_lockowner *owner; + struct list_head list; +}; + +struct nfs4_lock_state; + +struct nfs4_lock_info { + struct nfs4_lock_state *owner; +}; + +struct fasync_struct; + +struct lock_manager_operations; + +struct file_lock { + struct file_lock *fl_blocker; + struct list_head fl_list; + struct hlist_node fl_link; + struct list_head fl_blocked_requests; + struct list_head fl_blocked_member; + fl_owner_t fl_owner; + unsigned int fl_flags; + unsigned char fl_type; + unsigned int fl_pid; + int fl_link_cpu; + wait_queue_head_t fl_wait; + struct file *fl_file; + loff_t fl_start; + loff_t fl_end; + struct fasync_struct *fl_fasync; + long unsigned int fl_break_time; + long unsigned int fl_downgrade_time; + const struct file_lock_operations *fl_ops; + const struct lock_manager_operations *fl_lmops; + union { + struct nfs_lock_info nfs_fl; + struct nfs4_lock_info nfs4_fl; + struct { + struct list_head link; + int state; + unsigned int debug_id; + } afs; + } fl_u; +}; + +struct lock_manager_operations { + fl_owner_t (*lm_get_owner)(fl_owner_t); + void (*lm_put_owner)(fl_owner_t); + void (*lm_notify)(struct file_lock *); + int (*lm_grant)(struct file_lock *, int); + bool (*lm_break)(struct file_lock *); + int (*lm_change)(struct file_lock *, int, struct list_head *); + void 
(*lm_setup)(struct file_lock *, void **); + bool (*lm_breaker_owns_lease)(struct file_lock *); +}; + +struct fasync_struct { + rwlock_t fa_lock; + int magic; + int fa_fd; + struct fasync_struct *fa_next; + struct file *fa_file; + struct callback_head fa_rcu; +}; + +enum { + SB_UNFROZEN = 0, + SB_FREEZE_WRITE = 1, + SB_FREEZE_PAGEFAULT = 2, + SB_FREEZE_FS = 3, + SB_FREEZE_COMPLETE = 4, +}; + +struct kstatfs; + +struct super_operations { + struct inode * (*alloc_inode)(struct super_block *); + void (*destroy_inode)(struct inode *); + void (*free_inode)(struct inode *); + void (*dirty_inode)(struct inode *, int); + int (*write_inode)(struct inode *, struct writeback_control *); + int (*drop_inode)(struct inode *); + void (*evict_inode)(struct inode *); + void (*put_super)(struct super_block *); + int (*sync_fs)(struct super_block *, int); + int (*freeze_super)(struct super_block *); + int (*freeze_fs)(struct super_block *); + int (*thaw_super)(struct super_block *); + int (*unfreeze_fs)(struct super_block *); + int (*statfs)(struct dentry *, struct kstatfs *); + int (*remount_fs)(struct super_block *, int *, char *); + void (*umount_begin)(struct super_block *); + int (*show_options)(struct seq_file *, struct dentry *); + int (*show_devname)(struct seq_file *, struct dentry *); + int (*show_path)(struct seq_file *, struct dentry *); + int (*show_stats)(struct seq_file *, struct dentry *); + ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); + ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); + struct dquot ** (*get_dquots)(struct inode *); + int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t); + long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); + long int (*free_cached_objects)(struct super_block *, struct shrink_control *); +}; + +struct iomap; + +struct fid; + +struct export_operations { + int (*encode_fh)(struct inode *, __u32 *, int *, struct inode *); + struct dentry * (*fh_to_dentry)(struct super_block *, struct fid *, int, int); + struct dentry * (*fh_to_parent)(struct super_block *, struct fid *, int, int); + int (*get_name)(struct dentry *, char *, struct dentry *); + struct dentry * (*get_parent)(struct dentry *); + int (*commit_metadata)(struct inode *); + int (*get_uuid)(struct super_block *, u8 *, u32 *, u64 *); + int (*map_blocks)(struct inode *, loff_t, u64, struct iomap *, bool, u32 *); + int (*commit_blocks)(struct inode *, struct iomap *, int, struct iattr *); + u64 (*fetch_iversion)(struct inode *); + long unsigned int flags; +}; + +struct xattr_handler { + const char *name; + const char *prefix; + int flags; + bool (*list)(struct dentry *); + int (*get)(const struct xattr_handler *, struct dentry *, struct inode *, const char *, void *, size_t); + int (*set)(const struct xattr_handler *, struct user_namespace *, struct dentry *, struct inode *, const char *, const void *, size_t, int); +}; + +union fscrypt_policy; + +struct fscrypt_operations { + unsigned int flags; + const char *key_prefix; + int (*get_context)(struct inode *, void *, size_t); + int (*set_context)(struct inode *, const void *, size_t, void *); + const union fscrypt_policy * (*get_dummy_policy)(struct super_block *); + bool (*empty_dir)(struct inode *); + unsigned int max_namelen; + bool (*has_stable_inodes)(struct super_block *); + void (*get_ino_and_lblk_bits)(struct super_block *, int *, int *); + int (*get_num_devices)(struct super_block *); + void (*get_devices)(struct super_block *, struct 
request_queue **); +}; + +struct fsverity_operations { + int (*begin_enable_verity)(struct file *); + int (*end_enable_verity)(struct file *, const void *, size_t, u64); + int (*get_verity_descriptor)(struct inode *, void *, size_t); + struct page * (*read_merkle_tree_page)(struct inode *, long unsigned int, long unsigned int); + int (*write_merkle_tree_block)(struct inode *, const void *, u64, int); +}; + +typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64, unsigned int); + +struct dir_context { + filldir_t actor; + loff_t pos; +}; + +struct p_log; + +struct fs_parameter; + +struct fs_parse_result; + +typedef int fs_param_type(struct p_log *, const struct fs_parameter_spec *, struct fs_parameter *, struct fs_parse_result *); + +struct fs_parameter_spec { + const char *name; + fs_param_type *type; + u8 opt; + short unsigned int flags; + const void *data; +}; + +enum compound_dtor_id { + NULL_COMPOUND_DTOR = 0, + COMPOUND_PAGE_DTOR = 1, + HUGETLB_PAGE_DTOR = 2, + TRANSHUGE_PAGE_DTOR = 3, + NR_COMPOUND_DTORS = 4, +}; + +enum vm_event_item { + PGPGIN = 0, + PGPGOUT = 1, + PSWPIN = 2, + PSWPOUT = 3, + PGALLOC_DMA = 4, + PGALLOC_DMA32 = 5, + PGALLOC_NORMAL = 6, + PGALLOC_MOVABLE = 7, + ALLOCSTALL_DMA = 8, + ALLOCSTALL_DMA32 = 9, + ALLOCSTALL_NORMAL = 10, + ALLOCSTALL_MOVABLE = 11, + PGSCAN_SKIP_DMA = 12, + PGSCAN_SKIP_DMA32 = 13, + PGSCAN_SKIP_NORMAL = 14, + PGSCAN_SKIP_MOVABLE = 15, + PGFREE = 16, + PGACTIVATE = 17, + PGDEACTIVATE = 18, + PGLAZYFREE = 19, + PGFAULT = 20, + PGMAJFAULT = 21, + PGLAZYFREED = 22, + PGREFILL = 23, + PGREUSE = 24, + PGSTEAL_KSWAPD = 25, + PGSTEAL_DIRECT = 26, + PGSCAN_KSWAPD = 27, + PGSCAN_DIRECT = 28, + PGSCAN_DIRECT_THROTTLE = 29, + PGSCAN_ANON = 30, + PGSCAN_FILE = 31, + PGSTEAL_ANON = 32, + PGSTEAL_FILE = 33, + PGSCAN_ZONE_RECLAIM_FAILED = 34, + PGINODESTEAL = 35, + SLABS_SCANNED = 36, + KSWAPD_INODESTEAL = 37, + KSWAPD_LOW_WMARK_HIT_QUICKLY = 38, + KSWAPD_HIGH_WMARK_HIT_QUICKLY = 39, + PAGEOUTRUN = 40, + PGROTATED = 41, + DROP_PAGECACHE = 42, + DROP_SLAB = 43, + OOM_KILL = 44, + NUMA_PTE_UPDATES = 45, + NUMA_HUGE_PTE_UPDATES = 46, + NUMA_HINT_FAULTS = 47, + NUMA_HINT_FAULTS_LOCAL = 48, + NUMA_PAGE_MIGRATE = 49, + PGMIGRATE_SUCCESS = 50, + PGMIGRATE_FAIL = 51, + THP_MIGRATION_SUCCESS = 52, + THP_MIGRATION_FAIL = 53, + THP_MIGRATION_SPLIT = 54, + COMPACTMIGRATE_SCANNED = 55, + COMPACTFREE_SCANNED = 56, + COMPACTISOLATED = 57, + COMPACTSTALL = 58, + COMPACTFAIL = 59, + COMPACTSUCCESS = 60, + KCOMPACTD_WAKE = 61, + KCOMPACTD_MIGRATE_SCANNED = 62, + KCOMPACTD_FREE_SCANNED = 63, + HTLB_BUDDY_PGALLOC = 64, + HTLB_BUDDY_PGALLOC_FAIL = 65, + UNEVICTABLE_PGCULLED = 66, + UNEVICTABLE_PGSCANNED = 67, + UNEVICTABLE_PGRESCUED = 68, + UNEVICTABLE_PGMLOCKED = 69, + UNEVICTABLE_PGMUNLOCKED = 70, + UNEVICTABLE_PGCLEARED = 71, + UNEVICTABLE_PGSTRANDED = 72, + THP_FAULT_ALLOC = 73, + THP_FAULT_FALLBACK = 74, + THP_FAULT_FALLBACK_CHARGE = 75, + THP_COLLAPSE_ALLOC = 76, + THP_COLLAPSE_ALLOC_FAILED = 77, + THP_FILE_ALLOC = 78, + THP_FILE_FALLBACK = 79, + THP_FILE_FALLBACK_CHARGE = 80, + THP_FILE_MAPPED = 81, + THP_SPLIT_PAGE = 82, + THP_SPLIT_PAGE_FAILED = 83, + THP_DEFERRED_SPLIT_PAGE = 84, + THP_SPLIT_PMD = 85, + THP_SPLIT_PUD = 86, + THP_ZERO_PAGE_ALLOC = 87, + THP_ZERO_PAGE_ALLOC_FAILED = 88, + THP_SWPOUT = 89, + THP_SWPOUT_FALLBACK = 90, + BALLOON_INFLATE = 91, + BALLOON_DEFLATE = 92, + BALLOON_MIGRATE = 93, + SWAP_RA = 94, + SWAP_RA_HIT = 95, + DIRECT_MAP_LEVEL2_SPLIT = 96, + DIRECT_MAP_LEVEL3_SPLIT = 97, + NR_VM_EVENT_ITEMS = 98, +}; + +struct 
tlb_context { + u64 ctx_id; + u64 tlb_gen; +}; + +struct tlb_state { + struct mm_struct *loaded_mm; + union { + struct mm_struct *last_user_mm; + long unsigned int last_user_mm_ibpb; + }; + u16 loaded_mm_asid; + u16 next_asid; + bool invalidate_other; + short unsigned int user_pcid_flush_mask; + long unsigned int cr4; + struct tlb_context ctxs[6]; +}; + +struct boot_params_to_save { + unsigned int start; + unsigned int len; +}; + +enum cpu_idle_type { + CPU_IDLE = 0, + CPU_NOT_IDLE = 1, + CPU_NEWLY_IDLE = 2, + CPU_MAX_IDLE_TYPES = 3, +}; + +enum { + __SD_BALANCE_NEWIDLE = 0, + __SD_BALANCE_EXEC = 1, + __SD_BALANCE_FORK = 2, + __SD_BALANCE_WAKE = 3, + __SD_WAKE_AFFINE = 4, + __SD_ASYM_CPUCAPACITY = 5, + __SD_SHARE_CPUCAPACITY = 6, + __SD_SHARE_PKG_RESOURCES = 7, + __SD_SERIALIZE = 8, + __SD_ASYM_PACKING = 9, + __SD_PREFER_SIBLING = 10, + __SD_OVERLAP = 11, + __SD_NUMA = 12, + __SD_FLAG_CNT = 13, +}; + +struct x86_legacy_devices { + int pnpbios; +}; + +enum x86_legacy_i8042_state { + X86_LEGACY_I8042_PLATFORM_ABSENT = 0, + X86_LEGACY_I8042_FIRMWARE_ABSENT = 1, + X86_LEGACY_I8042_EXPECTED_PRESENT = 2, +}; + +struct x86_legacy_features { + enum x86_legacy_i8042_state i8042; + int rtc; + int warm_reset; + int no_vga; + int reserve_bios_regions; + struct x86_legacy_devices devices; +}; + +struct ghcb; + +struct x86_hyper_runtime { + void (*pin_vcpu)(int); + void (*sev_es_hcall_prepare)(struct ghcb *, struct pt_regs *); + bool (*sev_es_hcall_finish)(struct ghcb *, struct pt_regs *); +}; + +struct x86_platform_ops { + long unsigned int (*calibrate_cpu)(); + long unsigned int (*calibrate_tsc)(); + void (*get_wallclock)(struct timespec64 *); + int (*set_wallclock)(const struct timespec64 *); + void (*iommu_shutdown)(); + bool (*is_untracked_pat_range)(u64, u64); + void (*nmi_init)(); + unsigned char (*get_nmi_reason)(); + void (*save_sched_clock_state)(); + void (*restore_sched_clock_state)(); + void (*apic_post_init)(); + struct x86_legacy_features legacy; + void (*set_legacy_features)(); + struct x86_hyper_runtime hyper; +}; + +typedef signed char __s8; + +typedef __s8 s8; + +typedef __u32 __le32; + +typedef long unsigned int irq_hw_number_t; + +struct kernel_symbol { + int value_offset; + int name_offset; + int namespace_offset; +}; + +typedef int (*initcall_t)(); + +typedef int initcall_entry_t; + +struct obs_kernel_param { + const char *str; + int (*setup_func)(char *); + int early; +}; + +struct lockdep_map {}; + +struct jump_entry { + s32 code; + s32 target; + long int key; +}; + +struct static_key_mod; + +struct static_key { + atomic_t enabled; + union { + long unsigned int type; + struct jump_entry *entries; + struct static_key_mod *next; + }; +}; + +struct static_key_true { + struct static_key key; +}; + +struct static_key_false { + struct static_key key; +}; + +struct _ddebug { + const char *modname; + const char *function; + const char *filename; + const char *format; + unsigned int lineno: 18; + unsigned int flags: 8; + union { + struct static_key_true dd_key_true; + struct static_key_false dd_key_false; + } key; +}; + +struct static_call_site { + s32 addr; + s32 key; +}; + +struct static_call_mod { + struct static_call_mod *next; + struct module *mod; + struct static_call_site *sites; +}; + +struct static_call_key { + void *func; + union { + long unsigned int type; + struct static_call_mod *mods; + struct static_call_site *sites; + }; +}; + +enum system_states { + SYSTEM_BOOTING = 0, + SYSTEM_SCHEDULING = 1, + SYSTEM_RUNNING = 2, + SYSTEM_HALT = 3, + SYSTEM_POWER_OFF = 4, + 
SYSTEM_RESTART = 5, + SYSTEM_SUSPEND = 6, +}; + +struct bug_entry { + int bug_addr_disp; + int file_disp; + short unsigned int line; + short unsigned int flags; +}; + +typedef struct cpumask *cpumask_var_t; + +struct tracepoint_func { + void *func; + void *data; + int prio; +}; + +struct tracepoint { + const char *name; + struct static_key key; + struct static_call_key *static_call_key; + void *static_call_tramp; + void *iterator; + int (*regfunc)(); + void (*unregfunc)(); + struct tracepoint_func *funcs; +}; + +typedef const int tracepoint_ptr_t; + +struct bpf_raw_event_map { + struct tracepoint *tp; + void *bpf_func; + u32 num_args; + u32 writable_size; + long: 64; +}; + +struct seq_operations { + void * (*start)(struct seq_file *, loff_t *); + void (*stop)(struct seq_file *, void *); + void * (*next)(struct seq_file *, void *, loff_t *); + int (*show)(struct seq_file *, void *); +}; + +struct fixed_percpu_data { + char gs_base[40]; + long unsigned int stack_canary; +}; + +enum perf_event_state { + PERF_EVENT_STATE_DEAD = 4294967292, + PERF_EVENT_STATE_EXIT = 4294967293, + PERF_EVENT_STATE_ERROR = 4294967294, + PERF_EVENT_STATE_OFF = 4294967295, + PERF_EVENT_STATE_INACTIVE = 0, + PERF_EVENT_STATE_ACTIVE = 1, +}; + +typedef struct { + atomic_long_t a; +} local_t; + +typedef struct { + local_t a; +} local64_t; + +struct perf_event_attr { + __u32 type; + __u32 size; + __u64 config; + union { + __u64 sample_period; + __u64 sample_freq; + }; + __u64 sample_type; + __u64 read_format; + __u64 disabled: 1; + __u64 inherit: 1; + __u64 pinned: 1; + __u64 exclusive: 1; + __u64 exclude_user: 1; + __u64 exclude_kernel: 1; + __u64 exclude_hv: 1; + __u64 exclude_idle: 1; + __u64 mmap: 1; + __u64 comm: 1; + __u64 freq: 1; + __u64 inherit_stat: 1; + __u64 enable_on_exec: 1; + __u64 task: 1; + __u64 watermark: 1; + __u64 precise_ip: 2; + __u64 mmap_data: 1; + __u64 sample_id_all: 1; + __u64 exclude_host: 1; + __u64 exclude_guest: 1; + __u64 exclude_callchain_kernel: 1; + __u64 exclude_callchain_user: 1; + __u64 mmap2: 1; + __u64 comm_exec: 1; + __u64 use_clockid: 1; + __u64 context_switch: 1; + __u64 write_backward: 1; + __u64 namespaces: 1; + __u64 ksymbol: 1; + __u64 bpf_event: 1; + __u64 aux_output: 1; + __u64 cgroup: 1; + __u64 text_poke: 1; + __u64 build_id: 1; + __u64 inherit_thread: 1; + __u64 remove_on_exec: 1; + __u64 sigtrap: 1; + __u64 __reserved_1: 26; + union { + __u32 wakeup_events; + __u32 wakeup_watermark; + }; + __u32 bp_type; + union { + __u64 bp_addr; + __u64 kprobe_func; + __u64 uprobe_path; + __u64 config1; + }; + union { + __u64 bp_len; + __u64 kprobe_addr; + __u64 probe_offset; + __u64 config2; + }; + __u64 branch_sample_type; + __u64 sample_regs_user; + __u32 sample_stack_user; + __s32 clockid; + __u64 sample_regs_intr; + __u32 aux_watermark; + __u16 sample_max_stack; + __u16 __reserved_2; + __u32 aux_sample_size; + __u32 __reserved_3; + __u64 sig_data; +}; + +struct hw_perf_event_extra { + u64 config; + unsigned int reg; + int alloc; + int idx; +}; + +struct arch_hw_breakpoint { + long unsigned int address; + long unsigned int mask; + u8 len; + u8 type; +}; + +struct hw_perf_event { + union { + struct { + u64 config; + u64 last_tag; + long unsigned int config_base; + long unsigned int event_base; + int event_base_rdpmc; + int idx; + int last_cpu; + int flags; + struct hw_perf_event_extra extra_reg; + struct hw_perf_event_extra branch_reg; + }; + struct { + struct hrtimer hrtimer; + }; + struct { + struct list_head tp_list; + }; + struct { + u64 pwr_acc; + u64 ptsc; + }; + struct { 
+ struct arch_hw_breakpoint info; + struct list_head bp_list; + }; + struct { + u8 iommu_bank; + u8 iommu_cntr; + u16 padding; + u64 conf; + u64 conf1; + }; + }; + struct task_struct *target; + void *addr_filters; + long unsigned int addr_filters_gen; + int state; + local64_t prev_count; + u64 sample_period; + union { + struct { + u64 last_period; + local64_t period_left; + }; + struct { + u64 saved_metric; + u64 saved_slots; + }; + }; + u64 interrupts_seq; + u64 interrupts; + u64 freq_time_stamp; + u64 freq_count_stamp; +}; + +struct irq_work { + struct __call_single_node node; + void (*func)(struct irq_work *); +}; + +struct perf_addr_filters_head { + struct list_head list; + raw_spinlock_t lock; + unsigned int nr_file_filters; +}; + +struct perf_sample_data; + +typedef void (*perf_overflow_handler_t)(struct perf_event *, struct perf_sample_data *, struct pt_regs *); + +struct ftrace_ops; + +struct ftrace_regs; + +typedef void (*ftrace_func_t)(long unsigned int, long unsigned int, struct ftrace_ops *, struct ftrace_regs *); + +struct ftrace_hash; + +struct ftrace_ops_hash { + struct ftrace_hash *notrace_hash; + struct ftrace_hash *filter_hash; + struct mutex regex_lock; +}; + +struct ftrace_ops { + ftrace_func_t func; + struct ftrace_ops *next; + long unsigned int flags; + void *private; + ftrace_func_t saved_func; + struct ftrace_ops_hash local_hash; + struct ftrace_ops_hash *func_hash; + struct ftrace_ops_hash old_hash; + long unsigned int trampoline; + long unsigned int trampoline_size; + struct list_head list; +}; + +struct pmu; + +struct perf_buffer; + +struct perf_addr_filter_range; + +struct bpf_prog; + +struct trace_event_call; + +struct event_filter; + +struct perf_cgroup; + +struct perf_event { + struct list_head event_entry; + struct list_head sibling_list; + struct list_head active_list; + struct rb_node group_node; + u64 group_index; + struct list_head migrate_entry; + struct hlist_node hlist_entry; + struct list_head active_entry; + int nr_siblings; + int event_caps; + int group_caps; + struct perf_event *group_leader; + struct pmu *pmu; + void *pmu_private; + enum perf_event_state state; + unsigned int attach_state; + local64_t count; + atomic64_t child_count; + u64 total_time_enabled; + u64 total_time_running; + u64 tstamp; + u64 shadow_ctx_time; + struct perf_event_attr attr; + u16 header_size; + u16 id_header_size; + u16 read_size; + struct hw_perf_event hw; + struct perf_event_context *ctx; + atomic_long_t refcount; + atomic64_t child_total_time_enabled; + atomic64_t child_total_time_running; + struct mutex child_mutex; + struct list_head child_list; + struct perf_event *parent; + int oncpu; + int cpu; + struct list_head owner_entry; + struct task_struct *owner; + struct mutex mmap_mutex; + atomic_t mmap_count; + struct perf_buffer *rb; + struct list_head rb_entry; + long unsigned int rcu_batches; + int rcu_pending; + wait_queue_head_t waitq; + struct fasync_struct *fasync; + int pending_wakeup; + int pending_kill; + int pending_disable; + long unsigned int pending_addr; + struct irq_work pending; + atomic_t event_limit; + struct perf_addr_filters_head addr_filters; + struct perf_addr_filter_range *addr_filter_ranges; + long unsigned int addr_filters_gen; + struct perf_event *aux_event; + void (*destroy)(struct perf_event *); + struct callback_head callback_head; + struct pid_namespace *ns; + u64 id; + u64 (*clock)(); + perf_overflow_handler_t overflow_handler; + void *overflow_handler_context; + perf_overflow_handler_t orig_overflow_handler; + struct bpf_prog *prog; + 
struct trace_event_call *tp_event; + struct event_filter *filter; + struct ftrace_ops ftrace_ops; + struct perf_cgroup *cgrp; + void *security; + struct list_head sb_list; +}; + +struct uid_gid_extent { + u32 first; + u32 lower_first; + u32 count; +}; + +struct uid_gid_map { + u32 nr_extents; + union { + struct uid_gid_extent extent[5]; + struct { + struct uid_gid_extent *forward; + struct uid_gid_extent *reverse; + }; + }; +}; + +struct proc_ns_operations; + +struct ns_common { + atomic_long_t stashed; + const struct proc_ns_operations *ops; + unsigned int inum; + refcount_t count; +}; + +struct ctl_table; + +struct ctl_table_root; + +struct ctl_table_set; + +struct ctl_dir; + +struct ctl_node; + +struct ctl_table_header { + union { + struct { + struct ctl_table *ctl_table; + int used; + int count; + int nreg; + }; + struct callback_head rcu; + }; + struct completion *unregistering; + struct ctl_table *ctl_table_arg; + struct ctl_table_root *root; + struct ctl_table_set *set; + struct ctl_dir *parent; + struct ctl_node *node; + struct hlist_head inodes; +}; + +struct ctl_dir { + struct ctl_table_header header; + struct rb_root root; +}; + +struct ctl_table_set { + int (*is_seen)(struct ctl_table_set *); + struct ctl_dir dir; +}; + +struct user_namespace { + struct uid_gid_map uid_map; + struct uid_gid_map gid_map; + struct uid_gid_map projid_map; + struct user_namespace *parent; + int level; + kuid_t owner; + kgid_t group; + struct ns_common ns; + long unsigned int flags; + bool parent_could_setfcap; + struct list_head keyring_name_list; + struct key *user_keyring_register; + struct rw_semaphore keyring_sem; + struct key *persistent_keyring_register; + struct work_struct work; + struct ctl_table_set set; + struct ctl_table_header *sysctls; + struct ucounts *ucounts; + int ucount_max[12]; +}; + +struct pollfd { + int fd; + short int events; + short int revents; +}; + +typedef void (*smp_call_func_t)(void *); + +struct __call_single_data { + struct __call_single_node node; + smp_call_func_t func; + void *info; +}; + +struct smp_ops { + void (*smp_prepare_boot_cpu)(); + void (*smp_prepare_cpus)(unsigned int); + void (*smp_cpus_done)(unsigned int); + void (*stop_other_cpus)(int); + void (*crash_stop_other_cpus)(); + void (*smp_send_reschedule)(int); + int (*cpu_up)(unsigned int, struct task_struct *); + int (*cpu_disable)(); + void (*cpu_die)(unsigned int); + void (*play_dead)(); + void (*send_call_func_ipi)(const struct cpumask *); + void (*send_call_func_single_ipi)(int); +}; + +struct wait_queue_entry; + +typedef int (*wait_queue_func_t)(struct wait_queue_entry *, unsigned int, int, void *); + +struct wait_queue_entry { + unsigned int flags; + void *private; + wait_queue_func_t func; + struct list_head entry; +}; + +typedef struct wait_queue_entry wait_queue_entry_t; + +struct timer_list { + struct hlist_node entry; + long unsigned int expires; + void (*function)(struct timer_list *); + u32 flags; +}; + +struct delayed_work { + struct work_struct work; + struct timer_list timer; + struct workqueue_struct *wq; + int cpu; +}; + +struct rcu_work { + struct work_struct work; + struct callback_head rcu; + struct workqueue_struct *wq; +}; + +struct rcu_segcblist { + struct callback_head *head; + struct callback_head **tails[4]; + long unsigned int gp_seq[4]; + long int len; + long int seglen[4]; + u8 flags; +}; + +struct srcu_node; + +struct srcu_struct; + +struct srcu_data { + long unsigned int srcu_lock_count[2]; + long unsigned int srcu_unlock_count[2]; + long: 64; + long: 64; + long: 64; + 
long: 64; + spinlock_t lock; + struct rcu_segcblist srcu_cblist; + long unsigned int srcu_gp_seq_needed; + long unsigned int srcu_gp_seq_needed_exp; + bool srcu_cblist_invoking; + struct timer_list delay_work; + struct work_struct work; + struct callback_head srcu_barrier_head; + struct srcu_node *mynode; + long unsigned int grpmask; + int cpu; + struct srcu_struct *ssp; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct srcu_node { + spinlock_t lock; + long unsigned int srcu_have_cbs[4]; + long unsigned int srcu_data_have_cbs[4]; + long unsigned int srcu_gp_seq_needed_exp; + struct srcu_node *srcu_parent; + int grplo; + int grphi; +}; + +struct srcu_struct { + struct srcu_node node[521]; + struct srcu_node *level[4]; + struct mutex srcu_cb_mutex; + spinlock_t lock; + struct mutex srcu_gp_mutex; + unsigned int srcu_idx; + long unsigned int srcu_gp_seq; + long unsigned int srcu_gp_seq_needed; + long unsigned int srcu_gp_seq_needed_exp; + long unsigned int srcu_last_gp_end; + struct srcu_data *sda; + long unsigned int srcu_barrier_seq; + struct mutex srcu_barrier_mutex; + struct completion srcu_barrier_completion; + atomic_t srcu_barrier_cpu_cnt; + struct delayed_work work; +}; + +struct anon_vma { + struct anon_vma *root; + struct rw_semaphore rwsem; + atomic_t refcount; + unsigned int degree; + struct anon_vma *parent; + struct rb_root_cached rb_root; +}; + +struct mempolicy { + atomic_t refcnt; + short unsigned int mode; + short unsigned int flags; + union { + short int preferred_node; + nodemask_t nodes; + } v; + union { + nodemask_t cpuset_mems_allowed; + nodemask_t user_nodemask; + } w; +}; + +struct linux_binprm; + +struct coredump_params; + +struct linux_binfmt { + struct list_head lh; + struct module *module; + int (*load_binary)(struct linux_binprm *); + int (*load_shlib)(struct file *); + int (*core_dump)(struct coredump_params *); + long unsigned int min_coredump; +}; + +struct free_area { + struct list_head free_list[5]; + long unsigned int nr_free; +}; + +struct zone_padding { + char x[0]; +}; + +struct pglist_data; + +struct lruvec { + struct list_head lists[5]; + spinlock_t lru_lock; + long unsigned int anon_cost; + long unsigned int file_cost; + atomic_long_t nonresident_age; + long unsigned int refaults[2]; + long unsigned int flags; + struct pglist_data *pgdat; +}; + +struct per_cpu_pageset; + +struct zone { + long unsigned int _watermark[3]; + long unsigned int watermark_boost; + long unsigned int nr_reserved_highatomic; + long int lowmem_reserve[5]; + int node; + struct pglist_data *zone_pgdat; + struct per_cpu_pageset *pageset; + int pageset_high; + int pageset_batch; + long unsigned int zone_start_pfn; + atomic_long_t managed_pages; + long unsigned int spanned_pages; + long unsigned int present_pages; + const char *name; + long unsigned int nr_isolate_pageblock; + seqlock_t span_seqlock; + int initialized; + long: 32; + long: 64; + long: 64; + struct zone_padding _pad1_; + struct free_area free_area[11]; + long unsigned int flags; + spinlock_t lock; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct zone_padding _pad2_; + long unsigned int percpu_drift_mark; + long unsigned int compact_cached_free_pfn; + long unsigned int compact_cached_migrate_pfn[2]; + long unsigned int compact_init_migrate_pfn; + long unsigned int compact_init_free_pfn; + unsigned int compact_considered; + unsigned int compact_defer_shift; + int compact_order_failed; + bool compact_blockskip_flush; + bool contiguous; + short: 16; + 
struct zone_padding _pad3_; + atomic_long_t vm_stat[11]; + atomic_long_t vm_numa_stat[6]; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct zoneref { + struct zone *zone; + int zone_idx; +}; + +struct zonelist { + struct zoneref _zonerefs[5121]; +}; + +enum zone_type { + ZONE_DMA = 0, + ZONE_DMA32 = 1, + ZONE_NORMAL = 2, + ZONE_MOVABLE = 3, + ZONE_DEVICE = 4, + __MAX_NR_ZONES = 5, +}; + +struct deferred_split { + spinlock_t split_queue_lock; + struct list_head split_queue; + long unsigned int split_queue_len; +}; + +struct per_cpu_nodestat; + +struct pglist_data { + struct zone node_zones[5]; + struct zonelist node_zonelists[2]; + int nr_zones; + spinlock_t node_size_lock; + long unsigned int node_start_pfn; + long unsigned int node_present_pages; + long unsigned int node_spanned_pages; + int node_id; + wait_queue_head_t kswapd_wait; + wait_queue_head_t pfmemalloc_wait; + struct task_struct *kswapd; + int kswapd_order; + enum zone_type kswapd_highest_zoneidx; + int kswapd_failures; + int kcompactd_max_order; + enum zone_type kcompactd_highest_zoneidx; + wait_queue_head_t kcompactd_wait; + struct task_struct *kcompactd; + long unsigned int totalreserve_pages; + long unsigned int min_unmapped_pages; + long unsigned int min_slab_pages; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct zone_padding _pad1_; + struct deferred_split deferred_split_queue; + struct lruvec __lruvec; + long unsigned int flags; + long: 64; + struct zone_padding _pad2_; + struct per_cpu_nodestat *per_cpu_nodestats; + atomic_long_t vm_stat[39]; +}; + +struct per_cpu_pages { + int count; + int high; + int batch; + struct list_head lists[3]; +}; + +struct per_cpu_pageset { + struct per_cpu_pages pcp; + s8 expire; + u16 vm_numa_stat_diff[6]; + s8 stat_threshold; + s8 vm_stat_diff[11]; +}; + +struct per_cpu_nodestat { + s8 stat_threshold; + s8 vm_node_stat_diff[39]; +}; + +typedef struct pglist_data pg_data_t; + +enum irq_domain_bus_token { + DOMAIN_BUS_ANY = 0, + DOMAIN_BUS_WIRED = 1, + DOMAIN_BUS_GENERIC_MSI = 2, + DOMAIN_BUS_PCI_MSI = 3, + DOMAIN_BUS_PLATFORM_MSI = 4, + DOMAIN_BUS_NEXUS = 5, + DOMAIN_BUS_IPI = 6, + DOMAIN_BUS_FSL_MC_MSI = 7, + DOMAIN_BUS_TI_SCI_INTA_MSI = 8, + DOMAIN_BUS_WAKEUP = 9, + DOMAIN_BUS_VMD_MSI = 10, +}; + +struct irq_domain_ops; + +struct fwnode_handle; + +struct irq_domain_chip_generic; + +struct irq_domain { + struct list_head link; + const char *name; + const struct irq_domain_ops *ops; + void *host_data; + unsigned int flags; + unsigned int mapcount; + struct fwnode_handle *fwnode; + enum irq_domain_bus_token bus_token; + struct irq_domain_chip_generic *gc; + struct irq_domain *parent; + irq_hw_number_t hwirq_max; + unsigned int revmap_direct_max_irq; + unsigned int revmap_size; + struct xarray revmap_tree; + struct mutex revmap_tree_mutex; + unsigned int linear_revmap[0]; +}; + +typedef int proc_handler(struct ctl_table *, int, void *, size_t *, loff_t *); + +struct ctl_table_poll; + +struct ctl_table { + const char *procname; + void *data; + int maxlen; + umode_t mode; + struct ctl_table *child; + proc_handler *proc_handler; + struct ctl_table_poll *poll; + void *extra1; + void *extra2; +}; + +struct ctl_table_poll { + atomic_t event; + wait_queue_head_t wait; +}; + +struct ctl_node { + struct rb_node node; + struct ctl_table_header *header; +}; + +struct ctl_table_root { + struct ctl_table_set default_set; + struct ctl_table_set * (*lookup)(struct ctl_table_root *); + void (*set_ownership)(struct 
ctl_table_header *, struct ctl_table *, kuid_t *, kgid_t *); + int (*permissions)(struct ctl_table_header *, struct ctl_table *); +}; + +enum umh_disable_depth { + UMH_ENABLED = 0, + UMH_FREEZING = 1, + UMH_DISABLED = 2, +}; + +typedef __u64 Elf64_Addr; + +typedef __u16 Elf64_Half; + +typedef __u64 Elf64_Off; + +typedef __u32 Elf64_Word; + +typedef __u64 Elf64_Xword; + +struct elf64_sym { + Elf64_Word st_name; + unsigned char st_info; + unsigned char st_other; + Elf64_Half st_shndx; + Elf64_Addr st_value; + Elf64_Xword st_size; +}; + +typedef struct elf64_sym Elf64_Sym; + +struct elf64_hdr { + unsigned char e_ident[16]; + Elf64_Half e_type; + Elf64_Half e_machine; + Elf64_Word e_version; + Elf64_Addr e_entry; + Elf64_Off e_phoff; + Elf64_Off e_shoff; + Elf64_Word e_flags; + Elf64_Half e_ehsize; + Elf64_Half e_phentsize; + Elf64_Half e_phnum; + Elf64_Half e_shentsize; + Elf64_Half e_shnum; + Elf64_Half e_shstrndx; +}; + +typedef struct elf64_hdr Elf64_Ehdr; + +struct elf64_shdr { + Elf64_Word sh_name; + Elf64_Word sh_type; + Elf64_Xword sh_flags; + Elf64_Addr sh_addr; + Elf64_Off sh_offset; + Elf64_Xword sh_size; + Elf64_Word sh_link; + Elf64_Word sh_info; + Elf64_Xword sh_addralign; + Elf64_Xword sh_entsize; +}; + +typedef struct elf64_shdr Elf64_Shdr; + +struct idr { + struct xarray idr_rt; + unsigned int idr_base; + unsigned int idr_next; +}; + +struct kernfs_root; + +struct kernfs_elem_dir { + long unsigned int subdirs; + struct rb_root children; + struct kernfs_root *root; +}; + +struct kernfs_node; + +struct kernfs_syscall_ops; + +struct kernfs_root { + struct kernfs_node *kn; + unsigned int flags; + struct idr ino_idr; + u32 last_id_lowbits; + u32 id_highbits; + struct kernfs_syscall_ops *syscall_ops; + struct list_head supers; + wait_queue_head_t deactivate_waitq; +}; + +struct kernfs_elem_symlink { + struct kernfs_node *target_kn; +}; + +struct kernfs_ops; + +struct kernfs_open_node; + +struct kernfs_elem_attr { + const struct kernfs_ops *ops; + struct kernfs_open_node *open; + loff_t size; + struct kernfs_node *notify_next; +}; + +struct kernfs_iattrs; + +struct kernfs_node { + atomic_t count; + atomic_t active; + struct kernfs_node *parent; + const char *name; + struct rb_node rb; + const void *ns; + unsigned int hash; + union { + struct kernfs_elem_dir dir; + struct kernfs_elem_symlink symlink; + struct kernfs_elem_attr attr; + }; + void *priv; + u64 id; + short unsigned int flags; + umode_t mode; + struct kernfs_iattrs *iattr; +}; + +struct kernfs_open_file; + +struct kernfs_ops { + int (*open)(struct kernfs_open_file *); + void (*release)(struct kernfs_open_file *); + int (*seq_show)(struct seq_file *, void *); + void * (*seq_start)(struct seq_file *, loff_t *); + void * (*seq_next)(struct seq_file *, void *, loff_t *); + void (*seq_stop)(struct seq_file *, void *); + ssize_t (*read)(struct kernfs_open_file *, char *, size_t, loff_t); + size_t atomic_write_len; + bool prealloc; + ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t); + __poll_t (*poll)(struct kernfs_open_file *, struct poll_table_struct *); + int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); +}; + +struct kernfs_syscall_ops { + int (*show_options)(struct seq_file *, struct kernfs_root *); + int (*mkdir)(struct kernfs_node *, const char *, umode_t); + int (*rmdir)(struct kernfs_node *); + int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); + int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); +}; + +struct seq_file { + char *buf; + 
size_t size; + size_t from; + size_t count; + size_t pad_until; + loff_t index; + loff_t read_pos; + struct mutex lock; + const struct seq_operations *op; + int poll_event; + const struct file *file; + void *private; +}; + +struct kernfs_open_file { + struct kernfs_node *kn; + struct file *file; + struct seq_file *seq_file; + void *priv; + struct mutex mutex; + struct mutex prealloc_mutex; + int event; + struct list_head list; + char *prealloc_buf; + size_t atomic_write_len; + bool mmapped: 1; + bool released: 1; + const struct vm_operations_struct *vm_ops; +}; + +typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); + +struct poll_table_struct { + poll_queue_proc _qproc; + __poll_t _key; +}; + +enum kobj_ns_type { + KOBJ_NS_TYPE_NONE = 0, + KOBJ_NS_TYPE_NET = 1, + KOBJ_NS_TYPES = 2, +}; + +struct sock; + +struct kobj_ns_type_operations { + enum kobj_ns_type type; + bool (*current_may_mount)(); + void * (*grab_current_ns)(); + const void * (*netlink_ns)(struct sock *); + const void * (*initial_ns)(); + void (*drop_ns)(void *); +}; + +struct attribute { + const char *name; + umode_t mode; +}; + +struct kobject; + +struct bin_attribute; + +struct attribute_group { + const char *name; + umode_t (*is_visible)(struct kobject *, struct attribute *, int); + umode_t (*is_bin_visible)(struct kobject *, struct bin_attribute *, int); + struct attribute **attrs; + struct bin_attribute **bin_attrs; +}; + +struct kref { + refcount_t refcount; +}; + +struct kset; + +struct kobj_type; + +struct kobject { + const char *name; + struct list_head entry; + struct kobject *parent; + struct kset *kset; + struct kobj_type *ktype; + struct kernfs_node *sd; + struct kref kref; + unsigned int state_initialized: 1; + unsigned int state_in_sysfs: 1; + unsigned int state_add_uevent_sent: 1; + unsigned int state_remove_uevent_sent: 1; + unsigned int uevent_suppress: 1; +}; + +struct bin_attribute { + struct attribute attr; + size_t size; + void *private; + struct address_space *mapping; + ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); + ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); + int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); +}; + +struct sysfs_ops { + ssize_t (*show)(struct kobject *, struct attribute *, char *); + ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t); +}; + +enum refcount_saturation_type { + REFCOUNT_ADD_NOT_ZERO_OVF = 0, + REFCOUNT_ADD_OVF = 1, + REFCOUNT_ADD_UAF = 2, + REFCOUNT_SUB_UAF = 3, + REFCOUNT_DEC_LEAK = 4, +}; + +struct kset_uevent_ops; + +struct kset { + struct list_head list; + spinlock_t list_lock; + struct kobject kobj; + const struct kset_uevent_ops *uevent_ops; +}; + +struct kobj_type { + void (*release)(struct kobject *); + const struct sysfs_ops *sysfs_ops; + struct attribute **default_attrs; + const struct attribute_group **default_groups; + const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); + const void * (*namespace)(struct kobject *); + void (*get_ownership)(struct kobject *, kuid_t *, kgid_t *); +}; + +struct kobj_uevent_env { + char *argv[3]; + char *envp[64]; + int envp_idx; + char buf[2048]; + int buflen; +}; + +struct kset_uevent_ops { + int (* const filter)(struct kset *, struct kobject *); + const char * (* const name)(struct kset *, struct kobject *); + int (* const uevent)(struct kset *, struct kobject *, struct kobj_uevent_env 
*); +}; + +struct kernel_param; + +struct kernel_param_ops { + unsigned int flags; + int (*set)(const char *, const struct kernel_param *); + int (*get)(char *, const struct kernel_param *); + void (*free)(void *); +}; + +struct kparam_string; + +struct kparam_array; + +struct kernel_param { + const char *name; + struct module *mod; + const struct kernel_param_ops *ops; + const u16 perm; + s8 level; + u8 flags; + union { + void *arg; + const struct kparam_string *str; + const struct kparam_array *arr; + }; +}; + +struct kparam_string { + unsigned int maxlen; + char *string; +}; + +struct kparam_array { + unsigned int max; + unsigned int elemsize; + unsigned int *num; + const struct kernel_param_ops *ops; + void *elem; +}; + +enum module_state { + MODULE_STATE_LIVE = 0, + MODULE_STATE_COMING = 1, + MODULE_STATE_GOING = 2, + MODULE_STATE_UNFORMED = 3, +}; + +struct module_param_attrs; + +struct module_kobject { + struct kobject kobj; + struct module *mod; + struct kobject *drivers_dir; + struct module_param_attrs *mp; + struct completion *kobj_completion; +}; + +struct latch_tree_node { + struct rb_node node[2]; +}; + +struct mod_tree_node { + struct module *mod; + struct latch_tree_node node; +}; + +struct module_layout { + void *base; + unsigned int size; + unsigned int text_size; + unsigned int ro_size; + unsigned int ro_after_init_size; + struct mod_tree_node mtn; +}; + +struct mod_arch_specific {}; + +struct mod_kallsyms { + Elf64_Sym *symtab; + unsigned int num_symtab; + char *strtab; + char *typetab; +}; + +struct module_attribute; + +struct exception_table_entry; + +struct module_sect_attrs; + +struct module_notes_attrs; + +struct trace_eval_map; + +struct klp_modinfo; + +struct error_injection_entry; + +struct module { + enum module_state state; + struct list_head list; + char name[56]; + struct module_kobject mkobj; + struct module_attribute *modinfo_attrs; + const char *version; + const char *srcversion; + struct kobject *holders_dir; + const struct kernel_symbol *syms; + const s32 *crcs; + unsigned int num_syms; + struct mutex param_lock; + struct kernel_param *kp; + unsigned int num_kp; + unsigned int num_gpl_syms; + const struct kernel_symbol *gpl_syms; + const s32 *gpl_crcs; + bool using_gplonly_symbols; + bool sig_ok; + bool async_probe_requested; + unsigned int num_exentries; + struct exception_table_entry *extable; + int (*init)(); + struct module_layout core_layout; + struct module_layout init_layout; + struct mod_arch_specific arch; + long unsigned int taints; + unsigned int num_bugs; + struct list_head bug_list; + struct bug_entry *bug_table; + struct mod_kallsyms *kallsyms; + struct mod_kallsyms core_kallsyms; + struct module_sect_attrs *sect_attrs; + struct module_notes_attrs *notes_attrs; + char *args; + void *percpu; + unsigned int percpu_size; + void *noinstr_text_start; + unsigned int noinstr_text_size; + unsigned int num_tracepoints; + tracepoint_ptr_t *tracepoints_ptrs; + unsigned int num_srcu_structs; + struct srcu_struct **srcu_struct_ptrs; + unsigned int num_bpf_raw_events; + struct bpf_raw_event_map *bpf_raw_events; + unsigned int btf_data_size; + void *btf_data; + struct jump_entry *jump_entries; + unsigned int num_jump_entries; + unsigned int num_trace_bprintk_fmt; + const char **trace_bprintk_fmt_start; + struct trace_event_call **trace_events; + unsigned int num_trace_events; + struct trace_eval_map **trace_evals; + unsigned int num_trace_evals; + unsigned int num_ftrace_callsites; + long unsigned int *ftrace_callsites; + void *kprobes_text_start; + 
unsigned int kprobes_text_size; + long unsigned int *kprobe_blacklist; + unsigned int num_kprobe_blacklist; + int num_static_call_sites; + struct static_call_site *static_call_sites; + bool klp; + bool klp_alive; + struct klp_modinfo *klp_info; + struct list_head source_list; + struct list_head target_list; + void (*exit)(); + atomic_t refcnt; + struct error_injection_entry *ei_funcs; + unsigned int num_ei_funcs; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct error_injection_entry { + long unsigned int addr; + int etype; +}; + +struct module_attribute { + struct attribute attr; + ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); + ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t); + void (*setup)(struct module *, const char *); + int (*test)(struct module *); + void (*free)(struct module *); +}; + +struct klp_modinfo { + Elf64_Ehdr hdr; + Elf64_Shdr *sechdrs; + char *secstrings; + unsigned int symndx; +}; + +struct exception_table_entry { + int insn; + int fixup; + int handler; +}; + +struct trace_event_functions; + +struct trace_event { + struct hlist_node node; + struct list_head list; + int type; + struct trace_event_functions *funcs; +}; + +struct trace_event_class; + +struct bpf_prog_array; + +struct trace_event_call { + struct list_head list; + struct trace_event_class *class; + union { + char *name; + struct tracepoint *tp; + }; + struct trace_event event; + char *print_fmt; + struct event_filter *filter; + void *mod; + void *data; + int flags; + int perf_refcount; + struct hlist_head *perf_events; + struct bpf_prog_array *prog_array; + int (*perf_perm)(struct trace_event_call *, struct perf_event *); +}; + +struct trace_eval_map { + const char *system; + const char *eval_string; + long unsigned int eval_value; +}; + +struct cgroup; + +struct cgroup_subsys; + +struct cgroup_subsys_state { + struct cgroup *cgroup; + struct cgroup_subsys *ss; + struct percpu_ref refcnt; + struct list_head sibling; + struct list_head children; + struct list_head rstat_css_node; + int id; + unsigned int flags; + u64 serial_nr; + atomic_t online_cnt; + struct work_struct destroy_work; + struct rcu_work destroy_rwork; + struct cgroup_subsys_state *parent; +}; + +struct mem_cgroup_id { + int id; + refcount_t ref; +}; + +struct page_counter { + atomic_long_t usage; + long unsigned int min; + long unsigned int low; + long unsigned int high; + long unsigned int max; + long unsigned int emin; + atomic_long_t min_usage; + atomic_long_t children_min_usage; + long unsigned int elow; + atomic_long_t low_usage; + atomic_long_t children_low_usage; + long unsigned int watermark; + long unsigned int failcnt; + struct page_counter *parent; +}; + +struct vmpressure { + long unsigned int scanned; + long unsigned int reclaimed; + long unsigned int tree_scanned; + long unsigned int tree_reclaimed; + spinlock_t sr_lock; + struct list_head events; + struct mutex events_lock; + struct work_struct work; +}; + +struct cgroup_file { + struct kernfs_node *kn; + long unsigned int notified_at; + struct timer_list notify_timer; +}; + +struct mem_cgroup_threshold_ary; + +struct mem_cgroup_thresholds { + struct mem_cgroup_threshold_ary *primary; + struct mem_cgroup_threshold_ary *spare; +}; + +struct memcg_padding { + char x[0]; +}; + +struct memcg_vmstats { + long int state[42]; + long unsigned int events[98]; + long int state_pending[42]; + long unsigned int events_pending[98]; +}; + +enum memcg_kmem_state { + KMEM_NONE = 0, + 
KMEM_ALLOCATED = 1, + KMEM_ONLINE = 2, +}; + +struct percpu_counter { + raw_spinlock_t lock; + s64 count; + struct list_head list; + s32 *counters; +}; + +struct fprop_global { + struct percpu_counter events; + unsigned int period; + seqcount_t sequence; +}; + +struct wb_domain { + spinlock_t lock; + struct fprop_global completions; + struct timer_list period_timer; + long unsigned int period_time; + long unsigned int dirty_limit_tstamp; + long unsigned int dirty_limit; +}; + +struct wb_completion { + atomic_t cnt; + wait_queue_head_t *waitq; +}; + +struct memcg_cgwb_frn { + u64 bdi_id; + int memcg_id; + u64 at; + struct wb_completion done; +}; + +struct obj_cgroup; + +struct memcg_vmstats_percpu; + +struct mem_cgroup_per_node; + +struct mem_cgroup { + struct cgroup_subsys_state css; + struct mem_cgroup_id id; + struct page_counter memory; + union { + struct page_counter swap; + struct page_counter memsw; + }; + struct page_counter kmem; + struct page_counter tcpmem; + struct work_struct high_work; + long unsigned int soft_limit; + struct vmpressure vmpressure; + bool oom_group; + bool oom_lock; + int under_oom; + int swappiness; + int oom_kill_disable; + struct cgroup_file events_file; + struct cgroup_file events_local_file; + struct cgroup_file swap_events_file; + struct mutex thresholds_lock; + struct mem_cgroup_thresholds thresholds; + struct mem_cgroup_thresholds memsw_thresholds; + struct list_head oom_notify; + long unsigned int move_charge_at_immigrate; + spinlock_t move_lock; + long unsigned int move_lock_flags; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct memcg_padding _pad1_; + struct memcg_vmstats vmstats; + atomic_long_t memory_events[8]; + atomic_long_t memory_events_local[8]; + long unsigned int socket_pressure; + bool tcpmem_active; + int tcpmem_pressure; + int kmemcg_id; + enum memcg_kmem_state kmem_state; + struct obj_cgroup *objcg; + struct list_head objcg_list; + long: 64; + long: 64; + struct memcg_padding _pad2_; + atomic_t moving_account; + struct task_struct *move_lock_task; + struct memcg_vmstats_percpu *vmstats_percpu; + struct list_head cgwb_list; + struct wb_domain cgwb_domain; + struct memcg_cgwb_frn cgwb_frn[4]; + struct list_head event_list; + spinlock_t event_list_lock; + struct deferred_split deferred_split_queue; + struct mem_cgroup_per_node *nodeinfo[0]; + long: 64; +}; + +struct fs_pin; + +struct pid_namespace { + struct idr idr; + struct callback_head rcu; + unsigned int pid_allocated; + struct task_struct *child_reaper; + struct kmem_cache *pid_cachep; + unsigned int level; + struct pid_namespace *parent; + struct fs_pin *bacct; + struct user_namespace *user_ns; + struct ucounts *ucounts; + int reboot; + struct ns_common ns; +}; + +struct task_cputime { + u64 stime; + u64 utime; + long long unsigned int sum_exec_runtime; +}; + +struct uts_namespace; + +struct ipc_namespace; + +struct mnt_namespace; + +struct net; + +struct time_namespace; + +struct cgroup_namespace; + +struct nsproxy { + atomic_t count; + struct uts_namespace *uts_ns; + struct ipc_namespace *ipc_ns; + struct mnt_namespace *mnt_ns; + struct pid_namespace *pid_ns_for_children; + struct net *net_ns; + struct time_namespace *time_ns; + struct time_namespace *time_ns_for_children; + struct cgroup_namespace *cgroup_ns; +}; + +struct bio; + +struct bio_list { + struct bio *head; + struct bio *tail; +}; + +struct blk_plug { + struct list_head mq_list; + struct list_head cb_list; + short unsigned int rq_count; + bool multiple_queues; + bool nowait; +}; + 
+struct reclaim_state { + long unsigned int reclaimed_slab; +}; + +struct fprop_local_percpu { + struct percpu_counter events; + unsigned int period; + raw_spinlock_t lock; +}; + +enum wb_reason { + WB_REASON_BACKGROUND = 0, + WB_REASON_VMSCAN = 1, + WB_REASON_SYNC = 2, + WB_REASON_PERIODIC = 3, + WB_REASON_LAPTOP_TIMER = 4, + WB_REASON_FS_FREE_SPACE = 5, + WB_REASON_FORKER_THREAD = 6, + WB_REASON_FOREIGN_FLUSH = 7, + WB_REASON_MAX = 8, +}; + +struct bdi_writeback { + struct backing_dev_info *bdi; + long unsigned int state; + long unsigned int last_old_flush; + struct list_head b_dirty; + struct list_head b_io; + struct list_head b_more_io; + struct list_head b_dirty_time; + spinlock_t list_lock; + struct percpu_counter stat[4]; + long unsigned int congested; + long unsigned int bw_time_stamp; + long unsigned int dirtied_stamp; + long unsigned int written_stamp; + long unsigned int write_bandwidth; + long unsigned int avg_write_bandwidth; + long unsigned int dirty_ratelimit; + long unsigned int balanced_dirty_ratelimit; + struct fprop_local_percpu completions; + int dirty_exceeded; + enum wb_reason start_all_reason; + spinlock_t work_lock; + struct list_head work_list; + struct delayed_work dwork; + long unsigned int dirty_sleep; + struct list_head bdi_node; + struct percpu_ref refcnt; + struct fprop_local_percpu memcg_completions; + struct cgroup_subsys_state *memcg_css; + struct cgroup_subsys_state *blkcg_css; + struct list_head memcg_node; + struct list_head blkcg_node; + union { + struct work_struct release_work; + struct callback_head rcu; + }; +}; + +struct device; + +struct backing_dev_info { + u64 id; + struct rb_node rb_node; + struct list_head bdi_list; + long unsigned int ra_pages; + long unsigned int io_pages; + struct kref refcnt; + unsigned int capabilities; + unsigned int min_ratio; + unsigned int max_ratio; + unsigned int max_prop_frac; + atomic_long_t tot_write_bandwidth; + struct bdi_writeback wb; + struct list_head wb_list; + struct xarray cgwb_tree; + struct mutex cgwb_release_mutex; + struct rw_semaphore wb_switch_rwsem; + wait_queue_head_t wb_waitq; + struct device *dev; + char dev_name[64]; + struct device *owner; + struct timer_list laptop_mode_wb_timer; + struct dentry *debug_dir; +}; + +struct css_set { + struct cgroup_subsys_state *subsys[14]; + refcount_t refcount; + struct css_set *dom_cset; + struct cgroup *dfl_cgrp; + int nr_tasks; + struct list_head tasks; + struct list_head mg_tasks; + struct list_head dying_tasks; + struct list_head task_iters; + struct list_head e_cset_node[14]; + struct list_head threaded_csets; + struct list_head threaded_csets_node; + struct hlist_node hlist; + struct list_head cgrp_links; + struct list_head mg_preload_node; + struct list_head mg_node; + struct cgroup *mg_src_cgrp; + struct cgroup *mg_dst_cgrp; + struct css_set *mg_dst_cset; + bool dead; + struct callback_head callback_head; +}; + +typedef u32 compat_uptr_t; + +struct compat_robust_list { + compat_uptr_t next; +}; + +typedef s32 compat_long_t; + +struct compat_robust_list_head { + struct compat_robust_list list; + compat_long_t futex_offset; + compat_uptr_t list_op_pending; +}; + +struct perf_event_groups { + struct rb_root tree; + u64 index; +}; + +struct perf_event_context { + struct pmu *pmu; + raw_spinlock_t lock; + struct mutex mutex; + struct list_head active_ctx_list; + struct perf_event_groups pinned_groups; + struct perf_event_groups flexible_groups; + struct list_head event_list; + struct list_head pinned_active; + struct list_head flexible_active; + int 
nr_events; + int nr_active; + int is_active; + int nr_stat; + int nr_freq; + int rotate_disable; + int rotate_necessary; + refcount_t refcount; + struct task_struct *task; + u64 time; + u64 timestamp; + struct perf_event_context *parent_ctx; + u64 parent_gen; + u64 generation; + int pin_count; + int nr_cgroups; + void *task_ctx_data; + struct callback_head callback_head; +}; + +struct task_delay_info { + raw_spinlock_t lock; + unsigned int flags; + u64 blkio_start; + u64 blkio_delay; + u64 swapin_delay; + u32 blkio_count; + u32 swapin_count; + u64 freepages_start; + u64 freepages_delay; + u64 thrashing_start; + u64 thrashing_delay; + u32 freepages_count; + u32 thrashing_count; +}; + +struct ftrace_ret_stack { + long unsigned int ret; + long unsigned int func; + long long unsigned int calltime; + long long unsigned int subtime; + long unsigned int *retp; +}; + +struct blk_integrity_profile; + +struct blk_integrity { + const struct blk_integrity_profile *profile; + unsigned char flags; + unsigned char tuple_size; + unsigned char interval_exp; + unsigned char tag_size; +}; + +enum rpm_status { + RPM_ACTIVE = 0, + RPM_RESUMING = 1, + RPM_SUSPENDED = 2, + RPM_SUSPENDING = 3, +}; + +struct blk_rq_stat { + u64 mean; + u64 min; + u64 max; + u32 nr_samples; + u64 batch; +}; + +enum blk_bounce { + BLK_BOUNCE_NONE = 0, + BLK_BOUNCE_HIGH = 1, +}; + +enum blk_zoned_model { + BLK_ZONED_NONE = 0, + BLK_ZONED_HA = 1, + BLK_ZONED_HM = 2, +}; + +struct queue_limits { + enum blk_bounce bounce; + long unsigned int seg_boundary_mask; + long unsigned int virt_boundary_mask; + unsigned int max_hw_sectors; + unsigned int max_dev_sectors; + unsigned int chunk_sectors; + unsigned int max_sectors; + unsigned int max_segment_size; + unsigned int physical_block_size; + unsigned int logical_block_size; + unsigned int alignment_offset; + unsigned int io_min; + unsigned int io_opt; + unsigned int max_discard_sectors; + unsigned int max_hw_discard_sectors; + unsigned int max_write_same_sectors; + unsigned int max_write_zeroes_sectors; + unsigned int max_zone_append_sectors; + unsigned int discard_granularity; + unsigned int discard_alignment; + unsigned int zone_write_granularity; + short unsigned int max_segments; + short unsigned int max_integrity_segments; + short unsigned int max_discard_segments; + unsigned char misaligned; + unsigned char discard_misaligned; + unsigned char raid_partial_stripes_expensive; + enum blk_zoned_model zoned; +}; + +struct bsg_ops; + +struct bsg_class_device { + struct device *class_dev; + int minor; + struct request_queue *queue; + const struct bsg_ops *ops; +}; + +typedef void *mempool_alloc_t(gfp_t, void *); + +typedef void mempool_free_t(void *, void *); + +struct mempool_s { + spinlock_t lock; + int min_nr; + int curr_nr; + void **elements; + void *pool_data; + mempool_alloc_t *alloc; + mempool_free_t *free; + wait_queue_head_t wait; +}; + +typedef struct mempool_s mempool_t; + +struct bio_set { + struct kmem_cache *bio_slab; + unsigned int front_pad; + mempool_t bio_pool; + mempool_t bvec_pool; + mempool_t bio_integrity_pool; + mempool_t bvec_integrity_pool; + unsigned int back_pad; + spinlock_t rescue_lock; + struct bio_list rescue_list; + struct work_struct rescue_work; + struct workqueue_struct *rescue_workqueue; +}; + +struct request; + +struct elevator_queue; + +struct blk_queue_stats; + +struct rq_qos; + +struct blk_mq_ops; + +struct blk_mq_ctx; + +struct blk_mq_hw_ctx; + +struct blk_keyslot_manager; + +struct blk_stat_callback; + +struct blkcg_gq; + +struct blk_trace; + +struct 
blk_flush_queue; + +struct throtl_data; + +struct blk_mq_tag_set; + +struct request_queue { + struct request *last_merge; + struct elevator_queue *elevator; + struct percpu_ref q_usage_counter; + struct blk_queue_stats *stats; + struct rq_qos *rq_qos; + const struct blk_mq_ops *mq_ops; + struct blk_mq_ctx *queue_ctx; + unsigned int queue_depth; + struct blk_mq_hw_ctx **queue_hw_ctx; + unsigned int nr_hw_queues; + struct backing_dev_info *backing_dev_info; + void *queuedata; + long unsigned int queue_flags; + atomic_t pm_only; + int id; + spinlock_t queue_lock; + struct kobject kobj; + struct kobject *mq_kobj; + struct blk_integrity integrity; + struct device *dev; + enum rpm_status rpm_status; + long unsigned int nr_requests; + unsigned int dma_pad_mask; + unsigned int dma_alignment; + struct blk_keyslot_manager *ksm; + unsigned int rq_timeout; + int poll_nsec; + struct blk_stat_callback *poll_cb; + struct blk_rq_stat poll_stat[16]; + struct timer_list timeout; + struct work_struct timeout_work; + atomic_t nr_active_requests_shared_sbitmap; + struct list_head icq_list; + long unsigned int blkcg_pols[1]; + struct blkcg_gq *root_blkg; + struct list_head blkg_list; + struct queue_limits limits; + unsigned int required_elevator_features; + unsigned int nr_zones; + long unsigned int *conv_zones_bitmap; + long unsigned int *seq_zones_wlock; + unsigned int max_open_zones; + unsigned int max_active_zones; + unsigned int sg_timeout; + unsigned int sg_reserved_size; + int node; + struct mutex debugfs_mutex; + struct blk_trace *blk_trace; + struct blk_flush_queue *fq; + struct list_head requeue_list; + spinlock_t requeue_lock; + struct delayed_work requeue_work; + struct mutex sysfs_lock; + struct mutex sysfs_dir_lock; + struct list_head unused_hctx_list; + spinlock_t unused_hctx_lock; + int mq_freeze_depth; + struct bsg_class_device bsg_dev; + struct throtl_data *td; + struct callback_head callback_head; + wait_queue_head_t mq_freeze_wq; + struct mutex mq_freeze_lock; + struct blk_mq_tag_set *tag_set; + struct list_head tag_set_list; + struct bio_set bio_split; + struct dentry *debugfs_dir; + struct dentry *sched_debugfs_dir; + struct dentry *rqos_debugfs_dir; + bool mq_sysfs_init_done; + size_t cmd_size; + u64 write_hints[5]; +}; + +struct cgroup_base_stat { + struct task_cputime cputime; +}; + +struct psi_group_cpu; + +struct psi_group { + struct mutex avgs_lock; + struct psi_group_cpu *pcpu; + u64 avg_total[6]; + u64 avg_last_update; + u64 avg_next_update; + struct delayed_work avgs_work; + u64 total[12]; + long unsigned int avg[18]; + struct task_struct *poll_task; + struct timer_list poll_timer; + wait_queue_head_t poll_wait; + atomic_t poll_wakeup; + struct mutex trigger_lock; + struct list_head triggers; + u32 nr_triggers[6]; + u32 poll_states; + u64 poll_min_period; + u64 polling_total[6]; + u64 polling_next_update; + u64 polling_until; +}; + +struct cgroup_bpf { + struct bpf_prog_array *effective[39]; + struct list_head progs[39]; + u32 flags[39]; + struct list_head storages; + struct bpf_prog_array *inactive; + struct percpu_ref refcnt; + struct work_struct release_work; +}; + +struct cgroup_freezer_state { + bool freeze; + int e_freeze; + int nr_frozen_descendants; + int nr_frozen_tasks; +}; + +struct cgroup_root; + +struct cgroup_rstat_cpu; + +struct cgroup { + struct cgroup_subsys_state self; + long unsigned int flags; + int level; + int max_depth; + int nr_descendants; + int nr_dying_descendants; + int max_descendants; + int nr_populated_csets; + int nr_populated_domain_children; + int 
nr_populated_threaded_children; + int nr_threaded_children; + struct kernfs_node *kn; + struct cgroup_file procs_file; + struct cgroup_file events_file; + u16 subtree_control; + u16 subtree_ss_mask; + u16 old_subtree_control; + u16 old_subtree_ss_mask; + struct cgroup_subsys_state *subsys[14]; + struct cgroup_root *root; + struct list_head cset_links; + struct list_head e_csets[14]; + struct cgroup *dom_cgrp; + struct cgroup *old_dom_cgrp; + struct cgroup_rstat_cpu *rstat_cpu; + struct list_head rstat_css_list; + struct cgroup_base_stat last_bstat; + struct cgroup_base_stat bstat; + struct prev_cputime prev_cputime; + struct list_head pidlists; + struct mutex pidlist_mutex; + wait_queue_head_t offline_waitq; + struct work_struct release_agent_work; + struct psi_group psi; + struct cgroup_bpf bpf; + atomic_t congestion_count; + struct cgroup_freezer_state freezer; + u64 ancestor_ids[0]; +}; + +struct ucounts { + struct hlist_node node; + struct user_namespace *ns; + kuid_t uid; + int count; + atomic_t ucount[12]; +}; + +struct taskstats { + __u16 version; + __u32 ac_exitcode; + __u8 ac_flag; + __u8 ac_nice; + __u64 cpu_count; + __u64 cpu_delay_total; + __u64 blkio_count; + __u64 blkio_delay_total; + __u64 swapin_count; + __u64 swapin_delay_total; + __u64 cpu_run_real_total; + __u64 cpu_run_virtual_total; + char ac_comm[32]; + __u8 ac_sched; + __u8 ac_pad[3]; + int: 32; + __u32 ac_uid; + __u32 ac_gid; + __u32 ac_pid; + __u32 ac_ppid; + __u32 ac_btime; + __u64 ac_etime; + __u64 ac_utime; + __u64 ac_stime; + __u64 ac_minflt; + __u64 ac_majflt; + __u64 coremem; + __u64 virtmem; + __u64 hiwater_rss; + __u64 hiwater_vm; + __u64 read_char; + __u64 write_char; + __u64 read_syscalls; + __u64 write_syscalls; + __u64 read_bytes; + __u64 write_bytes; + __u64 cancelled_write_bytes; + __u64 nvcsw; + __u64 nivcsw; + __u64 ac_utimescaled; + __u64 ac_stimescaled; + __u64 cpu_scaled_run_real_total; + __u64 freepages_count; + __u64 freepages_delay_total; + __u64 thrashing_count; + __u64 thrashing_delay_total; + __u64 ac_btime64; +}; + +typedef struct { + __u8 b[16]; +} guid_t; + +struct wait_page_queue { + struct page *page; + int bit_nr; + wait_queue_entry_t wait; +}; + +enum writeback_sync_modes { + WB_SYNC_NONE = 0, + WB_SYNC_ALL = 1, +}; + +struct writeback_control { + long int nr_to_write; + long int pages_skipped; + loff_t range_start; + loff_t range_end; + enum writeback_sync_modes sync_mode; + unsigned int for_kupdate: 1; + unsigned int for_background: 1; + unsigned int tagged_writepages: 1; + unsigned int for_reclaim: 1; + unsigned int range_cyclic: 1; + unsigned int for_sync: 1; + unsigned int no_cgroup_owner: 1; + unsigned int punt_to_cgroup: 1; + struct bdi_writeback *wb; + struct inode *inode; + int wb_id; + int wb_lcand_id; + int wb_tcand_id; + size_t wb_bytes; + size_t wb_lcand_bytes; + size_t wb_tcand_bytes; +}; + +struct readahead_control { + struct file *file; + struct address_space *mapping; + struct file_ra_state *ra; + long unsigned int _index; + unsigned int _nr_pages; + unsigned int _batch_count; +}; + +struct iovec; + +struct kvec; + +struct bio_vec; + +struct iov_iter { + unsigned int type; + size_t iov_offset; + size_t count; + union { + const struct iovec *iov; + const struct kvec *kvec; + const struct bio_vec *bvec; + struct xarray *xarray; + struct pipe_inode_info *pipe; + }; + union { + long unsigned int nr_segs; + struct { + unsigned int head; + unsigned int start_head; + }; + loff_t xarray_start; + }; +}; + +struct swap_cluster_info { + spinlock_t lock; + unsigned int data: 24; 
+ unsigned int flags: 8; +}; + +struct swap_cluster_list { + struct swap_cluster_info head; + struct swap_cluster_info tail; +}; + +struct percpu_cluster; + +struct swap_info_struct { + long unsigned int flags; + short int prio; + struct plist_node list; + signed char type; + unsigned int max; + unsigned char *swap_map; + struct swap_cluster_info *cluster_info; + struct swap_cluster_list free_clusters; + unsigned int lowest_bit; + unsigned int highest_bit; + unsigned int pages; + unsigned int inuse_pages; + unsigned int cluster_next; + unsigned int cluster_nr; + unsigned int *cluster_next_cpu; + struct percpu_cluster *percpu_cluster; + struct rb_root swap_extent_root; + struct block_device *bdev; + struct file *swap_file; + unsigned int old_block_size; + long unsigned int *frontswap_map; + atomic_t frontswap_pages; + spinlock_t lock; + spinlock_t cont_lock; + struct work_struct discard_work; + struct swap_cluster_list discard_clusters; + struct plist_node avail_lists[0]; +}; + +struct cdev { + struct kobject kobj; + struct module *owner; + const struct file_operations *ops; + struct list_head list; + dev_t dev; + unsigned int count; +}; + +enum dl_dev_state { + DL_DEV_NO_DRIVER = 0, + DL_DEV_PROBING = 1, + DL_DEV_DRIVER_BOUND = 2, + DL_DEV_UNBINDING = 3, +}; + +struct dev_links_info { + struct list_head suppliers; + struct list_head consumers; + struct list_head defer_sync; + enum dl_dev_state status; +}; + +struct pm_message { + int event; +}; + +typedef struct pm_message pm_message_t; + +enum rpm_request { + RPM_REQ_NONE = 0, + RPM_REQ_IDLE = 1, + RPM_REQ_SUSPEND = 2, + RPM_REQ_AUTOSUSPEND = 3, + RPM_REQ_RESUME = 4, +}; + +struct wakeup_source; + +struct wake_irq; + +struct pm_subsys_data; + +struct dev_pm_qos; + +struct dev_pm_info { + pm_message_t power_state; + unsigned int can_wakeup: 1; + unsigned int async_suspend: 1; + bool in_dpm_list: 1; + bool is_prepared: 1; + bool is_suspended: 1; + bool is_noirq_suspended: 1; + bool is_late_suspended: 1; + bool no_pm: 1; + bool early_init: 1; + bool direct_complete: 1; + u32 driver_flags; + spinlock_t lock; + struct list_head entry; + struct completion completion; + struct wakeup_source *wakeup; + bool wakeup_path: 1; + bool syscore: 1; + bool no_pm_callbacks: 1; + unsigned int must_resume: 1; + unsigned int may_skip_resume: 1; + struct hrtimer suspend_timer; + u64 timer_expires; + struct work_struct work; + wait_queue_head_t wait_queue; + struct wake_irq *wakeirq; + atomic_t usage_count; + atomic_t child_count; + unsigned int disable_depth: 3; + unsigned int idle_notification: 1; + unsigned int request_pending: 1; + unsigned int deferred_resume: 1; + unsigned int needs_force_resume: 1; + unsigned int runtime_auto: 1; + bool ignore_children: 1; + unsigned int no_callbacks: 1; + unsigned int irq_safe: 1; + unsigned int use_autosuspend: 1; + unsigned int timer_autosuspends: 1; + unsigned int memalloc_noio: 1; + unsigned int links_count; + enum rpm_request request; + enum rpm_status runtime_status; + int runtime_error; + int autosuspend_delay; + u64 last_busy; + u64 active_time; + u64 suspended_time; + u64 accounting_timestamp; + struct pm_subsys_data *subsys_data; + void (*set_latency_tolerance)(struct device *, s32); + struct dev_pm_qos *qos; +}; + +struct dev_archdata {}; + +struct device_private; + +struct device_type; + +struct bus_type; + +struct device_driver; + +struct dev_pm_domain; + +struct em_perf_domain; + +struct dev_pin_info; + +struct dma_map_ops; + +struct bus_dma_region; + +struct device_dma_parameters; + +struct device_node; 
+ +struct class; + +struct iommu_group; + +struct dev_iommu; + +struct device { + struct kobject kobj; + struct device *parent; + struct device_private *p; + const char *init_name; + const struct device_type *type; + struct bus_type *bus; + struct device_driver *driver; + void *platform_data; + void *driver_data; + struct mutex mutex; + struct dev_links_info links; + struct dev_pm_info power; + struct dev_pm_domain *pm_domain; + struct em_perf_domain *em_pd; + struct irq_domain *msi_domain; + struct dev_pin_info *pins; + raw_spinlock_t msi_lock; + struct list_head msi_list; + const struct dma_map_ops *dma_ops; + u64 *dma_mask; + u64 coherent_dma_mask; + u64 bus_dma_limit; + const struct bus_dma_region *dma_range_map; + struct device_dma_parameters *dma_parms; + struct list_head dma_pools; + struct dev_archdata archdata; + struct device_node *of_node; + struct fwnode_handle *fwnode; + int numa_node; + dev_t devt; + u32 id; + spinlock_t devres_lock; + struct list_head devres_head; + struct class *class; + const struct attribute_group **groups; + void (*release)(struct device *); + struct iommu_group *iommu_group; + struct dev_iommu *iommu; + bool offline_disabled: 1; + bool offline: 1; + bool of_node_reused: 1; + bool state_synced: 1; + bool can_match: 1; +}; + +struct disk_stats; + +struct gendisk; + +struct partition_meta_info; + +struct block_device { + sector_t bd_start_sect; + struct disk_stats *bd_stats; + long unsigned int bd_stamp; + bool bd_read_only; + dev_t bd_dev; + int bd_openers; + struct inode *bd_inode; + struct super_block *bd_super; + struct mutex bd_mutex; + void *bd_claiming; + struct device bd_device; + void *bd_holder; + int bd_holders; + bool bd_write_holder; + struct list_head bd_holder_disks; + struct kobject *bd_holder_dir; + u8 bd_partno; + unsigned int bd_part_count; + spinlock_t bd_size_lock; + struct gendisk *bd_disk; + struct backing_dev_info *bd_bdi; + int bd_fsfreeze_count; + struct mutex bd_fsfreeze_mutex; + struct super_block *bd_fsfreeze_sb; + struct partition_meta_info *bd_meta_info; +}; + +struct fc_log; + +struct p_log { + const char *prefix; + struct fc_log *log; +}; + +enum fs_context_purpose { + FS_CONTEXT_FOR_MOUNT = 0, + FS_CONTEXT_FOR_SUBMOUNT = 1, + FS_CONTEXT_FOR_RECONFIGURE = 2, +}; + +enum fs_context_phase { + FS_CONTEXT_CREATE_PARAMS = 0, + FS_CONTEXT_CREATING = 1, + FS_CONTEXT_AWAITING_MOUNT = 2, + FS_CONTEXT_AWAITING_RECONF = 3, + FS_CONTEXT_RECONF_PARAMS = 4, + FS_CONTEXT_RECONFIGURING = 5, + FS_CONTEXT_FAILED = 6, +}; + +struct fs_context_operations; + +struct fs_context { + const struct fs_context_operations *ops; + struct mutex uapi_mutex; + struct file_system_type *fs_type; + void *fs_private; + void *sget_key; + struct dentry *root; + struct user_namespace *user_ns; + struct net *net_ns; + const struct cred *cred; + struct p_log log; + const char *source; + void *security; + void *s_fs_info; + unsigned int sb_flags; + unsigned int sb_flags_mask; + unsigned int s_iflags; + unsigned int lsm_flags; + enum fs_context_purpose purpose: 8; + enum fs_context_phase phase: 8; + bool need_free: 1; + bool global: 1; + bool oldapi: 1; +}; + +struct audit_names; + +struct filename { + const char *name; + const char *uptr; + int refcnt; + struct audit_names *aname; + const char iname[0]; +}; + +typedef u8 blk_status_t; + +struct bvec_iter { + sector_t bi_sector; + unsigned int bi_size; + unsigned int bi_idx; + unsigned int bi_bvec_done; +}; + +typedef void bio_end_io_t(struct bio *); + +struct bio_issue { + u64 value; +}; + +struct bio_vec { + 
struct page *bv_page; + unsigned int bv_len; + unsigned int bv_offset; +}; + +struct bio_crypt_ctx; + +struct bio_integrity_payload; + +struct bio { + struct bio *bi_next; + struct block_device *bi_bdev; + unsigned int bi_opf; + short unsigned int bi_flags; + short unsigned int bi_ioprio; + short unsigned int bi_write_hint; + blk_status_t bi_status; + atomic_t __bi_remaining; + struct bvec_iter bi_iter; + bio_end_io_t *bi_end_io; + void *bi_private; + struct blkcg_gq *bi_blkg; + struct bio_issue bi_issue; + u64 bi_iocost_cost; + struct bio_crypt_ctx *bi_crypt_context; + union { + struct bio_integrity_payload *bi_integrity; + }; + short unsigned int bi_vcnt; + short unsigned int bi_max_vecs; + atomic_t __bi_cnt; + struct bio_vec *bi_io_vec; + struct bio_set *bi_pool; + struct bio_vec bi_inline_vecs[0]; +}; + +struct linux_binprm { + struct vm_area_struct *vma; + long unsigned int vma_pages; + struct mm_struct *mm; + long unsigned int p; + long unsigned int argmin; + unsigned int have_execfd: 1; + unsigned int execfd_creds: 1; + unsigned int secureexec: 1; + unsigned int point_of_no_return: 1; + struct file *executable; + struct file *interpreter; + struct file *file; + struct cred *cred; + int unsafe; + unsigned int per_clear; + int argc; + int envc; + const char *filename; + const char *interp; + const char *fdpath; + unsigned int interp_flags; + int execfd; + long unsigned int loader; + long unsigned int exec; + struct rlimit rlim_stack; + char buf[256]; +}; + +struct coredump_params { + const kernel_siginfo_t *siginfo; + struct pt_regs *regs; + struct file *file; + long unsigned int limit; + long unsigned int mm_flags; + loff_t written; + loff_t pos; + loff_t to_skip; +}; + +struct em_perf_state { + long unsigned int frequency; + long unsigned int power; + long unsigned int cost; +}; + +struct em_perf_domain { + struct em_perf_state *table; + int nr_perf_states; + int milliwatts; + long unsigned int cpus[0]; +}; + +struct dev_pm_ops { + int (*prepare)(struct device *); + void (*complete)(struct device *); + int (*suspend)(struct device *); + int (*resume)(struct device *); + int (*freeze)(struct device *); + int (*thaw)(struct device *); + int (*poweroff)(struct device *); + int (*restore)(struct device *); + int (*suspend_late)(struct device *); + int (*resume_early)(struct device *); + int (*freeze_late)(struct device *); + int (*thaw_early)(struct device *); + int (*poweroff_late)(struct device *); + int (*restore_early)(struct device *); + int (*suspend_noirq)(struct device *); + int (*resume_noirq)(struct device *); + int (*freeze_noirq)(struct device *); + int (*thaw_noirq)(struct device *); + int (*poweroff_noirq)(struct device *); + int (*restore_noirq)(struct device *); + int (*runtime_suspend)(struct device *); + int (*runtime_resume)(struct device *); + int (*runtime_idle)(struct device *); +}; + +struct pm_domain_data; + +struct pm_subsys_data { + spinlock_t lock; + unsigned int refcount; + unsigned int clock_op_might_sleep; + struct mutex clock_mutex; + struct list_head clock_list; + struct pm_domain_data *domain_data; +}; + +struct wakeup_source { + const char *name; + int id; + struct list_head entry; + spinlock_t lock; + struct wake_irq *wakeirq; + struct timer_list timer; + long unsigned int timer_expires; + ktime_t total_time; + ktime_t max_time; + ktime_t last_time; + ktime_t start_prevent_time; + ktime_t prevent_sleep_time; + long unsigned int event_count; + long unsigned int active_count; + long unsigned int relax_count; + long unsigned int expire_count; + long 
unsigned int wakeup_count; + struct device *dev; + bool active: 1; + bool autosleep_enabled: 1; +}; + +struct dev_pm_domain { + struct dev_pm_ops ops; + int (*start)(struct device *); + void (*detach)(struct device *, bool); + int (*activate)(struct device *); + void (*sync)(struct device *); + void (*dismiss)(struct device *); +}; + +struct iommu_ops; + +struct subsys_private; + +struct bus_type { + const char *name; + const char *dev_name; + struct device *dev_root; + const struct attribute_group **bus_groups; + const struct attribute_group **dev_groups; + const struct attribute_group **drv_groups; + int (*match)(struct device *, struct device_driver *); + int (*uevent)(struct device *, struct kobj_uevent_env *); + int (*probe)(struct device *); + void (*sync_state)(struct device *); + int (*remove)(struct device *); + void (*shutdown)(struct device *); + int (*online)(struct device *); + int (*offline)(struct device *); + int (*suspend)(struct device *, pm_message_t); + int (*resume)(struct device *); + int (*num_vf)(struct device *); + int (*dma_configure)(struct device *); + const struct dev_pm_ops *pm; + const struct iommu_ops *iommu_ops; + struct subsys_private *p; + struct lock_class_key lock_key; + bool need_parent_lock; +}; + +enum probe_type { + PROBE_DEFAULT_STRATEGY = 0, + PROBE_PREFER_ASYNCHRONOUS = 1, + PROBE_FORCE_SYNCHRONOUS = 2, +}; + +struct of_device_id; + +struct acpi_device_id; + +struct driver_private; + +struct device_driver { + const char *name; + struct bus_type *bus; + struct module *owner; + const char *mod_name; + bool suppress_bind_attrs; + enum probe_type probe_type; + const struct of_device_id *of_match_table; + const struct acpi_device_id *acpi_match_table; + int (*probe)(struct device *); + void (*sync_state)(struct device *); + int (*remove)(struct device *); + void (*shutdown)(struct device *); + int (*suspend)(struct device *, pm_message_t); + int (*resume)(struct device *); + const struct attribute_group **groups; + const struct attribute_group **dev_groups; + const struct dev_pm_ops *pm; + void (*coredump)(struct device *); + struct driver_private *p; +}; + +enum iommu_cap { + IOMMU_CAP_CACHE_COHERENCY = 0, + IOMMU_CAP_INTR_REMAP = 1, + IOMMU_CAP_NOEXEC = 2, +}; + +typedef u64 dma_addr_t; + +enum iommu_dev_features { + IOMMU_DEV_FEAT_AUX = 0, + IOMMU_DEV_FEAT_SVA = 1, + IOMMU_DEV_FEAT_IOPF = 2, +}; + +struct iommu_domain; + +struct iommu_iotlb_gather; + +struct iommu_device; + +struct iommu_resv_region; + +struct of_phandle_args; + +struct iommu_sva; + +struct iommu_fault_event; + +struct iommu_page_response; + +struct iommu_cache_invalidate_info; + +struct iommu_gpasid_bind_data; + +struct iommu_ops { + bool (*capable)(enum iommu_cap); + struct iommu_domain * (*domain_alloc)(unsigned int); + void (*domain_free)(struct iommu_domain *); + int (*attach_dev)(struct iommu_domain *, struct device *); + void (*detach_dev)(struct iommu_domain *, struct device *); + int (*map)(struct iommu_domain *, long unsigned int, phys_addr_t, size_t, int, gfp_t); + size_t (*unmap)(struct iommu_domain *, long unsigned int, size_t, struct iommu_iotlb_gather *); + void (*flush_iotlb_all)(struct iommu_domain *); + void (*iotlb_sync_map)(struct iommu_domain *, long unsigned int, size_t); + void (*iotlb_sync)(struct iommu_domain *, struct iommu_iotlb_gather *); + phys_addr_t (*iova_to_phys)(struct iommu_domain *, dma_addr_t); + struct iommu_device * (*probe_device)(struct device *); + void (*release_device)(struct device *); + void (*probe_finalize)(struct device *); + struct 
iommu_group * (*device_group)(struct device *); + int (*enable_nesting)(struct iommu_domain *); + int (*set_pgtable_quirks)(struct iommu_domain *, long unsigned int); + void (*get_resv_regions)(struct device *, struct list_head *); + void (*put_resv_regions)(struct device *, struct list_head *); + void (*apply_resv_region)(struct device *, struct iommu_domain *, struct iommu_resv_region *); + int (*of_xlate)(struct device *, struct of_phandle_args *); + bool (*is_attach_deferred)(struct iommu_domain *, struct device *); + bool (*dev_has_feat)(struct device *, enum iommu_dev_features); + bool (*dev_feat_enabled)(struct device *, enum iommu_dev_features); + int (*dev_enable_feat)(struct device *, enum iommu_dev_features); + int (*dev_disable_feat)(struct device *, enum iommu_dev_features); + int (*aux_attach_dev)(struct iommu_domain *, struct device *); + void (*aux_detach_dev)(struct iommu_domain *, struct device *); + int (*aux_get_pasid)(struct iommu_domain *, struct device *); + struct iommu_sva * (*sva_bind)(struct device *, struct mm_struct *, void *); + void (*sva_unbind)(struct iommu_sva *); + u32 (*sva_get_pasid)(struct iommu_sva *); + int (*page_response)(struct device *, struct iommu_fault_event *, struct iommu_page_response *); + int (*cache_invalidate)(struct iommu_domain *, struct device *, struct iommu_cache_invalidate_info *); + int (*sva_bind_gpasid)(struct iommu_domain *, struct device *, struct iommu_gpasid_bind_data *); + int (*sva_unbind_gpasid)(struct device *, u32); + int (*def_domain_type)(struct device *); + long unsigned int pgsize_bitmap; + struct module *owner; +}; + +struct device_type { + const char *name; + const struct attribute_group **groups; + int (*uevent)(struct device *, struct kobj_uevent_env *); + char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); + void (*release)(struct device *); + const struct dev_pm_ops *pm; +}; + +struct class { + const char *name; + struct module *owner; + const struct attribute_group **class_groups; + const struct attribute_group **dev_groups; + struct kobject *dev_kobj; + int (*dev_uevent)(struct device *, struct kobj_uevent_env *); + char * (*devnode)(struct device *, umode_t *); + void (*class_release)(struct class *); + void (*dev_release)(struct device *); + int (*shutdown_pre)(struct device *); + const struct kobj_ns_type_operations *ns_type; + const void * (*namespace)(struct device *); + void (*get_ownership)(struct device *, kuid_t *, kgid_t *); + const struct dev_pm_ops *pm; + struct subsys_private *p; +}; + +struct of_device_id { + char name[32]; + char type[32]; + char compatible[128]; + const void *data; +}; + +typedef long unsigned int kernel_ulong_t; + +struct acpi_device_id { + __u8 id[9]; + kernel_ulong_t driver_data; + __u32 cls; + __u32 cls_msk; +}; + +struct device_dma_parameters { + unsigned int max_segment_size; + unsigned int min_align_mask; + long unsigned int segment_boundary_mask; +}; + +enum dma_data_direction { + DMA_BIDIRECTIONAL = 0, + DMA_TO_DEVICE = 1, + DMA_FROM_DEVICE = 2, + DMA_NONE = 3, +}; + +struct sg_table; + +struct scatterlist; + +struct dma_map_ops { + void * (*alloc)(struct device *, size_t, dma_addr_t *, gfp_t, long unsigned int); + void (*free)(struct device *, size_t, void *, dma_addr_t, long unsigned int); + struct page * (*alloc_pages)(struct device *, size_t, dma_addr_t *, enum dma_data_direction, gfp_t); + void (*free_pages)(struct device *, size_t, struct page *, dma_addr_t, enum dma_data_direction); + struct sg_table * (*alloc_noncontiguous)(struct device *, 
size_t, enum dma_data_direction, gfp_t, long unsigned int); + void (*free_noncontiguous)(struct device *, size_t, struct sg_table *, enum dma_data_direction); + int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t, size_t, long unsigned int); + int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t, size_t, long unsigned int); + dma_addr_t (*map_page)(struct device *, struct page *, long unsigned int, size_t, enum dma_data_direction, long unsigned int); + void (*unmap_page)(struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction, long unsigned int); + void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction, long unsigned int); + dma_addr_t (*map_resource)(struct device *, phys_addr_t, size_t, enum dma_data_direction, long unsigned int); + void (*unmap_resource)(struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + void (*sync_single_for_cpu)(struct device *, dma_addr_t, size_t, enum dma_data_direction); + void (*sync_single_for_device)(struct device *, dma_addr_t, size_t, enum dma_data_direction); + void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction); + void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction); + void (*cache_sync)(struct device *, void *, size_t, enum dma_data_direction); + int (*dma_supported)(struct device *, u64); + u64 (*get_required_mask)(struct device *); + size_t (*max_mapping_size)(struct device *); + long unsigned int (*get_merge_boundary)(struct device *); +}; + +struct bus_dma_region { + phys_addr_t cpu_start; + dma_addr_t dma_start; + u64 size; + u64 offset; +}; + +typedef u32 phandle; + +struct fwnode_operations; + +struct fwnode_handle { + struct fwnode_handle *secondary; + const struct fwnode_operations *ops; + struct device *dev; + struct list_head suppliers; + struct list_head consumers; + u8 flags; +}; + +struct property; + +struct device_node { + const char *name; + phandle phandle; + const char *full_name; + struct fwnode_handle fwnode; + struct property *properties; + struct property *deadprops; + struct device_node *parent; + struct device_node *child; + struct device_node *sibling; + long unsigned int _flags; + void *data; +}; + +enum cpuhp_state { + CPUHP_INVALID = 4294967295, + CPUHP_OFFLINE = 0, + CPUHP_CREATE_THREADS = 1, + CPUHP_PERF_PREPARE = 2, + CPUHP_PERF_X86_PREPARE = 3, + CPUHP_PERF_X86_AMD_UNCORE_PREP = 4, + CPUHP_PERF_POWER = 5, + CPUHP_PERF_SUPERH = 6, + CPUHP_X86_HPET_DEAD = 7, + CPUHP_X86_APB_DEAD = 8, + CPUHP_X86_MCE_DEAD = 9, + CPUHP_VIRT_NET_DEAD = 10, + CPUHP_SLUB_DEAD = 11, + CPUHP_DEBUG_OBJ_DEAD = 12, + CPUHP_MM_WRITEBACK_DEAD = 13, + CPUHP_MM_VMSTAT_DEAD = 14, + CPUHP_SOFTIRQ_DEAD = 15, + CPUHP_NET_MVNETA_DEAD = 16, + CPUHP_CPUIDLE_DEAD = 17, + CPUHP_ARM64_FPSIMD_DEAD = 18, + CPUHP_ARM_OMAP_WAKE_DEAD = 19, + CPUHP_IRQ_POLL_DEAD = 20, + CPUHP_BLOCK_SOFTIRQ_DEAD = 21, + CPUHP_ACPI_CPUDRV_DEAD = 22, + CPUHP_S390_PFAULT_DEAD = 23, + CPUHP_BLK_MQ_DEAD = 24, + CPUHP_FS_BUFF_DEAD = 25, + CPUHP_PRINTK_DEAD = 26, + CPUHP_MM_MEMCQ_DEAD = 27, + CPUHP_PERCPU_CNT_DEAD = 28, + CPUHP_RADIX_DEAD = 29, + CPUHP_PAGE_ALLOC_DEAD = 30, + CPUHP_NET_DEV_DEAD = 31, + CPUHP_PCI_XGENE_DEAD = 32, + CPUHP_IOMMU_IOVA_DEAD = 33, + CPUHP_LUSTRE_CFS_DEAD = 34, + CPUHP_AP_ARM_CACHE_B15_RAC_DEAD = 35, + CPUHP_PADATA_DEAD = 36, + CPUHP_WORKQUEUE_PREP = 37, + 
CPUHP_POWER_NUMA_PREPARE = 38, + CPUHP_HRTIMERS_PREPARE = 39, + CPUHP_PROFILE_PREPARE = 40, + CPUHP_X2APIC_PREPARE = 41, + CPUHP_SMPCFD_PREPARE = 42, + CPUHP_RELAY_PREPARE = 43, + CPUHP_SLAB_PREPARE = 44, + CPUHP_MD_RAID5_PREPARE = 45, + CPUHP_RCUTREE_PREP = 46, + CPUHP_CPUIDLE_COUPLED_PREPARE = 47, + CPUHP_POWERPC_PMAC_PREPARE = 48, + CPUHP_POWERPC_MMU_CTX_PREPARE = 49, + CPUHP_XEN_PREPARE = 50, + CPUHP_XEN_EVTCHN_PREPARE = 51, + CPUHP_ARM_SHMOBILE_SCU_PREPARE = 52, + CPUHP_SH_SH3X_PREPARE = 53, + CPUHP_NET_FLOW_PREPARE = 54, + CPUHP_TOPOLOGY_PREPARE = 55, + CPUHP_NET_IUCV_PREPARE = 56, + CPUHP_ARM_BL_PREPARE = 57, + CPUHP_TRACE_RB_PREPARE = 58, + CPUHP_MM_ZS_PREPARE = 59, + CPUHP_MM_ZSWP_MEM_PREPARE = 60, + CPUHP_MM_ZSWP_POOL_PREPARE = 61, + CPUHP_KVM_PPC_BOOK3S_PREPARE = 62, + CPUHP_ZCOMP_PREPARE = 63, + CPUHP_TIMERS_PREPARE = 64, + CPUHP_MIPS_SOC_PREPARE = 65, + CPUHP_BP_PREPARE_DYN = 66, + CPUHP_BP_PREPARE_DYN_END = 86, + CPUHP_BRINGUP_CPU = 87, + CPUHP_AP_IDLE_DEAD = 88, + CPUHP_AP_OFFLINE = 89, + CPUHP_AP_SCHED_STARTING = 90, + CPUHP_AP_RCUTREE_DYING = 91, + CPUHP_AP_CPU_PM_STARTING = 92, + CPUHP_AP_IRQ_GIC_STARTING = 93, + CPUHP_AP_IRQ_HIP04_STARTING = 94, + CPUHP_AP_IRQ_APPLE_AIC_STARTING = 95, + CPUHP_AP_IRQ_ARMADA_XP_STARTING = 96, + CPUHP_AP_IRQ_BCM2836_STARTING = 97, + CPUHP_AP_IRQ_MIPS_GIC_STARTING = 98, + CPUHP_AP_IRQ_RISCV_STARTING = 99, + CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING = 100, + CPUHP_AP_ARM_MVEBU_COHERENCY = 101, + CPUHP_AP_MICROCODE_LOADER = 102, + CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING = 103, + CPUHP_AP_PERF_X86_STARTING = 104, + CPUHP_AP_PERF_X86_AMD_IBS_STARTING = 105, + CPUHP_AP_PERF_X86_CQM_STARTING = 106, + CPUHP_AP_PERF_X86_CSTATE_STARTING = 107, + CPUHP_AP_PERF_XTENSA_STARTING = 108, + CPUHP_AP_MIPS_OP_LOONGSON3_STARTING = 109, + CPUHP_AP_ARM_SDEI_STARTING = 110, + CPUHP_AP_ARM_VFP_STARTING = 111, + CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING = 112, + CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING = 113, + CPUHP_AP_PERF_ARM_ACPI_STARTING = 114, + CPUHP_AP_PERF_ARM_STARTING = 115, + CPUHP_AP_ARM_L2X0_STARTING = 116, + CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING = 117, + CPUHP_AP_ARM_ARCH_TIMER_STARTING = 118, + CPUHP_AP_ARM_GLOBAL_TIMER_STARTING = 119, + CPUHP_AP_JCORE_TIMER_STARTING = 120, + CPUHP_AP_ARM_TWD_STARTING = 121, + CPUHP_AP_QCOM_TIMER_STARTING = 122, + CPUHP_AP_TEGRA_TIMER_STARTING = 123, + CPUHP_AP_ARMADA_TIMER_STARTING = 124, + CPUHP_AP_MARCO_TIMER_STARTING = 125, + CPUHP_AP_MIPS_GIC_TIMER_STARTING = 126, + CPUHP_AP_ARC_TIMER_STARTING = 127, + CPUHP_AP_RISCV_TIMER_STARTING = 128, + CPUHP_AP_CLINT_TIMER_STARTING = 129, + CPUHP_AP_CSKY_TIMER_STARTING = 130, + CPUHP_AP_TI_GP_TIMER_STARTING = 131, + CPUHP_AP_HYPERV_TIMER_STARTING = 132, + CPUHP_AP_KVM_STARTING = 133, + CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING = 134, + CPUHP_AP_KVM_ARM_VGIC_STARTING = 135, + CPUHP_AP_KVM_ARM_TIMER_STARTING = 136, + CPUHP_AP_DUMMY_TIMER_STARTING = 137, + CPUHP_AP_ARM_XEN_STARTING = 138, + CPUHP_AP_ARM_CORESIGHT_STARTING = 139, + CPUHP_AP_ARM_CORESIGHT_CTI_STARTING = 140, + CPUHP_AP_ARM64_ISNDEP_STARTING = 141, + CPUHP_AP_SMPCFD_DYING = 142, + CPUHP_AP_X86_TBOOT_DYING = 143, + CPUHP_AP_ARM_CACHE_B15_RAC_DYING = 144, + CPUHP_AP_ONLINE = 145, + CPUHP_TEARDOWN_CPU = 146, + CPUHP_AP_ONLINE_IDLE = 147, + CPUHP_AP_SCHED_WAIT_EMPTY = 148, + CPUHP_AP_SMPBOOT_THREADS = 149, + CPUHP_AP_X86_VDSO_VMA_ONLINE = 150, + CPUHP_AP_IRQ_AFFINITY_ONLINE = 151, + CPUHP_AP_BLK_MQ_ONLINE = 152, + CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS = 153, + CPUHP_AP_X86_INTEL_EPB_ONLINE = 154, + CPUHP_AP_PERF_ONLINE = 155, + 
CPUHP_AP_PERF_X86_ONLINE = 156, + CPUHP_AP_PERF_X86_UNCORE_ONLINE = 157, + CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE = 158, + CPUHP_AP_PERF_X86_AMD_POWER_ONLINE = 159, + CPUHP_AP_PERF_X86_RAPL_ONLINE = 160, + CPUHP_AP_PERF_X86_CQM_ONLINE = 161, + CPUHP_AP_PERF_X86_CSTATE_ONLINE = 162, + CPUHP_AP_PERF_X86_IDXD_ONLINE = 163, + CPUHP_AP_PERF_S390_CF_ONLINE = 164, + CPUHP_AP_PERF_S390_CFD_ONLINE = 165, + CPUHP_AP_PERF_S390_SF_ONLINE = 166, + CPUHP_AP_PERF_ARM_CCI_ONLINE = 167, + CPUHP_AP_PERF_ARM_CCN_ONLINE = 168, + CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE = 169, + CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE = 170, + CPUHP_AP_PERF_ARM_HISI_L3_ONLINE = 171, + CPUHP_AP_PERF_ARM_HISI_PA_ONLINE = 172, + CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE = 173, + CPUHP_AP_PERF_ARM_L2X0_ONLINE = 174, + CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE = 175, + CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE = 176, + CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE = 177, + CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE = 178, + CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE = 179, + CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE = 180, + CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE = 181, + CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE = 182, + CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE = 183, + CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE = 184, + CPUHP_AP_PERF_CSKY_ONLINE = 185, + CPUHP_AP_WATCHDOG_ONLINE = 186, + CPUHP_AP_WORKQUEUE_ONLINE = 187, + CPUHP_AP_RCUTREE_ONLINE = 188, + CPUHP_AP_BASE_CACHEINFO_ONLINE = 189, + CPUHP_AP_ONLINE_DYN = 190, + CPUHP_AP_ONLINE_DYN_END = 220, + CPUHP_AP_X86_HPET_ONLINE = 221, + CPUHP_AP_X86_KVM_CLK_ONLINE = 222, + CPUHP_AP_DTPM_CPU_ONLINE = 223, + CPUHP_AP_ACTIVE = 224, + CPUHP_ONLINE = 225, +}; + +struct ring_buffer_event { + u32 type_len: 5; + u32 time_delta: 27; + u32 array[0]; +}; + +struct seq_buf { + char *buffer; + size_t size; + size_t len; + loff_t readpos; +}; + +struct trace_seq { + char buffer[4096]; + struct seq_buf seq; + int full; +}; + +enum perf_sw_ids { + PERF_COUNT_SW_CPU_CLOCK = 0, + PERF_COUNT_SW_TASK_CLOCK = 1, + PERF_COUNT_SW_PAGE_FAULTS = 2, + PERF_COUNT_SW_CONTEXT_SWITCHES = 3, + PERF_COUNT_SW_CPU_MIGRATIONS = 4, + PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, + PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, + PERF_COUNT_SW_EMULATION_FAULTS = 8, + PERF_COUNT_SW_DUMMY = 9, + PERF_COUNT_SW_BPF_OUTPUT = 10, + PERF_COUNT_SW_CGROUP_SWITCHES = 11, + PERF_COUNT_SW_MAX = 12, +}; + +union perf_mem_data_src { + __u64 val; + struct { + __u64 mem_op: 5; + __u64 mem_lvl: 14; + __u64 mem_snoop: 5; + __u64 mem_lock: 2; + __u64 mem_dtlb: 7; + __u64 mem_lvl_num: 4; + __u64 mem_remote: 1; + __u64 mem_snoopx: 2; + __u64 mem_blk: 3; + __u64 mem_rsvd: 21; + }; +}; + +struct perf_branch_entry { + __u64 from; + __u64 to; + __u64 mispred: 1; + __u64 predicted: 1; + __u64 in_tx: 1; + __u64 abort: 1; + __u64 cycles: 16; + __u64 type: 4; + __u64 reserved: 40; +}; + +union perf_sample_weight { + __u64 full; + struct { + __u32 var1_dw; + __u16 var2_w; + __u16 var3_w; + }; +}; + +struct new_utsname { + char sysname[65]; + char nodename[65]; + char release[65]; + char version[65]; + char machine[65]; + char domainname[65]; +}; + +struct uts_namespace { + struct new_utsname name; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct ns_common ns; +}; + +struct cgroup_namespace { + struct ns_common ns; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct css_set *root_cset; +}; + +struct nsset { + unsigned int flags; + struct nsproxy *nsproxy; + struct fs_struct *fs; + const struct cred *cred; +}; + +struct proc_ns_operations { + const char *name; + 
const char *real_ns_name; + int type; + struct ns_common * (*get)(struct task_struct *); + void (*put)(struct ns_common *); + int (*install)(struct nsset *, struct ns_common *); + struct user_namespace * (*owner)(struct ns_common *); + struct ns_common * (*get_parent)(struct ns_common *); +}; + +struct perf_cpu_context; + +struct perf_output_handle; + +struct pmu { + struct list_head entry; + struct module *module; + struct device *dev; + const struct attribute_group **attr_groups; + const struct attribute_group **attr_update; + const char *name; + int type; + int capabilities; + int *pmu_disable_count; + struct perf_cpu_context *pmu_cpu_context; + atomic_t exclusive_cnt; + int task_ctx_nr; + int hrtimer_interval_ms; + unsigned int nr_addr_filters; + void (*pmu_enable)(struct pmu *); + void (*pmu_disable)(struct pmu *); + int (*event_init)(struct perf_event *); + void (*event_mapped)(struct perf_event *, struct mm_struct *); + void (*event_unmapped)(struct perf_event *, struct mm_struct *); + int (*add)(struct perf_event *, int); + void (*del)(struct perf_event *, int); + void (*start)(struct perf_event *, int); + void (*stop)(struct perf_event *, int); + void (*read)(struct perf_event *); + void (*start_txn)(struct pmu *, unsigned int); + int (*commit_txn)(struct pmu *); + void (*cancel_txn)(struct pmu *); + int (*event_idx)(struct perf_event *); + void (*sched_task)(struct perf_event_context *, bool); + struct kmem_cache *task_ctx_cache; + void (*swap_task_ctx)(struct perf_event_context *, struct perf_event_context *); + void * (*setup_aux)(struct perf_event *, void **, int, bool); + void (*free_aux)(void *); + long int (*snapshot_aux)(struct perf_event *, struct perf_output_handle *, long unsigned int); + int (*addr_filters_validate)(struct list_head *); + void (*addr_filters_sync)(struct perf_event *); + int (*aux_output_match)(struct perf_event *); + int (*filter_match)(struct perf_event *); + int (*check_period)(struct perf_event *, u64); +}; + +struct ftrace_regs { + struct pt_regs regs; +}; + +struct iovec { + void *iov_base; + __kernel_size_t iov_len; +}; + +struct kvec { + void *iov_base; + size_t iov_len; +}; + +struct perf_regs { + __u64 abi; + struct pt_regs *regs; +}; + +struct u64_stats_sync {}; + +struct bpf_cgroup_storage_key { + __u64 cgroup_inode_id; + __u32 attach_type; +}; + +enum kmalloc_cache_type { + KMALLOC_NORMAL = 0, + KMALLOC_RECLAIM = 1, + KMALLOC_DMA = 2, + NR_KMALLOC_TYPES = 3, +}; + +struct bpf_cgroup_storage; + +struct bpf_prog_array_item { + struct bpf_prog *prog; + struct bpf_cgroup_storage *cgroup_storage[2]; +}; + +struct bpf_storage_buffer; + +struct bpf_cgroup_storage_map; + +struct bpf_cgroup_storage { + union { + struct bpf_storage_buffer *buf; + void *percpu_buf; + }; + struct bpf_cgroup_storage_map *map; + struct bpf_cgroup_storage_key key; + struct list_head list_map; + struct list_head list_cg; + struct rb_node node; + struct callback_head rcu; +}; + +struct bpf_prog_array { + struct callback_head rcu; + struct bpf_prog_array_item items[0]; +}; + +struct bpf_storage_buffer { + struct callback_head rcu; + char data[0]; +}; + +struct psi_group_cpu { + seqcount_t seq; + unsigned int tasks[4]; + u32 state_mask; + u32 times[7]; + u64 state_start; + u32 times_prev[14]; + long: 64; +}; + +struct cgroup_taskset; + +struct cftype; + +struct cgroup_subsys { + struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); + int (*css_online)(struct cgroup_subsys_state *); + void (*css_offline)(struct cgroup_subsys_state *); + void 
(*css_released)(struct cgroup_subsys_state *); + void (*css_free)(struct cgroup_subsys_state *); + void (*css_reset)(struct cgroup_subsys_state *); + void (*css_rstat_flush)(struct cgroup_subsys_state *, int); + int (*css_extra_stat_show)(struct seq_file *, struct cgroup_subsys_state *); + int (*can_attach)(struct cgroup_taskset *); + void (*cancel_attach)(struct cgroup_taskset *); + void (*attach)(struct cgroup_taskset *); + void (*post_attach)(); + int (*can_fork)(struct task_struct *, struct css_set *); + void (*cancel_fork)(struct task_struct *, struct css_set *); + void (*fork)(struct task_struct *); + void (*exit)(struct task_struct *); + void (*release)(struct task_struct *); + void (*bind)(struct cgroup_subsys_state *); + bool early_init: 1; + bool implicit_on_dfl: 1; + bool threaded: 1; + int id; + const char *name; + const char *legacy_name; + struct cgroup_root *root; + struct idr css_idr; + struct list_head cfts; + struct cftype *dfl_cftypes; + struct cftype *legacy_cftypes; + unsigned int depends_on; +}; + +struct cgroup_rstat_cpu { + struct u64_stats_sync bsync; + struct cgroup_base_stat bstat; + struct cgroup_base_stat last_bstat; + struct cgroup *updated_children; + struct cgroup *updated_next; +}; + +struct cgroup_root { + struct kernfs_root *kf_root; + unsigned int subsys_mask; + int hierarchy_id; + struct cgroup cgrp; + u64 cgrp_ancestor_id_storage; + atomic_t nr_cgrps; + struct list_head root_list; + unsigned int flags; + char release_agent_path[4096]; + char name[64]; +}; + +struct cftype { + char name[64]; + long unsigned int private; + size_t max_write_len; + unsigned int flags; + unsigned int file_offset; + struct cgroup_subsys *ss; + struct list_head node; + struct kernfs_ops *kf_ops; + int (*open)(struct kernfs_open_file *); + void (*release)(struct kernfs_open_file *); + u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); + s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *); + int (*seq_show)(struct seq_file *, void *); + void * (*seq_start)(struct seq_file *, loff_t *); + void * (*seq_next)(struct seq_file *, void *, loff_t *); + void (*seq_stop)(struct seq_file *, void *); + int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64); + int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64); + ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t); + __poll_t (*poll)(struct kernfs_open_file *, struct poll_table_struct *); +}; + +struct perf_callchain_entry { + __u64 nr; + __u64 ip[0]; +}; + +typedef long unsigned int (*perf_copy_f)(void *, const void *, long unsigned int, long unsigned int); + +struct perf_raw_frag { + union { + struct perf_raw_frag *next; + long unsigned int pad; + }; + perf_copy_f copy; + void *data; + u32 size; +} __attribute__((packed)); + +struct perf_raw_record { + struct perf_raw_frag frag; + u32 size; +}; + +struct perf_branch_stack { + __u64 nr; + __u64 hw_idx; + struct perf_branch_entry entries[0]; +}; + +struct perf_cpu_context { + struct perf_event_context ctx; + struct perf_event_context *task_ctx; + int active_oncpu; + int exclusive; + raw_spinlock_t hrtimer_lock; + struct hrtimer hrtimer; + ktime_t hrtimer_interval; + unsigned int hrtimer_active; + struct perf_cgroup *cgrp; + struct list_head cgrp_cpuctx_entry; + struct list_head sched_cb_entry; + int sched_cb_usage; + int online; + int heap_size; + struct perf_event **heap; + struct perf_event *heap_default[2]; +}; + +struct perf_output_handle { + struct perf_event *event; + struct perf_buffer *rb; + long unsigned int 
wakeup; + long unsigned int size; + u64 aux_flags; + union { + void *addr; + long unsigned int head; + }; + int page; +}; + +struct perf_addr_filter_range { + long unsigned int start; + long unsigned int size; +}; + +struct perf_sample_data { + u64 addr; + struct perf_raw_record *raw; + struct perf_branch_stack *br_stack; + u64 period; + union perf_sample_weight weight; + u64 txn; + union perf_mem_data_src data_src; + u64 type; + u64 ip; + struct { + u32 pid; + u32 tid; + } tid_entry; + u64 time; + u64 id; + u64 stream_id; + struct { + u32 cpu; + u32 reserved; + } cpu_entry; + struct perf_callchain_entry *callchain; + u64 aux_size; + struct perf_regs regs_user; + struct perf_regs regs_intr; + u64 stack_user_size; + u64 phys_addr; + u64 cgroup; + u64 data_page_size; + u64 code_page_size; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct perf_cgroup_info; + +struct perf_cgroup { + struct cgroup_subsys_state css; + struct perf_cgroup_info *info; +}; + +struct perf_cgroup_info { + u64 time; + u64 timestamp; +}; + +struct trace_entry { + short unsigned int type; + unsigned char flags; + unsigned char preempt_count; + int pid; +}; + +struct trace_array; + +struct tracer; + +struct array_buffer; + +struct ring_buffer_iter; + +struct trace_iterator { + struct trace_array *tr; + struct tracer *trace; + struct array_buffer *array_buffer; + void *private; + int cpu_file; + struct mutex mutex; + struct ring_buffer_iter **buffer_iter; + long unsigned int iter_flags; + void *temp; + unsigned int temp_size; + char *fmt; + unsigned int fmt_size; + struct trace_seq tmp_seq; + cpumask_var_t started; + bool snapshot; + struct trace_seq seq; + struct trace_entry *ent; + long unsigned int lost_events; + int leftover; + int ent_size; + int cpu; + u64 ts; + loff_t pos; + long int idx; +}; + +enum print_line_t { + TRACE_TYPE_PARTIAL_LINE = 0, + TRACE_TYPE_HANDLED = 1, + TRACE_TYPE_UNHANDLED = 2, + TRACE_TYPE_NO_CONSUME = 3, +}; + +typedef enum print_line_t (*trace_print_func)(struct trace_iterator *, int, struct trace_event *); + +struct trace_event_functions { + trace_print_func trace; + trace_print_func raw; + trace_print_func hex; + trace_print_func binary; +}; + +enum trace_reg { + TRACE_REG_REGISTER = 0, + TRACE_REG_UNREGISTER = 1, + TRACE_REG_PERF_REGISTER = 2, + TRACE_REG_PERF_UNREGISTER = 3, + TRACE_REG_PERF_OPEN = 4, + TRACE_REG_PERF_CLOSE = 5, + TRACE_REG_PERF_ADD = 6, + TRACE_REG_PERF_DEL = 7, +}; + +struct trace_event_fields { + const char *type; + union { + struct { + const char *name; + const int size; + const int align; + const int is_signed; + const int filter_type; + }; + int (*define_fields)(struct trace_event_call *); + }; +}; + +struct trace_event_class { + const char *system; + void *probe; + void *perf_probe; + int (*reg)(struct trace_event_call *, enum trace_reg, void *); + struct trace_event_fields *fields_array; + struct list_head * (*get_fields)(struct trace_event_call *); + struct list_head fields; + int (*raw_init)(struct trace_event_call *); +}; + +struct trace_buffer; + +struct trace_event_file; + +struct trace_event_buffer { + struct trace_buffer *buffer; + struct ring_buffer_event *event; + struct trace_event_file *trace_file; + void *entry; + unsigned int trace_ctx; + struct pt_regs *regs; +}; + +struct trace_subsystem_dir; + +struct trace_event_file { + struct list_head list; + struct trace_event_call *event_call; + struct event_filter *filter; + struct dentry *dir; + struct trace_array *tr; + struct trace_subsystem_dir *system; + 
struct list_head triggers; + long unsigned int flags; + atomic_t sm_ref; + atomic_t tm_ref; +}; + +enum { + TRACE_EVENT_FL_FILTERED_BIT = 0, + TRACE_EVENT_FL_CAP_ANY_BIT = 1, + TRACE_EVENT_FL_NO_SET_FILTER_BIT = 2, + TRACE_EVENT_FL_IGNORE_ENABLE_BIT = 3, + TRACE_EVENT_FL_TRACEPOINT_BIT = 4, + TRACE_EVENT_FL_KPROBE_BIT = 5, + TRACE_EVENT_FL_UPROBE_BIT = 6, +}; + +enum { + TRACE_EVENT_FL_FILTERED = 1, + TRACE_EVENT_FL_CAP_ANY = 2, + TRACE_EVENT_FL_NO_SET_FILTER = 4, + TRACE_EVENT_FL_IGNORE_ENABLE = 8, + TRACE_EVENT_FL_TRACEPOINT = 16, + TRACE_EVENT_FL_KPROBE = 32, + TRACE_EVENT_FL_UPROBE = 64, +}; + +enum { + EVENT_FILE_FL_ENABLED_BIT = 0, + EVENT_FILE_FL_RECORDED_CMD_BIT = 1, + EVENT_FILE_FL_RECORDED_TGID_BIT = 2, + EVENT_FILE_FL_FILTERED_BIT = 3, + EVENT_FILE_FL_NO_SET_FILTER_BIT = 4, + EVENT_FILE_FL_SOFT_MODE_BIT = 5, + EVENT_FILE_FL_SOFT_DISABLED_BIT = 6, + EVENT_FILE_FL_TRIGGER_MODE_BIT = 7, + EVENT_FILE_FL_TRIGGER_COND_BIT = 8, + EVENT_FILE_FL_PID_FILTER_BIT = 9, + EVENT_FILE_FL_WAS_ENABLED_BIT = 10, +}; + +enum { + EVENT_FILE_FL_ENABLED = 1, + EVENT_FILE_FL_RECORDED_CMD = 2, + EVENT_FILE_FL_RECORDED_TGID = 4, + EVENT_FILE_FL_FILTERED = 8, + EVENT_FILE_FL_NO_SET_FILTER = 16, + EVENT_FILE_FL_SOFT_MODE = 32, + EVENT_FILE_FL_SOFT_DISABLED = 64, + EVENT_FILE_FL_TRIGGER_MODE = 128, + EVENT_FILE_FL_TRIGGER_COND = 256, + EVENT_FILE_FL_PID_FILTER = 512, + EVENT_FILE_FL_WAS_ENABLED = 1024, +}; + +enum event_trigger_type { + ETT_NONE = 0, + ETT_TRACE_ONOFF = 1, + ETT_SNAPSHOT = 2, + ETT_STACKTRACE = 4, + ETT_EVENT_ENABLE = 8, + ETT_EVENT_HIST = 16, + ETT_HIST_ENABLE = 32, +}; + +enum { + FILTER_OTHER = 0, + FILTER_STATIC_STRING = 1, + FILTER_DYN_STRING = 2, + FILTER_PTR_STRING = 3, + FILTER_TRACE_FN = 4, + FILTER_COMM = 5, + FILTER_CPU = 6, +}; + +struct fwnode_reference_args; + +struct fwnode_endpoint; + +struct fwnode_operations { + struct fwnode_handle * (*get)(struct fwnode_handle *); + void (*put)(struct fwnode_handle *); + bool (*device_is_available)(const struct fwnode_handle *); + const void * (*device_get_match_data)(const struct fwnode_handle *, const struct device *); + bool (*property_present)(const struct fwnode_handle *, const char *); + int (*property_read_int_array)(const struct fwnode_handle *, const char *, unsigned int, void *, size_t); + int (*property_read_string_array)(const struct fwnode_handle *, const char *, const char **, size_t); + const char * (*get_name)(const struct fwnode_handle *); + const char * (*get_name_prefix)(const struct fwnode_handle *); + struct fwnode_handle * (*get_parent)(const struct fwnode_handle *); + struct fwnode_handle * (*get_next_child_node)(const struct fwnode_handle *, struct fwnode_handle *); + struct fwnode_handle * (*get_named_child_node)(const struct fwnode_handle *, const char *); + int (*get_reference_args)(const struct fwnode_handle *, const char *, const char *, unsigned int, unsigned int, struct fwnode_reference_args *); + struct fwnode_handle * (*graph_get_next_endpoint)(const struct fwnode_handle *, struct fwnode_handle *); + struct fwnode_handle * (*graph_get_remote_endpoint)(const struct fwnode_handle *); + struct fwnode_handle * (*graph_get_port_parent)(struct fwnode_handle *); + int (*graph_parse_endpoint)(const struct fwnode_handle *, struct fwnode_endpoint *); + int (*add_links)(struct fwnode_handle *); +}; + +struct fwnode_endpoint { + unsigned int port; + unsigned int id; + const struct fwnode_handle *local_fwnode; +}; + +struct fwnode_reference_args { + struct fwnode_handle *fwnode; + unsigned int nargs; + u64 args[8]; 
+}; + +struct property { + char *name; + int length; + void *value; + struct property *next; +}; + +struct irq_fwspec { + struct fwnode_handle *fwnode; + int param_count; + u32 param[16]; +}; + +struct irq_data; + +struct irq_domain_ops { + int (*match)(struct irq_domain *, struct device_node *, enum irq_domain_bus_token); + int (*select)(struct irq_domain *, struct irq_fwspec *, enum irq_domain_bus_token); + int (*map)(struct irq_domain *, unsigned int, irq_hw_number_t); + void (*unmap)(struct irq_domain *, unsigned int); + int (*xlate)(struct irq_domain *, struct device_node *, const u32 *, unsigned int, long unsigned int *, unsigned int *); + int (*alloc)(struct irq_domain *, unsigned int, unsigned int, void *); + void (*free)(struct irq_domain *, unsigned int, unsigned int); + int (*activate)(struct irq_domain *, struct irq_data *, bool); + void (*deactivate)(struct irq_domain *, struct irq_data *); + int (*translate)(struct irq_domain *, struct irq_fwspec *, long unsigned int *, unsigned int *); +}; + +struct xbc_node { + u16 next; + u16 child; + u16 parent; + u16 data; +}; + +enum wb_stat_item { + WB_RECLAIMABLE = 0, + WB_WRITEBACK = 1, + WB_DIRTIED = 2, + WB_WRITTEN = 3, + NR_WB_STAT_ITEMS = 4, +}; + +struct block_device_operations; + +struct timer_rand_state; + +struct disk_events; + +struct cdrom_device_info; + +struct badblocks; + +struct gendisk { + int major; + int first_minor; + int minors; + char disk_name[32]; + short unsigned int events; + short unsigned int event_flags; + struct xarray part_tbl; + struct block_device *part0; + const struct block_device_operations *fops; + struct request_queue *queue; + void *private_data; + int flags; + long unsigned int state; + struct kobject *slave_dir; + struct timer_rand_state *random; + atomic_t sync_io; + struct disk_events *ev; + struct kobject integrity_kobj; + struct cdrom_device_info *cdi; + int node_id; + struct badblocks *bb; + struct lockdep_map lockdep_map; +}; + +struct partition_meta_info { + char uuid[37]; + u8 volname[64]; +}; + +struct bio_integrity_payload { + struct bio *bip_bio; + struct bvec_iter bip_iter; + short unsigned int bip_vcnt; + short unsigned int bip_max_vcnt; + short unsigned int bip_flags; + struct bvec_iter bio_iter; + struct work_struct bip_work; + struct bio_vec *bip_vec; + struct bio_vec bip_inline_vecs[0]; +}; + +struct blkg_iostat { + u64 bytes[3]; + u64 ios[3]; +}; + +struct blkg_iostat_set { + struct u64_stats_sync sync; + struct blkg_iostat cur; + struct blkg_iostat last; +}; + +struct blkcg; + +struct blkg_policy_data; + +struct blkcg_gq { + struct request_queue *q; + struct list_head q_node; + struct hlist_node blkcg_node; + struct blkcg *blkcg; + struct blkcg_gq *parent; + struct percpu_ref refcnt; + bool online; + struct blkg_iostat_set *iostat_cpu; + struct blkg_iostat_set iostat; + struct blkg_policy_data *pd[5]; + spinlock_t async_bio_lock; + struct bio_list async_bios; + struct work_struct async_bio_work; + atomic_t use_delay; + atomic64_t delay_nsec; + atomic64_t delay_start; + u64 last_delay; + int last_use; + struct callback_head callback_head; +}; + +typedef unsigned int blk_qc_t; + +struct blk_integrity_iter; + +typedef blk_status_t integrity_processing_fn(struct blk_integrity_iter *); + +typedef void integrity_prepare_fn(struct request *); + +typedef void integrity_complete_fn(struct request *, unsigned int); + +struct blk_integrity_profile { + integrity_processing_fn *generate_fn; + integrity_processing_fn *verify_fn; + integrity_prepare_fn *prepare_fn; + integrity_complete_fn 
*complete_fn; + const char *name; +}; + +struct blk_zone; + +typedef int (*report_zones_cb)(struct blk_zone *, unsigned int, void *); + +struct hd_geometry; + +struct pr_ops; + +struct block_device_operations { + blk_qc_t (*submit_bio)(struct bio *); + int (*open)(struct block_device *, fmode_t); + void (*release)(struct gendisk *, fmode_t); + int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int); + int (*ioctl)(struct block_device *, fmode_t, unsigned int, long unsigned int); + int (*compat_ioctl)(struct block_device *, fmode_t, unsigned int, long unsigned int); + unsigned int (*check_events)(struct gendisk *, unsigned int); + void (*unlock_native_capacity)(struct gendisk *); + int (*getgeo)(struct block_device *, struct hd_geometry *); + int (*set_read_only)(struct block_device *, bool); + void (*swap_slot_free_notify)(struct block_device *, long unsigned int); + int (*report_zones)(struct gendisk *, sector_t, unsigned int, report_zones_cb, void *); + char * (*devnode)(struct gendisk *, umode_t *); + struct module *owner; + const struct pr_ops *pr_ops; +}; + +struct sg_io_v4 { + __s32 guard; + __u32 protocol; + __u32 subprotocol; + __u32 request_len; + __u64 request; + __u64 request_tag; + __u32 request_attr; + __u32 request_priority; + __u32 request_extra; + __u32 max_response_len; + __u64 response; + __u32 dout_iovec_count; + __u32 dout_xfer_len; + __u32 din_iovec_count; + __u32 din_xfer_len; + __u64 dout_xferp; + __u64 din_xferp; + __u32 timeout; + __u32 flags; + __u64 usr_ptr; + __u32 spare_in; + __u32 driver_status; + __u32 transport_status; + __u32 device_status; + __u32 retry_delay; + __u32 info; + __u32 duration; + __u32 response_len; + __s32 din_resid; + __s32 dout_resid; + __u64 generated_tag; + __u32 spare_out; + __u32 padding; +}; + +struct bsg_ops { + int (*check_proto)(struct sg_io_v4 *); + int (*fill_hdr)(struct request *, struct sg_io_v4 *, fmode_t); + int (*complete_rq)(struct request *, struct sg_io_v4 *); + void (*free_rq)(struct request *); +}; + +typedef __u32 req_flags_t; + +typedef void rq_end_io_fn(struct request *, blk_status_t); + +enum mq_rq_state { + MQ_RQ_IDLE = 0, + MQ_RQ_IN_FLIGHT = 1, + MQ_RQ_COMPLETE = 2, +}; + +struct blk_ksm_keyslot; + +struct request { + struct request_queue *q; + struct blk_mq_ctx *mq_ctx; + struct blk_mq_hw_ctx *mq_hctx; + unsigned int cmd_flags; + req_flags_t rq_flags; + int tag; + int internal_tag; + unsigned int __data_len; + sector_t __sector; + struct bio *bio; + struct bio *biotail; + struct list_head queuelist; + union { + struct hlist_node hash; + struct llist_node ipi_list; + }; + union { + struct rb_node rb_node; + struct bio_vec special_vec; + void *completion_data; + int error_count; + }; + union { + struct { + struct io_cq *icq; + void *priv[2]; + } elv; + struct { + unsigned int seq; + struct list_head list; + rq_end_io_fn *saved_end_io; + } flush; + }; + struct gendisk *rq_disk; + struct block_device *part; + u64 alloc_time_ns; + u64 start_time_ns; + u64 io_start_time_ns; + short unsigned int wbt_flags; + short unsigned int stats_sectors; + short unsigned int nr_phys_segments; + short unsigned int nr_integrity_segments; + struct bio_crypt_ctx *crypt_ctx; + struct blk_ksm_keyslot *crypt_keyslot; + short unsigned int write_hint; + short unsigned int ioprio; + enum mq_rq_state state; + refcount_t ref; + unsigned int timeout; + long unsigned int deadline; + union { + struct __call_single_data csd; + u64 fifo_time; + }; + rq_end_io_fn *end_io; + void *end_io_data; +}; + +struct blk_zone { + __u64 
start; + __u64 len; + __u64 wp; + __u8 type; + __u8 cond; + __u8 non_seq; + __u8 reset; + __u8 resv[4]; + __u64 capacity; + __u8 reserved[24]; +}; + +enum elv_merge { + ELEVATOR_NO_MERGE = 0, + ELEVATOR_FRONT_MERGE = 1, + ELEVATOR_BACK_MERGE = 2, + ELEVATOR_DISCARD_MERGE = 3, +}; + +struct elevator_type; + +struct blk_mq_alloc_data; + +struct elevator_mq_ops { + int (*init_sched)(struct request_queue *, struct elevator_type *); + void (*exit_sched)(struct elevator_queue *); + int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int); + void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); + void (*depth_updated)(struct blk_mq_hw_ctx *); + bool (*allow_merge)(struct request_queue *, struct request *, struct bio *); + bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int); + int (*request_merge)(struct request_queue *, struct request **, struct bio *); + void (*request_merged)(struct request_queue *, struct request *, enum elv_merge); + void (*requests_merged)(struct request_queue *, struct request *, struct request *); + void (*limit_depth)(unsigned int, struct blk_mq_alloc_data *); + void (*prepare_request)(struct request *); + void (*finish_request)(struct request *); + void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool); + struct request * (*dispatch_request)(struct blk_mq_hw_ctx *); + bool (*has_work)(struct blk_mq_hw_ctx *); + void (*completed_request)(struct request *, u64); + void (*requeue_request)(struct request *); + struct request * (*former_request)(struct request_queue *, struct request *); + struct request * (*next_request)(struct request_queue *, struct request *); + void (*init_icq)(struct io_cq *); + void (*exit_icq)(struct io_cq *); +}; + +struct elv_fs_entry; + +struct blk_mq_debugfs_attr; + +struct elevator_type { + struct kmem_cache *icq_cache; + struct elevator_mq_ops ops; + size_t icq_size; + size_t icq_align; + struct elv_fs_entry *elevator_attrs; + const char *elevator_name; + const char *elevator_alias; + const unsigned int elevator_features; + struct module *elevator_owner; + const struct blk_mq_debugfs_attr *queue_debugfs_attrs; + const struct blk_mq_debugfs_attr *hctx_debugfs_attrs; + char icq_cache_name[22]; + struct list_head list; +}; + +struct elevator_queue { + struct elevator_type *type; + void *elevator_data; + struct kobject kobj; + struct mutex sysfs_lock; + unsigned int registered: 1; + struct hlist_head hash[64]; +}; + +struct elv_fs_entry { + struct attribute attr; + ssize_t (*show)(struct elevator_queue *, char *); + ssize_t (*store)(struct elevator_queue *, const char *, size_t); +}; + +struct blk_mq_debugfs_attr { + const char *name; + umode_t mode; + int (*show)(void *, struct seq_file *); + ssize_t (*write)(void *, const char *, size_t, loff_t *); + const struct seq_operations *seq_ops; +}; + +enum blk_eh_timer_return { + BLK_EH_DONE = 0, + BLK_EH_RESET_TIMER = 1, +}; + +struct blk_mq_queue_data; + +struct blk_mq_ops { + blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *); + void (*commit_rqs)(struct blk_mq_hw_ctx *); + int (*get_budget)(struct request_queue *); + void (*put_budget)(struct request_queue *, int); + void (*set_rq_budget_token)(struct request *, int); + int (*get_rq_budget_token)(struct request *); + enum blk_eh_timer_return (*timeout)(struct request *, bool); + int (*poll)(struct blk_mq_hw_ctx *); + void (*complete)(struct request *); + int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int); + void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); + int 
(*init_request)(struct blk_mq_tag_set *, struct request *, unsigned int, unsigned int); + void (*exit_request)(struct blk_mq_tag_set *, struct request *, unsigned int); + void (*initialize_rq_fn)(struct request *); + void (*cleanup_rq)(struct request *); + bool (*busy)(struct request_queue *); + int (*map_queues)(struct blk_mq_tag_set *); + void (*show_rq)(struct seq_file *, struct request *); +}; + +struct blk_integrity_iter { + void *prot_buf; + void *data_buf; + sector_t seed; + unsigned int data_size; + short unsigned int interval; + const char *disk_name; +}; + +enum pr_type { + PR_WRITE_EXCLUSIVE = 1, + PR_EXCLUSIVE_ACCESS = 2, + PR_WRITE_EXCLUSIVE_REG_ONLY = 3, + PR_EXCLUSIVE_ACCESS_REG_ONLY = 4, + PR_WRITE_EXCLUSIVE_ALL_REGS = 5, + PR_EXCLUSIVE_ACCESS_ALL_REGS = 6, +}; + +struct pr_ops { + int (*pr_register)(struct block_device *, u64, u64, u32); + int (*pr_reserve)(struct block_device *, u64, enum pr_type, u32); + int (*pr_release)(struct block_device *, u64, enum pr_type); + int (*pr_preempt)(struct block_device *, u64, u64, enum pr_type, bool); + int (*pr_clear)(struct block_device *, u64); +}; + +enum blkg_iostat_type { + BLKG_IOSTAT_READ = 0, + BLKG_IOSTAT_WRITE = 1, + BLKG_IOSTAT_DISCARD = 2, + BLKG_IOSTAT_NR = 3, +}; + +struct blkcg_policy_data; + +struct blkcg { + struct cgroup_subsys_state css; + spinlock_t lock; + refcount_t online_pin; + struct xarray blkg_tree; + struct blkcg_gq *blkg_hint; + struct hlist_head blkg_list; + struct blkcg_policy_data *cpd[5]; + struct list_head all_blkcgs_node; + struct list_head cgwb_list; +}; + +struct blkcg_policy_data { + struct blkcg *blkcg; + int plid; +}; + +struct blkg_policy_data { + struct blkcg_gq *blkg; + int plid; +}; + +typedef long unsigned int efi_status_t; + +typedef u8 efi_bool_t; + +typedef u16 efi_char16_t; + +typedef guid_t efi_guid_t; + +typedef struct { + u64 signature; + u32 revision; + u32 headersize; + u32 crc32; + u32 reserved; +} efi_table_hdr_t; + +typedef struct { + u32 type; + u32 pad; + u64 phys_addr; + u64 virt_addr; + u64 num_pages; + u64 attribute; +} efi_memory_desc_t; + +typedef struct { + efi_guid_t guid; + u32 headersize; + u32 flags; + u32 imagesize; +} efi_capsule_header_t; + +typedef struct { + u16 year; + u8 month; + u8 day; + u8 hour; + u8 minute; + u8 second; + u8 pad1; + u32 nanosecond; + s16 timezone; + u8 daylight; + u8 pad2; +} efi_time_t; + +typedef struct { + u32 resolution; + u32 accuracy; + u8 sets_to_zero; +} efi_time_cap_t; + +typedef struct { + efi_table_hdr_t hdr; + u32 get_time; + u32 set_time; + u32 get_wakeup_time; + u32 set_wakeup_time; + u32 set_virtual_address_map; + u32 convert_pointer; + u32 get_variable; + u32 get_next_variable; + u32 set_variable; + u32 get_next_high_mono_count; + u32 reset_system; + u32 update_capsule; + u32 query_capsule_caps; + u32 query_variable_info; +} efi_runtime_services_32_t; + +typedef efi_status_t efi_get_time_t(efi_time_t *, efi_time_cap_t *); + +typedef efi_status_t efi_set_time_t(efi_time_t *); + +typedef efi_status_t efi_get_wakeup_time_t(efi_bool_t *, efi_bool_t *, efi_time_t *); + +typedef efi_status_t efi_set_wakeup_time_t(efi_bool_t, efi_time_t *); + +typedef efi_status_t efi_get_variable_t(efi_char16_t *, efi_guid_t *, u32 *, long unsigned int *, void *); + +typedef efi_status_t efi_get_next_variable_t(long unsigned int *, efi_char16_t *, efi_guid_t *); + +typedef efi_status_t efi_set_variable_t(efi_char16_t *, efi_guid_t *, u32, long unsigned int, void *); + +typedef efi_status_t efi_get_next_high_mono_count_t(u32 *); + +typedef void 
efi_reset_system_t(int, efi_status_t, long unsigned int, efi_char16_t *); + +typedef efi_status_t efi_query_variable_info_t(u32, u64 *, u64 *, u64 *); + +typedef efi_status_t efi_update_capsule_t(efi_capsule_header_t **, long unsigned int, long unsigned int); + +typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **, long unsigned int, u64 *, int *); + +typedef union { + struct { + efi_table_hdr_t hdr; + efi_status_t (*get_time)(efi_time_t *, efi_time_cap_t *); + efi_status_t (*set_time)(efi_time_t *); + efi_status_t (*get_wakeup_time)(efi_bool_t *, efi_bool_t *, efi_time_t *); + efi_status_t (*set_wakeup_time)(efi_bool_t, efi_time_t *); + efi_status_t (*set_virtual_address_map)(long unsigned int, long unsigned int, u32, efi_memory_desc_t *); + void *convert_pointer; + efi_status_t (*get_variable)(efi_char16_t *, efi_guid_t *, u32 *, long unsigned int *, void *); + efi_status_t (*get_next_variable)(long unsigned int *, efi_char16_t *, efi_guid_t *); + efi_status_t (*set_variable)(efi_char16_t *, efi_guid_t *, u32, long unsigned int, void *); + efi_status_t (*get_next_high_mono_count)(u32 *); + void (*reset_system)(int, efi_status_t, long unsigned int, efi_char16_t *); + efi_status_t (*update_capsule)(efi_capsule_header_t **, long unsigned int, long unsigned int); + efi_status_t (*query_capsule_caps)(efi_capsule_header_t **, long unsigned int, u64 *, int *); + efi_status_t (*query_variable_info)(u32, u64 *, u64 *, u64 *); + }; + efi_runtime_services_32_t mixed_mode; +} efi_runtime_services_t; + +struct efi_memory_map { + phys_addr_t phys_map; + void *map; + void *map_end; + int nr_map; + long unsigned int desc_version; + long unsigned int desc_size; + long unsigned int flags; +}; + +struct efi { + const efi_runtime_services_t *runtime; + unsigned int runtime_version; + unsigned int runtime_supported_mask; + long unsigned int acpi; + long unsigned int acpi20; + long unsigned int smbios; + long unsigned int smbios3; + long unsigned int esrt; + long unsigned int tpm_log; + long unsigned int tpm_final_log; + long unsigned int mokvar_table; + efi_get_time_t *get_time; + efi_set_time_t *set_time; + efi_get_wakeup_time_t *get_wakeup_time; + efi_set_wakeup_time_t *set_wakeup_time; + efi_get_variable_t *get_variable; + efi_get_next_variable_t *get_next_variable; + efi_set_variable_t *set_variable; + efi_set_variable_t *set_variable_nonblocking; + efi_query_variable_info_t *query_variable_info; + efi_query_variable_info_t *query_variable_info_nonblocking; + efi_update_capsule_t *update_capsule; + efi_query_capsule_caps_t *query_capsule_caps; + efi_get_next_high_mono_count_t *get_next_high_mono_count; + efi_reset_system_t *reset_system; + struct efi_memory_map memmap; + long unsigned int flags; +}; + +enum memcg_stat_item { + MEMCG_SWAP = 39, + MEMCG_SOCK = 40, + MEMCG_PERCPU_B = 41, + MEMCG_NR_STAT = 42, +}; + +enum memcg_memory_event { + MEMCG_LOW = 0, + MEMCG_HIGH = 1, + MEMCG_MAX = 2, + MEMCG_OOM = 3, + MEMCG_OOM_KILL = 4, + MEMCG_SWAP_HIGH = 5, + MEMCG_SWAP_MAX = 6, + MEMCG_SWAP_FAIL = 7, + MEMCG_NR_MEMORY_EVENTS = 8, +}; + +enum mem_cgroup_events_target { + MEM_CGROUP_TARGET_THRESH = 0, + MEM_CGROUP_TARGET_SOFTLIMIT = 1, + MEM_CGROUP_NTARGETS = 2, +}; + +struct memcg_vmstats_percpu { + long int state[42]; + long unsigned int events[98]; + long int state_prev[42]; + long unsigned int events_prev[98]; + long unsigned int nr_page_events; + long unsigned int targets[2]; +}; + +struct mem_cgroup_reclaim_iter { + struct mem_cgroup *position; + unsigned int generation; +}; + +struct 
lruvec_stat { + long int count[39]; +}; + +struct batched_lruvec_stat { + s32 count[39]; +}; + +struct shrinker_info { + struct callback_head rcu; + atomic_long_t *nr_deferred; + long unsigned int *map; +}; + +struct mem_cgroup_per_node { + struct lruvec lruvec; + struct lruvec_stat *lruvec_stat_local; + struct batched_lruvec_stat *lruvec_stat_cpu; + atomic_long_t lruvec_stat[39]; + long unsigned int lru_zone_size[25]; + struct mem_cgroup_reclaim_iter iter; + struct shrinker_info *shrinker_info; + struct rb_node tree_node; + long unsigned int usage_in_excess; + bool on_tree; + struct mem_cgroup *memcg; +}; + +struct eventfd_ctx; + +struct mem_cgroup_threshold { + struct eventfd_ctx *eventfd; + long unsigned int threshold; +}; + +struct mem_cgroup_threshold_ary { + int current_threshold; + unsigned int size; + struct mem_cgroup_threshold entries[0]; +}; + +struct obj_cgroup { + struct percpu_ref refcnt; + struct mem_cgroup *memcg; + atomic_t nr_charged_bytes; + union { + struct list_head list; + struct callback_head rcu; + }; +}; + +struct percpu_cluster { + struct swap_cluster_info index; + unsigned int next; +}; + +enum fs_value_type { + fs_value_is_undefined = 0, + fs_value_is_flag = 1, + fs_value_is_string = 2, + fs_value_is_blob = 3, + fs_value_is_filename = 4, + fs_value_is_file = 5, +}; + +struct fs_parameter { + const char *key; + enum fs_value_type type: 8; + union { + char *string; + void *blob; + struct filename *name; + struct file *file; + }; + size_t size; + int dirfd; +}; + +struct fc_log { + refcount_t usage; + u8 head; + u8 tail; + u8 need_free; + struct module *owner; + char *buffer[8]; +}; + +struct fs_context_operations { + void (*free)(struct fs_context *); + int (*dup)(struct fs_context *, struct fs_context *); + int (*parse_param)(struct fs_context *, struct fs_parameter *); + int (*parse_monolithic)(struct fs_context *, void *); + int (*get_tree)(struct fs_context *); + int (*reconfigure)(struct fs_context *); +}; + +struct fs_parse_result { + bool negated; + union { + bool boolean; + int int_32; + unsigned int uint_32; + u64 uint_64; + }; +}; + +struct trace_event_raw_initcall_level { + struct trace_entry ent; + u32 __data_loc_level; + char __data[0]; +}; + +struct trace_event_raw_initcall_start { + struct trace_entry ent; + initcall_t func; + char __data[0]; +}; + +struct trace_event_raw_initcall_finish { + struct trace_entry ent; + initcall_t func; + int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_initcall_level { + u32 level; +}; + +struct trace_event_data_offsets_initcall_start {}; + +struct trace_event_data_offsets_initcall_finish {}; + +typedef void (*btf_trace_initcall_level)(void *, const char *); + +typedef void (*btf_trace_initcall_start)(void *, initcall_t); + +typedef void (*btf_trace_initcall_finish)(void *, initcall_t, int); + +struct blacklist_entry { + struct list_head next; + char *buf; +}; + +typedef __u32 Elf32_Word; + +struct elf32_note { + Elf32_Word n_namesz; + Elf32_Word n_descsz; + Elf32_Word n_type; +}; + +enum { + PROC_ROOT_INO = 1, + PROC_IPC_INIT_INO = 4026531839, + PROC_UTS_INIT_INO = 4026531838, + PROC_USER_INIT_INO = 4026531837, + PROC_PID_INIT_INO = 4026531836, + PROC_CGROUP_INIT_INO = 4026531835, + PROC_TIME_INIT_INO = 4026531834, +}; + +typedef __u16 __le16; + +typedef __u16 __be16; + +typedef __u32 __be32; + +typedef __u64 __be64; + +typedef __u32 __wsum; + +typedef unsigned int slab_flags_t; + +struct notifier_block; + +typedef int (*notifier_fn_t)(struct notifier_block *, long unsigned int, void *); + +struct 
notifier_block { + notifier_fn_t notifier_call; + struct notifier_block *next; + int priority; +}; + +struct blocking_notifier_head { + struct rw_semaphore rwsem; + struct notifier_block *head; +}; + +struct raw_notifier_head { + struct notifier_block *head; +}; + +typedef __u64 __addrpair; + +typedef __u32 __portpair; + +typedef struct { + struct net *net; +} possible_net_t; + +struct in6_addr { + union { + __u8 u6_addr8[16]; + __be16 u6_addr16[8]; + __be32 u6_addr32[4]; + } in6_u; +}; + +struct hlist_nulls_node { + struct hlist_nulls_node *next; + struct hlist_nulls_node **pprev; +}; + +struct proto; + +struct inet_timewait_death_row; + +struct sock_common { + union { + __addrpair skc_addrpair; + struct { + __be32 skc_daddr; + __be32 skc_rcv_saddr; + }; + }; + union { + unsigned int skc_hash; + __u16 skc_u16hashes[2]; + }; + union { + __portpair skc_portpair; + struct { + __be16 skc_dport; + __u16 skc_num; + }; + }; + short unsigned int skc_family; + volatile unsigned char skc_state; + unsigned char skc_reuse: 4; + unsigned char skc_reuseport: 1; + unsigned char skc_ipv6only: 1; + unsigned char skc_net_refcnt: 1; + int skc_bound_dev_if; + union { + struct hlist_node skc_bind_node; + struct hlist_node skc_portaddr_node; + }; + struct proto *skc_prot; + possible_net_t skc_net; + struct in6_addr skc_v6_daddr; + struct in6_addr skc_v6_rcv_saddr; + atomic64_t skc_cookie; + union { + long unsigned int skc_flags; + struct sock *skc_listener; + struct inet_timewait_death_row *skc_tw_dr; + }; + int skc_dontcopy_begin[0]; + union { + struct hlist_node skc_node; + struct hlist_nulls_node skc_nulls_node; + }; + short unsigned int skc_tx_queue_mapping; + short unsigned int skc_rx_queue_mapping; + union { + int skc_incoming_cpu; + u32 skc_rcv_wnd; + u32 skc_tw_rcv_nxt; + }; + refcount_t skc_refcnt; + int skc_dontcopy_end[0]; + union { + u32 skc_rxhash; + u32 skc_window_clamp; + u32 skc_tw_snd_nxt; + }; +}; + +typedef struct { + spinlock_t slock; + int owned; + wait_queue_head_t wq; +} socket_lock_t; + +struct sk_buff; + +struct sk_buff_head { + struct sk_buff *next; + struct sk_buff *prev; + __u32 qlen; + spinlock_t lock; +}; + +typedef u64 netdev_features_t; + +struct sock_cgroup_data { + union { + struct { + u8 is_data: 1; + u8 no_refcnt: 1; + u8 unused: 6; + u8 padding; + u16 prioidx; + u32 classid; + }; + u64 val; + }; +}; + +struct sk_filter; + +struct socket_wq; + +struct xfrm_policy; + +struct dst_entry; + +struct socket; + +struct net_device; + +struct sock_reuseport; + +struct sock { + struct sock_common __sk_common; + socket_lock_t sk_lock; + atomic_t sk_drops; + int sk_rcvlowat; + struct sk_buff_head sk_error_queue; + struct sk_buff *sk_rx_skb_cache; + struct sk_buff_head sk_receive_queue; + struct { + atomic_t rmem_alloc; + int len; + struct sk_buff *head; + struct sk_buff *tail; + } sk_backlog; + int sk_forward_alloc; + unsigned int sk_ll_usec; + unsigned int sk_napi_id; + int sk_rcvbuf; + struct sk_filter *sk_filter; + union { + struct socket_wq *sk_wq; + struct socket_wq *sk_wq_raw; + }; + struct xfrm_policy *sk_policy[2]; + struct dst_entry *sk_rx_dst; + struct dst_entry *sk_dst_cache; + atomic_t sk_omem_alloc; + int sk_sndbuf; + int sk_wmem_queued; + refcount_t sk_wmem_alloc; + long unsigned int sk_tsq_flags; + union { + struct sk_buff *sk_send_head; + struct rb_root tcp_rtx_queue; + }; + struct sk_buff *sk_tx_skb_cache; + struct sk_buff_head sk_write_queue; + __s32 sk_peek_off; + int sk_write_pending; + __u32 sk_dst_pending_confirm; + u32 sk_pacing_status; + long int sk_sndtimeo; + 
struct timer_list sk_timer; + __u32 sk_priority; + __u32 sk_mark; + long unsigned int sk_pacing_rate; + long unsigned int sk_max_pacing_rate; + struct page_frag sk_frag; + netdev_features_t sk_route_caps; + netdev_features_t sk_route_nocaps; + netdev_features_t sk_route_forced_caps; + int sk_gso_type; + unsigned int sk_gso_max_size; + gfp_t sk_allocation; + __u32 sk_txhash; + u8 sk_padding: 1; + u8 sk_kern_sock: 1; + u8 sk_no_check_tx: 1; + u8 sk_no_check_rx: 1; + u8 sk_userlocks: 4; + u8 sk_pacing_shift; + u16 sk_type; + u16 sk_protocol; + u16 sk_gso_max_segs; + long unsigned int sk_lingertime; + struct proto *sk_prot_creator; + rwlock_t sk_callback_lock; + int sk_err; + int sk_err_soft; + u32 sk_ack_backlog; + u32 sk_max_ack_backlog; + kuid_t sk_uid; + u8 sk_prefer_busy_poll; + u16 sk_busy_poll_budget; + struct pid *sk_peer_pid; + const struct cred *sk_peer_cred; + long int sk_rcvtimeo; + ktime_t sk_stamp; + u16 sk_tsflags; + u8 sk_shutdown; + u32 sk_tskey; + atomic_t sk_zckey; + u8 sk_clockid; + u8 sk_txtime_deadline_mode: 1; + u8 sk_txtime_report_errors: 1; + u8 sk_txtime_unused: 6; + struct socket *sk_socket; + void *sk_user_data; + void *sk_security; + struct sock_cgroup_data sk_cgrp_data; + struct mem_cgroup *sk_memcg; + void (*sk_state_change)(struct sock *); + void (*sk_data_ready)(struct sock *); + void (*sk_write_space)(struct sock *); + void (*sk_error_report)(struct sock *); + int (*sk_backlog_rcv)(struct sock *, struct sk_buff *); + struct sk_buff * (*sk_validate_xmit_skb)(struct sock *, struct net_device *, struct sk_buff *); + void (*sk_destruct)(struct sock *); + struct sock_reuseport *sk_reuseport_cb; + struct bpf_local_storage *sk_bpf_storage; + struct callback_head sk_rcu; +}; + +struct rhash_head { + struct rhash_head *next; +}; + +struct rhashtable; + +struct rhashtable_compare_arg { + struct rhashtable *ht; + const void *key; +}; + +typedef u32 (*rht_hashfn_t)(const void *, u32, u32); + +typedef u32 (*rht_obj_hashfn_t)(const void *, u32, u32); + +typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *, const void *); + +struct rhashtable_params { + u16 nelem_hint; + u16 key_len; + u16 key_offset; + u16 head_offset; + unsigned int max_size; + u16 min_size; + bool automatic_shrinking; + rht_hashfn_t hashfn; + rht_obj_hashfn_t obj_hashfn; + rht_obj_cmpfn_t obj_cmpfn; +}; + +struct bucket_table; + +struct rhashtable { + struct bucket_table *tbl; + unsigned int key_len; + unsigned int max_elems; + struct rhashtable_params p; + bool rhlist; + struct work_struct run_work; + struct mutex mutex; + spinlock_t lock; + atomic_t nelems; +}; + +struct fs_struct { + int users; + spinlock_t lock; + seqcount_spinlock_t seq; + int umask; + int in_exec; + struct path root; + struct path pwd; +}; + +struct pipe_buffer; + +struct watch_queue; + +struct pipe_inode_info { + struct mutex mutex; + wait_queue_head_t rd_wait; + wait_queue_head_t wr_wait; + unsigned int head; + unsigned int tail; + unsigned int max_usage; + unsigned int ring_size; + bool note_loss; + unsigned int nr_accounted; + unsigned int readers; + unsigned int writers; + unsigned int files; + unsigned int r_counter; + unsigned int w_counter; + unsigned int poll_usage; + struct page *tmp_page; + struct fasync_struct *fasync_readers; + struct fasync_struct *fasync_writers; + struct pipe_buffer *bufs; + struct user_struct *user; + struct watch_queue *watch_queue; +}; + +typedef short unsigned int __kernel_sa_family_t; + +typedef __kernel_sa_family_t sa_family_t; + +struct sockaddr { + sa_family_t sa_family; + char 
sa_data[14]; +}; + +struct msghdr { + void *msg_name; + int msg_namelen; + struct iov_iter msg_iter; + union { + void *msg_control; + void *msg_control_user; + }; + bool msg_control_is_user: 1; + __kernel_size_t msg_controllen; + unsigned int msg_flags; + struct kiocb *msg_iocb; +}; + +typedef struct { + unsigned int clock_rate; + unsigned int clock_type; + short unsigned int loopback; +} sync_serial_settings; + +typedef struct { + unsigned int clock_rate; + unsigned int clock_type; + short unsigned int loopback; + unsigned int slot_map; +} te1_settings; + +typedef struct { + short unsigned int encoding; + short unsigned int parity; +} raw_hdlc_proto; + +typedef struct { + unsigned int t391; + unsigned int t392; + unsigned int n391; + unsigned int n392; + unsigned int n393; + short unsigned int lmi; + short unsigned int dce; +} fr_proto; + +typedef struct { + unsigned int dlci; +} fr_proto_pvc; + +typedef struct { + unsigned int dlci; + char master[16]; +} fr_proto_pvc_info; + +typedef struct { + unsigned int interval; + unsigned int timeout; +} cisco_proto; + +typedef struct { + short unsigned int dce; + unsigned int modulo; + unsigned int window; + unsigned int t1; + unsigned int t2; + unsigned int n2; +} x25_hdlc_proto; + +struct ifmap { + long unsigned int mem_start; + long unsigned int mem_end; + short unsigned int base_addr; + unsigned char irq; + unsigned char dma; + unsigned char port; +}; + +struct if_settings { + unsigned int type; + unsigned int size; + union { + raw_hdlc_proto *raw_hdlc; + cisco_proto *cisco; + fr_proto *fr; + fr_proto_pvc *fr_pvc; + fr_proto_pvc_info *fr_pvc_info; + x25_hdlc_proto *x25; + sync_serial_settings *sync; + te1_settings *te1; + } ifs_ifsu; +}; + +struct ifreq { + union { + char ifrn_name[16]; + } ifr_ifrn; + union { + struct sockaddr ifru_addr; + struct sockaddr ifru_dstaddr; + struct sockaddr ifru_broadaddr; + struct sockaddr ifru_netmask; + struct sockaddr ifru_hwaddr; + short int ifru_flags; + int ifru_ivalue; + int ifru_mtu; + struct ifmap ifru_map; + char ifru_slave[16]; + char ifru_newname[16]; + void *ifru_data; + struct if_settings ifru_settings; + } ifr_ifru; +}; + +struct ld_semaphore { + atomic_long_t count; + raw_spinlock_t wait_lock; + unsigned int wait_readers; + struct list_head read_wait; + struct list_head write_wait; +}; + +typedef unsigned int tcflag_t; + +typedef unsigned char cc_t; + +typedef unsigned int speed_t; + +struct ktermios { + tcflag_t c_iflag; + tcflag_t c_oflag; + tcflag_t c_cflag; + tcflag_t c_lflag; + cc_t c_line; + cc_t c_cc[19]; + speed_t c_ispeed; + speed_t c_ospeed; +}; + +struct winsize { + short unsigned int ws_row; + short unsigned int ws_col; + short unsigned int ws_xpixel; + short unsigned int ws_ypixel; +}; + +struct tty_driver; + +struct tty_operations; + +struct tty_ldisc; + +struct tty_port; + +struct tty_struct { + int magic; + struct kref kref; + struct device *dev; + struct tty_driver *driver; + const struct tty_operations *ops; + int index; + struct ld_semaphore ldisc_sem; + struct tty_ldisc *ldisc; + struct mutex atomic_write_lock; + struct mutex legacy_mutex; + struct mutex throttle_mutex; + struct rw_semaphore termios_rwsem; + struct mutex winsize_mutex; + spinlock_t ctrl_lock; + spinlock_t flow_lock; + struct ktermios termios; + struct ktermios termios_locked; + char name[64]; + struct pid *pgrp; + struct pid *session; + long unsigned int flags; + int count; + struct winsize winsize; + long unsigned int stopped: 1; + long unsigned int flow_stopped: 1; + int: 30; + long unsigned int unused: 62; 
+ int hw_stopped; + long unsigned int ctrl_status: 8; + long unsigned int packet: 1; + int: 23; + long unsigned int unused_ctrl: 55; + unsigned int receive_room; + int flow_change; + struct tty_struct *link; + struct fasync_struct *fasync; + wait_queue_head_t write_wait; + wait_queue_head_t read_wait; + struct work_struct hangup_work; + void *disc_data; + void *driver_data; + spinlock_t files_lock; + struct list_head tty_files; + int closing; + unsigned char *write_buf; + int write_cnt; + struct work_struct SAK_work; + struct tty_port *port; +}; + +typedef struct { + size_t written; + size_t count; + union { + char *buf; + void *data; + } arg; + int error; +} read_descriptor_t; + +struct posix_acl_entry { + short int e_tag; + short unsigned int e_perm; + union { + kuid_t e_uid; + kgid_t e_gid; + }; +}; + +struct posix_acl { + refcount_t a_refcount; + struct callback_head a_rcu; + unsigned int a_count; + struct posix_acl_entry a_entries[0]; +}; + +struct serial_icounter_struct; + +struct serial_struct; + +struct tty_operations { + struct tty_struct * (*lookup)(struct tty_driver *, struct file *, int); + int (*install)(struct tty_driver *, struct tty_struct *); + void (*remove)(struct tty_driver *, struct tty_struct *); + int (*open)(struct tty_struct *, struct file *); + void (*close)(struct tty_struct *, struct file *); + void (*shutdown)(struct tty_struct *); + void (*cleanup)(struct tty_struct *); + int (*write)(struct tty_struct *, const unsigned char *, int); + int (*put_char)(struct tty_struct *, unsigned char); + void (*flush_chars)(struct tty_struct *); + int (*write_room)(struct tty_struct *); + int (*chars_in_buffer)(struct tty_struct *); + int (*ioctl)(struct tty_struct *, unsigned int, long unsigned int); + long int (*compat_ioctl)(struct tty_struct *, unsigned int, long unsigned int); + void (*set_termios)(struct tty_struct *, struct ktermios *); + void (*throttle)(struct tty_struct *); + void (*unthrottle)(struct tty_struct *); + void (*stop)(struct tty_struct *); + void (*start)(struct tty_struct *); + void (*hangup)(struct tty_struct *); + int (*break_ctl)(struct tty_struct *, int); + void (*flush_buffer)(struct tty_struct *); + void (*set_ldisc)(struct tty_struct *); + void (*wait_until_sent)(struct tty_struct *, int); + void (*send_xchar)(struct tty_struct *, char); + int (*tiocmget)(struct tty_struct *); + int (*tiocmset)(struct tty_struct *, unsigned int, unsigned int); + int (*resize)(struct tty_struct *, struct winsize *); + int (*get_icount)(struct tty_struct *, struct serial_icounter_struct *); + int (*get_serial)(struct tty_struct *, struct serial_struct *); + int (*set_serial)(struct tty_struct *, struct serial_struct *); + void (*show_fdinfo)(struct tty_struct *, struct seq_file *); + int (*poll_init)(struct tty_driver *, int, char *); + int (*poll_get_char)(struct tty_driver *, int); + void (*poll_put_char)(struct tty_driver *, int, char); + int (*proc_show)(struct seq_file *, void *); +}; + +struct proc_dir_entry; + +struct tty_driver { + int magic; + struct kref kref; + struct cdev **cdevs; + struct module *owner; + const char *driver_name; + const char *name; + int name_base; + int major; + int minor_start; + unsigned int num; + short int type; + short int subtype; + struct ktermios init_termios; + long unsigned int flags; + struct proc_dir_entry *proc_entry; + struct tty_driver *other; + struct tty_struct **ttys; + struct tty_port **ports; + struct ktermios **termios; + void *driver_state; + const struct tty_operations *ops; + struct list_head tty_drivers; 
+}; + +struct tty_buffer { + union { + struct tty_buffer *next; + struct llist_node free; + }; + int used; + int size; + int commit; + int read; + int flags; + long unsigned int data[0]; +}; + +struct tty_bufhead { + struct tty_buffer *head; + struct work_struct work; + struct mutex lock; + atomic_t priority; + struct tty_buffer sentinel; + struct llist_head free; + atomic_t mem_used; + int mem_limit; + struct tty_buffer *tail; +}; + +struct tty_port_operations; + +struct tty_port_client_operations; + +struct tty_port { + struct tty_bufhead buf; + struct tty_struct *tty; + struct tty_struct *itty; + const struct tty_port_operations *ops; + const struct tty_port_client_operations *client_ops; + spinlock_t lock; + int blocked_open; + int count; + wait_queue_head_t open_wait; + wait_queue_head_t delta_msr_wait; + long unsigned int flags; + long unsigned int iflags; + unsigned char console: 1; + struct mutex mutex; + struct mutex buf_mutex; + unsigned char *xmit_buf; + unsigned int close_delay; + unsigned int closing_wait; + int drain_delay; + struct kref kref; + void *client_data; +}; + +struct tty_ldisc_ops { + char *name; + int num; + int flags; + int (*open)(struct tty_struct *); + void (*close)(struct tty_struct *); + void (*flush_buffer)(struct tty_struct *); + ssize_t (*read)(struct tty_struct *, struct file *, unsigned char *, size_t, void **, long unsigned int); + ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t); + int (*ioctl)(struct tty_struct *, struct file *, unsigned int, long unsigned int); + int (*compat_ioctl)(struct tty_struct *, struct file *, unsigned int, long unsigned int); + void (*set_termios)(struct tty_struct *, struct ktermios *); + __poll_t (*poll)(struct tty_struct *, struct file *, struct poll_table_struct *); + int (*hangup)(struct tty_struct *); + void (*receive_buf)(struct tty_struct *, const unsigned char *, char *, int); + void (*write_wakeup)(struct tty_struct *); + void (*dcd_change)(struct tty_struct *, unsigned int); + int (*receive_buf2)(struct tty_struct *, const unsigned char *, char *, int); + struct module *owner; + int refcount; +}; + +struct tty_ldisc { + struct tty_ldisc_ops *ops; + struct tty_struct *tty; +}; + +struct tty_port_operations { + int (*carrier_raised)(struct tty_port *); + void (*dtr_rts)(struct tty_port *, int); + void (*shutdown)(struct tty_port *); + int (*activate)(struct tty_port *, struct tty_struct *); + void (*destruct)(struct tty_port *); +}; + +struct tty_port_client_operations { + int (*receive_buf)(struct tty_port *, const unsigned char *, const unsigned char *, size_t); + void (*write_wakeup)(struct tty_port *); +}; + +struct prot_inuse; + +struct netns_core { + struct ctl_table_header *sysctl_hdr; + int sysctl_somaxconn; + int *sock_inuse; + struct prot_inuse *prot_inuse; +}; + +struct ipstats_mib; + +struct tcp_mib; + +struct linux_mib; + +struct udp_mib; + +struct linux_xfrm_mib; + +struct linux_tls_mib; + +struct mptcp_mib; + +struct icmp_mib; + +struct icmpmsg_mib; + +struct icmpv6_mib; + +struct icmpv6msg_mib; + +struct netns_mib { + struct ipstats_mib *ip_statistics; + struct ipstats_mib *ipv6_statistics; + struct tcp_mib *tcp_statistics; + struct linux_mib *net_statistics; + struct udp_mib *udp_statistics; + struct udp_mib *udp_stats_in6; + struct linux_xfrm_mib *xfrm_statistics; + struct linux_tls_mib *tls_statistics; + struct mptcp_mib *mptcp_statistics; + struct udp_mib *udplite_statistics; + struct udp_mib *udplite_stats_in6; + struct icmp_mib *icmp_statistics; + struct 
icmpmsg_mib *icmpmsg_statistics; + struct icmpv6_mib *icmpv6_statistics; + struct icmpv6msg_mib *icmpv6msg_statistics; + struct proc_dir_entry *proc_net_devsnmp6; +}; + +struct netns_packet { + struct mutex sklist_lock; + struct hlist_head sklist; +}; + +struct netns_unix { + int sysctl_max_dgram_qlen; + struct ctl_table_header *ctl; +}; + +struct netns_nexthop { + struct rb_root rb_root; + struct hlist_head *devhash; + unsigned int seq; + u32 last_id_allocated; + struct blocking_notifier_head notifier_chain; +}; + +struct inet_hashinfo; + +struct inet_timewait_death_row { + atomic_t tw_count; + char tw_pad[60]; + struct inet_hashinfo *hashinfo; + int sysctl_max_tw_buckets; +}; + +struct local_ports { + seqlock_t lock; + int range[2]; + bool warned; +}; + +struct ping_group_range { + seqlock_t lock; + kgid_t range[2]; +}; + +typedef struct { + u64 key[2]; +} siphash_key_t; + +struct ipv4_devconf; + +struct ip_ra_chain; + +struct fib_rules_ops; + +struct fib_table; + +struct inet_peer_base; + +struct fqdir; + +struct tcp_congestion_ops; + +struct tcp_fastopen_context; + +struct fib_notifier_ops; + +struct netns_ipv4 { + struct inet_timewait_death_row tcp_death_row; + struct ctl_table_header *forw_hdr; + struct ctl_table_header *frags_hdr; + struct ctl_table_header *ipv4_hdr; + struct ctl_table_header *route_hdr; + struct ctl_table_header *xfrm4_hdr; + struct ipv4_devconf *devconf_all; + struct ipv4_devconf *devconf_dflt; + struct ip_ra_chain *ra_chain; + struct mutex ra_mutex; + struct fib_rules_ops *rules_ops; + struct fib_table *fib_main; + struct fib_table *fib_default; + unsigned int fib_rules_require_fldissect; + bool fib_has_custom_rules; + bool fib_has_custom_local_routes; + bool fib_offload_disabled; + int fib_num_tclassid_users; + struct hlist_head *fib_table_hash; + struct sock *fibnl; + struct sock **icmp_sk; + struct sock *mc_autojoin_sk; + struct inet_peer_base *peers; + struct sock **tcp_sk; + struct fqdir *fqdir; + u8 sysctl_icmp_echo_ignore_all; + u8 sysctl_icmp_echo_enable_probe; + u8 sysctl_icmp_echo_ignore_broadcasts; + u8 sysctl_icmp_ignore_bogus_error_responses; + u8 sysctl_icmp_errors_use_inbound_ifaddr; + int sysctl_icmp_ratelimit; + int sysctl_icmp_ratemask; + struct local_ports ip_local_ports; + u8 sysctl_tcp_ecn; + u8 sysctl_tcp_ecn_fallback; + u8 sysctl_ip_default_ttl; + u8 sysctl_ip_no_pmtu_disc; + u8 sysctl_ip_fwd_use_pmtu; + u8 sysctl_ip_fwd_update_priority; + u8 sysctl_ip_nonlocal_bind; + u8 sysctl_ip_autobind_reuse; + u8 sysctl_ip_dynaddr; + u8 sysctl_ip_early_demux; + u8 sysctl_raw_l3mdev_accept; + u8 sysctl_tcp_early_demux; + u8 sysctl_udp_early_demux; + u8 sysctl_nexthop_compat_mode; + u8 sysctl_fwmark_reflect; + u8 sysctl_tcp_fwmark_accept; + u8 sysctl_tcp_l3mdev_accept; + u8 sysctl_tcp_mtu_probing; + int sysctl_tcp_mtu_probe_floor; + int sysctl_tcp_base_mss; + int sysctl_tcp_min_snd_mss; + int sysctl_tcp_probe_threshold; + u32 sysctl_tcp_probe_interval; + int sysctl_tcp_keepalive_time; + int sysctl_tcp_keepalive_intvl; + u8 sysctl_tcp_keepalive_probes; + u8 sysctl_tcp_syn_retries; + u8 sysctl_tcp_synack_retries; + u8 sysctl_tcp_syncookies; + int sysctl_tcp_reordering; + u8 sysctl_tcp_retries1; + u8 sysctl_tcp_retries2; + u8 sysctl_tcp_orphan_retries; + u8 sysctl_tcp_tw_reuse; + int sysctl_tcp_fin_timeout; + unsigned int sysctl_tcp_notsent_lowat; + u8 sysctl_tcp_sack; + u8 sysctl_tcp_window_scaling; + u8 sysctl_tcp_timestamps; + u8 sysctl_tcp_early_retrans; + u8 sysctl_tcp_recovery; + u8 sysctl_tcp_thin_linear_timeouts; + u8 
sysctl_tcp_slow_start_after_idle; + u8 sysctl_tcp_retrans_collapse; + u8 sysctl_tcp_stdurg; + u8 sysctl_tcp_rfc1337; + u8 sysctl_tcp_abort_on_overflow; + u8 sysctl_tcp_fack; + int sysctl_tcp_max_reordering; + int sysctl_tcp_adv_win_scale; + u8 sysctl_tcp_dsack; + u8 sysctl_tcp_app_win; + u8 sysctl_tcp_frto; + u8 sysctl_tcp_nometrics_save; + u8 sysctl_tcp_no_ssthresh_metrics_save; + u8 sysctl_tcp_moderate_rcvbuf; + u8 sysctl_tcp_tso_win_divisor; + u8 sysctl_tcp_workaround_signed_windows; + int sysctl_tcp_limit_output_bytes; + int sysctl_tcp_challenge_ack_limit; + int sysctl_tcp_min_rtt_wlen; + u8 sysctl_tcp_min_tso_segs; + u8 sysctl_tcp_autocorking; + u8 sysctl_tcp_reflect_tos; + u8 sysctl_tcp_comp_sack_nr; + int sysctl_tcp_invalid_ratelimit; + int sysctl_tcp_pacing_ss_ratio; + int sysctl_tcp_pacing_ca_ratio; + int sysctl_tcp_wmem[3]; + int sysctl_tcp_rmem[3]; + long unsigned int sysctl_tcp_comp_sack_delay_ns; + long unsigned int sysctl_tcp_comp_sack_slack_ns; + int sysctl_max_syn_backlog; + int sysctl_tcp_fastopen; + const struct tcp_congestion_ops *tcp_congestion_control; + struct tcp_fastopen_context *tcp_fastopen_ctx; + spinlock_t tcp_fastopen_ctx_lock; + unsigned int sysctl_tcp_fastopen_blackhole_timeout; + atomic_t tfo_active_disable_times; + long unsigned int tfo_active_disable_stamp; + int sysctl_udp_wmem_min; + int sysctl_udp_rmem_min; + u8 sysctl_fib_notify_on_flag_change; + u8 sysctl_udp_l3mdev_accept; + u8 sysctl_igmp_llm_reports; + int sysctl_igmp_max_memberships; + int sysctl_igmp_max_msf; + int sysctl_igmp_qrv; + struct ping_group_range ping_group_range; + atomic_t dev_addr_genid; + long unsigned int *sysctl_local_reserved_ports; + int sysctl_ip_prot_sock; + struct list_head mr_tables; + struct fib_rules_ops *mr_rules_ops; + u8 sysctl_fib_multipath_use_neigh; + u8 sysctl_fib_multipath_hash_policy; + struct fib_notifier_ops *notifier_ops; + unsigned int fib_seq; + struct fib_notifier_ops *ipmr_notifier_ops; + unsigned int ipmr_seq; + atomic_t rt_genid; + siphash_key_t ip_id_key; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct neighbour; + +struct dst_ops { + short unsigned int family; + unsigned int gc_thresh; + int (*gc)(struct dst_ops *); + struct dst_entry * (*check)(struct dst_entry *, __u32); + unsigned int (*default_advmss)(const struct dst_entry *); + unsigned int (*mtu)(const struct dst_entry *); + u32 * (*cow_metrics)(struct dst_entry *, long unsigned int); + void (*destroy)(struct dst_entry *); + void (*ifdown)(struct dst_entry *, struct net_device *, int); + struct dst_entry * (*negative_advice)(struct dst_entry *); + void (*link_failure)(struct sk_buff *); + void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32, bool); + void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *); + int (*local_out)(struct net *, struct sock *, struct sk_buff *); + struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *); + void (*confirm_neigh)(const struct dst_entry *, const void *); + struct kmem_cache *kmem_cachep; + struct percpu_counter pcpuc_entries; + long: 64; + long: 64; + long: 64; +}; + +struct netns_sysctl_ipv6 { + struct ctl_table_header *hdr; + struct ctl_table_header *route_hdr; + struct ctl_table_header *icmp_hdr; + struct ctl_table_header *frags_hdr; + struct ctl_table_header *xfrm6_hdr; + int flush_delay; + int ip6_rt_max_size; + int ip6_rt_gc_min_interval; + int ip6_rt_gc_timeout; + int ip6_rt_gc_interval; + int ip6_rt_gc_elasticity; + int ip6_rt_mtu_expires; + int 
ip6_rt_min_advmss; + u8 bindv6only; + u8 multipath_hash_policy; + u8 flowlabel_consistency; + u8 auto_flowlabels; + int icmpv6_time; + u8 icmpv6_echo_ignore_all; + u8 icmpv6_echo_ignore_multicast; + u8 icmpv6_echo_ignore_anycast; + long unsigned int icmpv6_ratemask[4]; + long unsigned int *icmpv6_ratemask_ptr; + u8 anycast_src_echo_reply; + u8 ip_nonlocal_bind; + u8 fwmark_reflect; + u8 flowlabel_state_ranges; + int idgen_retries; + int idgen_delay; + int flowlabel_reflect; + int max_dst_opts_cnt; + int max_hbh_opts_cnt; + int max_dst_opts_len; + int max_hbh_opts_len; + int seg6_flowlabel; + bool skip_notify_on_dev_down; + u8 fib_notify_on_flag_change; +}; + +struct ipv6_devconf; + +struct fib6_info; + +struct rt6_info; + +struct rt6_statistics; + +struct fib6_table; + +struct seg6_pernet_data; + +struct netns_ipv6 { + struct dst_ops ip6_dst_ops; + struct netns_sysctl_ipv6 sysctl; + struct ipv6_devconf *devconf_all; + struct ipv6_devconf *devconf_dflt; + struct inet_peer_base *peers; + struct fqdir *fqdir; + struct fib6_info *fib6_null_entry; + struct rt6_info *ip6_null_entry; + struct rt6_statistics *rt6_stats; + struct timer_list ip6_fib_timer; + struct hlist_head *fib_table_hash; + struct fib6_table *fib6_main_tbl; + struct list_head fib6_walkers; + rwlock_t fib6_walker_lock; + spinlock_t fib6_gc_lock; + unsigned int ip6_rt_gc_expire; + long unsigned int ip6_rt_last_gc; + unsigned int fib6_rules_require_fldissect; + bool fib6_has_custom_rules; + unsigned int fib6_routes_require_src; + struct rt6_info *ip6_prohibit_entry; + struct rt6_info *ip6_blk_hole_entry; + struct fib6_table *fib6_local_tbl; + struct fib_rules_ops *fib6_rules_ops; + struct sock **icmp_sk; + struct sock *ndisc_sk; + struct sock *tcp_sk; + struct sock *igmp_sk; + struct sock *mc_autojoin_sk; + struct list_head mr6_tables; + struct fib_rules_ops *mr6_rules_ops; + atomic_t dev_addr_genid; + atomic_t fib6_sernum; + struct seg6_pernet_data *seg6_data; + struct fib_notifier_ops *notifier_ops; + struct fib_notifier_ops *ip6mr_notifier_ops; + unsigned int ipmr_seq; + struct { + struct hlist_head head; + spinlock_t lock; + u32 seq; + } ip6addrlbl_table; + long: 64; + long: 64; + long: 64; +}; + +struct netns_sysctl_lowpan { + struct ctl_table_header *frags_hdr; +}; + +struct netns_ieee802154_lowpan { + struct netns_sysctl_lowpan sysctl; + struct fqdir *fqdir; +}; + +struct sctp_mib; + +struct netns_sctp { + struct sctp_mib *sctp_statistics; + struct proc_dir_entry *proc_net_sctp; + struct ctl_table_header *sysctl_header; + struct sock *ctl_sock; + struct sock *udp4_sock; + struct sock *udp6_sock; + int udp_port; + int encap_port; + struct list_head local_addr_list; + struct list_head addr_waitq; + struct timer_list addr_wq_timer; + struct list_head auto_asconf_splist; + spinlock_t addr_wq_lock; + spinlock_t local_addr_lock; + unsigned int rto_initial; + unsigned int rto_min; + unsigned int rto_max; + int rto_alpha; + int rto_beta; + int max_burst; + int cookie_preserve_enable; + char *sctp_hmac_alg; + unsigned int valid_cookie_life; + unsigned int sack_timeout; + unsigned int hb_interval; + int max_retrans_association; + int max_retrans_path; + int max_retrans_init; + int pf_retrans; + int ps_retrans; + int pf_enable; + int pf_expose; + int sndbuf_policy; + int rcvbuf_policy; + int default_auto_asconf; + int addip_enable; + int addip_noauth; + int prsctp_enable; + int reconf_enable; + int auth_enable; + int intl_enable; + int ecn_enable; + int scope_policy; + int rwnd_upd_shift; + long unsigned int max_autoclose; +}; + 
+struct nf_queue_handler; + +struct nf_logger; + +struct nf_hook_entries; + +struct netns_nf { + struct proc_dir_entry *proc_netfilter; + const struct nf_queue_handler *queue_handler; + const struct nf_logger *nf_loggers[13]; + struct ctl_table_header *nf_log_dir_header; + struct nf_hook_entries *hooks_ipv4[5]; + struct nf_hook_entries *hooks_ipv6[5]; + struct nf_hook_entries *hooks_arp[3]; + struct nf_hook_entries *hooks_bridge[5]; + struct nf_hook_entries *hooks_decnet[7]; +}; + +struct netns_xt { + bool notrack_deprecated_warning; + bool clusterip_deprecated_warning; +}; + +struct nf_generic_net { + unsigned int timeout; +}; + +struct nf_tcp_net { + unsigned int timeouts[14]; + u8 tcp_loose; + u8 tcp_be_liberal; + u8 tcp_max_retrans; +}; + +struct nf_udp_net { + unsigned int timeouts[2]; +}; + +struct nf_icmp_net { + unsigned int timeout; +}; + +struct nf_dccp_net { + u8 dccp_loose; + unsigned int dccp_timeout[10]; +}; + +struct nf_sctp_net { + unsigned int timeouts[10]; +}; + +struct nf_gre_net { + struct list_head keymap_list; + unsigned int timeouts[2]; +}; + +struct nf_ip_net { + struct nf_generic_net generic; + struct nf_tcp_net tcp; + struct nf_udp_net udp; + struct nf_icmp_net icmp; + struct nf_icmp_net icmpv6; + struct nf_dccp_net dccp; + struct nf_sctp_net sctp; + struct nf_gre_net gre; +}; + +struct ct_pcpu; + +struct ip_conntrack_stat; + +struct nf_ct_event_notifier; + +struct nf_exp_event_notifier; + +struct netns_ct { + bool ecache_dwork_pending; + u8 sysctl_log_invalid; + u8 sysctl_events; + u8 sysctl_acct; + u8 sysctl_auto_assign_helper; + u8 sysctl_tstamp; + u8 sysctl_checksum; + struct ct_pcpu *pcpu_lists; + struct ip_conntrack_stat *stat; + struct nf_ct_event_notifier *nf_conntrack_event_cb; + struct nf_exp_event_notifier *nf_expect_event_cb; + struct nf_ip_net nf_ct_proto; + unsigned int labels_used; +}; + +struct netns_nftables { + u8 gencursor; +}; + +struct netns_bpf { + struct bpf_prog_array *run_array[2]; + struct bpf_prog *progs[2]; + struct list_head links[2]; +}; + +struct xfrm_policy_hash { + struct hlist_head *table; + unsigned int hmask; + u8 dbits4; + u8 sbits4; + u8 dbits6; + u8 sbits6; +}; + +struct xfrm_policy_hthresh { + struct work_struct work; + seqlock_t lock; + u8 lbits4; + u8 rbits4; + u8 lbits6; + u8 rbits6; +}; + +struct netns_xfrm { + struct list_head state_all; + struct hlist_head *state_bydst; + struct hlist_head *state_bysrc; + struct hlist_head *state_byspi; + unsigned int state_hmask; + unsigned int state_num; + struct work_struct state_hash_work; + struct list_head policy_all; + struct hlist_head *policy_byidx; + unsigned int policy_idx_hmask; + struct hlist_head policy_inexact[3]; + struct xfrm_policy_hash policy_bydst[3]; + unsigned int policy_count[6]; + struct work_struct policy_hash_work; + struct xfrm_policy_hthresh policy_hthresh; + struct list_head inexact_bins; + struct sock *nlsk; + struct sock *nlsk_stash; + u32 sysctl_aevent_etime; + u32 sysctl_aevent_rseqth; + int sysctl_larval_drop; + u32 sysctl_acq_expires; + struct ctl_table_header *sysctl_hdr; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct dst_ops xfrm4_dst_ops; + struct dst_ops xfrm6_dst_ops; + spinlock_t xfrm_state_lock; + seqcount_spinlock_t xfrm_state_hash_generation; + seqcount_spinlock_t xfrm_policy_hash_generation; + spinlock_t xfrm_policy_lock; + struct mutex xfrm_cfg_mutex; + long: 64; + long: 64; +}; + +struct netns_ipvs; + +struct mpls_route; + +struct netns_mpls { + int ip_ttl_propagate; + int default_ttl; + size_t platform_labels; + 
struct mpls_route **platform_label; + struct ctl_table_header *ctl; +}; + +struct can_dev_rcv_lists; + +struct can_pkg_stats; + +struct can_rcv_lists_stats; + +struct netns_can { + struct proc_dir_entry *proc_dir; + struct proc_dir_entry *pde_stats; + struct proc_dir_entry *pde_reset_stats; + struct proc_dir_entry *pde_rcvlist_all; + struct proc_dir_entry *pde_rcvlist_fil; + struct proc_dir_entry *pde_rcvlist_inv; + struct proc_dir_entry *pde_rcvlist_sff; + struct proc_dir_entry *pde_rcvlist_eff; + struct proc_dir_entry *pde_rcvlist_err; + struct proc_dir_entry *bcmproc_dir; + struct can_dev_rcv_lists *rx_alldev_list; + spinlock_t rcvlists_lock; + struct timer_list stattimer; + struct can_pkg_stats *pkg_stats; + struct can_rcv_lists_stats *rcv_lists_stats; + struct hlist_head cgw_list; +}; + +struct netns_xdp { + struct mutex lock; + struct hlist_head list; +}; + +struct smc_stats; + +struct smc_stats_rsn; + +struct netns_smc { + struct smc_stats *smc_stats; + struct mutex mutex_fback_rsn; + struct smc_stats_rsn *fback_rsn; +}; + +struct uevent_sock; + +struct net_generic; + +struct net { + refcount_t passive; + spinlock_t rules_mod_lock; + unsigned int dev_unreg_count; + unsigned int dev_base_seq; + int ifindex; + spinlock_t nsid_lock; + atomic_t fnhe_genid; + struct list_head list; + struct list_head exit_list; + struct llist_node cleanup_list; + struct key_tag *key_domain; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct idr netns_ids; + struct ns_common ns; + struct list_head dev_base_head; + struct proc_dir_entry *proc_net; + struct proc_dir_entry *proc_net_stat; + struct ctl_table_set sysctls; + struct sock *rtnl; + struct sock *genl_sock; + struct uevent_sock *uevent_sock; + struct hlist_head *dev_name_head; + struct hlist_head *dev_index_head; + struct raw_notifier_head netdev_chain; + u32 hash_mix; + struct net_device *loopback_dev; + struct list_head rules_ops; + struct netns_core core; + struct netns_mib mib; + struct netns_packet packet; + struct netns_unix unx; + struct netns_nexthop nexthop; + struct netns_ipv4 ipv4; + struct netns_ipv6 ipv6; + struct netns_ieee802154_lowpan ieee802154_lowpan; + struct netns_sctp sctp; + struct netns_nf nf; + struct netns_xt xt; + struct netns_ct ct; + struct netns_nftables nft; + struct sk_buff_head wext_nlevents; + struct net_generic *gen; + struct netns_bpf bpf; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct netns_xfrm xfrm; + u64 net_cookie; + struct netns_ipvs *ipvs; + struct netns_mpls mpls; + struct netns_can can; + struct netns_xdp xdp; + struct sock *crypto_nlsk; + struct sock *diag_nlsk; + struct netns_smc smc; + long: 64; +}; + +typedef struct { + local64_t v; +} u64_stats_t; + +struct bpf_insn { + __u8 code; + __u8 dst_reg: 4; + __u8 src_reg: 4; + __s16 off; + __s32 imm; +}; + +enum bpf_map_type { + BPF_MAP_TYPE_UNSPEC = 0, + BPF_MAP_TYPE_HASH = 1, + BPF_MAP_TYPE_ARRAY = 2, + BPF_MAP_TYPE_PROG_ARRAY = 3, + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 4, + BPF_MAP_TYPE_PERCPU_HASH = 5, + BPF_MAP_TYPE_PERCPU_ARRAY = 6, + BPF_MAP_TYPE_STACK_TRACE = 7, + BPF_MAP_TYPE_CGROUP_ARRAY = 8, + BPF_MAP_TYPE_LRU_HASH = 9, + BPF_MAP_TYPE_LRU_PERCPU_HASH = 10, + BPF_MAP_TYPE_LPM_TRIE = 11, + BPF_MAP_TYPE_ARRAY_OF_MAPS = 12, + BPF_MAP_TYPE_HASH_OF_MAPS = 13, + BPF_MAP_TYPE_DEVMAP = 14, + BPF_MAP_TYPE_SOCKMAP = 15, + BPF_MAP_TYPE_CPUMAP = 16, + BPF_MAP_TYPE_XSKMAP = 17, + BPF_MAP_TYPE_SOCKHASH = 18, + BPF_MAP_TYPE_CGROUP_STORAGE = 19, + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 20, + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 21, 
+ BPF_MAP_TYPE_QUEUE = 22, + BPF_MAP_TYPE_STACK = 23, + BPF_MAP_TYPE_SK_STORAGE = 24, + BPF_MAP_TYPE_DEVMAP_HASH = 25, + BPF_MAP_TYPE_STRUCT_OPS = 26, + BPF_MAP_TYPE_RINGBUF = 27, + BPF_MAP_TYPE_INODE_STORAGE = 28, + BPF_MAP_TYPE_TASK_STORAGE = 29, +}; + +enum bpf_prog_type { + BPF_PROG_TYPE_UNSPEC = 0, + BPF_PROG_TYPE_SOCKET_FILTER = 1, + BPF_PROG_TYPE_KPROBE = 2, + BPF_PROG_TYPE_SCHED_CLS = 3, + BPF_PROG_TYPE_SCHED_ACT = 4, + BPF_PROG_TYPE_TRACEPOINT = 5, + BPF_PROG_TYPE_XDP = 6, + BPF_PROG_TYPE_PERF_EVENT = 7, + BPF_PROG_TYPE_CGROUP_SKB = 8, + BPF_PROG_TYPE_CGROUP_SOCK = 9, + BPF_PROG_TYPE_LWT_IN = 10, + BPF_PROG_TYPE_LWT_OUT = 11, + BPF_PROG_TYPE_LWT_XMIT = 12, + BPF_PROG_TYPE_SOCK_OPS = 13, + BPF_PROG_TYPE_SK_SKB = 14, + BPF_PROG_TYPE_CGROUP_DEVICE = 15, + BPF_PROG_TYPE_SK_MSG = 16, + BPF_PROG_TYPE_RAW_TRACEPOINT = 17, + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 18, + BPF_PROG_TYPE_LWT_SEG6LOCAL = 19, + BPF_PROG_TYPE_LIRC_MODE2 = 20, + BPF_PROG_TYPE_SK_REUSEPORT = 21, + BPF_PROG_TYPE_FLOW_DISSECTOR = 22, + BPF_PROG_TYPE_CGROUP_SYSCTL = 23, + BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 24, + BPF_PROG_TYPE_CGROUP_SOCKOPT = 25, + BPF_PROG_TYPE_TRACING = 26, + BPF_PROG_TYPE_STRUCT_OPS = 27, + BPF_PROG_TYPE_EXT = 28, + BPF_PROG_TYPE_LSM = 29, + BPF_PROG_TYPE_SK_LOOKUP = 30, +}; + +enum bpf_attach_type { + BPF_CGROUP_INET_INGRESS = 0, + BPF_CGROUP_INET_EGRESS = 1, + BPF_CGROUP_INET_SOCK_CREATE = 2, + BPF_CGROUP_SOCK_OPS = 3, + BPF_SK_SKB_STREAM_PARSER = 4, + BPF_SK_SKB_STREAM_VERDICT = 5, + BPF_CGROUP_DEVICE = 6, + BPF_SK_MSG_VERDICT = 7, + BPF_CGROUP_INET4_BIND = 8, + BPF_CGROUP_INET6_BIND = 9, + BPF_CGROUP_INET4_CONNECT = 10, + BPF_CGROUP_INET6_CONNECT = 11, + BPF_CGROUP_INET4_POST_BIND = 12, + BPF_CGROUP_INET6_POST_BIND = 13, + BPF_CGROUP_UDP4_SENDMSG = 14, + BPF_CGROUP_UDP6_SENDMSG = 15, + BPF_LIRC_MODE2 = 16, + BPF_FLOW_DISSECTOR = 17, + BPF_CGROUP_SYSCTL = 18, + BPF_CGROUP_UDP4_RECVMSG = 19, + BPF_CGROUP_UDP6_RECVMSG = 20, + BPF_CGROUP_GETSOCKOPT = 21, + BPF_CGROUP_SETSOCKOPT = 22, + BPF_TRACE_RAW_TP = 23, + BPF_TRACE_FENTRY = 24, + BPF_TRACE_FEXIT = 25, + BPF_MODIFY_RETURN = 26, + BPF_LSM_MAC = 27, + BPF_TRACE_ITER = 28, + BPF_CGROUP_INET4_GETPEERNAME = 29, + BPF_CGROUP_INET6_GETPEERNAME = 30, + BPF_CGROUP_INET4_GETSOCKNAME = 31, + BPF_CGROUP_INET6_GETSOCKNAME = 32, + BPF_XDP_DEVMAP = 33, + BPF_CGROUP_INET_SOCK_RELEASE = 34, + BPF_XDP_CPUMAP = 35, + BPF_SK_LOOKUP = 36, + BPF_XDP = 37, + BPF_SK_SKB_VERDICT = 38, + __MAX_BPF_ATTACH_TYPE = 39, +}; + +union bpf_attr { + struct { + __u32 map_type; + __u32 key_size; + __u32 value_size; + __u32 max_entries; + __u32 map_flags; + __u32 inner_map_fd; + __u32 numa_node; + char map_name[16]; + __u32 map_ifindex; + __u32 btf_fd; + __u32 btf_key_type_id; + __u32 btf_value_type_id; + __u32 btf_vmlinux_value_type_id; + }; + struct { + __u32 map_fd; + __u64 key; + union { + __u64 value; + __u64 next_key; + }; + __u64 flags; + }; + struct { + __u64 in_batch; + __u64 out_batch; + __u64 keys; + __u64 values; + __u32 count; + __u32 map_fd; + __u64 elem_flags; + __u64 flags; + } batch; + struct { + __u32 prog_type; + __u32 insn_cnt; + __u64 insns; + __u64 license; + __u32 log_level; + __u32 log_size; + __u64 log_buf; + __u32 kern_version; + __u32 prog_flags; + char prog_name[16]; + __u32 prog_ifindex; + __u32 expected_attach_type; + __u32 prog_btf_fd; + __u32 func_info_rec_size; + __u64 func_info; + __u32 func_info_cnt; + __u32 line_info_rec_size; + __u64 line_info; + __u32 line_info_cnt; + __u32 attach_btf_id; + union { + __u32 attach_prog_fd; + __u32 
attach_btf_obj_fd; + }; + }; + struct { + __u64 pathname; + __u32 bpf_fd; + __u32 file_flags; + }; + struct { + __u32 target_fd; + __u32 attach_bpf_fd; + __u32 attach_type; + __u32 attach_flags; + __u32 replace_bpf_fd; + }; + struct { + __u32 prog_fd; + __u32 retval; + __u32 data_size_in; + __u32 data_size_out; + __u64 data_in; + __u64 data_out; + __u32 repeat; + __u32 duration; + __u32 ctx_size_in; + __u32 ctx_size_out; + __u64 ctx_in; + __u64 ctx_out; + __u32 flags; + __u32 cpu; + } test; + struct { + union { + __u32 start_id; + __u32 prog_id; + __u32 map_id; + __u32 btf_id; + __u32 link_id; + }; + __u32 next_id; + __u32 open_flags; + }; + struct { + __u32 bpf_fd; + __u32 info_len; + __u64 info; + } info; + struct { + __u32 target_fd; + __u32 attach_type; + __u32 query_flags; + __u32 attach_flags; + __u64 prog_ids; + __u32 prog_cnt; + } query; + struct { + __u64 name; + __u32 prog_fd; + } raw_tracepoint; + struct { + __u64 btf; + __u64 btf_log_buf; + __u32 btf_size; + __u32 btf_log_size; + __u32 btf_log_level; + }; + struct { + __u32 pid; + __u32 fd; + __u32 flags; + __u32 buf_len; + __u64 buf; + __u32 prog_id; + __u32 fd_type; + __u64 probe_offset; + __u64 probe_addr; + } task_fd_query; + struct { + __u32 prog_fd; + union { + __u32 target_fd; + __u32 target_ifindex; + }; + __u32 attach_type; + __u32 flags; + union { + __u32 target_btf_id; + struct { + __u64 iter_info; + __u32 iter_info_len; + }; + }; + } link_create; + struct { + __u32 link_fd; + __u32 new_prog_fd; + __u32 flags; + __u32 old_prog_fd; + } link_update; + struct { + __u32 link_fd; + } link_detach; + struct { + __u32 type; + } enable_stats; + struct { + __u32 link_fd; + __u32 flags; + } iter_create; + struct { + __u32 prog_fd; + __u32 map_fd; + __u32 flags; + } prog_bind_map; +}; + +struct bpf_func_info { + __u32 insn_off; + __u32 type_id; +}; + +struct bpf_line_info { + __u32 insn_off; + __u32 file_name_off; + __u32 line_off; + __u32 line_col; +}; + +struct bpf_iter_aux_info; + +typedef int (*bpf_iter_init_seq_priv_t)(void *, struct bpf_iter_aux_info *); + +struct bpf_map; + +struct bpf_iter_aux_info { + struct bpf_map *map; +}; + +typedef void (*bpf_iter_fini_seq_priv_t)(void *); + +struct bpf_iter_seq_info { + const struct seq_operations *seq_ops; + bpf_iter_init_seq_priv_t init_seq_private; + bpf_iter_fini_seq_priv_t fini_seq_private; + u32 seq_priv_size; +}; + +struct btf; + +struct btf_type; + +struct bpf_prog_aux; + +struct bpf_local_storage_map; + +struct bpf_verifier_env; + +struct bpf_func_state; + +struct bpf_map_ops { + int (*map_alloc_check)(union bpf_attr *); + struct bpf_map * (*map_alloc)(union bpf_attr *); + void (*map_release)(struct bpf_map *, struct file *); + void (*map_free)(struct bpf_map *); + int (*map_get_next_key)(struct bpf_map *, void *, void *); + void (*map_release_uref)(struct bpf_map *); + void * (*map_lookup_elem_sys_only)(struct bpf_map *, void *); + int (*map_lookup_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *); + int (*map_lookup_and_delete_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *); + int (*map_update_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *); + int (*map_delete_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *); + void * (*map_lookup_elem)(struct bpf_map *, void *); + int (*map_update_elem)(struct bpf_map *, void *, void *, u64); + int (*map_delete_elem)(struct bpf_map *, void *); + int (*map_push_elem)(struct bpf_map *, void *, u64); + int (*map_pop_elem)(struct bpf_map *, void *); + int 
(*map_peek_elem)(struct bpf_map *, void *); + void * (*map_fd_get_ptr)(struct bpf_map *, struct file *, int); + void (*map_fd_put_ptr)(void *); + int (*map_gen_lookup)(struct bpf_map *, struct bpf_insn *); + u32 (*map_fd_sys_lookup_elem)(void *); + void (*map_seq_show_elem)(struct bpf_map *, void *, struct seq_file *); + int (*map_check_btf)(const struct bpf_map *, const struct btf *, const struct btf_type *, const struct btf_type *); + int (*map_poke_track)(struct bpf_map *, struct bpf_prog_aux *); + void (*map_poke_untrack)(struct bpf_map *, struct bpf_prog_aux *); + void (*map_poke_run)(struct bpf_map *, u32, struct bpf_prog *, struct bpf_prog *); + int (*map_direct_value_addr)(const struct bpf_map *, u64 *, u32); + int (*map_direct_value_meta)(const struct bpf_map *, u64, u32 *); + int (*map_mmap)(struct bpf_map *, struct vm_area_struct *); + __poll_t (*map_poll)(struct bpf_map *, struct file *, struct poll_table_struct *); + int (*map_local_storage_charge)(struct bpf_local_storage_map *, void *, u32); + void (*map_local_storage_uncharge)(struct bpf_local_storage_map *, void *, u32); + struct bpf_local_storage ** (*map_owner_storage_ptr)(void *); + int (*map_redirect)(struct bpf_map *, u32, u64); + bool (*map_meta_equal)(const struct bpf_map *, const struct bpf_map *); + int (*map_set_for_each_callback_args)(struct bpf_verifier_env *, struct bpf_func_state *, struct bpf_func_state *); + int (*map_for_each_callback)(struct bpf_map *, void *, void *, u64); + const char * const map_btf_name; + int *map_btf_id; + const struct bpf_iter_seq_info *iter_seq_info; +}; + +struct bpf_map { + const struct bpf_map_ops *ops; + struct bpf_map *inner_map_meta; + void *security; + enum bpf_map_type map_type; + u32 key_size; + u32 value_size; + u32 max_entries; + u32 map_flags; + int spin_lock_off; + u32 id; + int numa_node; + u32 btf_key_type_id; + u32 btf_value_type_id; + struct btf *btf; + struct mem_cgroup *memcg; + char name[16]; + u32 btf_vmlinux_value_type_id; + bool bypass_spec_v1; + bool frozen; + long: 16; + long: 64; + long: 64; + long: 64; + atomic64_t refcnt; + atomic64_t usercnt; + struct work_struct work; + struct mutex freeze_mutex; + u64 writecnt; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct btf_header { + __u16 magic; + __u8 version; + __u8 flags; + __u32 hdr_len; + __u32 type_off; + __u32 type_len; + __u32 str_off; + __u32 str_len; +}; + +struct btf { + void *data; + struct btf_type **types; + u32 *resolved_ids; + u32 *resolved_sizes; + const char *strings; + void *nohdr_data; + struct btf_header hdr; + u32 nr_types; + u32 types_size; + u32 data_size; + refcount_t refcnt; + u32 id; + struct callback_head rcu; + struct btf *base_btf; + u32 start_id; + u32 start_str_off; + char name[56]; + bool kernel_btf; +}; + +struct btf_type { + __u32 name_off; + __u32 info; + union { + __u32 size; + __u32 type; + }; +}; + +struct bpf_ksym { + long unsigned int start; + long unsigned int end; + char name[128]; + struct list_head lnode; + struct latch_tree_node tnode; + bool prog; +}; + +struct bpf_ctx_arg_aux; + +struct bpf_trampoline; + +struct bpf_jit_poke_descriptor; + +struct bpf_kfunc_desc_tab; + +struct bpf_prog_ops; + +struct btf_mod_pair; + +struct bpf_prog_offload; + +struct bpf_func_info_aux; + +struct bpf_prog_aux { + atomic64_t refcnt; + u32 used_map_cnt; + u32 used_btf_cnt; + u32 max_ctx_offset; + u32 max_pkt_offset; + u32 max_tp_access; + u32 stack_depth; + u32 id; + u32 func_cnt; + u32 func_idx; + u32 attach_btf_id; + u32 ctx_arg_info_size; + u32 
max_rdonly_access; + u32 max_rdwr_access; + struct btf *attach_btf; + const struct bpf_ctx_arg_aux *ctx_arg_info; + struct mutex dst_mutex; + struct bpf_prog *dst_prog; + struct bpf_trampoline *dst_trampoline; + enum bpf_prog_type saved_dst_prog_type; + enum bpf_attach_type saved_dst_attach_type; + bool verifier_zext; + bool offload_requested; + bool attach_btf_trace; + bool func_proto_unreliable; + bool sleepable; + bool tail_call_reachable; + struct hlist_node tramp_hlist; + const struct btf_type *attach_func_proto; + const char *attach_func_name; + struct bpf_prog **func; + void *jit_data; + struct bpf_jit_poke_descriptor *poke_tab; + struct bpf_kfunc_desc_tab *kfunc_tab; + u32 size_poke_tab; + struct bpf_ksym ksym; + const struct bpf_prog_ops *ops; + struct bpf_map **used_maps; + struct mutex used_maps_mutex; + struct btf_mod_pair *used_btfs; + struct bpf_prog *prog; + struct user_struct *user; + u64 load_time; + struct bpf_map *cgroup_storage[2]; + char name[16]; + void *security; + struct bpf_prog_offload *offload; + struct btf *btf; + struct bpf_func_info *func_info; + struct bpf_func_info_aux *func_info_aux; + struct bpf_line_info *linfo; + void **jited_linfo; + u32 func_info_cnt; + u32 nr_linfo; + u32 linfo_idx; + u32 num_exentries; + struct exception_table_entry *extable; + union { + struct work_struct work; + struct callback_head rcu; + }; +}; + +struct sock_filter { + __u16 code; + __u8 jt; + __u8 jf; + __u32 k; +}; + +struct bpf_prog_stats; + +struct sock_fprog_kern; + +struct bpf_prog { + u16 pages; + u16 jited: 1; + u16 jit_requested: 1; + u16 gpl_compatible: 1; + u16 cb_access: 1; + u16 dst_needed: 1; + u16 blinded: 1; + u16 is_func: 1; + u16 kprobe_override: 1; + u16 has_callchain_buf: 1; + u16 enforce_expected_attach_type: 1; + u16 call_get_stack: 1; + enum bpf_prog_type type; + enum bpf_attach_type expected_attach_type; + u32 len; + u32 jited_len; + u8 tag[8]; + struct bpf_prog_stats *stats; + int *active; + unsigned int (*bpf_func)(const void *, const struct bpf_insn *); + struct bpf_prog_aux *aux; + struct sock_fprog_kern *orig_prog; + struct sock_filter insns[0]; + struct bpf_insn insnsi[0]; +}; + +struct bpf_offloaded_map; + +struct bpf_map_dev_ops { + int (*map_get_next_key)(struct bpf_offloaded_map *, void *, void *); + int (*map_lookup_elem)(struct bpf_offloaded_map *, void *, void *); + int (*map_update_elem)(struct bpf_offloaded_map *, void *, void *, u64); + int (*map_delete_elem)(struct bpf_offloaded_map *, void *); +}; + +struct bpf_offloaded_map { + struct bpf_map map; + struct net_device *netdev; + const struct bpf_map_dev_ops *dev_ops; + void *dev_priv; + struct list_head offloads; + long: 64; + long: 64; + long: 64; +}; + +struct net_device_stats { + long unsigned int rx_packets; + long unsigned int tx_packets; + long unsigned int rx_bytes; + long unsigned int tx_bytes; + long unsigned int rx_errors; + long unsigned int tx_errors; + long unsigned int rx_dropped; + long unsigned int tx_dropped; + long unsigned int multicast; + long unsigned int collisions; + long unsigned int rx_length_errors; + long unsigned int rx_over_errors; + long unsigned int rx_crc_errors; + long unsigned int rx_frame_errors; + long unsigned int rx_fifo_errors; + long unsigned int rx_missed_errors; + long unsigned int tx_aborted_errors; + long unsigned int tx_carrier_errors; + long unsigned int tx_fifo_errors; + long unsigned int tx_heartbeat_errors; + long unsigned int tx_window_errors; + long unsigned int rx_compressed; + long unsigned int tx_compressed; +}; + +struct 
netdev_hw_addr_list { + struct list_head list; + int count; +}; + +struct tipc_bearer; + +struct dn_dev; + +struct mpls_dev; + +enum rx_handler_result { + RX_HANDLER_CONSUMED = 0, + RX_HANDLER_ANOTHER = 1, + RX_HANDLER_EXACT = 2, + RX_HANDLER_PASS = 3, +}; + +typedef enum rx_handler_result rx_handler_result_t; + +typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **); + +enum netdev_ml_priv_type { + ML_PRIV_NONE = 0, + ML_PRIV_CAN = 1, +}; + +struct pcpu_dstats; + +struct garp_port; + +struct mrp_port; + +struct netdev_tc_txq { + u16 count; + u16 offset; +}; + +struct macsec_ops; + +struct udp_tunnel_nic; + +struct bpf_xdp_link; + +struct bpf_xdp_entity { + struct bpf_prog *prog; + struct bpf_xdp_link *link; +}; + +struct netdev_name_node; + +struct dev_ifalias; + +struct net_device_ops; + +struct iw_handler_def; + +struct iw_public_data; + +struct ethtool_ops; + +struct l3mdev_ops; + +struct ndisc_ops; + +struct xfrmdev_ops; + +struct tlsdev_ops; + +struct header_ops; + +struct vlan_info; + +struct dsa_port; + +struct in_device; + +struct inet6_dev; + +struct wireless_dev; + +struct wpan_dev; + +struct netdev_rx_queue; + +struct mini_Qdisc; + +struct netdev_queue; + +struct cpu_rmap; + +struct Qdisc; + +struct xdp_dev_bulk_queue; + +struct xps_dev_maps; + +struct netpoll_info; + +struct pcpu_lstats; + +struct pcpu_sw_netstats; + +struct rtnl_link_ops; + +struct dcbnl_rtnl_ops; + +struct netprio_map; + +struct phy_device; + +struct sfp_bus; + +struct udp_tunnel_nic_info; + +struct net_device { + char name[16]; + struct netdev_name_node *name_node; + struct dev_ifalias *ifalias; + long unsigned int mem_end; + long unsigned int mem_start; + long unsigned int base_addr; + long unsigned int state; + struct list_head dev_list; + struct list_head napi_list; + struct list_head unreg_list; + struct list_head close_list; + struct list_head ptype_all; + struct list_head ptype_specific; + struct { + struct list_head upper; + struct list_head lower; + } adj_list; + unsigned int flags; + unsigned int priv_flags; + const struct net_device_ops *netdev_ops; + int ifindex; + short unsigned int gflags; + short unsigned int hard_header_len; + unsigned int mtu; + short unsigned int needed_headroom; + short unsigned int needed_tailroom; + netdev_features_t features; + netdev_features_t hw_features; + netdev_features_t wanted_features; + netdev_features_t vlan_features; + netdev_features_t hw_enc_features; + netdev_features_t mpls_features; + netdev_features_t gso_partial_features; + unsigned int min_mtu; + unsigned int max_mtu; + short unsigned int type; + unsigned char min_header_len; + unsigned char name_assign_type; + int group; + struct net_device_stats stats; + atomic_long_t rx_dropped; + atomic_long_t tx_dropped; + atomic_long_t rx_nohandler; + atomic_t carrier_up_count; + atomic_t carrier_down_count; + const struct iw_handler_def *wireless_handlers; + struct iw_public_data *wireless_data; + const struct ethtool_ops *ethtool_ops; + const struct l3mdev_ops *l3mdev_ops; + const struct ndisc_ops *ndisc_ops; + const struct xfrmdev_ops *xfrmdev_ops; + const struct tlsdev_ops *tlsdev_ops; + const struct header_ops *header_ops; + unsigned char operstate; + unsigned char link_mode; + unsigned char if_port; + unsigned char dma; + unsigned char perm_addr[32]; + unsigned char addr_assign_type; + unsigned char addr_len; + unsigned char upper_level; + unsigned char lower_level; + short unsigned int neigh_priv_len; + short unsigned int dev_id; + short unsigned int dev_port; + short unsigned int padded; + 
spinlock_t addr_list_lock; + int irq; + struct netdev_hw_addr_list uc; + struct netdev_hw_addr_list mc; + struct netdev_hw_addr_list dev_addrs; + struct kset *queues_kset; + unsigned int promiscuity; + unsigned int allmulti; + bool uc_promisc; + struct vlan_info *vlan_info; + struct dsa_port *dsa_ptr; + struct tipc_bearer *tipc_ptr; + void *atalk_ptr; + struct in_device *ip_ptr; + struct dn_dev *dn_ptr; + struct inet6_dev *ip6_ptr; + void *ax25_ptr; + struct wireless_dev *ieee80211_ptr; + struct wpan_dev *ieee802154_ptr; + struct mpls_dev *mpls_ptr; + unsigned char *dev_addr; + struct netdev_rx_queue *_rx; + unsigned int num_rx_queues; + unsigned int real_num_rx_queues; + struct bpf_prog *xdp_prog; + long unsigned int gro_flush_timeout; + int napi_defer_hard_irqs; + rx_handler_func_t *rx_handler; + void *rx_handler_data; + struct mini_Qdisc *miniq_ingress; + struct netdev_queue *ingress_queue; + struct nf_hook_entries *nf_hooks_ingress; + unsigned char broadcast[32]; + struct cpu_rmap *rx_cpu_rmap; + struct hlist_node index_hlist; + struct netdev_queue *_tx; + unsigned int num_tx_queues; + unsigned int real_num_tx_queues; + struct Qdisc *qdisc; + unsigned int tx_queue_len; + spinlock_t tx_global_lock; + struct xdp_dev_bulk_queue *xdp_bulkq; + struct xps_dev_maps *xps_maps[2]; + struct mini_Qdisc *miniq_egress; + struct hlist_head qdisc_hash[16]; + struct timer_list watchdog_timer; + int watchdog_timeo; + u32 proto_down_reason; + struct list_head todo_list; + int *pcpu_refcnt; + struct list_head link_watch_list; + enum { + NETREG_UNINITIALIZED = 0, + NETREG_REGISTERED = 1, + NETREG_UNREGISTERING = 2, + NETREG_UNREGISTERED = 3, + NETREG_RELEASED = 4, + NETREG_DUMMY = 5, + } reg_state: 8; + bool dismantle; + enum { + RTNL_LINK_INITIALIZED = 0, + RTNL_LINK_INITIALIZING = 1, + } rtnl_link_state: 16; + bool needs_free_netdev; + void (*priv_destructor)(struct net_device *); + struct netpoll_info *npinfo; + possible_net_t nd_net; + void *ml_priv; + enum netdev_ml_priv_type ml_priv_type; + union { + struct pcpu_lstats *lstats; + struct pcpu_sw_netstats *tstats; + struct pcpu_dstats *dstats; + }; + struct garp_port *garp_port; + struct mrp_port *mrp_port; + struct device dev; + const struct attribute_group *sysfs_groups[4]; + const struct attribute_group *sysfs_rx_queue_group; + const struct rtnl_link_ops *rtnl_link_ops; + unsigned int gso_max_size; + u16 gso_max_segs; + const struct dcbnl_rtnl_ops *dcbnl_ops; + s16 num_tc; + struct netdev_tc_txq tc_to_txq[16]; + u8 prio_tc_map[16]; + unsigned int fcoe_ddp_xid; + struct netprio_map *priomap; + struct phy_device *phydev; + struct sfp_bus *sfp_bus; + struct lock_class_key *qdisc_tx_busylock; + struct lock_class_key *qdisc_running_key; + bool proto_down; + unsigned int wol_enabled: 1; + unsigned int threaded: 1; + struct list_head net_notifier_list; + const struct macsec_ops *macsec_ops; + const struct udp_tunnel_nic_info *udp_tunnel_nic_info; + struct udp_tunnel_nic *udp_tunnel_nic; + struct bpf_xdp_entity xdp_state[3]; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +enum bpf_reg_type { + NOT_INIT = 0, + SCALAR_VALUE = 1, + PTR_TO_CTX = 2, + CONST_PTR_TO_MAP = 3, + PTR_TO_MAP_VALUE = 4, + PTR_TO_MAP_VALUE_OR_NULL = 5, + PTR_TO_STACK = 6, + PTR_TO_PACKET_META = 7, + PTR_TO_PACKET = 8, + PTR_TO_PACKET_END = 9, + PTR_TO_FLOW_KEYS = 10, + PTR_TO_SOCKET = 11, + PTR_TO_SOCKET_OR_NULL = 12, + PTR_TO_SOCK_COMMON = 13, + PTR_TO_SOCK_COMMON_OR_NULL = 14, + PTR_TO_TCP_SOCK = 15, + PTR_TO_TCP_SOCK_OR_NULL = 16, + PTR_TO_TP_BUFFER = 17, + PTR_TO_XDP_SOCK = 
18, + PTR_TO_BTF_ID = 19, + PTR_TO_BTF_ID_OR_NULL = 20, + PTR_TO_MEM = 21, + PTR_TO_MEM_OR_NULL = 22, + PTR_TO_RDONLY_BUF = 23, + PTR_TO_RDONLY_BUF_OR_NULL = 24, + PTR_TO_RDWR_BUF = 25, + PTR_TO_RDWR_BUF_OR_NULL = 26, + PTR_TO_PERCPU_BTF_ID = 27, + PTR_TO_FUNC = 28, + PTR_TO_MAP_KEY = 29, + __BPF_REG_TYPE_MAX = 30, +}; + +struct bpf_prog_ops { + int (*test_run)(struct bpf_prog *, const union bpf_attr *, union bpf_attr *); +}; + +struct bpf_offload_dev; + +struct bpf_prog_offload { + struct bpf_prog *prog; + struct net_device *netdev; + struct bpf_offload_dev *offdev; + void *dev_priv; + struct list_head offloads; + bool dev_state; + bool opt_failed; + void *jited_image; + u32 jited_len; +}; + +struct btf_func_model { + u8 ret_size; + u8 nr_args; + u8 arg_size[12]; +}; + +struct bpf_tramp_image { + void *image; + struct bpf_ksym ksym; + struct percpu_ref pcref; + void *ip_after_call; + void *ip_epilogue; + union { + struct callback_head rcu; + struct work_struct work; + }; +}; + +struct bpf_trampoline { + struct hlist_node hlist; + struct mutex mutex; + refcount_t refcnt; + u64 key; + struct { + struct btf_func_model model; + void *addr; + bool ftrace_managed; + } func; + struct bpf_prog *extension_prog; + struct hlist_head progs_hlist[3]; + int progs_cnt[3]; + struct bpf_tramp_image *cur_image; + u64 selector; + struct module *mod; +}; + +struct bpf_func_info_aux { + u16 linkage; + bool unreliable; +}; + +struct bpf_jit_poke_descriptor { + void *tailcall_target; + void *tailcall_bypass; + void *bypass_addr; + void *aux; + union { + struct { + struct bpf_map *map; + u32 key; + } tail_call; + }; + bool tailcall_target_stable; + u8 adj_off; + u16 reason; + u32 insn_idx; +}; + +struct bpf_ctx_arg_aux { + u32 offset; + enum bpf_reg_type reg_type; + u32 btf_id; +}; + +struct btf_mod_pair { + struct btf *btf; + struct module *module; +}; + +typedef unsigned int sk_buff_data_t; + +struct skb_ext; + +struct sk_buff { + union { + struct { + struct sk_buff *next; + struct sk_buff *prev; + union { + struct net_device *dev; + long unsigned int dev_scratch; + }; + }; + struct rb_node rbnode; + struct list_head list; + }; + union { + struct sock *sk; + int ip_defrag_offset; + }; + union { + ktime_t tstamp; + u64 skb_mstamp_ns; + }; + char cb[48]; + union { + struct { + long unsigned int _skb_refdst; + void (*destructor)(struct sk_buff *); + }; + struct list_head tcp_tsorted_anchor; + long unsigned int _sk_redir; + }; + long unsigned int _nfct; + unsigned int len; + unsigned int data_len; + __u16 mac_len; + __u16 hdr_len; + __u16 queue_mapping; + __u8 __cloned_offset[0]; + __u8 cloned: 1; + __u8 nohdr: 1; + __u8 fclone: 2; + __u8 peeked: 1; + __u8 head_frag: 1; + __u8 pfmemalloc: 1; + __u8 active_extensions; + __u32 headers_start[0]; + __u8 __pkt_type_offset[0]; + __u8 pkt_type: 3; + __u8 ignore_df: 1; + __u8 nf_trace: 1; + __u8 ip_summed: 2; + __u8 ooo_okay: 1; + __u8 l4_hash: 1; + __u8 sw_hash: 1; + __u8 wifi_acked_valid: 1; + __u8 wifi_acked: 1; + __u8 no_fcs: 1; + __u8 encapsulation: 1; + __u8 encap_hdr_csum: 1; + __u8 csum_valid: 1; + __u8 __pkt_vlan_present_offset[0]; + __u8 vlan_present: 1; + __u8 csum_complete_sw: 1; + __u8 csum_level: 2; + __u8 csum_not_inet: 1; + __u8 dst_pending_confirm: 1; + __u8 ndisc_nodetype: 2; + __u8 ipvs_property: 1; + __u8 inner_protocol_type: 1; + __u8 remcsum_offload: 1; + __u8 offload_fwd_mark: 1; + __u8 offload_l3_fwd_mark: 1; + __u8 tc_skip_classify: 1; + __u8 tc_at_ingress: 1; + __u8 redirected: 1; + __u8 from_ingress: 1; + __u8 decrypted: 1; + __u16 tc_index; + 
union { + __wsum csum; + struct { + __u16 csum_start; + __u16 csum_offset; + }; + }; + __u32 priority; + int skb_iif; + __u32 hash; + __be16 vlan_proto; + __u16 vlan_tci; + union { + unsigned int napi_id; + unsigned int sender_cpu; + }; + __u32 secmark; + union { + __u32 mark; + __u32 reserved_tailroom; + }; + union { + __be16 inner_protocol; + __u8 inner_ipproto; + }; + __u16 inner_transport_header; + __u16 inner_network_header; + __u16 inner_mac_header; + __be16 protocol; + __u16 transport_header; + __u16 network_header; + __u16 mac_header; + __u32 headers_end[0]; + sk_buff_data_t tail; + sk_buff_data_t end; + unsigned char *head; + unsigned char *data; + unsigned int truesize; + refcount_t users; + struct skb_ext *extensions; +}; + +struct scatterlist { + long unsigned int page_link; + unsigned int offset; + unsigned int length; + dma_addr_t dma_address; + unsigned int dma_length; +}; + +enum { + Root_NFS = 255, + Root_CIFS = 254, + Root_RAM0 = 1048576, + Root_RAM1 = 1048577, + Root_FD0 = 2097152, + Root_HDA1 = 3145729, + Root_HDA2 = 3145730, + Root_SDA1 = 8388609, + Root_SDA2 = 8388610, + Root_HDC1 = 23068673, + Root_SR0 = 11534336, +}; + +struct flowi_tunnel { + __be64 tun_id; +}; + +struct flowi_common { + int flowic_oif; + int flowic_iif; + __u32 flowic_mark; + __u8 flowic_tos; + __u8 flowic_scope; + __u8 flowic_proto; + __u8 flowic_flags; + __u32 flowic_secid; + kuid_t flowic_uid; + struct flowi_tunnel flowic_tun_key; + __u32 flowic_multipath_hash; +}; + +union flowi_uli { + struct { + __be16 dport; + __be16 sport; + } ports; + struct { + __u8 type; + __u8 code; + } icmpt; + struct { + __le16 dport; + __le16 sport; + } dnports; + __be32 gre_key; + struct { + __u8 type; + } mht; +}; + +struct flowi4 { + struct flowi_common __fl_common; + __be32 saddr; + __be32 daddr; + union flowi_uli uli; +}; + +struct flowi6 { + struct flowi_common __fl_common; + struct in6_addr daddr; + struct in6_addr saddr; + __be32 flowlabel; + union flowi_uli uli; + __u32 mp_hash; +}; + +struct flowidn { + struct flowi_common __fl_common; + __le16 daddr; + __le16 saddr; + union flowi_uli uli; +}; + +struct flowi { + union { + struct flowi_common __fl_common; + struct flowi4 ip4; + struct flowi6 ip6; + struct flowidn dn; + } u; +}; + +struct ipstats_mib { + u64 mibs[37]; + struct u64_stats_sync syncp; +}; + +struct icmp_mib { + long unsigned int mibs[28]; +}; + +struct icmpmsg_mib { + atomic_long_t mibs[512]; +}; + +struct icmpv6_mib { + long unsigned int mibs[6]; +}; + +struct icmpv6_mib_device { + atomic_long_t mibs[6]; +}; + +struct icmpv6msg_mib { + atomic_long_t mibs[512]; +}; + +struct icmpv6msg_mib_device { + atomic_long_t mibs[512]; +}; + +struct tcp_mib { + long unsigned int mibs[16]; +}; + +struct udp_mib { + long unsigned int mibs[10]; +}; + +struct linux_mib { + long unsigned int mibs[124]; +}; + +struct linux_xfrm_mib { + long unsigned int mibs[29]; +}; + +struct linux_tls_mib { + long unsigned int mibs[11]; +}; + +struct inet_frags; + +struct fqdir { + long int high_thresh; + long int low_thresh; + int timeout; + int max_dist; + struct inet_frags *f; + struct net *net; + bool dead; + long: 56; + long: 64; + long: 64; + struct rhashtable rhashtable; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + atomic_long_t mem; + struct work_struct destroy_work; + struct llist_node free_list; + long: 64; + long: 64; +}; + +struct inet_frag_queue; + +struct inet_frags { + unsigned int qsize; + void (*constructor)(struct inet_frag_queue *, const void *); + void 
(*destructor)(struct inet_frag_queue *); + void (*frag_expire)(struct timer_list *); + struct kmem_cache *frags_cachep; + const char *frags_cache_name; + struct rhashtable_params rhash_params; + refcount_t refcnt; + struct completion completion; +}; + +struct frag_v4_compare_key { + __be32 saddr; + __be32 daddr; + u32 user; + u32 vif; + __be16 id; + u16 protocol; +}; + +struct frag_v6_compare_key { + struct in6_addr saddr; + struct in6_addr daddr; + u32 user; + __be32 id; + u32 iif; +}; + +struct inet_frag_queue { + struct rhash_head node; + union { + struct frag_v4_compare_key v4; + struct frag_v6_compare_key v6; + } key; + struct timer_list timer; + spinlock_t lock; + refcount_t refcnt; + struct rb_root rb_fragments; + struct sk_buff *fragments_tail; + struct sk_buff *last_run_head; + ktime_t stamp; + int len; + int meat; + __u8 flags; + u16 max_size; + struct fqdir *fqdir; + struct callback_head rcu; +}; + +struct fib_rule; + +struct fib_lookup_arg; + +struct fib_rule_hdr; + +struct nlattr; + +struct netlink_ext_ack; + +struct nla_policy; + +struct fib_rules_ops { + int family; + struct list_head list; + int rule_size; + int addr_size; + int unresolved_rules; + int nr_goto_rules; + unsigned int fib_rules_seq; + int (*action)(struct fib_rule *, struct flowi *, int, struct fib_lookup_arg *); + bool (*suppress)(struct fib_rule *, struct fib_lookup_arg *); + int (*match)(struct fib_rule *, struct flowi *, int); + int (*configure)(struct fib_rule *, struct sk_buff *, struct fib_rule_hdr *, struct nlattr **, struct netlink_ext_ack *); + int (*delete)(struct fib_rule *); + int (*compare)(struct fib_rule *, struct fib_rule_hdr *, struct nlattr **); + int (*fill)(struct fib_rule *, struct sk_buff *, struct fib_rule_hdr *); + size_t (*nlmsg_payload)(struct fib_rule *); + void (*flush_cache)(struct fib_rules_ops *); + int nlgroup; + const struct nla_policy *policy; + struct list_head rules_list; + struct module *owner; + struct net *fro_net; + struct callback_head rcu; +}; + +enum tcp_ca_event { + CA_EVENT_TX_START = 0, + CA_EVENT_CWND_RESTART = 1, + CA_EVENT_COMPLETE_CWR = 2, + CA_EVENT_LOSS = 3, + CA_EVENT_ECN_NO_CE = 4, + CA_EVENT_ECN_IS_CE = 5, +}; + +struct ack_sample; + +struct rate_sample; + +union tcp_cc_info; + +struct tcp_congestion_ops { + u32 (*ssthresh)(struct sock *); + void (*cong_avoid)(struct sock *, u32, u32); + void (*set_state)(struct sock *, u8); + void (*cwnd_event)(struct sock *, enum tcp_ca_event); + void (*in_ack_event)(struct sock *, u32); + void (*pkts_acked)(struct sock *, const struct ack_sample *); + u32 (*min_tso_segs)(struct sock *); + void (*cong_control)(struct sock *, const struct rate_sample *); + u32 (*undo_cwnd)(struct sock *); + u32 (*sndbuf_expand)(struct sock *); + size_t (*get_info)(struct sock *, u32, int *, union tcp_cc_info *); + char name[16]; + struct module *owner; + struct list_head list; + u32 key; + u32 flags; + void (*init)(struct sock *); + void (*release)(struct sock *); + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct fib_notifier_ops { + int family; + struct list_head list; + unsigned int (*fib_seq_read)(struct net *); + int (*fib_dump)(struct net *, struct notifier_block *, struct netlink_ext_ack *); + struct module *owner; + struct callback_head rcu; +}; + +struct xfrm_state; + +struct lwtunnel_state; + +struct dst_entry { + struct net_device *dev; + struct dst_ops *ops; + long unsigned int _metrics; + long unsigned int expires; + struct xfrm_state *xfrm; + int (*input)(struct sk_buff *); + int (*output)(struct net 
*, struct sock *, struct sk_buff *); + short unsigned int flags; + short int obsolete; + short unsigned int header_len; + short unsigned int trailer_len; + atomic_t __refcnt; + int __use; + long unsigned int lastuse; + struct lwtunnel_state *lwtstate; + struct callback_head callback_head; + short int error; + short int __pad; + __u32 tclassid; +}; + +struct hh_cache { + unsigned int hh_len; + seqlock_t hh_lock; + long unsigned int hh_data[16]; +}; + +struct neigh_table; + +struct neigh_parms; + +struct neigh_ops; + +struct neighbour { + struct neighbour *next; + struct neigh_table *tbl; + struct neigh_parms *parms; + long unsigned int confirmed; + long unsigned int updated; + rwlock_t lock; + refcount_t refcnt; + unsigned int arp_queue_len_bytes; + struct sk_buff_head arp_queue; + struct timer_list timer; + long unsigned int used; + atomic_t probes; + __u8 flags; + __u8 nud_state; + __u8 type; + __u8 dead; + u8 protocol; + seqlock_t ha_lock; + int: 32; + unsigned char ha[32]; + struct hh_cache hh; + int (*output)(struct neighbour *, struct sk_buff *); + const struct neigh_ops *ops; + struct list_head gc_list; + struct callback_head rcu; + struct net_device *dev; + u8 primary_key[0]; +}; + +struct ipv6_stable_secret { + bool initialized; + struct in6_addr secret; +}; + +struct ipv6_devconf { + __s32 forwarding; + __s32 hop_limit; + __s32 mtu6; + __s32 accept_ra; + __s32 accept_redirects; + __s32 autoconf; + __s32 dad_transmits; + __s32 rtr_solicits; + __s32 rtr_solicit_interval; + __s32 rtr_solicit_max_interval; + __s32 rtr_solicit_delay; + __s32 force_mld_version; + __s32 mldv1_unsolicited_report_interval; + __s32 mldv2_unsolicited_report_interval; + __s32 use_tempaddr; + __s32 temp_valid_lft; + __s32 temp_prefered_lft; + __s32 regen_max_retry; + __s32 max_desync_factor; + __s32 max_addresses; + __s32 accept_ra_defrtr; + __u32 ra_defrtr_metric; + __s32 accept_ra_min_hop_limit; + __s32 accept_ra_pinfo; + __s32 ignore_routes_with_linkdown; + __s32 accept_ra_rtr_pref; + __s32 rtr_probe_interval; + __s32 accept_ra_rt_info_min_plen; + __s32 accept_ra_rt_info_max_plen; + __s32 proxy_ndp; + __s32 accept_source_route; + __s32 accept_ra_from_local; + __s32 mc_forwarding; + __s32 disable_ipv6; + __s32 drop_unicast_in_l2_multicast; + __s32 accept_dad; + __s32 force_tllao; + __s32 ndisc_notify; + __s32 suppress_frag_ndisc; + __s32 accept_ra_mtu; + __s32 drop_unsolicited_na; + struct ipv6_stable_secret stable_secret; + __s32 use_oif_addrs_only; + __s32 keep_addr_on_down; + __s32 seg6_enabled; + __s32 seg6_require_hmac; + __u32 enhanced_dad; + __u32 addr_gen_mode; + __s32 disable_policy; + __s32 ndisc_tclass; + __s32 rpl_seg_enabled; + struct ctl_table_header *sysctl_header; +}; + +struct nf_queue_entry; + +struct nf_queue_handler { + int (*outfn)(struct nf_queue_entry *, unsigned int); + void (*nf_hook_drop)(struct net *); +}; + +enum nf_log_type { + NF_LOG_TYPE_LOG = 0, + NF_LOG_TYPE_ULOG = 1, + NF_LOG_TYPE_MAX = 2, +}; + +typedef u8 u_int8_t; + +struct nf_loginfo; + +typedef void nf_logfn(struct net *, u_int8_t, unsigned int, const struct sk_buff *, const struct net_device *, const struct net_device *, const struct nf_loginfo *, const char *); + +struct nf_logger { + char *name; + enum nf_log_type type; + nf_logfn *logfn; + struct module *me; +}; + +struct hlist_nulls_head { + struct hlist_nulls_node *first; +}; + +struct ip_conntrack_stat { + unsigned int found; + unsigned int invalid; + unsigned int insert; + unsigned int insert_failed; + unsigned int clash_resolve; + unsigned int drop; + unsigned 
int early_drop; + unsigned int error; + unsigned int expect_new; + unsigned int expect_create; + unsigned int expect_delete; + unsigned int search_restart; +}; + +struct ct_pcpu { + spinlock_t lock; + struct hlist_nulls_head unconfirmed; + struct hlist_nulls_head dying; +}; + +typedef struct { + union { + void *kernel; + void *user; + }; + bool is_kernel: 1; +} sockptr_t; + +typedef enum { + SS_FREE = 0, + SS_UNCONNECTED = 1, + SS_CONNECTING = 2, + SS_CONNECTED = 3, + SS_DISCONNECTING = 4, +} socket_state; + +struct socket_wq { + wait_queue_head_t wait; + struct fasync_struct *fasync_list; + long unsigned int flags; + struct callback_head rcu; + long: 64; +}; + +struct proto_ops; + +struct socket { + socket_state state; + short int type; + long unsigned int flags; + struct file *file; + struct sock *sk; + const struct proto_ops *ops; + long: 64; + long: 64; + long: 64; + struct socket_wq wq; +}; + +typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *, unsigned int, size_t); + +struct proto_ops { + int family; + struct module *owner; + int (*release)(struct socket *); + int (*bind)(struct socket *, struct sockaddr *, int); + int (*connect)(struct socket *, struct sockaddr *, int, int); + int (*socketpair)(struct socket *, struct socket *); + int (*accept)(struct socket *, struct socket *, int, bool); + int (*getname)(struct socket *, struct sockaddr *, int); + __poll_t (*poll)(struct file *, struct socket *, struct poll_table_struct *); + int (*ioctl)(struct socket *, unsigned int, long unsigned int); + int (*compat_ioctl)(struct socket *, unsigned int, long unsigned int); + int (*gettstamp)(struct socket *, void *, bool, bool); + int (*listen)(struct socket *, int); + int (*shutdown)(struct socket *, int); + int (*setsockopt)(struct socket *, int, int, sockptr_t, unsigned int); + int (*getsockopt)(struct socket *, int, int, char *, int *); + void (*show_fdinfo)(struct seq_file *, struct socket *); + int (*sendmsg)(struct socket *, struct msghdr *, size_t); + int (*recvmsg)(struct socket *, struct msghdr *, size_t, int); + int (*mmap)(struct file *, struct socket *, struct vm_area_struct *); + ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int); + ssize_t (*splice_read)(struct socket *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); + int (*set_peek_off)(struct sock *, int); + int (*peek_len)(struct socket *); + int (*read_sock)(struct sock *, read_descriptor_t *, sk_read_actor_t); + int (*sendpage_locked)(struct sock *, struct page *, int, size_t, int); + int (*sendmsg_locked)(struct sock *, struct msghdr *, size_t); + int (*set_rcvlowat)(struct sock *, int); +}; + +struct pipe_buf_operations; + +struct pipe_buffer { + struct page *page; + unsigned int offset; + unsigned int len; + const struct pipe_buf_operations *ops; + unsigned int flags; + long unsigned int private; +}; + +struct pipe_buf_operations { + int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); + void (*release)(struct pipe_inode_info *, struct pipe_buffer *); + bool (*try_steal)(struct pipe_inode_info *, struct pipe_buffer *); + bool (*get)(struct pipe_inode_info *, struct pipe_buffer *); +}; + +struct skb_ext { + refcount_t refcnt; + u8 offset[4]; + u8 chunks; + long: 56; + char data[0]; +}; + +struct dql { + unsigned int num_queued; + unsigned int adj_limit; + unsigned int last_obj_cnt; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + unsigned int limit; + unsigned int num_completed; + unsigned int prev_ovlimit; + unsigned int 
prev_num_queued; + unsigned int prev_last_obj_cnt; + unsigned int lowest_slack; + long unsigned int slack_start_time; + unsigned int max_limit; + unsigned int min_limit; + unsigned int slack_hold_time; + long: 32; + long: 64; + long: 64; +}; + +struct ieee_ets { + __u8 willing; + __u8 ets_cap; + __u8 cbs; + __u8 tc_tx_bw[8]; + __u8 tc_rx_bw[8]; + __u8 tc_tsa[8]; + __u8 prio_tc[8]; + __u8 tc_reco_bw[8]; + __u8 tc_reco_tsa[8]; + __u8 reco_prio_tc[8]; +}; + +struct ieee_maxrate { + __u64 tc_maxrate[8]; +}; + +struct ieee_qcn { + __u8 rpg_enable[8]; + __u32 rppp_max_rps[8]; + __u32 rpg_time_reset[8]; + __u32 rpg_byte_reset[8]; + __u32 rpg_threshold[8]; + __u32 rpg_max_rate[8]; + __u32 rpg_ai_rate[8]; + __u32 rpg_hai_rate[8]; + __u32 rpg_gd[8]; + __u32 rpg_min_dec_fac[8]; + __u32 rpg_min_rate[8]; + __u32 cndd_state_machine[8]; +}; + +struct ieee_qcn_stats { + __u64 rppp_rp_centiseconds[8]; + __u32 rppp_created_rps[8]; +}; + +struct ieee_pfc { + __u8 pfc_cap; + __u8 pfc_en; + __u8 mbc; + __u16 delay; + __u64 requests[8]; + __u64 indications[8]; +}; + +struct dcbnl_buffer { + __u8 prio2buffer[8]; + __u32 buffer_size[8]; + __u32 total_size; +}; + +struct cee_pg { + __u8 willing; + __u8 error; + __u8 pg_en; + __u8 tcs_supported; + __u8 pg_bw[8]; + __u8 prio_pg[8]; +}; + +struct cee_pfc { + __u8 willing; + __u8 error; + __u8 pfc_en; + __u8 tcs_supported; +}; + +struct dcb_app { + __u8 selector; + __u8 priority; + __u16 protocol; +}; + +struct dcb_peer_app_info { + __u8 willing; + __u8 error; +}; + +struct dcbnl_rtnl_ops { + int (*ieee_getets)(struct net_device *, struct ieee_ets *); + int (*ieee_setets)(struct net_device *, struct ieee_ets *); + int (*ieee_getmaxrate)(struct net_device *, struct ieee_maxrate *); + int (*ieee_setmaxrate)(struct net_device *, struct ieee_maxrate *); + int (*ieee_getqcn)(struct net_device *, struct ieee_qcn *); + int (*ieee_setqcn)(struct net_device *, struct ieee_qcn *); + int (*ieee_getqcnstats)(struct net_device *, struct ieee_qcn_stats *); + int (*ieee_getpfc)(struct net_device *, struct ieee_pfc *); + int (*ieee_setpfc)(struct net_device *, struct ieee_pfc *); + int (*ieee_getapp)(struct net_device *, struct dcb_app *); + int (*ieee_setapp)(struct net_device *, struct dcb_app *); + int (*ieee_delapp)(struct net_device *, struct dcb_app *); + int (*ieee_peer_getets)(struct net_device *, struct ieee_ets *); + int (*ieee_peer_getpfc)(struct net_device *, struct ieee_pfc *); + u8 (*getstate)(struct net_device *); + u8 (*setstate)(struct net_device *, u8); + void (*getpermhwaddr)(struct net_device *, u8 *); + void (*setpgtccfgtx)(struct net_device *, int, u8, u8, u8, u8); + void (*setpgbwgcfgtx)(struct net_device *, int, u8); + void (*setpgtccfgrx)(struct net_device *, int, u8, u8, u8, u8); + void (*setpgbwgcfgrx)(struct net_device *, int, u8); + void (*getpgtccfgtx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); + void (*getpgbwgcfgtx)(struct net_device *, int, u8 *); + void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); + void (*getpgbwgcfgrx)(struct net_device *, int, u8 *); + void (*setpfccfg)(struct net_device *, int, u8); + void (*getpfccfg)(struct net_device *, int, u8 *); + u8 (*setall)(struct net_device *); + u8 (*getcap)(struct net_device *, int, u8 *); + int (*getnumtcs)(struct net_device *, int, u8 *); + int (*setnumtcs)(struct net_device *, int, u8); + u8 (*getpfcstate)(struct net_device *); + void (*setpfcstate)(struct net_device *, u8); + void (*getbcncfg)(struct net_device *, int, u32 *); + void (*setbcncfg)(struct net_device *, 
int, u32); + void (*getbcnrp)(struct net_device *, int, u8 *); + void (*setbcnrp)(struct net_device *, int, u8); + int (*setapp)(struct net_device *, u8, u16, u8); + int (*getapp)(struct net_device *, u8, u16); + u8 (*getfeatcfg)(struct net_device *, int, u8 *); + u8 (*setfeatcfg)(struct net_device *, int, u8); + u8 (*getdcbx)(struct net_device *); + u8 (*setdcbx)(struct net_device *, u8); + int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *, u16 *); + int (*peer_getapptable)(struct net_device *, struct dcb_app *); + int (*cee_peer_getpg)(struct net_device *, struct cee_pg *); + int (*cee_peer_getpfc)(struct net_device *, struct cee_pfc *); + int (*dcbnl_getbuffer)(struct net_device *, struct dcbnl_buffer *); + int (*dcbnl_setbuffer)(struct net_device *, struct dcbnl_buffer *); +}; + +struct netprio_map { + struct callback_head rcu; + u32 priomap_len; + u32 priomap[0]; +}; + +struct xdp_mem_info { + u32 type; + u32 id; +}; + +struct xdp_rxq_info { + struct net_device *dev; + u32 queue_index; + u32 reg_state; + struct xdp_mem_info mem; + unsigned int napi_id; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct xdp_frame { + void *data; + u16 len; + u16 headroom; + u32 metasize: 8; + u32 frame_sz: 24; + struct xdp_mem_info mem; + struct net_device *dev_rx; +}; + +struct nlmsghdr { + __u32 nlmsg_len; + __u16 nlmsg_type; + __u16 nlmsg_flags; + __u32 nlmsg_seq; + __u32 nlmsg_pid; +}; + +struct nlattr { + __u16 nla_len; + __u16 nla_type; +}; + +struct netlink_ext_ack { + const char *_msg; + const struct nlattr *bad_attr; + const struct nla_policy *policy; + u8 cookie[20]; + u8 cookie_len; +}; + +struct netlink_range_validation; + +struct netlink_range_validation_signed; + +struct nla_policy { + u8 type; + u8 validation_type; + u16 len; + union { + const u32 bitfield32_valid; + const u32 mask; + const char *reject_message; + const struct nla_policy *nested_policy; + struct netlink_range_validation *range; + struct netlink_range_validation_signed *range_signed; + struct { + s16 min; + s16 max; + }; + int (*validate)(const struct nlattr *, struct netlink_ext_ack *); + u16 strict_start_type; + }; +}; + +struct netlink_callback { + struct sk_buff *skb; + const struct nlmsghdr *nlh; + int (*dump)(struct sk_buff *, struct netlink_callback *); + int (*done)(struct netlink_callback *); + void *data; + struct module *module; + struct netlink_ext_ack *extack; + u16 family; + u16 answer_flags; + u32 min_dump_alloc; + unsigned int prev_seq; + unsigned int seq; + bool strict_check; + union { + u8 ctx[48]; + long int args[6]; + }; +}; + +struct ndmsg { + __u8 ndm_family; + __u8 ndm_pad1; + __u16 ndm_pad2; + __s32 ndm_ifindex; + __u16 ndm_state; + __u8 ndm_flags; + __u8 ndm_type; +}; + +struct rtnl_link_stats64 { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 rx_errors; + __u64 tx_errors; + __u64 rx_dropped; + __u64 tx_dropped; + __u64 multicast; + __u64 collisions; + __u64 rx_length_errors; + __u64 rx_over_errors; + __u64 rx_crc_errors; + __u64 rx_frame_errors; + __u64 rx_fifo_errors; + __u64 rx_missed_errors; + __u64 tx_aborted_errors; + __u64 tx_carrier_errors; + __u64 tx_fifo_errors; + __u64 tx_heartbeat_errors; + __u64 tx_window_errors; + __u64 rx_compressed; + __u64 tx_compressed; + __u64 rx_nohandler; +}; + +struct ifla_vf_guid { + __u32 vf; + __u64 guid; +}; + +struct ifla_vf_stats { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 broadcast; + __u64 multicast; + __u64 rx_dropped; + __u64 
tx_dropped; +}; + +struct ifla_vf_info { + __u32 vf; + __u8 mac[32]; + __u32 vlan; + __u32 qos; + __u32 spoofchk; + __u32 linkstate; + __u32 min_tx_rate; + __u32 max_tx_rate; + __u32 rss_query_en; + __u32 trusted; + __be16 vlan_proto; +}; + +struct tc_stats { + __u64 bytes; + __u32 packets; + __u32 drops; + __u32 overlimits; + __u32 bps; + __u32 pps; + __u32 qlen; + __u32 backlog; +}; + +struct tc_sizespec { + unsigned char cell_log; + unsigned char size_log; + short int cell_align; + int overhead; + unsigned int linklayer; + unsigned int mpu; + unsigned int mtu; + unsigned int tsize; +}; + +enum netdev_tx { + __NETDEV_TX_MIN = 2147483648, + NETDEV_TX_OK = 0, + NETDEV_TX_BUSY = 16, +}; + +typedef enum netdev_tx netdev_tx_t; + +struct header_ops { + int (*create)(struct sk_buff *, struct net_device *, short unsigned int, const void *, const void *, unsigned int); + int (*parse)(const struct sk_buff *, unsigned char *); + int (*cache)(const struct neighbour *, struct hh_cache *, __be16); + void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *); + bool (*validate)(const char *, unsigned int); + __be16 (*parse_protocol)(const struct sk_buff *); +}; + +struct xsk_buff_pool; + +struct netdev_queue { + struct net_device *dev; + struct Qdisc *qdisc; + struct Qdisc *qdisc_sleeping; + struct kobject kobj; + int numa_node; + long unsigned int tx_maxrate; + long unsigned int trans_timeout; + struct net_device *sb_dev; + struct xsk_buff_pool *pool; + spinlock_t _xmit_lock; + int xmit_lock_owner; + long unsigned int trans_start; + long unsigned int state; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct dql dql; +}; + +struct qdisc_skb_head { + struct sk_buff *head; + struct sk_buff *tail; + __u32 qlen; + spinlock_t lock; +}; + +struct gnet_stats_basic_packed { + __u64 bytes; + __u64 packets; +}; + +struct gnet_stats_queue { + __u32 qlen; + __u32 backlog; + __u32 drops; + __u32 requeues; + __u32 overlimits; +}; + +struct Qdisc_ops; + +struct qdisc_size_table; + +struct net_rate_estimator; + +struct gnet_stats_basic_cpu; + +struct Qdisc { + int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); + struct sk_buff * (*dequeue)(struct Qdisc *); + unsigned int flags; + u32 limit; + const struct Qdisc_ops *ops; + struct qdisc_size_table *stab; + struct hlist_node hash; + u32 handle; + u32 parent; + struct netdev_queue *dev_queue; + struct net_rate_estimator *rate_est; + struct gnet_stats_basic_cpu *cpu_bstats; + struct gnet_stats_queue *cpu_qstats; + int pad; + refcount_t refcnt; + long: 64; + long: 64; + long: 64; + struct sk_buff_head gso_skb; + struct qdisc_skb_head q; + struct gnet_stats_basic_packed bstats; + seqcount_t running; + struct gnet_stats_queue qstats; + long unsigned int state; + struct Qdisc *next_sched; + struct sk_buff_head skb_bad_txq; + spinlock_t busylock; + spinlock_t seqlock; + bool empty; + struct callback_head rcu; + long: 64; + long: 64; + long: 64; + long: 64; + long int privdata[0]; +}; + +struct rps_map { + unsigned int len; + struct callback_head rcu; + u16 cpus[0]; +}; + +struct rps_dev_flow { + u16 cpu; + u16 filter; + unsigned int last_qtail; +}; + +struct rps_dev_flow_table { + unsigned int mask; + struct callback_head rcu; + struct rps_dev_flow flows[0]; +}; + +struct netdev_rx_queue { + struct rps_map *rps_map; + struct rps_dev_flow_table *rps_flow_table; + struct kobject kobj; + struct net_device *dev; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct xdp_rxq_info xdp_rxq; + struct 
xsk_buff_pool *pool; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct xps_map { + unsigned int len; + unsigned int alloc_len; + struct callback_head rcu; + u16 queues[0]; +}; + +struct xps_dev_maps { + struct callback_head rcu; + unsigned int nr_ids; + s16 num_tc; + struct xps_map *attr_map[0]; +}; + +struct netdev_fcoe_hbainfo { + char manufacturer[64]; + char serial_number[64]; + char hardware_version[64]; + char driver_version[64]; + char optionrom_version[64]; + char firmware_version[64]; + char model[256]; + char model_description[256]; +}; + +struct netdev_phys_item_id { + unsigned char id[32]; + unsigned char id_len; +}; + +enum net_device_path_type { + DEV_PATH_ETHERNET = 0, + DEV_PATH_VLAN = 1, + DEV_PATH_BRIDGE = 2, + DEV_PATH_PPPOE = 3, + DEV_PATH_DSA = 4, +}; + +struct net_device_path { + enum net_device_path_type type; + const struct net_device *dev; + union { + struct { + u16 id; + __be16 proto; + u8 h_dest[6]; + } encap; + struct { + enum { + DEV_PATH_BR_VLAN_KEEP = 0, + DEV_PATH_BR_VLAN_TAG = 1, + DEV_PATH_BR_VLAN_UNTAG = 2, + DEV_PATH_BR_VLAN_UNTAG_HW = 3, + } vlan_mode; + u16 vlan_id; + __be16 vlan_proto; + } bridge; + struct { + int port; + u16 proto; + } dsa; + }; +}; + +struct net_device_path_ctx { + const struct net_device *dev; + const u8 *daddr; + int num_vlans; + struct { + u16 id; + __be16 proto; + } vlan[2]; +}; + +enum tc_setup_type { + TC_SETUP_QDISC_MQPRIO = 0, + TC_SETUP_CLSU32 = 1, + TC_SETUP_CLSFLOWER = 2, + TC_SETUP_CLSMATCHALL = 3, + TC_SETUP_CLSBPF = 4, + TC_SETUP_BLOCK = 5, + TC_SETUP_QDISC_CBS = 6, + TC_SETUP_QDISC_RED = 7, + TC_SETUP_QDISC_PRIO = 8, + TC_SETUP_QDISC_MQ = 9, + TC_SETUP_QDISC_ETF = 10, + TC_SETUP_ROOT_QDISC = 11, + TC_SETUP_QDISC_GRED = 12, + TC_SETUP_QDISC_TAPRIO = 13, + TC_SETUP_FT = 14, + TC_SETUP_QDISC_ETS = 15, + TC_SETUP_QDISC_TBF = 16, + TC_SETUP_QDISC_FIFO = 17, + TC_SETUP_QDISC_HTB = 18, +}; + +enum bpf_netdev_command { + XDP_SETUP_PROG = 0, + XDP_SETUP_PROG_HW = 1, + BPF_OFFLOAD_MAP_ALLOC = 2, + BPF_OFFLOAD_MAP_FREE = 3, + XDP_SETUP_XSK_POOL = 4, +}; + +struct netdev_bpf { + enum bpf_netdev_command command; + union { + struct { + u32 flags; + struct bpf_prog *prog; + struct netlink_ext_ack *extack; + }; + struct { + struct bpf_offloaded_map *offmap; + }; + struct { + struct xsk_buff_pool *pool; + u16 queue_id; + } xsk; + }; +}; + +struct xfrmdev_ops { + int (*xdo_dev_state_add)(struct xfrm_state *); + void (*xdo_dev_state_delete)(struct xfrm_state *); + void (*xdo_dev_state_free)(struct xfrm_state *); + bool (*xdo_dev_offload_ok)(struct sk_buff *, struct xfrm_state *); + void (*xdo_dev_state_advance_esn)(struct xfrm_state *); +}; + +struct dev_ifalias { + struct callback_head rcuhead; + char ifalias[0]; +}; + +struct netdev_name_node { + struct hlist_node hlist; + struct list_head list; + struct net_device *dev; + const char *name; +}; + +struct devlink_port; + +struct ip_tunnel_parm; + +struct net_device_ops { + int (*ndo_init)(struct net_device *); + void (*ndo_uninit)(struct net_device *); + int (*ndo_open)(struct net_device *); + int (*ndo_stop)(struct net_device *); + netdev_tx_t (*ndo_start_xmit)(struct sk_buff *, struct net_device *); + netdev_features_t (*ndo_features_check)(struct sk_buff *, struct net_device *, netdev_features_t); + u16 (*ndo_select_queue)(struct net_device *, struct sk_buff *, struct net_device *); + void (*ndo_change_rx_flags)(struct net_device *, int); + void (*ndo_set_rx_mode)(struct net_device *); + int (*ndo_set_mac_address)(struct net_device *, 
void *); + int (*ndo_validate_addr)(struct net_device *); + int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int); + int (*ndo_set_config)(struct net_device *, struct ifmap *); + int (*ndo_change_mtu)(struct net_device *, int); + int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *); + void (*ndo_tx_timeout)(struct net_device *, unsigned int); + void (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *); + bool (*ndo_has_offload_stats)(const struct net_device *, int); + int (*ndo_get_offload_stats)(int, const struct net_device *, void *); + struct net_device_stats * (*ndo_get_stats)(struct net_device *); + int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16, u16); + int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16, u16); + void (*ndo_poll_controller)(struct net_device *); + int (*ndo_netpoll_setup)(struct net_device *, struct netpoll_info *); + void (*ndo_netpoll_cleanup)(struct net_device *); + int (*ndo_set_vf_mac)(struct net_device *, int, u8 *); + int (*ndo_set_vf_vlan)(struct net_device *, int, u16, u8, __be16); + int (*ndo_set_vf_rate)(struct net_device *, int, int, int); + int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool); + int (*ndo_set_vf_trust)(struct net_device *, int, bool); + int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *); + int (*ndo_set_vf_link_state)(struct net_device *, int, int); + int (*ndo_get_vf_stats)(struct net_device *, int, struct ifla_vf_stats *); + int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **); + int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *); + int (*ndo_get_vf_guid)(struct net_device *, int, struct ifla_vf_guid *, struct ifla_vf_guid *); + int (*ndo_set_vf_guid)(struct net_device *, int, u64, int); + int (*ndo_set_vf_rss_query_en)(struct net_device *, int, bool); + int (*ndo_setup_tc)(struct net_device *, enum tc_setup_type, void *); + int (*ndo_fcoe_enable)(struct net_device *); + int (*ndo_fcoe_disable)(struct net_device *); + int (*ndo_fcoe_ddp_setup)(struct net_device *, u16, struct scatterlist *, unsigned int); + int (*ndo_fcoe_ddp_done)(struct net_device *, u16); + int (*ndo_fcoe_ddp_target)(struct net_device *, u16, struct scatterlist *, unsigned int); + int (*ndo_fcoe_get_hbainfo)(struct net_device *, struct netdev_fcoe_hbainfo *); + int (*ndo_fcoe_get_wwn)(struct net_device *, u64 *, int); + int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16, u32); + int (*ndo_add_slave)(struct net_device *, struct net_device *, struct netlink_ext_ack *); + int (*ndo_del_slave)(struct net_device *, struct net_device *); + struct net_device * (*ndo_get_xmit_slave)(struct net_device *, struct sk_buff *, bool); + struct net_device * (*ndo_sk_get_lower_dev)(struct net_device *, struct sock *); + netdev_features_t (*ndo_fix_features)(struct net_device *, netdev_features_t); + int (*ndo_set_features)(struct net_device *, netdev_features_t); + int (*ndo_neigh_construct)(struct net_device *, struct neighbour *); + void (*ndo_neigh_destroy)(struct net_device *, struct neighbour *); + int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16, u16, struct netlink_ext_ack *); + int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16); + int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, struct net_device *, int *); + int (*ndo_fdb_get)(struct sk_buff *, struct nlattr **, struct net_device *, const unsigned char *, 
u16, u32, u32, struct netlink_ext_ack *); + int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *, u16, struct netlink_ext_ack *); + int (*ndo_bridge_getlink)(struct sk_buff *, u32, u32, struct net_device *, u32, int); + int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *, u16); + int (*ndo_change_carrier)(struct net_device *, bool); + int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_item_id *); + int (*ndo_get_port_parent_id)(struct net_device *, struct netdev_phys_item_id *); + int (*ndo_get_phys_port_name)(struct net_device *, char *, size_t); + void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *); + void (*ndo_dfwd_del_station)(struct net_device *, void *); + int (*ndo_set_tx_maxrate)(struct net_device *, int, u32); + int (*ndo_get_iflink)(const struct net_device *); + int (*ndo_change_proto_down)(struct net_device *, bool); + int (*ndo_fill_metadata_dst)(struct net_device *, struct sk_buff *); + void (*ndo_set_rx_headroom)(struct net_device *, int); + int (*ndo_bpf)(struct net_device *, struct netdev_bpf *); + int (*ndo_xdp_xmit)(struct net_device *, int, struct xdp_frame **, u32); + int (*ndo_xsk_wakeup)(struct net_device *, u32, u32); + struct devlink_port * (*ndo_get_devlink_port)(struct net_device *); + int (*ndo_tunnel_ctl)(struct net_device *, struct ip_tunnel_parm *, int); + struct net_device * (*ndo_get_peer_dev)(struct net_device *); + int (*ndo_fill_forward_path)(struct net_device_path_ctx *, struct net_device_path *); +}; + +struct neigh_parms { + possible_net_t net; + struct net_device *dev; + struct list_head list; + int (*neigh_setup)(struct neighbour *); + struct neigh_table *tbl; + void *sysctl_table; + int dead; + refcount_t refcnt; + struct callback_head callback_head; + int reachable_time; + int data[13]; + long unsigned int data_state[1]; +}; + +struct pcpu_lstats { + u64_stats_t packets; + u64_stats_t bytes; + struct u64_stats_sync syncp; +}; + +struct pcpu_sw_netstats { + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + struct u64_stats_sync syncp; +}; + +struct iw_request_info; + +union iwreq_data; + +typedef int (*iw_handler)(struct net_device *, struct iw_request_info *, union iwreq_data *, char *); + +struct iw_priv_args; + +struct iw_statistics; + +struct iw_handler_def { + const iw_handler *standard; + __u16 num_standard; + __u16 num_private; + __u16 num_private_args; + const iw_handler *private; + const struct iw_priv_args *private_args; + struct iw_statistics * (*get_wireless_stats)(struct net_device *); +}; + +enum ethtool_phys_id_state { + ETHTOOL_ID_INACTIVE = 0, + ETHTOOL_ID_ACTIVE = 1, + ETHTOOL_ID_ON = 2, + ETHTOOL_ID_OFF = 3, +}; + +struct ethtool_drvinfo; + +struct ethtool_regs; + +struct ethtool_wolinfo; + +struct ethtool_link_ext_state_info; + +struct ethtool_eeprom; + +struct ethtool_coalesce; + +struct ethtool_ringparam; + +struct ethtool_pause_stats; + +struct ethtool_pauseparam; + +struct ethtool_test; + +struct ethtool_stats; + +struct ethtool_rxnfc; + +struct ethtool_flash; + +struct ethtool_channels; + +struct ethtool_dump; + +struct ethtool_ts_info; + +struct ethtool_modinfo; + +struct ethtool_eee; + +struct ethtool_tunable; + +struct ethtool_link_ksettings; + +struct ethtool_fec_stats; + +struct ethtool_fecparam; + +struct ethtool_module_eeprom; + +struct ethtool_eth_phy_stats; + +struct ethtool_eth_mac_stats; + +struct ethtool_eth_ctrl_stats; + +struct ethtool_rmon_stats; + +struct ethtool_rmon_hist_range; + +struct ethtool_ops { + u32 
cap_link_lanes_supported: 1; + u32 supported_coalesce_params; + void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); + int (*get_regs_len)(struct net_device *); + void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); + void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); + int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); + u32 (*get_msglevel)(struct net_device *); + void (*set_msglevel)(struct net_device *, u32); + int (*nway_reset)(struct net_device *); + u32 (*get_link)(struct net_device *); + int (*get_link_ext_state)(struct net_device *, struct ethtool_link_ext_state_info *); + int (*get_eeprom_len)(struct net_device *); + int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); + int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); + void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); + int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); + void (*get_pause_stats)(struct net_device *, struct ethtool_pause_stats *); + void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *); + int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *); + void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); + void (*get_strings)(struct net_device *, u32, u8 *); + int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state); + void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *); + int (*begin)(struct net_device *); + void (*complete)(struct net_device *); + u32 (*get_priv_flags)(struct net_device *); + int (*set_priv_flags)(struct net_device *, u32); + int (*get_sset_count)(struct net_device *, int); + int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *); + int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); + int (*flash_device)(struct net_device *, struct ethtool_flash *); + int (*reset)(struct net_device *, u32 *); + u32 (*get_rxfh_key_size)(struct net_device *); + u32 (*get_rxfh_indir_size)(struct net_device *); + int (*get_rxfh)(struct net_device *, u32 *, u8 *, u8 *); + int (*set_rxfh)(struct net_device *, const u32 *, const u8 *, const u8); + int (*get_rxfh_context)(struct net_device *, u32 *, u8 *, u8 *, u32); + int (*set_rxfh_context)(struct net_device *, const u32 *, const u8 *, const u8, u32 *, bool); + void (*get_channels)(struct net_device *, struct ethtool_channels *); + int (*set_channels)(struct net_device *, struct ethtool_channels *); + int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); + int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *); + int (*set_dump)(struct net_device *, struct ethtool_dump *); + int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *); + int (*get_module_info)(struct net_device *, struct ethtool_modinfo *); + int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*get_eee)(struct net_device *, struct ethtool_eee *); + int (*set_eee)(struct net_device *, struct ethtool_eee *); + int (*get_tunable)(struct net_device *, const struct ethtool_tunable *, void *); + int (*set_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); + int (*get_per_queue_coalesce)(struct net_device *, u32, struct ethtool_coalesce *); + int (*set_per_queue_coalesce)(struct net_device *, u32, struct ethtool_coalesce *); + int 
(*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *); + int (*set_link_ksettings)(struct net_device *, const struct ethtool_link_ksettings *); + void (*get_fec_stats)(struct net_device *, struct ethtool_fec_stats *); + int (*get_fecparam)(struct net_device *, struct ethtool_fecparam *); + int (*set_fecparam)(struct net_device *, struct ethtool_fecparam *); + void (*get_ethtool_phy_stats)(struct net_device *, struct ethtool_stats *, u64 *); + int (*get_phy_tunable)(struct net_device *, const struct ethtool_tunable *, void *); + int (*set_phy_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); + int (*get_module_eeprom_by_page)(struct net_device *, const struct ethtool_module_eeprom *, struct netlink_ext_ack *); + void (*get_eth_phy_stats)(struct net_device *, struct ethtool_eth_phy_stats *); + void (*get_eth_mac_stats)(struct net_device *, struct ethtool_eth_mac_stats *); + void (*get_eth_ctrl_stats)(struct net_device *, struct ethtool_eth_ctrl_stats *); + void (*get_rmon_stats)(struct net_device *, struct ethtool_rmon_stats *, const struct ethtool_rmon_hist_range **); +}; + +struct l3mdev_ops { + u32 (*l3mdev_fib_table)(const struct net_device *); + struct sk_buff * (*l3mdev_l3_rcv)(struct net_device *, struct sk_buff *, u16); + struct sk_buff * (*l3mdev_l3_out)(struct net_device *, struct sock *, struct sk_buff *, u16); + struct dst_entry * (*l3mdev_link_scope_lookup)(const struct net_device *, struct flowi6 *); +}; + +struct nd_opt_hdr; + +struct ndisc_options; + +struct prefix_info; + +struct ndisc_ops { + int (*is_useropt)(u8); + int (*parse_options)(const struct net_device *, struct nd_opt_hdr *, struct ndisc_options *); + void (*update)(const struct net_device *, struct neighbour *, u32, u8, const struct ndisc_options *); + int (*opt_addr_space)(const struct net_device *, u8, struct neighbour *, u8 *, u8 **); + void (*fill_addr_option)(const struct net_device *, struct sk_buff *, u8, const u8 *); + void (*prefix_rcv_add_addr)(struct net *, struct net_device *, const struct prefix_info *, struct inet6_dev *, struct in6_addr *, int, u32, bool, bool, __u32, u32, bool); +}; + +enum tls_offload_ctx_dir { + TLS_OFFLOAD_CTX_DIR_RX = 0, + TLS_OFFLOAD_CTX_DIR_TX = 1, +}; + +struct tls_crypto_info; + +struct tls_context; + +struct tlsdev_ops { + int (*tls_dev_add)(struct net_device *, struct sock *, enum tls_offload_ctx_dir, struct tls_crypto_info *, u32); + void (*tls_dev_del)(struct net_device *, struct tls_context *, enum tls_offload_ctx_dir); + int (*tls_dev_resync)(struct net_device *, struct sock *, u32, u8 *, enum tls_offload_ctx_dir); +}; + +struct ipv6_devstat { + struct proc_dir_entry *proc_dir_entry; + struct ipstats_mib *ipv6; + struct icmpv6_mib_device *icmpv6dev; + struct icmpv6msg_mib_device *icmpv6msgdev; +}; + +struct ifmcaddr6; + +struct ifacaddr6; + +struct inet6_dev { + struct net_device *dev; + struct list_head addr_list; + struct ifmcaddr6 *mc_list; + struct ifmcaddr6 *mc_tomb; + unsigned char mc_qrv; + unsigned char mc_gq_running; + unsigned char mc_ifc_count; + unsigned char mc_dad_count; + long unsigned int mc_v1_seen; + long unsigned int mc_qi; + long unsigned int mc_qri; + long unsigned int mc_maxdelay; + struct delayed_work mc_gq_work; + struct delayed_work mc_ifc_work; + struct delayed_work mc_dad_work; + struct delayed_work mc_query_work; + struct delayed_work mc_report_work; + struct sk_buff_head mc_query_queue; + struct sk_buff_head mc_report_queue; + spinlock_t mc_query_lock; + spinlock_t mc_report_lock; + struct 
mutex mc_lock; + struct ifacaddr6 *ac_list; + rwlock_t lock; + refcount_t refcnt; + __u32 if_flags; + int dead; + u32 desync_factor; + struct list_head tempaddr_list; + struct in6_addr token; + struct neigh_parms *nd_parms; + struct ipv6_devconf cnf; + struct ipv6_devstat stats; + struct timer_list rs_timer; + __s32 rs_interval; + __u8 rs_probes; + long unsigned int tstamp; + struct callback_head rcu; +}; + +struct tcf_proto; + +struct tcf_block; + +struct mini_Qdisc { + struct tcf_proto *filter_list; + struct tcf_block *block; + struct gnet_stats_basic_cpu *cpu_bstats; + struct gnet_stats_queue *cpu_qstats; + struct callback_head rcu; +}; + +struct rtnl_link_ops { + struct list_head list; + const char *kind; + size_t priv_size; + void (*setup)(struct net_device *); + bool netns_refund; + unsigned int maxtype; + const struct nla_policy *policy; + int (*validate)(struct nlattr **, struct nlattr **, struct netlink_ext_ack *); + int (*newlink)(struct net *, struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *); + int (*changelink)(struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *); + void (*dellink)(struct net_device *, struct list_head *); + size_t (*get_size)(const struct net_device *); + int (*fill_info)(struct sk_buff *, const struct net_device *); + size_t (*get_xstats_size)(const struct net_device *); + int (*fill_xstats)(struct sk_buff *, const struct net_device *); + unsigned int (*get_num_tx_queues)(); + unsigned int (*get_num_rx_queues)(); + unsigned int slave_maxtype; + const struct nla_policy *slave_policy; + int (*slave_changelink)(struct net_device *, struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *); + size_t (*get_slave_size)(const struct net_device *, const struct net_device *); + int (*fill_slave_info)(struct sk_buff *, const struct net_device *, const struct net_device *); + struct net * (*get_link_net)(const struct net_device *); + size_t (*get_linkxstats_size)(const struct net_device *, int); + int (*fill_linkxstats)(struct sk_buff *, const struct net_device *, int *, int); +}; + +struct udp_tunnel_nic_table_info { + unsigned int n_entries; + unsigned int tunnel_types; +}; + +struct udp_tunnel_info; + +struct udp_tunnel_nic_shared; + +struct udp_tunnel_nic_info { + int (*set_port)(struct net_device *, unsigned int, unsigned int, struct udp_tunnel_info *); + int (*unset_port)(struct net_device *, unsigned int, unsigned int, struct udp_tunnel_info *); + int (*sync_table)(struct net_device *, unsigned int); + struct udp_tunnel_nic_shared *shared; + unsigned int flags; + struct udp_tunnel_nic_table_info tables[4]; +}; + +enum { + RTAX_UNSPEC = 0, + RTAX_LOCK = 1, + RTAX_MTU = 2, + RTAX_WINDOW = 3, + RTAX_RTT = 4, + RTAX_RTTVAR = 5, + RTAX_SSTHRESH = 6, + RTAX_CWND = 7, + RTAX_ADVMSS = 8, + RTAX_REORDERING = 9, + RTAX_HOPLIMIT = 10, + RTAX_INITCWND = 11, + RTAX_FEATURES = 12, + RTAX_RTO_MIN = 13, + RTAX_INITRWND = 14, + RTAX_QUICKACK = 15, + RTAX_CC_ALGO = 16, + RTAX_FASTOPEN_NO_COOKIE = 17, + __RTAX_MAX = 18, +}; + +struct tcmsg { + unsigned char tcm_family; + unsigned char tcm__pad1; + short unsigned int tcm__pad2; + int tcm_ifindex; + __u32 tcm_handle; + __u32 tcm_parent; + __u32 tcm_info; +}; + +struct gnet_stats_basic_cpu { + struct gnet_stats_basic_packed bstats; + struct u64_stats_sync syncp; +}; + +struct gnet_dump { + spinlock_t *lock; + struct sk_buff *skb; + struct nlattr *tail; + int compat_tc_stats; + int compat_xstats; + int padattr; + void *xstats; + int xstats_len; + 
struct tc_stats tc_stats; +}; + +struct netlink_range_validation { + u64 min; + u64 max; +}; + +struct netlink_range_validation_signed { + s64 min; + s64 max; +}; + +enum flow_action_hw_stats_bit { + FLOW_ACTION_HW_STATS_IMMEDIATE_BIT = 0, + FLOW_ACTION_HW_STATS_DELAYED_BIT = 1, + FLOW_ACTION_HW_STATS_DISABLED_BIT = 2, + FLOW_ACTION_HW_STATS_NUM_BITS = 3, +}; + +struct flow_block { + struct list_head cb_list; +}; + +typedef int flow_setup_cb_t(enum tc_setup_type, void *, void *); + +struct qdisc_size_table { + struct callback_head rcu; + struct list_head list; + struct tc_sizespec szopts; + int refcnt; + u16 data[0]; +}; + +struct Qdisc_class_ops; + +struct Qdisc_ops { + struct Qdisc_ops *next; + const struct Qdisc_class_ops *cl_ops; + char id[16]; + int priv_size; + unsigned int static_flags; + int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); + struct sk_buff * (*dequeue)(struct Qdisc *); + struct sk_buff * (*peek)(struct Qdisc *); + int (*init)(struct Qdisc *, struct nlattr *, struct netlink_ext_ack *); + void (*reset)(struct Qdisc *); + void (*destroy)(struct Qdisc *); + int (*change)(struct Qdisc *, struct nlattr *, struct netlink_ext_ack *); + void (*attach)(struct Qdisc *); + int (*change_tx_queue_len)(struct Qdisc *, unsigned int); + int (*dump)(struct Qdisc *, struct sk_buff *); + int (*dump_stats)(struct Qdisc *, struct gnet_dump *); + void (*ingress_block_set)(struct Qdisc *, u32); + void (*egress_block_set)(struct Qdisc *, u32); + u32 (*ingress_block_get)(struct Qdisc *); + u32 (*egress_block_get)(struct Qdisc *); + struct module *owner; +}; + +struct qdisc_walker; + +struct Qdisc_class_ops { + unsigned int flags; + struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *); + int (*graft)(struct Qdisc *, long unsigned int, struct Qdisc *, struct Qdisc **, struct netlink_ext_ack *); + struct Qdisc * (*leaf)(struct Qdisc *, long unsigned int); + void (*qlen_notify)(struct Qdisc *, long unsigned int); + long unsigned int (*find)(struct Qdisc *, u32); + int (*change)(struct Qdisc *, u32, u32, struct nlattr **, long unsigned int *, struct netlink_ext_ack *); + int (*delete)(struct Qdisc *, long unsigned int, struct netlink_ext_ack *); + void (*walk)(struct Qdisc *, struct qdisc_walker *); + struct tcf_block * (*tcf_block)(struct Qdisc *, long unsigned int, struct netlink_ext_ack *); + long unsigned int (*bind_tcf)(struct Qdisc *, long unsigned int, u32); + void (*unbind_tcf)(struct Qdisc *, long unsigned int); + int (*dump)(struct Qdisc *, long unsigned int, struct sk_buff *, struct tcmsg *); + int (*dump_stats)(struct Qdisc *, long unsigned int, struct gnet_dump *); +}; + +struct tcf_chain; + +struct tcf_block { + struct mutex lock; + struct list_head chain_list; + u32 index; + u32 classid; + refcount_t refcnt; + struct net *net; + struct Qdisc *q; + struct rw_semaphore cb_lock; + struct flow_block flow_block; + struct list_head owner_list; + bool keep_dst; + atomic_t offloadcnt; + unsigned int nooffloaddevcnt; + unsigned int lockeddevcnt; + struct { + struct tcf_chain *chain; + struct list_head filter_chain_list; + } chain0; + struct callback_head rcu; + struct hlist_head proto_destroy_ht[128]; + struct mutex proto_destroy_lock; +}; + +struct tcf_result; + +struct tcf_proto_ops; + +struct tcf_proto { + struct tcf_proto *next; + void *root; + int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); + __be16 protocol; + u32 prio; + void *data; + const struct tcf_proto_ops *ops; + struct tcf_chain *chain; + spinlock_t lock; + 
bool deleting; + refcount_t refcnt; + struct callback_head rcu; + struct hlist_node destroy_ht_node; +}; + +struct tcf_result { + union { + struct { + long unsigned int class; + u32 classid; + }; + const struct tcf_proto *goto_tp; + struct { + bool ingress; + struct gnet_stats_queue *qstats; + }; + }; +}; + +struct tcf_walker; + +struct tcf_proto_ops { + struct list_head head; + char kind[16]; + int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); + int (*init)(struct tcf_proto *); + void (*destroy)(struct tcf_proto *, bool, struct netlink_ext_ack *); + void * (*get)(struct tcf_proto *, u32); + void (*put)(struct tcf_proto *, void *); + int (*change)(struct net *, struct sk_buff *, struct tcf_proto *, long unsigned int, u32, struct nlattr **, void **, bool, bool, struct netlink_ext_ack *); + int (*delete)(struct tcf_proto *, void *, bool *, bool, struct netlink_ext_ack *); + bool (*delete_empty)(struct tcf_proto *); + void (*walk)(struct tcf_proto *, struct tcf_walker *, bool); + int (*reoffload)(struct tcf_proto *, bool, flow_setup_cb_t *, void *, struct netlink_ext_ack *); + void (*hw_add)(struct tcf_proto *, void *); + void (*hw_del)(struct tcf_proto *, void *); + void (*bind_class)(void *, u32, long unsigned int, void *, long unsigned int); + void * (*tmplt_create)(struct net *, struct tcf_chain *, struct nlattr **, struct netlink_ext_ack *); + void (*tmplt_destroy)(void *); + int (*dump)(struct net *, struct tcf_proto *, void *, struct sk_buff *, struct tcmsg *, bool); + int (*terse_dump)(struct net *, struct tcf_proto *, void *, struct sk_buff *, struct tcmsg *, bool); + int (*tmplt_dump)(struct sk_buff *, struct net *, void *); + struct module *owner; + int flags; +}; + +struct tcf_chain { + struct mutex filter_chain_lock; + struct tcf_proto *filter_chain; + struct list_head list; + struct tcf_block *block; + u32 index; + unsigned int refcnt; + unsigned int action_refcnt; + bool explicitly_created; + bool flushing; + const struct tcf_proto_ops *tmplt_ops; + void *tmplt_priv; + struct callback_head rcu; +}; + +struct sock_fprog_kern { + u16 len; + struct sock_filter *filter; +}; + +struct bpf_prog_stats { + u64 cnt; + u64 nsecs; + u64 misses; + struct u64_stats_sync syncp; + long: 64; +}; + +struct sk_filter { + refcount_t refcnt; + struct callback_head rcu; + struct bpf_prog *prog; +}; + +enum { + NEIGH_VAR_MCAST_PROBES = 0, + NEIGH_VAR_UCAST_PROBES = 1, + NEIGH_VAR_APP_PROBES = 2, + NEIGH_VAR_MCAST_REPROBES = 3, + NEIGH_VAR_RETRANS_TIME = 4, + NEIGH_VAR_BASE_REACHABLE_TIME = 5, + NEIGH_VAR_DELAY_PROBE_TIME = 6, + NEIGH_VAR_GC_STALETIME = 7, + NEIGH_VAR_QUEUE_LEN_BYTES = 8, + NEIGH_VAR_PROXY_QLEN = 9, + NEIGH_VAR_ANYCAST_DELAY = 10, + NEIGH_VAR_PROXY_DELAY = 11, + NEIGH_VAR_LOCKTIME = 12, + NEIGH_VAR_QUEUE_LEN = 13, + NEIGH_VAR_RETRANS_TIME_MS = 14, + NEIGH_VAR_BASE_REACHABLE_TIME_MS = 15, + NEIGH_VAR_GC_INTERVAL = 16, + NEIGH_VAR_GC_THRESH1 = 17, + NEIGH_VAR_GC_THRESH2 = 18, + NEIGH_VAR_GC_THRESH3 = 19, + NEIGH_VAR_MAX = 20, +}; + +struct pneigh_entry; + +struct neigh_statistics; + +struct neigh_hash_table; + +struct neigh_table { + int family; + unsigned int entry_size; + unsigned int key_len; + __be16 protocol; + __u32 (*hash)(const void *, const struct net_device *, __u32 *); + bool (*key_eq)(const struct neighbour *, const void *); + int (*constructor)(struct neighbour *); + int (*pconstructor)(struct pneigh_entry *); + void (*pdestructor)(struct pneigh_entry *); + void (*proxy_redo)(struct sk_buff *); + int (*is_multicast)(const void *); + bool 
(*allow_add)(const struct net_device *, struct netlink_ext_ack *); + char *id; + struct neigh_parms parms; + struct list_head parms_list; + int gc_interval; + int gc_thresh1; + int gc_thresh2; + int gc_thresh3; + long unsigned int last_flush; + struct delayed_work gc_work; + struct timer_list proxy_timer; + struct sk_buff_head proxy_queue; + atomic_t entries; + atomic_t gc_entries; + struct list_head gc_list; + rwlock_t lock; + long unsigned int last_rand; + struct neigh_statistics *stats; + struct neigh_hash_table *nht; + struct pneigh_entry **phash_buckets; +}; + +struct neigh_statistics { + long unsigned int allocs; + long unsigned int destroys; + long unsigned int hash_grows; + long unsigned int res_failed; + long unsigned int lookups; + long unsigned int hits; + long unsigned int rcv_probes_mcast; + long unsigned int rcv_probes_ucast; + long unsigned int periodic_gc_runs; + long unsigned int forced_gc_runs; + long unsigned int unres_discards; + long unsigned int table_fulls; +}; + +struct neigh_ops { + int family; + void (*solicit)(struct neighbour *, struct sk_buff *); + void (*error_report)(struct neighbour *, struct sk_buff *); + int (*output)(struct neighbour *, struct sk_buff *); + int (*connected_output)(struct neighbour *, struct sk_buff *); +}; + +struct pneigh_entry { + struct pneigh_entry *next; + possible_net_t net; + struct net_device *dev; + u8 flags; + u8 protocol; + u8 key[0]; +}; + +struct neigh_hash_table { + struct neighbour **hash_buckets; + unsigned int hash_shift; + __u32 hash_rnd[4]; + struct callback_head rcu; +}; + +enum { + TCP_ESTABLISHED = 1, + TCP_SYN_SENT = 2, + TCP_SYN_RECV = 3, + TCP_FIN_WAIT1 = 4, + TCP_FIN_WAIT2 = 5, + TCP_TIME_WAIT = 6, + TCP_CLOSE = 7, + TCP_CLOSE_WAIT = 8, + TCP_LAST_ACK = 9, + TCP_LISTEN = 10, + TCP_CLOSING = 11, + TCP_NEW_SYN_RECV = 12, + TCP_MAX_STATES = 13, +}; + +struct fib_rule_hdr { + __u8 family; + __u8 dst_len; + __u8 src_len; + __u8 tos; + __u8 table; + __u8 res1; + __u8 res2; + __u8 action; + __u32 flags; +}; + +struct fib_rule_port_range { + __u16 start; + __u16 end; +}; + +struct fib_kuid_range { + kuid_t start; + kuid_t end; +}; + +struct fib_rule { + struct list_head list; + int iifindex; + int oifindex; + u32 mark; + u32 mark_mask; + u32 flags; + u32 table; + u8 action; + u8 l3mdev; + u8 proto; + u8 ip_proto; + u32 target; + __be64 tun_id; + struct fib_rule *ctarget; + struct net *fr_net; + refcount_t refcnt; + u32 pref; + int suppress_ifgroup; + int suppress_prefixlen; + char iifname[16]; + char oifname[16]; + struct fib_kuid_range uid_range; + struct fib_rule_port_range sport_range; + struct fib_rule_port_range dport_range; + struct callback_head rcu; +}; + +struct fib_lookup_arg { + void *lookup_ptr; + const void *lookup_data; + void *result; + struct fib_rule *rule; + u32 table; + int flags; +}; + +struct smc_hashinfo; + +struct sk_psock; + +struct request_sock_ops; + +struct timewait_sock_ops; + +struct udp_table; + +struct raw_hashinfo; + +struct proto { + void (*close)(struct sock *, long int); + int (*pre_connect)(struct sock *, struct sockaddr *, int); + int (*connect)(struct sock *, struct sockaddr *, int); + int (*disconnect)(struct sock *, int); + struct sock * (*accept)(struct sock *, int, int *, bool); + int (*ioctl)(struct sock *, int, long unsigned int); + int (*init)(struct sock *); + void (*destroy)(struct sock *); + void (*shutdown)(struct sock *, int); + int (*setsockopt)(struct sock *, int, int, sockptr_t, unsigned int); + int (*getsockopt)(struct sock *, int, int, char *, int *); + void 
(*keepalive)(struct sock *, int); + int (*compat_ioctl)(struct sock *, unsigned int, long unsigned int); + int (*sendmsg)(struct sock *, struct msghdr *, size_t); + int (*recvmsg)(struct sock *, struct msghdr *, size_t, int, int, int *); + int (*sendpage)(struct sock *, struct page *, int, size_t, int); + int (*bind)(struct sock *, struct sockaddr *, int); + int (*bind_add)(struct sock *, struct sockaddr *, int); + int (*backlog_rcv)(struct sock *, struct sk_buff *); + bool (*bpf_bypass_getsockopt)(int, int); + void (*release_cb)(struct sock *); + int (*hash)(struct sock *); + void (*unhash)(struct sock *); + void (*rehash)(struct sock *); + int (*get_port)(struct sock *, short unsigned int); + int (*psock_update_sk_prot)(struct sock *, struct sk_psock *, bool); + unsigned int inuse_idx; + bool (*stream_memory_free)(const struct sock *, int); + bool (*stream_memory_read)(const struct sock *); + void (*enter_memory_pressure)(struct sock *); + void (*leave_memory_pressure)(struct sock *); + atomic_long_t *memory_allocated; + struct percpu_counter *sockets_allocated; + long unsigned int *memory_pressure; + long int *sysctl_mem; + int *sysctl_wmem; + int *sysctl_rmem; + u32 sysctl_wmem_offset; + u32 sysctl_rmem_offset; + int max_header; + bool no_autobind; + struct kmem_cache *slab; + unsigned int obj_size; + slab_flags_t slab_flags; + unsigned int useroffset; + unsigned int usersize; + struct percpu_counter *orphan_count; + struct request_sock_ops *rsk_prot; + struct timewait_sock_ops *twsk_prot; + union { + struct inet_hashinfo *hashinfo; + struct udp_table *udp_table; + struct raw_hashinfo *raw_hash; + struct smc_hashinfo *smc_hash; + } h; + struct module *owner; + char name[32]; + struct list_head node; + int (*diag_destroy)(struct sock *, int); +}; + +struct request_sock; + +struct request_sock_ops { + int family; + unsigned int obj_size; + struct kmem_cache *slab; + char *slab_name; + int (*rtx_syn_ack)(const struct sock *, struct request_sock *); + void (*send_ack)(const struct sock *, struct sk_buff *, struct request_sock *); + void (*send_reset)(const struct sock *, struct sk_buff *); + void (*destructor)(struct request_sock *); + void (*syn_ack_timeout)(const struct request_sock *); +}; + +struct timewait_sock_ops { + struct kmem_cache *twsk_slab; + char *twsk_slab_name; + unsigned int twsk_obj_size; + int (*twsk_unique)(struct sock *, struct sock *, void *); + void (*twsk_destructor)(struct sock *); +}; + +struct saved_syn; + +struct request_sock { + struct sock_common __req_common; + struct request_sock *dl_next; + u16 mss; + u8 num_retrans; + u8 syncookie: 1; + u8 num_timeout: 7; + u32 ts_recent; + struct timer_list rsk_timer; + const struct request_sock_ops *rsk_ops; + struct sock *sk; + struct saved_syn *saved_syn; + u32 secid; + u32 peer_secid; +}; + +struct saved_syn { + u32 mac_hdrlen; + u32 network_hdrlen; + u32 tcp_hdrlen; + u8 data[0]; +}; + +enum tsq_enum { + TSQ_THROTTLED = 0, + TSQ_QUEUED = 1, + TCP_TSQ_DEFERRED = 2, + TCP_WRITE_TIMER_DEFERRED = 3, + TCP_DELACK_TIMER_DEFERRED = 4, + TCP_MTU_REDUCED_DEFERRED = 5, +}; + +struct ip6_sf_list { + struct ip6_sf_list *sf_next; + struct in6_addr sf_addr; + long unsigned int sf_count[2]; + unsigned char sf_gsresp; + unsigned char sf_oldin; + unsigned char sf_crcount; + struct callback_head rcu; +}; + +struct ifmcaddr6 { + struct in6_addr mca_addr; + struct inet6_dev *idev; + struct ifmcaddr6 *next; + struct ip6_sf_list *mca_sources; + struct ip6_sf_list *mca_tomb; + unsigned int mca_sfmode; + unsigned char mca_crcount; + long 
unsigned int mca_sfcount[2]; + struct delayed_work mca_work; + unsigned int mca_flags; + int mca_users; + refcount_t mca_refcnt; + long unsigned int mca_cstamp; + long unsigned int mca_tstamp; + struct callback_head rcu; +}; + +struct ifacaddr6 { + struct in6_addr aca_addr; + struct fib6_info *aca_rt; + struct ifacaddr6 *aca_next; + struct hlist_node aca_addr_lst; + int aca_users; + refcount_t aca_refcnt; + long unsigned int aca_cstamp; + long unsigned int aca_tstamp; + struct callback_head rcu; +}; + +enum { + __ND_OPT_PREFIX_INFO_END = 0, + ND_OPT_SOURCE_LL_ADDR = 1, + ND_OPT_TARGET_LL_ADDR = 2, + ND_OPT_PREFIX_INFO = 3, + ND_OPT_REDIRECT_HDR = 4, + ND_OPT_MTU = 5, + ND_OPT_NONCE = 14, + __ND_OPT_ARRAY_MAX = 15, + ND_OPT_ROUTE_INFO = 24, + ND_OPT_RDNSS = 25, + ND_OPT_DNSSL = 31, + ND_OPT_6CO = 34, + ND_OPT_CAPTIVE_PORTAL = 37, + ND_OPT_PREF64 = 38, + __ND_OPT_MAX = 39, +}; + +struct nd_opt_hdr { + __u8 nd_opt_type; + __u8 nd_opt_len; +}; + +struct ndisc_options { + struct nd_opt_hdr *nd_opt_array[15]; + struct nd_opt_hdr *nd_opts_ri; + struct nd_opt_hdr *nd_opts_ri_end; + struct nd_opt_hdr *nd_useropts; + struct nd_opt_hdr *nd_useropts_end; + struct nd_opt_hdr *nd_802154_opt_array[3]; +}; + +struct prefix_info { + __u8 type; + __u8 length; + __u8 prefix_len; + __u8 reserved: 6; + __u8 autoconf: 1; + __u8 onlink: 1; + __be32 valid; + __be32 prefered; + __be32 reserved2; + struct in6_addr prefix; +}; + +enum nfs_opnum4 { + OP_ACCESS = 3, + OP_CLOSE = 4, + OP_COMMIT = 5, + OP_CREATE = 6, + OP_DELEGPURGE = 7, + OP_DELEGRETURN = 8, + OP_GETATTR = 9, + OP_GETFH = 10, + OP_LINK = 11, + OP_LOCK = 12, + OP_LOCKT = 13, + OP_LOCKU = 14, + OP_LOOKUP = 15, + OP_LOOKUPP = 16, + OP_NVERIFY = 17, + OP_OPEN = 18, + OP_OPENATTR = 19, + OP_OPEN_CONFIRM = 20, + OP_OPEN_DOWNGRADE = 21, + OP_PUTFH = 22, + OP_PUTPUBFH = 23, + OP_PUTROOTFH = 24, + OP_READ = 25, + OP_READDIR = 26, + OP_READLINK = 27, + OP_REMOVE = 28, + OP_RENAME = 29, + OP_RENEW = 30, + OP_RESTOREFH = 31, + OP_SAVEFH = 32, + OP_SECINFO = 33, + OP_SETATTR = 34, + OP_SETCLIENTID = 35, + OP_SETCLIENTID_CONFIRM = 36, + OP_VERIFY = 37, + OP_WRITE = 38, + OP_RELEASE_LOCKOWNER = 39, + OP_BACKCHANNEL_CTL = 40, + OP_BIND_CONN_TO_SESSION = 41, + OP_EXCHANGE_ID = 42, + OP_CREATE_SESSION = 43, + OP_DESTROY_SESSION = 44, + OP_FREE_STATEID = 45, + OP_GET_DIR_DELEGATION = 46, + OP_GETDEVICEINFO = 47, + OP_GETDEVICELIST = 48, + OP_LAYOUTCOMMIT = 49, + OP_LAYOUTGET = 50, + OP_LAYOUTRETURN = 51, + OP_SECINFO_NO_NAME = 52, + OP_SEQUENCE = 53, + OP_SET_SSV = 54, + OP_TEST_STATEID = 55, + OP_WANT_DELEGATION = 56, + OP_DESTROY_CLIENTID = 57, + OP_RECLAIM_COMPLETE = 58, + OP_ALLOCATE = 59, + OP_COPY = 60, + OP_COPY_NOTIFY = 61, + OP_DEALLOCATE = 62, + OP_IO_ADVISE = 63, + OP_LAYOUTERROR = 64, + OP_LAYOUTSTATS = 65, + OP_OFFLOAD_CANCEL = 66, + OP_OFFLOAD_STATUS = 67, + OP_READ_PLUS = 68, + OP_SEEK = 69, + OP_WRITE_SAME = 70, + OP_CLONE = 71, + OP_GETXATTR = 72, + OP_SETXATTR = 73, + OP_LISTXATTRS = 74, + OP_REMOVEXATTR = 75, + OP_ILLEGAL = 10044, +}; + +enum perf_branch_sample_type_shift { + PERF_SAMPLE_BRANCH_USER_SHIFT = 0, + PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, + PERF_SAMPLE_BRANCH_HV_SHIFT = 2, + PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, + PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, + PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, + PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, + PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, + PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, + PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, + PERF_SAMPLE_BRANCH_COND_SHIFT = 10, + PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, + 
PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, + PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, + PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14, + PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15, + PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16, + PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 17, + PERF_SAMPLE_BRANCH_MAX_SHIFT = 18, +}; + +enum exception_stack_ordering { + ESTACK_DF = 0, + ESTACK_NMI = 1, + ESTACK_DB = 2, + ESTACK_MCE = 3, + ESTACK_VC = 4, + ESTACK_VC2 = 5, + N_EXCEPTION_STACKS = 6, +}; + +enum { + TSK_TRACE_FL_TRACE_BIT = 0, + TSK_TRACE_FL_GRAPH_BIT = 1, +}; + +struct uuidcmp { + const char *uuid; + int len; +}; + +struct subprocess_info { + struct work_struct work; + struct completion *complete; + const char *path; + char **argv; + char **envp; + int wait; + int retval; + int (*init)(struct subprocess_info *, struct cred *); + void (*cleanup)(struct subprocess_info *); + void *data; +}; + +typedef phys_addr_t resource_size_t; + +struct __va_list_tag { + unsigned int gp_offset; + unsigned int fp_offset; + void *overflow_arg_area; + void *reg_save_area; +}; + +typedef __builtin_va_list __gnuc_va_list; + +typedef __gnuc_va_list va_list; + +struct resource { + resource_size_t start; + resource_size_t end; + const char *name; + long unsigned int flags; + long unsigned int desc; + struct resource *parent; + struct resource *sibling; + struct resource *child; +}; + +typedef u64 async_cookie_t; + +typedef void (*async_func_t)(void *, async_cookie_t); + +struct async_domain { + struct list_head pending; + unsigned int registered: 1; +}; + +struct hash { + int ino; + int minor; + int major; + umode_t mode; + struct hash *next; + char name[4098]; +}; + +struct dir_entry { + struct list_head list; + char *name; + time64_t mtime; +}; + +enum state { + Start = 0, + Collect = 1, + GotHeader = 2, + SkipIt = 3, + GotName = 4, + CopyFile = 5, + GotSymlink = 6, + Reset = 7, +}; + +typedef int (*decompress_fn)(unsigned char *, long int, long int (*)(void *, long unsigned int), long int (*)(void *, long unsigned int), unsigned char *, long int *, void (*)(char *)); + +enum audit_ntp_type { + AUDIT_NTP_OFFSET = 0, + AUDIT_NTP_FREQ = 1, + AUDIT_NTP_STATUS = 2, + AUDIT_NTP_TAI = 3, + AUDIT_NTP_TICK = 4, + AUDIT_NTP_ADJUST = 5, + AUDIT_NTP_NVALS = 6, +}; + +typedef long int (*sys_call_ptr_t)(const struct pt_regs *); + +struct io_bitmap { + u64 sequence; + refcount_t refcnt; + unsigned int max; + long unsigned int bitmap[1024]; +}; + +enum irqreturn { + IRQ_NONE = 0, + IRQ_HANDLED = 1, + IRQ_WAKE_THREAD = 2, +}; + +typedef enum irqreturn irqreturn_t; + +typedef struct { + u16 __softirq_pending; + u8 kvm_cpu_l1tf_flush_l1d; + unsigned int __nmi_count; + unsigned int apic_timer_irqs; + unsigned int irq_spurious_count; + unsigned int icr_read_retry_count; + unsigned int kvm_posted_intr_ipis; + unsigned int kvm_posted_intr_wakeup_ipis; + unsigned int kvm_posted_intr_nested_ipis; + unsigned int x86_platform_ipis; + unsigned int apic_perf_irqs; + unsigned int apic_irq_work_irqs; + unsigned int irq_resched_count; + unsigned int irq_call_count; + unsigned int irq_tlb_count; + unsigned int irq_thermal_count; + unsigned int irq_threshold_count; + unsigned int irq_deferred_error_count; + unsigned int irq_hv_callback_count; + unsigned int irq_hv_reenlightenment_count; + unsigned int hyperv_stimer0_count; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +} irq_cpustat_t; + +typedef irqreturn_t (*irq_handler_t)(int, void *); + +struct irqaction { + irq_handler_t handler; + void *dev_id; + void *percpu_dev_id; + struct irqaction *next; + 
irq_handler_t thread_fn; + struct task_struct *thread; + struct irqaction *secondary; + unsigned int irq; + unsigned int flags; + long unsigned int thread_flags; + long unsigned int thread_mask; + const char *name; + struct proc_dir_entry *dir; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct irq_affinity_notify { + unsigned int irq; + struct kref kref; + struct work_struct work; + void (*notify)(struct irq_affinity_notify *, const cpumask_t *); + void (*release)(struct kref *); +}; + +struct irq_affinity_desc { + struct cpumask mask; + unsigned int is_managed: 1; +}; + +enum irqchip_irq_state { + IRQCHIP_STATE_PENDING = 0, + IRQCHIP_STATE_ACTIVE = 1, + IRQCHIP_STATE_MASKED = 2, + IRQCHIP_STATE_LINE_LEVEL = 3, +}; + +enum { + EI_ETYPE_NONE = 0, + EI_ETYPE_NULL = 1, + EI_ETYPE_ERRNO = 2, + EI_ETYPE_ERRNO_NULL = 3, + EI_ETYPE_TRUE = 4, +}; + +struct syscall_metadata { + const char *name; + int syscall_nr; + int nb_args; + const char **types; + const char **args; + struct list_head enter_fields; + struct trace_event_call *enter_event; + struct trace_event_call *exit_event; +}; + +struct irqentry_state { + union { + bool exit_rcu; + bool lockdep; + }; +}; + +typedef struct irqentry_state irqentry_state_t; + +struct irq_desc; + +typedef void (*irq_flow_handler_t)(struct irq_desc *); + +struct msi_desc; + +struct irq_common_data { + unsigned int state_use_accessors; + unsigned int node; + void *handler_data; + struct msi_desc *msi_desc; + cpumask_var_t affinity; + cpumask_var_t effective_affinity; +}; + +struct irq_chip; + +struct irq_data { + u32 mask; + unsigned int irq; + long unsigned int hwirq; + struct irq_common_data *common; + struct irq_chip *chip; + struct irq_domain *domain; + struct irq_data *parent_data; + void *chip_data; +}; + +struct irq_desc { + struct irq_common_data irq_common_data; + struct irq_data irq_data; + unsigned int *kstat_irqs; + irq_flow_handler_t handle_irq; + struct irqaction *action; + unsigned int status_use_accessors; + unsigned int core_internal_state__do_not_mess_with_it; + unsigned int depth; + unsigned int wake_depth; + unsigned int tot_count; + unsigned int irq_count; + long unsigned int last_unhandled; + unsigned int irqs_unhandled; + atomic_t threads_handled; + int threads_handled_last; + raw_spinlock_t lock; + struct cpumask *percpu_enabled; + const struct cpumask *percpu_affinity; + const struct cpumask *affinity_hint; + struct irq_affinity_notify *affinity_notify; + cpumask_var_t pending_mask; + long unsigned int threads_oneshot; + atomic_t threads_active; + wait_queue_head_t wait_for_threads; + unsigned int nr_actions; + unsigned int no_suspend_depth; + unsigned int cond_suspend_depth; + unsigned int force_resume_depth; + struct proc_dir_entry *dir; + struct callback_head rcu; + struct kobject kobj; + struct mutex request_mutex; + int parent_irq; + struct module *owner; + const char *name; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct x86_msi_addr_lo { + union { + struct { + u32 reserved_0: 2; + u32 dest_mode_logical: 1; + u32 redirect_hint: 1; + u32 reserved_1: 1; + u32 virt_destid_8_14: 7; + u32 destid_0_7: 8; + u32 base_address: 12; + }; + struct { + u32 dmar_reserved_0: 2; + u32 dmar_index_15: 1; + u32 dmar_subhandle_valid: 1; + u32 dmar_format: 1; + u32 dmar_index_0_14: 15; + u32 dmar_base_address: 12; + }; + }; +}; + +typedef struct x86_msi_addr_lo arch_msi_msg_addr_lo_t; + +struct x86_msi_addr_hi { + u32 reserved: 8; + u32 destid_8_31: 24; +}; + +typedef struct x86_msi_addr_hi arch_msi_msg_addr_hi_t; + 
+struct x86_msi_data { + u32 vector: 8; + u32 delivery_mode: 3; + u32 dest_mode_logical: 1; + u32 reserved: 2; + u32 active_low: 1; + u32 is_level: 1; + u32 dmar_subhandle; +} __attribute__((packed)); + +typedef struct x86_msi_data arch_msi_msg_data_t; + +struct msi_msg { + union { + u32 address_lo; + arch_msi_msg_addr_lo_t arch_addr_lo; + }; + union { + u32 address_hi; + arch_msi_msg_addr_hi_t arch_addr_hi; + }; + union { + u32 data; + arch_msi_msg_data_t arch_data; + }; +}; + +struct platform_msi_priv_data; + +struct platform_msi_desc { + struct platform_msi_priv_data *msi_priv_data; + u16 msi_index; +}; + +struct fsl_mc_msi_desc { + u16 msi_index; +}; + +struct ti_sci_inta_msi_desc { + u16 dev_index; +}; + +struct msi_desc { + struct list_head list; + unsigned int irq; + unsigned int nvec_used; + struct device *dev; + struct msi_msg msg; + struct irq_affinity_desc *affinity; + const void *iommu_cookie; + void (*write_msi_msg)(struct msi_desc *, void *); + void *write_msi_msg_data; + union { + struct { + u32 masked; + struct { + u8 is_msix: 1; + u8 multiple: 3; + u8 multi_cap: 3; + u8 maskbit: 1; + u8 is_64: 1; + u8 is_virtual: 1; + u16 entry_nr; + unsigned int default_irq; + } msi_attrib; + union { + u8 mask_pos; + void *mask_base; + }; + }; + struct platform_msi_desc platform; + struct fsl_mc_msi_desc fsl_mc; + struct ti_sci_inta_msi_desc inta; + }; +}; + +struct irq_chip { + struct device *parent_device; + const char *name; + unsigned int (*irq_startup)(struct irq_data *); + void (*irq_shutdown)(struct irq_data *); + void (*irq_enable)(struct irq_data *); + void (*irq_disable)(struct irq_data *); + void (*irq_ack)(struct irq_data *); + void (*irq_mask)(struct irq_data *); + void (*irq_mask_ack)(struct irq_data *); + void (*irq_unmask)(struct irq_data *); + void (*irq_eoi)(struct irq_data *); + int (*irq_set_affinity)(struct irq_data *, const struct cpumask *, bool); + int (*irq_retrigger)(struct irq_data *); + int (*irq_set_type)(struct irq_data *, unsigned int); + int (*irq_set_wake)(struct irq_data *, unsigned int); + void (*irq_bus_lock)(struct irq_data *); + void (*irq_bus_sync_unlock)(struct irq_data *); + void (*irq_cpu_online)(struct irq_data *); + void (*irq_cpu_offline)(struct irq_data *); + void (*irq_suspend)(struct irq_data *); + void (*irq_resume)(struct irq_data *); + void (*irq_pm_shutdown)(struct irq_data *); + void (*irq_calc_mask)(struct irq_data *); + void (*irq_print_chip)(struct irq_data *, struct seq_file *); + int (*irq_request_resources)(struct irq_data *); + void (*irq_release_resources)(struct irq_data *); + void (*irq_compose_msi_msg)(struct irq_data *, struct msi_msg *); + void (*irq_write_msi_msg)(struct irq_data *, struct msi_msg *); + int (*irq_get_irqchip_state)(struct irq_data *, enum irqchip_irq_state, bool *); + int (*irq_set_irqchip_state)(struct irq_data *, enum irqchip_irq_state, bool); + int (*irq_set_vcpu_affinity)(struct irq_data *, void *); + void (*ipi_send_single)(struct irq_data *, unsigned int); + void (*ipi_send_mask)(struct irq_data *, const struct cpumask *); + int (*irq_nmi_setup)(struct irq_data *); + void (*irq_nmi_teardown)(struct irq_data *); + long unsigned int flags; +}; + +struct irq_chip_regs { + long unsigned int enable; + long unsigned int disable; + long unsigned int mask; + long unsigned int ack; + long unsigned int eoi; + long unsigned int type; + long unsigned int polarity; +}; + +struct irq_chip_type { + struct irq_chip chip; + struct irq_chip_regs regs; + irq_flow_handler_t handler; + u32 type; + u32 mask_cache_priv; + 
u32 *mask_cache; +}; + +struct irq_chip_generic { + raw_spinlock_t lock; + void *reg_base; + u32 (*reg_readl)(void *); + void (*reg_writel)(u32, void *); + void (*suspend)(struct irq_chip_generic *); + void (*resume)(struct irq_chip_generic *); + unsigned int irq_base; + unsigned int irq_cnt; + u32 mask_cache; + u32 type_cache; + u32 polarity_cache; + u32 wake_enabled; + u32 wake_active; + unsigned int num_ct; + void *private; + long unsigned int installed; + long unsigned int unused; + struct irq_domain *domain; + struct list_head list; + struct irq_chip_type chip_types[0]; +}; + +enum irq_gc_flags { + IRQ_GC_INIT_MASK_CACHE = 1, + IRQ_GC_INIT_NESTED_LOCK = 2, + IRQ_GC_MASK_CACHE_PER_TYPE = 4, + IRQ_GC_NO_MASK = 8, + IRQ_GC_BE_IO = 16, +}; + +struct irq_domain_chip_generic { + unsigned int irqs_per_chip; + unsigned int num_chips; + unsigned int irq_flags_to_clear; + unsigned int irq_flags_to_set; + enum irq_gc_flags gc_flags; + struct irq_chip_generic *gc[0]; +}; + +struct alt_instr { + s32 instr_offset; + s32 repl_offset; + u16 cpuid; + u8 instrlen; + u8 replacementlen; +}; + +struct timens_offset { + s64 sec; + u64 nsec; +}; + +enum vm_fault_reason { + VM_FAULT_OOM = 1, + VM_FAULT_SIGBUS = 2, + VM_FAULT_MAJOR = 4, + VM_FAULT_WRITE = 8, + VM_FAULT_HWPOISON = 16, + VM_FAULT_HWPOISON_LARGE = 32, + VM_FAULT_SIGSEGV = 64, + VM_FAULT_NOPAGE = 256, + VM_FAULT_LOCKED = 512, + VM_FAULT_RETRY = 1024, + VM_FAULT_FALLBACK = 2048, + VM_FAULT_DONE_COW = 4096, + VM_FAULT_NEEDDSYNC = 8192, + VM_FAULT_HINDEX_MASK = 983040, +}; + +struct vm_special_mapping { + const char *name; + struct page **pages; + vm_fault_t (*fault)(const struct vm_special_mapping *, struct vm_area_struct *, struct vm_fault *); + int (*mremap)(const struct vm_special_mapping *, struct vm_area_struct *); +}; + +struct timens_offsets { + struct timespec64 monotonic; + struct timespec64 boottime; +}; + +struct time_namespace { + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct ns_common ns; + struct timens_offsets offsets; + struct page *vvar_page; + bool frozen_offsets; +}; + +struct pvclock_vcpu_time_info { + u32 version; + u32 pad0; + u64 tsc_timestamp; + u64 system_time; + u32 tsc_to_system_mul; + s8 tsc_shift; + u8 flags; + u8 pad[2]; +}; + +struct pvclock_vsyscall_time_info { + struct pvclock_vcpu_time_info pvti; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +enum vdso_clock_mode { + VDSO_CLOCKMODE_NONE = 0, + VDSO_CLOCKMODE_TSC = 1, + VDSO_CLOCKMODE_PVCLOCK = 2, + VDSO_CLOCKMODE_HVCLOCK = 3, + VDSO_CLOCKMODE_MAX = 4, + VDSO_CLOCKMODE_TIMENS = 2147483647, +}; + +struct arch_vdso_data {}; + +struct vdso_timestamp { + u64 sec; + u64 nsec; +}; + +struct vdso_data { + u32 seq; + s32 clock_mode; + u64 cycle_last; + u64 mask; + u32 mult; + u32 shift; + union { + struct vdso_timestamp basetime[12]; + struct timens_offset offset[12]; + }; + s32 tz_minuteswest; + s32 tz_dsttime; + u32 hrtimer_res; + u32 __unused; + struct arch_vdso_data arch_data; +}; + +struct ms_hyperv_tsc_page { + volatile u32 tsc_sequence; + u32 reserved1; + volatile u64 tsc_scale; + volatile s64 tsc_offset; +}; + +enum { + TASKSTATS_CMD_UNSPEC = 0, + TASKSTATS_CMD_GET = 1, + TASKSTATS_CMD_NEW = 2, + __TASKSTATS_CMD_MAX = 3, +}; + +enum ucount_type { + UCOUNT_USER_NAMESPACES = 0, + UCOUNT_PID_NAMESPACES = 1, + UCOUNT_UTS_NAMESPACES = 2, + UCOUNT_IPC_NAMESPACES = 3, + UCOUNT_NET_NAMESPACES = 4, + UCOUNT_MNT_NAMESPACES = 5, + UCOUNT_CGROUP_NAMESPACES = 6, + UCOUNT_TIME_NAMESPACES = 7, + UCOUNT_INOTIFY_INSTANCES = 8, + UCOUNT_INOTIFY_WATCHES = 
9, + UCOUNT_FANOTIFY_GROUPS = 10, + UCOUNT_FANOTIFY_MARKS = 11, + UCOUNT_COUNTS = 12, +}; + +enum { + HI_SOFTIRQ = 0, + TIMER_SOFTIRQ = 1, + NET_TX_SOFTIRQ = 2, + NET_RX_SOFTIRQ = 3, + BLOCK_SOFTIRQ = 4, + IRQ_POLL_SOFTIRQ = 5, + TASKLET_SOFTIRQ = 6, + SCHED_SOFTIRQ = 7, + HRTIMER_SOFTIRQ = 8, + RCU_SOFTIRQ = 9, + NR_SOFTIRQS = 10, +}; + +enum cpu_usage_stat { + CPUTIME_USER = 0, + CPUTIME_NICE = 1, + CPUTIME_SYSTEM = 2, + CPUTIME_SOFTIRQ = 3, + CPUTIME_IRQ = 4, + CPUTIME_IDLE = 5, + CPUTIME_IOWAIT = 6, + CPUTIME_STEAL = 7, + CPUTIME_GUEST = 8, + CPUTIME_GUEST_NICE = 9, + NR_STATS = 10, +}; + +enum bpf_cgroup_storage_type { + BPF_CGROUP_STORAGE_SHARED = 0, + BPF_CGROUP_STORAGE_PERCPU = 1, + __BPF_CGROUP_STORAGE_MAX = 2, +}; + +enum bpf_tramp_prog_type { + BPF_TRAMP_FENTRY = 0, + BPF_TRAMP_FEXIT = 1, + BPF_TRAMP_MODIFY_RETURN = 2, + BPF_TRAMP_MAX = 3, + BPF_TRAMP_REPLACE = 4, +}; + +enum psi_task_count { + NR_IOWAIT = 0, + NR_MEMSTALL = 1, + NR_RUNNING = 2, + NR_ONCPU = 3, + NR_PSI_TASK_COUNTS = 4, +}; + +enum psi_states { + PSI_IO_SOME = 0, + PSI_IO_FULL = 1, + PSI_MEM_SOME = 2, + PSI_MEM_FULL = 3, + PSI_CPU_SOME = 4, + PSI_CPU_FULL = 5, + PSI_NONIDLE = 6, + NR_PSI_STATES = 7, +}; + +enum psi_aggregators { + PSI_AVGS = 0, + PSI_POLL = 1, + NR_PSI_AGGREGATORS = 2, +}; + +enum cgroup_subsys_id { + cpuset_cgrp_id = 0, + cpu_cgrp_id = 1, + cpuacct_cgrp_id = 2, + io_cgrp_id = 3, + memory_cgrp_id = 4, + devices_cgrp_id = 5, + freezer_cgrp_id = 6, + net_cls_cgrp_id = 7, + perf_event_cgrp_id = 8, + net_prio_cgrp_id = 9, + hugetlb_cgrp_id = 10, + pids_cgrp_id = 11, + rdma_cgrp_id = 12, + misc_cgrp_id = 13, + CGROUP_SUBSYS_COUNT = 14, +}; + +struct vdso_exception_table_entry { + int insn; + int fixup; +}; + +struct cpuinfo_x86 { + __u8 x86; + __u8 x86_vendor; + __u8 x86_model; + __u8 x86_stepping; + int x86_tlbsize; + __u32 vmx_capability[3]; + __u8 x86_virt_bits; + __u8 x86_phys_bits; + __u8 x86_coreid_bits; + __u8 cu_id; + __u32 extended_cpuid_level; + int cpuid_level; + union { + __u32 x86_capability[21]; + long unsigned int x86_capability_alignment; + }; + char x86_vendor_id[16]; + char x86_model_id[64]; + unsigned int x86_cache_size; + int x86_cache_alignment; + int x86_cache_max_rmid; + int x86_cache_occ_scale; + int x86_cache_mbm_width_offset; + int x86_power; + long unsigned int loops_per_jiffy; + u16 x86_max_cores; + u16 apicid; + u16 initial_apicid; + u16 x86_clflush_size; + u16 booted_cores; + u16 phys_proc_id; + u16 logical_proc_id; + u16 cpu_core_id; + u16 cpu_die_id; + u16 logical_die_id; + u16 cpu_index; + u32 microcode; + u8 x86_cache_bits; + unsigned int initialized: 1; +}; + +enum syscall_work_bit { + SYSCALL_WORK_BIT_SECCOMP = 0, + SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT = 1, + SYSCALL_WORK_BIT_SYSCALL_TRACE = 2, + SYSCALL_WORK_BIT_SYSCALL_EMU = 3, + SYSCALL_WORK_BIT_SYSCALL_AUDIT = 4, + SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH = 5, + SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP = 6, +}; + +struct seccomp_data { + int nr; + __u32 arch; + __u64 instruction_pointer; + __u64 args[6]; +}; + +enum x86_pf_error_code { + X86_PF_PROT = 1, + X86_PF_WRITE = 2, + X86_PF_USER = 4, + X86_PF_RSVD = 8, + X86_PF_INSTR = 16, + X86_PF_PK = 32, + X86_PF_SGX = 32768, +}; + +struct trace_event_raw_emulate_vsyscall { + struct trace_entry ent; + int nr; + char __data[0]; +}; + +struct trace_event_data_offsets_emulate_vsyscall {}; + +typedef void (*btf_trace_emulate_vsyscall)(void *, int); + +enum { + EMULATE = 0, + XONLY = 1, + NONE = 2, +}; + +enum perf_type_id { + PERF_TYPE_HARDWARE = 0, + PERF_TYPE_SOFTWARE = 1, + 
PERF_TYPE_TRACEPOINT = 2, + PERF_TYPE_HW_CACHE = 3, + PERF_TYPE_RAW = 4, + PERF_TYPE_BREAKPOINT = 5, + PERF_TYPE_MAX = 6, +}; + +enum perf_hw_id { + PERF_COUNT_HW_CPU_CYCLES = 0, + PERF_COUNT_HW_INSTRUCTIONS = 1, + PERF_COUNT_HW_CACHE_REFERENCES = 2, + PERF_COUNT_HW_CACHE_MISSES = 3, + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, + PERF_COUNT_HW_BRANCH_MISSES = 5, + PERF_COUNT_HW_BUS_CYCLES = 6, + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, + PERF_COUNT_HW_REF_CPU_CYCLES = 9, + PERF_COUNT_HW_MAX = 10, +}; + +enum perf_hw_cache_id { + PERF_COUNT_HW_CACHE_L1D = 0, + PERF_COUNT_HW_CACHE_L1I = 1, + PERF_COUNT_HW_CACHE_LL = 2, + PERF_COUNT_HW_CACHE_DTLB = 3, + PERF_COUNT_HW_CACHE_ITLB = 4, + PERF_COUNT_HW_CACHE_BPU = 5, + PERF_COUNT_HW_CACHE_NODE = 6, + PERF_COUNT_HW_CACHE_MAX = 7, +}; + +enum perf_hw_cache_op_id { + PERF_COUNT_HW_CACHE_OP_READ = 0, + PERF_COUNT_HW_CACHE_OP_WRITE = 1, + PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, + PERF_COUNT_HW_CACHE_OP_MAX = 3, +}; + +enum perf_hw_cache_op_result_id { + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, + PERF_COUNT_HW_CACHE_RESULT_MISS = 1, + PERF_COUNT_HW_CACHE_RESULT_MAX = 2, +}; + +enum perf_event_sample_format { + PERF_SAMPLE_IP = 1, + PERF_SAMPLE_TID = 2, + PERF_SAMPLE_TIME = 4, + PERF_SAMPLE_ADDR = 8, + PERF_SAMPLE_READ = 16, + PERF_SAMPLE_CALLCHAIN = 32, + PERF_SAMPLE_ID = 64, + PERF_SAMPLE_CPU = 128, + PERF_SAMPLE_PERIOD = 256, + PERF_SAMPLE_STREAM_ID = 512, + PERF_SAMPLE_RAW = 1024, + PERF_SAMPLE_BRANCH_STACK = 2048, + PERF_SAMPLE_REGS_USER = 4096, + PERF_SAMPLE_STACK_USER = 8192, + PERF_SAMPLE_WEIGHT = 16384, + PERF_SAMPLE_DATA_SRC = 32768, + PERF_SAMPLE_IDENTIFIER = 65536, + PERF_SAMPLE_TRANSACTION = 131072, + PERF_SAMPLE_REGS_INTR = 262144, + PERF_SAMPLE_PHYS_ADDR = 524288, + PERF_SAMPLE_AUX = 1048576, + PERF_SAMPLE_CGROUP = 2097152, + PERF_SAMPLE_DATA_PAGE_SIZE = 4194304, + PERF_SAMPLE_CODE_PAGE_SIZE = 8388608, + PERF_SAMPLE_WEIGHT_STRUCT = 16777216, + PERF_SAMPLE_MAX = 33554432, + __PERF_SAMPLE_CALLCHAIN_EARLY = 0, +}; + +enum perf_branch_sample_type { + PERF_SAMPLE_BRANCH_USER = 1, + PERF_SAMPLE_BRANCH_KERNEL = 2, + PERF_SAMPLE_BRANCH_HV = 4, + PERF_SAMPLE_BRANCH_ANY = 8, + PERF_SAMPLE_BRANCH_ANY_CALL = 16, + PERF_SAMPLE_BRANCH_ANY_RETURN = 32, + PERF_SAMPLE_BRANCH_IND_CALL = 64, + PERF_SAMPLE_BRANCH_ABORT_TX = 128, + PERF_SAMPLE_BRANCH_IN_TX = 256, + PERF_SAMPLE_BRANCH_NO_TX = 512, + PERF_SAMPLE_BRANCH_COND = 1024, + PERF_SAMPLE_BRANCH_CALL_STACK = 2048, + PERF_SAMPLE_BRANCH_IND_JUMP = 4096, + PERF_SAMPLE_BRANCH_CALL = 8192, + PERF_SAMPLE_BRANCH_NO_FLAGS = 16384, + PERF_SAMPLE_BRANCH_NO_CYCLES = 32768, + PERF_SAMPLE_BRANCH_TYPE_SAVE = 65536, + PERF_SAMPLE_BRANCH_HW_INDEX = 131072, + PERF_SAMPLE_BRANCH_MAX = 262144, +}; + +struct perf_event_mmap_page { + __u32 version; + __u32 compat_version; + __u32 lock; + __u32 index; + __s64 offset; + __u64 time_enabled; + __u64 time_running; + union { + __u64 capabilities; + struct { + __u64 cap_bit0: 1; + __u64 cap_bit0_is_deprecated: 1; + __u64 cap_user_rdpmc: 1; + __u64 cap_user_time: 1; + __u64 cap_user_time_zero: 1; + __u64 cap_user_time_short: 1; + __u64 cap_____res: 58; + }; + }; + __u16 pmc_width; + __u16 time_shift; + __u32 time_mult; + __u64 time_offset; + __u64 time_zero; + __u32 size; + __u32 __reserved_1; + __u64 time_cycles; + __u64 time_mask; + __u8 __reserved[928]; + __u64 data_head; + __u64 data_tail; + __u64 data_offset; + __u64 data_size; + __u64 aux_head; + __u64 aux_tail; + __u64 aux_offset; + __u64 aux_size; +}; + +struct pv_info { + u16 
extra_user_64bit_cs; + const char *name; +}; + +typedef bool (*smp_cond_func_t)(int, void *); + +struct ldt_struct { + struct desc_struct *entries; + unsigned int nr_entries; + int slot; +}; + +enum apic_delivery_modes { + APIC_DELIVERY_MODE_FIXED = 0, + APIC_DELIVERY_MODE_LOWESTPRIO = 1, + APIC_DELIVERY_MODE_SMI = 2, + APIC_DELIVERY_MODE_NMI = 4, + APIC_DELIVERY_MODE_INIT = 5, + APIC_DELIVERY_MODE_EXTINT = 7, +}; + +struct physid_mask { + long unsigned int mask[512]; +}; + +typedef struct physid_mask physid_mask_t; + +struct x86_pmu_capability { + int version; + int num_counters_gp; + int num_counters_fixed; + int bit_width_gp; + int bit_width_fixed; + unsigned int events_mask; + int events_mask_len; +}; + +struct debug_store { + u64 bts_buffer_base; + u64 bts_index; + u64 bts_absolute_maximum; + u64 bts_interrupt_threshold; + u64 pebs_buffer_base; + u64 pebs_index; + u64 pebs_absolute_maximum; + u64 pebs_interrupt_threshold; + u64 pebs_event_reset[12]; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + 
long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +enum stack_type { + STACK_TYPE_UNKNOWN = 0, + STACK_TYPE_TASK = 1, + STACK_TYPE_IRQ = 2, + STACK_TYPE_SOFTIRQ = 3, + STACK_TYPE_ENTRY = 4, + STACK_TYPE_EXCEPTION = 5, + STACK_TYPE_EXCEPTION_LAST = 10, +}; + +struct stack_info 
{ + enum stack_type type; + long unsigned int *begin; + long unsigned int *end; + long unsigned int *next_sp; +}; + +struct stack_frame { + struct stack_frame *next_frame; + long unsigned int return_address; +}; + +struct stack_frame_ia32 { + u32 next_frame; + u32 return_address; +}; + +struct perf_guest_switch_msr { + unsigned int msr; + u64 host; + u64 guest; +}; + +struct perf_guest_info_callbacks { + int (*is_in_guest)(); + int (*is_user_mode)(); + long unsigned int (*get_guest_ip)(); + void (*handle_intel_pt_intr)(); +}; + +struct device_attribute { + struct attribute attr; + ssize_t (*show)(struct device *, struct device_attribute *, char *); + ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t); +}; + +enum perf_event_x86_regs { + PERF_REG_X86_AX = 0, + PERF_REG_X86_BX = 1, + PERF_REG_X86_CX = 2, + PERF_REG_X86_DX = 3, + PERF_REG_X86_SI = 4, + PERF_REG_X86_DI = 5, + PERF_REG_X86_BP = 6, + PERF_REG_X86_SP = 7, + PERF_REG_X86_IP = 8, + PERF_REG_X86_FLAGS = 9, + PERF_REG_X86_CS = 10, + PERF_REG_X86_SS = 11, + PERF_REG_X86_DS = 12, + PERF_REG_X86_ES = 13, + PERF_REG_X86_FS = 14, + PERF_REG_X86_GS = 15, + PERF_REG_X86_R8 = 16, + PERF_REG_X86_R9 = 17, + PERF_REG_X86_R10 = 18, + PERF_REG_X86_R11 = 19, + PERF_REG_X86_R12 = 20, + PERF_REG_X86_R13 = 21, + PERF_REG_X86_R14 = 22, + PERF_REG_X86_R15 = 23, + PERF_REG_X86_32_MAX = 16, + PERF_REG_X86_64_MAX = 24, + PERF_REG_X86_XMM0 = 32, + PERF_REG_X86_XMM1 = 34, + PERF_REG_X86_XMM2 = 36, + PERF_REG_X86_XMM3 = 38, + PERF_REG_X86_XMM4 = 40, + PERF_REG_X86_XMM5 = 42, + PERF_REG_X86_XMM6 = 44, + PERF_REG_X86_XMM7 = 46, + PERF_REG_X86_XMM8 = 48, + PERF_REG_X86_XMM9 = 50, + PERF_REG_X86_XMM10 = 52, + PERF_REG_X86_XMM11 = 54, + PERF_REG_X86_XMM12 = 56, + PERF_REG_X86_XMM13 = 58, + PERF_REG_X86_XMM14 = 60, + PERF_REG_X86_XMM15 = 62, + PERF_REG_X86_XMM_MAX = 64, +}; + +struct perf_callchain_entry_ctx { + struct perf_callchain_entry *entry; + u32 max_stack; + u32 nr; + short int contexts; + bool contexts_maxed; +}; + +struct perf_pmu_events_attr { + struct device_attribute attr; + u64 id; + const char *event_str; +}; + +struct perf_pmu_events_ht_attr { + struct device_attribute attr; + u64 id; + const char *event_str_ht; + const char *event_str_noht; +}; + +struct perf_pmu_events_hybrid_attr { + struct device_attribute attr; + u64 id; + const char *event_str; + u64 pmu_type; +}; + +struct apic { + void (*eoi_write)(u32, u32); + void (*native_eoi_write)(u32, u32); + void (*write)(u32, u32); + u32 (*read)(u32); + void (*wait_icr_idle)(); + u32 (*safe_wait_icr_idle)(); + void (*send_IPI)(int, int); + void (*send_IPI_mask)(const struct cpumask *, int); + void (*send_IPI_mask_allbutself)(const struct cpumask *, int); + void (*send_IPI_allbutself)(int); + void (*send_IPI_all)(int); + void (*send_IPI_self)(int); + u32 disable_esr; + enum apic_delivery_modes delivery_mode; + bool dest_mode_logical; + u32 (*calc_dest_apicid)(unsigned int); + u64 (*icr_read)(); + void (*icr_write)(u32, u32); + int (*probe)(); + int (*acpi_madt_oem_check)(char *, char *); + int (*apic_id_valid)(u32); + int (*apic_id_registered)(); + bool (*check_apicid_used)(physid_mask_t *, int); + void (*init_apic_ldr)(); + void (*ioapic_phys_id_map)(physid_mask_t *, physid_mask_t *); + void (*setup_apic_routing)(); + int (*cpu_present_to_apicid)(int); + void (*apicid_to_cpu_present)(int, physid_mask_t *); + int (*check_phys_apicid_present)(int); + int (*phys_pkg_id)(int, int); + u32 (*get_apic_id)(long unsigned int); + u32 (*set_apic_id)(unsigned int); + int 
(*wakeup_secondary_cpu)(int, long unsigned int); + void (*inquire_remote_apic)(int); + char *name; +}; + +enum { + NMI_LOCAL = 0, + NMI_UNKNOWN = 1, + NMI_SERR = 2, + NMI_IO_CHECK = 3, + NMI_MAX = 4, +}; + +typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *); + +struct nmiaction { + struct list_head list; + nmi_handler_t handler; + u64 max_duration; + long unsigned int flags; + const char *name; +}; + +struct gdt_page { + struct desc_struct gdt[16]; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 
64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct cyc2ns_data { + u32 cyc2ns_mul; + u32 cyc2ns_shift; + u64 cyc2ns_offset; +}; + +struct unwind_state { + struct stack_info stack_info; + long unsigned int stack_mask; + struct task_struct *task; + int graph_idx; + bool error; + bool got_irq; + long unsigned int *bp; + long unsigned int *orig_sp; + long unsigned int ip; + long unsigned int *next_bp; + struct pt_regs *regs; +}; + +enum extra_reg_type { + EXTRA_REG_NONE = 4294967295, + EXTRA_REG_RSP_0 = 0, + EXTRA_REG_RSP_1 = 1, + EXTRA_REG_LBR = 2, + EXTRA_REG_LDLAT = 3, + EXTRA_REG_FE = 4, + EXTRA_REG_MAX = 5, +}; + +struct event_constraint { + union { + long unsigned int idxmsk[1]; + u64 idxmsk64; + }; + u64 code; + u64 
cmask; + int weight; + int overlap; + int flags; + unsigned int size; +}; + +struct amd_nb { + int nb_id; + int refcnt; + struct perf_event *owners[64]; + struct event_constraint event_constraints[64]; +}; + +struct er_account { + raw_spinlock_t lock; + u64 config; + u64 reg; + atomic_t ref; +}; + +struct intel_shared_regs { + struct er_account regs[5]; + int refcnt; + unsigned int core_id; +}; + +enum intel_excl_state_type { + INTEL_EXCL_UNUSED = 0, + INTEL_EXCL_SHARED = 1, + INTEL_EXCL_EXCLUSIVE = 2, +}; + +struct intel_excl_states { + enum intel_excl_state_type state[64]; + bool sched_started; +}; + +struct intel_excl_cntrs { + raw_spinlock_t lock; + struct intel_excl_states states[2]; + union { + u16 has_exclusive[2]; + u32 exclusive_present; + }; + int refcnt; + unsigned int core_id; +}; + +enum { + X86_PERF_KFREE_SHARED = 0, + X86_PERF_KFREE_EXCL = 1, + X86_PERF_KFREE_MAX = 2, +}; + +struct cpu_hw_events { + struct perf_event *events[64]; + long unsigned int active_mask[1]; + long unsigned int dirty[1]; + int enabled; + int n_events; + int n_added; + int n_txn; + int n_txn_pair; + int n_txn_metric; + int assign[64]; + u64 tags[64]; + struct perf_event *event_list[64]; + struct event_constraint *event_constraint[64]; + int n_excl; + unsigned int txn_flags; + int is_fake; + struct debug_store *ds; + void *ds_pebs_vaddr; + void *ds_bts_vaddr; + u64 pebs_enabled; + int n_pebs; + int n_large_pebs; + int n_pebs_via_pt; + int pebs_output; + u64 pebs_data_cfg; + u64 active_pebs_data_cfg; + int pebs_record_size; + int lbr_users; + int lbr_pebs_users; + struct perf_branch_stack lbr_stack; + struct perf_branch_entry lbr_entries[32]; + union { + struct er_account *lbr_sel; + struct er_account *lbr_ctl; + }; + u64 br_sel; + void *last_task_ctx; + int last_log_id; + int lbr_select; + void *lbr_xsave; + u64 intel_ctrl_guest_mask; + u64 intel_ctrl_host_mask; + struct perf_guest_switch_msr guest_switch_msrs[64]; + u64 intel_cp_status; + struct intel_shared_regs *shared_regs; + struct event_constraint *constraint_list; + struct intel_excl_cntrs *excl_cntrs; + int excl_thread_id; + u64 tfa_shadow; + int n_metric; + struct amd_nb *amd_nb; + u64 perf_ctr_virt_mask; + int n_pair; + void *kfree_on_online[2]; + struct pmu *pmu; +}; + +struct extra_reg { + unsigned int event; + unsigned int msr; + u64 config_mask; + u64 valid_mask; + int idx; + bool extra_msr_access; +}; + +union perf_capabilities { + struct { + u64 lbr_format: 6; + u64 pebs_trap: 1; + u64 pebs_arch_reg: 1; + u64 pebs_format: 4; + u64 smm_freeze: 1; + u64 full_width_write: 1; + u64 pebs_baseline: 1; + u64 perf_metrics: 1; + u64 pebs_output_pt_available: 1; + u64 anythread_deprecated: 1; + }; + u64 capabilities; +}; + +struct x86_pmu_quirk { + struct x86_pmu_quirk *next; + void (*func)(); +}; + +enum { + x86_lbr_exclusive_lbr = 0, + x86_lbr_exclusive_bts = 1, + x86_lbr_exclusive_pt = 2, + x86_lbr_exclusive_max = 3, +}; + +struct x86_hybrid_pmu { + struct pmu pmu; + const char *name; + u8 cpu_type; + cpumask_t supported_cpus; + union perf_capabilities intel_cap; + u64 intel_ctrl; + int max_pebs_events; + int num_counters; + int num_counters_fixed; + struct event_constraint unconstrained; + u64 hw_cache_event_ids[42]; + u64 hw_cache_extra_regs[42]; + struct event_constraint *event_constraints; + struct event_constraint *pebs_constraints; + struct extra_reg *extra_regs; + unsigned int late_ack: 1; + unsigned int mid_ack: 1; + unsigned int enabled_ack: 1; +}; + +enum hybrid_pmu_type { + hybrid_big = 64, + hybrid_small = 32, + hybrid_big_small = 
96, +}; + +struct x86_pmu { + const char *name; + int version; + int (*handle_irq)(struct pt_regs *); + void (*disable_all)(); + void (*enable_all)(int); + void (*enable)(struct perf_event *); + void (*disable)(struct perf_event *); + void (*add)(struct perf_event *); + void (*del)(struct perf_event *); + void (*read)(struct perf_event *); + int (*hw_config)(struct perf_event *); + int (*schedule_events)(struct cpu_hw_events *, int, int *); + unsigned int eventsel; + unsigned int perfctr; + int (*addr_offset)(int, bool); + int (*rdpmc_index)(int); + u64 (*event_map)(int); + int max_events; + int num_counters; + int num_counters_fixed; + int cntval_bits; + u64 cntval_mask; + union { + long unsigned int events_maskl; + long unsigned int events_mask[1]; + }; + int events_mask_len; + int apic; + u64 max_period; + struct event_constraint * (*get_event_constraints)(struct cpu_hw_events *, int, struct perf_event *); + void (*put_event_constraints)(struct cpu_hw_events *, struct perf_event *); + void (*start_scheduling)(struct cpu_hw_events *); + void (*commit_scheduling)(struct cpu_hw_events *, int, int); + void (*stop_scheduling)(struct cpu_hw_events *); + struct event_constraint *event_constraints; + struct x86_pmu_quirk *quirks; + int perfctr_second_write; + u64 (*limit_period)(struct perf_event *, u64); + unsigned int late_ack: 1; + unsigned int mid_ack: 1; + unsigned int enabled_ack: 1; + int attr_rdpmc_broken; + int attr_rdpmc; + struct attribute **format_attrs; + ssize_t (*events_sysfs_show)(char *, u64); + const struct attribute_group **attr_update; + long unsigned int attr_freeze_on_smi; + int (*cpu_prepare)(int); + void (*cpu_starting)(int); + void (*cpu_dying)(int); + void (*cpu_dead)(int); + void (*check_microcode)(); + void (*sched_task)(struct perf_event_context *, bool); + u64 intel_ctrl; + union perf_capabilities intel_cap; + unsigned int bts: 1; + unsigned int bts_active: 1; + unsigned int pebs: 1; + unsigned int pebs_active: 1; + unsigned int pebs_broken: 1; + unsigned int pebs_prec_dist: 1; + unsigned int pebs_no_tlb: 1; + unsigned int pebs_no_isolation: 1; + unsigned int pebs_block: 1; + int pebs_record_size; + int pebs_buffer_size; + int max_pebs_events; + void (*drain_pebs)(struct pt_regs *, struct perf_sample_data *); + struct event_constraint *pebs_constraints; + void (*pebs_aliases)(struct perf_event *); + long unsigned int large_pebs_flags; + u64 rtm_abort_event; + unsigned int lbr_tos; + unsigned int lbr_from; + unsigned int lbr_to; + unsigned int lbr_info; + unsigned int lbr_nr; + union { + u64 lbr_sel_mask; + u64 lbr_ctl_mask; + }; + union { + const int *lbr_sel_map; + int *lbr_ctl_map; + }; + bool lbr_double_abort; + bool lbr_pt_coexist; + unsigned int lbr_depth_mask: 8; + unsigned int lbr_deep_c_reset: 1; + unsigned int lbr_lip: 1; + unsigned int lbr_cpl: 1; + unsigned int lbr_filter: 1; + unsigned int lbr_call_stack: 1; + unsigned int lbr_mispred: 1; + unsigned int lbr_timed_lbr: 1; + unsigned int lbr_br_type: 1; + void (*lbr_reset)(); + void (*lbr_read)(struct cpu_hw_events *); + void (*lbr_save)(void *); + void (*lbr_restore)(void *); + atomic_t lbr_exclusive[3]; + int num_topdown_events; + u64 (*update_topdown_event)(struct perf_event *); + int (*set_topdown_event_period)(struct perf_event *); + void (*swap_task_ctx)(struct perf_event_context *, struct perf_event_context *); + unsigned int amd_nb_constraints: 1; + u64 perf_ctr_pair_en; + struct extra_reg *extra_regs; + unsigned int flags; + struct perf_guest_switch_msr * (*guest_get_msrs)(int *); + int 
(*check_period)(struct perf_event *, u64); + int (*aux_output_match)(struct perf_event *); + int (*filter_match)(struct perf_event *); + int num_hybrid_pmus; + struct x86_hybrid_pmu *hybrid_pmu; + u8 (*get_hybrid_cpu_type)(); +}; + +struct sched_state { + int weight; + int event; + int counter; + int unassigned; + int nr_gp; + u64 used; +}; + +struct perf_sched { + int max_weight; + int max_events; + int max_gp; + int saved_states; + struct event_constraint **constraints; + struct sched_state state; + struct sched_state saved[2]; +}; + +struct perf_msr { + u64 msr; + struct attribute_group *grp; + bool (*test)(int, void *); + bool no_check; + u64 mask; +}; + +struct amd_uncore { + int id; + int refcnt; + int cpu; + int num_counters; + int rdpmc_base; + u32 msr_base; + cpumask_t *active_mask; + struct pmu *pmu; + struct perf_event *events[6]; + struct hlist_node node; +}; + +typedef int pci_power_t; + +typedef unsigned int pci_channel_state_t; + +typedef short unsigned int pci_dev_flags_t; + +struct pci_bus; + +struct pci_slot; + +struct aer_stats; + +struct rcec_ea; + +struct pci_driver; + +struct pcie_link_state; + +struct pci_vpd; + +struct pci_sriov; + +struct pci_dev { + struct list_head bus_list; + struct pci_bus *bus; + struct pci_bus *subordinate; + void *sysdata; + struct proc_dir_entry *procent; + struct pci_slot *slot; + unsigned int devfn; + short unsigned int vendor; + short unsigned int device; + short unsigned int subsystem_vendor; + short unsigned int subsystem_device; + unsigned int class; + u8 revision; + u8 hdr_type; + u16 aer_cap; + struct aer_stats *aer_stats; + struct rcec_ea *rcec_ea; + struct pci_dev *rcec; + u8 pcie_cap; + u8 msi_cap; + u8 msix_cap; + u8 pcie_mpss: 3; + u8 rom_base_reg; + u8 pin; + u16 pcie_flags_reg; + long unsigned int *dma_alias_mask; + struct pci_driver *driver; + u64 dma_mask; + struct device_dma_parameters dma_parms; + pci_power_t current_state; + unsigned int imm_ready: 1; + u8 pm_cap; + unsigned int pme_support: 5; + unsigned int pme_poll: 1; + unsigned int d1_support: 1; + unsigned int d2_support: 1; + unsigned int no_d1d2: 1; + unsigned int no_d3cold: 1; + unsigned int bridge_d3: 1; + unsigned int d3cold_allowed: 1; + unsigned int mmio_always_on: 1; + unsigned int wakeup_prepared: 1; + unsigned int runtime_d3cold: 1; + unsigned int skip_bus_pm: 1; + unsigned int ignore_hotplug: 1; + unsigned int hotplug_user_indicators: 1; + unsigned int clear_retrain_link: 1; + unsigned int d3hot_delay; + unsigned int d3cold_delay; + struct pcie_link_state *link_state; + unsigned int ltr_path: 1; + u16 l1ss; + unsigned int eetlp_prefix_path: 1; + pci_channel_state_t error_state; + struct device dev; + int cfg_size; + unsigned int irq; + struct resource resource[17]; + bool match_driver; + unsigned int transparent: 1; + unsigned int io_window: 1; + unsigned int pref_window: 1; + unsigned int pref_64_window: 1; + unsigned int multifunction: 1; + unsigned int is_busmaster: 1; + unsigned int no_msi: 1; + unsigned int no_64bit_msi: 1; + unsigned int block_cfg_access: 1; + unsigned int broken_parity_status: 1; + unsigned int irq_reroute_variant: 2; + unsigned int msi_enabled: 1; + unsigned int msix_enabled: 1; + unsigned int ari_enabled: 1; + unsigned int ats_enabled: 1; + unsigned int pasid_enabled: 1; + unsigned int pri_enabled: 1; + unsigned int is_managed: 1; + unsigned int needs_freset: 1; + unsigned int state_saved: 1; + unsigned int is_physfn: 1; + unsigned int is_virtfn: 1; + unsigned int reset_fn: 1; + unsigned int is_hotplug_bridge: 1; + unsigned int 
shpc_managed: 1; + unsigned int is_thunderbolt: 1; + unsigned int untrusted: 1; + unsigned int external_facing: 1; + unsigned int broken_intx_masking: 1; + unsigned int io_window_1k: 1; + unsigned int irq_managed: 1; + unsigned int non_compliant_bars: 1; + unsigned int is_probed: 1; + unsigned int link_active_reporting: 1; + unsigned int no_vf_scan: 1; + unsigned int no_command_memory: 1; + pci_dev_flags_t dev_flags; + atomic_t enable_cnt; + u32 saved_config_space[16]; + struct hlist_head saved_cap_space; + int rom_attr_enabled; + struct bin_attribute *res_attr[17]; + struct bin_attribute *res_attr_wc[17]; + unsigned int broken_cmd_compl: 1; + unsigned int ptm_root: 1; + unsigned int ptm_enabled: 1; + u8 ptm_granularity; + const struct attribute_group **msi_irq_groups; + struct pci_vpd *vpd; + u16 dpc_cap; + unsigned int dpc_rp_extensions: 1; + u8 dpc_rp_log_size; + union { + struct pci_sriov *sriov; + struct pci_dev *physfn; + }; + u16 ats_cap; + u8 ats_stu; + u16 pri_cap; + u32 pri_reqs_alloc; + unsigned int pasid_required: 1; + u16 pasid_cap; + u16 pasid_features; + u16 acs_cap; + phys_addr_t rom; + size_t romlen; + char *driver_override; + long unsigned int priv_flags; +}; + +struct pci_device_id { + __u32 vendor; + __u32 device; + __u32 subvendor; + __u32 subdevice; + __u32 class; + __u32 class_mask; + kernel_ulong_t driver_data; +}; + +struct hotplug_slot; + +struct pci_slot { + struct pci_bus *bus; + struct list_head list; + struct hotplug_slot *hotplug; + unsigned char number; + struct kobject kobj; +}; + +typedef short unsigned int pci_bus_flags_t; + +struct pci_ops; + +struct pci_bus { + struct list_head node; + struct pci_bus *parent; + struct list_head children; + struct list_head devices; + struct pci_dev *self; + struct list_head slots; + struct resource *resource[4]; + struct list_head resources; + struct resource busn_res; + struct pci_ops *ops; + void *sysdata; + struct proc_dir_entry *procdir; + unsigned char number; + unsigned char primary; + unsigned char max_bus_speed; + unsigned char cur_bus_speed; + char name[48]; + short unsigned int bridge_ctl; + pci_bus_flags_t bus_flags; + struct device *bridge; + struct device dev; + struct bin_attribute *legacy_io; + struct bin_attribute *legacy_mem; + unsigned int is_added: 1; +}; + +enum { + PCI_STD_RESOURCES = 0, + PCI_STD_RESOURCE_END = 5, + PCI_ROM_RESOURCE = 6, + PCI_IOV_RESOURCES = 7, + PCI_IOV_RESOURCE_END = 12, + PCI_BRIDGE_RESOURCES = 13, + PCI_BRIDGE_RESOURCE_END = 16, + PCI_NUM_RESOURCES = 17, + DEVICE_COUNT_RESOURCE = 17, +}; + +typedef unsigned int pcie_reset_state_t; + +struct pci_dynids { + spinlock_t lock; + struct list_head list; +}; + +struct pci_error_handlers; + +struct pci_driver { + struct list_head node; + const char *name; + const struct pci_device_id *id_table; + int (*probe)(struct pci_dev *, const struct pci_device_id *); + void (*remove)(struct pci_dev *); + int (*suspend)(struct pci_dev *, pm_message_t); + int (*resume)(struct pci_dev *); + void (*shutdown)(struct pci_dev *); + int (*sriov_configure)(struct pci_dev *, int); + int (*sriov_set_msix_vec_count)(struct pci_dev *, int); + u32 (*sriov_get_vf_total_msix)(struct pci_dev *); + const struct pci_error_handlers *err_handler; + const struct attribute_group **groups; + struct device_driver driver; + struct pci_dynids dynids; +}; + +struct pci_ops { + int (*add_bus)(struct pci_bus *); + void (*remove_bus)(struct pci_bus *); + void * (*map_bus)(struct pci_bus *, unsigned int, int); + int (*read)(struct pci_bus *, unsigned int, int, int, u32 *); + 
int (*write)(struct pci_bus *, unsigned int, int, int, u32); +}; + +typedef unsigned int pci_ers_result_t; + +struct pci_error_handlers { + pci_ers_result_t (*error_detected)(struct pci_dev *, pci_channel_state_t); + pci_ers_result_t (*mmio_enabled)(struct pci_dev *); + pci_ers_result_t (*slot_reset)(struct pci_dev *); + void (*reset_prepare)(struct pci_dev *); + void (*reset_done)(struct pci_dev *); + void (*resume)(struct pci_dev *); +}; + +struct syscore_ops { + struct list_head node; + int (*suspend)(); + void (*resume)(); + void (*shutdown)(); +}; + +enum ibs_states { + IBS_ENABLED = 0, + IBS_STARTED = 1, + IBS_STOPPING = 2, + IBS_STOPPED = 3, + IBS_MAX_STATES = 4, +}; + +struct cpu_perf_ibs { + struct perf_event *event; + long unsigned int state[1]; +}; + +struct perf_ibs { + struct pmu pmu; + unsigned int msr; + u64 config_mask; + u64 cnt_mask; + u64 enable_mask; + u64 valid_mask; + u64 max_period; + long unsigned int offset_mask[1]; + int offset_max; + unsigned int fetch_count_reset_broken: 1; + unsigned int fetch_ignore_if_zero_rip: 1; + struct cpu_perf_ibs *pcpu; + struct attribute **format_attrs; + struct attribute_group format_group; + const struct attribute_group *attr_groups[2]; + u64 (*get_count)(u64); +}; + +struct perf_ibs_data { + u32 size; + union { + u32 data[0]; + u32 caps; + }; + u64 regs[8]; +}; + +struct amd_iommu; + +struct perf_amd_iommu { + struct list_head list; + struct pmu pmu; + struct amd_iommu *iommu; + char name[16]; + u8 max_banks; + u8 max_counters; + u64 cntr_assign_mask; + raw_spinlock_t lock; +}; + +struct amd_iommu_event_desc { + struct device_attribute attr; + const char *event; +}; + +enum perf_msr_id { + PERF_MSR_TSC = 0, + PERF_MSR_APERF = 1, + PERF_MSR_MPERF = 2, + PERF_MSR_PPERF = 3, + PERF_MSR_SMI = 4, + PERF_MSR_PTSC = 5, + PERF_MSR_IRPERF = 6, + PERF_MSR_THERM = 7, + PERF_MSR_EVENT_MAX = 8, +}; + +struct x86_cpu_desc { + u8 x86_family; + u8 x86_vendor; + u8 x86_model; + u8 x86_stepping; + u32 x86_microcode_rev; +}; + +union cpuid10_eax { + struct { + unsigned int version_id: 8; + unsigned int num_counters: 8; + unsigned int bit_width: 8; + unsigned int mask_length: 8; + } split; + unsigned int full; +}; + +union cpuid10_ebx { + struct { + unsigned int no_unhalted_core_cycles: 1; + unsigned int no_instructions_retired: 1; + unsigned int no_unhalted_reference_cycles: 1; + unsigned int no_llc_reference: 1; + unsigned int no_llc_misses: 1; + unsigned int no_branch_instruction_retired: 1; + unsigned int no_branch_misses_retired: 1; + } split; + unsigned int full; +}; + +union cpuid10_edx { + struct { + unsigned int num_counters_fixed: 5; + unsigned int bit_width_fixed: 8; + unsigned int reserved1: 2; + unsigned int anythread_deprecated: 1; + unsigned int reserved2: 16; + } split; + unsigned int full; +}; + +struct perf_pmu_format_hybrid_attr { + struct device_attribute attr; + u64 pmu_type; +}; + +enum { + LBR_FORMAT_32 = 0, + LBR_FORMAT_LIP = 1, + LBR_FORMAT_EIP = 2, + LBR_FORMAT_EIP_FLAGS = 3, + LBR_FORMAT_EIP_FLAGS2 = 4, + LBR_FORMAT_INFO = 5, + LBR_FORMAT_TIME = 6, + LBR_FORMAT_MAX_KNOWN = 6, +}; + +union x86_pmu_config { + struct { + u64 event: 8; + u64 umask: 8; + u64 usr: 1; + u64 os: 1; + u64 edge: 1; + u64 pc: 1; + u64 interrupt: 1; + u64 __reserved1: 1; + u64 en: 1; + u64 inv: 1; + u64 cmask: 8; + u64 event2: 4; + u64 __reserved2: 4; + u64 go: 1; + u64 ho: 1; + } bits; + u64 value; +}; + +enum pageflags { + PG_locked = 0, + PG_referenced = 1, + PG_uptodate = 2, + PG_dirty = 3, + PG_lru = 4, + PG_active = 5, + PG_workingset = 6, + 
PG_waiters = 7, + PG_error = 8, + PG_slab = 9, + PG_owner_priv_1 = 10, + PG_arch_1 = 11, + PG_reserved = 12, + PG_private = 13, + PG_private_2 = 14, + PG_writeback = 15, + PG_head = 16, + PG_mappedtodisk = 17, + PG_reclaim = 18, + PG_swapbacked = 19, + PG_unevictable = 20, + PG_mlocked = 21, + PG_uncached = 22, + PG_hwpoison = 23, + PG_young = 24, + PG_idle = 25, + PG_arch_2 = 26, + __NR_PAGEFLAGS = 27, + PG_checked = 10, + PG_swapcache = 10, + PG_fscache = 14, + PG_pinned = 10, + PG_savepinned = 3, + PG_foreign = 10, + PG_xen_remapped = 10, + PG_slob_free = 13, + PG_double_map = 6, + PG_isolated = 18, + PG_reported = 2, +}; + +struct bts_ctx { + struct perf_output_handle handle; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 
64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct debug_store ds_back; + int state; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + 
long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + 
long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +enum { + BTS_STATE_STOPPED = 0, + BTS_STATE_INACTIVE = 1, + BTS_STATE_ACTIVE = 2, +}; + +struct bts_phys { + struct page *page; + long unsigned int size; + long unsigned int offset; + long unsigned int displacement; +}; + +struct bts_buffer { + size_t real_size; + unsigned int nr_pages; + unsigned int nr_bufs; + unsigned int cur_buf; + bool snapshot; + local_t data_size; + local_t head; + long unsigned int end; + void **data_pages; + struct bts_phys buf[0]; +}; + +struct lbr_entry { + u64 from; + u64 to; + u64 info; +}; + +struct x86_hw_tss { + u32 reserved1; + u64 sp0; + u64 sp1; + u64 sp2; + u64 reserved2; + u64 ist[7]; + u32 reserved3; + u32 reserved4; + u16 reserved5; + u16 io_bitmap_base; +} __attribute__((packed)); + +struct entry_stack { + char stack[4096]; +}; + +struct entry_stack_page { + struct entry_stack stack; +}; + +struct x86_io_bitmap { + u64 prev_sequence; + unsigned int prev_max; + long unsigned int bitmap[1025]; + long unsigned int mapall[1025]; +}; + +struct tss_struct { + struct x86_hw_tss x86_tss; + struct x86_io_bitmap io_bitmap; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 
64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 
64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct debug_store_buffers { + char bts_buffer[65536]; + char pebs_buffer[65536]; +}; + +struct cea_exception_stacks { + char DF_stack_guard[4096]; + char DF_stack[4096]; + char NMI_stack_guard[4096]; + char NMI_stack[4096]; + char DB_stack_guard[4096]; + char DB_stack[4096]; + char MCE_stack_guard[4096]; + char MCE_stack[4096]; + char VC_stack_guard[4096]; + char VC_stack[4096]; + char VC2_stack_guard[4096]; + char VC2_stack[4096]; + char IST_top_guard[4096]; +}; + +struct cpu_entry_area { + char gdt[4096]; + struct entry_stack_page entry_stack_page; + struct tss_struct tss; + struct cea_exception_stacks estacks; + struct debug_store cpu_debug_store; + struct debug_store_buffers cpu_debug_buffers; +}; + +struct pebs_basic { + u64 format_size; + u64 ip; + u64 applicable_counters; + u64 tsc; +}; + +struct pebs_meminfo { + u64 address; + u64 aux; + u64 latency; + u64 tsx_tuning; +}; + +struct pebs_gprs { + u64 flags; + u64 ip; + u64 ax; + u64 cx; + u64 dx; + u64 bx; + u64 sp; + u64 bp; + u64 si; + u64 di; + u64 r8; + u64 r9; + u64 r10; + u64 r11; + u64 r12; + u64 r13; + u64 r14; + u64 r15; +}; + +struct pebs_xmm { + u64 xmm[32]; +}; + +struct x86_perf_regs { + struct pt_regs regs; + u64 *xmm_regs; +}; + +typedef unsigned int insn_attr_t; + +typedef unsigned char insn_byte_t; + +typedef int insn_value_t; + +struct insn_field { + union { + 
insn_value_t value; + insn_byte_t bytes[4]; + }; + unsigned char got; + unsigned char nbytes; +}; + +struct insn { + struct insn_field prefixes; + struct insn_field rex_prefix; + struct insn_field vex_prefix; + struct insn_field opcode; + struct insn_field modrm; + struct insn_field sib; + struct insn_field displacement; + union { + struct insn_field immediate; + struct insn_field moffset1; + struct insn_field immediate1; + }; + union { + struct insn_field moffset2; + struct insn_field immediate2; + }; + int emulate_prefix_size; + insn_attr_t attr; + unsigned char opnd_bytes; + unsigned char addr_bytes; + unsigned char length; + unsigned char x86_64; + const insn_byte_t *kaddr; + const insn_byte_t *end_kaddr; + const insn_byte_t *next_byte; +}; + +enum { + PERF_TXN_ELISION = 1, + PERF_TXN_TRANSACTION = 2, + PERF_TXN_SYNC = 4, + PERF_TXN_ASYNC = 8, + PERF_TXN_RETRY = 16, + PERF_TXN_CONFLICT = 32, + PERF_TXN_CAPACITY_WRITE = 64, + PERF_TXN_CAPACITY_READ = 128, + PERF_TXN_MAX = 256, + PERF_TXN_ABORT_MASK = 0, + PERF_TXN_ABORT_SHIFT = 32, +}; + +struct perf_event_header { + __u32 type; + __u16 misc; + __u16 size; +}; + +union intel_x86_pebs_dse { + u64 val; + struct { + unsigned int ld_dse: 4; + unsigned int ld_stlb_miss: 1; + unsigned int ld_locked: 1; + unsigned int ld_data_blk: 1; + unsigned int ld_addr_blk: 1; + unsigned int ld_reserved: 24; + }; + struct { + unsigned int st_l1d_hit: 1; + unsigned int st_reserved1: 3; + unsigned int st_stlb_miss: 1; + unsigned int st_locked: 1; + unsigned int st_reserved2: 26; + }; + struct { + unsigned int st_lat_dse: 4; + unsigned int st_lat_stlb_miss: 1; + unsigned int st_lat_locked: 1; + unsigned int ld_reserved3: 26; + }; +}; + +struct pebs_record_core { + u64 flags; + u64 ip; + u64 ax; + u64 bx; + u64 cx; + u64 dx; + u64 si; + u64 di; + u64 bp; + u64 sp; + u64 r8; + u64 r9; + u64 r10; + u64 r11; + u64 r12; + u64 r13; + u64 r14; + u64 r15; +}; + +struct pebs_record_nhm { + u64 flags; + u64 ip; + u64 ax; + u64 bx; + u64 cx; + u64 dx; + u64 si; + u64 di; + u64 bp; + u64 sp; + u64 r8; + u64 r9; + u64 r10; + u64 r11; + u64 r12; + u64 r13; + u64 r14; + u64 r15; + u64 status; + u64 dla; + u64 dse; + u64 lat; +}; + +union hsw_tsx_tuning { + struct { + u32 cycles_last_block: 32; + u32 hle_abort: 1; + u32 rtm_abort: 1; + u32 instruction_abort: 1; + u32 non_instruction_abort: 1; + u32 retry: 1; + u32 data_conflict: 1; + u32 capacity_writes: 1; + u32 capacity_reads: 1; + }; + u64 value; +}; + +struct pebs_record_skl { + u64 flags; + u64 ip; + u64 ax; + u64 bx; + u64 cx; + u64 dx; + u64 si; + u64 di; + u64 bp; + u64 sp; + u64 r8; + u64 r9; + u64 r10; + u64 r11; + u64 r12; + u64 r13; + u64 r14; + u64 r15; + u64 status; + u64 dla; + u64 dse; + u64 lat; + u64 real_ip; + u64 tsx_tuning; + u64 tsc; +}; + +struct bts_record { + u64 from; + u64 to; + u64 flags; +}; + +enum { + PERF_BR_UNKNOWN = 0, + PERF_BR_COND = 1, + PERF_BR_UNCOND = 2, + PERF_BR_IND = 3, + PERF_BR_CALL = 4, + PERF_BR_IND_CALL = 5, + PERF_BR_RET = 6, + PERF_BR_SYSCALL = 7, + PERF_BR_SYSRET = 8, + PERF_BR_COND_CALL = 9, + PERF_BR_COND_RET = 10, + PERF_BR_MAX = 11, +}; + +enum xfeature { + XFEATURE_FP = 0, + XFEATURE_SSE = 1, + XFEATURE_YMM = 2, + XFEATURE_BNDREGS = 3, + XFEATURE_BNDCSR = 4, + XFEATURE_OPMASK = 5, + XFEATURE_ZMM_Hi256 = 6, + XFEATURE_Hi16_ZMM = 7, + XFEATURE_PT_UNIMPLEMENTED_SO_FAR = 8, + XFEATURE_PKRU = 9, + XFEATURE_PASID = 10, + XFEATURE_RSRVD_COMP_11 = 11, + XFEATURE_RSRVD_COMP_12 = 12, + XFEATURE_RSRVD_COMP_13 = 13, + XFEATURE_RSRVD_COMP_14 = 14, + XFEATURE_LBR = 15, + XFEATURE_MAX 
= 16, +}; + +struct arch_lbr_state { + u64 lbr_ctl; + u64 lbr_depth; + u64 ler_from; + u64 ler_to; + u64 ler_info; + struct lbr_entry entries[0]; +}; + +union cpuid28_eax { + struct { + unsigned int lbr_depth_mask: 8; + unsigned int reserved: 22; + unsigned int lbr_deep_c_reset: 1; + unsigned int lbr_lip: 1; + } split; + unsigned int full; +}; + +union cpuid28_ebx { + struct { + unsigned int lbr_cpl: 1; + unsigned int lbr_filter: 1; + unsigned int lbr_call_stack: 1; + } split; + unsigned int full; +}; + +union cpuid28_ecx { + struct { + unsigned int lbr_mispred: 1; + unsigned int lbr_timed_lbr: 1; + unsigned int lbr_br_type: 1; + } split; + unsigned int full; +}; + +struct x86_pmu_lbr { + unsigned int nr; + unsigned int from; + unsigned int to; + unsigned int info; +}; + +struct x86_perf_task_context_opt { + int lbr_callstack_users; + int lbr_stack_state; + int log_id; +}; + +struct x86_perf_task_context { + u64 lbr_sel; + int tos; + int valid_lbrs; + struct x86_perf_task_context_opt opt; + struct lbr_entry lbr[32]; +}; + +struct x86_perf_task_context_arch_lbr { + struct x86_perf_task_context_opt opt; + struct lbr_entry entries[0]; +}; + +struct x86_perf_task_context_arch_lbr_xsave { + struct x86_perf_task_context_opt opt; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + union { + struct xregs_state xsave; + struct { + struct fxregs_state i387; + struct xstate_header header; + struct arch_lbr_state lbr; + long: 64; + long: 64; + long: 64; + }; + }; +}; + +enum { + X86_BR_NONE = 0, + X86_BR_USER = 1, + X86_BR_KERNEL = 2, + X86_BR_CALL = 4, + X86_BR_RET = 8, + X86_BR_SYSCALL = 16, + X86_BR_SYSRET = 32, + X86_BR_INT = 64, + X86_BR_IRET = 128, + X86_BR_JCC = 256, + X86_BR_JMP = 512, + X86_BR_IRQ = 1024, + X86_BR_IND_CALL = 2048, + X86_BR_ABORT = 4096, + X86_BR_IN_TX = 8192, + X86_BR_NO_TX = 16384, + X86_BR_ZERO_CALL = 32768, + X86_BR_CALL_STACK = 65536, + X86_BR_IND_JMP = 131072, + X86_BR_TYPE_SAVE = 262144, +}; + +enum { + LBR_NONE = 0, + LBR_VALID = 1, +}; + +enum { + ARCH_LBR_BR_TYPE_JCC = 0, + ARCH_LBR_BR_TYPE_NEAR_IND_JMP = 1, + ARCH_LBR_BR_TYPE_NEAR_REL_JMP = 2, + ARCH_LBR_BR_TYPE_NEAR_IND_CALL = 3, + ARCH_LBR_BR_TYPE_NEAR_REL_CALL = 4, + ARCH_LBR_BR_TYPE_NEAR_RET = 5, + ARCH_LBR_BR_TYPE_KNOWN_MAX = 5, + ARCH_LBR_BR_TYPE_MAP_MAX = 16, +}; + +enum P4_EVENTS { + P4_EVENT_TC_DELIVER_MODE = 0, + P4_EVENT_BPU_FETCH_REQUEST = 1, + P4_EVENT_ITLB_REFERENCE = 2, + P4_EVENT_MEMORY_CANCEL = 3, + P4_EVENT_MEMORY_COMPLETE = 4, + P4_EVENT_LOAD_PORT_REPLAY = 5, + P4_EVENT_STORE_PORT_REPLAY = 6, + P4_EVENT_MOB_LOAD_REPLAY = 7, + P4_EVENT_PAGE_WALK_TYPE = 8, + P4_EVENT_BSQ_CACHE_REFERENCE = 9, + P4_EVENT_IOQ_ALLOCATION = 10, + P4_EVENT_IOQ_ACTIVE_ENTRIES = 11, + P4_EVENT_FSB_DATA_ACTIVITY = 12, + P4_EVENT_BSQ_ALLOCATION = 13, + P4_EVENT_BSQ_ACTIVE_ENTRIES = 14, + P4_EVENT_SSE_INPUT_ASSIST = 15, + P4_EVENT_PACKED_SP_UOP = 16, + P4_EVENT_PACKED_DP_UOP = 17, + P4_EVENT_SCALAR_SP_UOP = 18, + P4_EVENT_SCALAR_DP_UOP = 19, + P4_EVENT_64BIT_MMX_UOP = 20, + P4_EVENT_128BIT_MMX_UOP = 21, + P4_EVENT_X87_FP_UOP = 22, + P4_EVENT_TC_MISC = 23, + P4_EVENT_GLOBAL_POWER_EVENTS = 24, + P4_EVENT_TC_MS_XFER = 25, + P4_EVENT_UOP_QUEUE_WRITES = 26, + P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE = 27, + P4_EVENT_RETIRED_BRANCH_TYPE = 28, + P4_EVENT_RESOURCE_STALL = 29, + P4_EVENT_WC_BUFFER = 30, + P4_EVENT_B2B_CYCLES = 31, + P4_EVENT_BNR = 32, + P4_EVENT_SNOOP = 33, + P4_EVENT_RESPONSE = 34, + P4_EVENT_FRONT_END_EVENT = 35, + P4_EVENT_EXECUTION_EVENT = 36, + P4_EVENT_REPLAY_EVENT = 37, + 
P4_EVENT_INSTR_RETIRED = 38, + P4_EVENT_UOPS_RETIRED = 39, + P4_EVENT_UOP_TYPE = 40, + P4_EVENT_BRANCH_RETIRED = 41, + P4_EVENT_MISPRED_BRANCH_RETIRED = 42, + P4_EVENT_X87_ASSIST = 43, + P4_EVENT_MACHINE_CLEAR = 44, + P4_EVENT_INSTR_COMPLETED = 45, +}; + +enum P4_EVENT_OPCODES { + P4_EVENT_TC_DELIVER_MODE_OPCODE = 257, + P4_EVENT_BPU_FETCH_REQUEST_OPCODE = 768, + P4_EVENT_ITLB_REFERENCE_OPCODE = 6147, + P4_EVENT_MEMORY_CANCEL_OPCODE = 517, + P4_EVENT_MEMORY_COMPLETE_OPCODE = 2050, + P4_EVENT_LOAD_PORT_REPLAY_OPCODE = 1026, + P4_EVENT_STORE_PORT_REPLAY_OPCODE = 1282, + P4_EVENT_MOB_LOAD_REPLAY_OPCODE = 770, + P4_EVENT_PAGE_WALK_TYPE_OPCODE = 260, + P4_EVENT_BSQ_CACHE_REFERENCE_OPCODE = 3079, + P4_EVENT_IOQ_ALLOCATION_OPCODE = 774, + P4_EVENT_IOQ_ACTIVE_ENTRIES_OPCODE = 6662, + P4_EVENT_FSB_DATA_ACTIVITY_OPCODE = 5894, + P4_EVENT_BSQ_ALLOCATION_OPCODE = 1287, + P4_EVENT_BSQ_ACTIVE_ENTRIES_OPCODE = 1543, + P4_EVENT_SSE_INPUT_ASSIST_OPCODE = 13313, + P4_EVENT_PACKED_SP_UOP_OPCODE = 2049, + P4_EVENT_PACKED_DP_UOP_OPCODE = 3073, + P4_EVENT_SCALAR_SP_UOP_OPCODE = 2561, + P4_EVENT_SCALAR_DP_UOP_OPCODE = 3585, + P4_EVENT_64BIT_MMX_UOP_OPCODE = 513, + P4_EVENT_128BIT_MMX_UOP_OPCODE = 6657, + P4_EVENT_X87_FP_UOP_OPCODE = 1025, + P4_EVENT_TC_MISC_OPCODE = 1537, + P4_EVENT_GLOBAL_POWER_EVENTS_OPCODE = 4870, + P4_EVENT_TC_MS_XFER_OPCODE = 1280, + P4_EVENT_UOP_QUEUE_WRITES_OPCODE = 2304, + P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE_OPCODE = 1282, + P4_EVENT_RETIRED_BRANCH_TYPE_OPCODE = 1026, + P4_EVENT_RESOURCE_STALL_OPCODE = 257, + P4_EVENT_WC_BUFFER_OPCODE = 1285, + P4_EVENT_B2B_CYCLES_OPCODE = 5635, + P4_EVENT_BNR_OPCODE = 2051, + P4_EVENT_SNOOP_OPCODE = 1539, + P4_EVENT_RESPONSE_OPCODE = 1027, + P4_EVENT_FRONT_END_EVENT_OPCODE = 2053, + P4_EVENT_EXECUTION_EVENT_OPCODE = 3077, + P4_EVENT_REPLAY_EVENT_OPCODE = 2309, + P4_EVENT_INSTR_RETIRED_OPCODE = 516, + P4_EVENT_UOPS_RETIRED_OPCODE = 260, + P4_EVENT_UOP_TYPE_OPCODE = 514, + P4_EVENT_BRANCH_RETIRED_OPCODE = 1541, + P4_EVENT_MISPRED_BRANCH_RETIRED_OPCODE = 772, + P4_EVENT_X87_ASSIST_OPCODE = 773, + P4_EVENT_MACHINE_CLEAR_OPCODE = 517, + P4_EVENT_INSTR_COMPLETED_OPCODE = 1796, +}; + +enum P4_ESCR_EMASKS { + P4_EVENT_TC_DELIVER_MODE__DD = 512, + P4_EVENT_TC_DELIVER_MODE__DB = 1024, + P4_EVENT_TC_DELIVER_MODE__DI = 2048, + P4_EVENT_TC_DELIVER_MODE__BD = 4096, + P4_EVENT_TC_DELIVER_MODE__BB = 8192, + P4_EVENT_TC_DELIVER_MODE__BI = 16384, + P4_EVENT_TC_DELIVER_MODE__ID = 32768, + P4_EVENT_BPU_FETCH_REQUEST__TCMISS = 512, + P4_EVENT_ITLB_REFERENCE__HIT = 512, + P4_EVENT_ITLB_REFERENCE__MISS = 1024, + P4_EVENT_ITLB_REFERENCE__HIT_UK = 2048, + P4_EVENT_MEMORY_CANCEL__ST_RB_FULL = 2048, + P4_EVENT_MEMORY_CANCEL__64K_CONF = 4096, + P4_EVENT_MEMORY_COMPLETE__LSC = 512, + P4_EVENT_MEMORY_COMPLETE__SSC = 1024, + P4_EVENT_LOAD_PORT_REPLAY__SPLIT_LD = 1024, + P4_EVENT_STORE_PORT_REPLAY__SPLIT_ST = 1024, + P4_EVENT_MOB_LOAD_REPLAY__NO_STA = 1024, + P4_EVENT_MOB_LOAD_REPLAY__NO_STD = 4096, + P4_EVENT_MOB_LOAD_REPLAY__PARTIAL_DATA = 8192, + P4_EVENT_MOB_LOAD_REPLAY__UNALGN_ADDR = 16384, + P4_EVENT_PAGE_WALK_TYPE__DTMISS = 512, + P4_EVENT_PAGE_WALK_TYPE__ITMISS = 1024, + P4_EVENT_BSQ_CACHE_REFERENCE__RD_2ndL_HITS = 512, + P4_EVENT_BSQ_CACHE_REFERENCE__RD_2ndL_HITE = 1024, + P4_EVENT_BSQ_CACHE_REFERENCE__RD_2ndL_HITM = 2048, + P4_EVENT_BSQ_CACHE_REFERENCE__RD_3rdL_HITS = 4096, + P4_EVENT_BSQ_CACHE_REFERENCE__RD_3rdL_HITE = 8192, + P4_EVENT_BSQ_CACHE_REFERENCE__RD_3rdL_HITM = 16384, + P4_EVENT_BSQ_CACHE_REFERENCE__RD_2ndL_MISS = 131072, + 
P4_EVENT_BSQ_CACHE_REFERENCE__RD_3rdL_MISS = 262144, + P4_EVENT_BSQ_CACHE_REFERENCE__WR_2ndL_MISS = 524288, + P4_EVENT_IOQ_ALLOCATION__DEFAULT = 512, + P4_EVENT_IOQ_ALLOCATION__ALL_READ = 16384, + P4_EVENT_IOQ_ALLOCATION__ALL_WRITE = 32768, + P4_EVENT_IOQ_ALLOCATION__MEM_UC = 65536, + P4_EVENT_IOQ_ALLOCATION__MEM_WC = 131072, + P4_EVENT_IOQ_ALLOCATION__MEM_WT = 262144, + P4_EVENT_IOQ_ALLOCATION__MEM_WP = 524288, + P4_EVENT_IOQ_ALLOCATION__MEM_WB = 1048576, + P4_EVENT_IOQ_ALLOCATION__OWN = 4194304, + P4_EVENT_IOQ_ALLOCATION__OTHER = 8388608, + P4_EVENT_IOQ_ALLOCATION__PREFETCH = 16777216, + P4_EVENT_IOQ_ACTIVE_ENTRIES__DEFAULT = 512, + P4_EVENT_IOQ_ACTIVE_ENTRIES__ALL_READ = 16384, + P4_EVENT_IOQ_ACTIVE_ENTRIES__ALL_WRITE = 32768, + P4_EVENT_IOQ_ACTIVE_ENTRIES__MEM_UC = 65536, + P4_EVENT_IOQ_ACTIVE_ENTRIES__MEM_WC = 131072, + P4_EVENT_IOQ_ACTIVE_ENTRIES__MEM_WT = 262144, + P4_EVENT_IOQ_ACTIVE_ENTRIES__MEM_WP = 524288, + P4_EVENT_IOQ_ACTIVE_ENTRIES__MEM_WB = 1048576, + P4_EVENT_IOQ_ACTIVE_ENTRIES__OWN = 4194304, + P4_EVENT_IOQ_ACTIVE_ENTRIES__OTHER = 8388608, + P4_EVENT_IOQ_ACTIVE_ENTRIES__PREFETCH = 16777216, + P4_EVENT_FSB_DATA_ACTIVITY__DRDY_DRV = 512, + P4_EVENT_FSB_DATA_ACTIVITY__DRDY_OWN = 1024, + P4_EVENT_FSB_DATA_ACTIVITY__DRDY_OTHER = 2048, + P4_EVENT_FSB_DATA_ACTIVITY__DBSY_DRV = 4096, + P4_EVENT_FSB_DATA_ACTIVITY__DBSY_OWN = 8192, + P4_EVENT_FSB_DATA_ACTIVITY__DBSY_OTHER = 16384, + P4_EVENT_BSQ_ALLOCATION__REQ_TYPE0 = 512, + P4_EVENT_BSQ_ALLOCATION__REQ_TYPE1 = 1024, + P4_EVENT_BSQ_ALLOCATION__REQ_LEN0 = 2048, + P4_EVENT_BSQ_ALLOCATION__REQ_LEN1 = 4096, + P4_EVENT_BSQ_ALLOCATION__REQ_IO_TYPE = 16384, + P4_EVENT_BSQ_ALLOCATION__REQ_LOCK_TYPE = 32768, + P4_EVENT_BSQ_ALLOCATION__REQ_CACHE_TYPE = 65536, + P4_EVENT_BSQ_ALLOCATION__REQ_SPLIT_TYPE = 131072, + P4_EVENT_BSQ_ALLOCATION__REQ_DEM_TYPE = 262144, + P4_EVENT_BSQ_ALLOCATION__REQ_ORD_TYPE = 524288, + P4_EVENT_BSQ_ALLOCATION__MEM_TYPE0 = 1048576, + P4_EVENT_BSQ_ALLOCATION__MEM_TYPE1 = 2097152, + P4_EVENT_BSQ_ALLOCATION__MEM_TYPE2 = 4194304, + P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_TYPE0 = 512, + P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_TYPE1 = 1024, + P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_LEN0 = 2048, + P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_LEN1 = 4096, + P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_IO_TYPE = 16384, + P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_LOCK_TYPE = 32768, + P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_CACHE_TYPE = 65536, + P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_SPLIT_TYPE = 131072, + P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_DEM_TYPE = 262144, + P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_ORD_TYPE = 524288, + P4_EVENT_BSQ_ACTIVE_ENTRIES__MEM_TYPE0 = 1048576, + P4_EVENT_BSQ_ACTIVE_ENTRIES__MEM_TYPE1 = 2097152, + P4_EVENT_BSQ_ACTIVE_ENTRIES__MEM_TYPE2 = 4194304, + P4_EVENT_SSE_INPUT_ASSIST__ALL = 16777216, + P4_EVENT_PACKED_SP_UOP__ALL = 16777216, + P4_EVENT_PACKED_DP_UOP__ALL = 16777216, + P4_EVENT_SCALAR_SP_UOP__ALL = 16777216, + P4_EVENT_SCALAR_DP_UOP__ALL = 16777216, + P4_EVENT_64BIT_MMX_UOP__ALL = 16777216, + P4_EVENT_128BIT_MMX_UOP__ALL = 16777216, + P4_EVENT_X87_FP_UOP__ALL = 16777216, + P4_EVENT_TC_MISC__FLUSH = 8192, + P4_EVENT_GLOBAL_POWER_EVENTS__RUNNING = 512, + P4_EVENT_TC_MS_XFER__CISC = 512, + P4_EVENT_UOP_QUEUE_WRITES__FROM_TC_BUILD = 512, + P4_EVENT_UOP_QUEUE_WRITES__FROM_TC_DELIVER = 1024, + P4_EVENT_UOP_QUEUE_WRITES__FROM_ROM = 2048, + P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE__CONDITIONAL = 1024, + P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE__CALL = 2048, + P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE__RETURN = 4096, + P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE__INDIRECT = 8192, + 
P4_EVENT_RETIRED_BRANCH_TYPE__CONDITIONAL = 1024, + P4_EVENT_RETIRED_BRANCH_TYPE__CALL = 2048, + P4_EVENT_RETIRED_BRANCH_TYPE__RETURN = 4096, + P4_EVENT_RETIRED_BRANCH_TYPE__INDIRECT = 8192, + P4_EVENT_RESOURCE_STALL__SBFULL = 16384, + P4_EVENT_WC_BUFFER__WCB_EVICTS = 512, + P4_EVENT_WC_BUFFER__WCB_FULL_EVICTS = 1024, + P4_EVENT_FRONT_END_EVENT__NBOGUS = 512, + P4_EVENT_FRONT_END_EVENT__BOGUS = 1024, + P4_EVENT_EXECUTION_EVENT__NBOGUS0 = 512, + P4_EVENT_EXECUTION_EVENT__NBOGUS1 = 1024, + P4_EVENT_EXECUTION_EVENT__NBOGUS2 = 2048, + P4_EVENT_EXECUTION_EVENT__NBOGUS3 = 4096, + P4_EVENT_EXECUTION_EVENT__BOGUS0 = 8192, + P4_EVENT_EXECUTION_EVENT__BOGUS1 = 16384, + P4_EVENT_EXECUTION_EVENT__BOGUS2 = 32768, + P4_EVENT_EXECUTION_EVENT__BOGUS3 = 65536, + P4_EVENT_REPLAY_EVENT__NBOGUS = 512, + P4_EVENT_REPLAY_EVENT__BOGUS = 1024, + P4_EVENT_INSTR_RETIRED__NBOGUSNTAG = 512, + P4_EVENT_INSTR_RETIRED__NBOGUSTAG = 1024, + P4_EVENT_INSTR_RETIRED__BOGUSNTAG = 2048, + P4_EVENT_INSTR_RETIRED__BOGUSTAG = 4096, + P4_EVENT_UOPS_RETIRED__NBOGUS = 512, + P4_EVENT_UOPS_RETIRED__BOGUS = 1024, + P4_EVENT_UOP_TYPE__TAGLOADS = 1024, + P4_EVENT_UOP_TYPE__TAGSTORES = 2048, + P4_EVENT_BRANCH_RETIRED__MMNP = 512, + P4_EVENT_BRANCH_RETIRED__MMNM = 1024, + P4_EVENT_BRANCH_RETIRED__MMTP = 2048, + P4_EVENT_BRANCH_RETIRED__MMTM = 4096, + P4_EVENT_MISPRED_BRANCH_RETIRED__NBOGUS = 512, + P4_EVENT_X87_ASSIST__FPSU = 512, + P4_EVENT_X87_ASSIST__FPSO = 1024, + P4_EVENT_X87_ASSIST__POAO = 2048, + P4_EVENT_X87_ASSIST__POAU = 4096, + P4_EVENT_X87_ASSIST__PREA = 8192, + P4_EVENT_MACHINE_CLEAR__CLEAR = 512, + P4_EVENT_MACHINE_CLEAR__MOCLEAR = 1024, + P4_EVENT_MACHINE_CLEAR__SMCLEAR = 2048, + P4_EVENT_INSTR_COMPLETED__NBOGUS = 512, + P4_EVENT_INSTR_COMPLETED__BOGUS = 1024, +}; + +enum P4_PEBS_METRIC { + P4_PEBS_METRIC__none = 0, + P4_PEBS_METRIC__1stl_cache_load_miss_retired = 1, + P4_PEBS_METRIC__2ndl_cache_load_miss_retired = 2, + P4_PEBS_METRIC__dtlb_load_miss_retired = 3, + P4_PEBS_METRIC__dtlb_store_miss_retired = 4, + P4_PEBS_METRIC__dtlb_all_miss_retired = 5, + P4_PEBS_METRIC__tagged_mispred_branch = 6, + P4_PEBS_METRIC__mob_load_replay_retired = 7, + P4_PEBS_METRIC__split_load_retired = 8, + P4_PEBS_METRIC__split_store_retired = 9, + P4_PEBS_METRIC__max = 10, +}; + +struct p4_event_bind { + unsigned int opcode; + unsigned int escr_msr[2]; + unsigned int escr_emask; + unsigned int shared; + char cntr[6]; +}; + +struct p4_pebs_bind { + unsigned int metric_pebs; + unsigned int metric_vert; +}; + +struct p4_event_alias { + u64 original; + u64 alternative; +}; + +enum cpuid_regs_idx { + CPUID_EAX = 0, + CPUID_EBX = 1, + CPUID_ECX = 2, + CPUID_EDX = 3, +}; + +struct dev_ext_attribute { + struct device_attribute attr; + void *var; +}; + +enum pt_capabilities { + PT_CAP_max_subleaf = 0, + PT_CAP_cr3_filtering = 1, + PT_CAP_psb_cyc = 2, + PT_CAP_ip_filtering = 3, + PT_CAP_mtc = 4, + PT_CAP_ptwrite = 5, + PT_CAP_power_event_trace = 6, + PT_CAP_topa_output = 7, + PT_CAP_topa_multiple_entries = 8, + PT_CAP_single_range_output = 9, + PT_CAP_output_subsys = 10, + PT_CAP_payloads_lip = 11, + PT_CAP_num_address_ranges = 12, + PT_CAP_mtc_periods = 13, + PT_CAP_cycle_thresholds = 14, + PT_CAP_psb_periods = 15, +}; + +enum perf_addr_filter_action_t { + PERF_ADDR_FILTER_ACTION_STOP = 0, + PERF_ADDR_FILTER_ACTION_START = 1, + PERF_ADDR_FILTER_ACTION_FILTER = 2, +}; + +struct perf_addr_filter { + struct list_head entry; + struct path path; + long unsigned int offset; + long unsigned int size; + enum perf_addr_filter_action_t action; +}; + +struct 
topa_entry { + u64 end: 1; + u64 rsvd0: 1; + u64 intr: 1; + u64 rsvd1: 1; + u64 stop: 1; + u64 rsvd2: 1; + u64 size: 4; + u64 rsvd3: 2; + u64 base: 36; + u64 rsvd4: 16; +}; + +struct pt_pmu { + struct pmu pmu; + u32 caps[8]; + bool vmx; + bool branch_en_always_on; + long unsigned int max_nonturbo_ratio; + unsigned int tsc_art_num; + unsigned int tsc_art_den; +}; + +struct topa; + +struct pt_buffer { + struct list_head tables; + struct topa *first; + struct topa *last; + struct topa *cur; + unsigned int cur_idx; + size_t output_off; + long unsigned int nr_pages; + local_t data_size; + local64_t head; + bool snapshot; + bool single; + long int stop_pos; + long int intr_pos; + struct topa_entry *stop_te; + struct topa_entry *intr_te; + void **data_pages; +}; + +struct topa { + struct list_head list; + u64 offset; + size_t size; + int last; + unsigned int z_count; +}; + +struct pt_filter { + long unsigned int msr_a; + long unsigned int msr_b; + long unsigned int config; +}; + +struct pt_filters { + struct pt_filter filter[4]; + unsigned int nr_filters; +}; + +struct pt { + struct perf_output_handle handle; + struct pt_filters filters; + int handle_nmi; + int vmx_on; + u64 output_base; + u64 output_mask; +}; + +struct pt_cap_desc { + const char *name; + u32 leaf; + u8 reg; + u32 mask; +}; + +struct pt_address_range { + long unsigned int msr_a; + long unsigned int msr_b; + unsigned int reg_off; +}; + +struct topa_page { + struct topa_entry table[507]; + struct topa topa; +}; + +typedef void (*exitcall_t)(); + +struct x86_cpu_id { + __u16 vendor; + __u16 family; + __u16 model; + __u16 steppings; + __u16 feature; + kernel_ulong_t driver_data; +}; + +enum hrtimer_mode { + HRTIMER_MODE_ABS = 0, + HRTIMER_MODE_REL = 1, + HRTIMER_MODE_PINNED = 2, + HRTIMER_MODE_SOFT = 4, + HRTIMER_MODE_HARD = 8, + HRTIMER_MODE_ABS_PINNED = 2, + HRTIMER_MODE_REL_PINNED = 3, + HRTIMER_MODE_ABS_SOFT = 4, + HRTIMER_MODE_REL_SOFT = 5, + HRTIMER_MODE_ABS_PINNED_SOFT = 6, + HRTIMER_MODE_REL_PINNED_SOFT = 7, + HRTIMER_MODE_ABS_HARD = 8, + HRTIMER_MODE_REL_HARD = 9, + HRTIMER_MODE_ABS_PINNED_HARD = 10, + HRTIMER_MODE_REL_PINNED_HARD = 11, +}; + +struct acpi_device; + +struct pci_sysdata { + int domain; + int node; + struct acpi_device *companion; + void *iommu; + void *fwnode; + struct pci_dev *vmd_dev; +}; + +struct pci_extra_dev { + struct pci_dev *dev[4]; +}; + +struct intel_uncore_pmu; + +struct intel_uncore_ops; + +struct uncore_event_desc; + +struct freerunning_counters; + +struct intel_uncore_topology; + +struct intel_uncore_type { + const char *name; + int num_counters; + int num_boxes; + int perf_ctr_bits; + int fixed_ctr_bits; + int num_freerunning_types; + int type_id; + unsigned int perf_ctr; + unsigned int event_ctl; + unsigned int event_mask; + unsigned int event_mask_ext; + unsigned int fixed_ctr; + unsigned int fixed_ctl; + unsigned int box_ctl; + u64 *box_ctls; + union { + unsigned int msr_offset; + unsigned int mmio_offset; + }; + unsigned int mmio_map_size; + unsigned int num_shared_regs: 8; + unsigned int single_fixed: 1; + unsigned int pair_ctr_ctl: 1; + union { + unsigned int *msr_offsets; + unsigned int *pci_offsets; + unsigned int *mmio_offsets; + }; + unsigned int *box_ids; + struct event_constraint unconstrainted; + struct event_constraint *constraints; + struct intel_uncore_pmu *pmus; + struct intel_uncore_ops *ops; + struct uncore_event_desc *event_descs; + struct freerunning_counters *freerunning; + const struct attribute_group *attr_groups[4]; + const struct attribute_group **attr_update; + struct 
pmu *pmu; + struct intel_uncore_topology *topology; + int (*set_mapping)(struct intel_uncore_type *); + void (*cleanup_mapping)(struct intel_uncore_type *); +}; + +struct intel_uncore_box; + +struct intel_uncore_pmu { + struct pmu pmu; + char name[32]; + int pmu_idx; + int func_id; + bool registered; + atomic_t activeboxes; + struct intel_uncore_type *type; + struct intel_uncore_box **boxes; +}; + +struct intel_uncore_ops { + void (*init_box)(struct intel_uncore_box *); + void (*exit_box)(struct intel_uncore_box *); + void (*disable_box)(struct intel_uncore_box *); + void (*enable_box)(struct intel_uncore_box *); + void (*disable_event)(struct intel_uncore_box *, struct perf_event *); + void (*enable_event)(struct intel_uncore_box *, struct perf_event *); + u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *); + int (*hw_config)(struct intel_uncore_box *, struct perf_event *); + struct event_constraint * (*get_constraint)(struct intel_uncore_box *, struct perf_event *); + void (*put_constraint)(struct intel_uncore_box *, struct perf_event *); +}; + +struct uncore_event_desc { + struct device_attribute attr; + const char *config; +}; + +struct freerunning_counters { + unsigned int counter_base; + unsigned int counter_offset; + unsigned int box_offset; + unsigned int num_counters; + unsigned int bits; + unsigned int *box_offsets; +}; + +struct intel_uncore_topology { + u64 configuration; + int segment; +}; + +struct intel_uncore_extra_reg { + raw_spinlock_t lock; + u64 config; + u64 config1; + u64 config2; + atomic_t ref; +}; + +struct intel_uncore_box { + int dieid; + int n_active; + int n_events; + int cpu; + long unsigned int flags; + atomic_t refcnt; + struct perf_event *events[10]; + struct perf_event *event_list[10]; + struct event_constraint *event_constraint[10]; + long unsigned int active_mask[1]; + u64 tags[10]; + struct pci_dev *pci_dev; + struct intel_uncore_pmu *pmu; + u64 hrtimer_duration; + struct hrtimer hrtimer; + struct list_head list; + struct list_head active_list; + void *io_addr; + struct intel_uncore_extra_reg shared_regs[0]; +}; + +struct pci2phy_map { + struct list_head list; + int segment; + int pbus_to_dieid[256]; +}; + +struct intel_uncore_init_fun { + void (*cpu_init)(); + int (*pci_init)(); + void (*mmio_init)(); +}; + +enum { + EXTRA_REG_NHMEX_M_FILTER = 0, + EXTRA_REG_NHMEX_M_DSP = 1, + EXTRA_REG_NHMEX_M_ISS = 2, + EXTRA_REG_NHMEX_M_MAP = 3, + EXTRA_REG_NHMEX_M_MSC_THR = 4, + EXTRA_REG_NHMEX_M_PGT = 5, + EXTRA_REG_NHMEX_M_PLD = 6, + EXTRA_REG_NHMEX_M_ZDP_CTL_FVC = 7, +}; + +enum { + SNB_PCI_UNCORE_IMC = 0, +}; + +enum perf_snb_uncore_imc_freerunning_types { + SNB_PCI_UNCORE_IMC_DATA_READS = 0, + SNB_PCI_UNCORE_IMC_DATA_WRITES = 1, + SNB_PCI_UNCORE_IMC_GT_REQUESTS = 2, + SNB_PCI_UNCORE_IMC_IA_REQUESTS = 3, + SNB_PCI_UNCORE_IMC_IO_REQUESTS = 4, + SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX = 5, +}; + +struct imc_uncore_pci_dev { + __u32 pci_id; + struct pci_driver *driver; +}; + +enum perf_tgl_uncore_imc_freerunning_types { + TGL_MMIO_UNCORE_IMC_DATA_TOTAL = 0, + TGL_MMIO_UNCORE_IMC_DATA_READ = 1, + TGL_MMIO_UNCORE_IMC_DATA_WRITE = 2, + TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX = 3, +}; + +enum { + SNBEP_PCI_QPI_PORT0_FILTER = 0, + SNBEP_PCI_QPI_PORT1_FILTER = 1, + BDX_PCI_QPI_PORT2_FILTER = 2, +}; + +enum { + SNBEP_PCI_UNCORE_HA = 0, + SNBEP_PCI_UNCORE_IMC = 1, + SNBEP_PCI_UNCORE_QPI = 2, + SNBEP_PCI_UNCORE_R2PCIE = 3, + SNBEP_PCI_UNCORE_R3QPI = 4, +}; + +enum { + IVBEP_PCI_UNCORE_HA = 0, + IVBEP_PCI_UNCORE_IMC = 1, + IVBEP_PCI_UNCORE_IRP = 2, + 
IVBEP_PCI_UNCORE_QPI = 3, + IVBEP_PCI_UNCORE_R2PCIE = 4, + IVBEP_PCI_UNCORE_R3QPI = 5, +}; + +enum { + KNL_PCI_UNCORE_MC_UCLK = 0, + KNL_PCI_UNCORE_MC_DCLK = 1, + KNL_PCI_UNCORE_EDC_UCLK = 2, + KNL_PCI_UNCORE_EDC_ECLK = 3, + KNL_PCI_UNCORE_M2PCIE = 4, + KNL_PCI_UNCORE_IRP = 5, +}; + +enum { + HSWEP_PCI_UNCORE_HA = 0, + HSWEP_PCI_UNCORE_IMC = 1, + HSWEP_PCI_UNCORE_IRP = 2, + HSWEP_PCI_UNCORE_QPI = 3, + HSWEP_PCI_UNCORE_R2PCIE = 4, + HSWEP_PCI_UNCORE_R3QPI = 5, +}; + +enum { + BDX_PCI_UNCORE_HA = 0, + BDX_PCI_UNCORE_IMC = 1, + BDX_PCI_UNCORE_IRP = 2, + BDX_PCI_UNCORE_QPI = 3, + BDX_PCI_UNCORE_R2PCIE = 4, + BDX_PCI_UNCORE_R3QPI = 5, +}; + +enum perf_uncore_iio_freerunning_type_id { + SKX_IIO_MSR_IOCLK = 0, + SKX_IIO_MSR_BW = 1, + SKX_IIO_MSR_UTIL = 2, + SKX_IIO_FREERUNNING_TYPE_MAX = 3, +}; + +enum { + SKX_PCI_UNCORE_IMC = 0, + SKX_PCI_UNCORE_M2M = 1, + SKX_PCI_UNCORE_UPI = 2, + SKX_PCI_UNCORE_M2PCIE = 3, + SKX_PCI_UNCORE_M3UPI = 4, +}; + +enum perf_uncore_snr_iio_freerunning_type_id { + SNR_IIO_MSR_IOCLK = 0, + SNR_IIO_MSR_BW_IN = 1, + SNR_IIO_FREERUNNING_TYPE_MAX = 2, +}; + +enum { + SNR_PCI_UNCORE_M2M = 0, + SNR_PCI_UNCORE_PCIE3 = 1, +}; + +enum perf_uncore_snr_imc_freerunning_type_id { + SNR_IMC_DCLK = 0, + SNR_IMC_DDR = 1, + SNR_IMC_FREERUNNING_TYPE_MAX = 2, +}; + +enum perf_uncore_icx_iio_freerunning_type_id { + ICX_IIO_MSR_IOCLK = 0, + ICX_IIO_MSR_BW_IN = 1, + ICX_IIO_FREERUNNING_TYPE_MAX = 2, +}; + +enum { + ICX_PCI_UNCORE_M2M = 0, + ICX_PCI_UNCORE_UPI = 1, + ICX_PCI_UNCORE_M3UPI = 2, +}; + +enum perf_uncore_icx_imc_freerunning_type_id { + ICX_IMC_DCLK = 0, + ICX_IMC_DDR = 1, + ICX_IMC_DDRT = 2, + ICX_IMC_FREERUNNING_TYPE_MAX = 3, +}; + +enum uncore_access_type { + UNCORE_ACCESS_MSR = 0, + UNCORE_ACCESS_MMIO = 1, + UNCORE_ACCESS_PCI = 2, + UNCORE_ACCESS_MAX = 3, +}; + +struct uncore_global_discovery { + union { + u64 table1; + struct { + u64 type: 8; + u64 stride: 8; + u64 max_units: 10; + u64 __reserved_1: 36; + u64 access_type: 2; + }; + }; + u64 ctl; + union { + u64 table3; + struct { + u64 status_offset: 8; + u64 num_status: 16; + u64 __reserved_2: 40; + }; + }; +}; + +struct uncore_unit_discovery { + union { + u64 table1; + struct { + u64 num_regs: 8; + u64 ctl_offset: 8; + u64 bit_width: 8; + u64 ctr_offset: 8; + u64 status_offset: 8; + u64 __reserved_1: 22; + u64 access_type: 2; + }; + }; + u64 ctl; + union { + u64 table3; + struct { + u64 box_type: 16; + u64 box_id: 16; + u64 __reserved_2: 32; + }; + }; +}; + +struct intel_uncore_discovery_type { + struct rb_node node; + enum uncore_access_type access_type; + u64 box_ctrl; + u64 *box_ctrl_die; + u16 type; + u8 num_counters; + u8 counter_width; + u8 ctl_offset; + u8 ctr_offset; + u16 num_boxes; + unsigned int *ids; + unsigned int *box_offset; +}; + +typedef s8 int8_t; + +typedef u8 uint8_t; + +typedef u64 uint64_t; + +struct atomic_notifier_head { + spinlock_t lock; + struct notifier_block *head; +}; + +enum xen_domain_type { + XEN_NATIVE = 0, + XEN_PV_DOMAIN = 1, + XEN_HVM_DOMAIN = 2, +}; + +typedef long unsigned int xen_pfn_t; + +typedef long unsigned int xen_ulong_t; + +struct arch_shared_info { + long unsigned int max_pfn; + xen_pfn_t pfn_to_mfn_frame_list_list; + long unsigned int nmi_reason; + long unsigned int p2m_cr3; + long unsigned int p2m_vaddr; + long unsigned int p2m_generation; +}; + +struct arch_vcpu_info { + long unsigned int cr2; + long unsigned int pad; +}; + +struct pvclock_wall_clock { + u32 version; + u32 sec; + u32 nsec; +}; + +struct vcpu_info { + uint8_t evtchn_upcall_pending; + uint8_t 
evtchn_upcall_mask; + xen_ulong_t evtchn_pending_sel; + struct arch_vcpu_info arch; + struct pvclock_vcpu_time_info time; +}; + +struct shared_info { + struct vcpu_info vcpu_info[32]; + xen_ulong_t evtchn_pending[64]; + xen_ulong_t evtchn_mask[64]; + struct pvclock_wall_clock wc; + uint32_t wc_sec_hi; + struct arch_shared_info arch; +}; + +struct start_info { + char magic[32]; + long unsigned int nr_pages; + long unsigned int shared_info; + uint32_t flags; + xen_pfn_t store_mfn; + uint32_t store_evtchn; + union { + struct { + xen_pfn_t mfn; + uint32_t evtchn; + } domU; + struct { + uint32_t info_off; + uint32_t info_size; + } dom0; + } console; + long unsigned int pt_base; + long unsigned int nr_pt_frames; + long unsigned int mfn_list; + long unsigned int mod_start; + long unsigned int mod_len; + int8_t cmd_line[1024]; + long unsigned int first_p2m_pfn; + long unsigned int nr_p2m_frames; +}; + +struct sched_shutdown { + unsigned int reason; +}; + +struct sched_pin_override { + int32_t pcpu; +}; + +struct vcpu_register_vcpu_info { + uint64_t mfn; + uint32_t offset; + uint32_t rsvd; +}; + +struct xmaddr { + phys_addr_t maddr; +}; + +typedef struct xmaddr xmaddr_t; + +struct xpaddr { + phys_addr_t paddr; +}; + +typedef struct xpaddr xpaddr_t; + +typedef s16 int16_t; + +typedef u16 uint16_t; + +enum clocksource_ids { + CSID_GENERIC = 0, + CSID_ARM_ARCH_COUNTER = 1, + CSID_MAX = 2, +}; + +struct clocksource { + u64 (*read)(struct clocksource *); + u64 mask; + u32 mult; + u32 shift; + u64 max_idle_ns; + u32 maxadj; + u64 max_cycles; + const char *name; + struct list_head list; + int rating; + enum clocksource_ids id; + enum vdso_clock_mode vdso_clock_mode; + long unsigned int flags; + int (*enable)(struct clocksource *); + void (*disable)(struct clocksource *); + void (*suspend)(struct clocksource *); + void (*resume)(struct clocksource *); + void (*mark_unstable)(struct clocksource *); + void (*tick_stable)(struct clocksource *); + struct list_head wd_list; + u64 cs_last; + u64 wd_last; + struct module *owner; +}; + +struct x86_init_mpparse { + void (*setup_ioapic_ids)(); + void (*find_smp_config)(); + void (*get_smp_config)(unsigned int); +}; + +struct x86_init_resources { + void (*probe_roms)(); + void (*reserve_resources)(); + char * (*memory_setup)(); +}; + +struct x86_init_irqs { + void (*pre_vector_init)(); + void (*intr_init)(); + void (*intr_mode_select)(); + void (*intr_mode_init)(); + struct irq_domain * (*create_pci_msi_domain)(); +}; + +struct x86_init_oem { + void (*arch_setup)(); + void (*banner)(); +}; + +struct x86_init_paging { + void (*pagetable_init)(); +}; + +struct x86_init_timers { + void (*setup_percpu_clockev)(); + void (*timer_init)(); + void (*wallclock_init)(); +}; + +struct x86_init_iommu { + int (*iommu_init)(); +}; + +struct x86_init_pci { + int (*arch_init)(); + int (*init)(); + void (*init_irq)(); + void (*fixup_irqs)(); +}; + +struct x86_hyper_init { + void (*init_platform)(); + void (*guest_late_init)(); + bool (*x2apic_available)(); + bool (*msi_ext_dest_id)(); + void (*init_mem_mapping)(); + void (*init_after_bootmem)(); +}; + +struct x86_init_acpi { + void (*set_root_pointer)(u64); + u64 (*get_root_pointer)(); + void (*reduced_hw_early_init)(); +}; + +struct x86_init_ops { + struct x86_init_resources resources; + struct x86_init_mpparse mpparse; + struct x86_init_irqs irqs; + struct x86_init_oem oem; + struct x86_init_paging paging; + struct x86_init_timers timers; + struct x86_init_iommu iommu; + struct x86_init_pci pci; + struct x86_hyper_init hyper; + 
struct x86_init_acpi acpi; +}; + +struct x86_cpuinit_ops { + void (*setup_percpu_clockev)(); + void (*early_percpu_clock_init)(); + void (*fixup_cpu_id)(struct cpuinfo_x86 *, int); +}; + +enum clock_event_state { + CLOCK_EVT_STATE_DETACHED = 0, + CLOCK_EVT_STATE_SHUTDOWN = 1, + CLOCK_EVT_STATE_PERIODIC = 2, + CLOCK_EVT_STATE_ONESHOT = 3, + CLOCK_EVT_STATE_ONESHOT_STOPPED = 4, +}; + +struct clock_event_device { + void (*event_handler)(struct clock_event_device *); + int (*set_next_event)(long unsigned int, struct clock_event_device *); + int (*set_next_ktime)(ktime_t, struct clock_event_device *); + ktime_t next_event; + u64 max_delta_ns; + u64 min_delta_ns; + u32 mult; + u32 shift; + enum clock_event_state state_use_accessors; + unsigned int features; + long unsigned int retries; + int (*set_state_periodic)(struct clock_event_device *); + int (*set_state_oneshot)(struct clock_event_device *); + int (*set_state_oneshot_stopped)(struct clock_event_device *); + int (*set_state_shutdown)(struct clock_event_device *); + int (*tick_resume)(struct clock_event_device *); + void (*broadcast)(const struct cpumask *); + void (*suspend)(struct clock_event_device *); + void (*resume)(struct clock_event_device *); + long unsigned int min_delta_ticks; + long unsigned int max_delta_ticks; + const char *name; + int rating; + int irq; + int bound_on; + const struct cpumask *cpumask; + struct list_head list; + struct module *owner; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct tk_read_base { + struct clocksource *clock; + u64 mask; + u64 cycle_last; + u32 mult; + u32 shift; + u64 xtime_nsec; + ktime_t base; + u64 base_real; +}; + +struct timekeeper { + struct tk_read_base tkr_mono; + struct tk_read_base tkr_raw; + u64 xtime_sec; + long unsigned int ktime_sec; + struct timespec64 wall_to_monotonic; + ktime_t offs_real; + ktime_t offs_boot; + ktime_t offs_tai; + s32 tai_offset; + unsigned int clock_was_set_seq; + u8 cs_was_changed_seq; + ktime_t next_leap_ktime; + u64 raw_sec; + struct timespec64 monotonic_to_boot; + u64 cycle_interval; + u64 xtime_interval; + s64 xtime_remainder; + u64 raw_interval; + u64 ntp_tick; + s64 ntp_error; + u32 ntp_error_shift; + u32 ntp_err_mult; + u32 skip_second_overflow; +}; + +typedef unsigned char *__guest_handle_uchar; + +typedef char *__guest_handle_char; + +typedef void *__guest_handle_void; + +typedef uint64_t *__guest_handle_uint64_t; + +typedef uint32_t *__guest_handle_uint32_t; + +struct vcpu_time_info { + uint32_t version; + uint32_t pad0; + uint64_t tsc_timestamp; + uint64_t system_time; + uint32_t tsc_to_system_mul; + int8_t tsc_shift; + int8_t pad1[3]; +}; + +struct xenpf_settime32 { + uint32_t secs; + uint32_t nsecs; + uint64_t system_time; +}; + +struct xenpf_settime64 { + uint64_t secs; + uint32_t nsecs; + uint32_t mbz; + uint64_t system_time; +}; + +struct xenpf_add_memtype { + xen_pfn_t mfn; + uint64_t nr_mfns; + uint32_t type; + uint32_t handle; + uint32_t reg; +}; + +struct xenpf_del_memtype { + uint32_t handle; + uint32_t reg; +}; + +struct xenpf_read_memtype { + uint32_t reg; + xen_pfn_t mfn; + uint64_t nr_mfns; + uint32_t type; +}; + +struct xenpf_microcode_update { + __guest_handle_void data; + uint32_t length; +}; + +struct xenpf_platform_quirk { + uint32_t quirk_id; +}; + +struct xenpf_efi_time { + uint16_t year; + uint8_t month; + uint8_t day; + uint8_t hour; + uint8_t min; + uint8_t sec; + uint32_t ns; + int16_t tz; + uint8_t daylight; +}; + +struct xenpf_efi_guid { + uint32_t data1; + uint16_t data2; + uint16_t 
data3; + uint8_t data4[8]; +}; + +struct xenpf_efi_runtime_call { + uint32_t function; + uint32_t misc; + xen_ulong_t status; + union { + struct { + struct xenpf_efi_time time; + uint32_t resolution; + uint32_t accuracy; + } get_time; + struct xenpf_efi_time set_time; + struct xenpf_efi_time get_wakeup_time; + struct xenpf_efi_time set_wakeup_time; + struct { + __guest_handle_void name; + xen_ulong_t size; + __guest_handle_void data; + struct xenpf_efi_guid vendor_guid; + } get_variable; + struct { + __guest_handle_void name; + xen_ulong_t size; + __guest_handle_void data; + struct xenpf_efi_guid vendor_guid; + } set_variable; + struct { + xen_ulong_t size; + __guest_handle_void name; + struct xenpf_efi_guid vendor_guid; + } get_next_variable_name; + struct { + uint32_t attr; + uint64_t max_store_size; + uint64_t remain_store_size; + uint64_t max_size; + } query_variable_info; + struct { + __guest_handle_void capsule_header_array; + xen_ulong_t capsule_count; + uint64_t max_capsule_size; + uint32_t reset_type; + } query_capsule_capabilities; + struct { + __guest_handle_void capsule_header_array; + xen_ulong_t capsule_count; + uint64_t sg_list; + } update_capsule; + } u; +}; + +union xenpf_efi_info { + uint32_t version; + struct { + uint64_t addr; + uint32_t nent; + } cfg; + struct { + uint32_t revision; + uint32_t bufsz; + __guest_handle_void name; + } vendor; + struct { + uint64_t addr; + uint64_t size; + uint64_t attr; + uint32_t type; + } mem; +}; + +struct xenpf_firmware_info { + uint32_t type; + uint32_t index; + union { + struct { + uint8_t device; + uint8_t version; + uint16_t interface_support; + uint16_t legacy_max_cylinder; + uint8_t legacy_max_head; + uint8_t legacy_sectors_per_track; + __guest_handle_void edd_params; + } disk_info; + struct { + uint8_t device; + uint32_t mbr_signature; + } disk_mbr_signature; + struct { + uint8_t capabilities; + uint8_t edid_transfer_time; + __guest_handle_uchar edid; + } vbeddc_info; + union xenpf_efi_info efi_info; + uint8_t kbd_shift_flags; + } u; +}; + +struct xenpf_enter_acpi_sleep { + uint16_t val_a; + uint16_t val_b; + uint32_t sleep_state; + uint32_t flags; +}; + +struct xenpf_change_freq { + uint32_t flags; + uint32_t cpu; + uint64_t freq; +}; + +struct xenpf_getidletime { + __guest_handle_uchar cpumap_bitmap; + uint32_t cpumap_nr_cpus; + __guest_handle_uint64_t idletime; + uint64_t now; +}; + +struct xen_power_register { + uint32_t space_id; + uint32_t bit_width; + uint32_t bit_offset; + uint32_t access_size; + uint64_t address; +}; + +struct xen_processor_csd { + uint32_t domain; + uint32_t coord_type; + uint32_t num; +}; + +typedef struct xen_processor_csd *__guest_handle_xen_processor_csd; + +struct xen_processor_cx { + struct xen_power_register reg; + uint8_t type; + uint32_t latency; + uint32_t power; + uint32_t dpcnt; + __guest_handle_xen_processor_csd dp; +}; + +typedef struct xen_processor_cx *__guest_handle_xen_processor_cx; + +struct xen_processor_flags { + uint32_t bm_control: 1; + uint32_t bm_check: 1; + uint32_t has_cst: 1; + uint32_t power_setup_done: 1; + uint32_t bm_rld_set: 1; +}; + +struct xen_processor_power { + uint32_t count; + struct xen_processor_flags flags; + __guest_handle_xen_processor_cx states; +}; + +struct xen_pct_register { + uint8_t descriptor; + uint16_t length; + uint8_t space_id; + uint8_t bit_width; + uint8_t bit_offset; + uint8_t reserved; + uint64_t address; +}; + +struct xen_processor_px { + uint64_t core_frequency; + uint64_t power; + uint64_t transition_latency; + uint64_t bus_master_latency; 
+ uint64_t control; + uint64_t status; +}; + +typedef struct xen_processor_px *__guest_handle_xen_processor_px; + +struct xen_psd_package { + uint64_t num_entries; + uint64_t revision; + uint64_t domain; + uint64_t coord_type; + uint64_t num_processors; +}; + +struct xen_processor_performance { + uint32_t flags; + uint32_t platform_limit; + struct xen_pct_register control_register; + struct xen_pct_register status_register; + uint32_t state_count; + __guest_handle_xen_processor_px states; + struct xen_psd_package domain_info; + uint32_t shared_type; +}; + +struct xenpf_set_processor_pminfo { + uint32_t id; + uint32_t type; + union { + struct xen_processor_power power; + struct xen_processor_performance perf; + __guest_handle_uint32_t pdc; + }; +}; + +struct xenpf_pcpuinfo { + uint32_t xen_cpuid; + uint32_t max_present; + uint32_t flags; + uint32_t apic_id; + uint32_t acpi_id; +}; + +struct xenpf_cpu_ol { + uint32_t cpuid; +}; + +struct xenpf_cpu_hotadd { + uint32_t apic_id; + uint32_t acpi_id; + uint32_t pxm; +}; + +struct xenpf_mem_hotadd { + uint64_t spfn; + uint64_t epfn; + uint32_t pxm; + uint32_t flags; +}; + +struct xenpf_core_parking { + uint32_t type; + uint32_t idle_nums; +}; + +struct xenpf_symdata { + uint32_t namelen; + uint32_t symnum; + __guest_handle_char name; + uint64_t address; + char type; +}; + +struct xen_platform_op { + uint32_t cmd; + uint32_t interface_version; + union { + struct xenpf_settime32 settime32; + struct xenpf_settime64 settime64; + struct xenpf_add_memtype add_memtype; + struct xenpf_del_memtype del_memtype; + struct xenpf_read_memtype read_memtype; + struct xenpf_microcode_update microcode; + struct xenpf_platform_quirk platform_quirk; + struct xenpf_efi_runtime_call efi_runtime_call; + struct xenpf_firmware_info firmware_info; + struct xenpf_enter_acpi_sleep enter_acpi_sleep; + struct xenpf_change_freq change_freq; + struct xenpf_getidletime getidletime; + struct xenpf_set_processor_pminfo set_pminfo; + struct xenpf_pcpuinfo pcpu_info; + struct xenpf_cpu_ol cpu_ol; + struct xenpf_cpu_hotadd cpu_add; + struct xenpf_mem_hotadd mem_add; + struct xenpf_core_parking core_parking; + struct xenpf_symdata symdata; + uint8_t pad[128]; + } u; +}; + +struct vcpu_set_singleshot_timer { + uint64_t timeout_abs_ns; + uint32_t flags; +}; + +typedef struct vcpu_time_info *__guest_handle_vcpu_time_info; + +struct vcpu_register_time_memory_area { + union { + __guest_handle_vcpu_time_info h; + struct pvclock_vcpu_time_info *v; + uint64_t p; + } addr; +}; + +struct xen_clock_event_device { + struct clock_event_device evt; + char name[16]; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +typedef int (*pte_fn_t)(pte_t *, long unsigned int, void *); + +typedef uint16_t grant_status_t; + +struct grant_frames { + xen_pfn_t *pfn; + unsigned int count; + void *vaddr; +}; + +struct gnttab_vm_area { + struct vm_struct *area; + pte_t **ptes; + int idx; +}; + +enum acpi_irq_model_id { + ACPI_IRQ_MODEL_PIC = 0, + ACPI_IRQ_MODEL_IOAPIC = 1, + ACPI_IRQ_MODEL_IOSAPIC = 2, + ACPI_IRQ_MODEL_PLATFORM = 3, + ACPI_IRQ_MODEL_GIC = 4, + ACPI_IRQ_MODEL_COUNT = 5, +}; + +typedef uint16_t domid_t; + +struct xen_add_to_physmap { + domid_t domid; + uint16_t size; + unsigned int space; + xen_ulong_t idx; + xen_pfn_t gpfn; +}; + +struct machine_ops { + void (*restart)(char *); + void (*halt)(); + void (*power_off)(); + void (*shutdown)(); + void (*crash_shutdown)(struct pt_regs *); + void (*emergency_restart)(); +}; + +enum x86_hypervisor_type { + X86_HYPER_NATIVE = 0, + 
X86_HYPER_VMWARE = 1, + X86_HYPER_MS_HYPERV = 2, + X86_HYPER_XEN_PV = 3, + X86_HYPER_XEN_HVM = 4, + X86_HYPER_KVM = 5, + X86_HYPER_JAILHOUSE = 6, + X86_HYPER_ACRN = 7, +}; + +struct hypervisor_x86 { + const char *name; + uint32_t (*detect)(); + enum x86_hypervisor_type type; + struct x86_hyper_init init; + struct x86_hyper_runtime runtime; + bool ignore_nopv; +}; + +enum e820_type { + E820_TYPE_RAM = 1, + E820_TYPE_RESERVED = 2, + E820_TYPE_ACPI = 3, + E820_TYPE_NVS = 4, + E820_TYPE_UNUSABLE = 5, + E820_TYPE_PMEM = 7, + E820_TYPE_PRAM = 12, + E820_TYPE_SOFT_RESERVED = 4026531839, + E820_TYPE_RESERVED_KERN = 128, +}; + +struct xen_hvm_pagetable_dying { + domid_t domid; + __u64 gpa; +}; + +enum hvmmem_type_t { + HVMMEM_ram_rw = 0, + HVMMEM_ram_ro = 1, + HVMMEM_mmio_dm = 2, +}; + +struct xen_hvm_get_mem_type { + domid_t domid; + uint16_t mem_type; + uint16_t pad[2]; + uint64_t pfn; +}; + +struct e820_entry { + u64 addr; + u64 size; + enum e820_type type; +} __attribute__((packed)); + +struct e820_table { + __u32 nr_entries; + struct e820_entry entries[3200]; +} __attribute__((packed)); + +typedef xen_pfn_t *__guest_handle_xen_pfn_t; + +typedef long unsigned int xen_callback_t; + +struct mmu_update { + uint64_t ptr; + uint64_t val; +}; + +struct xen_memory_region { + long unsigned int start_pfn; + long unsigned int n_pfns; +}; + +struct callback_register { + uint16_t type; + uint16_t flags; + xen_callback_t address; +}; + +struct xen_memory_reservation { + __guest_handle_xen_pfn_t extent_start; + xen_ulong_t nr_extents; + unsigned int extent_order; + unsigned int address_bits; + domid_t domid; +}; + +struct xen_memory_map { + unsigned int nr_entries; + __guest_handle_void buffer; +}; + +struct x86_apic_ops { + unsigned int (*io_apic_read)(unsigned int, unsigned int); + void (*restore)(); +}; + +struct physdev_apic { + long unsigned int apic_physbase; + uint32_t reg; + uint32_t value; +}; + +typedef long unsigned int uintptr_t; + +struct xen_pmu_amd_ctxt { + uint32_t counters; + uint32_t ctrls; + uint64_t regs[0]; +}; + +struct xen_pmu_cntr_pair { + uint64_t counter; + uint64_t control; +}; + +struct xen_pmu_intel_ctxt { + uint32_t fixed_counters; + uint32_t arch_counters; + uint64_t global_ctrl; + uint64_t global_ovf_ctrl; + uint64_t global_status; + uint64_t fixed_ctrl; + uint64_t ds_area; + uint64_t pebs_enable; + uint64_t debugctl; + uint64_t regs[0]; +}; + +struct xen_pmu_regs { + uint64_t ip; + uint64_t sp; + uint64_t flags; + uint16_t cs; + uint16_t ss; + uint8_t cpl; + uint8_t pad[3]; +}; + +struct xen_pmu_arch { + union { + struct xen_pmu_regs regs; + uint8_t pad[64]; + } r; + uint64_t pmu_flags; + union { + uint32_t lapic_lvtpc; + uint64_t pad; + } l; + union { + struct xen_pmu_amd_ctxt amd; + struct xen_pmu_intel_ctxt intel; + uint8_t pad[128]; + } c; +}; + +struct xen_pmu_params { + struct { + uint32_t maj; + uint32_t min; + } version; + uint64_t val; + uint32_t vcpu; + uint32_t pad; +}; + +struct xen_pmu_data { + uint32_t vcpu_id; + uint32_t pcpu_id; + domid_t domain_id; + uint8_t pad[6]; + struct xen_pmu_arch pmu; +}; + +struct xenpmu { + struct xen_pmu_data *xenpmu_data; + uint8_t flags; +}; + +enum pg_level { + PG_LEVEL_NONE = 0, + PG_LEVEL_4K = 1, + PG_LEVEL_2M = 2, + PG_LEVEL_1G = 3, + PG_LEVEL_512G = 4, + PG_LEVEL_NUM = 5, +}; + +typedef uint32_t grant_ref_t; + +typedef uint32_t grant_handle_t; + +struct gnttab_map_grant_ref { + uint64_t host_addr; + uint32_t flags; + grant_ref_t ref; + domid_t dom; + int16_t status; + grant_handle_t handle; + uint64_t dev_bus_addr; +}; + 
+struct gnttab_unmap_grant_ref { + uint64_t host_addr; + uint64_t dev_bus_addr; + grant_handle_t handle; + int16_t status; +}; + +enum { + DESC_TSS = 9, + DESC_LDT = 2, + DESCTYPE_S = 16, +}; + +enum paravirt_lazy_mode { + PARAVIRT_LAZY_NONE = 0, + PARAVIRT_LAZY_MMU = 1, + PARAVIRT_LAZY_CPU = 2, +}; + +struct plist_head { + struct list_head node_list; +}; + +enum pm_qos_type { + PM_QOS_UNITIALIZED = 0, + PM_QOS_MAX = 1, + PM_QOS_MIN = 2, +}; + +struct pm_qos_constraints { + struct plist_head list; + s32 target_value; + s32 default_value; + s32 no_constraint_value; + enum pm_qos_type type; + struct blocking_notifier_head *notifiers; +}; + +struct freq_constraints { + struct pm_qos_constraints min_freq; + struct blocking_notifier_head min_freq_notifiers; + struct pm_qos_constraints max_freq; + struct blocking_notifier_head max_freq_notifiers; +}; + +struct pm_qos_flags { + struct list_head list; + s32 effective_flags; +}; + +struct dev_pm_qos_request; + +struct dev_pm_qos { + struct pm_qos_constraints resume_latency; + struct pm_qos_constraints latency_tolerance; + struct freq_constraints freq; + struct pm_qos_flags flags; + struct dev_pm_qos_request *resume_latency_req; + struct dev_pm_qos_request *latency_tolerance_req; + struct dev_pm_qos_request *flags_req; +}; + +typedef long int xen_long_t; + +struct trap_info { + uint8_t vector; + uint8_t flags; + uint16_t cs; + long unsigned int address; +}; + +struct mmuext_op { + unsigned int cmd; + union { + xen_pfn_t mfn; + long unsigned int linear_addr; + } arg1; + union { + unsigned int nr_ents; + void *vcpumask; + xen_pfn_t src_mfn; + } arg2; +}; + +struct multicall_entry { + xen_ulong_t op; + xen_long_t result; + xen_ulong_t args[6]; +}; + +struct dom0_vga_console_info { + uint8_t video_type; + union { + struct { + uint16_t font_height; + uint16_t cursor_x; + uint16_t cursor_y; + uint16_t rows; + uint16_t columns; + } text_mode_3; + struct { + uint16_t width; + uint16_t height; + uint16_t bytes_per_line; + uint16_t bits_per_pixel; + uint32_t lfb_base; + uint32_t lfb_size; + uint8_t red_pos; + uint8_t red_size; + uint8_t green_pos; + uint8_t green_size; + uint8_t blue_pos; + uint8_t blue_size; + uint8_t rsvd_pos; + uint8_t rsvd_size; + uint32_t gbl_caps; + uint16_t mode_attrs; + } vesa_lfb; + } u; +}; + +struct physdev_set_iopl { + uint32_t iopl; +}; + +struct physdev_set_iobitmap { + uint8_t *bitmap; + uint32_t nr_ports; +}; + +struct xen_extraversion { + char extraversion[16]; +}; + +typedef u32 acpi_status; + +struct pm_qos_flags_request { + struct list_head node; + s32 flags; +}; + +enum freq_qos_req_type { + FREQ_QOS_MIN = 1, + FREQ_QOS_MAX = 2, +}; + +struct freq_qos_request { + enum freq_qos_req_type type; + struct plist_node pnode; + struct freq_constraints *qos; +}; + +enum dev_pm_qos_req_type { + DEV_PM_QOS_RESUME_LATENCY = 1, + DEV_PM_QOS_LATENCY_TOLERANCE = 2, + DEV_PM_QOS_MIN_FREQUENCY = 3, + DEV_PM_QOS_MAX_FREQUENCY = 4, + DEV_PM_QOS_FLAGS = 5, +}; + +struct dev_pm_qos_request { + enum dev_pm_qos_req_type type; + union { + struct plist_node pnode; + struct pm_qos_flags_request flr; + struct freq_qos_request freq; + } data; + struct device *dev; +}; + +struct multicall_space { + struct multicall_entry *mc; + void *args; +}; + +struct tls_descs { + struct desc_struct desc[3]; +}; + +struct trap_array_entry { + void (*orig)(); + void (*xen)(); + bool ist_okay; +}; + +struct mmu_gather_batch { + struct mmu_gather_batch *next; + unsigned int nr; + unsigned int max; + struct page *pages[0]; +}; + +struct mmu_table_batch; + +struct 
mmu_gather { + struct mm_struct *mm; + struct mmu_table_batch *batch; + long unsigned int start; + long unsigned int end; + unsigned int fullmm: 1; + unsigned int need_flush_all: 1; + unsigned int freed_tables: 1; + unsigned int cleared_ptes: 1; + unsigned int cleared_pmds: 1; + unsigned int cleared_puds: 1; + unsigned int cleared_p4ds: 1; + unsigned int vma_exec: 1; + unsigned int vma_huge: 1; + unsigned int batch_count; + struct mmu_gather_batch *active; + struct mmu_gather_batch local; + struct page *__pages[8]; +}; + +struct mmu_table_batch { + struct callback_head rcu; + unsigned int nr; + void *tables[0]; +}; + +struct xen_memory_exchange { + struct xen_memory_reservation in; + struct xen_memory_reservation out; + xen_ulong_t nr_exchanged; +}; + +struct xen_machphys_mapping { + xen_ulong_t v_start; + xen_ulong_t v_end; + xen_ulong_t max_mfn; +}; + +enum pt_level { + PT_PGD = 0, + PT_P4D = 1, + PT_PUD = 2, + PT_PMD = 3, + PT_PTE = 4, +}; + +struct remap_data { + xen_pfn_t *pfn; + bool contiguous; + bool no_translate; + pgprot_t prot; + struct mmu_update *mmu_update; +}; + +enum xen_mc_flush_reason { + XEN_MC_FL_NONE = 0, + XEN_MC_FL_BATCH = 1, + XEN_MC_FL_ARGS = 2, + XEN_MC_FL_CALLBACK = 3, +}; + +enum xen_mc_extend_args { + XEN_MC_XE_OK = 0, + XEN_MC_XE_BAD_OP = 1, + XEN_MC_XE_NO_SPACE = 2, +}; + +typedef void (*xen_mc_callback_fn_t)(void *); + +struct callback { + void (*fn)(void *); + void *data; +}; + +struct mc_buffer { + unsigned int mcidx; + unsigned int argidx; + unsigned int cbidx; + struct multicall_entry entries[32]; + unsigned char args[512]; + struct callback callbacks[32]; +}; + +struct hvm_start_info { + uint32_t magic; + uint32_t version; + uint32_t flags; + uint32_t nr_modules; + uint64_t modlist_paddr; + uint64_t cmdline_paddr; + uint64_t rsdp_paddr; + uint64_t memmap_paddr; + uint32_t memmap_entries; + uint32_t reserved; +}; + +struct trace_event_raw_xen_mc__batch { + struct trace_entry ent; + enum paravirt_lazy_mode mode; + char __data[0]; +}; + +struct trace_event_raw_xen_mc_entry { + struct trace_entry ent; + unsigned int op; + unsigned int nargs; + long unsigned int args[6]; + char __data[0]; +}; + +struct trace_event_raw_xen_mc_entry_alloc { + struct trace_entry ent; + size_t args; + char __data[0]; +}; + +struct trace_event_raw_xen_mc_callback { + struct trace_entry ent; + xen_mc_callback_fn_t fn; + void *data; + char __data[0]; +}; + +struct trace_event_raw_xen_mc_flush_reason { + struct trace_entry ent; + enum xen_mc_flush_reason reason; + char __data[0]; +}; + +struct trace_event_raw_xen_mc_flush { + struct trace_entry ent; + unsigned int mcidx; + unsigned int argidx; + unsigned int cbidx; + char __data[0]; +}; + +struct trace_event_raw_xen_mc_extend_args { + struct trace_entry ent; + unsigned int op; + size_t args; + enum xen_mc_extend_args res; + char __data[0]; +}; + +struct trace_event_raw_xen_mmu__set_pte { + struct trace_entry ent; + pte_t *ptep; + pteval_t pteval; + char __data[0]; +}; + +struct trace_event_raw_xen_mmu_set_pmd { + struct trace_entry ent; + pmd_t *pmdp; + pmdval_t pmdval; + char __data[0]; +}; + +struct trace_event_raw_xen_mmu_set_pud { + struct trace_entry ent; + pud_t *pudp; + pudval_t pudval; + char __data[0]; +}; + +struct trace_event_raw_xen_mmu_set_p4d { + struct trace_entry ent; + p4d_t *p4dp; + p4d_t *user_p4dp; + p4dval_t p4dval; + char __data[0]; +}; + +struct trace_event_raw_xen_mmu_ptep_modify_prot { + struct trace_entry ent; + struct mm_struct *mm; + long unsigned int addr; + pte_t *ptep; + pteval_t pteval; + char 
__data[0]; +}; + +struct trace_event_raw_xen_mmu_alloc_ptpage { + struct trace_entry ent; + struct mm_struct *mm; + long unsigned int pfn; + unsigned int level; + bool pinned; + char __data[0]; +}; + +struct trace_event_raw_xen_mmu_release_ptpage { + struct trace_entry ent; + long unsigned int pfn; + unsigned int level; + bool pinned; + char __data[0]; +}; + +struct trace_event_raw_xen_mmu_pgd { + struct trace_entry ent; + struct mm_struct *mm; + pgd_t *pgd; + char __data[0]; +}; + +struct trace_event_raw_xen_mmu_flush_tlb_one_user { + struct trace_entry ent; + long unsigned int addr; + char __data[0]; +}; + +struct trace_event_raw_xen_mmu_flush_tlb_multi { + struct trace_entry ent; + unsigned int ncpus; + struct mm_struct *mm; + long unsigned int addr; + long unsigned int end; + char __data[0]; +}; + +struct trace_event_raw_xen_mmu_write_cr3 { + struct trace_entry ent; + bool kernel; + long unsigned int cr3; + char __data[0]; +}; + +struct trace_event_raw_xen_cpu_write_ldt_entry { + struct trace_entry ent; + struct desc_struct *dt; + int entrynum; + u64 desc; + char __data[0]; +}; + +struct trace_event_raw_xen_cpu_write_idt_entry { + struct trace_entry ent; + gate_desc *dt; + int entrynum; + char __data[0]; +}; + +struct trace_event_raw_xen_cpu_load_idt { + struct trace_entry ent; + long unsigned int addr; + char __data[0]; +}; + +struct trace_event_raw_xen_cpu_write_gdt_entry { + struct trace_entry ent; + u64 desc; + struct desc_struct *dt; + int entrynum; + int type; + char __data[0]; +}; + +struct trace_event_raw_xen_cpu_set_ldt { + struct trace_entry ent; + const void *addr; + unsigned int entries; + char __data[0]; +}; + +struct trace_event_data_offsets_xen_mc__batch {}; + +struct trace_event_data_offsets_xen_mc_entry {}; + +struct trace_event_data_offsets_xen_mc_entry_alloc {}; + +struct trace_event_data_offsets_xen_mc_callback {}; + +struct trace_event_data_offsets_xen_mc_flush_reason {}; + +struct trace_event_data_offsets_xen_mc_flush {}; + +struct trace_event_data_offsets_xen_mc_extend_args {}; + +struct trace_event_data_offsets_xen_mmu__set_pte {}; + +struct trace_event_data_offsets_xen_mmu_set_pmd {}; + +struct trace_event_data_offsets_xen_mmu_set_pud {}; + +struct trace_event_data_offsets_xen_mmu_set_p4d {}; + +struct trace_event_data_offsets_xen_mmu_ptep_modify_prot {}; + +struct trace_event_data_offsets_xen_mmu_alloc_ptpage {}; + +struct trace_event_data_offsets_xen_mmu_release_ptpage {}; + +struct trace_event_data_offsets_xen_mmu_pgd {}; + +struct trace_event_data_offsets_xen_mmu_flush_tlb_one_user {}; + +struct trace_event_data_offsets_xen_mmu_flush_tlb_multi {}; + +struct trace_event_data_offsets_xen_mmu_write_cr3 {}; + +struct trace_event_data_offsets_xen_cpu_write_ldt_entry {}; + +struct trace_event_data_offsets_xen_cpu_write_idt_entry {}; + +struct trace_event_data_offsets_xen_cpu_load_idt {}; + +struct trace_event_data_offsets_xen_cpu_write_gdt_entry {}; + +struct trace_event_data_offsets_xen_cpu_set_ldt {}; + +typedef void (*btf_trace_xen_mc_batch)(void *, enum paravirt_lazy_mode); + +typedef void (*btf_trace_xen_mc_issue)(void *, enum paravirt_lazy_mode); + +typedef void (*btf_trace_xen_mc_entry)(void *, struct multicall_entry *, unsigned int); + +typedef void (*btf_trace_xen_mc_entry_alloc)(void *, size_t); + +typedef void (*btf_trace_xen_mc_callback)(void *, xen_mc_callback_fn_t, void *); + +typedef void (*btf_trace_xen_mc_flush_reason)(void *, enum xen_mc_flush_reason); + +typedef void (*btf_trace_xen_mc_flush)(void *, unsigned int, unsigned int, unsigned int); + 
+typedef void (*btf_trace_xen_mc_extend_args)(void *, long unsigned int, size_t, enum xen_mc_extend_args); + +typedef void (*btf_trace_xen_mmu_set_pte)(void *, pte_t *, pte_t); + +typedef void (*btf_trace_xen_mmu_set_pmd)(void *, pmd_t *, pmd_t); + +typedef void (*btf_trace_xen_mmu_set_pud)(void *, pud_t *, pud_t); + +typedef void (*btf_trace_xen_mmu_set_p4d)(void *, p4d_t *, p4d_t *, p4d_t); + +typedef void (*btf_trace_xen_mmu_ptep_modify_prot_start)(void *, struct mm_struct *, long unsigned int, pte_t *, pte_t); + +typedef void (*btf_trace_xen_mmu_ptep_modify_prot_commit)(void *, struct mm_struct *, long unsigned int, pte_t *, pte_t); + +typedef void (*btf_trace_xen_mmu_alloc_ptpage)(void *, struct mm_struct *, long unsigned int, unsigned int, bool); + +typedef void (*btf_trace_xen_mmu_release_ptpage)(void *, long unsigned int, unsigned int, bool); + +typedef void (*btf_trace_xen_mmu_pgd_pin)(void *, struct mm_struct *, pgd_t *); + +typedef void (*btf_trace_xen_mmu_pgd_unpin)(void *, struct mm_struct *, pgd_t *); + +typedef void (*btf_trace_xen_mmu_flush_tlb_one_user)(void *, long unsigned int); + +typedef void (*btf_trace_xen_mmu_flush_tlb_multi)(void *, const struct cpumask *, struct mm_struct *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_xen_mmu_write_cr3)(void *, bool, long unsigned int); + +typedef void (*btf_trace_xen_cpu_write_ldt_entry)(void *, struct desc_struct *, int, u64); + +typedef void (*btf_trace_xen_cpu_write_idt_entry)(void *, gate_desc *, int, const gate_desc *); + +typedef void (*btf_trace_xen_cpu_load_idt)(void *, const struct desc_ptr *); + +typedef void (*btf_trace_xen_cpu_write_gdt_entry)(void *, struct desc_struct *, int, const void *, int); + +typedef void (*btf_trace_xen_cpu_set_ldt)(void *, const void *, unsigned int); + +enum ipi_vector { + XEN_RESCHEDULE_VECTOR = 0, + XEN_CALL_FUNCTION_VECTOR = 1, + XEN_CALL_FUNCTION_SINGLE_VECTOR = 2, + XEN_SPIN_UNLOCK_VECTOR = 3, + XEN_IRQ_WORK_VECTOR = 4, + XEN_NMI_VECTOR = 5, + XEN_NR_IPIS = 6, +}; + +struct xen_common_irq { + int irq; + char *name; +}; + +struct cpu_user_regs { + uint64_t r15; + uint64_t r14; + uint64_t r13; + uint64_t r12; + union { + uint64_t rbp; + uint64_t ebp; + uint32_t _ebp; + }; + union { + uint64_t rbx; + uint64_t ebx; + uint32_t _ebx; + }; + uint64_t r11; + uint64_t r10; + uint64_t r9; + uint64_t r8; + union { + uint64_t rax; + uint64_t eax; + uint32_t _eax; + }; + union { + uint64_t rcx; + uint64_t ecx; + uint32_t _ecx; + }; + union { + uint64_t rdx; + uint64_t edx; + uint32_t _edx; + }; + union { + uint64_t rsi; + uint64_t esi; + uint32_t _esi; + }; + union { + uint64_t rdi; + uint64_t edi; + uint32_t _edi; + }; + uint32_t error_code; + uint32_t entry_vector; + union { + uint64_t rip; + uint64_t eip; + uint32_t _eip; + }; + uint16_t cs; + uint16_t _pad0[1]; + uint8_t saved_upcall_mask; + uint8_t _pad1[3]; + union { + uint64_t rflags; + uint64_t eflags; + uint32_t _eflags; + }; + union { + uint64_t rsp; + uint64_t esp; + uint32_t _esp; + }; + uint16_t ss; + uint16_t _pad2[3]; + uint16_t es; + uint16_t _pad3[3]; + uint16_t ds; + uint16_t _pad4[3]; + uint16_t fs; + uint16_t _pad5[3]; + uint16_t gs; + uint16_t _pad6[3]; +}; + +struct vcpu_guest_context { + struct { + char x[512]; + } fpu_ctxt; + long unsigned int flags; + struct cpu_user_regs user_regs; + struct trap_info trap_ctxt[256]; + long unsigned int ldt_base; + long unsigned int ldt_ents; + long unsigned int gdt_frames[16]; + long unsigned int gdt_ents; + long unsigned int kernel_ss; + long unsigned int kernel_sp; 
+ long unsigned int ctrlreg[8]; + long unsigned int debugreg[8]; + long unsigned int event_callback_eip; + long unsigned int failsafe_callback_eip; + long unsigned int syscall_callback_eip; + long unsigned int vm_assist; + uint64_t fs_base; + uint64_t gs_base_kernel; + uint64_t gs_base_user; +}; + +struct sg_table { + struct scatterlist *sgl; + unsigned int nents; + unsigned int orig_nents; +}; + +enum swiotlb_force { + SWIOTLB_NORMAL = 0, + SWIOTLB_FORCE = 1, + SWIOTLB_NO_FORCE = 2, +}; + +struct iommu_table_entry { + initcall_t detect; + initcall_t depend; + void (*early_init)(); + void (*late_init)(); + int flags; +}; + +union efi_boot_services; + +typedef union efi_boot_services efi_boot_services_t; + +typedef struct { + efi_table_hdr_t hdr; + u32 fw_vendor; + u32 fw_revision; + u32 con_in_handle; + u32 con_in; + u32 con_out_handle; + u32 con_out; + u32 stderr_handle; + u32 stderr; + u32 runtime; + u32 boottime; + u32 nr_tables; + u32 tables; +} efi_system_table_32_t; + +union efi_simple_text_input_protocol; + +typedef union efi_simple_text_input_protocol efi_simple_text_input_protocol_t; + +union efi_simple_text_output_protocol; + +typedef union efi_simple_text_output_protocol efi_simple_text_output_protocol_t; + +typedef union { + struct { + efi_table_hdr_t hdr; + long unsigned int fw_vendor; + u32 fw_revision; + long unsigned int con_in_handle; + efi_simple_text_input_protocol_t *con_in; + long unsigned int con_out_handle; + efi_simple_text_output_protocol_t *con_out; + long unsigned int stderr_handle; + long unsigned int stderr; + efi_runtime_services_t *runtime; + efi_boot_services_t *boottime; + long unsigned int nr_tables; + long unsigned int tables; + }; + efi_system_table_32_t mixed_mode; +} efi_system_table_t; + +enum efi_secureboot_mode { + efi_secureboot_mode_unset = 0, + efi_secureboot_mode_unknown = 1, + efi_secureboot_mode_disabled = 2, + efi_secureboot_mode_enabled = 3, +}; + +struct hvm_modlist_entry { + uint64_t paddr; + uint64_t size; + uint64_t cmdline_paddr; + uint64_t reserved; +}; + +struct hvm_memmap_table_entry { + uint64_t addr; + uint64_t size; + uint32_t type; + uint32_t reserved; +}; + +enum { + WORK_STRUCT_PENDING_BIT = 0, + WORK_STRUCT_DELAYED_BIT = 1, + WORK_STRUCT_PWQ_BIT = 2, + WORK_STRUCT_LINKED_BIT = 3, + WORK_STRUCT_COLOR_SHIFT = 4, + WORK_STRUCT_COLOR_BITS = 4, + WORK_STRUCT_PENDING = 1, + WORK_STRUCT_DELAYED = 2, + WORK_STRUCT_PWQ = 4, + WORK_STRUCT_LINKED = 8, + WORK_STRUCT_STATIC = 0, + WORK_NR_COLORS = 15, + WORK_NO_COLOR = 15, + WORK_CPU_UNBOUND = 8192, + WORK_STRUCT_FLAG_BITS = 8, + WORK_OFFQ_FLAG_BASE = 4, + __WORK_OFFQ_CANCELING = 4, + WORK_OFFQ_CANCELING = 16, + WORK_OFFQ_FLAG_BITS = 1, + WORK_OFFQ_POOL_SHIFT = 5, + WORK_OFFQ_LEFT = 59, + WORK_OFFQ_POOL_BITS = 31, + WORK_OFFQ_POOL_NONE = 2147483647, + WORK_STRUCT_FLAG_MASK = 255, + WORK_STRUCT_WQ_DATA_MASK = 4294967040, + WORK_STRUCT_NO_POOL = 4294967264, + WORK_BUSY_PENDING = 1, + WORK_BUSY_RUNNING = 2, + WORKER_DESC_LEN = 24, +}; + +enum { + MEMREMAP_WB = 1, + MEMREMAP_WT = 2, + MEMREMAP_WC = 4, + MEMREMAP_ENC = 8, + MEMREMAP_DEC = 16, +}; + +enum hv_isolation_type { + HV_ISOLATION_TYPE_NONE = 0, + HV_ISOLATION_TYPE_VBS = 1, + HV_ISOLATION_TYPE_SNP = 2, +}; + +union hv_x64_msr_hypercall_contents { + u64 as_uint64; + struct { + u64 enable: 1; + u64 reserved: 11; + u64 guest_physical_address: 52; + }; +}; + +struct hv_reenlightenment_control { + __u64 vector: 8; + __u64 reserved1: 8; + __u64 enabled: 1; + __u64 reserved2: 15; + __u64 target_vp: 32; +}; + +struct hv_tsc_emulation_control { 
+ __u64 enabled: 1; + __u64 reserved: 63; +}; + +struct hv_tsc_emulation_status { + __u64 inprogress: 1; + __u64 reserved: 63; +}; + +struct hv_nested_enlightenments_control { + struct { + __u32 directhypercall: 1; + __u32 reserved: 31; + } features; + struct { + __u32 reserved; + } hypercallControls; +}; + +struct hv_vp_assist_page { + __u32 apic_assist; + __u32 reserved1; + __u64 vtl_control[3]; + struct hv_nested_enlightenments_control nested_control; + __u8 enlighten_vmentry; + __u8 reserved2[7]; + __u64 current_nested_vmcs; +}; + +struct hv_get_partition_id { + u64 partition_id; +}; + +struct ms_hyperv_info { + u32 features; + u32 priv_high; + u32 misc_features; + u32 hints; + u32 nested_features; + u32 max_vp_index; + u32 max_lp_index; + u32 isolation_config_a; + u32 isolation_config_b; +}; + +enum HV_GENERIC_SET_FORMAT { + HV_GENERIC_SET_SPARSE_4K = 0, + HV_GENERIC_SET_ALL = 1, +}; + +struct hv_vpset { + u64 format; + u64 valid_bank_mask; + u64 bank_contents[0]; +}; + +struct hv_tlb_flush { + u64 address_space; + u64 flags; + u64 processor_mask; + u64 gva_list[0]; +}; + +struct hv_tlb_flush_ex { + u64 address_space; + u64 flags; + struct hv_vpset hv_vp_set; + u64 gva_list[0]; +}; + +struct trace_event_raw_hyperv_mmu_flush_tlb_multi { + struct trace_entry ent; + unsigned int ncpus; + struct mm_struct *mm; + long unsigned int addr; + long unsigned int end; + char __data[0]; +}; + +struct trace_event_raw_hyperv_nested_flush_guest_mapping { + struct trace_entry ent; + u64 as; + int ret; + char __data[0]; +}; + +struct trace_event_raw_hyperv_nested_flush_guest_mapping_range { + struct trace_entry ent; + u64 as; + int ret; + char __data[0]; +}; + +struct trace_event_raw_hyperv_send_ipi_mask { + struct trace_entry ent; + unsigned int ncpus; + int vector; + char __data[0]; +}; + +struct trace_event_raw_hyperv_send_ipi_one { + struct trace_entry ent; + int cpu; + int vector; + char __data[0]; +}; + +struct trace_event_data_offsets_hyperv_mmu_flush_tlb_multi {}; + +struct trace_event_data_offsets_hyperv_nested_flush_guest_mapping {}; + +struct trace_event_data_offsets_hyperv_nested_flush_guest_mapping_range {}; + +struct trace_event_data_offsets_hyperv_send_ipi_mask {}; + +struct trace_event_data_offsets_hyperv_send_ipi_one {}; + +typedef void (*btf_trace_hyperv_mmu_flush_tlb_multi)(void *, const struct cpumask *, const struct flush_tlb_info *); + +typedef void (*btf_trace_hyperv_nested_flush_guest_mapping)(void *, u64, int); + +typedef void (*btf_trace_hyperv_nested_flush_guest_mapping_range)(void *, u64, int); + +typedef void (*btf_trace_hyperv_send_ipi_mask)(void *, const struct cpumask *, int); + +typedef void (*btf_trace_hyperv_send_ipi_one)(void *, int, int); + +struct hv_guest_mapping_flush { + u64 address_space; + u64 flags; +}; + +union hv_gpa_page_range { + u64 address_space; + struct { + u64 additional_pages: 11; + u64 largepage: 1; + u64 basepfn: 52; + } page; + struct { + u64 reserved: 12; + u64 page_size: 1; + u64 reserved1: 8; + u64 base_large_pfn: 43; + }; +}; + +struct hv_guest_mapping_flush_list { + u64 address_space; + u64 flags; + union hv_gpa_page_range gpa_list[510]; +}; + +typedef int (*hyperv_fill_flush_list_func)(struct hv_guest_mapping_flush_list *, void *); + +enum { + IRQCHIP_SET_TYPE_MASKED = 1, + IRQCHIP_EOI_IF_HANDLED = 2, + IRQCHIP_MASK_ON_SUSPEND = 4, + IRQCHIP_ONOFFLINE_ENABLED = 8, + IRQCHIP_SKIP_SET_WAKE = 16, + IRQCHIP_ONESHOT_SAFE = 32, + IRQCHIP_EOI_THREADED = 64, + IRQCHIP_SUPPORTS_LEVEL_MSI = 128, + IRQCHIP_SUPPORTS_NMI = 256, + 
IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND = 512, + IRQCHIP_AFFINITY_PRE_STARTUP = 1024, +}; + +enum irq_alloc_type { + X86_IRQ_ALLOC_TYPE_IOAPIC = 1, + X86_IRQ_ALLOC_TYPE_HPET = 2, + X86_IRQ_ALLOC_TYPE_PCI_MSI = 3, + X86_IRQ_ALLOC_TYPE_PCI_MSIX = 4, + X86_IRQ_ALLOC_TYPE_DMAR = 5, + X86_IRQ_ALLOC_TYPE_AMDVI = 6, + X86_IRQ_ALLOC_TYPE_UV = 7, +}; + +struct ioapic_alloc_info { + int pin; + int node; + u32 is_level: 1; + u32 active_low: 1; + u32 valid: 1; +}; + +struct uv_alloc_info { + int limit; + int blade; + long unsigned int offset; + char *name; +}; + +struct irq_alloc_info { + enum irq_alloc_type type; + u32 flags; + u32 devid; + irq_hw_number_t hwirq; + const struct cpumask *mask; + struct msi_desc *desc; + void *data; + union { + struct ioapic_alloc_info ioapic; + struct uv_alloc_info uv; + }; +}; + +struct irq_cfg { + unsigned int dest_apicid; + unsigned int vector; +}; + +enum { + IRQCHIP_FWNODE_REAL = 0, + IRQCHIP_FWNODE_NAMED = 1, + IRQCHIP_FWNODE_NAMED_ID = 2, +}; + +typedef struct irq_alloc_info msi_alloc_info_t; + +struct msi_domain_info; + +struct msi_domain_ops { + irq_hw_number_t (*get_hwirq)(struct msi_domain_info *, msi_alloc_info_t *); + int (*msi_init)(struct irq_domain *, struct msi_domain_info *, unsigned int, irq_hw_number_t, msi_alloc_info_t *); + void (*msi_free)(struct irq_domain *, struct msi_domain_info *, unsigned int); + int (*msi_check)(struct irq_domain *, struct msi_domain_info *, struct device *); + int (*msi_prepare)(struct irq_domain *, struct device *, int, msi_alloc_info_t *); + void (*msi_finish)(msi_alloc_info_t *, int); + void (*set_desc)(msi_alloc_info_t *, struct msi_desc *); + int (*handle_error)(struct irq_domain *, struct msi_desc *, int); + int (*domain_alloc_irqs)(struct irq_domain *, struct device *, int); + void (*domain_free_irqs)(struct irq_domain *, struct device *); +}; + +struct msi_domain_info { + u32 flags; + struct msi_domain_ops *ops; + struct irq_chip *chip; + void *chip_data; + irq_flow_handler_t handler; + void *handler_data; + const char *handler_name; + void *data; +}; + +enum { + MSI_FLAG_USE_DEF_DOM_OPS = 1, + MSI_FLAG_USE_DEF_CHIP_OPS = 2, + MSI_FLAG_MULTI_PCI_MSI = 4, + MSI_FLAG_PCI_MSIX = 8, + MSI_FLAG_ACTIVATE_EARLY = 16, + MSI_FLAG_MUST_REACTIVATE = 32, + MSI_FLAG_LEVEL_CAPABLE = 64, +}; + +enum hv_interrupt_type { + HV_X64_INTERRUPT_TYPE_FIXED = 0, + HV_X64_INTERRUPT_TYPE_LOWESTPRIORITY = 1, + HV_X64_INTERRUPT_TYPE_SMI = 2, + HV_X64_INTERRUPT_TYPE_REMOTEREAD = 3, + HV_X64_INTERRUPT_TYPE_NMI = 4, + HV_X64_INTERRUPT_TYPE_INIT = 5, + HV_X64_INTERRUPT_TYPE_SIPI = 6, + HV_X64_INTERRUPT_TYPE_EXTINT = 7, + HV_X64_INTERRUPT_TYPE_LOCALINT0 = 8, + HV_X64_INTERRUPT_TYPE_LOCALINT1 = 9, + HV_X64_INTERRUPT_TYPE_MAXIMUM = 10, +}; + +union hv_msi_address_register { + u32 as_uint32; + struct { + u32 reserved1: 2; + u32 destination_mode: 1; + u32 redirection_hint: 1; + u32 reserved2: 8; + u32 destination_id: 8; + u32 msi_base: 12; + }; +}; + +union hv_msi_data_register { + u32 as_uint32; + struct { + u32 vector: 8; + u32 delivery_mode: 3; + u32 reserved1: 3; + u32 level_assert: 1; + u32 trigger_mode: 1; + u32 reserved2: 16; + }; +}; + +union hv_msi_entry { + u64 as_uint64; + struct { + union hv_msi_address_register address; + union hv_msi_data_register data; + }; +}; + +union hv_ioapic_rte { + u64 as_uint64; + struct { + u32 vector: 8; + u32 delivery_mode: 3; + u32 destination_mode: 1; + u32 delivery_status: 1; + u32 interrupt_polarity: 1; + u32 remote_irr: 1; + u32 trigger_mode: 1; + u32 interrupt_mask: 1; + u32 reserved1: 15; + u32 reserved2: 
24; + u32 destination_id: 8; + }; + struct { + u32 low_uint32; + u32 high_uint32; + }; +}; + +struct hv_interrupt_entry { + u32 source; + u32 reserved1; + union { + union hv_msi_entry msi_entry; + union hv_ioapic_rte ioapic_rte; + }; +}; + +struct hv_device_interrupt_target { + u32 vector; + u32 flags; + union { + u64 vp_mask; + struct hv_vpset vp_set; + }; +}; + +enum hv_device_type { + HV_DEVICE_TYPE_LOGICAL = 0, + HV_DEVICE_TYPE_PCI = 1, + HV_DEVICE_TYPE_IOAPIC = 2, + HV_DEVICE_TYPE_ACPI = 3, +}; + +typedef u16 hv_pci_rid; + +typedef u16 hv_pci_segment; + +union hv_pci_bdf { + u16 as_uint16; + struct { + u8 function: 3; + u8 device: 5; + u8 bus; + }; +}; + +union hv_pci_bus_range { + u16 as_uint16; + struct { + u8 subordinate_bus; + u8 secondary_bus; + }; +}; + +union hv_device_id { + u64 as_uint64; + struct { + u64 reserved0: 62; + u64 device_type: 2; + }; + struct { + u64 id: 62; + u64 device_type: 2; + } logical; + struct { + union { + hv_pci_rid rid; + union hv_pci_bdf bdf; + }; + hv_pci_segment segment; + union hv_pci_bus_range shadow_bus_range; + u16 phantom_function_bits: 2; + u16 source_shadow: 1; + u16 rsvdz0: 11; + u16 device_type: 2; + } pci; + struct { + u8 ioapic_id; + u8 rsvdz0; + u16 rsvdz1; + u16 rsvdz2; + u16 rsvdz3: 14; + u16 device_type: 2; + } ioapic; + struct { + u32 input_mapping_base; + u32 input_mapping_count: 30; + u32 device_type: 2; + } acpi; +}; + +enum hv_interrupt_trigger_mode { + HV_INTERRUPT_TRIGGER_MODE_EDGE = 0, + HV_INTERRUPT_TRIGGER_MODE_LEVEL = 1, +}; + +struct hv_device_interrupt_descriptor { + u32 interrupt_type; + u32 trigger_mode; + u32 vector_count; + u32 reserved; + struct hv_device_interrupt_target target; +}; + +struct hv_input_map_device_interrupt { + u64 partition_id; + u64 device_id; + u64 flags; + struct hv_interrupt_entry logical_interrupt_entry; + struct hv_device_interrupt_descriptor interrupt_descriptor; +}; + +struct hv_output_map_device_interrupt { + struct hv_interrupt_entry interrupt_entry; +}; + +struct hv_input_unmap_device_interrupt { + u64 partition_id; + u64 device_id; + struct hv_interrupt_entry interrupt_entry; +}; + +struct rid_data { + struct pci_dev *bridge; + u32 rid; +}; + +struct hv_send_ipi { + u32 vector; + u32 reserved; + u64 cpu_mask; +}; + +struct hv_send_ipi_ex { + u32 vector; + u32 reserved; + struct hv_vpset vp_set; +}; + +struct hv_deposit_memory { + u64 partition_id; + u64 gpa_page_list[0]; +}; + +struct hv_proximity_domain_flags { + u32 proximity_preferred: 1; + u32 reserved: 30; + u32 proximity_info_valid: 1; +}; + +union hv_proximity_domain_info { + struct { + u32 domain_id; + struct hv_proximity_domain_flags flags; + }; + u64 as_uint64; +}; + +struct hv_lp_startup_status { + u64 hv_status; + u64 substatus1; + u64 substatus2; + u64 substatus3; + u64 substatus4; + u64 substatus5; + u64 substatus6; +}; + +struct hv_add_logical_processor_in { + u32 lp_index; + u32 apic_id; + union hv_proximity_domain_info proximity_domain_info; + u64 flags; +}; + +struct hv_add_logical_processor_out { + struct hv_lp_startup_status startup_status; +}; + +enum HV_SUBNODE_TYPE { + HvSubnodeAny = 0, + HvSubnodeSocket = 1, + HvSubnodeAmdNode = 2, + HvSubnodeL3 = 3, + HvSubnodeCount = 4, + HvSubnodeInvalid = 4294967295, +}; + +struct hv_create_vp { + u64 partition_id; + u32 vp_index; + u8 padding[3]; + u8 subnode_type; + u64 subnode_id; + union hv_proximity_domain_info proximity_domain_info; + u64 flags; +}; + +struct real_mode_header { + u32 text_start; + u32 ro_end; + u32 trampoline_start; + u32 trampoline_header; + u32 
sev_es_trampoline_start; + u32 trampoline_pgd; + u32 wakeup_start; + u32 wakeup_header; + u32 machine_real_restart_asm; + u32 machine_real_restart_seg; +}; + +struct trampoline_header { + u64 start; + u64 efer; + u32 cr4; + u32 flags; +}; + +struct pkru_state { + u32 pkru; + u32 pad; +}; + +enum show_regs_mode { + SHOW_REGS_SHORT = 0, + SHOW_REGS_USER = 1, + SHOW_REGS_ALL = 2, +}; + +struct resctrl_pqr_state { + u32 cur_rmid; + u32 cur_closid; + u32 default_rmid; + u32 default_closid; +}; + +enum which_selector { + FS = 0, + GS = 1, +}; + +struct sigcontext_64 { + __u64 r8; + __u64 r9; + __u64 r10; + __u64 r11; + __u64 r12; + __u64 r13; + __u64 r14; + __u64 r15; + __u64 di; + __u64 si; + __u64 bp; + __u64 bx; + __u64 dx; + __u64 ax; + __u64 cx; + __u64 sp; + __u64 ip; + __u64 flags; + __u16 cs; + __u16 gs; + __u16 fs; + __u16 ss; + __u64 err; + __u64 trapno; + __u64 oldmask; + __u64 cr2; + __u64 fpstate; + __u64 reserved1[8]; +}; + +struct sigaltstack { + void *ss_sp; + int ss_flags; + size_t ss_size; +}; + +typedef struct sigaltstack stack_t; + +struct siginfo { + union { + struct { + int si_signo; + int si_errno; + int si_code; + union __sifields _sifields; + }; + int _si_pad[32]; + }; +}; + +typedef struct siginfo siginfo_t; + +struct ksignal { + struct k_sigaction ka; + kernel_siginfo_t info; + int sig; +}; + +struct __large_struct { + long unsigned int buf[100]; +}; + +typedef u32 compat_size_t; + +typedef s32 compat_clock_t; + +typedef s32 compat_pid_t; + +typedef s32 compat_timer_t; + +typedef s32 compat_int_t; + +typedef u32 compat_ulong_t; + +typedef s64 compat_s64; + +typedef u32 __compat_uid32_t; + +typedef u32 compat_sigset_word; + +struct compat_sigaltstack { + compat_uptr_t ss_sp; + int ss_flags; + compat_size_t ss_size; +}; + +typedef struct compat_sigaltstack compat_stack_t; + +typedef struct { + compat_sigset_word sig[2]; +} compat_sigset_t; + +union compat_sigval { + compat_int_t sival_int; + compat_uptr_t sival_ptr; +}; + +typedef union compat_sigval compat_sigval_t; + +struct compat_siginfo { + int si_signo; + int si_errno; + int si_code; + union { + int _pad[29]; + struct { + compat_pid_t _pid; + __compat_uid32_t _uid; + } _kill; + struct { + compat_timer_t _tid; + int _overrun; + compat_sigval_t _sigval; + } _timer; + struct { + compat_pid_t _pid; + __compat_uid32_t _uid; + compat_sigval_t _sigval; + } _rt; + struct { + compat_pid_t _pid; + __compat_uid32_t _uid; + int _status; + compat_clock_t _utime; + compat_clock_t _stime; + } _sigchld; + struct { + compat_pid_t _pid; + __compat_uid32_t _uid; + int _status; + compat_s64 _utime; + compat_s64 _stime; + } __attribute__((packed)) _sigchld_x32; + struct { + compat_uptr_t _addr; + union { + int _trapno; + short int _addr_lsb; + struct { + char _dummy_bnd[4]; + compat_uptr_t _lower; + compat_uptr_t _upper; + } _addr_bnd; + struct { + char _dummy_pkey[4]; + u32 _pkey; + } _addr_pkey; + struct { + compat_ulong_t _data; + u32 _type; + } _perf; + }; + } _sigfault; + struct { + compat_long_t _band; + int _fd; + } _sigpoll; + struct { + compat_uptr_t _call_addr; + int _syscall; + unsigned int _arch; + } _sigsys; + } _sifields; +} __attribute__((packed)); + +typedef struct compat_siginfo compat_siginfo_t; + +struct ucontext { + long unsigned int uc_flags; + struct ucontext *uc_link; + stack_t uc_stack; + struct sigcontext_64 uc_mcontext; + sigset_t uc_sigmask; +}; + +struct kernel_vm86_regs { + struct pt_regs pt; + short unsigned int es; + short unsigned int __esh; + short unsigned int ds; + short unsigned int __dsh; + short 
unsigned int fs; + short unsigned int __fsh; + short unsigned int gs; + short unsigned int __gsh; +}; + +struct rt_sigframe { + char *pretcode; + struct ucontext uc; + struct siginfo info; +}; + +struct ucontext_x32 { + unsigned int uc_flags; + unsigned int uc_link; + compat_stack_t uc_stack; + unsigned int uc__pad0; + struct sigcontext_64 uc_mcontext; + compat_sigset_t uc_sigmask; +}; + +struct rt_sigframe_x32 { + u64 pretcode; + struct ucontext_x32 uc; + compat_siginfo_t info; +}; + +enum bug_trap_type { + BUG_TRAP_TYPE_NONE = 0, + BUG_TRAP_TYPE_WARN = 1, + BUG_TRAP_TYPE_BUG = 2, +}; + +enum insn_mode { + INSN_MODE_32 = 0, + INSN_MODE_64 = 1, + INSN_MODE_KERN = 2, + INSN_NUM_MODES = 3, +}; + +typedef u8 kprobe_opcode_t; + +struct kprobe; + +struct arch_specific_insn { + kprobe_opcode_t *insn; + unsigned int boostable: 1; + unsigned char size; + union { + unsigned char opcode; + struct { + unsigned char type; + } jcc; + struct { + unsigned char type; + unsigned char asize; + } loop; + struct { + unsigned char reg; + } indirect; + }; + s32 rel32; + void (*emulate_op)(struct kprobe *, struct pt_regs *); + int tp_len; +}; + +typedef int (*kprobe_pre_handler_t)(struct kprobe *, struct pt_regs *); + +typedef void (*kprobe_post_handler_t)(struct kprobe *, struct pt_regs *, long unsigned int); + +typedef int (*kprobe_fault_handler_t)(struct kprobe *, struct pt_regs *, int); + +struct kprobe { + struct hlist_node hlist; + struct list_head list; + long unsigned int nmissed; + kprobe_opcode_t *addr; + const char *symbol_name; + unsigned int offset; + kprobe_pre_handler_t pre_handler; + kprobe_post_handler_t post_handler; + kprobe_fault_handler_t fault_handler; + kprobe_opcode_t opcode; + struct arch_specific_insn ainsn; + u32 flags; +}; + +enum die_val { + DIE_OOPS = 1, + DIE_INT3 = 2, + DIE_DEBUG = 3, + DIE_PANIC = 4, + DIE_NMI = 5, + DIE_DIE = 6, + DIE_KERNELDEBUG = 7, + DIE_TRAP = 8, + DIE_GPF = 9, + DIE_CALL = 10, + DIE_PAGE_FAULT = 11, + DIE_NMIUNKNOWN = 12, +}; + +enum kernel_gp_hint { + GP_NO_HINT = 0, + GP_NON_CANONICAL = 1, + GP_CANONICAL = 2, +}; + +struct bad_iret_stack { + void *error_entry_ret; + struct pt_regs regs; +}; + +typedef struct irq_desc *vector_irq_t[256]; + +struct trace_event_raw_x86_irq_vector { + struct trace_entry ent; + int vector; + char __data[0]; +}; + +struct trace_event_raw_vector_config { + struct trace_entry ent; + unsigned int irq; + unsigned int vector; + unsigned int cpu; + unsigned int apicdest; + char __data[0]; +}; + +struct trace_event_raw_vector_mod { + struct trace_entry ent; + unsigned int irq; + unsigned int vector; + unsigned int cpu; + unsigned int prev_vector; + unsigned int prev_cpu; + char __data[0]; +}; + +struct trace_event_raw_vector_reserve { + struct trace_entry ent; + unsigned int irq; + int ret; + char __data[0]; +}; + +struct trace_event_raw_vector_alloc { + struct trace_entry ent; + unsigned int irq; + unsigned int vector; + bool reserved; + int ret; + char __data[0]; +}; + +struct trace_event_raw_vector_alloc_managed { + struct trace_entry ent; + unsigned int irq; + unsigned int vector; + int ret; + char __data[0]; +}; + +struct trace_event_raw_vector_activate { + struct trace_entry ent; + unsigned int irq; + bool is_managed; + bool can_reserve; + bool reserve; + char __data[0]; +}; + +struct trace_event_raw_vector_teardown { + struct trace_entry ent; + unsigned int irq; + bool is_managed; + bool has_reserved; + char __data[0]; +}; + +struct trace_event_raw_vector_setup { + struct trace_entry ent; + unsigned int irq; + bool is_legacy; + 
int ret; + char __data[0]; +}; + +struct trace_event_raw_vector_free_moved { + struct trace_entry ent; + unsigned int irq; + unsigned int cpu; + unsigned int vector; + bool is_managed; + char __data[0]; +}; + +struct trace_event_data_offsets_x86_irq_vector {}; + +struct trace_event_data_offsets_vector_config {}; + +struct trace_event_data_offsets_vector_mod {}; + +struct trace_event_data_offsets_vector_reserve {}; + +struct trace_event_data_offsets_vector_alloc {}; + +struct trace_event_data_offsets_vector_alloc_managed {}; + +struct trace_event_data_offsets_vector_activate {}; + +struct trace_event_data_offsets_vector_teardown {}; + +struct trace_event_data_offsets_vector_setup {}; + +struct trace_event_data_offsets_vector_free_moved {}; + +typedef void (*btf_trace_local_timer_entry)(void *, int); + +typedef void (*btf_trace_local_timer_exit)(void *, int); + +typedef void (*btf_trace_spurious_apic_entry)(void *, int); + +typedef void (*btf_trace_spurious_apic_exit)(void *, int); + +typedef void (*btf_trace_error_apic_entry)(void *, int); + +typedef void (*btf_trace_error_apic_exit)(void *, int); + +typedef void (*btf_trace_x86_platform_ipi_entry)(void *, int); + +typedef void (*btf_trace_x86_platform_ipi_exit)(void *, int); + +typedef void (*btf_trace_irq_work_entry)(void *, int); + +typedef void (*btf_trace_irq_work_exit)(void *, int); + +typedef void (*btf_trace_reschedule_entry)(void *, int); + +typedef void (*btf_trace_reschedule_exit)(void *, int); + +typedef void (*btf_trace_call_function_entry)(void *, int); + +typedef void (*btf_trace_call_function_exit)(void *, int); + +typedef void (*btf_trace_call_function_single_entry)(void *, int); + +typedef void (*btf_trace_call_function_single_exit)(void *, int); + +typedef void (*btf_trace_threshold_apic_entry)(void *, int); + +typedef void (*btf_trace_threshold_apic_exit)(void *, int); + +typedef void (*btf_trace_deferred_error_apic_entry)(void *, int); + +typedef void (*btf_trace_deferred_error_apic_exit)(void *, int); + +typedef void (*btf_trace_thermal_apic_entry)(void *, int); + +typedef void (*btf_trace_thermal_apic_exit)(void *, int); + +typedef void (*btf_trace_vector_config)(void *, unsigned int, unsigned int, unsigned int, unsigned int); + +typedef void (*btf_trace_vector_update)(void *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int); + +typedef void (*btf_trace_vector_clear)(void *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int); + +typedef void (*btf_trace_vector_reserve_managed)(void *, unsigned int, int); + +typedef void (*btf_trace_vector_reserve)(void *, unsigned int, int); + +typedef void (*btf_trace_vector_alloc)(void *, unsigned int, unsigned int, bool, int); + +typedef void (*btf_trace_vector_alloc_managed)(void *, unsigned int, unsigned int, int); + +typedef void (*btf_trace_vector_activate)(void *, unsigned int, bool, bool, bool); + +typedef void (*btf_trace_vector_deactivate)(void *, unsigned int, bool, bool, bool); + +typedef void (*btf_trace_vector_teardown)(void *, unsigned int, bool, bool); + +typedef void (*btf_trace_vector_setup)(void *, unsigned int, bool, int); + +typedef void (*btf_trace_vector_free_moved)(void *, unsigned int, unsigned int, unsigned int, bool); + +struct irq_stack { + char stack[16384]; +}; + +struct estack_pages { + u32 offs; + u16 size; + u16 type; +}; + +enum lockdown_reason { + LOCKDOWN_NONE = 0, + LOCKDOWN_MODULE_SIGNATURE = 1, + LOCKDOWN_DEV_MEM = 2, + LOCKDOWN_EFI_TEST = 3, + LOCKDOWN_KEXEC = 4, + LOCKDOWN_HIBERNATION = 5, + 
LOCKDOWN_PCI_ACCESS = 6, + LOCKDOWN_IOPORT = 7, + LOCKDOWN_MSR = 8, + LOCKDOWN_ACPI_TABLES = 9, + LOCKDOWN_PCMCIA_CIS = 10, + LOCKDOWN_TIOCSSERIAL = 11, + LOCKDOWN_MODULE_PARAMETERS = 12, + LOCKDOWN_MMIOTRACE = 13, + LOCKDOWN_DEBUGFS = 14, + LOCKDOWN_XMON_WR = 15, + LOCKDOWN_BPF_WRITE_USER = 16, + LOCKDOWN_INTEGRITY_MAX = 17, + LOCKDOWN_KCORE = 18, + LOCKDOWN_KPROBES = 19, + LOCKDOWN_BPF_READ = 20, + LOCKDOWN_PERF = 21, + LOCKDOWN_TRACEFS = 22, + LOCKDOWN_XMON_RW = 23, + LOCKDOWN_XFRM_SECRET = 24, + LOCKDOWN_CONFIDENTIALITY_MAX = 25, +}; + +enum lockdep_ok { + LOCKDEP_STILL_OK = 0, + LOCKDEP_NOW_UNRELIABLE = 1, +}; + +struct trace_event_raw_nmi_handler { + struct trace_entry ent; + void *handler; + s64 delta_ns; + int handled; + char __data[0]; +}; + +struct trace_event_data_offsets_nmi_handler {}; + +typedef void (*btf_trace_nmi_handler)(void *, void *, s64, int); + +struct nmi_desc { + raw_spinlock_t lock; + struct list_head head; +}; + +struct nmi_stats { + unsigned int normal; + unsigned int unknown; + unsigned int external; + unsigned int swallow; +}; + +enum nmi_states { + NMI_NOT_RUNNING = 0, + NMI_EXECUTING = 1, + NMI_LATCHED = 2, +}; + +struct user_desc { + unsigned int entry_number; + unsigned int base_addr; + unsigned int limit; + unsigned int seg_32bit: 1; + unsigned int contents: 2; + unsigned int read_exec_only: 1; + unsigned int limit_in_pages: 1; + unsigned int seg_not_present: 1; + unsigned int useable: 1; + unsigned int lm: 1; +}; + +enum con_scroll { + SM_UP = 0, + SM_DOWN = 1, +}; + +enum vc_intensity { + VCI_HALF_BRIGHT = 0, + VCI_NORMAL = 1, + VCI_BOLD = 2, + VCI_MASK = 3, +}; + +struct vc_data; + +struct console_font; + +struct consw { + struct module *owner; + const char * (*con_startup)(); + void (*con_init)(struct vc_data *, int); + void (*con_deinit)(struct vc_data *); + void (*con_clear)(struct vc_data *, int, int, int, int); + void (*con_putc)(struct vc_data *, int, int, int); + void (*con_putcs)(struct vc_data *, const short unsigned int *, int, int, int); + void (*con_cursor)(struct vc_data *, int); + bool (*con_scroll)(struct vc_data *, unsigned int, unsigned int, enum con_scroll, unsigned int); + int (*con_switch)(struct vc_data *); + int (*con_blank)(struct vc_data *, int, int); + int (*con_font_set)(struct vc_data *, struct console_font *, unsigned int); + int (*con_font_get)(struct vc_data *, struct console_font *); + int (*con_font_default)(struct vc_data *, struct console_font *, char *); + int (*con_resize)(struct vc_data *, unsigned int, unsigned int, unsigned int); + void (*con_set_palette)(struct vc_data *, const unsigned char *); + void (*con_scrolldelta)(struct vc_data *, int); + int (*con_set_origin)(struct vc_data *); + void (*con_save_screen)(struct vc_data *); + u8 (*con_build_attr)(struct vc_data *, u8, enum vc_intensity, bool, bool, bool, bool); + void (*con_invert_region)(struct vc_data *, u16 *, int); + u16 * (*con_screen_pos)(const struct vc_data *, int); + long unsigned int (*con_getxy)(struct vc_data *, long unsigned int, int *, int *); + void (*con_flush_scrollback)(struct vc_data *); + int (*con_debug_enter)(struct vc_data *); + int (*con_debug_leave)(struct vc_data *); +}; + +struct vc_state { + unsigned int x; + unsigned int y; + unsigned char color; + unsigned char Gx_charset[2]; + unsigned int charset: 1; + enum vc_intensity intensity; + bool italic; + bool underline; + bool blink; + bool reverse; +}; + +struct console_font { + unsigned int width; + unsigned int height; + unsigned int charcount; + unsigned char *data; +}; + 
+struct vt_mode { + char mode; + char waitv; + short int relsig; + short int acqsig; + short int frsig; +}; + +struct uni_pagedir; + +struct uni_screen; + +struct vc_data { + struct tty_port port; + struct vc_state state; + struct vc_state saved_state; + short unsigned int vc_num; + unsigned int vc_cols; + unsigned int vc_rows; + unsigned int vc_size_row; + unsigned int vc_scan_lines; + unsigned int vc_cell_height; + long unsigned int vc_origin; + long unsigned int vc_scr_end; + long unsigned int vc_visible_origin; + unsigned int vc_top; + unsigned int vc_bottom; + const struct consw *vc_sw; + short unsigned int *vc_screenbuf; + unsigned int vc_screenbuf_size; + unsigned char vc_mode; + unsigned char vc_attr; + unsigned char vc_def_color; + unsigned char vc_ulcolor; + unsigned char vc_itcolor; + unsigned char vc_halfcolor; + unsigned int vc_cursor_type; + short unsigned int vc_complement_mask; + short unsigned int vc_s_complement_mask; + long unsigned int vc_pos; + short unsigned int vc_hi_font_mask; + struct console_font vc_font; + short unsigned int vc_video_erase_char; + unsigned int vc_state; + unsigned int vc_npar; + unsigned int vc_par[16]; + struct vt_mode vt_mode; + struct pid *vt_pid; + int vt_newvt; + wait_queue_head_t paste_wait; + unsigned int vc_disp_ctrl: 1; + unsigned int vc_toggle_meta: 1; + unsigned int vc_decscnm: 1; + unsigned int vc_decom: 1; + unsigned int vc_decawm: 1; + unsigned int vc_deccm: 1; + unsigned int vc_decim: 1; + unsigned int vc_priv: 3; + unsigned int vc_need_wrap: 1; + unsigned int vc_can_do_color: 1; + unsigned int vc_report_mouse: 2; + unsigned char vc_utf: 1; + unsigned char vc_utf_count; + int vc_utf_char; + long unsigned int vc_tab_stop[4]; + unsigned char vc_palette[48]; + short unsigned int *vc_translate; + unsigned int vc_resize_user; + unsigned int vc_bell_pitch; + unsigned int vc_bell_duration; + short unsigned int vc_cur_blink_ms; + struct vc_data **vc_display_fg; + struct uni_pagedir *vc_uni_pagedir; + struct uni_pagedir **vc_uni_pagedir_loc; + struct uni_screen *vc_uni_screen; +}; + +struct edd { + unsigned int mbr_signature[16]; + struct edd_info edd_info[6]; + unsigned char mbr_signature_nr; + unsigned char edd_info_nr; +}; + +struct setup_data { + __u64 next; + __u32 type; + __u32 len; + __u8 data[0]; +}; + +struct setup_indirect { + __u32 type; + __u32 reserved; + __u64 len; + __u64 addr; +}; + +enum memblock_flags { + MEMBLOCK_NONE = 0, + MEMBLOCK_HOTPLUG = 1, + MEMBLOCK_MIRROR = 2, + MEMBLOCK_NOMAP = 4, +}; + +struct memblock_region { + phys_addr_t base; + phys_addr_t size; + enum memblock_flags flags; + int nid; +}; + +struct memblock_type { + long unsigned int cnt; + long unsigned int max; + phys_addr_t total_size; + struct memblock_region *regions; + char *name; +}; + +struct memblock { + bool bottom_up; + phys_addr_t current_limit; + struct memblock_type memory; + struct memblock_type reserved; +}; + +struct x86_msi_ops { + void (*restore_msi_irqs)(struct pci_dev *); +}; + +struct legacy_pic { + int nr_legacy_irqs; + struct irq_chip *chip; + void (*mask)(unsigned int); + void (*unmask)(unsigned int); + void (*mask_all)(); + void (*restore_mask)(); + void (*init)(int); + int (*probe)(); + int (*irq_pending)(unsigned int); + void (*make_irq)(unsigned int); +}; + +enum jump_label_type { + JUMP_LABEL_NOP = 0, + JUMP_LABEL_JMP = 1, +}; + +union text_poke_insn { + u8 text[5]; + struct { + u8 opcode; + s32 disp; + } __attribute__((packed)); +}; + +enum { + JL_STATE_START = 0, + JL_STATE_NO_UPDATE = 1, + JL_STATE_UPDATE = 2, +}; + 
+typedef short unsigned int __kernel_old_uid_t; + +typedef short unsigned int __kernel_old_gid_t; + +typedef struct { + int val[2]; +} __kernel_fsid_t; + +typedef __kernel_old_uid_t old_uid_t; + +typedef __kernel_old_gid_t old_gid_t; + +struct kernel_clone_args { + u64 flags; + int *pidfd; + int *child_tid; + int *parent_tid; + int exit_signal; + long unsigned int stack; + long unsigned int stack_size; + long unsigned int tls; + pid_t *set_tid; + size_t set_tid_size; + int cgroup; + int io_thread; + struct cgroup *cgrp; + struct css_set *cset; +}; + +struct kstatfs { + long int f_type; + long int f_bsize; + u64 f_blocks; + u64 f_bfree; + u64 f_bavail; + u64 f_files; + u64 f_ffree; + __kernel_fsid_t f_fsid; + long int f_namelen; + long int f_frsize; + long int f_flags; + long int f_spare[4]; +}; + +struct stat64 { + long long unsigned int st_dev; + unsigned char __pad0[4]; + unsigned int __st_ino; + unsigned int st_mode; + unsigned int st_nlink; + unsigned int st_uid; + unsigned int st_gid; + long long unsigned int st_rdev; + unsigned char __pad3[4]; + long long int st_size; + unsigned int st_blksize; + long long int st_blocks; + unsigned int st_atime; + unsigned int st_atime_nsec; + unsigned int st_mtime; + unsigned int st_mtime_nsec; + unsigned int st_ctime; + unsigned int st_ctime_nsec; + long long unsigned int st_ino; +} __attribute__((packed)); + +struct mmap_arg_struct32 { + unsigned int addr; + unsigned int len; + unsigned int prot; + unsigned int flags; + unsigned int fd; + unsigned int offset; +}; + +struct vm_unmapped_area_info { + long unsigned int flags; + long unsigned int length; + long unsigned int low_limit; + long unsigned int high_limit; + long unsigned int align_mask; + long unsigned int align_offset; +}; + +enum align_flags { + ALIGN_VA_32 = 1, + ALIGN_VA_64 = 2, +}; + +struct va_alignment { + int flags; + long unsigned int mask; + long unsigned int bits; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +enum flow_dissector_key_id { + FLOW_DISSECTOR_KEY_CONTROL = 0, + FLOW_DISSECTOR_KEY_BASIC = 1, + FLOW_DISSECTOR_KEY_IPV4_ADDRS = 2, + FLOW_DISSECTOR_KEY_IPV6_ADDRS = 3, + FLOW_DISSECTOR_KEY_PORTS = 4, + FLOW_DISSECTOR_KEY_PORTS_RANGE = 5, + FLOW_DISSECTOR_KEY_ICMP = 6, + FLOW_DISSECTOR_KEY_ETH_ADDRS = 7, + FLOW_DISSECTOR_KEY_TIPC = 8, + FLOW_DISSECTOR_KEY_ARP = 9, + FLOW_DISSECTOR_KEY_VLAN = 10, + FLOW_DISSECTOR_KEY_FLOW_LABEL = 11, + FLOW_DISSECTOR_KEY_GRE_KEYID = 12, + FLOW_DISSECTOR_KEY_MPLS_ENTROPY = 13, + FLOW_DISSECTOR_KEY_ENC_KEYID = 14, + FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS = 15, + FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS = 16, + FLOW_DISSECTOR_KEY_ENC_CONTROL = 17, + FLOW_DISSECTOR_KEY_ENC_PORTS = 18, + FLOW_DISSECTOR_KEY_MPLS = 19, + FLOW_DISSECTOR_KEY_TCP = 20, + FLOW_DISSECTOR_KEY_IP = 21, + FLOW_DISSECTOR_KEY_CVLAN = 22, + FLOW_DISSECTOR_KEY_ENC_IP = 23, + FLOW_DISSECTOR_KEY_ENC_OPTS = 24, + FLOW_DISSECTOR_KEY_META = 25, + FLOW_DISSECTOR_KEY_CT = 26, + FLOW_DISSECTOR_KEY_HASH = 27, + FLOW_DISSECTOR_KEY_MAX = 28, +}; + +enum { + IPSTATS_MIB_NUM = 0, + IPSTATS_MIB_INPKTS = 1, + IPSTATS_MIB_INOCTETS = 2, + IPSTATS_MIB_INDELIVERS = 3, + IPSTATS_MIB_OUTFORWDATAGRAMS = 4, + IPSTATS_MIB_OUTPKTS = 5, + IPSTATS_MIB_OUTOCTETS = 6, + IPSTATS_MIB_INHDRERRORS = 7, + IPSTATS_MIB_INTOOBIGERRORS = 8, + IPSTATS_MIB_INNOROUTES = 9, + IPSTATS_MIB_INADDRERRORS = 10, + IPSTATS_MIB_INUNKNOWNPROTOS = 11, + IPSTATS_MIB_INTRUNCATEDPKTS = 12, + IPSTATS_MIB_INDISCARDS = 13, + IPSTATS_MIB_OUTDISCARDS = 14, + IPSTATS_MIB_OUTNOROUTES = 15, + IPSTATS_MIB_REASMTIMEOUT = 16, + 
IPSTATS_MIB_REASMREQDS = 17, + IPSTATS_MIB_REASMOKS = 18, + IPSTATS_MIB_REASMFAILS = 19, + IPSTATS_MIB_FRAGOKS = 20, + IPSTATS_MIB_FRAGFAILS = 21, + IPSTATS_MIB_FRAGCREATES = 22, + IPSTATS_MIB_INMCASTPKTS = 23, + IPSTATS_MIB_OUTMCASTPKTS = 24, + IPSTATS_MIB_INBCASTPKTS = 25, + IPSTATS_MIB_OUTBCASTPKTS = 26, + IPSTATS_MIB_INMCASTOCTETS = 27, + IPSTATS_MIB_OUTMCASTOCTETS = 28, + IPSTATS_MIB_INBCASTOCTETS = 29, + IPSTATS_MIB_OUTBCASTOCTETS = 30, + IPSTATS_MIB_CSUMERRORS = 31, + IPSTATS_MIB_NOECTPKTS = 32, + IPSTATS_MIB_ECT1PKTS = 33, + IPSTATS_MIB_ECT0PKTS = 34, + IPSTATS_MIB_CEPKTS = 35, + IPSTATS_MIB_REASM_OVERLAPS = 36, + __IPSTATS_MIB_MAX = 37, +}; + +enum { + ICMP_MIB_NUM = 0, + ICMP_MIB_INMSGS = 1, + ICMP_MIB_INERRORS = 2, + ICMP_MIB_INDESTUNREACHS = 3, + ICMP_MIB_INTIMEEXCDS = 4, + ICMP_MIB_INPARMPROBS = 5, + ICMP_MIB_INSRCQUENCHS = 6, + ICMP_MIB_INREDIRECTS = 7, + ICMP_MIB_INECHOS = 8, + ICMP_MIB_INECHOREPS = 9, + ICMP_MIB_INTIMESTAMPS = 10, + ICMP_MIB_INTIMESTAMPREPS = 11, + ICMP_MIB_INADDRMASKS = 12, + ICMP_MIB_INADDRMASKREPS = 13, + ICMP_MIB_OUTMSGS = 14, + ICMP_MIB_OUTERRORS = 15, + ICMP_MIB_OUTDESTUNREACHS = 16, + ICMP_MIB_OUTTIMEEXCDS = 17, + ICMP_MIB_OUTPARMPROBS = 18, + ICMP_MIB_OUTSRCQUENCHS = 19, + ICMP_MIB_OUTREDIRECTS = 20, + ICMP_MIB_OUTECHOS = 21, + ICMP_MIB_OUTECHOREPS = 22, + ICMP_MIB_OUTTIMESTAMPS = 23, + ICMP_MIB_OUTTIMESTAMPREPS = 24, + ICMP_MIB_OUTADDRMASKS = 25, + ICMP_MIB_OUTADDRMASKREPS = 26, + ICMP_MIB_CSUMERRORS = 27, + __ICMP_MIB_MAX = 28, +}; + +enum { + ICMP6_MIB_NUM = 0, + ICMP6_MIB_INMSGS = 1, + ICMP6_MIB_INERRORS = 2, + ICMP6_MIB_OUTMSGS = 3, + ICMP6_MIB_OUTERRORS = 4, + ICMP6_MIB_CSUMERRORS = 5, + __ICMP6_MIB_MAX = 6, +}; + +enum { + TCP_MIB_NUM = 0, + TCP_MIB_RTOALGORITHM = 1, + TCP_MIB_RTOMIN = 2, + TCP_MIB_RTOMAX = 3, + TCP_MIB_MAXCONN = 4, + TCP_MIB_ACTIVEOPENS = 5, + TCP_MIB_PASSIVEOPENS = 6, + TCP_MIB_ATTEMPTFAILS = 7, + TCP_MIB_ESTABRESETS = 8, + TCP_MIB_CURRESTAB = 9, + TCP_MIB_INSEGS = 10, + TCP_MIB_OUTSEGS = 11, + TCP_MIB_RETRANSSEGS = 12, + TCP_MIB_INERRS = 13, + TCP_MIB_OUTRSTS = 14, + TCP_MIB_CSUMERRORS = 15, + __TCP_MIB_MAX = 16, +}; + +enum { + UDP_MIB_NUM = 0, + UDP_MIB_INDATAGRAMS = 1, + UDP_MIB_NOPORTS = 2, + UDP_MIB_INERRORS = 3, + UDP_MIB_OUTDATAGRAMS = 4, + UDP_MIB_RCVBUFERRORS = 5, + UDP_MIB_SNDBUFERRORS = 6, + UDP_MIB_CSUMERRORS = 7, + UDP_MIB_IGNOREDMULTI = 8, + UDP_MIB_MEMERRORS = 9, + __UDP_MIB_MAX = 10, +}; + +enum { + LINUX_MIB_NUM = 0, + LINUX_MIB_SYNCOOKIESSENT = 1, + LINUX_MIB_SYNCOOKIESRECV = 2, + LINUX_MIB_SYNCOOKIESFAILED = 3, + LINUX_MIB_EMBRYONICRSTS = 4, + LINUX_MIB_PRUNECALLED = 5, + LINUX_MIB_RCVPRUNED = 6, + LINUX_MIB_OFOPRUNED = 7, + LINUX_MIB_OUTOFWINDOWICMPS = 8, + LINUX_MIB_LOCKDROPPEDICMPS = 9, + LINUX_MIB_ARPFILTER = 10, + LINUX_MIB_TIMEWAITED = 11, + LINUX_MIB_TIMEWAITRECYCLED = 12, + LINUX_MIB_TIMEWAITKILLED = 13, + LINUX_MIB_PAWSACTIVEREJECTED = 14, + LINUX_MIB_PAWSESTABREJECTED = 15, + LINUX_MIB_DELAYEDACKS = 16, + LINUX_MIB_DELAYEDACKLOCKED = 17, + LINUX_MIB_DELAYEDACKLOST = 18, + LINUX_MIB_LISTENOVERFLOWS = 19, + LINUX_MIB_LISTENDROPS = 20, + LINUX_MIB_TCPHPHITS = 21, + LINUX_MIB_TCPPUREACKS = 22, + LINUX_MIB_TCPHPACKS = 23, + LINUX_MIB_TCPRENORECOVERY = 24, + LINUX_MIB_TCPSACKRECOVERY = 25, + LINUX_MIB_TCPSACKRENEGING = 26, + LINUX_MIB_TCPSACKREORDER = 27, + LINUX_MIB_TCPRENOREORDER = 28, + LINUX_MIB_TCPTSREORDER = 29, + LINUX_MIB_TCPFULLUNDO = 30, + LINUX_MIB_TCPPARTIALUNDO = 31, + LINUX_MIB_TCPDSACKUNDO = 32, + LINUX_MIB_TCPLOSSUNDO = 33, + LINUX_MIB_TCPLOSTRETRANSMIT = 34, + 
LINUX_MIB_TCPRENOFAILURES = 35, + LINUX_MIB_TCPSACKFAILURES = 36, + LINUX_MIB_TCPLOSSFAILURES = 37, + LINUX_MIB_TCPFASTRETRANS = 38, + LINUX_MIB_TCPSLOWSTARTRETRANS = 39, + LINUX_MIB_TCPTIMEOUTS = 40, + LINUX_MIB_TCPLOSSPROBES = 41, + LINUX_MIB_TCPLOSSPROBERECOVERY = 42, + LINUX_MIB_TCPRENORECOVERYFAIL = 43, + LINUX_MIB_TCPSACKRECOVERYFAIL = 44, + LINUX_MIB_TCPRCVCOLLAPSED = 45, + LINUX_MIB_TCPDSACKOLDSENT = 46, + LINUX_MIB_TCPDSACKOFOSENT = 47, + LINUX_MIB_TCPDSACKRECV = 48, + LINUX_MIB_TCPDSACKOFORECV = 49, + LINUX_MIB_TCPABORTONDATA = 50, + LINUX_MIB_TCPABORTONCLOSE = 51, + LINUX_MIB_TCPABORTONMEMORY = 52, + LINUX_MIB_TCPABORTONTIMEOUT = 53, + LINUX_MIB_TCPABORTONLINGER = 54, + LINUX_MIB_TCPABORTFAILED = 55, + LINUX_MIB_TCPMEMORYPRESSURES = 56, + LINUX_MIB_TCPMEMORYPRESSURESCHRONO = 57, + LINUX_MIB_TCPSACKDISCARD = 58, + LINUX_MIB_TCPDSACKIGNOREDOLD = 59, + LINUX_MIB_TCPDSACKIGNOREDNOUNDO = 60, + LINUX_MIB_TCPSPURIOUSRTOS = 61, + LINUX_MIB_TCPMD5NOTFOUND = 62, + LINUX_MIB_TCPMD5UNEXPECTED = 63, + LINUX_MIB_TCPMD5FAILURE = 64, + LINUX_MIB_SACKSHIFTED = 65, + LINUX_MIB_SACKMERGED = 66, + LINUX_MIB_SACKSHIFTFALLBACK = 67, + LINUX_MIB_TCPBACKLOGDROP = 68, + LINUX_MIB_PFMEMALLOCDROP = 69, + LINUX_MIB_TCPMINTTLDROP = 70, + LINUX_MIB_TCPDEFERACCEPTDROP = 71, + LINUX_MIB_IPRPFILTER = 72, + LINUX_MIB_TCPTIMEWAITOVERFLOW = 73, + LINUX_MIB_TCPREQQFULLDOCOOKIES = 74, + LINUX_MIB_TCPREQQFULLDROP = 75, + LINUX_MIB_TCPRETRANSFAIL = 76, + LINUX_MIB_TCPRCVCOALESCE = 77, + LINUX_MIB_TCPBACKLOGCOALESCE = 78, + LINUX_MIB_TCPOFOQUEUE = 79, + LINUX_MIB_TCPOFODROP = 80, + LINUX_MIB_TCPOFOMERGE = 81, + LINUX_MIB_TCPCHALLENGEACK = 82, + LINUX_MIB_TCPSYNCHALLENGE = 83, + LINUX_MIB_TCPFASTOPENACTIVE = 84, + LINUX_MIB_TCPFASTOPENACTIVEFAIL = 85, + LINUX_MIB_TCPFASTOPENPASSIVE = 86, + LINUX_MIB_TCPFASTOPENPASSIVEFAIL = 87, + LINUX_MIB_TCPFASTOPENLISTENOVERFLOW = 88, + LINUX_MIB_TCPFASTOPENCOOKIEREQD = 89, + LINUX_MIB_TCPFASTOPENBLACKHOLE = 90, + LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES = 91, + LINUX_MIB_BUSYPOLLRXPACKETS = 92, + LINUX_MIB_TCPAUTOCORKING = 93, + LINUX_MIB_TCPFROMZEROWINDOWADV = 94, + LINUX_MIB_TCPTOZEROWINDOWADV = 95, + LINUX_MIB_TCPWANTZEROWINDOWADV = 96, + LINUX_MIB_TCPSYNRETRANS = 97, + LINUX_MIB_TCPORIGDATASENT = 98, + LINUX_MIB_TCPHYSTARTTRAINDETECT = 99, + LINUX_MIB_TCPHYSTARTTRAINCWND = 100, + LINUX_MIB_TCPHYSTARTDELAYDETECT = 101, + LINUX_MIB_TCPHYSTARTDELAYCWND = 102, + LINUX_MIB_TCPACKSKIPPEDSYNRECV = 103, + LINUX_MIB_TCPACKSKIPPEDPAWS = 104, + LINUX_MIB_TCPACKSKIPPEDSEQ = 105, + LINUX_MIB_TCPACKSKIPPEDFINWAIT2 = 106, + LINUX_MIB_TCPACKSKIPPEDTIMEWAIT = 107, + LINUX_MIB_TCPACKSKIPPEDCHALLENGE = 108, + LINUX_MIB_TCPWINPROBE = 109, + LINUX_MIB_TCPKEEPALIVE = 110, + LINUX_MIB_TCPMTUPFAIL = 111, + LINUX_MIB_TCPMTUPSUCCESS = 112, + LINUX_MIB_TCPDELIVERED = 113, + LINUX_MIB_TCPDELIVEREDCE = 114, + LINUX_MIB_TCPACKCOMPRESSED = 115, + LINUX_MIB_TCPZEROWINDOWDROP = 116, + LINUX_MIB_TCPRCVQDROP = 117, + LINUX_MIB_TCPWQUEUETOOBIG = 118, + LINUX_MIB_TCPFASTOPENPASSIVEALTKEY = 119, + LINUX_MIB_TCPTIMEOUTREHASH = 120, + LINUX_MIB_TCPDUPLICATEDATAREHASH = 121, + LINUX_MIB_TCPDSACKRECVSEGS = 122, + LINUX_MIB_TCPDSACKIGNOREDDUBIOUS = 123, + __LINUX_MIB_MAX = 124, +}; + +enum { + LINUX_MIB_XFRMNUM = 0, + LINUX_MIB_XFRMINERROR = 1, + LINUX_MIB_XFRMINBUFFERERROR = 2, + LINUX_MIB_XFRMINHDRERROR = 3, + LINUX_MIB_XFRMINNOSTATES = 4, + LINUX_MIB_XFRMINSTATEPROTOERROR = 5, + LINUX_MIB_XFRMINSTATEMODEERROR = 6, + LINUX_MIB_XFRMINSTATESEQERROR = 7, + LINUX_MIB_XFRMINSTATEEXPIRED = 8, + LINUX_MIB_XFRMINSTATEMISMATCH = 9, 
+ LINUX_MIB_XFRMINSTATEINVALID = 10, + LINUX_MIB_XFRMINTMPLMISMATCH = 11, + LINUX_MIB_XFRMINNOPOLS = 12, + LINUX_MIB_XFRMINPOLBLOCK = 13, + LINUX_MIB_XFRMINPOLERROR = 14, + LINUX_MIB_XFRMOUTERROR = 15, + LINUX_MIB_XFRMOUTBUNDLEGENERROR = 16, + LINUX_MIB_XFRMOUTBUNDLECHECKERROR = 17, + LINUX_MIB_XFRMOUTNOSTATES = 18, + LINUX_MIB_XFRMOUTSTATEPROTOERROR = 19, + LINUX_MIB_XFRMOUTSTATEMODEERROR = 20, + LINUX_MIB_XFRMOUTSTATESEQERROR = 21, + LINUX_MIB_XFRMOUTSTATEEXPIRED = 22, + LINUX_MIB_XFRMOUTPOLBLOCK = 23, + LINUX_MIB_XFRMOUTPOLDEAD = 24, + LINUX_MIB_XFRMOUTPOLERROR = 25, + LINUX_MIB_XFRMFWDHDRERROR = 26, + LINUX_MIB_XFRMOUTSTATEINVALID = 27, + LINUX_MIB_XFRMACQUIREERROR = 28, + __LINUX_MIB_XFRMMAX = 29, +}; + +enum { + LINUX_MIB_TLSNUM = 0, + LINUX_MIB_TLSCURRTXSW = 1, + LINUX_MIB_TLSCURRRXSW = 2, + LINUX_MIB_TLSCURRTXDEVICE = 3, + LINUX_MIB_TLSCURRRXDEVICE = 4, + LINUX_MIB_TLSTXSW = 5, + LINUX_MIB_TLSRXSW = 6, + LINUX_MIB_TLSTXDEVICE = 7, + LINUX_MIB_TLSRXDEVICE = 8, + LINUX_MIB_TLSDECRYPTERROR = 9, + LINUX_MIB_TLSRXDEVICERESYNC = 10, + __LINUX_MIB_TLSMAX = 11, +}; + +enum nf_inet_hooks { + NF_INET_PRE_ROUTING = 0, + NF_INET_LOCAL_IN = 1, + NF_INET_FORWARD = 2, + NF_INET_LOCAL_OUT = 3, + NF_INET_POST_ROUTING = 4, + NF_INET_NUMHOOKS = 5, + NF_INET_INGRESS = 5, +}; + +enum { + NFPROTO_UNSPEC = 0, + NFPROTO_INET = 1, + NFPROTO_IPV4 = 2, + NFPROTO_ARP = 3, + NFPROTO_NETDEV = 5, + NFPROTO_BRIDGE = 7, + NFPROTO_IPV6 = 10, + NFPROTO_DECNET = 12, + NFPROTO_NUMPROTO = 13, +}; + +enum tcp_conntrack { + TCP_CONNTRACK_NONE = 0, + TCP_CONNTRACK_SYN_SENT = 1, + TCP_CONNTRACK_SYN_RECV = 2, + TCP_CONNTRACK_ESTABLISHED = 3, + TCP_CONNTRACK_FIN_WAIT = 4, + TCP_CONNTRACK_CLOSE_WAIT = 5, + TCP_CONNTRACK_LAST_ACK = 6, + TCP_CONNTRACK_TIME_WAIT = 7, + TCP_CONNTRACK_CLOSE = 8, + TCP_CONNTRACK_LISTEN = 9, + TCP_CONNTRACK_MAX = 10, + TCP_CONNTRACK_IGNORE = 11, + TCP_CONNTRACK_RETRANS = 12, + TCP_CONNTRACK_UNACK = 13, + TCP_CONNTRACK_TIMEOUT_MAX = 14, +}; + +enum ct_dccp_states { + CT_DCCP_NONE = 0, + CT_DCCP_REQUEST = 1, + CT_DCCP_RESPOND = 2, + CT_DCCP_PARTOPEN = 3, + CT_DCCP_OPEN = 4, + CT_DCCP_CLOSEREQ = 5, + CT_DCCP_CLOSING = 6, + CT_DCCP_TIMEWAIT = 7, + CT_DCCP_IGNORE = 8, + CT_DCCP_INVALID = 9, + __CT_DCCP_MAX = 10, +}; + +enum ip_conntrack_dir { + IP_CT_DIR_ORIGINAL = 0, + IP_CT_DIR_REPLY = 1, + IP_CT_DIR_MAX = 2, +}; + +enum sctp_conntrack { + SCTP_CONNTRACK_NONE = 0, + SCTP_CONNTRACK_CLOSED = 1, + SCTP_CONNTRACK_COOKIE_WAIT = 2, + SCTP_CONNTRACK_COOKIE_ECHOED = 3, + SCTP_CONNTRACK_ESTABLISHED = 4, + SCTP_CONNTRACK_SHUTDOWN_SENT = 5, + SCTP_CONNTRACK_SHUTDOWN_RECD = 6, + SCTP_CONNTRACK_SHUTDOWN_ACK_SENT = 7, + SCTP_CONNTRACK_HEARTBEAT_SENT = 8, + SCTP_CONNTRACK_HEARTBEAT_ACKED = 9, + SCTP_CONNTRACK_MAX = 10, +}; + +enum udp_conntrack { + UDP_CT_UNREPLIED = 0, + UDP_CT_REPLIED = 1, + UDP_CT_MAX = 2, +}; + +enum gre_conntrack { + GRE_CT_UNREPLIED = 0, + GRE_CT_REPLIED = 1, + GRE_CT_MAX = 2, +}; + +enum { + XFRM_POLICY_IN = 0, + XFRM_POLICY_OUT = 1, + XFRM_POLICY_FWD = 2, + XFRM_POLICY_MASK = 3, + XFRM_POLICY_MAX = 3, +}; + +enum netns_bpf_attach_type { + NETNS_BPF_INVALID = 4294967295, + NETNS_BPF_FLOW_DISSECTOR = 0, + NETNS_BPF_SK_LOOKUP = 1, + MAX_NETNS_BPF_ATTACH_TYPE = 2, +}; + +enum skb_ext_id { + SKB_EXT_BRIDGE_NF = 0, + SKB_EXT_SEC_PATH = 1, + TC_SKB_EXT = 2, + SKB_EXT_MPTCP = 3, + SKB_EXT_NUM = 4, +}; + +struct kobj_attribute { + struct attribute attr; + ssize_t (*show)(struct kobject *, struct kobj_attribute *, char *); + ssize_t (*store)(struct kobject *, struct kobj_attribute *, const char *, 
size_t); +}; + +typedef void (*swap_func_t)(void *, void *, int); + +typedef int (*cmp_func_t)(const void *, const void *); + +enum { + IORES_DESC_NONE = 0, + IORES_DESC_CRASH_KERNEL = 1, + IORES_DESC_ACPI_TABLES = 2, + IORES_DESC_ACPI_NV_STORAGE = 3, + IORES_DESC_PERSISTENT_MEMORY = 4, + IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5, + IORES_DESC_DEVICE_PRIVATE_MEMORY = 6, + IORES_DESC_RESERVED = 7, + IORES_DESC_SOFT_RESERVED = 8, +}; + +struct change_member { + struct e820_entry *entry; + long long unsigned int addr; +}; + +struct iopf_device_param; + +struct iommu_fault_param; + +struct iommu_fwspec; + +struct dev_iommu { + struct mutex lock; + struct iommu_fault_param *fault_param; + struct iopf_device_param *iopf_param; + struct iommu_fwspec *fwspec; + struct iommu_device *iommu_dev; + void *priv; +}; + +struct of_phandle_args { + struct device_node *np; + int args_count; + uint32_t args[16]; +}; + +struct iommu_fault_unrecoverable { + __u32 reason; + __u32 flags; + __u32 pasid; + __u32 perm; + __u64 addr; + __u64 fetch_addr; +}; + +struct iommu_fault_page_request { + __u32 flags; + __u32 pasid; + __u32 grpid; + __u32 perm; + __u64 addr; + __u64 private_data[2]; +}; + +struct iommu_fault { + __u32 type; + __u32 padding; + union { + struct iommu_fault_unrecoverable event; + struct iommu_fault_page_request prm; + __u8 padding2[56]; + }; +}; + +struct iommu_page_response { + __u32 argsz; + __u32 version; + __u32 flags; + __u32 pasid; + __u32 grpid; + __u32 code; +}; + +struct iommu_inv_addr_info { + __u32 flags; + __u32 archid; + __u64 pasid; + __u64 addr; + __u64 granule_size; + __u64 nb_granules; +}; + +struct iommu_inv_pasid_info { + __u32 flags; + __u32 archid; + __u64 pasid; +}; + +struct iommu_cache_invalidate_info { + __u32 argsz; + __u32 version; + __u8 cache; + __u8 granularity; + __u8 padding[6]; + union { + struct iommu_inv_pasid_info pasid_info; + struct iommu_inv_addr_info addr_info; + } granu; +}; + +struct iommu_gpasid_bind_data_vtd { + __u64 flags; + __u32 pat; + __u32 emt; +}; + +struct iommu_gpasid_bind_data { + __u32 argsz; + __u32 version; + __u32 format; + __u32 addr_width; + __u64 flags; + __u64 gpgd; + __u64 hpasid; + __u64 gpasid; + __u8 padding[8]; + union { + struct iommu_gpasid_bind_data_vtd vtd; + } vendor; +}; + +typedef int (*iommu_fault_handler_t)(struct iommu_domain *, struct device *, long unsigned int, int, void *); + +struct iommu_domain_geometry { + dma_addr_t aperture_start; + dma_addr_t aperture_end; + bool force_aperture; +}; + +struct iommu_domain { + unsigned int type; + const struct iommu_ops *ops; + long unsigned int pgsize_bitmap; + iommu_fault_handler_t handler; + void *handler_token; + struct iommu_domain_geometry geometry; + void *iova_cookie; +}; + +typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *); + +enum iommu_resv_type { + IOMMU_RESV_DIRECT = 0, + IOMMU_RESV_DIRECT_RELAXABLE = 1, + IOMMU_RESV_RESERVED = 2, + IOMMU_RESV_MSI = 3, + IOMMU_RESV_SW_MSI = 4, +}; + +struct iommu_resv_region { + struct list_head list; + phys_addr_t start; + size_t length; + int prot; + enum iommu_resv_type type; +}; + +struct iommu_iotlb_gather { + long unsigned int start; + long unsigned int end; + size_t pgsize; + struct page *freelist; +}; + +struct iommu_device { + struct list_head list; + const struct iommu_ops *ops; + struct fwnode_handle *fwnode; + struct device *dev; +}; + +struct iommu_sva { + struct device *dev; +}; + +struct iommu_fault_event { + struct iommu_fault fault; + struct list_head list; +}; + +struct iommu_fault_param { + 
iommu_dev_fault_handler_t handler; + void *data; + struct list_head faults; + struct mutex lock; +}; + +struct iommu_fwspec { + const struct iommu_ops *ops; + struct fwnode_handle *iommu_fwnode; + u32 flags; + unsigned int num_ids; + u32 ids[0]; +}; + +enum dmi_field { + DMI_NONE = 0, + DMI_BIOS_VENDOR = 1, + DMI_BIOS_VERSION = 2, + DMI_BIOS_DATE = 3, + DMI_BIOS_RELEASE = 4, + DMI_EC_FIRMWARE_RELEASE = 5, + DMI_SYS_VENDOR = 6, + DMI_PRODUCT_NAME = 7, + DMI_PRODUCT_VERSION = 8, + DMI_PRODUCT_SERIAL = 9, + DMI_PRODUCT_UUID = 10, + DMI_PRODUCT_SKU = 11, + DMI_PRODUCT_FAMILY = 12, + DMI_BOARD_VENDOR = 13, + DMI_BOARD_NAME = 14, + DMI_BOARD_VERSION = 15, + DMI_BOARD_SERIAL = 16, + DMI_BOARD_ASSET_TAG = 17, + DMI_CHASSIS_VENDOR = 18, + DMI_CHASSIS_TYPE = 19, + DMI_CHASSIS_VERSION = 20, + DMI_CHASSIS_SERIAL = 21, + DMI_CHASSIS_ASSET_TAG = 22, + DMI_STRING_MAX = 23, + DMI_OEM_STRING = 24, +}; + +enum { + NONE_FORCE_HPET_RESUME = 0, + OLD_ICH_FORCE_HPET_RESUME = 1, + ICH_FORCE_HPET_RESUME = 2, + VT8237_FORCE_HPET_RESUME = 3, + NVIDIA_FORCE_HPET_RESUME = 4, + ATI_FORCE_HPET_RESUME = 5, +}; + +enum meminit_context { + MEMINIT_EARLY = 0, + MEMINIT_HOTPLUG = 1, +}; + +struct cpu { + int node_id; + int hotpluggable; + struct device dev; +}; + +struct x86_cpu { + struct cpu cpu; +}; + +struct paravirt_patch_site { + u8 *instr; + u8 type; + u8 len; +}; + +struct die_args { + struct pt_regs *regs; + const char *str; + long int err; + int trapnr; + int signr; +}; + +struct tlb_state_shared { + bool is_lazy; +}; + +struct smp_alt_module { + struct module *mod; + char *name; + const s32 *locks; + const s32 *locks_end; + u8 *text; + u8 *text_end; + struct list_head next; +}; + +typedef struct { + struct mm_struct *mm; +} temp_mm_state_t; + +struct text_poke_loc { + s32 rel_addr; + s32 rel32; + u8 opcode; + const u8 text[5]; + u8 old; +}; + +struct bp_patching_desc { + struct text_poke_loc *vec; + int nr_entries; + atomic_t refs; +}; + +enum { + HW_BREAKPOINT_LEN_1 = 1, + HW_BREAKPOINT_LEN_2 = 2, + HW_BREAKPOINT_LEN_3 = 3, + HW_BREAKPOINT_LEN_4 = 4, + HW_BREAKPOINT_LEN_5 = 5, + HW_BREAKPOINT_LEN_6 = 6, + HW_BREAKPOINT_LEN_7 = 7, + HW_BREAKPOINT_LEN_8 = 8, +}; + +enum { + HW_BREAKPOINT_EMPTY = 0, + HW_BREAKPOINT_R = 1, + HW_BREAKPOINT_W = 2, + HW_BREAKPOINT_RW = 3, + HW_BREAKPOINT_X = 4, + HW_BREAKPOINT_INVALID = 7, +}; + +typedef unsigned int u_int; + +typedef long long unsigned int cycles_t; + +struct system_counterval_t { + u64 cycles; + struct clocksource *cs; +}; + +typedef struct { + seqcount_t seqcount; +} seqcount_latch_t; + +enum cpufreq_table_sorting { + CPUFREQ_TABLE_UNSORTED = 0, + CPUFREQ_TABLE_SORTED_ASCENDING = 1, + CPUFREQ_TABLE_SORTED_DESCENDING = 2, +}; + +struct cpufreq_cpuinfo { + unsigned int max_freq; + unsigned int min_freq; + unsigned int transition_latency; +}; + +struct clk; + +struct cpufreq_governor; + +struct cpufreq_frequency_table; + +struct cpufreq_stats; + +struct thermal_cooling_device; + +struct cpufreq_policy { + cpumask_var_t cpus; + cpumask_var_t related_cpus; + cpumask_var_t real_cpus; + unsigned int shared_type; + unsigned int cpu; + struct clk *clk; + struct cpufreq_cpuinfo cpuinfo; + unsigned int min; + unsigned int max; + unsigned int cur; + unsigned int suspend_freq; + unsigned int policy; + unsigned int last_policy; + struct cpufreq_governor *governor; + void *governor_data; + char last_governor[16]; + struct work_struct update; + struct freq_constraints constraints; + struct freq_qos_request *min_freq_req; + struct freq_qos_request *max_freq_req; + struct 
cpufreq_frequency_table *freq_table; + enum cpufreq_table_sorting freq_table_sorted; + struct list_head policy_list; + struct kobject kobj; + struct completion kobj_unregister; + struct rw_semaphore rwsem; + bool fast_switch_possible; + bool fast_switch_enabled; + bool strict_target; + unsigned int transition_delay_us; + bool dvfs_possible_from_any_cpu; + unsigned int cached_target_freq; + unsigned int cached_resolved_idx; + bool transition_ongoing; + spinlock_t transition_lock; + wait_queue_head_t transition_wait; + struct task_struct *transition_task; + struct cpufreq_stats *stats; + void *driver_data; + struct thermal_cooling_device *cdev; + struct notifier_block nb_min; + struct notifier_block nb_max; +}; + +struct cpufreq_governor { + char name[16]; + int (*init)(struct cpufreq_policy *); + void (*exit)(struct cpufreq_policy *); + int (*start)(struct cpufreq_policy *); + void (*stop)(struct cpufreq_policy *); + void (*limits)(struct cpufreq_policy *); + ssize_t (*show_setspeed)(struct cpufreq_policy *, char *); + int (*store_setspeed)(struct cpufreq_policy *, unsigned int); + struct list_head governor_list; + struct module *owner; + u8 flags; +}; + +struct cpufreq_frequency_table { + unsigned int flags; + unsigned int driver_data; + unsigned int frequency; +}; + +struct cpufreq_freqs { + struct cpufreq_policy *policy; + unsigned int old; + unsigned int new; + u8 flags; +}; + +struct cyc2ns { + struct cyc2ns_data data[2]; + seqcount_latch_t seq; +}; + +struct muldiv { + u32 multiplier; + u32 divider; +}; + +struct freq_desc { + bool use_msr_plat; + struct muldiv muldiv[16]; + u32 freqs[16]; + u32 mask; +}; + +struct dmi_strmatch { + unsigned char slot: 7; + unsigned char exact_match: 1; + char substr[79]; +}; + +struct dmi_system_id { + int (*callback)(const struct dmi_system_id *); + const char *ident; + struct dmi_strmatch matches[4]; + void *driver_data; +}; + +struct pdev_archdata {}; + +struct platform_device_id; + +struct mfd_cell; + +struct platform_device { + const char *name; + int id; + bool id_auto; + struct device dev; + u64 platform_dma_mask; + struct device_dma_parameters dma_parms; + u32 num_resources; + struct resource *resource; + const struct platform_device_id *id_entry; + char *driver_override; + struct mfd_cell *mfd_cell; + struct pdev_archdata archdata; +}; + +struct platform_device_id { + char name[20]; + kernel_ulong_t driver_data; +}; + +struct rtc_time { + int tm_sec; + int tm_min; + int tm_hour; + int tm_mday; + int tm_mon; + int tm_year; + int tm_wday; + int tm_yday; + int tm_isdst; +}; + +struct pnp_device_id { + __u8 id[8]; + kernel_ulong_t driver_data; +}; + +struct pnp_card_device_id { + __u8 id[8]; + kernel_ulong_t driver_data; + struct { + __u8 id[8]; + } devs[8]; +}; + +struct acpi_table_header { + char signature[4]; + u32 length; + u8 revision; + u8 checksum; + char oem_id[6]; + char oem_table_id[8]; + u32 oem_revision; + char asl_compiler_id[4]; + u32 asl_compiler_revision; +}; + +struct acpi_generic_address { + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 access_width; + u64 address; +} __attribute__((packed)); + +struct acpi_table_fadt { + struct acpi_table_header header; + u32 facs; + u32 dsdt; + u8 model; + u8 preferred_profile; + u16 sci_interrupt; + u32 smi_command; + u8 acpi_enable; + u8 acpi_disable; + u8 s4_bios_request; + u8 pstate_control; + u32 pm1a_event_block; + u32 pm1b_event_block; + u32 pm1a_control_block; + u32 pm1b_control_block; + u32 pm2_control_block; + u32 pm_timer_block; + u32 gpe0_block; + u32 gpe1_block; + u8 
pm1_event_length; + u8 pm1_control_length; + u8 pm2_control_length; + u8 pm_timer_length; + u8 gpe0_block_length; + u8 gpe1_block_length; + u8 gpe1_base; + u8 cst_control; + u16 c2_latency; + u16 c3_latency; + u16 flush_size; + u16 flush_stride; + u8 duty_offset; + u8 duty_width; + u8 day_alarm; + u8 month_alarm; + u8 century; + u16 boot_flags; + u8 reserved; + u32 flags; + struct acpi_generic_address reset_register; + u8 reset_value; + u16 arm_boot_flags; + u8 minor_revision; + u64 Xfacs; + u64 Xdsdt; + struct acpi_generic_address xpm1a_event_block; + struct acpi_generic_address xpm1b_event_block; + struct acpi_generic_address xpm1a_control_block; + struct acpi_generic_address xpm1b_control_block; + struct acpi_generic_address xpm2_control_block; + struct acpi_generic_address xpm_timer_block; + struct acpi_generic_address xgpe0_block; + struct acpi_generic_address xgpe1_block; + struct acpi_generic_address sleep_control; + struct acpi_generic_address sleep_status; + u64 hypervisor_id; +} __attribute__((packed)); + +struct pnp_protocol; + +struct pnp_id; + +struct pnp_card { + struct device dev; + unsigned char number; + struct list_head global_list; + struct list_head protocol_list; + struct list_head devices; + struct pnp_protocol *protocol; + struct pnp_id *id; + char name[50]; + unsigned char pnpver; + unsigned char productver; + unsigned int serial; + unsigned char checksum; + struct proc_dir_entry *procdir; +}; + +struct pnp_dev; + +struct pnp_protocol { + struct list_head protocol_list; + char *name; + int (*get)(struct pnp_dev *); + int (*set)(struct pnp_dev *); + int (*disable)(struct pnp_dev *); + bool (*can_wakeup)(struct pnp_dev *); + int (*suspend)(struct pnp_dev *, pm_message_t); + int (*resume)(struct pnp_dev *); + unsigned char number; + struct device dev; + struct list_head cards; + struct list_head devices; +}; + +struct pnp_id { + char id[8]; + struct pnp_id *next; +}; + +struct pnp_card_driver; + +struct pnp_card_link { + struct pnp_card *card; + struct pnp_card_driver *driver; + void *driver_data; + pm_message_t pm_state; +}; + +struct pnp_driver { + const char *name; + const struct pnp_device_id *id_table; + unsigned int flags; + int (*probe)(struct pnp_dev *, const struct pnp_device_id *); + void (*remove)(struct pnp_dev *); + void (*shutdown)(struct pnp_dev *); + int (*suspend)(struct pnp_dev *, pm_message_t); + int (*resume)(struct pnp_dev *); + struct device_driver driver; +}; + +struct pnp_card_driver { + struct list_head global_list; + char *name; + const struct pnp_card_device_id *id_table; + unsigned int flags; + int (*probe)(struct pnp_card_link *, const struct pnp_card_device_id *); + void (*remove)(struct pnp_card_link *); + int (*suspend)(struct pnp_card_link *, pm_message_t); + int (*resume)(struct pnp_card_link *); + struct pnp_driver link; +}; + +struct pnp_dev { + struct device dev; + u64 dma_mask; + unsigned int number; + int status; + struct list_head global_list; + struct list_head protocol_list; + struct list_head card_list; + struct list_head rdev_list; + struct pnp_protocol *protocol; + struct pnp_card *card; + struct pnp_driver *driver; + struct pnp_card_link *card_link; + struct pnp_id *id; + int active; + int capabilities; + unsigned int num_dependent_sets; + struct list_head resources; + struct list_head options; + char name[50]; + int flags; + struct proc_dir_entry *procent; + void *data; +}; + +enum insn_type { + CALL = 0, + NOP = 1, + JMP = 2, + RET = 3, +}; + +struct ldttss_desc { + u16 limit0; + u16 base0; + u16 base1: 8; + u16 type: 5; 
+ u16 dpl: 2; + u16 p: 1; + u16 limit1: 4; + u16 zero0: 3; + u16 g: 1; + u16 base2: 8; + u32 base3; + u32 zero1; +}; + +typedef struct ldttss_desc tss_desc; + +enum idle_boot_override { + IDLE_NO_OVERRIDE = 0, + IDLE_HALT = 1, + IDLE_NOMWAIT = 2, + IDLE_POLL = 3, +}; + +enum tick_broadcast_mode { + TICK_BROADCAST_OFF = 0, + TICK_BROADCAST_ON = 1, + TICK_BROADCAST_FORCE = 2, +}; + +enum tick_broadcast_state { + TICK_BROADCAST_EXIT = 0, + TICK_BROADCAST_ENTER = 1, +}; + +struct inactive_task_frame { + long unsigned int r15; + long unsigned int r14; + long unsigned int r13; + long unsigned int r12; + long unsigned int bx; + long unsigned int bp; + long unsigned int ret_addr; +}; + +struct fork_frame { + struct inactive_task_frame frame; + struct pt_regs regs; +}; + +struct ssb_state { + struct ssb_state *shared_state; + raw_spinlock_t lock; + unsigned int disable_state; + long unsigned int local_state; +}; + +struct trace_event_raw_x86_fpu { + struct trace_entry ent; + struct fpu *fpu; + bool load_fpu; + u64 xfeatures; + u64 xcomp_bv; + char __data[0]; +}; + +struct trace_event_data_offsets_x86_fpu {}; + +typedef void (*btf_trace_x86_fpu_before_save)(void *, struct fpu *); + +typedef void (*btf_trace_x86_fpu_after_save)(void *, struct fpu *); + +typedef void (*btf_trace_x86_fpu_before_restore)(void *, struct fpu *); + +typedef void (*btf_trace_x86_fpu_after_restore)(void *, struct fpu *); + +typedef void (*btf_trace_x86_fpu_regs_activated)(void *, struct fpu *); + +typedef void (*btf_trace_x86_fpu_regs_deactivated)(void *, struct fpu *); + +typedef void (*btf_trace_x86_fpu_init_state)(void *, struct fpu *); + +typedef void (*btf_trace_x86_fpu_dropped)(void *, struct fpu *); + +typedef void (*btf_trace_x86_fpu_copy_src)(void *, struct fpu *); + +typedef void (*btf_trace_x86_fpu_copy_dst)(void *, struct fpu *); + +typedef void (*btf_trace_x86_fpu_xstate_check_failed)(void *, struct fpu *); + +struct _fpreg { + __u16 significand[4]; + __u16 exponent; +}; + +struct _fpxreg { + __u16 significand[4]; + __u16 exponent; + __u16 padding[3]; +}; + +struct user_i387_ia32_struct { + u32 cwd; + u32 swd; + u32 twd; + u32 fip; + u32 fcs; + u32 foo; + u32 fos; + u32 st_space[20]; +}; + +struct membuf { + void *p; + size_t left; +}; + +struct user_regset; + +typedef int user_regset_active_fn(struct task_struct *, const struct user_regset *); + +typedef int user_regset_get2_fn(struct task_struct *, const struct user_regset *, struct membuf); + +typedef int user_regset_set_fn(struct task_struct *, const struct user_regset *, unsigned int, unsigned int, const void *, const void *); + +typedef int user_regset_writeback_fn(struct task_struct *, const struct user_regset *, int); + +struct user_regset { + user_regset_get2_fn *regset_get; + user_regset_set_fn *set; + user_regset_active_fn *active; + user_regset_writeback_fn *writeback; + unsigned int n; + unsigned int size; + unsigned int align; + unsigned int bias; + unsigned int core_note_type; +}; + +struct _fpx_sw_bytes { + __u32 magic1; + __u32 extended_size; + __u64 xfeatures; + __u32 xstate_size; + __u32 padding[7]; +}; + +struct _xmmreg { + __u32 element[4]; +}; + +struct _fpstate_32 { + __u32 cw; + __u32 sw; + __u32 tag; + __u32 ipoff; + __u32 cssel; + __u32 dataoff; + __u32 datasel; + struct _fpreg _st[8]; + __u16 status; + __u16 magic; + __u32 _fxsr_env[6]; + __u32 mxcsr; + __u32 reserved; + struct _fpxreg _fxsr_st[8]; + struct _xmmreg _xmm[8]; + union { + __u32 padding1[44]; + __u32 padding[44]; + }; + union { + __u32 padding2[12]; + struct _fpx_sw_bytes 
sw_reserved; + }; +}; + +struct user_regset_view { + const char *name; + const struct user_regset *regsets; + unsigned int n; + u32 e_flags; + u16 e_machine; + u8 ei_osabi; +}; + +enum x86_regset { + REGSET_GENERAL = 0, + REGSET_FP = 1, + REGSET_XFP = 2, + REGSET_IOPERM64 = 2, + REGSET_XSTATE = 3, + REGSET_TLS = 4, + REGSET_IOPERM32 = 5, +}; + +struct pt_regs_offset { + const char *name; + int offset; +}; + +enum { + TB_SHUTDOWN_REBOOT = 0, + TB_SHUTDOWN_S5 = 1, + TB_SHUTDOWN_S4 = 2, + TB_SHUTDOWN_S3 = 3, + TB_SHUTDOWN_HALT = 4, + TB_SHUTDOWN_WFS = 5, +}; + +struct tboot_mac_region { + u64 start; + u32 size; +} __attribute__((packed)); + +struct tboot_acpi_generic_address { + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 access_width; + u64 address; +} __attribute__((packed)); + +struct tboot_acpi_sleep_info { + struct tboot_acpi_generic_address pm1a_cnt_blk; + struct tboot_acpi_generic_address pm1b_cnt_blk; + struct tboot_acpi_generic_address pm1a_evt_blk; + struct tboot_acpi_generic_address pm1b_evt_blk; + u16 pm1a_cnt_val; + u16 pm1b_cnt_val; + u64 wakeup_vector; + u32 vector_width; + u64 kernel_s3_resume_vector; +} __attribute__((packed)); + +struct tboot { + u8 uuid[16]; + u32 version; + u32 log_addr; + u32 shutdown_entry; + u32 shutdown_type; + struct tboot_acpi_sleep_info acpi_sinfo; + u32 tboot_base; + u32 tboot_size; + u8 num_mac_regions; + struct tboot_mac_region mac_regions[32]; + u8 s3_key[64]; + u8 reserved_align[3]; + u32 num_in_wfs; +} __attribute__((packed)); + +struct sha1_hash { + u8 hash[20]; +}; + +struct sinit_mle_data { + u32 version; + struct sha1_hash bios_acm_id; + u32 edx_senter_flags; + u64 mseg_valid; + struct sha1_hash sinit_hash; + struct sha1_hash mle_hash; + struct sha1_hash stm_hash; + struct sha1_hash lcp_policy_hash; + u32 lcp_policy_control; + u32 rlp_wakeup_addr; + u32 reserved; + u32 num_mdrs; + u32 mdrs_off; + u32 num_vtd_dmars; + u32 vtd_dmars_off; +} __attribute__((packed)); + +typedef bool (*stack_trace_consume_fn)(void *, long unsigned int); + +struct stack_frame_user { + const void *next_fp; + long unsigned int ret_addr; +}; + +enum cache_type { + CACHE_TYPE_NOCACHE = 0, + CACHE_TYPE_INST = 1, + CACHE_TYPE_DATA = 2, + CACHE_TYPE_SEPARATE = 3, + CACHE_TYPE_UNIFIED = 4, +}; + +struct cacheinfo { + unsigned int id; + enum cache_type type; + unsigned int level; + unsigned int coherency_line_size; + unsigned int number_of_sets; + unsigned int ways_of_associativity; + unsigned int physical_line_partition; + unsigned int size; + cpumask_t shared_cpu_map; + unsigned int attributes; + void *fw_token; + bool disable_sysfs; + void *priv; +}; + +struct cpu_cacheinfo { + struct cacheinfo *info_list; + unsigned int num_levels; + unsigned int num_leaves; + bool cpu_map_populated; +}; + +struct amd_l3_cache { + unsigned int indices; + u8 subcaches[4]; +}; + +struct threshold_block { + unsigned int block; + unsigned int bank; + unsigned int cpu; + u32 address; + u16 interrupt_enable; + bool interrupt_capable; + u16 threshold_limit; + struct kobject kobj; + struct list_head miscj; +}; + +struct threshold_bank { + struct kobject *kobj; + struct threshold_block *blocks; + refcount_t cpus; + unsigned int shared; +}; + +struct amd_northbridge { + struct pci_dev *root; + struct pci_dev *misc; + struct pci_dev *link; + struct amd_l3_cache l3_cache; + struct threshold_bank *bank4; +}; + +struct _cache_table { + unsigned char descriptor; + char cache_type; + short int size; +}; + +enum _cache_type { + CTYPE_NULL = 0, + CTYPE_DATA = 1, + CTYPE_INST = 2, + 
CTYPE_UNIFIED = 3, +}; + +union _cpuid4_leaf_eax { + struct { + enum _cache_type type: 5; + unsigned int level: 3; + unsigned int is_self_initializing: 1; + unsigned int is_fully_associative: 1; + unsigned int reserved: 4; + unsigned int num_threads_sharing: 12; + unsigned int num_cores_on_die: 6; + } split; + u32 full; +}; + +union _cpuid4_leaf_ebx { + struct { + unsigned int coherency_line_size: 12; + unsigned int physical_line_partition: 10; + unsigned int ways_of_associativity: 10; + } split; + u32 full; +}; + +union _cpuid4_leaf_ecx { + struct { + unsigned int number_of_sets: 32; + } split; + u32 full; +}; + +struct _cpuid4_info_regs { + union _cpuid4_leaf_eax eax; + union _cpuid4_leaf_ebx ebx; + union _cpuid4_leaf_ecx ecx; + unsigned int id; + long unsigned int size; + struct amd_northbridge *nb; +}; + +union l1_cache { + struct { + unsigned int line_size: 8; + unsigned int lines_per_tag: 8; + unsigned int assoc: 8; + unsigned int size_in_kb: 8; + }; + unsigned int val; +}; + +union l2_cache { + struct { + unsigned int line_size: 8; + unsigned int lines_per_tag: 4; + unsigned int assoc: 4; + unsigned int size_in_kb: 16; + }; + unsigned int val; +}; + +union l3_cache { + struct { + unsigned int line_size: 8; + unsigned int lines_per_tag: 4; + unsigned int assoc: 4; + unsigned int res: 2; + unsigned int size_encoded: 14; + }; + unsigned int val; +}; + +struct cpuid_bit { + u16 feature; + u8 reg; + u8 bit; + u32 level; + u32 sub_leaf; +}; + +enum cpuid_leafs { + CPUID_1_EDX = 0, + CPUID_8000_0001_EDX = 1, + CPUID_8086_0001_EDX = 2, + CPUID_LNX_1 = 3, + CPUID_1_ECX = 4, + CPUID_C000_0001_EDX = 5, + CPUID_8000_0001_ECX = 6, + CPUID_LNX_2 = 7, + CPUID_LNX_3 = 8, + CPUID_7_0_EBX = 9, + CPUID_D_1_EAX = 10, + CPUID_LNX_4 = 11, + CPUID_7_1_EAX = 12, + CPUID_8000_0008_EBX = 13, + CPUID_6_EAX = 14, + CPUID_8000_000A_EDX = 15, + CPUID_7_ECX = 16, + CPUID_8000_0007_EBX = 17, + CPUID_7_EDX = 18, + CPUID_8000_001F_EAX = 19, +}; + +enum kgdb_bptype { + BP_BREAKPOINT = 0, + BP_HARDWARE_BREAKPOINT = 1, + BP_WRITE_WATCHPOINT = 2, + BP_READ_WATCHPOINT = 3, + BP_ACCESS_WATCHPOINT = 4, + BP_POKE_BREAKPOINT = 5, +}; + +struct kgdb_arch { + unsigned char gdb_bpt_instr[1]; + long unsigned int flags; + int (*set_breakpoint)(long unsigned int, char *); + int (*remove_breakpoint)(long unsigned int, char *); + int (*set_hw_breakpoint)(long unsigned int, int, enum kgdb_bptype); + int (*remove_hw_breakpoint)(long unsigned int, int, enum kgdb_bptype); + void (*disable_hw_break)(struct pt_regs *); + void (*remove_all_hw_break)(); + void (*correct_hw_break)(); + void (*enable_nmi)(bool); +}; + +struct cpu_dev { + const char *c_vendor; + const char *c_ident[2]; + void (*c_early_init)(struct cpuinfo_x86 *); + void (*c_bsp_init)(struct cpuinfo_x86 *); + void (*c_init)(struct cpuinfo_x86 *); + void (*c_identify)(struct cpuinfo_x86 *); + void (*c_detect_tlb)(struct cpuinfo_x86 *); + int c_x86_vendor; +}; + +struct cpuid_dependent_feature { + u32 feature; + u32 level; +}; + +enum spectre_v2_mitigation { + SPECTRE_V2_NONE = 0, + SPECTRE_V2_RETPOLINE_GENERIC = 1, + SPECTRE_V2_RETPOLINE_AMD = 2, + SPECTRE_V2_IBRS_ENHANCED = 3, +}; + +enum spectre_v2_user_mitigation { + SPECTRE_V2_USER_NONE = 0, + SPECTRE_V2_USER_STRICT = 1, + SPECTRE_V2_USER_STRICT_PREFERRED = 2, + SPECTRE_V2_USER_PRCTL = 3, + SPECTRE_V2_USER_SECCOMP = 4, +}; + +enum ssb_mitigation { + SPEC_STORE_BYPASS_NONE = 0, + SPEC_STORE_BYPASS_DISABLE = 1, + SPEC_STORE_BYPASS_PRCTL = 2, + SPEC_STORE_BYPASS_SECCOMP = 3, +}; + +enum l1tf_mitigations { + L1TF_MITIGATION_OFF 
= 0, + L1TF_MITIGATION_FLUSH_NOWARN = 1, + L1TF_MITIGATION_FLUSH = 2, + L1TF_MITIGATION_FLUSH_NOSMT = 3, + L1TF_MITIGATION_FULL = 4, + L1TF_MITIGATION_FULL_FORCE = 5, +}; + +enum mds_mitigations { + MDS_MITIGATION_OFF = 0, + MDS_MITIGATION_FULL = 1, + MDS_MITIGATION_VMWERV = 2, +}; + +enum cpuhp_smt_control { + CPU_SMT_ENABLED = 0, + CPU_SMT_DISABLED = 1, + CPU_SMT_FORCE_DISABLED = 2, + CPU_SMT_NOT_SUPPORTED = 3, + CPU_SMT_NOT_IMPLEMENTED = 4, +}; + +enum vmx_l1d_flush_state { + VMENTER_L1D_FLUSH_AUTO = 0, + VMENTER_L1D_FLUSH_NEVER = 1, + VMENTER_L1D_FLUSH_COND = 2, + VMENTER_L1D_FLUSH_ALWAYS = 3, + VMENTER_L1D_FLUSH_EPT_DISABLED = 4, + VMENTER_L1D_FLUSH_NOT_REQUIRED = 5, +}; + +enum taa_mitigations { + TAA_MITIGATION_OFF = 0, + TAA_MITIGATION_UCODE_NEEDED = 1, + TAA_MITIGATION_VERW = 2, + TAA_MITIGATION_TSX_DISABLED = 3, +}; + +enum srbds_mitigations { + SRBDS_MITIGATION_OFF = 0, + SRBDS_MITIGATION_UCODE_NEEDED = 1, + SRBDS_MITIGATION_FULL = 2, + SRBDS_MITIGATION_TSX_OFF = 3, + SRBDS_MITIGATION_HYPERVISOR = 4, +}; + +enum spectre_v1_mitigation { + SPECTRE_V1_MITIGATION_NONE = 0, + SPECTRE_V1_MITIGATION_AUTO = 1, +}; + +enum spectre_v2_mitigation_cmd { + SPECTRE_V2_CMD_NONE = 0, + SPECTRE_V2_CMD_AUTO = 1, + SPECTRE_V2_CMD_FORCE = 2, + SPECTRE_V2_CMD_RETPOLINE = 3, + SPECTRE_V2_CMD_RETPOLINE_GENERIC = 4, + SPECTRE_V2_CMD_RETPOLINE_AMD = 5, +}; + +enum spectre_v2_user_cmd { + SPECTRE_V2_USER_CMD_NONE = 0, + SPECTRE_V2_USER_CMD_AUTO = 1, + SPECTRE_V2_USER_CMD_FORCE = 2, + SPECTRE_V2_USER_CMD_PRCTL = 3, + SPECTRE_V2_USER_CMD_PRCTL_IBPB = 4, + SPECTRE_V2_USER_CMD_SECCOMP = 5, + SPECTRE_V2_USER_CMD_SECCOMP_IBPB = 6, +}; + +enum ssb_mitigation_cmd { + SPEC_STORE_BYPASS_CMD_NONE = 0, + SPEC_STORE_BYPASS_CMD_AUTO = 1, + SPEC_STORE_BYPASS_CMD_ON = 2, + SPEC_STORE_BYPASS_CMD_PRCTL = 3, + SPEC_STORE_BYPASS_CMD_SECCOMP = 4, +}; + +enum hk_flags { + HK_FLAG_TIMER = 1, + HK_FLAG_RCU = 2, + HK_FLAG_MISC = 4, + HK_FLAG_SCHED = 8, + HK_FLAG_TICK = 16, + HK_FLAG_DOMAIN = 32, + HK_FLAG_WQ = 64, + HK_FLAG_MANAGED_IRQ = 128, + HK_FLAG_KTHREAD = 256, +}; + +struct aperfmperf_sample { + unsigned int khz; + atomic_t scfpending; + ktime_t time; + u64 aperf; + u64 mperf; +}; + +struct cpuid_dep { + unsigned int feature; + unsigned int depends; +}; + +enum vmx_feature_leafs { + MISC_FEATURES = 0, + PRIMARY_CTLS = 1, + SECONDARY_CTLS = 2, + NR_VMX_FEATURE_WORDS = 3, +}; + +struct _tlb_table { + unsigned char descriptor; + char tlb_type; + unsigned int entries; + char info[128]; +}; + +enum tsx_ctrl_states { + TSX_CTRL_ENABLE = 0, + TSX_CTRL_DISABLE = 1, + TSX_CTRL_NOT_SUPPORTED = 2, +}; + +enum split_lock_detect_state { + sld_off = 0, + sld_warn = 1, + sld_fatal = 2, +}; + +struct sku_microcode { + u8 model; + u8 stepping; + u32 microcode; +}; + +struct cpuid_regs { + u32 eax; + u32 ebx; + u32 ecx; + u32 edx; +}; + +enum pconfig_target { + INVALID_TARGET = 0, + MKTME_TARGET = 1, + PCONFIG_TARGET_NR = 2, +}; + +enum { + PCONFIG_CPUID_SUBLEAF_INVALID = 0, + PCONFIG_CPUID_SUBLEAF_TARGETID = 1, +}; + +enum task_work_notify_mode { + TWA_NONE = 0, + TWA_RESUME = 1, + TWA_SIGNAL = 2, +}; + +enum mf_flags { + MF_COUNT_INCREASED = 1, + MF_ACTION_REQUIRED = 2, + MF_MUST_KILL = 4, + MF_SOFT_OFFLINE = 8, +}; + +struct mce { + __u64 status; + __u64 misc; + __u64 addr; + __u64 mcgstatus; + __u64 ip; + __u64 tsc; + __u64 time; + __u8 cpuvendor; + __u8 inject_flags; + __u8 severity; + __u8 pad; + __u32 cpuid; + __u8 cs; + __u8 bank; + __u8 cpu; + __u8 finished; + __u32 extcpu; + __u32 socketid; + __u32 apicid; + __u64 mcgcap; + __u64 
synd; + __u64 ipid; + __u64 ppin; + __u32 microcode; + __u64 kflags; +}; + +enum mce_notifier_prios { + MCE_PRIO_LOWEST = 0, + MCE_PRIO_MCELOG = 1, + MCE_PRIO_EDAC = 2, + MCE_PRIO_NFIT = 3, + MCE_PRIO_EXTLOG = 4, + MCE_PRIO_UC = 5, + MCE_PRIO_EARLY = 6, + MCE_PRIO_CEC = 7, + MCE_PRIO_HIGHEST = 7, +}; + +typedef long unsigned int mce_banks_t[1]; + +enum mcp_flags { + MCP_TIMESTAMP = 1, + MCP_UC = 2, + MCP_DONTLOG = 4, + MCP_QUEUE_LOG = 8, +}; + +enum severity_level { + MCE_NO_SEVERITY = 0, + MCE_DEFERRED_SEVERITY = 1, + MCE_UCNA_SEVERITY = 1, + MCE_KEEP_SEVERITY = 2, + MCE_SOME_SEVERITY = 3, + MCE_AO_SEVERITY = 4, + MCE_UC_SEVERITY = 5, + MCE_AR_SEVERITY = 6, + MCE_PANIC_SEVERITY = 7, +}; + +struct mce_evt_llist { + struct llist_node llnode; + struct mce mce; +}; + +struct mca_config { + bool dont_log_ce; + bool cmci_disabled; + bool ignore_ce; + bool print_all; + __u64 lmce_disabled: 1; + __u64 disabled: 1; + __u64 ser: 1; + __u64 recovery: 1; + __u64 bios_cmci_threshold: 1; + int: 27; + __u64 __reserved: 59; + s8 bootlog; + int tolerant; + int monarch_timeout; + int panic_timeout; + u32 rip_msr; +}; + +struct mce_vendor_flags { + __u64 overflow_recov: 1; + __u64 succor: 1; + __u64 smca: 1; + __u64 amd_threshold: 1; + __u64 __reserved_0: 60; +}; + +struct mca_msr_regs { + u32 (*ctl)(int); + u32 (*status)(int); + u32 (*addr)(int); + u32 (*misc)(int); +}; + +struct trace_event_raw_mce_record { + struct trace_entry ent; + u64 mcgcap; + u64 mcgstatus; + u64 status; + u64 addr; + u64 misc; + u64 synd; + u64 ipid; + u64 ip; + u64 tsc; + u64 walltime; + u32 cpu; + u32 cpuid; + u32 apicid; + u32 socketid; + u8 cs; + u8 bank; + u8 cpuvendor; + char __data[0]; +}; + +struct trace_event_data_offsets_mce_record {}; + +typedef void (*btf_trace_mce_record)(void *, struct mce *); + +struct mce_bank { + u64 ctl; + bool init; +}; + +struct mce_bank_dev { + struct device_attribute attr; + char attrname[16]; + u8 bank; +}; + +enum handler_type { + EX_HANDLER_NONE = 0, + EX_HANDLER_FAULT = 1, + EX_HANDLER_UACCESS = 2, + EX_HANDLER_OTHER = 3, +}; + +enum context { + IN_KERNEL = 1, + IN_USER = 2, + IN_KERNEL_RECOV = 3, +}; + +enum ser { + SER_REQUIRED = 1, + NO_SER = 2, +}; + +enum exception { + EXCP_CONTEXT = 1, + NO_EXCP = 2, +}; + +struct severity { + u64 mask; + u64 result; + unsigned char sev; + unsigned char mcgmask; + unsigned char mcgres; + unsigned char ser; + unsigned char context; + unsigned char excp; + unsigned char covered; + unsigned char cpu_model; + unsigned char cpu_minstepping; + unsigned char bank_lo; + unsigned char bank_hi; + char *msg; +}; + +struct gen_pool; + +typedef long unsigned int (*genpool_algo_t)(long unsigned int *, long unsigned int, long unsigned int, unsigned int, void *, struct gen_pool *, long unsigned int); + +struct gen_pool { + spinlock_t lock; + struct list_head chunks; + int min_alloc_order; + genpool_algo_t algo; + void *data; + const char *name; +}; + +enum { + CMCI_STORM_NONE = 0, + CMCI_STORM_ACTIVE = 1, + CMCI_STORM_SUBSIDED = 2, +}; + +enum kobject_action { + KOBJ_ADD = 0, + KOBJ_REMOVE = 1, + KOBJ_CHANGE = 2, + KOBJ_MOVE = 3, + KOBJ_ONLINE = 4, + KOBJ_OFFLINE = 5, + KOBJ_BIND = 6, + KOBJ_UNBIND = 7, +}; + +enum smca_bank_types { + SMCA_LS = 0, + SMCA_LS_V2 = 1, + SMCA_IF = 2, + SMCA_L2_CACHE = 3, + SMCA_DE = 4, + SMCA_RESERVED = 5, + SMCA_EX = 6, + SMCA_FP = 7, + SMCA_L3_CACHE = 8, + SMCA_CS = 9, + SMCA_CS_V2 = 10, + SMCA_PIE = 11, + SMCA_UMC = 12, + SMCA_PB = 13, + SMCA_PSP = 14, + SMCA_PSP_V2 = 15, + SMCA_SMU = 16, + SMCA_SMU_V2 = 17, + SMCA_MP5 = 18, + 
SMCA_NBIO = 19, + SMCA_PCIE = 20, + N_SMCA_BANK_TYPES = 21, +}; + +struct smca_hwid { + unsigned int bank_type; + u32 hwid_mcatype; + u8 count; +}; + +struct smca_bank { + struct smca_hwid *hwid; + u32 id; + u8 sysfs_id; +}; + +struct smca_bank_name { + const char *name; + const char *long_name; +}; + +struct thresh_restart { + struct threshold_block *b; + int reset; + int set_lvt_off; + int lvt_off; + u16 old_limit; +}; + +struct threshold_attr { + struct attribute attr; + ssize_t (*show)(struct threshold_block *, char *); + ssize_t (*store)(struct threshold_block *, const char *, size_t); +}; + +enum { + CPER_SEV_RECOVERABLE = 0, + CPER_SEV_FATAL = 1, + CPER_SEV_CORRECTED = 2, + CPER_SEV_INFORMATIONAL = 3, +}; + +struct cper_record_header { + char signature[4]; + u16 revision; + u32 signature_end; + u16 section_count; + u32 error_severity; + u32 validation_bits; + u32 record_length; + u64 timestamp; + guid_t platform_id; + guid_t partition_id; + guid_t creator_id; + guid_t notification_type; + u64 record_id; + u32 flags; + u64 persistence_information; + u8 reserved[12]; +} __attribute__((packed)); + +struct cper_section_descriptor { + u32 section_offset; + u32 section_length; + u16 revision; + u8 validation_bits; + u8 reserved; + u32 flags; + guid_t section_type; + guid_t fru_id; + u32 section_severity; + u8 fru_text[20]; +}; + +struct cper_ia_proc_ctx { + u16 reg_ctx_type; + u16 reg_arr_size; + u32 msr_addr; + u64 mm_reg_addr; +}; + +struct cper_sec_mem_err { + u64 validation_bits; + u64 error_status; + u64 physical_addr; + u64 physical_addr_mask; + u16 node; + u16 card; + u16 module; + u16 bank; + u16 device; + u16 row; + u16 column; + u16 bit_pos; + u64 requestor_id; + u64 responder_id; + u64 target_id; + u8 error_type; + u8 extended; + u16 rank; + u16 mem_array_handle; + u16 mem_dev_handle; +}; + +enum { + GHES_SEV_NO = 0, + GHES_SEV_CORRECTED = 1, + GHES_SEV_RECOVERABLE = 2, + GHES_SEV_PANIC = 3, +}; + +struct cper_mce_record { + struct cper_record_header hdr; + struct cper_section_descriptor sec_hdr; + struct mce mce; +}; + +struct miscdevice { + int minor; + const char *name; + const struct file_operations *fops; + struct list_head list; + struct device *parent; + struct device *this_device; + const struct attribute_group **groups; + const char *nodename; + umode_t mode; +}; + +typedef struct poll_table_struct poll_table; + +struct mce_log_buffer { + char signature[12]; + unsigned int len; + unsigned int next; + unsigned int flags; + unsigned int recordlen; + struct mce entry[0]; +}; + +typedef int (*cpu_stop_fn_t)(void *); + +typedef __u8 mtrr_type; + +struct mtrr_ops { + u32 vendor; + u32 use_intel_if; + void (*set)(unsigned int, long unsigned int, long unsigned int, mtrr_type); + void (*set_all)(); + void (*get)(unsigned int, long unsigned int *, long unsigned int *, mtrr_type *); + int (*get_free_region)(long unsigned int, long unsigned int, int); + int (*validate_add_page)(long unsigned int, long unsigned int, unsigned int); + int (*have_wrcomb)(); +}; + +struct set_mtrr_data { + long unsigned int smp_base; + long unsigned int smp_size; + unsigned int smp_reg; + mtrr_type smp_type; +}; + +struct mtrr_value { + mtrr_type ltype; + long unsigned int lbase; + long unsigned int lsize; +}; + +struct proc_ops { + unsigned int proc_flags; + int (*proc_open)(struct inode *, struct file *); + ssize_t (*proc_read)(struct file *, char *, size_t, loff_t *); + ssize_t (*proc_read_iter)(struct kiocb *, struct iov_iter *); + ssize_t (*proc_write)(struct file *, const char *, size_t, loff_t 
*); + loff_t (*proc_lseek)(struct file *, loff_t, int); + int (*proc_release)(struct inode *, struct file *); + __poll_t (*proc_poll)(struct file *, struct poll_table_struct *); + long int (*proc_ioctl)(struct file *, unsigned int, long unsigned int); + long int (*proc_compat_ioctl)(struct file *, unsigned int, long unsigned int); + int (*proc_mmap)(struct file *, struct vm_area_struct *); + long unsigned int (*proc_get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); +}; + +struct mtrr_sentry { + __u64 base; + __u32 size; + __u32 type; +}; + +struct mtrr_gentry { + __u64 base; + __u32 size; + __u32 regnum; + __u32 type; + __u32 _pad; +}; + +typedef u32 compat_uint_t; + +struct mtrr_sentry32 { + compat_ulong_t base; + compat_uint_t size; + compat_uint_t type; +}; + +struct mtrr_gentry32 { + compat_ulong_t regnum; + compat_uint_t base; + compat_uint_t size; + compat_uint_t type; +}; + +struct mtrr_var_range { + __u32 base_lo; + __u32 base_hi; + __u32 mask_lo; + __u32 mask_hi; +}; + +struct mtrr_state_type { + struct mtrr_var_range var_ranges[256]; + mtrr_type fixed_ranges[88]; + unsigned char enabled; + unsigned char have_fixed; + mtrr_type def_type; +}; + +struct fixed_range_block { + int base_msr; + int ranges; +}; + +struct var_mtrr_range_state { + long unsigned int base_pfn; + long unsigned int size_pfn; + mtrr_type type; +}; + +struct var_mtrr_state { + long unsigned int range_startk; + long unsigned int range_sizek; + long unsigned int chunk_sizek; + long unsigned int gran_sizek; + unsigned int reg; +}; + +struct mtrr_cleanup_result { + long unsigned int gran_sizek; + long unsigned int chunk_sizek; + long unsigned int lose_cover_sizek; + unsigned int num_reg; + int bad; +}; + +struct subsys_interface { + const char *name; + struct bus_type *subsys; + struct list_head node; + int (*add_dev)(struct device *, struct subsys_interface *); + void (*remove_dev)(struct device *, struct subsys_interface *); +}; + +struct property_entry; + +struct platform_device_info { + struct device *parent; + struct fwnode_handle *fwnode; + bool of_node_reused; + const char *name; + int id; + const struct resource *res; + unsigned int num_res; + const void *data; + size_t size_data; + u64 dma_mask; + const struct property_entry *properties; +}; + +enum dev_prop_type { + DEV_PROP_U8 = 0, + DEV_PROP_U16 = 1, + DEV_PROP_U32 = 2, + DEV_PROP_U64 = 3, + DEV_PROP_STRING = 4, + DEV_PROP_REF = 5, +}; + +struct property_entry { + const char *name; + size_t length; + bool is_inline; + enum dev_prop_type type; + union { + const void *pointer; + union { + u8 u8_data[8]; + u16 u16_data[4]; + u32 u32_data[2]; + u64 u64_data[1]; + const char *str[1]; + } value; + }; +}; + +struct builtin_fw { + char *name; + void *data; + long unsigned int size; +}; + +struct cpio_data { + void *data; + size_t size; + char name[18]; +}; + +struct cpu_signature { + unsigned int sig; + unsigned int pf; + unsigned int rev; +}; + +enum ucode_state { + UCODE_OK = 0, + UCODE_NEW = 1, + UCODE_UPDATED = 2, + UCODE_NFOUND = 3, + UCODE_ERROR = 4, +}; + +struct microcode_ops { + enum ucode_state (*request_microcode_user)(int, const void *, size_t); + enum ucode_state (*request_microcode_fw)(int, struct device *, bool); + void (*microcode_fini_cpu)(int); + enum ucode_state (*apply_microcode)(int); + int (*collect_cpu_info)(int, struct cpu_signature *); +}; + +struct ucode_cpu_info { + struct cpu_signature cpu_sig; + int valid; + void *mc; +}; + +struct cpu_info_ctx { + struct cpu_signature 
*cpu_sig; + int err; +}; + +struct firmware { + size_t size; + const u8 *data; + void *priv; +}; + +struct ucode_patch { + struct list_head plist; + void *data; + u32 patch_id; + u16 equiv_cpu; +}; + +struct microcode_header_intel { + unsigned int hdrver; + unsigned int rev; + unsigned int date; + unsigned int sig; + unsigned int cksum; + unsigned int ldrver; + unsigned int pf; + unsigned int datasize; + unsigned int totalsize; + unsigned int reserved[3]; +}; + +struct microcode_intel { + struct microcode_header_intel hdr; + unsigned int bits[0]; +}; + +struct extended_signature { + unsigned int sig; + unsigned int pf; + unsigned int cksum; +}; + +struct extended_sigtable { + unsigned int count; + unsigned int cksum; + unsigned int reserved[3]; + struct extended_signature sigs[0]; +}; + +struct equiv_cpu_entry { + u32 installed_cpu; + u32 fixed_errata_mask; + u32 fixed_errata_compare; + u16 equiv_cpu; + u16 res; +}; + +struct microcode_header_amd { + u32 data_code; + u32 patch_id; + u16 mc_patch_data_id; + u8 mc_patch_data_len; + u8 init_flag; + u32 mc_patch_data_checksum; + u32 nb_dev_id; + u32 sb_dev_id; + u16 processor_rev_id; + u8 nb_rev_id; + u8 sb_rev_id; + u8 bios_api_rev; + u8 reserved1[3]; + u32 match_reg[8]; +}; + +struct microcode_amd { + struct microcode_header_amd hdr; + unsigned int mpb[0]; +}; + +struct equiv_cpu_table { + unsigned int num_entries; + struct equiv_cpu_entry *entry; +}; + +struct cont_desc { + struct microcode_amd *mc; + u32 cpuid_1_eax; + u32 psize; + u8 *data; + size_t size; +}; + +enum rdt_group_type { + RDTCTRL_GROUP = 0, + RDTMON_GROUP = 1, + RDT_NUM_GROUP = 2, +}; + +struct rdtgroup; + +struct mongroup { + struct kernfs_node *mon_data_kn; + struct rdtgroup *parent; + struct list_head crdtgrp_list; + u32 rmid; +}; + +enum rdtgrp_mode { + RDT_MODE_SHAREABLE = 0, + RDT_MODE_EXCLUSIVE = 1, + RDT_MODE_PSEUDO_LOCKSETUP = 2, + RDT_MODE_PSEUDO_LOCKED = 3, + RDT_NUM_MODES = 4, +}; + +struct pseudo_lock_region; + +struct rdtgroup { + struct kernfs_node *kn; + struct list_head rdtgroup_list; + u32 closid; + struct cpumask cpu_mask; + int flags; + atomic_t waitcount; + enum rdt_group_type type; + struct mongroup mon; + enum rdtgrp_mode mode; + struct pseudo_lock_region *plr; +}; + +struct rdt_cache { + unsigned int cbm_len; + unsigned int min_cbm_bits; + unsigned int cbm_idx_mult; + unsigned int cbm_idx_offset; + unsigned int shareable_bits; + bool arch_has_sparse_bitmaps; + bool arch_has_empty_bitmaps; + bool arch_has_per_cpu_cfg; +}; + +enum membw_throttle_mode { + THREAD_THROTTLE_UNDEFINED = 0, + THREAD_THROTTLE_MAX = 1, + THREAD_THROTTLE_PER_THREAD = 2, +}; + +struct rdt_membw { + u32 min_bw; + u32 bw_gran; + u32 delay_linear; + bool arch_needs_linear; + enum membw_throttle_mode throttle_mode; + bool mba_sc; + u32 *mb_map; +}; + +struct rdt_domain; + +struct msr_param; + +struct rdt_parse_data; + +struct rdt_resource { + int rid; + bool alloc_enabled; + bool mon_enabled; + bool alloc_capable; + bool mon_capable; + char *name; + int num_closid; + int cache_level; + u32 default_ctrl; + unsigned int msr_base; + void (*msr_update)(struct rdt_domain *, struct msr_param *, struct rdt_resource *); + int data_width; + struct list_head domains; + struct rdt_cache cache; + struct rdt_membw membw; + const char *format_str; + int (*parse_ctrlval)(struct rdt_parse_data *, struct rdt_resource *, struct rdt_domain *); + struct list_head evt_list; + int num_rmid; + unsigned int mon_scale; + unsigned int mbm_width; + long unsigned int fflags; +}; + +struct mbm_state; + +struct 
rdt_domain { + struct list_head list; + int id; + struct cpumask cpu_mask; + long unsigned int *rmid_busy_llc; + struct mbm_state *mbm_total; + struct mbm_state *mbm_local; + struct delayed_work mbm_over; + struct delayed_work cqm_limbo; + int mbm_work_cpu; + int cqm_work_cpu; + u32 *ctrl_val; + u32 *mbps_val; + u32 new_ctrl; + bool have_new_ctrl; + struct pseudo_lock_region *plr; +}; + +struct pseudo_lock_region { + struct rdt_resource *r; + struct rdt_domain *d; + u32 cbm; + wait_queue_head_t lock_thread_wq; + int thread_done; + int cpu; + unsigned int line_size; + unsigned int size; + void *kmem; + unsigned int minor; + struct dentry *debugfs_dir; + struct list_head pm_reqs; +}; + +struct mbm_state { + u64 chunks; + u64 prev_msr; + u64 prev_bw_msr; + u32 prev_bw; + u32 delta_bw; + bool delta_comp; +}; + +struct msr_param { + struct rdt_resource *res; + int low; + int high; +}; + +struct rdt_parse_data { + struct rdtgroup *rdtgrp; + char *buf; +}; + +enum { + RDT_RESOURCE_L3 = 0, + RDT_RESOURCE_L3DATA = 1, + RDT_RESOURCE_L3CODE = 2, + RDT_RESOURCE_L2 = 3, + RDT_RESOURCE_L2DATA = 4, + RDT_RESOURCE_L2CODE = 5, + RDT_RESOURCE_MBA = 6, + RDT_NUM_RESOURCES = 7, +}; + +union cpuid_0x10_1_eax { + struct { + unsigned int cbm_len: 5; + } split; + unsigned int full; +}; + +union cpuid_0x10_3_eax { + struct { + unsigned int max_delay: 12; + } split; + unsigned int full; +}; + +union cpuid_0x10_x_edx { + struct { + unsigned int cos_max: 16; + } split; + unsigned int full; +}; + +enum { + RDT_FLAG_CMT = 0, + RDT_FLAG_MBM_TOTAL = 1, + RDT_FLAG_MBM_LOCAL = 2, + RDT_FLAG_L3_CAT = 3, + RDT_FLAG_L3_CDP = 4, + RDT_FLAG_L2_CAT = 5, + RDT_FLAG_L2_CDP = 6, + RDT_FLAG_MBA = 7, +}; + +struct rdt_options { + char *name; + int flag; + bool force_off; + bool force_on; +}; + +typedef unsigned int uint; + +enum kernfs_node_type { + KERNFS_DIR = 1, + KERNFS_FILE = 2, + KERNFS_LINK = 4, +}; + +enum kernfs_root_flag { + KERNFS_ROOT_CREATE_DEACTIVATED = 1, + KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK = 2, + KERNFS_ROOT_SUPPORT_EXPORTOP = 4, + KERNFS_ROOT_SUPPORT_USER_XATTR = 8, +}; + +struct kernfs_fs_context { + struct kernfs_root *root; + void *ns_tag; + long unsigned int magic; + bool new_sb_created; +}; + +struct rdt_fs_context { + struct kernfs_fs_context kfc; + bool enable_cdpl2; + bool enable_cdpl3; + bool enable_mba_mbps; +}; + +struct mon_evt { + u32 evtid; + char *name; + struct list_head list; +}; + +union mon_data_bits { + void *priv; + struct { + unsigned int rid: 10; + unsigned int evtid: 8; + unsigned int domid: 14; + } u; +}; + +struct rmid_read { + struct rdtgroup *rgrp; + struct rdt_resource *r; + struct rdt_domain *d; + int evtid; + bool first; + u64 val; +}; + +struct rftype { + char *name; + umode_t mode; + const struct kernfs_ops *kf_ops; + long unsigned int flags; + long unsigned int fflags; + int (*seq_show)(struct kernfs_open_file *, struct seq_file *, void *); + ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t); +}; + +enum rdt_param { + Opt_cdp = 0, + Opt_cdpl2 = 1, + Opt_mba_mbps = 2, + nr__rdt_params = 3, +}; + +struct rmid_entry { + u32 rmid; + int busy; + struct list_head list; +}; + +struct mbm_correction_factor_table { + u32 rmidthreshold; + u64 cf; +}; + +struct trace_event_raw_pseudo_lock_mem_latency { + struct trace_entry ent; + u32 latency; + char __data[0]; +}; + +struct trace_event_raw_pseudo_lock_l2 { + struct trace_entry ent; + u64 l2_hits; + u64 l2_miss; + char __data[0]; +}; + +struct trace_event_raw_pseudo_lock_l3 { + struct trace_entry ent; + u64 l3_hits; + u64 
l3_miss; + char __data[0]; +}; + +struct trace_event_data_offsets_pseudo_lock_mem_latency {}; + +struct trace_event_data_offsets_pseudo_lock_l2 {}; + +struct trace_event_data_offsets_pseudo_lock_l3 {}; + +typedef void (*btf_trace_pseudo_lock_mem_latency)(void *, u32); + +typedef void (*btf_trace_pseudo_lock_l2)(void *, u64, u64); + +typedef void (*btf_trace_pseudo_lock_l3)(void *, u64, u64); + +struct pseudo_lock_pm_req { + struct list_head list; + struct dev_pm_qos_request req; +}; + +struct residency_counts { + u64 miss_before; + u64 hits_before; + u64 miss_after; + u64 hits_after; +}; + +enum mmu_notifier_event { + MMU_NOTIFY_UNMAP = 0, + MMU_NOTIFY_CLEAR = 1, + MMU_NOTIFY_PROTECTION_VMA = 2, + MMU_NOTIFY_PROTECTION_PAGE = 3, + MMU_NOTIFY_SOFT_DIRTY = 4, + MMU_NOTIFY_RELEASE = 5, + MMU_NOTIFY_MIGRATE = 6, +}; + +struct mmu_notifier; + +struct mmu_notifier_range; + +struct mmu_notifier_ops { + void (*release)(struct mmu_notifier *, struct mm_struct *); + int (*clear_flush_young)(struct mmu_notifier *, struct mm_struct *, long unsigned int, long unsigned int); + int (*clear_young)(struct mmu_notifier *, struct mm_struct *, long unsigned int, long unsigned int); + int (*test_young)(struct mmu_notifier *, struct mm_struct *, long unsigned int); + void (*change_pte)(struct mmu_notifier *, struct mm_struct *, long unsigned int, pte_t); + int (*invalidate_range_start)(struct mmu_notifier *, const struct mmu_notifier_range *); + void (*invalidate_range_end)(struct mmu_notifier *, const struct mmu_notifier_range *); + void (*invalidate_range)(struct mmu_notifier *, struct mm_struct *, long unsigned int, long unsigned int); + struct mmu_notifier * (*alloc_notifier)(struct mm_struct *); + void (*free_notifier)(struct mmu_notifier *); +}; + +struct mmu_notifier { + struct hlist_node hlist; + const struct mmu_notifier_ops *ops; + struct mm_struct *mm; + struct callback_head rcu; + unsigned int users; +}; + +struct mmu_notifier_range { + struct vm_area_struct *vma; + struct mm_struct *mm; + long unsigned int start; + long unsigned int end; + unsigned int flags; + enum mmu_notifier_event event; + void *migrate_pgmap_owner; +}; + +enum sgx_page_type { + SGX_PAGE_TYPE_SECS = 0, + SGX_PAGE_TYPE_TCS = 1, + SGX_PAGE_TYPE_REG = 2, + SGX_PAGE_TYPE_VA = 3, + SGX_PAGE_TYPE_TRIM = 4, +}; + +struct sgx_encl_page; + +struct sgx_epc_page { + unsigned int section; + unsigned int flags; + struct sgx_encl_page *owner; + struct list_head list; +}; + +struct sgx_encl; + +struct sgx_va_page; + +struct sgx_encl_page { + long unsigned int desc; + long unsigned int vm_max_prot_bits; + struct sgx_epc_page *epc_page; + struct sgx_encl *encl; + struct sgx_va_page *va_page; +}; + +struct sgx_encl { + long unsigned int base; + long unsigned int size; + long unsigned int flags; + unsigned int page_cnt; + unsigned int secs_child_cnt; + struct mutex lock; + struct xarray page_array; + struct sgx_encl_page secs; + long unsigned int attributes; + long unsigned int attributes_mask; + cpumask_t cpumask; + struct file *backing; + struct kref refcount; + struct list_head va_pages; + long unsigned int mm_list_version; + struct list_head mm_list; + spinlock_t mm_lock; + struct srcu_struct srcu; +}; + +struct sgx_va_page { + struct sgx_epc_page *epc_page; + long unsigned int slots[8]; + struct list_head list; +}; + +struct sgx_encl_mm { + struct sgx_encl *encl; + struct mm_struct *mm; + struct list_head list; + struct mmu_notifier mmu_notifier; +}; + +typedef unsigned int xa_mark_t; + +struct xa_node { + unsigned char shift; + unsigned 
char offset; + unsigned char count; + unsigned char nr_values; + struct xa_node *parent; + struct xarray *array; + union { + struct list_head private_list; + struct callback_head callback_head; + }; + void *slots[64]; + union { + long unsigned int tags[3]; + long unsigned int marks[3]; + }; +}; + +typedef void (*xa_update_node_t)(struct xa_node *); + +struct xa_state { + struct xarray *xa; + long unsigned int xa_index; + unsigned char xa_shift; + unsigned char xa_sibs; + unsigned char xa_offset; + unsigned char xa_pad; + struct xa_node *xa_node; + struct xa_node *xa_alloc; + xa_update_node_t xa_update; +}; + +enum { + XA_CHECK_SCHED = 4096, +}; + +enum sgx_encls_function { + ECREATE = 0, + EADD = 1, + EINIT = 2, + EREMOVE = 3, + EDGBRD = 4, + EDGBWR = 5, + EEXTEND = 6, + ELDU = 8, + EBLOCK = 9, + EPA = 10, + EWB = 11, + ETRACK = 12, + EAUG = 13, + EMODPR = 14, + EMODT = 15, +}; + +struct sgx_pageinfo { + u64 addr; + u64 contents; + u64 metadata; + u64 secs; +}; + +struct sgx_numa_node { + struct list_head free_page_list; + spinlock_t lock; +}; + +struct sgx_epc_section { + long unsigned int phys_addr; + void *virt_addr; + struct sgx_epc_page *pages; + struct sgx_numa_node *node; +}; + +enum sgx_encl_flags { + SGX_ENCL_IOCTL = 1, + SGX_ENCL_DEBUG = 2, + SGX_ENCL_CREATED = 4, + SGX_ENCL_INITIALIZED = 8, +}; + +struct sgx_backing { + long unsigned int page_index; + struct page *contents; + struct page *pcmd; + long unsigned int pcmd_offset; +}; + +enum sgx_return_code { + SGX_NOT_TRACKED = 11, + SGX_CHILD_PRESENT = 13, + SGX_INVALID_EINITTOKEN = 16, + SGX_UNMASKED_EVENT = 128, +}; + +enum sgx_attribute { + SGX_ATTR_INIT = 1, + SGX_ATTR_DEBUG = 2, + SGX_ATTR_MODE64BIT = 4, + SGX_ATTR_PROVISIONKEY = 16, + SGX_ATTR_EINITTOKENKEY = 32, + SGX_ATTR_KSS = 128, +}; + +struct sgx_secs { + u64 size; + u64 base; + u32 ssa_frame_size; + u32 miscselect; + u8 reserved1[24]; + u64 attributes; + u64 xfrm; + u32 mrenclave[8]; + u8 reserved2[32]; + u32 mrsigner[8]; + u8 reserved3[32]; + u32 config_id[16]; + u16 isv_prod_id; + u16 isv_svn; + u16 config_svn; + u8 reserved4[3834]; +}; + +enum sgx_secinfo_flags { + SGX_SECINFO_R = 1, + SGX_SECINFO_W = 2, + SGX_SECINFO_X = 4, + SGX_SECINFO_SECS = 0, + SGX_SECINFO_TCS = 256, + SGX_SECINFO_REG = 512, + SGX_SECINFO_VA = 768, + SGX_SECINFO_TRIM = 1024, +}; + +struct sgx_secinfo { + u64 flags; + u8 reserved[56]; +}; + +struct sgx_sigstruct_header { + u64 header1[2]; + u32 vendor; + u32 date; + u64 header2[2]; + u32 swdefined; + u8 reserved1[84]; +}; + +struct sgx_sigstruct_body { + u32 miscselect; + u32 misc_mask; + u8 reserved2[20]; + u64 attributes; + u64 xfrm; + u64 attributes_mask; + u64 xfrm_mask; + u8 mrenclave[32]; + u8 reserved3[32]; + u16 isvprodid; + u16 isvsvn; +} __attribute__((packed)); + +struct sgx_sigstruct { + struct sgx_sigstruct_header header; + u8 modulus[384]; + u32 exponent; + u8 signature[384]; + struct sgx_sigstruct_body body; + u8 reserved4[12]; + u8 q1[384]; + u8 q2[384]; +} __attribute__((packed)); + +struct crypto_alg; + +struct crypto_tfm { + u32 crt_flags; + int node; + void (*exit)(struct crypto_tfm *); + struct crypto_alg *__crt_alg; + void *__crt_ctx[0]; +}; + +struct cipher_alg { + unsigned int cia_min_keysize; + unsigned int cia_max_keysize; + int (*cia_setkey)(struct crypto_tfm *, const u8 *, unsigned int); + void (*cia_encrypt)(struct crypto_tfm *, u8 *, const u8 *); + void (*cia_decrypt)(struct crypto_tfm *, u8 *, const u8 *); +}; + +struct compress_alg { + int (*coa_compress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, 
unsigned int *); + int (*coa_decompress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, unsigned int *); +}; + +struct crypto_istat_aead { + atomic64_t encrypt_cnt; + atomic64_t encrypt_tlen; + atomic64_t decrypt_cnt; + atomic64_t decrypt_tlen; + atomic64_t err_cnt; +}; + +struct crypto_istat_akcipher { + atomic64_t encrypt_cnt; + atomic64_t encrypt_tlen; + atomic64_t decrypt_cnt; + atomic64_t decrypt_tlen; + atomic64_t verify_cnt; + atomic64_t sign_cnt; + atomic64_t err_cnt; +}; + +struct crypto_istat_cipher { + atomic64_t encrypt_cnt; + atomic64_t encrypt_tlen; + atomic64_t decrypt_cnt; + atomic64_t decrypt_tlen; + atomic64_t err_cnt; +}; + +struct crypto_istat_compress { + atomic64_t compress_cnt; + atomic64_t compress_tlen; + atomic64_t decompress_cnt; + atomic64_t decompress_tlen; + atomic64_t err_cnt; +}; + +struct crypto_istat_hash { + atomic64_t hash_cnt; + atomic64_t hash_tlen; + atomic64_t err_cnt; +}; + +struct crypto_istat_kpp { + atomic64_t setsecret_cnt; + atomic64_t generate_public_key_cnt; + atomic64_t compute_shared_secret_cnt; + atomic64_t err_cnt; +}; + +struct crypto_istat_rng { + atomic64_t generate_cnt; + atomic64_t generate_tlen; + atomic64_t seed_cnt; + atomic64_t err_cnt; +}; + +struct crypto_type; + +struct crypto_alg { + struct list_head cra_list; + struct list_head cra_users; + u32 cra_flags; + unsigned int cra_blocksize; + unsigned int cra_ctxsize; + unsigned int cra_alignmask; + int cra_priority; + refcount_t cra_refcnt; + char cra_name[128]; + char cra_driver_name[128]; + const struct crypto_type *cra_type; + union { + struct cipher_alg cipher; + struct compress_alg compress; + } cra_u; + int (*cra_init)(struct crypto_tfm *); + void (*cra_exit)(struct crypto_tfm *); + void (*cra_destroy)(struct crypto_alg *); + struct module *cra_module; + union { + struct crypto_istat_aead aead; + struct crypto_istat_akcipher akcipher; + struct crypto_istat_cipher cipher; + struct crypto_istat_compress compress; + struct crypto_istat_hash hash; + struct crypto_istat_rng rng; + struct crypto_istat_kpp kpp; + } stats; +}; + +struct crypto_instance; + +struct crypto_type { + unsigned int (*ctxsize)(struct crypto_alg *, u32, u32); + unsigned int (*extsize)(struct crypto_alg *); + int (*init)(struct crypto_tfm *, u32, u32); + int (*init_tfm)(struct crypto_tfm *); + void (*show)(struct seq_file *, struct crypto_alg *); + int (*report)(struct sk_buff *, struct crypto_alg *); + void (*free)(struct crypto_instance *); + unsigned int type; + unsigned int maskclear; + unsigned int maskset; + unsigned int tfmsize; +}; + +struct crypto_shash; + +struct shash_desc { + struct crypto_shash *tfm; + void *__ctx[0]; +}; + +struct crypto_shash { + unsigned int descsize; + struct crypto_tfm base; +}; + +enum sgx_page_flags { + SGX_PAGE_MEASURE = 1, +}; + +struct sgx_enclave_create { + __u64 src; +}; + +struct sgx_enclave_add_pages { + __u64 src; + __u64 offset; + __u64 length; + __u64 secinfo; + __u64 flags; + __u64 count; +}; + +struct sgx_enclave_init { + __u64 sigstruct; +}; + +struct sgx_enclave_provision { + __u64 fd; +}; + +struct sgx_vepc { + struct xarray page_array; + struct mutex lock; +}; + +struct vmcb_seg { + u16 selector; + u16 attrib; + u32 limit; + u64 base; +}; + +struct vmcb_save_area { + struct vmcb_seg es; + struct vmcb_seg cs; + struct vmcb_seg ss; + struct vmcb_seg ds; + struct vmcb_seg fs; + struct vmcb_seg gs; + struct vmcb_seg gdtr; + struct vmcb_seg ldtr; + struct vmcb_seg idtr; + struct vmcb_seg tr; + u8 reserved_1[43]; + u8 cpl; + u8 reserved_2[4]; + u64 efer; + 
u8 reserved_3[104]; + u64 xss; + u64 cr4; + u64 cr3; + u64 cr0; + u64 dr7; + u64 dr6; + u64 rflags; + u64 rip; + u8 reserved_4[88]; + u64 rsp; + u8 reserved_5[24]; + u64 rax; + u64 star; + u64 lstar; + u64 cstar; + u64 sfmask; + u64 kernel_gs_base; + u64 sysenter_cs; + u64 sysenter_esp; + u64 sysenter_eip; + u64 cr2; + u8 reserved_6[32]; + u64 g_pat; + u64 dbgctl; + u64 br_from; + u64 br_to; + u64 last_excp_from; + u64 last_excp_to; + u8 reserved_7[72]; + u32 spec_ctrl; + u8 reserved_7b[4]; + u32 pkru; + u8 reserved_7a[20]; + u64 reserved_8; + u64 rcx; + u64 rdx; + u64 rbx; + u64 reserved_9; + u64 rbp; + u64 rsi; + u64 rdi; + u64 r8; + u64 r9; + u64 r10; + u64 r11; + u64 r12; + u64 r13; + u64 r14; + u64 r15; + u8 reserved_10[16]; + u64 sw_exit_code; + u64 sw_exit_info_1; + u64 sw_exit_info_2; + u64 sw_scratch; + u8 reserved_11[56]; + u64 xcr0; + u8 valid_bitmap[16]; + u64 x87_state_gpa; +}; + +struct ghcb { + struct vmcb_save_area save; + u8 reserved_save[1016]; + u8 shared_buffer[2032]; + u8 reserved_1[10]; + u16 protocol_version; + u32 ghcb_usage; +}; + +enum intercept_words { + INTERCEPT_CR = 0, + INTERCEPT_DR = 1, + INTERCEPT_EXCEPTION = 2, + INTERCEPT_WORD3 = 3, + INTERCEPT_WORD4 = 4, + INTERCEPT_WORD5 = 5, + MAX_INTERCEPT = 6, +}; + +struct vmware_steal_time { + union { + uint64_t clock; + struct { + uint32_t clock_low; + uint32_t clock_high; + }; + }; + uint64_t reserved[7]; +}; + +struct mpc_intsrc { + unsigned char type; + unsigned char irqtype; + short unsigned int irqflag; + unsigned char srcbus; + unsigned char srcbusirq; + unsigned char dstapic; + unsigned char dstirq; +}; + +enum mp_irq_source_types { + mp_INT = 0, + mp_NMI = 1, + mp_SMI = 2, + mp_ExtINT = 3, +}; + +enum mp_bustype { + MP_BUS_ISA = 1, + MP_BUS_EISA = 2, + MP_BUS_PCI = 3, +}; + +typedef u64 acpi_io_address; + +typedef u64 acpi_physical_address; + +typedef char *acpi_string; + +typedef void *acpi_handle; + +typedef u32 acpi_object_type; + +typedef u8 acpi_adr_space_type; + +union acpi_object { + acpi_object_type type; + struct { + acpi_object_type type; + u64 value; + } integer; + struct { + acpi_object_type type; + u32 length; + char *pointer; + } string; + struct { + acpi_object_type type; + u32 length; + u8 *pointer; + } buffer; + struct { + acpi_object_type type; + u32 count; + union acpi_object *elements; + } package; + struct { + acpi_object_type type; + acpi_object_type actual_type; + acpi_handle handle; + } reference; + struct { + acpi_object_type type; + u32 proc_id; + acpi_io_address pblk_address; + u32 pblk_length; + } processor; + struct { + acpi_object_type type; + u32 system_level; + u32 resource_order; + } power_resource; +}; + +struct acpi_object_list { + u32 count; + union acpi_object *pointer; +}; + +struct acpi_subtable_header { + u8 type; + u8 length; +}; + +struct acpi_table_boot { + struct acpi_table_header header; + u8 cmos_index; + u8 reserved[3]; +}; + +struct acpi_hmat_structure { + u16 type; + u16 reserved; + u32 length; +}; + +struct acpi_table_hpet { + struct acpi_table_header header; + u32 id; + struct acpi_generic_address address; + u8 sequence; + u16 minimum_tick; + u8 flags; +} __attribute__((packed)); + +struct acpi_table_madt { + struct acpi_table_header header; + u32 address; + u32 flags; +}; + +enum acpi_madt_type { + ACPI_MADT_TYPE_LOCAL_APIC = 0, + ACPI_MADT_TYPE_IO_APIC = 1, + ACPI_MADT_TYPE_INTERRUPT_OVERRIDE = 2, + ACPI_MADT_TYPE_NMI_SOURCE = 3, + ACPI_MADT_TYPE_LOCAL_APIC_NMI = 4, + ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE = 5, + ACPI_MADT_TYPE_IO_SAPIC = 6, + 
ACPI_MADT_TYPE_LOCAL_SAPIC = 7, + ACPI_MADT_TYPE_INTERRUPT_SOURCE = 8, + ACPI_MADT_TYPE_LOCAL_X2APIC = 9, + ACPI_MADT_TYPE_LOCAL_X2APIC_NMI = 10, + ACPI_MADT_TYPE_GENERIC_INTERRUPT = 11, + ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR = 12, + ACPI_MADT_TYPE_GENERIC_MSI_FRAME = 13, + ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR = 14, + ACPI_MADT_TYPE_GENERIC_TRANSLATOR = 15, + ACPI_MADT_TYPE_MULTIPROC_WAKEUP = 16, + ACPI_MADT_TYPE_RESERVED = 17, +}; + +struct acpi_madt_local_apic { + struct acpi_subtable_header header; + u8 processor_id; + u8 id; + u32 lapic_flags; +}; + +struct acpi_madt_io_apic { + struct acpi_subtable_header header; + u8 id; + u8 reserved; + u32 address; + u32 global_irq_base; +}; + +struct acpi_madt_interrupt_override { + struct acpi_subtable_header header; + u8 bus; + u8 source_irq; + u32 global_irq; + u16 inti_flags; +} __attribute__((packed)); + +struct acpi_madt_nmi_source { + struct acpi_subtable_header header; + u16 inti_flags; + u32 global_irq; +}; + +struct acpi_madt_local_apic_nmi { + struct acpi_subtable_header header; + u8 processor_id; + u16 inti_flags; + u8 lint; +} __attribute__((packed)); + +struct acpi_madt_local_apic_override { + struct acpi_subtable_header header; + u16 reserved; + u64 address; +} __attribute__((packed)); + +struct acpi_madt_local_sapic { + struct acpi_subtable_header header; + u8 processor_id; + u8 id; + u8 eid; + u8 reserved[3]; + u32 lapic_flags; + u32 uid; + char uid_string[1]; +} __attribute__((packed)); + +struct acpi_madt_local_x2apic { + struct acpi_subtable_header header; + u16 reserved; + u32 local_apic_id; + u32 lapic_flags; + u32 uid; +}; + +struct acpi_madt_local_x2apic_nmi { + struct acpi_subtable_header header; + u16 inti_flags; + u32 uid; + u8 lint; + u8 reserved[3]; +}; + +union acpi_subtable_headers { + struct acpi_subtable_header common; + struct acpi_hmat_structure hmat; +}; + +typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *); + +typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *, const long unsigned int); + +struct acpi_subtable_proc { + int id; + acpi_tbl_entry_handler handler; + int count; +}; + +typedef u32 phys_cpuid_t; + +struct serial_icounter_struct { + int cts; + int dsr; + int rng; + int dcd; + int rx; + int tx; + int frame; + int overrun; + int parity; + int brk; + int buf_overrun; + int reserved[9]; +}; + +struct serial_struct { + int type; + int line; + unsigned int port; + int irq; + int flags; + int xmit_fifo_size; + int custom_divisor; + int baud_base; + short unsigned int close_delay; + char io_type; + char reserved_char[1]; + int hub6; + short unsigned int closing_wait; + short unsigned int closing_wait2; + unsigned char *iomem_base; + short unsigned int iomem_reg_shift; + unsigned int port_high; + long unsigned int iomap_base; +}; + +enum ioapic_domain_type { + IOAPIC_DOMAIN_INVALID = 0, + IOAPIC_DOMAIN_LEGACY = 1, + IOAPIC_DOMAIN_STRICT = 2, + IOAPIC_DOMAIN_DYNAMIC = 3, +}; + +struct ioapic_domain_cfg { + enum ioapic_domain_type type; + const struct irq_domain_ops *ops; + struct device_node *dev; +}; + +struct wakeup_header { + u16 video_mode; + u32 pmode_entry; + u16 pmode_cs; + u32 pmode_cr0; + u32 pmode_cr3; + u32 pmode_cr4; + u32 pmode_efer_low; + u32 pmode_efer_high; + u64 pmode_gdt; + u32 pmode_misc_en_low; + u32 pmode_misc_en_high; + u32 pmode_behavior; + u32 realmode_flags; + u32 real_magic; + u32 signature; +} __attribute__((packed)); + +struct acpi_hest_header { + u16 type; + u16 source_id; +}; + +struct acpi_hest_ia_error_bank { + u8 bank_number; + u8 
clear_status_on_init; + u8 status_format; + u8 reserved; + u32 control_register; + u64 control_data; + u32 status_register; + u32 address_register; + u32 misc_register; +} __attribute__((packed)); + +struct acpi_hest_notify { + u8 type; + u8 length; + u16 config_write_enable; + u32 poll_interval; + u32 vector; + u32 polling_threshold_value; + u32 polling_threshold_window; + u32 error_threshold_value; + u32 error_threshold_window; +}; + +struct acpi_hest_ia_corrected { + struct acpi_hest_header header; + u16 reserved1; + u8 flags; + u8 enabled; + u32 records_to_preallocate; + u32 max_sections_per_record; + struct acpi_hest_notify notify; + u8 num_hardware_banks; + u8 reserved2[3]; +}; + +struct cpc_reg { + u8 descriptor; + u16 length; + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 access_width; + u64 address; +} __attribute__((packed)); + +struct acpi_power_register { + u8 descriptor; + u16 length; + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 access_size; + u64 address; +} __attribute__((packed)); + +struct acpi_processor_cx { + u8 valid; + u8 type; + u32 address; + u8 entry_method; + u8 index; + u32 latency; + u8 bm_sts_skip; + char desc[32]; +}; + +struct acpi_processor_flags { + u8 power: 1; + u8 performance: 1; + u8 throttling: 1; + u8 limit: 1; + u8 bm_control: 1; + u8 bm_check: 1; + u8 has_cst: 1; + u8 has_lpi: 1; + u8 power_setup_done: 1; + u8 bm_rld_set: 1; + u8 need_hotplug_init: 1; +}; + +struct cstate_entry { + struct { + unsigned int eax; + unsigned int ecx; + } states[8]; +}; + +enum reboot_mode { + REBOOT_UNDEFINED = 4294967295, + REBOOT_COLD = 0, + REBOOT_WARM = 1, + REBOOT_HARD = 2, + REBOOT_SOFT = 3, + REBOOT_GPIO = 4, +}; + +enum reboot_type { + BOOT_TRIPLE = 116, + BOOT_KBD = 107, + BOOT_BIOS = 98, + BOOT_ACPI = 97, + BOOT_EFI = 101, + BOOT_CF9_FORCE = 112, + BOOT_CF9_SAFE = 113, +}; + +typedef void (*nmi_shootdown_cb)(int, struct pt_regs *); + +struct intel_early_ops { + resource_size_t (*stolen_size)(int, int, int); + resource_size_t (*stolen_base)(int, int, int, resource_size_t); +}; + +struct chipset { + u32 vendor; + u32 device; + u32 class; + u32 class_mask; + u32 flags; + void (*f)(int, int, int); +}; + +enum { + SD_BALANCE_NEWIDLE = 1, + SD_BALANCE_EXEC = 2, + SD_BALANCE_FORK = 4, + SD_BALANCE_WAKE = 8, + SD_WAKE_AFFINE = 16, + SD_ASYM_CPUCAPACITY = 32, + SD_SHARE_CPUCAPACITY = 64, + SD_SHARE_PKG_RESOURCES = 128, + SD_SERIALIZE = 256, + SD_ASYM_PACKING = 512, + SD_PREFER_SIBLING = 1024, + SD_OVERLAP = 2048, + SD_NUMA = 4096, +}; + +struct sched_domain_shared { + atomic_t ref; + atomic_t nr_busy_cpus; + int has_idle_cores; +}; + +struct sched_group; + +struct sched_domain { + struct sched_domain *parent; + struct sched_domain *child; + struct sched_group *groups; + long unsigned int min_interval; + long unsigned int max_interval; + unsigned int busy_factor; + unsigned int imbalance_pct; + unsigned int cache_nice_tries; + int nohz_idle; + int flags; + int level; + long unsigned int last_balance; + unsigned int balance_interval; + unsigned int nr_balance_failed; + u64 max_newidle_lb_cost; + long unsigned int next_decay_max_lb_cost; + u64 avg_scan_cost; + unsigned int lb_count[3]; + unsigned int lb_failed[3]; + unsigned int lb_balanced[3]; + unsigned int lb_imbalance[3]; + unsigned int lb_gained[3]; + unsigned int lb_hot_gained[3]; + unsigned int lb_nobusyg[3]; + unsigned int lb_nobusyq[3]; + unsigned int alb_count; + unsigned int alb_failed; + unsigned int alb_pushed; + unsigned int sbe_count; + unsigned int sbe_balanced; + unsigned int sbe_pushed; + 
unsigned int sbf_count; + unsigned int sbf_balanced; + unsigned int sbf_pushed; + unsigned int ttwu_wake_remote; + unsigned int ttwu_move_affine; + unsigned int ttwu_move_balance; + char *name; + union { + void *private; + struct callback_head rcu; + }; + struct sched_domain_shared *shared; + unsigned int span_weight; + long unsigned int span[0]; +}; + +typedef const struct cpumask * (*sched_domain_mask_f)(int); + +typedef int (*sched_domain_flags_f)(); + +struct sched_group_capacity; + +struct sd_data { + struct sched_domain **sd; + struct sched_domain_shared **sds; + struct sched_group **sg; + struct sched_group_capacity **sgc; +}; + +struct sched_domain_topology_level { + sched_domain_mask_f mask; + sched_domain_flags_f sd_flags; + int flags; + int numa_level; + struct sd_data data; + char *name; +}; + +enum apic_intr_mode_id { + APIC_PIC = 0, + APIC_VIRTUAL_WIRE = 1, + APIC_VIRTUAL_WIRE_NO_CONFIG = 2, + APIC_SYMMETRIC_IO = 3, + APIC_SYMMETRIC_IO_NO_ROUTING = 4, +}; + +struct cppc_perf_caps { + u32 guaranteed_perf; + u32 highest_perf; + u32 nominal_perf; + u32 lowest_perf; + u32 lowest_nonlinear_perf; + u32 lowest_freq; + u32 nominal_freq; +}; + +struct tsc_adjust { + s64 bootval; + s64 adjusted; + long unsigned int nextcheck; + bool warned; +}; + +typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int, size_t, size_t); + +typedef void (*pcpu_fc_free_fn_t)(void *, size_t); + +typedef void (*pcpu_fc_populate_pte_fn_t)(long unsigned int); + +typedef int pcpu_fc_cpu_distance_fn_t(unsigned int, unsigned int); + +enum { + DUMP_PREFIX_NONE = 0, + DUMP_PREFIX_ADDRESS = 1, + DUMP_PREFIX_OFFSET = 2, +}; + +struct mpf_intel { + char signature[4]; + unsigned int physptr; + unsigned char length; + unsigned char specification; + unsigned char checksum; + unsigned char feature1; + unsigned char feature2; + unsigned char feature3; + unsigned char feature4; + unsigned char feature5; +}; + +struct mpc_table { + char signature[4]; + short unsigned int length; + char spec; + char checksum; + char oem[8]; + char productid[12]; + unsigned int oemptr; + short unsigned int oemsize; + short unsigned int oemcount; + unsigned int lapic; + unsigned int reserved; +}; + +struct mpc_cpu { + unsigned char type; + unsigned char apicid; + unsigned char apicver; + unsigned char cpuflag; + unsigned int cpufeature; + unsigned int featureflag; + unsigned int reserved[2]; +}; + +struct mpc_bus { + unsigned char type; + unsigned char busid; + unsigned char bustype[6]; +}; + +struct mpc_ioapic { + unsigned char type; + unsigned char apicid; + unsigned char apicver; + unsigned char flags; + unsigned int apicaddr; +}; + +struct mpc_lintsrc { + unsigned char type; + unsigned char irqtype; + short unsigned int irqflag; + unsigned char srcbusid; + unsigned char srcbusirq; + unsigned char destapic; + unsigned char destapiclint; +}; + +enum page_cache_mode { + _PAGE_CACHE_MODE_WB = 0, + _PAGE_CACHE_MODE_WC = 1, + _PAGE_CACHE_MODE_UC_MINUS = 2, + _PAGE_CACHE_MODE_UC = 3, + _PAGE_CACHE_MODE_WT = 4, + _PAGE_CACHE_MODE_WP = 5, + _PAGE_CACHE_MODE_NUM = 8, +}; + +enum { + IRQ_REMAP_XAPIC_MODE = 0, + IRQ_REMAP_X2APIC_MODE = 1, +}; + +union apic_ir { + long unsigned int map[4]; + u32 regs[8]; +}; + +enum { + X2APIC_OFF = 0, + X2APIC_ON = 1, + X2APIC_DISABLED = 2, +}; + +enum { + IRQ_SET_MASK_OK = 0, + IRQ_SET_MASK_OK_NOCOPY = 1, + IRQ_SET_MASK_OK_DONE = 2, +}; + +enum { + IRQD_TRIGGER_MASK = 15, + IRQD_SETAFFINITY_PENDING = 256, + IRQD_ACTIVATED = 512, + IRQD_NO_BALANCING = 1024, + IRQD_PER_CPU = 2048, + IRQD_AFFINITY_SET = 4096, + IRQD_LEVEL = 
8192, + IRQD_WAKEUP_STATE = 16384, + IRQD_MOVE_PCNTXT = 32768, + IRQD_IRQ_DISABLED = 65536, + IRQD_IRQ_MASKED = 131072, + IRQD_IRQ_INPROGRESS = 262144, + IRQD_WAKEUP_ARMED = 524288, + IRQD_FORWARDED_TO_VCPU = 1048576, + IRQD_AFFINITY_MANAGED = 2097152, + IRQD_IRQ_STARTED = 4194304, + IRQD_MANAGED_SHUTDOWN = 8388608, + IRQD_SINGLE_TARGET = 16777216, + IRQD_DEFAULT_TRIGGER_SET = 33554432, + IRQD_CAN_RESERVE = 67108864, + IRQD_MSI_NOMASK_QUIRK = 134217728, + IRQD_HANDLE_ENFORCE_IRQCTX = 268435456, + IRQD_AFFINITY_ON_ACTIVATE = 536870912, + IRQD_IRQ_ENABLED_ON_SUSPEND = 1073741824, +}; + +enum { + X86_IRQ_ALLOC_CONTIGUOUS_VECTORS = 1, + X86_IRQ_ALLOC_LEGACY = 2, +}; + +struct apic_chip_data { + struct irq_cfg hw_irq_cfg; + unsigned int vector; + unsigned int prev_vector; + unsigned int cpu; + unsigned int prev_cpu; + unsigned int irq; + struct hlist_node clist; + unsigned int move_in_progress: 1; + unsigned int is_managed: 1; + unsigned int can_reserve: 1; + unsigned int has_reserved: 1; +}; + +struct irq_matrix; + +enum { + IRQ_TYPE_NONE = 0, + IRQ_TYPE_EDGE_RISING = 1, + IRQ_TYPE_EDGE_FALLING = 2, + IRQ_TYPE_EDGE_BOTH = 3, + IRQ_TYPE_LEVEL_HIGH = 4, + IRQ_TYPE_LEVEL_LOW = 8, + IRQ_TYPE_LEVEL_MASK = 12, + IRQ_TYPE_SENSE_MASK = 15, + IRQ_TYPE_DEFAULT = 15, + IRQ_TYPE_PROBE = 16, + IRQ_LEVEL = 256, + IRQ_PER_CPU = 512, + IRQ_NOPROBE = 1024, + IRQ_NOREQUEST = 2048, + IRQ_NOAUTOEN = 4096, + IRQ_NO_BALANCING = 8192, + IRQ_MOVE_PCNTXT = 16384, + IRQ_NESTED_THREAD = 32768, + IRQ_NOTHREAD = 65536, + IRQ_PER_CPU_DEVID = 131072, + IRQ_IS_POLLED = 262144, + IRQ_DISABLE_UNLAZY = 524288, + IRQ_HIDDEN = 1048576, +}; + +struct clock_event_device___2; + +union IO_APIC_reg_00 { + u32 raw; + struct { + u32 __reserved_2: 14; + u32 LTS: 1; + u32 delivery_type: 1; + u32 __reserved_1: 8; + u32 ID: 8; + } bits; +}; + +union IO_APIC_reg_01 { + u32 raw; + struct { + u32 version: 8; + u32 __reserved_2: 7; + u32 PRQ: 1; + u32 entries: 8; + u32 __reserved_1: 8; + } bits; +}; + +union IO_APIC_reg_02 { + u32 raw; + struct { + u32 __reserved_2: 24; + u32 arbitration: 4; + u32 __reserved_1: 4; + } bits; +}; + +union IO_APIC_reg_03 { + u32 raw; + struct { + u32 boot_DT: 1; + u32 __reserved_1: 31; + } bits; +}; + +struct IO_APIC_route_entry { + union { + struct { + u64 vector: 8; + u64 delivery_mode: 3; + u64 dest_mode_logical: 1; + u64 delivery_status: 1; + u64 active_low: 1; + u64 irr: 1; + u64 is_level: 1; + u64 masked: 1; + u64 reserved_0: 15; + u64 reserved_1: 17; + u64 virt_destid_8_14: 7; + u64 destid_0_7: 8; + }; + struct { + u64 ir_shared_0: 8; + u64 ir_zero: 3; + u64 ir_index_15: 1; + u64 ir_shared_1: 5; + u64 ir_reserved_0: 31; + u64 ir_format: 1; + u64 ir_index_0_14: 15; + }; + struct { + u64 w1: 32; + u64 w2: 32; + }; + }; +}; + +struct irq_pin_list { + struct list_head list; + int apic; + int pin; +}; + +struct mp_chip_data { + struct list_head irq_2_pin; + struct IO_APIC_route_entry entry; + bool is_level; + bool active_low; + bool isa_irq; + u32 count; +}; + +struct mp_ioapic_gsi { + u32 gsi_base; + u32 gsi_end; +}; + +struct ioapic { + int nr_registers; + struct IO_APIC_route_entry *saved_registers; + struct mpc_ioapic mp_config; + struct mp_ioapic_gsi gsi_config; + struct ioapic_domain_cfg irqdomain_cfg; + struct irq_domain *irqdomain; + struct resource *iomem_res; +}; + +struct io_apic { + unsigned int index; + unsigned int unused[3]; + unsigned int data; + unsigned int unused2[11]; + unsigned int eoi; +}; + +enum { + IRQ_DOMAIN_FLAG_HIERARCHY = 1, + IRQ_DOMAIN_NAME_ALLOCATED = 2, + 
IRQ_DOMAIN_FLAG_IPI_PER_CPU = 4, + IRQ_DOMAIN_FLAG_IPI_SINGLE = 8, + IRQ_DOMAIN_FLAG_MSI = 16, + IRQ_DOMAIN_FLAG_MSI_REMAP = 32, + IRQ_DOMAIN_MSI_NOMASK_QUIRK = 64, + IRQ_DOMAIN_FLAG_NONCORE = 65536, +}; + +typedef int (*arch_set_vga_state_t)(struct pci_dev *, bool, unsigned int, u32); + +struct acpi_device_status { + u32 present: 1; + u32 enabled: 1; + u32 show_in_ui: 1; + u32 functional: 1; + u32 battery_present: 1; + u32 reserved: 27; +}; + +struct acpi_device_flags { + u32 dynamic_status: 1; + u32 removable: 1; + u32 ejectable: 1; + u32 power_manageable: 1; + u32 match_driver: 1; + u32 initialized: 1; + u32 visited: 1; + u32 hotplug_notify: 1; + u32 is_dock_station: 1; + u32 of_compatible_ok: 1; + u32 coherent_dma: 1; + u32 cca_seen: 1; + u32 enumeration_by_parent: 1; + u32 reserved: 19; +}; + +typedef char acpi_bus_id[8]; + +struct acpi_pnp_type { + u32 hardware_id: 1; + u32 bus_address: 1; + u32 platform_id: 1; + u32 reserved: 29; +}; + +typedef u64 acpi_bus_address; + +typedef char acpi_device_name[40]; + +typedef char acpi_device_class[20]; + +struct acpi_device_pnp { + acpi_bus_id bus_id; + int instance_no; + struct acpi_pnp_type type; + acpi_bus_address bus_address; + char *unique_id; + struct list_head ids; + acpi_device_name device_name; + acpi_device_class device_class; + union acpi_object *str_obj; +}; + +struct acpi_device_power_flags { + u32 explicit_get: 1; + u32 power_resources: 1; + u32 inrush_current: 1; + u32 power_removed: 1; + u32 ignore_parent: 1; + u32 dsw_present: 1; + u32 reserved: 26; +}; + +struct acpi_device_power_state { + struct { + u8 valid: 1; + u8 explicit_set: 1; + u8 reserved: 6; + } flags; + int power; + int latency; + struct list_head resources; +}; + +struct acpi_device_power { + int state; + struct acpi_device_power_flags flags; + struct acpi_device_power_state states[5]; +}; + +struct acpi_device_wakeup_flags { + u8 valid: 1; + u8 notifier_present: 1; +}; + +struct acpi_device_wakeup_context { + void (*func)(struct acpi_device_wakeup_context *); + struct device *dev; +}; + +struct acpi_device_wakeup { + acpi_handle gpe_device; + u64 gpe_number; + u64 sleep_state; + struct list_head resources; + struct acpi_device_wakeup_flags flags; + struct acpi_device_wakeup_context context; + struct wakeup_source *ws; + int prepare_count; + int enable_count; +}; + +struct acpi_device_perf_flags { + u8 reserved: 8; +}; + +struct acpi_device_perf_state; + +struct acpi_device_perf { + int state; + struct acpi_device_perf_flags flags; + int state_count; + struct acpi_device_perf_state *states; +}; + +struct acpi_device_dir { + struct proc_dir_entry *entry; +}; + +struct acpi_device_data { + const union acpi_object *pointer; + struct list_head properties; + const union acpi_object *of_compatible; + struct list_head subnodes; +}; + +struct acpi_scan_handler; + +struct acpi_hotplug_context; + +struct acpi_driver; + +struct acpi_gpio_mapping; + +struct acpi_device { + int device_type; + acpi_handle handle; + struct fwnode_handle fwnode; + struct acpi_device *parent; + struct list_head children; + struct list_head node; + struct list_head wakeup_list; + struct list_head del_list; + struct acpi_device_status status; + struct acpi_device_flags flags; + struct acpi_device_pnp pnp; + struct acpi_device_power power; + struct acpi_device_wakeup wakeup; + struct acpi_device_perf performance; + struct acpi_device_dir dir; + struct acpi_device_data data; + struct acpi_scan_handler *handler; + struct acpi_hotplug_context *hp; + struct acpi_driver *driver; + const struct 
acpi_gpio_mapping *driver_gpios; + void *driver_data; + struct device dev; + unsigned int physical_node_count; + unsigned int dep_unmet; + struct list_head physical_node_list; + struct mutex physical_node_lock; + void (*remove)(struct acpi_device *); +}; + +struct acpi_hotplug_profile { + struct kobject kobj; + int (*scan_dependent)(struct acpi_device *); + void (*notify_online)(struct acpi_device *); + bool enabled: 1; + bool demand_offline: 1; +}; + +struct acpi_scan_handler { + const struct acpi_device_id *ids; + struct list_head list_node; + bool (*match)(const char *, const struct acpi_device_id **); + int (*attach)(struct acpi_device *, const struct acpi_device_id *); + void (*detach)(struct acpi_device *); + void (*bind)(struct device *); + void (*unbind)(struct device *); + struct acpi_hotplug_profile hotplug; +}; + +struct acpi_hotplug_context { + struct acpi_device *self; + int (*notify)(struct acpi_device *, u32); + void (*uevent)(struct acpi_device *, u32); + void (*fixup)(struct acpi_device *); +}; + +typedef int (*acpi_op_add)(struct acpi_device *); + +typedef int (*acpi_op_remove)(struct acpi_device *); + +typedef void (*acpi_op_notify)(struct acpi_device *, u32); + +struct acpi_device_ops { + acpi_op_add add; + acpi_op_remove remove; + acpi_op_notify notify; +}; + +struct acpi_driver { + char name[80]; + char class[80]; + const struct acpi_device_id *ids; + unsigned int flags; + struct acpi_device_ops ops; + struct device_driver drv; + struct module *owner; +}; + +struct acpi_device_perf_state { + struct { + u8 valid: 1; + u8 reserved: 7; + } flags; + u8 power; + u8 performance; + int latency; +}; + +struct acpi_gpio_params; + +struct acpi_gpio_mapping { + const char *name; + const struct acpi_gpio_params *data; + unsigned int size; + unsigned int quirks; +}; + +struct acpi_gpio_params { + unsigned int crs_entry_index; + unsigned int line_index; + bool active_low; +}; + +struct uvyh_gr0_gam_gr_config_s { + long unsigned int rsvd_0_9: 10; + long unsigned int subspace: 1; + long unsigned int rsvd_11_63: 53; +}; + +struct uv5h_gr0_gam_gr_config_s { + long unsigned int rsvd_0_9: 10; + long unsigned int subspace: 1; + long unsigned int rsvd_11_63: 53; +}; + +struct uv4h_gr0_gam_gr_config_s { + long unsigned int rsvd_0_9: 10; + long unsigned int subspace: 1; + long unsigned int rsvd_11_63: 53; +}; + +struct uv3h_gr0_gam_gr_config_s { + long unsigned int m_skt: 6; + long unsigned int undef_6_9: 4; + long unsigned int subspace: 1; + long unsigned int reserved: 53; +}; + +struct uv2h_gr0_gam_gr_config_s { + long unsigned int n_gr: 4; + long unsigned int reserved: 60; +}; + +union uvyh_gr0_gam_gr_config_u { + long unsigned int v; + struct uvyh_gr0_gam_gr_config_s sy; + struct uv5h_gr0_gam_gr_config_s s5; + struct uv4h_gr0_gam_gr_config_s s4; + struct uv3h_gr0_gam_gr_config_s s3; + struct uv2h_gr0_gam_gr_config_s s2; +}; + +struct uvh_node_id_s { + long unsigned int force1: 1; + long unsigned int manufacturer: 11; + long unsigned int part_number: 16; + long unsigned int revision: 4; + long unsigned int rsvd_32_63: 32; +}; + +struct uvxh_node_id_s { + long unsigned int force1: 1; + long unsigned int manufacturer: 11; + long unsigned int part_number: 16; + long unsigned int revision: 4; + long unsigned int node_id: 15; + long unsigned int rsvd_47_49: 3; + long unsigned int nodes_per_bit: 7; + long unsigned int ni_port: 5; + long unsigned int rsvd_62_63: 2; +}; + +struct uvyh_node_id_s { + long unsigned int force1: 1; + long unsigned int manufacturer: 11; + long unsigned int part_number: 
16; + long unsigned int revision: 4; + long unsigned int node_id: 7; + long unsigned int rsvd_39_56: 18; + long unsigned int ni_port: 6; + long unsigned int rsvd_63: 1; +}; + +struct uv5h_node_id_s { + long unsigned int force1: 1; + long unsigned int manufacturer: 11; + long unsigned int part_number: 16; + long unsigned int revision: 4; + long unsigned int node_id: 7; + long unsigned int rsvd_39_56: 18; + long unsigned int ni_port: 6; + long unsigned int rsvd_63: 1; +}; + +struct uv4h_node_id_s { + long unsigned int force1: 1; + long unsigned int manufacturer: 11; + long unsigned int part_number: 16; + long unsigned int revision: 4; + long unsigned int node_id: 15; + long unsigned int rsvd_47: 1; + long unsigned int router_select: 1; + long unsigned int rsvd_49: 1; + long unsigned int nodes_per_bit: 7; + long unsigned int ni_port: 5; + long unsigned int rsvd_62_63: 2; +}; + +struct uv3h_node_id_s { + long unsigned int force1: 1; + long unsigned int manufacturer: 11; + long unsigned int part_number: 16; + long unsigned int revision: 4; + long unsigned int node_id: 15; + long unsigned int rsvd_47: 1; + long unsigned int router_select: 1; + long unsigned int rsvd_49: 1; + long unsigned int nodes_per_bit: 7; + long unsigned int ni_port: 5; + long unsigned int rsvd_62_63: 2; +}; + +struct uv2h_node_id_s { + long unsigned int force1: 1; + long unsigned int manufacturer: 11; + long unsigned int part_number: 16; + long unsigned int revision: 4; + long unsigned int node_id: 15; + long unsigned int rsvd_47_49: 3; + long unsigned int nodes_per_bit: 7; + long unsigned int ni_port: 5; + long unsigned int rsvd_62_63: 2; +}; + +union uvh_node_id_u { + long unsigned int v; + struct uvh_node_id_s s; + struct uvxh_node_id_s sx; + struct uvyh_node_id_s sy; + struct uv5h_node_id_s s5; + struct uv4h_node_id_s s4; + struct uv3h_node_id_s s3; + struct uv2h_node_id_s s2; +}; + +struct uvh_rh10_gam_addr_map_config_s { + long unsigned int undef_0_5: 6; + long unsigned int n_skt: 3; + long unsigned int undef_9_11: 3; + long unsigned int ls_enable: 1; + long unsigned int undef_13_15: 3; + long unsigned int mk_tme_keyid_bits: 4; + long unsigned int rsvd_20_63: 44; +}; + +struct uvyh_rh10_gam_addr_map_config_s { + long unsigned int undef_0_5: 6; + long unsigned int n_skt: 3; + long unsigned int undef_9_11: 3; + long unsigned int ls_enable: 1; + long unsigned int undef_13_15: 3; + long unsigned int mk_tme_keyid_bits: 4; + long unsigned int rsvd_20_63: 44; +}; + +struct uv5h_rh10_gam_addr_map_config_s { + long unsigned int undef_0_5: 6; + long unsigned int n_skt: 3; + long unsigned int undef_9_11: 3; + long unsigned int ls_enable: 1; + long unsigned int undef_13_15: 3; + long unsigned int mk_tme_keyid_bits: 4; +}; + +union uvh_rh10_gam_addr_map_config_u { + long unsigned int v; + struct uvh_rh10_gam_addr_map_config_s s; + struct uvyh_rh10_gam_addr_map_config_s sy; + struct uv5h_rh10_gam_addr_map_config_s s5; +}; + +struct uvh_rh10_gam_mmioh_overlay_config0_s { + long unsigned int rsvd_0_25: 26; + long unsigned int base: 26; + long unsigned int m_io: 6; + long unsigned int n_io: 4; + long unsigned int undef_62: 1; + long unsigned int enable: 1; +}; + +struct uvyh_rh10_gam_mmioh_overlay_config0_s { + long unsigned int rsvd_0_25: 26; + long unsigned int base: 26; + long unsigned int m_io: 6; + long unsigned int n_io: 4; + long unsigned int undef_62: 1; + long unsigned int enable: 1; +}; + +struct uv5h_rh10_gam_mmioh_overlay_config0_s { + long unsigned int rsvd_0_25: 26; + long unsigned int base: 26; + long unsigned int 
m_io: 6; + long unsigned int n_io: 4; + long unsigned int undef_62: 1; + long unsigned int enable: 1; +}; + +union uvh_rh10_gam_mmioh_overlay_config0_u { + long unsigned int v; + struct uvh_rh10_gam_mmioh_overlay_config0_s s; + struct uvyh_rh10_gam_mmioh_overlay_config0_s sy; + struct uv5h_rh10_gam_mmioh_overlay_config0_s s5; +}; + +struct uvh_rh10_gam_mmioh_overlay_config1_s { + long unsigned int rsvd_0_25: 26; + long unsigned int base: 26; + long unsigned int m_io: 6; + long unsigned int n_io: 4; + long unsigned int undef_62: 1; + long unsigned int enable: 1; +}; + +struct uvyh_rh10_gam_mmioh_overlay_config1_s { + long unsigned int rsvd_0_25: 26; + long unsigned int base: 26; + long unsigned int m_io: 6; + long unsigned int n_io: 4; + long unsigned int undef_62: 1; + long unsigned int enable: 1; +}; + +struct uv5h_rh10_gam_mmioh_overlay_config1_s { + long unsigned int rsvd_0_25: 26; + long unsigned int base: 26; + long unsigned int m_io: 6; + long unsigned int n_io: 4; + long unsigned int undef_62: 1; + long unsigned int enable: 1; +}; + +union uvh_rh10_gam_mmioh_overlay_config1_u { + long unsigned int v; + struct uvh_rh10_gam_mmioh_overlay_config1_s s; + struct uvyh_rh10_gam_mmioh_overlay_config1_s sy; + struct uv5h_rh10_gam_mmioh_overlay_config1_s s5; +}; + +struct uvh_rh10_gam_mmr_overlay_config_s { + long unsigned int undef_0_24: 25; + long unsigned int base: 27; + long unsigned int undef_52_62: 11; + long unsigned int enable: 1; +}; + +struct uvyh_rh10_gam_mmr_overlay_config_s { + long unsigned int undef_0_24: 25; + long unsigned int base: 27; + long unsigned int undef_52_62: 11; + long unsigned int enable: 1; +}; + +struct uv5h_rh10_gam_mmr_overlay_config_s { + long unsigned int undef_0_24: 25; + long unsigned int base: 27; + long unsigned int undef_52_62: 11; + long unsigned int enable: 1; +}; + +union uvh_rh10_gam_mmr_overlay_config_u { + long unsigned int v; + struct uvh_rh10_gam_mmr_overlay_config_s s; + struct uvyh_rh10_gam_mmr_overlay_config_s sy; + struct uv5h_rh10_gam_mmr_overlay_config_s s5; +}; + +struct uvh_rh_gam_addr_map_config_s { + long unsigned int rsvd_0_5: 6; + long unsigned int n_skt: 4; + long unsigned int rsvd_10_63: 54; +}; + +struct uvxh_rh_gam_addr_map_config_s { + long unsigned int rsvd_0_5: 6; + long unsigned int n_skt: 4; + long unsigned int rsvd_10_63: 54; +}; + +struct uv4h_rh_gam_addr_map_config_s { + long unsigned int rsvd_0_5: 6; + long unsigned int n_skt: 4; + long unsigned int rsvd_10_63: 54; +}; + +struct uv3h_rh_gam_addr_map_config_s { + long unsigned int m_skt: 6; + long unsigned int n_skt: 4; + long unsigned int rsvd_10_63: 54; +}; + +struct uv2h_rh_gam_addr_map_config_s { + long unsigned int m_skt: 6; + long unsigned int n_skt: 4; + long unsigned int rsvd_10_63: 54; +}; + +union uvh_rh_gam_addr_map_config_u { + long unsigned int v; + struct uvh_rh_gam_addr_map_config_s s; + struct uvxh_rh_gam_addr_map_config_s sx; + struct uv4h_rh_gam_addr_map_config_s s4; + struct uv3h_rh_gam_addr_map_config_s s3; + struct uv2h_rh_gam_addr_map_config_s s2; +}; + +struct uvh_rh_gam_alias_2_overlay_config_s { + long unsigned int rsvd_0_23: 24; + long unsigned int base: 8; + long unsigned int rsvd_32_47: 16; + long unsigned int m_alias: 5; + long unsigned int rsvd_53_62: 10; + long unsigned int enable: 1; +}; + +struct uvxh_rh_gam_alias_2_overlay_config_s { + long unsigned int rsvd_0_23: 24; + long unsigned int base: 8; + long unsigned int rsvd_32_47: 16; + long unsigned int m_alias: 5; + long unsigned int rsvd_53_62: 10; + long unsigned int enable: 1; +}; + 
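/*
 * Editorial note, not part of the generated header: the *_config_u unions
 * above all follow the same pattern -- a raw 64-bit MMR value (v) overlaid
 * with per-hardware-revision bitfield views (s, sx, sy, s2..s5). A minimal,
 * hypothetical usage sketch follows; the helper name and the caller that
 * supplies the raw register value are illustrative only and are not taken
 * from this diff.
 */
static inline unsigned char uv_gam_addr_map_n_skt(long unsigned int raw_mmr)
{
	union uvh_rh_gam_addr_map_config_u cfg;

	cfg.v = raw_mmr;	/* store the raw MMR contents */
	return cfg.s.n_skt;	/* read a field through the common bitfield view */
}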
+struct uv4h_rh_gam_alias_2_overlay_config_s { + long unsigned int rsvd_0_23: 24; + long unsigned int base: 8; + long unsigned int rsvd_32_47: 16; + long unsigned int m_alias: 5; + long unsigned int rsvd_53_62: 10; + long unsigned int enable: 1; +}; + +struct uv3h_rh_gam_alias_2_overlay_config_s { + long unsigned int rsvd_0_23: 24; + long unsigned int base: 8; + long unsigned int rsvd_32_47: 16; + long unsigned int m_alias: 5; + long unsigned int rsvd_53_62: 10; + long unsigned int enable: 1; +}; + +struct uv2h_rh_gam_alias_2_overlay_config_s { + long unsigned int rsvd_0_23: 24; + long unsigned int base: 8; + long unsigned int rsvd_32_47: 16; + long unsigned int m_alias: 5; + long unsigned int rsvd_53_62: 10; + long unsigned int enable: 1; +}; + +union uvh_rh_gam_alias_2_overlay_config_u { + long unsigned int v; + struct uvh_rh_gam_alias_2_overlay_config_s s; + struct uvxh_rh_gam_alias_2_overlay_config_s sx; + struct uv4h_rh_gam_alias_2_overlay_config_s s4; + struct uv3h_rh_gam_alias_2_overlay_config_s s3; + struct uv2h_rh_gam_alias_2_overlay_config_s s2; +}; + +struct uvh_rh_gam_alias_2_redirect_config_s { + long unsigned int rsvd_0_23: 24; + long unsigned int dest_base: 22; + long unsigned int rsvd_46_63: 18; +}; + +struct uvxh_rh_gam_alias_2_redirect_config_s { + long unsigned int rsvd_0_23: 24; + long unsigned int dest_base: 22; + long unsigned int rsvd_46_63: 18; +}; + +struct uv4h_rh_gam_alias_2_redirect_config_s { + long unsigned int rsvd_0_23: 24; + long unsigned int dest_base: 22; + long unsigned int rsvd_46_63: 18; +}; + +struct uv3h_rh_gam_alias_2_redirect_config_s { + long unsigned int rsvd_0_23: 24; + long unsigned int dest_base: 22; + long unsigned int rsvd_46_63: 18; +}; + +struct uv2h_rh_gam_alias_2_redirect_config_s { + long unsigned int rsvd_0_23: 24; + long unsigned int dest_base: 22; + long unsigned int rsvd_46_63: 18; +}; + +union uvh_rh_gam_alias_2_redirect_config_u { + long unsigned int v; + struct uvh_rh_gam_alias_2_redirect_config_s s; + struct uvxh_rh_gam_alias_2_redirect_config_s sx; + struct uv4h_rh_gam_alias_2_redirect_config_s s4; + struct uv3h_rh_gam_alias_2_redirect_config_s s3; + struct uv2h_rh_gam_alias_2_redirect_config_s s2; +}; + +struct uvh_rh_gam_gru_overlay_config_s { + long unsigned int rsvd_0_45: 46; + long unsigned int rsvd_46_51: 6; + long unsigned int n_gru: 4; + long unsigned int rsvd_56_62: 7; + long unsigned int enable: 1; +}; + +struct uvxh_rh_gam_gru_overlay_config_s { + long unsigned int rsvd_0_45: 46; + long unsigned int rsvd_46_51: 6; + long unsigned int n_gru: 4; + long unsigned int rsvd_56_62: 7; + long unsigned int enable: 1; +}; + +struct uv4ah_rh_gam_gru_overlay_config_s { + long unsigned int rsvd_0_24: 25; + long unsigned int undef_25: 1; + long unsigned int base: 26; + long unsigned int n_gru: 4; + long unsigned int rsvd_56_62: 7; + long unsigned int enable: 1; +}; + +struct uv4h_rh_gam_gru_overlay_config_s { + long unsigned int rsvd_0_24: 25; + long unsigned int undef_25: 1; + long unsigned int base: 20; + long unsigned int rsvd_46_51: 6; + long unsigned int n_gru: 4; + long unsigned int rsvd_56_62: 7; + long unsigned int enable: 1; +}; + +struct uv3h_rh_gam_gru_overlay_config_s { + long unsigned int rsvd_0_27: 28; + long unsigned int base: 18; + long unsigned int rsvd_46_51: 6; + long unsigned int n_gru: 4; + long unsigned int rsvd_56_61: 6; + long unsigned int mode: 1; + long unsigned int enable: 1; +}; + +struct uv2h_rh_gam_gru_overlay_config_s { + long unsigned int rsvd_0_27: 28; + long unsigned int base: 18; + long unsigned 
int rsvd_46_51: 6; + long unsigned int n_gru: 4; + long unsigned int rsvd_56_62: 7; + long unsigned int enable: 1; +}; + +union uvh_rh_gam_gru_overlay_config_u { + long unsigned int v; + struct uvh_rh_gam_gru_overlay_config_s s; + struct uvxh_rh_gam_gru_overlay_config_s sx; + struct uv4ah_rh_gam_gru_overlay_config_s s4a; + struct uv4h_rh_gam_gru_overlay_config_s s4; + struct uv3h_rh_gam_gru_overlay_config_s s3; + struct uv2h_rh_gam_gru_overlay_config_s s2; +}; + +struct uvh_rh_gam_mmioh_overlay_config_s { + long unsigned int rsvd_0_26: 27; + long unsigned int base: 19; + long unsigned int m_io: 6; + long unsigned int n_io: 4; + long unsigned int rsvd_56_62: 7; + long unsigned int enable: 1; +}; + +struct uvxh_rh_gam_mmioh_overlay_config_s { + long unsigned int rsvd_0_26: 27; + long unsigned int base: 19; + long unsigned int m_io: 6; + long unsigned int n_io: 4; + long unsigned int rsvd_56_62: 7; + long unsigned int enable: 1; +}; + +struct uv2h_rh_gam_mmioh_overlay_config_s { + long unsigned int rsvd_0_26: 27; + long unsigned int base: 19; + long unsigned int m_io: 6; + long unsigned int n_io: 4; + long unsigned int rsvd_56_62: 7; + long unsigned int enable: 1; +}; + +union uvh_rh_gam_mmioh_overlay_config_u { + long unsigned int v; + struct uvh_rh_gam_mmioh_overlay_config_s s; + struct uvxh_rh_gam_mmioh_overlay_config_s sx; + struct uv2h_rh_gam_mmioh_overlay_config_s s2; +}; + +struct uvh_rh_gam_mmioh_overlay_config0_s { + long unsigned int rsvd_0_25: 26; + long unsigned int base: 20; + long unsigned int m_io: 6; + long unsigned int n_io: 4; + long unsigned int rsvd_56_62: 7; + long unsigned int enable: 1; +}; + +struct uvxh_rh_gam_mmioh_overlay_config0_s { + long unsigned int rsvd_0_25: 26; + long unsigned int base: 20; + long unsigned int m_io: 6; + long unsigned int n_io: 4; + long unsigned int rsvd_56_62: 7; + long unsigned int enable: 1; +}; + +struct uv4ah_rh_gam_mmioh_overlay_config0_mmr_s { + long unsigned int rsvd_0_25: 26; + long unsigned int base: 26; + long unsigned int m_io: 6; + long unsigned int n_io: 4; + long unsigned int undef_62: 1; + long unsigned int enable: 1; +}; + +struct uv4h_rh_gam_mmioh_overlay_config0_s { + long unsigned int rsvd_0_25: 26; + long unsigned int base: 20; + long unsigned int m_io: 6; + long unsigned int n_io: 4; + long unsigned int rsvd_56_62: 7; + long unsigned int enable: 1; +}; + +struct uv3h_rh_gam_mmioh_overlay_config0_s { + long unsigned int rsvd_0_25: 26; + long unsigned int base: 20; + long unsigned int m_io: 6; + long unsigned int n_io: 4; + long unsigned int rsvd_56_62: 7; + long unsigned int enable: 1; +}; + +union uvh_rh_gam_mmioh_overlay_config0_u { + long unsigned int v; + struct uvh_rh_gam_mmioh_overlay_config0_s s; + struct uvxh_rh_gam_mmioh_overlay_config0_s sx; + struct uv4ah_rh_gam_mmioh_overlay_config0_mmr_s s4a; + struct uv4h_rh_gam_mmioh_overlay_config0_s s4; + struct uv3h_rh_gam_mmioh_overlay_config0_s s3; +}; + +struct uvh_rh_gam_mmioh_overlay_config1_s { + long unsigned int rsvd_0_25: 26; + long unsigned int base: 20; + long unsigned int m_io: 6; + long unsigned int n_io: 4; + long unsigned int rsvd_56_62: 7; + long unsigned int enable: 1; +}; + +struct uvxh_rh_gam_mmioh_overlay_config1_s { + long unsigned int rsvd_0_25: 26; + long unsigned int base: 20; + long unsigned int m_io: 6; + long unsigned int n_io: 4; + long unsigned int rsvd_56_62: 7; + long unsigned int enable: 1; +}; + +struct uv4ah_rh_gam_mmioh_overlay_config1_mmr_s { + long unsigned int rsvd_0_25: 26; + long unsigned int base: 26; + long unsigned int m_io: 6; 
+ long unsigned int n_io: 4; + long unsigned int undef_62: 1; + long unsigned int enable: 1; +}; + +struct uv4h_rh_gam_mmioh_overlay_config1_s { + long unsigned int rsvd_0_25: 26; + long unsigned int base: 20; + long unsigned int m_io: 6; + long unsigned int n_io: 4; + long unsigned int rsvd_56_62: 7; + long unsigned int enable: 1; +}; + +struct uv3h_rh_gam_mmioh_overlay_config1_s { + long unsigned int rsvd_0_25: 26; + long unsigned int base: 20; + long unsigned int m_io: 6; + long unsigned int n_io: 4; + long unsigned int rsvd_56_62: 7; + long unsigned int enable: 1; +}; + +union uvh_rh_gam_mmioh_overlay_config1_u { + long unsigned int v; + struct uvh_rh_gam_mmioh_overlay_config1_s s; + struct uvxh_rh_gam_mmioh_overlay_config1_s sx; + struct uv4ah_rh_gam_mmioh_overlay_config1_mmr_s s4a; + struct uv4h_rh_gam_mmioh_overlay_config1_s s4; + struct uv3h_rh_gam_mmioh_overlay_config1_s s3; +}; + +struct uvh_rh_gam_mmr_overlay_config_s { + long unsigned int rsvd_0_25: 26; + long unsigned int base: 20; + long unsigned int rsvd_46_62: 17; + long unsigned int enable: 1; +}; + +struct uvxh_rh_gam_mmr_overlay_config_s { + long unsigned int rsvd_0_25: 26; + long unsigned int base: 20; + long unsigned int rsvd_46_62: 17; + long unsigned int enable: 1; +}; + +struct uv4h_rh_gam_mmr_overlay_config_s { + long unsigned int rsvd_0_25: 26; + long unsigned int base: 20; + long unsigned int rsvd_46_62: 17; + long unsigned int enable: 1; +}; + +struct uv3h_rh_gam_mmr_overlay_config_s { + long unsigned int rsvd_0_25: 26; + long unsigned int base: 20; + long unsigned int rsvd_46_62: 17; + long unsigned int enable: 1; +}; + +struct uv2h_rh_gam_mmr_overlay_config_s { + long unsigned int rsvd_0_25: 26; + long unsigned int base: 20; + long unsigned int rsvd_46_62: 17; + long unsigned int enable: 1; +}; + +union uvh_rh_gam_mmr_overlay_config_u { + long unsigned int v; + struct uvh_rh_gam_mmr_overlay_config_s s; + struct uvxh_rh_gam_mmr_overlay_config_s sx; + struct uv4h_rh_gam_mmr_overlay_config_s s4; + struct uv3h_rh_gam_mmr_overlay_config_s s3; + struct uv2h_rh_gam_mmr_overlay_config_s s2; +}; + +enum uv_system_type { + UV_NONE = 0, + UV_LEGACY_APIC = 1, + UV_X2APIC = 2, +}; + +enum { + BIOS_STATUS_MORE_PASSES = 1, + BIOS_STATUS_SUCCESS = 0, + BIOS_STATUS_UNIMPLEMENTED = 4294967258, + BIOS_STATUS_EINVAL = 4294967274, + BIOS_STATUS_UNAVAIL = 4294967280, + BIOS_STATUS_ABORT = 4294967292, +}; + +struct uv_gam_parameters { + u64 mmr_base; + u64 gru_base; + u8 mmr_shift; + u8 gru_shift; + u8 gpa_shift; + u8 unused1; +}; + +struct uv_gam_range_entry { + char type; + char unused1; + u16 nasid; + u16 sockid; + u16 pnode; + u32 unused2; + u32 limit; +}; + +struct uv_arch_type_entry { + char archtype[8]; +}; + +struct uv_systab { + char signature[4]; + u32 revision; + u64 function; + u32 size; + struct { + u32 type: 8; + u32 offset: 24; + } entry[1]; +}; + +enum { + BIOS_FREQ_BASE_PLATFORM = 0, + BIOS_FREQ_BASE_INTERVAL_TIMER = 1, + BIOS_FREQ_BASE_REALTIME_CLOCK = 2, +}; + +struct uv_gam_range_s { + u32 limit; + u16 nasid; + s8 base; + u8 reserved; +}; + +struct uv_hub_info_s { + unsigned int hub_type; + unsigned char hub_revision; + long unsigned int global_mmr_base; + long unsigned int global_mmr_shift; + long unsigned int gpa_mask; + short unsigned int *socket_to_node; + short unsigned int *socket_to_pnode; + short unsigned int *pnode_to_socket; + struct uv_gam_range_s *gr_table; + short unsigned int min_socket; + short unsigned int min_pnode; + unsigned char m_val; + unsigned char n_val; + unsigned char gr_table_len; + 
unsigned char apic_pnode_shift; + unsigned char gpa_shift; + unsigned char nasid_shift; + unsigned char m_shift; + unsigned char n_lshift; + unsigned int gnode_extra; + long unsigned int gnode_upper; + long unsigned int lowmem_remap_top; + long unsigned int lowmem_remap_base; + long unsigned int global_gru_base; + long unsigned int global_gru_shift; + short unsigned int pnode; + short unsigned int pnode_mask; + short unsigned int coherency_domain_number; + short unsigned int numa_blade_id; + short unsigned int nr_possible_cpus; + short unsigned int nr_online_cpus; + short int memory_nid; +}; + +struct uv_cpu_info_s { + void *p_uv_hub_info; + unsigned char blade_cpu_id; + void *reserved; +}; + +struct uvh_apicid_s { + long unsigned int local_apic_mask: 24; + long unsigned int local_apic_shift: 5; + long unsigned int unused1: 3; + long unsigned int pnode_mask: 24; + long unsigned int pnode_shift: 5; + long unsigned int unused2: 3; +}; + +union uvh_apicid { + long unsigned int v; + struct uvh_apicid_s s; +}; + +enum map_type { + map_wb = 0, + map_uc = 1, +}; + +enum mmioh_arch { + UV2_MMIOH = 4294967295, + UVY_MMIOH0 = 0, + UVY_MMIOH1 = 1, + UVX_MMIOH0 = 2, + UVX_MMIOH1 = 3, +}; + +struct mn { + unsigned char m_val; + unsigned char n_val; + unsigned char m_shift; + unsigned char n_lshift; +}; + +struct cluster_mask { + unsigned int clusterid; + int node; + struct cpumask mask; +}; + +struct dyn_arch_ftrace {}; + +enum { + FTRACE_OPS_FL_ENABLED = 1, + FTRACE_OPS_FL_DYNAMIC = 2, + FTRACE_OPS_FL_SAVE_REGS = 4, + FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 8, + FTRACE_OPS_FL_RECURSION = 16, + FTRACE_OPS_FL_STUB = 32, + FTRACE_OPS_FL_INITIALIZED = 64, + FTRACE_OPS_FL_DELETED = 128, + FTRACE_OPS_FL_ADDING = 256, + FTRACE_OPS_FL_REMOVING = 512, + FTRACE_OPS_FL_MODIFYING = 1024, + FTRACE_OPS_FL_ALLOC_TRAMP = 2048, + FTRACE_OPS_FL_IPMODIFY = 4096, + FTRACE_OPS_FL_PID = 8192, + FTRACE_OPS_FL_RCU = 16384, + FTRACE_OPS_FL_TRACE_ARRAY = 32768, + FTRACE_OPS_FL_PERMANENT = 65536, + FTRACE_OPS_FL_DIRECT = 131072, +}; + +enum { + FTRACE_FL_ENABLED = 2147483648, + FTRACE_FL_REGS = 1073741824, + FTRACE_FL_REGS_EN = 536870912, + FTRACE_FL_TRAMP = 268435456, + FTRACE_FL_TRAMP_EN = 134217728, + FTRACE_FL_IPMODIFY = 67108864, + FTRACE_FL_DISABLED = 33554432, + FTRACE_FL_DIRECT = 16777216, + FTRACE_FL_DIRECT_EN = 8388608, +}; + +struct dyn_ftrace { + long unsigned int ip; + long unsigned int flags; + struct dyn_arch_ftrace arch; +}; + +enum { + FTRACE_UPDATE_IGNORE = 0, + FTRACE_UPDATE_MAKE_CALL = 1, + FTRACE_UPDATE_MODIFY_CALL = 2, + FTRACE_UPDATE_MAKE_NOP = 3, +}; + +union ftrace_op_code_union { + char code[7]; + struct { + char op[3]; + int offset; + } __attribute__((packed)); +}; + +struct ftrace_rec_iter; + +typedef __s64 Elf64_Sxword; + +struct elf64_rela { + Elf64_Addr r_offset; + Elf64_Xword r_info; + Elf64_Sxword r_addend; +}; + +typedef struct elf64_rela Elf64_Rela; + +struct kimage_arch { + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; +}; + +typedef long unsigned int kimage_entry_t; + +struct kexec_segment { + union { + void *buf; + void *kbuf; + }; + size_t bufsz; + long unsigned int mem; + size_t memsz; +}; + +struct purgatory_info { + const Elf64_Ehdr *ehdr; + Elf64_Shdr *sechdrs; + void *purgatory_buf; +}; + +typedef int kexec_probe_t(const char *, long unsigned int); + +struct kimage; + +typedef void *kexec_load_t(struct kimage *, char *, long unsigned int, char *, long unsigned int, char *, long unsigned int); + +struct kexec_file_ops; + +struct kimage { + kimage_entry_t head; + kimage_entry_t 
*entry; + kimage_entry_t *last_entry; + long unsigned int start; + struct page *control_code_page; + struct page *swap_page; + void *vmcoreinfo_data_copy; + long unsigned int nr_segments; + struct kexec_segment segment[16]; + struct list_head control_pages; + struct list_head dest_pages; + struct list_head unusable_pages; + long unsigned int control_page; + unsigned int type: 1; + unsigned int preserve_context: 1; + unsigned int file_mode: 1; + struct kimage_arch arch; + void *kernel_buf; + long unsigned int kernel_buf_len; + void *initrd_buf; + long unsigned int initrd_buf_len; + char *cmdline_buf; + long unsigned int cmdline_buf_len; + const struct kexec_file_ops *fops; + void *image_loader_data; + struct purgatory_info purgatory_info; + void *elf_headers; + long unsigned int elf_headers_sz; + long unsigned int elf_load_addr; +}; + +typedef int kexec_cleanup_t(void *); + +typedef int kexec_verify_sig_t(const char *, long unsigned int); + +struct kexec_file_ops { + kexec_probe_t *probe; + kexec_load_t *load; + kexec_cleanup_t *cleanup; + kexec_verify_sig_t *verify_sig; +}; + +struct x86_mapping_info { + void * (*alloc_pgt_page)(void *); + void *context; + long unsigned int page_flag; + long unsigned int offset; + bool direct_gbpages; + long unsigned int kernpg_flag; +}; + +struct init_pgtable_data { + struct x86_mapping_info *info; + pgd_t *level4p; +}; + +typedef void crash_vmclear_fn(); + +struct kexec_buf { + struct kimage *image; + void *buffer; + long unsigned int bufsz; + long unsigned int mem; + long unsigned int memsz; + long unsigned int buf_align; + long unsigned int buf_min; + long unsigned int buf_max; + bool top_down; +}; + +struct crash_mem_range { + u64 start; + u64 end; +}; + +struct crash_mem { + unsigned int max_nr_ranges; + unsigned int nr_ranges; + struct crash_mem_range ranges[0]; +}; + +struct crash_memmap_data { + struct boot_params *params; + unsigned int type; +}; + +struct kexec_entry64_regs { + uint64_t rax; + uint64_t rcx; + uint64_t rdx; + uint64_t rbx; + uint64_t rsp; + uint64_t rbp; + uint64_t rsi; + uint64_t rdi; + uint64_t r8; + uint64_t r9; + uint64_t r10; + uint64_t r11; + uint64_t r12; + uint64_t r13; + uint64_t r14; + uint64_t r15; + uint64_t rip; +}; + +enum key_being_used_for { + VERIFYING_MODULE_SIGNATURE = 0, + VERIFYING_FIRMWARE_SIGNATURE = 1, + VERIFYING_KEXEC_PE_SIGNATURE = 2, + VERIFYING_KEY_SIGNATURE = 3, + VERIFYING_KEY_SELF_SIGNATURE = 4, + VERIFYING_UNSPECIFIED_SIGNATURE = 5, + NR__KEY_BEING_USED_FOR = 6, +}; + +struct efi_setup_data { + u64 fw_vendor; + u64 __unused; + u64 tables; + u64 smbios; + u64 reserved[8]; +}; + +struct bzimage64_data { + void *bootparams_buf; +}; + +struct freelist_node { + atomic_t refs; + struct freelist_node *next; +}; + +struct freelist_head { + struct freelist_node *head; +}; + +struct prev_kprobe { + struct kprobe *kp; + long unsigned int status; + long unsigned int old_flags; + long unsigned int saved_flags; +}; + +struct kprobe_ctlblk { + long unsigned int kprobe_status; + long unsigned int kprobe_old_flags; + long unsigned int kprobe_saved_flags; + struct prev_kprobe prev_kprobe; +}; + +struct kretprobe_instance; + +typedef int (*kretprobe_handler_t)(struct kretprobe_instance *, struct pt_regs *); + +struct kretprobe_holder; + +struct kretprobe_instance { + union { + struct freelist_node freelist; + struct callback_head rcu; + }; + struct llist_node llist; + struct kretprobe_holder *rph; + kprobe_opcode_t *ret_addr; + void *fp; + char data[0]; +}; + +struct kretprobe; + +struct kretprobe_holder { + struct 
kretprobe *rp; + refcount_t ref; +}; + +struct kretprobe { + struct kprobe kp; + kretprobe_handler_t handler; + kretprobe_handler_t entry_handler; + int maxactive; + int nmissed; + size_t data_size; + struct freelist_head freelist; + struct kretprobe_holder *rph; +}; + +struct kretprobe_blackpoint { + const char *name; + void *addr; +}; + +struct kprobe_insn_cache { + struct mutex mutex; + void * (*alloc)(); + void (*free)(void *); + const char *sym; + struct list_head pages; + size_t insn_size; + int nr_garbage; +}; + +struct __arch_relative_insn { + u8 op; + s32 raddr; +} __attribute__((packed)); + +struct arch_optimized_insn { + kprobe_opcode_t copied_insn[4]; + kprobe_opcode_t *insn; + size_t size; +}; + +struct optimized_kprobe { + struct kprobe kp; + struct list_head list; + struct arch_optimized_insn optinsn; +}; + +enum { + TRACE_FTRACE_BIT = 0, + TRACE_FTRACE_NMI_BIT = 1, + TRACE_FTRACE_IRQ_BIT = 2, + TRACE_FTRACE_SIRQ_BIT = 3, + TRACE_INTERNAL_BIT = 4, + TRACE_INTERNAL_NMI_BIT = 5, + TRACE_INTERNAL_IRQ_BIT = 6, + TRACE_INTERNAL_SIRQ_BIT = 7, + TRACE_BRANCH_BIT = 8, + TRACE_IRQ_BIT = 9, + TRACE_GRAPH_BIT = 10, + TRACE_GRAPH_DEPTH_START_BIT = 11, + TRACE_GRAPH_DEPTH_END_BIT = 12, + TRACE_GRAPH_NOTRACE_BIT = 13, + TRACE_TRANSITION_BIT = 14, + TRACE_RECORD_RECURSION_BIT = 15, +}; + +enum { + TRACE_CTX_NMI = 0, + TRACE_CTX_IRQ = 1, + TRACE_CTX_SOFTIRQ = 2, + TRACE_CTX_NORMAL = 3, +}; + +enum regnames { + GDB_AX = 0, + GDB_BX = 1, + GDB_CX = 2, + GDB_DX = 3, + GDB_SI = 4, + GDB_DI = 5, + GDB_BP = 6, + GDB_SP = 7, + GDB_R8 = 8, + GDB_R9 = 9, + GDB_R10 = 10, + GDB_R11 = 11, + GDB_R12 = 12, + GDB_R13 = 13, + GDB_R14 = 14, + GDB_R15 = 15, + GDB_PC = 16, + GDB_PS = 17, + GDB_CS = 18, + GDB_SS = 19, + GDB_DS = 20, + GDB_ES = 21, + GDB_FS = 22, + GDB_GS = 23, +}; + +enum kgdb_bpstate { + BP_UNDEFINED = 0, + BP_REMOVED = 1, + BP_SET = 2, + BP_ACTIVE = 3, +}; + +struct kgdb_bkpt { + long unsigned int bpt_addr; + unsigned char saved_instr[1]; + enum kgdb_bptype type; + enum kgdb_bpstate state; +}; + +struct dbg_reg_def_t { + char *name; + int size; + int offset; +}; + +struct hw_breakpoint { + unsigned int enabled; + long unsigned int addr; + int len; + int type; + struct perf_event **pev; +}; + +struct console { + char name[16]; + void (*write)(struct console *, const char *, unsigned int); + int (*read)(struct console *, char *, unsigned int); + struct tty_driver * (*device)(struct console *, int *); + void (*unblank)(); + int (*setup)(struct console *, char *); + int (*exit)(struct console *); + int (*match)(struct console *, char *, int, char *); + short int flags; + short int index; + int cflag; + void *data; + struct console *next; +}; + +struct hpet_data { + long unsigned int hd_phys_address; + void *hd_address; + short unsigned int hd_nirqs; + unsigned int hd_state; + unsigned int hd_irq[32]; +}; + +typedef irqreturn_t (*rtc_irq_handler)(int, void *); + +enum hpet_mode { + HPET_MODE_UNUSED = 0, + HPET_MODE_LEGACY = 1, + HPET_MODE_CLOCKEVT = 2, + HPET_MODE_DEVICE = 3, +}; + +struct hpet_channel { + struct clock_event_device evt; + unsigned int num; + unsigned int cpu; + unsigned int irq; + unsigned int in_use; + enum hpet_mode mode; + unsigned int boot_cfg; + char name[10]; + long: 48; + long: 64; + long: 64; + long: 64; +}; + +struct hpet_base { + unsigned int nr_channels; + unsigned int nr_clockevents; + unsigned int boot_cfg; + struct hpet_channel *channels; +}; + +union hpet_lock { + struct { + arch_spinlock_t lock; + u32 value; + }; + u64 lockval; +}; + +struct amd_nb_bus_dev_range { 
+ u8 bus; + u8 dev_base; + u8 dev_limit; +}; + +struct amd_northbridge_info { + u16 num; + u64 flags; + struct amd_northbridge *nb; +}; + +struct swait_queue { + struct task_struct *task; + struct list_head task_list; +}; + +struct kvm_steal_time { + __u64 steal; + __u32 version; + __u32 flags; + __u8 preempted; + __u8 u8_pad[3]; + __u32 pad[11]; +}; + +struct kvm_vcpu_pv_apf_data { + __u32 flags; + __u32 token; + __u8 pad[56]; + __u32 enabled; +}; + +struct kvm_task_sleep_node { + struct hlist_node link; + struct swait_queue_head wq; + u32 token; + int cpu; +}; + +struct kvm_task_sleep_head { + raw_spinlock_t lock; + struct hlist_head list; +}; + +typedef struct ldttss_desc ldt_desc; + +struct branch { + unsigned char opcode; + u32 delta; +} __attribute__((packed)); + +typedef long unsigned int ulong; + +struct jailhouse_setup_data { + struct { + __u16 version; + __u16 compatible_version; + } hdr; + struct { + __u16 pm_timer_address; + __u16 num_cpus; + __u64 pci_mmconfig_base; + __u32 tsc_khz; + __u32 apic_khz; + __u8 standard_ioapic; + __u8 cpu_ids[255]; + } __attribute__((packed)) v1; + struct { + __u32 flags; + } v2; +} __attribute__((packed)); + +struct circ_buf { + char *buf; + int head; + int tail; +}; + +struct serial_rs485 { + __u32 flags; + __u32 delay_rts_before_send; + __u32 delay_rts_after_send; + __u32 padding[5]; +}; + +struct serial_iso7816 { + __u32 flags; + __u32 tg; + __u32 sc_fi; + __u32 sc_di; + __u32 clk; + __u32 reserved[5]; +}; + +struct uart_port; + +struct uart_ops { + unsigned int (*tx_empty)(struct uart_port *); + void (*set_mctrl)(struct uart_port *, unsigned int); + unsigned int (*get_mctrl)(struct uart_port *); + void (*stop_tx)(struct uart_port *); + void (*start_tx)(struct uart_port *); + void (*throttle)(struct uart_port *); + void (*unthrottle)(struct uart_port *); + void (*send_xchar)(struct uart_port *, char); + void (*stop_rx)(struct uart_port *); + void (*enable_ms)(struct uart_port *); + void (*break_ctl)(struct uart_port *, int); + int (*startup)(struct uart_port *); + void (*shutdown)(struct uart_port *); + void (*flush_buffer)(struct uart_port *); + void (*set_termios)(struct uart_port *, struct ktermios *, struct ktermios *); + void (*set_ldisc)(struct uart_port *, struct ktermios *); + void (*pm)(struct uart_port *, unsigned int, unsigned int); + const char * (*type)(struct uart_port *); + void (*release_port)(struct uart_port *); + int (*request_port)(struct uart_port *); + void (*config_port)(struct uart_port *, int); + int (*verify_port)(struct uart_port *, struct serial_struct *); + int (*ioctl)(struct uart_port *, unsigned int, long unsigned int); + int (*poll_init)(struct uart_port *); + void (*poll_put_char)(struct uart_port *, unsigned char); + int (*poll_get_char)(struct uart_port *); +}; + +struct uart_icount { + __u32 cts; + __u32 dsr; + __u32 rng; + __u32 dcd; + __u32 rx; + __u32 tx; + __u32 frame; + __u32 overrun; + __u32 parity; + __u32 brk; + __u32 buf_overrun; +}; + +typedef unsigned int upf_t; + +typedef unsigned int upstat_t; + +struct gpio_desc; + +struct uart_state; + +struct uart_port { + spinlock_t lock; + long unsigned int iobase; + unsigned char *membase; + unsigned int (*serial_in)(struct uart_port *, int); + void (*serial_out)(struct uart_port *, int, int); + void (*set_termios)(struct uart_port *, struct ktermios *, struct ktermios *); + void (*set_ldisc)(struct uart_port *, struct ktermios *); + unsigned int (*get_mctrl)(struct uart_port *); + void (*set_mctrl)(struct uart_port *, unsigned int); + unsigned int 
(*get_divisor)(struct uart_port *, unsigned int, unsigned int *); + void (*set_divisor)(struct uart_port *, unsigned int, unsigned int, unsigned int); + int (*startup)(struct uart_port *); + void (*shutdown)(struct uart_port *); + void (*throttle)(struct uart_port *); + void (*unthrottle)(struct uart_port *); + int (*handle_irq)(struct uart_port *); + void (*pm)(struct uart_port *, unsigned int, unsigned int); + void (*handle_break)(struct uart_port *); + int (*rs485_config)(struct uart_port *, struct serial_rs485 *); + int (*iso7816_config)(struct uart_port *, struct serial_iso7816 *); + unsigned int irq; + long unsigned int irqflags; + unsigned int uartclk; + unsigned int fifosize; + unsigned char x_char; + unsigned char regshift; + unsigned char iotype; + unsigned char quirks; + unsigned int read_status_mask; + unsigned int ignore_status_mask; + struct uart_state *state; + struct uart_icount icount; + struct console *cons; + upf_t flags; + upstat_t status; + int hw_stopped; + unsigned int mctrl; + unsigned int timeout; + unsigned int type; + const struct uart_ops *ops; + unsigned int custom_divisor; + unsigned int line; + unsigned int minor; + resource_size_t mapbase; + resource_size_t mapsize; + struct device *dev; + long unsigned int sysrq; + unsigned int sysrq_ch; + unsigned char has_sysrq; + unsigned char sysrq_seq; + unsigned char hub6; + unsigned char suspended; + unsigned char console_reinit; + const char *name; + struct attribute_group *attr_group; + const struct attribute_group **tty_groups; + struct serial_rs485 rs485; + struct gpio_desc *rs485_term_gpio; + struct serial_iso7816 iso7816; + void *private_data; +}; + +enum uart_pm_state { + UART_PM_STATE_ON = 0, + UART_PM_STATE_OFF = 3, + UART_PM_STATE_UNDEFINED = 4, +}; + +struct uart_state { + struct tty_port port; + enum uart_pm_state pm_state; + struct circ_buf xmit; + atomic_t refcount; + wait_queue_head_t remove_wait; + struct uart_port *uart_port; +}; + +struct pci_mmcfg_region { + struct list_head list; + struct resource res; + u64 address; + char *virt; + u16 segment; + u8 start_bus; + u8 end_bus; + char name[30]; +}; + +struct scan_area { + u64 addr; + u64 size; +}; + +struct uprobe_xol_ops; + +struct arch_uprobe { + union { + u8 insn[16]; + u8 ixol[16]; + }; + const struct uprobe_xol_ops *ops; + union { + struct { + s32 offs; + u8 ilen; + u8 opc1; + } branch; + struct { + u8 fixups; + u8 ilen; + } defparam; + struct { + u8 reg_offset; + u8 ilen; + } push; + }; +}; + +struct uprobe_xol_ops { + bool (*emulate)(struct arch_uprobe *, struct pt_regs *); + int (*pre_xol)(struct arch_uprobe *, struct pt_regs *); + int (*post_xol)(struct arch_uprobe *, struct pt_regs *); + void (*abort)(struct arch_uprobe *, struct pt_regs *); +}; + +enum rp_check { + RP_CHECK_CALL = 0, + RP_CHECK_CHAIN_CALL = 1, + RP_CHECK_RET = 2, +}; + +struct simplefb_platform_data { + u32 width; + u32 height; + u32 stride; + const char *format; +}; + +enum { + M_I17 = 0, + M_I20 = 1, + M_I20_SR = 2, + M_I24 = 3, + M_I24_8_1 = 4, + M_I24_10_1 = 5, + M_I27_11_1 = 6, + M_MINI = 7, + M_MINI_3_1 = 8, + M_MINI_4_1 = 9, + M_MB = 10, + M_MB_2 = 11, + M_MB_3 = 12, + M_MB_5_1 = 13, + M_MB_6_1 = 14, + M_MB_7_1 = 15, + M_MB_SR = 16, + M_MBA = 17, + M_MBA_3 = 18, + M_MBP = 19, + M_MBP_2 = 20, + M_MBP_2_2 = 21, + M_MBP_SR = 22, + M_MBP_4 = 23, + M_MBP_5_1 = 24, + M_MBP_5_2 = 25, + M_MBP_5_3 = 26, + M_MBP_6_1 = 27, + M_MBP_6_2 = 28, + M_MBP_7_1 = 29, + M_MBP_8_2 = 30, + M_UNKNOWN = 31, +}; + +struct efifb_dmi_info { + char *optname; + long unsigned int base; + int 
stride; + int width; + int height; + int flags; +}; + +enum { + OVERRIDE_NONE = 0, + OVERRIDE_BASE = 1, + OVERRIDE_STRIDE = 2, + OVERRIDE_HEIGHT = 4, + OVERRIDE_WIDTH = 8, +}; + +enum perf_sample_regs_abi { + PERF_SAMPLE_REGS_ABI_NONE = 0, + PERF_SAMPLE_REGS_ABI_32 = 1, + PERF_SAMPLE_REGS_ABI_64 = 2, +}; + +struct va_format { + const char *fmt; + va_list *va; +}; + +enum es_result { + ES_OK = 0, + ES_UNSUPPORTED = 1, + ES_VMM_ERROR = 2, + ES_DECODE_FAILED = 3, + ES_EXCEPTION = 4, + ES_RETRY = 5, +}; + +struct es_fault_info { + long unsigned int vector; + long unsigned int error_code; + long unsigned int cr2; +}; + +struct es_em_ctxt { + struct pt_regs *regs; + struct insn insn; + struct es_fault_info fi; +}; + +struct sev_es_runtime_data { + struct ghcb ghcb_page; + char ist_stack[4096]; + char fallback_stack[4096]; + struct ghcb backup_ghcb; + bool ghcb_active; + bool backup_ghcb_active; + long unsigned int dr7; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; 
+ long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct ghcb_state { + struct ghcb *ghcb; +}; + +enum 
chipset_type { + NOT_SUPPORTED = 0, + SUPPORTED = 1, +}; + +struct agp_version { + u16 major; + u16 minor; +}; + +struct agp_kern_info { + struct agp_version version; + struct pci_dev *device; + enum chipset_type chipset; + long unsigned int mode; + long unsigned int aper_base; + size_t aper_size; + int max_memory; + int current_memory; + bool cant_use_aperture; + long unsigned int page_mask; + const struct vm_operations_struct *vm_ops; +}; + +struct agp_bridge_data; + +struct pci_hostbridge_probe { + u32 bus; + u32 slot; + u32 vendor; + u32 device; +}; + +struct trace_print_flags { + long unsigned int mask; + const char *name; +}; + +enum tlb_flush_reason { + TLB_FLUSH_ON_TASK_SWITCH = 0, + TLB_REMOTE_SHOOTDOWN = 1, + TLB_LOCAL_SHOOTDOWN = 2, + TLB_LOCAL_MM_SHOOTDOWN = 3, + TLB_REMOTE_SEND_IPI = 4, + NR_TLB_FLUSH_REASONS = 5, +}; + +enum { + REGION_INTERSECTS = 0, + REGION_DISJOINT = 1, + REGION_MIXED = 2, +}; + +struct trace_event_raw_tlb_flush { + struct trace_entry ent; + int reason; + long unsigned int pages; + char __data[0]; +}; + +struct trace_event_data_offsets_tlb_flush {}; + +typedef void (*btf_trace_tlb_flush)(void *, int, long unsigned int); + +struct map_range { + long unsigned int start; + long unsigned int end; + unsigned int page_size_mask; +}; + +enum { + MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12, + SECTION_INFO = 12, + MIX_SECTION_INFO = 13, + NODE_INFO = 14, + MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = 14, +}; + +struct mhp_params { + struct vmem_altmap *altmap; + pgprot_t pgprot; +}; + +struct mem_section_usage { + long unsigned int subsection_map[1]; + long unsigned int pageblock_flags[0]; +}; + +struct mem_section { + long unsigned int section_mem_map; + struct mem_section_usage *usage; +}; + +enum kcore_type { + KCORE_TEXT = 0, + KCORE_VMALLOC = 1, + KCORE_RAM = 2, + KCORE_VMEMMAP = 3, + KCORE_USER = 4, + KCORE_OTHER = 5, + KCORE_REMAP = 6, +}; + +struct kcore_list { + struct list_head list; + long unsigned int addr; + long unsigned int vaddr; + size_t size; + int type; +}; + +struct hstate { + struct mutex resize_lock; + int next_nid_to_alloc; + int next_nid_to_free; + unsigned int order; + long unsigned int mask; + long unsigned int max_huge_pages; + long unsigned int nr_huge_pages; + long unsigned int free_huge_pages; + long unsigned int resv_huge_pages; + long unsigned int surplus_huge_pages; + long unsigned int nr_overcommit_huge_pages; + struct list_head hugepage_activelist; + struct list_head hugepage_freelists[1024]; + unsigned int nr_huge_pages_node[1024]; + unsigned int free_huge_pages_node[1024]; + unsigned int surplus_huge_pages_node[1024]; + struct cftype cgroup_files_dfl[7]; + struct cftype cgroup_files_legacy[9]; + char name[32]; +}; + +struct trace_event_raw_x86_exceptions { + struct trace_entry ent; + long unsigned int address; + long unsigned int ip; + long unsigned int error_code; + char __data[0]; +}; + +struct trace_event_data_offsets_x86_exceptions {}; + +typedef void (*btf_trace_page_fault_user)(void *, long unsigned int, struct pt_regs *, long unsigned int); + +typedef void (*btf_trace_page_fault_kernel)(void *, long unsigned int, struct pt_regs *, long unsigned int); + +enum { + IORES_MAP_SYSTEM_RAM = 1, + IORES_MAP_ENCRYPTED = 2, +}; + +struct ioremap_desc { + unsigned int flags; +}; + +typedef bool (*ex_handler_t)(const struct exception_table_entry *, struct pt_regs *, int, long unsigned int, long unsigned int); + +struct hugepage_subpool { + spinlock_t lock; + long int count; + long int max_hpages; + long int used_hpages; + struct hstate *hstate; + 
long int min_hpages; + long int rsv_hpages; +}; + +struct hugetlbfs_sb_info { + long int max_inodes; + long int free_inodes; + spinlock_t stat_lock; + struct hstate *hstate; + struct hugepage_subpool *spool; + kuid_t uid; + kgid_t gid; + umode_t mode; +}; + +struct exception_stacks { + char DF_stack_guard[0]; + char DF_stack[4096]; + char NMI_stack_guard[0]; + char NMI_stack[4096]; + char DB_stack_guard[0]; + char DB_stack[4096]; + char MCE_stack_guard[0]; + char MCE_stack[4096]; + char VC_stack_guard[0]; + char VC_stack[0]; + char VC2_stack_guard[0]; + char VC2_stack[0]; + char IST_top_guard[0]; +}; + +struct vm_event_state { + long unsigned int event[98]; +}; + +struct cpa_data { + long unsigned int *vaddr; + pgd_t *pgd; + pgprot_t mask_set; + pgprot_t mask_clr; + long unsigned int numpages; + long unsigned int curpage; + long unsigned int pfn; + unsigned int flags; + unsigned int force_split: 1; + unsigned int force_static_prot: 1; + unsigned int force_flush_all: 1; + struct page **pages; +}; + +enum cpa_warn { + CPA_CONFLICT = 0, + CPA_PROTECT = 1, + CPA_DETECT = 2, +}; + +typedef struct { + u64 val; +} pfn_t; + +struct memtype { + u64 start; + u64 end; + u64 subtree_max_end; + enum page_cache_mode type; + struct rb_node rb; +}; + +enum { + PAT_UC = 0, + PAT_WC = 1, + PAT_WT = 4, + PAT_WP = 5, + PAT_WB = 6, + PAT_UC_MINUS = 7, +}; + +struct pagerange_state { + long unsigned int cur_pfn; + int ram; + int not_ram; +}; + +struct rb_augment_callbacks { + void (*propagate)(struct rb_node *, struct rb_node *); + void (*copy)(struct rb_node *, struct rb_node *); + void (*rotate)(struct rb_node *, struct rb_node *); +}; + +enum { + MEMTYPE_EXACT_MATCH = 0, + MEMTYPE_END_MATCH = 1, +}; + +struct ptdump_range { + long unsigned int start; + long unsigned int end; +}; + +struct ptdump_state { + void (*note_page)(struct ptdump_state *, long unsigned int, int, u64); + void (*effective_prot)(struct ptdump_state *, int, u64); + const struct ptdump_range *range; +}; + +struct addr_marker; + +struct pg_state { + struct ptdump_state ptdump; + int level; + pgprotval_t current_prot; + pgprotval_t effective_prot; + pgprotval_t prot_levels[5]; + long unsigned int start_address; + const struct addr_marker *marker; + long unsigned int lines; + bool to_dmesg; + bool check_wx; + long unsigned int wx_pages; + struct seq_file *seq; +}; + +struct addr_marker { + long unsigned int start_address; + const char *name; + long unsigned int max_lines; +}; + +enum address_markers_idx { + USER_SPACE_NR = 0, + KERNEL_SPACE_NR = 1, + LDT_NR = 2, + LOW_KERNEL_NR = 3, + VMALLOC_START_NR = 4, + VMEMMAP_START_NR = 5, + CPU_ENTRY_AREA_NR = 6, + ESPFIX_START_NR = 7, + EFI_END_NR = 8, + HIGH_KERNEL_NR = 9, + MODULES_VADDR_NR = 10, + MODULES_END_NR = 11, + FIXADDR_START_NR = 12, + END_OF_SPACE_NR = 13, +}; + +typedef void (*rcu_callback_t)(struct callback_head *); + +struct kmmio_probe; + +typedef void (*kmmio_pre_handler_t)(struct kmmio_probe *, struct pt_regs *, long unsigned int); + +typedef void (*kmmio_post_handler_t)(struct kmmio_probe *, long unsigned int, struct pt_regs *); + +struct kmmio_probe { + struct list_head list; + long unsigned int addr; + long unsigned int len; + kmmio_pre_handler_t pre_handler; + kmmio_post_handler_t post_handler; + void *private; +}; + +struct kmmio_fault_page { + struct list_head list; + struct kmmio_fault_page *release_next; + long unsigned int addr; + pteval_t old_presence; + bool armed; + int count; + bool scheduled_for_release; +}; + +struct kmmio_delayed_release { + struct callback_head 
rcu; + struct kmmio_fault_page *release_list; +}; + +struct kmmio_context { + struct kmmio_fault_page *fpage; + struct kmmio_probe *probe; + long unsigned int saved_flags; + long unsigned int addr; + int active; +}; + +enum reason_type { + NOT_ME = 0, + NOTHING = 1, + REG_READ = 2, + REG_WRITE = 3, + IMM_WRITE = 4, + OTHERS = 5, +}; + +struct prefix_bits { + unsigned int shorted: 1; + unsigned int enlarged: 1; + unsigned int rexr: 1; + unsigned int rex: 1; +}; + +enum { + arg_AL = 0, + arg_CL = 1, + arg_DL = 2, + arg_BL = 3, + arg_AH = 4, + arg_CH = 5, + arg_DH = 6, + arg_BH = 7, + arg_AX = 0, + arg_CX = 1, + arg_DX = 2, + arg_BX = 3, + arg_SP = 4, + arg_BP = 5, + arg_SI = 6, + arg_DI = 7, + arg_R8 = 8, + arg_R9 = 9, + arg_R10 = 10, + arg_R11 = 11, + arg_R12 = 12, + arg_R13 = 13, + arg_R14 = 14, + arg_R15 = 15, +}; + +enum mm_io_opcode { + MMIO_READ = 1, + MMIO_WRITE = 2, + MMIO_PROBE = 3, + MMIO_UNPROBE = 4, + MMIO_UNKNOWN_OP = 5, +}; + +struct mmiotrace_rw { + resource_size_t phys; + long unsigned int value; + long unsigned int pc; + int map_id; + unsigned char opcode; + unsigned char width; +}; + +struct mmiotrace_map { + resource_size_t phys; + long unsigned int virt; + long unsigned int len; + int map_id; + unsigned char opcode; +}; + +struct trap_reason { + long unsigned int addr; + long unsigned int ip; + enum reason_type type; + int active_traces; +}; + +struct remap_trace { + struct list_head list; + struct kmmio_probe probe; + resource_size_t phys; + long unsigned int id; +}; + +struct numa_memblk { + u64 start; + u64 end; + int nid; +}; + +struct numa_meminfo { + int nr_blks; + struct numa_memblk blk[2048]; +}; + +struct acpi_srat_cpu_affinity { + struct acpi_subtable_header header; + u8 proximity_domain_lo; + u8 apic_id; + u32 flags; + u8 local_sapic_eid; + u8 proximity_domain_hi[3]; + u32 clock_domain; +}; + +struct acpi_srat_x2apic_cpu_affinity { + struct acpi_subtable_header header; + u16 reserved; + u32 proximity_domain; + u32 apic_id; + u32 flags; + u32 clock_domain; + u32 reserved2; +}; + +struct rnd_state { + __u32 s1; + __u32 s2; + __u32 s3; + __u32 s4; +}; + +struct kaslr_memory_region { + long unsigned int *base; + long unsigned int size_tb; +}; + +enum pti_mode { + PTI_AUTO = 0, + PTI_FORCE_OFF = 1, + PTI_FORCE_ON = 2, +}; + +enum pti_clone_level { + PTI_CLONE_PMD = 0, + PTI_CLONE_PTE = 1, +}; + +struct sme_populate_pgd_data { + void *pgtable_area; + pgd_t *pgd; + pmdval_t pmd_flags; + pteval_t pte_flags; + long unsigned int paddr; + long unsigned int vaddr; + long unsigned int vaddr_end; +}; + +struct crypto_template; + +struct crypto_spawn; + +struct crypto_instance { + struct crypto_alg alg; + struct crypto_template *tmpl; + union { + struct hlist_node list; + struct crypto_spawn *spawns; + }; + void *__ctx[0]; +}; + +struct crypto_spawn { + struct list_head list; + struct crypto_alg *alg; + union { + struct crypto_instance *inst; + struct crypto_spawn *next; + }; + const struct crypto_type *frontend; + u32 mask; + bool dead; + bool registered; +}; + +struct rtattr; + +struct crypto_template { + struct list_head list; + struct hlist_head instances; + struct module *module; + int (*create)(struct crypto_template *, struct rtattr **); + char name[128]; +}; + +struct shash_alg { + int (*init)(struct shash_desc *); + int (*update)(struct shash_desc *, const u8 *, unsigned int); + int (*final)(struct shash_desc *, u8 *); + int (*finup)(struct shash_desc *, const u8 *, unsigned int, u8 *); + int (*digest)(struct shash_desc *, const u8 *, unsigned int, u8 *); + int 
(*export)(struct shash_desc *, void *); + int (*import)(struct shash_desc *, const void *); + int (*setkey)(struct crypto_shash *, const u8 *, unsigned int); + int (*init_tfm)(struct crypto_shash *); + void (*exit_tfm)(struct crypto_shash *); + unsigned int descsize; + int: 32; + unsigned int digestsize; + unsigned int statesize; + struct crypto_alg base; +}; + +struct sigcontext_32 { + __u16 gs; + __u16 __gsh; + __u16 fs; + __u16 __fsh; + __u16 es; + __u16 __esh; + __u16 ds; + __u16 __dsh; + __u32 di; + __u32 si; + __u32 bp; + __u32 sp; + __u32 bx; + __u32 dx; + __u32 cx; + __u32 ax; + __u32 trapno; + __u32 err; + __u32 ip; + __u16 cs; + __u16 __csh; + __u32 flags; + __u32 sp_at_signal; + __u16 ss; + __u16 __ssh; + __u32 fpstate; + __u32 oldmask; + __u32 cr2; +}; + +struct ucontext_ia32 { + unsigned int uc_flags; + unsigned int uc_link; + compat_stack_t uc_stack; + struct sigcontext_32 uc_mcontext; + compat_sigset_t uc_sigmask; +}; + +struct sigframe_ia32 { + u32 pretcode; + int sig; + struct sigcontext_32 sc; + struct _fpstate_32 fpstate_unused; + unsigned int extramask[1]; + char retcode[8]; +}; + +struct rt_sigframe_ia32 { + u32 pretcode; + int sig; + u32 pinfo; + u32 puc; + compat_siginfo_t info; + struct ucontext_ia32 uc; + char retcode[8]; +} __attribute__((packed)); + +typedef struct { + efi_guid_t guid; + u64 table; +} efi_config_table_64_t; + +struct efi_memory_map_data { + phys_addr_t phys_map; + long unsigned int size; + long unsigned int desc_version; + long unsigned int desc_size; + long unsigned int flags; +}; + +struct efi_mem_range { + struct range range; + u64 attribute; +}; + +enum efi_rts_ids { + EFI_NONE = 0, + EFI_GET_TIME = 1, + EFI_SET_TIME = 2, + EFI_GET_WAKEUP_TIME = 3, + EFI_SET_WAKEUP_TIME = 4, + EFI_GET_VARIABLE = 5, + EFI_GET_NEXT_VARIABLE = 6, + EFI_SET_VARIABLE = 7, + EFI_QUERY_VARIABLE_INFO = 8, + EFI_GET_NEXT_HIGH_MONO_COUNT = 9, + EFI_RESET_SYSTEM = 10, + EFI_UPDATE_CAPSULE = 11, + EFI_QUERY_CAPSULE_CAPS = 12, +}; + +struct efi_runtime_work { + void *arg1; + void *arg2; + void *arg3; + void *arg4; + void *arg5; + efi_status_t status; + struct work_struct work; + enum efi_rts_ids efi_rts_id; + struct completion efi_rts_comp; +}; + +typedef struct { + efi_guid_t guid; + u32 table; +} efi_config_table_32_t; + +typedef union { + struct { + efi_guid_t guid; + void *table; + }; + efi_config_table_32_t mixed_mode; +} efi_config_table_t; + +typedef struct { + efi_guid_t guid; + long unsigned int *ptr; + const char name[16]; +} efi_config_table_type_t; + +typedef struct { + efi_table_hdr_t hdr; + u64 fw_vendor; + u32 fw_revision; + u32 __pad1; + u64 con_in_handle; + u64 con_in; + u64 con_out_handle; + u64 con_out; + u64 stderr_handle; + u64 stderr; + u64 runtime; + u64 boottime; + u32 nr_tables; + u32 __pad2; + u64 tables; +} efi_system_table_64_t; + +typedef struct { + u32 version; + u32 length; + u64 memory_protection_attribute; +} efi_properties_table_t; + +typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *); + +typedef u16 ucs2_char_t; + +struct pm_qos_request { + struct plist_node node; + struct pm_qos_constraints *qos; +}; + +struct semaphore { + raw_spinlock_t lock; + unsigned int count; + struct list_head wait_list; +}; + +enum uv_bios_cmd { + UV_BIOS_COMMON = 0, + UV_BIOS_GET_SN_INFO = 1, + UV_BIOS_FREQ_BASE = 2, + UV_BIOS_WATCHLIST_ALLOC = 3, + UV_BIOS_WATCHLIST_FREE = 4, + UV_BIOS_MEMPROTECT = 5, + UV_BIOS_GET_PARTITION_ADDR = 6, + UV_BIOS_SET_LEGACY_VGA_TARGET = 7, +}; + +union partition_info_u { + u64 val; + struct { + u64 
hub_version: 8; + u64 partition_id: 16; + u64 coherence_id: 16; + u64 region_size: 24; + }; +}; + +enum uv_memprotect { + UV_MEMPROT_RESTRICT_ACCESS = 0, + UV_MEMPROT_ALLOW_AMO = 1, + UV_MEMPROT_ALLOW_RW = 2, +}; + +struct uv_IO_APIC_route_entry { + __u64 vector: 8; + __u64 delivery_mode: 3; + __u64 dest_mode: 1; + __u64 delivery_status: 1; + __u64 polarity: 1; + __u64 __reserved_1: 1; + __u64 trigger: 1; + __u64 mask: 1; + __u64 __reserved_2: 15; + __u64 dest: 32; +}; + +enum { + UV_AFFINITY_ALL = 0, + UV_AFFINITY_NODE = 1, + UV_AFFINITY_CPU = 2, +}; + +struct uv_irq_2_mmr_pnode { + long unsigned int offset; + int pnode; +}; + +struct uv_rtc_timer_head { + spinlock_t lock; + int next_cpu; + int ncpus; + struct { + int lcpu; + u64 expires; + } cpu[0]; +}; + +struct uv_hub_nmi_s { + raw_spinlock_t nmi_lock; + atomic_t in_nmi; + atomic_t cpu_owner; + atomic_t read_mmr_count; + atomic_t nmi_count; + long unsigned int nmi_value; + bool hub_present; + bool pch_owner; +}; + +struct uv_cpu_nmi_s { + struct uv_hub_nmi_s *hub; + int state; + int pinging; + int queries; + int pings; +}; + +struct nmi_action { + char *action; + char *desc; +}; + +typedef char action_t[16]; + +struct init_nmi { + unsigned int offset; + unsigned int mask; + unsigned int data; +}; + +enum { + BPF_REG_0 = 0, + BPF_REG_1 = 1, + BPF_REG_2 = 2, + BPF_REG_3 = 3, + BPF_REG_4 = 4, + BPF_REG_5 = 5, + BPF_REG_6 = 6, + BPF_REG_7 = 7, + BPF_REG_8 = 8, + BPF_REG_9 = 9, + BPF_REG_10 = 10, + __MAX_BPF_REG = 11, +}; + +struct bpf_tramp_progs { + struct bpf_prog *progs[38]; + int nr_progs; +}; + +enum bpf_jit_poke_reason { + BPF_POKE_REASON_TAIL_CALL = 0, +}; + +struct bpf_array_aux { + enum bpf_prog_type type; + bool jited; + struct list_head poke_progs; + struct bpf_map *map; + struct mutex poke_mutex; + struct work_struct work; +}; + +struct bpf_array { + struct bpf_map map; + u32 elem_size; + u32 index_mask; + struct bpf_array_aux *aux; + union { + char value[0]; + void *ptrs[0]; + void *pptrs[0]; + }; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +enum bpf_text_poke_type { + BPF_MOD_CALL = 0, + BPF_MOD_JUMP = 1, +}; + +struct bpf_binary_header { + u32 pages; + int: 32; + u8 image[0]; +}; + +typedef void (*bpf_jit_fill_hole_t)(void *, unsigned int); + +struct jit_context { + int cleanup_addr; +}; + +struct x64_jit_data { + struct bpf_binary_header *header; + int *addrs; + u8 *image; + int proglen; + struct jit_context ctx; +}; + +enum tk_offsets { + TK_OFFS_REAL = 0, + TK_OFFS_BOOT = 1, + TK_OFFS_TAI = 2, + TK_OFFS_MAX = 3, +}; + +typedef long unsigned int vm_flags_t; + +struct clone_args { + __u64 flags; + __u64 pidfd; + __u64 child_tid; + __u64 parent_tid; + __u64 exit_signal; + __u64 stack; + __u64 stack_size; + __u64 tls; + __u64 set_tid; + __u64 set_tid_size; + __u64 cgroup; +}; + +struct fdtable { + unsigned int max_fds; + struct file **fd; + long unsigned int *close_on_exec; + long unsigned int *open_fds; + long unsigned int *full_fds_bits; + struct callback_head rcu; +}; + +struct files_struct { + atomic_t count; + bool resize_in_progress; + wait_queue_head_t resize_wait; + struct fdtable *fdt; + struct fdtable fdtab; + long: 64; + long: 64; + long: 64; + long: 64; + spinlock_t file_lock; + unsigned int next_fd; + long unsigned int close_on_exec_init[1]; + long unsigned int open_fds_init[1]; + long unsigned int full_fds_bits_init[1]; + struct file *fd_array[64]; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct robust_list { + struct robust_list *next; +}; + +struct 
robust_list_head { + struct robust_list list; + long int futex_offset; + struct robust_list *list_op_pending; +}; + +struct multiprocess_signals { + sigset_t signal; + struct hlist_node node; +}; + +typedef int (*proc_visitor)(struct task_struct *, void *); + +enum { + IOPRIO_CLASS_NONE = 0, + IOPRIO_CLASS_RT = 1, + IOPRIO_CLASS_BE = 2, + IOPRIO_CLASS_IDLE = 3, +}; + +enum { + FUTEX_STATE_OK = 0, + FUTEX_STATE_EXITING = 1, + FUTEX_STATE_DEAD = 2, +}; + +enum proc_hidepid { + HIDEPID_OFF = 0, + HIDEPID_NO_ACCESS = 1, + HIDEPID_INVISIBLE = 2, + HIDEPID_NOT_PTRACEABLE = 4, +}; + +enum proc_pidonly { + PROC_PIDONLY_OFF = 0, + PROC_PIDONLY_ON = 1, +}; + +struct proc_fs_info { + struct pid_namespace *pid_ns; + struct dentry *proc_self; + struct dentry *proc_thread_self; + kgid_t pid_gid; + enum proc_hidepid hide_pid; + enum proc_pidonly pidonly; +}; + +struct trace_event_raw_task_newtask { + struct trace_entry ent; + pid_t pid; + char comm[16]; + long unsigned int clone_flags; + short int oom_score_adj; + char __data[0]; +}; + +struct trace_event_raw_task_rename { + struct trace_entry ent; + pid_t pid; + char oldcomm[16]; + char newcomm[16]; + short int oom_score_adj; + char __data[0]; +}; + +struct trace_event_data_offsets_task_newtask {}; + +struct trace_event_data_offsets_task_rename {}; + +typedef void (*btf_trace_task_newtask)(void *, struct task_struct *, long unsigned int); + +typedef void (*btf_trace_task_rename)(void *, struct task_struct *, const char *); + +struct taint_flag { + char c_true; + char c_false; + bool module; +}; + +enum ftrace_dump_mode { + DUMP_NONE = 0, + DUMP_ALL = 1, + DUMP_ORIG = 2, +}; + +enum kmsg_dump_reason { + KMSG_DUMP_UNDEF = 0, + KMSG_DUMP_PANIC = 1, + KMSG_DUMP_OOPS = 2, + KMSG_DUMP_EMERG = 3, + KMSG_DUMP_SHUTDOWN = 4, + KMSG_DUMP_MAX = 5, +}; + +enum con_flush_mode { + CONSOLE_FLUSH_PENDING = 0, + CONSOLE_REPLAY_ALL = 1, +}; + +struct warn_args { + const char *fmt; + va_list args; +}; + +struct smp_hotplug_thread { + struct task_struct **store; + struct list_head list; + int (*thread_should_run)(unsigned int); + void (*thread_fn)(unsigned int); + void (*create)(unsigned int); + void (*setup)(unsigned int); + void (*cleanup)(unsigned int, bool); + void (*park)(unsigned int); + void (*unpark)(unsigned int); + bool selfparking; + const char *thread_comm; +}; + +struct trace_event_raw_cpuhp_enter { + struct trace_entry ent; + unsigned int cpu; + int target; + int idx; + void *fun; + char __data[0]; +}; + +struct trace_event_raw_cpuhp_multi_enter { + struct trace_entry ent; + unsigned int cpu; + int target; + int idx; + void *fun; + char __data[0]; +}; + +struct trace_event_raw_cpuhp_exit { + struct trace_entry ent; + unsigned int cpu; + int state; + int idx; + int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_cpuhp_enter {}; + +struct trace_event_data_offsets_cpuhp_multi_enter {}; + +struct trace_event_data_offsets_cpuhp_exit {}; + +typedef void (*btf_trace_cpuhp_enter)(void *, unsigned int, int, int, int (*)(unsigned int)); + +typedef void (*btf_trace_cpuhp_multi_enter)(void *, unsigned int, int, int, int (*)(unsigned int, struct hlist_node *), struct hlist_node *); + +typedef void (*btf_trace_cpuhp_exit)(void *, unsigned int, int, int, int); + +struct cpuhp_cpu_state { + enum cpuhp_state state; + enum cpuhp_state target; + enum cpuhp_state fail; + struct task_struct *thread; + bool should_run; + bool rollback; + bool single; + bool bringup; + int cpu; + struct hlist_node *node; + struct hlist_node *last; + enum cpuhp_state cb_state; + int 
result; + struct completion done_up; + struct completion done_down; +}; + +struct cpuhp_step { + const char *name; + union { + int (*single)(unsigned int); + int (*multi)(unsigned int, struct hlist_node *); + } startup; + union { + int (*single)(unsigned int); + int (*multi)(unsigned int, struct hlist_node *); + } teardown; + struct hlist_head list; + bool cant_stop; + bool multi_instance; +}; + +enum cpu_mitigations { + CPU_MITIGATIONS_OFF = 0, + CPU_MITIGATIONS_AUTO = 1, + CPU_MITIGATIONS_AUTO_NOSMT = 2, +}; + +struct __kernel_old_timeval { + __kernel_long_t tv_sec; + __kernel_long_t tv_usec; +}; + +struct old_timeval32 { + old_time32_t tv_sec; + s32 tv_usec; +}; + +struct rusage { + struct __kernel_old_timeval ru_utime; + struct __kernel_old_timeval ru_stime; + __kernel_long_t ru_maxrss; + __kernel_long_t ru_ixrss; + __kernel_long_t ru_idrss; + __kernel_long_t ru_isrss; + __kernel_long_t ru_minflt; + __kernel_long_t ru_majflt; + __kernel_long_t ru_nswap; + __kernel_long_t ru_inblock; + __kernel_long_t ru_oublock; + __kernel_long_t ru_msgsnd; + __kernel_long_t ru_msgrcv; + __kernel_long_t ru_nsignals; + __kernel_long_t ru_nvcsw; + __kernel_long_t ru_nivcsw; +}; + +typedef struct {} mm_segment_t; + +struct compat_rusage { + struct old_timeval32 ru_utime; + struct old_timeval32 ru_stime; + compat_long_t ru_maxrss; + compat_long_t ru_ixrss; + compat_long_t ru_idrss; + compat_long_t ru_isrss; + compat_long_t ru_minflt; + compat_long_t ru_majflt; + compat_long_t ru_nswap; + compat_long_t ru_inblock; + compat_long_t ru_oublock; + compat_long_t ru_msgsnd; + compat_long_t ru_msgrcv; + compat_long_t ru_nsignals; + compat_long_t ru_nvcsw; + compat_long_t ru_nivcsw; +}; + +struct waitid_info { + pid_t pid; + uid_t uid; + int status; + int cause; +}; + +struct wait_opts { + enum pid_type wo_type; + int wo_flags; + struct pid *wo_pid; + struct waitid_info *wo_info; + int wo_stat; + struct rusage *wo_rusage; + wait_queue_entry_t child_wait; + int notask_error; +}; + +struct softirq_action { + void (*action)(struct softirq_action *); +}; + +struct tasklet_struct { + struct tasklet_struct *next; + long unsigned int state; + atomic_t count; + bool use_callback; + union { + void (*func)(long unsigned int); + void (*callback)(struct tasklet_struct *); + }; + long unsigned int data; +}; + +enum { + TASKLET_STATE_SCHED = 0, + TASKLET_STATE_RUN = 1, +}; + +struct kernel_stat { + long unsigned int irqs_sum; + unsigned int softirqs[10]; +}; + +struct wait_bit_key { + void *flags; + int bit_nr; + long unsigned int timeout; +}; + +struct wait_bit_queue_entry { + struct wait_bit_key key; + struct wait_queue_entry wq_entry; +}; + +struct trace_event_raw_irq_handler_entry { + struct trace_entry ent; + int irq; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_irq_handler_exit { + struct trace_entry ent; + int irq; + int ret; + char __data[0]; +}; + +struct trace_event_raw_softirq { + struct trace_entry ent; + unsigned int vec; + char __data[0]; +}; + +struct trace_event_data_offsets_irq_handler_entry { + u32 name; +}; + +struct trace_event_data_offsets_irq_handler_exit {}; + +struct trace_event_data_offsets_softirq {}; + +typedef void (*btf_trace_irq_handler_entry)(void *, int, struct irqaction *); + +typedef void (*btf_trace_irq_handler_exit)(void *, int, struct irqaction *, int); + +typedef void (*btf_trace_softirq_entry)(void *, unsigned int); + +typedef void (*btf_trace_softirq_exit)(void *, unsigned int); + +typedef void (*btf_trace_softirq_raise)(void *, unsigned int); + +struct 
tasklet_head { + struct tasklet_struct *head; + struct tasklet_struct **tail; +}; + +struct pseudo_fs_context { + const struct super_operations *ops; + const struct xattr_handler **xattr; + const struct dentry_operations *dops; + long unsigned int magic; +}; + +typedef void (*dr_release_t)(struct device *, void *); + +typedef int (*dr_match_t)(struct device *, void *, void *); + +struct resource_entry { + struct list_head node; + struct resource *res; + resource_size_t offset; + struct resource __res; +}; + +struct resource_constraint { + resource_size_t min; + resource_size_t max; + resource_size_t align; + resource_size_t (*alignf)(void *, const struct resource *, resource_size_t, resource_size_t); + void *alignf_data; +}; + +enum { + MAX_IORES_LEVEL = 5, +}; + +struct region_devres { + struct resource *parent; + resource_size_t start; + resource_size_t n; +}; + +typedef __kernel_clock_t clock_t; + +struct dentry_stat_t { + long int nr_dentry; + long int nr_unused; + long int age_limit; + long int want_pages; + long int nr_negative; + long int dummy; +}; + +struct files_stat_struct { + long unsigned int nr_files; + long unsigned int nr_free_files; + long unsigned int max_files; +}; + +struct inodes_stat_t { + long int nr_inodes; + long int nr_unused; + long int dummy[5]; +}; + +enum sysctl_writes_mode { + SYSCTL_WRITES_LEGACY = 4294967295, + SYSCTL_WRITES_WARN = 0, + SYSCTL_WRITES_STRICT = 1, +}; + +struct do_proc_dointvec_minmax_conv_param { + int *min; + int *max; +}; + +struct do_proc_douintvec_minmax_conv_param { + unsigned int *min; + unsigned int *max; +}; + +struct __user_cap_header_struct { + __u32 version; + int pid; +}; + +typedef struct __user_cap_header_struct *cap_user_header_t; + +struct __user_cap_data_struct { + __u32 effective; + __u32 permitted; + __u32 inheritable; +}; + +typedef struct __user_cap_data_struct *cap_user_data_t; + +struct sigqueue { + struct list_head list; + int flags; + kernel_siginfo_t info; + struct user_struct *user; +}; + +typedef int wait_bit_action_f(struct wait_bit_key *, int); + +struct ptrace_peeksiginfo_args { + __u64 off; + __u32 flags; + __s32 nr; +}; + +struct ptrace_syscall_info { + __u8 op; + __u8 pad[3]; + __u32 arch; + __u64 instruction_pointer; + __u64 stack_pointer; + union { + struct { + __u64 nr; + __u64 args[6]; + } entry; + struct { + __s64 rval; + __u8 is_error; + } exit; + struct { + __u64 nr; + __u64 args[6]; + __u32 ret_data; + } seccomp; + }; +}; + +struct ptrace_rseq_configuration { + __u64 rseq_abi_pointer; + __u32 rseq_abi_size; + __u32 signature; + __u32 flags; + __u32 pad; +}; + +struct compat_iovec { + compat_uptr_t iov_base; + compat_size_t iov_len; +}; + +typedef long unsigned int old_sigset_t; + +enum siginfo_layout { + SIL_KILL = 0, + SIL_TIMER = 1, + SIL_POLL = 2, + SIL_FAULT = 3, + SIL_FAULT_TRAPNO = 4, + SIL_FAULT_MCEERR = 5, + SIL_FAULT_BNDERR = 6, + SIL_FAULT_PKUERR = 7, + SIL_PERF_EVENT = 8, + SIL_CHLD = 9, + SIL_RT = 10, + SIL_SYS = 11, +}; + +struct fd { + struct file *file; + unsigned int flags; +}; + +typedef u32 compat_old_sigset_t; + +struct compat_sigaction { + compat_uptr_t sa_handler; + compat_ulong_t sa_flags; + compat_uptr_t sa_restorer; + compat_sigset_t sa_mask; +}; + +struct compat_old_sigaction { + compat_uptr_t sa_handler; + compat_old_sigset_t sa_mask; + compat_ulong_t sa_flags; + compat_uptr_t sa_restorer; +}; + +enum { + TRACE_SIGNAL_DELIVERED = 0, + TRACE_SIGNAL_IGNORED = 1, + TRACE_SIGNAL_ALREADY_PENDING = 2, + TRACE_SIGNAL_OVERFLOW_FAIL = 3, + TRACE_SIGNAL_LOSE_INFO = 4, +}; + +struct 
trace_event_raw_signal_generate { + struct trace_entry ent; + int sig; + int errno; + int code; + char comm[16]; + pid_t pid; + int group; + int result; + char __data[0]; +}; + +struct trace_event_raw_signal_deliver { + struct trace_entry ent; + int sig; + int errno; + int code; + long unsigned int sa_handler; + long unsigned int sa_flags; + char __data[0]; +}; + +struct trace_event_data_offsets_signal_generate {}; + +struct trace_event_data_offsets_signal_deliver {}; + +typedef void (*btf_trace_signal_generate)(void *, int, struct kernel_siginfo *, struct task_struct *, int, int); + +typedef void (*btf_trace_signal_deliver)(void *, int, struct kernel_siginfo *, struct k_sigaction *); + +struct sysinfo { + __kernel_long_t uptime; + __kernel_ulong_t loads[3]; + __kernel_ulong_t totalram; + __kernel_ulong_t freeram; + __kernel_ulong_t sharedram; + __kernel_ulong_t bufferram; + __kernel_ulong_t totalswap; + __kernel_ulong_t freeswap; + __u16 procs; + __u16 pad; + __kernel_ulong_t totalhigh; + __kernel_ulong_t freehigh; + __u32 mem_unit; + char _f[0]; +}; + +enum { + PER_LINUX = 0, + PER_LINUX_32BIT = 8388608, + PER_LINUX_FDPIC = 524288, + PER_SVR4 = 68157441, + PER_SVR3 = 83886082, + PER_SCOSVR3 = 117440515, + PER_OSR5 = 100663299, + PER_WYSEV386 = 83886084, + PER_ISCR4 = 67108869, + PER_BSD = 6, + PER_SUNOS = 67108870, + PER_XENIX = 83886087, + PER_LINUX32 = 8, + PER_LINUX32_3GB = 134217736, + PER_IRIX32 = 67108873, + PER_IRIXN32 = 67108874, + PER_IRIX64 = 67108875, + PER_RISCOS = 12, + PER_SOLARIS = 67108877, + PER_UW7 = 68157454, + PER_OSF4 = 15, + PER_HPUX = 16, + PER_MASK = 255, +}; + +struct rlimit64 { + __u64 rlim_cur; + __u64 rlim_max; +}; + +struct oldold_utsname { + char sysname[9]; + char nodename[9]; + char release[9]; + char version[9]; + char machine[9]; +}; + +struct old_utsname { + char sysname[65]; + char nodename[65]; + char release[65]; + char version[65]; + char machine[65]; +}; + +enum uts_proc { + UTS_PROC_OSTYPE = 0, + UTS_PROC_OSRELEASE = 1, + UTS_PROC_VERSION = 2, + UTS_PROC_HOSTNAME = 3, + UTS_PROC_DOMAINNAME = 4, +}; + +struct prctl_mm_map { + __u64 start_code; + __u64 end_code; + __u64 start_data; + __u64 end_data; + __u64 start_brk; + __u64 brk; + __u64 start_stack; + __u64 arg_start; + __u64 arg_end; + __u64 env_start; + __u64 env_end; + __u64 *auxv; + __u32 auxv_size; + __u32 exe_fd; +}; + +struct compat_tms { + compat_clock_t tms_utime; + compat_clock_t tms_stime; + compat_clock_t tms_cutime; + compat_clock_t tms_cstime; +}; + +struct compat_rlimit { + compat_ulong_t rlim_cur; + compat_ulong_t rlim_max; +}; + +struct tms { + __kernel_clock_t tms_utime; + __kernel_clock_t tms_stime; + __kernel_clock_t tms_cutime; + __kernel_clock_t tms_cstime; +}; + +struct getcpu_cache { + long unsigned int blob[16]; +}; + +struct compat_sysinfo { + s32 uptime; + u32 loads[3]; + u32 totalram; + u32 freeram; + u32 sharedram; + u32 bufferram; + u32 totalswap; + u32 freeswap; + u16 procs; + u16 pad; + u32 totalhigh; + u32 freehigh; + u32 mem_unit; + char _f[8]; +}; + +struct wq_flusher; + +struct worker; + +struct workqueue_attrs; + +struct pool_workqueue; + +struct wq_device; + +struct workqueue_struct { + struct list_head pwqs; + struct list_head list; + struct mutex mutex; + int work_color; + int flush_color; + atomic_t nr_pwqs_to_flush; + struct wq_flusher *first_flusher; + struct list_head flusher_queue; + struct list_head flusher_overflow; + struct list_head maydays; + struct worker *rescuer; + int nr_drainers; + int saved_max_active; + struct workqueue_attrs *unbound_attrs; 
+ struct pool_workqueue *dfl_pwq; + struct wq_device *wq_dev; + char name[24]; + struct callback_head rcu; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + unsigned int flags; + struct pool_workqueue *cpu_pwqs; + struct pool_workqueue *numa_pwq_tbl[0]; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct workqueue_attrs { + int nice; + cpumask_var_t cpumask; + bool no_numa; +}; + +struct execute_work { + struct work_struct work; +}; + +enum { + WQ_UNBOUND = 2, + WQ_FREEZABLE = 4, + WQ_MEM_RECLAIM = 8, + WQ_HIGHPRI = 16, + WQ_CPU_INTENSIVE = 32, + WQ_SYSFS = 64, + WQ_POWER_EFFICIENT = 128, + __WQ_DRAINING = 65536, + __WQ_ORDERED = 131072, + __WQ_LEGACY = 262144, + __WQ_ORDERED_EXPLICIT = 524288, + WQ_MAX_ACTIVE = 512, + WQ_MAX_UNBOUND_PER_CPU = 4, + WQ_DFL_ACTIVE = 256, +}; + +enum xa_lock_type { + XA_LOCK_IRQ = 1, + XA_LOCK_BH = 2, +}; + +struct ida { + struct xarray xa; +}; + +struct __una_u32 { + u32 x; +}; + +struct worker_pool; + +struct worker { + union { + struct list_head entry; + struct hlist_node hentry; + }; + struct work_struct *current_work; + work_func_t current_func; + struct pool_workqueue *current_pwq; + struct list_head scheduled; + struct task_struct *task; + struct worker_pool *pool; + struct list_head node; + long unsigned int last_active; + unsigned int flags; + int id; + int sleeping; + char desc[24]; + struct workqueue_struct *rescue_wq; + work_func_t last_func; +}; + +struct pool_workqueue { + struct worker_pool *pool; + struct workqueue_struct *wq; + int work_color; + int flush_color; + int refcnt; + int nr_in_flight[15]; + int nr_active; + int max_active; + struct list_head delayed_works; + struct list_head pwqs_node; + struct list_head mayday_node; + struct work_struct unbound_release_work; + struct callback_head rcu; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct worker_pool { + raw_spinlock_t lock; + int cpu; + int node; + int id; + unsigned int flags; + long unsigned int watchdog_ts; + struct list_head worklist; + int nr_workers; + int nr_idle; + struct list_head idle_list; + struct timer_list idle_timer; + struct timer_list mayday_timer; + struct hlist_head busy_hash[64]; + struct worker *manager; + struct list_head workers; + struct completion *detach_completion; + struct ida worker_ida; + struct workqueue_attrs *attrs; + struct hlist_node hash_node; + int refcnt; + long: 32; + long: 64; + long: 64; + long: 64; + atomic_t nr_running; + struct callback_head rcu; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +enum { + POOL_MANAGER_ACTIVE = 1, + POOL_DISASSOCIATED = 4, + WORKER_DIE = 2, + WORKER_IDLE = 4, + WORKER_PREP = 8, + WORKER_CPU_INTENSIVE = 64, + WORKER_UNBOUND = 128, + WORKER_REBOUND = 256, + WORKER_NOT_RUNNING = 456, + NR_STD_WORKER_POOLS = 2, + UNBOUND_POOL_HASH_ORDER = 6, + BUSY_WORKER_HASH_ORDER = 6, + MAX_IDLE_WORKERS_RATIO = 4, + IDLE_WORKER_TIMEOUT = 75000, + MAYDAY_INITIAL_TIMEOUT = 2, + MAYDAY_INTERVAL = 25, + CREATE_COOLDOWN = 250, + RESCUER_NICE_LEVEL = 4294967276, + HIGHPRI_NICE_LEVEL = 4294967276, + WQ_NAME_LEN = 24, +}; + +struct wq_flusher { + struct list_head list; + int flush_color; + struct completion done; +}; + +struct wq_device { + struct workqueue_struct *wq; + struct device dev; +}; + +struct trace_event_raw_workqueue_queue_work { + struct trace_entry ent; + void *work; + void *function; + u32 __data_loc_workqueue; + unsigned int req_cpu; + unsigned int cpu; + char __data[0]; +}; + +struct 
trace_event_raw_workqueue_activate_work { + struct trace_entry ent; + void *work; + char __data[0]; +}; + +struct trace_event_raw_workqueue_execute_start { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct trace_event_raw_workqueue_execute_end { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct trace_event_data_offsets_workqueue_queue_work { + u32 workqueue; +}; + +struct trace_event_data_offsets_workqueue_activate_work {}; + +struct trace_event_data_offsets_workqueue_execute_start {}; + +struct trace_event_data_offsets_workqueue_execute_end {}; + +typedef void (*btf_trace_workqueue_queue_work)(void *, unsigned int, struct pool_workqueue *, struct work_struct *); + +typedef void (*btf_trace_workqueue_activate_work)(void *, struct work_struct *); + +typedef void (*btf_trace_workqueue_execute_start)(void *, struct work_struct *); + +typedef void (*btf_trace_workqueue_execute_end)(void *, struct work_struct *, work_func_t); + +struct wq_barrier { + struct work_struct work; + struct completion done; + struct task_struct *task; +}; + +struct cwt_wait { + wait_queue_entry_t wait; + struct work_struct *work; +}; + +struct apply_wqattrs_ctx { + struct workqueue_struct *wq; + struct workqueue_attrs *attrs; + struct list_head list; + struct pool_workqueue *dfl_pwq; + struct pool_workqueue *pwq_tbl[0]; +}; + +struct work_for_cpu { + struct work_struct work; + long int (*fn)(void *); + void *arg; + long int ret; +}; + +typedef void (*task_work_func_t)(struct callback_head *); + +enum { + KERNEL_PARAM_OPS_FL_NOARG = 1, +}; + +enum { + KERNEL_PARAM_FL_UNSAFE = 1, + KERNEL_PARAM_FL_HWPARAM = 2, +}; + +struct param_attribute { + struct module_attribute mattr; + const struct kernel_param *param; +}; + +struct module_param_attrs { + unsigned int num; + struct attribute_group grp; + struct param_attribute attrs[0]; +}; + +struct module_version_attribute { + struct module_attribute mattr; + const char *module_name; + const char *version; +}; + +struct kmalloced_param { + struct list_head list; + char val[0]; +}; + +struct sched_param { + int sched_priority; +}; + +enum { + __PERCPU_REF_ATOMIC = 1, + __PERCPU_REF_DEAD = 2, + __PERCPU_REF_ATOMIC_DEAD = 3, + __PERCPU_REF_FLAG_BITS = 2, +}; + +struct kthread_work; + +typedef void (*kthread_work_func_t)(struct kthread_work *); + +struct kthread_worker; + +struct kthread_work { + struct list_head node; + kthread_work_func_t func; + struct kthread_worker *worker; + int canceling; +}; + +enum { + KTW_FREEZABLE = 1, +}; + +struct kthread_worker { + unsigned int flags; + raw_spinlock_t lock; + struct list_head work_list; + struct list_head delayed_work_list; + struct task_struct *task; + struct kthread_work *current_work; +}; + +struct kthread_delayed_work { + struct kthread_work work; + struct timer_list timer; +}; + +enum { + CSS_NO_REF = 1, + CSS_ONLINE = 2, + CSS_RELEASED = 4, + CSS_VISIBLE = 8, + CSS_DYING = 16, +}; + +struct kthread_create_info { + int (*threadfn)(void *); + void *data; + int node; + struct task_struct *result; + struct completion *done; + struct list_head list; +}; + +struct kthread { + long unsigned int flags; + unsigned int cpu; + int (*threadfn)(void *); + void *data; + mm_segment_t oldfs; + struct completion parked; + struct completion exited; + struct cgroup_subsys_state *blkcg_css; +}; + +enum KTHREAD_BITS { + KTHREAD_IS_PER_CPU = 0, + KTHREAD_SHOULD_STOP = 1, + KTHREAD_SHOULD_PARK = 2, +}; + +struct kthread_flush_work { + struct kthread_work work; + struct 
completion done; +}; + +struct pt_regs___2; + +struct ipc_ids { + int in_use; + short unsigned int seq; + struct rw_semaphore rwsem; + struct idr ipcs_idr; + int max_idx; + int last_idx; + int next_id; + struct rhashtable key_ht; +}; + +struct ipc_namespace { + struct ipc_ids ids[3]; + int sem_ctls[4]; + int used_sems; + unsigned int msg_ctlmax; + unsigned int msg_ctlmnb; + unsigned int msg_ctlmni; + atomic_t msg_bytes; + atomic_t msg_hdrs; + size_t shm_ctlmax; + size_t shm_ctlall; + long unsigned int shm_tot; + int shm_ctlmni; + int shm_rmid_forced; + struct notifier_block ipcns_nb; + struct vfsmount *mq_mnt; + unsigned int mq_queues_count; + unsigned int mq_queues_max; + unsigned int mq_msg_max; + unsigned int mq_msgsize_max; + unsigned int mq_msg_default; + unsigned int mq_msgsize_default; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct llist_node mnt_llist; + struct ns_common ns; +}; + +struct srcu_notifier_head { + struct mutex mutex; + struct srcu_struct srcu; + struct notifier_block *head; +}; + +struct lsmblob { + u32 secid[4]; +}; + +enum what { + PROC_EVENT_NONE = 0, + PROC_EVENT_FORK = 1, + PROC_EVENT_EXEC = 2, + PROC_EVENT_UID = 4, + PROC_EVENT_GID = 64, + PROC_EVENT_SID = 128, + PROC_EVENT_PTRACE = 256, + PROC_EVENT_COMM = 512, + PROC_EVENT_COREDUMP = 1073741824, + PROC_EVENT_EXIT = 2147483648, +}; + +struct async_entry { + struct list_head domain_list; + struct list_head global_list; + struct work_struct work; + async_cookie_t cookie; + async_func_t func; + void *data; + struct async_domain *domain; +}; + +struct smpboot_thread_data { + unsigned int cpu; + unsigned int status; + struct smp_hotplug_thread *ht; +}; + +enum { + HP_THREAD_NONE = 0, + HP_THREAD_ACTIVE = 1, + HP_THREAD_PARKED = 2, +}; + +struct umd_info { + const char *driver_name; + struct file *pipe_to_umh; + struct file *pipe_from_umh; + struct path wd; + struct pid *tgid; +}; + +struct pin_cookie {}; + +struct preempt_notifier; + +struct preempt_ops { + void (*sched_in)(struct preempt_notifier *, int); + void (*sched_out)(struct preempt_notifier *, struct task_struct *); +}; + +struct preempt_notifier { + struct hlist_node link; + struct preempt_ops *ops; +}; + +enum { + CSD_FLAG_LOCK = 1, + IRQ_WORK_PENDING = 1, + IRQ_WORK_BUSY = 2, + IRQ_WORK_LAZY = 4, + IRQ_WORK_HARD_IRQ = 8, + IRQ_WORK_CLAIMED = 3, + CSD_TYPE_ASYNC = 0, + CSD_TYPE_SYNC = 16, + CSD_TYPE_IRQ_WORK = 32, + CSD_TYPE_TTWU = 48, + CSD_FLAG_TYPE_MASK = 240, +}; + +typedef struct __call_single_data call_single_data_t; + +struct dl_bw { + raw_spinlock_t lock; + u64 bw; + u64 total_bw; +}; + +struct cpudl_item; + +struct cpudl { + raw_spinlock_t lock; + int size; + cpumask_var_t free_cpus; + struct cpudl_item *elements; +}; + +struct cpupri_vec { + atomic_t count; + cpumask_var_t mask; +}; + +struct cpupri { + struct cpupri_vec pri_to_cpu[101]; + int *cpu_to_pri; +}; + +struct perf_domain; + +struct root_domain { + atomic_t refcount; + atomic_t rto_count; + struct callback_head rcu; + cpumask_var_t span; + cpumask_var_t online; + int overload; + int overutilized; + cpumask_var_t dlo_mask; + atomic_t dlo_count; + struct dl_bw dl_bw; + struct cpudl cpudl; + u64 visit_gen; + struct irq_work rto_push_work; + raw_spinlock_t rto_lock; + int rto_loop; + int rto_cpu; + atomic_t rto_loop_next; + atomic_t rto_loop_start; + cpumask_var_t rto_mask; + struct cpupri cpupri; + long unsigned int max_cpu_capacity; + struct perf_domain *pd; +}; + +struct cfs_rq { + struct load_weight load; + unsigned int nr_running; + unsigned int h_nr_running; + 
unsigned int idle_h_nr_running; + u64 exec_clock; + u64 min_vruntime; + struct rb_root_cached tasks_timeline; + struct sched_entity *curr; + struct sched_entity *next; + struct sched_entity *last; + struct sched_entity *skip; + unsigned int nr_spread_over; + long: 32; + long: 64; + long: 64; + long: 64; + struct sched_avg avg; + struct { + raw_spinlock_t lock; + int nr; + long unsigned int load_avg; + long unsigned int util_avg; + long unsigned int runnable_avg; + long: 64; + long: 64; + long: 64; + long: 64; + } removed; + long unsigned int tg_load_avg_contrib; + long int propagate; + long int prop_runnable_sum; + long unsigned int h_load; + u64 last_h_load_update; + struct sched_entity *h_load_next; + struct rq *rq; + int on_list; + struct list_head leaf_cfs_rq_list; + struct task_group *tg; + int runtime_enabled; + s64 runtime_remaining; + u64 throttled_clock; + u64 throttled_clock_task; + u64 throttled_clock_task_time; + int throttled; + int throttle_count; + struct list_head throttled_list; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct cfs_bandwidth { + raw_spinlock_t lock; + ktime_t period; + u64 quota; + u64 runtime; + s64 hierarchical_quota; + u8 idle; + u8 period_active; + u8 slack_started; + struct hrtimer period_timer; + struct hrtimer slack_timer; + struct list_head throttled_cfs_rq; + int nr_periods; + int nr_throttled; + u64 throttled_time; +}; + +struct task_group { + struct cgroup_subsys_state css; + struct sched_entity **se; + struct cfs_rq **cfs_rq; + long unsigned int shares; + long: 64; + long: 64; + long: 64; + long: 64; + atomic_long_t load_avg; + struct callback_head rcu; + struct list_head list; + struct task_group *parent; + struct list_head siblings; + struct list_head children; + struct autogroup *autogroup; + struct cfs_bandwidth cfs_bandwidth; + unsigned int uclamp_pct[2]; + struct uclamp_se uclamp_req[2]; + struct uclamp_se uclamp[2]; +}; + +struct sched_domain_attr { + int relax_domain_level; +}; + +struct sched_group { + struct sched_group *next; + atomic_t ref; + unsigned int group_weight; + struct sched_group_capacity *sgc; + int asym_prefer_cpu; + long unsigned int cpumask[0]; +}; + +struct sched_group_capacity { + atomic_t ref; + long unsigned int capacity; + long unsigned int min_capacity; + long unsigned int max_capacity; + long unsigned int next_update; + int imbalance; + int id; + long unsigned int cpumask[0]; +}; + +struct autogroup { + struct kref kref; + struct task_group *tg; + struct rw_semaphore lock; + long unsigned int id; + int nice; +}; + +enum ctx_state { + CONTEXT_DISABLED = 4294967295, + CONTEXT_KERNEL = 0, + CONTEXT_USER = 1, + CONTEXT_GUEST = 2, +}; + +struct kernel_cpustat { + u64 cpustat[10]; +}; + +enum { + MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = 1, + MEMBARRIER_STATE_PRIVATE_EXPEDITED = 2, + MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = 4, + MEMBARRIER_STATE_GLOBAL_EXPEDITED = 8, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = 16, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = 32, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY = 64, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ = 128, +}; + +enum { + CFTYPE_ONLY_ON_ROOT = 1, + CFTYPE_NOT_ON_ROOT = 2, + CFTYPE_NS_DELEGATABLE = 4, + CFTYPE_NO_PREFIX = 8, + CFTYPE_WORLD_WRITABLE = 16, + CFTYPE_DEBUG = 32, + __CFTYPE_ONLY_ON_DFL = 65536, + __CFTYPE_NOT_ON_DFL = 131072, +}; + +struct css_task_iter { + struct cgroup_subsys *ss; + unsigned int flags; + struct list_head *cset_pos; + struct list_head *cset_head; + struct list_head *tcset_pos; + struct 
list_head *tcset_head; + struct list_head *task_pos; + struct list_head *cur_tasks_head; + struct css_set *cur_cset; + struct css_set *cur_dcset; + struct task_struct *cur_task; + struct list_head iters_node; +}; + +struct trace_event_raw_sched_kthread_stop { + struct trace_entry ent; + char comm[16]; + pid_t pid; + char __data[0]; +}; + +struct trace_event_raw_sched_kthread_stop_ret { + struct trace_entry ent; + int ret; + char __data[0]; +}; + +struct trace_event_raw_sched_kthread_work_queue_work { + struct trace_entry ent; + void *work; + void *function; + void *worker; + char __data[0]; +}; + +struct trace_event_raw_sched_kthread_work_execute_start { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct trace_event_raw_sched_kthread_work_execute_end { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct trace_event_raw_sched_wakeup_template { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int prio; + int success; + int target_cpu; + char __data[0]; +}; + +struct trace_event_raw_sched_switch { + struct trace_entry ent; + char prev_comm[16]; + pid_t prev_pid; + int prev_prio; + long int prev_state; + char next_comm[16]; + pid_t next_pid; + int next_prio; + char __data[0]; +}; + +struct trace_event_raw_sched_migrate_task { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int prio; + int orig_cpu; + int dest_cpu; + char __data[0]; +}; + +struct trace_event_raw_sched_process_template { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int prio; + char __data[0]; +}; + +struct trace_event_raw_sched_process_wait { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int prio; + char __data[0]; +}; + +struct trace_event_raw_sched_process_fork { + struct trace_entry ent; + char parent_comm[16]; + pid_t parent_pid; + char child_comm[16]; + pid_t child_pid; + char __data[0]; +}; + +struct trace_event_raw_sched_process_exec { + struct trace_entry ent; + u32 __data_loc_filename; + pid_t pid; + pid_t old_pid; + char __data[0]; +}; + +struct trace_event_raw_sched_stat_template { + struct trace_entry ent; + char comm[16]; + pid_t pid; + u64 delay; + char __data[0]; +}; + +struct trace_event_raw_sched_stat_runtime { + struct trace_entry ent; + char comm[16]; + pid_t pid; + u64 runtime; + u64 vruntime; + char __data[0]; +}; + +struct trace_event_raw_sched_pi_setprio { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int oldprio; + int newprio; + char __data[0]; +}; + +struct trace_event_raw_sched_process_hang { + struct trace_entry ent; + char comm[16]; + pid_t pid; + char __data[0]; +}; + +struct trace_event_raw_sched_move_numa { + struct trace_entry ent; + pid_t pid; + pid_t tgid; + pid_t ngid; + int src_cpu; + int src_nid; + int dst_cpu; + int dst_nid; + char __data[0]; +}; + +struct trace_event_raw_sched_numa_pair_template { + struct trace_entry ent; + pid_t src_pid; + pid_t src_tgid; + pid_t src_ngid; + int src_cpu; + int src_nid; + pid_t dst_pid; + pid_t dst_tgid; + pid_t dst_ngid; + int dst_cpu; + int dst_nid; + char __data[0]; +}; + +struct trace_event_raw_sched_wake_idle_without_ipi { + struct trace_entry ent; + int cpu; + char __data[0]; +}; + +struct trace_event_data_offsets_sched_kthread_stop {}; + +struct trace_event_data_offsets_sched_kthread_stop_ret {}; + +struct trace_event_data_offsets_sched_kthread_work_queue_work {}; + +struct trace_event_data_offsets_sched_kthread_work_execute_start {}; + +struct trace_event_data_offsets_sched_kthread_work_execute_end {}; + +struct 
trace_event_data_offsets_sched_wakeup_template {}; + +struct trace_event_data_offsets_sched_switch {}; + +struct trace_event_data_offsets_sched_migrate_task {}; + +struct trace_event_data_offsets_sched_process_template {}; + +struct trace_event_data_offsets_sched_process_wait {}; + +struct trace_event_data_offsets_sched_process_fork {}; + +struct trace_event_data_offsets_sched_process_exec { + u32 filename; +}; + +struct trace_event_data_offsets_sched_stat_template {}; + +struct trace_event_data_offsets_sched_stat_runtime {}; + +struct trace_event_data_offsets_sched_pi_setprio {}; + +struct trace_event_data_offsets_sched_process_hang {}; + +struct trace_event_data_offsets_sched_move_numa {}; + +struct trace_event_data_offsets_sched_numa_pair_template {}; + +struct trace_event_data_offsets_sched_wake_idle_without_ipi {}; + +typedef void (*btf_trace_sched_kthread_stop)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_kthread_stop_ret)(void *, int); + +typedef void (*btf_trace_sched_kthread_work_queue_work)(void *, struct kthread_worker *, struct kthread_work *); + +typedef void (*btf_trace_sched_kthread_work_execute_start)(void *, struct kthread_work *); + +typedef void (*btf_trace_sched_kthread_work_execute_end)(void *, struct kthread_work *, kthread_work_func_t); + +typedef void (*btf_trace_sched_waking)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_wakeup)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_wakeup_new)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_switch)(void *, bool, struct task_struct *, struct task_struct *); + +typedef void (*btf_trace_sched_migrate_task)(void *, struct task_struct *, int); + +typedef void (*btf_trace_sched_process_free)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_process_exit)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_wait_task)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_process_wait)(void *, struct pid *); + +typedef void (*btf_trace_sched_process_fork)(void *, struct task_struct *, struct task_struct *); + +typedef void (*btf_trace_sched_process_exec)(void *, struct task_struct *, pid_t, struct linux_binprm *); + +typedef void (*btf_trace_sched_stat_wait)(void *, struct task_struct *, u64); + +typedef void (*btf_trace_sched_stat_sleep)(void *, struct task_struct *, u64); + +typedef void (*btf_trace_sched_stat_iowait)(void *, struct task_struct *, u64); + +typedef void (*btf_trace_sched_stat_blocked)(void *, struct task_struct *, u64); + +typedef void (*btf_trace_sched_stat_runtime)(void *, struct task_struct *, u64, u64); + +typedef void (*btf_trace_sched_pi_setprio)(void *, struct task_struct *, struct task_struct *); + +typedef void (*btf_trace_sched_process_hang)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_move_numa)(void *, struct task_struct *, int, int); + +typedef void (*btf_trace_sched_stick_numa)(void *, struct task_struct *, int, struct task_struct *, int); + +typedef void (*btf_trace_sched_swap_numa)(void *, struct task_struct *, int, struct task_struct *, int); + +typedef void (*btf_trace_sched_wake_idle_without_ipi)(void *, int); + +typedef void (*btf_trace_pelt_cfs_tp)(void *, struct cfs_rq *); + +typedef void (*btf_trace_pelt_rt_tp)(void *, struct rq *); + +struct uclamp_bucket { + long unsigned int value: 11; + long unsigned int tasks: 53; +}; + +struct uclamp_rq { + unsigned int value; + struct uclamp_bucket bucket[5]; +}; + +struct rt_prio_array { + long unsigned int bitmap[2]; 
+ struct list_head queue[100]; +}; + +struct rt_rq { + struct rt_prio_array active; + unsigned int rt_nr_running; + unsigned int rr_nr_running; + struct { + int curr; + int next; + } highest_prio; + long unsigned int rt_nr_migratory; + long unsigned int rt_nr_total; + int overloaded; + struct plist_head pushable_tasks; + int rt_queued; + int rt_throttled; + u64 rt_time; + u64 rt_runtime; + raw_spinlock_t rt_runtime_lock; +}; + +struct dl_rq { + struct rb_root_cached root; + long unsigned int dl_nr_running; + struct { + u64 curr; + u64 next; + } earliest_dl; + long unsigned int dl_nr_migratory; + int overloaded; + struct rb_root_cached pushable_dl_tasks_root; + u64 running_bw; + u64 this_bw; + u64 extra_bw; + u64 bw_ratio; +}; + +struct cpu_stop_done; + +struct cpu_stop_work { + struct list_head list; + cpu_stop_fn_t fn; + long unsigned int caller; + void *arg; + struct cpu_stop_done *done; +}; + +struct cpuidle_state; + +struct rq { + raw_spinlock_t lock; + unsigned int nr_running; + unsigned int nr_numa_running; + unsigned int nr_preferred_running; + unsigned int numa_migrate_on; + long unsigned int last_blocked_load_update_tick; + unsigned int has_blocked_load; + long: 32; + long: 64; + long: 64; + long: 64; + call_single_data_t nohz_csd; + unsigned int nohz_tick_stopped; + atomic_t nohz_flags; + unsigned int ttwu_pending; + u64 nr_switches; + long: 64; + struct uclamp_rq uclamp[2]; + unsigned int uclamp_flags; + long: 32; + long: 64; + long: 64; + long: 64; + struct cfs_rq cfs; + struct rt_rq rt; + struct dl_rq dl; + struct list_head leaf_cfs_rq_list; + struct list_head *tmp_alone_branch; + long unsigned int nr_uninterruptible; + struct task_struct *curr; + struct task_struct *idle; + struct task_struct *stop; + long unsigned int next_balance; + struct mm_struct *prev_mm; + unsigned int clock_update_flags; + u64 clock; + long: 64; + long: 64; + long: 64; + u64 clock_task; + u64 clock_pelt; + long unsigned int lost_idle_time; + atomic_t nr_iowait; + u64 last_seen_need_resched_ns; + int ticks_without_resched; + int membarrier_state; + struct root_domain *rd; + struct sched_domain *sd; + long unsigned int cpu_capacity; + long unsigned int cpu_capacity_orig; + struct callback_head *balance_callback; + unsigned char nohz_idle_balance; + unsigned char idle_balance; + long unsigned int misfit_task_load; + int active_balance; + int push_cpu; + struct cpu_stop_work active_balance_work; + int cpu; + int online; + struct list_head cfs_tasks; + long: 64; + struct sched_avg avg_rt; + struct sched_avg avg_dl; + u64 idle_stamp; + u64 avg_idle; + u64 max_idle_balance_cost; + struct rcuwait hotplug_wait; + u64 prev_steal_time; + long unsigned int calc_load_update; + long int calc_load_active; + long: 64; + call_single_data_t hrtick_csd; + struct hrtimer hrtick_timer; + ktime_t hrtick_time; + struct sched_info rq_sched_info; + long long unsigned int rq_cpu_time; + unsigned int yld_count; + unsigned int sched_count; + unsigned int sched_goidle; + unsigned int ttwu_count; + unsigned int ttwu_local; + struct cpuidle_state *idle_state; + unsigned int nr_pinned; + unsigned int push_busy; + struct cpu_stop_work push_work; + long: 64; + long: 64; + long: 64; +}; + +typedef void (*btf_trace_pelt_dl_tp)(void *, struct rq *); + +typedef void (*btf_trace_pelt_thermal_tp)(void *, struct rq *); + +typedef void (*btf_trace_pelt_irq_tp)(void *, struct rq *); + +typedef void (*btf_trace_pelt_se_tp)(void *, struct sched_entity *); + +typedef void (*btf_trace_sched_cpu_capacity_tp)(void *, struct rq *); + +typedef void 
(*btf_trace_sched_overutilized_tp)(void *, struct root_domain *, bool); + +typedef void (*btf_trace_sched_util_est_cfs_tp)(void *, struct cfs_rq *); + +typedef void (*btf_trace_sched_util_est_se_tp)(void *, struct sched_entity *); + +typedef void (*btf_trace_sched_update_nr_running_tp)(void *, struct rq *, int); + +struct wake_q_head { + struct wake_q_node *first; + struct wake_q_node **lastp; +}; + +struct sched_attr { + __u32 size; + __u32 sched_policy; + __u64 sched_flags; + __s32 sched_nice; + __u32 sched_priority; + __u64 sched_runtime; + __u64 sched_deadline; + __u64 sched_period; + __u32 sched_util_min; + __u32 sched_util_max; +}; + +struct cpuidle_state_usage { + long long unsigned int disable; + long long unsigned int usage; + u64 time_ns; + long long unsigned int above; + long long unsigned int below; + long long unsigned int rejected; + long long unsigned int s2idle_usage; + long long unsigned int s2idle_time; +}; + +struct cpuidle_device; + +struct cpuidle_driver; + +struct cpuidle_state { + char name[16]; + char desc[32]; + s64 exit_latency_ns; + s64 target_residency_ns; + unsigned int flags; + unsigned int exit_latency; + int power_usage; + unsigned int target_residency; + int (*enter)(struct cpuidle_device *, struct cpuidle_driver *, int); + int (*enter_dead)(struct cpuidle_device *, int); + int (*enter_s2idle)(struct cpuidle_device *, struct cpuidle_driver *, int); +}; + +struct cpuidle_driver_kobj; + +struct cpuidle_state_kobj; + +struct cpuidle_device_kobj; + +struct cpuidle_device { + unsigned int registered: 1; + unsigned int enabled: 1; + unsigned int poll_time_limit: 1; + unsigned int cpu; + ktime_t next_hrtimer; + int last_state_idx; + u64 last_residency_ns; + u64 poll_limit_ns; + u64 forced_idle_latency_limit_ns; + struct cpuidle_state_usage states_usage[10]; + struct cpuidle_state_kobj *kobjs[10]; + struct cpuidle_driver_kobj *kobj_driver; + struct cpuidle_device_kobj *kobj_dev; + struct list_head device_list; +}; + +struct cpuidle_driver { + const char *name; + struct module *owner; + unsigned int bctimer: 1; + struct cpuidle_state states[10]; + int state_count; + int safe_state_index; + struct cpumask *cpumask; + const char *governor; +}; + +struct cpudl_item { + u64 dl; + int cpu; + int idx; +}; + +struct rt_bandwidth { + raw_spinlock_t rt_runtime_lock; + ktime_t rt_period; + u64 rt_runtime; + struct hrtimer rt_period_timer; + unsigned int rt_period_active; +}; + +struct dl_bandwidth { + raw_spinlock_t dl_runtime_lock; + u64 dl_runtime; + u64 dl_period; +}; + +typedef int (*tg_visitor)(struct task_group *, void *); + +struct perf_domain { + struct em_perf_domain *em_pd; + struct perf_domain *next; + struct callback_head rcu; +}; + +struct rq_flags { + long unsigned int flags; + struct pin_cookie cookie; + unsigned int clock_update_flags; +}; + +enum { + __SCHED_FEAT_GENTLE_FAIR_SLEEPERS = 0, + __SCHED_FEAT_START_DEBIT = 1, + __SCHED_FEAT_NEXT_BUDDY = 2, + __SCHED_FEAT_LAST_BUDDY = 3, + __SCHED_FEAT_CACHE_HOT_BUDDY = 4, + __SCHED_FEAT_WAKEUP_PREEMPTION = 5, + __SCHED_FEAT_HRTICK = 6, + __SCHED_FEAT_HRTICK_DL = 7, + __SCHED_FEAT_DOUBLE_TICK = 8, + __SCHED_FEAT_NONTASK_CAPACITY = 9, + __SCHED_FEAT_TTWU_QUEUE = 10, + __SCHED_FEAT_SIS_PROP = 11, + __SCHED_FEAT_WARN_DOUBLE_CLOCK = 12, + __SCHED_FEAT_RT_PUSH_IPI = 13, + __SCHED_FEAT_RT_RUNTIME_SHARE = 14, + __SCHED_FEAT_LB_MIN = 15, + __SCHED_FEAT_ATTACH_AGE_LOAD = 16, + __SCHED_FEAT_WA_IDLE = 17, + __SCHED_FEAT_WA_WEIGHT = 18, + __SCHED_FEAT_WA_BIAS = 19, + __SCHED_FEAT_UTIL_EST = 20, + __SCHED_FEAT_UTIL_EST_FASTUP = 
21, + __SCHED_FEAT_LATENCY_WARN = 22, + __SCHED_FEAT_ALT_PERIOD = 23, + __SCHED_FEAT_BASE_SLICE = 24, + __SCHED_FEAT_NR = 25, +}; + +enum cpu_util_type { + FREQUENCY_UTIL = 0, + ENERGY_UTIL = 1, +}; + +struct set_affinity_pending; + +struct migration_arg { + struct task_struct *task; + int dest_cpu; + struct set_affinity_pending *pending; +}; + +struct set_affinity_pending { + refcount_t refs; + unsigned int stop_pending; + struct completion done; + struct cpu_stop_work stop_work; + struct migration_arg arg; +}; + +struct migration_swap_arg { + struct task_struct *src_task; + struct task_struct *dst_task; + int src_cpu; + int dst_cpu; +}; + +struct uclamp_request { + s64 percent; + u64 util; + int ret; +}; + +struct cfs_schedulable_data { + struct task_group *tg; + u64 period; + u64 quota; +}; + +enum { + cpuset = 0, + possible = 1, + fail = 2, +}; + +enum tick_dep_bits { + TICK_DEP_BIT_POSIX_TIMER = 0, + TICK_DEP_BIT_PERF_EVENTS = 1, + TICK_DEP_BIT_SCHED = 2, + TICK_DEP_BIT_CLOCK_UNSTABLE = 3, + TICK_DEP_BIT_RCU = 4, + TICK_DEP_BIT_RCU_EXP = 5, +}; + +struct sched_clock_data { + u64 tick_raw; + u64 tick_gtod; + u64 clock; +}; + +enum s2idle_states { + S2IDLE_STATE_NONE = 0, + S2IDLE_STATE_ENTER = 1, + S2IDLE_STATE_WAKE = 2, +}; + +struct idle_timer { + struct hrtimer timer; + int done; +}; + +struct numa_group { + refcount_t refcount; + spinlock_t lock; + int nr_tasks; + pid_t gid; + int active_nodes; + struct callback_head rcu; + long unsigned int total_faults; + long unsigned int max_faults_cpu; + long unsigned int *faults_cpu; + long unsigned int faults[0]; +}; + +struct update_util_data { + void (*func)(struct update_util_data *, u64, unsigned int); +}; + +enum sched_tunable_scaling { + SCHED_TUNABLESCALING_NONE = 0, + SCHED_TUNABLESCALING_LOG = 1, + SCHED_TUNABLESCALING_LINEAR = 2, + SCHED_TUNABLESCALING_END = 3, +}; + +enum numa_topology_type { + NUMA_DIRECT = 0, + NUMA_GLUELESS_MESH = 1, + NUMA_BACKPLANE = 2, +}; + +enum numa_faults_stats { + NUMA_MEM = 0, + NUMA_CPU = 1, + NUMA_MEMBUF = 2, + NUMA_CPUBUF = 3, +}; + +enum numa_type { + node_has_spare = 0, + node_fully_busy = 1, + node_overloaded = 2, +}; + +struct numa_stats { + long unsigned int load; + long unsigned int runnable; + long unsigned int util; + long unsigned int compute_capacity; + unsigned int nr_running; + unsigned int weight; + enum numa_type node_type; + int idle_cpu; +}; + +struct task_numa_env { + struct task_struct *p; + int src_cpu; + int src_nid; + int dst_cpu; + int dst_nid; + struct numa_stats src_stats; + struct numa_stats dst_stats; + int imbalance_pct; + int dist; + struct task_struct *best_task; + long int best_imp; + int best_cpu; +}; + +enum fbq_type { + regular = 0, + remote = 1, + all = 2, +}; + +enum group_type { + group_has_spare = 0, + group_fully_busy = 1, + group_misfit_task = 2, + group_asym_packing = 3, + group_imbalanced = 4, + group_overloaded = 5, +}; + +enum migration_type { + migrate_load = 0, + migrate_util = 1, + migrate_task = 2, + migrate_misfit = 3, +}; + +struct lb_env { + struct sched_domain *sd; + struct rq *src_rq; + int src_cpu; + int dst_cpu; + struct rq *dst_rq; + struct cpumask *dst_grpmask; + int new_dst_cpu; + enum cpu_idle_type idle; + long int imbalance; + struct cpumask *cpus; + unsigned int flags; + unsigned int loop; + unsigned int loop_break; + unsigned int loop_max; + enum fbq_type fbq_type; + enum migration_type migration_type; + struct list_head tasks; +}; + +struct sg_lb_stats { + long unsigned int avg_load; + long unsigned int group_load; + long unsigned int 
group_capacity; + long unsigned int group_util; + long unsigned int group_runnable; + unsigned int sum_nr_running; + unsigned int sum_h_nr_running; + unsigned int idle_cpus; + unsigned int group_weight; + enum group_type group_type; + unsigned int group_asym_packing; + long unsigned int group_misfit_task_load; + unsigned int nr_numa_running; + unsigned int nr_preferred_running; +}; + +struct sd_lb_stats { + struct sched_group *busiest; + struct sched_group *local; + long unsigned int total_load; + long unsigned int total_capacity; + long unsigned int avg_load; + unsigned int prefer_sibling; + struct sg_lb_stats busiest_stat; + struct sg_lb_stats local_stat; +}; + +typedef struct rt_rq *rt_rq_iter_t; + +struct sd_flag_debug { + unsigned int meta_flags; + char *name; +}; + +struct s_data { + struct sched_domain **sd; + struct root_domain *rd; +}; + +enum s_alloc { + sa_rootdomain = 0, + sa_sd = 1, + sa_sd_storage = 2, + sa_none = 3, +}; + +enum cpuacct_stat_index { + CPUACCT_STAT_USER = 0, + CPUACCT_STAT_SYSTEM = 1, + CPUACCT_STAT_NSTATS = 2, +}; + +struct cpuacct_usage { + u64 usages[2]; +}; + +struct cpuacct { + struct cgroup_subsys_state css; + struct cpuacct_usage *cpuusage; + struct kernel_cpustat *cpustat; +}; + +struct gov_attr_set { + struct kobject kobj; + struct list_head policy_list; + struct mutex update_lock; + int usage_count; +}; + +struct governor_attr { + struct attribute attr; + ssize_t (*show)(struct gov_attr_set *, char *); + ssize_t (*store)(struct gov_attr_set *, const char *, size_t); +}; + +struct sugov_tunables { + struct gov_attr_set attr_set; + unsigned int rate_limit_us; +}; + +struct sugov_policy { + struct cpufreq_policy *policy; + struct sugov_tunables *tunables; + struct list_head tunables_hook; + raw_spinlock_t update_lock; + u64 last_freq_update_time; + s64 freq_update_delay_ns; + unsigned int next_freq; + unsigned int cached_raw_freq; + struct irq_work irq_work; + struct kthread_work work; + struct mutex work_lock; + struct kthread_worker worker; + struct task_struct *thread; + bool work_in_progress; + bool limits_changed; + bool need_freq_update; +}; + +struct sugov_cpu { + struct update_util_data update_util; + struct sugov_policy *sg_policy; + unsigned int cpu; + bool iowait_boost_pending; + unsigned int iowait_boost; + u64 last_update; + long unsigned int util; + long unsigned int bw_dl; + long unsigned int max; + long unsigned int saved_idle_calls; +}; + +enum { + MEMBARRIER_FLAG_SYNC_CORE = 1, + MEMBARRIER_FLAG_RSEQ = 2, +}; + +enum membarrier_cmd { + MEMBARRIER_CMD_QUERY = 0, + MEMBARRIER_CMD_GLOBAL = 1, + MEMBARRIER_CMD_GLOBAL_EXPEDITED = 2, + MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED = 4, + MEMBARRIER_CMD_PRIVATE_EXPEDITED = 8, + MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = 16, + MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE = 32, + MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE = 64, + MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ = 128, + MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ = 256, + MEMBARRIER_CMD_SHARED = 1, +}; + +enum membarrier_cmd_flag { + MEMBARRIER_CMD_FLAG_CPU = 1, +}; + +enum psi_res { + PSI_IO = 0, + PSI_MEM = 1, + PSI_CPU = 2, + NR_PSI_RESOURCES = 3, +}; + +struct psi_window { + u64 size; + u64 start_time; + u64 start_value; + u64 prev_growth; +}; + +struct psi_trigger { + enum psi_states state; + u64 threshold; + struct list_head node; + struct psi_group *group; + wait_queue_head_t event_wait; + int event; + struct psi_window win; + u64 last_event_time; + struct kref refcount; +}; + +struct ww_acquire_ctx; + +struct ww_mutex { + 
struct mutex base; + struct ww_acquire_ctx *ctx; +}; + +struct ww_acquire_ctx { + struct task_struct *task; + long unsigned int stamp; + unsigned int acquired; + short unsigned int wounded; + short unsigned int is_wait_die; +}; + +struct mutex_waiter { + struct list_head list; + struct task_struct *task; + struct ww_acquire_ctx *ww_ctx; +}; + +struct semaphore_waiter { + struct list_head list; + struct task_struct *task; + bool up; +}; + +enum rwsem_waiter_type { + RWSEM_WAITING_FOR_WRITE = 0, + RWSEM_WAITING_FOR_READ = 1, +}; + +struct rwsem_waiter { + struct list_head list; + struct task_struct *task; + enum rwsem_waiter_type type; + long unsigned int timeout; +}; + +enum rwsem_wake_type { + RWSEM_WAKE_ANY = 0, + RWSEM_WAKE_READERS = 1, + RWSEM_WAKE_READ_OWNED = 2, +}; + +enum writer_wait_state { + WRITER_NOT_FIRST = 0, + WRITER_FIRST = 1, + WRITER_HANDOFF = 2, +}; + +enum owner_state { + OWNER_NULL = 1, + OWNER_WRITER = 2, + OWNER_READER = 4, + OWNER_NONSPINNABLE = 8, +}; + +struct optimistic_spin_node { + struct optimistic_spin_node *next; + struct optimistic_spin_node *prev; + int locked; + int cpu; +}; + +struct mcs_spinlock { + struct mcs_spinlock *next; + int locked; + int count; +}; + +struct qnode { + struct mcs_spinlock mcs; + long int reserved[2]; +}; + +enum vcpu_state { + vcpu_running = 0, + vcpu_halted = 1, + vcpu_hashed = 2, +}; + +struct pv_node { + struct mcs_spinlock mcs; + int cpu; + u8 state; +}; + +struct pv_hash_entry { + struct qspinlock *lock; + struct pv_node *node; +}; + +struct hrtimer_sleeper { + struct hrtimer timer; + struct task_struct *task; +}; + +struct rt_mutex; + +struct rt_mutex_waiter { + struct rb_node tree_entry; + struct rb_node pi_tree_entry; + struct task_struct *task; + struct rt_mutex *lock; + int prio; + u64 deadline; +}; + +struct rt_mutex { + raw_spinlock_t wait_lock; + struct rb_root_cached waiters; + struct task_struct *owner; +}; + +enum rtmutex_chainwalk { + RT_MUTEX_MIN_CHAINWALK = 0, + RT_MUTEX_FULL_CHAINWALK = 1, +}; + +enum pm_qos_req_action { + PM_QOS_ADD_REQ = 0, + PM_QOS_UPDATE_REQ = 1, + PM_QOS_REMOVE_REQ = 2, +}; + +typedef int suspend_state_t; + +enum suspend_stat_step { + SUSPEND_FREEZE = 1, + SUSPEND_PREPARE = 2, + SUSPEND_SUSPEND = 3, + SUSPEND_SUSPEND_LATE = 4, + SUSPEND_SUSPEND_NOIRQ = 5, + SUSPEND_RESUME_NOIRQ = 6, + SUSPEND_RESUME_EARLY = 7, + SUSPEND_RESUME = 8, +}; + +struct suspend_stats { + int success; + int fail; + int failed_freeze; + int failed_prepare; + int failed_suspend; + int failed_suspend_late; + int failed_suspend_noirq; + int failed_resume; + int failed_resume_early; + int failed_resume_noirq; + int last_failed_dev; + char failed_devs[80]; + int last_failed_errno; + int errno[2]; + int last_failed_step; + enum suspend_stat_step failed_steps[2]; +}; + +enum { + TEST_NONE = 0, + TEST_CORE = 1, + TEST_CPUS = 2, + TEST_PLATFORM = 3, + TEST_DEVICES = 4, + TEST_FREEZER = 5, + __TEST_AFTER_LAST = 6, +}; + +struct pm_vt_switch { + struct list_head head; + struct device *dev; + bool required; +}; + +struct platform_suspend_ops { + int (*valid)(suspend_state_t); + int (*begin)(suspend_state_t); + int (*prepare)(); + int (*prepare_late)(); + int (*enter)(suspend_state_t); + void (*wake)(); + void (*finish)(); + bool (*suspend_again)(); + void (*end)(); + void (*recover)(); +}; + +struct platform_s2idle_ops { + int (*begin)(); + int (*prepare)(); + int (*prepare_late)(); + bool (*wake)(); + void (*restore_early)(); + void (*restore)(); + void (*end)(); +}; + +struct platform_hibernation_ops { + int 
(*begin)(pm_message_t); + void (*end)(); + int (*pre_snapshot)(); + void (*finish)(); + int (*prepare)(); + int (*enter)(); + void (*leave)(); + int (*pre_restore)(); + void (*restore_cleanup)(); + void (*recover)(); +}; + +enum { + HIBERNATION_INVALID = 0, + HIBERNATION_PLATFORM = 1, + HIBERNATION_SHUTDOWN = 2, + HIBERNATION_REBOOT = 3, + HIBERNATION_SUSPEND = 4, + HIBERNATION_TEST_RESUME = 5, + __HIBERNATION_AFTER_LAST = 6, +}; + +struct pbe { + void *address; + void *orig_address; + struct pbe *next; +}; + +struct swsusp_info { + struct new_utsname uts; + u32 version_code; + long unsigned int num_physpages; + int cpus; + long unsigned int image_pages; + long unsigned int pages; + long unsigned int size; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + 
long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct snapshot_handle { + unsigned int cur; + void *buffer; + int sync_read; +}; + +struct linked_page { + struct linked_page *next; + char data[4088]; +}; + +struct chain_allocator { + struct linked_page *chain; + unsigned int used_space; + gfp_t gfp_mask; + int safe_needed; +}; + +struct rtree_node { + struct list_head list; + long unsigned int *data; +}; + +struct mem_zone_bm_rtree { + struct list_head list; + struct list_head nodes; + struct list_head leaves; + long unsigned int start_pfn; + long unsigned int end_pfn; + struct rtree_node *rtree; + int levels; + unsigned int blocks; +}; + +struct bm_position { + struct mem_zone_bm_rtree *zone; + struct rtree_node *node; + long unsigned int node_pfn; + int node_bit; +}; + +struct memory_bitmap { + struct list_head zones; + struct linked_page *p_list; + struct bm_position cur; +}; + +struct mem_extent { + struct list_head hook; + 
long unsigned int start; + long unsigned int end; +}; + +struct nosave_region { + struct list_head list; + long unsigned int start_pfn; + long unsigned int end_pfn; +}; + +typedef struct { + long unsigned int val; +} swp_entry_t; + +enum { + BIO_NO_PAGE_REF = 0, + BIO_CLONED = 1, + BIO_BOUNCED = 2, + BIO_WORKINGSET = 3, + BIO_QUIET = 4, + BIO_CHAIN = 5, + BIO_REFFED = 6, + BIO_THROTTLED = 7, + BIO_TRACE_COMPLETION = 8, + BIO_CGROUP_ACCT = 9, + BIO_TRACKED = 10, + BIO_REMAPPED = 11, + BIO_ZONE_WRITE_LOCKED = 12, + BIO_FLAG_LAST = 13, +}; + +enum req_opf { + REQ_OP_READ = 0, + REQ_OP_WRITE = 1, + REQ_OP_FLUSH = 2, + REQ_OP_DISCARD = 3, + REQ_OP_SECURE_ERASE = 5, + REQ_OP_WRITE_SAME = 7, + REQ_OP_WRITE_ZEROES = 9, + REQ_OP_ZONE_OPEN = 10, + REQ_OP_ZONE_CLOSE = 11, + REQ_OP_ZONE_FINISH = 12, + REQ_OP_ZONE_APPEND = 13, + REQ_OP_ZONE_RESET = 15, + REQ_OP_ZONE_RESET_ALL = 17, + REQ_OP_SCSI_IN = 32, + REQ_OP_SCSI_OUT = 33, + REQ_OP_DRV_IN = 34, + REQ_OP_DRV_OUT = 35, + REQ_OP_LAST = 36, +}; + +enum req_flag_bits { + __REQ_FAILFAST_DEV = 8, + __REQ_FAILFAST_TRANSPORT = 9, + __REQ_FAILFAST_DRIVER = 10, + __REQ_SYNC = 11, + __REQ_META = 12, + __REQ_PRIO = 13, + __REQ_NOMERGE = 14, + __REQ_IDLE = 15, + __REQ_INTEGRITY = 16, + __REQ_FUA = 17, + __REQ_PREFLUSH = 18, + __REQ_RAHEAD = 19, + __REQ_BACKGROUND = 20, + __REQ_NOWAIT = 21, + __REQ_CGROUP_PUNT = 22, + __REQ_NOUNMAP = 23, + __REQ_HIPRI = 24, + __REQ_DRV = 25, + __REQ_SWAP = 26, + __REQ_NR_BITS = 27, +}; + +struct swap_map_page { + sector_t entries[511]; + sector_t next_swap; +}; + +struct swap_map_page_list { + struct swap_map_page *map; + struct swap_map_page_list *next; +}; + +struct swap_map_handle { + struct swap_map_page *cur; + struct swap_map_page_list *maps; + sector_t cur_swap; + sector_t first_sector; + unsigned int k; + long unsigned int reqd_free_pages; + u32 crc32; +}; + +struct swsusp_header { + char reserved[4060]; + u32 crc32; + sector_t image; + unsigned int flags; + char orig_sig[10]; + char sig[10]; +}; + +struct swsusp_extent { + struct rb_node node; + long unsigned int start; + long unsigned int end; +}; + +struct hib_bio_batch { + atomic_t count; + wait_queue_head_t wait; + blk_status_t error; + struct blk_plug plug; +}; + +struct crc_data { + struct task_struct *thr; + atomic_t ready; + atomic_t stop; + unsigned int run_threads; + wait_queue_head_t go; + wait_queue_head_t done; + u32 *crc32; + size_t *unc_len[3]; + unsigned char *unc[3]; +}; + +struct cmp_data { + struct task_struct *thr; + atomic_t ready; + atomic_t stop; + int ret; + wait_queue_head_t go; + wait_queue_head_t done; + size_t unc_len; + size_t cmp_len; + unsigned char unc[131072]; + unsigned char cmp[143360]; + unsigned char wrk[16384]; +}; + +struct dec_data { + struct task_struct *thr; + atomic_t ready; + atomic_t stop; + int ret; + wait_queue_head_t go; + wait_queue_head_t done; + size_t unc_len; + size_t cmp_len; + unsigned char unc[131072]; + unsigned char cmp[143360]; +}; + +typedef s64 compat_loff_t; + +struct resume_swap_area { + __kernel_loff_t offset; + __u32 dev; +} __attribute__((packed)); + +struct snapshot_data { + struct snapshot_handle handle; + int swap; + int mode; + bool frozen; + bool ready; + bool platform_support; + bool free_bitmaps; + dev_t dev; +}; + +struct compat_resume_swap_area { + compat_loff_t offset; + u32 dev; +} __attribute__((packed)); + +struct wakelock { + char *name; + struct rb_node node; + struct wakeup_source *ws; + struct list_head lru; +}; + +struct sysrq_key_op { + void (* const handler)(int); + const char * const 
help_msg; + const char * const action_msg; + const int enable_mask; +}; + +struct em_data_callback { + int (*active_power)(long unsigned int *, long unsigned int *, struct device *); +}; + +struct dev_printk_info { + char subsystem[16]; + char device[48]; +}; + +struct kmsg_dump_iter { + u64 cur_seq; + u64 next_seq; +}; + +struct kmsg_dumper { + struct list_head list; + void (*dump)(struct kmsg_dumper *, enum kmsg_dump_reason); + enum kmsg_dump_reason max_reason; + bool registered; +}; + +struct trace_event_raw_console { + struct trace_entry ent; + u32 __data_loc_msg; + char __data[0]; +}; + +struct trace_event_data_offsets_console { + u32 msg; +}; + +typedef void (*btf_trace_console)(void *, const char *, size_t); + +struct printk_info { + u64 seq; + u64 ts_nsec; + u16 text_len; + u8 facility; + u8 flags: 5; + u8 level: 3; + u32 caller_id; + struct dev_printk_info dev_info; +}; + +struct printk_record { + struct printk_info *info; + char *text_buf; + unsigned int text_buf_size; +}; + +struct prb_data_blk_lpos { + long unsigned int begin; + long unsigned int next; +}; + +struct prb_desc { + atomic_long_t state_var; + struct prb_data_blk_lpos text_blk_lpos; +}; + +struct prb_data_ring { + unsigned int size_bits; + char *data; + atomic_long_t head_lpos; + atomic_long_t tail_lpos; +}; + +struct prb_desc_ring { + unsigned int count_bits; + struct prb_desc *descs; + struct printk_info *infos; + atomic_long_t head_id; + atomic_long_t tail_id; +}; + +struct printk_ringbuffer { + struct prb_desc_ring desc_ring; + struct prb_data_ring text_data_ring; + atomic_long_t fail; +}; + +struct prb_reserved_entry { + struct printk_ringbuffer *rb; + long unsigned int irqflags; + long unsigned int id; + unsigned int text_space; +}; + +enum desc_state { + desc_miss = 4294967295, + desc_reserved = 0, + desc_committed = 1, + desc_finalized = 2, + desc_reusable = 3, +}; + +struct console_cmdline { + char name[16]; + int index; + bool user_specified; + char *options; +}; + +enum devkmsg_log_bits { + __DEVKMSG_LOG_BIT_ON = 0, + __DEVKMSG_LOG_BIT_OFF = 1, + __DEVKMSG_LOG_BIT_LOCK = 2, +}; + +enum devkmsg_log_masks { + DEVKMSG_LOG_MASK_ON = 1, + DEVKMSG_LOG_MASK_OFF = 2, + DEVKMSG_LOG_MASK_LOCK = 4, +}; + +enum con_msg_format_flags { + MSG_FORMAT_DEFAULT = 0, + MSG_FORMAT_SYSLOG = 1, +}; + +enum log_flags { + LOG_NEWLINE = 2, + LOG_CONT = 8, +}; + +struct latched_seq { + seqcount_latch_t latch; + u64 val[2]; +}; + +struct devkmsg_user { + atomic64_t seq; + struct ratelimit_state rs; + struct mutex lock; + char buf[8192]; + struct printk_info info; + char text_buf[8192]; + struct printk_record record; +}; + +enum kdb_msgsrc { + KDB_MSGSRC_INTERNAL = 0, + KDB_MSGSRC_PRINTK = 1, +}; + +struct printk_safe_seq_buf { + atomic_t len; + atomic_t message_lost; + struct irq_work work; + unsigned char buffer[8160]; +}; + +struct dev_printk_info___2; + +struct prb_data_block { + long unsigned int id; + char data[0]; +}; + +enum { + IRQS_AUTODETECT = 1, + IRQS_SPURIOUS_DISABLED = 2, + IRQS_POLL_INPROGRESS = 8, + IRQS_ONESHOT = 32, + IRQS_REPLAY = 64, + IRQS_WAITING = 128, + IRQS_PENDING = 512, + IRQS_SUSPENDED = 2048, + IRQS_TIMINGS = 4096, + IRQS_NMI = 8192, +}; + +enum { + _IRQ_DEFAULT_INIT_FLAGS = 0, + _IRQ_PER_CPU = 512, + _IRQ_LEVEL = 256, + _IRQ_NOPROBE = 1024, + _IRQ_NOREQUEST = 2048, + _IRQ_NOTHREAD = 65536, + _IRQ_NOAUTOEN = 4096, + _IRQ_MOVE_PCNTXT = 16384, + _IRQ_NO_BALANCING = 8192, + _IRQ_NESTED_THREAD = 32768, + _IRQ_PER_CPU_DEVID = 131072, + _IRQ_IS_POLLED = 262144, + _IRQ_DISABLE_UNLAZY = 524288, + _IRQ_HIDDEN = 
1048576, + _IRQF_MODIFY_MASK = 2096911, +}; + +enum { + IRQTF_RUNTHREAD = 0, + IRQTF_WARNED = 1, + IRQTF_AFFINITY = 2, + IRQTF_FORCED_THREAD = 3, +}; + +enum { + IRQC_IS_HARDIRQ = 0, + IRQC_IS_NESTED = 1, +}; + +enum { + IRQ_STARTUP_NORMAL = 0, + IRQ_STARTUP_MANAGED = 1, + IRQ_STARTUP_ABORT = 2, +}; + +struct irq_devres { + unsigned int irq; + void *dev_id; +}; + +struct irq_desc_devres { + unsigned int from; + unsigned int cnt; +}; + +struct irq_generic_chip_devres { + struct irq_chip_generic *gc; + u32 msk; + unsigned int clr; + unsigned int set; +}; + +struct irqchip_fwid { + struct fwnode_handle fwnode; + unsigned int type; + char *name; + phys_addr_t *pa; +}; + +enum { + AFFINITY = 0, + AFFINITY_LIST = 1, + EFFECTIVE = 2, + EFFECTIVE_LIST = 3, +}; + +struct irq_affinity { + unsigned int pre_vectors; + unsigned int post_vectors; + unsigned int nr_sets; + unsigned int set_size[4]; + void (*calc_sets)(struct irq_affinity *, unsigned int); + void *priv; +}; + +struct node_vectors { + unsigned int id; + union { + unsigned int nvectors; + unsigned int ncpus; + }; +}; + +struct cpumap { + unsigned int available; + unsigned int allocated; + unsigned int managed; + unsigned int managed_allocated; + bool initialized; + bool online; + long unsigned int alloc_map[4]; + long unsigned int managed_map[4]; +}; + +struct irq_matrix___2 { + unsigned int matrix_bits; + unsigned int alloc_start; + unsigned int alloc_end; + unsigned int alloc_size; + unsigned int global_available; + unsigned int global_reserved; + unsigned int systembits_inalloc; + unsigned int total_allocated; + unsigned int online_maps; + struct cpumap *maps; + long unsigned int scratch_map[4]; + long unsigned int system_map[4]; +}; + +struct trace_event_raw_irq_matrix_global { + struct trace_entry ent; + unsigned int online_maps; + unsigned int global_available; + unsigned int global_reserved; + unsigned int total_allocated; + char __data[0]; +}; + +struct trace_event_raw_irq_matrix_global_update { + struct trace_entry ent; + int bit; + unsigned int online_maps; + unsigned int global_available; + unsigned int global_reserved; + unsigned int total_allocated; + char __data[0]; +}; + +struct trace_event_raw_irq_matrix_cpu { + struct trace_entry ent; + int bit; + unsigned int cpu; + bool online; + unsigned int available; + unsigned int allocated; + unsigned int managed; + unsigned int online_maps; + unsigned int global_available; + unsigned int global_reserved; + unsigned int total_allocated; + char __data[0]; +}; + +struct trace_event_data_offsets_irq_matrix_global {}; + +struct trace_event_data_offsets_irq_matrix_global_update {}; + +struct trace_event_data_offsets_irq_matrix_cpu {}; + +typedef void (*btf_trace_irq_matrix_online)(void *, struct irq_matrix___2 *); + +typedef void (*btf_trace_irq_matrix_offline)(void *, struct irq_matrix___2 *); + +typedef void (*btf_trace_irq_matrix_reserve)(void *, struct irq_matrix___2 *); + +typedef void (*btf_trace_irq_matrix_remove_reserved)(void *, struct irq_matrix___2 *); + +typedef void (*btf_trace_irq_matrix_assign_system)(void *, int, struct irq_matrix___2 *); + +typedef void (*btf_trace_irq_matrix_alloc_reserved)(void *, int, unsigned int, struct irq_matrix___2 *, struct cpumap *); + +typedef void (*btf_trace_irq_matrix_reserve_managed)(void *, int, unsigned int, struct irq_matrix___2 *, struct cpumap *); + +typedef void (*btf_trace_irq_matrix_remove_managed)(void *, int, unsigned int, struct irq_matrix___2 *, struct cpumap *); + +typedef void (*btf_trace_irq_matrix_alloc_managed)(void *, int, 
unsigned int, struct irq_matrix___2 *, struct cpumap *); + +typedef void (*btf_trace_irq_matrix_assign)(void *, int, unsigned int, struct irq_matrix___2 *, struct cpumap *); + +typedef void (*btf_trace_irq_matrix_alloc)(void *, int, unsigned int, struct irq_matrix___2 *, struct cpumap *); + +typedef void (*btf_trace_irq_matrix_free)(void *, int, unsigned int, struct irq_matrix___2 *, struct cpumap *); + +typedef void (*call_rcu_func_t)(struct callback_head *, rcu_callback_t); + +struct rcu_synchronize { + struct callback_head head; + struct completion completion; +}; + +struct trace_event_raw_rcu_utilization { + struct trace_entry ent; + const char *s; + char __data[0]; +}; + +struct trace_event_raw_rcu_stall_warning { + struct trace_entry ent; + const char *rcuname; + const char *msg; + char __data[0]; +}; + +struct trace_event_data_offsets_rcu_utilization {}; + +struct trace_event_data_offsets_rcu_stall_warning {}; + +typedef void (*btf_trace_rcu_utilization)(void *, const char *); + +typedef void (*btf_trace_rcu_stall_warning)(void *, const char *, const char *); + +struct rcu_tasks; + +typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *); + +typedef void (*pregp_func_t)(); + +typedef void (*pertask_func_t)(struct task_struct *, struct list_head *); + +typedef void (*postscan_func_t)(struct list_head *); + +typedef void (*holdouts_func_t)(struct list_head *, bool, bool *); + +typedef void (*postgp_func_t)(struct rcu_tasks *); + +struct rcu_tasks { + struct callback_head *cbs_head; + struct callback_head **cbs_tail; + struct wait_queue_head cbs_wq; + raw_spinlock_t cbs_lock; + int gp_state; + int gp_sleep; + int init_fract; + long unsigned int gp_jiffies; + long unsigned int gp_start; + long unsigned int n_gps; + long unsigned int n_ipis; + long unsigned int n_ipis_fails; + struct task_struct *kthread_ptr; + rcu_tasks_gp_func_t gp_func; + pregp_func_t pregp_func; + pertask_func_t pertask_func; + postscan_func_t postscan_func; + holdouts_func_t holdouts_func; + postgp_func_t postgp_func; + call_rcu_func_t call_func; + char *name; + char *kname; +}; + +enum { + GP_IDLE = 0, + GP_ENTER = 1, + GP_PASSED = 2, + GP_EXIT = 3, + GP_REPLAY = 4, +}; + +struct rcu_cblist { + struct callback_head *head; + struct callback_head **tail; + long int len; +}; + +enum rcutorture_type { + RCU_FLAVOR = 0, + RCU_TASKS_FLAVOR = 1, + RCU_TASKS_RUDE_FLAVOR = 2, + RCU_TASKS_TRACING_FLAVOR = 3, + RCU_TRIVIAL_FLAVOR = 4, + SRCU_FLAVOR = 5, + INVALID_RCU_FLAVOR = 6, +}; + +struct rcu_exp_work { + long unsigned int rew_s; + struct work_struct rew_work; +}; + +struct rcu_node { + raw_spinlock_t lock; + long unsigned int gp_seq; + long unsigned int gp_seq_needed; + long unsigned int completedqs; + long unsigned int qsmask; + long unsigned int rcu_gp_init_mask; + long unsigned int qsmaskinit; + long unsigned int qsmaskinitnext; + long unsigned int ofl_seq; + long unsigned int expmask; + long unsigned int expmaskinit; + long unsigned int expmaskinitnext; + long unsigned int cbovldmask; + long unsigned int ffmask; + long unsigned int grpmask; + int grplo; + int grphi; + u8 grpnum; + u8 level; + bool wait_blkd_tasks; + struct rcu_node *parent; + struct list_head blkd_tasks; + struct list_head *gp_tasks; + struct list_head *exp_tasks; + struct list_head *boost_tasks; + struct rt_mutex boost_mtx; + long unsigned int boost_time; + struct task_struct *boost_kthread_task; + unsigned int boost_kthread_status; + long: 32; + long: 64; + long: 64; + raw_spinlock_t fqslock; + long: 32; + long: 64; + long: 64; + long: 64; + long: 
64; + long: 64; + long: 64; + long: 64; + spinlock_t exp_lock; + long unsigned int exp_seq_rq; + wait_queue_head_t exp_wq[4]; + struct rcu_exp_work rew; + bool exp_need_flush; + long: 56; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +union rcu_noqs { + struct { + u8 norm; + u8 exp; + } b; + u16 s; +}; + +struct rcu_data { + long unsigned int gp_seq; + long unsigned int gp_seq_needed; + union rcu_noqs cpu_no_qs; + bool core_needs_qs; + bool beenonline; + bool gpwrap; + bool exp_deferred_qs; + bool cpu_started; + struct rcu_node *mynode; + long unsigned int grpmask; + long unsigned int ticks_this_gp; + struct irq_work defer_qs_iw; + bool defer_qs_iw_pending; + struct work_struct strict_work; + struct rcu_segcblist cblist; + long int qlen_last_fqs_check; + long unsigned int n_cbs_invoked; + long unsigned int n_force_qs_snap; + long int blimit; + int dynticks_snap; + long int dynticks_nesting; + long int dynticks_nmi_nesting; + atomic_t dynticks; + bool rcu_need_heavy_qs; + bool rcu_urgent_qs; + bool rcu_forced_tick; + bool rcu_forced_tick_exp; + struct callback_head barrier_head; + int exp_dynticks_snap; + struct task_struct *rcu_cpu_kthread_task; + unsigned int rcu_cpu_kthread_status; + char rcu_cpu_has_work; + unsigned int softirq_snap; + struct irq_work rcu_iw; + bool rcu_iw_pending; + long unsigned int rcu_iw_gp_seq; + long unsigned int rcu_ofl_gp_seq; + short int rcu_ofl_gp_flags; + long unsigned int rcu_onl_gp_seq; + short int rcu_onl_gp_flags; + long unsigned int last_fqs_resched; + int cpu; +}; + +struct rcu_state { + struct rcu_node node[521]; + struct rcu_node *level[4]; + int ncpus; + int n_online_cpus; + long: 64; + long: 64; + long: 64; + u8 boost; + long unsigned int gp_seq; + long unsigned int gp_max; + struct task_struct *gp_kthread; + struct swait_queue_head gp_wq; + short int gp_flags; + short int gp_state; + long unsigned int gp_wake_time; + long unsigned int gp_wake_seq; + struct mutex barrier_mutex; + atomic_t barrier_cpu_count; + struct completion barrier_completion; + long unsigned int barrier_sequence; + struct mutex exp_mutex; + struct mutex exp_wake_mutex; + long unsigned int expedited_sequence; + atomic_t expedited_need_qs; + struct swait_queue_head expedited_wq; + int ncpus_snap; + u8 cbovld; + u8 cbovldnext; + long unsigned int jiffies_force_qs; + long unsigned int jiffies_kick_kthreads; + long unsigned int n_force_qs; + long unsigned int gp_start; + long unsigned int gp_end; + long unsigned int gp_activity; + long unsigned int gp_req_activity; + long unsigned int jiffies_stall; + long unsigned int jiffies_resched; + long unsigned int n_force_qs_gpstart; + const char *name; + char abbr; + long: 56; + long: 64; + long: 64; + raw_spinlock_t ofl_lock; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct kvfree_rcu_bulk_data { + long unsigned int nr_records; + struct kvfree_rcu_bulk_data *next; + void *records[0]; +}; + +struct kfree_rcu_cpu; + +struct kfree_rcu_cpu_work { + struct rcu_work rcu_work; + struct callback_head *head_free; + struct kvfree_rcu_bulk_data *bkvhead_free[2]; + struct kfree_rcu_cpu *krcp; +}; + +struct kfree_rcu_cpu { + struct callback_head *head; + struct kvfree_rcu_bulk_data *bkvhead[2]; + struct kfree_rcu_cpu_work krw_arr[2]; + raw_spinlock_t lock; + struct delayed_work monitor_work; + bool monitor_todo; + bool initialized; + int count; + struct work_struct page_cache_work; + atomic_t work_in_progress; + struct hrtimer hrtimer; + struct llist_head bkvcache; + int nr_bkv_objs; 
+}; + +struct klp_func { + const char *old_name; + void *new_func; + long unsigned int old_sympos; + void *old_func; + struct kobject kobj; + struct list_head node; + struct list_head stack_node; + long unsigned int old_size; + long unsigned int new_size; + bool nop; + bool patched; + bool transition; +}; + +struct klp_object; + +struct klp_callbacks { + int (*pre_patch)(struct klp_object *); + void (*post_patch)(struct klp_object *); + void (*pre_unpatch)(struct klp_object *); + void (*post_unpatch)(struct klp_object *); + bool post_unpatch_enabled; +}; + +struct klp_object { + const char *name; + struct klp_func *funcs; + struct klp_callbacks callbacks; + struct kobject kobj; + struct list_head func_list; + struct list_head node; + struct module *mod; + bool dynamic; + bool patched; +}; + +struct klp_state { + long unsigned int id; + unsigned int version; + void *data; +}; + +struct klp_patch { + struct module *mod; + struct klp_object *objs; + struct klp_state *states; + bool replace; + struct list_head list; + struct kobject kobj; + struct list_head obj_list; + bool enabled; + bool forced; + struct work_struct free_work; + struct completion finish; +}; + +struct klp_find_arg { + const char *objname; + const char *name; + long unsigned int addr; + long unsigned int count; + long unsigned int pos; +}; + +struct klp_ops { + struct list_head node; + struct list_head func_stack; + struct ftrace_ops fops; +}; + +typedef int (*klp_shadow_ctor_t)(void *, void *, void *); + +typedef void (*klp_shadow_dtor_t)(void *, void *); + +struct klp_shadow { + struct hlist_node node; + struct callback_head callback_head; + void *obj; + long unsigned int id; + char data[0]; +}; + +enum migrate_reason { + MR_COMPACTION = 0, + MR_MEMORY_FAILURE = 1, + MR_MEMORY_HOTPLUG = 2, + MR_SYSCALL = 3, + MR_MEMPOLICY_MBIND = 4, + MR_NUMA_MISPLACED = 5, + MR_CONTIG_RANGE = 6, + MR_LONGTERM_PIN = 7, + MR_TYPES = 8, +}; + +struct io_tlb_slot { + phys_addr_t orig_addr; + size_t alloc_size; + unsigned int list; +}; + +struct io_tlb_mem { + phys_addr_t start; + phys_addr_t end; + long unsigned int nslabs; + long unsigned int used; + unsigned int index; + spinlock_t lock; + struct dentry *debugfs; + bool late_alloc; + struct io_tlb_slot slots[0]; +}; + +struct dma_sgt_handle { + struct sg_table sgt; + struct page **pages; +}; + +struct dma_devres { + size_t size; + void *vaddr; + dma_addr_t dma_handle; + long unsigned int attrs; +}; + +struct trace_event_raw_swiotlb_bounced { + struct trace_entry ent; + u32 __data_loc_dev_name; + u64 dma_mask; + dma_addr_t dev_addr; + size_t size; + enum swiotlb_force swiotlb_force; + char __data[0]; +}; + +struct trace_event_data_offsets_swiotlb_bounced { + u32 dev_name; +}; + +typedef void (*btf_trace_swiotlb_bounced)(void *, struct device *, dma_addr_t, size_t, enum swiotlb_force); + +struct cma; + +struct trace_event_raw_sys_enter { + struct trace_entry ent; + long int id; + long unsigned int args[6]; + char __data[0]; +}; + +struct trace_event_raw_sys_exit { + struct trace_entry ent; + long int id; + long int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_sys_enter {}; + +struct trace_event_data_offsets_sys_exit {}; + +typedef void (*btf_trace_sys_enter)(void *, struct pt_regs *, long int); + +typedef void (*btf_trace_sys_exit)(void *, struct pt_regs *, long int); + +struct kvm_regs { + __u64 rax; + __u64 rbx; + __u64 rcx; + __u64 rdx; + __u64 rsi; + __u64 rdi; + __u64 rsp; + __u64 rbp; + __u64 r8; + __u64 r9; + __u64 r10; + __u64 r11; + __u64 r12; + __u64 r13; + __u64 
r14; + __u64 r15; + __u64 rip; + __u64 rflags; +}; + +struct kvm_segment { + __u64 base; + __u32 limit; + __u16 selector; + __u8 type; + __u8 present; + __u8 dpl; + __u8 db; + __u8 s; + __u8 l; + __u8 g; + __u8 avl; + __u8 unusable; + __u8 padding; +}; + +struct kvm_dtable { + __u64 base; + __u16 limit; + __u16 padding[3]; +}; + +struct kvm_sregs { + struct kvm_segment cs; + struct kvm_segment ds; + struct kvm_segment es; + struct kvm_segment fs; + struct kvm_segment gs; + struct kvm_segment ss; + struct kvm_segment tr; + struct kvm_segment ldt; + struct kvm_dtable gdt; + struct kvm_dtable idt; + __u64 cr0; + __u64 cr2; + __u64 cr3; + __u64 cr4; + __u64 cr8; + __u64 efer; + __u64 apic_base; + __u64 interrupt_bitmap[4]; +}; + +struct kvm_msr_entry { + __u32 index; + __u32 reserved; + __u64 data; +}; + +struct kvm_cpuid_entry2 { + __u32 function; + __u32 index; + __u32 flags; + __u32 eax; + __u32 ebx; + __u32 ecx; + __u32 edx; + __u32 padding[3]; +}; + +struct kvm_debug_exit_arch { + __u32 exception; + __u32 pad; + __u64 pc; + __u64 dr6; + __u64 dr7; +}; + +struct kvm_vcpu_events { + struct { + __u8 injected; + __u8 nr; + __u8 has_error_code; + __u8 pending; + __u32 error_code; + } exception; + struct { + __u8 injected; + __u8 nr; + __u8 soft; + __u8 shadow; + } interrupt; + struct { + __u8 injected; + __u8 pending; + __u8 masked; + __u8 pad; + } nmi; + __u32 sipi_vector; + __u32 flags; + struct { + __u8 smm; + __u8 pending; + __u8 smm_inside_nmi; + __u8 latched_init; + } smi; + __u8 reserved[27]; + __u8 exception_has_payload; + __u64 exception_payload; +}; + +struct kvm_sync_regs { + struct kvm_regs regs; + struct kvm_sregs sregs; + struct kvm_vcpu_events events; +}; + +struct kvm_vmx_nested_state_data { + __u8 vmcs12[4096]; + __u8 shadow_vmcs12[4096]; +}; + +struct kvm_vmx_nested_state_hdr { + __u64 vmxon_pa; + __u64 vmcs12_pa; + struct { + __u16 flags; + } smm; + __u16 pad; + __u32 flags; + __u64 preemption_timer_deadline; +}; + +struct kvm_svm_nested_state_data { + __u8 vmcb12[4096]; +}; + +struct kvm_svm_nested_state_hdr { + __u64 vmcb_pa; +}; + +struct kvm_nested_state { + __u16 flags; + __u16 format; + __u32 size; + union { + struct kvm_vmx_nested_state_hdr vmx; + struct kvm_svm_nested_state_hdr svm; + __u8 pad[120]; + } hdr; + union { + struct kvm_vmx_nested_state_data vmx[0]; + struct kvm_svm_nested_state_data svm[0]; + } data; +}; + +struct kvm_pmu_event_filter { + __u32 action; + __u32 nevents; + __u32 fixed_counter_bitmap; + __u32 flags; + __u32 pad[4]; + __u64 events[0]; +}; + +struct kvm_hyperv_exit { + __u32 type; + __u32 pad1; + union { + struct { + __u32 msr; + __u32 pad2; + __u64 control; + __u64 evt_page; + __u64 msg_page; + } synic; + struct { + __u64 input; + __u64 result; + __u64 params[2]; + } hcall; + struct { + __u32 msr; + __u32 pad2; + __u64 control; + __u64 status; + __u64 send_page; + __u64 recv_page; + __u64 pending_page; + } syndbg; + } u; +}; + +struct kvm_xen_exit { + __u32 type; + union { + struct { + __u32 longmode; + __u32 cpl; + __u64 input; + __u64 result; + __u64 params[6]; + } hcall; + } u; +}; + +struct kvm_run { + __u8 request_interrupt_window; + __u8 immediate_exit; + __u8 padding1[6]; + __u32 exit_reason; + __u8 ready_for_interrupt_injection; + __u8 if_flag; + __u16 flags; + __u64 cr8; + __u64 apic_base; + union { + struct { + __u64 hardware_exit_reason; + } hw; + struct { + __u64 hardware_entry_failure_reason; + __u32 cpu; + } fail_entry; + struct { + __u32 exception; + __u32 error_code; + } ex; + struct { + __u8 direction; + __u8 size; + __u16 
port; + __u32 count; + __u64 data_offset; + } io; + struct { + struct kvm_debug_exit_arch arch; + } debug; + struct { + __u64 phys_addr; + __u8 data[8]; + __u32 len; + __u8 is_write; + } mmio; + struct { + __u64 nr; + __u64 args[6]; + __u64 ret; + __u32 longmode; + __u32 pad; + } hypercall; + struct { + __u64 rip; + __u32 is_write; + __u32 pad; + } tpr_access; + struct { + __u8 icptcode; + __u16 ipa; + __u32 ipb; + } s390_sieic; + __u64 s390_reset_flags; + struct { + __u64 trans_exc_code; + __u32 pgm_code; + } s390_ucontrol; + struct { + __u32 dcrn; + __u32 data; + __u8 is_write; + } dcr; + struct { + __u32 suberror; + __u32 ndata; + __u64 data[16]; + } internal; + struct { + __u64 gprs[32]; + } osi; + struct { + __u64 nr; + __u64 ret; + __u64 args[9]; + } papr_hcall; + struct { + __u16 subchannel_id; + __u16 subchannel_nr; + __u32 io_int_parm; + __u32 io_int_word; + __u32 ipb; + __u8 dequeued; + } s390_tsch; + struct { + __u32 epr; + } epr; + struct { + __u32 type; + __u64 flags; + } system_event; + struct { + __u64 addr; + __u8 ar; + __u8 reserved; + __u8 fc; + __u8 sel1; + __u16 sel2; + } s390_stsi; + struct { + __u8 vector; + } eoi; + struct kvm_hyperv_exit hyperv; + struct { + __u64 esr_iss; + __u64 fault_ipa; + } arm_nisv; + struct { + __u8 error; + __u8 pad[7]; + __u32 reason; + __u32 index; + __u64 data; + } msr; + struct kvm_xen_exit xen; + char padding[256]; + }; + __u64 kvm_valid_regs; + __u64 kvm_dirty_regs; + union { + struct kvm_sync_regs regs; + char padding[2048]; + } s; +}; + +struct kvm_coalesced_mmio { + __u64 phys_addr; + __u32 len; + union { + __u32 pad; + __u32 pio; + }; + __u8 data[8]; +}; + +struct kvm_coalesced_mmio_ring { + __u32 first; + __u32 last; + struct kvm_coalesced_mmio coalesced_mmio[0]; +}; + +struct kvm_xen_hvm_config { + __u32 flags; + __u32 msr; + __u64 blob_addr_32; + __u64 blob_addr_64; + __u8 blob_size_32; + __u8 blob_size_64; + __u8 pad2[30]; +}; + +struct kvm_enc_region { + __u64 addr; + __u64 size; +}; + +struct kvm_dirty_gfn { + __u32 flags; + __u32 slot; + __u64 offset; +}; + +typedef long unsigned int gva_t; + +typedef u64 gpa_t; + +typedef u64 gfn_t; + +typedef u64 hpa_t; + +typedef u64 hfn_t; + +typedef hfn_t kvm_pfn_t; + +struct kvm_memory_slot; + +struct gfn_to_hva_cache { + u64 generation; + gpa_t gpa; + long unsigned int hva; + long unsigned int len; + struct kvm_memory_slot *memslot; +}; + +struct kvm_rmap_head; + +struct kvm_lpage_info; + +struct kvm_arch_memory_slot { + struct kvm_rmap_head *rmap[3]; + struct kvm_lpage_info *lpage_info[2]; + short unsigned int *gfn_track[1]; +}; + +struct kvm_memory_slot { + gfn_t base_gfn; + long unsigned int npages; + long unsigned int *dirty_bitmap; + struct kvm_arch_memory_slot arch; + long unsigned int userspace_addr; + u32 flags; + short int id; + u16 as_id; +}; + +struct gfn_to_pfn_cache { + u64 generation; + gfn_t gfn; + kvm_pfn_t pfn; + bool dirty; +}; + +struct kvm_mmu_memory_cache { + int nobjs; + gfp_t gfp_zero; + struct kmem_cache *kmem_cache; + void *objects[40]; +}; + +struct hv_partition_assist_pg { + u32 tlb_lock_count; +}; + +union hv_message_flags { + __u8 asu8; + struct { + __u8 msg_pending: 1; + __u8 reserved: 7; + }; +}; + +union hv_port_id { + __u32 asu32; + struct { + __u32 id: 24; + __u32 reserved: 8; + } u; +}; + +struct hv_message_header { + __u32 message_type; + __u8 payload_size; + union hv_message_flags message_flags; + __u8 reserved[2]; + union { + __u64 sender; + union hv_port_id port; + }; +}; + +struct hv_message { + struct hv_message_header header; + union { + __u64 
payload[30]; + } u; +}; + +union hv_stimer_config { + u64 as_uint64; + struct { + u64 enable: 1; + u64 periodic: 1; + u64 lazy: 1; + u64 auto_enable: 1; + u64 apic_vector: 8; + u64 direct_mode: 1; + u64 reserved_z0: 3; + u64 sintx: 4; + u64 reserved_z1: 44; + }; +}; + +enum kvm_page_track_mode { + KVM_PAGE_TRACK_WRITE = 0, + KVM_PAGE_TRACK_MAX = 1, +}; + +struct kvm_page_track_notifier_head { + struct srcu_struct track_srcu; + struct hlist_head track_notifier_list; +}; + +struct kvm_vcpu; + +struct kvm; + +struct kvm_page_track_notifier_node { + struct hlist_node node; + void (*track_write)(struct kvm_vcpu *, gpa_t, const u8 *, int, struct kvm_page_track_notifier_node *); + void (*track_flush_slot)(struct kvm *, struct kvm_memory_slot *, struct kvm_page_track_notifier_node *); +}; + +struct kvm_vcpu_stat { + u64 pf_fixed; + u64 pf_guest; + u64 tlb_flush; + u64 invlpg; + u64 exits; + u64 io_exits; + u64 mmio_exits; + u64 signal_exits; + u64 irq_window_exits; + u64 nmi_window_exits; + u64 l1d_flush; + u64 halt_exits; + u64 halt_successful_poll; + u64 halt_attempted_poll; + u64 halt_poll_invalid; + u64 halt_wakeup; + u64 request_irq_exits; + u64 irq_exits; + u64 host_state_reload; + u64 fpu_reload; + u64 insn_emulation; + u64 insn_emulation_fail; + u64 hypercalls; + u64 irq_injections; + u64 nmi_injections; + u64 req_event; + u64 halt_poll_success_ns; + u64 halt_poll_fail_ns; + u64 nested_run; + u64 directed_yield_attempted; + u64 directed_yield_successful; +}; + +struct kvm_mmio_fragment { + gpa_t gpa; + void *data; + unsigned int len; +}; + +struct kvm_lapic; + +struct x86_exception; + +struct kvm_mmu_page; + +union kvm_mmu_page_role { + u32 word; + struct { + unsigned int level: 4; + unsigned int gpte_is_8_bytes: 1; + unsigned int quadrant: 2; + unsigned int direct: 1; + unsigned int access: 3; + unsigned int invalid: 1; + unsigned int nxe: 1; + unsigned int cr0_wp: 1; + unsigned int smep_andnot_wp: 1; + unsigned int smap_andnot_wp: 1; + unsigned int ad_disabled: 1; + unsigned int guest_mode: 1; + char: 6; + unsigned int smm: 8; + }; +}; + +union kvm_mmu_extended_role { + u32 word; + struct { + unsigned int valid: 1; + unsigned int execonly: 1; + unsigned int cr0_pg: 1; + unsigned int cr4_pae: 1; + unsigned int cr4_pse: 1; + unsigned int cr4_pke: 1; + unsigned int cr4_smap: 1; + unsigned int cr4_smep: 1; + unsigned int cr4_la57: 1; + unsigned int maxphyaddr: 6; + }; +}; + +union kvm_mmu_role { + u64 as_u64; + struct { + union kvm_mmu_page_role base; + union kvm_mmu_extended_role ext; + }; +}; + +struct kvm_mmu_root_info { + gpa_t pgd; + hpa_t hpa; +}; + +struct rsvd_bits_validate { + u64 rsvd_bits_mask[10]; + u64 bad_mt_xwr; +}; + +struct kvm_mmu { + long unsigned int (*get_guest_pgd)(struct kvm_vcpu *); + u64 (*get_pdptr)(struct kvm_vcpu *, int); + int (*page_fault)(struct kvm_vcpu *, gpa_t, u32, bool); + void (*inject_page_fault)(struct kvm_vcpu *, struct x86_exception *); + gpa_t (*gva_to_gpa)(struct kvm_vcpu *, gpa_t, u32, struct x86_exception *); + gpa_t (*translate_gpa)(struct kvm_vcpu *, gpa_t, u32, struct x86_exception *); + int (*sync_page)(struct kvm_vcpu *, struct kvm_mmu_page *); + void (*invlpg)(struct kvm_vcpu *, gva_t, hpa_t); + hpa_t root_hpa; + gpa_t root_pgd; + union kvm_mmu_role mmu_role; + u8 root_level; + u8 shadow_root_level; + u8 ept_ad; + bool direct_map; + struct kvm_mmu_root_info prev_roots[3]; + u8 permissions[16]; + u32 pkru_mask; + u64 *pae_root; + u64 *pml4_root; + struct rsvd_bits_validate shadow_zero_check; + struct rsvd_bits_validate guest_rsvd_check; + u8 
last_nonleaf_level; + bool nx; + u64 pdptrs[4]; +}; + +struct kvm_pio_request { + long unsigned int linear_rip; + long unsigned int count; + int in; + int port; + int size; +}; + +struct kvm_queued_exception { + bool pending; + bool injected; + bool has_error_code; + u8 nr; + u32 error_code; + long unsigned int payload; + bool has_payload; + u8 nested_apf; +}; + +struct kvm_queued_interrupt { + bool injected; + bool soft; + u8 nr; +}; + +struct x86_emulate_ctxt; + +struct kvm_mtrr_range { + u64 base; + u64 mask; + struct list_head node; +}; + +struct kvm_mtrr { + struct kvm_mtrr_range var_ranges[8]; + mtrr_type fixed_ranges[88]; + u64 deftype; + struct list_head head; +}; + +enum pmc_type { + KVM_PMC_GP = 0, + KVM_PMC_FIXED = 1, +}; + +struct kvm_pmc { + enum pmc_type type; + u8 idx; + u64 counter; + u64 eventsel; + struct perf_event *perf_event; + struct kvm_vcpu *vcpu; + u64 current_config; +}; + +struct kvm_pmu { + unsigned int nr_arch_gp_counters; + unsigned int nr_arch_fixed_counters; + unsigned int available_event_types; + u64 fixed_ctr_ctrl; + u64 global_ctrl; + u64 global_status; + u64 global_ovf_ctrl; + u64 counter_bitmask[2]; + u64 global_ctrl_mask; + u64 global_ovf_ctrl_mask; + u64 reserved_bits; + u8 version; + struct kvm_pmc gp_counters[32]; + struct kvm_pmc fixed_counters[4]; + struct irq_work irq_work; + long unsigned int reprogram_pmi[1]; + long unsigned int all_valid_pmc_idx[1]; + long unsigned int pmc_in_use[1]; + bool need_cleanup; + u8 event_count; +}; + +struct kvm_vcpu_xen { + u64 hypercall_rip; + u32 current_runstate; + bool vcpu_info_set; + bool vcpu_time_info_set; + bool runstate_set; + struct gfn_to_hva_cache vcpu_info_cache; + struct gfn_to_hva_cache vcpu_time_info_cache; + struct gfn_to_hva_cache runstate_cache; + u64 last_steal; + u64 runstate_entry_time; + u64 runstate_times[4]; +}; + +struct kvm_vcpu_hv; + +struct kvm_vcpu_arch { + long unsigned int regs[17]; + u32 regs_avail; + u32 regs_dirty; + long unsigned int cr0; + long unsigned int cr0_guest_owned_bits; + long unsigned int cr2; + long unsigned int cr3; + long unsigned int cr4; + long unsigned int cr4_guest_owned_bits; + long unsigned int cr4_guest_rsvd_bits; + long unsigned int cr8; + u32 host_pkru; + u32 pkru; + u32 hflags; + u64 efer; + u64 apic_base; + struct kvm_lapic *apic; + bool apicv_active; + bool load_eoi_exitmap_pending; + long unsigned int ioapic_handled_vectors[4]; + long unsigned int apic_attention; + int32_t apic_arb_prio; + int mp_state; + u64 ia32_misc_enable_msr; + u64 smbase; + u64 smi_count; + bool tpr_access_reporting; + bool xsaves_enabled; + u64 ia32_xss; + u64 microcode_version; + u64 arch_capabilities; + u64 perf_capabilities; + struct kvm_mmu *mmu; + struct kvm_mmu root_mmu; + struct kvm_mmu guest_mmu; + struct kvm_mmu nested_mmu; + struct kvm_mmu *walk_mmu; + struct kvm_mmu_memory_cache mmu_pte_list_desc_cache; + struct kvm_mmu_memory_cache mmu_shadow_page_cache; + struct kvm_mmu_memory_cache mmu_gfn_array_cache; + struct kvm_mmu_memory_cache mmu_page_header_cache; + struct fpu *user_fpu; + struct fpu *guest_fpu; + u64 xcr0; + u64 guest_supported_xcr0; + struct kvm_pio_request pio; + void *pio_data; + void *guest_ins_data; + u8 event_exit_inst_len; + struct kvm_queued_exception exception; + struct kvm_queued_interrupt interrupt; + int halt_request; + int cpuid_nent; + struct kvm_cpuid_entry2 *cpuid_entries; + u64 reserved_gpa_bits; + int maxphyaddr; + int max_tdp_level; + struct x86_emulate_ctxt *emulate_ctxt; + bool emulate_regs_need_sync_to_vcpu; + bool 
emulate_regs_need_sync_from_vcpu; + int (*complete_userspace_io)(struct kvm_vcpu *); + gpa_t time; + struct pvclock_vcpu_time_info hv_clock; + unsigned int hw_tsc_khz; + struct gfn_to_hva_cache pv_time; + bool pv_time_enabled; + bool pvclock_set_guest_stopped_request; + struct { + u8 preempted; + u64 msr_val; + u64 last_steal; + struct gfn_to_pfn_cache cache; + } st; + u64 l1_tsc_offset; + u64 tsc_offset; + u64 last_guest_tsc; + u64 last_host_tsc; + u64 tsc_offset_adjustment; + u64 this_tsc_nsec; + u64 this_tsc_write; + u64 this_tsc_generation; + bool tsc_catchup; + bool tsc_always_catchup; + s8 virtual_tsc_shift; + u32 virtual_tsc_mult; + u32 virtual_tsc_khz; + s64 ia32_tsc_adjust_msr; + u64 msr_ia32_power_ctl; + u64 tsc_scaling_ratio; + atomic_t nmi_queued; + unsigned int nmi_pending; + bool nmi_injected; + bool smi_pending; + struct kvm_mtrr mtrr_state; + u64 pat; + unsigned int switch_db_regs; + long unsigned int db[4]; + long unsigned int dr6; + long unsigned int dr7; + long unsigned int eff_db[4]; + long unsigned int guest_debug_dr7; + u64 msr_platform_info; + u64 msr_misc_features_enables; + u64 mcg_cap; + u64 mcg_status; + u64 mcg_ctl; + u64 mcg_ext_ctl; + u64 *mce_banks; + u64 mmio_gva; + unsigned int mmio_access; + gfn_t mmio_gfn; + u64 mmio_gen; + struct kvm_pmu pmu; + long unsigned int singlestep_rip; + bool hyperv_enabled; + struct kvm_vcpu_hv *hyperv; + struct kvm_vcpu_xen xen; + cpumask_var_t wbinvd_dirty_mask; + long unsigned int last_retry_eip; + long unsigned int last_retry_addr; + struct { + bool halted; + gfn_t gfns[64]; + struct gfn_to_hva_cache data; + u64 msr_en_val; + u64 msr_int_val; + u16 vec; + u32 id; + bool send_user_only; + u32 host_apf_flags; + long unsigned int nested_apf_token; + bool delivery_as_pf_vmexit; + bool pageready_pending; + } apf; + struct { + u64 length; + u64 status; + } osvw; + struct { + u64 msr_val; + struct gfn_to_hva_cache data; + } pv_eoi; + u64 msr_kvm_poll_control; + bool write_fault_to_shadow_pgtable; + long unsigned int exit_qualification; + struct { + bool pv_unhalted; + } pv; + int pending_ioapic_eoi; + int pending_external_vector; + bool preempted_in_kernel; + bool l1tf_flush_l1d; + unsigned int last_vmentry_cpu; + u64 msr_hwcr; + struct { + u32 features; + bool enforce; + } pv_cpuid; + bool guest_state_protected; +}; + +struct kvm_dirty_ring { + u32 dirty_index; + u32 reset_index; + u32 size; + u32 soft_limit; + struct kvm_dirty_gfn *dirty_gfns; + int index; +}; + +struct kvm_vcpu { + struct kvm *kvm; + struct preempt_notifier preempt_notifier; + int cpu; + int vcpu_id; + int vcpu_idx; + int srcu_idx; + int mode; + u64 requests; + long unsigned int guest_debug; + int pre_pcpu; + struct list_head blocked_vcpu_list; + struct mutex mutex; + struct kvm_run *run; + struct rcuwait wait; + struct pid *pid; + int sigset_active; + sigset_t sigset; + struct kvm_vcpu_stat stat; + unsigned int halt_poll_ns; + bool valid_wakeup; + int mmio_needed; + int mmio_read_completed; + int mmio_is_write; + int mmio_cur_fragment; + int mmio_nr_fragments; + struct kvm_mmio_fragment mmio_fragments[2]; + struct { + u32 queued; + struct list_head queue; + struct list_head done; + spinlock_t lock; + } async_pf; + struct { + bool in_spin_loop; + bool dy_eligible; + } spin_loop; + bool preempted; + bool ready; + struct kvm_vcpu_arch arch; + struct kvm_dirty_ring dirty_ring; +}; + +struct kvm_vm_stat { + ulong mmu_shadow_zapped; + ulong mmu_pte_write; + ulong mmu_pde_zapped; + ulong mmu_flooded; + ulong mmu_recycled; + ulong mmu_cache_miss; + ulong mmu_unsync; + 
ulong remote_tlb_flush;
+	ulong lpages;
+	ulong nx_lpage_splits;
+	ulong max_mmu_page_hash_collisions;
+};
+
+struct kvm_pic;
+
+struct kvm_ioapic;
+
+struct kvm_pit;
+
+enum hv_tsc_page_status {
+	HV_TSC_PAGE_UNSET = 0,
+	HV_TSC_PAGE_GUEST_CHANGED = 1,
+	HV_TSC_PAGE_HOST_CHANGED = 2,
+	HV_TSC_PAGE_SET = 3,
+	HV_TSC_PAGE_UPDATING = 4,
+	HV_TSC_PAGE_BROKEN = 5,
+};
+
+struct kvm_hv_syndbg {
+	struct {
+		u64 control;
+		u64 status;
+		u64 send_page;
+		u64 recv_page;
+		u64 pending_page;
+	} control;
+	u64 options;
+};
+
+struct kvm_hv {
+	struct mutex hv_lock;
+	u64 hv_guest_os_id;
+	u64 hv_hypercall;
+	u64 hv_tsc_page;
+	enum hv_tsc_page_status hv_tsc_page_status;
+	u64 hv_crash_param[5];
+	u64 hv_crash_ctl;
+	struct ms_hyperv_tsc_page tsc_ref;
+	struct idr conn_to_evt;
+	u64 hv_reenlightenment_control;
+	u64 hv_tsc_emulation_control;
+	u64 hv_tsc_emulation_status;
+	atomic_t num_mismatched_vp_indexes;
+	struct hv_partition_assist_pg *hv_pa_pg;
+	struct kvm_hv_syndbg hv_syndbg;
+};
+
+struct kvm_xen {
+	bool long_mode;
+	bool shinfo_set;
+	u8 upcall_vector;
+	struct gfn_to_hva_cache shinfo_cache;
+};
+
+enum kvm_irqchip_mode {
+	KVM_IRQCHIP_NONE = 0,
+	KVM_IRQCHIP_KERNEL = 1,
+	KVM_IRQCHIP_SPLIT = 2,
+};
+
+struct kvm_apic_map;
+
+struct kvm_x86_msr_filter;
+
+struct kvm_arch {
+	long unsigned int n_used_mmu_pages;
+	long unsigned int n_requested_mmu_pages;
+	long unsigned int n_max_mmu_pages;
+	unsigned int indirect_shadow_pages;
+	u8 mmu_valid_gen;
+	struct hlist_head mmu_page_hash[4096];
+	struct list_head active_mmu_pages;
+	struct list_head zapped_obsolete_pages;
+	struct list_head lpage_disallowed_mmu_pages;
+	struct kvm_page_track_notifier_node mmu_sp_tracker;
+	struct kvm_page_track_notifier_head track_notifier_head;
+	spinlock_t mmu_unsync_pages_lock;
+	struct list_head assigned_dev_head;
+	struct iommu_domain *iommu_domain;
+	bool iommu_noncoherent;
+	atomic_t noncoherent_dma_count;
+	atomic_t assigned_device_count;
+	struct kvm_pic *vpic;
+	struct kvm_ioapic *vioapic;
+	struct kvm_pit *vpit;
+	atomic_t vapics_in_nmi_mode;
+	struct mutex apic_map_lock;
+	struct kvm_apic_map *apic_map;
+	atomic_t apic_map_dirty;
+	bool apic_access_page_done;
+	long unsigned int apicv_inhibit_reasons;
+	gpa_t wall_clock;
+	bool mwait_in_guest;
+	bool hlt_in_guest;
+	bool pause_in_guest;
+	bool cstate_in_guest;
+	long unsigned int irq_sources_bitmap;
+	s64 kvmclock_offset;
+	raw_spinlock_t tsc_write_lock;
+	u64 last_tsc_nsec;
+	u64 last_tsc_write;
+	u32 last_tsc_khz;
+	u64 cur_tsc_nsec;
+	u64 cur_tsc_write;
+	u64 cur_tsc_offset;
+	u64 cur_tsc_generation;
+	int nr_vcpus_matched_tsc;
+	spinlock_t pvclock_gtod_sync_lock;
+	bool use_master_clock;
+	u64 master_kernel_ns;
+	u64 master_cycle_now;
+	struct delayed_work kvmclock_update_work;
+	struct delayed_work kvmclock_sync_work;
+	struct kvm_xen_hvm_config xen_hvm_config;
+	struct hlist_head mask_notifier_list;
+	struct kvm_hv hyperv;
+	struct kvm_xen xen;
+	bool backwards_tsc_observed;
+	bool boot_vcpu_runs_old_kvmclock;
+	u32 bsp_vcpu_id;
+	u64 disabled_quirks;
+	int cpu_dirty_logging_count;
+	enum kvm_irqchip_mode irqchip_mode;
+	u8 nr_reserved_ioapic_pins;
+	bool disabled_lapic_found;
+	bool x2apic_format;
+	bool x2apic_broadcast_quirk_disabled;
+	bool guest_can_read_msr_platform_info;
+	bool exception_payload_enabled;
+	bool bus_lock_detection_enabled;
+	u32 user_space_msr_mask;
+	struct kvm_x86_msr_filter *msr_filter;
+	bool sgx_provisioning_allowed;
+	struct kvm_pmu_event_filter *pmu_event_filter;
+	struct task_struct *nx_lpage_recovery_thread;
+	bool tdp_mmu_enabled;
+	struct list_head tdp_mmu_roots;
+	struct list_head tdp_mmu_pages;
+	spinlock_t tdp_mmu_pages_lock;
+};
+
+struct kvm_memslots;
+
+struct kvm_io_bus;
+
+struct kvm_irq_routing_table;
+
+struct kvm_stat_data;
+
+struct kvm {
+	rwlock_t mmu_lock;
+	struct mutex slots_lock;
+	struct mm_struct *mm;
+	struct kvm_memslots *memslots[2];
+	struct kvm_vcpu *vcpus[288];
+	atomic_t online_vcpus;
+	int created_vcpus;
+	int last_boosted_vcpu;
+	struct list_head vm_list;
+	struct mutex lock;
+	struct kvm_io_bus *buses[4];
+	struct {
+		spinlock_t lock;
+		struct list_head items;
+		struct list_head resampler_list;
+		struct mutex resampler_lock;
+	} irqfds;
+	struct list_head ioeventfds;
+	struct kvm_vm_stat stat;
+	struct kvm_arch arch;
+	refcount_t users_count;
+	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
+	spinlock_t ring_lock;
+	struct list_head coalesced_zones;
+	struct mutex irq_lock;
+	struct kvm_irq_routing_table *irq_routing;
+	struct hlist_head irq_ack_notifier_list;
+	struct mmu_notifier mmu_notifier;
+	long unsigned int mmu_notifier_seq;
+	long int mmu_notifier_count;
+	long unsigned int mmu_notifier_range_start;
+	long unsigned int mmu_notifier_range_end;
+	long int tlbs_dirty;
+	struct list_head devices;
+	u64 manual_dirty_log_protect;
+	struct dentry *debugfs_dentry;
+	struct kvm_stat_data **debugfs_stat_data;
+	struct srcu_struct srcu;
+	struct srcu_struct irq_srcu;
+	pid_t userspace_pid;
+	unsigned int max_halt_poll_ns;
+	u32 dirty_ring_size;
+};
+
+enum kvm_reg {
+	VCPU_REGS_RAX = 0,
+	VCPU_REGS_RCX = 1,
+	VCPU_REGS_RDX = 2,
+	VCPU_REGS_RBX = 3,
+	VCPU_REGS_RSP = 4,
+	VCPU_REGS_RBP = 5,
+	VCPU_REGS_RSI = 6,
+	VCPU_REGS_RDI = 7,
+	VCPU_REGS_R8 = 8,
+	VCPU_REGS_R9 = 9,
+	VCPU_REGS_R10 = 10,
+	VCPU_REGS_R11 = 11,
+	VCPU_REGS_R12 = 12,
+	VCPU_REGS_R13 = 13,
+	VCPU_REGS_R14 = 14,
+	VCPU_REGS_R15 = 15,
+	VCPU_REGS_RIP = 16,
+	NR_VCPU_REGS = 17,
+	VCPU_EXREG_PDPTR = 17,
+	VCPU_EXREG_CR0 = 18,
+	VCPU_EXREG_CR3 = 19,
+	VCPU_EXREG_CR4 = 20,
+	VCPU_EXREG_RFLAGS = 21,
+	VCPU_EXREG_SEGMENTS = 22,
+	VCPU_EXREG_EXIT_INFO_1 = 23,
+	VCPU_EXREG_EXIT_INFO_2 = 24,
+};
+
+enum exit_fastpath_completion {
+	EXIT_FASTPATH_NONE = 0,
+	EXIT_FASTPATH_REENTER_GUEST = 1,
+	EXIT_FASTPATH_EXIT_HANDLED = 2,
+};
+
+struct kvm_rmap_head {
+	long unsigned int val;
+};
+
+struct kvm_tlb_range {
+	u64 start_gfn;
+	u64 pages;
+};
+
+struct kvm_vcpu_hv_stimer {
+	struct hrtimer timer;
+	int index;
+	union hv_stimer_config config;
+	u64 count;
+	u64 exp_time;
+	struct hv_message msg;
+	bool msg_pending;
+};
+
+struct kvm_vcpu_hv_synic {
+	u64 version;
+	u64 control;
+	u64 msg_page;
+	u64 evt_page;
+	atomic64_t sint[16];
+	atomic_t sint_to_gsi[16];
+	long unsigned int auto_eoi_bitmap[4];
+	long unsigned int vec_bitmap[4];
+	bool active;
+	bool dont_zero_synic_pages;
+};
+
+struct kvm_vcpu_hv {
+	struct kvm_vcpu *vcpu;
+	u32 vp_index;
+	u64 hv_vapic;
+	s64 runtime_offset;
+	struct kvm_vcpu_hv_synic synic;
+	struct kvm_hyperv_exit exit;
+	struct kvm_vcpu_hv_stimer stimer[4];
+	long unsigned int stimer_pending_bitmap[1];
+	cpumask_t tlb_flush;
+};
+
+struct kvm_lpage_info {
+	int disallow_lpage;
+};
+
+struct kvm_apic_map {
+	struct callback_head rcu;
+	u8 mode;
+	u32 max_apic_id;
+	union {
+		struct kvm_lapic *xapic_flat_map[8];
+		struct kvm_lapic *xapic_cluster_map[64];
+	};
+	struct kvm_lapic *phys_map[0];
+};
+
+struct msr_bitmap_range {
+	u32 flags;
+	u32 nmsrs;
+	u32 base;
+	long unsigned int *bitmap;
+};
+
+struct kvm_x86_msr_filter {
+	u8 count;
+	bool default_allow: 1;
+	struct msr_bitmap_range
ranges[16]; +}; + +struct msr_data { + bool host_initiated; + u32 index; + u64 data; +}; + +struct x86_instruction_info; + +enum x86_intercept_stage; + +struct kvm_pmu_ops; + +struct kvm_x86_nested_ops; + +struct kvm_x86_ops { + int (*hardware_enable)(); + void (*hardware_disable)(); + void (*hardware_unsetup)(); + bool (*cpu_has_accelerated_tpr)(); + bool (*has_emulated_msr)(struct kvm *, u32); + void (*vcpu_after_set_cpuid)(struct kvm_vcpu *); + unsigned int vm_size; + int (*vm_init)(struct kvm *); + void (*vm_destroy)(struct kvm *); + int (*vcpu_create)(struct kvm_vcpu *); + void (*vcpu_free)(struct kvm_vcpu *); + void (*vcpu_reset)(struct kvm_vcpu *, bool); + void (*prepare_guest_switch)(struct kvm_vcpu *); + void (*vcpu_load)(struct kvm_vcpu *, int); + void (*vcpu_put)(struct kvm_vcpu *); + void (*update_exception_bitmap)(struct kvm_vcpu *); + int (*get_msr)(struct kvm_vcpu *, struct msr_data *); + int (*set_msr)(struct kvm_vcpu *, struct msr_data *); + u64 (*get_segment_base)(struct kvm_vcpu *, int); + void (*get_segment)(struct kvm_vcpu *, struct kvm_segment *, int); + int (*get_cpl)(struct kvm_vcpu *); + void (*set_segment)(struct kvm_vcpu *, struct kvm_segment *, int); + void (*get_cs_db_l_bits)(struct kvm_vcpu *, int *, int *); + void (*set_cr0)(struct kvm_vcpu *, long unsigned int); + bool (*is_valid_cr4)(struct kvm_vcpu *, long unsigned int); + void (*set_cr4)(struct kvm_vcpu *, long unsigned int); + int (*set_efer)(struct kvm_vcpu *, u64); + void (*get_idt)(struct kvm_vcpu *, struct desc_ptr *); + void (*set_idt)(struct kvm_vcpu *, struct desc_ptr *); + void (*get_gdt)(struct kvm_vcpu *, struct desc_ptr *); + void (*set_gdt)(struct kvm_vcpu *, struct desc_ptr *); + void (*sync_dirty_debug_regs)(struct kvm_vcpu *); + void (*set_dr7)(struct kvm_vcpu *, long unsigned int); + void (*cache_reg)(struct kvm_vcpu *, enum kvm_reg); + long unsigned int (*get_rflags)(struct kvm_vcpu *); + void (*set_rflags)(struct kvm_vcpu *, long unsigned int); + void (*tlb_flush_all)(struct kvm_vcpu *); + void (*tlb_flush_current)(struct kvm_vcpu *); + int (*tlb_remote_flush)(struct kvm *); + int (*tlb_remote_flush_with_range)(struct kvm *, struct kvm_tlb_range *); + void (*tlb_flush_gva)(struct kvm_vcpu *, gva_t); + void (*tlb_flush_guest)(struct kvm_vcpu *); + enum exit_fastpath_completion (*run)(struct kvm_vcpu *); + int (*handle_exit)(struct kvm_vcpu *, enum exit_fastpath_completion); + int (*skip_emulated_instruction)(struct kvm_vcpu *); + void (*update_emulated_instruction)(struct kvm_vcpu *); + void (*set_interrupt_shadow)(struct kvm_vcpu *, int); + u32 (*get_interrupt_shadow)(struct kvm_vcpu *); + void (*patch_hypercall)(struct kvm_vcpu *, unsigned char *); + void (*set_irq)(struct kvm_vcpu *); + void (*set_nmi)(struct kvm_vcpu *); + void (*queue_exception)(struct kvm_vcpu *); + void (*cancel_injection)(struct kvm_vcpu *); + int (*interrupt_allowed)(struct kvm_vcpu *, bool); + int (*nmi_allowed)(struct kvm_vcpu *, bool); + bool (*get_nmi_mask)(struct kvm_vcpu *); + void (*set_nmi_mask)(struct kvm_vcpu *, bool); + void (*enable_nmi_window)(struct kvm_vcpu *); + void (*enable_irq_window)(struct kvm_vcpu *); + void (*update_cr8_intercept)(struct kvm_vcpu *, int, int); + bool (*check_apicv_inhibit_reasons)(ulong); + void (*pre_update_apicv_exec_ctrl)(struct kvm *, bool); + void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *); + void (*hwapic_irr_update)(struct kvm_vcpu *, int); + void (*hwapic_isr_update)(struct kvm_vcpu *, int); + bool (*guest_apic_has_interrupt)(struct kvm_vcpu *); + void 
(*load_eoi_exitmap)(struct kvm_vcpu *, u64 *); + void (*set_virtual_apic_mode)(struct kvm_vcpu *); + void (*set_apic_access_page_addr)(struct kvm_vcpu *); + int (*deliver_posted_interrupt)(struct kvm_vcpu *, int); + int (*sync_pir_to_irr)(struct kvm_vcpu *); + int (*set_tss_addr)(struct kvm *, unsigned int); + int (*set_identity_map_addr)(struct kvm *, u64); + u64 (*get_mt_mask)(struct kvm_vcpu *, gfn_t, bool); + void (*load_mmu_pgd)(struct kvm_vcpu *, hpa_t, int); + bool (*has_wbinvd_exit)(); + u64 (*write_l1_tsc_offset)(struct kvm_vcpu *, u64); + void (*get_exit_info)(struct kvm_vcpu *, u64 *, u64 *, u32 *, u32 *); + int (*check_intercept)(struct kvm_vcpu *, struct x86_instruction_info *, enum x86_intercept_stage, struct x86_exception *); + void (*handle_exit_irqoff)(struct kvm_vcpu *); + void (*request_immediate_exit)(struct kvm_vcpu *); + void (*sched_in)(struct kvm_vcpu *, int); + int cpu_dirty_log_size; + void (*update_cpu_dirty_logging)(struct kvm_vcpu *); + const struct kvm_pmu_ops *pmu_ops; + const struct kvm_x86_nested_ops *nested_ops; + int (*pre_block)(struct kvm_vcpu *); + void (*post_block)(struct kvm_vcpu *); + void (*vcpu_blocking)(struct kvm_vcpu *); + void (*vcpu_unblocking)(struct kvm_vcpu *); + int (*update_pi_irte)(struct kvm *, unsigned int, uint32_t, bool); + void (*start_assignment)(struct kvm *); + void (*apicv_post_state_restore)(struct kvm_vcpu *); + bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *); + int (*set_hv_timer)(struct kvm_vcpu *, u64, bool *); + void (*cancel_hv_timer)(struct kvm_vcpu *); + void (*setup_mce)(struct kvm_vcpu *); + int (*smi_allowed)(struct kvm_vcpu *, bool); + int (*pre_enter_smm)(struct kvm_vcpu *, char *); + int (*pre_leave_smm)(struct kvm_vcpu *, const char *); + void (*enable_smi_window)(struct kvm_vcpu *); + int (*mem_enc_op)(struct kvm *, void *); + int (*mem_enc_reg_region)(struct kvm *, struct kvm_enc_region *); + int (*mem_enc_unreg_region)(struct kvm *, struct kvm_enc_region *); + int (*vm_copy_enc_context_from)(struct kvm *, unsigned int); + int (*get_msr_feature)(struct kvm_msr_entry *); + bool (*can_emulate_instruction)(struct kvm_vcpu *, void *, int); + bool (*apic_init_signal_blocked)(struct kvm_vcpu *); + int (*enable_direct_tlbflush)(struct kvm_vcpu *); + void (*migrate_timers)(struct kvm_vcpu *); + void (*msr_filter_changed)(struct kvm_vcpu *); + int (*complete_emulated_msr)(struct kvm_vcpu *, int); + void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *, u8); +}; + +struct kvm_x86_nested_ops { + int (*check_events)(struct kvm_vcpu *); + bool (*hv_timer_pending)(struct kvm_vcpu *); + void (*triple_fault)(struct kvm_vcpu *); + int (*get_state)(struct kvm_vcpu *, struct kvm_nested_state *, unsigned int); + int (*set_state)(struct kvm_vcpu *, struct kvm_nested_state *, struct kvm_nested_state *); + bool (*get_nested_state_pages)(struct kvm_vcpu *); + int (*write_log_dirty)(struct kvm_vcpu *, gpa_t); + int (*enable_evmcs)(struct kvm_vcpu *, uint16_t *); + uint16_t (*get_evmcs_version)(struct kvm_vcpu *); +}; + +struct kvm_io_device; + +struct kvm_io_range { + gpa_t addr; + int len; + struct kvm_io_device *dev; +}; + +struct kvm_io_bus { + int dev_count; + int ioeventfd_count; + struct kvm_io_range range[0]; +}; + +enum kvm_bus { + KVM_MMIO_BUS = 0, + KVM_PIO_BUS = 1, + KVM_VIRTIO_CCW_NOTIFY_BUS = 2, + KVM_FAST_MMIO_BUS = 3, + KVM_NR_BUSES = 4, +}; + +struct kvm_irq_routing_table { + int chip[72]; + u32 nr_rt_entries; + struct hlist_head map[0]; +}; + +struct kvm_memslots { + u64 generation; + short int 
id_to_index[32767]; + atomic_t lru_slot; + int used_slots; + struct kvm_memory_slot memslots[0]; +}; + +struct kvm_stats_debugfs_item; + +struct kvm_stat_data { + struct kvm *kvm; + struct kvm_stats_debugfs_item *dbgfs_item; +}; + +enum kvm_stat_kind { + KVM_STAT_VM = 0, + KVM_STAT_VCPU = 1, +}; + +struct kvm_stats_debugfs_item { + const char *name; + int offset; + enum kvm_stat_kind kind; + int mode; +}; + +enum kcmp_type { + KCMP_FILE = 0, + KCMP_VM = 1, + KCMP_FILES = 2, + KCMP_FS = 3, + KCMP_SIGHAND = 4, + KCMP_IO = 5, + KCMP_SYSVSEM = 6, + KCMP_EPOLL_TFD = 7, + KCMP_TYPES = 8, +}; + +struct kcmp_epoll_slot { + __u32 efd; + __u32 tfd; + __u32 toff; +}; + +enum profile_type { + PROFILE_TASK_EXIT = 0, + PROFILE_MUNMAP = 1, +}; + +struct profile_hit { + u32 pc; + u32 hits; +}; + +struct stacktrace_cookie { + long unsigned int *store; + unsigned int size; + unsigned int skip; + unsigned int len; +}; + +typedef __kernel_long_t __kernel_suseconds_t; + +typedef __kernel_long_t __kernel_old_time_t; + +typedef __kernel_suseconds_t suseconds_t; + +typedef __u64 timeu64_t; + +struct __kernel_itimerspec { + struct __kernel_timespec it_interval; + struct __kernel_timespec it_value; +}; + +struct timezone { + int tz_minuteswest; + int tz_dsttime; +}; + +struct itimerspec64 { + struct timespec64 it_interval; + struct timespec64 it_value; +}; + +struct old_itimerspec32 { + struct old_timespec32 it_interval; + struct old_timespec32 it_value; +}; + +struct old_timex32 { + u32 modes; + s32 offset; + s32 freq; + s32 maxerror; + s32 esterror; + s32 status; + s32 constant; + s32 precision; + s32 tolerance; + struct old_timeval32 time; + s32 tick; + s32 ppsfreq; + s32 jitter; + s32 shift; + s32 stabil; + s32 jitcnt; + s32 calcnt; + s32 errcnt; + s32 stbcnt; + s32 tai; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct __kernel_timex_timeval { + __kernel_time64_t tv_sec; + long long int tv_usec; +}; + +struct __kernel_timex { + unsigned int modes; + long long int offset; + long long int freq; + long long int maxerror; + long long int esterror; + int status; + long long int constant; + long long int precision; + long long int tolerance; + struct __kernel_timex_timeval time; + long long int tick; + long long int ppsfreq; + long long int jitter; + int shift; + long long int stabil; + long long int jitcnt; + long long int calcnt; + long long int errcnt; + long long int stbcnt; + int tai; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct trace_event_raw_timer_class { + struct trace_entry ent; + void *timer; + char __data[0]; +}; + +struct trace_event_raw_timer_start { + struct trace_entry ent; + void *timer; + void *function; + long unsigned int expires; + long unsigned int now; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_timer_expire_entry { + struct trace_entry ent; + void *timer; + long unsigned int now; + void *function; + long unsigned int baseclk; + char __data[0]; +}; + +struct trace_event_raw_hrtimer_init { + struct trace_entry ent; + void *hrtimer; + clockid_t clockid; + enum hrtimer_mode mode; + char __data[0]; +}; + +struct trace_event_raw_hrtimer_start { + struct trace_entry ent; + void *hrtimer; + void *function; + s64 expires; + s64 softexpires; + enum hrtimer_mode mode; + char __data[0]; +}; + +struct trace_event_raw_hrtimer_expire_entry { + struct trace_entry ent; + void *hrtimer; + s64 now; + void *function; + char __data[0]; +}; + +struct trace_event_raw_hrtimer_class { + struct trace_entry ent; + 
void *hrtimer; + char __data[0]; +}; + +struct trace_event_raw_itimer_state { + struct trace_entry ent; + int which; + long long unsigned int expires; + long int value_sec; + long int value_nsec; + long int interval_sec; + long int interval_nsec; + char __data[0]; +}; + +struct trace_event_raw_itimer_expire { + struct trace_entry ent; + int which; + pid_t pid; + long long unsigned int now; + char __data[0]; +}; + +struct trace_event_raw_tick_stop { + struct trace_entry ent; + int success; + int dependency; + char __data[0]; +}; + +struct trace_event_data_offsets_timer_class {}; + +struct trace_event_data_offsets_timer_start {}; + +struct trace_event_data_offsets_timer_expire_entry {}; + +struct trace_event_data_offsets_hrtimer_init {}; + +struct trace_event_data_offsets_hrtimer_start {}; + +struct trace_event_data_offsets_hrtimer_expire_entry {}; + +struct trace_event_data_offsets_hrtimer_class {}; + +struct trace_event_data_offsets_itimer_state {}; + +struct trace_event_data_offsets_itimer_expire {}; + +struct trace_event_data_offsets_tick_stop {}; + +typedef void (*btf_trace_timer_init)(void *, struct timer_list *); + +typedef void (*btf_trace_timer_start)(void *, struct timer_list *, long unsigned int, unsigned int); + +typedef void (*btf_trace_timer_expire_entry)(void *, struct timer_list *, long unsigned int); + +typedef void (*btf_trace_timer_expire_exit)(void *, struct timer_list *); + +typedef void (*btf_trace_timer_cancel)(void *, struct timer_list *); + +typedef void (*btf_trace_hrtimer_init)(void *, struct hrtimer *, clockid_t, enum hrtimer_mode); + +typedef void (*btf_trace_hrtimer_start)(void *, struct hrtimer *, enum hrtimer_mode); + +typedef void (*btf_trace_hrtimer_expire_entry)(void *, struct hrtimer *, ktime_t *); + +typedef void (*btf_trace_hrtimer_expire_exit)(void *, struct hrtimer *); + +typedef void (*btf_trace_hrtimer_cancel)(void *, struct hrtimer *); + +typedef void (*btf_trace_itimer_state)(void *, int, const struct itimerspec64 * const, long long unsigned int); + +typedef void (*btf_trace_itimer_expire)(void *, int, struct pid *, long long unsigned int); + +typedef void (*btf_trace_tick_stop)(void *, int, int); + +struct timer_base { + raw_spinlock_t lock; + struct timer_list *running_timer; + long unsigned int clk; + long unsigned int next_expiry; + unsigned int cpu; + bool next_expiry_recalc; + bool is_idle; + bool timers_pending; + long unsigned int pending_map[9]; + struct hlist_head vectors[576]; + long: 64; + long: 64; +}; + +struct process_timer { + struct timer_list timer; + struct task_struct *task; +}; + +enum tick_device_mode { + TICKDEV_MODE_PERIODIC = 0, + TICKDEV_MODE_ONESHOT = 1, +}; + +struct tick_device { + struct clock_event_device *evtdev; + enum tick_device_mode mode; +}; + +struct ktime_timestamps { + u64 mono; + u64 boot; + u64 real; +}; + +struct system_time_snapshot { + u64 cycles; + ktime_t real; + ktime_t raw; + enum clocksource_ids cs_id; + unsigned int clock_was_set_seq; + u8 cs_was_changed_seq; +}; + +struct system_device_crosststamp { + ktime_t device; + ktime_t sys_realtime; + ktime_t sys_monoraw; +}; + +struct audit_ntp_val { + long long int oldval; + long long int newval; +}; + +struct audit_ntp_data { + struct audit_ntp_val vals[6]; +}; + +enum timekeeping_adv_mode { + TK_ADV_TICK = 0, + TK_ADV_FREQ = 1, +}; + +struct tk_fast { + seqcount_latch_t seq; + struct tk_read_base base[2]; +}; + +struct rtc_wkalrm { + unsigned char enabled; + unsigned char pending; + struct rtc_time time; +}; + +struct rtc_class_ops { + int 
(*ioctl)(struct device *, unsigned int, long unsigned int); + int (*read_time)(struct device *, struct rtc_time *); + int (*set_time)(struct device *, struct rtc_time *); + int (*read_alarm)(struct device *, struct rtc_wkalrm *); + int (*set_alarm)(struct device *, struct rtc_wkalrm *); + int (*proc)(struct device *, struct seq_file *); + int (*alarm_irq_enable)(struct device *, unsigned int); + int (*read_offset)(struct device *, long int *); + int (*set_offset)(struct device *, long int); +}; + +struct rtc_device; + +struct rtc_timer { + struct timerqueue_node node; + ktime_t period; + void (*func)(struct rtc_device *); + struct rtc_device *rtc; + int enabled; +}; + +struct rtc_device { + struct device dev; + struct module *owner; + int id; + const struct rtc_class_ops *ops; + struct mutex ops_lock; + struct cdev char_dev; + long unsigned int flags; + long unsigned int irq_data; + spinlock_t irq_lock; + wait_queue_head_t irq_queue; + struct fasync_struct *async_queue; + int irq_freq; + int max_user_freq; + struct timerqueue_head timerqueue; + struct rtc_timer aie_timer; + struct rtc_timer uie_rtctimer; + struct hrtimer pie_timer; + int pie_enabled; + struct work_struct irqwork; + int uie_unsupported; + long unsigned int set_offset_nsec; + long unsigned int features[1]; + time64_t range_min; + timeu64_t range_max; + time64_t start_secs; + time64_t offset_secs; + bool set_start_time; +}; + +typedef s64 int64_t; + +enum tick_nohz_mode { + NOHZ_MODE_INACTIVE = 0, + NOHZ_MODE_LOWRES = 1, + NOHZ_MODE_HIGHRES = 2, +}; + +struct tick_sched { + struct hrtimer sched_timer; + long unsigned int check_clocks; + enum tick_nohz_mode nohz_mode; + unsigned int inidle: 1; + unsigned int tick_stopped: 1; + unsigned int idle_active: 1; + unsigned int do_timer_last: 1; + unsigned int got_idle_tick: 1; + ktime_t last_tick; + ktime_t next_tick; + long unsigned int idle_jiffies; + long unsigned int idle_calls; + long unsigned int idle_sleeps; + ktime_t idle_entrytime; + ktime_t idle_waketime; + ktime_t idle_exittime; + ktime_t idle_sleeptime; + ktime_t iowait_sleeptime; + long unsigned int last_jiffies; + u64 timer_expires; + u64 timer_expires_base; + u64 next_timer; + ktime_t idle_expires; + atomic_t tick_dep_mask; +}; + +struct timer_list_iter { + int cpu; + bool second_pass; + u64 now; +}; + +struct tm { + int tm_sec; + int tm_min; + int tm_hour; + int tm_mday; + int tm_mon; + long int tm_year; + int tm_wday; + int tm_yday; +}; + +struct cyclecounter { + u64 (*read)(const struct cyclecounter *); + u64 mask; + u32 mult; + u32 shift; +}; + +struct timecounter { + const struct cyclecounter *cc; + u64 cycle_last; + u64 nsec; + u64 mask; + u64 frac; +}; + +typedef __kernel_timer_t timer_t; + +enum alarmtimer_type { + ALARM_REALTIME = 0, + ALARM_BOOTTIME = 1, + ALARM_NUMTYPE = 2, + ALARM_REALTIME_FREEZER = 3, + ALARM_BOOTTIME_FREEZER = 4, +}; + +enum alarmtimer_restart { + ALARMTIMER_NORESTART = 0, + ALARMTIMER_RESTART = 1, +}; + +struct alarm { + struct timerqueue_node node; + struct hrtimer timer; + enum alarmtimer_restart (*function)(struct alarm *, ktime_t); + enum alarmtimer_type type; + int state; + void *data; +}; + +struct cpu_timer { + struct timerqueue_node node; + struct timerqueue_head *head; + struct pid *pid; + struct list_head elist; + int firing; +}; + +struct k_clock; + +struct k_itimer { + struct list_head list; + struct hlist_node t_hash; + spinlock_t it_lock; + const struct k_clock *kclock; + clockid_t it_clock; + timer_t it_id; + int it_active; + s64 it_overrun; + s64 it_overrun_last; + int 
it_requeue_pending; + int it_sigev_notify; + ktime_t it_interval; + struct signal_struct *it_signal; + union { + struct pid *it_pid; + struct task_struct *it_process; + }; + struct sigqueue *sigq; + union { + struct { + struct hrtimer timer; + } real; + struct cpu_timer cpu; + struct { + struct alarm alarmtimer; + } alarm; + } it; + struct callback_head rcu; +}; + +struct k_clock { + int (*clock_getres)(const clockid_t, struct timespec64 *); + int (*clock_set)(const clockid_t, const struct timespec64 *); + int (*clock_get_timespec)(const clockid_t, struct timespec64 *); + ktime_t (*clock_get_ktime)(const clockid_t); + int (*clock_adj)(const clockid_t, struct __kernel_timex *); + int (*timer_create)(struct k_itimer *); + int (*nsleep)(const clockid_t, int, const struct timespec64 *); + int (*timer_set)(struct k_itimer *, int, struct itimerspec64 *, struct itimerspec64 *); + int (*timer_del)(struct k_itimer *); + void (*timer_get)(struct k_itimer *, struct itimerspec64 *); + void (*timer_rearm)(struct k_itimer *); + s64 (*timer_forward)(struct k_itimer *, ktime_t); + ktime_t (*timer_remaining)(struct k_itimer *, ktime_t); + int (*timer_try_to_cancel)(struct k_itimer *); + void (*timer_arm)(struct k_itimer *, ktime_t, bool, bool); + void (*timer_wait_running)(struct k_itimer *); +}; + +struct class_interface { + struct list_head node; + struct class *class; + int (*add_dev)(struct device *, struct class_interface *); + void (*remove_dev)(struct device *, struct class_interface *); +}; + +struct platform_driver { + int (*probe)(struct platform_device *); + int (*remove)(struct platform_device *); + void (*shutdown)(struct platform_device *); + int (*suspend)(struct platform_device *, pm_message_t); + int (*resume)(struct platform_device *); + struct device_driver driver; + const struct platform_device_id *id_table; + bool prevent_deferred_probe; +}; + +struct trace_event_raw_alarmtimer_suspend { + struct trace_entry ent; + s64 expires; + unsigned char alarm_type; + char __data[0]; +}; + +struct trace_event_raw_alarm_class { + struct trace_entry ent; + void *alarm; + unsigned char alarm_type; + s64 expires; + s64 now; + char __data[0]; +}; + +struct trace_event_data_offsets_alarmtimer_suspend {}; + +struct trace_event_data_offsets_alarm_class {}; + +typedef void (*btf_trace_alarmtimer_suspend)(void *, ktime_t, int); + +typedef void (*btf_trace_alarmtimer_fired)(void *, struct alarm *, ktime_t); + +typedef void (*btf_trace_alarmtimer_start)(void *, struct alarm *, ktime_t); + +typedef void (*btf_trace_alarmtimer_cancel)(void *, struct alarm *, ktime_t); + +struct alarm_base { + spinlock_t lock; + struct timerqueue_head timerqueue; + ktime_t (*get_ktime)(); + void (*get_timespec)(struct timespec64 *); + clockid_t base_clockid; +}; + +struct sigevent { + sigval_t sigev_value; + int sigev_signo; + int sigev_notify; + union { + int _pad[12]; + int _tid; + struct { + void (*_function)(sigval_t); + void *_attribute; + } _sigev_thread; + } _sigev_un; +}; + +typedef struct sigevent sigevent_t; + +struct compat_sigevent { + compat_sigval_t sigev_value; + compat_int_t sigev_signo; + compat_int_t sigev_notify; + union { + compat_int_t _pad[13]; + compat_int_t _tid; + struct { + compat_uptr_t _function; + compat_uptr_t _attribute; + } _sigev_thread; + } _sigev_un; +}; + +struct posix_clock; + +struct posix_clock_operations { + struct module *owner; + int (*clock_adjtime)(struct posix_clock *, struct __kernel_timex *); + int (*clock_gettime)(struct posix_clock *, struct timespec64 *); + int 
(*clock_getres)(struct posix_clock *, struct timespec64 *); + int (*clock_settime)(struct posix_clock *, const struct timespec64 *); + long int (*ioctl)(struct posix_clock *, unsigned int, long unsigned int); + int (*open)(struct posix_clock *, fmode_t); + __poll_t (*poll)(struct posix_clock *, struct file *, poll_table *); + int (*release)(struct posix_clock *); + ssize_t (*read)(struct posix_clock *, uint, char *, size_t); +}; + +struct posix_clock { + struct posix_clock_operations ops; + struct cdev cdev; + struct device *dev; + struct rw_semaphore rwsem; + bool zombie; +}; + +struct posix_clock_desc { + struct file *fp; + struct posix_clock *clk; +}; + +struct __kernel_old_itimerval { + struct __kernel_old_timeval it_interval; + struct __kernel_old_timeval it_value; +}; + +struct old_itimerval32 { + struct old_timeval32 it_interval; + struct old_timeval32 it_value; +}; + +struct ce_unbind { + struct clock_event_device *ce; + int res; +}; + +struct proc_timens_offset { + int clockid; + struct timespec64 val; +}; + +union futex_key { + struct { + u64 i_seq; + long unsigned int pgoff; + unsigned int offset; + } shared; + struct { + union { + struct mm_struct *mm; + u64 __tmp; + }; + long unsigned int address; + unsigned int offset; + } private; + struct { + u64 ptr; + long unsigned int word; + unsigned int offset; + } both; +}; + +struct futex_pi_state { + struct list_head list; + struct rt_mutex pi_mutex; + struct task_struct *owner; + refcount_t refcount; + union futex_key key; +}; + +struct futex_q { + struct plist_node list; + struct task_struct *task; + spinlock_t *lock_ptr; + union futex_key key; + struct futex_pi_state *pi_state; + struct rt_mutex_waiter *rt_waiter; + union futex_key *requeue_pi_key; + u32 bitset; +}; + +struct futex_hash_bucket { + atomic_t waiters; + spinlock_t lock; + struct plist_head chain; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +enum futex_access { + FUTEX_READ = 0, + FUTEX_WRITE = 1, +}; + +struct dma_chan { + int lock; + const char *device_id; +}; + +struct cfd_percpu { + call_single_data_t csd; +}; + +struct call_function_data { + struct cfd_percpu *pcpu; + cpumask_var_t cpumask; + cpumask_var_t cpumask_ipi; +}; + +struct smp_call_on_cpu_struct { + struct work_struct work; + struct completion done; + int (*func)(void *); + void *data; + int ret; + int cpu; +}; + +struct latch_tree_root { + seqcount_latch_t seq; + struct rb_root tree[2]; +}; + +struct latch_tree_ops { + bool (*less)(struct latch_tree_node *, struct latch_tree_node *); + int (*comp)(void *, struct latch_tree_node *); +}; + +struct modversion_info { + long unsigned int crc; + char name[56]; +}; + +struct module_use { + struct list_head source_list; + struct list_head target_list; + struct module *source; + struct module *target; +}; + +struct module_sect_attr { + struct bin_attribute battr; + long unsigned int address; +}; + +struct module_sect_attrs { + struct attribute_group grp; + unsigned int nsections; + struct module_sect_attr attrs[0]; +}; + +struct module_notes_attrs { + struct kobject *dir; + unsigned int notes; + struct bin_attribute attrs[0]; +}; + +enum kernel_read_file_id { + READING_UNKNOWN = 0, + READING_FIRMWARE = 1, + READING_MODULE = 2, + READING_KEXEC_IMAGE = 3, + READING_KEXEC_INITRAMFS = 4, + READING_POLICY = 5, + READING_X509_CERTIFICATE = 6, + READING_MAX_ID = 7, +}; + +enum kernel_load_data_id { + LOADING_UNKNOWN = 0, + LOADING_FIRMWARE = 1, + LOADING_MODULE = 2, + LOADING_KEXEC_IMAGE = 3, + LOADING_KEXEC_INITRAMFS = 4, + LOADING_POLICY = 5, 
+ LOADING_X509_CERTIFICATE = 6, + LOADING_MAX_ID = 7, +}; + +enum { + PROC_ENTRY_PERMANENT = 1, +}; + +struct load_info { + const char *name; + struct module *mod; + Elf64_Ehdr *hdr; + long unsigned int len; + Elf64_Shdr *sechdrs; + char *secstrings; + char *strtab; + long unsigned int symoffs; + long unsigned int stroffs; + long unsigned int init_typeoffs; + long unsigned int core_typeoffs; + struct _ddebug *debug; + unsigned int num_debug; + bool sig_ok; + long unsigned int mod_kallsyms_init_off; + struct { + unsigned int sym; + unsigned int str; + unsigned int mod; + unsigned int vers; + unsigned int info; + unsigned int pcpu; + } index; +}; + +struct trace_event_raw_module_load { + struct trace_entry ent; + unsigned int taints; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_module_free { + struct trace_entry ent; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_module_refcnt { + struct trace_entry ent; + long unsigned int ip; + int refcnt; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_module_request { + struct trace_entry ent; + long unsigned int ip; + bool wait; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_data_offsets_module_load { + u32 name; +}; + +struct trace_event_data_offsets_module_free { + u32 name; +}; + +struct trace_event_data_offsets_module_refcnt { + u32 name; +}; + +struct trace_event_data_offsets_module_request { + u32 name; +}; + +typedef void (*btf_trace_module_load)(void *, struct module *); + +typedef void (*btf_trace_module_free)(void *, struct module *); + +typedef void (*btf_trace_module_get)(void *, struct module *, long unsigned int); + +typedef void (*btf_trace_module_put)(void *, struct module *, long unsigned int); + +typedef void (*btf_trace_module_request)(void *, char *, bool, long unsigned int); + +struct mod_tree_root { + struct latch_tree_root root; + long unsigned int addr_min; + long unsigned int addr_max; +}; + +enum mod_license { + NOT_GPL_ONLY = 0, + GPL_ONLY = 1, +}; + +struct symsearch { + const struct kernel_symbol *start; + const struct kernel_symbol *stop; + const s32 *crcs; + enum mod_license license; +}; + +struct find_symbol_arg { + const char *name; + bool gplok; + bool warn; + struct module *owner; + const s32 *crc; + const struct kernel_symbol *sym; + enum mod_license license; +}; + +struct mod_initfree { + struct llist_node node; + void *module_init; +}; + +struct module_signature { + u8 algo; + u8 hash; + u8 id_type; + u8 signer_len; + u8 key_id_len; + u8 __pad[3]; + __be32 sig_len; +}; + +enum pkey_id_type { + PKEY_ID_PGP = 0, + PKEY_ID_X509 = 1, + PKEY_ID_PKCS7 = 2, +}; + +struct kallsym_iter { + loff_t pos; + loff_t pos_arch_end; + loff_t pos_mod_end; + loff_t pos_ftrace_mod_end; + loff_t pos_bpf_end; + long unsigned int value; + unsigned int nameoff; + char type; + char name[128]; + char module_name[56]; + int exported; + int show_value; +}; + +typedef __u16 comp_t; + +struct acct_v3 { + char ac_flag; + char ac_version; + __u16 ac_tty; + __u32 ac_exitcode; + __u32 ac_uid; + __u32 ac_gid; + __u32 ac_pid; + __u32 ac_ppid; + __u32 ac_btime; + __u32 ac_etime; + comp_t ac_utime; + comp_t ac_stime; + comp_t ac_mem; + comp_t ac_io; + comp_t ac_rw; + comp_t ac_minflt; + comp_t ac_majflt; + comp_t ac_swaps; + char ac_comm[16]; +}; + +typedef struct acct_v3 acct_t; + +struct fs_pin { + wait_queue_head_t wait; + int done; + struct hlist_node s_list; + struct hlist_node m_list; + void (*kill)(struct fs_pin *); +}; + +struct bsd_acct_struct { + struct 
fs_pin pin; + atomic_long_t count; + struct callback_head rcu; + struct mutex lock; + int active; + long unsigned int needcheck; + struct file *file; + struct pid_namespace *ns; + struct work_struct work; + struct completion done; +}; + +struct elf64_note { + Elf64_Word n_namesz; + Elf64_Word n_descsz; + Elf64_Word n_type; +}; + +struct elf_note_section { + struct elf64_note n_hdr; + u8 n_data[0]; +}; + +typedef long unsigned int elf_greg_t; + +typedef elf_greg_t elf_gregset_t[27]; + +struct elf_siginfo { + int si_signo; + int si_code; + int si_errno; +}; + +struct elf_prstatus_common { + struct elf_siginfo pr_info; + short int pr_cursig; + long unsigned int pr_sigpend; + long unsigned int pr_sighold; + pid_t pr_pid; + pid_t pr_ppid; + pid_t pr_pgrp; + pid_t pr_sid; + struct __kernel_old_timeval pr_utime; + struct __kernel_old_timeval pr_stime; + struct __kernel_old_timeval pr_cutime; + struct __kernel_old_timeval pr_cstime; +}; + +struct elf_prstatus { + struct elf_prstatus_common common; + elf_gregset_t pr_reg; + int pr_fpvalid; +}; + +typedef u32 note_buf_t[92]; + +struct compat_kexec_segment { + compat_uptr_t buf; + compat_size_t bufsz; + compat_ulong_t mem; + compat_size_t memsz; +}; + +struct elf64_phdr { + Elf64_Word p_type; + Elf64_Word p_flags; + Elf64_Off p_offset; + Elf64_Addr p_vaddr; + Elf64_Addr p_paddr; + Elf64_Xword p_filesz; + Elf64_Xword p_memsz; + Elf64_Xword p_align; +}; + +typedef struct elf64_phdr Elf64_Phdr; + +struct kexec_sha_region { + long unsigned int start; + long unsigned int len; +}; + +typedef __kernel_ulong_t ino_t; + +enum bpf_link_type { + BPF_LINK_TYPE_UNSPEC = 0, + BPF_LINK_TYPE_RAW_TRACEPOINT = 1, + BPF_LINK_TYPE_TRACING = 2, + BPF_LINK_TYPE_CGROUP = 3, + BPF_LINK_TYPE_ITER = 4, + BPF_LINK_TYPE_NETNS = 5, + BPF_LINK_TYPE_XDP = 6, + MAX_BPF_LINK_TYPE = 7, +}; + +struct bpf_link_info { + __u32 type; + __u32 id; + __u32 prog_id; + union { + struct { + __u64 tp_name; + __u32 tp_name_len; + } raw_tracepoint; + struct { + __u32 attach_type; + __u32 target_obj_id; + __u32 target_btf_id; + } tracing; + struct { + __u64 cgroup_id; + __u32 attach_type; + } cgroup; + struct { + __u64 target_name; + __u32 target_name_len; + union { + struct { + __u32 map_id; + } map; + }; + } iter; + struct { + __u32 netns_ino; + __u32 attach_type; + } netns; + struct { + __u32 ifindex; + } xdp; + }; +}; + +struct bpf_link_ops; + +struct bpf_link { + atomic64_t refcnt; + u32 id; + enum bpf_link_type type; + const struct bpf_link_ops *ops; + struct bpf_prog *prog; + struct work_struct work; +}; + +struct bpf_link_ops { + void (*release)(struct bpf_link *); + void (*dealloc)(struct bpf_link *); + int (*detach)(struct bpf_link *); + int (*update_prog)(struct bpf_link *, struct bpf_prog *, struct bpf_prog *); + void (*show_fdinfo)(const struct bpf_link *, struct seq_file *); + int (*fill_link_info)(const struct bpf_link *, struct bpf_link_info *); +}; + +struct bpf_cgroup_link { + struct bpf_link link; + struct cgroup *cgroup; + enum bpf_attach_type type; +}; + +enum { + CGRP_NOTIFY_ON_RELEASE = 0, + CGRP_CPUSET_CLONE_CHILDREN = 1, + CGRP_FREEZE = 2, + CGRP_FROZEN = 3, +}; + +enum { + CGRP_ROOT_NOPREFIX = 2, + CGRP_ROOT_XATTR = 4, + CGRP_ROOT_NS_DELEGATE = 8, + CGRP_ROOT_CPUSET_V2_MODE = 16, + CGRP_ROOT_MEMORY_LOCAL_EVENTS = 32, + CGRP_ROOT_MEMORY_RECURSIVE_PROT = 64, +}; + +struct cgroup_taskset { + struct list_head src_csets; + struct list_head dst_csets; + int nr_tasks; + int ssid; + struct list_head *csets; + struct css_set *cur_cset; + struct task_struct *cur_task; +}; + +struct 
cgroup_fs_context { + struct kernfs_fs_context kfc; + struct cgroup_root *root; + struct cgroup_namespace *ns; + unsigned int flags; + bool cpuset_clone_children; + bool none; + bool all_ss; + u16 subsys_mask; + char *name; + char *release_agent; +}; + +struct cgrp_cset_link { + struct cgroup *cgrp; + struct css_set *cset; + struct list_head cset_link; + struct list_head cgrp_link; +}; + +struct cgroup_mgctx { + struct list_head preloaded_src_csets; + struct list_head preloaded_dst_csets; + struct cgroup_taskset tset; + u16 ss_mask; +}; + +struct trace_event_raw_cgroup_root { + struct trace_entry ent; + int root; + u16 ss_mask; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_cgroup { + struct trace_entry ent; + int root; + int id; + int level; + u32 __data_loc_path; + char __data[0]; +}; + +struct trace_event_raw_cgroup_migrate { + struct trace_entry ent; + int dst_root; + int dst_id; + int dst_level; + int pid; + u32 __data_loc_dst_path; + u32 __data_loc_comm; + char __data[0]; +}; + +struct trace_event_raw_cgroup_event { + struct trace_entry ent; + int root; + int id; + int level; + u32 __data_loc_path; + int val; + char __data[0]; +}; + +struct trace_event_data_offsets_cgroup_root { + u32 name; +}; + +struct trace_event_data_offsets_cgroup { + u32 path; +}; + +struct trace_event_data_offsets_cgroup_migrate { + u32 dst_path; + u32 comm; +}; + +struct trace_event_data_offsets_cgroup_event { + u32 path; +}; + +typedef void (*btf_trace_cgroup_setup_root)(void *, struct cgroup_root *); + +typedef void (*btf_trace_cgroup_destroy_root)(void *, struct cgroup_root *); + +typedef void (*btf_trace_cgroup_remount)(void *, struct cgroup_root *); + +typedef void (*btf_trace_cgroup_mkdir)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_rmdir)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_release)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_rename)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_freeze)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_unfreeze)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_attach_task)(void *, struct cgroup *, const char *, struct task_struct *, bool); + +typedef void (*btf_trace_cgroup_transfer_tasks)(void *, struct cgroup *, const char *, struct task_struct *, bool); + +typedef void (*btf_trace_cgroup_notify_populated)(void *, struct cgroup *, const char *, int); + +typedef void (*btf_trace_cgroup_notify_frozen)(void *, struct cgroup *, const char *, int); + +enum cgroup2_param { + Opt_nsdelegate = 0, + Opt_memory_localevents = 1, + Opt_memory_recursiveprot = 2, + nr__cgroup2_params = 3, +}; + +struct cgroupstats { + __u64 nr_sleeping; + __u64 nr_running; + __u64 nr_stopped; + __u64 nr_uninterruptible; + __u64 nr_io_wait; +}; + +enum cgroup_filetype { + CGROUP_FILE_PROCS = 0, + CGROUP_FILE_TASKS = 1, +}; + +struct cgroup_pidlist { + struct { + enum cgroup_filetype type; + struct pid_namespace *ns; + } key; + pid_t *list; + int length; + struct list_head links; + struct cgroup *owner; + struct delayed_work destroy_dwork; +}; + +enum cgroup1_param { + Opt_all = 0, + Opt_clone_children = 1, + Opt_cpuset_v2_mode = 2, + Opt_name = 3, + Opt_none = 4, + Opt_noprefix = 5, + Opt_release_agent = 6, + Opt_xattr = 7, +}; + +enum freezer_state_flags { + CGROUP_FREEZER_ONLINE = 1, + CGROUP_FREEZING_SELF = 2, + CGROUP_FREEZING_PARENT = 4, + CGROUP_FROZEN = 8, + CGROUP_FREEZING = 6, +}; + 
+struct freezer { + struct cgroup_subsys_state css; + unsigned int state; +}; + +struct pids_cgroup { + struct cgroup_subsys_state css; + atomic64_t counter; + atomic64_t limit; + struct cgroup_file events_file; + atomic64_t events_limit; +}; + +typedef struct { + char *from; + char *to; +} substring_t; + +enum rdmacg_resource_type { + RDMACG_RESOURCE_HCA_HANDLE = 0, + RDMACG_RESOURCE_HCA_OBJECT = 1, + RDMACG_RESOURCE_MAX = 2, +}; + +struct rdma_cgroup { + struct cgroup_subsys_state css; + struct list_head rpools; +}; + +struct rdmacg_device { + struct list_head dev_node; + struct list_head rpools; + char *name; +}; + +enum rdmacg_file_type { + RDMACG_RESOURCE_TYPE_MAX = 0, + RDMACG_RESOURCE_TYPE_STAT = 1, +}; + +struct rdmacg_resource { + int max; + int usage; +}; + +struct rdmacg_resource_pool { + struct rdmacg_device *device; + struct rdmacg_resource resources[2]; + struct list_head cg_node; + struct list_head dev_node; + u64 usage_sum; + int num_max_cnt; +}; + +struct root_domain___2; + +struct fmeter { + int cnt; + int val; + time64_t time; + spinlock_t lock; +}; + +struct cpuset { + struct cgroup_subsys_state css; + long unsigned int flags; + cpumask_var_t cpus_allowed; + nodemask_t mems_allowed; + cpumask_var_t effective_cpus; + nodemask_t effective_mems; + cpumask_var_t subparts_cpus; + nodemask_t old_mems_allowed; + struct fmeter fmeter; + int attach_in_progress; + int pn; + int relax_domain_level; + int nr_subparts_cpus; + int partition_root_state; + int use_parent_ecpus; + int child_ecpus_count; +}; + +struct tmpmasks { + cpumask_var_t addmask; + cpumask_var_t delmask; + cpumask_var_t new_cpus; +}; + +typedef enum { + CS_ONLINE = 0, + CS_CPU_EXCLUSIVE = 1, + CS_MEM_EXCLUSIVE = 2, + CS_MEM_HARDWALL = 3, + CS_MEMORY_MIGRATE = 4, + CS_SCHED_LOAD_BALANCE = 5, + CS_SPREAD_PAGE = 6, + CS_SPREAD_SLAB = 7, +} cpuset_flagbits_t; + +enum subparts_cmd { + partcmd_enable = 0, + partcmd_disable = 1, + partcmd_update = 2, +}; + +struct cpuset_migrate_mm_work { + struct work_struct work; + struct mm_struct *mm; + nodemask_t from; + nodemask_t to; +}; + +typedef enum { + FILE_MEMORY_MIGRATE = 0, + FILE_CPULIST = 1, + FILE_MEMLIST = 2, + FILE_EFFECTIVE_CPULIST = 3, + FILE_EFFECTIVE_MEMLIST = 4, + FILE_SUBPARTS_CPULIST = 5, + FILE_CPU_EXCLUSIVE = 6, + FILE_MEM_EXCLUSIVE = 7, + FILE_MEM_HARDWALL = 8, + FILE_SCHED_LOAD_BALANCE = 9, + FILE_PARTITION_ROOT = 10, + FILE_SCHED_RELAX_DOMAIN_LEVEL = 11, + FILE_MEMORY_PRESSURE_ENABLED = 12, + FILE_MEMORY_PRESSURE = 13, + FILE_SPREAD_PAGE = 14, + FILE_SPREAD_SLAB = 15, +} cpuset_filetype_t; + +enum misc_res_type { + MISC_CG_RES_SEV = 0, + MISC_CG_RES_SEV_ES = 1, + MISC_CG_RES_TYPES = 2, +}; + +struct misc_res { + long unsigned int max; + atomic_long_t usage; + bool failed; +}; + +struct misc_cg { + struct cgroup_subsys_state css; + struct misc_res res[2]; +}; + +struct kernel_pkey_query { + __u32 supported_ops; + __u32 key_size; + __u16 max_data_size; + __u16 max_sig_size; + __u16 max_enc_size; + __u16 max_dec_size; +}; + +enum kernel_pkey_operation { + kernel_pkey_encrypt = 0, + kernel_pkey_decrypt = 1, + kernel_pkey_sign = 2, + kernel_pkey_verify = 3, +}; + +struct kernel_pkey_params { + struct key *key; + const char *encoding; + const char *hash_algo; + char *info; + __u32 in_len; + union { + __u32 out_len; + __u32 in2_len; + }; + enum kernel_pkey_operation op: 8; +}; + +struct key_preparsed_payload { + const char *orig_description; + char *description; + union key_payload payload; + const void *data; + size_t datalen; + size_t quotalen; + time64_t 
expiry; +}; + +struct key_match_data { + bool (*cmp)(const struct key *, const struct key_match_data *); + const void *raw_data; + void *preparsed; + unsigned int lookup_type; +}; + +struct idmap_key { + bool map_up; + u32 id; + u32 count; +}; + +struct ctl_path { + const char *procname; +}; + +struct cpu_stop_done { + atomic_t nr_todo; + int ret; + struct completion completion; +}; + +struct cpu_stopper { + struct task_struct *thread; + raw_spinlock_t lock; + bool enabled; + struct list_head works; + struct cpu_stop_work stop_work; + long unsigned int caller; + cpu_stop_fn_t fn; +}; + +enum multi_stop_state { + MULTI_STOP_NONE = 0, + MULTI_STOP_PREPARE = 1, + MULTI_STOP_DISABLE_IRQ = 2, + MULTI_STOP_RUN = 3, + MULTI_STOP_EXIT = 4, +}; + +struct multi_stop_data { + cpu_stop_fn_t fn; + void *data; + unsigned int num_threads; + const struct cpumask *active_cpus; + enum multi_stop_state state; + atomic_t thread_ack; +}; + +typedef int __kernel_mqd_t; + +typedef __kernel_mqd_t mqd_t; + +enum audit_state { + AUDIT_DISABLED = 0, + AUDIT_BUILD_CONTEXT = 1, + AUDIT_RECORD_CONTEXT = 2, +}; + +struct audit_cap_data { + kernel_cap_t permitted; + kernel_cap_t inheritable; + union { + unsigned int fE; + kernel_cap_t effective; + }; + kernel_cap_t ambient; + kuid_t rootid; +}; + +struct audit_names { + struct list_head list; + struct filename *name; + int name_len; + bool hidden; + long unsigned int ino; + dev_t dev; + umode_t mode; + kuid_t uid; + kgid_t gid; + dev_t rdev; + struct lsmblob oblob; + struct audit_cap_data fcap; + unsigned int fcap_ver; + unsigned char type; + bool should_free; +}; + +struct mq_attr { + __kernel_long_t mq_flags; + __kernel_long_t mq_maxmsg; + __kernel_long_t mq_msgsize; + __kernel_long_t mq_curmsgs; + __kernel_long_t __reserved[4]; +}; + +struct audit_proctitle { + int len; + char *value; +}; + +struct audit_aux_data; + +struct __kernel_sockaddr_storage; + +struct audit_tree_refs; + +struct audit_context { + int dummy; + int in_syscall; + enum audit_state state; + enum audit_state current_state; + unsigned int serial; + int major; + struct timespec64 ctime; + long unsigned int argv[4]; + long int return_code; + u64 prio; + int return_valid; + struct audit_names preallocated_names[5]; + int name_count; + struct list_head names_list; + char *filterkey; + struct path pwd; + struct audit_aux_data *aux; + struct audit_aux_data *aux_pids; + struct __kernel_sockaddr_storage *sockaddr; + size_t sockaddr_len; + pid_t pid; + pid_t ppid; + kuid_t uid; + kuid_t euid; + kuid_t suid; + kuid_t fsuid; + kgid_t gid; + kgid_t egid; + kgid_t sgid; + kgid_t fsgid; + long unsigned int personality; + int arch; + pid_t target_pid; + kuid_t target_auid; + kuid_t target_uid; + unsigned int target_sessionid; + struct lsmblob target_lsm; + char target_comm[16]; + struct audit_tree_refs *trees; + struct audit_tree_refs *first_trees; + struct list_head killed_trees; + int tree_count; + int type; + union { + struct { + int nargs; + long int args[6]; + } socketcall; + struct { + kuid_t uid; + kgid_t gid; + umode_t mode; + struct lsmblob oblob; + int has_perm; + uid_t perm_uid; + gid_t perm_gid; + umode_t perm_mode; + long unsigned int qbytes; + } ipc; + struct { + mqd_t mqdes; + struct mq_attr mqstat; + } mq_getsetattr; + struct { + mqd_t mqdes; + int sigev_signo; + } mq_notify; + struct { + mqd_t mqdes; + size_t msg_len; + unsigned int msg_prio; + struct timespec64 abs_timeout; + } mq_sendrecv; + struct { + int oflag; + umode_t mode; + struct mq_attr attr; + } mq_open; + struct { + pid_t pid; + struct 
audit_cap_data cap; + } capset; + struct { + int fd; + int flags; + } mmap; + struct { + int argc; + } execve; + struct { + char *name; + } module; + }; + int fds[2]; + struct audit_proctitle proctitle; +}; + +struct __kernel_sockaddr_storage { + union { + struct { + __kernel_sa_family_t ss_family; + char __data[126]; + }; + void *__align; + }; +}; + +struct lsmcontext { + char *context; + u32 len; + int slot; +}; + +enum audit_nlgrps { + AUDIT_NLGRP_NONE = 0, + AUDIT_NLGRP_READLOG = 1, + __AUDIT_NLGRP_MAX = 2, +}; + +struct audit_status { + __u32 mask; + __u32 enabled; + __u32 failure; + __u32 pid; + __u32 rate_limit; + __u32 backlog_limit; + __u32 lost; + __u32 backlog; + union { + __u32 version; + __u32 feature_bitmap; + }; + __u32 backlog_wait_time; + __u32 backlog_wait_time_actual; +}; + +struct audit_features { + __u32 vers; + __u32 mask; + __u32 features; + __u32 lock; +}; + +struct audit_tty_status { + __u32 enabled; + __u32 log_passwd; +}; + +struct audit_sig_info { + uid_t uid; + pid_t pid; + char ctx[0]; +}; + +struct net_generic { + union { + struct { + unsigned int len; + struct callback_head rcu; + } s; + void *ptr[0]; + }; +}; + +struct pernet_operations { + struct list_head list; + int (*init)(struct net *); + void (*pre_exit)(struct net *); + void (*exit)(struct net *); + void (*exit_batch)(struct list_head *); + unsigned int *id; + size_t size; +}; + +struct scm_creds { + u32 pid; + kuid_t uid; + kgid_t gid; +}; + +struct netlink_skb_parms { + struct scm_creds creds; + __u32 portid; + __u32 dst_group; + __u32 flags; + struct sock *sk; + bool nsid_is_set; + int nsid; +}; + +struct netlink_kernel_cfg { + unsigned int groups; + unsigned int flags; + void (*input)(struct sk_buff *); + struct mutex *cb_mutex; + int (*bind)(struct net *, int); + void (*unbind)(struct net *, int); + bool (*compare)(struct net *, struct sock *); +}; + +struct audit_netlink_list { + __u32 portid; + struct net *net; + struct sk_buff_head q; +}; + +struct audit_net { + struct sock *sk; +}; + +struct auditd_connection { + struct pid *pid; + u32 portid; + struct net *net; + struct callback_head rcu; +}; + +struct audit_ctl_mutex { + struct mutex lock; + void *owner; +}; + +struct audit_buffer { + struct sk_buff *skb; + struct audit_context *ctx; + gfp_t gfp_mask; +}; + +struct audit_reply { + __u32 portid; + struct net *net; + struct sk_buff *skb; +}; + +enum { + Audit_equal = 0, + Audit_not_equal = 1, + Audit_bitmask = 2, + Audit_bittest = 3, + Audit_lt = 4, + Audit_gt = 5, + Audit_le = 6, + Audit_ge = 7, + Audit_bad = 8, +}; + +struct audit_rule_data { + __u32 flags; + __u32 action; + __u32 field_count; + __u32 mask[64]; + __u32 fields[64]; + __u32 values[64]; + __u32 fieldflags[64]; + __u32 buflen; + char buf[0]; +}; + +struct audit_field; + +struct audit_watch; + +struct audit_tree; + +struct audit_fsnotify_mark; + +struct audit_krule { + u32 pflags; + u32 flags; + u32 listnr; + u32 action; + u32 mask[64]; + u32 buflen; + u32 field_count; + char *filterkey; + struct audit_field *fields; + struct audit_field *arch_f; + struct audit_field *inode_f; + struct audit_watch *watch; + struct audit_tree *tree; + struct audit_fsnotify_mark *exe; + struct list_head rlist; + struct list_head list; + u64 prio; +}; + +struct audit_field { + u32 type; + union { + u32 val; + kuid_t uid; + kgid_t gid; + struct { + bool lsm_isset; + char *lsm_str; + void *lsm_rules[4]; + }; + }; + u32 op; +}; + +struct audit_entry { + struct list_head list; + struct callback_head rcu; + struct audit_krule rule; +}; + +struct 
audit_buffer___2; + +typedef int __kernel_key_t; + +typedef __kernel_key_t key_t; + +struct cpu_vfs_cap_data { + __u32 magic_etc; + kernel_cap_t permitted; + kernel_cap_t inheritable; + kuid_t rootid; +}; + +struct kern_ipc_perm { + spinlock_t lock; + bool deleted; + int id; + key_t key; + kuid_t uid; + kgid_t gid; + kuid_t cuid; + kgid_t cgid; + umode_t mode; + long unsigned int seq; + void *security; + struct rhash_head khtnode; + struct callback_head rcu; + refcount_t refcount; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +typedef struct fsnotify_mark_connector *fsnotify_connp_t; + +struct fsnotify_mark_connector { + spinlock_t lock; + short unsigned int type; + short unsigned int flags; + __kernel_fsid_t fsid; + union { + fsnotify_connp_t *obj; + struct fsnotify_mark_connector *destroy_next; + }; + struct hlist_head list; +}; + +enum audit_nfcfgop { + AUDIT_XT_OP_REGISTER = 0, + AUDIT_XT_OP_REPLACE = 1, + AUDIT_XT_OP_UNREGISTER = 2, + AUDIT_NFT_OP_TABLE_REGISTER = 3, + AUDIT_NFT_OP_TABLE_UNREGISTER = 4, + AUDIT_NFT_OP_CHAIN_REGISTER = 5, + AUDIT_NFT_OP_CHAIN_UNREGISTER = 6, + AUDIT_NFT_OP_RULE_REGISTER = 7, + AUDIT_NFT_OP_RULE_UNREGISTER = 8, + AUDIT_NFT_OP_SET_REGISTER = 9, + AUDIT_NFT_OP_SET_UNREGISTER = 10, + AUDIT_NFT_OP_SETELEM_REGISTER = 11, + AUDIT_NFT_OP_SETELEM_UNREGISTER = 12, + AUDIT_NFT_OP_GEN_REGISTER = 13, + AUDIT_NFT_OP_OBJ_REGISTER = 14, + AUDIT_NFT_OP_OBJ_UNREGISTER = 15, + AUDIT_NFT_OP_OBJ_RESET = 16, + AUDIT_NFT_OP_FLOWTABLE_REGISTER = 17, + AUDIT_NFT_OP_FLOWTABLE_UNREGISTER = 18, + AUDIT_NFT_OP_INVALID = 19, +}; + +enum fsnotify_obj_type { + FSNOTIFY_OBJ_TYPE_INODE = 0, + FSNOTIFY_OBJ_TYPE_PARENT = 1, + FSNOTIFY_OBJ_TYPE_VFSMOUNT = 2, + FSNOTIFY_OBJ_TYPE_SB = 3, + FSNOTIFY_OBJ_TYPE_COUNT = 4, + FSNOTIFY_OBJ_TYPE_DETACHED = 4, +}; + +struct audit_aux_data { + struct audit_aux_data *next; + int type; +}; + +struct audit_chunk; + +struct audit_tree_refs { + struct audit_tree_refs *next; + struct audit_chunk *c[31]; +}; + +struct audit_aux_data_pids { + struct audit_aux_data d; + pid_t target_pid[16]; + kuid_t target_auid[16]; + kuid_t target_uid[16]; + unsigned int target_sessionid[16]; + struct lsmblob target_lsm[16]; + char target_comm[256]; + int pid_count; +}; + +struct audit_aux_data_bprm_fcaps { + struct audit_aux_data d; + struct audit_cap_data fcap; + unsigned int fcap_ver; + struct audit_cap_data old_pcap; + struct audit_cap_data new_pcap; +}; + +struct audit_nfcfgop_tab { + enum audit_nfcfgop op; + const char *s; +}; + +struct audit_parent; + +struct audit_watch { + refcount_t count; + dev_t dev; + char *path; + long unsigned int ino; + struct audit_parent *parent; + struct list_head wlist; + struct list_head rules; +}; + +struct fsnotify_group; + +struct fsnotify_iter_info; + +struct fsnotify_mark; + +struct fsnotify_event; + +struct fsnotify_ops { + int (*handle_event)(struct fsnotify_group *, u32, const void *, int, struct inode *, const struct qstr *, u32, struct fsnotify_iter_info *); + int (*handle_inode_event)(struct fsnotify_mark *, u32, struct inode *, struct inode *, const struct qstr *, u32); + void (*free_group_priv)(struct fsnotify_group *); + void (*freeing_mark)(struct fsnotify_mark *, struct fsnotify_group *); + void (*free_event)(struct fsnotify_event *); + void (*free_mark)(struct fsnotify_mark *); +}; + +struct inotify_group_private_data { + spinlock_t idr_lock; + struct idr idr; + struct ucounts *ucounts; +}; + +struct fanotify_group_private_data { + struct hlist_head *merge_hash; + struct list_head 
access_list; + wait_queue_head_t access_waitq; + int flags; + int f_flags; + struct ucounts *ucounts; +}; + +struct fsnotify_group { + const struct fsnotify_ops *ops; + refcount_t refcnt; + spinlock_t notification_lock; + struct list_head notification_list; + wait_queue_head_t notification_waitq; + unsigned int q_len; + unsigned int max_events; + unsigned int priority; + bool shutdown; + struct mutex mark_mutex; + atomic_t user_waits; + struct list_head marks_list; + struct fasync_struct *fsn_fa; + struct fsnotify_event *overflow_event; + struct mem_cgroup *memcg; + union { + void *private; + struct inotify_group_private_data inotify_data; + struct fanotify_group_private_data fanotify_data; + }; +}; + +struct fsnotify_iter_info { + struct fsnotify_mark *marks[4]; + unsigned int report_mask; + int srcu_idx; +}; + +struct fsnotify_mark { + __u32 mask; + refcount_t refcnt; + struct fsnotify_group *group; + struct list_head g_list; + spinlock_t lock; + struct hlist_node obj_list; + struct fsnotify_mark_connector *connector; + __u32 ignored_mask; + unsigned int flags; +}; + +struct fsnotify_event { + struct list_head list; +}; + +struct audit_parent { + struct list_head watches; + struct fsnotify_mark mark; +}; + +struct audit_fsnotify_mark { + dev_t dev; + long unsigned int ino; + char *path; + struct fsnotify_mark mark; + struct audit_krule *rule; +}; + +struct audit_chunk___2; + +struct audit_tree { + refcount_t count; + int goner; + struct audit_chunk___2 *root; + struct list_head chunks; + struct list_head rules; + struct list_head list; + struct list_head same_root; + struct callback_head head; + char pathname[0]; +}; + +struct node { + struct list_head list; + struct audit_tree *owner; + unsigned int index; +}; + +struct audit_chunk___2 { + struct list_head hash; + long unsigned int key; + struct fsnotify_mark *mark; + struct list_head trees; + int count; + atomic_long_t refs; + struct callback_head head; + struct node owners[0]; +}; + +struct audit_tree_mark { + struct fsnotify_mark mark; + struct audit_chunk___2 *chunk; +}; + +enum { + HASH_SIZE = 128, +}; + +struct kprobe_blacklist_entry { + struct list_head list; + long unsigned int start_addr; + long unsigned int end_addr; +}; + +enum perf_record_ksymbol_type { + PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0, + PERF_RECORD_KSYMBOL_TYPE_BPF = 1, + PERF_RECORD_KSYMBOL_TYPE_OOL = 2, + PERF_RECORD_KSYMBOL_TYPE_MAX = 3, +}; + +struct kprobe_insn_page { + struct list_head list; + kprobe_opcode_t *insns; + struct kprobe_insn_cache *cache; + int nused; + int ngarbage; + char slot_used[0]; +}; + +enum kprobe_slot_state { + SLOT_CLEAN = 0, + SLOT_DIRTY = 1, + SLOT_USED = 2, +}; + +struct kgdb_io { + const char *name; + int (*read_char)(); + void (*write_char)(u8); + void (*flush)(); + int (*init)(); + void (*deinit)(); + void (*pre_exception)(); + void (*post_exception)(); + struct console *cons; +}; + +enum { + KDB_NOT_INITIALIZED = 0, + KDB_INIT_EARLY = 1, + KDB_INIT_FULL = 2, +}; + +struct kgdb_state { + int ex_vector; + int signo; + int err_code; + int cpu; + int pass_exception; + long unsigned int thr_query; + long unsigned int threadid; + long int kgdb_usethreadid; + struct pt_regs *linux_regs; + atomic_t *send_ready; +}; + +struct debuggerinfo_struct { + void *debuggerinfo; + struct task_struct *task; + int exception_state; + int ret_state; + int irq_depth; + int enter_kgdb; + bool rounding_up; +}; + +typedef int (*get_char_func)(); + +typedef enum { + KDB_ENABLE_ALL = 1, + KDB_ENABLE_MEM_READ = 2, + KDB_ENABLE_MEM_WRITE = 4, + 
KDB_ENABLE_REG_READ = 8, + KDB_ENABLE_REG_WRITE = 16, + KDB_ENABLE_INSPECT = 32, + KDB_ENABLE_FLOW_CTRL = 64, + KDB_ENABLE_SIGNAL = 128, + KDB_ENABLE_REBOOT = 256, + KDB_ENABLE_ALWAYS_SAFE = 512, + KDB_ENABLE_MASK = 1023, + KDB_ENABLE_ALL_NO_ARGS = 1024, + KDB_ENABLE_MEM_READ_NO_ARGS = 2048, + KDB_ENABLE_MEM_WRITE_NO_ARGS = 4096, + KDB_ENABLE_REG_READ_NO_ARGS = 8192, + KDB_ENABLE_REG_WRITE_NO_ARGS = 16384, + KDB_ENABLE_INSPECT_NO_ARGS = 32768, + KDB_ENABLE_FLOW_CTRL_NO_ARGS = 65536, + KDB_ENABLE_SIGNAL_NO_ARGS = 131072, + KDB_ENABLE_REBOOT_NO_ARGS = 262144, + KDB_ENABLE_ALWAYS_SAFE_NO_ARGS = 524288, + KDB_ENABLE_MASK_NO_ARGS = 1047552, + KDB_REPEAT_NO_ARGS = 1073741824, + KDB_REPEAT_WITH_ARGS = 2147483648, +} kdb_cmdflags_t; + +typedef int (*kdb_func_t)(int, const char **); + +typedef enum { + KDB_REASON_ENTER = 1, + KDB_REASON_ENTER_SLAVE = 2, + KDB_REASON_BREAK = 3, + KDB_REASON_DEBUG = 4, + KDB_REASON_OOPS = 5, + KDB_REASON_SWITCH = 6, + KDB_REASON_KEYBOARD = 7, + KDB_REASON_NMI = 8, + KDB_REASON_RECURSE = 9, + KDB_REASON_SSTEP = 10, + KDB_REASON_SYSTEM_NMI = 11, +} kdb_reason_t; + +struct __ksymtab { + long unsigned int value; + const char *mod_name; + long unsigned int mod_start; + long unsigned int mod_end; + const char *sec_name; + long unsigned int sec_start; + long unsigned int sec_end; + const char *sym_name; + long unsigned int sym_start; + long unsigned int sym_end; +}; + +typedef struct __ksymtab kdb_symtab_t; + +struct _kdbtab { + char *cmd_name; + kdb_func_t cmd_func; + char *cmd_usage; + char *cmd_help; + short int cmd_minlen; + kdb_cmdflags_t cmd_flags; + struct list_head list_node; + bool is_dynamic; +}; + +typedef struct _kdbtab kdbtab_t; + +typedef enum { + KDB_DB_BPT = 0, + KDB_DB_SS = 1, + KDB_DB_SSBPT = 2, + KDB_DB_NOBPT = 3, +} kdb_dbtrap_t; + +struct _kdbmsg { + int km_diag; + char *km_msg; +}; + +typedef struct _kdbmsg kdbmsg_t; + +struct defcmd_set { + int count; + bool usable; + char *name; + char *usage; + char *help; + char **command; +}; + +struct debug_alloc_header { + u32 next; + u32 size; + void *caller; +}; + +struct _kdb_bp { + long unsigned int bp_addr; + unsigned int bp_free: 1; + unsigned int bp_enabled: 1; + unsigned int bp_type: 4; + unsigned int bp_installed: 1; + unsigned int bp_delay: 1; + unsigned int bp_delayed: 1; + unsigned int bph_length; +}; + +typedef struct _kdb_bp kdb_bp_t; + +typedef short unsigned int u_short; + +struct seccomp_notif_sizes { + __u16 seccomp_notif; + __u16 seccomp_notif_resp; + __u16 seccomp_data; +}; + +struct seccomp_notif { + __u64 id; + __u32 pid; + __u32 flags; + struct seccomp_data data; +}; + +struct seccomp_notif_resp { + __u64 id; + __s64 val; + __s32 error; + __u32 flags; +}; + +struct seccomp_notif_addfd { + __u64 id; + __u32 flags; + __u32 srcfd; + __u32 newfd; + __u32 newfd_flags; +}; + +struct action_cache { + long unsigned int allow_native[7]; + long unsigned int allow_compat[7]; +}; + +struct notification; + +struct seccomp_filter { + refcount_t refs; + refcount_t users; + bool log; + struct action_cache cache; + struct seccomp_filter *prev; + struct bpf_prog *prog; + struct notification *notif; + struct mutex notify_lock; + wait_queue_head_t wqh; +}; + +struct seccomp_metadata { + __u64 filter_off; + __u64 flags; +}; + +struct sock_fprog { + short unsigned int len; + struct sock_filter *filter; +}; + +struct compat_sock_fprog { + u16 len; + compat_uptr_t filter; +}; + +typedef int (*bpf_aux_classic_check_t)(struct sock_filter *, unsigned int); + +enum notify_state { + SECCOMP_NOTIFY_INIT = 0, + 
SECCOMP_NOTIFY_SENT = 1, + SECCOMP_NOTIFY_REPLIED = 2, +}; + +struct seccomp_knotif { + struct task_struct *task; + u64 id; + const struct seccomp_data *data; + enum notify_state state; + int error; + long int val; + u32 flags; + struct completion ready; + struct list_head list; + struct list_head addfd; +}; + +struct seccomp_kaddfd { + struct file *file; + int fd; + unsigned int flags; + __u32 ioctl_flags; + union { + bool setfd; + int ret; + }; + struct completion completion; + struct list_head list; +}; + +struct notification { + struct semaphore request; + u64 next_id; + struct list_head notifications; +}; + +struct seccomp_log_name { + u32 log; + const char *name; +}; + +struct rchan; + +struct rchan_buf { + void *start; + void *data; + size_t offset; + size_t subbufs_produced; + size_t subbufs_consumed; + struct rchan *chan; + wait_queue_head_t read_wait; + struct irq_work wakeup_work; + struct dentry *dentry; + struct kref kref; + struct page **page_array; + unsigned int page_count; + unsigned int finalized; + size_t *padding; + size_t prev_padding; + size_t bytes_consumed; + size_t early_bytes; + unsigned int cpu; + long: 32; + long: 64; + long: 64; + long: 64; +}; + +struct rchan_callbacks; + +struct rchan { + u32 version; + size_t subbuf_size; + size_t n_subbufs; + size_t alloc_size; + const struct rchan_callbacks *cb; + struct kref kref; + void *private_data; + size_t last_toobig; + struct rchan_buf **buf; + int is_global; + struct list_head list; + struct dentry *parent; + int has_base_filename; + char base_filename[255]; +}; + +struct rchan_callbacks { + int (*subbuf_start)(struct rchan_buf *, void *, void *, size_t); + struct dentry * (*create_buf_file)(const char *, struct dentry *, umode_t, struct rchan_buf *, int *); + int (*remove_buf_file)(struct dentry *); +}; + +struct partial_page { + unsigned int offset; + unsigned int len; + long unsigned int private; +}; + +struct splice_pipe_desc { + struct page **pages; + struct partial_page *partial; + int nr_pages; + unsigned int nr_pages_max; + const struct pipe_buf_operations *ops; + void (*spd_release)(struct splice_pipe_desc *, unsigned int); +}; + +struct rchan_percpu_buf_dispatcher { + struct rchan_buf *buf; + struct dentry *dentry; +}; + +enum { + TASKSTATS_TYPE_UNSPEC = 0, + TASKSTATS_TYPE_PID = 1, + TASKSTATS_TYPE_TGID = 2, + TASKSTATS_TYPE_STATS = 3, + TASKSTATS_TYPE_AGGR_PID = 4, + TASKSTATS_TYPE_AGGR_TGID = 5, + TASKSTATS_TYPE_NULL = 6, + __TASKSTATS_TYPE_MAX = 7, +}; + +enum { + TASKSTATS_CMD_ATTR_UNSPEC = 0, + TASKSTATS_CMD_ATTR_PID = 1, + TASKSTATS_CMD_ATTR_TGID = 2, + TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 3, + TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 4, + __TASKSTATS_CMD_ATTR_MAX = 5, +}; + +enum { + CGROUPSTATS_CMD_UNSPEC = 3, + CGROUPSTATS_CMD_GET = 4, + CGROUPSTATS_CMD_NEW = 5, + __CGROUPSTATS_CMD_MAX = 6, +}; + +enum { + CGROUPSTATS_TYPE_UNSPEC = 0, + CGROUPSTATS_TYPE_CGROUP_STATS = 1, + __CGROUPSTATS_TYPE_MAX = 2, +}; + +enum { + CGROUPSTATS_CMD_ATTR_UNSPEC = 0, + CGROUPSTATS_CMD_ATTR_FD = 1, + __CGROUPSTATS_CMD_ATTR_MAX = 2, +}; + +struct genlmsghdr { + __u8 cmd; + __u8 version; + __u16 reserved; +}; + +enum { + NLA_UNSPEC = 0, + NLA_U8 = 1, + NLA_U16 = 2, + NLA_U32 = 3, + NLA_U64 = 4, + NLA_STRING = 5, + NLA_FLAG = 6, + NLA_MSECS = 7, + NLA_NESTED = 8, + NLA_NESTED_ARRAY = 9, + NLA_NUL_STRING = 10, + NLA_BINARY = 11, + NLA_S8 = 12, + NLA_S16 = 13, + NLA_S32 = 14, + NLA_S64 = 15, + NLA_BITFIELD32 = 16, + NLA_REJECT = 17, + __NLA_TYPE_MAX = 18, +}; + +struct genl_multicast_group { + char name[16]; + u8 
flags; +}; + +struct genl_ops; + +struct genl_info; + +struct genl_small_ops; + +struct genl_family { + int id; + unsigned int hdrsize; + char name[16]; + unsigned int version; + unsigned int maxattr; + unsigned int mcgrp_offset; + u8 netnsok: 1; + u8 parallel_ops: 1; + u8 n_ops; + u8 n_small_ops; + u8 n_mcgrps; + const struct nla_policy *policy; + int (*pre_doit)(const struct genl_ops *, struct sk_buff *, struct genl_info *); + void (*post_doit)(const struct genl_ops *, struct sk_buff *, struct genl_info *); + const struct genl_ops *ops; + const struct genl_small_ops *small_ops; + const struct genl_multicast_group *mcgrps; + struct module *module; +}; + +struct genl_ops { + int (*doit)(struct sk_buff *, struct genl_info *); + int (*start)(struct netlink_callback *); + int (*dumpit)(struct sk_buff *, struct netlink_callback *); + int (*done)(struct netlink_callback *); + const struct nla_policy *policy; + unsigned int maxattr; + u8 cmd; + u8 internal_flags; + u8 flags; + u8 validate; +}; + +struct genl_info { + u32 snd_seq; + u32 snd_portid; + struct nlmsghdr *nlhdr; + struct genlmsghdr *genlhdr; + void *userhdr; + struct nlattr **attrs; + possible_net_t _net; + void *user_ptr[2]; + struct netlink_ext_ack *extack; +}; + +struct genl_small_ops { + int (*doit)(struct sk_buff *, struct genl_info *); + int (*dumpit)(struct sk_buff *, struct netlink_callback *); + u8 cmd; + u8 internal_flags; + u8 flags; + u8 validate; +}; + +enum genl_validate_flags { + GENL_DONT_VALIDATE_STRICT = 1, + GENL_DONT_VALIDATE_DUMP = 2, + GENL_DONT_VALIDATE_DUMP_STRICT = 4, +}; + +struct listener { + struct list_head list; + pid_t pid; + char valid; +}; + +struct listener_list { + struct rw_semaphore sem; + struct list_head list; +}; + +enum actions { + REGISTER = 0, + DEREGISTER = 1, + CPU_DONT_CARE = 2, +}; + +struct tp_module { + struct list_head list; + struct module *mod; +}; + +enum tp_func_state { + TP_FUNC_0 = 0, + TP_FUNC_1 = 1, + TP_FUNC_2 = 2, + TP_FUNC_N = 3, +}; + +enum tp_transition_sync { + TP_TRANSITION_SYNC_1_0_1 = 0, + TP_TRANSITION_SYNC_N_2_1 = 1, + _NR_TP_TRANSITION_SYNC = 2, +}; + +struct tp_transition_snapshot { + long unsigned int rcu; + long unsigned int srcu; + bool ongoing; +}; + +struct tp_probes { + struct callback_head rcu; + struct tracepoint_func probes[0]; +}; + +struct ftrace_hash { + long unsigned int size_bits; + struct hlist_head *buckets; + long unsigned int count; + long unsigned int flags; + struct callback_head rcu; +}; + +struct ftrace_func_entry { + struct hlist_node hlist; + long unsigned int ip; + long unsigned int direct; +}; + +enum ftrace_bug_type { + FTRACE_BUG_UNKNOWN = 0, + FTRACE_BUG_INIT = 1, + FTRACE_BUG_NOP = 2, + FTRACE_BUG_CALL = 3, + FTRACE_BUG_UPDATE = 4, +}; + +enum { + FTRACE_UPDATE_CALLS = 1, + FTRACE_DISABLE_CALLS = 2, + FTRACE_UPDATE_TRACE_FUNC = 4, + FTRACE_START_FUNC_RET = 8, + FTRACE_STOP_FUNC_RET = 16, + FTRACE_MAY_SLEEP = 32, +}; + +enum { + FTRACE_ITER_FILTER = 1, + FTRACE_ITER_NOTRACE = 2, + FTRACE_ITER_PRINTALL = 4, + FTRACE_ITER_DO_PROBES = 8, + FTRACE_ITER_PROBE = 16, + FTRACE_ITER_MOD = 32, + FTRACE_ITER_ENABLED = 64, +}; + +struct ftrace_graph_ent { + long unsigned int func; + int depth; +} __attribute__((packed)); + +struct ftrace_graph_ret { + long unsigned int func; + int depth; + unsigned int overrun; + long long unsigned int calltime; + long long unsigned int rettime; +}; + +typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); + +typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); + +struct fgraph_ops { + 
trace_func_graph_ent_t entryfunc; + trace_func_graph_ret_t retfunc; +}; + +struct prog_entry; + +struct event_filter { + struct prog_entry *prog; + char *filter_string; +}; + +struct trace_array_cpu; + +struct array_buffer { + struct trace_array *tr; + struct trace_buffer *buffer; + struct trace_array_cpu *data; + u64 time_start; + int cpu; +}; + +struct trace_pid_list; + +struct trace_options; + +struct cond_snapshot; + +struct trace_func_repeats; + +struct trace_array { + struct list_head list; + char *name; + struct array_buffer array_buffer; + struct array_buffer max_buffer; + bool allocated_snapshot; + long unsigned int max_latency; + struct dentry *d_max_latency; + struct work_struct fsnotify_work; + struct irq_work fsnotify_irqwork; + struct trace_pid_list *filtered_pids; + struct trace_pid_list *filtered_no_pids; + arch_spinlock_t max_lock; + int buffer_disabled; + int sys_refcount_enter; + int sys_refcount_exit; + struct trace_event_file *enter_syscall_files[447]; + struct trace_event_file *exit_syscall_files[447]; + int stop_count; + int clock_id; + int nr_topts; + bool clear_trace; + int buffer_percent; + unsigned int n_err_log_entries; + struct tracer *current_trace; + unsigned int trace_flags; + unsigned char trace_flags_index[32]; + unsigned int flags; + raw_spinlock_t start_lock; + struct list_head err_log; + struct dentry *dir; + struct dentry *options; + struct dentry *percpu_dir; + struct dentry *event_dir; + struct trace_options *topts; + struct list_head systems; + struct list_head events; + struct trace_event_file *trace_marker_file; + cpumask_var_t tracing_cpumask; + int ref; + int trace_ref; + struct ftrace_ops *ops; + struct trace_pid_list *function_pids; + struct trace_pid_list *function_no_pids; + struct list_head func_probes; + struct list_head mod_trace; + struct list_head mod_notrace; + int function_enabled; + int no_filter_buffering_ref; + struct list_head hist_vars; + struct cond_snapshot *cond_snapshot; + struct trace_func_repeats *last_func_repeats; +}; + +struct tracer_flags; + +struct tracer { + const char *name; + int (*init)(struct trace_array *); + void (*reset)(struct trace_array *); + void (*start)(struct trace_array *); + void (*stop)(struct trace_array *); + int (*update_thresh)(struct trace_array *); + void (*open)(struct trace_iterator *); + void (*pipe_open)(struct trace_iterator *); + void (*close)(struct trace_iterator *); + void (*pipe_close)(struct trace_iterator *); + ssize_t (*read)(struct trace_iterator *, struct file *, char *, size_t, loff_t *); + ssize_t (*splice_read)(struct trace_iterator *, struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); + void (*print_header)(struct seq_file *); + enum print_line_t (*print_line)(struct trace_iterator *); + int (*set_flag)(struct trace_array *, u32, u32, int); + int (*flag_changed)(struct trace_array *, u32, int); + struct tracer *next; + struct tracer_flags *flags; + int enabled; + bool print_max; + bool allow_instances; + bool use_max_tr; + bool noboot; +}; + +struct event_subsystem; + +struct trace_subsystem_dir { + struct list_head list; + struct event_subsystem *subsystem; + struct trace_array *tr; + struct dentry *entry; + int ref_count; + int nr_events; +}; + +struct trace_array_cpu { + atomic_t disabled; + void *buffer_page; + long unsigned int entries; + long unsigned int saved_latency; + long unsigned int critical_start; + long unsigned int critical_end; + long unsigned int critical_sequence; + long unsigned int nice; + long unsigned int policy; + long unsigned 
int rt_priority; + long unsigned int skipped_entries; + u64 preempt_timestamp; + pid_t pid; + kuid_t uid; + char comm[16]; + int ftrace_ignore_pid; + bool ignore_pid; +}; + +struct trace_option_dentry; + +struct trace_options { + struct tracer *tracer; + struct trace_option_dentry *topts; +}; + +struct tracer_opt; + +struct trace_option_dentry { + struct tracer_opt *opt; + struct tracer_flags *flags; + struct trace_array *tr; + struct dentry *entry; +}; + +struct trace_pid_list { + int pid_max; + long unsigned int *pids; +}; + +enum { + TRACE_PIDS = 1, + TRACE_NO_PIDS = 2, +}; + +typedef bool (*cond_update_fn_t)(struct trace_array *, void *); + +struct cond_snapshot { + void *cond_data; + cond_update_fn_t update; +}; + +struct trace_func_repeats { + long unsigned int ip; + long unsigned int parent_ip; + long unsigned int count; + u64 ts_last_call; +}; + +enum { + TRACE_ARRAY_FL_GLOBAL = 1, +}; + +struct tracer_opt { + const char *name; + u32 bit; +}; + +struct tracer_flags { + u32 val; + struct tracer_opt *opts; + struct tracer *trace; +}; + +struct ftrace_mod_load { + struct list_head list; + char *func; + char *module; + int enable; +}; + +enum { + FTRACE_HASH_FL_MOD = 1, +}; + +struct ftrace_func_command { + struct list_head list; + char *name; + int (*func)(struct trace_array *, struct ftrace_hash *, char *, char *, char *, int); +}; + +struct ftrace_probe_ops { + void (*func)(long unsigned int, long unsigned int, struct trace_array *, struct ftrace_probe_ops *, void *); + int (*init)(struct ftrace_probe_ops *, struct trace_array *, long unsigned int, void *, void **); + void (*free)(struct ftrace_probe_ops *, struct trace_array *, long unsigned int, void *); + int (*print)(struct seq_file *, long unsigned int, struct ftrace_probe_ops *, void *); +}; + +typedef int (*ftrace_mapper_func)(void *); + +struct trace_parser { + bool cont; + char *buffer; + unsigned int idx; + unsigned int size; +}; + +enum trace_iterator_bits { + TRACE_ITER_PRINT_PARENT_BIT = 0, + TRACE_ITER_SYM_OFFSET_BIT = 1, + TRACE_ITER_SYM_ADDR_BIT = 2, + TRACE_ITER_VERBOSE_BIT = 3, + TRACE_ITER_RAW_BIT = 4, + TRACE_ITER_HEX_BIT = 5, + TRACE_ITER_BIN_BIT = 6, + TRACE_ITER_BLOCK_BIT = 7, + TRACE_ITER_PRINTK_BIT = 8, + TRACE_ITER_ANNOTATE_BIT = 9, + TRACE_ITER_USERSTACKTRACE_BIT = 10, + TRACE_ITER_SYM_USEROBJ_BIT = 11, + TRACE_ITER_PRINTK_MSGONLY_BIT = 12, + TRACE_ITER_CONTEXT_INFO_BIT = 13, + TRACE_ITER_LATENCY_FMT_BIT = 14, + TRACE_ITER_RECORD_CMD_BIT = 15, + TRACE_ITER_RECORD_TGID_BIT = 16, + TRACE_ITER_OVERWRITE_BIT = 17, + TRACE_ITER_STOP_ON_FREE_BIT = 18, + TRACE_ITER_IRQ_INFO_BIT = 19, + TRACE_ITER_MARKERS_BIT = 20, + TRACE_ITER_EVENT_FORK_BIT = 21, + TRACE_ITER_PAUSE_ON_TRACE_BIT = 22, + TRACE_ITER_HASH_PTR_BIT = 23, + TRACE_ITER_FUNCTION_BIT = 24, + TRACE_ITER_FUNC_FORK_BIT = 25, + TRACE_ITER_DISPLAY_GRAPH_BIT = 26, + TRACE_ITER_STACKTRACE_BIT = 27, + TRACE_ITER_LAST_BIT = 28, +}; + +struct event_subsystem { + struct list_head list; + const char *name; + struct event_filter *filter; + int ref_count; +}; + +enum regex_type { + MATCH_FULL = 0, + MATCH_FRONT_ONLY = 1, + MATCH_MIDDLE_ONLY = 2, + MATCH_END_ONLY = 3, + MATCH_GLOB = 4, + MATCH_INDEX = 5, +}; + +struct tracer_stat { + const char *name; + void * (*stat_start)(struct tracer_stat *); + void * (*stat_next)(void *, int); + cmp_func_t stat_cmp; + int (*stat_show)(struct seq_file *, void *); + void (*stat_release)(void *); + int (*stat_headers)(struct seq_file *); +}; + +enum { + FTRACE_MODIFY_ENABLE_FL = 1, + FTRACE_MODIFY_MAY_SLEEP_FL = 2, +}; + +struct 
ftrace_profile { + struct hlist_node node; + long unsigned int ip; + long unsigned int counter; + long long unsigned int time; + long long unsigned int time_squared; +}; + +struct ftrace_profile_page { + struct ftrace_profile_page *next; + long unsigned int index; + struct ftrace_profile records[0]; +}; + +struct ftrace_profile_stat { + atomic_t disabled; + struct hlist_head *hash; + struct ftrace_profile_page *pages; + struct ftrace_profile_page *start; + struct tracer_stat stat; +}; + +struct ftrace_func_probe { + struct ftrace_probe_ops *probe_ops; + struct ftrace_ops ops; + struct trace_array *tr; + struct list_head list; + void *data; + int ref; +}; + +struct ftrace_page { + struct ftrace_page *next; + struct dyn_ftrace *records; + int index; + int order; +}; + +struct ftrace_rec_iter___2 { + struct ftrace_page *pg; + int index; +}; + +struct ftrace_iterator { + loff_t pos; + loff_t func_pos; + loff_t mod_pos; + struct ftrace_page *pg; + struct dyn_ftrace *func; + struct ftrace_func_probe *probe; + struct ftrace_func_entry *probe_entry; + struct trace_parser parser; + struct ftrace_hash *hash; + struct ftrace_ops *ops; + struct trace_array *tr; + struct list_head *mod_list; + int pidx; + int idx; + unsigned int flags; +}; + +struct ftrace_glob { + char *search; + unsigned int len; + int type; +}; + +struct ftrace_func_map { + struct ftrace_func_entry entry; + void *data; +}; + +struct ftrace_func_mapper { + struct ftrace_hash hash; +}; + +struct ftrace_direct_func { + struct list_head next; + long unsigned int addr; + int count; +}; + +enum graph_filter_type { + GRAPH_FILTER_NOTRACE = 0, + GRAPH_FILTER_FUNCTION = 1, +}; + +struct ftrace_graph_data { + struct ftrace_hash *hash; + struct ftrace_func_entry *entry; + int idx; + enum graph_filter_type type; + struct ftrace_hash *new_hash; + const struct seq_operations *seq_ops; + struct trace_parser parser; +}; + +struct ftrace_mod_func { + struct list_head list; + char *name; + long unsigned int ip; + unsigned int size; +}; + +struct ftrace_mod_map { + struct callback_head rcu; + struct list_head list; + struct module *mod; + long unsigned int start_addr; + long unsigned int end_addr; + struct list_head funcs; + unsigned int num_funcs; +}; + +struct ftrace_init_func { + struct list_head list; + long unsigned int ip; +}; + +enum ring_buffer_type { + RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28, + RINGBUF_TYPE_PADDING = 29, + RINGBUF_TYPE_TIME_EXTEND = 30, + RINGBUF_TYPE_TIME_STAMP = 31, +}; + +enum ring_buffer_flags { + RB_FL_OVERWRITE = 1, +}; + +struct ring_buffer_per_cpu; + +struct buffer_page; + +struct ring_buffer_iter { + struct ring_buffer_per_cpu *cpu_buffer; + long unsigned int head; + long unsigned int next_event; + struct buffer_page *head_page; + struct buffer_page *cache_reader_page; + long unsigned int cache_read; + u64 read_stamp; + u64 page_stamp; + struct ring_buffer_event *event; + int missed_events; +}; + +struct rb_irq_work { + struct irq_work work; + wait_queue_head_t waiters; + wait_queue_head_t full_waiters; + bool waiters_pending; + bool full_waiters_pending; + bool wakeup_full; +}; + +struct trace_buffer___2 { + unsigned int flags; + int cpus; + atomic_t record_disabled; + cpumask_var_t cpumask; + struct lock_class_key *reader_lock_key; + struct mutex mutex; + struct ring_buffer_per_cpu **buffers; + struct hlist_node node; + u64 (*clock)(); + struct rb_irq_work irq_work; + bool time_stamp_abs; +}; + +enum { + RB_LEN_TIME_EXTEND = 8, + RB_LEN_TIME_STAMP = 8, +}; + +struct buffer_data_page { + u64 time_stamp; + local_t 
commit; + unsigned char data[0]; +}; + +struct buffer_page { + struct list_head list; + local_t write; + unsigned int read; + local_t entries; + long unsigned int real_end; + struct buffer_data_page *page; +}; + +struct rb_event_info { + u64 ts; + u64 delta; + u64 before; + u64 after; + long unsigned int length; + struct buffer_page *tail_page; + int add_timestamp; +}; + +enum { + RB_ADD_STAMP_NONE = 0, + RB_ADD_STAMP_EXTEND = 2, + RB_ADD_STAMP_ABSOLUTE = 4, + RB_ADD_STAMP_FORCE = 8, +}; + +enum { + RB_CTX_TRANSITION = 0, + RB_CTX_NMI = 1, + RB_CTX_IRQ = 2, + RB_CTX_SOFTIRQ = 3, + RB_CTX_NORMAL = 4, + RB_CTX_MAX = 5, +}; + +struct rb_time_struct { + local64_t time; +}; + +typedef struct rb_time_struct rb_time_t; + +struct ring_buffer_per_cpu { + int cpu; + atomic_t record_disabled; + atomic_t resize_disabled; + struct trace_buffer___2 *buffer; + raw_spinlock_t reader_lock; + arch_spinlock_t lock; + struct lock_class_key lock_key; + struct buffer_data_page *free_page; + long unsigned int nr_pages; + unsigned int current_context; + struct list_head *pages; + struct buffer_page *head_page; + struct buffer_page *tail_page; + struct buffer_page *commit_page; + struct buffer_page *reader_page; + long unsigned int lost_events; + long unsigned int last_overrun; + long unsigned int nest; + local_t entries_bytes; + local_t entries; + local_t overrun; + local_t commit_overrun; + local_t dropped_events; + local_t committing; + local_t commits; + local_t pages_touched; + local_t pages_read; + long int last_pages_touch; + size_t shortest_full; + long unsigned int read; + long unsigned int read_bytes; + rb_time_t write_stamp; + rb_time_t before_stamp; + u64 event_stamp[5]; + u64 read_stamp; + long int nr_pages_to_update; + struct list_head new_pages; + struct work_struct update_pages_work; + struct completion update_done; + struct rb_irq_work irq_work; +}; + +typedef struct vfsmount * (*debugfs_automount_t)(struct dentry *, void *); + +struct trace_export { + struct trace_export *next; + void (*write)(struct trace_export *, const void *, unsigned int); + int flags; +}; + +enum fsnotify_data_type { + FSNOTIFY_EVENT_NONE = 0, + FSNOTIFY_EVENT_PATH = 1, + FSNOTIFY_EVENT_INODE = 2, +}; + +enum trace_iter_flags { + TRACE_FILE_LAT_FMT = 1, + TRACE_FILE_ANNOTATE = 2, + TRACE_FILE_TIME_IN_NS = 4, +}; + +enum trace_flag_type { + TRACE_FLAG_IRQS_OFF = 1, + TRACE_FLAG_IRQS_NOSUPPORT = 2, + TRACE_FLAG_NEED_RESCHED = 4, + TRACE_FLAG_HARDIRQ = 8, + TRACE_FLAG_SOFTIRQ = 16, + TRACE_FLAG_PREEMPT_RESCHED = 32, + TRACE_FLAG_NMI = 64, +}; + +enum trace_type { + __TRACE_FIRST_TYPE = 0, + TRACE_FN = 1, + TRACE_CTX = 2, + TRACE_WAKE = 3, + TRACE_STACK = 4, + TRACE_PRINT = 5, + TRACE_BPRINT = 6, + TRACE_MMIO_RW = 7, + TRACE_MMIO_MAP = 8, + TRACE_BRANCH = 9, + TRACE_GRAPH_RET = 10, + TRACE_GRAPH_ENT = 11, + TRACE_USER_STACK = 12, + TRACE_BLK = 13, + TRACE_BPUTS = 14, + TRACE_HWLAT = 15, + TRACE_RAW_DATA = 16, + TRACE_FUNC_REPEATS = 17, + __TRACE_LAST_TYPE = 18, +}; + +struct ftrace_entry { + struct trace_entry ent; + long unsigned int ip; + long unsigned int parent_ip; +}; + +struct stack_entry { + struct trace_entry ent; + int size; + long unsigned int caller[8]; +}; + +struct userstack_entry { + struct trace_entry ent; + unsigned int tgid; + long unsigned int caller[8]; +}; + +struct bprint_entry { + struct trace_entry ent; + long unsigned int ip; + const char *fmt; + u32 buf[0]; +}; + +struct print_entry { + struct trace_entry ent; + long unsigned int ip; + char buf[0]; +}; + +struct raw_data_entry { + struct trace_entry 
ent; + unsigned int id; + char buf[0]; +}; + +struct bputs_entry { + struct trace_entry ent; + long unsigned int ip; + const char *str; +}; + +struct func_repeats_entry { + struct trace_entry ent; + long unsigned int ip; + long unsigned int parent_ip; + u16 count; + u16 top_delta_ts; + u32 bottom_delta_ts; +}; + +enum trace_iterator_flags { + TRACE_ITER_PRINT_PARENT = 1, + TRACE_ITER_SYM_OFFSET = 2, + TRACE_ITER_SYM_ADDR = 4, + TRACE_ITER_VERBOSE = 8, + TRACE_ITER_RAW = 16, + TRACE_ITER_HEX = 32, + TRACE_ITER_BIN = 64, + TRACE_ITER_BLOCK = 128, + TRACE_ITER_PRINTK = 256, + TRACE_ITER_ANNOTATE = 512, + TRACE_ITER_USERSTACKTRACE = 1024, + TRACE_ITER_SYM_USEROBJ = 2048, + TRACE_ITER_PRINTK_MSGONLY = 4096, + TRACE_ITER_CONTEXT_INFO = 8192, + TRACE_ITER_LATENCY_FMT = 16384, + TRACE_ITER_RECORD_CMD = 32768, + TRACE_ITER_RECORD_TGID = 65536, + TRACE_ITER_OVERWRITE = 131072, + TRACE_ITER_STOP_ON_FREE = 262144, + TRACE_ITER_IRQ_INFO = 524288, + TRACE_ITER_MARKERS = 1048576, + TRACE_ITER_EVENT_FORK = 2097152, + TRACE_ITER_PAUSE_ON_TRACE = 4194304, + TRACE_ITER_HASH_PTR = 8388608, + TRACE_ITER_FUNCTION = 16777216, + TRACE_ITER_FUNC_FORK = 33554432, + TRACE_ITER_DISPLAY_GRAPH = 67108864, + TRACE_ITER_STACKTRACE = 134217728, +}; + +struct saved_cmdlines_buffer { + unsigned int map_pid_to_cmdline[32769]; + unsigned int *map_cmdline_to_pid; + unsigned int cmdline_num; + int cmdline_idx; + char *saved_cmdlines; +}; + +struct ftrace_stack { + long unsigned int calls[1024]; +}; + +struct ftrace_stacks { + struct ftrace_stack stacks[4]; +}; + +struct trace_buffer_struct { + int nesting; + char buffer[4096]; +}; + +struct ftrace_buffer_info { + struct trace_iterator iter; + void *spare; + unsigned int spare_cpu; + unsigned int read; +}; + +struct err_info { + const char **errs; + u8 type; + u8 pos; + u64 ts; +}; + +struct tracing_log_err { + struct list_head list; + struct err_info info; + char loc[128]; + char cmd[256]; +}; + +struct buffer_ref { + struct trace_buffer *buffer; + void *page; + int cpu; + refcount_t refcount; +}; + +struct ftrace_func_mapper___2; + +struct ctx_switch_entry { + struct trace_entry ent; + unsigned int prev_pid; + unsigned int next_pid; + unsigned int next_cpu; + unsigned char prev_prio; + unsigned char prev_state; + unsigned char next_prio; + unsigned char next_state; +}; + +struct hwlat_entry { + struct trace_entry ent; + u64 duration; + u64 outer_duration; + u64 nmi_total_ts; + struct timespec64 timestamp; + unsigned int nmi_count; + unsigned int seqnum; + unsigned int count; +}; + +struct trace_mark { + long long unsigned int val; + char sym; +}; + +struct stat_node { + struct rb_node node; + void *stat; +}; + +struct stat_session { + struct list_head session_list; + struct tracer_stat *ts; + struct rb_root stat_root; + struct mutex stat_mutex; + struct dentry *file; +}; + +struct trace_bprintk_fmt { + struct list_head list; + const char *fmt; +}; + +typedef int (*tracing_map_cmp_fn_t)(void *, void *); + +struct tracing_map_field { + tracing_map_cmp_fn_t cmp_fn; + union { + atomic64_t sum; + unsigned int offset; + }; +}; + +struct tracing_map; + +struct tracing_map_elt { + struct tracing_map *map; + struct tracing_map_field *fields; + atomic64_t *vars; + bool *var_set; + void *key; + void *private_data; +}; + +struct tracing_map_sort_key { + unsigned int field_idx; + bool descending; +}; + +struct tracing_map_array; + +struct tracing_map_ops; + +struct tracing_map { + unsigned int key_size; + unsigned int map_bits; + unsigned int map_size; + unsigned int max_elts; + atomic_t 
next_elt; + struct tracing_map_array *elts; + struct tracing_map_array *map; + const struct tracing_map_ops *ops; + void *private_data; + struct tracing_map_field fields[6]; + unsigned int n_fields; + int key_idx[3]; + unsigned int n_keys; + struct tracing_map_sort_key sort_key; + unsigned int n_vars; + atomic64_t hits; + atomic64_t drops; +}; + +struct tracing_map_entry { + u32 key; + struct tracing_map_elt *val; +}; + +struct tracing_map_sort_entry { + void *key; + struct tracing_map_elt *elt; + bool elt_copied; + bool dup; +}; + +struct tracing_map_array { + unsigned int entries_per_page; + unsigned int entry_size_shift; + unsigned int entry_shift; + unsigned int entry_mask; + unsigned int n_pages; + void **pages; +}; + +struct tracing_map_ops { + int (*elt_alloc)(struct tracing_map_elt *); + void (*elt_free)(struct tracing_map_elt *); + void (*elt_clear)(struct tracing_map_elt *); + void (*elt_init)(struct tracing_map_elt *); +}; + +enum { + TRACE_FUNC_NO_OPTS = 0, + TRACE_FUNC_OPT_STACK = 1, + TRACE_FUNC_OPT_NO_REPEATS = 2, + TRACE_FUNC_OPT_HIGHEST_BIT = 4, +}; + +struct hwlat_sample { + u64 seqnum; + u64 duration; + u64 outer_duration; + u64 nmi_total_ts; + struct timespec64 timestamp; + int nmi_count; + int count; +}; + +struct hwlat_data { + struct mutex lock; + u64 count; + u64 sample_window; + u64 sample_width; +}; + +enum { + TRACE_NOP_OPT_ACCEPT = 1, + TRACE_NOP_OPT_REFUSE = 2, +}; + +struct trace_mmiotrace_rw { + struct trace_entry ent; + struct mmiotrace_rw rw; +}; + +struct trace_mmiotrace_map { + struct trace_entry ent; + struct mmiotrace_map map; +}; + +struct header_iter { + struct pci_dev *dev; +}; + +struct ftrace_graph_ent_entry { + struct trace_entry ent; + struct ftrace_graph_ent graph_ent; +} __attribute__((packed)); + +struct ftrace_graph_ret_entry { + struct trace_entry ent; + struct ftrace_graph_ret ret; +}; + +struct fgraph_cpu_data { + pid_t last_pid; + int depth; + int depth_irq; + int ignore; + long unsigned int enter_funcs[50]; +}; + +struct fgraph_data { + struct fgraph_cpu_data *cpu_data; + struct ftrace_graph_ent_entry ent; + struct ftrace_graph_ret_entry ret; + int failed; + int cpu; + int: 32; +} __attribute__((packed)); + +enum { + FLAGS_FILL_FULL = 268435456, + FLAGS_FILL_START = 536870912, + FLAGS_FILL_END = 805306368, +}; + +struct disk_stats { + u64 nsecs[4]; + long unsigned int sectors[4]; + long unsigned int ios[4]; + long unsigned int merges[4]; + long unsigned int io_ticks; + local_t in_flight[2]; +}; + +struct blk_crypto_key; + +struct bio_crypt_ctx { + const struct blk_crypto_key *bc_key; + u64 bc_dun[4]; +}; + +typedef __u32 blk_mq_req_flags_t; + +struct blk_mq_ctxs; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_lists[3]; + long: 64; + }; + unsigned int cpu; + short unsigned int index_hw[3]; + struct blk_mq_hw_ctx *hctxs[3]; + long unsigned int rq_dispatched[2]; + long unsigned int rq_merged; + long unsigned int rq_completed[2]; + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct sbitmap_word; + +struct sbitmap { + unsigned int depth; + unsigned int shift; + unsigned int map_nr; + bool round_robin; + struct sbitmap_word *map; + unsigned int *alloc_hint; +}; + +struct blk_mq_tags; + +struct blk_mq_hw_ctx { + struct { + spinlock_t lock; + struct list_head dispatch; + long unsigned int state; + long: 64; + long: 64; + long: 64; + long: 64; + }; + struct delayed_work run_work; + cpumask_var_t cpumask; + int next_cpu; + int 
next_cpu_batch; + long unsigned int flags; + void *sched_data; + struct request_queue *queue; + struct blk_flush_queue *fq; + void *driver_data; + struct sbitmap ctx_map; + struct blk_mq_ctx *dispatch_from; + unsigned int dispatch_busy; + short unsigned int type; + short unsigned int nr_ctx; + struct blk_mq_ctx **ctxs; + spinlock_t dispatch_wait_lock; + wait_queue_entry_t dispatch_wait; + atomic_t wait_index; + struct blk_mq_tags *tags; + struct blk_mq_tags *sched_tags; + long unsigned int queued; + long unsigned int run; + long unsigned int dispatched[7]; + unsigned int numa_node; + unsigned int queue_num; + atomic_t nr_active; + struct hlist_node cpuhp_online; + struct hlist_node cpuhp_dead; + struct kobject kobj; + long unsigned int poll_considered; + long unsigned int poll_invoked; + long unsigned int poll_success; + struct dentry *debugfs_dir; + struct dentry *sched_debugfs_dir; + struct list_head hctx_list; + struct srcu_struct srcu[0]; +}; + +struct blk_mq_alloc_data { + struct request_queue *q; + blk_mq_req_flags_t flags; + unsigned int shallow_depth; + unsigned int cmd_flags; + struct blk_mq_ctx *ctx; + struct blk_mq_hw_ctx *hctx; +}; + +struct blk_stat_callback { + struct list_head list; + struct timer_list timer; + struct blk_rq_stat *cpu_stat; + int (*bucket_fn)(const struct request *); + unsigned int buckets; + struct blk_rq_stat *stat; + void (*timer_fn)(struct blk_stat_callback *); + void *data; + struct callback_head rcu; +}; + +struct blk_trace { + int trace_state; + struct rchan *rchan; + long unsigned int *sequence; + unsigned char *msg_data; + u16 act_mask; + u64 start_lba; + u64 end_lba; + u32 pid; + u32 dev; + struct dentry *dir; + struct list_head running_list; + atomic_t dropped; +}; + +struct blk_flush_queue { + unsigned int flush_pending_idx: 1; + unsigned int flush_running_idx: 1; + blk_status_t rq_status; + long unsigned int flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + spinlock_t mq_flush_lock; +}; + +struct blk_mq_queue_map { + unsigned int *mq_map; + unsigned int nr_queues; + unsigned int queue_offset; +}; + +struct sbq_wait_state; + +struct sbitmap_queue { + struct sbitmap sb; + unsigned int wake_batch; + atomic_t wake_index; + struct sbq_wait_state *ws; + atomic_t ws_active; + unsigned int min_shallow_depth; +}; + +struct blk_mq_tag_set { + struct blk_mq_queue_map map[3]; + unsigned int nr_maps; + const struct blk_mq_ops *ops; + unsigned int nr_hw_queues; + unsigned int queue_depth; + unsigned int reserved_tags; + unsigned int cmd_size; + int numa_node; + unsigned int timeout; + unsigned int flags; + void *driver_data; + atomic_t active_queues_shared_sbitmap; + struct sbitmap_queue __bitmap_tags; + struct sbitmap_queue __breserved_tags; + struct blk_mq_tags **tags; + struct mutex tag_list_lock; + struct list_head tag_list; +}; + +typedef u64 compat_u64; + +enum blktrace_cat { + BLK_TC_READ = 1, + BLK_TC_WRITE = 2, + BLK_TC_FLUSH = 4, + BLK_TC_SYNC = 8, + BLK_TC_SYNCIO = 8, + BLK_TC_QUEUE = 16, + BLK_TC_REQUEUE = 32, + BLK_TC_ISSUE = 64, + BLK_TC_COMPLETE = 128, + BLK_TC_FS = 256, + BLK_TC_PC = 512, + BLK_TC_NOTIFY = 1024, + BLK_TC_AHEAD = 2048, + BLK_TC_META = 4096, + BLK_TC_DISCARD = 8192, + BLK_TC_DRV_DATA = 16384, + BLK_TC_FUA = 32768, + BLK_TC_END = 32768, +}; + +enum blktrace_act { + __BLK_TA_QUEUE = 1, + __BLK_TA_BACKMERGE = 2, + __BLK_TA_FRONTMERGE = 3, + __BLK_TA_GETRQ = 4, + __BLK_TA_SLEEPRQ = 5, + __BLK_TA_REQUEUE = 6, + __BLK_TA_ISSUE = 7, + __BLK_TA_COMPLETE = 8, + 
__BLK_TA_PLUG = 9, + __BLK_TA_UNPLUG_IO = 10, + __BLK_TA_UNPLUG_TIMER = 11, + __BLK_TA_INSERT = 12, + __BLK_TA_SPLIT = 13, + __BLK_TA_BOUNCE = 14, + __BLK_TA_REMAP = 15, + __BLK_TA_ABORT = 16, + __BLK_TA_DRV_DATA = 17, + __BLK_TA_CGROUP = 256, +}; + +enum blktrace_notify { + __BLK_TN_PROCESS = 0, + __BLK_TN_TIMESTAMP = 1, + __BLK_TN_MESSAGE = 2, + __BLK_TN_CGROUP = 256, +}; + +struct blk_io_trace { + __u32 magic; + __u32 sequence; + __u64 time; + __u64 sector; + __u32 bytes; + __u32 action; + __u32 pid; + __u32 device; + __u32 cpu; + __u16 error; + __u16 pdu_len; +}; + +struct blk_io_trace_remap { + __be32 device_from; + __be32 device_to; + __be64 sector_from; +}; + +enum { + Blktrace_setup = 1, + Blktrace_running = 2, + Blktrace_stopped = 3, +}; + +struct blk_user_trace_setup { + char name[32]; + __u16 act_mask; + __u32 buf_size; + __u32 buf_nr; + __u64 start_lba; + __u64 end_lba; + __u32 pid; +}; + +struct compat_blk_user_trace_setup { + char name[32]; + u16 act_mask; + short: 16; + u32 buf_size; + u32 buf_nr; + compat_u64 start_lba; + compat_u64 end_lba; + u32 pid; +} __attribute__((packed)); + +struct sbitmap_word { + long unsigned int depth; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long unsigned int word; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long unsigned int cleared; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct sbq_wait_state { + atomic_t wait_cnt; + wait_queue_head_t wait; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct blk_mq_tags { + unsigned int nr_tags; + unsigned int nr_reserved_tags; + atomic_t active_queues; + struct sbitmap_queue *bitmap_tags; + struct sbitmap_queue *breserved_tags; + struct sbitmap_queue __bitmap_tags; + struct sbitmap_queue __breserved_tags; + struct request **rqs; + struct request **static_rqs; + struct list_head page_list; + spinlock_t lock; +}; + +struct blk_mq_queue_data { + struct request *rq; + bool last; +}; + +enum blk_crypto_mode_num { + BLK_ENCRYPTION_MODE_INVALID = 0, + BLK_ENCRYPTION_MODE_AES_256_XTS = 1, + BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV = 2, + BLK_ENCRYPTION_MODE_ADIANTUM = 3, + BLK_ENCRYPTION_MODE_MAX = 4, +}; + +struct blk_crypto_config { + enum blk_crypto_mode_num crypto_mode; + unsigned int data_unit_size; + unsigned int dun_bytes; +}; + +struct blk_crypto_key { + struct blk_crypto_config crypto_cfg; + unsigned int data_unit_size_bits; + unsigned int size; + u8 raw[64]; +}; + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx *queue_ctx; +}; + +typedef void blk_log_action_t(struct trace_iterator *, const char *, bool); + +struct ftrace_event_field { + struct list_head link; + const char *name; + const char *type; + int filter_type; + int offset; + int size; + int is_signed; +}; + +enum { + FORMAT_HEADER = 1, + FORMAT_FIELD_SEPERATOR = 2, + FORMAT_PRINTFMT = 3, +}; + +struct event_probe_data { + struct trace_event_file *file; + long unsigned int count; + int ref; + bool enable; +}; + +struct syscall_trace_enter { + struct trace_entry ent; + int nr; + long unsigned int args[0]; +}; + +struct syscall_trace_exit { + struct trace_entry ent; + int nr; + long int ret; +}; + +struct syscall_tp_t { + long long unsigned int regs; + long unsigned int syscall_nr; + long unsigned int ret; +}; + +struct syscall_tp_t___2 { + long long unsigned int regs; + long unsigned int syscall_nr; + long unsigned int args[6]; +}; + +typedef long unsigned int perf_trace_t[256]; + +struct 
filter_pred; + +struct prog_entry { + int target; + int when_to_branch; + struct filter_pred *pred; +}; + +typedef int (*filter_pred_fn_t)(struct filter_pred *, void *); + +struct regex; + +typedef int (*regex_match_func)(char *, struct regex *, int); + +struct regex { + char pattern[256]; + int len; + int field_len; + regex_match_func match; +}; + +struct filter_pred { + filter_pred_fn_t fn; + u64 val; + struct regex regex; + short unsigned int *ops; + struct ftrace_event_field *field; + int offset; + int not; + int op; +}; + +enum filter_op_ids { + OP_GLOB = 0, + OP_NE = 1, + OP_EQ = 2, + OP_LE = 3, + OP_LT = 4, + OP_GE = 5, + OP_GT = 6, + OP_BAND = 7, + OP_MAX = 8, +}; + +enum { + FILT_ERR_NONE = 0, + FILT_ERR_INVALID_OP = 1, + FILT_ERR_TOO_MANY_OPEN = 2, + FILT_ERR_TOO_MANY_CLOSE = 3, + FILT_ERR_MISSING_QUOTE = 4, + FILT_ERR_OPERAND_TOO_LONG = 5, + FILT_ERR_EXPECT_STRING = 6, + FILT_ERR_EXPECT_DIGIT = 7, + FILT_ERR_ILLEGAL_FIELD_OP = 8, + FILT_ERR_FIELD_NOT_FOUND = 9, + FILT_ERR_ILLEGAL_INTVAL = 10, + FILT_ERR_BAD_SUBSYS_FILTER = 11, + FILT_ERR_TOO_MANY_PREDS = 12, + FILT_ERR_INVALID_FILTER = 13, + FILT_ERR_IP_FIELD_ONLY = 14, + FILT_ERR_INVALID_VALUE = 15, + FILT_ERR_ERRNO = 16, + FILT_ERR_NO_FILTER = 17, +}; + +struct filter_parse_error { + int lasterr; + int lasterr_pos; +}; + +typedef int (*parse_pred_fn)(const char *, void *, int, struct filter_parse_error *, struct filter_pred **); + +enum { + INVERT = 1, + PROCESS_AND = 2, + PROCESS_OR = 4, +}; + +enum { + TOO_MANY_CLOSE = 4294967295, + TOO_MANY_OPEN = 4294967294, + MISSING_QUOTE = 4294967293, +}; + +struct filter_list { + struct list_head list; + struct event_filter *filter; +}; + +struct function_filter_data { + struct ftrace_ops *ops; + int first_filter; + int first_notrace; +}; + +struct event_trigger_ops; + +struct event_command; + +struct event_trigger_data { + long unsigned int count; + int ref; + struct event_trigger_ops *ops; + struct event_command *cmd_ops; + struct event_filter *filter; + char *filter_str; + void *private_data; + bool paused; + bool paused_tmp; + struct list_head list; + char *name; + struct list_head named_list; + struct event_trigger_data *named_data; +}; + +struct event_trigger_ops { + void (*func)(struct event_trigger_data *, struct trace_buffer *, void *, struct ring_buffer_event *); + int (*init)(struct event_trigger_ops *, struct event_trigger_data *); + void (*free)(struct event_trigger_ops *, struct event_trigger_data *); + int (*print)(struct seq_file *, struct event_trigger_ops *, struct event_trigger_data *); +}; + +struct event_command { + struct list_head list; + char *name; + enum event_trigger_type trigger_type; + int flags; + int (*func)(struct event_command *, struct trace_event_file *, char *, char *, char *); + int (*reg)(char *, struct event_trigger_ops *, struct event_trigger_data *, struct trace_event_file *); + void (*unreg)(char *, struct event_trigger_ops *, struct event_trigger_data *, struct trace_event_file *); + void (*unreg_all)(struct trace_event_file *); + int (*set_filter)(char *, struct event_trigger_data *, struct trace_event_file *); + struct event_trigger_ops * (*get_trigger_ops)(char *, char *); +}; + +struct enable_trigger_data { + struct trace_event_file *file; + bool enable; + bool hist; +}; + +enum event_command_flags { + EVENT_CMD_FL_POST_TRIGGER = 1, + EVENT_CMD_FL_NEEDS_REC = 2, +}; + +enum dynevent_type { + DYNEVENT_TYPE_SYNTH = 1, + DYNEVENT_TYPE_KPROBE = 2, + DYNEVENT_TYPE_NONE = 3, +}; + +struct dynevent_cmd; + +typedef int 
(*dynevent_create_fn_t)(struct dynevent_cmd *); + +struct dynevent_cmd { + struct seq_buf seq; + const char *event_name; + unsigned int n_fields; + enum dynevent_type type; + dynevent_create_fn_t run_command; + void *private_data; +}; + +struct synth_field_desc { + const char *type; + const char *name; +}; + +struct synth_trace_event; + +struct synth_event; + +struct synth_event_trace_state { + struct trace_event_buffer fbuffer; + struct synth_trace_event *entry; + struct trace_buffer *buffer; + struct synth_event *event; + unsigned int cur_field; + unsigned int n_u64; + bool disabled; + bool add_next; + bool add_name; +}; + +struct synth_trace_event { + struct trace_entry ent; + u64 fields[0]; +}; + +struct dyn_event_operations; + +struct dyn_event { + struct list_head list; + struct dyn_event_operations *ops; +}; + +struct synth_field; + +struct synth_event { + struct dyn_event devent; + int ref; + char *name; + struct synth_field **fields; + unsigned int n_fields; + struct synth_field **dynamic_fields; + unsigned int n_dynamic_fields; + unsigned int n_u64; + struct trace_event_class class; + struct trace_event_call call; + struct tracepoint *tp; + struct module *mod; +}; + +struct dyn_event_operations { + struct list_head list; + int (*create)(const char *); + int (*show)(struct seq_file *, struct dyn_event *); + bool (*is_busy)(struct dyn_event *); + int (*free)(struct dyn_event *); + bool (*match)(const char *, const char *, int, const char **, struct dyn_event *); +}; + +typedef int (*dynevent_check_arg_fn_t)(void *); + +struct dynevent_arg { + const char *str; + char separator; +}; + +struct dynevent_arg_pair { + const char *lhs; + const char *rhs; + char operator; + char separator; +}; + +struct synth_field { + char *type; + char *name; + size_t size; + unsigned int offset; + unsigned int field_pos; + bool is_signed; + bool is_string; + bool is_dynamic; +}; + +enum { + SYNTH_ERR_BAD_NAME = 0, + SYNTH_ERR_INVALID_CMD = 1, + SYNTH_ERR_INVALID_DYN_CMD = 2, + SYNTH_ERR_EVENT_EXISTS = 3, + SYNTH_ERR_TOO_MANY_FIELDS = 4, + SYNTH_ERR_INCOMPLETE_TYPE = 5, + SYNTH_ERR_INVALID_TYPE = 6, + SYNTH_ERR_INVALID_FIELD = 7, + SYNTH_ERR_INVALID_ARRAY_SPEC = 8, +}; + +enum { + HIST_ERR_NONE = 0, + HIST_ERR_DUPLICATE_VAR = 1, + HIST_ERR_VAR_NOT_UNIQUE = 2, + HIST_ERR_TOO_MANY_VARS = 3, + HIST_ERR_MALFORMED_ASSIGNMENT = 4, + HIST_ERR_NAMED_MISMATCH = 5, + HIST_ERR_TRIGGER_EEXIST = 6, + HIST_ERR_TRIGGER_ENOENT_CLEAR = 7, + HIST_ERR_SET_CLOCK_FAIL = 8, + HIST_ERR_BAD_FIELD_MODIFIER = 9, + HIST_ERR_TOO_MANY_SUBEXPR = 10, + HIST_ERR_TIMESTAMP_MISMATCH = 11, + HIST_ERR_TOO_MANY_FIELD_VARS = 12, + HIST_ERR_EVENT_FILE_NOT_FOUND = 13, + HIST_ERR_HIST_NOT_FOUND = 14, + HIST_ERR_HIST_CREATE_FAIL = 15, + HIST_ERR_SYNTH_VAR_NOT_FOUND = 16, + HIST_ERR_SYNTH_EVENT_NOT_FOUND = 17, + HIST_ERR_SYNTH_TYPE_MISMATCH = 18, + HIST_ERR_SYNTH_COUNT_MISMATCH = 19, + HIST_ERR_FIELD_VAR_PARSE_FAIL = 20, + HIST_ERR_VAR_CREATE_FIND_FAIL = 21, + HIST_ERR_ONX_NOT_VAR = 22, + HIST_ERR_ONX_VAR_NOT_FOUND = 23, + HIST_ERR_ONX_VAR_CREATE_FAIL = 24, + HIST_ERR_FIELD_VAR_CREATE_FAIL = 25, + HIST_ERR_TOO_MANY_PARAMS = 26, + HIST_ERR_PARAM_NOT_FOUND = 27, + HIST_ERR_INVALID_PARAM = 28, + HIST_ERR_ACTION_NOT_FOUND = 29, + HIST_ERR_NO_SAVE_PARAMS = 30, + HIST_ERR_TOO_MANY_SAVE_ACTIONS = 31, + HIST_ERR_ACTION_MISMATCH = 32, + HIST_ERR_NO_CLOSING_PAREN = 33, + HIST_ERR_SUBSYS_NOT_FOUND = 34, + HIST_ERR_INVALID_SUBSYS_EVENT = 35, + HIST_ERR_INVALID_REF_KEY = 36, + HIST_ERR_VAR_NOT_FOUND = 37, + HIST_ERR_FIELD_NOT_FOUND = 38, + 
HIST_ERR_EMPTY_ASSIGNMENT = 39, + HIST_ERR_INVALID_SORT_MODIFIER = 40, + HIST_ERR_EMPTY_SORT_FIELD = 41, + HIST_ERR_TOO_MANY_SORT_FIELDS = 42, + HIST_ERR_INVALID_SORT_FIELD = 43, + HIST_ERR_INVALID_STR_OPERAND = 44, +}; + +struct hist_field; + +typedef u64 (*hist_field_fn_t)(struct hist_field *, struct tracing_map_elt *, struct trace_buffer *, struct ring_buffer_event *, void *); + +struct hist_trigger_data; + +struct hist_var { + char *name; + struct hist_trigger_data *hist_data; + unsigned int idx; +}; + +enum field_op_id { + FIELD_OP_NONE = 0, + FIELD_OP_PLUS = 1, + FIELD_OP_MINUS = 2, + FIELD_OP_UNARY_MINUS = 3, +}; + +struct hist_field { + struct ftrace_event_field *field; + long unsigned int flags; + hist_field_fn_t fn; + unsigned int ref; + unsigned int size; + unsigned int offset; + unsigned int is_signed; + const char *type; + struct hist_field *operands[2]; + struct hist_trigger_data *hist_data; + struct hist_var var; + enum field_op_id operator; + char *system; + char *event_name; + char *name; + unsigned int var_ref_idx; + bool read_once; + unsigned int var_str_idx; +}; + +struct hist_trigger_attrs; + +struct action_data; + +struct field_var; + +struct field_var_hist; + +struct hist_trigger_data { + struct hist_field *fields[22]; + unsigned int n_vals; + unsigned int n_keys; + unsigned int n_fields; + unsigned int n_vars; + unsigned int n_var_str; + unsigned int key_size; + struct tracing_map_sort_key sort_keys[2]; + unsigned int n_sort_keys; + struct trace_event_file *event_file; + struct hist_trigger_attrs *attrs; + struct tracing_map *map; + bool enable_timestamps; + bool remove; + struct hist_field *var_refs[16]; + unsigned int n_var_refs; + struct action_data *actions[8]; + unsigned int n_actions; + struct field_var *field_vars[32]; + unsigned int n_field_vars; + unsigned int n_field_var_str; + struct field_var_hist *field_var_hists[32]; + unsigned int n_field_var_hists; + struct field_var *save_vars[32]; + unsigned int n_save_vars; + unsigned int n_save_var_str; +}; + +enum hist_field_flags { + HIST_FIELD_FL_HITCOUNT = 1, + HIST_FIELD_FL_KEY = 2, + HIST_FIELD_FL_STRING = 4, + HIST_FIELD_FL_HEX = 8, + HIST_FIELD_FL_SYM = 16, + HIST_FIELD_FL_SYM_OFFSET = 32, + HIST_FIELD_FL_EXECNAME = 64, + HIST_FIELD_FL_SYSCALL = 128, + HIST_FIELD_FL_STACKTRACE = 256, + HIST_FIELD_FL_LOG2 = 512, + HIST_FIELD_FL_TIMESTAMP = 1024, + HIST_FIELD_FL_TIMESTAMP_USECS = 2048, + HIST_FIELD_FL_VAR = 4096, + HIST_FIELD_FL_EXPR = 8192, + HIST_FIELD_FL_VAR_REF = 16384, + HIST_FIELD_FL_CPU = 32768, + HIST_FIELD_FL_ALIAS = 65536, +}; + +struct var_defs { + unsigned int n_vars; + char *name[16]; + char *expr[16]; +}; + +struct hist_trigger_attrs { + char *keys_str; + char *vals_str; + char *sort_key_str; + char *name; + char *clock; + bool pause; + bool cont; + bool clear; + bool ts_in_usecs; + unsigned int map_bits; + char *assignment_str[16]; + unsigned int n_assignments; + char *action_str[8]; + unsigned int n_actions; + struct var_defs var_defs; +}; + +struct field_var { + struct hist_field *var; + struct hist_field *val; +}; + +struct field_var_hist { + struct hist_trigger_data *hist_data; + char *cmd; +}; + +enum handler_id { + HANDLER_ONMATCH = 1, + HANDLER_ONMAX = 2, + HANDLER_ONCHANGE = 3, +}; + +enum action_id { + ACTION_SAVE = 1, + ACTION_TRACE = 2, + ACTION_SNAPSHOT = 3, +}; + +typedef void (*action_fn_t)(struct hist_trigger_data *, struct tracing_map_elt *, struct trace_buffer *, void *, struct ring_buffer_event *, void *, struct action_data *, u64 *); + +typedef bool 
(*check_track_val_fn_t)(u64, u64); + +struct action_data { + enum handler_id handler; + enum action_id action; + char *action_name; + action_fn_t fn; + unsigned int n_params; + char *params[32]; + unsigned int var_ref_idx[16]; + struct synth_event *synth_event; + bool use_trace_keyword; + char *synth_event_name; + union { + struct { + char *event; + char *event_system; + } match_data; + struct { + char *var_str; + struct hist_field *var_ref; + struct hist_field *track_var; + check_track_val_fn_t check_val; + action_fn_t save_data; + } track_data; + }; +}; + +struct track_data { + u64 track_val; + bool updated; + unsigned int key_len; + void *key; + struct tracing_map_elt elt; + struct action_data *action_data; + struct hist_trigger_data *hist_data; +}; + +struct hist_elt_data { + char *comm; + u64 *var_ref_vals; + char *field_var_str[32]; +}; + +struct snapshot_context { + struct tracing_map_elt *elt; + void *key; +}; + +typedef void (*synth_probe_func_t)(void *, u64 *, unsigned int *); + +struct hist_var_data { + struct list_head list; + struct hist_trigger_data *hist_data; +}; + +enum bpf_func_id { + BPF_FUNC_unspec = 0, + BPF_FUNC_map_lookup_elem = 1, + BPF_FUNC_map_update_elem = 2, + BPF_FUNC_map_delete_elem = 3, + BPF_FUNC_probe_read = 4, + BPF_FUNC_ktime_get_ns = 5, + BPF_FUNC_trace_printk = 6, + BPF_FUNC_get_prandom_u32 = 7, + BPF_FUNC_get_smp_processor_id = 8, + BPF_FUNC_skb_store_bytes = 9, + BPF_FUNC_l3_csum_replace = 10, + BPF_FUNC_l4_csum_replace = 11, + BPF_FUNC_tail_call = 12, + BPF_FUNC_clone_redirect = 13, + BPF_FUNC_get_current_pid_tgid = 14, + BPF_FUNC_get_current_uid_gid = 15, + BPF_FUNC_get_current_comm = 16, + BPF_FUNC_get_cgroup_classid = 17, + BPF_FUNC_skb_vlan_push = 18, + BPF_FUNC_skb_vlan_pop = 19, + BPF_FUNC_skb_get_tunnel_key = 20, + BPF_FUNC_skb_set_tunnel_key = 21, + BPF_FUNC_perf_event_read = 22, + BPF_FUNC_redirect = 23, + BPF_FUNC_get_route_realm = 24, + BPF_FUNC_perf_event_output = 25, + BPF_FUNC_skb_load_bytes = 26, + BPF_FUNC_get_stackid = 27, + BPF_FUNC_csum_diff = 28, + BPF_FUNC_skb_get_tunnel_opt = 29, + BPF_FUNC_skb_set_tunnel_opt = 30, + BPF_FUNC_skb_change_proto = 31, + BPF_FUNC_skb_change_type = 32, + BPF_FUNC_skb_under_cgroup = 33, + BPF_FUNC_get_hash_recalc = 34, + BPF_FUNC_get_current_task = 35, + BPF_FUNC_probe_write_user = 36, + BPF_FUNC_current_task_under_cgroup = 37, + BPF_FUNC_skb_change_tail = 38, + BPF_FUNC_skb_pull_data = 39, + BPF_FUNC_csum_update = 40, + BPF_FUNC_set_hash_invalid = 41, + BPF_FUNC_get_numa_node_id = 42, + BPF_FUNC_skb_change_head = 43, + BPF_FUNC_xdp_adjust_head = 44, + BPF_FUNC_probe_read_str = 45, + BPF_FUNC_get_socket_cookie = 46, + BPF_FUNC_get_socket_uid = 47, + BPF_FUNC_set_hash = 48, + BPF_FUNC_setsockopt = 49, + BPF_FUNC_skb_adjust_room = 50, + BPF_FUNC_redirect_map = 51, + BPF_FUNC_sk_redirect_map = 52, + BPF_FUNC_sock_map_update = 53, + BPF_FUNC_xdp_adjust_meta = 54, + BPF_FUNC_perf_event_read_value = 55, + BPF_FUNC_perf_prog_read_value = 56, + BPF_FUNC_getsockopt = 57, + BPF_FUNC_override_return = 58, + BPF_FUNC_sock_ops_cb_flags_set = 59, + BPF_FUNC_msg_redirect_map = 60, + BPF_FUNC_msg_apply_bytes = 61, + BPF_FUNC_msg_cork_bytes = 62, + BPF_FUNC_msg_pull_data = 63, + BPF_FUNC_bind = 64, + BPF_FUNC_xdp_adjust_tail = 65, + BPF_FUNC_skb_get_xfrm_state = 66, + BPF_FUNC_get_stack = 67, + BPF_FUNC_skb_load_bytes_relative = 68, + BPF_FUNC_fib_lookup = 69, + BPF_FUNC_sock_hash_update = 70, + BPF_FUNC_msg_redirect_hash = 71, + BPF_FUNC_sk_redirect_hash = 72, + BPF_FUNC_lwt_push_encap = 73, + 
BPF_FUNC_lwt_seg6_store_bytes = 74, + BPF_FUNC_lwt_seg6_adjust_srh = 75, + BPF_FUNC_lwt_seg6_action = 76, + BPF_FUNC_rc_repeat = 77, + BPF_FUNC_rc_keydown = 78, + BPF_FUNC_skb_cgroup_id = 79, + BPF_FUNC_get_current_cgroup_id = 80, + BPF_FUNC_get_local_storage = 81, + BPF_FUNC_sk_select_reuseport = 82, + BPF_FUNC_skb_ancestor_cgroup_id = 83, + BPF_FUNC_sk_lookup_tcp = 84, + BPF_FUNC_sk_lookup_udp = 85, + BPF_FUNC_sk_release = 86, + BPF_FUNC_map_push_elem = 87, + BPF_FUNC_map_pop_elem = 88, + BPF_FUNC_map_peek_elem = 89, + BPF_FUNC_msg_push_data = 90, + BPF_FUNC_msg_pop_data = 91, + BPF_FUNC_rc_pointer_rel = 92, + BPF_FUNC_spin_lock = 93, + BPF_FUNC_spin_unlock = 94, + BPF_FUNC_sk_fullsock = 95, + BPF_FUNC_tcp_sock = 96, + BPF_FUNC_skb_ecn_set_ce = 97, + BPF_FUNC_get_listener_sock = 98, + BPF_FUNC_skc_lookup_tcp = 99, + BPF_FUNC_tcp_check_syncookie = 100, + BPF_FUNC_sysctl_get_name = 101, + BPF_FUNC_sysctl_get_current_value = 102, + BPF_FUNC_sysctl_get_new_value = 103, + BPF_FUNC_sysctl_set_new_value = 104, + BPF_FUNC_strtol = 105, + BPF_FUNC_strtoul = 106, + BPF_FUNC_sk_storage_get = 107, + BPF_FUNC_sk_storage_delete = 108, + BPF_FUNC_send_signal = 109, + BPF_FUNC_tcp_gen_syncookie = 110, + BPF_FUNC_skb_output = 111, + BPF_FUNC_probe_read_user = 112, + BPF_FUNC_probe_read_kernel = 113, + BPF_FUNC_probe_read_user_str = 114, + BPF_FUNC_probe_read_kernel_str = 115, + BPF_FUNC_tcp_send_ack = 116, + BPF_FUNC_send_signal_thread = 117, + BPF_FUNC_jiffies64 = 118, + BPF_FUNC_read_branch_records = 119, + BPF_FUNC_get_ns_current_pid_tgid = 120, + BPF_FUNC_xdp_output = 121, + BPF_FUNC_get_netns_cookie = 122, + BPF_FUNC_get_current_ancestor_cgroup_id = 123, + BPF_FUNC_sk_assign = 124, + BPF_FUNC_ktime_get_boot_ns = 125, + BPF_FUNC_seq_printf = 126, + BPF_FUNC_seq_write = 127, + BPF_FUNC_sk_cgroup_id = 128, + BPF_FUNC_sk_ancestor_cgroup_id = 129, + BPF_FUNC_ringbuf_output = 130, + BPF_FUNC_ringbuf_reserve = 131, + BPF_FUNC_ringbuf_submit = 132, + BPF_FUNC_ringbuf_discard = 133, + BPF_FUNC_ringbuf_query = 134, + BPF_FUNC_csum_level = 135, + BPF_FUNC_skc_to_tcp6_sock = 136, + BPF_FUNC_skc_to_tcp_sock = 137, + BPF_FUNC_skc_to_tcp_timewait_sock = 138, + BPF_FUNC_skc_to_tcp_request_sock = 139, + BPF_FUNC_skc_to_udp6_sock = 140, + BPF_FUNC_get_task_stack = 141, + BPF_FUNC_load_hdr_opt = 142, + BPF_FUNC_store_hdr_opt = 143, + BPF_FUNC_reserve_hdr_opt = 144, + BPF_FUNC_inode_storage_get = 145, + BPF_FUNC_inode_storage_delete = 146, + BPF_FUNC_d_path = 147, + BPF_FUNC_copy_from_user = 148, + BPF_FUNC_snprintf_btf = 149, + BPF_FUNC_seq_printf_btf = 150, + BPF_FUNC_skb_cgroup_classid = 151, + BPF_FUNC_redirect_neigh = 152, + BPF_FUNC_per_cpu_ptr = 153, + BPF_FUNC_this_cpu_ptr = 154, + BPF_FUNC_redirect_peer = 155, + BPF_FUNC_task_storage_get = 156, + BPF_FUNC_task_storage_delete = 157, + BPF_FUNC_get_current_task_btf = 158, + BPF_FUNC_bprm_opts_set = 159, + BPF_FUNC_ktime_get_coarse_ns = 160, + BPF_FUNC_ima_inode_hash = 161, + BPF_FUNC_sock_from_file = 162, + BPF_FUNC_check_mtu = 163, + BPF_FUNC_for_each_map_elem = 164, + BPF_FUNC_snprintf = 165, + __BPF_FUNC_MAX_ID = 166, +}; + +enum { + BPF_F_INDEX_MASK = 4294967295, + BPF_F_CURRENT_CPU = 4294967295, + BPF_F_CTXLEN_MASK = 0, +}; + +enum { + BPF_F_GET_BRANCH_RECORDS_SIZE = 1, +}; + +struct bpf_perf_event_value { + __u64 counter; + __u64 enabled; + __u64 running; +}; + +struct bpf_raw_tracepoint_args { + __u64 args[0]; +}; + +enum bpf_task_fd_type { + BPF_FD_TYPE_RAW_TRACEPOINT = 0, + BPF_FD_TYPE_TRACEPOINT = 1, + BPF_FD_TYPE_KPROBE = 2, + BPF_FD_TYPE_KRETPROBE = 
3, + BPF_FD_TYPE_UPROBE = 4, + BPF_FD_TYPE_URETPROBE = 5, +}; + +struct btf_ptr { + void *ptr; + __u32 type_id; + __u32 flags; +}; + +enum { + BTF_F_COMPACT = 1, + BTF_F_NONAME = 2, + BTF_F_PTR_RAW = 4, + BTF_F_ZERO = 8, +}; + +struct bpf_local_storage_data; + +struct bpf_local_storage { + struct bpf_local_storage_data *cache[16]; + struct hlist_head list; + void *owner; + struct callback_head rcu; + raw_spinlock_t lock; +}; + +struct bpf_local_storage_map_bucket; + +struct bpf_local_storage_map { + struct bpf_map map; + struct bpf_local_storage_map_bucket *buckets; + u32 bucket_log; + u16 elem_size; + u16 cache_idx; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +enum bpf_arg_type { + ARG_DONTCARE = 0, + ARG_CONST_MAP_PTR = 1, + ARG_PTR_TO_MAP_KEY = 2, + ARG_PTR_TO_MAP_VALUE = 3, + ARG_PTR_TO_UNINIT_MAP_VALUE = 4, + ARG_PTR_TO_MAP_VALUE_OR_NULL = 5, + ARG_PTR_TO_MEM = 6, + ARG_PTR_TO_MEM_OR_NULL = 7, + ARG_PTR_TO_UNINIT_MEM = 8, + ARG_CONST_SIZE = 9, + ARG_CONST_SIZE_OR_ZERO = 10, + ARG_PTR_TO_CTX = 11, + ARG_PTR_TO_CTX_OR_NULL = 12, + ARG_ANYTHING = 13, + ARG_PTR_TO_SPIN_LOCK = 14, + ARG_PTR_TO_SOCK_COMMON = 15, + ARG_PTR_TO_INT = 16, + ARG_PTR_TO_LONG = 17, + ARG_PTR_TO_SOCKET = 18, + ARG_PTR_TO_SOCKET_OR_NULL = 19, + ARG_PTR_TO_BTF_ID = 20, + ARG_PTR_TO_ALLOC_MEM = 21, + ARG_PTR_TO_ALLOC_MEM_OR_NULL = 22, + ARG_CONST_ALLOC_SIZE_OR_ZERO = 23, + ARG_PTR_TO_BTF_ID_SOCK_COMMON = 24, + ARG_PTR_TO_PERCPU_BTF_ID = 25, + ARG_PTR_TO_FUNC = 26, + ARG_PTR_TO_STACK_OR_NULL = 27, + ARG_PTR_TO_CONST_STR = 28, + __BPF_ARG_TYPE_MAX = 29, +}; + +enum bpf_return_type { + RET_INTEGER = 0, + RET_VOID = 1, + RET_PTR_TO_MAP_VALUE = 2, + RET_PTR_TO_MAP_VALUE_OR_NULL = 3, + RET_PTR_TO_SOCKET_OR_NULL = 4, + RET_PTR_TO_TCP_SOCK_OR_NULL = 5, + RET_PTR_TO_SOCK_COMMON_OR_NULL = 6, + RET_PTR_TO_ALLOC_MEM_OR_NULL = 7, + RET_PTR_TO_BTF_ID_OR_NULL = 8, + RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL = 9, + RET_PTR_TO_MEM_OR_BTF_ID = 10, + RET_PTR_TO_BTF_ID = 11, +}; + +struct bpf_func_proto { + u64 (*func)(u64, u64, u64, u64, u64); + bool gpl_only; + bool pkt_access; + enum bpf_return_type ret_type; + union { + struct { + enum bpf_arg_type arg1_type; + enum bpf_arg_type arg2_type; + enum bpf_arg_type arg3_type; + enum bpf_arg_type arg4_type; + enum bpf_arg_type arg5_type; + }; + enum bpf_arg_type arg_type[5]; + }; + union { + struct { + u32 *arg1_btf_id; + u32 *arg2_btf_id; + u32 *arg3_btf_id; + u32 *arg4_btf_id; + u32 *arg5_btf_id; + }; + u32 *arg_btf_id[5]; + }; + int *ret_btf_id; + bool (*allowed)(const struct bpf_prog *); +}; + +enum bpf_access_type { + BPF_READ = 1, + BPF_WRITE = 2, +}; + +struct bpf_verifier_log; + +struct bpf_insn_access_aux { + enum bpf_reg_type reg_type; + union { + int ctx_field_size; + struct { + struct btf *btf; + u32 btf_id; + }; + }; + struct bpf_verifier_log *log; +}; + +struct bpf_verifier_ops { + const struct bpf_func_proto * (*get_func_proto)(enum bpf_func_id, const struct bpf_prog *); + bool (*is_valid_access)(int, int, enum bpf_access_type, const struct bpf_prog *, struct bpf_insn_access_aux *); + int (*gen_prologue)(struct bpf_insn *, bool, const struct bpf_prog *); + int (*gen_ld_abs)(const struct bpf_insn *, struct bpf_insn *); + u32 (*convert_ctx_access)(enum bpf_access_type, const struct bpf_insn *, struct bpf_insn *, struct bpf_prog *, u32 *); + int (*btf_struct_access)(struct bpf_verifier_log *, const struct btf *, const struct btf_type *, int, int, enum bpf_access_type, u32 *); + bool (*check_kfunc_call)(u32); +}; + +struct bpf_event_entry { + struct 
perf_event *event; + struct file *perf_file; + struct file *map_file; + struct callback_head rcu; +}; + +typedef long unsigned int (*bpf_ctx_copy_t)(void *, const void *, long unsigned int, long unsigned int); + +typedef struct pt_regs bpf_user_pt_regs_t; + +struct bpf_perf_event_data { + bpf_user_pt_regs_t regs; + __u64 sample_period; + __u64 addr; +}; + +struct perf_event_query_bpf { + __u32 ids_len; + __u32 prog_cnt; + __u32 ids[0]; +}; + +struct bpf_perf_event_data_kern { + bpf_user_pt_regs_t *regs; + struct perf_sample_data *data; + struct perf_event *event; +}; + +struct btf_id_set { + u32 cnt; + u32 ids[0]; +}; + +struct bpf_local_storage_map_bucket { + struct hlist_head list; + raw_spinlock_t lock; +}; + +struct bpf_local_storage_data { + struct bpf_local_storage_map *smap; + u8 data[0]; +}; + +struct trace_event_raw_bpf_trace_printk { + struct trace_entry ent; + u32 __data_loc_bpf_string; + char __data[0]; +}; + +struct trace_event_data_offsets_bpf_trace_printk { + u32 bpf_string; +}; + +typedef void (*btf_trace_bpf_trace_printk)(void *, const char *); + +struct bpf_trace_module { + struct module *module; + struct list_head list; +}; + +typedef u64 (*btf_bpf_override_return)(struct pt_regs *, long unsigned int); + +typedef u64 (*btf_bpf_probe_read_user)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_user_str)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_kernel)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_kernel_str)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_compat)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_compat_str)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_write_user)(void *, const void *, u32); + +typedef u64 (*btf_bpf_trace_printk)(char *, u32, u64, u64, u64); + +typedef u64 (*btf_bpf_seq_printf)(struct seq_file *, char *, u32, const void *, u32); + +typedef u64 (*btf_bpf_seq_write)(struct seq_file *, const void *, u32); + +typedef u64 (*btf_bpf_seq_printf_btf)(struct seq_file *, struct btf_ptr *, u32, u64); + +typedef u64 (*btf_bpf_perf_event_read)(struct bpf_map *, u64); + +typedef u64 (*btf_bpf_perf_event_read_value)(struct bpf_map *, u64, struct bpf_perf_event_value *, u32); + +struct bpf_trace_sample_data { + struct perf_sample_data sds[3]; +}; + +typedef u64 (*btf_bpf_perf_event_output)(struct pt_regs *, struct bpf_map *, u64, void *, u64); + +struct bpf_nested_pt_regs { + struct pt_regs regs[3]; +}; + +typedef u64 (*btf_bpf_get_current_task)(); + +typedef u64 (*btf_bpf_get_current_task_btf)(); + +typedef u64 (*btf_bpf_current_task_under_cgroup)(struct bpf_map *, u32); + +struct send_signal_irq_work { + struct irq_work irq_work; + struct task_struct *task; + u32 sig; + enum pid_type type; +}; + +typedef u64 (*btf_bpf_send_signal)(u32); + +typedef u64 (*btf_bpf_send_signal_thread)(u32); + +typedef u64 (*btf_bpf_d_path)(struct path *, char *, u32); + +typedef u64 (*btf_bpf_snprintf_btf)(char *, u32, struct btf_ptr *, u32, u64); + +typedef u64 (*btf_bpf_perf_event_output_tp)(void *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_get_stackid_tp)(void *, struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_stack_tp)(void *, void *, u32, u64); + +typedef u64 (*btf_bpf_perf_prog_read_value)(struct bpf_perf_event_data_kern *, struct bpf_perf_event_value *, u32); + +typedef u64 (*btf_bpf_read_branch_records)(struct bpf_perf_event_data_kern *, void *, u32, u64); + +struct bpf_raw_tp_regs { + struct pt_regs regs[3]; +}; + +typedef u64 
(*btf_bpf_perf_event_output_raw_tp)(struct bpf_raw_tracepoint_args *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_get_stackid_raw_tp)(struct bpf_raw_tracepoint_args *, struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_stack_raw_tp)(struct bpf_raw_tracepoint_args *, void *, u32, u64); + +struct kprobe_trace_entry_head { + struct trace_entry ent; + long unsigned int ip; +}; + +struct kretprobe_trace_entry_head { + struct trace_entry ent; + long unsigned int func; + long unsigned int ret_ip; +}; + +typedef int (*print_type_func_t)(struct trace_seq *, void *, void *); + +enum fetch_op { + FETCH_OP_NOP = 0, + FETCH_OP_REG = 1, + FETCH_OP_STACK = 2, + FETCH_OP_STACKP = 3, + FETCH_OP_RETVAL = 4, + FETCH_OP_IMM = 5, + FETCH_OP_COMM = 6, + FETCH_OP_ARG = 7, + FETCH_OP_FOFFS = 8, + FETCH_OP_DATA = 9, + FETCH_OP_DEREF = 10, + FETCH_OP_UDEREF = 11, + FETCH_OP_ST_RAW = 12, + FETCH_OP_ST_MEM = 13, + FETCH_OP_ST_UMEM = 14, + FETCH_OP_ST_STRING = 15, + FETCH_OP_ST_USTRING = 16, + FETCH_OP_MOD_BF = 17, + FETCH_OP_LP_ARRAY = 18, + FETCH_OP_END = 19, + FETCH_NOP_SYMBOL = 20, +}; + +struct fetch_insn { + enum fetch_op op; + union { + unsigned int param; + struct { + unsigned int size; + int offset; + }; + struct { + unsigned char basesize; + unsigned char lshift; + unsigned char rshift; + }; + long unsigned int immediate; + void *data; + }; +}; + +struct fetch_type { + const char *name; + size_t size; + int is_signed; + print_type_func_t print; + const char *fmt; + const char *fmttype; +}; + +struct probe_arg { + struct fetch_insn *code; + bool dynamic; + unsigned int offset; + unsigned int count; + const char *name; + const char *comm; + char *fmt; + const struct fetch_type *type; +}; + +struct trace_uprobe_filter { + rwlock_t rwlock; + int nr_systemwide; + struct list_head perf_events; +}; + +struct trace_probe_event { + unsigned int flags; + struct trace_event_class class; + struct trace_event_call call; + struct list_head files; + struct list_head probes; + struct trace_uprobe_filter filter[0]; +}; + +struct trace_probe { + struct list_head list; + struct trace_probe_event *event; + ssize_t size; + unsigned int nr_args; + struct probe_arg args[0]; +}; + +struct event_file_link { + struct trace_event_file *file; + struct list_head list; +}; + +enum { + TP_ERR_FILE_NOT_FOUND = 0, + TP_ERR_NO_REGULAR_FILE = 1, + TP_ERR_BAD_REFCNT = 2, + TP_ERR_REFCNT_OPEN_BRACE = 3, + TP_ERR_BAD_REFCNT_SUFFIX = 4, + TP_ERR_BAD_UPROBE_OFFS = 5, + TP_ERR_MAXACT_NO_KPROBE = 6, + TP_ERR_BAD_MAXACT = 7, + TP_ERR_MAXACT_TOO_BIG = 8, + TP_ERR_BAD_PROBE_ADDR = 9, + TP_ERR_BAD_RETPROBE = 10, + TP_ERR_BAD_ADDR_SUFFIX = 11, + TP_ERR_NO_GROUP_NAME = 12, + TP_ERR_GROUP_TOO_LONG = 13, + TP_ERR_BAD_GROUP_NAME = 14, + TP_ERR_NO_EVENT_NAME = 15, + TP_ERR_EVENT_TOO_LONG = 16, + TP_ERR_BAD_EVENT_NAME = 17, + TP_ERR_RETVAL_ON_PROBE = 18, + TP_ERR_BAD_STACK_NUM = 19, + TP_ERR_BAD_ARG_NUM = 20, + TP_ERR_BAD_VAR = 21, + TP_ERR_BAD_REG_NAME = 22, + TP_ERR_BAD_MEM_ADDR = 23, + TP_ERR_BAD_IMM = 24, + TP_ERR_IMMSTR_NO_CLOSE = 25, + TP_ERR_FILE_ON_KPROBE = 26, + TP_ERR_BAD_FILE_OFFS = 27, + TP_ERR_SYM_ON_UPROBE = 28, + TP_ERR_TOO_MANY_OPS = 29, + TP_ERR_DEREF_NEED_BRACE = 30, + TP_ERR_BAD_DEREF_OFFS = 31, + TP_ERR_DEREF_OPEN_BRACE = 32, + TP_ERR_COMM_CANT_DEREF = 33, + TP_ERR_BAD_FETCH_ARG = 34, + TP_ERR_ARRAY_NO_CLOSE = 35, + TP_ERR_BAD_ARRAY_SUFFIX = 36, + TP_ERR_BAD_ARRAY_NUM = 37, + TP_ERR_ARRAY_TOO_BIG = 38, + TP_ERR_BAD_TYPE = 39, + TP_ERR_BAD_STRING = 40, + TP_ERR_BAD_BITFIELD = 41, + TP_ERR_ARG_NAME_TOO_LONG = 42, + 
TP_ERR_NO_ARG_NAME = 43, + TP_ERR_BAD_ARG_NAME = 44, + TP_ERR_USED_ARG_NAME = 45, + TP_ERR_ARG_TOO_LONG = 46, + TP_ERR_NO_ARG_BODY = 47, + TP_ERR_BAD_INSN_BNDRY = 48, + TP_ERR_FAIL_REG_PROBE = 49, + TP_ERR_DIFF_PROBE_TYPE = 50, + TP_ERR_DIFF_ARG_TYPE = 51, + TP_ERR_SAME_PROBE = 52, +}; + +struct trace_kprobe { + struct dyn_event devent; + struct kretprobe rp; + long unsigned int *nhit; + const char *symbol; + struct trace_probe tp; +}; + +enum error_detector { + ERROR_DETECTOR_KFENCE = 0, + ERROR_DETECTOR_KASAN = 1, +}; + +struct trace_event_raw_error_report_template { + struct trace_entry ent; + enum error_detector error_detector; + long unsigned int id; + char __data[0]; +}; + +struct trace_event_data_offsets_error_report_template {}; + +typedef void (*btf_trace_error_report_end)(void *, enum error_detector, long unsigned int); + +struct trace_event_raw_cpu { + struct trace_entry ent; + u32 state; + u32 cpu_id; + char __data[0]; +}; + +struct trace_event_raw_powernv_throttle { + struct trace_entry ent; + int chip_id; + u32 __data_loc_reason; + int pmax; + char __data[0]; +}; + +struct trace_event_raw_pstate_sample { + struct trace_entry ent; + u32 core_busy; + u32 scaled_busy; + u32 from; + u32 to; + u64 mperf; + u64 aperf; + u64 tsc; + u32 freq; + u32 io_boost; + char __data[0]; +}; + +struct trace_event_raw_cpu_frequency_limits { + struct trace_entry ent; + u32 min_freq; + u32 max_freq; + u32 cpu_id; + char __data[0]; +}; + +struct trace_event_raw_device_pm_callback_start { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_driver; + u32 __data_loc_parent; + u32 __data_loc_pm_ops; + int event; + char __data[0]; +}; + +struct trace_event_raw_device_pm_callback_end { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_driver; + int error; + char __data[0]; +}; + +struct trace_event_raw_suspend_resume { + struct trace_entry ent; + const char *action; + int val; + bool start; + char __data[0]; +}; + +struct trace_event_raw_wakeup_source { + struct trace_entry ent; + u32 __data_loc_name; + u64 state; + char __data[0]; +}; + +struct trace_event_raw_clock { + struct trace_entry ent; + u32 __data_loc_name; + u64 state; + u64 cpu_id; + char __data[0]; +}; + +struct trace_event_raw_power_domain { + struct trace_entry ent; + u32 __data_loc_name; + u64 state; + u64 cpu_id; + char __data[0]; +}; + +struct trace_event_raw_cpu_latency_qos_request { + struct trace_entry ent; + s32 value; + char __data[0]; +}; + +struct trace_event_raw_pm_qos_update { + struct trace_entry ent; + enum pm_qos_req_action action; + int prev_value; + int curr_value; + char __data[0]; +}; + +struct trace_event_raw_dev_pm_qos_request { + struct trace_entry ent; + u32 __data_loc_name; + enum dev_pm_qos_req_type type; + s32 new_value; + char __data[0]; +}; + +struct trace_event_data_offsets_cpu {}; + +struct trace_event_data_offsets_powernv_throttle { + u32 reason; +}; + +struct trace_event_data_offsets_pstate_sample {}; + +struct trace_event_data_offsets_cpu_frequency_limits {}; + +struct trace_event_data_offsets_device_pm_callback_start { + u32 device; + u32 driver; + u32 parent; + u32 pm_ops; +}; + +struct trace_event_data_offsets_device_pm_callback_end { + u32 device; + u32 driver; +}; + +struct trace_event_data_offsets_suspend_resume {}; + +struct trace_event_data_offsets_wakeup_source { + u32 name; +}; + +struct trace_event_data_offsets_clock { + u32 name; +}; + +struct trace_event_data_offsets_power_domain { + u32 name; +}; + +struct trace_event_data_offsets_cpu_latency_qos_request {}; + 
+struct trace_event_data_offsets_pm_qos_update {}; + +struct trace_event_data_offsets_dev_pm_qos_request { + u32 name; +}; + +typedef void (*btf_trace_cpu_idle)(void *, unsigned int, unsigned int); + +typedef void (*btf_trace_powernv_throttle)(void *, int, const char *, int); + +typedef void (*btf_trace_pstate_sample)(void *, u32, u32, u32, u32, u64, u64, u64, u32, u32); + +typedef void (*btf_trace_cpu_frequency)(void *, unsigned int, unsigned int); + +typedef void (*btf_trace_cpu_frequency_limits)(void *, struct cpufreq_policy *); + +typedef void (*btf_trace_device_pm_callback_start)(void *, struct device *, const char *, int); + +typedef void (*btf_trace_device_pm_callback_end)(void *, struct device *, int); + +typedef void (*btf_trace_suspend_resume)(void *, const char *, int, bool); + +typedef void (*btf_trace_wakeup_source_activate)(void *, const char *, unsigned int); + +typedef void (*btf_trace_wakeup_source_deactivate)(void *, const char *, unsigned int); + +typedef void (*btf_trace_clock_enable)(void *, const char *, unsigned int, unsigned int); + +typedef void (*btf_trace_clock_disable)(void *, const char *, unsigned int, unsigned int); + +typedef void (*btf_trace_clock_set_rate)(void *, const char *, unsigned int, unsigned int); + +typedef void (*btf_trace_power_domain_target)(void *, const char *, unsigned int, unsigned int); + +typedef void (*btf_trace_pm_qos_add_request)(void *, s32); + +typedef void (*btf_trace_pm_qos_update_request)(void *, s32); + +typedef void (*btf_trace_pm_qos_remove_request)(void *, s32); + +typedef void (*btf_trace_pm_qos_update_target)(void *, enum pm_qos_req_action, int, int); + +typedef void (*btf_trace_pm_qos_update_flags)(void *, enum pm_qos_req_action, int, int); + +typedef void (*btf_trace_dev_pm_qos_add_request)(void *, const char *, enum dev_pm_qos_req_type, s32); + +typedef void (*btf_trace_dev_pm_qos_update_request)(void *, const char *, enum dev_pm_qos_req_type, s32); + +typedef void (*btf_trace_dev_pm_qos_remove_request)(void *, const char *, enum dev_pm_qos_req_type, s32); + +struct trace_event_raw_rpm_internal { + struct trace_entry ent; + u32 __data_loc_name; + int flags; + int usage_count; + int disable_depth; + int runtime_auto; + int request_pending; + int irq_safe; + int child_count; + char __data[0]; +}; + +struct trace_event_raw_rpm_return_int { + struct trace_entry ent; + u32 __data_loc_name; + long unsigned int ip; + int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_rpm_internal { + u32 name; +}; + +struct trace_event_data_offsets_rpm_return_int { + u32 name; +}; + +typedef void (*btf_trace_rpm_suspend)(void *, struct device *, int); + +typedef void (*btf_trace_rpm_resume)(void *, struct device *, int); + +typedef void (*btf_trace_rpm_idle)(void *, struct device *, int); + +typedef void (*btf_trace_rpm_usage)(void *, struct device *, int); + +typedef void (*btf_trace_rpm_return_int)(void *, struct device *, long unsigned int, int); + +struct trace_probe_log { + const char *subsystem; + const char **argv; + int argc; + int index; +}; + +enum uprobe_filter_ctx { + UPROBE_FILTER_REGISTER = 0, + UPROBE_FILTER_UNREGISTER = 1, + UPROBE_FILTER_MMAP = 2, +}; + +struct uprobe_consumer { + int (*handler)(struct uprobe_consumer *, struct pt_regs *); + int (*ret_handler)(struct uprobe_consumer *, long unsigned int, struct pt_regs *); + bool (*filter)(struct uprobe_consumer *, enum uprobe_filter_ctx, struct mm_struct *); + struct uprobe_consumer *next; +}; + +struct uprobe_trace_entry_head { + struct trace_entry ent; + long 
unsigned int vaddr[0]; +}; + +struct trace_uprobe { + struct dyn_event devent; + struct uprobe_consumer consumer; + struct path path; + struct inode *inode; + char *filename; + long unsigned int offset; + long unsigned int ref_ctr_offset; + long unsigned int nhit; + struct trace_probe tp; +}; + +struct uprobe_dispatch_data { + struct trace_uprobe *tu; + long unsigned int bp_addr; +}; + +struct uprobe_cpu_buffer { + struct mutex mutex; + void *buf; +}; + +typedef bool (*filter_func_t)(struct uprobe_consumer *, enum uprobe_filter_ctx, struct mm_struct *); + +struct rhash_lock_head; + +struct bucket_table { + unsigned int size; + unsigned int nest; + u32 hash_rnd; + struct list_head walkers; + struct callback_head rcu; + struct bucket_table *future_tbl; + struct lockdep_map dep_map; + long: 64; + struct rhash_lock_head *buckets[0]; +}; + +enum xdp_action { + XDP_ABORTED = 0, + XDP_DROP = 1, + XDP_PASS = 2, + XDP_TX = 3, + XDP_REDIRECT = 4, +}; + +enum xdp_mem_type { + MEM_TYPE_PAGE_SHARED = 0, + MEM_TYPE_PAGE_ORDER0 = 1, + MEM_TYPE_PAGE_POOL = 2, + MEM_TYPE_XSK_BUFF_POOL = 3, + MEM_TYPE_MAX = 4, +}; + +struct xdp_cpumap_stats { + unsigned int redirect; + unsigned int pass; + unsigned int drop; +}; + +struct bpf_prog_dummy { + struct bpf_prog prog; +}; + +typedef u64 (*btf_bpf_user_rnd_u32)(); + +typedef u64 (*btf_bpf_get_raw_cpu_id)(); + +struct _bpf_dtab_netdev { + struct net_device *dev; +}; + +struct rhash_lock_head {}; + +struct zero_copy_allocator; + +struct page_pool; + +struct xdp_mem_allocator { + struct xdp_mem_info mem; + union { + void *allocator; + struct page_pool *page_pool; + struct zero_copy_allocator *zc_alloc; + }; + struct rhash_head node; + struct callback_head rcu; +}; + +struct trace_event_raw_xdp_exception { + struct trace_entry ent; + int prog_id; + u32 act; + int ifindex; + char __data[0]; +}; + +struct trace_event_raw_xdp_bulk_tx { + struct trace_entry ent; + int ifindex; + u32 act; + int drops; + int sent; + int err; + char __data[0]; +}; + +struct trace_event_raw_xdp_redirect_template { + struct trace_entry ent; + int prog_id; + u32 act; + int ifindex; + int err; + int to_ifindex; + u32 map_id; + int map_index; + char __data[0]; +}; + +struct trace_event_raw_xdp_cpumap_kthread { + struct trace_entry ent; + int map_id; + u32 act; + int cpu; + unsigned int drops; + unsigned int processed; + int sched; + unsigned int xdp_pass; + unsigned int xdp_drop; + unsigned int xdp_redirect; + char __data[0]; +}; + +struct trace_event_raw_xdp_cpumap_enqueue { + struct trace_entry ent; + int map_id; + u32 act; + int cpu; + unsigned int drops; + unsigned int processed; + int to_cpu; + char __data[0]; +}; + +struct trace_event_raw_xdp_devmap_xmit { + struct trace_entry ent; + int from_ifindex; + u32 act; + int to_ifindex; + int drops; + int sent; + int err; + char __data[0]; +}; + +struct trace_event_raw_mem_disconnect { + struct trace_entry ent; + const struct xdp_mem_allocator *xa; + u32 mem_id; + u32 mem_type; + const void *allocator; + char __data[0]; +}; + +struct trace_event_raw_mem_connect { + struct trace_entry ent; + const struct xdp_mem_allocator *xa; + u32 mem_id; + u32 mem_type; + const void *allocator; + const struct xdp_rxq_info *rxq; + int ifindex; + char __data[0]; +}; + +struct trace_event_raw_mem_return_failed { + struct trace_entry ent; + const struct page *page; + u32 mem_id; + u32 mem_type; + char __data[0]; +}; + +struct trace_event_data_offsets_xdp_exception {}; + +struct trace_event_data_offsets_xdp_bulk_tx {}; + +struct 
trace_event_data_offsets_xdp_redirect_template {}; + +struct trace_event_data_offsets_xdp_cpumap_kthread {}; + +struct trace_event_data_offsets_xdp_cpumap_enqueue {}; + +struct trace_event_data_offsets_xdp_devmap_xmit {}; + +struct trace_event_data_offsets_mem_disconnect {}; + +struct trace_event_data_offsets_mem_connect {}; + +struct trace_event_data_offsets_mem_return_failed {}; + +typedef void (*btf_trace_xdp_exception)(void *, const struct net_device *, const struct bpf_prog *, u32); + +typedef void (*btf_trace_xdp_bulk_tx)(void *, const struct net_device *, int, int, int); + +typedef void (*btf_trace_xdp_redirect)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, enum bpf_map_type, u32, u32); + +typedef void (*btf_trace_xdp_redirect_err)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, enum bpf_map_type, u32, u32); + +typedef void (*btf_trace_xdp_redirect_map)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, enum bpf_map_type, u32, u32); + +typedef void (*btf_trace_xdp_redirect_map_err)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, enum bpf_map_type, u32, u32); + +typedef void (*btf_trace_xdp_cpumap_kthread)(void *, int, unsigned int, unsigned int, int, struct xdp_cpumap_stats *); + +typedef void (*btf_trace_xdp_cpumap_enqueue)(void *, int, unsigned int, unsigned int, int); + +typedef void (*btf_trace_xdp_devmap_xmit)(void *, const struct net_device *, const struct net_device *, int, int, int); + +typedef void (*btf_trace_mem_disconnect)(void *, const struct xdp_mem_allocator *); + +typedef void (*btf_trace_mem_connect)(void *, const struct xdp_mem_allocator *, const struct xdp_rxq_info *); + +typedef void (*btf_trace_mem_return_failed)(void *, const struct xdp_mem_info *, const struct page *); + +enum bpf_cmd { + BPF_MAP_CREATE = 0, + BPF_MAP_LOOKUP_ELEM = 1, + BPF_MAP_UPDATE_ELEM = 2, + BPF_MAP_DELETE_ELEM = 3, + BPF_MAP_GET_NEXT_KEY = 4, + BPF_PROG_LOAD = 5, + BPF_OBJ_PIN = 6, + BPF_OBJ_GET = 7, + BPF_PROG_ATTACH = 8, + BPF_PROG_DETACH = 9, + BPF_PROG_TEST_RUN = 10, + BPF_PROG_GET_NEXT_ID = 11, + BPF_MAP_GET_NEXT_ID = 12, + BPF_PROG_GET_FD_BY_ID = 13, + BPF_MAP_GET_FD_BY_ID = 14, + BPF_OBJ_GET_INFO_BY_FD = 15, + BPF_PROG_QUERY = 16, + BPF_RAW_TRACEPOINT_OPEN = 17, + BPF_BTF_LOAD = 18, + BPF_BTF_GET_FD_BY_ID = 19, + BPF_TASK_FD_QUERY = 20, + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 21, + BPF_MAP_FREEZE = 22, + BPF_BTF_GET_NEXT_ID = 23, + BPF_MAP_LOOKUP_BATCH = 24, + BPF_MAP_LOOKUP_AND_DELETE_BATCH = 25, + BPF_MAP_UPDATE_BATCH = 26, + BPF_MAP_DELETE_BATCH = 27, + BPF_LINK_CREATE = 28, + BPF_LINK_UPDATE = 29, + BPF_LINK_GET_FD_BY_ID = 30, + BPF_LINK_GET_NEXT_ID = 31, + BPF_ENABLE_STATS = 32, + BPF_ITER_CREATE = 33, + BPF_LINK_DETACH = 34, + BPF_PROG_BIND_MAP = 35, +}; + +enum { + BPF_ANY = 0, + BPF_NOEXIST = 1, + BPF_EXIST = 2, + BPF_F_LOCK = 4, +}; + +enum { + BPF_F_NO_PREALLOC = 1, + BPF_F_NO_COMMON_LRU = 2, + BPF_F_NUMA_NODE = 4, + BPF_F_RDONLY = 8, + BPF_F_WRONLY = 16, + BPF_F_STACK_BUILD_ID = 32, + BPF_F_ZERO_SEED = 64, + BPF_F_RDONLY_PROG = 128, + BPF_F_WRONLY_PROG = 256, + BPF_F_CLONE = 512, + BPF_F_MMAPABLE = 1024, + BPF_F_PRESERVE_ELEMS = 2048, + BPF_F_INNER_MAP = 4096, +}; + +enum bpf_stats_type { + BPF_STATS_RUN_TIME = 0, +}; + +struct bpf_prog_info { + __u32 type; + __u32 id; + __u8 tag[8]; + __u32 jited_prog_len; + __u32 xlated_prog_len; + __u64 jited_prog_insns; + __u64 xlated_prog_insns; + __u64 load_time; + __u32 created_by_uid; + __u32 nr_map_ids; + __u64 
map_ids; + char name[16]; + __u32 ifindex; + __u32 gpl_compatible: 1; + __u64 netns_dev; + __u64 netns_ino; + __u32 nr_jited_ksyms; + __u32 nr_jited_func_lens; + __u64 jited_ksyms; + __u64 jited_func_lens; + __u32 btf_id; + __u32 func_info_rec_size; + __u64 func_info; + __u32 nr_func_info; + __u32 nr_line_info; + __u64 line_info; + __u64 jited_line_info; + __u32 nr_jited_line_info; + __u32 line_info_rec_size; + __u32 jited_line_info_rec_size; + __u32 nr_prog_tags; + __u64 prog_tags; + __u64 run_time_ns; + __u64 run_cnt; + __u64 recursion_misses; +}; + +struct bpf_map_info { + __u32 type; + __u32 id; + __u32 key_size; + __u32 value_size; + __u32 max_entries; + __u32 map_flags; + char name[16]; + __u32 ifindex; + __u32 btf_vmlinux_value_type_id; + __u64 netns_dev; + __u64 netns_ino; + __u32 btf_id; + __u32 btf_key_type_id; + __u32 btf_value_type_id; +}; + +struct bpf_btf_info { + __u64 btf; + __u32 btf_size; + __u32 id; + __u64 name; + __u32 name_len; + __u32 kernel_btf; +}; + +struct bpf_spin_lock { + __u32 val; +}; + +struct bpf_verifier_log { + u32 level; + char kbuf[1024]; + char *ubuf; + u32 len_used; + u32 len_total; +}; + +struct bpf_subprog_info { + u32 start; + u32 linfo_idx; + u16 stack_depth; + bool has_tail_call; + bool tail_call_reachable; + bool has_ld_abs; +}; + +struct bpf_id_pair { + u32 old; + u32 cur; +}; + +struct bpf_verifier_stack_elem; + +struct bpf_verifier_state; + +struct bpf_verifier_state_list; + +struct bpf_insn_aux_data; + +struct bpf_verifier_env { + u32 insn_idx; + u32 prev_insn_idx; + struct bpf_prog *prog; + const struct bpf_verifier_ops *ops; + struct bpf_verifier_stack_elem *head; + int stack_size; + bool strict_alignment; + bool test_state_freq; + struct bpf_verifier_state *cur_state; + struct bpf_verifier_state_list **explored_states; + struct bpf_verifier_state_list *free_list; + struct bpf_map *used_maps[64]; + struct btf_mod_pair used_btfs[64]; + u32 used_map_cnt; + u32 used_btf_cnt; + u32 id_gen; + bool explore_alu_limits; + bool allow_ptr_leaks; + bool allow_uninit_stack; + bool allow_ptr_to_map_access; + bool bpf_capable; + bool bypass_spec_v1; + bool bypass_spec_v4; + bool seen_direct_write; + struct bpf_insn_aux_data *insn_aux_data; + const struct bpf_line_info *prev_linfo; + struct bpf_verifier_log log; + struct bpf_subprog_info subprog_info[257]; + struct bpf_id_pair idmap_scratch[75]; + struct { + int *insn_state; + int *insn_stack; + int cur_stack; + } cfg; + u32 pass_cnt; + u32 subprog_cnt; + u32 prev_insn_processed; + u32 insn_processed; + u32 prev_jmps_processed; + u32 jmps_processed; + u64 verification_time; + u32 max_states_per_insn; + u32 total_states; + u32 peak_states; + u32 longest_mark_read_walk; +}; + +struct tnum { + u64 value; + u64 mask; +}; + +enum bpf_reg_liveness { + REG_LIVE_NONE = 0, + REG_LIVE_READ32 = 1, + REG_LIVE_READ64 = 2, + REG_LIVE_READ = 3, + REG_LIVE_WRITTEN = 4, + REG_LIVE_DONE = 8, +}; + +struct bpf_reg_state { + enum bpf_reg_type type; + s32 off; + union { + int range; + struct bpf_map *map_ptr; + struct { + struct btf *btf; + u32 btf_id; + }; + u32 mem_size; + struct { + long unsigned int raw1; + long unsigned int raw2; + } raw; + u32 subprogno; + }; + u32 id; + u32 ref_obj_id; + struct tnum var_off; + s64 smin_value; + s64 smax_value; + u64 umin_value; + u64 umax_value; + s32 s32_min_value; + s32 s32_max_value; + u32 u32_min_value; + u32 u32_max_value; + struct bpf_reg_state *parent; + u32 frameno; + s32 subreg_def; + enum bpf_reg_liveness live; + bool precise; +}; + +struct bpf_reference_state; + +struct 
bpf_stack_state; + +struct bpf_func_state { + struct bpf_reg_state regs[11]; + int callsite; + u32 frameno; + u32 subprogno; + int acquired_refs; + struct bpf_reference_state *refs; + int allocated_stack; + bool in_callback_fn; + struct bpf_stack_state *stack; +}; + +struct bpf_attach_target_info { + struct btf_func_model fmodel; + long int tgt_addr; + const char *tgt_name; + const struct btf_type *tgt_type; +}; + +struct bpf_link_primer { + struct bpf_link *link; + struct file *file; + int fd; + u32 id; +}; + +struct bpf_stack_state { + struct bpf_reg_state spilled_ptr; + u8 slot_type[8]; +}; + +struct bpf_reference_state { + int id; + int insn_idx; +}; + +struct bpf_idx_pair { + u32 prev_idx; + u32 idx; +}; + +struct bpf_verifier_state { + struct bpf_func_state *frame[8]; + struct bpf_verifier_state *parent; + u32 branches; + u32 insn_idx; + u32 curframe; + u32 active_spin_lock; + bool speculative; + u32 first_insn_idx; + u32 last_insn_idx; + struct bpf_idx_pair *jmp_history; + u32 jmp_history_cnt; +}; + +struct bpf_verifier_state_list { + struct bpf_verifier_state state; + struct bpf_verifier_state_list *next; + int miss_cnt; + int hit_cnt; +}; + +struct bpf_insn_aux_data { + union { + enum bpf_reg_type ptr_type; + long unsigned int map_ptr_state; + s32 call_imm; + u32 alu_limit; + struct { + u32 map_index; + u32 map_off; + }; + struct { + enum bpf_reg_type reg_type; + union { + struct { + struct btf *btf; + u32 btf_id; + }; + u32 mem_size; + }; + } btf_var; + }; + u64 map_key_state; + int ctx_field_size; + u32 seen; + bool sanitize_stack_spill; + bool zext_dst; + u8 alu_state; + unsigned int orig_idx; + bool prune_point; +}; + +enum perf_bpf_event_type { + PERF_BPF_EVENT_UNKNOWN = 0, + PERF_BPF_EVENT_PROG_LOAD = 1, + PERF_BPF_EVENT_PROG_UNLOAD = 2, + PERF_BPF_EVENT_MAX = 3, +}; + +enum bpf_audit { + BPF_AUDIT_LOAD = 0, + BPF_AUDIT_UNLOAD = 1, + BPF_AUDIT_MAX = 2, +}; + +struct bpf_tracing_link { + struct bpf_link link; + enum bpf_attach_type attach_type; + struct bpf_trampoline *trampoline; + struct bpf_prog *tgt_prog; +}; + +struct bpf_raw_tp_link { + struct bpf_link link; + struct bpf_raw_event_map *btp; +}; + +struct btf_member { + __u32 name_off; + __u32 type; + __u32 offset; +}; + +struct btf_param { + __u32 name_off; + __u32 type; +}; + +enum btf_func_linkage { + BTF_FUNC_STATIC = 0, + BTF_FUNC_GLOBAL = 1, + BTF_FUNC_EXTERN = 2, +}; + +struct btf_var_secinfo { + __u32 type; + __u32 offset; + __u32 size; +}; + +enum sk_action { + SK_DROP = 0, + SK_PASS = 1, +}; + +struct bpf_kfunc_desc { + struct btf_func_model func_model; + u32 func_id; + s32 imm; +}; + +struct bpf_kfunc_desc_tab { + struct bpf_kfunc_desc descs[256]; + u32 nr_descs; +}; + +struct bpf_struct_ops { + const struct bpf_verifier_ops *verifier_ops; + int (*init)(struct btf *); + int (*check_member)(const struct btf_type *, const struct btf_member *); + int (*init_member)(const struct btf_type *, const struct btf_member *, void *, const void *); + int (*reg)(void *); + void (*unreg)(void *); + const struct btf_type *type; + const struct btf_type *value_type; + const char *name; + struct btf_func_model func_models[64]; + u32 type_id; + u32 value_id; +}; + +typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type, const struct bpf_insn *, struct bpf_insn *, struct bpf_prog *, u32 *); + +enum bpf_stack_slot_type { + STACK_INVALID = 0, + STACK_SPILL = 1, + STACK_MISC = 2, + STACK_ZERO = 3, +}; + +struct bpf_verifier_stack_elem { + struct bpf_verifier_state st; + int insn_idx; + int prev_insn_idx; + struct 
bpf_verifier_stack_elem *next; + u32 log_pos; +}; + +enum { + BTF_SOCK_TYPE_INET = 0, + BTF_SOCK_TYPE_INET_CONN = 1, + BTF_SOCK_TYPE_INET_REQ = 2, + BTF_SOCK_TYPE_INET_TW = 3, + BTF_SOCK_TYPE_REQ = 4, + BTF_SOCK_TYPE_SOCK = 5, + BTF_SOCK_TYPE_SOCK_COMMON = 6, + BTF_SOCK_TYPE_TCP = 7, + BTF_SOCK_TYPE_TCP_REQ = 8, + BTF_SOCK_TYPE_TCP_TW = 9, + BTF_SOCK_TYPE_TCP6 = 10, + BTF_SOCK_TYPE_UDP = 11, + BTF_SOCK_TYPE_UDP6 = 12, + MAX_BTF_SOCK_TYPE = 13, +}; + +typedef void (*bpf_insn_print_t)(void *, const char *, ...); + +typedef const char * (*bpf_insn_revmap_call_t)(void *, const struct bpf_insn *); + +typedef const char * (*bpf_insn_print_imm_t)(void *, const struct bpf_insn *, __u64); + +struct bpf_insn_cbs { + bpf_insn_print_t cb_print; + bpf_insn_revmap_call_t cb_call; + bpf_insn_print_imm_t cb_imm; + void *private_data; +}; + +struct bpf_call_arg_meta { + struct bpf_map *map_ptr; + bool raw_mode; + bool pkt_access; + int regno; + int access_size; + int mem_size; + u64 msize_max_value; + int ref_obj_id; + int func_id; + struct btf *btf; + u32 btf_id; + struct btf *ret_btf; + u32 ret_btf_id; + u32 subprogno; +}; + +enum reg_arg_type { + SRC_OP = 0, + DST_OP = 1, + DST_OP_NO_MARK = 2, +}; + +enum stack_access_src { + ACCESS_DIRECT = 1, + ACCESS_HELPER = 2, +}; + +struct bpf_reg_types { + const enum bpf_reg_type types[10]; + u32 *btf_id; +}; + +enum { + AT_PKT_END = 4294967295, + BEYOND_PKT_END = 4294967294, +}; + +typedef int (*set_callee_state_fn)(struct bpf_verifier_env *, struct bpf_func_state *, struct bpf_func_state *, int); + +enum { + REASON_BOUNDS = 4294967295, + REASON_TYPE = 4294967294, + REASON_PATHS = 4294967293, + REASON_LIMIT = 4294967292, + REASON_STACK = 4294967291, +}; + +struct bpf_sanitize_info { + struct bpf_insn_aux_data aux; + bool mask_to_left; +}; + +enum { + DISCOVERED = 16, + EXPLORED = 32, + FALLTHROUGH = 1, + BRANCH = 2, +}; + +enum { + DONE_EXPLORING = 0, + KEEP_EXPLORING = 1, +}; + +struct tree_descr { + const char *name; + const struct file_operations *ops; + int mode; +}; + +struct bpf_preload_info { + char link_name[16]; + int link_id; +}; + +struct bpf_preload_ops { + struct umd_info info; + int (*preload)(struct bpf_preload_info *); + int (*finish)(); + struct module *owner; +}; + +enum bpf_type { + BPF_TYPE_UNSPEC = 0, + BPF_TYPE_PROG = 1, + BPF_TYPE_MAP = 2, + BPF_TYPE_LINK = 3, +}; + +struct map_iter { + void *key; + bool done; +}; + +enum { + OPT_MODE = 0, +}; + +struct bpf_mount_opts { + umode_t mode; +}; + +struct bpf_pidns_info { + __u32 pid; + __u32 tgid; +}; + +struct bpf_cgroup_storage_info { + struct task_struct *task; + struct bpf_cgroup_storage *storage[2]; +}; + +typedef u64 (*btf_bpf_map_lookup_elem)(struct bpf_map *, void *); + +typedef u64 (*btf_bpf_map_update_elem)(struct bpf_map *, void *, void *, u64); + +typedef u64 (*btf_bpf_map_delete_elem)(struct bpf_map *, void *); + +typedef u64 (*btf_bpf_map_push_elem)(struct bpf_map *, void *, u64); + +typedef u64 (*btf_bpf_map_pop_elem)(struct bpf_map *, void *); + +typedef u64 (*btf_bpf_map_peek_elem)(struct bpf_map *, void *); + +typedef u64 (*btf_bpf_get_smp_processor_id)(); + +typedef u64 (*btf_bpf_get_numa_node_id)(); + +typedef u64 (*btf_bpf_ktime_get_ns)(); + +typedef u64 (*btf_bpf_ktime_get_boot_ns)(); + +typedef u64 (*btf_bpf_ktime_get_coarse_ns)(); + +typedef u64 (*btf_bpf_get_current_pid_tgid)(); + +typedef u64 (*btf_bpf_get_current_uid_gid)(); + +typedef u64 (*btf_bpf_get_current_comm)(char *, u32); + +typedef u64 (*btf_bpf_spin_lock)(struct bpf_spin_lock *); + +typedef u64 
(*btf_bpf_spin_unlock)(struct bpf_spin_lock *); + +typedef u64 (*btf_bpf_jiffies64)(); + +typedef u64 (*btf_bpf_get_current_cgroup_id)(); + +typedef u64 (*btf_bpf_get_current_ancestor_cgroup_id)(int); + +typedef u64 (*btf_bpf_get_local_storage)(struct bpf_map *, u64); + +typedef u64 (*btf_bpf_strtol)(const char *, size_t, u64, long int *); + +typedef u64 (*btf_bpf_strtoul)(const char *, size_t, u64, long unsigned int *); + +typedef u64 (*btf_bpf_get_ns_current_pid_tgid)(u64, u64, struct bpf_pidns_info *, u32); + +typedef u64 (*btf_bpf_event_output_data)(void *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_copy_from_user)(void *, u32, const void *); + +typedef u64 (*btf_bpf_per_cpu_ptr)(const void *, u32); + +typedef u64 (*btf_bpf_this_cpu_ptr)(const void *); + +struct bpf_bprintf_buffers { + char tmp_bufs[1536]; +}; + +typedef u64 (*btf_bpf_snprintf)(char *, u32, char *, const void *, u32); + +union bpf_iter_link_info { + struct { + __u32 map_fd; + } map; +}; + +typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *, union bpf_iter_link_info *, struct bpf_iter_aux_info *); + +typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *); + +typedef void (*bpf_iter_show_fdinfo_t)(const struct bpf_iter_aux_info *, struct seq_file *); + +typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *, struct bpf_link_info *); + +enum bpf_iter_feature { + BPF_ITER_RESCHED = 1, +}; + +struct bpf_iter_reg { + const char *target; + bpf_iter_attach_target_t attach_target; + bpf_iter_detach_target_t detach_target; + bpf_iter_show_fdinfo_t show_fdinfo; + bpf_iter_fill_link_info_t fill_link_info; + u32 ctx_arg_info_size; + u32 feature; + struct bpf_ctx_arg_aux ctx_arg_info[2]; + const struct bpf_iter_seq_info *seq_info; +}; + +struct bpf_iter_meta { + union { + struct seq_file *seq; + }; + u64 session_id; + u64 seq_num; +}; + +struct bpf_iter_target_info { + struct list_head list; + const struct bpf_iter_reg *reg_info; + u32 btf_id; +}; + +struct bpf_iter_link { + struct bpf_link link; + struct bpf_iter_aux_info aux; + struct bpf_iter_target_info *tinfo; +}; + +struct bpf_iter_priv_data { + struct bpf_iter_target_info *tinfo; + const struct bpf_iter_seq_info *seq_info; + struct bpf_prog *prog; + u64 session_id; + u64 seq_num; + bool done_stop; + long: 56; + u8 target_private[0]; +}; + +typedef u64 (*btf_bpf_for_each_map_elem)(struct bpf_map *, void *, void *, u64); + +struct bpf_iter_seq_map_info { + u32 map_id; +}; + +struct bpf_iter__bpf_map { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_map *map; + }; +}; + +struct bpf_iter_seq_task_common { + struct pid_namespace *ns; +}; + +struct bpf_iter_seq_task_info { + struct bpf_iter_seq_task_common common; + u32 tid; +}; + +struct bpf_iter__task { + union { + struct bpf_iter_meta *meta; + }; + union { + struct task_struct *task; + }; +}; + +struct bpf_iter_seq_task_file_info { + struct bpf_iter_seq_task_common common; + struct task_struct *task; + u32 tid; + u32 fd; +}; + +struct bpf_iter__task_file { + union { + struct bpf_iter_meta *meta; + }; + union { + struct task_struct *task; + }; + u32 fd; + union { + struct file *file; + }; +}; + +struct bpf_iter_seq_task_vma_info { + struct bpf_iter_seq_task_common common; + struct task_struct *task; + struct vm_area_struct *vma; + u32 tid; + long unsigned int prev_vm_start; + long unsigned int prev_vm_end; +}; + +enum bpf_task_vma_iter_find_op { + task_vma_iter_first_vma = 0, + task_vma_iter_next_vma = 1, + task_vma_iter_find_vma = 2, +}; + +struct 
bpf_iter__task_vma { + union { + struct bpf_iter_meta *meta; + }; + union { + struct task_struct *task; + }; + union { + struct vm_area_struct *vma; + }; +}; + +struct bpf_iter_seq_prog_info { + u32 prog_id; +}; + +struct bpf_iter__bpf_prog { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_prog *prog; + }; +}; + +struct bpf_iter__bpf_map_elem { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_map *map; + }; + union { + void *key; + }; + union { + void *value; + }; +}; + +struct pcpu_freelist_node; + +struct pcpu_freelist_head { + struct pcpu_freelist_node *first; + raw_spinlock_t lock; +}; + +struct pcpu_freelist_node { + struct pcpu_freelist_node *next; +}; + +struct pcpu_freelist { + struct pcpu_freelist_head *freelist; + struct pcpu_freelist_head extralist; +}; + +struct bpf_lru_node { + struct list_head list; + u16 cpu; + u8 type; + u8 ref; +}; + +struct bpf_lru_list { + struct list_head lists[3]; + unsigned int counts[2]; + struct list_head *next_inactive_rotation; + raw_spinlock_t lock; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct bpf_lru_locallist { + struct list_head lists[2]; + u16 next_steal; + raw_spinlock_t lock; +}; + +struct bpf_common_lru { + struct bpf_lru_list lru_list; + struct bpf_lru_locallist *local_list; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +typedef bool (*del_from_htab_func)(void *, struct bpf_lru_node *); + +struct bpf_lru { + union { + struct bpf_common_lru common_lru; + struct bpf_lru_list *percpu_lru; + }; + del_from_htab_func del_from_htab; + void *del_arg; + unsigned int hash_offset; + unsigned int nr_scans; + bool percpu; + long: 56; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct bucket { + struct hlist_nulls_head head; + union { + raw_spinlock_t raw_lock; + spinlock_t lock; + }; +}; + +struct htab_elem; + +struct bpf_htab { + struct bpf_map map; + struct bucket *buckets; + void *elems; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + union { + struct pcpu_freelist freelist; + struct bpf_lru lru; + }; + struct htab_elem **extra_elems; + atomic_t count; + u32 n_buckets; + u32 elem_size; + u32 hashrnd; + struct lock_class_key lockdep_key; + int *map_locked[8]; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct htab_elem { + union { + struct hlist_nulls_node hash_node; + struct { + void *padding; + union { + struct bpf_htab *htab; + struct pcpu_freelist_node fnode; + struct htab_elem *batch_flink; + }; + }; + }; + union { + struct callback_head rcu; + struct bpf_lru_node lru_node; + }; + u32 hash; + int: 32; + char key[0]; +}; + +struct bpf_iter_seq_hash_map_info { + struct bpf_map *map; + struct bpf_htab *htab; + void *percpu_value_buf; + u32 bucket_id; + u32 skip_elems; +}; + +struct bpf_iter_seq_array_map_info { + struct bpf_map *map; + void *percpu_value_buf; + u32 index; +}; + +struct prog_poke_elem { + struct list_head list; + struct bpf_prog_aux *aux; +}; + +enum bpf_lru_list_type { + BPF_LRU_LIST_T_ACTIVE = 0, + BPF_LRU_LIST_T_INACTIVE = 1, + BPF_LRU_LIST_T_FREE = 2, + BPF_LRU_LOCAL_LIST_T_FREE = 3, + BPF_LRU_LOCAL_LIST_T_PENDING = 4, +}; + +struct bpf_lpm_trie_key { + __u32 prefixlen; + __u8 data[0]; +}; + +struct lpm_trie_node { + struct callback_head rcu; + struct lpm_trie_node *child[2]; + u32 prefixlen; + u32 flags; + u8 data[0]; +}; + +struct lpm_trie { + struct bpf_map map; + struct lpm_trie_node *root; + size_t n_entries; + size_t 
max_prefixlen; + size_t data_size; + spinlock_t lock; + long: 32; + long: 64; + long: 64; + long: 64; +}; + +struct bpf_cgroup_storage_map { + struct bpf_map map; + spinlock_t lock; + struct rb_root root; + struct list_head list; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct bpf_queue_stack { + struct bpf_map map; + raw_spinlock_t lock; + u32 head; + u32 tail; + u32 size; + char elements[0]; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +enum { + BPF_RB_NO_WAKEUP = 1, + BPF_RB_FORCE_WAKEUP = 2, +}; + +enum { + BPF_RB_AVAIL_DATA = 0, + BPF_RB_RING_SIZE = 1, + BPF_RB_CONS_POS = 2, + BPF_RB_PROD_POS = 3, +}; + +enum { + BPF_RINGBUF_BUSY_BIT = 2147483648, + BPF_RINGBUF_DISCARD_BIT = 1073741824, + BPF_RINGBUF_HDR_SZ = 8, +}; + +struct bpf_ringbuf { + wait_queue_head_t waitq; + struct irq_work work; + u64 mask; + struct page **pages; + int nr_pages; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + spinlock_t spinlock; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; 
+ long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long unsigned int consumer_pos; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + 
long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + 
long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long unsigned int producer_pos; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + 
long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + 
long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + char data[0]; +}; + +struct bpf_ringbuf_map { + struct bpf_map map; + struct bpf_ringbuf *rb; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct bpf_ringbuf_hdr { + u32 len; + u32 pg_off; +}; + +typedef u64 (*btf_bpf_ringbuf_reserve)(struct bpf_map *, u64, u64); + +typedef u64 (*btf_bpf_ringbuf_submit)(void *, u64); + +typedef u64 (*btf_bpf_ringbuf_discard)(void *, u64); + +typedef u64 (*btf_bpf_ringbuf_output)(struct bpf_map *, void *, u64, u64); + +typedef u64 (*btf_bpf_ringbuf_query)(struct bpf_map *, u64); + +struct bpf_local_storage_elem { + struct hlist_node map_node; + struct hlist_node snode; + struct bpf_local_storage *local_storage; + struct callback_head rcu; + long: 64; + struct bpf_local_storage_data sdata; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct bpf_local_storage_cache { + spinlock_t idx_lock; + u64 idx_usage_counts[16]; +}; + +enum { + BPF_LOCAL_STORAGE_GET_F_CREATE = 1, + BPF_SK_STORAGE_GET_F_CREATE = 1, +}; + +typedef u64 (*btf_bpf_task_storage_get)(struct bpf_map *, struct task_struct *, void *, u64); + +typedef u64 (*btf_bpf_task_storage_delete)(struct bpf_map *, struct task_struct *); + +struct lsm_blob_sizes { + int lbs_cred; + int lbs_file; + int lbs_inode; + int lbs_superblock; + int lbs_sock; + int lbs_ipc; + int lbs_msg_msg; + int lbs_task; +}; + +struct bpf_storage_blob { + struct bpf_local_storage *storage; +}; + +typedef u64 (*btf_bpf_inode_storage_get)(struct bpf_map *, struct inode *, void *, u64); + +typedef u64 (*btf_bpf_inode_storage_delete)(struct bpf_map *, struct inode *); + +struct btf_enum { + __u32 name_off; + __s32 val; +}; + +struct btf_array { + __u32 type; + __u32 index_type; + __u32 nelems; +}; + +enum { + BTF_VAR_STATIC = 0, + BTF_VAR_GLOBAL_ALLOCATED = 1, + BTF_VAR_GLOBAL_EXTERN = 2, +}; + +struct btf_var { + __u32 linkage; +}; + +struct bpf_flow_keys { + __u16 nhoff; + __u16 thoff; + __u16 addr_proto; + __u8 is_frag; + 
__u8 is_first_frag; + __u8 is_encap; + __u8 ip_proto; + __be16 n_proto; + __be16 sport; + __be16 dport; + union { + struct { + __be32 ipv4_src; + __be32 ipv4_dst; + }; + struct { + __u32 ipv6_src[4]; + __u32 ipv6_dst[4]; + }; + }; + __u32 flags; + __be32 flow_label; +}; + +struct bpf_sock { + __u32 bound_dev_if; + __u32 family; + __u32 type; + __u32 protocol; + __u32 mark; + __u32 priority; + __u32 src_ip4; + __u32 src_ip6[4]; + __u32 src_port; + __u32 dst_port; + __u32 dst_ip4; + __u32 dst_ip6[4]; + __u32 state; + __s32 rx_queue_mapping; +}; + +struct __sk_buff { + __u32 len; + __u32 pkt_type; + __u32 mark; + __u32 queue_mapping; + __u32 protocol; + __u32 vlan_present; + __u32 vlan_tci; + __u32 vlan_proto; + __u32 priority; + __u32 ingress_ifindex; + __u32 ifindex; + __u32 tc_index; + __u32 cb[5]; + __u32 hash; + __u32 tc_classid; + __u32 data; + __u32 data_end; + __u32 napi_id; + __u32 family; + __u32 remote_ip4; + __u32 local_ip4; + __u32 remote_ip6[4]; + __u32 local_ip6[4]; + __u32 remote_port; + __u32 local_port; + __u32 data_meta; + union { + struct bpf_flow_keys *flow_keys; + }; + __u64 tstamp; + __u32 wire_len; + __u32 gso_segs; + union { + struct bpf_sock *sk; + }; + __u32 gso_size; +}; + +struct xdp_md { + __u32 data; + __u32 data_end; + __u32 data_meta; + __u32 ingress_ifindex; + __u32 rx_queue_index; + __u32 egress_ifindex; +}; + +struct sk_msg_md { + union { + void *data; + }; + union { + void *data_end; + }; + __u32 family; + __u32 remote_ip4; + __u32 local_ip4; + __u32 remote_ip6[4]; + __u32 local_ip6[4]; + __u32 remote_port; + __u32 local_port; + __u32 size; + union { + struct bpf_sock *sk; + }; +}; + +struct sk_reuseport_md { + union { + void *data; + }; + union { + void *data_end; + }; + __u32 len; + __u32 eth_protocol; + __u32 ip_protocol; + __u32 bind_inany; + __u32 hash; +}; + +struct bpf_sock_addr { + __u32 user_family; + __u32 user_ip4; + __u32 user_ip6[4]; + __u32 user_port; + __u32 family; + __u32 type; + __u32 protocol; + __u32 msg_src_ip4; + __u32 msg_src_ip6[4]; + union { + struct bpf_sock *sk; + }; +}; + +struct bpf_sock_ops { + __u32 op; + union { + __u32 args[4]; + __u32 reply; + __u32 replylong[4]; + }; + __u32 family; + __u32 remote_ip4; + __u32 local_ip4; + __u32 remote_ip6[4]; + __u32 local_ip6[4]; + __u32 remote_port; + __u32 local_port; + __u32 is_fullsock; + __u32 snd_cwnd; + __u32 srtt_us; + __u32 bpf_sock_ops_cb_flags; + __u32 state; + __u32 rtt_min; + __u32 snd_ssthresh; + __u32 rcv_nxt; + __u32 snd_nxt; + __u32 snd_una; + __u32 mss_cache; + __u32 ecn_flags; + __u32 rate_delivered; + __u32 rate_interval_us; + __u32 packets_out; + __u32 retrans_out; + __u32 total_retrans; + __u32 segs_in; + __u32 data_segs_in; + __u32 segs_out; + __u32 data_segs_out; + __u32 lost_out; + __u32 sacked_out; + __u32 sk_txhash; + __u64 bytes_received; + __u64 bytes_acked; + union { + struct bpf_sock *sk; + }; + union { + void *skb_data; + }; + union { + void *skb_data_end; + }; + __u32 skb_len; + __u32 skb_tcp_flags; +}; + +struct bpf_cgroup_dev_ctx { + __u32 access_type; + __u32 major; + __u32 minor; +}; + +struct bpf_sysctl { + __u32 write; + __u32 file_pos; +}; + +struct bpf_sockopt { + union { + struct bpf_sock *sk; + }; + union { + void *optval; + }; + union { + void *optval_end; + }; + __s32 level; + __s32 optname; + __s32 optlen; + __s32 retval; +}; + +struct bpf_sk_lookup { + union { + union { + struct bpf_sock *sk; + }; + __u64 cookie; + }; + __u32 family; + __u32 protocol; + __u32 remote_ip4; + __u32 remote_ip6[4]; + __u32 remote_port; + __u32 local_ip4; + __u32 
local_ip6[4]; + __u32 local_port; +}; + +struct sk_reuseport_kern { + struct sk_buff *skb; + struct sock *sk; + struct sock *selected_sk; + void *data_end; + u32 hash; + u32 reuseport_id; + bool bind_inany; +}; + +struct bpf_flow_dissector { + struct bpf_flow_keys *flow_keys; + const struct sk_buff *skb; + const void *data; + const void *data_end; +}; + +struct inet_listen_hashbucket { + spinlock_t lock; + unsigned int count; + union { + struct hlist_head head; + struct hlist_nulls_head nulls_head; + }; +}; + +struct inet_ehash_bucket; + +struct inet_bind_hashbucket; + +struct inet_hashinfo { + struct inet_ehash_bucket *ehash; + spinlock_t *ehash_locks; + unsigned int ehash_mask; + unsigned int ehash_locks_mask; + struct kmem_cache *bind_bucket_cachep; + struct inet_bind_hashbucket *bhash; + unsigned int bhash_size; + unsigned int lhash2_mask; + struct inet_listen_hashbucket *lhash2; + long: 64; + struct inet_listen_hashbucket listening_hash[32]; +}; + +struct ip_ra_chain { + struct ip_ra_chain *next; + struct sock *sk; + union { + void (*destructor)(struct sock *); + struct sock *saved_sk; + }; + struct callback_head rcu; +}; + +struct fib_table { + struct hlist_node tb_hlist; + u32 tb_id; + int tb_num_default; + struct callback_head rcu; + long unsigned int *tb_data; + long unsigned int __data[0]; +}; + +struct inet_peer_base { + struct rb_root rb_root; + seqlock_t lock; + int total; +}; + +struct tcp_fastopen_context { + siphash_key_t key[2]; + int num; + struct callback_head rcu; +}; + +struct xdp_txq_info { + struct net_device *dev; +}; + +struct xdp_buff { + void *data; + void *data_end; + void *data_meta; + void *data_hard_start; + struct xdp_rxq_info *rxq; + struct xdp_txq_info *txq; + u32 frame_sz; +}; + +struct bpf_sock_addr_kern { + struct sock *sk; + struct sockaddr *uaddr; + u64 tmp_reg; + void *t_ctx; +}; + +struct bpf_sock_ops_kern { + struct sock *sk; + union { + u32 args[4]; + u32 reply; + u32 replylong[4]; + }; + struct sk_buff *syn_skb; + struct sk_buff *skb; + void *skb_data_end; + u8 op; + u8 is_fullsock; + u8 remaining_opt_len; + u64 temp; +}; + +struct bpf_sysctl_kern { + struct ctl_table_header *head; + struct ctl_table *table; + void *cur_val; + size_t cur_len; + void *new_val; + size_t new_len; + int new_updated; + int write; + loff_t *ppos; + u64 tmp_reg; +}; + +struct bpf_sockopt_kern { + struct sock *sk; + u8 *optval; + u8 *optval_end; + s32 level; + s32 optname; + s32 optlen; + s32 retval; +}; + +struct bpf_sk_lookup_kern { + u16 family; + u16 protocol; + __be16 sport; + u16 dport; + struct { + __be32 saddr; + __be32 daddr; + } v4; + struct { + const struct in6_addr *saddr; + const struct in6_addr *daddr; + } v6; + struct sock *selected_sk; + bool no_reuseport; +}; + +struct lwtunnel_state { + __u16 type; + __u16 flags; + __u16 headroom; + atomic_t refcnt; + int (*orig_output)(struct net *, struct sock *, struct sk_buff *); + int (*orig_input)(struct sk_buff *); + struct callback_head rcu; + __u8 data[0]; +}; + +struct sock_reuseport { + struct callback_head rcu; + u16 max_socks; + u16 num_socks; + unsigned int synq_overflow_ts; + unsigned int reuseport_id; + unsigned int bind_inany: 1; + unsigned int has_conns: 1; + struct bpf_prog *prog; + struct sock *socks[0]; +}; + +struct sk_psock_progs { + struct bpf_prog *msg_parser; + struct bpf_prog *stream_parser; + struct bpf_prog *stream_verdict; + struct bpf_prog *skb_verdict; +}; + +struct strp_stats { + long long unsigned int msgs; + long long unsigned int bytes; + unsigned int mem_fail; + unsigned int 
need_more_hdr; + unsigned int msg_too_big; + unsigned int msg_timeouts; + unsigned int bad_hdr_len; +}; + +struct strparser; + +struct strp_callbacks { + int (*parse_msg)(struct strparser *, struct sk_buff *); + void (*rcv_msg)(struct strparser *, struct sk_buff *); + int (*read_sock_done)(struct strparser *, int); + void (*abort_parser)(struct strparser *, int); + void (*lock)(struct strparser *); + void (*unlock)(struct strparser *); +}; + +struct strparser { + struct sock *sk; + u32 stopped: 1; + u32 paused: 1; + u32 aborted: 1; + u32 interrupted: 1; + u32 unrecov_intr: 1; + struct sk_buff **skb_nextp; + struct sk_buff *skb_head; + unsigned int need_bytes; + struct delayed_work msg_timer_work; + struct work_struct work; + struct strp_stats stats; + struct strp_callbacks cb; +}; + +struct sk_psock_work_state { + struct sk_buff *skb; + u32 len; + u32 off; +}; + +struct sk_msg; + +struct sk_psock { + struct sock *sk; + struct sock *sk_redir; + u32 apply_bytes; + u32 cork_bytes; + u32 eval; + struct sk_msg *cork; + struct sk_psock_progs progs; + struct strparser strp; + struct sk_buff_head ingress_skb; + struct list_head ingress_msg; + spinlock_t ingress_lock; + long unsigned int state; + struct list_head link; + spinlock_t link_lock; + refcount_t refcnt; + void (*saved_unhash)(struct sock *); + void (*saved_close)(struct sock *, long int); + void (*saved_write_space)(struct sock *); + void (*saved_data_ready)(struct sock *); + int (*psock_update_sk_prot)(struct sock *, struct sk_psock *, bool); + struct proto *sk_proto; + struct mutex work_mutex; + struct sk_psock_work_state work_state; + struct work_struct work; + struct rcu_work rwork; +}; + +struct inet_ehash_bucket { + struct hlist_nulls_head chain; +}; + +struct inet_bind_hashbucket { + spinlock_t lock; + struct hlist_head chain; +}; + +struct ack_sample { + u32 pkts_acked; + s32 rtt_us; + u32 in_flight; +}; + +struct rate_sample { + u64 prior_mstamp; + u32 prior_delivered; + s32 delivered; + long int interval_us; + u32 snd_interval_us; + u32 rcv_interval_us; + long int rtt_us; + int losses; + u32 acked_sacked; + u32 prior_in_flight; + bool is_app_limited; + bool is_retrans; + bool is_ack_delayed; +}; + +struct sk_msg_sg { + u32 start; + u32 curr; + u32 end; + u32 size; + u32 copybreak; + long unsigned int copy; + struct scatterlist data[19]; +}; + +struct sk_msg { + struct sk_msg_sg sg; + void *data; + void *data_end; + u32 apply_bytes; + u32 cork_bytes; + u32 flags; + struct sk_buff *skb; + struct sock *sk_redir; + struct sock *sk; + struct list_head list; +}; + +enum verifier_phase { + CHECK_META = 0, + CHECK_TYPE = 1, +}; + +struct resolve_vertex { + const struct btf_type *t; + u32 type_id; + u16 next_member; +}; + +enum visit_state { + NOT_VISITED = 0, + VISITED = 1, + RESOLVED = 2, +}; + +enum resolve_mode { + RESOLVE_TBD = 0, + RESOLVE_PTR = 1, + RESOLVE_STRUCT_OR_ARRAY = 2, +}; + +struct btf_sec_info { + u32 off; + u32 len; +}; + +struct btf_verifier_env { + struct btf *btf; + u8 *visit_states; + struct resolve_vertex stack[32]; + struct bpf_verifier_log log; + u32 log_type_id; + u32 top_stack; + enum verifier_phase phase; + enum resolve_mode resolve_mode; +}; + +struct btf_show { + u64 flags; + void *target; + void (*showfn)(struct btf_show *, const char *, struct __va_list_tag *); + const struct btf *btf; + struct { + u8 depth; + u8 depth_to_show; + u8 depth_check; + u8 array_member: 1; + u8 array_terminated: 1; + u16 array_encoding; + u32 type_id; + int status; + const struct btf_type *type; + const struct btf_member 
*member; + char name[80]; + } state; + struct { + u32 size; + void *head; + void *data; + u8 safe[32]; + } obj; +}; + +struct btf_kind_operations { + s32 (*check_meta)(struct btf_verifier_env *, const struct btf_type *, u32); + int (*resolve)(struct btf_verifier_env *, const struct resolve_vertex *); + int (*check_member)(struct btf_verifier_env *, const struct btf_type *, const struct btf_member *, const struct btf_type *); + int (*check_kflag_member)(struct btf_verifier_env *, const struct btf_type *, const struct btf_member *, const struct btf_type *); + void (*log_details)(struct btf_verifier_env *, const struct btf_type *); + void (*show)(const struct btf *, const struct btf_type *, u32, void *, u8, struct btf_show *); +}; + +struct bpf_ctx_convert { + struct __sk_buff BPF_PROG_TYPE_SOCKET_FILTER_prog; + struct sk_buff BPF_PROG_TYPE_SOCKET_FILTER_kern; + struct __sk_buff BPF_PROG_TYPE_SCHED_CLS_prog; + struct sk_buff BPF_PROG_TYPE_SCHED_CLS_kern; + struct __sk_buff BPF_PROG_TYPE_SCHED_ACT_prog; + struct sk_buff BPF_PROG_TYPE_SCHED_ACT_kern; + struct xdp_md BPF_PROG_TYPE_XDP_prog; + struct xdp_buff BPF_PROG_TYPE_XDP_kern; + struct __sk_buff BPF_PROG_TYPE_CGROUP_SKB_prog; + struct sk_buff BPF_PROG_TYPE_CGROUP_SKB_kern; + struct bpf_sock BPF_PROG_TYPE_CGROUP_SOCK_prog; + struct sock BPF_PROG_TYPE_CGROUP_SOCK_kern; + struct bpf_sock_addr BPF_PROG_TYPE_CGROUP_SOCK_ADDR_prog; + struct bpf_sock_addr_kern BPF_PROG_TYPE_CGROUP_SOCK_ADDR_kern; + struct __sk_buff BPF_PROG_TYPE_LWT_IN_prog; + struct sk_buff BPF_PROG_TYPE_LWT_IN_kern; + struct __sk_buff BPF_PROG_TYPE_LWT_OUT_prog; + struct sk_buff BPF_PROG_TYPE_LWT_OUT_kern; + struct __sk_buff BPF_PROG_TYPE_LWT_XMIT_prog; + struct sk_buff BPF_PROG_TYPE_LWT_XMIT_kern; + struct __sk_buff BPF_PROG_TYPE_LWT_SEG6LOCAL_prog; + struct sk_buff BPF_PROG_TYPE_LWT_SEG6LOCAL_kern; + struct bpf_sock_ops BPF_PROG_TYPE_SOCK_OPS_prog; + struct bpf_sock_ops_kern BPF_PROG_TYPE_SOCK_OPS_kern; + struct __sk_buff BPF_PROG_TYPE_SK_SKB_prog; + struct sk_buff BPF_PROG_TYPE_SK_SKB_kern; + struct sk_msg_md BPF_PROG_TYPE_SK_MSG_prog; + struct sk_msg BPF_PROG_TYPE_SK_MSG_kern; + struct __sk_buff BPF_PROG_TYPE_FLOW_DISSECTOR_prog; + struct bpf_flow_dissector BPF_PROG_TYPE_FLOW_DISSECTOR_kern; + bpf_user_pt_regs_t BPF_PROG_TYPE_KPROBE_prog; + struct pt_regs BPF_PROG_TYPE_KPROBE_kern; + __u64 BPF_PROG_TYPE_TRACEPOINT_prog; + u64 BPF_PROG_TYPE_TRACEPOINT_kern; + struct bpf_perf_event_data BPF_PROG_TYPE_PERF_EVENT_prog; + struct bpf_perf_event_data_kern BPF_PROG_TYPE_PERF_EVENT_kern; + struct bpf_raw_tracepoint_args BPF_PROG_TYPE_RAW_TRACEPOINT_prog; + u64 BPF_PROG_TYPE_RAW_TRACEPOINT_kern; + struct bpf_raw_tracepoint_args BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE_prog; + u64 BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE_kern; + void *BPF_PROG_TYPE_TRACING_prog; + void *BPF_PROG_TYPE_TRACING_kern; + struct bpf_cgroup_dev_ctx BPF_PROG_TYPE_CGROUP_DEVICE_prog; + struct bpf_cgroup_dev_ctx BPF_PROG_TYPE_CGROUP_DEVICE_kern; + struct bpf_sysctl BPF_PROG_TYPE_CGROUP_SYSCTL_prog; + struct bpf_sysctl_kern BPF_PROG_TYPE_CGROUP_SYSCTL_kern; + struct bpf_sockopt BPF_PROG_TYPE_CGROUP_SOCKOPT_prog; + struct bpf_sockopt_kern BPF_PROG_TYPE_CGROUP_SOCKOPT_kern; + struct sk_reuseport_md BPF_PROG_TYPE_SK_REUSEPORT_prog; + struct sk_reuseport_kern BPF_PROG_TYPE_SK_REUSEPORT_kern; + struct bpf_sk_lookup BPF_PROG_TYPE_SK_LOOKUP_prog; + struct bpf_sk_lookup_kern BPF_PROG_TYPE_SK_LOOKUP_kern; + void *BPF_PROG_TYPE_STRUCT_OPS_prog; + void *BPF_PROG_TYPE_STRUCT_OPS_kern; + void *BPF_PROG_TYPE_EXT_prog; + void 
*BPF_PROG_TYPE_EXT_kern; + void *BPF_PROG_TYPE_LSM_prog; + void *BPF_PROG_TYPE_LSM_kern; +}; + +enum { + __ctx_convertBPF_PROG_TYPE_SOCKET_FILTER = 0, + __ctx_convertBPF_PROG_TYPE_SCHED_CLS = 1, + __ctx_convertBPF_PROG_TYPE_SCHED_ACT = 2, + __ctx_convertBPF_PROG_TYPE_XDP = 3, + __ctx_convertBPF_PROG_TYPE_CGROUP_SKB = 4, + __ctx_convertBPF_PROG_TYPE_CGROUP_SOCK = 5, + __ctx_convertBPF_PROG_TYPE_CGROUP_SOCK_ADDR = 6, + __ctx_convertBPF_PROG_TYPE_LWT_IN = 7, + __ctx_convertBPF_PROG_TYPE_LWT_OUT = 8, + __ctx_convertBPF_PROG_TYPE_LWT_XMIT = 9, + __ctx_convertBPF_PROG_TYPE_LWT_SEG6LOCAL = 10, + __ctx_convertBPF_PROG_TYPE_SOCK_OPS = 11, + __ctx_convertBPF_PROG_TYPE_SK_SKB = 12, + __ctx_convertBPF_PROG_TYPE_SK_MSG = 13, + __ctx_convertBPF_PROG_TYPE_FLOW_DISSECTOR = 14, + __ctx_convertBPF_PROG_TYPE_KPROBE = 15, + __ctx_convertBPF_PROG_TYPE_TRACEPOINT = 16, + __ctx_convertBPF_PROG_TYPE_PERF_EVENT = 17, + __ctx_convertBPF_PROG_TYPE_RAW_TRACEPOINT = 18, + __ctx_convertBPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 19, + __ctx_convertBPF_PROG_TYPE_TRACING = 20, + __ctx_convertBPF_PROG_TYPE_CGROUP_DEVICE = 21, + __ctx_convertBPF_PROG_TYPE_CGROUP_SYSCTL = 22, + __ctx_convertBPF_PROG_TYPE_CGROUP_SOCKOPT = 23, + __ctx_convertBPF_PROG_TYPE_SK_REUSEPORT = 24, + __ctx_convertBPF_PROG_TYPE_SK_LOOKUP = 25, + __ctx_convertBPF_PROG_TYPE_STRUCT_OPS = 26, + __ctx_convertBPF_PROG_TYPE_EXT = 27, + __ctx_convertBPF_PROG_TYPE_LSM = 28, + __ctx_convert_unused = 29, +}; + +enum bpf_struct_walk_result { + WALK_SCALAR = 0, + WALK_PTR = 1, + WALK_STRUCT = 2, +}; + +struct btf_show_snprintf { + struct btf_show show; + int len_left; + int len; +}; + +struct btf_module { + struct list_head list; + struct module *module; + struct btf *btf; + struct bin_attribute *sysfs_attr; +}; + +struct bpf_dispatcher_prog { + struct bpf_prog *prog; + refcount_t users; +}; + +struct bpf_dispatcher { + struct mutex mutex; + void *func; + struct bpf_dispatcher_prog progs[48]; + int num_progs; + void *image; + u32 image_off; + struct bpf_ksym ksym; +}; + +struct bpf_devmap_val { + __u32 ifindex; + union { + int fd; + __u32 id; + } bpf_prog; +}; + +enum net_device_flags { + IFF_UP = 1, + IFF_BROADCAST = 2, + IFF_DEBUG = 4, + IFF_LOOPBACK = 8, + IFF_POINTOPOINT = 16, + IFF_NOTRAILERS = 32, + IFF_RUNNING = 64, + IFF_NOARP = 128, + IFF_PROMISC = 256, + IFF_ALLMULTI = 512, + IFF_MASTER = 1024, + IFF_SLAVE = 2048, + IFF_MULTICAST = 4096, + IFF_PORTSEL = 8192, + IFF_AUTOMEDIA = 16384, + IFF_DYNAMIC = 32768, + IFF_LOWER_UP = 65536, + IFF_DORMANT = 131072, + IFF_ECHO = 262144, +}; + +struct xdp_dev_bulk_queue { + struct xdp_frame *q[16]; + struct list_head flush_node; + struct net_device *dev; + struct net_device *dev_rx; + unsigned int count; +}; + +enum netdev_cmd { + NETDEV_UP = 1, + NETDEV_DOWN = 2, + NETDEV_REBOOT = 3, + NETDEV_CHANGE = 4, + NETDEV_REGISTER = 5, + NETDEV_UNREGISTER = 6, + NETDEV_CHANGEMTU = 7, + NETDEV_CHANGEADDR = 8, + NETDEV_PRE_CHANGEADDR = 9, + NETDEV_GOING_DOWN = 10, + NETDEV_CHANGENAME = 11, + NETDEV_FEAT_CHANGE = 12, + NETDEV_BONDING_FAILOVER = 13, + NETDEV_PRE_UP = 14, + NETDEV_PRE_TYPE_CHANGE = 15, + NETDEV_POST_TYPE_CHANGE = 16, + NETDEV_POST_INIT = 17, + NETDEV_RELEASE = 18, + NETDEV_NOTIFY_PEERS = 19, + NETDEV_JOIN = 20, + NETDEV_CHANGEUPPER = 21, + NETDEV_RESEND_IGMP = 22, + NETDEV_PRECHANGEMTU = 23, + NETDEV_CHANGEINFODATA = 24, + NETDEV_BONDING_INFO = 25, + NETDEV_PRECHANGEUPPER = 26, + NETDEV_CHANGELOWERSTATE = 27, + NETDEV_UDP_TUNNEL_PUSH_INFO = 28, + NETDEV_UDP_TUNNEL_DROP_INFO = 29, + NETDEV_CHANGE_TX_QUEUE_LEN = 30, + 
NETDEV_CVLAN_FILTER_PUSH_INFO = 31, + NETDEV_CVLAN_FILTER_DROP_INFO = 32, + NETDEV_SVLAN_FILTER_PUSH_INFO = 33, + NETDEV_SVLAN_FILTER_DROP_INFO = 34, +}; + +struct netdev_notifier_info { + struct net_device *dev; + struct netlink_ext_ack *extack; +}; + +struct bpf_nh_params { + u32 nh_family; + union { + u32 ipv4_nh; + struct in6_addr ipv6_nh; + }; +}; + +struct bpf_redirect_info { + u32 flags; + u32 tgt_index; + void *tgt_value; + u32 map_id; + enum bpf_map_type map_type; + u32 kern_flags; + struct bpf_nh_params nh; +}; + +struct bpf_dtab; + +struct bpf_dtab_netdev { + struct net_device *dev; + struct hlist_node index_hlist; + struct bpf_dtab *dtab; + struct bpf_prog *xdp_prog; + struct callback_head rcu; + unsigned int idx; + struct bpf_devmap_val val; +}; + +struct bpf_dtab { + struct bpf_map map; + struct bpf_dtab_netdev **netdev_map; + struct list_head list; + struct hlist_head *dev_index_head; + spinlock_t index_lock; + unsigned int items; + u32 n_buckets; + long: 32; + long: 64; + long: 64; +}; + +struct bpf_cpumap_val { + __u32 qsize; + union { + int fd; + __u32 id; + } bpf_prog; +}; + +struct ptr_ring { + int producer; + spinlock_t producer_lock; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + int consumer_head; + int consumer_tail; + spinlock_t consumer_lock; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + int size; + int batch; + void **queue; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct bpf_cpu_map_entry; + +struct xdp_bulk_queue { + void *q[8]; + struct list_head flush_node; + struct bpf_cpu_map_entry *obj; + unsigned int count; +}; + +struct bpf_cpu_map; + +struct bpf_cpu_map_entry { + u32 cpu; + int map_id; + struct xdp_bulk_queue *bulkq; + struct bpf_cpu_map *cmap; + struct ptr_ring *queue; + struct task_struct *kthread; + struct bpf_cpumap_val value; + struct bpf_prog *prog; + atomic_t refcnt; + struct callback_head rcu; + struct work_struct kthread_stop_wq; +}; + +struct bpf_cpu_map { + struct bpf_map map; + struct bpf_cpu_map_entry **cpu_map; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct rhlist_head { + struct rhash_head rhead; + struct rhlist_head *next; +}; + +struct bpf_prog_offload_ops { + int (*insn_hook)(struct bpf_verifier_env *, int, int); + int (*finalize)(struct bpf_verifier_env *); + int (*replace_insn)(struct bpf_verifier_env *, u32, struct bpf_insn *); + int (*remove_insns)(struct bpf_verifier_env *, u32, u32); + int (*prepare)(struct bpf_prog *); + int (*translate)(struct bpf_prog *); + void (*destroy)(struct bpf_prog *); +}; + +struct bpf_offload_dev { + const struct bpf_prog_offload_ops *ops; + struct list_head netdevs; + void *priv; +}; + +typedef struct ns_common *ns_get_path_helper_t(void *); + +struct bpf_offload_netdev { + struct rhash_head l; + struct net_device *netdev; + struct bpf_offload_dev *offdev; + struct list_head progs; + struct list_head maps; + struct list_head offdev_netdevs; +}; + +struct ns_get_path_bpf_prog_args { + struct bpf_prog *prog; + struct bpf_prog_info *info; +}; + +struct ns_get_path_bpf_map_args { + struct bpf_offloaded_map *offmap; + struct bpf_map_info *info; +}; + +struct bpf_netns_link { + struct bpf_link link; + enum bpf_attach_type type; + enum netns_bpf_attach_type netns_type; + struct net *net; + struct list_head node; +}; + +enum bpf_stack_build_id_status { + BPF_STACK_BUILD_ID_EMPTY = 0, + BPF_STACK_BUILD_ID_VALID = 1, + BPF_STACK_BUILD_ID_IP = 2, 
+}; + +struct bpf_stack_build_id { + __s32 status; + unsigned char build_id[20]; + union { + __u64 offset; + __u64 ip; + }; +}; + +enum { + BPF_F_SKIP_FIELD_MASK = 255, + BPF_F_USER_STACK = 256, + BPF_F_FAST_STACK_CMP = 512, + BPF_F_REUSE_STACKID = 1024, + BPF_F_USER_BUILD_ID = 2048, +}; + +enum perf_callchain_context { + PERF_CONTEXT_HV = 4294967264, + PERF_CONTEXT_KERNEL = 4294967168, + PERF_CONTEXT_USER = 4294966784, + PERF_CONTEXT_GUEST = 4294965248, + PERF_CONTEXT_GUEST_KERNEL = 4294965120, + PERF_CONTEXT_GUEST_USER = 4294964736, + PERF_CONTEXT_MAX = 4294963201, +}; + +struct stack_map_bucket { + struct pcpu_freelist_node fnode; + u32 hash; + u32 nr; + u64 data[0]; +}; + +struct bpf_stack_map { + struct bpf_map map; + void *elems; + struct pcpu_freelist freelist; + u32 n_buckets; + struct stack_map_bucket *buckets[0]; + long: 64; + long: 64; + long: 64; +}; + +struct stack_map_irq_work { + struct irq_work irq_work; + struct mm_struct *mm; +}; + +typedef u64 (*btf_bpf_get_stackid)(struct pt_regs *, struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_stackid_pe)(struct bpf_perf_event_data_kern *, struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_stack)(struct pt_regs *, void *, u32, u64); + +typedef u64 (*btf_bpf_get_task_stack)(struct task_struct *, void *, u32, u64); + +typedef u64 (*btf_bpf_get_stack_pe)(struct bpf_perf_event_data_kern *, void *, u32, u64); + +enum { + BPF_F_SYSCTL_BASE_NAME = 1, +}; + +struct bpf_prog_list { + struct list_head node; + struct bpf_prog *prog; + struct bpf_cgroup_link *link; + struct bpf_cgroup_storage *storage[2]; +}; + +struct qdisc_skb_cb { + struct { + unsigned int pkt_len; + u16 slave_dev_queue_mapping; + u16 tc_classid; + }; + unsigned char data[20]; + u16 mru; + bool post_ct; +}; + +struct bpf_skb_data_end { + struct qdisc_skb_cb qdisc_cb; + void *data_meta; + void *data_end; +}; + +struct bpf_sockopt_buf { + u8 data[32]; +}; + +enum { + TCPF_ESTABLISHED = 2, + TCPF_SYN_SENT = 4, + TCPF_SYN_RECV = 8, + TCPF_FIN_WAIT1 = 16, + TCPF_FIN_WAIT2 = 32, + TCPF_TIME_WAIT = 64, + TCPF_CLOSE = 128, + TCPF_CLOSE_WAIT = 256, + TCPF_LAST_ACK = 512, + TCPF_LISTEN = 1024, + TCPF_CLOSING = 2048, + TCPF_NEW_SYN_RECV = 4096, +}; + +typedef u64 (*btf_bpf_sysctl_get_name)(struct bpf_sysctl_kern *, char *, size_t, u64); + +typedef u64 (*btf_bpf_sysctl_get_current_value)(struct bpf_sysctl_kern *, char *, size_t); + +typedef u64 (*btf_bpf_sysctl_get_new_value)(struct bpf_sysctl_kern *, char *, size_t); + +typedef u64 (*btf_bpf_sysctl_set_new_value)(struct bpf_sysctl_kern *, const char *, size_t); + +enum sock_type { + SOCK_STREAM = 1, + SOCK_DGRAM = 2, + SOCK_RAW = 3, + SOCK_RDM = 4, + SOCK_SEQPACKET = 5, + SOCK_DCCP = 6, + SOCK_PACKET = 10, +}; + +enum { + IPPROTO_IP = 0, + IPPROTO_ICMP = 1, + IPPROTO_IGMP = 2, + IPPROTO_IPIP = 4, + IPPROTO_TCP = 6, + IPPROTO_EGP = 8, + IPPROTO_PUP = 12, + IPPROTO_UDP = 17, + IPPROTO_IDP = 22, + IPPROTO_TP = 29, + IPPROTO_DCCP = 33, + IPPROTO_IPV6 = 41, + IPPROTO_RSVP = 46, + IPPROTO_GRE = 47, + IPPROTO_ESP = 50, + IPPROTO_AH = 51, + IPPROTO_MTP = 92, + IPPROTO_BEETPH = 94, + IPPROTO_ENCAP = 98, + IPPROTO_PIM = 103, + IPPROTO_COMP = 108, + IPPROTO_SCTP = 132, + IPPROTO_UDPLITE = 136, + IPPROTO_MPLS = 137, + IPPROTO_ETHERNET = 143, + IPPROTO_RAW = 255, + IPPROTO_MPTCP = 262, + IPPROTO_MAX = 263, +}; + +enum sock_flags { + SOCK_DEAD = 0, + SOCK_DONE = 1, + SOCK_URGINLINE = 2, + SOCK_KEEPOPEN = 3, + SOCK_LINGER = 4, + SOCK_DESTROY = 5, + SOCK_BROADCAST = 6, + SOCK_TIMESTAMP = 7, + SOCK_ZAPPED = 8, + SOCK_USE_WRITE_QUEUE = 9, + 
SOCK_DBG = 10, + SOCK_RCVTSTAMP = 11, + SOCK_RCVTSTAMPNS = 12, + SOCK_LOCALROUTE = 13, + SOCK_MEMALLOC = 14, + SOCK_TIMESTAMPING_RX_SOFTWARE = 15, + SOCK_FASYNC = 16, + SOCK_RXQ_OVFL = 17, + SOCK_ZEROCOPY = 18, + SOCK_WIFI_STATUS = 19, + SOCK_NOFCS = 20, + SOCK_FILTER_LOCKED = 21, + SOCK_SELECT_ERR_QUEUE = 22, + SOCK_RCU_FREE = 23, + SOCK_TXTIME = 24, + SOCK_XDP = 25, + SOCK_TSTAMP_NEW = 26, +}; + +struct reuseport_array { + struct bpf_map map; + struct sock *ptrs[0]; +}; + +enum bpf_struct_ops_state { + BPF_STRUCT_OPS_STATE_INIT = 0, + BPF_STRUCT_OPS_STATE_INUSE = 1, + BPF_STRUCT_OPS_STATE_TOBEFREE = 2, +}; + +struct bpf_struct_ops_value { + refcount_t refcnt; + enum bpf_struct_ops_state state; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + char data[0]; +}; + +struct bpf_struct_ops_map { + struct bpf_map map; + const struct bpf_struct_ops *st_ops; + struct mutex lock; + struct bpf_prog **progs; + void *image; + struct bpf_struct_ops_value *uvalue; + struct bpf_struct_ops_value kvalue; +}; + +struct bpf_struct_ops_tcp_congestion_ops { + refcount_t refcnt; + enum bpf_struct_ops_state state; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct tcp_congestion_ops data; +}; + +struct sembuf { + short unsigned int sem_num; + short int sem_op; + short int sem_flg; +}; + +enum key_need_perm { + KEY_NEED_UNSPECIFIED = 0, + KEY_NEED_VIEW = 1, + KEY_NEED_READ = 2, + KEY_NEED_WRITE = 3, + KEY_NEED_SEARCH = 4, + KEY_NEED_LINK = 5, + KEY_NEED_SETATTR = 6, + KEY_NEED_UNLINK = 7, + KEY_SYSADMIN_OVERRIDE = 8, + KEY_AUTHTOKEN_OVERRIDE = 9, + KEY_DEFER_PERM_CHECK = 10, +}; + +struct __key_reference_with_attributes; + +typedef struct __key_reference_with_attributes *key_ref_t; + +struct xfrm_sec_ctx { + __u8 ctx_doi; + __u8 ctx_alg; + __u16 ctx_len; + __u32 ctx_sid; + char ctx_str[0]; +}; + +struct xfrm_user_sec_ctx { + __u16 len; + __u16 exttype; + __u8 ctx_alg; + __u8 ctx_doi; + __u16 ctx_len; +}; + +enum { + BPF_F_BPRM_SECUREEXEC = 1, +}; + +typedef u64 (*btf_bpf_bprm_opts_set)(struct linux_binprm *, u64); + +typedef u64 (*btf_bpf_ima_inode_hash)(struct inode *, void *, u32); + +struct static_call_tramp_key { + s32 tramp; + s32 key; +}; + +enum perf_event_read_format { + PERF_FORMAT_TOTAL_TIME_ENABLED = 1, + PERF_FORMAT_TOTAL_TIME_RUNNING = 2, + PERF_FORMAT_ID = 4, + PERF_FORMAT_GROUP = 8, + PERF_FORMAT_MAX = 16, +}; + +enum perf_event_ioc_flags { + PERF_IOC_FLAG_GROUP = 1, +}; + +struct perf_ns_link_info { + __u64 dev; + __u64 ino; +}; + +enum { + NET_NS_INDEX = 0, + UTS_NS_INDEX = 1, + IPC_NS_INDEX = 2, + PID_NS_INDEX = 3, + USER_NS_INDEX = 4, + MNT_NS_INDEX = 5, + CGROUP_NS_INDEX = 6, + NR_NAMESPACES = 7, +}; + +enum perf_event_type { + PERF_RECORD_MMAP = 1, + PERF_RECORD_LOST = 2, + PERF_RECORD_COMM = 3, + PERF_RECORD_EXIT = 4, + PERF_RECORD_THROTTLE = 5, + PERF_RECORD_UNTHROTTLE = 6, + PERF_RECORD_FORK = 7, + PERF_RECORD_READ = 8, + PERF_RECORD_SAMPLE = 9, + PERF_RECORD_MMAP2 = 10, + PERF_RECORD_AUX = 11, + PERF_RECORD_ITRACE_START = 12, + PERF_RECORD_LOST_SAMPLES = 13, + PERF_RECORD_SWITCH = 14, + PERF_RECORD_SWITCH_CPU_WIDE = 15, + PERF_RECORD_NAMESPACES = 16, + PERF_RECORD_KSYMBOL = 17, + PERF_RECORD_BPF_EVENT = 18, + PERF_RECORD_CGROUP = 19, + PERF_RECORD_TEXT_POKE = 20, + PERF_RECORD_MAX = 21, +}; + +struct swevent_hlist { + struct hlist_head heads[256]; + struct callback_head callback_head; +}; + +struct pmu_event_list { + raw_spinlock_t lock; + struct list_head list; +}; + +struct perf_buffer { + refcount_t 
refcount; + struct callback_head callback_head; + int nr_pages; + int overwrite; + int paused; + atomic_t poll; + local_t head; + unsigned int nest; + local_t events; + local_t wakeup; + local_t lost; + long int watermark; + long int aux_watermark; + spinlock_t event_lock; + struct list_head event_list; + atomic_t mmap_count; + long unsigned int mmap_locked; + struct user_struct *mmap_user; + long int aux_head; + unsigned int aux_nest; + long int aux_wakeup; + long unsigned int aux_pgoff; + int aux_nr_pages; + int aux_overwrite; + atomic_t aux_mmap_count; + long unsigned int aux_mmap_locked; + void (*free_aux)(void *); + refcount_t aux_refcount; + int aux_in_sampling; + void **aux_pages; + void *aux_priv; + struct perf_event_mmap_page *user_page; + void *data_pages[0]; +}; + +struct match_token { + int token; + const char *pattern; +}; + +enum { + MAX_OPT_ARGS = 3, +}; + +struct min_heap { + void *data; + int nr; + int size; +}; + +struct min_heap_callbacks { + int elem_size; + bool (*less)(const void *, const void *); + void (*swp)(void *, void *); +}; + +typedef int (*remote_function_f)(void *); + +struct remote_function_call { + struct task_struct *p; + remote_function_f func; + void *info; + int ret; +}; + +typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *, struct perf_event_context *, void *); + +struct event_function_struct { + struct perf_event *event; + event_f func; + void *data; +}; + +enum event_type_t { + EVENT_FLEXIBLE = 1, + EVENT_PINNED = 2, + EVENT_TIME = 4, + EVENT_CPU = 8, + EVENT_ALL = 3, +}; + +struct __group_key { + int cpu; + struct cgroup *cgroup; +}; + +struct stop_event_data { + struct perf_event *event; + unsigned int restart; +}; + +struct perf_read_data { + struct perf_event *event; + bool group; + int ret; +}; + +struct perf_read_event { + struct perf_event_header header; + u32 pid; + u32 tid; +}; + +typedef void perf_iterate_f(struct perf_event *, void *); + +struct remote_output { + struct perf_buffer *rb; + int err; +}; + +struct perf_task_event { + struct task_struct *task; + struct perf_event_context *task_ctx; + struct { + struct perf_event_header header; + u32 pid; + u32 ppid; + u32 tid; + u32 ptid; + u64 time; + } event_id; +}; + +struct perf_comm_event { + struct task_struct *task; + char *comm; + int comm_size; + struct { + struct perf_event_header header; + u32 pid; + u32 tid; + } event_id; +}; + +struct perf_namespaces_event { + struct task_struct *task; + struct { + struct perf_event_header header; + u32 pid; + u32 tid; + u64 nr_namespaces; + struct perf_ns_link_info link_info[7]; + } event_id; +}; + +struct perf_cgroup_event { + char *path; + int path_size; + struct { + struct perf_event_header header; + u64 id; + char path[0]; + } event_id; +}; + +struct perf_mmap_event { + struct vm_area_struct *vma; + const char *file_name; + int file_size; + int maj; + int min; + u64 ino; + u64 ino_generation; + u32 prot; + u32 flags; + u8 build_id[20]; + u32 build_id_size; + struct { + struct perf_event_header header; + u32 pid; + u32 tid; + u64 start; + u64 len; + u64 pgoff; + } event_id; +}; + +struct perf_switch_event { + struct task_struct *task; + struct task_struct *next_prev; + struct { + struct perf_event_header header; + u32 next_prev_pid; + u32 next_prev_tid; + } event_id; +}; + +struct perf_ksymbol_event { + const char *name; + int name_len; + struct { + struct perf_event_header header; + u64 addr; + u32 len; + u16 ksym_type; + u16 flags; + } event_id; +}; + +struct perf_bpf_event { + struct bpf_prog *prog; + struct { + struct 
perf_event_header header; + u16 type; + u16 flags; + u32 id; + u8 tag[8]; + } event_id; +}; + +struct perf_text_poke_event { + const void *old_bytes; + const void *new_bytes; + size_t pad; + u16 old_len; + u16 new_len; + struct { + struct perf_event_header header; + u64 addr; + } event_id; +}; + +struct swevent_htable { + struct swevent_hlist *swevent_hlist; + struct mutex hlist_mutex; + int hlist_refcount; + int recursion[4]; +}; + +enum perf_probe_config { + PERF_PROBE_CONFIG_IS_RETPROBE = 1, + PERF_UPROBE_REF_CTR_OFFSET_BITS = 32, + PERF_UPROBE_REF_CTR_OFFSET_SHIFT = 32, +}; + +enum { + IF_ACT_NONE = 4294967295, + IF_ACT_FILTER = 0, + IF_ACT_START = 1, + IF_ACT_STOP = 2, + IF_SRC_FILE = 3, + IF_SRC_KERNEL = 4, + IF_SRC_FILEADDR = 5, + IF_SRC_KERNELADDR = 6, +}; + +enum { + IF_STATE_ACTION = 0, + IF_STATE_SOURCE = 1, + IF_STATE_END = 2, +}; + +struct perf_aux_event { + struct perf_event_header header; + u32 pid; + u32 tid; +}; + +struct perf_aux_event___2 { + struct perf_event_header header; + u64 offset; + u64 size; + u64 flags; +}; + +struct callchain_cpus_entries { + struct callback_head callback_head; + struct perf_callchain_entry *cpu_entries[0]; +}; + +enum bp_type_idx { + TYPE_INST = 0, + TYPE_DATA = 0, + TYPE_MAX = 1, +}; + +struct bp_cpuinfo { + unsigned int cpu_pinned; + unsigned int *tsk_pinned; + unsigned int flexible; +}; + +struct bp_busy_slots { + unsigned int pinned; + unsigned int flexible; +}; + +typedef u8 uprobe_opcode_t; + +struct uprobe { + struct rb_node rb_node; + refcount_t ref; + struct rw_semaphore register_rwsem; + struct rw_semaphore consumer_rwsem; + struct list_head pending_list; + struct uprobe_consumer *consumers; + struct inode *inode; + loff_t offset; + loff_t ref_ctr_offset; + long unsigned int flags; + struct arch_uprobe arch; +}; + +struct xol_area { + wait_queue_head_t wq; + atomic_t slot_count; + long unsigned int *bitmap; + struct vm_special_mapping xol_mapping; + struct page *pages[2]; + long unsigned int vaddr; +}; + +struct compact_control; + +struct capture_control { + struct compact_control *cc; + struct page *page; +}; + +typedef int filler_t(void *, struct page *); + +struct page_vma_mapped_walk { + struct page *page; + struct vm_area_struct *vma; + long unsigned int address; + pmd_t *pmd; + pte_t *pte; + spinlock_t *ptl; + unsigned int flags; +}; + +struct compact_control { + struct list_head freepages; + struct list_head migratepages; + unsigned int nr_freepages; + unsigned int nr_migratepages; + long unsigned int free_pfn; + long unsigned int migrate_pfn; + long unsigned int fast_start_pfn; + struct zone *zone; + long unsigned int total_migrate_scanned; + long unsigned int total_free_scanned; + short unsigned int fast_search_fail; + short int search_order; + const gfp_t gfp_mask; + int order; + int migratetype; + const unsigned int alloc_flags; + const int highest_zoneidx; + enum migrate_mode mode; + bool ignore_skip_hint; + bool no_set_skip_hint; + bool ignore_block_suitable; + bool direct_compaction; + bool proactive_compaction; + bool whole_zone; + bool contended; + bool rescan; + bool alloc_contig; +}; + +struct delayed_uprobe { + struct list_head list; + struct uprobe *uprobe; + struct mm_struct *mm; +}; + +struct __uprobe_key { + struct inode *inode; + loff_t offset; +}; + +struct map_info { + struct map_info *next; + struct mm_struct *mm; + long unsigned int vaddr; +}; + +struct user_return_notifier { + void (*on_user_return)(struct user_return_notifier *); + struct hlist_node link; +}; + +struct parallel_data; + +struct 
padata_priv { + struct list_head list; + struct parallel_data *pd; + int cb_cpu; + unsigned int seq_nr; + int info; + void (*parallel)(struct padata_priv *); + void (*serial)(struct padata_priv *); +}; + +struct padata_cpumask { + cpumask_var_t pcpu; + cpumask_var_t cbcpu; +}; + +struct padata_shell; + +struct padata_list; + +struct padata_serial_queue; + +struct parallel_data { + struct padata_shell *ps; + struct padata_list *reorder_list; + struct padata_serial_queue *squeue; + atomic_t refcnt; + unsigned int seq_nr; + unsigned int processed; + int cpu; + struct padata_cpumask cpumask; + struct work_struct reorder_work; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + spinlock_t lock; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct padata_list { + struct list_head list; + spinlock_t lock; +}; + +struct padata_serial_queue { + struct padata_list serial; + struct work_struct work; + struct parallel_data *pd; +}; + +struct padata_instance; + +struct padata_shell { + struct padata_instance *pinst; + struct parallel_data *pd; + struct parallel_data *opd; + struct list_head list; +}; + +struct padata_instance { + struct hlist_node cpu_online_node; + struct hlist_node cpu_dead_node; + struct workqueue_struct *parallel_wq; + struct workqueue_struct *serial_wq; + struct list_head pslist; + struct padata_cpumask cpumask; + struct kobject kobj; + struct mutex lock; + u8 flags; +}; + +struct padata_mt_job { + void (*thread_fn)(long unsigned int, long unsigned int, void *); + void *fn_arg; + long unsigned int start; + long unsigned int size; + long unsigned int align; + long unsigned int min_chunk; + int max_threads; +}; + +struct padata_work { + struct work_struct pw_work; + struct list_head pw_list; + void *pw_data; +}; + +struct padata_mt_job_state { + spinlock_t lock; + struct completion completion; + struct padata_mt_job *job; + int nworks; + int nworks_fini; + long unsigned int chunk_size; +}; + +struct padata_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct padata_instance *, struct attribute *, char *); + ssize_t (*store)(struct padata_instance *, struct attribute *, const char *, size_t); +}; + +struct static_key_mod { + struct static_key_mod *next; + struct jump_entry *entries; + struct module *mod; +}; + +struct static_key_deferred { + struct static_key key; + long unsigned int timeout; + struct delayed_work work; +}; + +enum rseq_cpu_id_state { + RSEQ_CPU_ID_UNINITIALIZED = 4294967295, + RSEQ_CPU_ID_REGISTRATION_FAILED = 4294967294, +}; + +enum rseq_flags { + RSEQ_FLAG_UNREGISTER = 1, +}; + +enum rseq_cs_flags { + RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT = 1, + RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL = 2, + RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE = 4, +}; + +struct rseq_cs { + __u32 version; + __u32 flags; + __u64 start_ip; + __u64 post_commit_offset; + __u64 abort_ip; +}; + +struct trace_event_raw_rseq_update { + struct trace_entry ent; + s32 cpu_id; + char __data[0]; +}; + +struct trace_event_raw_rseq_ip_fixup { + struct trace_entry ent; + long unsigned int regs_ip; + long unsigned int start_ip; + long unsigned int post_commit_offset; + long unsigned int abort_ip; + char __data[0]; +}; + +struct trace_event_data_offsets_rseq_update {}; + +struct trace_event_data_offsets_rseq_ip_fixup {}; + +typedef void (*btf_trace_rseq_update)(void *, struct task_struct *); + +typedef void (*btf_trace_rseq_ip_fixup)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + +struct watch; + +struct 
watch_list { + struct callback_head rcu; + struct hlist_head watchers; + void (*release_watch)(struct watch *); + spinlock_t lock; +}; + +enum watch_notification_type { + WATCH_TYPE_META = 0, + WATCH_TYPE_KEY_NOTIFY = 1, + WATCH_TYPE__NR = 2, +}; + +enum watch_meta_notification_subtype { + WATCH_META_REMOVAL_NOTIFICATION = 0, + WATCH_META_LOSS_NOTIFICATION = 1, +}; + +struct watch_notification { + __u32 type: 24; + __u32 subtype: 8; + __u32 info; +}; + +struct watch_notification_type_filter { + __u32 type; + __u32 info_filter; + __u32 info_mask; + __u32 subtype_filter[8]; +}; + +struct watch_notification_filter { + __u32 nr_filters; + __u32 __reserved; + struct watch_notification_type_filter filters[0]; +}; + +struct watch_notification_removal { + struct watch_notification watch; + __u64 id; +}; + +struct watch_type_filter { + enum watch_notification_type type; + __u32 subtype_filter[1]; + __u32 info_filter; + __u32 info_mask; +}; + +struct watch_filter { + union { + struct callback_head rcu; + long unsigned int type_filter[2]; + }; + u32 nr_filters; + struct watch_type_filter filters[0]; +}; + +struct watch_queue { + struct callback_head rcu; + struct watch_filter *filter; + struct pipe_inode_info *pipe; + struct hlist_head watches; + struct page **notes; + long unsigned int *notes_bitmap; + struct kref usage; + spinlock_t lock; + unsigned int nr_notes; + unsigned int nr_pages; + bool defunct; +}; + +struct watch { + union { + struct callback_head rcu; + u32 info_id; + }; + struct watch_queue *queue; + struct hlist_node queue_node; + struct watch_list *watch_list; + struct hlist_node list_node; + const struct cred *cred; + void *private; + u64 id; + struct kref usage; +}; + +struct pkcs7_message; + +typedef int __kernel_rwf_t; + +enum positive_aop_returns { + AOP_WRITEPAGE_ACTIVATE = 524288, + AOP_TRUNCATED_PAGE = 524289, +}; + +enum iter_type { + ITER_IOVEC = 4, + ITER_KVEC = 8, + ITER_BVEC = 16, + ITER_PIPE = 32, + ITER_DISCARD = 64, + ITER_XARRAY = 128, +}; + +enum mapping_flags { + AS_EIO = 0, + AS_ENOSPC = 1, + AS_MM_ALL_LOCKS = 2, + AS_UNEVICTABLE = 3, + AS_EXITING = 4, + AS_NO_WRITEBACK_TAGS = 5, + AS_THP_SUPPORT = 6, +}; + +struct wait_page_key { + struct page *page; + int bit_nr; + int page_match; +}; + +struct pagevec { + unsigned char nr; + bool percpu_pvec_drained; + struct page *pages[15]; +}; + +struct fid { + union { + struct { + u32 ino; + u32 gen; + u32 parent_ino; + u32 parent_gen; + } i32; + struct { + u32 block; + u16 partref; + u16 parent_partref; + u32 generation; + u32 parent_block; + u32 parent_generation; + } udf; + __u32 raw[0]; + }; +}; + +struct trace_event_raw_mm_filemap_op_page_cache { + struct trace_entry ent; + long unsigned int pfn; + long unsigned int i_ino; + long unsigned int index; + dev_t s_dev; + char __data[0]; +}; + +struct trace_event_raw_filemap_set_wb_err { + struct trace_entry ent; + long unsigned int i_ino; + dev_t s_dev; + errseq_t errseq; + char __data[0]; +}; + +struct trace_event_raw_file_check_and_advance_wb_err { + struct trace_entry ent; + struct file *file; + long unsigned int i_ino; + dev_t s_dev; + errseq_t old; + errseq_t new; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_filemap_op_page_cache {}; + +struct trace_event_data_offsets_filemap_set_wb_err {}; + +struct trace_event_data_offsets_file_check_and_advance_wb_err {}; + +typedef void (*btf_trace_mm_filemap_delete_from_page_cache)(void *, struct page *); + +typedef void (*btf_trace_mm_filemap_add_to_page_cache)(void *, struct page *); + +typedef void 
(*btf_trace_filemap_set_wb_err)(void *, struct address_space *, errseq_t); + +typedef void (*btf_trace_file_check_and_advance_wb_err)(void *, struct file *, errseq_t); + +enum behavior { + EXCLUSIVE = 0, + SHARED = 1, + DROP = 2, +}; + +struct reciprocal_value { + u32 m; + u8 sh1; + u8 sh2; +}; + +struct kmem_cache_order_objects { + unsigned int x; +}; + +struct kmem_cache_cpu; + +struct kmem_cache_node; + +struct kmem_cache { + struct kmem_cache_cpu *cpu_slab; + slab_flags_t flags; + long unsigned int min_partial; + unsigned int size; + unsigned int object_size; + struct reciprocal_value reciprocal_size; + unsigned int offset; + unsigned int cpu_partial; + struct kmem_cache_order_objects oo; + struct kmem_cache_order_objects max; + struct kmem_cache_order_objects min; + gfp_t allocflags; + int refcount; + void (*ctor)(void *); + unsigned int inuse; + unsigned int align; + unsigned int red_left_pad; + const char *name; + struct list_head list; + struct kobject kobj; + long unsigned int random; + unsigned int remote_node_defrag_ratio; + unsigned int *random_seq; + unsigned int useroffset; + unsigned int usersize; + struct kmem_cache_node *node[1024]; +}; + +struct kmem_cache_cpu { + void **freelist; + long unsigned int tid; + struct page *page; + struct page *partial; +}; + +struct kmem_cache_node { + spinlock_t list_lock; + long unsigned int nr_partial; + struct list_head partial; + atomic_long_t nr_slabs; + atomic_long_t total_objects; + struct list_head full; +}; + +struct zap_details { + struct address_space *check_mapping; + long unsigned int first_index; + long unsigned int last_index; + struct page *single_page; +}; + +enum oom_constraint { + CONSTRAINT_NONE = 0, + CONSTRAINT_CPUSET = 1, + CONSTRAINT_MEMORY_POLICY = 2, + CONSTRAINT_MEMCG = 3, +}; + +struct oom_control { + struct zonelist *zonelist; + nodemask_t *nodemask; + struct mem_cgroup *memcg; + const gfp_t gfp_mask; + const int order; + long unsigned int totalpages; + struct task_struct *chosen; + long int chosen_points; + enum oom_constraint constraint; +}; + +enum compact_priority { + COMPACT_PRIO_SYNC_FULL = 0, + MIN_COMPACT_PRIORITY = 0, + COMPACT_PRIO_SYNC_LIGHT = 1, + MIN_COMPACT_COSTLY_PRIORITY = 1, + DEF_COMPACT_PRIORITY = 1, + COMPACT_PRIO_ASYNC = 2, + INIT_COMPACT_PRIORITY = 2, +}; + +enum compact_result { + COMPACT_NOT_SUITABLE_ZONE = 0, + COMPACT_SKIPPED = 1, + COMPACT_DEFERRED = 2, + COMPACT_NO_SUITABLE_PAGE = 3, + COMPACT_CONTINUE = 4, + COMPACT_COMPLETE = 5, + COMPACT_PARTIAL_SKIPPED = 6, + COMPACT_CONTENDED = 7, + COMPACT_SUCCESS = 8, +}; + +struct trace_event_raw_oom_score_adj_update { + struct trace_entry ent; + pid_t pid; + char comm[16]; + short int oom_score_adj; + char __data[0]; +}; + +struct trace_event_raw_reclaim_retry_zone { + struct trace_entry ent; + int node; + int zone_idx; + int order; + long unsigned int reclaimable; + long unsigned int available; + long unsigned int min_wmark; + int no_progress_loops; + bool wmark_check; + char __data[0]; +}; + +struct trace_event_raw_mark_victim { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_wake_reaper { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_start_task_reaping { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_finish_task_reaping { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_skip_task_reaping { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_compact_retry { + 
struct trace_entry ent; + int order; + int priority; + int result; + int retries; + int max_retries; + bool ret; + char __data[0]; +}; + +struct trace_event_data_offsets_oom_score_adj_update {}; + +struct trace_event_data_offsets_reclaim_retry_zone {}; + +struct trace_event_data_offsets_mark_victim {}; + +struct trace_event_data_offsets_wake_reaper {}; + +struct trace_event_data_offsets_start_task_reaping {}; + +struct trace_event_data_offsets_finish_task_reaping {}; + +struct trace_event_data_offsets_skip_task_reaping {}; + +struct trace_event_data_offsets_compact_retry {}; + +typedef void (*btf_trace_oom_score_adj_update)(void *, struct task_struct *); + +typedef void (*btf_trace_reclaim_retry_zone)(void *, struct zoneref *, int, long unsigned int, long unsigned int, long unsigned int, int, bool); + +typedef void (*btf_trace_mark_victim)(void *, int); + +typedef void (*btf_trace_wake_reaper)(void *, int); + +typedef void (*btf_trace_start_task_reaping)(void *, int); + +typedef void (*btf_trace_finish_task_reaping)(void *, int); + +typedef void (*btf_trace_skip_task_reaping)(void *, int); + +typedef void (*btf_trace_compact_retry)(void *, int, enum compact_priority, enum compact_result, int, int, bool); + +enum wb_congested_state { + WB_async_congested = 0, + WB_sync_congested = 1, +}; + +enum wb_state { + WB_registered = 0, + WB_writeback_running = 1, + WB_has_dirty_io = 2, + WB_start_all = 3, +}; + +enum { + BLK_RW_ASYNC = 0, + BLK_RW_SYNC = 1, +}; + +struct wb_lock_cookie { + bool locked; + long unsigned int flags; +}; + +typedef int (*writepage_t)(struct page *, struct writeback_control *, void *); + +enum page_memcg_data_flags { + MEMCG_DATA_OBJCGS = 1, + MEMCG_DATA_KMEM = 2, + __NR_MEMCG_DATA_FLAGS = 4, +}; + +struct dirty_throttle_control { + struct wb_domain *dom; + struct dirty_throttle_control *gdtc; + struct bdi_writeback *wb; + struct fprop_local_percpu *wb_completions; + long unsigned int avail; + long unsigned int dirty; + long unsigned int thresh; + long unsigned int bg_thresh; + long unsigned int wb_dirty; + long unsigned int wb_thresh; + long unsigned int wb_bg_thresh; + long unsigned int pos_ratio; +}; + +typedef void compound_page_dtor(struct page *); + +typedef struct {} local_lock_t; + +struct trace_event_raw_mm_lru_insertion { + struct trace_entry ent; + struct page *page; + long unsigned int pfn; + enum lru_list lru; + long unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_mm_lru_activate { + struct trace_entry ent; + struct page *page; + long unsigned int pfn; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_lru_insertion {}; + +struct trace_event_data_offsets_mm_lru_activate {}; + +typedef void (*btf_trace_mm_lru_insertion)(void *, struct page *); + +typedef void (*btf_trace_mm_lru_activate)(void *, struct page *); + +struct lru_rotate { + local_lock_t lock; + struct pagevec pvec; +}; + +struct lru_pvecs { + local_lock_t lock; + struct pagevec lru_add; + struct pagevec lru_deactivate_file; + struct pagevec lru_deactivate; + struct pagevec lru_lazyfree; + struct pagevec activate_page; +}; + +enum lruvec_flags { + LRUVEC_CONGESTED = 0, +}; + +enum pgdat_flags { + PGDAT_DIRTY = 0, + PGDAT_WRITEBACK = 1, + PGDAT_RECLAIM_LOCKED = 2, +}; + +struct reclaim_stat { + unsigned int nr_dirty; + unsigned int nr_unqueued_dirty; + unsigned int nr_congested; + unsigned int nr_writeback; + unsigned int nr_immediate; + unsigned int nr_pageout; + unsigned int nr_activate[2]; + unsigned int nr_ref_keep; + unsigned int nr_unmap_fail; + unsigned int 
nr_lazyfree_fail; +}; + +struct mem_cgroup_reclaim_cookie { + pg_data_t *pgdat; + unsigned int generation; +}; + +enum ttu_flags { + TTU_MIGRATION = 1, + TTU_MUNLOCK = 2, + TTU_SPLIT_HUGE_PMD = 4, + TTU_IGNORE_MLOCK = 8, + TTU_SYNC = 16, + TTU_IGNORE_HWPOISON = 32, + TTU_BATCH_FLUSH = 64, + TTU_RMAP_LOCKED = 128, + TTU_SPLIT_FREEZE = 256, +}; + +struct trace_event_raw_mm_vmscan_kswapd_sleep { + struct trace_entry ent; + int nid; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_kswapd_wake { + struct trace_entry ent; + int nid; + int zid; + int order; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_wakeup_kswapd { + struct trace_entry ent; + int nid; + int zid; + int order; + gfp_t gfp_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_direct_reclaim_begin_template { + struct trace_entry ent; + int order; + gfp_t gfp_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_direct_reclaim_end_template { + struct trace_entry ent; + long unsigned int nr_reclaimed; + char __data[0]; +}; + +struct trace_event_raw_mm_shrink_slab_start { + struct trace_entry ent; + struct shrinker *shr; + void *shrink; + int nid; + long int nr_objects_to_shrink; + gfp_t gfp_flags; + long unsigned int cache_items; + long long unsigned int delta; + long unsigned int total_scan; + int priority; + char __data[0]; +}; + +struct trace_event_raw_mm_shrink_slab_end { + struct trace_entry ent; + struct shrinker *shr; + int nid; + void *shrink; + long int unused_scan; + long int new_scan; + int retval; + long int total_scan; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_lru_isolate { + struct trace_entry ent; + int highest_zoneidx; + int order; + long unsigned int nr_requested; + long unsigned int nr_scanned; + long unsigned int nr_skipped; + long unsigned int nr_taken; + isolate_mode_t isolate_mode; + int lru; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_writepage { + struct trace_entry ent; + long unsigned int pfn; + int reclaim_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_lru_shrink_inactive { + struct trace_entry ent; + int nid; + long unsigned int nr_scanned; + long unsigned int nr_reclaimed; + long unsigned int nr_dirty; + long unsigned int nr_writeback; + long unsigned int nr_congested; + long unsigned int nr_immediate; + unsigned int nr_activate0; + unsigned int nr_activate1; + long unsigned int nr_ref_keep; + long unsigned int nr_unmap_fail; + int priority; + int reclaim_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_lru_shrink_active { + struct trace_entry ent; + int nid; + long unsigned int nr_taken; + long unsigned int nr_active; + long unsigned int nr_deactivated; + long unsigned int nr_referenced; + int priority; + int reclaim_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_inactive_list_is_low { + struct trace_entry ent; + int nid; + int reclaim_idx; + long unsigned int total_inactive; + long unsigned int inactive; + long unsigned int total_active; + long unsigned int active; + long unsigned int ratio; + int reclaim_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_node_reclaim_begin { + struct trace_entry ent; + int nid; + int order; + gfp_t gfp_flags; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_vmscan_kswapd_sleep {}; + +struct trace_event_data_offsets_mm_vmscan_kswapd_wake {}; + +struct trace_event_data_offsets_mm_vmscan_wakeup_kswapd {}; + +struct trace_event_data_offsets_mm_vmscan_direct_reclaim_begin_template {}; + +struct 
trace_event_data_offsets_mm_vmscan_direct_reclaim_end_template {}; + +struct trace_event_data_offsets_mm_shrink_slab_start {}; + +struct trace_event_data_offsets_mm_shrink_slab_end {}; + +struct trace_event_data_offsets_mm_vmscan_lru_isolate {}; + +struct trace_event_data_offsets_mm_vmscan_writepage {}; + +struct trace_event_data_offsets_mm_vmscan_lru_shrink_inactive {}; + +struct trace_event_data_offsets_mm_vmscan_lru_shrink_active {}; + +struct trace_event_data_offsets_mm_vmscan_inactive_list_is_low {}; + +struct trace_event_data_offsets_mm_vmscan_node_reclaim_begin {}; + +typedef void (*btf_trace_mm_vmscan_kswapd_sleep)(void *, int); + +typedef void (*btf_trace_mm_vmscan_kswapd_wake)(void *, int, int, int); + +typedef void (*btf_trace_mm_vmscan_wakeup_kswapd)(void *, int, int, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_direct_reclaim_begin)(void *, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_memcg_reclaim_begin)(void *, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_memcg_softlimit_reclaim_begin)(void *, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_direct_reclaim_end)(void *, long unsigned int); + +typedef void (*btf_trace_mm_vmscan_memcg_reclaim_end)(void *, long unsigned int); + +typedef void (*btf_trace_mm_vmscan_memcg_softlimit_reclaim_end)(void *, long unsigned int); + +typedef void (*btf_trace_mm_shrink_slab_start)(void *, struct shrinker *, struct shrink_control *, long int, long unsigned int, long long unsigned int, long unsigned int, int); + +typedef void (*btf_trace_mm_shrink_slab_end)(void *, struct shrinker *, int, int, long int, long int, long int); + +typedef void (*btf_trace_mm_vmscan_lru_isolate)(void *, int, int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, isolate_mode_t, int); + +typedef void (*btf_trace_mm_vmscan_writepage)(void *, struct page *); + +typedef void (*btf_trace_mm_vmscan_lru_shrink_inactive)(void *, int, long unsigned int, long unsigned int, struct reclaim_stat *, int, int); + +typedef void (*btf_trace_mm_vmscan_lru_shrink_active)(void *, int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, int, int); + +typedef void (*btf_trace_mm_vmscan_inactive_list_is_low)(void *, int, int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, int); + +typedef void (*btf_trace_mm_vmscan_node_reclaim_begin)(void *, int, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_node_reclaim_end)(void *, long unsigned int); + +struct scan_control { + long unsigned int nr_to_reclaim; + nodemask_t *nodemask; + struct mem_cgroup *target_mem_cgroup; + long unsigned int anon_cost; + long unsigned int file_cost; + unsigned int may_deactivate: 2; + unsigned int force_deactivate: 1; + unsigned int skipped_deactivate: 1; + unsigned int may_writepage: 1; + unsigned int may_unmap: 1; + unsigned int may_swap: 1; + unsigned int memcg_low_reclaim: 1; + unsigned int memcg_low_skipped: 1; + unsigned int hibernation_mode: 1; + unsigned int compaction_ready: 1; + unsigned int cache_trim_mode: 1; + unsigned int file_is_tiny: 1; + s8 order; + s8 priority; + s8 reclaim_idx; + gfp_t gfp_mask; + long unsigned int nr_scanned; + long unsigned int nr_reclaimed; + struct { + unsigned int dirty; + unsigned int unqueued_dirty; + unsigned int congested; + unsigned int writeback; + unsigned int immediate; + unsigned int file_taken; + unsigned int taken; + } nr; + struct reclaim_state reclaim_state; +}; + +typedef enum { + PAGE_KEEP = 0, + PAGE_ACTIVATE = 1, + PAGE_SUCCESS = 2, + 
PAGE_CLEAN = 3, +} pageout_t; + +enum page_references { + PAGEREF_RECLAIM = 0, + PAGEREF_RECLAIM_CLEAN = 1, + PAGEREF_KEEP = 2, + PAGEREF_ACTIVATE = 3, +}; + +enum scan_balance { + SCAN_EQUAL = 0, + SCAN_FRACT = 1, + SCAN_ANON = 2, + SCAN_FILE = 3, +}; + +typedef __u64 __le64; + +enum transparent_hugepage_flag { + TRANSPARENT_HUGEPAGE_NEVER_DAX = 0, + TRANSPARENT_HUGEPAGE_FLAG = 1, + TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG = 2, + TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG = 3, + TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG = 4, + TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG = 5, + TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG = 6, + TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG = 7, + TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG = 8, +}; + +struct xattr; + +typedef int (*initxattrs)(struct inode *, const struct xattr *, void *); + +struct xattr { + const char *name; + void *value; + size_t value_len; +}; + +struct constant_table { + const char *name; + int value; +}; + +enum { + MPOL_DEFAULT = 0, + MPOL_PREFERRED = 1, + MPOL_BIND = 2, + MPOL_INTERLEAVE = 3, + MPOL_LOCAL = 4, + MPOL_MAX = 5, +}; + +struct shared_policy { + struct rb_root root; + rwlock_t lock; +}; + +struct simple_xattrs { + struct list_head head; + spinlock_t lock; +}; + +struct simple_xattr { + struct list_head list; + char *name; + size_t size; + char value[0]; +}; + +struct shmem_inode_info { + spinlock_t lock; + unsigned int seals; + long unsigned int flags; + long unsigned int alloced; + long unsigned int swapped; + struct list_head shrinklist; + struct list_head swaplist; + struct shared_policy policy; + struct simple_xattrs xattrs; + atomic_t stop_eviction; + struct inode vfs_inode; +}; + +struct shmem_sb_info { + long unsigned int max_blocks; + struct percpu_counter used_blocks; + long unsigned int max_inodes; + long unsigned int free_inodes; + spinlock_t stat_lock; + umode_t mode; + unsigned char huge; + kuid_t uid; + kgid_t gid; + bool full_inums; + ino_t next_ino; + ino_t *ino_batch; + struct mempolicy *mpol; + spinlock_t shrinklist_lock; + struct list_head shrinklist; + long unsigned int shrinklist_len; +}; + +enum sgp_type { + SGP_READ = 0, + SGP_CACHE = 1, + SGP_NOHUGE = 2, + SGP_HUGE = 3, + SGP_WRITE = 4, + SGP_FALLOC = 5, +}; + +enum fid_type { + FILEID_ROOT = 0, + FILEID_INO32_GEN = 1, + FILEID_INO32_GEN_PARENT = 2, + FILEID_BTRFS_WITHOUT_PARENT = 77, + FILEID_BTRFS_WITH_PARENT = 78, + FILEID_BTRFS_WITH_PARENT_ROOT = 79, + FILEID_UDF_WITHOUT_PARENT = 81, + FILEID_UDF_WITH_PARENT = 82, + FILEID_NILFS_WITHOUT_PARENT = 97, + FILEID_NILFS_WITH_PARENT = 98, + FILEID_FAT_WITHOUT_PARENT = 113, + FILEID_FAT_WITH_PARENT = 114, + FILEID_LUSTRE = 151, + FILEID_KERNFS = 254, + FILEID_INVALID = 255, +}; + +struct shmem_falloc { + wait_queue_head_t *waitq; + long unsigned int start; + long unsigned int next; + long unsigned int nr_falloced; + long unsigned int nr_unswapped; +}; + +struct shmem_options { + long long unsigned int blocks; + long long unsigned int inodes; + struct mempolicy *mpol; + kuid_t uid; + kgid_t gid; + umode_t mode; + bool full_inums; + int huge; + int seen; +}; + +enum shmem_param { + Opt_gid = 0, + Opt_huge = 1, + Opt_mode = 2, + Opt_mpol = 3, + Opt_nr_blocks = 4, + Opt_nr_inodes = 5, + Opt_size = 6, + Opt_uid = 7, + Opt_inode32 = 8, + Opt_inode64 = 9, +}; + +enum writeback_stat_item { + NR_DIRTY_THRESHOLD = 0, + NR_DIRTY_BG_THRESHOLD = 1, + NR_VM_WRITEBACK_STAT_ITEMS = 2, +}; + +struct contig_page_info { + long unsigned int free_pages; + long unsigned int free_blocks_total; + long unsigned int free_blocks_suitable; 
+}; + +struct radix_tree_iter { + long unsigned int index; + long unsigned int next_index; + long unsigned int tags; + struct xa_node *node; +}; + +enum { + RADIX_TREE_ITER_TAG_MASK = 15, + RADIX_TREE_ITER_TAGGED = 16, + RADIX_TREE_ITER_CONTIG = 32, +}; + +struct pcpu_group_info { + int nr_units; + long unsigned int base_offset; + unsigned int *cpu_map; +}; + +struct pcpu_alloc_info { + size_t static_size; + size_t reserved_size; + size_t dyn_size; + size_t unit_size; + size_t atom_size; + size_t alloc_size; + size_t __ai_size; + int nr_groups; + struct pcpu_group_info groups[0]; +}; + +struct trace_event_raw_percpu_alloc_percpu { + struct trace_entry ent; + bool reserved; + bool is_atomic; + size_t size; + size_t align; + void *base_addr; + int off; + void *ptr; + char __data[0]; +}; + +struct trace_event_raw_percpu_free_percpu { + struct trace_entry ent; + void *base_addr; + int off; + void *ptr; + char __data[0]; +}; + +struct trace_event_raw_percpu_alloc_percpu_fail { + struct trace_entry ent; + bool reserved; + bool is_atomic; + size_t size; + size_t align; + char __data[0]; +}; + +struct trace_event_raw_percpu_create_chunk { + struct trace_entry ent; + void *base_addr; + char __data[0]; +}; + +struct trace_event_raw_percpu_destroy_chunk { + struct trace_entry ent; + void *base_addr; + char __data[0]; +}; + +struct trace_event_data_offsets_percpu_alloc_percpu {}; + +struct trace_event_data_offsets_percpu_free_percpu {}; + +struct trace_event_data_offsets_percpu_alloc_percpu_fail {}; + +struct trace_event_data_offsets_percpu_create_chunk {}; + +struct trace_event_data_offsets_percpu_destroy_chunk {}; + +typedef void (*btf_trace_percpu_alloc_percpu)(void *, bool, bool, size_t, size_t, void *, int, void *); + +typedef void (*btf_trace_percpu_free_percpu)(void *, void *, int, void *); + +typedef void (*btf_trace_percpu_alloc_percpu_fail)(void *, bool, bool, size_t, size_t); + +typedef void (*btf_trace_percpu_create_chunk)(void *, void *); + +typedef void (*btf_trace_percpu_destroy_chunk)(void *, void *); + +enum pcpu_chunk_type { + PCPU_CHUNK_ROOT = 0, + PCPU_CHUNK_MEMCG = 1, + PCPU_NR_CHUNK_TYPES = 2, + PCPU_FAIL_ALLOC = 2, +}; + +struct pcpu_block_md { + int scan_hint; + int scan_hint_start; + int contig_hint; + int contig_hint_start; + int left_free; + int right_free; + int first_free; + int nr_bits; +}; + +struct pcpu_chunk { + struct list_head list; + int free_bytes; + struct pcpu_block_md chunk_md; + void *base_addr; + long unsigned int *alloc_map; + long unsigned int *bound_map; + struct pcpu_block_md *md_blocks; + void *data; + bool immutable; + int start_offset; + int end_offset; + struct obj_cgroup **obj_cgroups; + int nr_pages; + int nr_populated; + int nr_empty_pop_pages; + long unsigned int populated[0]; +}; + +struct trace_event_raw_kmem_alloc { + struct trace_entry ent; + long unsigned int call_site; + const void *ptr; + size_t bytes_req; + size_t bytes_alloc; + gfp_t gfp_flags; + char __data[0]; +}; + +struct trace_event_raw_kmem_alloc_node { + struct trace_entry ent; + long unsigned int call_site; + const void *ptr; + size_t bytes_req; + size_t bytes_alloc; + gfp_t gfp_flags; + int node; + char __data[0]; +}; + +struct trace_event_raw_kfree { + struct trace_entry ent; + long unsigned int call_site; + const void *ptr; + char __data[0]; +}; + +struct trace_event_raw_kmem_cache_free { + struct trace_entry ent; + long unsigned int call_site; + const void *ptr; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_mm_page_free { + struct trace_entry ent; + 
long unsigned int pfn; + unsigned int order; + char __data[0]; +}; + +struct trace_event_raw_mm_page_free_batched { + struct trace_entry ent; + long unsigned int pfn; + char __data[0]; +}; + +struct trace_event_raw_mm_page_alloc { + struct trace_entry ent; + long unsigned int pfn; + unsigned int order; + gfp_t gfp_flags; + int migratetype; + char __data[0]; +}; + +struct trace_event_raw_mm_page { + struct trace_entry ent; + long unsigned int pfn; + unsigned int order; + int migratetype; + char __data[0]; +}; + +struct trace_event_raw_mm_page_pcpu_drain { + struct trace_entry ent; + long unsigned int pfn; + unsigned int order; + int migratetype; + char __data[0]; +}; + +struct trace_event_raw_mm_page_alloc_extfrag { + struct trace_entry ent; + long unsigned int pfn; + int alloc_order; + int fallback_order; + int alloc_migratetype; + int fallback_migratetype; + int change_ownership; + char __data[0]; +}; + +struct trace_event_raw_rss_stat { + struct trace_entry ent; + unsigned int mm_id; + unsigned int curr; + int member; + long int size; + char __data[0]; +}; + +struct trace_event_data_offsets_kmem_alloc {}; + +struct trace_event_data_offsets_kmem_alloc_node {}; + +struct trace_event_data_offsets_kfree {}; + +struct trace_event_data_offsets_kmem_cache_free { + u32 name; +}; + +struct trace_event_data_offsets_mm_page_free {}; + +struct trace_event_data_offsets_mm_page_free_batched {}; + +struct trace_event_data_offsets_mm_page_alloc {}; + +struct trace_event_data_offsets_mm_page {}; + +struct trace_event_data_offsets_mm_page_pcpu_drain {}; + +struct trace_event_data_offsets_mm_page_alloc_extfrag {}; + +struct trace_event_data_offsets_rss_stat {}; + +typedef void (*btf_trace_kmalloc)(void *, long unsigned int, const void *, size_t, size_t, gfp_t); + +typedef void (*btf_trace_kmem_cache_alloc)(void *, long unsigned int, const void *, size_t, size_t, gfp_t); + +typedef void (*btf_trace_kmalloc_node)(void *, long unsigned int, const void *, size_t, size_t, gfp_t, int); + +typedef void (*btf_trace_kmem_cache_alloc_node)(void *, long unsigned int, const void *, size_t, size_t, gfp_t, int); + +typedef void (*btf_trace_kfree)(void *, long unsigned int, const void *); + +typedef void (*btf_trace_kmem_cache_free)(void *, long unsigned int, const void *, const char *); + +typedef void (*btf_trace_mm_page_free)(void *, struct page *, unsigned int); + +typedef void (*btf_trace_mm_page_free_batched)(void *, struct page *); + +typedef void (*btf_trace_mm_page_alloc)(void *, struct page *, unsigned int, gfp_t, int); + +typedef void (*btf_trace_mm_page_alloc_zone_locked)(void *, struct page *, unsigned int, int); + +typedef void (*btf_trace_mm_page_pcpu_drain)(void *, struct page *, unsigned int, int); + +typedef void (*btf_trace_mm_page_alloc_extfrag)(void *, struct page *, int, int, int, int); + +typedef void (*btf_trace_rss_stat)(void *, struct mm_struct *, int, long int); + +enum slab_state { + DOWN = 0, + PARTIAL = 1, + PARTIAL_NODE = 2, + UP = 3, + FULL = 4, +}; + +struct kmalloc_info_struct { + const char *name[3]; + unsigned int size; +}; + +struct slabinfo { + long unsigned int active_objs; + long unsigned int num_objs; + long unsigned int active_slabs; + long unsigned int num_slabs; + long unsigned int shared_avail; + unsigned int limit; + unsigned int batchcount; + unsigned int shared; + unsigned int objects_per_slab; + unsigned int cache_order; +}; + +struct kmem_obj_info { + void *kp_ptr; + struct page *kp_page; + void *kp_objp; + long unsigned int kp_data_offset; + struct kmem_cache 
*kp_slab_cache; + void *kp_ret; + void *kp_stack[16]; +}; + +enum pageblock_bits { + PB_migrate = 0, + PB_migrate_end = 2, + PB_migrate_skip = 3, + NR_PAGEBLOCK_BITS = 4, +}; + +struct node___2 { + struct device dev; + struct list_head access_list; + struct work_struct node_work; + struct list_head cache_attrs; + struct device *cache_dev; +}; + +typedef struct page *new_page_t(struct page *, long unsigned int); + +typedef void free_page_t(struct page *, long unsigned int); + +struct alloc_context { + struct zonelist *zonelist; + nodemask_t *nodemask; + struct zoneref *preferred_zoneref; + int migratetype; + enum zone_type highest_zoneidx; + bool spread_dirty_pages; +}; + +struct trace_event_raw_mm_compaction_isolate_template { + struct trace_entry ent; + long unsigned int start_pfn; + long unsigned int end_pfn; + long unsigned int nr_scanned; + long unsigned int nr_taken; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_migratepages { + struct trace_entry ent; + long unsigned int nr_migrated; + long unsigned int nr_failed; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_begin { + struct trace_entry ent; + long unsigned int zone_start; + long unsigned int migrate_pfn; + long unsigned int free_pfn; + long unsigned int zone_end; + bool sync; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_end { + struct trace_entry ent; + long unsigned int zone_start; + long unsigned int migrate_pfn; + long unsigned int free_pfn; + long unsigned int zone_end; + bool sync; + int status; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_try_to_compact_pages { + struct trace_entry ent; + int order; + gfp_t gfp_mask; + int prio; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_suitable_template { + struct trace_entry ent; + int nid; + enum zone_type idx; + int order; + int ret; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_defer_template { + struct trace_entry ent; + int nid; + enum zone_type idx; + int order; + unsigned int considered; + unsigned int defer_shift; + int order_failed; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_kcompactd_sleep { + struct trace_entry ent; + int nid; + char __data[0]; +}; + +struct trace_event_raw_kcompactd_wake_template { + struct trace_entry ent; + int nid; + int order; + enum zone_type highest_zoneidx; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_compaction_isolate_template {}; + +struct trace_event_data_offsets_mm_compaction_migratepages {}; + +struct trace_event_data_offsets_mm_compaction_begin {}; + +struct trace_event_data_offsets_mm_compaction_end {}; + +struct trace_event_data_offsets_mm_compaction_try_to_compact_pages {}; + +struct trace_event_data_offsets_mm_compaction_suitable_template {}; + +struct trace_event_data_offsets_mm_compaction_defer_template {}; + +struct trace_event_data_offsets_mm_compaction_kcompactd_sleep {}; + +struct trace_event_data_offsets_kcompactd_wake_template {}; + +typedef void (*btf_trace_mm_compaction_isolate_migratepages)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + +typedef void (*btf_trace_mm_compaction_isolate_freepages)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + +typedef void (*btf_trace_mm_compaction_migratepages)(void *, long unsigned int, int, struct list_head *); + +typedef void (*btf_trace_mm_compaction_begin)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, bool); + +typedef void 
(*btf_trace_mm_compaction_end)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, bool, int); + +typedef void (*btf_trace_mm_compaction_try_to_compact_pages)(void *, int, gfp_t, int); + +typedef void (*btf_trace_mm_compaction_finished)(void *, struct zone *, int, int); + +typedef void (*btf_trace_mm_compaction_suitable)(void *, struct zone *, int, int); + +typedef void (*btf_trace_mm_compaction_deferred)(void *, struct zone *, int); + +typedef void (*btf_trace_mm_compaction_defer_compaction)(void *, struct zone *, int); + +typedef void (*btf_trace_mm_compaction_defer_reset)(void *, struct zone *, int); + +typedef void (*btf_trace_mm_compaction_kcompactd_sleep)(void *, int); + +typedef void (*btf_trace_mm_compaction_wakeup_kcompactd)(void *, int, int, enum zone_type); + +typedef void (*btf_trace_mm_compaction_kcompactd_wake)(void *, int, int, enum zone_type); + +typedef enum { + ISOLATE_ABORT = 0, + ISOLATE_NONE = 1, + ISOLATE_SUCCESS = 2, +} isolate_migrate_t; + +struct anon_vma_chain { + struct vm_area_struct *vma; + struct anon_vma *anon_vma; + struct list_head same_vma; + struct rb_node rb; + long unsigned int rb_subtree_last; +}; + +enum lru_status { + LRU_REMOVED = 0, + LRU_REMOVED_RETRY = 1, + LRU_ROTATE = 2, + LRU_SKIP = 3, + LRU_RETRY = 4, +}; + +typedef enum lru_status (*list_lru_walk_cb)(struct list_head *, struct list_lru_one *, spinlock_t *, void *); + +typedef struct { + long unsigned int pd; +} hugepd_t; + +struct migration_target_control { + int nid; + nodemask_t *nmask; + gfp_t gfp_mask; +}; + +struct follow_page_context { + struct dev_pagemap *pgmap; + unsigned int page_mask; +}; + +struct trace_event_raw_mmap_lock_start_locking { + struct trace_entry ent; + struct mm_struct *mm; + u32 __data_loc_memcg_path; + bool write; + char __data[0]; +}; + +struct trace_event_raw_mmap_lock_acquire_returned { + struct trace_entry ent; + struct mm_struct *mm; + u32 __data_loc_memcg_path; + bool write; + bool success; + char __data[0]; +}; + +struct trace_event_raw_mmap_lock_released { + struct trace_entry ent; + struct mm_struct *mm; + u32 __data_loc_memcg_path; + bool write; + char __data[0]; +}; + +struct trace_event_data_offsets_mmap_lock_start_locking { + u32 memcg_path; +}; + +struct trace_event_data_offsets_mmap_lock_acquire_returned { + u32 memcg_path; +}; + +struct trace_event_data_offsets_mmap_lock_released { + u32 memcg_path; +}; + +typedef void (*btf_trace_mmap_lock_start_locking)(void *, struct mm_struct *, const char *, bool); + +typedef void (*btf_trace_mmap_lock_acquire_returned)(void *, struct mm_struct *, const char *, bool, bool); + +typedef void (*btf_trace_mmap_lock_released)(void *, struct mm_struct *, const char *, bool); + +struct memcg_path { + local_lock_t lock; + char *buf; + local_t buf_idx; +}; + +typedef unsigned int pgtbl_mod_mask; + +enum { + SWP_USED = 1, + SWP_WRITEOK = 2, + SWP_DISCARDABLE = 4, + SWP_DISCARDING = 8, + SWP_SOLIDSTATE = 16, + SWP_CONTINUED = 32, + SWP_BLKDEV = 64, + SWP_ACTIVATED = 128, + SWP_FS_OPS = 256, + SWP_AREA_DISCARD = 512, + SWP_PAGE_DISCARD = 1024, + SWP_STABLE_WRITES = 2048, + SWP_SYNCHRONOUS_IO = 4096, + SWP_VALID = 8192, + SWP_SCANNING = 16384, +}; + +struct copy_subpage_arg { + struct page *dst; + struct page *src; + struct vm_area_struct *vma; +}; + +struct mm_walk; + +struct mm_walk_ops { + int (*pgd_entry)(pgd_t *, long unsigned int, long unsigned int, struct mm_walk *); + int (*p4d_entry)(p4d_t *, long unsigned int, long unsigned int, struct mm_walk *); + int (*pud_entry)(pud_t *, long 
unsigned int, long unsigned int, struct mm_walk *); + int (*pmd_entry)(pmd_t *, long unsigned int, long unsigned int, struct mm_walk *); + int (*pte_entry)(pte_t *, long unsigned int, long unsigned int, struct mm_walk *); + int (*pte_hole)(long unsigned int, long unsigned int, int, struct mm_walk *); + int (*hugetlb_entry)(pte_t *, long unsigned int, long unsigned int, long unsigned int, struct mm_walk *); + int (*test_walk)(long unsigned int, long unsigned int, struct mm_walk *); + int (*pre_vma)(long unsigned int, long unsigned int, struct mm_walk *); + void (*post_vma)(struct mm_walk *); +}; + +enum page_walk_action { + ACTION_SUBTREE = 0, + ACTION_CONTINUE = 1, + ACTION_AGAIN = 2, +}; + +struct mm_walk { + const struct mm_walk_ops *ops; + struct mm_struct *mm; + pgd_t *pgd; + struct vm_area_struct *vma; + enum page_walk_action action; + bool no_vma; + void *private; +}; + +enum { + HUGETLB_SHMFS_INODE = 1, + HUGETLB_ANONHUGE_INODE = 2, +}; + +struct trace_event_raw_vm_unmapped_area { + struct trace_entry ent; + long unsigned int addr; + long unsigned int total_vm; + long unsigned int flags; + long unsigned int length; + long unsigned int low_limit; + long unsigned int high_limit; + long unsigned int align_mask; + long unsigned int align_offset; + char __data[0]; +}; + +struct trace_event_data_offsets_vm_unmapped_area {}; + +typedef void (*btf_trace_vm_unmapped_area)(void *, long unsigned int, struct vm_unmapped_area_info *); + +enum pgt_entry { + NORMAL_PMD = 0, + HPAGE_PMD = 1, + NORMAL_PUD = 2, +}; + +struct rmap_walk_control { + void *arg; + bool (*rmap_one)(struct page *, struct vm_area_struct *, long unsigned int, void *); + int (*done)(struct page *); + struct anon_vma * (*anon_lock)(struct page *); + bool (*invalid_vma)(struct vm_area_struct *, void *); +}; + +struct page_referenced_arg { + int mapcount; + int referenced; + long unsigned int vm_flags; + struct mem_cgroup *memcg; +}; + +struct vmap_area { + long unsigned int va_start; + long unsigned int va_end; + struct rb_node rb_node; + struct list_head list; + union { + long unsigned int subtree_max_size; + struct vm_struct *vm; + }; +}; + +struct vfree_deferred { + struct llist_head list; + struct work_struct wq; +}; + +enum fit_type { + NOTHING_FIT = 0, + FL_FIT_TYPE = 1, + LE_FIT_TYPE = 2, + RE_FIT_TYPE = 3, + NE_FIT_TYPE = 4, +}; + +struct vmap_block_queue { + spinlock_t lock; + struct list_head free; +}; + +struct vmap_block { + spinlock_t lock; + struct vmap_area *va; + long unsigned int free; + long unsigned int dirty; + long unsigned int dirty_min; + long unsigned int dirty_max; + struct list_head free_list; + struct callback_head callback_head; + struct list_head purge; +}; + +struct vmap_pfn_data { + long unsigned int *pfns; + pgprot_t prot; + unsigned int idx; +}; + +struct page_frag_cache { + void *va; + __u16 offset; + __u16 size; + unsigned int pagecnt_bias; + bool pfmemalloc; +}; + +enum zone_flags { + ZONE_BOOSTED_WATERMARK = 0, +}; + +enum mminit_level { + MMINIT_WARNING = 0, + MMINIT_VERIFY = 1, + MMINIT_TRACE = 2, +}; + +typedef int fpi_t; + +struct pcpu_drain { + struct zone *zone; + struct work_struct work; +}; + +struct mminit_pfnnid_cache { + long unsigned int last_start; + long unsigned int last_end; + int last_nid; +}; + +enum { + MMOP_OFFLINE = 0, + MMOP_ONLINE = 1, + MMOP_ONLINE_KERNEL = 2, + MMOP_ONLINE_MOVABLE = 3, +}; + +typedef int mhp_t; + +typedef void (*online_page_callback_t)(struct page *, unsigned int); + +struct memory_block { + long unsigned int start_section_nr; + long unsigned int 
state; + int online_type; + int nid; + struct device dev; + long unsigned int nr_vmemmap_pages; +}; + +struct memory_notify { + long unsigned int start_pfn; + long unsigned int nr_pages; + int status_change_nid_normal; + int status_change_nid_high; + int status_change_nid; +}; + +typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *); + +enum hugetlb_page_flags { + HPG_restore_reserve = 0, + HPG_migratable = 1, + HPG_temporary = 2, + HPG_freed = 3, + __NR_HPAGEFLAGS = 4, +}; + +struct madvise_walk_private { + struct mmu_gather *tlb; + bool pageout; +}; + +struct vma_swap_readahead { + short unsigned int win; + short unsigned int offset; + short unsigned int nr_pte; + pte_t *ptes; +}; + +union swap_header { + struct { + char reserved[4086]; + char magic[10]; + } magic; + struct { + char bootbits[1024]; + __u32 version; + __u32 last_page; + __u32 nr_badpages; + unsigned char sws_uuid[16]; + unsigned char sws_volume[16]; + __u32 padding[117]; + __u32 badpages[1]; + } info; +}; + +struct swap_extent { + struct rb_node rb_node; + long unsigned int start_page; + long unsigned int nr_pages; + sector_t start_block; +}; + +struct swap_slots_cache { + bool lock_initialized; + struct mutex alloc_lock; + swp_entry_t *slots; + int nr; + int cur; + spinlock_t free_lock; + swp_entry_t *slots_ret; + int n_ret; +}; + +struct frontswap_ops { + void (*init)(unsigned int); + int (*store)(unsigned int, long unsigned int, struct page *); + int (*load)(unsigned int, long unsigned int, struct page *); + void (*invalidate_page)(unsigned int, long unsigned int); + void (*invalidate_area)(unsigned int); + struct frontswap_ops *next; +}; + +struct crypto_async_request; + +typedef void (*crypto_completion_t)(struct crypto_async_request *, int); + +struct crypto_async_request { + struct list_head list; + crypto_completion_t complete; + void *data; + struct crypto_tfm *tfm; + u32 flags; +}; + +struct crypto_wait { + struct completion completion; + int err; +}; + +struct zpool; + +struct zpool_ops { + int (*evict)(struct zpool *, long unsigned int); +}; + +enum zpool_mapmode { + ZPOOL_MM_RW = 0, + ZPOOL_MM_RO = 1, + ZPOOL_MM_WO = 2, + ZPOOL_MM_DEFAULT = 0, +}; + +struct acomp_req { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int slen; + unsigned int dlen; + u32 flags; + void *__ctx[0]; +}; + +struct crypto_acomp { + int (*compress)(struct acomp_req *); + int (*decompress)(struct acomp_req *); + void (*dst_free)(struct scatterlist *); + unsigned int reqsize; + struct crypto_tfm base; +}; + +struct crypto_acomp_ctx { + struct crypto_acomp *acomp; + struct acomp_req *req; + struct crypto_wait wait; + u8 *dstmem; + struct mutex *mutex; +}; + +struct zswap_pool { + struct zpool *zpool; + struct crypto_acomp_ctx *acomp_ctx; + struct kref kref; + struct list_head list; + struct work_struct release_work; + struct work_struct shrink_work; + struct hlist_node node; + char tfm_name[128]; +}; + +struct zswap_entry { + struct rb_node rbnode; + long unsigned int offset; + int refcount; + unsigned int length; + struct zswap_pool *pool; + union { + long unsigned int handle; + long unsigned int value; + }; +}; + +struct zswap_header { + swp_entry_t swpentry; +}; + +struct zswap_tree { + struct rb_root rbroot; + spinlock_t lock; +}; + +enum zswap_get_swap_ret { + ZSWAP_SWAPCACHE_NEW = 0, + ZSWAP_SWAPCACHE_EXIST = 1, + ZSWAP_SWAPCACHE_FAIL = 2, +}; + +struct dma_pool { + struct list_head page_list; + spinlock_t lock; + size_t size; + struct device *dev; + size_t 
allocation; + size_t boundary; + char name[32]; + struct list_head pools; +}; + +struct dma_page { + struct list_head page_list; + void *vaddr; + dma_addr_t dma; + unsigned int in_use; + unsigned int offset; +}; + +enum string_size_units { + STRING_UNITS_10 = 0, + STRING_UNITS_2 = 1, +}; + +typedef void (*node_registration_func_t)(struct node___2 *); + +enum mcopy_atomic_mode { + MCOPY_ATOMIC_NORMAL = 0, + MCOPY_ATOMIC_ZEROPAGE = 1, + MCOPY_ATOMIC_CONTINUE = 2, +}; + +struct resv_map { + struct kref refs; + spinlock_t lock; + struct list_head regions; + long int adds_in_progress; + struct list_head region_cache; + long int region_cache_count; + struct page_counter *reservation_counter; + long unsigned int pages_per_hpage; + struct cgroup_subsys_state *css; +}; + +struct file_region { + struct list_head link; + long int from; + long int to; + struct page_counter *reservation_counter; + struct cgroup_subsys_state *css; +}; + +struct huge_bootmem_page { + struct list_head list; + struct hstate *hstate; +}; + +enum hugetlb_memory_event { + HUGETLB_MAX = 0, + HUGETLB_NR_MEMORY_EVENTS = 1, +}; + +struct hugetlb_cgroup { + struct cgroup_subsys_state css; + struct page_counter hugepage[2]; + struct page_counter rsvd_hugepage[2]; + atomic_long_t events[2]; + atomic_long_t events_local[2]; + struct cgroup_file events_file[2]; + struct cgroup_file events_local_file[2]; +}; + +enum vma_resv_mode { + VMA_NEEDS_RESV = 0, + VMA_COMMIT_RESV = 1, + VMA_END_RESV = 2, + VMA_ADD_RESV = 3, + VMA_DEL_RESV = 4, +}; + +struct node_hstate { + struct kobject *hugepages_kobj; + struct kobject *hstate_kobjs[2]; +}; + +struct nodemask_scratch { + nodemask_t mask1; + nodemask_t mask2; +}; + +struct sp_node { + struct rb_node nd; + long unsigned int start; + long unsigned int end; + struct mempolicy *policy; +}; + +struct mempolicy_operations { + int (*create)(struct mempolicy *, const nodemask_t *); + void (*rebind)(struct mempolicy *, const nodemask_t *); +}; + +struct queue_pages { + struct list_head *pagelist; + long unsigned int flags; + nodemask_t *nmask; + long unsigned int start; + long unsigned int end; + struct vm_area_struct *first; +}; + +struct mmu_notifier_subscriptions { + struct hlist_head list; + bool has_itree; + spinlock_t lock; + long unsigned int invalidate_seq; + long unsigned int active_invalidate_ranges; + struct rb_root_cached itree; + wait_queue_head_t wq; + struct hlist_head deferred_list; +}; + +struct interval_tree_node { + struct rb_node rb; + long unsigned int start; + long unsigned int last; + long unsigned int __subtree_last; +}; + +struct mmu_interval_notifier; + +struct mmu_interval_notifier_ops { + bool (*invalidate)(struct mmu_interval_notifier *, const struct mmu_notifier_range *, long unsigned int); +}; + +struct mmu_interval_notifier { + struct interval_tree_node interval_tree; + const struct mmu_interval_notifier_ops *ops; + struct mm_struct *mm; + struct hlist_node deferred_item; + long unsigned int invalidate_seq; +}; + +struct rmap_item; + +struct mm_slot { + struct hlist_node link; + struct list_head mm_list; + struct rmap_item *rmap_list; + struct mm_struct *mm; +}; + +struct stable_node; + +struct rmap_item { + struct rmap_item *rmap_list; + union { + struct anon_vma *anon_vma; + int nid; + }; + struct mm_struct *mm; + long unsigned int address; + unsigned int oldchecksum; + union { + struct rb_node node; + struct { + struct stable_node *head; + struct hlist_node hlist; + }; + }; +}; + +struct ksm_scan { + struct mm_slot *mm_slot; + long unsigned int address; + struct 
rmap_item **rmap_list; + long unsigned int seqnr; +}; + +struct stable_node { + union { + struct rb_node node; + struct { + struct list_head *head; + struct { + struct hlist_node hlist_dup; + struct list_head list; + }; + }; + }; + struct hlist_head hlist; + union { + long unsigned int kpfn; + long unsigned int chain_prune_time; + }; + int rmap_hlist_len; + int nid; +}; + +enum get_ksm_page_flags { + GET_KSM_PAGE_NOLOCK = 0, + GET_KSM_PAGE_LOCK = 1, + GET_KSM_PAGE_TRYLOCK = 2, +}; + +enum stat_item { + ALLOC_FASTPATH = 0, + ALLOC_SLOWPATH = 1, + FREE_FASTPATH = 2, + FREE_SLOWPATH = 3, + FREE_FROZEN = 4, + FREE_ADD_PARTIAL = 5, + FREE_REMOVE_PARTIAL = 6, + ALLOC_FROM_PARTIAL = 7, + ALLOC_SLAB = 8, + ALLOC_REFILL = 9, + ALLOC_NODE_MISMATCH = 10, + FREE_SLAB = 11, + CPUSLAB_FLUSH = 12, + DEACTIVATE_FULL = 13, + DEACTIVATE_EMPTY = 14, + DEACTIVATE_TO_HEAD = 15, + DEACTIVATE_TO_TAIL = 16, + DEACTIVATE_REMOTE_FREES = 17, + DEACTIVATE_BYPASS = 18, + ORDER_FALLBACK = 19, + CMPXCHG_DOUBLE_CPU_FAIL = 20, + CMPXCHG_DOUBLE_FAIL = 21, + CPU_PARTIAL_ALLOC = 22, + CPU_PARTIAL_FREE = 23, + CPU_PARTIAL_NODE = 24, + CPU_PARTIAL_DRAIN = 25, + NR_SLUB_STAT_ITEMS = 26, +}; + +struct track { + long unsigned int addr; + long unsigned int addrs[16]; + int cpu; + int pid; + long unsigned int when; +}; + +enum track_item { + TRACK_ALLOC = 0, + TRACK_FREE = 1, +}; + +struct detached_freelist { + struct page *page; + void *tail; + void *freelist; + int cnt; + struct kmem_cache *s; +}; + +struct location { + long unsigned int count; + long unsigned int addr; + long long int sum_time; + long int min_time; + long int max_time; + long int min_pid; + long int max_pid; + long unsigned int cpus[128]; + nodemask_t nodes; +}; + +struct loc_track { + long unsigned int max; + long unsigned int count; + struct location *loc; +}; + +enum slab_stat_type { + SL_ALL = 0, + SL_PARTIAL = 1, + SL_CPU = 2, + SL_OBJECTS = 3, + SL_TOTAL = 4, +}; + +struct slab_attribute { + struct attribute attr; + ssize_t (*show)(struct kmem_cache *, char *); + ssize_t (*store)(struct kmem_cache *, const char *, size_t); +}; + +struct saved_alias { + struct kmem_cache *s; + const char *name; + struct saved_alias *next; +}; + +enum slab_modes { + M_NONE = 0, + M_PARTIAL = 1, + M_FULL = 2, + M_FREE = 3, +}; + +struct buffer_head; + +typedef void bh_end_io_t(struct buffer_head *, int); + +struct buffer_head { + long unsigned int b_state; + struct buffer_head *b_this_page; + struct page *b_page; + sector_t b_blocknr; + size_t b_size; + char *b_data; + struct block_device *b_bdev; + bh_end_io_t *b_end_io; + void *b_private; + struct list_head b_assoc_buffers; + struct address_space *b_assoc_map; + atomic_t b_count; + spinlock_t b_uptodate_lock; +}; + +enum migrate_vma_direction { + MIGRATE_VMA_SELECT_SYSTEM = 1, + MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 2, +}; + +struct migrate_vma { + struct vm_area_struct *vma; + long unsigned int *dst; + long unsigned int *src; + long unsigned int cpages; + long unsigned int npages; + long unsigned int start; + long unsigned int end; + void *pgmap_owner; + long unsigned int flags; +}; + +enum bh_state_bits { + BH_Uptodate = 0, + BH_Dirty = 1, + BH_Lock = 2, + BH_Req = 3, + BH_Mapped = 4, + BH_New = 5, + BH_Async_Read = 6, + BH_Async_Write = 7, + BH_Delay = 8, + BH_Boundary = 9, + BH_Write_EIO = 10, + BH_Unwritten = 11, + BH_Quiet = 12, + BH_Meta = 13, + BH_Prio = 14, + BH_Defer_Completion = 15, + BH_PrivateStart = 16, +}; + +struct trace_event_raw_mm_migrate_pages { + struct trace_entry ent; + long unsigned int succeeded; + 
long unsigned int failed; + long unsigned int thp_succeeded; + long unsigned int thp_failed; + long unsigned int thp_split; + enum migrate_mode mode; + int reason; + char __data[0]; +}; + +struct trace_event_raw_mm_migrate_pages_start { + struct trace_entry ent; + enum migrate_mode mode; + int reason; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_migrate_pages {}; + +struct trace_event_data_offsets_mm_migrate_pages_start {}; + +typedef void (*btf_trace_mm_migrate_pages)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, enum migrate_mode, int); + +typedef void (*btf_trace_mm_migrate_pages_start)(void *, enum migrate_mode, int); + +enum scan_result { + SCAN_FAIL = 0, + SCAN_SUCCEED = 1, + SCAN_PMD_NULL = 2, + SCAN_EXCEED_NONE_PTE = 3, + SCAN_EXCEED_SWAP_PTE = 4, + SCAN_EXCEED_SHARED_PTE = 5, + SCAN_PTE_NON_PRESENT = 6, + SCAN_PTE_UFFD_WP = 7, + SCAN_PAGE_RO = 8, + SCAN_LACK_REFERENCED_PAGE = 9, + SCAN_PAGE_NULL = 10, + SCAN_SCAN_ABORT = 11, + SCAN_PAGE_COUNT = 12, + SCAN_PAGE_LRU = 13, + SCAN_PAGE_LOCK = 14, + SCAN_PAGE_ANON = 15, + SCAN_PAGE_COMPOUND = 16, + SCAN_ANY_PROCESS = 17, + SCAN_VMA_NULL = 18, + SCAN_VMA_CHECK = 19, + SCAN_ADDRESS_RANGE = 20, + SCAN_SWAP_CACHE_PAGE = 21, + SCAN_DEL_PAGE_LRU = 22, + SCAN_ALLOC_HUGE_PAGE_FAIL = 23, + SCAN_CGROUP_CHARGE_FAIL = 24, + SCAN_TRUNCATED = 25, + SCAN_PAGE_HAS_PRIVATE = 26, +}; + +struct trace_event_raw_mm_khugepaged_scan_pmd { + struct trace_entry ent; + struct mm_struct *mm; + long unsigned int pfn; + bool writable; + int referenced; + int none_or_zero; + int status; + int unmapped; + char __data[0]; +}; + +struct trace_event_raw_mm_collapse_huge_page { + struct trace_entry ent; + struct mm_struct *mm; + int isolated; + int status; + char __data[0]; +}; + +struct trace_event_raw_mm_collapse_huge_page_isolate { + struct trace_entry ent; + long unsigned int pfn; + int none_or_zero; + int referenced; + bool writable; + int status; + char __data[0]; +}; + +struct trace_event_raw_mm_collapse_huge_page_swapin { + struct trace_entry ent; + struct mm_struct *mm; + int swapped_in; + int referenced; + int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_khugepaged_scan_pmd {}; + +struct trace_event_data_offsets_mm_collapse_huge_page {}; + +struct trace_event_data_offsets_mm_collapse_huge_page_isolate {}; + +struct trace_event_data_offsets_mm_collapse_huge_page_swapin {}; + +typedef void (*btf_trace_mm_khugepaged_scan_pmd)(void *, struct mm_struct *, struct page *, bool, int, int, int, int); + +typedef void (*btf_trace_mm_collapse_huge_page)(void *, struct mm_struct *, int, int); + +typedef void (*btf_trace_mm_collapse_huge_page_isolate)(void *, struct page *, int, int, bool, int); + +typedef void (*btf_trace_mm_collapse_huge_page_swapin)(void *, struct mm_struct *, int, int, int); + +struct mm_slot___2 { + struct hlist_node hash; + struct list_head mm_node; + struct mm_struct *mm; + int nr_pte_mapped_thp; + long unsigned int pte_mapped_thp[8]; +}; + +struct khugepaged_scan { + struct list_head mm_head; + struct mm_slot___2 *mm_slot; + long unsigned int address; +}; + +struct mem_cgroup_tree_per_node { + struct rb_root rb_root; + struct rb_node *rb_rightmost; + spinlock_t lock; +}; + +struct mem_cgroup_tree { + struct mem_cgroup_tree_per_node *rb_tree_per_node[1024]; +}; + +struct mem_cgroup_eventfd_list { + struct list_head list; + struct eventfd_ctx *eventfd; +}; + +struct mem_cgroup_event { + struct mem_cgroup *memcg; + struct eventfd_ctx *eventfd; + struct list_head 
list; + int (*register_event)(struct mem_cgroup *, struct eventfd_ctx *, const char *); + void (*unregister_event)(struct mem_cgroup *, struct eventfd_ctx *); + poll_table pt; + wait_queue_head_t *wqh; + wait_queue_entry_t wait; + struct work_struct remove; +}; + +struct move_charge_struct { + spinlock_t lock; + struct mm_struct *mm; + struct mem_cgroup *from; + struct mem_cgroup *to; + long unsigned int flags; + long unsigned int precharge; + long unsigned int moved_charge; + long unsigned int moved_swap; + struct task_struct *moving_task; + wait_queue_head_t waitq; +}; + +enum res_type { + _MEM = 0, + _MEMSWAP = 1, + _OOM_TYPE = 2, + _KMEM = 3, + _TCP = 4, +}; + +struct memory_stat { + const char *name; + unsigned int idx; +}; + +struct oom_wait_info { + struct mem_cgroup *memcg; + wait_queue_entry_t wait; +}; + +enum oom_status { + OOM_SUCCESS = 0, + OOM_FAILED = 1, + OOM_ASYNC = 2, + OOM_SKIPPED = 3, +}; + +struct memcg_stock_pcp { + struct mem_cgroup *cached; + unsigned int nr_pages; + struct obj_cgroup *cached_objcg; + unsigned int nr_bytes; + struct work_struct work; + long unsigned int flags; +}; + +enum { + RES_USAGE = 0, + RES_LIMIT = 1, + RES_MAX_USAGE = 2, + RES_FAILCNT = 3, + RES_SOFT_LIMIT = 4, +}; + +union mc_target { + struct page *page; + swp_entry_t ent; +}; + +enum mc_target_type { + MC_TARGET_NONE = 0, + MC_TARGET_PAGE = 1, + MC_TARGET_SWAP = 2, + MC_TARGET_DEVICE = 3, +}; + +struct uncharge_gather { + struct mem_cgroup *memcg; + long unsigned int nr_memory; + long unsigned int pgpgout; + long unsigned int nr_kmem; + struct page *dummy_page; +}; + +struct numa_stat { + const char *name; + unsigned int lru_mask; +}; + +enum vmpressure_levels { + VMPRESSURE_LOW = 0, + VMPRESSURE_MEDIUM = 1, + VMPRESSURE_CRITICAL = 2, + VMPRESSURE_NUM_LEVELS = 3, +}; + +enum vmpressure_modes { + VMPRESSURE_NO_PASSTHROUGH = 0, + VMPRESSURE_HIERARCHY = 1, + VMPRESSURE_LOCAL = 2, + VMPRESSURE_NUM_MODES = 3, +}; + +struct vmpressure_event { + struct eventfd_ctx *efd; + enum vmpressure_levels level; + enum vmpressure_modes mode; + struct list_head node; +}; + +struct swap_cgroup_ctrl { + struct page **map; + long unsigned int length; + spinlock_t lock; +}; + +struct swap_cgroup { + short unsigned int id; +}; + +enum { + RES_USAGE___2 = 0, + RES_RSVD_USAGE = 1, + RES_LIMIT___2 = 2, + RES_RSVD_LIMIT = 3, + RES_MAX_USAGE___2 = 4, + RES_RSVD_MAX_USAGE = 5, + RES_FAILCNT___2 = 6, + RES_RSVD_FAILCNT = 7, +}; + +enum mf_result { + MF_IGNORED = 0, + MF_FAILED = 1, + MF_DELAYED = 2, + MF_RECOVERED = 3, +}; + +enum mf_action_page_type { + MF_MSG_KERNEL = 0, + MF_MSG_KERNEL_HIGH_ORDER = 1, + MF_MSG_SLAB = 2, + MF_MSG_DIFFERENT_COMPOUND = 3, + MF_MSG_POISONED_HUGE = 4, + MF_MSG_HUGE = 5, + MF_MSG_FREE_HUGE = 6, + MF_MSG_NON_PMD_HUGE = 7, + MF_MSG_UNMAP_FAILED = 8, + MF_MSG_DIRTY_SWAPCACHE = 9, + MF_MSG_CLEAN_SWAPCACHE = 10, + MF_MSG_DIRTY_MLOCKED_LRU = 11, + MF_MSG_CLEAN_MLOCKED_LRU = 12, + MF_MSG_DIRTY_UNEVICTABLE_LRU = 13, + MF_MSG_CLEAN_UNEVICTABLE_LRU = 14, + MF_MSG_DIRTY_LRU = 15, + MF_MSG_CLEAN_LRU = 16, + MF_MSG_TRUNCATED_LRU = 17, + MF_MSG_BUDDY = 18, + MF_MSG_BUDDY_2ND = 19, + MF_MSG_DAX = 20, + MF_MSG_UNSPLIT_THP = 21, + MF_MSG_UNKNOWN = 22, +}; + +typedef long unsigned int dax_entry_t; + +struct __kfifo { + unsigned int in; + unsigned int out; + unsigned int mask; + unsigned int esize; + void *data; +}; + +struct to_kill { + struct list_head nd; + struct task_struct *tsk; + long unsigned int addr; + short int size_shift; +}; + +struct page_state { + long unsigned int mask; + long unsigned int 
res; + enum mf_action_page_type type; + int (*action)(struct page *, long unsigned int); +}; + +struct memory_failure_entry { + long unsigned int pfn; + int flags; +}; + +struct memory_failure_cpu { + struct { + union { + struct __kfifo kfifo; + struct memory_failure_entry *type; + const struct memory_failure_entry *const_type; + char (*rectype)[0]; + struct memory_failure_entry *ptr; + const struct memory_failure_entry *ptr_const; + }; + struct memory_failure_entry buf[16]; + } fifo; + spinlock_t lock; + struct work_struct work; +}; + +struct cleancache_filekey { + union { + ino_t ino; + __u32 fh[6]; + u32 key[6]; + } u; +}; + +struct cleancache_ops { + int (*init_fs)(size_t); + int (*init_shared_fs)(uuid_t *, size_t); + int (*get_page)(int, struct cleancache_filekey, long unsigned int, struct page *); + void (*put_page)(int, struct cleancache_filekey, long unsigned int, struct page *); + void (*invalidate_page)(int, struct cleancache_filekey, long unsigned int); + void (*invalidate_inode)(int, struct cleancache_filekey); + void (*invalidate_fs)(int); +}; + +struct trace_event_raw_test_pages_isolated { + struct trace_entry ent; + long unsigned int start_pfn; + long unsigned int end_pfn; + long unsigned int fin_pfn; + char __data[0]; +}; + +struct trace_event_data_offsets_test_pages_isolated {}; + +typedef void (*btf_trace_test_pages_isolated)(void *, long unsigned int, long unsigned int, long unsigned int); + +struct zpool_driver; + +struct zpool { + struct zpool_driver *driver; + void *pool; + const struct zpool_ops *ops; + bool evictable; + bool can_sleep_mapped; + struct list_head list; +}; + +struct zpool_driver { + char *type; + struct module *owner; + atomic_t refcount; + struct list_head list; + void * (*create)(const char *, gfp_t, const struct zpool_ops *, struct zpool *); + void (*destroy)(void *); + bool malloc_support_movable; + int (*malloc)(void *, size_t, gfp_t, long unsigned int *); + void (*free)(void *, long unsigned int); + int (*shrink)(void *, unsigned int, unsigned int *); + bool sleep_mapped; + void * (*map)(void *, long unsigned int, enum zpool_mapmode); + void (*unmap)(void *, long unsigned int); + u64 (*total_size)(void *); +}; + +struct zbud_pool; + +struct zbud_ops { + int (*evict)(struct zbud_pool *, long unsigned int); +}; + +struct zbud_pool { + spinlock_t lock; + struct list_head unbuddied[63]; + struct list_head buddied; + struct list_head lru; + u64 pages_nr; + const struct zbud_ops *ops; + struct zpool *zpool; + const struct zpool_ops *zpool_ops; +}; + +struct zbud_header { + struct list_head buddy; + struct list_head lru; + unsigned int first_chunks; + unsigned int last_chunks; + bool under_reclaim; +}; + +enum buddy { + FIRST = 0, + LAST = 1, +}; + +enum zs_mapmode { + ZS_MM_RW = 0, + ZS_MM_RO = 1, + ZS_MM_WO = 2, +}; + +struct zs_pool_stats { + atomic_long_t pages_compacted; +}; + +enum fullness_group { + ZS_EMPTY = 0, + ZS_ALMOST_EMPTY = 1, + ZS_ALMOST_FULL = 2, + ZS_FULL = 3, + NR_ZS_FULLNESS = 4, +}; + +enum zs_stat_type { + CLASS_EMPTY = 0, + CLASS_ALMOST_EMPTY = 1, + CLASS_ALMOST_FULL = 2, + CLASS_FULL = 3, + OBJ_ALLOCATED = 4, + OBJ_USED = 5, + NR_ZS_STAT_TYPE = 6, +}; + +struct zs_size_stat { + long unsigned int objs[6]; +}; + +struct size_class { + spinlock_t lock; + struct list_head fullness_list[4]; + int size; + int objs_per_zspage; + int pages_per_zspage; + unsigned int index; + struct zs_size_stat stats; +}; + +struct link_free { + union { + long unsigned int next; + long unsigned int handle; + }; +}; + +struct zs_pool { + const char 
*name; + struct size_class *size_class[255]; + struct kmem_cache *handle_cachep; + struct kmem_cache *zspage_cachep; + atomic_long_t pages_allocated; + struct zs_pool_stats stats; + struct shrinker shrinker; + struct inode *inode; + struct work_struct free_work; + struct wait_queue_head migration_wait; + atomic_long_t isolated_pages; + bool destroying; +}; + +struct zspage { + struct { + unsigned int fullness: 2; + unsigned int class: 9; + unsigned int isolated: 3; + unsigned int magic: 8; + }; + unsigned int inuse; + unsigned int freeobj; + struct page *first_page; + struct list_head list; + rwlock_t lock; +}; + +struct mapping_area { + char *vm_buf; + char *vm_addr; + enum zs_mapmode vm_mm; +}; + +struct zs_compact_control { + struct page *s_page; + struct page *d_page; + int obj_idx; +}; + +struct balloon_dev_info { + long unsigned int isolated_pages; + spinlock_t pages_lock; + struct list_head pages; + int (*migratepage)(struct balloon_dev_info *, struct page *, struct page *, enum migrate_mode); + struct inode *inode; +}; + +enum { + BAD_STACK = 4294967295, + NOT_STACK = 0, + GOOD_FRAME = 1, + GOOD_STACK = 2, +}; + +enum hmm_pfn_flags { + HMM_PFN_VALID = 0, + HMM_PFN_WRITE = 0, + HMM_PFN_ERROR = 0, + HMM_PFN_ORDER_SHIFT = 56, + HMM_PFN_REQ_FAULT = 0, + HMM_PFN_REQ_WRITE = 0, + HMM_PFN_FLAGS = 0, +}; + +struct hmm_range { + struct mmu_interval_notifier *notifier; + long unsigned int notifier_seq; + long unsigned int start; + long unsigned int end; + long unsigned int *hmm_pfns; + long unsigned int default_flags; + long unsigned int pfn_flags_mask; + void *dev_private_owner; +}; + +struct hmm_vma_walk { + struct hmm_range *range; + long unsigned int last; +}; + +enum { + HMM_NEED_FAULT = 1, + HMM_NEED_WRITE_FAULT = 2, + HMM_NEED_ALL_BITS = 3, +}; + +struct hugetlbfs_inode_info { + struct shared_policy policy; + struct inode vfs_inode; + unsigned int seals; +}; + +struct wp_walk { + struct mmu_notifier_range range; + long unsigned int tlbflush_start; + long unsigned int tlbflush_end; + long unsigned int total; +}; + +struct clean_walk { + struct wp_walk base; + long unsigned int bitmap_pgoff; + long unsigned int *bitmap; + long unsigned int start; + long unsigned int end; +}; + +struct page_reporting_dev_info { + int (*report)(struct page_reporting_dev_info *, struct scatterlist *, unsigned int); + struct delayed_work work; + atomic_t state; +}; + +enum { + PAGE_REPORTING_IDLE = 0, + PAGE_REPORTING_REQUESTED = 1, + PAGE_REPORTING_ACTIVE = 2, +}; + +struct open_how { + __u64 flags; + __u64 mode; + __u64 resolve; +}; + +typedef s32 compat_off_t; + +struct open_flags { + int open_flag; + umode_t mode; + int acc_mode; + int intent; + int lookup_flags; +}; + +typedef __kernel_long_t __kernel_off_t; + +typedef __kernel_off_t off_t; + +typedef __kernel_rwf_t rwf_t; + +struct fscrypt_policy_v1 { + __u8 version; + __u8 contents_encryption_mode; + __u8 filenames_encryption_mode; + __u8 flags; + __u8 master_key_descriptor[8]; +}; + +struct fscrypt_policy_v2 { + __u8 version; + __u8 contents_encryption_mode; + __u8 filenames_encryption_mode; + __u8 flags; + __u8 __reserved[4]; + __u8 master_key_identifier[16]; +}; + +union fscrypt_policy { + u8 version; + struct fscrypt_policy_v1 v1; + struct fscrypt_policy_v2 v2; +}; + +enum vfs_get_super_keying { + vfs_get_single_super = 0, + vfs_get_single_reconf_super = 1, + vfs_get_keyed_super = 2, + vfs_get_independent_super = 3, +}; + +typedef struct kobject *kobj_probe_t(dev_t, int *, void *); + +struct kobj_map; + +struct char_device_struct { + struct 
char_device_struct *next; + unsigned int major; + unsigned int baseminor; + int minorct; + char name[64]; + struct cdev *cdev; +}; + +struct stat { + __kernel_ulong_t st_dev; + __kernel_ulong_t st_ino; + __kernel_ulong_t st_nlink; + unsigned int st_mode; + unsigned int st_uid; + unsigned int st_gid; + unsigned int __pad0; + __kernel_ulong_t st_rdev; + __kernel_long_t st_size; + __kernel_long_t st_blksize; + __kernel_long_t st_blocks; + __kernel_ulong_t st_atime; + __kernel_ulong_t st_atime_nsec; + __kernel_ulong_t st_mtime; + __kernel_ulong_t st_mtime_nsec; + __kernel_ulong_t st_ctime; + __kernel_ulong_t st_ctime_nsec; + __kernel_long_t __unused[3]; +}; + +struct __old_kernel_stat { + short unsigned int st_dev; + short unsigned int st_ino; + short unsigned int st_mode; + short unsigned int st_nlink; + short unsigned int st_uid; + short unsigned int st_gid; + short unsigned int st_rdev; + unsigned int st_size; + unsigned int st_atime; + unsigned int st_mtime; + unsigned int st_ctime; +}; + +struct statx_timestamp { + __s64 tv_sec; + __u32 tv_nsec; + __s32 __reserved; +}; + +struct statx { + __u32 stx_mask; + __u32 stx_blksize; + __u64 stx_attributes; + __u32 stx_nlink; + __u32 stx_uid; + __u32 stx_gid; + __u16 stx_mode; + __u16 __spare0[1]; + __u64 stx_ino; + __u64 stx_size; + __u64 stx_blocks; + __u64 stx_attributes_mask; + struct statx_timestamp stx_atime; + struct statx_timestamp stx_btime; + struct statx_timestamp stx_ctime; + struct statx_timestamp stx_mtime; + __u32 stx_rdev_major; + __u32 stx_rdev_minor; + __u32 stx_dev_major; + __u32 stx_dev_minor; + __u64 stx_mnt_id; + __u64 __spare2; + __u64 __spare3[12]; +}; + +struct mount; + +struct mnt_namespace { + struct ns_common ns; + struct mount *root; + struct list_head list; + spinlock_t ns_lock; + struct user_namespace *user_ns; + struct ucounts *ucounts; + u64 seq; + wait_queue_head_t poll; + u64 event; + unsigned int mounts; + unsigned int pending_mounts; +}; + +typedef u32 compat_ino_t; + +typedef u16 __compat_uid_t; + +typedef u16 __compat_gid_t; + +typedef u16 compat_mode_t; + +typedef u16 compat_dev_t; + +typedef u16 compat_nlink_t; + +struct compat_stat { + compat_dev_t st_dev; + u16 __pad1; + compat_ino_t st_ino; + compat_mode_t st_mode; + compat_nlink_t st_nlink; + __compat_uid_t st_uid; + __compat_gid_t st_gid; + compat_dev_t st_rdev; + u16 __pad2; + u32 st_size; + u32 st_blksize; + u32 st_blocks; + u32 st_atime; + u32 st_atime_nsec; + u32 st_mtime; + u32 st_mtime_nsec; + u32 st_ctime; + u32 st_ctime_nsec; + u32 __unused4; + u32 __unused5; +}; + +struct mnt_pcp; + +struct mountpoint; + +struct mount { + struct hlist_node mnt_hash; + struct mount *mnt_parent; + struct dentry *mnt_mountpoint; + struct vfsmount mnt; + union { + struct callback_head mnt_rcu; + struct llist_node mnt_llist; + }; + struct mnt_pcp *mnt_pcp; + struct list_head mnt_mounts; + struct list_head mnt_child; + struct list_head mnt_instance; + const char *mnt_devname; + struct list_head mnt_list; + struct list_head mnt_expire; + struct list_head mnt_share; + struct list_head mnt_slave_list; + struct list_head mnt_slave; + struct mount *mnt_master; + struct mnt_namespace *mnt_ns; + struct mountpoint *mnt_mp; + union { + struct hlist_node mnt_mp_list; + struct hlist_node mnt_umount; + }; + struct list_head mnt_umounting; + struct fsnotify_mark_connector *mnt_fsnotify_marks; + __u32 mnt_fsnotify_mask; + int mnt_id; + int mnt_group_id; + int mnt_expiry_mark; + struct hlist_head mnt_pins; + struct hlist_head mnt_stuck_children; +}; + +struct mnt_pcp { + int 
mnt_count; + int mnt_writers; +}; + +struct mountpoint { + struct hlist_node m_hash; + struct dentry *m_dentry; + struct hlist_head m_list; + int m_count; +}; + +typedef short unsigned int ushort; + +struct user_arg_ptr { + bool is_compat; + union { + const char * const *native; + const compat_uptr_t *compat; + } ptr; +}; + +enum inode_i_mutex_lock_class { + I_MUTEX_NORMAL = 0, + I_MUTEX_PARENT = 1, + I_MUTEX_CHILD = 2, + I_MUTEX_XATTR = 3, + I_MUTEX_NONDIR2 = 4, + I_MUTEX_PARENT2 = 5, +}; + +struct name_snapshot { + struct qstr name; + unsigned char inline_name[32]; +}; + +struct saved { + struct path link; + struct delayed_call done; + const char *name; + unsigned int seq; +}; + +struct nameidata { + struct path path; + struct qstr last; + struct path root; + struct inode *inode; + unsigned int flags; + unsigned int seq; + unsigned int m_seq; + unsigned int r_seq; + int last_type; + unsigned int depth; + int total_link_count; + struct saved *stack; + struct saved internal[2]; + struct filename *name; + struct nameidata *saved; + unsigned int root_seq; + int dfd; + kuid_t dir_uid; + umode_t dir_mode; +}; + +struct renamedata { + struct user_namespace *old_mnt_userns; + struct inode *old_dir; + struct dentry *old_dentry; + struct user_namespace *new_mnt_userns; + struct inode *new_dir; + struct dentry *new_dentry; + struct inode **delegated_inode; + unsigned int flags; +}; + +enum { + LAST_NORM = 0, + LAST_ROOT = 1, + LAST_DOT = 2, + LAST_DOTDOT = 3, +}; + +enum { + WALK_TRAILING = 1, + WALK_MORE = 2, + WALK_NOFOLLOW = 4, +}; + +struct word_at_a_time { + const long unsigned int one_bits; + const long unsigned int high_bits; +}; + +struct f_owner_ex { + int type; + __kernel_pid_t pid; +}; + +struct flock { + short int l_type; + short int l_whence; + __kernel_off_t l_start; + __kernel_off_t l_len; + __kernel_pid_t l_pid; +}; + +struct compat_flock { + short int l_type; + short int l_whence; + compat_off_t l_start; + compat_off_t l_len; + compat_pid_t l_pid; +}; + +struct compat_flock64 { + short int l_type; + short int l_whence; + compat_loff_t l_start; + compat_loff_t l_len; + compat_pid_t l_pid; +} __attribute__((packed)); + +struct file_clone_range { + __s64 src_fd; + __u64 src_offset; + __u64 src_length; + __u64 dest_offset; +}; + +struct file_dedupe_range_info { + __s64 dest_fd; + __u64 dest_offset; + __u64 bytes_deduped; + __s32 status; + __u32 reserved; +}; + +struct file_dedupe_range { + __u64 src_offset; + __u64 src_length; + __u16 dest_count; + __u16 reserved1; + __u32 reserved2; + struct file_dedupe_range_info info[0]; +}; + +struct fsxattr { + __u32 fsx_xflags; + __u32 fsx_extsize; + __u32 fsx_nextents; + __u32 fsx_projid; + __u32 fsx_cowextsize; + unsigned char fsx_pad[8]; +}; + +typedef int get_block_t(struct inode *, sector_t, struct buffer_head *, int); + +struct fiemap_extent; + +struct fiemap_extent_info { + unsigned int fi_flags; + unsigned int fi_extents_mapped; + unsigned int fi_extents_max; + struct fiemap_extent *fi_extents_start; +}; + +struct fileattr { + u32 flags; + u32 fsx_xflags; + u32 fsx_extsize; + u32 fsx_nextents; + u32 fsx_projid; + u32 fsx_cowextsize; + bool flags_valid: 1; + bool fsx_valid: 1; +}; + +struct space_resv { + __s16 l_type; + __s16 l_whence; + __s64 l_start; + __s64 l_len; + __s32 l_sysid; + __u32 l_pid; + __s32 l_pad[4]; +}; + +struct space_resv_32 { + __s16 l_type; + __s16 l_whence; + __s64 l_start; + __s64 l_len; + __s32 l_sysid; + __u32 l_pid; + __s32 l_pad[4]; +} __attribute__((packed)); + +struct fiemap_extent { + __u64 fe_logical; + 
__u64 fe_physical; + __u64 fe_length; + __u64 fe_reserved64[2]; + __u32 fe_flags; + __u32 fe_reserved[3]; +}; + +struct fiemap { + __u64 fm_start; + __u64 fm_length; + __u32 fm_flags; + __u32 fm_mapped_extents; + __u32 fm_extent_count; + __u32 fm_reserved; + struct fiemap_extent fm_extents[0]; +}; + +struct linux_dirent64 { + u64 d_ino; + s64 d_off; + short unsigned int d_reclen; + unsigned char d_type; + char d_name[0]; +}; + +struct old_linux_dirent { + long unsigned int d_ino; + long unsigned int d_offset; + short unsigned int d_namlen; + char d_name[1]; +}; + +struct readdir_callback { + struct dir_context ctx; + struct old_linux_dirent *dirent; + int result; +}; + +struct linux_dirent { + long unsigned int d_ino; + long unsigned int d_off; + short unsigned int d_reclen; + char d_name[1]; +}; + +struct getdents_callback { + struct dir_context ctx; + struct linux_dirent *current_dir; + int prev_reclen; + int count; + int error; +}; + +struct getdents_callback64 { + struct dir_context ctx; + struct linux_dirent64 *current_dir; + int prev_reclen; + int count; + int error; +}; + +struct compat_old_linux_dirent { + compat_ulong_t d_ino; + compat_ulong_t d_offset; + short unsigned int d_namlen; + char d_name[1]; +}; + +struct compat_readdir_callback { + struct dir_context ctx; + struct compat_old_linux_dirent *dirent; + int result; +}; + +struct compat_linux_dirent { + compat_ulong_t d_ino; + compat_ulong_t d_off; + short unsigned int d_reclen; + char d_name[1]; +}; + +struct compat_getdents_callback { + struct dir_context ctx; + struct compat_linux_dirent *current_dir; + int prev_reclen; + int count; + int error; +}; + +typedef struct { + long unsigned int fds_bits[16]; +} __kernel_fd_set; + +typedef __kernel_fd_set fd_set; + +struct poll_table_entry { + struct file *filp; + __poll_t key; + wait_queue_entry_t wait; + wait_queue_head_t *wait_address; +}; + +struct poll_table_page; + +struct poll_wqueues { + poll_table pt; + struct poll_table_page *table; + struct task_struct *polling_task; + int triggered; + int error; + int inline_index; + struct poll_table_entry inline_entries[9]; +}; + +struct poll_table_page { + struct poll_table_page *next; + struct poll_table_entry *entry; + struct poll_table_entry entries[0]; +}; + +enum poll_time_type { + PT_TIMEVAL = 0, + PT_OLD_TIMEVAL = 1, + PT_TIMESPEC = 2, + PT_OLD_TIMESPEC = 3, +}; + +typedef struct { + long unsigned int *in; + long unsigned int *out; + long unsigned int *ex; + long unsigned int *res_in; + long unsigned int *res_out; + long unsigned int *res_ex; +} fd_set_bits; + +struct sigset_argpack { + sigset_t *p; + size_t size; +}; + +struct poll_list { + struct poll_list *next; + int len; + struct pollfd entries[0]; +}; + +struct compat_sel_arg_struct { + compat_ulong_t n; + compat_uptr_t inp; + compat_uptr_t outp; + compat_uptr_t exp; + compat_uptr_t tvp; +}; + +struct compat_sigset_argpack { + compat_uptr_t p; + compat_size_t size; +}; + +enum dentry_d_lock_class { + DENTRY_D_LOCK_NORMAL = 0, + DENTRY_D_LOCK_NESTED = 1, +}; + +struct external_name { + union { + atomic_t count; + struct callback_head head; + } u; + unsigned char name[0]; +}; + +enum d_walk_ret { + D_WALK_CONTINUE = 0, + D_WALK_QUIT = 1, + D_WALK_NORETRY = 2, + D_WALK_SKIP = 3, +}; + +struct check_mount { + struct vfsmount *mnt; + unsigned int mounted; +}; + +struct select_data { + struct dentry *start; + union { + long int found; + struct dentry *victim; + }; + struct list_head dispose; +}; + +enum file_time_flags { + S_ATIME = 1, + S_MTIME = 2, + S_CTIME = 4, + 
S_VERSION = 8, +}; + +struct mount_attr { + __u64 attr_set; + __u64 attr_clr; + __u64 propagation; + __u64 userns_fd; +}; + +struct proc_mounts { + struct mnt_namespace *ns; + struct path root; + int (*show)(struct seq_file *, struct vfsmount *); + struct mount cursor; +}; + +struct mount_kattr { + unsigned int attr_set; + unsigned int attr_clr; + unsigned int propagation; + unsigned int lookup_flags; + bool recurse; + struct user_namespace *mnt_userns; +}; + +enum umount_tree_flags { + UMOUNT_SYNC = 1, + UMOUNT_PROPAGATE = 2, + UMOUNT_CONNECTED = 4, +}; + +struct unicode_map { + const char *charset; + int version; +}; + +struct simple_transaction_argresp { + ssize_t size; + char data[0]; +}; + +struct simple_attr { + int (*get)(void *, u64 *); + int (*set)(void *, u64); + char get_buf[24]; + char set_buf[24]; + void *data; + const char *fmt; + struct mutex mutex; +}; + +struct wb_writeback_work { + long int nr_pages; + struct super_block *sb; + enum writeback_sync_modes sync_mode; + unsigned int tagged_writepages: 1; + unsigned int for_kupdate: 1; + unsigned int range_cyclic: 1; + unsigned int for_background: 1; + unsigned int for_sync: 1; + unsigned int auto_free: 1; + enum wb_reason reason; + struct list_head list; + struct wb_completion *done; +}; + +struct trace_event_raw_writeback_page_template { + struct trace_entry ent; + char name[32]; + ino_t ino; + long unsigned int index; + char __data[0]; +}; + +struct trace_event_raw_writeback_dirty_inode_template { + struct trace_entry ent; + char name[32]; + ino_t ino; + long unsigned int state; + long unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_inode_foreign_history { + struct trace_entry ent; + char name[32]; + ino_t ino; + ino_t cgroup_ino; + unsigned int history; + char __data[0]; +}; + +struct trace_event_raw_inode_switch_wbs { + struct trace_entry ent; + char name[32]; + ino_t ino; + ino_t old_cgroup_ino; + ino_t new_cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_track_foreign_dirty { + struct trace_entry ent; + char name[32]; + u64 bdi_id; + ino_t ino; + unsigned int memcg_id; + ino_t cgroup_ino; + ino_t page_cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_flush_foreign { + struct trace_entry ent; + char name[32]; + ino_t cgroup_ino; + unsigned int frn_bdi_id; + unsigned int frn_memcg_id; + char __data[0]; +}; + +struct trace_event_raw_writeback_write_inode_template { + struct trace_entry ent; + char name[32]; + ino_t ino; + int sync_mode; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_work_class { + struct trace_entry ent; + char name[32]; + long int nr_pages; + dev_t sb_dev; + int sync_mode; + int for_kupdate; + int range_cyclic; + int for_background; + int reason; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_pages_written { + struct trace_entry ent; + long int pages; + char __data[0]; +}; + +struct trace_event_raw_writeback_class { + struct trace_entry ent; + char name[32]; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_bdi_register { + struct trace_entry ent; + char name[32]; + char __data[0]; +}; + +struct trace_event_raw_wbc_class { + struct trace_entry ent; + char name[32]; + long int nr_to_write; + long int pages_skipped; + int sync_mode; + int for_kupdate; + int for_background; + int for_reclaim; + int range_cyclic; + long int range_start; + long int range_end; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_queue_io { + struct trace_entry ent; + char 
name[32]; + long unsigned int older; + long int age; + int moved; + int reason; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_global_dirty_state { + struct trace_entry ent; + long unsigned int nr_dirty; + long unsigned int nr_writeback; + long unsigned int background_thresh; + long unsigned int dirty_thresh; + long unsigned int dirty_limit; + long unsigned int nr_dirtied; + long unsigned int nr_written; + char __data[0]; +}; + +struct trace_event_raw_bdi_dirty_ratelimit { + struct trace_entry ent; + char bdi[32]; + long unsigned int write_bw; + long unsigned int avg_write_bw; + long unsigned int dirty_rate; + long unsigned int dirty_ratelimit; + long unsigned int task_ratelimit; + long unsigned int balanced_dirty_ratelimit; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_balance_dirty_pages { + struct trace_entry ent; + char bdi[32]; + long unsigned int limit; + long unsigned int setpoint; + long unsigned int dirty; + long unsigned int bdi_setpoint; + long unsigned int bdi_dirty; + long unsigned int dirty_ratelimit; + long unsigned int task_ratelimit; + unsigned int dirtied; + unsigned int dirtied_pause; + long unsigned int paused; + long int pause; + long unsigned int period; + long int think; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_sb_inodes_requeue { + struct trace_entry ent; + char name[32]; + ino_t ino; + long unsigned int state; + long unsigned int dirtied_when; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_congest_waited_template { + struct trace_entry ent; + unsigned int usec_timeout; + unsigned int usec_delayed; + char __data[0]; +}; + +struct trace_event_raw_writeback_single_inode_template { + struct trace_entry ent; + char name[32]; + ino_t ino; + long unsigned int state; + long unsigned int dirtied_when; + long unsigned int writeback_index; + long int nr_to_write; + long unsigned int wrote; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_inode_template { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int state; + __u16 mode; + long unsigned int dirtied_when; + char __data[0]; +}; + +struct trace_event_data_offsets_writeback_page_template {}; + +struct trace_event_data_offsets_writeback_dirty_inode_template {}; + +struct trace_event_data_offsets_inode_foreign_history {}; + +struct trace_event_data_offsets_inode_switch_wbs {}; + +struct trace_event_data_offsets_track_foreign_dirty {}; + +struct trace_event_data_offsets_flush_foreign {}; + +struct trace_event_data_offsets_writeback_write_inode_template {}; + +struct trace_event_data_offsets_writeback_work_class {}; + +struct trace_event_data_offsets_writeback_pages_written {}; + +struct trace_event_data_offsets_writeback_class {}; + +struct trace_event_data_offsets_writeback_bdi_register {}; + +struct trace_event_data_offsets_wbc_class {}; + +struct trace_event_data_offsets_writeback_queue_io {}; + +struct trace_event_data_offsets_global_dirty_state {}; + +struct trace_event_data_offsets_bdi_dirty_ratelimit {}; + +struct trace_event_data_offsets_balance_dirty_pages {}; + +struct trace_event_data_offsets_writeback_sb_inodes_requeue {}; + +struct trace_event_data_offsets_writeback_congest_waited_template {}; + +struct trace_event_data_offsets_writeback_single_inode_template {}; + +struct trace_event_data_offsets_writeback_inode_template {}; + +typedef void (*btf_trace_writeback_dirty_page)(void *, struct page *, struct address_space *); + +typedef void 
(*btf_trace_wait_on_page_writeback)(void *, struct page *, struct address_space *); + +typedef void (*btf_trace_writeback_mark_inode_dirty)(void *, struct inode *, int); + +typedef void (*btf_trace_writeback_dirty_inode_start)(void *, struct inode *, int); + +typedef void (*btf_trace_writeback_dirty_inode)(void *, struct inode *, int); + +typedef void (*btf_trace_inode_foreign_history)(void *, struct inode *, struct writeback_control *, unsigned int); + +typedef void (*btf_trace_inode_switch_wbs)(void *, struct inode *, struct bdi_writeback *, struct bdi_writeback *); + +typedef void (*btf_trace_track_foreign_dirty)(void *, struct page *, struct bdi_writeback *); + +typedef void (*btf_trace_flush_foreign)(void *, struct bdi_writeback *, unsigned int, unsigned int); + +typedef void (*btf_trace_writeback_write_inode_start)(void *, struct inode *, struct writeback_control *); + +typedef void (*btf_trace_writeback_write_inode)(void *, struct inode *, struct writeback_control *); + +typedef void (*btf_trace_writeback_queue)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_exec)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_start)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_written)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_wait)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_pages_written)(void *, long int); + +typedef void (*btf_trace_writeback_wake_background)(void *, struct bdi_writeback *); + +typedef void (*btf_trace_writeback_bdi_register)(void *, struct backing_dev_info *); + +typedef void (*btf_trace_wbc_writepage)(void *, struct writeback_control *, struct backing_dev_info *); + +typedef void (*btf_trace_writeback_queue_io)(void *, struct bdi_writeback *, struct wb_writeback_work *, long unsigned int, int); + +typedef void (*btf_trace_global_dirty_state)(void *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_bdi_dirty_ratelimit)(void *, struct bdi_writeback *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_balance_dirty_pages)(void *, struct bdi_writeback *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long int, long unsigned int); + +typedef void (*btf_trace_writeback_sb_inodes_requeue)(void *, struct inode *); + +typedef void (*btf_trace_writeback_congestion_wait)(void *, unsigned int, unsigned int); + +typedef void (*btf_trace_writeback_wait_iff_congested)(void *, unsigned int, unsigned int); + +typedef void (*btf_trace_writeback_single_inode_start)(void *, struct inode *, struct writeback_control *, long unsigned int); + +typedef void (*btf_trace_writeback_single_inode)(void *, struct inode *, struct writeback_control *, long unsigned int); + +typedef void (*btf_trace_writeback_lazytime)(void *, struct inode *); + +typedef void (*btf_trace_writeback_lazytime_iput)(void *, struct inode *); + +typedef void (*btf_trace_writeback_dirty_inode_enqueue)(void *, struct inode *); + +typedef void (*btf_trace_sb_mark_inode_writeback)(void *, struct inode *); + +typedef void (*btf_trace_sb_clear_inode_writeback)(void *, struct inode *); + +struct inode_switch_wbs_context { + struct inode *inode; + struct bdi_writeback *new_wb; + struct callback_head callback_head; + 
struct work_struct work; +}; + +struct splice_desc { + size_t total_len; + unsigned int len; + unsigned int flags; + union { + void *userptr; + struct file *file; + void *data; + } u; + loff_t pos; + loff_t *opos; + size_t num_spliced; + bool need_wakeup; +}; + +typedef int splice_actor(struct pipe_inode_info *, struct pipe_buffer *, struct splice_desc *); + +typedef int splice_direct_actor(struct pipe_inode_info *, struct splice_desc *); + +struct old_utimbuf32 { + old_time32_t actime; + old_time32_t modtime; +}; + +struct utimbuf { + __kernel_old_time_t actime; + __kernel_old_time_t modtime; +}; + +typedef int __kernel_daddr_t; + +struct ustat { + __kernel_daddr_t f_tfree; + long unsigned int f_tinode; + char f_fname[6]; + char f_fpack[6]; +}; + +typedef s32 compat_daddr_t; + +typedef __kernel_fsid_t compat_fsid_t; + +struct compat_statfs { + int f_type; + int f_bsize; + int f_blocks; + int f_bfree; + int f_bavail; + int f_files; + int f_ffree; + compat_fsid_t f_fsid; + int f_namelen; + int f_frsize; + int f_flags; + int f_spare[4]; +}; + +struct compat_ustat { + compat_daddr_t f_tfree; + compat_ino_t f_tinode; + char f_fname[6]; + char f_fpack[6]; +}; + +struct statfs { + __kernel_long_t f_type; + __kernel_long_t f_bsize; + __kernel_long_t f_blocks; + __kernel_long_t f_bfree; + __kernel_long_t f_bavail; + __kernel_long_t f_files; + __kernel_long_t f_ffree; + __kernel_fsid_t f_fsid; + __kernel_long_t f_namelen; + __kernel_long_t f_frsize; + __kernel_long_t f_flags; + __kernel_long_t f_spare[4]; +}; + +struct statfs64 { + __kernel_long_t f_type; + __kernel_long_t f_bsize; + __u64 f_blocks; + __u64 f_bfree; + __u64 f_bavail; + __u64 f_files; + __u64 f_ffree; + __kernel_fsid_t f_fsid; + __kernel_long_t f_namelen; + __kernel_long_t f_frsize; + __kernel_long_t f_flags; + __kernel_long_t f_spare[4]; +}; + +struct compat_statfs64 { + __u32 f_type; + __u32 f_bsize; + __u64 f_blocks; + __u64 f_bfree; + __u64 f_bavail; + __u64 f_files; + __u64 f_ffree; + __kernel_fsid_t f_fsid; + __u32 f_namelen; + __u32 f_frsize; + __u32 f_flags; + __u32 f_spare[4]; +} __attribute__((packed)); + +struct ns_get_path_task_args { + const struct proc_ns_operations *ns_ops; + struct task_struct *task; +}; + +enum legacy_fs_param { + LEGACY_FS_UNSET_PARAMS = 0, + LEGACY_FS_MONOLITHIC_PARAMS = 1, + LEGACY_FS_INDIVIDUAL_PARAMS = 2, +}; + +struct legacy_fs_context { + char *legacy_data; + size_t data_size; + enum legacy_fs_param param_type; +}; + +enum fsconfig_command { + FSCONFIG_SET_FLAG = 0, + FSCONFIG_SET_STRING = 1, + FSCONFIG_SET_BINARY = 2, + FSCONFIG_SET_PATH = 3, + FSCONFIG_SET_PATH_EMPTY = 4, + FSCONFIG_SET_FD = 5, + FSCONFIG_CMD_CREATE = 6, + FSCONFIG_CMD_RECONFIGURE = 7, +}; + +struct dax_device; + +struct iomap_page_ops; + +struct iomap___2 { + u64 addr; + loff_t offset; + u64 length; + u16 type; + u16 flags; + struct block_device *bdev; + struct dax_device *dax_dev; + void *inline_data; + void *private; + const struct iomap_page_ops *page_ops; +}; + +struct iomap_page_ops { + int (*page_prepare)(struct inode *, loff_t, unsigned int, struct iomap___2 *); + void (*page_done)(struct inode *, loff_t, unsigned int, struct page *, struct iomap___2 *); +}; + +struct decrypt_bh_ctx { + struct work_struct work; + struct buffer_head *bh; +}; + +struct bh_lru { + struct buffer_head *bhs[16]; +}; + +struct bh_accounting { + int nr; + int ratelimit; +}; + +enum stat_group { + STAT_READ = 0, + STAT_WRITE = 1, + STAT_DISCARD = 2, + STAT_FLUSH = 3, + NR_STAT_GROUPS = 4, +}; + +enum { + DISK_EVENT_MEDIA_CHANGE = 1, + 
DISK_EVENT_EJECT_REQUEST = 2, +}; + +enum { + BIOSET_NEED_BVECS = 1, + BIOSET_NEED_RESCUER = 2, +}; + +struct bdev_inode { + struct block_device bdev; + struct inode vfs_inode; +}; + +struct blkdev_dio { + union { + struct kiocb *iocb; + struct task_struct *waiter; + }; + size_t size; + atomic_t ref; + bool multi_bio: 1; + bool should_dirty: 1; + bool is_sync: 1; + struct bio bio; +}; + +struct bd_holder_disk { + struct list_head list; + struct gendisk *disk; + int refcnt; +}; + +typedef int dio_iodone_t(struct kiocb *, loff_t, ssize_t, void *); + +typedef void dio_submit_t(struct bio *, struct inode *, loff_t); + +enum { + DIO_LOCKING = 1, + DIO_SKIP_HOLES = 2, +}; + +struct dio_submit { + struct bio *bio; + unsigned int blkbits; + unsigned int blkfactor; + unsigned int start_zero_done; + int pages_in_io; + sector_t block_in_file; + unsigned int blocks_available; + int reap_counter; + sector_t final_block_in_request; + int boundary; + get_block_t *get_block; + dio_submit_t *submit_io; + loff_t logical_offset_in_bio; + sector_t final_block_in_bio; + sector_t next_block_for_io; + struct page *cur_page; + unsigned int cur_page_offset; + unsigned int cur_page_len; + sector_t cur_page_block; + loff_t cur_page_fs_offset; + struct iov_iter *iter; + unsigned int head; + unsigned int tail; + size_t from; + size_t to; +}; + +struct dio { + int flags; + int op; + int op_flags; + blk_qc_t bio_cookie; + struct gendisk *bio_disk; + struct inode *inode; + loff_t i_size; + dio_iodone_t *end_io; + void *private; + spinlock_t bio_lock; + int page_errors; + int is_async; + bool defer_completion; + bool should_dirty; + int io_error; + long unsigned int refcount; + struct bio *bio_list; + struct task_struct *waiter; + struct kiocb *iocb; + ssize_t result; + union { + struct page *pages[64]; + struct work_struct complete_work; + }; + long: 64; +}; + +struct bvec_iter_all { + struct bio_vec bv; + int idx; + unsigned int done; +}; + +struct mpage_readpage_args { + struct bio *bio; + struct page *page; + unsigned int nr_pages; + bool is_readahead; + sector_t last_block_in_bio; + struct buffer_head map_bh; + long unsigned int first_logical_block; + get_block_t *get_block; +}; + +struct mpage_data { + struct bio *bio; + sector_t last_block_in_bio; + get_block_t *get_block; + unsigned int use_writepage; +}; + +typedef u32 nlink_t; + +typedef int (*proc_write_t)(struct file *, char *, size_t); + +struct proc_dir_entry { + atomic_t in_use; + refcount_t refcnt; + struct list_head pde_openers; + spinlock_t pde_unload_lock; + struct completion *pde_unload_completion; + const struct inode_operations *proc_iops; + union { + const struct proc_ops *proc_ops; + const struct file_operations *proc_dir_ops; + }; + const struct dentry_operations *proc_dops; + union { + const struct seq_operations *seq_ops; + int (*single_show)(struct seq_file *, void *); + }; + proc_write_t write; + void *data; + unsigned int state_size; + unsigned int low_ino; + nlink_t nlink; + kuid_t uid; + kgid_t gid; + loff_t size; + struct proc_dir_entry *parent; + struct rb_root subdir; + struct rb_node subdir_node; + char *name; + umode_t mode; + u8 flags; + u8 namelen; + char inline_name[0]; +}; + +union proc_op { + int (*proc_get_link)(struct dentry *, struct path *); + int (*proc_show)(struct seq_file *, struct pid_namespace *, struct pid *, struct task_struct *); + const char *lsm; +}; + +struct proc_inode { + struct pid *pid; + unsigned int fd; + union proc_op op; + struct proc_dir_entry *pde; + struct ctl_table_header *sysctl; + struct ctl_table 
*sysctl_entry; + struct hlist_node sibling_inodes; + const struct proc_ns_operations *ns_ops; + struct inode vfs_inode; +}; + +struct proc_fs_opts { + int flag; + const char *str; +}; + +struct file_handle { + __u32 handle_bytes; + int handle_type; + unsigned char f_handle[0]; +}; + +struct inotify_inode_mark { + struct fsnotify_mark fsn_mark; + int wd; +}; + +struct dnotify_struct { + struct dnotify_struct *dn_next; + __u32 dn_mask; + int dn_fd; + struct file *dn_filp; + fl_owner_t dn_owner; +}; + +struct dnotify_mark { + struct fsnotify_mark fsn_mark; + struct dnotify_struct *dn; +}; + +struct inotify_event_info { + struct fsnotify_event fse; + u32 mask; + int wd; + u32 sync_cookie; + int name_len; + char name[0]; +}; + +struct inotify_event { + __s32 wd; + __u32 mask; + __u32 cookie; + __u32 len; + char name[0]; +}; + +enum { + FAN_EVENT_INIT = 0, + FAN_EVENT_REPORTED = 1, + FAN_EVENT_ANSWERED = 2, + FAN_EVENT_CANCELED = 3, +}; + +struct fanotify_fh { + u8 type; + u8 len; + u8 flags; + u8 pad; + unsigned char buf[0]; +}; + +struct fanotify_info { + u8 dir_fh_totlen; + u8 file_fh_totlen; + u8 name_len; + u8 pad; + unsigned char buf[0]; +}; + +enum fanotify_event_type { + FANOTIFY_EVENT_TYPE_FID = 0, + FANOTIFY_EVENT_TYPE_FID_NAME = 1, + FANOTIFY_EVENT_TYPE_PATH = 2, + FANOTIFY_EVENT_TYPE_PATH_PERM = 3, + FANOTIFY_EVENT_TYPE_OVERFLOW = 4, + __FANOTIFY_EVENT_TYPE_NUM = 5, +}; + +struct fanotify_event { + struct fsnotify_event fse; + struct hlist_node merge_list; + u32 mask; + struct { + unsigned int type: 3; + unsigned int hash: 29; + }; + struct pid *pid; +}; + +struct fanotify_fid_event { + struct fanotify_event fae; + __kernel_fsid_t fsid; + struct fanotify_fh object_fh; + unsigned char _inline_fh_buf[12]; +}; + +struct fanotify_name_event { + struct fanotify_event fae; + __kernel_fsid_t fsid; + struct fanotify_info info; +}; + +struct fanotify_path_event { + struct fanotify_event fae; + struct path path; +}; + +struct fanotify_perm_event { + struct fanotify_event fae; + struct path path; + short unsigned int response; + short unsigned int state; + int fd; +}; + +struct fanotify_event_metadata { + __u32 event_len; + __u8 vers; + __u8 reserved; + __u16 metadata_len; + __u64 mask; + __s32 fd; + __s32 pid; +}; + +struct fanotify_event_info_header { + __u8 info_type; + __u8 pad; + __u16 len; +}; + +struct fanotify_event_info_fid { + struct fanotify_event_info_header hdr; + __kernel_fsid_t fsid; + unsigned char handle[0]; +}; + +struct fanotify_response { + __s32 fd; + __u32 response; +}; + +struct epoll_event { + __poll_t events; + __u64 data; +} __attribute__((packed)); + +struct epoll_filefd { + struct file *file; + int fd; +} __attribute__((packed)); + +struct epitem; + +struct eppoll_entry { + struct eppoll_entry *next; + struct epitem *base; + wait_queue_entry_t wait; + wait_queue_head_t *whead; +}; + +struct eventpoll; + +struct epitem { + union { + struct rb_node rbn; + struct callback_head rcu; + }; + struct list_head rdllink; + struct epitem *next; + struct epoll_filefd ffd; + struct eppoll_entry *pwqlist; + struct eventpoll *ep; + struct hlist_node fllink; + struct wakeup_source *ws; + struct epoll_event event; +}; + +struct eventpoll { + struct mutex mtx; + wait_queue_head_t wq; + wait_queue_head_t poll_wait; + struct list_head rdllist; + rwlock_t lock; + struct rb_root_cached rbr; + struct epitem *ovflist; + struct wakeup_source *ws; + struct user_struct *user; + struct file *file; + u64 gen; + struct hlist_head refs; + unsigned int napi_id; +}; + +struct ep_pqueue { + 
poll_table pt; + struct epitem *epi; +}; + +struct epitems_head { + struct hlist_head epitems; + struct epitems_head *next; +}; + +struct signalfd_siginfo { + __u32 ssi_signo; + __s32 ssi_errno; + __s32 ssi_code; + __u32 ssi_pid; + __u32 ssi_uid; + __s32 ssi_fd; + __u32 ssi_tid; + __u32 ssi_band; + __u32 ssi_overrun; + __u32 ssi_trapno; + __s32 ssi_status; + __s32 ssi_int; + __u64 ssi_ptr; + __u64 ssi_utime; + __u64 ssi_stime; + __u64 ssi_addr; + __u16 ssi_addr_lsb; + __u16 __pad2; + __s32 ssi_syscall; + __u64 ssi_call_addr; + __u32 ssi_arch; + __u8 __pad[28]; +}; + +struct signalfd_ctx { + sigset_t sigmask; +}; + +struct timerfd_ctx { + union { + struct hrtimer tmr; + struct alarm alarm; + } t; + ktime_t tintv; + ktime_t moffs; + wait_queue_head_t wqh; + u64 ticks; + int clockid; + short unsigned int expired; + short unsigned int settime_flags; + struct callback_head rcu; + struct list_head clist; + spinlock_t cancel_lock; + bool might_cancel; +}; + +struct eventfd_ctx___2 { + struct kref kref; + wait_queue_head_t wqh; + __u64 count; + unsigned int flags; + int id; +}; + +enum userfaultfd_state { + UFFD_STATE_WAIT_API = 0, + UFFD_STATE_RUNNING = 1, +}; + +struct userfaultfd_ctx { + wait_queue_head_t fault_pending_wqh; + wait_queue_head_t fault_wqh; + wait_queue_head_t fd_wqh; + wait_queue_head_t event_wqh; + seqcount_spinlock_t refile_seq; + refcount_t refcount; + unsigned int flags; + unsigned int features; + enum userfaultfd_state state; + bool released; + bool mmap_changing; + struct mm_struct *mm; +}; + +struct uffd_msg { + __u8 event; + __u8 reserved1; + __u16 reserved2; + __u32 reserved3; + union { + struct { + __u64 flags; + __u64 address; + union { + __u32 ptid; + } feat; + } pagefault; + struct { + __u32 ufd; + } fork; + struct { + __u64 from; + __u64 to; + __u64 len; + } remap; + struct { + __u64 start; + __u64 end; + } remove; + struct { + __u64 reserved1; + __u64 reserved2; + __u64 reserved3; + } reserved; + } arg; +}; + +struct uffdio_api { + __u64 api; + __u64 features; + __u64 ioctls; +}; + +struct uffdio_range { + __u64 start; + __u64 len; +}; + +struct uffdio_register { + struct uffdio_range range; + __u64 mode; + __u64 ioctls; +}; + +struct uffdio_copy { + __u64 dst; + __u64 src; + __u64 len; + __u64 mode; + __s64 copy; +}; + +struct uffdio_zeropage { + struct uffdio_range range; + __u64 mode; + __s64 zeropage; +}; + +struct uffdio_writeprotect { + struct uffdio_range range; + __u64 mode; +}; + +struct uffdio_continue { + struct uffdio_range range; + __u64 mode; + __s64 mapped; +}; + +struct userfaultfd_fork_ctx { + struct userfaultfd_ctx *orig; + struct userfaultfd_ctx *new; + struct list_head list; +}; + +struct userfaultfd_unmap_ctx { + struct userfaultfd_ctx *ctx; + long unsigned int start; + long unsigned int end; + struct list_head list; +}; + +struct userfaultfd_wait_queue { + struct uffd_msg msg; + wait_queue_entry_t wq; + struct userfaultfd_ctx *ctx; + bool waken; +}; + +struct userfaultfd_wake_range { + long unsigned int start; + long unsigned int len; +}; + +struct kioctx; + +struct kioctx_table { + struct callback_head rcu; + unsigned int nr; + struct kioctx *table[0]; +}; + +typedef __kernel_ulong_t aio_context_t; + +enum { + IOCB_CMD_PREAD = 0, + IOCB_CMD_PWRITE = 1, + IOCB_CMD_FSYNC = 2, + IOCB_CMD_FDSYNC = 3, + IOCB_CMD_POLL = 5, + IOCB_CMD_NOOP = 6, + IOCB_CMD_PREADV = 7, + IOCB_CMD_PWRITEV = 8, +}; + +struct io_event { + __u64 data; + __u64 obj; + __s64 res; + __s64 res2; +}; + +struct iocb { + __u64 aio_data; + __u32 aio_key; + __kernel_rwf_t 
aio_rw_flags; + __u16 aio_lio_opcode; + __s16 aio_reqprio; + __u32 aio_fildes; + __u64 aio_buf; + __u64 aio_nbytes; + __s64 aio_offset; + __u64 aio_reserved2; + __u32 aio_flags; + __u32 aio_resfd; +}; + +typedef u32 compat_aio_context_t; + +typedef int kiocb_cancel_fn(struct kiocb *); + +struct aio_ring { + unsigned int id; + unsigned int nr; + unsigned int head; + unsigned int tail; + unsigned int magic; + unsigned int compat_features; + unsigned int incompat_features; + unsigned int header_length; + struct io_event io_events[0]; +}; + +struct kioctx_cpu; + +struct ctx_rq_wait; + +struct kioctx { + struct percpu_ref users; + atomic_t dead; + struct percpu_ref reqs; + long unsigned int user_id; + struct kioctx_cpu *cpu; + unsigned int req_batch; + unsigned int max_reqs; + unsigned int nr_events; + long unsigned int mmap_base; + long unsigned int mmap_size; + struct page **ring_pages; + long int nr_pages; + struct rcu_work free_rwork; + struct ctx_rq_wait *rq_wait; + long: 64; + long: 64; + long: 64; + struct { + atomic_t reqs_available; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + }; + struct { + spinlock_t ctx_lock; + struct list_head active_reqs; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + }; + struct { + struct mutex ring_lock; + wait_queue_head_t wait; + long: 64; + }; + struct { + unsigned int tail; + unsigned int completed_events; + spinlock_t completion_lock; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + }; + struct page *internal_pages[8]; + struct file *aio_ring_file; + unsigned int id; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct kioctx_cpu { + unsigned int reqs_available; +}; + +struct ctx_rq_wait { + struct completion comp; + atomic_t count; +}; + +struct fsync_iocb { + struct file *file; + struct work_struct work; + bool datasync; + struct cred *creds; +}; + +struct poll_iocb { + struct file *file; + struct wait_queue_head *head; + __poll_t events; + bool done; + bool cancelled; + struct wait_queue_entry wait; + struct work_struct work; +}; + +struct aio_kiocb { + union { + struct file *ki_filp; + struct kiocb rw; + struct fsync_iocb fsync; + struct poll_iocb poll; + }; + struct kioctx *ki_ctx; + kiocb_cancel_fn *ki_cancel; + struct io_event ki_res; + struct list_head ki_list; + refcount_t ki_refcnt; + struct eventfd_ctx *ki_eventfd; +}; + +struct aio_poll_table { + struct poll_table_struct pt; + struct aio_kiocb *iocb; + int error; +}; + +struct __aio_sigset { + const sigset_t *sigmask; + size_t sigsetsize; +}; + +struct __compat_aio_sigset { + compat_uptr_t sigmask; + compat_size_t sigsetsize; +}; + +struct xa_limit { + u32 max; + u32 min; +}; + +struct io_wq; + +struct io_wq_work_node; + +struct io_wq_work_list { + struct io_wq_work_node *first; + struct io_wq_work_node *last; +}; + +struct io_ring_ctx; + +struct io_uring_task { + struct xarray xa; + struct wait_queue_head wait; + const struct io_ring_ctx *last; + struct io_wq *io_wq; + struct percpu_counter inflight; + atomic_t inflight_tracked; + atomic_t in_idle; + spinlock_t task_lock; + struct io_wq_work_list task_list; + long unsigned int task_state; + struct callback_head task_work; +}; + +enum { + PERCPU_REF_INIT_ATOMIC = 1, + PERCPU_REF_INIT_DEAD = 2, + PERCPU_REF_ALLOW_REINIT = 4, +}; + +struct user_msghdr { + void *msg_name; + int msg_namelen; + struct iovec *msg_iov; + __kernel_size_t msg_iovlen; + void *msg_control; + __kernel_size_t msg_controllen; 
+ unsigned int msg_flags; +}; + +typedef s32 compat_ssize_t; + +struct compat_msghdr { + compat_uptr_t msg_name; + compat_int_t msg_namelen; + compat_uptr_t msg_iov; + compat_size_t msg_iovlen; + compat_uptr_t msg_control; + compat_size_t msg_controllen; + compat_uint_t msg_flags; +}; + +struct scm_fp_list { + short int count; + short int max; + struct user_struct *user; + struct file *fp[253]; +}; + +struct unix_skb_parms { + struct pid *pid; + kuid_t uid; + kgid_t gid; + struct scm_fp_list *fp; + struct lsmblob lsmblob; + u32 consumed; +}; + +struct trace_event_raw_io_uring_create { + struct trace_entry ent; + int fd; + void *ctx; + u32 sq_entries; + u32 cq_entries; + u32 flags; + char __data[0]; +}; + +struct trace_event_raw_io_uring_register { + struct trace_entry ent; + void *ctx; + unsigned int opcode; + unsigned int nr_files; + unsigned int nr_bufs; + bool eventfd; + long int ret; + char __data[0]; +}; + +struct trace_event_raw_io_uring_file_get { + struct trace_entry ent; + void *ctx; + int fd; + char __data[0]; +}; + +struct io_wq_work; + +struct trace_event_raw_io_uring_queue_async_work { + struct trace_entry ent; + void *ctx; + int rw; + void *req; + struct io_wq_work *work; + unsigned int flags; + char __data[0]; +}; + +struct io_wq_work_node { + struct io_wq_work_node *next; +}; + +struct io_wq_work { + struct io_wq_work_node list; + unsigned int flags; +}; + +struct trace_event_raw_io_uring_defer { + struct trace_entry ent; + void *ctx; + void *req; + long long unsigned int data; + char __data[0]; +}; + +struct trace_event_raw_io_uring_link { + struct trace_entry ent; + void *ctx; + void *req; + void *target_req; + char __data[0]; +}; + +struct trace_event_raw_io_uring_cqring_wait { + struct trace_entry ent; + void *ctx; + int min_events; + char __data[0]; +}; + +struct trace_event_raw_io_uring_fail_link { + struct trace_entry ent; + void *req; + void *link; + char __data[0]; +}; + +struct trace_event_raw_io_uring_complete { + struct trace_entry ent; + void *ctx; + u64 user_data; + int res; + unsigned int cflags; + char __data[0]; +}; + +struct trace_event_raw_io_uring_submit_sqe { + struct trace_entry ent; + void *ctx; + u8 opcode; + u64 user_data; + bool force_nonblock; + bool sq_thread; + char __data[0]; +}; + +struct trace_event_raw_io_uring_poll_arm { + struct trace_entry ent; + void *ctx; + u8 opcode; + u64 user_data; + int mask; + int events; + char __data[0]; +}; + +struct trace_event_raw_io_uring_poll_wake { + struct trace_entry ent; + void *ctx; + u8 opcode; + u64 user_data; + int mask; + char __data[0]; +}; + +struct trace_event_raw_io_uring_task_add { + struct trace_entry ent; + void *ctx; + u8 opcode; + u64 user_data; + int mask; + char __data[0]; +}; + +struct trace_event_raw_io_uring_task_run { + struct trace_entry ent; + void *ctx; + u8 opcode; + u64 user_data; + char __data[0]; +}; + +struct trace_event_data_offsets_io_uring_create {}; + +struct trace_event_data_offsets_io_uring_register {}; + +struct trace_event_data_offsets_io_uring_file_get {}; + +struct trace_event_data_offsets_io_uring_queue_async_work {}; + +struct trace_event_data_offsets_io_uring_defer {}; + +struct trace_event_data_offsets_io_uring_link {}; + +struct trace_event_data_offsets_io_uring_cqring_wait {}; + +struct trace_event_data_offsets_io_uring_fail_link {}; + +struct trace_event_data_offsets_io_uring_complete {}; + +struct trace_event_data_offsets_io_uring_submit_sqe {}; + +struct trace_event_data_offsets_io_uring_poll_arm {}; + +struct trace_event_data_offsets_io_uring_poll_wake {}; 
+ +struct trace_event_data_offsets_io_uring_task_add {}; + +struct trace_event_data_offsets_io_uring_task_run {}; + +typedef void (*btf_trace_io_uring_create)(void *, int, void *, u32, u32, u32); + +typedef void (*btf_trace_io_uring_register)(void *, void *, unsigned int, unsigned int, unsigned int, bool, long int); + +typedef void (*btf_trace_io_uring_file_get)(void *, void *, int); + +typedef void (*btf_trace_io_uring_queue_async_work)(void *, void *, int, void *, struct io_wq_work *, unsigned int); + +typedef void (*btf_trace_io_uring_defer)(void *, void *, void *, long long unsigned int); + +typedef void (*btf_trace_io_uring_link)(void *, void *, void *, void *); + +typedef void (*btf_trace_io_uring_cqring_wait)(void *, void *, int); + +typedef void (*btf_trace_io_uring_fail_link)(void *, void *, void *); + +typedef void (*btf_trace_io_uring_complete)(void *, void *, u64, int, unsigned int); + +typedef void (*btf_trace_io_uring_submit_sqe)(void *, void *, u8, u64, bool, bool); + +typedef void (*btf_trace_io_uring_poll_arm)(void *, void *, u8, u64, int, int); + +typedef void (*btf_trace_io_uring_poll_wake)(void *, void *, u8, u64, int); + +typedef void (*btf_trace_io_uring_task_add)(void *, void *, u8, u64, int); + +typedef void (*btf_trace_io_uring_task_run)(void *, void *, u8, u64); + +struct io_uring_sqe { + __u8 opcode; + __u8 flags; + __u16 ioprio; + __s32 fd; + union { + __u64 off; + __u64 addr2; + }; + union { + __u64 addr; + __u64 splice_off_in; + }; + __u32 len; + union { + __kernel_rwf_t rw_flags; + __u32 fsync_flags; + __u16 poll_events; + __u32 poll32_events; + __u32 sync_range_flags; + __u32 msg_flags; + __u32 timeout_flags; + __u32 accept_flags; + __u32 cancel_flags; + __u32 open_flags; + __u32 statx_flags; + __u32 fadvise_advice; + __u32 splice_flags; + __u32 rename_flags; + __u32 unlink_flags; + }; + __u64 user_data; + union { + struct { + union { + __u16 buf_index; + __u16 buf_group; + }; + __u16 personality; + __s32 splice_fd_in; + }; + __u64 __pad2[3]; + }; +}; + +enum { + IOSQE_FIXED_FILE_BIT = 0, + IOSQE_IO_DRAIN_BIT = 1, + IOSQE_IO_LINK_BIT = 2, + IOSQE_IO_HARDLINK_BIT = 3, + IOSQE_ASYNC_BIT = 4, + IOSQE_BUFFER_SELECT_BIT = 5, +}; + +enum { + IORING_OP_NOP = 0, + IORING_OP_READV = 1, + IORING_OP_WRITEV = 2, + IORING_OP_FSYNC = 3, + IORING_OP_READ_FIXED = 4, + IORING_OP_WRITE_FIXED = 5, + IORING_OP_POLL_ADD = 6, + IORING_OP_POLL_REMOVE = 7, + IORING_OP_SYNC_FILE_RANGE = 8, + IORING_OP_SENDMSG = 9, + IORING_OP_RECVMSG = 10, + IORING_OP_TIMEOUT = 11, + IORING_OP_TIMEOUT_REMOVE = 12, + IORING_OP_ACCEPT = 13, + IORING_OP_ASYNC_CANCEL = 14, + IORING_OP_LINK_TIMEOUT = 15, + IORING_OP_CONNECT = 16, + IORING_OP_FALLOCATE = 17, + IORING_OP_OPENAT = 18, + IORING_OP_CLOSE = 19, + IORING_OP_FILES_UPDATE = 20, + IORING_OP_STATX = 21, + IORING_OP_READ = 22, + IORING_OP_WRITE = 23, + IORING_OP_FADVISE = 24, + IORING_OP_MADVISE = 25, + IORING_OP_SEND = 26, + IORING_OP_RECV = 27, + IORING_OP_OPENAT2 = 28, + IORING_OP_EPOLL_CTL = 29, + IORING_OP_SPLICE = 30, + IORING_OP_PROVIDE_BUFFERS = 31, + IORING_OP_REMOVE_BUFFERS = 32, + IORING_OP_TEE = 33, + IORING_OP_SHUTDOWN = 34, + IORING_OP_RENAMEAT = 35, + IORING_OP_UNLINKAT = 36, + IORING_OP_LAST = 37, +}; + +struct io_uring_cqe { + __u64 user_data; + __s32 res; + __u32 flags; +}; + +enum { + IORING_CQE_BUFFER_SHIFT = 16, +}; + +struct io_sqring_offsets { + __u32 head; + __u32 tail; + __u32 ring_mask; + __u32 ring_entries; + __u32 flags; + __u32 dropped; + __u32 array; + __u32 resv1; + __u64 resv2; +}; + +struct io_cqring_offsets { + 
__u32 head; + __u32 tail; + __u32 ring_mask; + __u32 ring_entries; + __u32 overflow; + __u32 cqes; + __u32 flags; + __u32 resv1; + __u64 resv2; +}; + +struct io_uring_params { + __u32 sq_entries; + __u32 cq_entries; + __u32 flags; + __u32 sq_thread_cpu; + __u32 sq_thread_idle; + __u32 features; + __u32 wq_fd; + __u32 resv[3]; + struct io_sqring_offsets sq_off; + struct io_cqring_offsets cq_off; +}; + +enum { + IORING_REGISTER_BUFFERS = 0, + IORING_UNREGISTER_BUFFERS = 1, + IORING_REGISTER_FILES = 2, + IORING_UNREGISTER_FILES = 3, + IORING_REGISTER_EVENTFD = 4, + IORING_UNREGISTER_EVENTFD = 5, + IORING_REGISTER_FILES_UPDATE = 6, + IORING_REGISTER_EVENTFD_ASYNC = 7, + IORING_REGISTER_PROBE = 8, + IORING_REGISTER_PERSONALITY = 9, + IORING_UNREGISTER_PERSONALITY = 10, + IORING_REGISTER_RESTRICTIONS = 11, + IORING_REGISTER_ENABLE_RINGS = 12, + IORING_REGISTER_FILES2 = 13, + IORING_REGISTER_FILES_UPDATE2 = 14, + IORING_REGISTER_BUFFERS2 = 15, + IORING_REGISTER_BUFFERS_UPDATE = 16, + IORING_REGISTER_LAST = 17, +}; + +struct io_uring_rsrc_register { + __u32 nr; + __u32 resv; + __u64 resv2; + __u64 data; + __u64 tags; +}; + +struct io_uring_rsrc_update2 { + __u32 offset; + __u32 resv; + __u64 data; + __u64 tags; + __u32 nr; + __u32 resv2; +}; + +struct io_uring_probe_op { + __u8 op; + __u8 resv; + __u16 flags; + __u32 resv2; +}; + +struct io_uring_probe { + __u8 last_op; + __u8 ops_len; + __u16 resv; + __u32 resv2[3]; + struct io_uring_probe_op ops[0]; +}; + +struct io_uring_restriction { + __u16 opcode; + union { + __u8 register_op; + __u8 sqe_op; + __u8 sqe_flags; + }; + __u8 resv; + __u32 resv2[3]; +}; + +enum { + IORING_RESTRICTION_REGISTER_OP = 0, + IORING_RESTRICTION_SQE_OP = 1, + IORING_RESTRICTION_SQE_FLAGS_ALLOWED = 2, + IORING_RESTRICTION_SQE_FLAGS_REQUIRED = 3, + IORING_RESTRICTION_LAST = 4, +}; + +struct io_uring_getevents_arg { + __u64 sigmask; + __u32 sigmask_sz; + __u32 pad; + __u64 ts; +}; + +enum { + IO_WQ_WORK_CANCEL = 1, + IO_WQ_WORK_HASHED = 2, + IO_WQ_WORK_UNBOUND = 4, + IO_WQ_WORK_CONCURRENT = 16, + IO_WQ_HASH_SHIFT = 24, +}; + +enum io_wq_cancel { + IO_WQ_CANCEL_OK = 0, + IO_WQ_CANCEL_RUNNING = 1, + IO_WQ_CANCEL_NOTFOUND = 2, +}; + +typedef struct io_wq_work *free_work_fn(struct io_wq_work *); + +typedef void io_wq_work_fn(struct io_wq_work *); + +struct io_wq_hash { + refcount_t refs; + long unsigned int map; + struct wait_queue_head wait; +}; + +struct io_wq_data { + struct io_wq_hash *hash; + struct task_struct *task; + io_wq_work_fn *do_work; + free_work_fn *free_work; +}; + +typedef bool work_cancel_fn(struct io_wq_work *, void *); + +struct io_uring { + u32 head; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + u32 tail; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct io_rings { + struct io_uring sq; + struct io_uring cq; + u32 sq_ring_mask; + u32 cq_ring_mask; + u32 sq_ring_entries; + u32 cq_ring_entries; + u32 sq_dropped; + u32 sq_flags; + u32 cq_flags; + u32 cq_overflow; + long: 64; + long: 64; + long: 64; + long: 64; + struct io_uring_cqe cqes[0]; +}; + +enum io_uring_cmd_flags { + IO_URING_F_NONBLOCK = 1, + IO_URING_F_COMPLETE_DEFER = 2, +}; + +struct io_mapped_ubuf { + u64 ubuf; + u64 ubuf_end; + unsigned int nr_bvecs; + long unsigned int acct_pages; + struct bio_vec bvec[0]; +}; + +struct io_overflow_cqe { + struct io_uring_cqe cqe; + struct list_head list; +}; + +struct io_fixed_file { + long unsigned int file_ptr; +}; + +struct io_rsrc_put { + struct 
list_head list; + u64 tag; + union { + void *rsrc; + struct file *file; + struct io_mapped_ubuf *buf; + }; +}; + +struct io_file_table { + struct io_fixed_file **files; +}; + +struct io_rsrc_data; + +struct io_rsrc_node { + struct percpu_ref refs; + struct list_head node; + struct list_head rsrc_list; + struct io_rsrc_data *rsrc_data; + struct llist_node llist; + bool done; +}; + +typedef void rsrc_put_fn(struct io_ring_ctx *, struct io_rsrc_put *); + +struct io_rsrc_data { + struct io_ring_ctx *ctx; + u64 *tags; + rsrc_put_fn *do_put; + atomic_t refs; + struct completion done; + bool quiesce; +}; + +struct io_kiocb; + +struct io_submit_link { + struct io_kiocb *head; + struct io_kiocb *last; +}; + +struct io_comp_state { + struct io_kiocb *reqs[32]; + unsigned int nr; + struct list_head free_list; +}; + +struct io_submit_state { + struct blk_plug plug; + struct io_submit_link link; + void *reqs[32]; + unsigned int free_reqs; + bool plug_started; + struct io_comp_state comp; + struct file *file; + unsigned int fd; + unsigned int file_refs; + unsigned int ios_left; +}; + +struct io_restriction { + long unsigned int register_op[1]; + long unsigned int sqe_op[1]; + u8 sqe_flags_allowed; + u8 sqe_flags_required; + bool registered; +}; + +struct io_sq_data; + +struct io_ring_ctx { + struct { + struct percpu_ref refs; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + }; + struct { + unsigned int flags; + unsigned int compat: 1; + unsigned int drain_next: 1; + unsigned int eventfd_async: 1; + unsigned int restricted: 1; + u32 *sq_array; + unsigned int cached_sq_head; + unsigned int sq_entries; + unsigned int sq_mask; + unsigned int sq_thread_idle; + unsigned int cached_sq_dropped; + unsigned int cached_cq_overflow; + long unsigned int sq_check_overflow; + struct list_head defer_list; + struct list_head timeout_list; + struct list_head cq_overflow_list; + struct io_uring_sqe *sq_sqes; + long: 64; + long: 64; + long: 64; + }; + struct { + struct mutex uring_lock; + wait_queue_head_t wait; + long: 64; + }; + struct io_submit_state submit_state; + struct list_head locked_free_list; + unsigned int locked_free_nr; + struct io_rings *rings; + const struct cred *sq_creds; + struct io_sq_data *sq_data; + struct wait_queue_head sqo_sq_wait; + struct list_head sqd_list; + struct io_rsrc_data *file_data; + struct io_file_table file_table; + unsigned int nr_user_files; + struct io_rsrc_data *buf_data; + unsigned int nr_user_bufs; + struct io_mapped_ubuf **user_bufs; + struct xarray io_buffers; + struct xarray personalities; + u32 pers_next; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + struct { + unsigned int cached_cq_tail; + unsigned int cq_entries; + unsigned int cq_mask; + atomic_t cq_timeouts; + unsigned int cq_last_tm_flush; + unsigned int cq_extra; + long unsigned int cq_check_overflow; + struct wait_queue_head cq_wait; + struct fasync_struct *cq_fasync; + struct eventfd_ctx *cq_ev_fd; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + }; + struct { + spinlock_t completion_lock; + struct list_head iopoll_list; + struct hlist_head *cancel_hash; + unsigned int cancel_hash_bits; + bool poll_multi_file; + long: 24; + long: 64; + long: 64; + long: 64; + }; + struct delayed_work rsrc_put_work; + struct llist_head rsrc_put_llist; + struct list_head rsrc_ref_list; + spinlock_t rsrc_ref_lock; + struct io_rsrc_node *rsrc_node; + struct io_rsrc_node *rsrc_backup_node; + struct io_mapped_ubuf *dummy_ubuf; + struct io_restriction 
restrictions; + struct { + struct socket *ring_sock; + struct io_wq_hash *hash_map; + struct user_struct *user; + struct mm_struct *mm_account; + struct callback_head *exit_task_work; + struct work_struct exit_work; + struct list_head tctx_list; + struct completion ref_comp; + }; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct io_buffer { + struct list_head list; + __u64 addr; + __u32 len; + __u16 bid; +}; + +enum { + IO_SQ_THREAD_SHOULD_STOP = 0, + IO_SQ_THREAD_SHOULD_PARK = 1, +}; + +struct io_sq_data { + refcount_t refs; + atomic_t park_pending; + struct mutex lock; + struct list_head ctx_list; + struct task_struct *thread; + struct wait_queue_head wait; + unsigned int sq_thread_idle; + int sq_cpu; + pid_t task_pid; + pid_t task_tgid; + long unsigned int state; + struct completion exited; + struct callback_head *park_task_work; +}; + +struct io_rw { + struct kiocb kiocb; + u64 addr; + u64 len; +}; + +struct io_poll_iocb { + struct file *file; + struct wait_queue_head *head; + __poll_t events; + bool done; + bool canceled; + struct wait_queue_entry wait; +}; + +struct io_poll_update { + struct file *file; + u64 old_user_data; + u64 new_user_data; + __poll_t events; + bool update_events; + bool update_user_data; +}; + +struct io_accept { + struct file *file; + struct sockaddr *addr; + int *addr_len; + int flags; + long unsigned int nofile; +}; + +struct io_sync { + struct file *file; + loff_t len; + loff_t off; + int flags; + int mode; +}; + +struct io_cancel { + struct file *file; + u64 addr; +}; + +struct io_timeout { + struct file *file; + u32 off; + u32 target_seq; + struct list_head list; + struct io_kiocb *head; +}; + +struct io_timeout_rem { + struct file *file; + u64 addr; + struct timespec64 ts; + u32 flags; +}; + +struct io_connect { + struct file *file; + struct sockaddr *addr; + int addr_len; +}; + +struct io_sr_msg { + struct file *file; + union { + struct compat_msghdr *umsg_compat; + struct user_msghdr *umsg; + void *buf; + }; + int msg_flags; + int bgid; + size_t len; + struct io_buffer *kbuf; +}; + +struct io_open { + struct file *file; + int dfd; + struct filename *filename; + struct open_how how; + long unsigned int nofile; +}; + +struct io_close { + struct file *file; + int fd; +}; + +struct io_rsrc_update { + struct file *file; + u64 arg; + u32 nr_args; + u32 offset; +}; + +struct io_fadvise { + struct file *file; + u64 offset; + u32 len; + u32 advice; +}; + +struct io_madvise { + struct file *file; + u64 addr; + u32 len; + u32 advice; +}; + +struct io_epoll { + struct file *file; + int epfd; + int op; + int fd; + struct epoll_event event; +} __attribute__((packed)); + +struct io_splice { + struct file *file_out; + struct file *file_in; + loff_t off_out; + loff_t off_in; + u64 len; + unsigned int flags; +}; + +struct io_provide_buf { + struct file *file; + __u64 addr; + __u32 len; + __u32 bgid; + __u16 nbufs; + __u16 bid; +}; + +struct io_statx { + struct file *file; + int dfd; + unsigned int mask; + unsigned int flags; + const char *filename; + struct statx *buffer; +}; + +struct io_shutdown { + struct file *file; + int how; +}; + +struct io_rename { + struct file *file; + int old_dfd; + int new_dfd; + struct filename *oldpath; + struct filename *newpath; + int flags; +}; + +struct io_unlink { + struct file *file; + int dfd; + int flags; + struct filename *filename; +}; + +struct io_completion { + struct file *file; + struct list_head list; + u32 cflags; +}; + +struct io_task_work { + struct io_wq_work_node node; + task_work_func_t func; +}; + +struct 
async_poll; + +struct io_kiocb { + union { + struct file *file; + struct io_rw rw; + struct io_poll_iocb poll; + struct io_poll_update poll_update; + struct io_accept accept; + struct io_sync sync; + struct io_cancel cancel; + struct io_timeout timeout; + struct io_timeout_rem timeout_rem; + struct io_connect connect; + struct io_sr_msg sr_msg; + struct io_open open; + struct io_close close; + struct io_rsrc_update rsrc_update; + struct io_fadvise fadvise; + struct io_madvise madvise; + struct io_epoll epoll; + struct io_splice splice; + struct io_provide_buf pbuf; + struct io_statx statx; + struct io_shutdown shutdown; + struct io_rename rename; + struct io_unlink unlink; + struct io_completion compl; + }; + void *async_data; + u8 opcode; + u8 iopoll_completed; + u16 buf_index; + u32 result; + struct io_ring_ctx *ctx; + unsigned int flags; + atomic_t refs; + struct task_struct *task; + u64 user_data; + struct io_kiocb *link; + struct percpu_ref *fixed_rsrc_refs; + struct list_head inflight_entry; + union { + struct io_task_work io_task_work; + struct callback_head task_work; + }; + struct hlist_node hash_node; + struct async_poll *apoll; + struct io_wq_work work; + const struct cred *creds; + struct io_mapped_ubuf *imu; +}; + +struct io_timeout_data { + struct io_kiocb *req; + struct hrtimer timer; + struct timespec64 ts; + enum hrtimer_mode mode; +}; + +struct io_async_connect { + struct __kernel_sockaddr_storage address; +}; + +struct io_async_msghdr { + struct iovec fast_iov[8]; + struct iovec *free_iov; + struct sockaddr *uaddr; + struct msghdr msg; + struct __kernel_sockaddr_storage addr; +}; + +struct io_async_rw { + struct iovec fast_iov[8]; + const struct iovec *free_iovec; + struct iov_iter iter; + size_t bytes_done; + struct wait_page_queue wpq; +}; + +enum { + REQ_F_FIXED_FILE_BIT = 0, + REQ_F_IO_DRAIN_BIT = 1, + REQ_F_LINK_BIT = 2, + REQ_F_HARDLINK_BIT = 3, + REQ_F_FORCE_ASYNC_BIT = 4, + REQ_F_BUFFER_SELECT_BIT = 5, + REQ_F_FAIL_LINK_BIT = 8, + REQ_F_INFLIGHT_BIT = 9, + REQ_F_CUR_POS_BIT = 10, + REQ_F_NOWAIT_BIT = 11, + REQ_F_LINK_TIMEOUT_BIT = 12, + REQ_F_NEED_CLEANUP_BIT = 13, + REQ_F_POLLED_BIT = 14, + REQ_F_BUFFER_SELECTED_BIT = 15, + REQ_F_LTIMEOUT_ACTIVE_BIT = 16, + REQ_F_COMPLETE_INLINE_BIT = 17, + REQ_F_REISSUE_BIT = 18, + REQ_F_DONT_REISSUE_BIT = 19, + REQ_F_ASYNC_READ_BIT = 20, + REQ_F_ASYNC_WRITE_BIT = 21, + REQ_F_ISREG_BIT = 22, + __REQ_F_LAST_BIT = 23, +}; + +enum { + REQ_F_FIXED_FILE = 1, + REQ_F_IO_DRAIN = 2, + REQ_F_LINK = 4, + REQ_F_HARDLINK = 8, + REQ_F_FORCE_ASYNC = 16, + REQ_F_BUFFER_SELECT = 32, + REQ_F_FAIL_LINK = 256, + REQ_F_INFLIGHT = 512, + REQ_F_CUR_POS = 1024, + REQ_F_NOWAIT = 2048, + REQ_F_LINK_TIMEOUT = 4096, + REQ_F_NEED_CLEANUP = 8192, + REQ_F_POLLED = 16384, + REQ_F_BUFFER_SELECTED = 32768, + REQ_F_LTIMEOUT_ACTIVE = 65536, + REQ_F_COMPLETE_INLINE = 131072, + REQ_F_REISSUE = 262144, + REQ_F_DONT_REISSUE = 524288, + REQ_F_ASYNC_READ = 1048576, + REQ_F_ASYNC_WRITE = 2097152, + REQ_F_ISREG = 4194304, +}; + +struct async_poll { + struct io_poll_iocb poll; + struct io_poll_iocb *double_poll; +}; + +enum { + IORING_RSRC_FILE = 0, + IORING_RSRC_BUFFER = 1, +}; + +struct io_tctx_node { + struct list_head ctx_node; + struct task_struct *task; + struct io_ring_ctx *ctx; +}; + +struct io_defer_entry { + struct list_head list; + struct io_kiocb *req; + u32 seq; +}; + +struct io_op_def { + unsigned int needs_file: 1; + unsigned int hash_reg_file: 1; + unsigned int unbound_nonreg_file: 1; + unsigned int not_supported: 1; + unsigned int pollin: 1; + unsigned 
int pollout: 1; + unsigned int buffer_select: 1; + unsigned int needs_async_setup: 1; + unsigned int plug: 1; + short unsigned int async_size; +}; + +struct req_batch { + struct task_struct *task; + int task_refs; + int ctx_refs; +}; + +struct io_poll_table { + struct poll_table_struct pt; + struct io_kiocb *req; + int nr_entries; + int error; +}; + +struct io_cancel_data { + struct io_ring_ctx *ctx; + u64 user_data; +}; + +struct io_wait_queue { + struct wait_queue_entry wq; + struct io_ring_ctx *ctx; + unsigned int to_wait; + unsigned int nr_timeouts; +}; + +struct io_tctx_exit { + struct callback_head task_work; + struct completion completion; + struct io_ring_ctx *ctx; +}; + +struct io_task_cancel { + struct task_struct *task; + bool all; +}; + +struct creds; + +enum { + IO_WORKER_F_UP = 1, + IO_WORKER_F_RUNNING = 2, + IO_WORKER_F_FREE = 4, + IO_WORKER_F_FIXED = 8, + IO_WORKER_F_BOUND = 16, +}; + +enum { + IO_WQ_BIT_EXIT = 0, +}; + +enum { + IO_WQE_FLAG_STALLED = 1, +}; + +struct io_wqe; + +struct io_worker { + refcount_t ref; + unsigned int flags; + struct hlist_nulls_node nulls_node; + struct list_head all_list; + struct task_struct *task; + struct io_wqe *wqe; + struct io_wq_work *cur_work; + spinlock_t lock; + struct completion ref_done; + long unsigned int create_state; + struct callback_head create_work; + int create_index; + struct callback_head rcu; +}; + +struct io_wqe_acct { + unsigned int nr_workers; + unsigned int max_workers; + int index; + atomic_t nr_running; +}; + +struct io_wq___2; + +struct io_wqe { + struct { + raw_spinlock_t lock; + struct io_wq_work_list work_list; + unsigned int flags; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + }; + int node; + struct io_wqe_acct acct[2]; + struct hlist_nulls_head free_list; + struct list_head all_list; + struct wait_queue_entry wait; + struct io_wq___2 *wq; + struct io_wq_work *hash_tail[64]; + long: 64; + long: 64; +}; + +enum { + IO_WQ_ACCT_BOUND = 0, + IO_WQ_ACCT_UNBOUND = 1, +}; + +struct io_wq___2 { + struct io_wqe **wqes; + long unsigned int state; + free_work_fn *free_work; + io_wq_work_fn *do_work; + struct io_wq_hash *hash; + refcount_t refs; + atomic_t worker_refs; + struct completion worker_done; + struct hlist_node cpuhp_node; + struct task_struct *task; +}; + +struct io_cb_cancel_data { + work_cancel_fn *fn; + void *data; + int nr_running; + int nr_pending; + bool cancel_all; +}; + +struct iomap_ops { + int (*iomap_begin)(struct inode *, loff_t, loff_t, unsigned int, struct iomap___2 *, struct iomap___2 *); + int (*iomap_end)(struct inode *, loff_t, loff_t, ssize_t, unsigned int, struct iomap___2 *); +}; + +typedef loff_t (*iomap_actor_t)(struct inode *, loff_t, loff_t, void *, struct iomap___2 *, struct iomap___2 *); + +struct trace_event_raw_dax_pmd_fault_class { + struct trace_entry ent; + long unsigned int ino; + long unsigned int vm_start; + long unsigned int vm_end; + long unsigned int vm_flags; + long unsigned int address; + long unsigned int pgoff; + long unsigned int max_pgoff; + dev_t dev; + unsigned int flags; + int result; + char __data[0]; +}; + +struct trace_event_raw_dax_pmd_load_hole_class { + struct trace_entry ent; + long unsigned int ino; + long unsigned int vm_flags; + long unsigned int address; + struct page *zero_page; + void *radix_entry; + dev_t dev; + char __data[0]; +}; + +struct trace_event_raw_dax_pmd_insert_mapping_class { + struct trace_entry ent; + long unsigned int ino; + long unsigned int vm_flags; + long unsigned int address; + long int length; + u64 pfn_val; + 
void *radix_entry; + dev_t dev; + int write; + char __data[0]; +}; + +struct trace_event_raw_dax_pte_fault_class { + struct trace_entry ent; + long unsigned int ino; + long unsigned int vm_flags; + long unsigned int address; + long unsigned int pgoff; + dev_t dev; + unsigned int flags; + int result; + char __data[0]; +}; + +struct trace_event_raw_dax_insert_mapping { + struct trace_entry ent; + long unsigned int ino; + long unsigned int vm_flags; + long unsigned int address; + void *radix_entry; + dev_t dev; + int write; + char __data[0]; +}; + +struct trace_event_raw_dax_writeback_range_class { + struct trace_entry ent; + long unsigned int ino; + long unsigned int start_index; + long unsigned int end_index; + dev_t dev; + char __data[0]; +}; + +struct trace_event_raw_dax_writeback_one { + struct trace_entry ent; + long unsigned int ino; + long unsigned int pgoff; + long unsigned int pglen; + dev_t dev; + char __data[0]; +}; + +struct trace_event_data_offsets_dax_pmd_fault_class {}; + +struct trace_event_data_offsets_dax_pmd_load_hole_class {}; + +struct trace_event_data_offsets_dax_pmd_insert_mapping_class {}; + +struct trace_event_data_offsets_dax_pte_fault_class {}; + +struct trace_event_data_offsets_dax_insert_mapping {}; + +struct trace_event_data_offsets_dax_writeback_range_class {}; + +struct trace_event_data_offsets_dax_writeback_one {}; + +typedef void (*btf_trace_dax_pmd_fault)(void *, struct inode *, struct vm_fault *, long unsigned int, int); + +typedef void (*btf_trace_dax_pmd_fault_done)(void *, struct inode *, struct vm_fault *, long unsigned int, int); + +typedef void (*btf_trace_dax_pmd_load_hole)(void *, struct inode *, struct vm_fault *, struct page *, void *); + +typedef void (*btf_trace_dax_pmd_load_hole_fallback)(void *, struct inode *, struct vm_fault *, struct page *, void *); + +typedef void (*btf_trace_dax_pmd_insert_mapping)(void *, struct inode *, struct vm_fault *, long int, pfn_t, void *); + +typedef void (*btf_trace_dax_pte_fault)(void *, struct inode *, struct vm_fault *, int); + +typedef void (*btf_trace_dax_pte_fault_done)(void *, struct inode *, struct vm_fault *, int); + +typedef void (*btf_trace_dax_load_hole)(void *, struct inode *, struct vm_fault *, int); + +typedef void (*btf_trace_dax_insert_pfn_mkwrite_no_entry)(void *, struct inode *, struct vm_fault *, int); + +typedef void (*btf_trace_dax_insert_pfn_mkwrite)(void *, struct inode *, struct vm_fault *, int); + +typedef void (*btf_trace_dax_insert_mapping)(void *, struct inode *, struct vm_fault *, void *); + +typedef void (*btf_trace_dax_writeback_range)(void *, struct inode *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_dax_writeback_range_done)(void *, struct inode *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_dax_writeback_one)(void *, struct inode *, long unsigned int, long unsigned int); + +struct exceptional_entry_key { + struct xarray *xa; + long unsigned int entry_start; +}; + +struct wait_exceptional_entry_queue { + wait_queue_entry_t wait; + struct exceptional_entry_key key; +}; + +enum dax_wake_mode { + WAKE_ALL = 0, + WAKE_NEXT = 1, +}; + +struct crypto_skcipher; + +struct fscrypt_blk_crypto_key; + +struct fscrypt_prepared_key { + struct crypto_skcipher *tfm; + struct fscrypt_blk_crypto_key *blk_key; +}; + +struct fscrypt_mode; + +struct fscrypt_direct_key; + +struct fscrypt_info { + struct fscrypt_prepared_key ci_enc_key; + bool ci_owns_key; + bool ci_inlinecrypt; + struct fscrypt_mode *ci_mode; + struct inode *ci_inode; + struct key 
*ci_master_key; + struct list_head ci_master_key_link; + struct fscrypt_direct_key *ci_direct_key; + siphash_key_t ci_dirhash_key; + bool ci_dirhash_key_initialized; + union fscrypt_policy ci_policy; + u8 ci_nonce[16]; + u32 ci_hashed_ino; +}; + +struct skcipher_request { + unsigned int cryptlen; + u8 *iv; + struct scatterlist *src; + struct scatterlist *dst; + struct crypto_async_request base; + void *__ctx[0]; +}; + +struct crypto_skcipher { + unsigned int reqsize; + struct crypto_tfm base; +}; + +struct fscrypt_mode { + const char *friendly_name; + const char *cipher_str; + int keysize; + int ivsize; + int logged_impl_name; + enum blk_crypto_mode_num blk_crypto_mode; +}; + +typedef enum { + FS_DECRYPT = 0, + FS_ENCRYPT = 1, +} fscrypt_direction_t; + +union fscrypt_iv { + struct { + __le64 lblk_num; + u8 nonce[16]; + }; + u8 raw[32]; + __le64 dun[4]; +}; + +struct fscrypt_str { + unsigned char *name; + u32 len; +}; + +struct fscrypt_name { + const struct qstr *usr_fname; + struct fscrypt_str disk_name; + u32 hash; + u32 minor_hash; + struct fscrypt_str crypto_buf; + bool is_nokey_name; +}; + +struct fscrypt_nokey_name { + u32 dirhash[2]; + u8 bytes[149]; + u8 sha256[32]; +}; + +struct fscrypt_hkdf { + struct crypto_shash *hmac_tfm; +}; + +struct fscrypt_key_specifier { + __u32 type; + __u32 __reserved; + union { + __u8 __reserved[32]; + __u8 descriptor[8]; + __u8 identifier[16]; + } u; +}; + +struct fscrypt_symlink_data { + __le16 len; + char encrypted_path[1]; +} __attribute__((packed)); + +struct fscrypt_master_key_secret { + struct fscrypt_hkdf hkdf; + u32 size; + u8 raw[64]; +}; + +struct fscrypt_master_key { + struct fscrypt_master_key_secret mk_secret; + struct fscrypt_key_specifier mk_spec; + struct key *mk_users; + refcount_t mk_refcount; + struct list_head mk_decrypted_inodes; + spinlock_t mk_decrypted_inodes_lock; + struct fscrypt_prepared_key mk_direct_keys[10]; + struct fscrypt_prepared_key mk_iv_ino_lblk_64_keys[10]; + struct fscrypt_prepared_key mk_iv_ino_lblk_32_keys[10]; + siphash_key_t mk_ino_hash_key; + bool mk_ino_hash_key_initialized; +}; + +enum key_state { + KEY_IS_UNINSTANTIATED = 0, + KEY_IS_POSITIVE = 1, +}; + +struct fscrypt_provisioning_key_payload { + __u32 type; + __u32 __reserved; + __u8 raw[0]; +}; + +struct fscrypt_add_key_arg { + struct fscrypt_key_specifier key_spec; + __u32 raw_size; + __u32 key_id; + __u32 __reserved[8]; + __u8 raw[0]; +}; + +struct fscrypt_remove_key_arg { + struct fscrypt_key_specifier key_spec; + __u32 removal_status_flags; + __u32 __reserved[5]; +}; + +struct fscrypt_get_key_status_arg { + struct fscrypt_key_specifier key_spec; + __u32 __reserved[6]; + __u32 status; + __u32 status_flags; + __u32 user_count; + __u32 __out_reserved[13]; +}; + +struct skcipher_alg { + int (*setkey)(struct crypto_skcipher *, const u8 *, unsigned int); + int (*encrypt)(struct skcipher_request *); + int (*decrypt)(struct skcipher_request *); + int (*init)(struct crypto_skcipher *); + void (*exit)(struct crypto_skcipher *); + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; + unsigned int chunksize; + unsigned int walksize; + struct crypto_alg base; +}; + +struct fscrypt_context_v1 { + u8 version; + u8 contents_encryption_mode; + u8 filenames_encryption_mode; + u8 flags; + u8 master_key_descriptor[8]; + u8 nonce[16]; +}; + +struct fscrypt_context_v2 { + u8 version; + u8 contents_encryption_mode; + u8 filenames_encryption_mode; + u8 flags; + u8 __reserved[4]; + u8 master_key_identifier[16]; + u8 nonce[16]; +}; + +union 
fscrypt_context { + u8 version; + struct fscrypt_context_v1 v1; + struct fscrypt_context_v2 v2; +}; + +struct user_key_payload { + struct callback_head rcu; + short unsigned int datalen; + long: 48; + char data[0]; +}; + +struct fscrypt_key { + __u32 mode; + __u8 raw[64]; + __u32 size; +}; + +struct fscrypt_direct_key { + struct hlist_node dk_node; + refcount_t dk_refcount; + const struct fscrypt_mode *dk_mode; + struct fscrypt_prepared_key dk_key; + u8 dk_descriptor[8]; + u8 dk_raw[64]; +}; + +struct fscrypt_get_policy_ex_arg { + __u64 policy_size; + union { + __u8 version; + struct fscrypt_policy_v1 v1; + struct fscrypt_policy_v2 v2; + } policy; +}; + +struct fscrypt_dummy_policy { + const union fscrypt_policy *policy; +}; + +struct fscrypt_blk_crypto_key { + struct blk_crypto_key base; + int num_devs; + struct request_queue *devs[0]; +}; + +struct fsverity_hash_alg; + +struct merkle_tree_params { + struct fsverity_hash_alg *hash_alg; + const u8 *hashstate; + unsigned int digest_size; + unsigned int block_size; + unsigned int hashes_per_block; + unsigned int log_blocksize; + unsigned int log_arity; + unsigned int num_levels; + u64 tree_size; + long unsigned int level0_blocks; + u64 level_start[8]; +}; + +struct fsverity_info { + struct merkle_tree_params tree_params; + u8 root_hash[64]; + u8 file_digest[64]; + const struct inode *inode; +}; + +struct fsverity_enable_arg { + __u32 version; + __u32 hash_algorithm; + __u32 block_size; + __u32 salt_size; + __u64 salt_ptr; + __u32 sig_size; + __u32 __reserved1; + __u64 sig_ptr; + __u64 __reserved2[11]; +}; + +struct fsverity_descriptor { + __u8 version; + __u8 hash_algorithm; + __u8 log_blocksize; + __u8 salt_size; + __le32 sig_size; + __le64 data_size; + __u8 root_hash[64]; + __u8 salt[32]; + __u8 __reserved[144]; + __u8 signature[0]; +}; + +struct crypto_ahash; + +struct fsverity_hash_alg { + struct crypto_ahash *tfm; + const char *name; + unsigned int digest_size; + unsigned int block_size; + mempool_t req_pool; +}; + +struct ahash_request; + +struct crypto_ahash { + int (*init)(struct ahash_request *); + int (*update)(struct ahash_request *); + int (*final)(struct ahash_request *); + int (*finup)(struct ahash_request *); + int (*digest)(struct ahash_request *); + int (*export)(struct ahash_request *, void *); + int (*import)(struct ahash_request *, const void *); + int (*setkey)(struct crypto_ahash *, const u8 *, unsigned int); + unsigned int reqsize; + struct crypto_tfm base; +}; + +struct ahash_request { + struct crypto_async_request base; + unsigned int nbytes; + struct scatterlist *src; + u8 *result; + void *priv; + void *__ctx[0]; +}; + +struct hash_alg_common { + unsigned int digestsize; + unsigned int statesize; + struct crypto_alg base; +}; + +struct fsverity_digest { + __u16 digest_algorithm; + __u16 digest_size; + __u8 digest[0]; +}; + +struct fsverity_read_metadata_arg { + __u64 metadata_type; + __u64 offset; + __u64 length; + __u64 buf_ptr; + __u64 __reserved; +}; + +struct fsverity_formatted_digest { + char magic[8]; + __le16 digest_algorithm; + __le16 digest_size; + __u8 digest[0]; +}; + +struct flock64 { + short int l_type; + short int l_whence; + __kernel_loff_t l_start; + __kernel_loff_t l_len; + __kernel_pid_t l_pid; +}; + +struct trace_event_raw_locks_get_lock_context { + struct trace_entry ent; + long unsigned int i_ino; + dev_t s_dev; + unsigned char type; + struct file_lock_context *ctx; + char __data[0]; +}; + +struct trace_event_raw_filelock_lock { + struct trace_entry ent; + struct file_lock *fl; + long unsigned 
int i_ino; + dev_t s_dev; + struct file_lock *fl_blocker; + fl_owner_t fl_owner; + unsigned int fl_pid; + unsigned int fl_flags; + unsigned char fl_type; + loff_t fl_start; + loff_t fl_end; + int ret; + char __data[0]; +}; + +struct trace_event_raw_filelock_lease { + struct trace_entry ent; + struct file_lock *fl; + long unsigned int i_ino; + dev_t s_dev; + struct file_lock *fl_blocker; + fl_owner_t fl_owner; + unsigned int fl_flags; + unsigned char fl_type; + long unsigned int fl_break_time; + long unsigned int fl_downgrade_time; + char __data[0]; +}; + +struct trace_event_raw_generic_add_lease { + struct trace_entry ent; + long unsigned int i_ino; + int wcount; + int rcount; + int icount; + dev_t s_dev; + fl_owner_t fl_owner; + unsigned int fl_flags; + unsigned char fl_type; + char __data[0]; +}; + +struct trace_event_raw_leases_conflict { + struct trace_entry ent; + void *lease; + void *breaker; + unsigned int l_fl_flags; + unsigned int b_fl_flags; + unsigned char l_fl_type; + unsigned char b_fl_type; + bool conflict; + char __data[0]; +}; + +struct trace_event_data_offsets_locks_get_lock_context {}; + +struct trace_event_data_offsets_filelock_lock {}; + +struct trace_event_data_offsets_filelock_lease {}; + +struct trace_event_data_offsets_generic_add_lease {}; + +struct trace_event_data_offsets_leases_conflict {}; + +typedef void (*btf_trace_locks_get_lock_context)(void *, struct inode *, int, struct file_lock_context *); + +typedef void (*btf_trace_posix_lock_inode)(void *, struct inode *, struct file_lock *, int); + +typedef void (*btf_trace_fcntl_setlk)(void *, struct inode *, struct file_lock *, int); + +typedef void (*btf_trace_locks_remove_posix)(void *, struct inode *, struct file_lock *, int); + +typedef void (*btf_trace_flock_lock_inode)(void *, struct inode *, struct file_lock *, int); + +typedef void (*btf_trace_break_lease_noblock)(void *, struct inode *, struct file_lock *); + +typedef void (*btf_trace_break_lease_block)(void *, struct inode *, struct file_lock *); + +typedef void (*btf_trace_break_lease_unblock)(void *, struct inode *, struct file_lock *); + +typedef void (*btf_trace_generic_delete_lease)(void *, struct inode *, struct file_lock *); + +typedef void (*btf_trace_time_out_leases)(void *, struct inode *, struct file_lock *); + +typedef void (*btf_trace_generic_add_lease)(void *, struct inode *, struct file_lock *); + +typedef void (*btf_trace_leases_conflict)(void *, bool, struct file_lock *, struct file_lock *); + +struct file_lock_list_struct { + spinlock_t lock; + struct hlist_head hlist; +}; + +struct locks_iterator { + int li_cpu; + loff_t li_pos; +}; + +typedef unsigned int __kernel_uid_t; + +typedef unsigned int __kernel_gid_t; + +struct elf_prpsinfo { + char pr_state; + char pr_sname; + char pr_zomb; + char pr_nice; + long unsigned int pr_flag; + __kernel_uid_t pr_uid; + __kernel_gid_t pr_gid; + pid_t pr_pid; + pid_t pr_ppid; + pid_t pr_pgrp; + pid_t pr_sid; + char pr_fname[16]; + char pr_psargs[80]; +}; + +struct core_vma_metadata { + long unsigned int start; + long unsigned int end; + long unsigned int flags; + long unsigned int dump_size; +}; + +struct arch_elf_state {}; + +struct memelfnote { + const char *name; + int type; + unsigned int datasz; + void *data; +}; + +struct elf_thread_core_info { + struct elf_thread_core_info *next; + struct task_struct *task; + struct elf_prstatus prstatus; + struct memelfnote notes[0]; +}; + +struct elf_note_info { + struct elf_thread_core_info *thread; + struct memelfnote psinfo; + struct memelfnote signote; + 
struct memelfnote auxv; + struct memelfnote files; + siginfo_t csigdata; + size_t size; + int thread_notes; +}; + +struct user_regs_struct { + long unsigned int r15; + long unsigned int r14; + long unsigned int r13; + long unsigned int r12; + long unsigned int bp; + long unsigned int bx; + long unsigned int r11; + long unsigned int r10; + long unsigned int r9; + long unsigned int r8; + long unsigned int ax; + long unsigned int cx; + long unsigned int dx; + long unsigned int si; + long unsigned int di; + long unsigned int orig_ax; + long unsigned int ip; + long unsigned int cs; + long unsigned int flags; + long unsigned int sp; + long unsigned int ss; + long unsigned int fs_base; + long unsigned int gs_base; + long unsigned int ds; + long unsigned int es; + long unsigned int fs; + long unsigned int gs; +}; + +typedef __u32 Elf32_Addr; + +typedef __u16 Elf32_Half; + +typedef __u32 Elf32_Off; + +struct elf32_hdr { + unsigned char e_ident[16]; + Elf32_Half e_type; + Elf32_Half e_machine; + Elf32_Word e_version; + Elf32_Addr e_entry; + Elf32_Off e_phoff; + Elf32_Off e_shoff; + Elf32_Word e_flags; + Elf32_Half e_ehsize; + Elf32_Half e_phentsize; + Elf32_Half e_phnum; + Elf32_Half e_shentsize; + Elf32_Half e_shnum; + Elf32_Half e_shstrndx; +}; + +struct elf32_phdr { + Elf32_Word p_type; + Elf32_Off p_offset; + Elf32_Addr p_vaddr; + Elf32_Addr p_paddr; + Elf32_Word p_filesz; + Elf32_Word p_memsz; + Elf32_Word p_flags; + Elf32_Word p_align; +}; + +struct elf32_shdr { + Elf32_Word sh_name; + Elf32_Word sh_type; + Elf32_Word sh_flags; + Elf32_Addr sh_addr; + Elf32_Off sh_offset; + Elf32_Word sh_size; + Elf32_Word sh_link; + Elf32_Word sh_info; + Elf32_Word sh_addralign; + Elf32_Word sh_entsize; +}; + +struct user_regs_struct32 { + __u32 ebx; + __u32 ecx; + __u32 edx; + __u32 esi; + __u32 edi; + __u32 ebp; + __u32 eax; + short unsigned int ds; + short unsigned int __ds; + short unsigned int es; + short unsigned int __es; + short unsigned int fs; + short unsigned int __fs; + short unsigned int gs; + short unsigned int __gs; + __u32 orig_eax; + __u32 eip; + short unsigned int cs; + short unsigned int __cs; + __u32 eflags; + __u32 esp; + short unsigned int ss; + short unsigned int __ss; +}; + +struct compat_elf_siginfo { + compat_int_t si_signo; + compat_int_t si_code; + compat_int_t si_errno; +}; + +struct compat_elf_prstatus_common { + struct compat_elf_siginfo pr_info; + short int pr_cursig; + compat_ulong_t pr_sigpend; + compat_ulong_t pr_sighold; + compat_pid_t pr_pid; + compat_pid_t pr_ppid; + compat_pid_t pr_pgrp; + compat_pid_t pr_sid; + struct old_timeval32 pr_utime; + struct old_timeval32 pr_stime; + struct old_timeval32 pr_cutime; + struct old_timeval32 pr_cstime; +}; + +struct compat_elf_prpsinfo { + char pr_state; + char pr_sname; + char pr_zomb; + char pr_nice; + compat_ulong_t pr_flag; + __compat_uid_t pr_uid; + __compat_gid_t pr_gid; + compat_pid_t pr_pid; + compat_pid_t pr_ppid; + compat_pid_t pr_pgrp; + compat_pid_t pr_sid; + char pr_fname[16]; + char pr_psargs[80]; +}; + +typedef struct user_regs_struct compat_elf_gregset_t; + +struct i386_elf_prstatus { + struct compat_elf_prstatus_common common; + struct user_regs_struct32 pr_reg; + compat_int_t pr_fpvalid; +}; + +struct compat_elf_prstatus { + struct compat_elf_prstatus_common common; + compat_elf_gregset_t pr_reg; + compat_int_t pr_fpvalid; +}; + +struct elf_thread_core_info___2 { + struct elf_thread_core_info___2 *next; + struct task_struct *task; + struct compat_elf_prstatus prstatus; + struct memelfnote notes[0]; +}; + +struct 
elf_note_info___2 { + struct elf_thread_core_info___2 *thread; + struct memelfnote psinfo; + struct memelfnote signote; + struct memelfnote auxv; + struct memelfnote files; + compat_siginfo_t csigdata; + size_t size; + int thread_notes; +}; + +struct mb_cache_entry { + struct list_head e_list; + struct hlist_bl_node e_hash_list; + atomic_t e_refcnt; + u32 e_key; + u32 e_referenced: 1; + u32 e_reusable: 1; + u64 e_value; +}; + +struct mb_cache { + struct hlist_bl_head *c_hash; + int c_bucket_bits; + long unsigned int c_max_entries; + spinlock_t c_list_lock; + struct list_head c_list; + long unsigned int c_entry_count; + struct shrinker c_shrink; + struct work_struct c_shrink_work; +}; + +struct posix_acl_xattr_entry { + __le16 e_tag; + __le16 e_perm; + __le32 e_id; +}; + +struct posix_acl_xattr_header { + __le32 a_version; +}; + +struct rpc_timer { + struct list_head list; + long unsigned int expires; + struct delayed_work dwork; +}; + +struct rpc_wait_queue { + spinlock_t lock; + struct list_head tasks[4]; + unsigned char maxpriority; + unsigned char priority; + unsigned char nr; + short unsigned int qlen; + struct rpc_timer timer_list; + const char *name; +}; + +struct nfs_seqid_counter { + ktime_t create_time; + int owner_id; + int flags; + u32 counter; + spinlock_t lock; + struct list_head list; + struct rpc_wait_queue wait; +}; + +struct nfs4_stateid_struct { + union { + char data[16]; + struct { + __be32 seqid; + char other[12]; + }; + }; + enum { + NFS4_INVALID_STATEID_TYPE = 0, + NFS4_SPECIAL_STATEID_TYPE = 1, + NFS4_OPEN_STATEID_TYPE = 2, + NFS4_LOCK_STATEID_TYPE = 3, + NFS4_DELEGATION_STATEID_TYPE = 4, + NFS4_LAYOUT_STATEID_TYPE = 5, + NFS4_PNFS_DS_STATEID_TYPE = 6, + NFS4_REVOKED_STATEID_TYPE = 7, + } type; +}; + +typedef struct nfs4_stateid_struct nfs4_stateid; + +struct nfs4_state; + +struct nfs4_lock_state { + struct list_head ls_locks; + struct nfs4_state *ls_state; + long unsigned int ls_flags; + struct nfs_seqid_counter ls_seqid; + nfs4_stateid ls_stateid; + refcount_t ls_count; + fl_owner_t ls_owner; +}; + +struct xdr_buf { + struct kvec head[1]; + struct kvec tail[1]; + struct bio_vec *bvec; + struct page **pages; + unsigned int page_base; + unsigned int page_len; + unsigned int flags; + unsigned int buflen; + unsigned int len; +}; + +struct rpc_rqst; + +struct xdr_stream { + __be32 *p; + struct xdr_buf *buf; + __be32 *end; + struct kvec *iov; + struct kvec scratch; + struct page **page_ptr; + unsigned int nwords; + struct rpc_rqst *rqst; +}; + +struct rpc_xprt; + +struct rpc_task; + +struct rpc_cred; + +struct rpc_rqst { + struct rpc_xprt *rq_xprt; + struct xdr_buf rq_snd_buf; + struct xdr_buf rq_rcv_buf; + struct rpc_task *rq_task; + struct rpc_cred *rq_cred; + __be32 rq_xid; + int rq_cong; + u32 rq_seqno; + int rq_enc_pages_num; + struct page **rq_enc_pages; + void (*rq_release_snd_buf)(struct rpc_rqst *); + union { + struct list_head rq_list; + struct rb_node rq_recv; + }; + struct list_head rq_xmit; + struct list_head rq_xmit2; + void *rq_buffer; + size_t rq_callsize; + void *rq_rbuffer; + size_t rq_rcvsize; + size_t rq_xmit_bytes_sent; + size_t rq_reply_bytes_recvd; + struct xdr_buf rq_private_buf; + long unsigned int rq_majortimeo; + long unsigned int rq_minortimeo; + long unsigned int rq_timeout; + ktime_t rq_rtt; + unsigned int rq_retries; + unsigned int rq_connect_cookie; + atomic_t rq_pin; + u32 rq_bytes_sent; + ktime_t rq_xtime; + int rq_ntrans; + struct list_head rq_bc_list; + long unsigned int rq_bc_pa_state; + struct list_head rq_bc_pa_list; +}; + +typedef 
void (*kxdreproc_t)(struct rpc_rqst *, struct xdr_stream *, const void *); + +typedef int (*kxdrdproc_t)(struct rpc_rqst *, struct xdr_stream *, void *); + +struct rpc_procinfo; + +struct rpc_message { + const struct rpc_procinfo *rpc_proc; + void *rpc_argp; + void *rpc_resp; + const struct cred *rpc_cred; +}; + +struct rpc_procinfo { + u32 p_proc; + kxdreproc_t p_encode; + kxdrdproc_t p_decode; + unsigned int p_arglen; + unsigned int p_replen; + unsigned int p_timer; + u32 p_statidx; + const char *p_name; +}; + +struct rpc_wait { + struct list_head list; + struct list_head links; + struct list_head timer_list; +}; + +struct rpc_call_ops; + +struct rpc_clnt; + +struct rpc_task { + atomic_t tk_count; + int tk_status; + struct list_head tk_task; + void (*tk_callback)(struct rpc_task *); + void (*tk_action)(struct rpc_task *); + long unsigned int tk_timeout; + long unsigned int tk_runstate; + struct rpc_wait_queue *tk_waitqueue; + union { + struct work_struct tk_work; + struct rpc_wait tk_wait; + } u; + int tk_rpc_status; + struct rpc_message tk_msg; + void *tk_calldata; + const struct rpc_call_ops *tk_ops; + struct rpc_clnt *tk_client; + struct rpc_xprt *tk_xprt; + struct rpc_cred *tk_op_cred; + struct rpc_rqst *tk_rqstp; + struct workqueue_struct *tk_workqueue; + ktime_t tk_start; + pid_t tk_owner; + short unsigned int tk_flags; + short unsigned int tk_timeouts; + short unsigned int tk_pid; + unsigned char tk_priority: 2; + unsigned char tk_garb_retry: 2; + unsigned char tk_cred_retry: 2; + unsigned char tk_rebind_retry: 2; +}; + +struct rpc_call_ops { + void (*rpc_call_prepare)(struct rpc_task *, void *); + void (*rpc_call_done)(struct rpc_task *, void *); + void (*rpc_count_stats)(struct rpc_task *, void *); + void (*rpc_release)(void *); +}; + +struct rpc_iostats; + +struct rpc_pipe_dir_head { + struct list_head pdh_entries; + struct dentry *pdh_dentry; +}; + +struct rpc_rtt { + long unsigned int timeo; + long unsigned int srtt[5]; + long unsigned int sdrtt[5]; + int ntimeouts[5]; +}; + +struct rpc_timeout { + long unsigned int to_initval; + long unsigned int to_maxval; + long unsigned int to_increment; + unsigned int to_retries; + unsigned char to_exponential; +}; + +struct rpc_xprt_switch; + +struct rpc_xprt_iter_ops; + +struct rpc_xprt_iter { + struct rpc_xprt_switch *xpi_xpswitch; + struct rpc_xprt *xpi_cursor; + const struct rpc_xprt_iter_ops *xpi_ops; +}; + +struct rpc_auth; + +struct rpc_stat; + +struct rpc_program; + +struct rpc_clnt { + atomic_t cl_count; + unsigned int cl_clid; + struct list_head cl_clients; + struct list_head cl_tasks; + spinlock_t cl_lock; + struct rpc_xprt *cl_xprt; + const struct rpc_procinfo *cl_procinfo; + u32 cl_prog; + u32 cl_vers; + u32 cl_maxproc; + struct rpc_auth *cl_auth; + struct rpc_stat *cl_stats; + struct rpc_iostats *cl_metrics; + unsigned int cl_softrtry: 1; + unsigned int cl_softerr: 1; + unsigned int cl_discrtry: 1; + unsigned int cl_noretranstimeo: 1; + unsigned int cl_autobind: 1; + unsigned int cl_chatty: 1; + struct rpc_rtt *cl_rtt; + const struct rpc_timeout *cl_timeout; + atomic_t cl_swapper; + int cl_nodelen; + char cl_nodename[65]; + struct rpc_pipe_dir_head cl_pipedir_objects; + struct rpc_clnt *cl_parent; + struct rpc_rtt cl_rtt_default; + struct rpc_timeout cl_timeout_default; + const struct rpc_program *cl_program; + const char *cl_principal; + struct dentry *cl_debugfs; + union { + struct rpc_xprt_iter cl_xpi; + struct work_struct cl_work; + }; + const struct cred *cl_cred; +}; + +struct svc_xprt; + +struct svc_serv; + +struct 
rpc_xprt_ops; + +struct rpc_xprt { + struct kref kref; + const struct rpc_xprt_ops *ops; + const struct rpc_timeout *timeout; + struct __kernel_sockaddr_storage addr; + size_t addrlen; + int prot; + long unsigned int cong; + long unsigned int cwnd; + size_t max_payload; + struct rpc_wait_queue binding; + struct rpc_wait_queue sending; + struct rpc_wait_queue pending; + struct rpc_wait_queue backlog; + struct list_head free; + unsigned int max_reqs; + unsigned int min_reqs; + unsigned int num_reqs; + long unsigned int state; + unsigned char resvport: 1; + unsigned char reuseport: 1; + atomic_t swapper; + unsigned int bind_index; + struct list_head xprt_switch; + long unsigned int bind_timeout; + long unsigned int reestablish_timeout; + unsigned int connect_cookie; + struct work_struct task_cleanup; + struct timer_list timer; + long unsigned int last_used; + long unsigned int idle_timeout; + long unsigned int connect_timeout; + long unsigned int max_reconnect_timeout; + atomic_long_t queuelen; + spinlock_t transport_lock; + spinlock_t reserve_lock; + spinlock_t queue_lock; + u32 xid; + struct rpc_task *snd_task; + struct list_head xmit_queue; + atomic_long_t xmit_queuelen; + struct svc_xprt *bc_xprt; + struct svc_serv *bc_serv; + unsigned int bc_alloc_max; + unsigned int bc_alloc_count; + atomic_t bc_slot_count; + spinlock_t bc_pa_lock; + struct list_head bc_pa_list; + struct rb_root recv_queue; + struct { + long unsigned int bind_count; + long unsigned int connect_count; + long unsigned int connect_start; + long unsigned int connect_time; + long unsigned int sends; + long unsigned int recvs; + long unsigned int bad_xids; + long unsigned int max_slots; + long long unsigned int req_u; + long long unsigned int bklog_u; + long long unsigned int sending_u; + long long unsigned int pending_u; + } stat; + struct net *xprt_net; + const char *servername; + const char *address_strings[6]; + struct dentry *debugfs; + atomic_t inject_disconnect; + struct callback_head rcu; +}; + +struct rpc_credops; + +struct rpc_cred { + struct hlist_node cr_hash; + struct list_head cr_lru; + struct callback_head cr_rcu; + struct rpc_auth *cr_auth; + const struct rpc_credops *cr_ops; + long unsigned int cr_expire; + long unsigned int cr_flags; + refcount_t cr_count; + const struct cred *cr_cred; +}; + +typedef u32 rpc_authflavor_t; + +struct auth_cred { + const struct cred *cred; + const char *principal; +}; + +struct rpc_cred_cache; + +struct rpc_authops; + +struct rpc_auth { + unsigned int au_cslack; + unsigned int au_rslack; + unsigned int au_verfsize; + unsigned int au_ralign; + long unsigned int au_flags; + const struct rpc_authops *au_ops; + rpc_authflavor_t au_flavor; + refcount_t au_count; + struct rpc_cred_cache *au_credcache; +}; + +struct rpc_credops { + const char *cr_name; + int (*cr_init)(struct rpc_auth *, struct rpc_cred *); + void (*crdestroy)(struct rpc_cred *); + int (*crmatch)(struct auth_cred *, struct rpc_cred *, int); + int (*crmarshal)(struct rpc_task *, struct xdr_stream *); + int (*crrefresh)(struct rpc_task *); + int (*crvalidate)(struct rpc_task *, struct xdr_stream *); + int (*crwrap_req)(struct rpc_task *, struct xdr_stream *); + int (*crunwrap_resp)(struct rpc_task *, struct xdr_stream *); + int (*crkey_timeout)(struct rpc_cred *); + char * (*crstringify_acceptor)(struct rpc_cred *); + bool (*crneed_reencode)(struct rpc_task *); +}; + +struct rpc_auth_create_args; + +struct rpcsec_gss_info; + +struct rpc_authops { + struct module *owner; + rpc_authflavor_t au_flavor; + char *au_name; + 
struct rpc_auth * (*create)(const struct rpc_auth_create_args *, struct rpc_clnt *); + void (*destroy)(struct rpc_auth *); + int (*hash_cred)(struct auth_cred *, unsigned int); + struct rpc_cred * (*lookup_cred)(struct rpc_auth *, struct auth_cred *, int); + struct rpc_cred * (*crcreate)(struct rpc_auth *, struct auth_cred *, int, gfp_t); + rpc_authflavor_t (*info2flavor)(struct rpcsec_gss_info *); + int (*flavor2info)(rpc_authflavor_t, struct rpcsec_gss_info *); + int (*key_timeout)(struct rpc_auth *, struct rpc_cred *); +}; + +struct rpc_auth_create_args { + rpc_authflavor_t pseudoflavor; + const char *target_name; +}; + +struct rpcsec_gss_oid { + unsigned int len; + u8 data[32]; +}; + +struct rpcsec_gss_info { + struct rpcsec_gss_oid oid; + u32 qop; + u32 service; +}; + +struct rpc_xprt_ops { + void (*set_buffer_size)(struct rpc_xprt *, size_t, size_t); + int (*reserve_xprt)(struct rpc_xprt *, struct rpc_task *); + void (*release_xprt)(struct rpc_xprt *, struct rpc_task *); + void (*alloc_slot)(struct rpc_xprt *, struct rpc_task *); + void (*free_slot)(struct rpc_xprt *, struct rpc_rqst *); + void (*rpcbind)(struct rpc_task *); + void (*set_port)(struct rpc_xprt *, short unsigned int); + void (*connect)(struct rpc_xprt *, struct rpc_task *); + int (*buf_alloc)(struct rpc_task *); + void (*buf_free)(struct rpc_task *); + void (*prepare_request)(struct rpc_rqst *); + int (*send_request)(struct rpc_rqst *); + void (*wait_for_reply_request)(struct rpc_task *); + void (*timer)(struct rpc_xprt *, struct rpc_task *); + void (*release_request)(struct rpc_task *); + void (*close)(struct rpc_xprt *); + void (*destroy)(struct rpc_xprt *); + void (*set_connect_timeout)(struct rpc_xprt *, long unsigned int, long unsigned int); + void (*print_stats)(struct rpc_xprt *, struct seq_file *); + int (*enable_swap)(struct rpc_xprt *); + void (*disable_swap)(struct rpc_xprt *); + void (*inject_disconnect)(struct rpc_xprt *); + int (*bc_setup)(struct rpc_xprt *, unsigned int); + size_t (*bc_maxpayload)(struct rpc_xprt *); + unsigned int (*bc_num_slots)(struct rpc_xprt *); + void (*bc_free_rqst)(struct rpc_rqst *); + void (*bc_destroy)(struct rpc_xprt *, unsigned int); +}; + +struct rpc_xprt_switch { + spinlock_t xps_lock; + struct kref xps_kref; + unsigned int xps_nxprts; + unsigned int xps_nactive; + atomic_long_t xps_queuelen; + struct list_head xps_xprt_list; + struct net *xps_net; + const struct rpc_xprt_iter_ops *xps_iter_ops; + struct callback_head xps_rcu; +}; + +struct rpc_stat { + const struct rpc_program *program; + unsigned int netcnt; + unsigned int netudpcnt; + unsigned int nettcpcnt; + unsigned int nettcpconn; + unsigned int netreconn; + unsigned int rpccnt; + unsigned int rpcretrans; + unsigned int rpcauthrefresh; + unsigned int rpcgarbage; +}; + +struct rpc_version; + +struct rpc_program { + const char *name; + u32 number; + unsigned int nrvers; + const struct rpc_version **version; + struct rpc_stat *stats; + const char *pipe_dir_name; +}; + +struct rpc_xprt_iter_ops { + void (*xpi_rewind)(struct rpc_xprt_iter *); + struct rpc_xprt * (*xpi_xprt)(struct rpc_xprt_iter *); + struct rpc_xprt * (*xpi_next)(struct rpc_xprt_iter *); +}; + +struct rpc_version { + u32 number; + unsigned int nrprocs; + const struct rpc_procinfo *procs; + unsigned int *counts; +}; + +struct nfs_fh { + short unsigned int size; + unsigned char data[128]; +}; + +enum nfs3_stable_how { + NFS_UNSTABLE = 0, + NFS_DATA_SYNC = 1, + NFS_FILE_SYNC = 2, + NFS_INVALID_STABLE_HOW = 4294967295, +}; + +struct nfs4_label { + uint32_t 
lfs; + uint32_t pi; + u32 len; + char *label; +}; + +typedef struct { + char data[8]; +} nfs4_verifier; + +enum nfs4_change_attr_type { + NFS4_CHANGE_TYPE_IS_MONOTONIC_INCR = 0, + NFS4_CHANGE_TYPE_IS_VERSION_COUNTER = 1, + NFS4_CHANGE_TYPE_IS_VERSION_COUNTER_NOPNFS = 2, + NFS4_CHANGE_TYPE_IS_TIME_METADATA = 3, + NFS4_CHANGE_TYPE_IS_UNDEFINED = 4, +}; + +struct nfs4_string { + unsigned int len; + char *data; +}; + +struct nfs_fsid { + uint64_t major; + uint64_t minor; +}; + +struct nfs4_threshold { + __u32 bm; + __u32 l_type; + __u64 rd_sz; + __u64 wr_sz; + __u64 rd_io_sz; + __u64 wr_io_sz; +}; + +struct nfs_fattr { + unsigned int valid; + umode_t mode; + __u32 nlink; + kuid_t uid; + kgid_t gid; + dev_t rdev; + __u64 size; + union { + struct { + __u32 blocksize; + __u32 blocks; + } nfs2; + struct { + __u64 used; + } nfs3; + } du; + struct nfs_fsid fsid; + __u64 fileid; + __u64 mounted_on_fileid; + struct timespec64 atime; + struct timespec64 mtime; + struct timespec64 ctime; + __u64 change_attr; + __u64 pre_change_attr; + __u64 pre_size; + struct timespec64 pre_mtime; + struct timespec64 pre_ctime; + long unsigned int time_start; + long unsigned int gencount; + struct nfs4_string *owner_name; + struct nfs4_string *group_name; + struct nfs4_threshold *mdsthreshold; + struct nfs4_label *label; +}; + +struct nfs_fsinfo { + struct nfs_fattr *fattr; + __u32 rtmax; + __u32 rtpref; + __u32 rtmult; + __u32 wtmax; + __u32 wtpref; + __u32 wtmult; + __u32 dtpref; + __u64 maxfilesize; + struct timespec64 time_delta; + __u32 lease_time; + __u32 nlayouttypes; + __u32 layouttype[8]; + __u32 blksize; + __u32 clone_blksize; + enum nfs4_change_attr_type change_attr_type; + __u32 xattr_support; +}; + +struct nfs_fsstat { + struct nfs_fattr *fattr; + __u64 tbytes; + __u64 fbytes; + __u64 abytes; + __u64 tfiles; + __u64 ffiles; + __u64 afiles; +}; + +struct nfs_pathconf { + struct nfs_fattr *fattr; + __u32 max_link; + __u32 max_namelen; +}; + +struct nfs4_change_info { + u32 atomic; + u64 before; + u64 after; +}; + +struct nfs4_slot; + +struct nfs4_sequence_args { + struct nfs4_slot *sa_slot; + u8 sa_cache_this: 1; + u8 sa_privileged: 1; +}; + +struct nfs4_sequence_res { + struct nfs4_slot *sr_slot; + long unsigned int sr_timestamp; + int sr_status; + u32 sr_status_flags; + u32 sr_highest_slotid; + u32 sr_target_highest_slotid; +}; + +struct nfs_open_context; + +struct nfs_lock_context { + refcount_t count; + struct list_head list; + struct nfs_open_context *open_context; + fl_owner_t lockowner; + atomic_t io_count; + struct callback_head callback_head; +}; + +struct nfs_open_context { + struct nfs_lock_context lock_context; + fl_owner_t flock_owner; + struct dentry *dentry; + const struct cred *cred; + struct rpc_cred *ll_cred; + struct nfs4_state *state; + fmode_t mode; + long unsigned int flags; + int error; + struct list_head list; + struct nfs4_threshold *mdsthreshold; + struct callback_head callback_head; +}; + +struct nlm_host; + +struct nfs_iostats; + +struct nfs_auth_info { + unsigned int flavor_len; + rpc_authflavor_t flavors[12]; +}; + +struct nfs_fscache_key; + +struct fscache_cookie; + +struct pnfs_layoutdriver_type; + +struct nfs_client; + +struct nfs_server { + struct nfs_client *nfs_client; + struct list_head client_link; + struct list_head master_link; + struct rpc_clnt *client; + struct rpc_clnt *client_acl; + struct nlm_host *nlm_host; + struct nfs_iostats *io_stats; + atomic_long_t writeback; + unsigned int flags; + unsigned int fattr_valid; + unsigned int caps; + unsigned int rsize; + 
unsigned int rpages; + unsigned int wsize; + unsigned int wpages; + unsigned int wtmult; + unsigned int dtsize; + short unsigned int port; + unsigned int bsize; + unsigned int gxasize; + unsigned int sxasize; + unsigned int lxasize; + unsigned int acregmin; + unsigned int acregmax; + unsigned int acdirmin; + unsigned int acdirmax; + unsigned int namelen; + unsigned int options; + unsigned int clone_blksize; + enum nfs4_change_attr_type change_attr_type; + struct nfs_fsid fsid; + __u64 maxfilesize; + struct timespec64 time_delta; + long unsigned int mount_time; + struct super_block *super; + dev_t s_dev; + struct nfs_auth_info auth_info; + struct nfs_fscache_key *fscache_key; + struct fscache_cookie *fscache; + u32 pnfs_blksize; + u32 attr_bitmask[3]; + u32 attr_bitmask_nl[3]; + u32 exclcreat_bitmask[3]; + u32 cache_consistency_bitmask[3]; + u32 acl_bitmask; + u32 fh_expire_type; + struct pnfs_layoutdriver_type *pnfs_curr_ld; + struct rpc_wait_queue roc_rpcwaitq; + void *pnfs_ld_data; + struct rb_root state_owners; + struct ida openowner_id; + struct ida lockowner_id; + struct list_head state_owners_lru; + struct list_head layouts; + struct list_head delegations; + struct list_head ss_copies; + long unsigned int mig_gen; + long unsigned int mig_status; + void (*destroy)(struct nfs_server *); + atomic_t active; + struct __kernel_sockaddr_storage mountd_address; + size_t mountd_addrlen; + u32 mountd_version; + short unsigned int mountd_port; + short unsigned int mountd_protocol; + struct rpc_wait_queue uoc_rpcwaitq; + unsigned int read_hdrsize; + const struct cred *cred; + bool has_sec_mnt_opts; +}; + +struct nfs_subversion; + +struct idmap; + +struct nfs4_slot_table; + +struct nfs4_session; + +struct nfs_rpc_ops; + +struct nfs4_minor_version_ops; + +struct nfs41_server_owner; + +struct nfs41_server_scope; + +struct nfs41_impl_id; + +struct nfs_client { + refcount_t cl_count; + atomic_t cl_mds_count; + int cl_cons_state; + long unsigned int cl_res_state; + long unsigned int cl_flags; + struct __kernel_sockaddr_storage cl_addr; + size_t cl_addrlen; + char *cl_hostname; + char *cl_acceptor; + struct list_head cl_share_link; + struct list_head cl_superblocks; + struct rpc_clnt *cl_rpcclient; + const struct nfs_rpc_ops *rpc_ops; + int cl_proto; + struct nfs_subversion *cl_nfs_mod; + u32 cl_minorversion; + unsigned int cl_nconnect; + const char *cl_principal; + struct list_head cl_ds_clients; + u64 cl_clientid; + nfs4_verifier cl_confirm; + long unsigned int cl_state; + spinlock_t cl_lock; + long unsigned int cl_lease_time; + long unsigned int cl_last_renewal; + struct delayed_work cl_renewd; + struct rpc_wait_queue cl_rpcwaitq; + struct idmap *cl_idmap; + const char *cl_owner_id; + u32 cl_cb_ident; + const struct nfs4_minor_version_ops *cl_mvops; + long unsigned int cl_mig_gen; + struct nfs4_slot_table *cl_slot_tbl; + u32 cl_seqid; + u32 cl_exchange_flags; + struct nfs4_session *cl_session; + bool cl_preserve_clid; + struct nfs41_server_owner *cl_serverowner; + struct nfs41_server_scope *cl_serverscope; + struct nfs41_impl_id *cl_implid; + long unsigned int cl_sp4_flags; + wait_queue_head_t cl_lock_waitq; + char cl_ipaddr[48]; + struct fscache_cookie *fscache; + struct net *cl_net; + struct list_head pending_cb_stateids; +}; + +struct pnfs_layout_segment; + +struct nfs_seqid { + struct nfs_seqid_counter *sequence; + struct list_head list; + struct rpc_task *task; +}; + +struct nfs_write_verifier { + char data[8]; +}; + +struct nfs_writeverf { + struct nfs_write_verifier verifier; + enum 
nfs3_stable_how committed; +}; + +struct nfs_pgio_args { + struct nfs4_sequence_args seq_args; + struct nfs_fh *fh; + struct nfs_open_context *context; + struct nfs_lock_context *lock_context; + nfs4_stateid stateid; + __u64 offset; + __u32 count; + unsigned int pgbase; + struct page **pages; + union { + unsigned int replen; + struct { + const u32 *bitmask; + u32 bitmask_store[3]; + enum nfs3_stable_how stable; + }; + }; +}; + +struct nfs_pgio_res { + struct nfs4_sequence_res seq_res; + struct nfs_fattr *fattr; + __u64 count; + __u32 op_status; + union { + struct { + unsigned int replen; + int eof; + }; + struct { + struct nfs_writeverf *verf; + const struct nfs_server *server; + }; + }; +}; + +struct nfs_commitargs { + struct nfs4_sequence_args seq_args; + struct nfs_fh *fh; + __u64 offset; + __u32 count; + const u32 *bitmask; +}; + +struct nfs_commitres { + struct nfs4_sequence_res seq_res; + __u32 op_status; + struct nfs_fattr *fattr; + struct nfs_writeverf *verf; + const struct nfs_server *server; +}; + +struct nfs_removeargs { + struct nfs4_sequence_args seq_args; + const struct nfs_fh *fh; + struct qstr name; +}; + +struct nfs_removeres { + struct nfs4_sequence_res seq_res; + struct nfs_server *server; + struct nfs_fattr *dir_attr; + struct nfs4_change_info cinfo; +}; + +struct nfs_renameargs { + struct nfs4_sequence_args seq_args; + const struct nfs_fh *old_dir; + const struct nfs_fh *new_dir; + const struct qstr *old_name; + const struct qstr *new_name; +}; + +struct nfs_renameres { + struct nfs4_sequence_res seq_res; + struct nfs_server *server; + struct nfs4_change_info old_cinfo; + struct nfs_fattr *old_fattr; + struct nfs4_change_info new_cinfo; + struct nfs_fattr *new_fattr; +}; + +struct nfs_entry { + __u64 ino; + __u64 cookie; + __u64 prev_cookie; + const char *name; + unsigned int len; + int eof; + struct nfs_fh *fh; + struct nfs_fattr *fattr; + struct nfs4_label *label; + unsigned char d_type; + struct nfs_server *server; +}; + +struct nfs_readdir_arg { + struct dentry *dentry; + const struct cred *cred; + __be32 *verf; + u64 cookie; + struct page **pages; + unsigned int page_len; + bool plus; +}; + +struct nfs_readdir_res { + __be32 *verf; +}; + +struct nfs4_pathname { + unsigned int ncomponents; + struct nfs4_string components[512]; +}; + +struct nfs4_fs_location { + unsigned int nservers; + struct nfs4_string servers[10]; + struct nfs4_pathname rootpath; +}; + +struct nfs4_fs_locations { + struct nfs_fattr fattr; + const struct nfs_server *server; + struct nfs4_pathname fs_path; + int nlocations; + struct nfs4_fs_location locations[10]; +}; + +struct nfstime4 { + u64 seconds; + u32 nseconds; +}; + +struct pnfs_commit_ops; + +struct pnfs_ds_commit_info { + struct list_head commits; + unsigned int nwritten; + unsigned int ncommitting; + const struct pnfs_commit_ops *ops; +}; + +struct nfs41_server_owner { + uint64_t minor_id; + uint32_t major_id_sz; + char major_id[1024]; +}; + +struct nfs41_server_scope { + uint32_t server_scope_sz; + char server_scope[1024]; +}; + +struct nfs41_impl_id { + char domain[1025]; + char name[1025]; + struct nfstime4 date; +}; + +struct nfs_page_array { + struct page **pagevec; + unsigned int npages; + struct page *page_array[8]; +}; + +struct nfs_page; + +struct nfs_rw_ops; + +struct nfs_io_completion; + +struct nfs_direct_req; + +struct nfs_pgio_completion_ops; + +struct nfs_pgio_header { + struct inode *inode; + const struct cred *cred; + struct list_head pages; + struct nfs_page *req; + struct nfs_writeverf verf; + fmode_t rw_mode; + 
struct pnfs_layout_segment *lseg; + loff_t io_start; + const struct rpc_call_ops *mds_ops; + void (*release)(struct nfs_pgio_header *); + const struct nfs_pgio_completion_ops *completion_ops; + const struct nfs_rw_ops *rw_ops; + struct nfs_io_completion *io_completion; + struct nfs_direct_req *dreq; + int pnfs_error; + int error; + unsigned int good_bytes; + long unsigned int flags; + struct rpc_task task; + struct nfs_fattr fattr; + struct nfs_pgio_args args; + struct nfs_pgio_res res; + long unsigned int timestamp; + int (*pgio_done_cb)(struct rpc_task *, struct nfs_pgio_header *); + __u64 mds_offset; + struct nfs_page_array page_array; + struct nfs_client *ds_clp; + u32 ds_commit_idx; + u32 pgio_mirror_idx; +}; + +struct nfs_pgio_completion_ops { + void (*error_cleanup)(struct list_head *, int); + void (*init_hdr)(struct nfs_pgio_header *); + void (*completion)(struct nfs_pgio_header *); + void (*reschedule_io)(struct nfs_pgio_header *); +}; + +struct nfs_mds_commit_info { + atomic_t rpcs_out; + atomic_long_t ncommit; + struct list_head list; +}; + +struct nfs_commit_data; + +struct nfs_commit_info; + +struct nfs_commit_completion_ops { + void (*completion)(struct nfs_commit_data *); + void (*resched_write)(struct nfs_commit_info *, struct nfs_page *); +}; + +struct nfs_commit_data { + struct rpc_task task; + struct inode *inode; + const struct cred *cred; + struct nfs_fattr fattr; + struct nfs_writeverf verf; + struct list_head pages; + struct list_head list; + struct nfs_direct_req *dreq; + struct nfs_commitargs args; + struct nfs_commitres res; + struct nfs_open_context *context; + struct pnfs_layout_segment *lseg; + struct nfs_client *ds_clp; + int ds_commit_index; + loff_t lwb; + const struct rpc_call_ops *mds_ops; + const struct nfs_commit_completion_ops *completion_ops; + int (*commit_done_cb)(struct rpc_task *, struct nfs_commit_data *); + long unsigned int flags; +}; + +struct nfs_commit_info { + struct inode *inode; + struct nfs_mds_commit_info *mds; + struct pnfs_ds_commit_info *ds; + struct nfs_direct_req *dreq; + const struct nfs_commit_completion_ops *completion_ops; +}; + +struct nfs_unlinkdata { + struct nfs_removeargs args; + struct nfs_removeres res; + struct dentry *dentry; + wait_queue_head_t wq; + const struct cred *cred; + struct nfs_fattr dir_attr; + long int timeout; +}; + +struct nfs_renamedata { + struct nfs_renameargs args; + struct nfs_renameres res; + const struct cred *cred; + struct inode *old_dir; + struct dentry *old_dentry; + struct nfs_fattr old_fattr; + struct inode *new_dir; + struct dentry *new_dentry; + struct nfs_fattr new_fattr; + void (*complete)(struct rpc_task *, struct nfs_renamedata *); + long int timeout; + bool cancelled; +}; + +struct nlmclnt_operations; + +struct nfs_client_initdata; + +struct nfs_access_entry; + +struct nfs_rpc_ops { + u32 version; + const struct dentry_operations *dentry_ops; + const struct inode_operations *dir_inode_ops; + const struct inode_operations *file_inode_ops; + const struct file_operations *file_ops; + const struct nlmclnt_operations *nlmclnt_ops; + int (*getroot)(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); + int (*submount)(struct fs_context *, struct nfs_server *); + int (*try_get_tree)(struct fs_context *); + int (*getattr)(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *, struct inode *); + int (*setattr)(struct dentry *, struct nfs_fattr *, struct iattr *); + int (*lookup)(struct inode *, struct dentry *, struct nfs_fh *, struct nfs_fattr *, struct 
nfs4_label *); + int (*lookupp)(struct inode *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *); + int (*access)(struct inode *, struct nfs_access_entry *); + int (*readlink)(struct inode *, struct page *, unsigned int, unsigned int); + int (*create)(struct inode *, struct dentry *, struct iattr *, int); + int (*remove)(struct inode *, struct dentry *); + void (*unlink_setup)(struct rpc_message *, struct dentry *, struct inode *); + void (*unlink_rpc_prepare)(struct rpc_task *, struct nfs_unlinkdata *); + int (*unlink_done)(struct rpc_task *, struct inode *); + void (*rename_setup)(struct rpc_message *, struct dentry *, struct dentry *); + void (*rename_rpc_prepare)(struct rpc_task *, struct nfs_renamedata *); + int (*rename_done)(struct rpc_task *, struct inode *, struct inode *); + int (*link)(struct inode *, struct inode *, const struct qstr *); + int (*symlink)(struct inode *, struct dentry *, struct page *, unsigned int, struct iattr *); + int (*mkdir)(struct inode *, struct dentry *, struct iattr *); + int (*rmdir)(struct inode *, const struct qstr *); + int (*readdir)(struct nfs_readdir_arg *, struct nfs_readdir_res *); + int (*mknod)(struct inode *, struct dentry *, struct iattr *, dev_t); + int (*statfs)(struct nfs_server *, struct nfs_fh *, struct nfs_fsstat *); + int (*fsinfo)(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); + int (*pathconf)(struct nfs_server *, struct nfs_fh *, struct nfs_pathconf *); + int (*set_capabilities)(struct nfs_server *, struct nfs_fh *); + int (*decode_dirent)(struct xdr_stream *, struct nfs_entry *, bool); + int (*pgio_rpc_prepare)(struct rpc_task *, struct nfs_pgio_header *); + void (*read_setup)(struct nfs_pgio_header *, struct rpc_message *); + int (*read_done)(struct rpc_task *, struct nfs_pgio_header *); + void (*write_setup)(struct nfs_pgio_header *, struct rpc_message *, struct rpc_clnt **); + int (*write_done)(struct rpc_task *, struct nfs_pgio_header *); + void (*commit_setup)(struct nfs_commit_data *, struct rpc_message *, struct rpc_clnt **); + void (*commit_rpc_prepare)(struct rpc_task *, struct nfs_commit_data *); + int (*commit_done)(struct rpc_task *, struct nfs_commit_data *); + int (*lock)(struct file *, int, struct file_lock *); + int (*lock_check_bounds)(const struct file_lock *); + void (*clear_acl_cache)(struct inode *); + void (*close_context)(struct nfs_open_context *, int); + struct inode * (*open_context)(struct inode *, struct nfs_open_context *, int, struct iattr *, int *); + int (*have_delegation)(struct inode *, fmode_t); + struct nfs_client * (*alloc_client)(const struct nfs_client_initdata *); + struct nfs_client * (*init_client)(struct nfs_client *, const struct nfs_client_initdata *); + void (*free_client)(struct nfs_client *); + struct nfs_server * (*create_server)(struct fs_context *); + struct nfs_server * (*clone_server)(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, rpc_authflavor_t); +}; + +struct nfs_access_entry { + struct rb_node rb_node; + struct list_head lru; + const struct cred *cred; + __u32 mask; + struct callback_head callback_head; +}; + +struct nfs4_state_recovery_ops; + +struct nfs4_state_maintenance_ops; + +struct nfs4_mig_recovery_ops; + +struct nfs4_minor_version_ops { + u32 minor_version; + unsigned int init_caps; + int (*init_client)(struct nfs_client *); + void (*shutdown_client)(struct nfs_client *); + bool (*match_stateid)(const nfs4_stateid *, const nfs4_stateid *); + int (*find_root_sec)(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); + void 
(*free_lock_state)(struct nfs_server *, struct nfs4_lock_state *); + int (*test_and_free_expired)(struct nfs_server *, nfs4_stateid *, const struct cred *); + struct nfs_seqid * (*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); + void (*session_trunk)(struct rpc_clnt *, struct rpc_xprt *, void *); + const struct rpc_call_ops *call_sync_ops; + const struct nfs4_state_recovery_ops *reboot_recovery_ops; + const struct nfs4_state_recovery_ops *nograce_recovery_ops; + const struct nfs4_state_maintenance_ops *state_renewal_ops; + const struct nfs4_mig_recovery_ops *mig_recovery_ops; +}; + +struct nfs4_state_owner; + +struct nfs4_state { + struct list_head open_states; + struct list_head inode_states; + struct list_head lock_states; + struct nfs4_state_owner *owner; + struct inode *inode; + long unsigned int flags; + spinlock_t state_lock; + seqlock_t seqlock; + nfs4_stateid stateid; + nfs4_stateid open_stateid; + unsigned int n_rdonly; + unsigned int n_wronly; + unsigned int n_rdwr; + fmode_t state; + refcount_t count; + wait_queue_head_t waitq; + struct callback_head callback_head; +}; + +struct nfs4_ssc_client_ops; + +struct nfs_ssc_client_ops; + +struct nfs_ssc_client_ops_tbl { + const struct nfs4_ssc_client_ops *ssc_nfs4_ops; + const struct nfs_ssc_client_ops *ssc_nfs_ops; +}; + +struct nfs4_ssc_client_ops { + struct file * (*sco_open)(struct vfsmount *, struct nfs_fh *, nfs4_stateid *); + void (*sco_close)(struct file *); +}; + +struct nfs_ssc_client_ops { + void (*sco_sb_deactive)(struct super_block *); +}; + +struct nfs4_state_recovery_ops { + int owner_flag_bit; + int state_flag_bit; + int (*recover_open)(struct nfs4_state_owner *, struct nfs4_state *); + int (*recover_lock)(struct nfs4_state *, struct file_lock *); + int (*establish_clid)(struct nfs_client *, const struct cred *); + int (*reclaim_complete)(struct nfs_client *, const struct cred *); + int (*detect_trunking)(struct nfs_client *, struct nfs_client **, const struct cred *); +}; + +struct nfs4_state_maintenance_ops { + int (*sched_state_renewal)(struct nfs_client *, const struct cred *, unsigned int); + const struct cred * (*get_state_renewal_cred)(struct nfs_client *); + int (*renew_lease)(struct nfs_client *, const struct cred *); +}; + +struct nfs4_mig_recovery_ops { + int (*get_locations)(struct inode *, struct nfs4_fs_locations *, struct page *, const struct cred *); + int (*fsid_present)(struct inode *, const struct cred *); +}; + +struct nfs4_state_owner { + struct nfs_server *so_server; + struct list_head so_lru; + long unsigned int so_expires; + struct rb_node so_server_node; + const struct cred *so_cred; + spinlock_t so_lock; + atomic_t so_count; + long unsigned int so_flags; + struct list_head so_states; + struct nfs_seqid_counter so_seqid; + seqcount_spinlock_t so_reclaim_seqcount; + struct mutex so_delegreturn_mutex; +}; + +struct core_name { + char *corename; + int used; + int size; +}; + +struct trace_event_raw_iomap_readpage_class { + struct trace_entry ent; + dev_t dev; + u64 ino; + int nr_pages; + char __data[0]; +}; + +struct trace_event_raw_iomap_range_class { + struct trace_entry ent; + dev_t dev; + u64 ino; + loff_t size; + long unsigned int offset; + unsigned int length; + char __data[0]; +}; + +struct trace_event_raw_iomap_class { + struct trace_entry ent; + dev_t dev; + u64 ino; + u64 addr; + loff_t offset; + u64 length; + u16 type; + u16 flags; + dev_t bdev; + char __data[0]; +}; + +struct trace_event_raw_iomap_apply { + struct trace_entry ent; + dev_t dev; + u64 ino; + loff_t pos; + loff_t length; 
+ unsigned int flags; + const void *ops; + void *actor; + long unsigned int caller; + char __data[0]; +}; + +struct trace_event_data_offsets_iomap_readpage_class {}; + +struct trace_event_data_offsets_iomap_range_class {}; + +struct trace_event_data_offsets_iomap_class {}; + +struct trace_event_data_offsets_iomap_apply {}; + +typedef void (*btf_trace_iomap_readpage)(void *, struct inode *, int); + +typedef void (*btf_trace_iomap_readahead)(void *, struct inode *, int); + +typedef void (*btf_trace_iomap_writepage)(void *, struct inode *, long unsigned int, unsigned int); + +typedef void (*btf_trace_iomap_releasepage)(void *, struct inode *, long unsigned int, unsigned int); + +typedef void (*btf_trace_iomap_invalidatepage)(void *, struct inode *, long unsigned int, unsigned int); + +typedef void (*btf_trace_iomap_dio_invalidate_fail)(void *, struct inode *, long unsigned int, unsigned int); + +typedef void (*btf_trace_iomap_apply_dstmap)(void *, struct inode *, struct iomap___2 *); + +typedef void (*btf_trace_iomap_apply_srcmap)(void *, struct inode *, struct iomap___2 *); + +typedef void (*btf_trace_iomap_apply)(void *, struct inode *, loff_t, loff_t, unsigned int, const void *, void *, long unsigned int); + +struct iomap_ioend { + struct list_head io_list; + u16 io_type; + u16 io_flags; + struct inode *io_inode; + size_t io_size; + loff_t io_offset; + struct bio *io_bio; + struct bio io_inline_bio; +}; + +struct iomap_writepage_ctx; + +struct iomap_writeback_ops { + int (*map_blocks)(struct iomap_writepage_ctx *, struct inode *, loff_t); + int (*prepare_ioend)(struct iomap_ioend *, int); + void (*discard_page)(struct page *, loff_t); +}; + +struct iomap_writepage_ctx { + struct iomap___2 iomap; + struct iomap_ioend *ioend; + const struct iomap_writeback_ops *ops; +}; + +typedef int (*list_cmp_func_t)(void *, const struct list_head *, const struct list_head *); + +struct iomap_page { + atomic_t read_bytes_pending; + atomic_t write_bytes_pending; + spinlock_t uptodate_lock; + long unsigned int uptodate[0]; +}; + +struct iomap_readpage_ctx { + struct page *cur_page; + bool cur_page_in_bio; + struct bio *bio; + struct readahead_control *rac; +}; + +enum { + IOMAP_WRITE_F_UNSHARE = 1, +}; + +struct iomap_dio_ops { + int (*end_io)(struct kiocb *, ssize_t, int, unsigned int); + blk_qc_t (*submit_io)(struct inode *, struct iomap___2 *, struct bio *, loff_t); +}; + +struct iomap_dio { + struct kiocb *iocb; + const struct iomap_dio_ops *dops; + loff_t i_size; + loff_t size; + atomic_t ref; + unsigned int flags; + int error; + bool wait_for_completion; + union { + struct { + struct iov_iter *iter; + struct task_struct *waiter; + struct request_queue *last_queue; + blk_qc_t cookie; + } submit; + struct { + struct work_struct work; + } aio; + }; +}; + +struct fiemap_ctx { + struct fiemap_extent_info *fi; + struct iomap___2 prev; +}; + +struct iomap_swapfile_info { + struct iomap___2 iomap; + struct swap_info_struct *sis; + uint64_t lowest_ppage; + uint64_t highest_ppage; + long unsigned int nr_pages; + int nr_extents; + struct file *file; +}; + +enum { + QIF_BLIMITS_B = 0, + QIF_SPACE_B = 1, + QIF_ILIMITS_B = 2, + QIF_INODES_B = 3, + QIF_BTIME_B = 4, + QIF_ITIME_B = 5, +}; + +typedef __kernel_uid32_t qid_t; + +enum { + DQF_INFO_DIRTY_B = 17, +}; + +struct dqstats { + long unsigned int stat[8]; + struct percpu_counter counter[8]; +}; + +enum { + _DQUOT_USAGE_ENABLED = 0, + _DQUOT_LIMITS_ENABLED = 1, + _DQUOT_SUSPENDED = 2, + _DQUOT_STATE_FLAGS = 3, +}; + +struct quota_module_name { + int qm_fmt_id; + 
char *qm_mod_name; +}; + +struct dquot_warn { + struct super_block *w_sb; + struct kqid w_dq_id; + short int w_type; +}; + +struct fs_disk_quota { + __s8 d_version; + __s8 d_flags; + __u16 d_fieldmask; + __u32 d_id; + __u64 d_blk_hardlimit; + __u64 d_blk_softlimit; + __u64 d_ino_hardlimit; + __u64 d_ino_softlimit; + __u64 d_bcount; + __u64 d_icount; + __s32 d_itimer; + __s32 d_btimer; + __u16 d_iwarns; + __u16 d_bwarns; + __s8 d_itimer_hi; + __s8 d_btimer_hi; + __s8 d_rtbtimer_hi; + __s8 d_padding2; + __u64 d_rtb_hardlimit; + __u64 d_rtb_softlimit; + __u64 d_rtbcount; + __s32 d_rtbtimer; + __u16 d_rtbwarns; + __s16 d_padding3; + char d_padding4[8]; +}; + +struct fs_qfilestat { + __u64 qfs_ino; + __u64 qfs_nblks; + __u32 qfs_nextents; +}; + +typedef struct fs_qfilestat fs_qfilestat_t; + +struct fs_quota_stat { + __s8 qs_version; + __u16 qs_flags; + __s8 qs_pad; + fs_qfilestat_t qs_uquota; + fs_qfilestat_t qs_gquota; + __u32 qs_incoredqs; + __s32 qs_btimelimit; + __s32 qs_itimelimit; + __s32 qs_rtbtimelimit; + __u16 qs_bwarnlimit; + __u16 qs_iwarnlimit; +}; + +struct fs_qfilestatv { + __u64 qfs_ino; + __u64 qfs_nblks; + __u32 qfs_nextents; + __u32 qfs_pad; +}; + +struct fs_quota_statv { + __s8 qs_version; + __u8 qs_pad1; + __u16 qs_flags; + __u32 qs_incoredqs; + struct fs_qfilestatv qs_uquota; + struct fs_qfilestatv qs_gquota; + struct fs_qfilestatv qs_pquota; + __s32 qs_btimelimit; + __s32 qs_itimelimit; + __s32 qs_rtbtimelimit; + __u16 qs_bwarnlimit; + __u16 qs_iwarnlimit; + __u16 qs_rtbwarnlimit; + __u16 qs_pad3; + __u32 qs_pad4; + __u64 qs_pad2[7]; +}; + +struct if_dqblk { + __u64 dqb_bhardlimit; + __u64 dqb_bsoftlimit; + __u64 dqb_curspace; + __u64 dqb_ihardlimit; + __u64 dqb_isoftlimit; + __u64 dqb_curinodes; + __u64 dqb_btime; + __u64 dqb_itime; + __u32 dqb_valid; +}; + +struct if_nextdqblk { + __u64 dqb_bhardlimit; + __u64 dqb_bsoftlimit; + __u64 dqb_curspace; + __u64 dqb_ihardlimit; + __u64 dqb_isoftlimit; + __u64 dqb_curinodes; + __u64 dqb_btime; + __u64 dqb_itime; + __u32 dqb_valid; + __u32 dqb_id; +}; + +struct if_dqinfo { + __u64 dqi_bgrace; + __u64 dqi_igrace; + __u32 dqi_flags; + __u32 dqi_valid; +}; + +struct compat_if_dqblk { + compat_u64 dqb_bhardlimit; + compat_u64 dqb_bsoftlimit; + compat_u64 dqb_curspace; + compat_u64 dqb_ihardlimit; + compat_u64 dqb_isoftlimit; + compat_u64 dqb_curinodes; + compat_u64 dqb_btime; + compat_u64 dqb_itime; + compat_uint_t dqb_valid; +} __attribute__((packed)); + +struct compat_fs_qfilestat { + compat_u64 dqb_bhardlimit; + compat_u64 qfs_nblks; + compat_uint_t qfs_nextents; +} __attribute__((packed)); + +struct compat_fs_quota_stat { + __s8 qs_version; + char: 8; + __u16 qs_flags; + __s8 qs_pad; + int: 24; + struct compat_fs_qfilestat qs_uquota; + struct compat_fs_qfilestat qs_gquota; + compat_uint_t qs_incoredqs; + compat_int_t qs_btimelimit; + compat_int_t qs_itimelimit; + compat_int_t qs_rtbtimelimit; + __u16 qs_bwarnlimit; + __u16 qs_iwarnlimit; +} __attribute__((packed)); + +enum { + QUOTA_NL_C_UNSPEC = 0, + QUOTA_NL_C_WARNING = 1, + __QUOTA_NL_C_MAX = 2, +}; + +enum { + QUOTA_NL_A_UNSPEC = 0, + QUOTA_NL_A_QTYPE = 1, + QUOTA_NL_A_EXCESS_ID = 2, + QUOTA_NL_A_WARNING = 3, + QUOTA_NL_A_DEV_MAJOR = 4, + QUOTA_NL_A_DEV_MINOR = 5, + QUOTA_NL_A_CAUSED_ID = 6, + QUOTA_NL_A_PAD = 7, + __QUOTA_NL_A_MAX = 8, +}; + +struct proc_maps_private { + struct inode *inode; + struct task_struct *task; + struct mm_struct *mm; + struct vm_area_struct *tail_vma; + struct mempolicy *task_mempolicy; +}; + +struct mem_size_stats { + long unsigned int resident; + 
long unsigned int shared_clean; + long unsigned int shared_dirty; + long unsigned int private_clean; + long unsigned int private_dirty; + long unsigned int referenced; + long unsigned int anonymous; + long unsigned int lazyfree; + long unsigned int anonymous_thp; + long unsigned int shmem_thp; + long unsigned int file_thp; + long unsigned int swap; + long unsigned int shared_hugetlb; + long unsigned int private_hugetlb; + u64 pss; + u64 pss_anon; + u64 pss_file; + u64 pss_shmem; + u64 pss_locked; + u64 swap_pss; + bool check_shmem_swap; +}; + +enum clear_refs_types { + CLEAR_REFS_ALL = 1, + CLEAR_REFS_ANON = 2, + CLEAR_REFS_MAPPED = 3, + CLEAR_REFS_SOFT_DIRTY = 4, + CLEAR_REFS_MM_HIWATER_RSS = 5, + CLEAR_REFS_LAST = 6, +}; + +struct clear_refs_private { + enum clear_refs_types type; +}; + +typedef struct { + u64 pme; +} pagemap_entry_t; + +struct pagemapread { + int pos; + int len; + pagemap_entry_t *buffer; + bool show_pfn; +}; + +struct numa_maps { + long unsigned int pages; + long unsigned int anon; + long unsigned int active; + long unsigned int writeback; + long unsigned int mapcount_max; + long unsigned int dirty; + long unsigned int swapcache; + long unsigned int node[1024]; +}; + +struct numa_maps_private { + struct proc_maps_private proc_maps; + struct numa_maps md; +}; + +struct pde_opener { + struct list_head lh; + struct file *file; + bool closing; + struct completion *c; +}; + +enum { + BIAS = 2147483648, +}; + +struct proc_fs_context { + struct pid_namespace *pid_ns; + unsigned int mask; + enum proc_hidepid hidepid; + int gid; + enum proc_pidonly pidonly; +}; + +enum proc_param { + Opt_gid___2 = 0, + Opt_hidepid = 1, + Opt_subset = 2, +}; + +struct genradix_root; + +struct __genradix { + struct genradix_root *root; +}; + +struct syscall_info { + __u64 sp; + struct seccomp_data data; +}; + +typedef struct dentry *instantiate_t(struct dentry *, struct task_struct *, const void *); + +struct pid_entry { + const char *name; + unsigned int len; + umode_t mode; + const struct inode_operations *iop; + const struct file_operations *fop; + union proc_op op; +}; + +struct limit_names { + const char *name; + const char *unit; +}; + +struct map_files_info { + long unsigned int start; + long unsigned int end; + fmode_t mode; +}; + +struct timers_private { + struct pid *pid; + struct task_struct *task; + struct sighand_struct *sighand; + struct pid_namespace *ns; + long unsigned int flags; +}; + +struct tgid_iter { + unsigned int tgid; + struct task_struct *task; +}; + +struct fd_data { + fmode_t mode; + unsigned int fd; +}; + +struct sysctl_alias { + const char *kernel_param; + const char *sysctl_param; +}; + +struct seq_net_private { + struct net *net; +}; + +struct bpf_iter_aux_info___2; + +struct vmcore { + struct list_head list; + long long unsigned int paddr; + long long unsigned int size; + loff_t offset; +}; + +struct vmcoredd_node { + struct list_head list; + void *buf; + unsigned int size; +}; + +typedef struct elf32_hdr Elf32_Ehdr; + +typedef struct elf32_phdr Elf32_Phdr; + +typedef struct elf32_note Elf32_Nhdr; + +typedef struct elf64_note Elf64_Nhdr; + +struct vmcoredd_header { + __u32 n_namesz; + __u32 n_descsz; + __u32 n_type; + __u8 name[8]; + __u8 dump_name[44]; +}; + +struct vmcoredd_data { + char dump_name[44]; + unsigned int size; + int (*vmcoredd_callback)(struct vmcoredd_data *, void *); +}; + +struct kernfs_iattrs { + kuid_t ia_uid; + kgid_t ia_gid; + struct timespec64 ia_atime; + struct timespec64 ia_mtime; + struct timespec64 ia_ctime; + struct simple_xattrs 
xattrs; + atomic_t nr_user_xattrs; + atomic_t user_xattr_size; +}; + +struct kernfs_super_info { + struct super_block *sb; + struct kernfs_root *root; + const void *ns; + struct list_head node; +}; + +enum kernfs_node_flag { + KERNFS_ACTIVATED = 16, + KERNFS_NS = 32, + KERNFS_HAS_SEQ_SHOW = 64, + KERNFS_HAS_MMAP = 128, + KERNFS_LOCKDEP = 256, + KERNFS_SUICIDAL = 1024, + KERNFS_SUICIDED = 2048, + KERNFS_EMPTY_DIR = 4096, + KERNFS_HAS_RELEASE = 8192, +}; + +struct kernfs_open_node { + atomic_t refcnt; + atomic_t event; + wait_queue_head_t poll; + struct list_head files; +}; + +struct config_group; + +struct config_item_type; + +struct config_item { + char *ci_name; + char ci_namebuf[20]; + struct kref ci_kref; + struct list_head ci_entry; + struct config_item *ci_parent; + struct config_group *ci_group; + const struct config_item_type *ci_type; + struct dentry *ci_dentry; +}; + +struct configfs_subsystem; + +struct config_group { + struct config_item cg_item; + struct list_head cg_children; + struct configfs_subsystem *cg_subsys; + struct list_head default_groups; + struct list_head group_entry; +}; + +struct configfs_item_operations; + +struct configfs_group_operations; + +struct configfs_attribute; + +struct configfs_bin_attribute; + +struct config_item_type { + struct module *ct_owner; + struct configfs_item_operations *ct_item_ops; + struct configfs_group_operations *ct_group_ops; + struct configfs_attribute **ct_attrs; + struct configfs_bin_attribute **ct_bin_attrs; +}; + +struct configfs_item_operations { + void (*release)(struct config_item *); + int (*allow_link)(struct config_item *, struct config_item *); + void (*drop_link)(struct config_item *, struct config_item *); +}; + +struct configfs_group_operations { + struct config_item * (*make_item)(struct config_group *, const char *); + struct config_group * (*make_group)(struct config_group *, const char *); + int (*commit_item)(struct config_item *); + void (*disconnect_notify)(struct config_group *, struct config_item *); + void (*drop_item)(struct config_group *, struct config_item *); +}; + +struct configfs_attribute { + const char *ca_name; + struct module *ca_owner; + umode_t ca_mode; + ssize_t (*show)(struct config_item *, char *); + ssize_t (*store)(struct config_item *, const char *, size_t); +}; + +struct configfs_bin_attribute { + struct configfs_attribute cb_attr; + void *cb_private; + size_t cb_max_size; + ssize_t (*read)(struct config_item *, void *, size_t); + ssize_t (*write)(struct config_item *, const void *, size_t); +}; + +struct configfs_subsystem { + struct config_group su_group; + struct mutex su_mutex; +}; + +struct configfs_fragment { + atomic_t frag_count; + struct rw_semaphore frag_sem; + bool frag_dead; +}; + +struct configfs_dirent { + atomic_t s_count; + int s_dependent_count; + struct list_head s_sibling; + struct list_head s_children; + int s_links; + void *s_element; + int s_type; + umode_t s_mode; + struct dentry *s_dentry; + struct iattr *s_iattr; + struct configfs_fragment *s_frag; +}; + +struct configfs_buffer { + size_t count; + loff_t pos; + char *page; + struct configfs_item_operations *ops; + struct mutex mutex; + int needs_read_fill; + bool read_in_progress; + bool write_in_progress; + char *bin_buffer; + int bin_buffer_size; + int cb_max_size; + struct config_item *item; + struct module *owner; + union { + struct configfs_attribute *attr; + struct configfs_bin_attribute *bin_attr; + }; +}; + +struct pts_mount_opts { + int setuid; + int setgid; + kuid_t uid; + kgid_t gid; + umode_t mode; + 
umode_t ptmxmode; + int reserve; + int max; +}; + +enum { + Opt_uid___2 = 0, + Opt_gid___3 = 1, + Opt_mode___2 = 2, + Opt_ptmxmode = 3, + Opt_newinstance = 4, + Opt_max = 5, + Opt_err = 6, +}; + +struct pts_fs_info { + struct ida allocated_ptys; + struct pts_mount_opts mount_opts; + struct super_block *sb; + struct dentry *ptmx_dentry; +}; + +typedef unsigned int tid_t; + +struct transaction_chp_stats_s { + long unsigned int cs_chp_time; + __u32 cs_forced_to_close; + __u32 cs_written; + __u32 cs_dropped; +}; + +struct journal_s; + +typedef struct journal_s journal_t; + +struct journal_head; + +struct transaction_s; + +typedef struct transaction_s transaction_t; + +struct transaction_s { + journal_t *t_journal; + tid_t t_tid; + enum { + T_RUNNING = 0, + T_LOCKED = 1, + T_SWITCH = 2, + T_FLUSH = 3, + T_COMMIT = 4, + T_COMMIT_DFLUSH = 5, + T_COMMIT_JFLUSH = 6, + T_COMMIT_CALLBACK = 7, + T_FINISHED = 8, + } t_state; + long unsigned int t_log_start; + int t_nr_buffers; + struct journal_head *t_reserved_list; + struct journal_head *t_buffers; + struct journal_head *t_forget; + struct journal_head *t_checkpoint_list; + struct journal_head *t_checkpoint_io_list; + struct journal_head *t_shadow_list; + struct list_head t_inode_list; + spinlock_t t_handle_lock; + long unsigned int t_max_wait; + long unsigned int t_start; + long unsigned int t_requested; + struct transaction_chp_stats_s t_chp_stats; + atomic_t t_updates; + atomic_t t_outstanding_credits; + atomic_t t_outstanding_revokes; + atomic_t t_handle_count; + transaction_t *t_cpnext; + transaction_t *t_cpprev; + long unsigned int t_expires; + ktime_t t_start_time; + unsigned int t_synchronous_commit: 1; + int t_need_data_flush; + struct list_head t_private_list; +}; + +struct jbd2_buffer_trigger_type; + +struct journal_head { + struct buffer_head *b_bh; + spinlock_t b_state_lock; + int b_jcount; + unsigned int b_jlist; + unsigned int b_modified; + char *b_frozen_data; + char *b_committed_data; + transaction_t *b_transaction; + transaction_t *b_next_transaction; + struct journal_head *b_tnext; + struct journal_head *b_tprev; + transaction_t *b_cp_transaction; + struct journal_head *b_cpnext; + struct journal_head *b_cpprev; + struct jbd2_buffer_trigger_type *b_triggers; + struct jbd2_buffer_trigger_type *b_frozen_triggers; +}; + +struct jbd2_buffer_trigger_type { + void (*t_frozen)(struct jbd2_buffer_trigger_type *, struct buffer_head *, void *, size_t); + void (*t_abort)(struct jbd2_buffer_trigger_type *, struct buffer_head *); +}; + +struct jbd2_journal_handle; + +typedef struct jbd2_journal_handle handle_t; + +struct jbd2_journal_handle { + union { + transaction_t *h_transaction; + journal_t *h_journal; + }; + handle_t *h_rsv_handle; + int h_total_credits; + int h_revoke_credits; + int h_revoke_credits_requested; + int h_ref; + int h_err; + unsigned int h_sync: 1; + unsigned int h_jdata: 1; + unsigned int h_reserved: 1; + unsigned int h_aborted: 1; + unsigned int h_type: 8; + unsigned int h_line_no: 16; + long unsigned int h_start_jiffies; + unsigned int h_requested_credits; + unsigned int saved_alloc_context; +}; + +struct transaction_run_stats_s { + long unsigned int rs_wait; + long unsigned int rs_request_delay; + long unsigned int rs_running; + long unsigned int rs_locked; + long unsigned int rs_flushing; + long unsigned int rs_logging; + __u32 rs_handle_count; + __u32 rs_blocks; + __u32 rs_blocks_logged; +}; + +struct transaction_stats_s { + long unsigned int ts_tid; + long unsigned int ts_requested; + struct transaction_run_stats_s 
run; +}; + +enum passtype { + PASS_SCAN = 0, + PASS_REVOKE = 1, + PASS_REPLAY = 2, +}; + +struct journal_superblock_s; + +typedef struct journal_superblock_s journal_superblock_t; + +struct jbd2_revoke_table_s; + +struct jbd2_inode; + +struct journal_s { + long unsigned int j_flags; + int j_errno; + struct mutex j_abort_mutex; + struct buffer_head *j_sb_buffer; + journal_superblock_t *j_superblock; + int j_format_version; + rwlock_t j_state_lock; + int j_barrier_count; + struct mutex j_barrier; + transaction_t *j_running_transaction; + transaction_t *j_committing_transaction; + transaction_t *j_checkpoint_transactions; + wait_queue_head_t j_wait_transaction_locked; + wait_queue_head_t j_wait_done_commit; + wait_queue_head_t j_wait_commit; + wait_queue_head_t j_wait_updates; + wait_queue_head_t j_wait_reserved; + wait_queue_head_t j_fc_wait; + struct mutex j_checkpoint_mutex; + struct buffer_head *j_chkpt_bhs[64]; + long unsigned int j_head; + long unsigned int j_tail; + long unsigned int j_free; + long unsigned int j_first; + long unsigned int j_last; + long unsigned int j_fc_first; + long unsigned int j_fc_off; + long unsigned int j_fc_last; + struct block_device *j_dev; + int j_blocksize; + long long unsigned int j_blk_offset; + char j_devname[56]; + struct block_device *j_fs_dev; + unsigned int j_total_len; + atomic_t j_reserved_credits; + spinlock_t j_list_lock; + struct inode *j_inode; + tid_t j_tail_sequence; + tid_t j_transaction_sequence; + tid_t j_commit_sequence; + tid_t j_commit_request; + __u8 j_uuid[16]; + struct task_struct *j_task; + int j_max_transaction_buffers; + int j_revoke_records_per_block; + long unsigned int j_commit_interval; + struct timer_list j_commit_timer; + spinlock_t j_revoke_lock; + struct jbd2_revoke_table_s *j_revoke; + struct jbd2_revoke_table_s *j_revoke_table[2]; + struct buffer_head **j_wbuf; + struct buffer_head **j_fc_wbuf; + int j_wbufsize; + int j_fc_wbufsize; + pid_t j_last_sync_writer; + u64 j_average_commit_time; + u32 j_min_batch_time; + u32 j_max_batch_time; + void (*j_commit_callback)(journal_t *, transaction_t *); + int (*j_submit_inode_data_buffers)(struct jbd2_inode *); + int (*j_finish_inode_data_buffers)(struct jbd2_inode *); + spinlock_t j_history_lock; + struct proc_dir_entry *j_proc_entry; + struct transaction_stats_s j_stats; + unsigned int j_failed_commit; + void *j_private; + struct crypto_shash *j_chksum_driver; + __u32 j_csum_seed; + void (*j_fc_cleanup_callback)(struct journal_s *, int); + int (*j_fc_replay_callback)(struct journal_s *, struct buffer_head *, enum passtype, int, tid_t); +}; + +struct journal_header_s { + __be32 h_magic; + __be32 h_blocktype; + __be32 h_sequence; +}; + +typedef struct journal_header_s journal_header_t; + +struct journal_superblock_s { + journal_header_t s_header; + __be32 s_blocksize; + __be32 s_maxlen; + __be32 s_first; + __be32 s_sequence; + __be32 s_start; + __be32 s_errno; + __be32 s_feature_compat; + __be32 s_feature_incompat; + __be32 s_feature_ro_compat; + __u8 s_uuid[16]; + __be32 s_nr_users; + __be32 s_dynsuper; + __be32 s_max_transaction; + __be32 s_max_trans_data; + __u8 s_checksum_type; + __u8 s_padding2[3]; + __be32 s_num_fc_blks; + __u32 s_padding[41]; + __be32 s_checksum; + __u8 s_users[768]; +}; + +enum jbd_state_bits { + BH_JBD = 16, + BH_JWrite = 17, + BH_Freed = 18, + BH_Revoked = 19, + BH_RevokeValid = 20, + BH_JBDDirty = 21, + BH_JournalHead = 22, + BH_Shadow = 23, + BH_Verified = 24, + BH_JBDPrivateStart = 25, +}; + +struct jbd2_inode { + transaction_t *i_transaction; + 
transaction_t *i_next_transaction; + struct list_head i_list; + struct inode *i_vfs_inode; + long unsigned int i_flags; + loff_t i_dirty_start; + loff_t i_dirty_end; +}; + +struct bgl_lock { + spinlock_t lock; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct blockgroup_lock { + struct bgl_lock locks[128]; +}; + +typedef int ext4_grpblk_t; + +typedef long long unsigned int ext4_fsblk_t; + +typedef __u32 ext4_lblk_t; + +typedef unsigned int ext4_group_t; + +struct ext4_allocation_request { + struct inode *inode; + unsigned int len; + ext4_lblk_t logical; + ext4_lblk_t lleft; + ext4_lblk_t lright; + ext4_fsblk_t goal; + ext4_fsblk_t pleft; + ext4_fsblk_t pright; + unsigned int flags; +}; + +struct ext4_system_blocks { + struct rb_root root; + struct callback_head rcu; +}; + +struct ext4_group_desc { + __le32 bg_block_bitmap_lo; + __le32 bg_inode_bitmap_lo; + __le32 bg_inode_table_lo; + __le16 bg_free_blocks_count_lo; + __le16 bg_free_inodes_count_lo; + __le16 bg_used_dirs_count_lo; + __le16 bg_flags; + __le32 bg_exclude_bitmap_lo; + __le16 bg_block_bitmap_csum_lo; + __le16 bg_inode_bitmap_csum_lo; + __le16 bg_itable_unused_lo; + __le16 bg_checksum; + __le32 bg_block_bitmap_hi; + __le32 bg_inode_bitmap_hi; + __le32 bg_inode_table_hi; + __le16 bg_free_blocks_count_hi; + __le16 bg_free_inodes_count_hi; + __le16 bg_used_dirs_count_hi; + __le16 bg_itable_unused_hi; + __le32 bg_exclude_bitmap_hi; + __le16 bg_block_bitmap_csum_hi; + __le16 bg_inode_bitmap_csum_hi; + __u32 bg_reserved; +}; + +struct flex_groups { + atomic64_t free_clusters; + atomic_t free_inodes; + atomic_t used_dirs; +}; + +struct extent_status { + struct rb_node rb_node; + ext4_lblk_t es_lblk; + ext4_lblk_t es_len; + ext4_fsblk_t es_pblk; +}; + +struct ext4_es_tree { + struct rb_root root; + struct extent_status *cache_es; +}; + +struct ext4_es_stats { + long unsigned int es_stats_shrunk; + struct percpu_counter es_stats_cache_hits; + struct percpu_counter es_stats_cache_misses; + u64 es_stats_scan_time; + u64 es_stats_max_scan_time; + struct percpu_counter es_stats_all_cnt; + struct percpu_counter es_stats_shk_cnt; +}; + +struct ext4_pending_tree { + struct rb_root root; +}; + +struct ext4_fc_stats { + unsigned int fc_ineligible_reason_count[10]; + long unsigned int fc_num_commits; + long unsigned int fc_ineligible_commits; + long unsigned int fc_numblks; +}; + +struct ext4_fc_alloc_region { + ext4_lblk_t lblk; + ext4_fsblk_t pblk; + int ino; + int len; +}; + +struct ext4_fc_replay_state { + int fc_replay_num_tags; + int fc_replay_expected_off; + int fc_current_pass; + int fc_cur_tag; + int fc_crc; + struct ext4_fc_alloc_region *fc_regions; + int fc_regions_size; + int fc_regions_used; + int fc_regions_valid; + int *fc_modified_inodes; + int fc_modified_inodes_used; + int fc_modified_inodes_size; +}; + +struct ext4_inode_info { + __le32 i_data[15]; + __u32 i_dtime; + ext4_fsblk_t i_file_acl; + ext4_group_t i_block_group; + ext4_lblk_t i_dir_start_lookup; + long unsigned int i_flags; + struct rw_semaphore xattr_sem; + struct list_head i_orphan; + struct list_head i_fc_list; + ext4_lblk_t i_fc_lblk_start; + ext4_lblk_t i_fc_lblk_len; + atomic_t i_fc_updates; + wait_queue_head_t i_fc_wait; + struct mutex i_fc_lock; + loff_t i_disksize; + struct rw_semaphore i_data_sem; + struct rw_semaphore i_mmap_sem; + struct inode vfs_inode; + struct jbd2_inode *jinode; + spinlock_t i_raw_lock; + struct timespec64 i_crtime; + atomic_t i_prealloc_active; + struct list_head 
i_prealloc_list; + spinlock_t i_prealloc_lock; + struct ext4_es_tree i_es_tree; + rwlock_t i_es_lock; + struct list_head i_es_list; + unsigned int i_es_all_nr; + unsigned int i_es_shk_nr; + ext4_lblk_t i_es_shrink_lblk; + ext4_group_t i_last_alloc_group; + unsigned int i_reserved_data_blocks; + struct ext4_pending_tree i_pending_tree; + __u16 i_extra_isize; + u16 i_inline_off; + u16 i_inline_size; + qsize_t i_reserved_quota; + spinlock_t i_completed_io_lock; + struct list_head i_rsv_conversion_list; + struct work_struct i_rsv_conversion_work; + atomic_t i_unwritten; + spinlock_t i_block_reservation_lock; + tid_t i_sync_tid; + tid_t i_datasync_tid; + struct dquot *i_dquot[3]; + __u32 i_csum_seed; + kprojid_t i_projid; +}; + +struct ext4_super_block { + __le32 s_inodes_count; + __le32 s_blocks_count_lo; + __le32 s_r_blocks_count_lo; + __le32 s_free_blocks_count_lo; + __le32 s_free_inodes_count; + __le32 s_first_data_block; + __le32 s_log_block_size; + __le32 s_log_cluster_size; + __le32 s_blocks_per_group; + __le32 s_clusters_per_group; + __le32 s_inodes_per_group; + __le32 s_mtime; + __le32 s_wtime; + __le16 s_mnt_count; + __le16 s_max_mnt_count; + __le16 s_magic; + __le16 s_state; + __le16 s_errors; + __le16 s_minor_rev_level; + __le32 s_lastcheck; + __le32 s_checkinterval; + __le32 s_creator_os; + __le32 s_rev_level; + __le16 s_def_resuid; + __le16 s_def_resgid; + __le32 s_first_ino; + __le16 s_inode_size; + __le16 s_block_group_nr; + __le32 s_feature_compat; + __le32 s_feature_incompat; + __le32 s_feature_ro_compat; + __u8 s_uuid[16]; + char s_volume_name[16]; + char s_last_mounted[64]; + __le32 s_algorithm_usage_bitmap; + __u8 s_prealloc_blocks; + __u8 s_prealloc_dir_blocks; + __le16 s_reserved_gdt_blocks; + __u8 s_journal_uuid[16]; + __le32 s_journal_inum; + __le32 s_journal_dev; + __le32 s_last_orphan; + __le32 s_hash_seed[4]; + __u8 s_def_hash_version; + __u8 s_jnl_backup_type; + __le16 s_desc_size; + __le32 s_default_mount_opts; + __le32 s_first_meta_bg; + __le32 s_mkfs_time; + __le32 s_jnl_blocks[17]; + __le32 s_blocks_count_hi; + __le32 s_r_blocks_count_hi; + __le32 s_free_blocks_count_hi; + __le16 s_min_extra_isize; + __le16 s_want_extra_isize; + __le32 s_flags; + __le16 s_raid_stride; + __le16 s_mmp_update_interval; + __le64 s_mmp_block; + __le32 s_raid_stripe_width; + __u8 s_log_groups_per_flex; + __u8 s_checksum_type; + __u8 s_encryption_level; + __u8 s_reserved_pad; + __le64 s_kbytes_written; + __le32 s_snapshot_inum; + __le32 s_snapshot_id; + __le64 s_snapshot_r_blocks_count; + __le32 s_snapshot_list; + __le32 s_error_count; + __le32 s_first_error_time; + __le32 s_first_error_ino; + __le64 s_first_error_block; + __u8 s_first_error_func[32]; + __le32 s_first_error_line; + __le32 s_last_error_time; + __le32 s_last_error_ino; + __le32 s_last_error_line; + __le64 s_last_error_block; + __u8 s_last_error_func[32]; + __u8 s_mount_opts[64]; + __le32 s_usr_quota_inum; + __le32 s_grp_quota_inum; + __le32 s_overhead_clusters; + __le32 s_backup_bgs[2]; + __u8 s_encrypt_algos[4]; + __u8 s_encrypt_pw_salt[16]; + __le32 s_lpf_ino; + __le32 s_prj_quota_inum; + __le32 s_checksum_seed; + __u8 s_wtime_hi; + __u8 s_mtime_hi; + __u8 s_mkfs_time_hi; + __u8 s_lastcheck_hi; + __u8 s_first_error_time_hi; + __u8 s_last_error_time_hi; + __u8 s_first_error_errcode; + __u8 s_last_error_errcode; + __le16 s_encoding; + __le16 s_encoding_flags; + __le32 s_reserved[95]; + __le32 s_checksum; +}; + +struct mb_cache___2; + +struct ext4_group_info; + +struct ext4_locality_group; + +struct ext4_li_request; + 
+struct ext4_sb_info { + long unsigned int s_desc_size; + long unsigned int s_inodes_per_block; + long unsigned int s_blocks_per_group; + long unsigned int s_clusters_per_group; + long unsigned int s_inodes_per_group; + long unsigned int s_itb_per_group; + long unsigned int s_gdb_count; + long unsigned int s_desc_per_block; + ext4_group_t s_groups_count; + ext4_group_t s_blockfile_groups; + long unsigned int s_overhead; + unsigned int s_cluster_ratio; + unsigned int s_cluster_bits; + loff_t s_bitmap_maxbytes; + struct buffer_head *s_sbh; + struct ext4_super_block *s_es; + struct buffer_head **s_group_desc; + unsigned int s_mount_opt; + unsigned int s_mount_opt2; + long unsigned int s_mount_flags; + unsigned int s_def_mount_opt; + ext4_fsblk_t s_sb_block; + atomic64_t s_resv_clusters; + kuid_t s_resuid; + kgid_t s_resgid; + short unsigned int s_mount_state; + short unsigned int s_pad; + int s_addr_per_block_bits; + int s_desc_per_block_bits; + int s_inode_size; + int s_first_ino; + unsigned int s_inode_readahead_blks; + unsigned int s_inode_goal; + u32 s_hash_seed[4]; + int s_def_hash_version; + int s_hash_unsigned; + struct percpu_counter s_freeclusters_counter; + struct percpu_counter s_freeinodes_counter; + struct percpu_counter s_dirs_counter; + struct percpu_counter s_dirtyclusters_counter; + struct percpu_counter s_sra_exceeded_retry_limit; + struct blockgroup_lock *s_blockgroup_lock; + struct proc_dir_entry *s_proc; + struct kobject s_kobj; + struct completion s_kobj_unregister; + struct super_block *s_sb; + struct buffer_head *s_mmp_bh; + struct journal_s *s_journal; + struct list_head s_orphan; + struct mutex s_orphan_lock; + long unsigned int s_ext4_flags; + long unsigned int s_commit_interval; + u32 s_max_batch_time; + u32 s_min_batch_time; + struct block_device *s_journal_bdev; + char *s_qf_names[3]; + int s_jquota_fmt; + unsigned int s_want_extra_isize; + struct ext4_system_blocks *s_system_blks; + struct ext4_group_info ***s_group_info; + struct inode *s_buddy_cache; + spinlock_t s_md_lock; + short unsigned int *s_mb_offsets; + unsigned int *s_mb_maxs; + unsigned int s_group_info_size; + unsigned int s_mb_free_pending; + struct list_head s_freed_data_list; + struct rb_root s_mb_avg_fragment_size_root; + rwlock_t s_mb_rb_lock; + struct list_head *s_mb_largest_free_orders; + rwlock_t *s_mb_largest_free_orders_locks; + long unsigned int s_stripe; + unsigned int s_mb_max_linear_groups; + unsigned int s_mb_stream_request; + unsigned int s_mb_max_to_scan; + unsigned int s_mb_min_to_scan; + unsigned int s_mb_stats; + unsigned int s_mb_order2_reqs; + unsigned int s_mb_group_prealloc; + unsigned int s_mb_max_inode_prealloc; + unsigned int s_max_dir_size_kb; + long unsigned int s_mb_last_group; + long unsigned int s_mb_last_start; + unsigned int s_mb_prefetch; + unsigned int s_mb_prefetch_limit; + atomic_t s_bal_reqs; + atomic_t s_bal_success; + atomic_t s_bal_allocated; + atomic_t s_bal_ex_scanned; + atomic_t s_bal_groups_scanned; + atomic_t s_bal_goals; + atomic_t s_bal_breaks; + atomic_t s_bal_2orders; + atomic_t s_bal_cr0_bad_suggestions; + atomic_t s_bal_cr1_bad_suggestions; + atomic64_t s_bal_cX_groups_considered[4]; + atomic64_t s_bal_cX_hits[4]; + atomic64_t s_bal_cX_failed[4]; + atomic_t s_mb_buddies_generated; + atomic64_t s_mb_generation_time; + atomic_t s_mb_lost_chunks; + atomic_t s_mb_preallocated; + atomic_t s_mb_discarded; + atomic_t s_lock_busy; + struct ext4_locality_group *s_locality_groups; + long unsigned int s_sectors_written_start; + u64 s_kbytes_written; + 
unsigned int s_extent_max_zeroout_kb; + unsigned int s_log_groups_per_flex; + struct flex_groups **s_flex_groups; + ext4_group_t s_flex_groups_allocated; + struct workqueue_struct *rsv_conversion_wq; + struct timer_list s_err_report; + struct ext4_li_request *s_li_request; + unsigned int s_li_wait_mult; + struct task_struct *s_mmp_tsk; + atomic_t s_last_trim_minblks; + struct crypto_shash *s_chksum_driver; + __u32 s_csum_seed; + struct shrinker s_es_shrinker; + struct list_head s_es_list; + long int s_es_nr_inode; + struct ext4_es_stats s_es_stats; + struct mb_cache___2 *s_ea_block_cache; + struct mb_cache___2 *s_ea_inode_cache; + long: 64; + long: 64; + long: 64; + spinlock_t s_es_lock; + struct ratelimit_state s_err_ratelimit_state; + struct ratelimit_state s_warning_ratelimit_state; + struct ratelimit_state s_msg_ratelimit_state; + atomic_t s_warning_count; + atomic_t s_msg_count; + struct fscrypt_dummy_policy s_dummy_enc_policy; + struct percpu_rw_semaphore s_writepages_rwsem; + struct dax_device *s_daxdev; + errseq_t s_bdev_wb_err; + spinlock_t s_bdev_wb_lock; + spinlock_t s_error_lock; + int s_add_error_count; + int s_first_error_code; + __u32 s_first_error_line; + __u32 s_first_error_ino; + __u64 s_first_error_block; + const char *s_first_error_func; + time64_t s_first_error_time; + int s_last_error_code; + __u32 s_last_error_line; + __u32 s_last_error_ino; + __u64 s_last_error_block; + const char *s_last_error_func; + time64_t s_last_error_time; + struct work_struct s_error_work; + atomic_t s_fc_subtid; + atomic_t s_fc_ineligible_updates; + struct list_head s_fc_q[2]; + struct list_head s_fc_dentry_q[2]; + unsigned int s_fc_bytes; + spinlock_t s_fc_lock; + struct buffer_head *s_fc_bh; + struct ext4_fc_stats s_fc_stats; + u64 s_fc_avg_commit_time; + struct ext4_fc_replay_state s_fc_replay_state; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct ext4_group_info { + long unsigned int bb_state; + struct rb_root bb_free_root; + ext4_grpblk_t bb_first_free; + ext4_grpblk_t bb_free; + ext4_grpblk_t bb_fragments; + ext4_grpblk_t bb_largest_free_order; + ext4_group_t bb_group; + struct list_head bb_prealloc_list; + struct rw_semaphore alloc_sem; + struct rb_node bb_avg_fragment_size_rb; + struct list_head bb_largest_free_order_node; + ext4_grpblk_t bb_counters[0]; +}; + +struct ext4_locality_group { + struct mutex lg_mutex; + struct list_head lg_prealloc_list[10]; + spinlock_t lg_prealloc_lock; +}; + +enum ext4_li_mode { + EXT4_LI_MODE_PREFETCH_BBITMAP = 0, + EXT4_LI_MODE_ITABLE = 1, +}; + +struct ext4_li_request { + struct super_block *lr_super; + enum ext4_li_mode lr_mode; + ext4_group_t lr_first_not_zeroed; + ext4_group_t lr_next_group; + struct list_head lr_request; + long unsigned int lr_next_sched; + long unsigned int lr_timeout; +}; + +struct ext4_map_blocks { + ext4_fsblk_t m_pblk; + ext4_lblk_t m_lblk; + unsigned int m_len; + unsigned int m_flags; +}; + +typedef enum { + EXT4_IGET_NORMAL = 0, + EXT4_IGET_SPECIAL = 1, + EXT4_IGET_HANDLE = 2, +} ext4_iget_flags; + +struct ext4_system_zone { + struct rb_node node; + ext4_fsblk_t start_blk; + unsigned int count; + u32 ino; +}; + +enum { + EXT4_INODE_SECRM = 0, + EXT4_INODE_UNRM = 1, + EXT4_INODE_COMPR = 2, + EXT4_INODE_SYNC = 3, + EXT4_INODE_IMMUTABLE = 4, + EXT4_INODE_APPEND = 5, + EXT4_INODE_NODUMP = 6, + EXT4_INODE_NOATIME = 7, + EXT4_INODE_DIRTY = 8, + EXT4_INODE_COMPRBLK = 9, + EXT4_INODE_NOCOMPR = 10, + EXT4_INODE_ENCRYPT = 11, + EXT4_INODE_INDEX = 12, + EXT4_INODE_IMAGIC = 13, + EXT4_INODE_JOURNAL_DATA = 
14, + EXT4_INODE_NOTAIL = 15, + EXT4_INODE_DIRSYNC = 16, + EXT4_INODE_TOPDIR = 17, + EXT4_INODE_HUGE_FILE = 18, + EXT4_INODE_EXTENTS = 19, + EXT4_INODE_VERITY = 20, + EXT4_INODE_EA_INODE = 21, + EXT4_INODE_DAX = 25, + EXT4_INODE_INLINE_DATA = 28, + EXT4_INODE_PROJINHERIT = 29, + EXT4_INODE_CASEFOLD = 30, + EXT4_INODE_RESERVED = 31, +}; + +enum { + EXT4_FC_REASON_OK = 0, + EXT4_FC_REASON_INELIGIBLE = 1, + EXT4_FC_REASON_ALREADY_COMMITTED = 2, + EXT4_FC_REASON_FC_START_FAILED = 3, + EXT4_FC_REASON_FC_FAILED = 4, + EXT4_FC_REASON_XATTR = 0, + EXT4_FC_REASON_CROSS_RENAME = 1, + EXT4_FC_REASON_JOURNAL_FLAG_CHANGE = 2, + EXT4_FC_REASON_NOMEM = 3, + EXT4_FC_REASON_SWAP_BOOT = 4, + EXT4_FC_REASON_RESIZE = 5, + EXT4_FC_REASON_RENAME_DIR = 6, + EXT4_FC_REASON_FALLOC_RANGE = 7, + EXT4_FC_REASON_INODE_JOURNAL_DATA = 8, + EXT4_FC_COMMIT_FAILED = 9, + EXT4_FC_REASON_MAX = 10, +}; + +struct ext4_dir_entry_hash { + __le32 hash; + __le32 minor_hash; +}; + +struct ext4_dir_entry_2 { + __le32 inode; + __le16 rec_len; + __u8 name_len; + __u8 file_type; + char name[255]; +}; + +struct fname; + +struct dir_private_info { + struct rb_root root; + struct rb_node *curr_node; + struct fname *extra_fname; + loff_t last_pos; + __u32 curr_hash; + __u32 curr_minor_hash; + __u32 next_hash; +}; + +struct fname { + __u32 hash; + __u32 minor_hash; + struct rb_node rb_hash; + struct fname *next; + __u32 inode; + __u8 name_len; + __u8 file_type; + char name[0]; +}; + +enum SHIFT_DIRECTION { + SHIFT_LEFT = 0, + SHIFT_RIGHT = 1, +}; + +struct ext4_io_end_vec { + struct list_head list; + loff_t offset; + ssize_t size; +}; + +struct ext4_io_end { + struct list_head list; + handle_t *handle; + struct inode *inode; + struct bio *bio; + unsigned int flag; + atomic_t count; + struct list_head list_vec; +}; + +typedef struct ext4_io_end ext4_io_end_t; + +enum { + ES_WRITTEN_B = 0, + ES_UNWRITTEN_B = 1, + ES_DELAYED_B = 2, + ES_HOLE_B = 3, + ES_REFERENCED_B = 4, + ES_FLAGS = 5, +}; + +enum { + EXT4_STATE_JDATA = 0, + EXT4_STATE_NEW = 1, + EXT4_STATE_XATTR = 2, + EXT4_STATE_NO_EXPAND = 3, + EXT4_STATE_DA_ALLOC_CLOSE = 4, + EXT4_STATE_EXT_MIGRATE = 5, + EXT4_STATE_NEWENTRY = 6, + EXT4_STATE_MAY_INLINE_DATA = 7, + EXT4_STATE_EXT_PRECACHED = 8, + EXT4_STATE_LUSTRE_EA_INODE = 9, + EXT4_STATE_VERITY_IN_PROGRESS = 10, + EXT4_STATE_FC_COMMITTING = 11, +}; + +struct ext4_iloc { + struct buffer_head *bh; + long unsigned int offset; + ext4_group_t block_group; +}; + +struct ext4_extent_tail { + __le32 et_checksum; +}; + +struct ext4_extent { + __le32 ee_block; + __le16 ee_len; + __le16 ee_start_hi; + __le32 ee_start_lo; +}; + +struct ext4_extent_idx { + __le32 ei_block; + __le32 ei_leaf_lo; + __le16 ei_leaf_hi; + __u16 ei_unused; +}; + +struct ext4_extent_header { + __le16 eh_magic; + __le16 eh_entries; + __le16 eh_max; + __le16 eh_depth; + __le32 eh_generation; +}; + +struct ext4_ext_path { + ext4_fsblk_t p_block; + __u16 p_depth; + __u16 p_maxdepth; + struct ext4_extent *p_ext; + struct ext4_extent_idx *p_idx; + struct ext4_extent_header *p_hdr; + struct buffer_head *p_bh; +}; + +struct partial_cluster { + ext4_fsblk_t pclu; + ext4_lblk_t lblk; + enum { + initial = 0, + tofree = 1, + nofree = 2, + } state; +}; + +struct pending_reservation { + struct rb_node rb_node; + ext4_lblk_t lclu; +}; + +struct rsvd_count { + int ndelonly; + bool first_do_lblk_found; + ext4_lblk_t first_do_lblk; + ext4_lblk_t last_do_lblk; + struct extent_status *left_es; + bool partial; + ext4_lblk_t lclu; +}; + +enum { + EXT4_MF_MNTDIR_SAMPLED = 0, + EXT4_MF_FS_ABORTED 
= 1, + EXT4_MF_FC_INELIGIBLE = 2, + EXT4_MF_FC_COMMITTING = 3, +}; + +struct fsmap { + __u32 fmr_device; + __u32 fmr_flags; + __u64 fmr_physical; + __u64 fmr_owner; + __u64 fmr_offset; + __u64 fmr_length; + __u64 fmr_reserved[3]; +}; + +struct ext4_fsmap { + struct list_head fmr_list; + dev_t fmr_device; + uint32_t fmr_flags; + uint64_t fmr_physical; + uint64_t fmr_owner; + uint64_t fmr_length; +}; + +struct ext4_fsmap_head { + uint32_t fmh_iflags; + uint32_t fmh_oflags; + unsigned int fmh_count; + unsigned int fmh_entries; + struct ext4_fsmap fmh_keys[2]; +}; + +typedef int (*ext4_fsmap_format_t)(struct ext4_fsmap *, void *); + +typedef int (*ext4_mballoc_query_range_fn)(struct super_block *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t, void *); + +struct ext4_getfsmap_info { + struct ext4_fsmap_head *gfi_head; + ext4_fsmap_format_t gfi_formatter; + void *gfi_format_arg; + ext4_fsblk_t gfi_next_fsblk; + u32 gfi_dev; + ext4_group_t gfi_agno; + struct ext4_fsmap gfi_low; + struct ext4_fsmap gfi_high; + struct ext4_fsmap gfi_lastfree; + struct list_head gfi_meta_list; + bool gfi_last; +}; + +struct ext4_getfsmap_dev { + int (*gfd_fn)(struct super_block *, struct ext4_fsmap *, struct ext4_getfsmap_info *); + u32 gfd_dev; +}; + +struct dx_hash_info { + u32 hash; + u32 minor_hash; + int hash_version; + u32 *seed; +}; + +typedef unsigned int __kernel_mode_t; + +typedef __kernel_mode_t mode_t; + +struct ext4_inode { + __le16 i_mode; + __le16 i_uid; + __le32 i_size_lo; + __le32 i_atime; + __le32 i_ctime; + __le32 i_mtime; + __le32 i_dtime; + __le16 i_gid; + __le16 i_links_count; + __le32 i_blocks_lo; + __le32 i_flags; + union { + struct { + __le32 l_i_version; + } linux1; + struct { + __u32 h_i_translator; + } hurd1; + struct { + __u32 m_i_reserved1; + } masix1; + } osd1; + __le32 i_block[15]; + __le32 i_generation; + __le32 i_file_acl_lo; + __le32 i_size_high; + __le32 i_obso_faddr; + union { + struct { + __le16 l_i_blocks_high; + __le16 l_i_file_acl_high; + __le16 l_i_uid_high; + __le16 l_i_gid_high; + __le16 l_i_checksum_lo; + __le16 l_i_reserved; + } linux2; + struct { + __le16 h_i_reserved1; + __u16 h_i_mode_high; + __u16 h_i_uid_high; + __u16 h_i_gid_high; + __u32 h_i_author; + } hurd2; + struct { + __le16 h_i_reserved1; + __le16 m_i_file_acl_high; + __u32 m_i_reserved2[2]; + } masix2; + } osd2; + __le16 i_extra_isize; + __le16 i_checksum_hi; + __le32 i_ctime_extra; + __le32 i_mtime_extra; + __le32 i_atime_extra; + __le32 i_crtime; + __le32 i_crtime_extra; + __le32 i_version_hi; + __le32 i_projid; +}; + +struct orlov_stats { + __u64 free_clusters; + __u32 free_inodes; + __u32 used_dirs; +}; + +typedef struct { + __le32 *p; + __le32 key; + struct buffer_head *bh; +} Indirect; + +struct ext4_filename { + const struct qstr *usr_fname; + struct fscrypt_str disk_name; + struct dx_hash_info hinfo; + struct fscrypt_str crypto_buf; + struct fscrypt_str cf_name; +}; + +struct ext4_xattr_ibody_header { + __le32 h_magic; +}; + +struct ext4_xattr_entry { + __u8 e_name_len; + __u8 e_name_index; + __le16 e_value_offs; + __le32 e_value_inum; + __le32 e_value_size; + __le32 e_hash; + char e_name[0]; +}; + +struct ext4_xattr_info { + const char *name; + const void *value; + size_t value_len; + int name_index; + int in_inode; +}; + +struct ext4_xattr_search { + struct ext4_xattr_entry *first; + void *base; + void *end; + struct ext4_xattr_entry *here; + int not_found; +}; + +struct ext4_xattr_ibody_find { + struct ext4_xattr_search s; + struct ext4_iloc iloc; +}; + +typedef short unsigned int __kernel_uid16_t; 
+ +typedef short unsigned int __kernel_gid16_t; + +typedef __kernel_uid16_t uid16_t; + +typedef __kernel_gid16_t gid16_t; + +struct ext4_io_submit { + struct writeback_control *io_wbc; + struct bio *io_bio; + ext4_io_end_t *io_end; + sector_t io_next_block; +}; + +struct ext4_xattr_inode_array { + unsigned int count; + struct inode *inodes[0]; +}; + +struct mpage_da_data { + struct inode *inode; + struct writeback_control *wbc; + long unsigned int first_page; + long unsigned int next_page; + long unsigned int last_page; + struct ext4_map_blocks map; + struct ext4_io_submit io_submit; + unsigned int do_map: 1; + unsigned int scanned_until_end: 1; +}; + +struct fstrim_range { + __u64 start; + __u64 len; + __u64 minlen; +}; + +struct ext4_new_group_input { + __u32 group; + __u64 block_bitmap; + __u64 inode_bitmap; + __u64 inode_table; + __u32 blocks_count; + __u16 reserved_blocks; + __u16 unused; +}; + +struct compat_ext4_new_group_input { + u32 group; + compat_u64 block_bitmap; + compat_u64 inode_bitmap; + compat_u64 inode_table; + u32 blocks_count; + u16 reserved_blocks; + u16 unused; +} __attribute__((packed)); + +struct ext4_new_group_data { + __u32 group; + __u64 block_bitmap; + __u64 inode_bitmap; + __u64 inode_table; + __u32 blocks_count; + __u16 reserved_blocks; + __u16 mdata_blocks; + __u32 free_clusters_count; +}; + +struct move_extent { + __u32 reserved; + __u32 donor_fd; + __u64 orig_start; + __u64 donor_start; + __u64 len; + __u64 moved_len; +}; + +struct fsmap_head { + __u32 fmh_iflags; + __u32 fmh_oflags; + __u32 fmh_count; + __u32 fmh_entries; + __u64 fmh_reserved[6]; + struct fsmap fmh_keys[2]; + struct fsmap fmh_recs[0]; +}; + +struct getfsmap_info { + struct super_block *gi_sb; + struct fsmap_head *gi_data; + unsigned int gi_idx; + __u32 gi_last_flags; +}; + +enum blk_default_limits { + BLK_MAX_SEGMENTS = 128, + BLK_SAFE_MAX_SECTORS = 255, + BLK_DEF_MAX_SECTORS = 2560, + BLK_MAX_SEGMENT_SIZE = 65536, + BLK_SEG_BOUNDARY_MASK = 4294967295, +}; + +struct ext4_free_data { + struct list_head efd_list; + struct rb_node efd_node; + ext4_group_t efd_group; + ext4_grpblk_t efd_start_cluster; + ext4_grpblk_t efd_count; + tid_t efd_tid; +}; + +struct ext4_prealloc_space { + struct list_head pa_inode_list; + struct list_head pa_group_list; + union { + struct list_head pa_tmp_list; + struct callback_head pa_rcu; + } u; + spinlock_t pa_lock; + atomic_t pa_count; + unsigned int pa_deleted; + ext4_fsblk_t pa_pstart; + ext4_lblk_t pa_lstart; + ext4_grpblk_t pa_len; + ext4_grpblk_t pa_free; + short unsigned int pa_type; + spinlock_t *pa_obj_lock; + struct inode *pa_inode; +}; + +enum { + MB_INODE_PA = 0, + MB_GROUP_PA = 1, +}; + +struct ext4_free_extent { + ext4_lblk_t fe_logical; + ext4_grpblk_t fe_start; + ext4_group_t fe_group; + ext4_grpblk_t fe_len; +}; + +struct ext4_allocation_context { + struct inode *ac_inode; + struct super_block *ac_sb; + struct ext4_free_extent ac_o_ex; + struct ext4_free_extent ac_g_ex; + struct ext4_free_extent ac_b_ex; + struct ext4_free_extent ac_f_ex; + ext4_group_t ac_last_optimal_group; + __u32 ac_groups_considered; + __u32 ac_flags; + __u16 ac_groups_scanned; + __u16 ac_groups_linear_remaining; + __u16 ac_found; + __u16 ac_tail; + __u16 ac_buddy; + __u8 ac_status; + __u8 ac_criteria; + __u8 ac_2order; + __u8 ac_op; + struct page *ac_bitmap_page; + struct page *ac_buddy_page; + struct ext4_prealloc_space *ac_pa; + struct ext4_locality_group *ac_lg; +}; + +struct ext4_buddy { + struct page *bd_buddy_page; + void *bd_buddy; + struct page *bd_bitmap_page; + 
void *bd_bitmap; + struct ext4_group_info *bd_info; + struct super_block *bd_sb; + __u16 bd_blkbits; + ext4_group_t bd_group; +}; + +struct sg { + struct ext4_group_info info; + ext4_grpblk_t counters[18]; +}; + +struct migrate_struct { + ext4_lblk_t first_block; + ext4_lblk_t last_block; + ext4_lblk_t curr_block; + ext4_fsblk_t first_pblock; + ext4_fsblk_t last_pblock; +}; + +struct mmp_struct { + __le32 mmp_magic; + __le32 mmp_seq; + __le64 mmp_time; + char mmp_nodename[64]; + char mmp_bdevname[32]; + __le16 mmp_check_interval; + __le16 mmp_pad1; + __le32 mmp_pad2[226]; + __le32 mmp_checksum; +}; + +struct ext4_dir_entry { + __le32 inode; + __le16 rec_len; + __le16 name_len; + char name[255]; +}; + +struct ext4_dir_entry_tail { + __le32 det_reserved_zero1; + __le16 det_rec_len; + __u8 det_reserved_zero2; + __u8 det_reserved_ft; + __le32 det_checksum; +}; + +typedef enum { + EITHER = 0, + INDEX = 1, + DIRENT = 2, + DIRENT_HTREE = 3, +} dirblock_type_t; + +struct fake_dirent { + __le32 inode; + __le16 rec_len; + u8 name_len; + u8 file_type; +}; + +struct dx_countlimit { + __le16 limit; + __le16 count; +}; + +struct dx_entry { + __le32 hash; + __le32 block; +}; + +struct dx_root_info { + __le32 reserved_zero; + u8 hash_version; + u8 info_length; + u8 indirect_levels; + u8 unused_flags; +}; + +struct dx_root { + struct fake_dirent dot; + char dot_name[4]; + struct fake_dirent dotdot; + char dotdot_name[4]; + struct dx_root_info info; + struct dx_entry entries[0]; +}; + +struct dx_node { + struct fake_dirent fake; + struct dx_entry entries[0]; +}; + +struct dx_frame { + struct buffer_head *bh; + struct dx_entry *entries; + struct dx_entry *at; +}; + +struct dx_map_entry { + u32 hash; + u16 offs; + u16 size; +}; + +struct dx_tail { + u32 dt_reserved; + __le32 dt_checksum; +}; + +struct ext4_renament { + struct inode *dir; + struct dentry *dentry; + struct inode *inode; + bool is_dir; + int dir_nlink_delta; + struct buffer_head *bh; + struct ext4_dir_entry_2 *de; + int inlined; + struct buffer_head *dir_bh; + struct ext4_dir_entry_2 *parent_de; + int dir_inlined; +}; + +enum bio_post_read_step { + STEP_INITIAL = 0, + STEP_DECRYPT = 1, + STEP_VERITY = 2, + STEP_MAX = 3, +}; + +struct bio_post_read_ctx { + struct bio *bio; + struct work_struct work; + unsigned int cur_step; + unsigned int enabled_steps; +}; + +enum { + BLOCK_BITMAP = 0, + INODE_BITMAP = 1, + INODE_TABLE = 2, + GROUP_TABLE_COUNT = 3, +}; + +struct ext4_rcu_ptr { + struct callback_head rcu; + void *ptr; +}; + +struct ext4_new_flex_group_data { + struct ext4_new_group_data *groups; + __u16 *bg_flags; + ext4_group_t count; +}; + +enum { + I_DATA_SEM_NORMAL = 0, + I_DATA_SEM_OTHER = 1, + I_DATA_SEM_QUOTA = 2, +}; + +struct ext4_lazy_init { + long unsigned int li_state; + struct list_head li_request_list; + struct mutex li_list_mtx; +}; + +struct ext4_journal_cb_entry { + struct list_head jce_list; + void (*jce_func)(struct super_block *, struct ext4_journal_cb_entry *, int); +}; + +struct trace_event_raw_ext4_other_inode_update_time { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ino_t orig_ino; + uid_t uid; + gid_t gid; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_free_inode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + uid_t uid; + gid_t gid; + __u64 blocks; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_request_inode { + struct trace_entry ent; + dev_t dev; + ino_t dir; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_allocate_inode { + struct trace_entry 
ent; + dev_t dev; + ino_t ino; + ino_t dir; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_evict_inode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int nlink; + char __data[0]; +}; + +struct trace_event_raw_ext4_drop_inode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int drop; + char __data[0]; +}; + +struct trace_event_raw_ext4_nfs_commit_metadata { + struct trace_entry ent; + dev_t dev; + ino_t ino; + char __data[0]; +}; + +struct trace_event_raw_ext4_mark_inode_dirty { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int ip; + char __data[0]; +}; + +struct trace_event_raw_ext4_begin_ordered_truncate { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t new_size; + char __data[0]; +}; + +struct trace_event_raw_ext4__write_begin { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t pos; + unsigned int len; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_ext4__write_end { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t pos; + unsigned int len; + unsigned int copied; + char __data[0]; +}; + +struct trace_event_raw_ext4_writepages { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long int nr_to_write; + long int pages_skipped; + loff_t range_start; + loff_t range_end; + long unsigned int writeback_index; + int sync_mode; + char for_kupdate; + char range_cyclic; + char __data[0]; +}; + +struct trace_event_raw_ext4_da_write_pages { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int first_page; + long int nr_to_write; + int sync_mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_da_write_pages_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 lblk; + __u32 len; + __u32 flags; + char __data[0]; +}; + +struct trace_event_raw_ext4_writepages_result { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int ret; + int pages_written; + long int pages_skipped; + long unsigned int writeback_index; + int sync_mode; + char __data[0]; +}; + +struct trace_event_raw_ext4__page_op { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int index; + char __data[0]; +}; + +struct trace_event_raw_ext4_invalidatepage_op { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int index; + unsigned int offset; + unsigned int length; + char __data[0]; +}; + +struct trace_event_raw_ext4_discard_blocks { + struct trace_entry ent; + dev_t dev; + __u64 blk; + __u64 count; + char __data[0]; +}; + +struct trace_event_raw_ext4__mb_new_pa { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 pa_pstart; + __u64 pa_lstart; + __u32 pa_len; + char __data[0]; +}; + +struct trace_event_raw_ext4_mb_release_inode_pa { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 block; + __u32 count; + char __data[0]; +}; + +struct trace_event_raw_ext4_mb_release_group_pa { + struct trace_entry ent; + dev_t dev; + __u64 pa_pstart; + __u32 pa_len; + char __data[0]; +}; + +struct trace_event_raw_ext4_discard_preallocations { + struct trace_entry ent; + dev_t dev; + ino_t ino; + unsigned int len; + unsigned int needed; + char __data[0]; +}; + +struct trace_event_raw_ext4_mb_discard_preallocations { + struct trace_entry ent; + dev_t dev; + int needed; + char __data[0]; +}; + +struct trace_event_raw_ext4_request_blocks { + struct trace_entry ent; + dev_t dev; + ino_t ino; + unsigned int len; + __u32 logical; + __u32 lleft; + __u32 lright; + __u64 goal; + __u64 pleft; + __u64 pright; + unsigned int flags; + char __data[0]; +}; + +struct 
trace_event_raw_ext4_allocate_blocks { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 block; + unsigned int len; + __u32 logical; + __u32 lleft; + __u32 lright; + __u64 goal; + __u64 pleft; + __u64 pright; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_ext4_free_blocks { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 block; + long unsigned int count; + int flags; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_sync_file_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ino_t parent; + int datasync; + char __data[0]; +}; + +struct trace_event_raw_ext4_sync_file_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4_sync_fs { + struct trace_entry ent; + dev_t dev; + int wait; + char __data[0]; +}; + +struct trace_event_raw_ext4_alloc_da_blocks { + struct trace_entry ent; + dev_t dev; + ino_t ino; + unsigned int data_blocks; + char __data[0]; +}; + +struct trace_event_raw_ext4_mballoc_alloc { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u32 orig_logical; + int orig_start; + __u32 orig_group; + int orig_len; + __u32 goal_logical; + int goal_start; + __u32 goal_group; + int goal_len; + __u32 result_logical; + int result_start; + __u32 result_group; + int result_len; + __u16 found; + __u16 groups; + __u16 buddy; + __u16 flags; + __u16 tail; + __u8 cr; + char __data[0]; +}; + +struct trace_event_raw_ext4_mballoc_prealloc { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u32 orig_logical; + int orig_start; + __u32 orig_group; + int orig_len; + __u32 result_logical; + int result_start; + __u32 result_group; + int result_len; + char __data[0]; +}; + +struct trace_event_raw_ext4__mballoc { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int result_start; + __u32 result_group; + int result_len; + char __data[0]; +}; + +struct trace_event_raw_ext4_forget { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 block; + int is_metadata; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_da_update_reserve_space { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 i_blocks; + int used_blocks; + int reserved_data_blocks; + int quota_claim; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_da_reserve_space { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 i_blocks; + int reserved_data_blocks; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_da_release_space { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 i_blocks; + int freed_blocks; + int reserved_data_blocks; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4__bitmap_load { + struct trace_entry ent; + dev_t dev; + __u32 group; + char __data[0]; +}; + +struct trace_event_raw_ext4_read_block_bitmap_load { + struct trace_entry ent; + dev_t dev; + __u32 group; + bool prefetch; + char __data[0]; +}; + +struct trace_event_raw_ext4__fallocate_mode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t offset; + loff_t len; + int mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_fallocate_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t pos; + unsigned int blocks; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4_unlink_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ino_t parent; + loff_t size; + char __data[0]; +}; + +struct trace_event_raw_ext4_unlink_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int ret; + 
char __data[0]; +}; + +struct trace_event_raw_ext4__truncate { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 blocks; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_convert_to_initialized_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t m_lblk; + unsigned int m_len; + ext4_lblk_t u_lblk; + unsigned int u_len; + ext4_fsblk_t u_pblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_convert_to_initialized_fastpath { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t m_lblk; + unsigned int m_len; + ext4_lblk_t u_lblk; + unsigned int u_len; + ext4_fsblk_t u_pblk; + ext4_lblk_t i_lblk; + unsigned int i_len; + ext4_fsblk_t i_pblk; + char __data[0]; +}; + +struct trace_event_raw_ext4__map_blocks_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + unsigned int len; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_ext4__map_blocks_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + unsigned int flags; + ext4_fsblk_t pblk; + ext4_lblk_t lblk; + unsigned int len; + unsigned int mflags; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_load_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_fsblk_t pblk; + ext4_lblk_t lblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_load_inode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + char __data[0]; +}; + +struct trace_event_raw_ext4_journal_start { + struct trace_entry ent; + dev_t dev; + long unsigned int ip; + int blocks; + int rsv_blocks; + int revoke_creds; + char __data[0]; +}; + +struct trace_event_raw_ext4_journal_start_reserved { + struct trace_entry ent; + dev_t dev; + long unsigned int ip; + int blocks; + char __data[0]; +}; + +struct trace_event_raw_ext4__trim { + struct trace_entry ent; + int dev_major; + int dev_minor; + __u32 group; + int start; + int len; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_handle_unwritten_extents { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int flags; + ext4_lblk_t lblk; + ext4_fsblk_t pblk; + unsigned int len; + unsigned int allocated; + ext4_fsblk_t newblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_get_implied_cluster_alloc_exit { + struct trace_entry ent; + dev_t dev; + unsigned int flags; + ext4_lblk_t lblk; + ext4_fsblk_t pblk; + unsigned int len; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_show_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_fsblk_t pblk; + ext4_lblk_t lblk; + short unsigned int len; + char __data[0]; +}; + +struct trace_event_raw_ext4_remove_blocks { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t from; + ext4_lblk_t to; + ext4_fsblk_t ee_pblk; + ext4_lblk_t ee_lblk; + short unsigned int ee_len; + ext4_fsblk_t pc_pclu; + ext4_lblk_t pc_lblk; + int pc_state; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_rm_leaf { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t start; + ext4_lblk_t ee_lblk; + ext4_fsblk_t ee_pblk; + short int ee_len; + ext4_fsblk_t pc_pclu; + ext4_lblk_t pc_lblk; + int pc_state; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_rm_idx { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_fsblk_t pblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_remove_space { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t start; + ext4_lblk_t end; + int depth; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_remove_space_done { + struct 
trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t start; + ext4_lblk_t end; + int depth; + ext4_fsblk_t pc_pclu; + ext4_lblk_t pc_lblk; + int pc_state; + short unsigned int eh_entries; + char __data[0]; +}; + +struct trace_event_raw_ext4__es_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + ext4_lblk_t len; + ext4_fsblk_t pblk; + char status; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_remove_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t lblk; + loff_t len; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_find_extent_range_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_find_extent_range_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + ext4_lblk_t len; + ext4_fsblk_t pblk; + char status; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_lookup_extent_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_lookup_extent_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + ext4_lblk_t len; + ext4_fsblk_t pblk; + char status; + int found; + char __data[0]; +}; + +struct trace_event_raw_ext4__es_shrink_enter { + struct trace_entry ent; + dev_t dev; + int nr_to_scan; + int cache_cnt; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_shrink_scan_exit { + struct trace_entry ent; + dev_t dev; + int nr_shrunk; + int cache_cnt; + char __data[0]; +}; + +struct trace_event_raw_ext4_collapse_range { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t offset; + loff_t len; + char __data[0]; +}; + +struct trace_event_raw_ext4_insert_range { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t offset; + loff_t len; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_shrink { + struct trace_entry ent; + dev_t dev; + int nr_shrunk; + long long unsigned int scan_time; + int nr_skipped; + int retried; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_insert_delayed_block { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + ext4_lblk_t len; + ext4_fsblk_t pblk; + char status; + bool allocated; + char __data[0]; +}; + +struct trace_event_raw_ext4_fsmap_class { + struct trace_entry ent; + dev_t dev; + dev_t keydev; + u32 agno; + u64 bno; + u64 len; + u64 owner; + char __data[0]; +}; + +struct trace_event_raw_ext4_getfsmap_class { + struct trace_entry ent; + dev_t dev; + dev_t keydev; + u64 block; + u64 len; + u64 owner; + u64 flags; + char __data[0]; +}; + +struct trace_event_raw_ext4_shutdown { + struct trace_entry ent; + dev_t dev; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_ext4_error { + struct trace_entry ent; + dev_t dev; + const char *function; + unsigned int line; + char __data[0]; +}; + +struct trace_event_raw_ext4_prefetch_bitmaps { + struct trace_entry ent; + dev_t dev; + __u32 group; + __u32 next; + __u32 ios; + char __data[0]; +}; + +struct trace_event_raw_ext4_lazy_itable_init { + struct trace_entry ent; + dev_t dev; + __u32 group; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_replay_scan { + struct trace_entry ent; + dev_t dev; + int error; + int off; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_replay { + struct trace_entry ent; + dev_t dev; + int tag; + int ino; + int priv1; + int priv2; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_commit_start { + struct trace_entry ent; 
+ dev_t dev; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_commit_stop { + struct trace_entry ent; + dev_t dev; + int nblks; + int reason; + int num_fc; + int num_fc_ineligible; + int nblks_agg; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_stats { + struct trace_entry ent; + dev_t dev; + struct ext4_sb_info *sbi; + int count; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_track_create { + struct trace_entry ent; + dev_t dev; + int ino; + int error; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_track_link { + struct trace_entry ent; + dev_t dev; + int ino; + int error; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_track_unlink { + struct trace_entry ent; + dev_t dev; + int ino; + int error; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_track_inode { + struct trace_entry ent; + dev_t dev; + int ino; + int error; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_track_range { + struct trace_entry ent; + dev_t dev; + int ino; + long int start; + long int end; + int error; + char __data[0]; +}; + +struct trace_event_data_offsets_ext4_other_inode_update_time {}; + +struct trace_event_data_offsets_ext4_free_inode {}; + +struct trace_event_data_offsets_ext4_request_inode {}; + +struct trace_event_data_offsets_ext4_allocate_inode {}; + +struct trace_event_data_offsets_ext4_evict_inode {}; + +struct trace_event_data_offsets_ext4_drop_inode {}; + +struct trace_event_data_offsets_ext4_nfs_commit_metadata {}; + +struct trace_event_data_offsets_ext4_mark_inode_dirty {}; + +struct trace_event_data_offsets_ext4_begin_ordered_truncate {}; + +struct trace_event_data_offsets_ext4__write_begin {}; + +struct trace_event_data_offsets_ext4__write_end {}; + +struct trace_event_data_offsets_ext4_writepages {}; + +struct trace_event_data_offsets_ext4_da_write_pages {}; + +struct trace_event_data_offsets_ext4_da_write_pages_extent {}; + +struct trace_event_data_offsets_ext4_writepages_result {}; + +struct trace_event_data_offsets_ext4__page_op {}; + +struct trace_event_data_offsets_ext4_invalidatepage_op {}; + +struct trace_event_data_offsets_ext4_discard_blocks {}; + +struct trace_event_data_offsets_ext4__mb_new_pa {}; + +struct trace_event_data_offsets_ext4_mb_release_inode_pa {}; + +struct trace_event_data_offsets_ext4_mb_release_group_pa {}; + +struct trace_event_data_offsets_ext4_discard_preallocations {}; + +struct trace_event_data_offsets_ext4_mb_discard_preallocations {}; + +struct trace_event_data_offsets_ext4_request_blocks {}; + +struct trace_event_data_offsets_ext4_allocate_blocks {}; + +struct trace_event_data_offsets_ext4_free_blocks {}; + +struct trace_event_data_offsets_ext4_sync_file_enter {}; + +struct trace_event_data_offsets_ext4_sync_file_exit {}; + +struct trace_event_data_offsets_ext4_sync_fs {}; + +struct trace_event_data_offsets_ext4_alloc_da_blocks {}; + +struct trace_event_data_offsets_ext4_mballoc_alloc {}; + +struct trace_event_data_offsets_ext4_mballoc_prealloc {}; + +struct trace_event_data_offsets_ext4__mballoc {}; + +struct trace_event_data_offsets_ext4_forget {}; + +struct trace_event_data_offsets_ext4_da_update_reserve_space {}; + +struct trace_event_data_offsets_ext4_da_reserve_space {}; + +struct trace_event_data_offsets_ext4_da_release_space {}; + +struct trace_event_data_offsets_ext4__bitmap_load {}; + +struct trace_event_data_offsets_ext4_read_block_bitmap_load {}; + +struct trace_event_data_offsets_ext4__fallocate_mode {}; + +struct trace_event_data_offsets_ext4_fallocate_exit {}; + +struct 
trace_event_data_offsets_ext4_unlink_enter {}; + +struct trace_event_data_offsets_ext4_unlink_exit {}; + +struct trace_event_data_offsets_ext4__truncate {}; + +struct trace_event_data_offsets_ext4_ext_convert_to_initialized_enter {}; + +struct trace_event_data_offsets_ext4_ext_convert_to_initialized_fastpath {}; + +struct trace_event_data_offsets_ext4__map_blocks_enter {}; + +struct trace_event_data_offsets_ext4__map_blocks_exit {}; + +struct trace_event_data_offsets_ext4_ext_load_extent {}; + +struct trace_event_data_offsets_ext4_load_inode {}; + +struct trace_event_data_offsets_ext4_journal_start {}; + +struct trace_event_data_offsets_ext4_journal_start_reserved {}; + +struct trace_event_data_offsets_ext4__trim {}; + +struct trace_event_data_offsets_ext4_ext_handle_unwritten_extents {}; + +struct trace_event_data_offsets_ext4_get_implied_cluster_alloc_exit {}; + +struct trace_event_data_offsets_ext4_ext_show_extent {}; + +struct trace_event_data_offsets_ext4_remove_blocks {}; + +struct trace_event_data_offsets_ext4_ext_rm_leaf {}; + +struct trace_event_data_offsets_ext4_ext_rm_idx {}; + +struct trace_event_data_offsets_ext4_ext_remove_space {}; + +struct trace_event_data_offsets_ext4_ext_remove_space_done {}; + +struct trace_event_data_offsets_ext4__es_extent {}; + +struct trace_event_data_offsets_ext4_es_remove_extent {}; + +struct trace_event_data_offsets_ext4_es_find_extent_range_enter {}; + +struct trace_event_data_offsets_ext4_es_find_extent_range_exit {}; + +struct trace_event_data_offsets_ext4_es_lookup_extent_enter {}; + +struct trace_event_data_offsets_ext4_es_lookup_extent_exit {}; + +struct trace_event_data_offsets_ext4__es_shrink_enter {}; + +struct trace_event_data_offsets_ext4_es_shrink_scan_exit {}; + +struct trace_event_data_offsets_ext4_collapse_range {}; + +struct trace_event_data_offsets_ext4_insert_range {}; + +struct trace_event_data_offsets_ext4_es_shrink {}; + +struct trace_event_data_offsets_ext4_es_insert_delayed_block {}; + +struct trace_event_data_offsets_ext4_fsmap_class {}; + +struct trace_event_data_offsets_ext4_getfsmap_class {}; + +struct trace_event_data_offsets_ext4_shutdown {}; + +struct trace_event_data_offsets_ext4_error {}; + +struct trace_event_data_offsets_ext4_prefetch_bitmaps {}; + +struct trace_event_data_offsets_ext4_lazy_itable_init {}; + +struct trace_event_data_offsets_ext4_fc_replay_scan {}; + +struct trace_event_data_offsets_ext4_fc_replay {}; + +struct trace_event_data_offsets_ext4_fc_commit_start {}; + +struct trace_event_data_offsets_ext4_fc_commit_stop {}; + +struct trace_event_data_offsets_ext4_fc_stats {}; + +struct trace_event_data_offsets_ext4_fc_track_create {}; + +struct trace_event_data_offsets_ext4_fc_track_link {}; + +struct trace_event_data_offsets_ext4_fc_track_unlink {}; + +struct trace_event_data_offsets_ext4_fc_track_inode {}; + +struct trace_event_data_offsets_ext4_fc_track_range {}; + +typedef void (*btf_trace_ext4_other_inode_update_time)(void *, struct inode *, ino_t); + +typedef void (*btf_trace_ext4_free_inode)(void *, struct inode *); + +typedef void (*btf_trace_ext4_request_inode)(void *, struct inode *, int); + +typedef void (*btf_trace_ext4_allocate_inode)(void *, struct inode *, struct inode *, int); + +typedef void (*btf_trace_ext4_evict_inode)(void *, struct inode *); + +typedef void (*btf_trace_ext4_drop_inode)(void *, struct inode *, int); + +typedef void (*btf_trace_ext4_nfs_commit_metadata)(void *, struct inode *); + +typedef void (*btf_trace_ext4_mark_inode_dirty)(void *, struct inode *, long unsigned 
int); + +typedef void (*btf_trace_ext4_begin_ordered_truncate)(void *, struct inode *, loff_t); + +typedef void (*btf_trace_ext4_write_begin)(void *, struct inode *, loff_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_da_write_begin)(void *, struct inode *, loff_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_write_end)(void *, struct inode *, loff_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_journalled_write_end)(void *, struct inode *, loff_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_da_write_end)(void *, struct inode *, loff_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_writepages)(void *, struct inode *, struct writeback_control *); + +typedef void (*btf_trace_ext4_da_write_pages)(void *, struct inode *, long unsigned int, struct writeback_control *); + +typedef void (*btf_trace_ext4_da_write_pages_extent)(void *, struct inode *, struct ext4_map_blocks *); + +typedef void (*btf_trace_ext4_writepages_result)(void *, struct inode *, struct writeback_control *, int, int); + +typedef void (*btf_trace_ext4_writepage)(void *, struct page *); + +typedef void (*btf_trace_ext4_readpage)(void *, struct page *); + +typedef void (*btf_trace_ext4_releasepage)(void *, struct page *); + +typedef void (*btf_trace_ext4_invalidatepage)(void *, struct page *, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_journalled_invalidatepage)(void *, struct page *, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_discard_blocks)(void *, struct super_block *, long long unsigned int, long long unsigned int); + +typedef void (*btf_trace_ext4_mb_new_inode_pa)(void *, struct ext4_allocation_context *, struct ext4_prealloc_space *); + +typedef void (*btf_trace_ext4_mb_new_group_pa)(void *, struct ext4_allocation_context *, struct ext4_prealloc_space *); + +typedef void (*btf_trace_ext4_mb_release_inode_pa)(void *, struct ext4_prealloc_space *, long long unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_mb_release_group_pa)(void *, struct super_block *, struct ext4_prealloc_space *); + +typedef void (*btf_trace_ext4_discard_preallocations)(void *, struct inode *, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_mb_discard_preallocations)(void *, struct super_block *, int); + +typedef void (*btf_trace_ext4_request_blocks)(void *, struct ext4_allocation_request *); + +typedef void (*btf_trace_ext4_allocate_blocks)(void *, struct ext4_allocation_request *, long long unsigned int); + +typedef void (*btf_trace_ext4_free_blocks)(void *, struct inode *, __u64, long unsigned int, int); + +typedef void (*btf_trace_ext4_sync_file_enter)(void *, struct file *, int); + +typedef void (*btf_trace_ext4_sync_file_exit)(void *, struct inode *, int); + +typedef void (*btf_trace_ext4_sync_fs)(void *, struct super_block *, int); + +typedef void (*btf_trace_ext4_alloc_da_blocks)(void *, struct inode *); + +typedef void (*btf_trace_ext4_mballoc_alloc)(void *, struct ext4_allocation_context *); + +typedef void (*btf_trace_ext4_mballoc_prealloc)(void *, struct ext4_allocation_context *); + +typedef void (*btf_trace_ext4_mballoc_discard)(void *, struct super_block *, struct inode *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); + +typedef void (*btf_trace_ext4_mballoc_free)(void *, struct super_block *, struct inode *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); + +typedef void (*btf_trace_ext4_forget)(void *, struct inode *, int, __u64); + +typedef void (*btf_trace_ext4_da_update_reserve_space)(void 
*, struct inode *, int, int); + +typedef void (*btf_trace_ext4_da_reserve_space)(void *, struct inode *); + +typedef void (*btf_trace_ext4_da_release_space)(void *, struct inode *, int); + +typedef void (*btf_trace_ext4_mb_bitmap_load)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_mb_buddy_bitmap_load)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_load_inode_bitmap)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_read_block_bitmap_load)(void *, struct super_block *, long unsigned int, bool); + +typedef void (*btf_trace_ext4_fallocate_enter)(void *, struct inode *, loff_t, loff_t, int); + +typedef void (*btf_trace_ext4_punch_hole)(void *, struct inode *, loff_t, loff_t, int); + +typedef void (*btf_trace_ext4_zero_range)(void *, struct inode *, loff_t, loff_t, int); + +typedef void (*btf_trace_ext4_fallocate_exit)(void *, struct inode *, loff_t, unsigned int, int); + +typedef void (*btf_trace_ext4_unlink_enter)(void *, struct inode *, struct dentry *); + +typedef void (*btf_trace_ext4_unlink_exit)(void *, struct dentry *, int); + +typedef void (*btf_trace_ext4_truncate_enter)(void *, struct inode *); + +typedef void (*btf_trace_ext4_truncate_exit)(void *, struct inode *); + +typedef void (*btf_trace_ext4_ext_convert_to_initialized_enter)(void *, struct inode *, struct ext4_map_blocks *, struct ext4_extent *); + +typedef void (*btf_trace_ext4_ext_convert_to_initialized_fastpath)(void *, struct inode *, struct ext4_map_blocks *, struct ext4_extent *, struct ext4_extent *); + +typedef void (*btf_trace_ext4_ext_map_blocks_enter)(void *, struct inode *, ext4_lblk_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_ind_map_blocks_enter)(void *, struct inode *, ext4_lblk_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_ext_map_blocks_exit)(void *, struct inode *, unsigned int, struct ext4_map_blocks *, int); + +typedef void (*btf_trace_ext4_ind_map_blocks_exit)(void *, struct inode *, unsigned int, struct ext4_map_blocks *, int); + +typedef void (*btf_trace_ext4_ext_load_extent)(void *, struct inode *, ext4_lblk_t, ext4_fsblk_t); + +typedef void (*btf_trace_ext4_load_inode)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_journal_start)(void *, struct super_block *, int, int, int, long unsigned int); + +typedef void (*btf_trace_ext4_journal_start_reserved)(void *, struct super_block *, int, long unsigned int); + +typedef void (*btf_trace_ext4_trim_extent)(void *, struct super_block *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); + +typedef void (*btf_trace_ext4_trim_all_free)(void *, struct super_block *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); + +typedef void (*btf_trace_ext4_ext_handle_unwritten_extents)(void *, struct inode *, struct ext4_map_blocks *, int, unsigned int, ext4_fsblk_t); + +typedef void (*btf_trace_ext4_get_implied_cluster_alloc_exit)(void *, struct super_block *, struct ext4_map_blocks *, int); + +typedef void (*btf_trace_ext4_ext_show_extent)(void *, struct inode *, ext4_lblk_t, ext4_fsblk_t, short unsigned int); + +typedef void (*btf_trace_ext4_remove_blocks)(void *, struct inode *, struct ext4_extent *, ext4_lblk_t, ext4_fsblk_t, struct partial_cluster *); + +typedef void (*btf_trace_ext4_ext_rm_leaf)(void *, struct inode *, ext4_lblk_t, struct ext4_extent *, struct partial_cluster *); + +typedef void (*btf_trace_ext4_ext_rm_idx)(void *, struct inode *, ext4_fsblk_t); + +typedef void 
(*btf_trace_ext4_ext_remove_space)(void *, struct inode *, ext4_lblk_t, ext4_lblk_t, int); + +typedef void (*btf_trace_ext4_ext_remove_space_done)(void *, struct inode *, ext4_lblk_t, ext4_lblk_t, int, struct partial_cluster *, __le16); + +typedef void (*btf_trace_ext4_es_insert_extent)(void *, struct inode *, struct extent_status *); + +typedef void (*btf_trace_ext4_es_cache_extent)(void *, struct inode *, struct extent_status *); + +typedef void (*btf_trace_ext4_es_remove_extent)(void *, struct inode *, ext4_lblk_t, ext4_lblk_t); + +typedef void (*btf_trace_ext4_es_find_extent_range_enter)(void *, struct inode *, ext4_lblk_t); + +typedef void (*btf_trace_ext4_es_find_extent_range_exit)(void *, struct inode *, struct extent_status *); + +typedef void (*btf_trace_ext4_es_lookup_extent_enter)(void *, struct inode *, ext4_lblk_t); + +typedef void (*btf_trace_ext4_es_lookup_extent_exit)(void *, struct inode *, struct extent_status *, int); + +typedef void (*btf_trace_ext4_es_shrink_count)(void *, struct super_block *, int, int); + +typedef void (*btf_trace_ext4_es_shrink_scan_enter)(void *, struct super_block *, int, int); + +typedef void (*btf_trace_ext4_es_shrink_scan_exit)(void *, struct super_block *, int, int); + +typedef void (*btf_trace_ext4_collapse_range)(void *, struct inode *, loff_t, loff_t); + +typedef void (*btf_trace_ext4_insert_range)(void *, struct inode *, loff_t, loff_t); + +typedef void (*btf_trace_ext4_es_shrink)(void *, struct super_block *, int, u64, int, int); + +typedef void (*btf_trace_ext4_es_insert_delayed_block)(void *, struct inode *, struct extent_status *, bool); + +typedef void (*btf_trace_ext4_fsmap_low_key)(void *, struct super_block *, u32, u32, u64, u64, u64); + +typedef void (*btf_trace_ext4_fsmap_high_key)(void *, struct super_block *, u32, u32, u64, u64, u64); + +typedef void (*btf_trace_ext4_fsmap_mapping)(void *, struct super_block *, u32, u32, u64, u64, u64); + +typedef void (*btf_trace_ext4_getfsmap_low_key)(void *, struct super_block *, struct ext4_fsmap *); + +typedef void (*btf_trace_ext4_getfsmap_high_key)(void *, struct super_block *, struct ext4_fsmap *); + +typedef void (*btf_trace_ext4_getfsmap_mapping)(void *, struct super_block *, struct ext4_fsmap *); + +typedef void (*btf_trace_ext4_shutdown)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_error)(void *, struct super_block *, const char *, unsigned int); + +typedef void (*btf_trace_ext4_prefetch_bitmaps)(void *, struct super_block *, ext4_group_t, ext4_group_t, unsigned int); + +typedef void (*btf_trace_ext4_lazy_itable_init)(void *, struct super_block *, ext4_group_t); + +typedef void (*btf_trace_ext4_fc_replay_scan)(void *, struct super_block *, int, int); + +typedef void (*btf_trace_ext4_fc_replay)(void *, struct super_block *, int, int, int, int); + +typedef void (*btf_trace_ext4_fc_commit_start)(void *, struct super_block *); + +typedef void (*btf_trace_ext4_fc_commit_stop)(void *, struct super_block *, int, int); + +typedef void (*btf_trace_ext4_fc_stats)(void *, struct super_block *); + +typedef void (*btf_trace_ext4_fc_track_create)(void *, struct inode *, struct dentry *, int); + +typedef void (*btf_trace_ext4_fc_track_link)(void *, struct inode *, struct dentry *, int); + +typedef void (*btf_trace_ext4_fc_track_unlink)(void *, struct inode *, struct dentry *, int); + +typedef void (*btf_trace_ext4_fc_track_inode)(void *, struct inode *, int); + +typedef void (*btf_trace_ext4_fc_track_range)(void *, struct inode *, long int, long int, int); + 
+struct ext4_err_translation { + int code; + int errno; +}; + +enum { + Opt_bsd_df = 0, + Opt_minix_df = 1, + Opt_grpid = 2, + Opt_nogrpid = 3, + Opt_resgid = 4, + Opt_resuid = 5, + Opt_sb = 6, + Opt_err_cont = 7, + Opt_err_panic = 8, + Opt_err_ro = 9, + Opt_nouid32 = 10, + Opt_debug = 11, + Opt_removed = 12, + Opt_user_xattr = 13, + Opt_nouser_xattr = 14, + Opt_acl = 15, + Opt_noacl = 16, + Opt_auto_da_alloc = 17, + Opt_noauto_da_alloc = 18, + Opt_noload = 19, + Opt_commit = 20, + Opt_min_batch_time = 21, + Opt_max_batch_time = 22, + Opt_journal_dev = 23, + Opt_journal_path = 24, + Opt_journal_checksum = 25, + Opt_journal_async_commit = 26, + Opt_abort = 27, + Opt_data_journal = 28, + Opt_data_ordered = 29, + Opt_data_writeback = 30, + Opt_data_err_abort = 31, + Opt_data_err_ignore = 32, + Opt_test_dummy_encryption = 33, + Opt_inlinecrypt = 34, + Opt_usrjquota = 35, + Opt_grpjquota = 36, + Opt_offusrjquota = 37, + Opt_offgrpjquota = 38, + Opt_jqfmt_vfsold = 39, + Opt_jqfmt_vfsv0 = 40, + Opt_jqfmt_vfsv1 = 41, + Opt_quota = 42, + Opt_noquota = 43, + Opt_barrier = 44, + Opt_nobarrier = 45, + Opt_err___2 = 46, + Opt_usrquota = 47, + Opt_grpquota = 48, + Opt_prjquota = 49, + Opt_i_version = 50, + Opt_dax = 51, + Opt_dax_always = 52, + Opt_dax_inode = 53, + Opt_dax_never = 54, + Opt_stripe = 55, + Opt_delalloc = 56, + Opt_nodelalloc = 57, + Opt_warn_on_error = 58, + Opt_nowarn_on_error = 59, + Opt_mblk_io_submit = 60, + Opt_lazytime = 61, + Opt_nolazytime = 62, + Opt_debug_want_extra_isize = 63, + Opt_nomblk_io_submit = 64, + Opt_block_validity = 65, + Opt_noblock_validity = 66, + Opt_inode_readahead_blks = 67, + Opt_journal_ioprio = 68, + Opt_dioread_nolock = 69, + Opt_dioread_lock = 70, + Opt_discard = 71, + Opt_nodiscard = 72, + Opt_init_itable = 73, + Opt_noinit_itable = 74, + Opt_max_dir_size_kb = 75, + Opt_nojournal_checksum = 76, + Opt_nombcache = 77, + Opt_no_prefetch_block_bitmaps = 78, + Opt_mb_optimize_scan = 79, +}; + +struct mount_opts { + int token; + int mount_opt; + int flags; +}; + +struct ext4_sb_encodings { + __u16 magic; + char *name; + char *version; +}; + +struct ext4_parsed_options { + long unsigned int journal_devnum; + unsigned int journal_ioprio; + int mb_optimize_scan; +}; + +struct ext4_mount_options { + long unsigned int s_mount_opt; + long unsigned int s_mount_opt2; + kuid_t s_resuid; + kgid_t s_resgid; + long unsigned int s_commit_interval; + u32 s_min_batch_time; + u32 s_max_batch_time; + int s_jquota_fmt; + char *s_qf_names[3]; +}; + +enum { + attr_noop = 0, + attr_delayed_allocation_blocks = 1, + attr_session_write_kbytes = 2, + attr_lifetime_write_kbytes = 3, + attr_reserved_clusters = 4, + attr_sra_exceeded_retry_limit = 5, + attr_inode_readahead = 6, + attr_trigger_test_error = 7, + attr_first_error_time = 8, + attr_last_error_time = 9, + attr_feature = 10, + attr_pointer_ui = 11, + attr_pointer_ul = 12, + attr_pointer_u64 = 13, + attr_pointer_u8 = 14, + attr_pointer_string = 15, + attr_pointer_atomic = 16, + attr_journal_task = 17, +}; + +enum { + ptr_explicit = 0, + ptr_ext4_sb_info_offset = 1, + ptr_ext4_super_block_offset = 2, +}; + +struct ext4_attr { + struct attribute attr; + short int attr_id; + short int attr_ptr; + short unsigned int attr_size; + union { + int offset; + void *explicit_ptr; + } u; +}; + +struct ext4_xattr_header { + __le32 h_magic; + __le32 h_refcount; + __le32 h_blocks; + __le32 h_hash; + __le32 h_checksum; + __u32 h_reserved[3]; +}; + +struct ext4_xattr_block_find { + struct ext4_xattr_search s; + struct buffer_head *bh; +}; + 
+struct ext4_fc_tl { + __le16 fc_tag; + __le16 fc_len; +}; + +struct ext4_fc_head { + __le32 fc_features; + __le32 fc_tid; +}; + +struct ext4_fc_add_range { + __le32 fc_ino; + __u8 fc_ex[12]; +}; + +struct ext4_fc_del_range { + __le32 fc_ino; + __le32 fc_lblk; + __le32 fc_len; +}; + +struct ext4_fc_dentry_info { + __le32 fc_parent_ino; + __le32 fc_ino; + __u8 fc_dname[0]; +}; + +struct ext4_fc_inode { + __le32 fc_ino; + __u8 fc_raw_inode[0]; +}; + +struct ext4_fc_tail { + __le32 fc_tid; + __le32 fc_crc; +}; + +struct ext4_fc_dentry_update { + int fcd_op; + int fcd_parent; + int fcd_ino; + struct qstr fcd_name; + unsigned char fcd_iname[32]; + struct list_head fcd_list; +}; + +struct __track_dentry_update_args { + struct dentry *dentry; + int op; +}; + +struct __track_range_args { + ext4_lblk_t start; + ext4_lblk_t end; +}; + +struct dentry_info_args { + int parent_ino; + int dname_len; + int ino; + int inode_len; + char *dname; +}; + +typedef struct { + __le16 e_tag; + __le16 e_perm; + __le32 e_id; +} ext4_acl_entry; + +typedef struct { + __le32 a_version; +} ext4_acl_header; + +struct commit_header { + __be32 h_magic; + __be32 h_blocktype; + __be32 h_sequence; + unsigned char h_chksum_type; + unsigned char h_chksum_size; + unsigned char h_padding[2]; + __be32 h_chksum[8]; + __be64 h_commit_sec; + __be32 h_commit_nsec; +}; + +struct journal_block_tag3_s { + __be32 t_blocknr; + __be32 t_flags; + __be32 t_blocknr_high; + __be32 t_checksum; +}; + +typedef struct journal_block_tag3_s journal_block_tag3_t; + +struct journal_block_tag_s { + __be32 t_blocknr; + __be16 t_checksum; + __be16 t_flags; + __be32 t_blocknr_high; +}; + +typedef struct journal_block_tag_s journal_block_tag_t; + +struct jbd2_journal_block_tail { + __be32 t_checksum; +}; + +struct jbd2_journal_revoke_header_s { + journal_header_t r_header; + __be32 r_count; +}; + +typedef struct jbd2_journal_revoke_header_s jbd2_journal_revoke_header_t; + +struct recovery_info { + tid_t start_transaction; + tid_t end_transaction; + int nr_replays; + int nr_revokes; + int nr_revoke_hits; +}; + +struct jbd2_revoke_table_s { + int hash_size; + int hash_shift; + struct list_head *hash_table; +}; + +struct jbd2_revoke_record_s { + struct list_head hash; + tid_t sequence; + long long unsigned int blocknr; +}; + +struct trace_event_raw_jbd2_checkpoint { + struct trace_entry ent; + dev_t dev; + int result; + char __data[0]; +}; + +struct trace_event_raw_jbd2_commit { + struct trace_entry ent; + dev_t dev; + char sync_commit; + int transaction; + char __data[0]; +}; + +struct trace_event_raw_jbd2_end_commit { + struct trace_entry ent; + dev_t dev; + char sync_commit; + int transaction; + int head; + char __data[0]; +}; + +struct trace_event_raw_jbd2_submit_inode_data { + struct trace_entry ent; + dev_t dev; + ino_t ino; + char __data[0]; +}; + +struct trace_event_raw_jbd2_handle_start_class { + struct trace_entry ent; + dev_t dev; + long unsigned int tid; + unsigned int type; + unsigned int line_no; + int requested_blocks; + char __data[0]; +}; + +struct trace_event_raw_jbd2_handle_extend { + struct trace_entry ent; + dev_t dev; + long unsigned int tid; + unsigned int type; + unsigned int line_no; + int buffer_credits; + int requested_blocks; + char __data[0]; +}; + +struct trace_event_raw_jbd2_handle_stats { + struct trace_entry ent; + dev_t dev; + long unsigned int tid; + unsigned int type; + unsigned int line_no; + int interval; + int sync; + int requested_blocks; + int dirtied_blocks; + char __data[0]; +}; + +struct 
trace_event_raw_jbd2_run_stats { + struct trace_entry ent; + dev_t dev; + long unsigned int tid; + long unsigned int wait; + long unsigned int request_delay; + long unsigned int running; + long unsigned int locked; + long unsigned int flushing; + long unsigned int logging; + __u32 handle_count; + __u32 blocks; + __u32 blocks_logged; + char __data[0]; +}; + +struct trace_event_raw_jbd2_checkpoint_stats { + struct trace_entry ent; + dev_t dev; + long unsigned int tid; + long unsigned int chp_time; + __u32 forced_to_close; + __u32 written; + __u32 dropped; + char __data[0]; +}; + +struct trace_event_raw_jbd2_update_log_tail { + struct trace_entry ent; + dev_t dev; + tid_t tail_sequence; + tid_t first_tid; + long unsigned int block_nr; + long unsigned int freed; + char __data[0]; +}; + +struct trace_event_raw_jbd2_write_superblock { + struct trace_entry ent; + dev_t dev; + int write_op; + char __data[0]; +}; + +struct trace_event_raw_jbd2_lock_buffer_stall { + struct trace_entry ent; + dev_t dev; + long unsigned int stall_ms; + char __data[0]; +}; + +struct trace_event_data_offsets_jbd2_checkpoint {}; + +struct trace_event_data_offsets_jbd2_commit {}; + +struct trace_event_data_offsets_jbd2_end_commit {}; + +struct trace_event_data_offsets_jbd2_submit_inode_data {}; + +struct trace_event_data_offsets_jbd2_handle_start_class {}; + +struct trace_event_data_offsets_jbd2_handle_extend {}; + +struct trace_event_data_offsets_jbd2_handle_stats {}; + +struct trace_event_data_offsets_jbd2_run_stats {}; + +struct trace_event_data_offsets_jbd2_checkpoint_stats {}; + +struct trace_event_data_offsets_jbd2_update_log_tail {}; + +struct trace_event_data_offsets_jbd2_write_superblock {}; + +struct trace_event_data_offsets_jbd2_lock_buffer_stall {}; + +typedef void (*btf_trace_jbd2_checkpoint)(void *, journal_t *, int); + +typedef void (*btf_trace_jbd2_start_commit)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_commit_locking)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_commit_flushing)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_commit_logging)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_drop_transaction)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_end_commit)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_submit_inode_data)(void *, struct inode *); + +typedef void (*btf_trace_jbd2_handle_start)(void *, dev_t, long unsigned int, unsigned int, unsigned int, int); + +typedef void (*btf_trace_jbd2_handle_restart)(void *, dev_t, long unsigned int, unsigned int, unsigned int, int); + +typedef void (*btf_trace_jbd2_handle_extend)(void *, dev_t, long unsigned int, unsigned int, unsigned int, int, int); + +typedef void (*btf_trace_jbd2_handle_stats)(void *, dev_t, long unsigned int, unsigned int, unsigned int, int, int, int, int); + +typedef void (*btf_trace_jbd2_run_stats)(void *, dev_t, long unsigned int, struct transaction_run_stats_s *); + +typedef void (*btf_trace_jbd2_checkpoint_stats)(void *, dev_t, long unsigned int, struct transaction_chp_stats_s *); + +typedef void (*btf_trace_jbd2_update_log_tail)(void *, journal_t *, tid_t, long unsigned int, long unsigned int); + +typedef void (*btf_trace_jbd2_write_superblock)(void *, journal_t *, int); + +typedef void (*btf_trace_jbd2_lock_buffer_stall)(void *, dev_t, long unsigned int); + +struct jbd2_stats_proc_session { + journal_t *journal; + struct transaction_stats_s *stats; + int start; + int max; +}; 
+ +struct meta_entry { + u64 data_block; + unsigned int index_block; + short unsigned int offset; + short unsigned int pad; +}; + +struct meta_index { + unsigned int inode_number; + unsigned int offset; + short unsigned int entries; + short unsigned int skip; + short unsigned int locked; + short unsigned int pad; + struct meta_entry meta_entry[127]; +}; + +struct squashfs_cache_entry; + +struct squashfs_cache { + char *name; + int entries; + int curr_blk; + int next_blk; + int num_waiters; + int unused; + int block_size; + int pages; + spinlock_t lock; + wait_queue_head_t wait_queue; + struct squashfs_cache_entry *entry; +}; + +struct squashfs_page_actor; + +struct squashfs_cache_entry { + u64 block; + int length; + int refcount; + u64 next_index; + int pending; + int error; + int num_waiters; + wait_queue_head_t wait_queue; + struct squashfs_cache *cache; + void **data; + struct squashfs_page_actor *actor; +}; + +struct squashfs_page_actor { + union { + void **buffer; + struct page **page; + }; + void *pageaddr; + void * (*squashfs_first_page)(struct squashfs_page_actor *); + void * (*squashfs_next_page)(struct squashfs_page_actor *); + void (*squashfs_finish_page)(struct squashfs_page_actor *); + int pages; + int length; + int next_page; +}; + +struct squashfs_decompressor; + +struct squashfs_stream; + +struct squashfs_sb_info { + const struct squashfs_decompressor *decompressor; + int devblksize; + int devblksize_log2; + struct squashfs_cache *block_cache; + struct squashfs_cache *fragment_cache; + struct squashfs_cache *read_page; + int next_meta_index; + __le64 *id_table; + __le64 *fragment_index; + __le64 *xattr_id_table; + struct mutex meta_index_mutex; + struct meta_index *meta_index; + struct squashfs_stream *stream; + __le64 *inode_lookup_table; + u64 inode_table; + u64 directory_table; + u64 xattr_table; + unsigned int block_size; + short unsigned int block_log; + long long int bytes_used; + unsigned int inodes; + unsigned int fragments; + int xattr_ids; + unsigned int ids; +}; + +struct squashfs_decompressor { + void * (*init)(struct squashfs_sb_info *, void *); + void * (*comp_opts)(struct squashfs_sb_info *, void *, int); + void (*free)(void *); + int (*decompress)(struct squashfs_sb_info *, void *, struct bio *, int, int, struct squashfs_page_actor *); + int id; + char *name; + int supported; +}; + +struct squashfs_dir_index { + __le32 index; + __le32 start_block; + __le32 size; + unsigned char name[0]; +}; + +struct squashfs_dir_entry { + __le16 offset; + __le16 inode_number; + __le16 type; + __le16 size; + char name[0]; +}; + +struct squashfs_dir_header { + __le32 count; + __le32 start_block; + __le32 inode_number; +}; + +struct squashfs_inode_info { + u64 start; + int offset; + u64 xattr; + unsigned int xattr_size; + int xattr_count; + union { + struct { + u64 fragment_block; + int fragment_size; + int fragment_offset; + u64 block_list_start; + }; + struct { + u64 dir_idx_start; + int dir_idx_offset; + int dir_idx_cnt; + int parent; + }; + }; + struct inode vfs_inode; +}; + +struct squashfs_fragment_entry { + __le64 start_block; + __le32 size; + unsigned int unused; +}; + +struct squashfs_base_inode { + __le16 inode_type; + __le16 mode; + __le16 uid; + __le16 guid; + __le32 mtime; + __le32 inode_number; +}; + +struct squashfs_ipc_inode { + __le16 inode_type; + __le16 mode; + __le16 uid; + __le16 guid; + __le32 mtime; + __le32 inode_number; + __le32 nlink; +}; + +struct squashfs_lipc_inode { + __le16 inode_type; + __le16 mode; + __le16 uid; + __le16 guid; + __le32 mtime; + 
__le32 inode_number; + __le32 nlink; + __le32 xattr; +}; + +struct squashfs_dev_inode { + __le16 inode_type; + __le16 mode; + __le16 uid; + __le16 guid; + __le32 mtime; + __le32 inode_number; + __le32 nlink; + __le32 rdev; +}; + +struct squashfs_ldev_inode { + __le16 inode_type; + __le16 mode; + __le16 uid; + __le16 guid; + __le32 mtime; + __le32 inode_number; + __le32 nlink; + __le32 rdev; + __le32 xattr; +}; + +struct squashfs_symlink_inode { + __le16 inode_type; + __le16 mode; + __le16 uid; + __le16 guid; + __le32 mtime; + __le32 inode_number; + __le32 nlink; + __le32 symlink_size; + char symlink[0]; +}; + +struct squashfs_reg_inode { + __le16 inode_type; + __le16 mode; + __le16 uid; + __le16 guid; + __le32 mtime; + __le32 inode_number; + __le32 start_block; + __le32 fragment; + __le32 offset; + __le32 file_size; + __le16 block_list[0]; +}; + +struct squashfs_lreg_inode { + __le16 inode_type; + __le16 mode; + __le16 uid; + __le16 guid; + __le32 mtime; + __le32 inode_number; + __le64 start_block; + __le64 file_size; + __le64 sparse; + __le32 nlink; + __le32 fragment; + __le32 offset; + __le32 xattr; + __le16 block_list[0]; +}; + +struct squashfs_dir_inode { + __le16 inode_type; + __le16 mode; + __le16 uid; + __le16 guid; + __le32 mtime; + __le32 inode_number; + __le32 start_block; + __le32 nlink; + __le16 file_size; + __le16 offset; + __le32 parent_inode; +}; + +struct squashfs_ldir_inode { + __le16 inode_type; + __le16 mode; + __le16 uid; + __le16 guid; + __le32 mtime; + __le32 inode_number; + __le32 nlink; + __le32 file_size; + __le32 start_block; + __le32 parent_inode; + __le16 i_count; + __le16 offset; + __le32 xattr; + struct squashfs_dir_index index[0]; +}; + +union squashfs_inode { + struct squashfs_base_inode base; + struct squashfs_dev_inode dev; + struct squashfs_ldev_inode ldev; + struct squashfs_symlink_inode symlink; + struct squashfs_reg_inode reg; + struct squashfs_lreg_inode lreg; + struct squashfs_dir_inode dir; + struct squashfs_ldir_inode ldir; + struct squashfs_ipc_inode ipc; + struct squashfs_lipc_inode lipc; +}; + +struct squashfs_super_block { + __le32 s_magic; + __le32 inodes; + __le32 mkfs_time; + __le32 block_size; + __le32 fragments; + __le16 compression; + __le16 block_log; + __le16 flags; + __le16 no_ids; + __le16 s_major; + __le16 s_minor; + __le64 root_inode; + __le64 bytes_used; + __le64 id_table_start; + __le64 xattr_id_table_start; + __le64 inode_table_start; + __le64 directory_table_start; + __le64 fragment_table_start; + __le64 lookup_table_start; +}; + +struct squashfs_stream { + void *stream; + struct mutex mutex; +}; + +struct squashfs_xattr_entry { + __le16 type; + __le16 size; + char data[0]; +}; + +struct squashfs_xattr_val { + __le32 vsize; + char value[0]; +}; + +struct squashfs_xattr_id { + __le64 xattr; + __le32 count; + __le32 size; +}; + +struct squashfs_xattr_id_table { + __le64 xattr_table_start; + __le32 xattr_ids; + __le32 unused; +}; + +struct lz4_comp_opts { + __le32 version; + __le32 flags; +}; + +struct squashfs_lz4 { + void *input; + void *output; +}; + +struct squashfs_lzo { + void *input; + void *output; +}; + +enum xz_mode { + XZ_SINGLE = 0, + XZ_PREALLOC = 1, + XZ_DYNALLOC = 2, +}; + +enum xz_ret { + XZ_OK = 0, + XZ_STREAM_END = 1, + XZ_UNSUPPORTED_CHECK = 2, + XZ_MEM_ERROR = 3, + XZ_MEMLIMIT_ERROR = 4, + XZ_FORMAT_ERROR = 5, + XZ_OPTIONS_ERROR = 6, + XZ_DATA_ERROR = 7, + XZ_BUF_ERROR = 8, +}; + +struct xz_buf { + const uint8_t *in; + size_t in_pos; + size_t in_size; + uint8_t *out; + size_t out_pos; + size_t out_size; +}; + 
+struct xz_dec; + +struct squashfs_xz { + struct xz_dec *state; + struct xz_buf buf; +}; + +struct disk_comp_opts { + __le32 dictionary_size; + __le32 flags; +}; + +struct comp_opts { + int dict_size; +}; + +typedef unsigned char Byte; + +typedef long unsigned int uLong; + +struct internal_state; + +struct z_stream_s { + const Byte *next_in; + uLong avail_in; + uLong total_in; + Byte *next_out; + uLong avail_out; + uLong total_out; + char *msg; + struct internal_state *state; + void *workspace; + int data_type; + uLong adler; + uLong reserved; +}; + +struct internal_state { + int dummy; +}; + +typedef struct z_stream_s z_stream; + +typedef z_stream *z_streamp; + +typedef enum { + ZSTD_error_no_error = 0, + ZSTD_error_GENERIC = 1, + ZSTD_error_prefix_unknown = 2, + ZSTD_error_version_unsupported = 3, + ZSTD_error_parameter_unknown = 4, + ZSTD_error_frameParameter_unsupported = 5, + ZSTD_error_frameParameter_unsupportedBy32bits = 6, + ZSTD_error_frameParameter_windowTooLarge = 7, + ZSTD_error_compressionParameter_unsupported = 8, + ZSTD_error_init_missing = 9, + ZSTD_error_memory_allocation = 10, + ZSTD_error_stage_wrong = 11, + ZSTD_error_dstSize_tooSmall = 12, + ZSTD_error_srcSize_wrong = 13, + ZSTD_error_corruption_detected = 14, + ZSTD_error_checksum_wrong = 15, + ZSTD_error_tableLog_tooLarge = 16, + ZSTD_error_maxSymbolValue_tooLarge = 17, + ZSTD_error_maxSymbolValue_tooSmall = 18, + ZSTD_error_dictionary_corrupted = 19, + ZSTD_error_dictionary_wrong = 20, + ZSTD_error_dictionaryCreation_failed = 21, + ZSTD_error_maxCode = 22, +} ZSTD_ErrorCode; + +struct ZSTD_inBuffer_s { + const void *src; + size_t size; + size_t pos; +}; + +typedef struct ZSTD_inBuffer_s ZSTD_inBuffer; + +struct ZSTD_outBuffer_s { + void *dst; + size_t size; + size_t pos; +}; + +typedef struct ZSTD_outBuffer_s ZSTD_outBuffer; + +struct ZSTD_DStream_s; + +typedef struct ZSTD_DStream_s ZSTD_DStream; + +struct workspace { + void *mem; + size_t mem_size; + size_t window_size; +}; + +struct ramfs_mount_opts { + umode_t mode; +}; + +struct ramfs_fs_info { + struct ramfs_mount_opts mount_opts; +}; + +enum ramfs_param { + Opt_mode___3 = 0, +}; + +enum hugetlbfs_size_type { + NO_SIZE = 0, + SIZE_STD = 1, + SIZE_PERCENT = 2, +}; + +struct hugetlbfs_fs_context { + struct hstate *hstate; + long long unsigned int max_size_opt; + long long unsigned int min_size_opt; + long int max_hpages; + long int nr_inodes; + long int min_hpages; + enum hugetlbfs_size_type max_val_type; + enum hugetlbfs_size_type min_val_type; + kuid_t uid; + kgid_t gid; + umode_t mode; +}; + +enum hugetlb_param { + Opt_gid___4 = 0, + Opt_min_size = 1, + Opt_mode___4 = 2, + Opt_nr_inodes___2 = 3, + Opt_pagesize = 4, + Opt_size___2 = 5, + Opt_uid___3 = 6, +}; + +typedef u16 wchar_t; + +struct nls_table { + const char *charset; + const char *alias; + int (*uni2char)(wchar_t, unsigned char *, int); + int (*char2uni)(const unsigned char *, int, wchar_t *); + const unsigned char *charset2lower; + const unsigned char *charset2upper; + struct module *owner; + struct nls_table *next; +}; + +struct fat_mount_options { + kuid_t fs_uid; + kgid_t fs_gid; + short unsigned int fs_fmask; + short unsigned int fs_dmask; + short unsigned int codepage; + int time_offset; + char *iocharset; + short unsigned int shortname; + unsigned char name_check; + unsigned char errors; + unsigned char nfs; + short unsigned int allow_utime; + unsigned int quiet: 1; + unsigned int showexec: 1; + unsigned int sys_immutable: 1; + unsigned int dotsOK: 1; + unsigned int isvfat: 1; + unsigned int 
utf8: 1; + unsigned int unicode_xlate: 1; + unsigned int numtail: 1; + unsigned int flush: 1; + unsigned int nocase: 1; + unsigned int usefree: 1; + unsigned int tz_set: 1; + unsigned int rodir: 1; + unsigned int discard: 1; + unsigned int dos1xfloppy: 1; +}; + +struct fatent_operations; + +struct msdos_sb_info { + short unsigned int sec_per_clus; + short unsigned int cluster_bits; + unsigned int cluster_size; + unsigned char fats; + unsigned char fat_bits; + short unsigned int fat_start; + long unsigned int fat_length; + long unsigned int dir_start; + short unsigned int dir_entries; + long unsigned int data_start; + long unsigned int max_cluster; + long unsigned int root_cluster; + long unsigned int fsinfo_sector; + struct mutex fat_lock; + struct mutex nfs_build_inode_lock; + struct mutex s_lock; + unsigned int prev_free; + unsigned int free_clusters; + unsigned int free_clus_valid; + struct fat_mount_options options; + struct nls_table *nls_disk; + struct nls_table *nls_io; + const void *dir_ops; + int dir_per_block; + int dir_per_block_bits; + unsigned int vol_id; + int fatent_shift; + const struct fatent_operations *fatent_ops; + struct inode *fat_inode; + struct inode *fsinfo_inode; + struct ratelimit_state ratelimit; + spinlock_t inode_hash_lock; + struct hlist_head inode_hashtable[256]; + spinlock_t dir_hash_lock; + struct hlist_head dir_hashtable[256]; + unsigned int dirty; + struct callback_head rcu; +}; + +struct fat_entry; + +struct fatent_operations { + void (*ent_blocknr)(struct super_block *, int, int *, sector_t *); + void (*ent_set_ptr)(struct fat_entry *, int); + int (*ent_bread)(struct super_block *, struct fat_entry *, int, sector_t); + int (*ent_get)(struct fat_entry *); + void (*ent_put)(struct fat_entry *, int); + int (*ent_next)(struct fat_entry *); +}; + +struct msdos_inode_info { + spinlock_t cache_lru_lock; + struct list_head cache_lru; + int nr_caches; + unsigned int cache_valid_id; + loff_t mmu_private; + int i_start; + int i_logstart; + int i_attrs; + loff_t i_pos; + struct hlist_node i_fat_hash; + struct hlist_node i_dir_hash; + struct rw_semaphore truncate_lock; + struct inode vfs_inode; +}; + +struct fat_entry { + int entry; + union { + u8 *ent12_p[2]; + __le16 *ent16_p; + __le32 *ent32_p; + } u; + int nr_bhs; + struct buffer_head *bhs[2]; + struct inode *fat_inode; +}; + +struct fat_cache { + struct list_head cache_list; + int nr_contig; + int fcluster; + int dcluster; +}; + +struct fat_cache_id { + unsigned int id; + int nr_contig; + int fcluster; + int dcluster; +}; + +struct compat_dirent { + u32 d_ino; + compat_off_t d_off; + u16 d_reclen; + char d_name[256]; +}; + +enum utf16_endian { + UTF16_HOST_ENDIAN = 0, + UTF16_LITTLE_ENDIAN = 1, + UTF16_BIG_ENDIAN = 2, +}; + +struct __fat_dirent { + long int d_ino; + __kernel_off_t d_off; + short unsigned int d_reclen; + char d_name[256]; +}; + +struct msdos_dir_entry { + __u8 name[11]; + __u8 attr; + __u8 lcase; + __u8 ctime_cs; + __le16 ctime; + __le16 cdate; + __le16 adate; + __le16 starthi; + __le16 time; + __le16 date; + __le16 start; + __le32 size; +}; + +struct msdos_dir_slot { + __u8 id; + __u8 name0_4[10]; + __u8 attr; + __u8 reserved; + __u8 alias_checksum; + __u8 name5_10[12]; + __le16 start; + __u8 name11_12[4]; +}; + +struct fat_slot_info { + loff_t i_pos; + loff_t slot_off; + int nr_slots; + struct msdos_dir_entry *de; + struct buffer_head *bh; +}; + +typedef long long unsigned int llu; + +enum { + PARSE_INVALID = 1, + PARSE_NOT_LONGNAME = 2, + PARSE_EOF = 3, +}; + +struct 
fat_ioctl_filldir_callback { + struct dir_context ctx; + void *dirent; + int result; + const char *longname; + int long_len; + const char *shortname; + int short_len; +}; + +struct fatent_ra { + sector_t cur; + sector_t limit; + unsigned int ra_blocks; + sector_t ra_advance; + sector_t ra_next; + sector_t ra_limit; +}; + +struct fat_boot_sector { + __u8 ignored[3]; + __u8 system_id[8]; + __u8 sector_size[2]; + __u8 sec_per_clus; + __le16 reserved; + __u8 fats; + __u8 dir_entries[2]; + __u8 sectors[2]; + __u8 media; + __le16 fat_length; + __le16 secs_track; + __le16 heads; + __le32 hidden; + __le32 total_sect; + union { + struct { + __u8 drive_number; + __u8 state; + __u8 signature; + __u8 vol_id[4]; + __u8 vol_label[11]; + __u8 fs_type[8]; + } fat16; + struct { + __le32 length; + __le16 flags; + __u8 version[2]; + __le32 root_cluster; + __le16 info_sector; + __le16 backup_boot; + __le16 reserved2[6]; + __u8 drive_number; + __u8 state; + __u8 signature; + __u8 vol_id[4]; + __u8 vol_label[11]; + __u8 fs_type[8]; + } fat32; + }; +}; + +struct fat_boot_fsinfo { + __le32 signature1; + __le32 reserved1[120]; + __le32 signature2; + __le32 free_clusters; + __le32 next_cluster; + __le32 reserved2[4]; +}; + +struct fat_bios_param_block { + u16 fat_sector_size; + u8 fat_sec_per_clus; + u16 fat_reserved; + u8 fat_fats; + u16 fat_dir_entries; + u16 fat_sectors; + u16 fat_fat_length; + u32 fat_total_sect; + u8 fat16_state; + u32 fat16_vol_id; + u32 fat32_length; + u32 fat32_root_cluster; + u16 fat32_info_sector; + u8 fat32_state; + u32 fat32_vol_id; +}; + +struct fat_floppy_defaults { + unsigned int nr_sectors; + unsigned int sec_per_clus; + unsigned int dir_entries; + unsigned int media; + unsigned int fat_length; +}; + +enum { + Opt_check_n = 0, + Opt_check_r = 1, + Opt_check_s = 2, + Opt_uid___4 = 3, + Opt_gid___5 = 4, + Opt_umask = 5, + Opt_dmask = 6, + Opt_fmask = 7, + Opt_allow_utime = 8, + Opt_codepage = 9, + Opt_usefree = 10, + Opt_nocase = 11, + Opt_quiet = 12, + Opt_showexec = 13, + Opt_debug___2 = 14, + Opt_immutable = 15, + Opt_dots = 16, + Opt_nodots = 17, + Opt_charset = 18, + Opt_shortname_lower = 19, + Opt_shortname_win95 = 20, + Opt_shortname_winnt = 21, + Opt_shortname_mixed = 22, + Opt_utf8_no = 23, + Opt_utf8_yes = 24, + Opt_uni_xl_no = 25, + Opt_uni_xl_yes = 26, + Opt_nonumtail_no = 27, + Opt_nonumtail_yes = 28, + Opt_obsolete = 29, + Opt_flush = 30, + Opt_tz_utc = 31, + Opt_rodir = 32, + Opt_err_cont___2 = 33, + Opt_err_panic___2 = 34, + Opt_err_ro___2 = 35, + Opt_discard___2 = 36, + Opt_nfs = 37, + Opt_time_offset = 38, + Opt_nfs_stale_rw = 39, + Opt_nfs_nostale_ro = 40, + Opt_err___3 = 41, + Opt_dos1xfloppy = 42, +}; + +struct fat_fid { + u32 i_gen; + u32 i_pos_low; + u16 i_pos_hi; + u16 parent_i_pos_hi; + u32 parent_i_pos_low; + u32 parent_i_gen; +}; + +struct shortname_info { + unsigned char lower: 1; + unsigned char upper: 1; + unsigned char valid: 1; +}; + +struct ecryptfs_mount_crypt_stat; + +struct ecryptfs_crypt_stat { + u32 flags; + unsigned int file_version; + size_t iv_bytes; + size_t metadata_size; + size_t extent_size; + size_t key_size; + size_t extent_shift; + unsigned int extent_mask; + struct ecryptfs_mount_crypt_stat *mount_crypt_stat; + struct crypto_skcipher *tfm; + struct crypto_shash *hash_tfm; + unsigned char cipher[32]; + unsigned char key[64]; + unsigned char root_iv[16]; + struct list_head keysig_list; + struct mutex keysig_list_mutex; + struct mutex cs_tfm_mutex; + struct mutex cs_mutex; +}; + +struct ecryptfs_mount_crypt_stat { + u32 flags; + struct 
list_head global_auth_tok_list; + struct mutex global_auth_tok_list_mutex; + size_t global_default_cipher_key_size; + size_t global_default_fn_cipher_key_bytes; + unsigned char global_default_cipher_name[32]; + unsigned char global_default_fn_cipher_name[32]; + char global_default_fnek_sig[17]; +}; + +struct ecryptfs_inode_info { + struct inode vfs_inode; + struct inode *wii_inode; + struct mutex lower_file_mutex; + atomic_t lower_file_count; + struct file *lower_file; + struct ecryptfs_crypt_stat crypt_stat; +}; + +struct ecryptfs_dentry_info { + struct path lower_path; + struct callback_head rcu; +}; + +struct ecryptfs_sb_info { + struct super_block *wsi_sb; + struct ecryptfs_mount_crypt_stat mount_crypt_stat; +}; + +struct ecryptfs_file_info { + struct file *wfi_file; + struct ecryptfs_crypt_stat *crypt_stat; +}; + +struct ecryptfs_getdents_callback { + struct dir_context ctx; + struct dir_context *caller; + struct super_block *sb; + int filldir_called; + int entries_written; +}; + +struct ecryptfs_session_key { + u32 flags; + u32 encrypted_key_size; + u32 decrypted_key_size; + u8 encrypted_key[512]; + u8 decrypted_key[64]; +}; + +struct ecryptfs_password { + u32 password_bytes; + s32 hash_algo; + u32 hash_iterations; + u32 session_key_encryption_key_bytes; + u32 flags; + u8 session_key_encryption_key[64]; + u8 signature[17]; + u8 salt[8]; +}; + +struct ecryptfs_private_key { + u32 key_size; + u32 data_len; + u8 signature[17]; + char pki_type[17]; + u8 data[0]; +}; + +struct ecryptfs_auth_tok { + u16 version; + u16 token_type; + u32 flags; + struct ecryptfs_session_key session_key; + u8 reserved[32]; + union { + struct ecryptfs_password password; + struct ecryptfs_private_key private_key; + } token; +}; + +struct ecryptfs_global_auth_tok { + u32 flags; + struct list_head mount_crypt_stat_list; + struct key *global_auth_tok_key; + unsigned char sig[17]; +}; + +struct ecryptfs_key_tfm { + struct crypto_skcipher *key_tfm; + size_t key_size; + struct mutex key_tfm_mutex; + struct list_head key_tfm_list; + unsigned char cipher_name[32]; +}; + +enum { + ecryptfs_opt_sig = 0, + ecryptfs_opt_ecryptfs_sig = 1, + ecryptfs_opt_cipher = 2, + ecryptfs_opt_ecryptfs_cipher = 3, + ecryptfs_opt_ecryptfs_key_bytes = 4, + ecryptfs_opt_passthrough = 5, + ecryptfs_opt_xattr_metadata = 6, + ecryptfs_opt_encrypted_view = 7, + ecryptfs_opt_fnek_sig = 8, + ecryptfs_opt_fn_cipher = 9, + ecryptfs_opt_fn_cipher_key_bytes = 10, + ecryptfs_opt_unlink_sigs = 11, + ecryptfs_opt_mount_auth_tok_only = 12, + ecryptfs_opt_check_dev_ruid = 13, + ecryptfs_opt_err = 14, +}; + +struct ecryptfs_cache_info { + struct kmem_cache **cache; + const char *name; + size_t size; + slab_flags_t flags; + void (*ctor)(void *); +}; + +struct ecryptfs_key_sig { + struct list_head crypt_stat_list; + char keysig[17]; +}; + +struct ecryptfs_filename { + struct list_head crypt_stat_list; + u32 flags; + u32 seq_no; + char *filename; + char *encrypted_filename; + size_t filename_size; + size_t encrypted_filename_size; + char fnek_sig[16]; + char dentry_name[57]; +}; + +struct extent_crypt_result { + struct completion completion; + int rc; +}; + +struct ecryptfs_flag_map_elem { + u32 file_flag; + u32 local_flag; +}; + +struct ecryptfs_cipher_code_str_map_elem { + char cipher_str[16]; + u8 cipher_code; +}; + +struct encrypted_key_payload { + struct callback_head rcu; + char *format; + char *master_desc; + char *datalen; + u8 *iv; + u8 *encrypted_data; + short unsigned int datablob_len; + short unsigned int decrypted_datalen; + short unsigned int 
payload_datalen; + short unsigned int encrypted_key_format; + u8 *decrypted_data; + u8 payload_data[0]; +}; + +enum ecryptfs_token_types { + ECRYPTFS_PASSWORD = 0, + ECRYPTFS_PRIVATE_KEY = 1, +}; + +struct ecryptfs_key_record { + unsigned char type; + size_t enc_key_size; + unsigned char sig[8]; + unsigned char enc_key[512]; +}; + +struct ecryptfs_auth_tok_list_item { + unsigned char encrypted_session_key[64]; + struct list_head list; + struct ecryptfs_auth_tok auth_tok; +}; + +struct ecryptfs_message { + u32 index; + u32 data_len; + u8 data[0]; +}; + +struct ecryptfs_msg_ctx { + u8 state; + u8 type; + u32 index; + u32 counter; + size_t msg_size; + struct ecryptfs_message *msg; + struct task_struct *task; + struct list_head node; + struct list_head daemon_out_list; + struct mutex mux; +}; + +struct ecryptfs_write_tag_70_packet_silly_stack { + u8 cipher_code; + size_t max_packet_size; + size_t packet_size_len; + size_t block_aligned_filename_size; + size_t block_size; + size_t i; + size_t j; + size_t num_rand_bytes; + struct mutex *tfm_mutex; + char *block_aligned_filename; + struct ecryptfs_auth_tok *auth_tok; + struct scatterlist src_sg[2]; + struct scatterlist dst_sg[2]; + struct crypto_skcipher *skcipher_tfm; + struct skcipher_request *skcipher_req; + char iv[16]; + char hash[16]; + char tmp_hash[16]; + struct crypto_shash *hash_tfm; + struct shash_desc *hash_desc; +}; + +struct ecryptfs_parse_tag_70_packet_silly_stack { + u8 cipher_code; + size_t max_packet_size; + size_t packet_size_len; + size_t parsed_tag_70_packet_size; + size_t block_aligned_filename_size; + size_t block_size; + size_t i; + struct mutex *tfm_mutex; + char *decrypted_filename; + struct ecryptfs_auth_tok *auth_tok; + struct scatterlist src_sg[2]; + struct scatterlist dst_sg[2]; + struct crypto_skcipher *skcipher_tfm; + struct skcipher_request *skcipher_req; + char fnek_sig_hex[17]; + char iv[16]; + char cipher_string[32]; +}; + +struct ecryptfs_open_req { + struct file **lower_file; + struct path path; + struct completion done; + struct list_head kthread_ctl_list; +}; + +struct ecryptfs_kthread_ctl { + u32 flags; + struct mutex mux; + struct list_head req_list; + wait_queue_head_t wait; +}; + +struct ecryptfs_daemon { + u32 flags; + u32 num_queued_msg_ctx; + struct file *file; + struct mutex mux; + struct list_head msg_ctx_out_queue; + wait_queue_head_t wait; + struct hlist_node euid_chain; +}; + +struct getdents_callback___2 { + struct dir_context ctx; + char *name; + u64 ino; + int found; + int sequence; +}; + +typedef u32 unicode_t; + +struct utf8_table { + int cmask; + int cval; + int shift; + long int lmask; + long int lval; +}; + +struct utf8data; + +struct utf8cursor { + const struct utf8data *data; + const char *s; + const char *p; + const char *ss; + const char *sp; + unsigned int len; + unsigned int slen; + short int ccc; + short int nccc; + unsigned char hangul[12]; +}; + +struct utf8data { + unsigned int maxage; + unsigned int offset; +}; + +typedef const unsigned char utf8trie_t; + +typedef const unsigned char utf8leaf_t; + +enum fuse_opcode { + FUSE_LOOKUP = 1, + FUSE_FORGET = 2, + FUSE_GETATTR = 3, + FUSE_SETATTR = 4, + FUSE_READLINK = 5, + FUSE_SYMLINK = 6, + FUSE_MKNOD = 8, + FUSE_MKDIR = 9, + FUSE_UNLINK = 10, + FUSE_RMDIR = 11, + FUSE_RENAME = 12, + FUSE_LINK = 13, + FUSE_OPEN = 14, + FUSE_READ = 15, + FUSE_WRITE = 16, + FUSE_STATFS = 17, + FUSE_RELEASE = 18, + FUSE_FSYNC = 20, + FUSE_SETXATTR = 21, + FUSE_GETXATTR = 22, + FUSE_LISTXATTR = 23, + FUSE_REMOVEXATTR = 24, + FUSE_FLUSH = 25, + 
FUSE_INIT = 26, + FUSE_OPENDIR = 27, + FUSE_READDIR = 28, + FUSE_RELEASEDIR = 29, + FUSE_FSYNCDIR = 30, + FUSE_GETLK = 31, + FUSE_SETLK = 32, + FUSE_SETLKW = 33, + FUSE_ACCESS = 34, + FUSE_CREATE = 35, + FUSE_INTERRUPT = 36, + FUSE_BMAP = 37, + FUSE_DESTROY = 38, + FUSE_IOCTL = 39, + FUSE_POLL = 40, + FUSE_NOTIFY_REPLY = 41, + FUSE_BATCH_FORGET = 42, + FUSE_FALLOCATE = 43, + FUSE_READDIRPLUS = 44, + FUSE_RENAME2 = 45, + FUSE_LSEEK = 46, + FUSE_COPY_FILE_RANGE = 47, + FUSE_SETUPMAPPING = 48, + FUSE_REMOVEMAPPING = 49, + FUSE_SYNCFS = 50, + CUSE_INIT = 4096, + CUSE_INIT_BSWAP_RESERVED = 1048576, + FUSE_INIT_BSWAP_RESERVED = 436207616, +}; + +enum fuse_notify_code { + FUSE_NOTIFY_POLL = 1, + FUSE_NOTIFY_INVAL_INODE = 2, + FUSE_NOTIFY_INVAL_ENTRY = 3, + FUSE_NOTIFY_STORE = 4, + FUSE_NOTIFY_RETRIEVE = 5, + FUSE_NOTIFY_DELETE = 6, + FUSE_NOTIFY_CODE_MAX = 7, +}; + +struct fuse_forget_in { + uint64_t nlookup; +}; + +struct fuse_forget_one { + uint64_t nodeid; + uint64_t nlookup; +}; + +struct fuse_batch_forget_in { + uint32_t count; + uint32_t dummy; +}; + +struct fuse_interrupt_in { + uint64_t unique; +}; + +struct fuse_notify_poll_wakeup_out { + uint64_t kh; +}; + +struct fuse_in_header { + uint32_t len; + uint32_t opcode; + uint64_t unique; + uint64_t nodeid; + uint32_t uid; + uint32_t gid; + uint32_t pid; + uint32_t padding; +}; + +struct fuse_out_header { + uint32_t len; + int32_t error; + uint64_t unique; +}; + +struct fuse_notify_inval_inode_out { + uint64_t ino; + int64_t off; + int64_t len; +}; + +struct fuse_notify_inval_entry_out { + uint64_t parent; + uint32_t namelen; + uint32_t padding; +}; + +struct fuse_notify_delete_out { + uint64_t parent; + uint64_t child; + uint32_t namelen; + uint32_t padding; +}; + +struct fuse_notify_store_out { + uint64_t nodeid; + uint64_t offset; + uint32_t size; + uint32_t padding; +}; + +struct fuse_notify_retrieve_out { + uint64_t notify_unique; + uint64_t nodeid; + uint64_t offset; + uint32_t size; + uint32_t padding; +}; + +struct fuse_notify_retrieve_in { + uint64_t dummy1; + uint64_t offset; + uint32_t size; + uint32_t dummy2; + uint64_t dummy3; + uint64_t dummy4; +}; + +struct fuse_forget_link { + struct fuse_forget_one forget_one; + struct fuse_forget_link *next; +}; + +struct fuse_mount; + +struct fuse_release_args; + +struct fuse_file { + struct fuse_mount *fm; + struct fuse_release_args *release_args; + u64 kh; + u64 fh; + u64 nodeid; + refcount_t count; + u32 open_flags; + struct list_head write_entry; + struct { + struct mutex lock; + loff_t pos; + loff_t cache_off; + u64 version; + } readdir; + struct rb_node polled_node; + wait_queue_head_t poll_wait; + bool flock: 1; +}; + +struct fuse_conn; + +struct fuse_mount { + struct fuse_conn *fc; + struct super_block *sb; + struct list_head fc_entry; +}; + +struct fuse_in_arg { + unsigned int size; + const void *value; +}; + +struct fuse_arg { + unsigned int size; + void *value; +}; + +struct fuse_page_desc { + unsigned int length; + unsigned int offset; +}; + +struct fuse_args { + uint64_t nodeid; + uint32_t opcode; + short unsigned int in_numargs; + short unsigned int out_numargs; + bool force: 1; + bool noreply: 1; + bool nocreds: 1; + bool in_pages: 1; + bool out_pages: 1; + bool out_argvar: 1; + bool page_zeroing: 1; + bool page_replace: 1; + bool may_block: 1; + struct fuse_in_arg in_args[3]; + struct fuse_arg out_args[2]; + void (*end)(struct fuse_mount *, struct fuse_args *, int); +}; + +struct fuse_args_pages { + struct fuse_args args; + struct page **pages; + struct fuse_page_desc 
*descs; + unsigned int num_pages; +}; + +enum fuse_req_flag { + FR_ISREPLY = 0, + FR_FORCE = 1, + FR_BACKGROUND = 2, + FR_WAITING = 3, + FR_ABORTED = 4, + FR_INTERRUPTED = 5, + FR_LOCKED = 6, + FR_PENDING = 7, + FR_SENT = 8, + FR_FINISHED = 9, + FR_PRIVATE = 10, + FR_ASYNC = 11, +}; + +struct fuse_req { + struct list_head list; + struct list_head intr_entry; + struct fuse_args *args; + refcount_t count; + long unsigned int flags; + struct { + struct fuse_in_header h; + } in; + struct { + struct fuse_out_header h; + } out; + wait_queue_head_t waitq; + void *argbuf; + struct fuse_mount *fm; +}; + +struct fuse_iqueue; + +struct fuse_iqueue_ops { + void (*wake_forget_and_unlock)(struct fuse_iqueue *); + void (*wake_interrupt_and_unlock)(struct fuse_iqueue *); + void (*wake_pending_and_unlock)(struct fuse_iqueue *); + void (*release)(struct fuse_iqueue *); +}; + +struct fuse_iqueue { + unsigned int connected; + spinlock_t lock; + wait_queue_head_t waitq; + u64 reqctr; + struct list_head pending; + struct list_head interrupts; + struct fuse_forget_link forget_list_head; + struct fuse_forget_link *forget_list_tail; + int forget_batch; + struct fasync_struct *fasync; + const struct fuse_iqueue_ops *ops; + void *priv; +}; + +struct fuse_pqueue { + unsigned int connected; + spinlock_t lock; + struct list_head *processing; + struct list_head io; +}; + +struct fuse_dev { + struct fuse_conn *fc; + struct fuse_pqueue pq; + struct list_head entry; +}; + +struct fuse_conn_dax; + +struct fuse_sync_bucket; + +struct fuse_conn { + spinlock_t lock; + refcount_t count; + atomic_t dev_count; + struct callback_head rcu; + kuid_t user_id; + kgid_t group_id; + struct pid_namespace *pid_ns; + struct user_namespace *user_ns; + unsigned int max_read; + unsigned int max_write; + unsigned int max_pages; + unsigned int max_pages_limit; + struct fuse_iqueue iq; + atomic64_t khctr; + struct rb_root polled_files; + unsigned int max_background; + unsigned int congestion_threshold; + unsigned int num_background; + unsigned int active_background; + struct list_head bg_queue; + spinlock_t bg_lock; + int initialized; + int blocked; + wait_queue_head_t blocked_waitq; + unsigned int connected; + bool aborted; + unsigned int conn_error: 1; + unsigned int conn_init: 1; + unsigned int async_read: 1; + unsigned int abort_err: 1; + unsigned int atomic_o_trunc: 1; + unsigned int export_support: 1; + unsigned int writeback_cache: 1; + unsigned int parallel_dirops: 1; + unsigned int handle_killpriv: 1; + unsigned int cache_symlinks: 1; + unsigned int legacy_opts_show: 1; + unsigned int handle_killpriv_v2: 1; + unsigned int no_open: 1; + unsigned int no_opendir: 1; + unsigned int no_fsync: 1; + unsigned int no_fsyncdir: 1; + unsigned int no_flush: 1; + unsigned int no_setxattr: 1; + unsigned int setxattr_ext: 1; + unsigned int no_getxattr: 1; + unsigned int no_listxattr: 1; + unsigned int no_removexattr: 1; + unsigned int no_lock: 1; + unsigned int no_access: 1; + unsigned int no_create: 1; + unsigned int no_interrupt: 1; + unsigned int no_bmap: 1; + unsigned int no_poll: 1; + unsigned int big_writes: 1; + unsigned int dont_mask: 1; + unsigned int no_flock: 1; + unsigned int no_fallocate: 1; + unsigned int no_rename2: 1; + unsigned int auto_inval_data: 1; + unsigned int explicit_inval_data: 1; + unsigned int do_readdirplus: 1; + unsigned int readdirplus_auto: 1; + unsigned int async_dio: 1; + unsigned int no_lseek: 1; + unsigned int posix_acl: 1; + unsigned int default_permissions: 1; + unsigned int allow_other: 1; + unsigned int 
no_copy_file_range: 1; + unsigned int destroy: 1; + unsigned int delete_stale: 1; + unsigned int no_control: 1; + unsigned int no_force_umount: 1; + unsigned int auto_submounts: 1; + unsigned int sync_fs: 1; + atomic_t num_waiting; + unsigned int minor; + struct list_head entry; + dev_t dev; + struct dentry *ctl_dentry[5]; + int ctl_ndents; + u32 scramble_key[4]; + atomic64_t attr_version; + void (*release)(struct fuse_conn *); + struct rw_semaphore killsb; + struct list_head devices; + struct fuse_conn_dax *dax; + struct list_head mounts; + struct fuse_sync_bucket *curr_bucket; +}; + +struct fuse_sync_bucket { + atomic_t count; + wait_queue_head_t waitq; + struct callback_head rcu; +}; + +struct fuse_copy_state { + int write; + struct fuse_req *req; + struct iov_iter *iter; + struct pipe_buffer *pipebufs; + struct pipe_buffer *currbuf; + struct pipe_inode_info *pipe; + long unsigned int nr_segs; + struct page *pg; + unsigned int len; + unsigned int offset; + unsigned int move_pages: 1; +}; + +struct fuse_retrieve_args { + struct fuse_args_pages ap; + struct fuse_notify_retrieve_in inarg; +}; + +struct fuse_attr { + uint64_t ino; + uint64_t size; + uint64_t blocks; + uint64_t atime; + uint64_t mtime; + uint64_t ctime; + uint32_t atimensec; + uint32_t mtimensec; + uint32_t ctimensec; + uint32_t mode; + uint32_t nlink; + uint32_t uid; + uint32_t gid; + uint32_t rdev; + uint32_t blksize; + uint32_t flags; +}; + +struct fuse_entry_out { + uint64_t nodeid; + uint64_t generation; + uint64_t entry_valid; + uint64_t attr_valid; + uint32_t entry_valid_nsec; + uint32_t attr_valid_nsec; + struct fuse_attr attr; +}; + +struct fuse_getattr_in { + uint32_t getattr_flags; + uint32_t dummy; + uint64_t fh; +}; + +struct fuse_attr_out { + uint64_t attr_valid; + uint32_t attr_valid_nsec; + uint32_t dummy; + struct fuse_attr attr; +}; + +struct fuse_mknod_in { + uint32_t mode; + uint32_t rdev; + uint32_t umask; + uint32_t padding; +}; + +struct fuse_mkdir_in { + uint32_t mode; + uint32_t umask; +}; + +struct fuse_rename2_in { + uint64_t newdir; + uint32_t flags; + uint32_t padding; +}; + +struct fuse_link_in { + uint64_t oldnodeid; +}; + +struct fuse_setattr_in { + uint32_t valid; + uint32_t padding; + uint64_t fh; + uint64_t size; + uint64_t lock_owner; + uint64_t atime; + uint64_t mtime; + uint64_t ctime; + uint32_t atimensec; + uint32_t mtimensec; + uint32_t ctimensec; + uint32_t mode; + uint32_t unused4; + uint32_t uid; + uint32_t gid; + uint32_t unused5; +}; + +struct fuse_create_in { + uint32_t flags; + uint32_t mode; + uint32_t umask; + uint32_t open_flags; +}; + +struct fuse_open_out { + uint64_t fh; + uint32_t open_flags; + uint32_t padding; +}; + +struct fuse_access_in { + uint32_t mask; + uint32_t padding; +}; + +struct fuse_inode_dax; + +struct fuse_inode { + struct inode inode; + u64 nodeid; + u64 nlookup; + struct fuse_forget_link *forget; + u64 i_time; + u32 inval_mask; + umode_t orig_i_mode; + u64 orig_ino; + u64 attr_version; + union { + struct { + struct list_head write_files; + struct list_head queued_writes; + int writectr; + wait_queue_head_t page_waitq; + struct rb_root writepages; + }; + struct { + bool cached; + loff_t size; + loff_t pos; + u64 version; + struct timespec64 mtime; + u64 iversion; + spinlock_t lock; + } rdc; + }; + long unsigned int state; + struct mutex mutex; + spinlock_t lock; + struct rw_semaphore i_mmap_sem; + struct fuse_inode_dax *dax; +}; + +enum { + FUSE_I_ADVISE_RDPLUS = 0, + FUSE_I_INIT_RDPLUS = 1, + FUSE_I_SIZE_UNSTABLE = 2, + FUSE_I_BAD = 3, +}; + +struct 
fuse_file_lock { + uint64_t start; + uint64_t end; + uint32_t type; + uint32_t pid; +}; + +struct fuse_open_in { + uint32_t flags; + uint32_t open_flags; +}; + +struct fuse_release_in { + uint64_t fh; + uint32_t flags; + uint32_t release_flags; + uint64_t lock_owner; +}; + +struct fuse_flush_in { + uint64_t fh; + uint32_t unused; + uint32_t padding; + uint64_t lock_owner; +}; + +struct fuse_read_in { + uint64_t fh; + uint64_t offset; + uint32_t size; + uint32_t read_flags; + uint64_t lock_owner; + uint32_t flags; + uint32_t padding; +}; + +struct fuse_write_in { + uint64_t fh; + uint64_t offset; + uint32_t size; + uint32_t write_flags; + uint64_t lock_owner; + uint32_t flags; + uint32_t padding; +}; + +struct fuse_write_out { + uint32_t size; + uint32_t padding; +}; + +struct fuse_fsync_in { + uint64_t fh; + uint32_t fsync_flags; + uint32_t padding; +}; + +struct fuse_lk_in { + uint64_t fh; + uint64_t owner; + struct fuse_file_lock lk; + uint32_t lk_flags; + uint32_t padding; +}; + +struct fuse_lk_out { + struct fuse_file_lock lk; +}; + +struct fuse_bmap_in { + uint64_t block; + uint32_t blocksize; + uint32_t padding; +}; + +struct fuse_bmap_out { + uint64_t block; +}; + +struct fuse_poll_in { + uint64_t fh; + uint64_t kh; + uint32_t flags; + uint32_t events; +}; + +struct fuse_poll_out { + uint32_t revents; + uint32_t padding; +}; + +struct fuse_fallocate_in { + uint64_t fh; + uint64_t offset; + uint64_t length; + uint32_t mode; + uint32_t padding; +}; + +struct fuse_lseek_in { + uint64_t fh; + uint64_t offset; + uint32_t whence; + uint32_t padding; +}; + +struct fuse_lseek_out { + uint64_t offset; +}; + +struct fuse_copy_file_range_in { + uint64_t fh_in; + uint64_t off_in; + uint64_t nodeid_out; + uint64_t fh_out; + uint64_t off_out; + uint64_t len; + uint64_t flags; +}; + +struct fuse_release_args { + struct fuse_args args; + struct fuse_release_in inarg; + struct inode *inode; +}; + +struct fuse_io_priv { + struct kref refcnt; + int async; + spinlock_t lock; + unsigned int reqs; + ssize_t bytes; + size_t size; + __u64 offset; + bool write; + bool should_dirty; + int err; + struct kiocb *iocb; + struct completion *done; + bool blocking; +}; + +struct fuse_io_args { + union { + struct { + struct fuse_read_in in; + u64 attr_ver; + } read; + struct { + struct fuse_write_in in; + struct fuse_write_out out; + bool page_locked; + } write; + }; + struct fuse_args_pages ap; + struct fuse_io_priv *io; + struct fuse_file *ff; +}; + +struct fuse_writepage_args { + struct fuse_io_args ia; + struct rb_node writepages_entry; + struct list_head queue_entry; + struct fuse_writepage_args *next; + struct inode *inode; + struct fuse_sync_bucket *bucket; +}; + +struct fuse_fill_wb_data { + struct fuse_writepage_args *wpa; + struct fuse_file *ff; + struct inode *inode; + struct page **orig_pages; + unsigned int max_pages; +}; + +struct fuse_kstatfs { + uint64_t blocks; + uint64_t bfree; + uint64_t bavail; + uint64_t files; + uint64_t ffree; + uint32_t bsize; + uint32_t namelen; + uint32_t frsize; + uint32_t padding; + uint32_t spare[6]; +}; + +struct fuse_statfs_out { + struct fuse_kstatfs st; +}; + +struct fuse_init_in { + uint32_t major; + uint32_t minor; + uint32_t max_readahead; + uint32_t flags; +}; + +struct fuse_init_out { + uint32_t major; + uint32_t minor; + uint32_t max_readahead; + uint32_t flags; + uint16_t max_background; + uint16_t congestion_threshold; + uint32_t max_write; + uint32_t time_gran; + uint16_t max_pages; + uint16_t map_alignment; + uint32_t unused[8]; +}; + +struct fuse_syncfs_in 
{ + uint64_t padding; +}; + +struct fuse_fs_context { + int fd; + unsigned int rootmode; + kuid_t user_id; + kgid_t group_id; + bool is_bdev: 1; + bool fd_present: 1; + bool rootmode_present: 1; + bool user_id_present: 1; + bool group_id_present: 1; + bool default_permissions: 1; + bool allow_other: 1; + bool destroy: 1; + bool no_control: 1; + bool no_force_umount: 1; + bool legacy_opts_show: 1; + bool dax: 1; + unsigned int max_read; + unsigned int blksize; + const char *subtype; + struct dax_device *dax_dev; + void **fudptr; +}; + +enum { + OPT_SOURCE = 0, + OPT_SUBTYPE = 1, + OPT_FD = 2, + OPT_ROOTMODE = 3, + OPT_USER_ID = 4, + OPT_GROUP_ID = 5, + OPT_DEFAULT_PERMISSIONS = 6, + OPT_ALLOW_OTHER = 7, + OPT_MAX_READ = 8, + OPT_BLKSIZE = 9, + OPT_ERR = 10, +}; + +struct fuse_inode_handle { + u64 nodeid; + u32 generation; +}; + +struct fuse_init_args { + struct fuse_args args; + struct fuse_init_in in; + struct fuse_init_out out; +}; + +struct fuse_setxattr_in { + uint32_t size; + uint32_t flags; + uint32_t setxattr_flags; + uint32_t padding; +}; + +struct fuse_getxattr_in { + uint32_t size; + uint32_t padding; +}; + +struct fuse_getxattr_out { + uint32_t size; + uint32_t padding; +}; + +struct fuse_dirent { + uint64_t ino; + uint64_t off; + uint32_t namelen; + uint32_t type; + char name[0]; +}; + +struct fuse_direntplus { + struct fuse_entry_out entry_out; + struct fuse_dirent dirent; +}; + +enum fuse_parse_result { + FOUND_ERR = 4294967295, + FOUND_NONE = 0, + FOUND_SOME = 1, + FOUND_ALL = 2, +}; + +struct fuse_ioctl_in { + uint64_t fh; + uint32_t flags; + uint32_t cmd; + uint64_t arg; + uint32_t in_size; + uint32_t out_size; +}; + +struct fuse_ioctl_iovec { + uint64_t base; + uint64_t len; +}; + +struct fuse_ioctl_out { + int32_t result; + uint32_t flags; + uint32_t in_iovs; + uint32_t out_iovs; +}; + +struct fuse_setupmapping_in { + uint64_t fh; + uint64_t foffset; + uint64_t len; + uint64_t flags; + uint64_t moffset; +}; + +struct fuse_removemapping_in { + uint32_t count; +}; + +struct fuse_removemapping_one { + uint64_t moffset; + uint64_t len; +}; + +struct fuse_inode_dax { + struct rw_semaphore sem; + struct rb_root_cached tree; + long unsigned int nr; +}; + +struct fuse_conn_dax { + struct dax_device *dev; + spinlock_t lock; + long unsigned int nr_busy_ranges; + struct list_head busy_ranges; + struct delayed_work free_work; + wait_queue_head_t range_waitq; + long int nr_free_ranges; + struct list_head free_ranges; + long unsigned int nr_ranges; +}; + +struct fuse_dax_mapping { + struct inode *inode; + struct list_head list; + struct interval_tree_node itn; + struct list_head busy_list; + u64 window_offset; + loff_t length; + bool writable; + refcount_t refcnt; +}; + +struct debugfs_fsdata { + const struct file_operations *real_fops; + refcount_t active_users; + struct completion active_users_drained; +}; + +struct debugfs_mount_opts { + kuid_t uid; + kgid_t gid; + umode_t mode; +}; + +enum { + Opt_uid___5 = 0, + Opt_gid___6 = 1, + Opt_mode___5 = 2, + Opt_err___4 = 3, +}; + +struct debugfs_fs_info { + struct debugfs_mount_opts mount_opts; +}; + +struct debugfs_blob_wrapper { + void *data; + long unsigned int size; +}; + +struct debugfs_reg32 { + char *name; + long unsigned int offset; +}; + +struct debugfs_regset32 { + const struct debugfs_reg32 *regs; + int nregs; + void *base; + struct device *dev; +}; + +struct debugfs_u32_array { + u32 *array; + u32 n_elements; +}; + +struct debugfs_devm_entry { + int (*read)(struct seq_file *, void *); + struct device *dev; +}; + +struct 
tracefs_dir_ops { + int (*mkdir)(const char *); + int (*rmdir)(const char *); +}; + +struct tracefs_mount_opts { + kuid_t uid; + kgid_t gid; + umode_t mode; +}; + +struct tracefs_fs_info { + struct tracefs_mount_opts mount_opts; +}; + +enum pstore_type_id { + PSTORE_TYPE_DMESG = 0, + PSTORE_TYPE_MCE = 1, + PSTORE_TYPE_CONSOLE = 2, + PSTORE_TYPE_FTRACE = 3, + PSTORE_TYPE_PPC_RTAS = 4, + PSTORE_TYPE_PPC_OF = 5, + PSTORE_TYPE_PPC_COMMON = 6, + PSTORE_TYPE_PMSG = 7, + PSTORE_TYPE_PPC_OPAL = 8, + PSTORE_TYPE_MAX = 9, +}; + +struct pstore_info; + +struct pstore_record { + struct pstore_info *psi; + enum pstore_type_id type; + u64 id; + struct timespec64 time; + char *buf; + ssize_t size; + ssize_t ecc_notice_size; + int count; + enum kmsg_dump_reason reason; + unsigned int part; + bool compressed; +}; + +struct pstore_info { + struct module *owner; + const char *name; + struct semaphore buf_lock; + char *buf; + size_t bufsize; + struct mutex read_mutex; + int flags; + int max_reason; + void *data; + int (*open)(struct pstore_info *); + int (*close)(struct pstore_info *); + ssize_t (*read)(struct pstore_record *); + int (*write)(struct pstore_record *); + int (*write_user)(struct pstore_record *, const char *); + int (*erase)(struct pstore_record *); +}; + +struct pstore_ftrace_record { + long unsigned int ip; + long unsigned int parent_ip; + u64 ts; +}; + +struct pstore_private { + struct list_head list; + struct dentry *dentry; + struct pstore_record *record; + size_t total_size; +}; + +struct pstore_ftrace_seq_data { + const void *ptr; + size_t off; + size_t size; +}; + +enum { + Opt_kmsg_bytes = 0, + Opt_err___5 = 1, +}; + +struct crypto_comp { + struct crypto_tfm base; +}; + +struct pstore_zbackend { + int (*zbufsize)(size_t); + const char *name; +}; + +struct efi_variable { + efi_char16_t VariableName[512]; + efi_guid_t VendorGuid; + long unsigned int DataSize; + __u8 Data[1024]; + efi_status_t Status; + __u32 Attributes; +} __attribute__((packed)); + +struct efivar_entry { + struct efi_variable var; + struct list_head list; + struct kobject kobj; + bool scanning; + bool deleting; +}; + +struct ipc64_perm { + __kernel_key_t key; + __kernel_uid32_t uid; + __kernel_gid32_t gid; + __kernel_uid32_t cuid; + __kernel_gid32_t cgid; + __kernel_mode_t mode; + unsigned char __pad1[0]; + short unsigned int seq; + short unsigned int __pad2; + __kernel_ulong_t __unused1; + __kernel_ulong_t __unused2; +}; + +typedef s32 compat_key_t; + +typedef u32 __compat_gid32_t; + +struct compat_ipc64_perm { + compat_key_t key; + __compat_uid32_t uid; + __compat_gid32_t gid; + __compat_uid32_t cuid; + __compat_gid32_t cgid; + short unsigned int mode; + short unsigned int __pad1; + short unsigned int seq; + short unsigned int __pad2; + compat_ulong_t unused1; + compat_ulong_t unused2; +}; + +struct compat_ipc_perm { + key_t key; + __compat_uid_t uid; + __compat_gid_t gid; + __compat_uid_t cuid; + __compat_gid_t cgid; + compat_mode_t mode; + short unsigned int seq; +}; + +struct ipc_perm { + __kernel_key_t key; + __kernel_uid_t uid; + __kernel_gid_t gid; + __kernel_uid_t cuid; + __kernel_gid_t cgid; + __kernel_mode_t mode; + short unsigned int seq; +}; + +struct ipc_params { + key_t key; + int flg; + union { + size_t size; + int nsems; + } u; +}; + +struct ipc_ops { + int (*getnew)(struct ipc_namespace *, struct ipc_params *); + int (*associate)(struct kern_ipc_perm *, int); + int (*more_checks)(struct kern_ipc_perm *, struct ipc_params *); +}; + +struct ipc_proc_iface { + const char *path; + const char *header; + int 
ids; + int (*show)(struct seq_file *, void *); +}; + +struct ipc_proc_iter { + struct ipc_namespace *ns; + struct pid_namespace *pid_ns; + struct ipc_proc_iface *iface; +}; + +struct msg_msgseg; + +struct msg_msg { + struct list_head m_list; + long int m_type; + size_t m_ts; + struct msg_msgseg *next; + void *security; +}; + +struct msg_msgseg { + struct msg_msgseg *next; +}; + +typedef int __kernel_ipc_pid_t; + +struct msgbuf { + __kernel_long_t mtype; + char mtext[1]; +}; + +struct msg; + +struct msqid_ds { + struct ipc_perm msg_perm; + struct msg *msg_first; + struct msg *msg_last; + __kernel_old_time_t msg_stime; + __kernel_old_time_t msg_rtime; + __kernel_old_time_t msg_ctime; + long unsigned int msg_lcbytes; + long unsigned int msg_lqbytes; + short unsigned int msg_cbytes; + short unsigned int msg_qnum; + short unsigned int msg_qbytes; + __kernel_ipc_pid_t msg_lspid; + __kernel_ipc_pid_t msg_lrpid; +}; + +struct msqid64_ds { + struct ipc64_perm msg_perm; + long int msg_stime; + long int msg_rtime; + long int msg_ctime; + long unsigned int msg_cbytes; + long unsigned int msg_qnum; + long unsigned int msg_qbytes; + __kernel_pid_t msg_lspid; + __kernel_pid_t msg_lrpid; + long unsigned int __unused4; + long unsigned int __unused5; +}; + +struct msginfo { + int msgpool; + int msgmap; + int msgmax; + int msgmnb; + int msgmni; + int msgssz; + int msgtql; + short unsigned int msgseg; +}; + +typedef u16 compat_ipc_pid_t; + +struct compat_msqid64_ds { + struct compat_ipc64_perm msg_perm; + compat_ulong_t msg_stime; + compat_ulong_t msg_stime_high; + compat_ulong_t msg_rtime; + compat_ulong_t msg_rtime_high; + compat_ulong_t msg_ctime; + compat_ulong_t msg_ctime_high; + compat_ulong_t msg_cbytes; + compat_ulong_t msg_qnum; + compat_ulong_t msg_qbytes; + compat_pid_t msg_lspid; + compat_pid_t msg_lrpid; + compat_ulong_t __unused4; + compat_ulong_t __unused5; +}; + +struct msg_queue { + struct kern_ipc_perm q_perm; + time64_t q_stime; + time64_t q_rtime; + time64_t q_ctime; + long unsigned int q_cbytes; + long unsigned int q_qnum; + long unsigned int q_qbytes; + struct pid *q_lspid; + struct pid *q_lrpid; + struct list_head q_messages; + struct list_head q_receivers; + struct list_head q_senders; + long: 64; + long: 64; +}; + +struct msg_receiver { + struct list_head r_list; + struct task_struct *r_tsk; + int r_mode; + long int r_msgtype; + long int r_maxsize; + struct msg_msg *r_msg; +}; + +struct msg_sender { + struct list_head list; + struct task_struct *tsk; + size_t msgsz; +}; + +struct compat_msqid_ds { + struct compat_ipc_perm msg_perm; + compat_uptr_t msg_first; + compat_uptr_t msg_last; + old_time32_t msg_stime; + old_time32_t msg_rtime; + old_time32_t msg_ctime; + compat_ulong_t msg_lcbytes; + compat_ulong_t msg_lqbytes; + short unsigned int msg_cbytes; + short unsigned int msg_qnum; + short unsigned int msg_qbytes; + compat_ipc_pid_t msg_lspid; + compat_ipc_pid_t msg_lrpid; +}; + +struct compat_msgbuf { + compat_long_t mtype; + char mtext[1]; +}; + +struct sem; + +struct sem_queue; + +struct sem_undo; + +struct semid_ds { + struct ipc_perm sem_perm; + __kernel_old_time_t sem_otime; + __kernel_old_time_t sem_ctime; + struct sem *sem_base; + struct sem_queue *sem_pending; + struct sem_queue **sem_pending_last; + struct sem_undo *undo; + short unsigned int sem_nsems; +}; + +struct sem { + int semval; + struct pid *sempid; + spinlock_t lock; + struct list_head pending_alter; + struct list_head pending_const; + time64_t sem_otime; +}; + +struct sem_queue { + struct list_head list; + struct 
task_struct *sleeper; + struct sem_undo *undo; + struct pid *pid; + int status; + struct sembuf *sops; + struct sembuf *blocking; + int nsops; + bool alter; + bool dupsop; +}; + +struct sem_undo { + struct list_head list_proc; + struct callback_head rcu; + struct sem_undo_list *ulp; + struct list_head list_id; + int semid; + short int *semadj; +}; + +struct semid64_ds { + struct ipc64_perm sem_perm; + __kernel_long_t sem_otime; + __kernel_ulong_t __unused1; + __kernel_long_t sem_ctime; + __kernel_ulong_t __unused2; + __kernel_ulong_t sem_nsems; + __kernel_ulong_t __unused3; + __kernel_ulong_t __unused4; +}; + +struct seminfo { + int semmap; + int semmni; + int semmns; + int semmnu; + int semmsl; + int semopm; + int semume; + int semusz; + int semvmx; + int semaem; +}; + +struct sem_undo_list { + refcount_t refcnt; + spinlock_t lock; + struct list_head list_proc; +}; + +struct compat_semid64_ds { + struct compat_ipc64_perm sem_perm; + compat_ulong_t sem_otime; + compat_ulong_t sem_otime_high; + compat_ulong_t sem_ctime; + compat_ulong_t sem_ctime_high; + compat_ulong_t sem_nsems; + compat_ulong_t __unused3; + compat_ulong_t __unused4; +}; + +struct sem_array { + struct kern_ipc_perm sem_perm; + time64_t sem_ctime; + struct list_head pending_alter; + struct list_head pending_const; + struct list_head list_id; + int sem_nsems; + int complex_count; + unsigned int use_global_lock; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct sem sems[0]; +}; + +struct compat_semid_ds { + struct compat_ipc_perm sem_perm; + old_time32_t sem_otime; + old_time32_t sem_ctime; + compat_uptr_t sem_base; + compat_uptr_t sem_pending; + compat_uptr_t sem_pending_last; + compat_uptr_t undo; + short unsigned int sem_nsems; +}; + +struct shmid_ds { + struct ipc_perm shm_perm; + int shm_segsz; + __kernel_old_time_t shm_atime; + __kernel_old_time_t shm_dtime; + __kernel_old_time_t shm_ctime; + __kernel_ipc_pid_t shm_cpid; + __kernel_ipc_pid_t shm_lpid; + short unsigned int shm_nattch; + short unsigned int shm_unused; + void *shm_unused2; + void *shm_unused3; +}; + +struct shmid64_ds { + struct ipc64_perm shm_perm; + size_t shm_segsz; + long int shm_atime; + long int shm_dtime; + long int shm_ctime; + __kernel_pid_t shm_cpid; + __kernel_pid_t shm_lpid; + long unsigned int shm_nattch; + long unsigned int __unused4; + long unsigned int __unused5; +}; + +struct shminfo64 { + long unsigned int shmmax; + long unsigned int shmmin; + long unsigned int shmmni; + long unsigned int shmseg; + long unsigned int shmall; + long unsigned int __unused1; + long unsigned int __unused2; + long unsigned int __unused3; + long unsigned int __unused4; +}; + +struct shminfo { + int shmmax; + int shmmin; + int shmmni; + int shmseg; + int shmall; +}; + +struct shm_info { + int used_ids; + __kernel_ulong_t shm_tot; + __kernel_ulong_t shm_rss; + __kernel_ulong_t shm_swp; + __kernel_ulong_t swap_attempts; + __kernel_ulong_t swap_successes; +}; + +struct compat_shmid64_ds { + struct compat_ipc64_perm shm_perm; + compat_size_t shm_segsz; + compat_ulong_t shm_atime; + compat_ulong_t shm_atime_high; + compat_ulong_t shm_dtime; + compat_ulong_t shm_dtime_high; + compat_ulong_t shm_ctime; + compat_ulong_t shm_ctime_high; + compat_pid_t shm_cpid; + compat_pid_t shm_lpid; + compat_ulong_t shm_nattch; + compat_ulong_t __unused4; + compat_ulong_t __unused5; +}; + +struct shmid_kernel { + struct kern_ipc_perm shm_perm; + struct file *shm_file; + long unsigned int shm_nattch; + long unsigned int shm_segsz; + 
time64_t shm_atim; + time64_t shm_dtim; + time64_t shm_ctim; + struct pid *shm_cprid; + struct pid *shm_lprid; + struct user_struct *mlock_user; + struct task_struct *shm_creator; + struct list_head shm_clist; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct shm_file_data { + int id; + struct ipc_namespace *ns; + struct file *file; + const struct vm_operations_struct *vm_ops; +}; + +struct compat_shmid_ds { + struct compat_ipc_perm shm_perm; + int shm_segsz; + old_time32_t shm_atime; + old_time32_t shm_dtime; + old_time32_t shm_ctime; + compat_ipc_pid_t shm_cpid; + compat_ipc_pid_t shm_lpid; + short unsigned int shm_nattch; + short unsigned int shm_unused; + compat_uptr_t shm_unused2; + compat_uptr_t shm_unused3; +}; + +struct compat_shminfo64 { + compat_ulong_t shmmax; + compat_ulong_t shmmin; + compat_ulong_t shmmni; + compat_ulong_t shmseg; + compat_ulong_t shmall; + compat_ulong_t __unused1; + compat_ulong_t __unused2; + compat_ulong_t __unused3; + compat_ulong_t __unused4; +}; + +struct compat_shm_info { + compat_int_t used_ids; + compat_ulong_t shm_tot; + compat_ulong_t shm_rss; + compat_ulong_t shm_swp; + compat_ulong_t swap_attempts; + compat_ulong_t swap_successes; +}; + +struct compat_ipc_kludge { + compat_uptr_t msgp; + compat_long_t msgtyp; +}; + +struct mqueue_fs_context { + struct ipc_namespace *ipc_ns; +}; + +struct posix_msg_tree_node { + struct rb_node rb_node; + struct list_head msg_list; + int priority; +}; + +struct ext_wait_queue { + struct task_struct *task; + struct list_head list; + struct msg_msg *msg; + int state; +}; + +struct mqueue_inode_info { + spinlock_t lock; + struct inode vfs_inode; + wait_queue_head_t wait_q; + struct rb_root msg_tree; + struct rb_node *msg_tree_rightmost; + struct posix_msg_tree_node *node_cache; + struct mq_attr attr; + struct sigevent notify; + struct pid *notify_owner; + u32 notify_self_exec_id; + struct user_namespace *notify_user_ns; + struct user_struct *user; + struct sock *notify_sock; + struct sk_buff *notify_cookie; + struct ext_wait_queue e_wait_q[2]; + long unsigned int qsize; +}; + +struct compat_mq_attr { + compat_long_t mq_flags; + compat_long_t mq_maxmsg; + compat_long_t mq_msgsize; + compat_long_t mq_curmsgs; + compat_long_t __reserved[4]; +}; + +struct key_user { + struct rb_node node; + struct mutex cons_lock; + spinlock_t lock; + refcount_t usage; + atomic_t nkeys; + atomic_t nikeys; + kuid_t uid; + int qnkeys; + int qnbytes; +}; + +enum key_notification_subtype { + NOTIFY_KEY_INSTANTIATED = 0, + NOTIFY_KEY_UPDATED = 1, + NOTIFY_KEY_LINKED = 2, + NOTIFY_KEY_UNLINKED = 3, + NOTIFY_KEY_CLEARED = 4, + NOTIFY_KEY_REVOKED = 5, + NOTIFY_KEY_INVALIDATED = 6, + NOTIFY_KEY_SETATTR = 7, +}; + +struct key_notification { + struct watch_notification watch; + __u32 key_id; + __u32 aux; +}; + +struct assoc_array_edit; + +struct assoc_array_ops { + long unsigned int (*get_key_chunk)(const void *, int); + long unsigned int (*get_object_key_chunk)(const void *, int); + bool (*compare_object)(const void *, const void *); + int (*diff_objects)(const void *, const void *); + void (*free_object)(void *); +}; + +struct assoc_array_node { + struct assoc_array_ptr *back_pointer; + u8 parent_slot; + struct assoc_array_ptr *slots[16]; + long unsigned int nr_leaves_on_branch; +}; + +struct assoc_array_shortcut { + struct assoc_array_ptr *back_pointer; + int parent_slot; + int skip_to_level; + struct assoc_array_ptr *next_node; + long unsigned int index_key[0]; +}; + +struct assoc_array_edit___2 { + struct callback_head rcu; + 
struct assoc_array *array; + const struct assoc_array_ops *ops; + const struct assoc_array_ops *ops_for_excised_subtree; + struct assoc_array_ptr *leaf; + struct assoc_array_ptr **leaf_p; + struct assoc_array_ptr *dead_leaf; + struct assoc_array_ptr *new_meta[3]; + struct assoc_array_ptr *excised_meta[1]; + struct assoc_array_ptr *excised_subtree; + struct assoc_array_ptr **set_backpointers[16]; + struct assoc_array_ptr *set_backpointers_to; + struct assoc_array_node *adjust_count_on; + long int adjust_count_by; + struct { + struct assoc_array_ptr **ptr; + struct assoc_array_ptr *to; + } set[2]; + struct { + u8 *p; + u8 to; + } set_parent_slot[1]; + u8 segment_cache[17]; +}; + +struct keyring_search_context { + struct keyring_index_key index_key; + const struct cred *cred; + struct key_match_data match_data; + unsigned int flags; + int (*iterator)(const void *, void *); + int skipped_ret; + bool possessed; + key_ref_t result; + time64_t now; +}; + +struct keyring_read_iterator_context { + size_t buflen; + size_t count; + key_serial_t *buffer; +}; + +struct keyctl_dh_params { + union { + __s32 private; + __s32 priv; + }; + __s32 prime; + __s32 base; +}; + +struct keyctl_kdf_params { + char *hashname; + char *otherinfo; + __u32 otherinfolen; + __u32 __spare[8]; +}; + +struct keyctl_pkey_query { + __u32 supported_ops; + __u32 key_size; + __u16 max_data_size; + __u16 max_sig_size; + __u16 max_enc_size; + __u16 max_dec_size; + __u32 __spare[10]; +}; + +struct keyctl_pkey_params { + __s32 key_id; + __u32 in_len; + union { + __u32 out_len; + __u32 in2_len; + }; + __u32 __spare[7]; +}; + +struct request_key_auth { + struct callback_head rcu; + struct key *target_key; + struct key *dest_keyring; + const struct cred *cred; + void *callout_info; + size_t callout_len; + pid_t pid; + char op[8]; +}; + +struct compat_keyctl_kdf_params { + compat_uptr_t hashname; + compat_uptr_t otherinfo; + __u32 otherinfolen; + __u32 __spare[8]; +}; + +struct kpp_request { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int src_len; + unsigned int dst_len; + void *__ctx[0]; +}; + +struct crypto_kpp { + struct crypto_tfm base; +}; + +struct kpp_alg { + int (*set_secret)(struct crypto_kpp *, const void *, unsigned int); + int (*generate_public_key)(struct kpp_request *); + int (*compute_shared_secret)(struct kpp_request *); + unsigned int (*max_size)(struct crypto_kpp *); + int (*init)(struct crypto_kpp *); + void (*exit)(struct crypto_kpp *); + unsigned int reqsize; + struct crypto_alg base; +}; + +struct dh { + void *key; + void *p; + void *q; + void *g; + unsigned int key_size; + unsigned int p_size; + unsigned int q_size; + unsigned int g_size; +}; + +struct dh_completion { + struct completion completion; + int err; +}; + +struct kdf_sdesc { + struct shash_desc shash; + char ctx[0]; +}; + +enum { + Opt_err___6 = 0, + Opt_enc = 1, + Opt_hash = 2, +}; + +enum hash_algo { + HASH_ALGO_MD4 = 0, + HASH_ALGO_MD5 = 1, + HASH_ALGO_SHA1 = 2, + HASH_ALGO_RIPE_MD_160 = 3, + HASH_ALGO_SHA256 = 4, + HASH_ALGO_SHA384 = 5, + HASH_ALGO_SHA512 = 6, + HASH_ALGO_SHA224 = 7, + HASH_ALGO_RIPE_MD_128 = 8, + HASH_ALGO_RIPE_MD_256 = 9, + HASH_ALGO_RIPE_MD_320 = 10, + HASH_ALGO_WP_256 = 11, + HASH_ALGO_WP_384 = 12, + HASH_ALGO_WP_512 = 13, + HASH_ALGO_TGR_128 = 14, + HASH_ALGO_TGR_160 = 15, + HASH_ALGO_TGR_192 = 16, + HASH_ALGO_SM3_256 = 17, + HASH_ALGO_STREEBOG_256 = 18, + HASH_ALGO_STREEBOG_512 = 19, + HASH_ALGO__LAST = 20, +}; + +enum tpm_duration { + TPM_SHORT = 0, + TPM_MEDIUM = 1, + 
TPM_LONG = 2, + TPM_LONG_LONG = 3, + TPM_UNDEFINED = 4, + TPM_NUM_DURATIONS = 4, +}; + +struct trusted_key_payload { + struct callback_head rcu; + unsigned int key_len; + unsigned int blob_len; + unsigned char migratable; + unsigned char old_format; + unsigned char key[129]; + unsigned char blob[512]; +}; + +struct trusted_key_ops { + unsigned char migratable; + int (*init)(); + int (*seal)(struct trusted_key_payload *, char *); + int (*unseal)(struct trusted_key_payload *, char *); + int (*get_random)(unsigned char *, size_t); + void (*exit)(); +}; + +struct trusted_key_source { + char *name; + struct trusted_key_ops *ops; +}; + +enum { + Opt_err___7 = 0, + Opt_new = 1, + Opt_load = 2, + Opt_update = 3, +}; + +struct hwrng { + const char *name; + int (*init)(struct hwrng *); + void (*cleanup)(struct hwrng *); + int (*data_present)(struct hwrng *, int); + int (*data_read)(struct hwrng *, u32 *); + int (*read)(struct hwrng *, void *, size_t, bool); + long unsigned int priv; + short unsigned int quality; + struct list_head list; + struct kref ref; + struct completion cleanup_done; +}; + +struct tpm_digest { + u16 alg_id; + u8 digest[64]; +}; + +struct tpm_bank_info { + u16 alg_id; + u16 digest_size; + u16 crypto_id; +}; + +struct tpm_chip; + +struct tpm_class_ops { + unsigned int flags; + const u8 req_complete_mask; + const u8 req_complete_val; + bool (*req_canceled)(struct tpm_chip *, u8); + int (*recv)(struct tpm_chip *, u8 *, size_t); + int (*send)(struct tpm_chip *, u8 *, size_t); + void (*cancel)(struct tpm_chip *); + u8 (*status)(struct tpm_chip *); + void (*update_timeouts)(struct tpm_chip *, long unsigned int *); + void (*update_durations)(struct tpm_chip *, long unsigned int *); + int (*go_idle)(struct tpm_chip *); + int (*cmd_ready)(struct tpm_chip *); + int (*request_locality)(struct tpm_chip *, int); + int (*relinquish_locality)(struct tpm_chip *, int); + void (*clk_enable)(struct tpm_chip *, bool); +}; + +struct tpm_bios_log { + void *bios_event_log; + void *bios_event_log_end; +}; + +struct tpm_chip_seqops { + struct tpm_chip *chip; + const struct seq_operations *seqops; +}; + +struct tpm_space { + u32 context_tbl[3]; + u8 *context_buf; + u32 session_tbl[3]; + u8 *session_buf; + u32 buf_size; +}; + +struct tpm_chip { + struct device dev; + struct device devs; + struct cdev cdev; + struct cdev cdevs; + struct rw_semaphore ops_sem; + const struct tpm_class_ops *ops; + struct tpm_bios_log log; + struct tpm_chip_seqops bin_log_seqops; + struct tpm_chip_seqops ascii_log_seqops; + unsigned int flags; + int dev_num; + long unsigned int is_open; + char hwrng_name[64]; + struct hwrng hwrng; + struct mutex tpm_mutex; + long unsigned int timeout_a; + long unsigned int timeout_b; + long unsigned int timeout_c; + long unsigned int timeout_d; + bool timeout_adjusted; + long unsigned int duration[4]; + bool duration_adjusted; + struct dentry *bios_dir[3]; + const struct attribute_group *groups[8]; + unsigned int groups_cnt; + u32 nr_allocated_banks; + struct tpm_bank_info *allocated_banks; + acpi_handle acpi_dev_handle; + char ppi_version[4]; + struct tpm_space work_space; + u32 last_cc; + u32 nr_commands; + u32 *cc_attrs_tbl; + int locality; +}; + +struct tpm_header { + __be16 tag; + __be32 length; + union { + __be32 ordinal; + __be32 return_code; + }; +} __attribute__((packed)); + +enum tpm_buf_flags { + TPM_BUF_OVERFLOW = 1, +}; + +struct tpm_buf { + unsigned int flags; + u8 *data; +}; + +struct trusted_key_options { + uint16_t keytype; + uint32_t keyhandle; + unsigned char keyauth[20]; + 
uint32_t blobauth_len; + unsigned char blobauth[20]; + uint32_t pcrinfo_len; + unsigned char pcrinfo[64]; + int pcrlock; + uint32_t hash; + uint32_t policydigest_len; + unsigned char policydigest[64]; + uint32_t policyhandle; +}; + +struct osapsess { + uint32_t handle; + unsigned char secret[20]; + unsigned char enonce[20]; +}; + +enum { + SEAL_keytype = 1, + SRK_keytype = 4, +}; + +struct sdesc { + struct shash_desc shash; + char ctx[0]; +}; + +struct tpm_digests { + unsigned char encauth[20]; + unsigned char pubauth[20]; + unsigned char xorwork[40]; + unsigned char xorhash[20]; + unsigned char nonceodd[20]; +}; + +enum { + Opt_err___8 = 0, + Opt_keyhandle = 1, + Opt_keyauth = 2, + Opt_blobauth = 3, + Opt_pcrinfo = 4, + Opt_pcrlock = 5, + Opt_migratable = 6, + Opt_hash___2 = 7, + Opt_policydigest = 8, + Opt_policyhandle = 9, +}; + +typedef int (*asn1_action_t)(void *, size_t, unsigned char, const void *, size_t); + +struct asn1_decoder { + const unsigned char *machine; + size_t machlen; + const asn1_action_t *actions; +}; + +enum OID { + OID_id_dsa_with_sha1 = 0, + OID_id_dsa = 1, + OID_id_ecPublicKey = 2, + OID_id_prime192v1 = 3, + OID_id_prime256v1 = 4, + OID_id_ecdsa_with_sha1 = 5, + OID_id_ecdsa_with_sha224 = 6, + OID_id_ecdsa_with_sha256 = 7, + OID_id_ecdsa_with_sha384 = 8, + OID_id_ecdsa_with_sha512 = 9, + OID_rsaEncryption = 10, + OID_md2WithRSAEncryption = 11, + OID_md3WithRSAEncryption = 12, + OID_md4WithRSAEncryption = 13, + OID_sha1WithRSAEncryption = 14, + OID_sha256WithRSAEncryption = 15, + OID_sha384WithRSAEncryption = 16, + OID_sha512WithRSAEncryption = 17, + OID_sha224WithRSAEncryption = 18, + OID_data = 19, + OID_signed_data = 20, + OID_email_address = 21, + OID_contentType = 22, + OID_messageDigest = 23, + OID_signingTime = 24, + OID_smimeCapabilites = 25, + OID_smimeAuthenticatedAttrs = 26, + OID_md2 = 27, + OID_md4 = 28, + OID_md5 = 29, + OID_msIndirectData = 30, + OID_msStatementType = 31, + OID_msSpOpusInfo = 32, + OID_msPeImageDataObjId = 33, + OID_msIndividualSPKeyPurpose = 34, + OID_msOutlookExpress = 35, + OID_certAuthInfoAccess = 36, + OID_sha1 = 37, + OID_id_ansip384r1 = 38, + OID_sha256 = 39, + OID_sha384 = 40, + OID_sha512 = 41, + OID_sha224 = 42, + OID_commonName = 43, + OID_surname = 44, + OID_countryName = 45, + OID_locality = 46, + OID_stateOrProvinceName = 47, + OID_organizationName = 48, + OID_organizationUnitName = 49, + OID_title = 50, + OID_description = 51, + OID_name = 52, + OID_givenName = 53, + OID_initials = 54, + OID_generationalQualifier = 55, + OID_subjectKeyIdentifier = 56, + OID_keyUsage = 57, + OID_subjectAltName = 58, + OID_issuerAltName = 59, + OID_basicConstraints = 60, + OID_crlDistributionPoints = 61, + OID_certPolicies = 62, + OID_authorityKeyIdentifier = 63, + OID_extKeyUsage = 64, + OID_gostCPSignA = 65, + OID_gostCPSignB = 66, + OID_gostCPSignC = 67, + OID_gost2012PKey256 = 68, + OID_gost2012PKey512 = 69, + OID_gost2012Digest256 = 70, + OID_gost2012Digest512 = 71, + OID_gost2012Signature256 = 72, + OID_gost2012Signature512 = 73, + OID_gostTC26Sign256A = 74, + OID_gostTC26Sign256B = 75, + OID_gostTC26Sign256C = 76, + OID_gostTC26Sign256D = 77, + OID_gostTC26Sign512A = 78, + OID_gostTC26Sign512B = 79, + OID_gostTC26Sign512C = 80, + OID_sm2 = 81, + OID_sm3 = 82, + OID_SM2_with_SM3 = 83, + OID_sm3WithRSAEncryption = 84, + OID_TPMLoadableKey = 85, + OID_TPMImportableKey = 86, + OID_TPMSealedData = 87, + OID__NR = 88, +}; + +enum tpm_algorithms { + TPM_ALG_ERROR = 0, + TPM_ALG_SHA1 = 4, + TPM_ALG_KEYEDHASH = 8, + TPM_ALG_SHA256 = 11, + 
TPM_ALG_SHA384 = 12, + TPM_ALG_SHA512 = 13, + TPM_ALG_NULL = 16, + TPM_ALG_SM3_256 = 18, +}; + +enum tpm2_structures { + TPM2_ST_NO_SESSIONS = 32769, + TPM2_ST_SESSIONS = 32770, +}; + +enum tpm2_return_codes { + TPM2_RC_SUCCESS = 0, + TPM2_RC_HASH = 131, + TPM2_RC_HANDLE = 139, + TPM2_RC_INITIALIZE = 256, + TPM2_RC_FAILURE = 257, + TPM2_RC_DISABLED = 288, + TPM2_RC_COMMAND_CODE = 323, + TPM2_RC_TESTING = 2314, + TPM2_RC_REFERENCE_H0 = 2320, + TPM2_RC_RETRY = 2338, +}; + +enum tpm2_command_codes { + TPM2_CC_FIRST = 287, + TPM2_CC_HIERARCHY_CONTROL = 289, + TPM2_CC_HIERARCHY_CHANGE_AUTH = 297, + TPM2_CC_CREATE_PRIMARY = 305, + TPM2_CC_SEQUENCE_COMPLETE = 318, + TPM2_CC_SELF_TEST = 323, + TPM2_CC_STARTUP = 324, + TPM2_CC_SHUTDOWN = 325, + TPM2_CC_NV_READ = 334, + TPM2_CC_CREATE = 339, + TPM2_CC_LOAD = 343, + TPM2_CC_SEQUENCE_UPDATE = 348, + TPM2_CC_UNSEAL = 350, + TPM2_CC_CONTEXT_LOAD = 353, + TPM2_CC_CONTEXT_SAVE = 354, + TPM2_CC_FLUSH_CONTEXT = 357, + TPM2_CC_VERIFY_SIGNATURE = 375, + TPM2_CC_GET_CAPABILITY = 378, + TPM2_CC_GET_RANDOM = 379, + TPM2_CC_PCR_READ = 382, + TPM2_CC_PCR_EXTEND = 386, + TPM2_CC_EVENT_SEQUENCE_COMPLETE = 389, + TPM2_CC_HASH_SEQUENCE_START = 390, + TPM2_CC_CREATE_LOADED = 401, + TPM2_CC_LAST = 403, +}; + +enum tpm2_permanent_handles { + TPM2_RS_PW = 1073741833, +}; + +enum tpm2_object_attributes { + TPM2_OA_FIXED_TPM = 2, + TPM2_OA_FIXED_PARENT = 16, + TPM2_OA_USER_WITH_AUTH = 64, +}; + +enum tpm2_session_attributes { + TPM2_SA_CONTINUE_SESSION = 1, +}; + +struct tpm2_hash { + unsigned int crypto_id; + unsigned int tpm_id; +}; + +struct tpm2_key_context { + u32 parent; + const u8 *pub; + u32 pub_len; + const u8 *priv; + u32 priv_len; +}; + +enum asn1_class { + ASN1_UNIV = 0, + ASN1_APPL = 1, + ASN1_CONT = 2, + ASN1_PRIV = 3, +}; + +enum asn1_method { + ASN1_PRIM = 0, + ASN1_CONS = 1, +}; + +enum asn1_tag { + ASN1_EOC = 0, + ASN1_BOOL = 1, + ASN1_INT = 2, + ASN1_BTS = 3, + ASN1_OTS = 4, + ASN1_NULL = 5, + ASN1_OID = 6, + ASN1_ODE = 7, + ASN1_EXT = 8, + ASN1_REAL = 9, + ASN1_ENUM = 10, + ASN1_EPDV = 11, + ASN1_UTF8STR = 12, + ASN1_RELOID = 13, + ASN1_SEQ = 16, + ASN1_SET = 17, + ASN1_NUMSTR = 18, + ASN1_PRNSTR = 19, + ASN1_TEXSTR = 20, + ASN1_VIDSTR = 21, + ASN1_IA5STR = 22, + ASN1_UNITIM = 23, + ASN1_GENTIM = 24, + ASN1_GRASTR = 25, + ASN1_VISSTR = 26, + ASN1_GENSTR = 27, + ASN1_UNISTR = 28, + ASN1_CHRSTR = 29, + ASN1_BMPSTR = 30, + ASN1_LONG_TAG = 31, +}; + +enum asn1_opcode { + ASN1_OP_MATCH = 0, + ASN1_OP_MATCH_OR_SKIP = 1, + ASN1_OP_MATCH_ACT = 2, + ASN1_OP_MATCH_ACT_OR_SKIP = 3, + ASN1_OP_MATCH_JUMP = 4, + ASN1_OP_MATCH_JUMP_OR_SKIP = 5, + ASN1_OP_MATCH_ANY = 8, + ASN1_OP_MATCH_ANY_OR_SKIP = 9, + ASN1_OP_MATCH_ANY_ACT = 10, + ASN1_OP_MATCH_ANY_ACT_OR_SKIP = 11, + ASN1_OP_COND_MATCH_OR_SKIP = 17, + ASN1_OP_COND_MATCH_ACT_OR_SKIP = 19, + ASN1_OP_COND_MATCH_JUMP_OR_SKIP = 21, + ASN1_OP_COND_MATCH_ANY = 24, + ASN1_OP_COND_MATCH_ANY_OR_SKIP = 25, + ASN1_OP_COND_MATCH_ANY_ACT = 26, + ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP = 27, + ASN1_OP_COND_FAIL = 28, + ASN1_OP_COMPLETE = 29, + ASN1_OP_ACT = 30, + ASN1_OP_MAYBE_ACT = 31, + ASN1_OP_END_SEQ = 32, + ASN1_OP_END_SET = 33, + ASN1_OP_END_SEQ_OF = 34, + ASN1_OP_END_SET_OF = 35, + ASN1_OP_END_SEQ_ACT = 36, + ASN1_OP_END_SET_ACT = 37, + ASN1_OP_END_SEQ_OF_ACT = 38, + ASN1_OP_END_SET_OF_ACT = 39, + ASN1_OP_RETURN = 40, + ASN1_OP__NR = 41, +}; + +enum tpm2key_actions { + ACT_tpm2_key_parent = 0, + ACT_tpm2_key_priv = 1, + ACT_tpm2_key_pub = 2, + ACT_tpm2_key_type = 3, + NR__tpm2key_actions = 4, +}; + +enum { + Opt_new___2 = 0, + 
Opt_load___2 = 1, + Opt_update___2 = 2, + Opt_err___9 = 3, +}; + +enum { + Opt_default = 0, + Opt_ecryptfs = 1, + Opt_enc32 = 2, + Opt_error = 3, +}; + +enum derived_key_type { + ENC_KEY = 0, + AUTH_KEY = 1, +}; + +struct vfs_cap_data { + __le32 magic_etc; + struct { + __le32 permitted; + __le32 inheritable; + } data[2]; +}; + +struct vfs_ns_cap_data { + __le32 magic_etc; + struct { + __le32 permitted; + __le32 inheritable; + } data[2]; + __le32 rootid; +}; + +struct sctp_endpoint; + +union security_list_options { + int (*binder_set_context_mgr)(struct task_struct *); + int (*binder_transaction)(struct task_struct *, struct task_struct *); + int (*binder_transfer_binder)(struct task_struct *, struct task_struct *); + int (*binder_transfer_file)(struct task_struct *, struct task_struct *, struct file *); + int (*ptrace_access_check)(struct task_struct *, unsigned int); + int (*ptrace_traceme)(struct task_struct *); + int (*capget)(struct task_struct *, kernel_cap_t *, kernel_cap_t *, kernel_cap_t *); + int (*capset)(struct cred *, const struct cred *, const kernel_cap_t *, const kernel_cap_t *, const kernel_cap_t *); + int (*capable)(const struct cred *, struct user_namespace *, int, unsigned int); + int (*quotactl)(int, int, int, struct super_block *); + int (*quota_on)(struct dentry *); + int (*syslog)(int); + int (*settime)(const struct timespec64 *, const struct timezone *); + int (*vm_enough_memory)(struct mm_struct *, long int); + int (*bprm_creds_for_exec)(struct linux_binprm *); + int (*bprm_creds_from_file)(struct linux_binprm *, struct file *); + int (*bprm_check_security)(struct linux_binprm *); + void (*bprm_committing_creds)(struct linux_binprm *); + void (*bprm_committed_creds)(struct linux_binprm *); + int (*fs_context_dup)(struct fs_context *, struct fs_context *); + int (*fs_context_parse_param)(struct fs_context *, struct fs_parameter *); + int (*sb_alloc_security)(struct super_block *); + void (*sb_delete)(struct super_block *); + void (*sb_free_security)(struct super_block *); + void (*sb_free_mnt_opts)(void *); + int (*sb_eat_lsm_opts)(char *, void **); + int (*sb_mnt_opts_compat)(struct super_block *, void *); + int (*sb_remount)(struct super_block *, void *); + int (*sb_kern_mount)(struct super_block *); + int (*sb_show_options)(struct seq_file *, struct super_block *); + int (*sb_statfs)(struct dentry *); + int (*sb_mount)(const char *, const struct path *, const char *, long unsigned int, void *); + int (*sb_umount)(struct vfsmount *, int); + int (*sb_pivotroot)(const struct path *, const struct path *); + int (*sb_set_mnt_opts)(struct super_block *, void *, long unsigned int, long unsigned int *); + int (*sb_clone_mnt_opts)(const struct super_block *, struct super_block *, long unsigned int, long unsigned int *); + int (*sb_add_mnt_opt)(const char *, const char *, int, void **); + int (*move_mount)(const struct path *, const struct path *); + int (*dentry_init_security)(struct dentry *, int, const struct qstr *, void **, u32 *); + int (*dentry_create_files_as)(struct dentry *, int, struct qstr *, const struct cred *, struct cred *); + int (*path_unlink)(const struct path *, struct dentry *); + int (*path_mkdir)(const struct path *, struct dentry *, umode_t); + int (*path_rmdir)(const struct path *, struct dentry *); + int (*path_mknod)(const struct path *, struct dentry *, umode_t, unsigned int); + int (*path_truncate)(const struct path *); + int (*path_symlink)(const struct path *, struct dentry *, const char *); + int (*path_link)(struct dentry *, const struct 
path *, struct dentry *); + int (*path_rename)(const struct path *, struct dentry *, const struct path *, struct dentry *); + int (*path_chmod)(const struct path *, umode_t); + int (*path_chown)(const struct path *, kuid_t, kgid_t); + int (*path_chroot)(const struct path *); + int (*path_notify)(const struct path *, u64, unsigned int); + int (*inode_alloc_security)(struct inode *); + void (*inode_free_security)(struct inode *); + int (*inode_init_security)(struct inode *, struct inode *, const struct qstr *, const char **, void **, size_t *); + int (*inode_init_security_anon)(struct inode *, const struct qstr *, const struct inode *); + int (*inode_create)(struct inode *, struct dentry *, umode_t); + int (*inode_link)(struct dentry *, struct inode *, struct dentry *); + int (*inode_unlink)(struct inode *, struct dentry *); + int (*inode_symlink)(struct inode *, struct dentry *, const char *); + int (*inode_mkdir)(struct inode *, struct dentry *, umode_t); + int (*inode_rmdir)(struct inode *, struct dentry *); + int (*inode_mknod)(struct inode *, struct dentry *, umode_t, dev_t); + int (*inode_rename)(struct inode *, struct dentry *, struct inode *, struct dentry *); + int (*inode_readlink)(struct dentry *); + int (*inode_follow_link)(struct dentry *, struct inode *, bool); + int (*inode_permission)(struct inode *, int); + int (*inode_setattr)(struct dentry *, struct iattr *); + int (*inode_getattr)(const struct path *); + int (*inode_setxattr)(struct user_namespace *, struct dentry *, const char *, const void *, size_t, int); + void (*inode_post_setxattr)(struct dentry *, const char *, const void *, size_t, int); + int (*inode_getxattr)(struct dentry *, const char *); + int (*inode_listxattr)(struct dentry *); + int (*inode_removexattr)(struct user_namespace *, struct dentry *, const char *); + int (*inode_need_killpriv)(struct dentry *); + int (*inode_killpriv)(struct user_namespace *, struct dentry *); + int (*inode_getsecurity)(struct user_namespace *, struct inode *, const char *, void **, bool); + int (*inode_setsecurity)(struct inode *, const char *, const void *, size_t, int); + int (*inode_listsecurity)(struct inode *, char *, size_t); + void (*inode_getsecid)(struct inode *, u32 *); + int (*inode_copy_up)(struct dentry *, struct cred **); + int (*inode_copy_up_xattr)(const char *); + int (*kernfs_init_security)(struct kernfs_node *, struct kernfs_node *); + int (*file_permission)(struct file *, int); + int (*file_alloc_security)(struct file *); + void (*file_free_security)(struct file *); + int (*file_ioctl)(struct file *, unsigned int, long unsigned int); + int (*mmap_addr)(long unsigned int); + int (*mmap_file)(struct file *, long unsigned int, long unsigned int, long unsigned int); + int (*file_mprotect)(struct vm_area_struct *, long unsigned int, long unsigned int); + int (*file_lock)(struct file *, unsigned int); + int (*file_fcntl)(struct file *, unsigned int, long unsigned int); + void (*file_set_fowner)(struct file *); + int (*file_send_sigiotask)(struct task_struct *, struct fown_struct *, int); + int (*file_receive)(struct file *); + int (*file_open)(struct file *); + int (*task_alloc)(struct task_struct *, long unsigned int); + void (*task_free)(struct task_struct *); + int (*cred_alloc_blank)(struct cred *, gfp_t); + void (*cred_free)(struct cred *); + int (*cred_prepare)(struct cred *, const struct cred *, gfp_t); + void (*cred_transfer)(struct cred *, const struct cred *); + void (*cred_getsecid)(const struct cred *, u32 *); + int (*kernel_act_as)(struct cred *, 
u32); + int (*kernel_create_files_as)(struct cred *, struct inode *); + int (*kernel_module_request)(char *); + int (*kernel_load_data)(enum kernel_load_data_id, bool); + int (*kernel_post_load_data)(char *, loff_t, enum kernel_load_data_id, char *); + int (*kernel_read_file)(struct file *, enum kernel_read_file_id, bool); + int (*kernel_post_read_file)(struct file *, char *, loff_t, enum kernel_read_file_id); + int (*task_fix_setuid)(struct cred *, const struct cred *, int); + int (*task_fix_setgid)(struct cred *, const struct cred *, int); + int (*task_setpgid)(struct task_struct *, pid_t); + int (*task_getpgid)(struct task_struct *); + int (*task_getsid)(struct task_struct *); + void (*task_getsecid_subj)(struct task_struct *, u32 *); + void (*task_getsecid_obj)(struct task_struct *, u32 *); + int (*task_setnice)(struct task_struct *, int); + int (*task_setioprio)(struct task_struct *, int); + int (*task_getioprio)(struct task_struct *); + int (*task_prlimit)(const struct cred *, const struct cred *, unsigned int); + int (*task_setrlimit)(struct task_struct *, unsigned int, struct rlimit *); + int (*task_setscheduler)(struct task_struct *); + int (*task_getscheduler)(struct task_struct *); + int (*task_movememory)(struct task_struct *); + int (*task_kill)(struct task_struct *, struct kernel_siginfo *, int, const struct cred *); + int (*task_prctl)(int, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + void (*task_to_inode)(struct task_struct *, struct inode *); + int (*ipc_permission)(struct kern_ipc_perm *, short int); + void (*ipc_getsecid)(struct kern_ipc_perm *, u32 *); + int (*msg_msg_alloc_security)(struct msg_msg *); + void (*msg_msg_free_security)(struct msg_msg *); + int (*msg_queue_alloc_security)(struct kern_ipc_perm *); + void (*msg_queue_free_security)(struct kern_ipc_perm *); + int (*msg_queue_associate)(struct kern_ipc_perm *, int); + int (*msg_queue_msgctl)(struct kern_ipc_perm *, int); + int (*msg_queue_msgsnd)(struct kern_ipc_perm *, struct msg_msg *, int); + int (*msg_queue_msgrcv)(struct kern_ipc_perm *, struct msg_msg *, struct task_struct *, long int, int); + int (*shm_alloc_security)(struct kern_ipc_perm *); + void (*shm_free_security)(struct kern_ipc_perm *); + int (*shm_associate)(struct kern_ipc_perm *, int); + int (*shm_shmctl)(struct kern_ipc_perm *, int); + int (*shm_shmat)(struct kern_ipc_perm *, char *, int); + int (*sem_alloc_security)(struct kern_ipc_perm *); + void (*sem_free_security)(struct kern_ipc_perm *); + int (*sem_associate)(struct kern_ipc_perm *, int); + int (*sem_semctl)(struct kern_ipc_perm *, int); + int (*sem_semop)(struct kern_ipc_perm *, struct sembuf *, unsigned int, int); + int (*netlink_send)(struct sock *, struct sk_buff *); + void (*d_instantiate)(struct dentry *, struct inode *); + int (*getprocattr)(struct task_struct *, char *, char **); + int (*setprocattr)(const char *, void *, size_t); + int (*ismaclabel)(const char *); + int (*secid_to_secctx)(u32, char **, u32 *); + int (*secctx_to_secid)(const char *, u32, u32 *); + void (*release_secctx)(char *, u32); + void (*inode_invalidate_secctx)(struct inode *); + int (*inode_notifysecctx)(struct inode *, void *, u32); + int (*inode_setsecctx)(struct dentry *, void *, u32); + int (*inode_getsecctx)(struct inode *, void **, u32 *); + int (*post_notification)(const struct cred *, const struct cred *, struct watch_notification *); + int (*watch_key)(struct key *); + int (*unix_stream_connect)(struct sock *, struct sock *, struct sock *); + int 
(*unix_may_send)(struct socket *, struct socket *); + int (*socket_create)(int, int, int, int); + int (*socket_post_create)(struct socket *, int, int, int, int); + int (*socket_socketpair)(struct socket *, struct socket *); + int (*socket_bind)(struct socket *, struct sockaddr *, int); + int (*socket_connect)(struct socket *, struct sockaddr *, int); + int (*socket_listen)(struct socket *, int); + int (*socket_accept)(struct socket *, struct socket *); + int (*socket_sendmsg)(struct socket *, struct msghdr *, int); + int (*socket_recvmsg)(struct socket *, struct msghdr *, int, int); + int (*socket_getsockname)(struct socket *); + int (*socket_getpeername)(struct socket *); + int (*socket_getsockopt)(struct socket *, int, int); + int (*socket_setsockopt)(struct socket *, int, int); + int (*socket_shutdown)(struct socket *, int); + int (*socket_sock_rcv_skb)(struct sock *, struct sk_buff *); + int (*socket_getpeersec_stream)(struct socket *, char *, int *, unsigned int); + int (*socket_getpeersec_dgram)(struct socket *, struct sk_buff *, u32 *); + int (*sk_alloc_security)(struct sock *, int, gfp_t); + void (*sk_free_security)(struct sock *); + void (*sk_clone_security)(const struct sock *, struct sock *); + void (*sk_getsecid)(struct sock *, u32 *); + void (*sock_graft)(struct sock *, struct socket *); + int (*inet_conn_request)(const struct sock *, struct sk_buff *, struct request_sock *); + void (*inet_csk_clone)(struct sock *, const struct request_sock *); + void (*inet_conn_established)(struct sock *, struct sk_buff *); + int (*secmark_relabel_packet)(u32); + void (*secmark_refcount_inc)(); + void (*secmark_refcount_dec)(); + void (*req_classify_flow)(const struct request_sock *, struct flowi_common *); + int (*tun_dev_alloc_security)(void **); + void (*tun_dev_free_security)(void *); + int (*tun_dev_create)(); + int (*tun_dev_attach_queue)(void *); + int (*tun_dev_attach)(struct sock *, void *); + int (*tun_dev_open)(void *); + int (*sctp_assoc_request)(struct sctp_endpoint *, struct sk_buff *); + int (*sctp_bind_connect)(struct sock *, int, struct sockaddr *, int); + void (*sctp_sk_clone)(struct sctp_endpoint *, struct sock *, struct sock *); + int (*ib_pkey_access)(void *, u64, u16); + int (*ib_endport_manage_subnet)(void *, const char *, u8); + int (*ib_alloc_security)(void **); + void (*ib_free_security)(void *); + int (*xfrm_policy_alloc_security)(struct xfrm_sec_ctx **, struct xfrm_user_sec_ctx *, gfp_t); + int (*xfrm_policy_clone_security)(struct xfrm_sec_ctx *, struct xfrm_sec_ctx **); + void (*xfrm_policy_free_security)(struct xfrm_sec_ctx *); + int (*xfrm_policy_delete_security)(struct xfrm_sec_ctx *); + int (*xfrm_state_alloc)(struct xfrm_state *, struct xfrm_user_sec_ctx *); + int (*xfrm_state_alloc_acquire)(struct xfrm_state *, struct xfrm_sec_ctx *, u32); + void (*xfrm_state_free_security)(struct xfrm_state *); + int (*xfrm_state_delete_security)(struct xfrm_state *); + int (*xfrm_policy_lookup)(struct xfrm_sec_ctx *, u32, u8); + int (*xfrm_state_pol_flow_match)(struct xfrm_state *, struct xfrm_policy *, const struct flowi_common *); + int (*xfrm_decode_session)(struct sk_buff *, u32 *, int); + int (*key_alloc)(struct key *, const struct cred *, long unsigned int); + void (*key_free)(struct key *); + int (*key_permission)(key_ref_t, const struct cred *, enum key_need_perm); + int (*key_getsecurity)(struct key *, char **); + int (*audit_rule_init)(u32, u32, char *, void **); + int (*audit_rule_known)(struct audit_krule *); + int (*audit_rule_match)(u32, u32, u32, void *); + 
void (*audit_rule_free)(void *); + int (*bpf)(int, union bpf_attr *, unsigned int); + int (*bpf_map)(struct bpf_map *, fmode_t); + int (*bpf_prog)(struct bpf_prog *); + int (*bpf_map_alloc_security)(struct bpf_map *); + void (*bpf_map_free_security)(struct bpf_map *); + int (*bpf_prog_alloc_security)(struct bpf_prog_aux *); + void (*bpf_prog_free_security)(struct bpf_prog_aux *); + int (*locked_down)(enum lockdown_reason); + int (*lock_kernel_down)(const char *, enum lockdown_reason); + int (*perf_event_open)(struct perf_event_attr *, int); + int (*perf_event_alloc)(struct perf_event *); + void (*perf_event_free)(struct perf_event *); + int (*perf_event_read)(struct perf_event *); + int (*perf_event_write)(struct perf_event *); +}; + +struct security_hook_heads { + struct hlist_head binder_set_context_mgr; + struct hlist_head binder_transaction; + struct hlist_head binder_transfer_binder; + struct hlist_head binder_transfer_file; + struct hlist_head ptrace_access_check; + struct hlist_head ptrace_traceme; + struct hlist_head capget; + struct hlist_head capset; + struct hlist_head capable; + struct hlist_head quotactl; + struct hlist_head quota_on; + struct hlist_head syslog; + struct hlist_head settime; + struct hlist_head vm_enough_memory; + struct hlist_head bprm_creds_for_exec; + struct hlist_head bprm_creds_from_file; + struct hlist_head bprm_check_security; + struct hlist_head bprm_committing_creds; + struct hlist_head bprm_committed_creds; + struct hlist_head fs_context_dup; + struct hlist_head fs_context_parse_param; + struct hlist_head sb_alloc_security; + struct hlist_head sb_delete; + struct hlist_head sb_free_security; + struct hlist_head sb_free_mnt_opts; + struct hlist_head sb_eat_lsm_opts; + struct hlist_head sb_mnt_opts_compat; + struct hlist_head sb_remount; + struct hlist_head sb_kern_mount; + struct hlist_head sb_show_options; + struct hlist_head sb_statfs; + struct hlist_head sb_mount; + struct hlist_head sb_umount; + struct hlist_head sb_pivotroot; + struct hlist_head sb_set_mnt_opts; + struct hlist_head sb_clone_mnt_opts; + struct hlist_head sb_add_mnt_opt; + struct hlist_head move_mount; + struct hlist_head dentry_init_security; + struct hlist_head dentry_create_files_as; + struct hlist_head path_unlink; + struct hlist_head path_mkdir; + struct hlist_head path_rmdir; + struct hlist_head path_mknod; + struct hlist_head path_truncate; + struct hlist_head path_symlink; + struct hlist_head path_link; + struct hlist_head path_rename; + struct hlist_head path_chmod; + struct hlist_head path_chown; + struct hlist_head path_chroot; + struct hlist_head path_notify; + struct hlist_head inode_alloc_security; + struct hlist_head inode_free_security; + struct hlist_head inode_init_security; + struct hlist_head inode_init_security_anon; + struct hlist_head inode_create; + struct hlist_head inode_link; + struct hlist_head inode_unlink; + struct hlist_head inode_symlink; + struct hlist_head inode_mkdir; + struct hlist_head inode_rmdir; + struct hlist_head inode_mknod; + struct hlist_head inode_rename; + struct hlist_head inode_readlink; + struct hlist_head inode_follow_link; + struct hlist_head inode_permission; + struct hlist_head inode_setattr; + struct hlist_head inode_getattr; + struct hlist_head inode_setxattr; + struct hlist_head inode_post_setxattr; + struct hlist_head inode_getxattr; + struct hlist_head inode_listxattr; + struct hlist_head inode_removexattr; + struct hlist_head inode_need_killpriv; + struct hlist_head inode_killpriv; + struct hlist_head inode_getsecurity; + 
struct hlist_head inode_setsecurity; + struct hlist_head inode_listsecurity; + struct hlist_head inode_getsecid; + struct hlist_head inode_copy_up; + struct hlist_head inode_copy_up_xattr; + struct hlist_head kernfs_init_security; + struct hlist_head file_permission; + struct hlist_head file_alloc_security; + struct hlist_head file_free_security; + struct hlist_head file_ioctl; + struct hlist_head mmap_addr; + struct hlist_head mmap_file; + struct hlist_head file_mprotect; + struct hlist_head file_lock; + struct hlist_head file_fcntl; + struct hlist_head file_set_fowner; + struct hlist_head file_send_sigiotask; + struct hlist_head file_receive; + struct hlist_head file_open; + struct hlist_head task_alloc; + struct hlist_head task_free; + struct hlist_head cred_alloc_blank; + struct hlist_head cred_free; + struct hlist_head cred_prepare; + struct hlist_head cred_transfer; + struct hlist_head cred_getsecid; + struct hlist_head kernel_act_as; + struct hlist_head kernel_create_files_as; + struct hlist_head kernel_module_request; + struct hlist_head kernel_load_data; + struct hlist_head kernel_post_load_data; + struct hlist_head kernel_read_file; + struct hlist_head kernel_post_read_file; + struct hlist_head task_fix_setuid; + struct hlist_head task_fix_setgid; + struct hlist_head task_setpgid; + struct hlist_head task_getpgid; + struct hlist_head task_getsid; + struct hlist_head task_getsecid_subj; + struct hlist_head task_getsecid_obj; + struct hlist_head task_setnice; + struct hlist_head task_setioprio; + struct hlist_head task_getioprio; + struct hlist_head task_prlimit; + struct hlist_head task_setrlimit; + struct hlist_head task_setscheduler; + struct hlist_head task_getscheduler; + struct hlist_head task_movememory; + struct hlist_head task_kill; + struct hlist_head task_prctl; + struct hlist_head task_to_inode; + struct hlist_head ipc_permission; + struct hlist_head ipc_getsecid; + struct hlist_head msg_msg_alloc_security; + struct hlist_head msg_msg_free_security; + struct hlist_head msg_queue_alloc_security; + struct hlist_head msg_queue_free_security; + struct hlist_head msg_queue_associate; + struct hlist_head msg_queue_msgctl; + struct hlist_head msg_queue_msgsnd; + struct hlist_head msg_queue_msgrcv; + struct hlist_head shm_alloc_security; + struct hlist_head shm_free_security; + struct hlist_head shm_associate; + struct hlist_head shm_shmctl; + struct hlist_head shm_shmat; + struct hlist_head sem_alloc_security; + struct hlist_head sem_free_security; + struct hlist_head sem_associate; + struct hlist_head sem_semctl; + struct hlist_head sem_semop; + struct hlist_head netlink_send; + struct hlist_head d_instantiate; + struct hlist_head getprocattr; + struct hlist_head setprocattr; + struct hlist_head ismaclabel; + struct hlist_head secid_to_secctx; + struct hlist_head secctx_to_secid; + struct hlist_head release_secctx; + struct hlist_head inode_invalidate_secctx; + struct hlist_head inode_notifysecctx; + struct hlist_head inode_setsecctx; + struct hlist_head inode_getsecctx; + struct hlist_head post_notification; + struct hlist_head watch_key; + struct hlist_head unix_stream_connect; + struct hlist_head unix_may_send; + struct hlist_head socket_create; + struct hlist_head socket_post_create; + struct hlist_head socket_socketpair; + struct hlist_head socket_bind; + struct hlist_head socket_connect; + struct hlist_head socket_listen; + struct hlist_head socket_accept; + struct hlist_head socket_sendmsg; + struct hlist_head socket_recvmsg; + struct hlist_head socket_getsockname; + 
struct hlist_head socket_getpeername; + struct hlist_head socket_getsockopt; + struct hlist_head socket_setsockopt; + struct hlist_head socket_shutdown; + struct hlist_head socket_sock_rcv_skb; + struct hlist_head socket_getpeersec_stream; + struct hlist_head socket_getpeersec_dgram; + struct hlist_head sk_alloc_security; + struct hlist_head sk_free_security; + struct hlist_head sk_clone_security; + struct hlist_head sk_getsecid; + struct hlist_head sock_graft; + struct hlist_head inet_conn_request; + struct hlist_head inet_csk_clone; + struct hlist_head inet_conn_established; + struct hlist_head secmark_relabel_packet; + struct hlist_head secmark_refcount_inc; + struct hlist_head secmark_refcount_dec; + struct hlist_head req_classify_flow; + struct hlist_head tun_dev_alloc_security; + struct hlist_head tun_dev_free_security; + struct hlist_head tun_dev_create; + struct hlist_head tun_dev_attach_queue; + struct hlist_head tun_dev_attach; + struct hlist_head tun_dev_open; + struct hlist_head sctp_assoc_request; + struct hlist_head sctp_bind_connect; + struct hlist_head sctp_sk_clone; + struct hlist_head ib_pkey_access; + struct hlist_head ib_endport_manage_subnet; + struct hlist_head ib_alloc_security; + struct hlist_head ib_free_security; + struct hlist_head xfrm_policy_alloc_security; + struct hlist_head xfrm_policy_clone_security; + struct hlist_head xfrm_policy_free_security; + struct hlist_head xfrm_policy_delete_security; + struct hlist_head xfrm_state_alloc; + struct hlist_head xfrm_state_alloc_acquire; + struct hlist_head xfrm_state_free_security; + struct hlist_head xfrm_state_delete_security; + struct hlist_head xfrm_policy_lookup; + struct hlist_head xfrm_state_pol_flow_match; + struct hlist_head xfrm_decode_session; + struct hlist_head key_alloc; + struct hlist_head key_free; + struct hlist_head key_permission; + struct hlist_head key_getsecurity; + struct hlist_head audit_rule_init; + struct hlist_head audit_rule_known; + struct hlist_head audit_rule_match; + struct hlist_head audit_rule_free; + struct hlist_head bpf; + struct hlist_head bpf_map; + struct hlist_head bpf_prog; + struct hlist_head bpf_map_alloc_security; + struct hlist_head bpf_map_free_security; + struct hlist_head bpf_prog_alloc_security; + struct hlist_head bpf_prog_free_security; + struct hlist_head locked_down; + struct hlist_head lock_kernel_down; + struct hlist_head perf_event_open; + struct hlist_head perf_event_alloc; + struct hlist_head perf_event_free; + struct hlist_head perf_event_read; + struct hlist_head perf_event_write; +}; + +struct lsm_id { + const char *lsm; + int slot; +}; + +struct security_hook_list { + struct hlist_node list; + struct hlist_head *head; + union security_list_options hook; + struct lsm_id *lsmid; +}; + +enum lsm_order { + LSM_ORDER_FIRST = 4294967295, + LSM_ORDER_MUTABLE = 0, +}; + +struct lsm_info { + const char *name; + enum lsm_order order; + long unsigned int flags; + int *enabled; + int (*init)(); + struct lsm_blob_sizes *blobs; +}; + +enum lsm_event { + LSM_POLICY_CHANGE = 0, +}; + +struct ethhdr { + unsigned char h_dest[6]; + unsigned char h_source[6]; + __be16 h_proto; +}; + +struct ethtool_drvinfo { + __u32 cmd; + char driver[32]; + char version[32]; + char fw_version[32]; + char bus_info[32]; + char erom_version[32]; + char reserved2[12]; + __u32 n_priv_flags; + __u32 n_stats; + __u32 testinfo_len; + __u32 eedump_len; + __u32 regdump_len; +}; + +struct ethtool_wolinfo { + __u32 cmd; + __u32 supported; + __u32 wolopts; + __u8 sopass[6]; +}; + +struct ethtool_tunable 
{ + __u32 cmd; + __u32 id; + __u32 type_id; + __u32 len; + void *data[0]; +}; + +struct ethtool_regs { + __u32 cmd; + __u32 version; + __u32 len; + __u8 data[0]; +}; + +struct ethtool_eeprom { + __u32 cmd; + __u32 magic; + __u32 offset; + __u32 len; + __u8 data[0]; +}; + +struct ethtool_eee { + __u32 cmd; + __u32 supported; + __u32 advertised; + __u32 lp_advertised; + __u32 eee_active; + __u32 eee_enabled; + __u32 tx_lpi_enabled; + __u32 tx_lpi_timer; + __u32 reserved[2]; +}; + +struct ethtool_modinfo { + __u32 cmd; + __u32 type; + __u32 eeprom_len; + __u32 reserved[8]; +}; + +struct ethtool_coalesce { + __u32 cmd; + __u32 rx_coalesce_usecs; + __u32 rx_max_coalesced_frames; + __u32 rx_coalesce_usecs_irq; + __u32 rx_max_coalesced_frames_irq; + __u32 tx_coalesce_usecs; + __u32 tx_max_coalesced_frames; + __u32 tx_coalesce_usecs_irq; + __u32 tx_max_coalesced_frames_irq; + __u32 stats_block_coalesce_usecs; + __u32 use_adaptive_rx_coalesce; + __u32 use_adaptive_tx_coalesce; + __u32 pkt_rate_low; + __u32 rx_coalesce_usecs_low; + __u32 rx_max_coalesced_frames_low; + __u32 tx_coalesce_usecs_low; + __u32 tx_max_coalesced_frames_low; + __u32 pkt_rate_high; + __u32 rx_coalesce_usecs_high; + __u32 rx_max_coalesced_frames_high; + __u32 tx_coalesce_usecs_high; + __u32 tx_max_coalesced_frames_high; + __u32 rate_sample_interval; +}; + +struct ethtool_ringparam { + __u32 cmd; + __u32 rx_max_pending; + __u32 rx_mini_max_pending; + __u32 rx_jumbo_max_pending; + __u32 tx_max_pending; + __u32 rx_pending; + __u32 rx_mini_pending; + __u32 rx_jumbo_pending; + __u32 tx_pending; +}; + +struct ethtool_channels { + __u32 cmd; + __u32 max_rx; + __u32 max_tx; + __u32 max_other; + __u32 max_combined; + __u32 rx_count; + __u32 tx_count; + __u32 other_count; + __u32 combined_count; +}; + +struct ethtool_pauseparam { + __u32 cmd; + __u32 autoneg; + __u32 rx_pause; + __u32 tx_pause; +}; + +enum ethtool_link_ext_state { + ETHTOOL_LINK_EXT_STATE_AUTONEG = 0, + ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE = 1, + ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH = 2, + ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY = 3, + ETHTOOL_LINK_EXT_STATE_NO_CABLE = 4, + ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE = 5, + ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE = 6, + ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE = 7, + ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED = 8, + ETHTOOL_LINK_EXT_STATE_OVERHEAT = 9, +}; + +enum ethtool_link_ext_substate_autoneg { + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED = 1, + ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED = 2, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NEXT_PAGE_EXCHANGE_FAILED = 3, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED_FORCE_MODE = 4, + ETHTOOL_LINK_EXT_SUBSTATE_AN_FEC_MISMATCH_DURING_OVERRIDE = 5, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD = 6, +}; + +enum ethtool_link_ext_substate_link_training { + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED = 1, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT = 2, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY = 3, + ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT = 4, +}; + +enum ethtool_link_ext_substate_link_logical_mismatch { + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK = 1, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK = 2, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_GET_ALIGN_STATUS = 3, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED = 4, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED = 5, +}; + +enum ethtool_link_ext_substate_bad_signal_integrity { + 
ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS = 1, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE = 2, +}; + +enum ethtool_link_ext_substate_cable_issue { + ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE = 1, + ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE = 2, +}; + +struct ethtool_test { + __u32 cmd; + __u32 flags; + __u32 reserved; + __u32 len; + __u64 data[0]; +}; + +struct ethtool_stats { + __u32 cmd; + __u32 n_stats; + __u64 data[0]; +}; + +struct ethtool_tcpip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be16 psrc; + __be16 pdst; + __u8 tos; +}; + +struct ethtool_ah_espip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be32 spi; + __u8 tos; +}; + +struct ethtool_usrip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be32 l4_4_bytes; + __u8 tos; + __u8 ip_ver; + __u8 proto; +}; + +struct ethtool_tcpip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be16 psrc; + __be16 pdst; + __u8 tclass; +}; + +struct ethtool_ah_espip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be32 spi; + __u8 tclass; +}; + +struct ethtool_usrip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be32 l4_4_bytes; + __u8 tclass; + __u8 l4_proto; +}; + +union ethtool_flow_union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_tcpip4_spec udp_ip4_spec; + struct ethtool_tcpip4_spec sctp_ip4_spec; + struct ethtool_ah_espip4_spec ah_ip4_spec; + struct ethtool_ah_espip4_spec esp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + struct ethtool_tcpip6_spec tcp_ip6_spec; + struct ethtool_tcpip6_spec udp_ip6_spec; + struct ethtool_tcpip6_spec sctp_ip6_spec; + struct ethtool_ah_espip6_spec ah_ip6_spec; + struct ethtool_ah_espip6_spec esp_ip6_spec; + struct ethtool_usrip6_spec usr_ip6_spec; + struct ethhdr ether_spec; + __u8 hdata[52]; +}; + +struct ethtool_flow_ext { + __u8 padding[2]; + unsigned char h_dest[6]; + __be16 vlan_etype; + __be16 vlan_tci; + __be32 data[2]; +}; + +struct ethtool_rx_flow_spec { + __u32 flow_type; + union ethtool_flow_union h_u; + struct ethtool_flow_ext h_ext; + union ethtool_flow_union m_u; + struct ethtool_flow_ext m_ext; + __u64 ring_cookie; + __u32 location; +}; + +struct ethtool_rxnfc { + __u32 cmd; + __u32 flow_type; + __u64 data; + struct ethtool_rx_flow_spec fs; + union { + __u32 rule_cnt; + __u32 rss_context; + }; + __u32 rule_locs[0]; +}; + +struct ethtool_flash { + __u32 cmd; + __u32 region; + char data[128]; +}; + +struct ethtool_dump { + __u32 cmd; + __u32 version; + __u32 flag; + __u32 len; + __u8 data[0]; +}; + +struct ethtool_ts_info { + __u32 cmd; + __u32 so_timestamping; + __s32 phc_index; + __u32 tx_types; + __u32 tx_reserved[3]; + __u32 rx_filters; + __u32 rx_reserved[3]; +}; + +struct ethtool_fecparam { + __u32 cmd; + __u32 active_fec; + __u32 fec; + __u32 reserved; +}; + +enum ethtool_link_mode_bit_indices { + ETHTOOL_LINK_MODE_10baseT_Half_BIT = 0, + ETHTOOL_LINK_MODE_10baseT_Full_BIT = 1, + ETHTOOL_LINK_MODE_100baseT_Half_BIT = 2, + ETHTOOL_LINK_MODE_100baseT_Full_BIT = 3, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT = 4, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT = 5, + ETHTOOL_LINK_MODE_Autoneg_BIT = 6, + ETHTOOL_LINK_MODE_TP_BIT = 7, + ETHTOOL_LINK_MODE_AUI_BIT = 8, + ETHTOOL_LINK_MODE_MII_BIT = 9, + ETHTOOL_LINK_MODE_FIBRE_BIT = 10, + ETHTOOL_LINK_MODE_BNC_BIT = 11, + ETHTOOL_LINK_MODE_10000baseT_Full_BIT = 12, + ETHTOOL_LINK_MODE_Pause_BIT = 13, + ETHTOOL_LINK_MODE_Asym_Pause_BIT = 14, + ETHTOOL_LINK_MODE_2500baseX_Full_BIT = 15, + ETHTOOL_LINK_MODE_Backplane_BIT = 16, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT = 17, + 
ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT = 18, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT = 19, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT = 20, + ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT = 21, + ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT = 22, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT = 23, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT = 24, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT = 25, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT = 26, + ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT = 27, + ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28, + ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29, + ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30, + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT = 31, + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT = 32, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT = 33, + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT = 34, + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT = 35, + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT = 36, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT = 40, + ETHTOOL_LINK_MODE_1000baseX_Full_BIT = 41, + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT = 42, + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT = 43, + ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44, + ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45, + ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46, + ETHTOOL_LINK_MODE_2500baseT_Full_BIT = 47, + ETHTOOL_LINK_MODE_5000baseT_Full_BIT = 48, + ETHTOOL_LINK_MODE_FEC_NONE_BIT = 49, + ETHTOOL_LINK_MODE_FEC_RS_BIT = 50, + ETHTOOL_LINK_MODE_FEC_BASER_BIT = 51, + ETHTOOL_LINK_MODE_50000baseKR_Full_BIT = 52, + ETHTOOL_LINK_MODE_50000baseSR_Full_BIT = 53, + ETHTOOL_LINK_MODE_50000baseCR_Full_BIT = 54, + ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT = 55, + ETHTOOL_LINK_MODE_50000baseDR_Full_BIT = 56, + ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT = 57, + ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT = 58, + ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT = 59, + ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT = 60, + ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT = 61, + ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT = 62, + ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT = 63, + ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT = 64, + ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT = 65, + ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT = 66, + ETHTOOL_LINK_MODE_100baseT1_Full_BIT = 67, + ETHTOOL_LINK_MODE_1000baseT1_Full_BIT = 68, + ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT = 69, + ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT = 70, + ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT = 71, + ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT = 72, + ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT = 73, + ETHTOOL_LINK_MODE_FEC_LLRS_BIT = 74, + ETHTOOL_LINK_MODE_100000baseKR_Full_BIT = 75, + ETHTOOL_LINK_MODE_100000baseSR_Full_BIT = 76, + ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT = 77, + ETHTOOL_LINK_MODE_100000baseCR_Full_BIT = 78, + ETHTOOL_LINK_MODE_100000baseDR_Full_BIT = 79, + ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT = 80, + ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT = 81, + ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT = 82, + ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT = 83, + ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT = 84, + ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT = 85, + ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT = 86, + ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT = 87, + ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT = 88, + ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT = 89, + ETHTOOL_LINK_MODE_100baseFX_Half_BIT = 90, + 
ETHTOOL_LINK_MODE_100baseFX_Full_BIT = 91, + __ETHTOOL_LINK_MODE_MASK_NBITS = 92, +}; + +struct ethtool_link_settings { + __u32 cmd; + __u32 speed; + __u8 duplex; + __u8 port; + __u8 phy_address; + __u8 autoneg; + __u8 mdio_support; + __u8 eth_tp_mdix; + __u8 eth_tp_mdix_ctrl; + __s8 link_mode_masks_nwords; + __u8 transceiver; + __u8 master_slave_cfg; + __u8 master_slave_state; + __u8 reserved1[1]; + __u32 reserved[7]; + __u32 link_mode_masks[0]; +}; + +struct ethtool_link_ext_state_info { + enum ethtool_link_ext_state link_ext_state; + union { + enum ethtool_link_ext_substate_autoneg autoneg; + enum ethtool_link_ext_substate_link_training link_training; + enum ethtool_link_ext_substate_link_logical_mismatch link_logical_mismatch; + enum ethtool_link_ext_substate_bad_signal_integrity bad_signal_integrity; + enum ethtool_link_ext_substate_cable_issue cable_issue; + u8 __link_ext_substate; + }; +}; + +struct ethtool_link_ksettings { + struct ethtool_link_settings base; + struct { + long unsigned int supported[2]; + long unsigned int advertising[2]; + long unsigned int lp_advertising[2]; + } link_modes; + u32 lanes; +}; + +struct ethtool_eth_mac_stats { + u64 FramesTransmittedOK; + u64 SingleCollisionFrames; + u64 MultipleCollisionFrames; + u64 FramesReceivedOK; + u64 FrameCheckSequenceErrors; + u64 AlignmentErrors; + u64 OctetsTransmittedOK; + u64 FramesWithDeferredXmissions; + u64 LateCollisions; + u64 FramesAbortedDueToXSColls; + u64 FramesLostDueToIntMACXmitError; + u64 CarrierSenseErrors; + u64 OctetsReceivedOK; + u64 FramesLostDueToIntMACRcvError; + u64 MulticastFramesXmittedOK; + u64 BroadcastFramesXmittedOK; + u64 FramesWithExcessiveDeferral; + u64 MulticastFramesReceivedOK; + u64 BroadcastFramesReceivedOK; + u64 InRangeLengthErrors; + u64 OutOfRangeLengthField; + u64 FrameTooLongErrors; +}; + +struct ethtool_eth_phy_stats { + u64 SymbolErrorDuringCarrier; +}; + +struct ethtool_eth_ctrl_stats { + u64 MACControlFramesTransmitted; + u64 MACControlFramesReceived; + u64 UnsupportedOpcodesReceived; +}; + +struct ethtool_pause_stats { + u64 tx_pause_frames; + u64 rx_pause_frames; +}; + +struct ethtool_fec_stat { + u64 total; + u64 lanes[8]; +}; + +struct ethtool_fec_stats { + struct ethtool_fec_stat corrected_blocks; + struct ethtool_fec_stat uncorrectable_blocks; + struct ethtool_fec_stat corrected_bits; +}; + +struct ethtool_rmon_hist_range { + u16 low; + u16 high; +}; + +struct ethtool_rmon_stats { + u64 undersize_pkts; + u64 oversize_pkts; + u64 fragments; + u64 jabbers; + u64 hist[10]; + u64 hist_tx[10]; +}; + +struct ethtool_module_eeprom { + __u32 offset; + __u32 length; + __u8 page; + __u8 bank; + __u8 i2c_address; + __u8 *data; +}; + +enum ib_uverbs_write_cmds { + IB_USER_VERBS_CMD_GET_CONTEXT = 0, + IB_USER_VERBS_CMD_QUERY_DEVICE = 1, + IB_USER_VERBS_CMD_QUERY_PORT = 2, + IB_USER_VERBS_CMD_ALLOC_PD = 3, + IB_USER_VERBS_CMD_DEALLOC_PD = 4, + IB_USER_VERBS_CMD_CREATE_AH = 5, + IB_USER_VERBS_CMD_MODIFY_AH = 6, + IB_USER_VERBS_CMD_QUERY_AH = 7, + IB_USER_VERBS_CMD_DESTROY_AH = 8, + IB_USER_VERBS_CMD_REG_MR = 9, + IB_USER_VERBS_CMD_REG_SMR = 10, + IB_USER_VERBS_CMD_REREG_MR = 11, + IB_USER_VERBS_CMD_QUERY_MR = 12, + IB_USER_VERBS_CMD_DEREG_MR = 13, + IB_USER_VERBS_CMD_ALLOC_MW = 14, + IB_USER_VERBS_CMD_BIND_MW = 15, + IB_USER_VERBS_CMD_DEALLOC_MW = 16, + IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL = 17, + IB_USER_VERBS_CMD_CREATE_CQ = 18, + IB_USER_VERBS_CMD_RESIZE_CQ = 19, + IB_USER_VERBS_CMD_DESTROY_CQ = 20, + IB_USER_VERBS_CMD_POLL_CQ = 21, + IB_USER_VERBS_CMD_PEEK_CQ = 22, + 
IB_USER_VERBS_CMD_REQ_NOTIFY_CQ = 23, + IB_USER_VERBS_CMD_CREATE_QP = 24, + IB_USER_VERBS_CMD_QUERY_QP = 25, + IB_USER_VERBS_CMD_MODIFY_QP = 26, + IB_USER_VERBS_CMD_DESTROY_QP = 27, + IB_USER_VERBS_CMD_POST_SEND = 28, + IB_USER_VERBS_CMD_POST_RECV = 29, + IB_USER_VERBS_CMD_ATTACH_MCAST = 30, + IB_USER_VERBS_CMD_DETACH_MCAST = 31, + IB_USER_VERBS_CMD_CREATE_SRQ = 32, + IB_USER_VERBS_CMD_MODIFY_SRQ = 33, + IB_USER_VERBS_CMD_QUERY_SRQ = 34, + IB_USER_VERBS_CMD_DESTROY_SRQ = 35, + IB_USER_VERBS_CMD_POST_SRQ_RECV = 36, + IB_USER_VERBS_CMD_OPEN_XRCD = 37, + IB_USER_VERBS_CMD_CLOSE_XRCD = 38, + IB_USER_VERBS_CMD_CREATE_XSRQ = 39, + IB_USER_VERBS_CMD_OPEN_QP = 40, +}; + +enum ib_uverbs_wc_opcode { + IB_UVERBS_WC_SEND = 0, + IB_UVERBS_WC_RDMA_WRITE = 1, + IB_UVERBS_WC_RDMA_READ = 2, + IB_UVERBS_WC_COMP_SWAP = 3, + IB_UVERBS_WC_FETCH_ADD = 4, + IB_UVERBS_WC_BIND_MW = 5, + IB_UVERBS_WC_LOCAL_INV = 6, + IB_UVERBS_WC_TSO = 7, +}; + +enum ib_uverbs_create_qp_mask { + IB_UVERBS_CREATE_QP_MASK_IND_TABLE = 1, +}; + +enum ib_uverbs_wr_opcode { + IB_UVERBS_WR_RDMA_WRITE = 0, + IB_UVERBS_WR_RDMA_WRITE_WITH_IMM = 1, + IB_UVERBS_WR_SEND = 2, + IB_UVERBS_WR_SEND_WITH_IMM = 3, + IB_UVERBS_WR_RDMA_READ = 4, + IB_UVERBS_WR_ATOMIC_CMP_AND_SWP = 5, + IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD = 6, + IB_UVERBS_WR_LOCAL_INV = 7, + IB_UVERBS_WR_BIND_MW = 8, + IB_UVERBS_WR_SEND_WITH_INV = 9, + IB_UVERBS_WR_TSO = 10, + IB_UVERBS_WR_RDMA_READ_WITH_INV = 11, + IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP = 12, + IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD = 13, +}; + +enum ib_uverbs_access_flags { + IB_UVERBS_ACCESS_LOCAL_WRITE = 1, + IB_UVERBS_ACCESS_REMOTE_WRITE = 2, + IB_UVERBS_ACCESS_REMOTE_READ = 4, + IB_UVERBS_ACCESS_REMOTE_ATOMIC = 8, + IB_UVERBS_ACCESS_MW_BIND = 16, + IB_UVERBS_ACCESS_ZERO_BASED = 32, + IB_UVERBS_ACCESS_ON_DEMAND = 64, + IB_UVERBS_ACCESS_HUGETLB = 128, + IB_UVERBS_ACCESS_RELAXED_ORDERING = 1048576, + IB_UVERBS_ACCESS_OPTIONAL_RANGE = 1072693248, +}; + +enum ib_uverbs_srq_type { + IB_UVERBS_SRQT_BASIC = 0, + IB_UVERBS_SRQT_XRC = 1, + IB_UVERBS_SRQT_TM = 2, +}; + +enum ib_uverbs_wq_type { + IB_UVERBS_WQT_RQ = 0, +}; + +enum ib_uverbs_wq_flags { + IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING = 1, + IB_UVERBS_WQ_FLAGS_SCATTER_FCS = 2, + IB_UVERBS_WQ_FLAGS_DELAY_DROP = 4, + IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING = 8, +}; + +enum ib_uverbs_qp_type { + IB_UVERBS_QPT_RC = 2, + IB_UVERBS_QPT_UC = 3, + IB_UVERBS_QPT_UD = 4, + IB_UVERBS_QPT_RAW_PACKET = 8, + IB_UVERBS_QPT_XRC_INI = 9, + IB_UVERBS_QPT_XRC_TGT = 10, + IB_UVERBS_QPT_DRIVER = 255, +}; + +enum ib_uverbs_qp_create_flags { + IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 2, + IB_UVERBS_QP_CREATE_SCATTER_FCS = 256, + IB_UVERBS_QP_CREATE_CVLAN_STRIPPING = 512, + IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING = 2048, + IB_UVERBS_QP_CREATE_SQ_SIG_ALL = 4096, +}; + +enum ib_uverbs_gid_type { + IB_UVERBS_GID_TYPE_IB = 0, + IB_UVERBS_GID_TYPE_ROCE_V1 = 1, + IB_UVERBS_GID_TYPE_ROCE_V2 = 2, +}; + +enum ib_poll_context { + IB_POLL_SOFTIRQ = 0, + IB_POLL_WORKQUEUE = 1, + IB_POLL_UNBOUND_WORKQUEUE = 2, + IB_POLL_LAST_POOL_TYPE = 2, + IB_POLL_DIRECT = 3, +}; + +struct lsm_network_audit { + int netif; + const struct sock *sk; + u16 family; + __be16 dport; + __be16 sport; + union { + struct { + __be32 daddr; + __be32 saddr; + } v4; + struct { + struct in6_addr daddr; + struct in6_addr saddr; + } v6; + } fam; +}; + +struct lsm_ioctlop_audit { + struct path path; + u16 cmd; +}; + +struct lsm_ibpkey_audit { + u64 subnet_prefix; + u16 pkey; +}; + +struct lsm_ibendport_audit { + char dev_name[64]; + 
u8 port; +}; + +struct selinux_state; + +struct selinux_audit_data { + u32 ssid; + u32 tsid; + u16 tclass; + u32 requested; + u32 audited; + u32 denied; + int result; + struct selinux_state *state; +}; + +struct smack_audit_data; + +struct apparmor_audit_data; + +struct common_audit_data { + char type; + union { + struct path path; + struct dentry *dentry; + struct inode *inode; + struct lsm_network_audit *net; + int cap; + int ipc_id; + struct task_struct *tsk; + struct { + key_serial_t key; + char *key_desc; + } key_struct; + char *kmod_name; + struct lsm_ioctlop_audit *op; + struct file *file; + struct lsm_ibpkey_audit *ibpkey; + struct lsm_ibendport_audit *ibendport; + int reason; + } u; + union { + struct smack_audit_data *smack_audit_data; + struct selinux_audit_data *selinux_audit_data; + struct apparmor_audit_data *apparmor_audit_data; + }; +}; + +enum { + POLICYDB_CAPABILITY_NETPEER = 0, + POLICYDB_CAPABILITY_OPENPERM = 1, + POLICYDB_CAPABILITY_EXTSOCKCLASS = 2, + POLICYDB_CAPABILITY_ALWAYSNETWORK = 3, + POLICYDB_CAPABILITY_CGROUPSECLABEL = 4, + POLICYDB_CAPABILITY_NNP_NOSUID_TRANSITION = 5, + POLICYDB_CAPABILITY_GENFS_SECLABEL_SYMLINKS = 6, + __POLICYDB_CAPABILITY_MAX = 7, +}; + +struct selinux_avc; + +struct selinux_policy; + +struct selinux_state { + bool enforcing; + bool checkreqprot; + bool initialized; + bool policycap[7]; + struct page *status_page; + struct mutex status_lock; + struct selinux_avc *avc; + struct selinux_policy *policy; + struct mutex policy_mutex; +}; + +struct avc_cache { + struct hlist_head slots[512]; + spinlock_t slots_lock[512]; + atomic_t lru_hint; + atomic_t active_nodes; + u32 latest_notif; +}; + +struct selinux_avc { + unsigned int avc_cache_threshold; + struct avc_cache avc_cache; +}; + +struct av_decision { + u32 allowed; + u32 auditallow; + u32 auditdeny; + u32 seqno; + u32 flags; +}; + +struct extended_perms_data { + u32 p[8]; +}; + +struct extended_perms_decision { + u8 used; + u8 driver; + struct extended_perms_data *allowed; + struct extended_perms_data *auditallow; + struct extended_perms_data *dontaudit; +}; + +struct extended_perms { + u16 len; + struct extended_perms_data drivers; +}; + +struct avc_cache_stats { + unsigned int lookups; + unsigned int misses; + unsigned int allocations; + unsigned int reclaims; + unsigned int frees; +}; + +struct security_class_mapping { + const char *name; + const char *perms[33]; +}; + +struct trace_event_raw_selinux_audited { + struct trace_entry ent; + u32 requested; + u32 denied; + u32 audited; + int result; + u32 __data_loc_scontext; + u32 __data_loc_tcontext; + u32 __data_loc_tclass; + char __data[0]; +}; + +struct trace_event_data_offsets_selinux_audited { + u32 scontext; + u32 tcontext; + u32 tclass; +}; + +typedef void (*btf_trace_selinux_audited)(void *, struct selinux_audit_data *, char *, char *, const char *); + +struct avc_xperms_node; + +struct avc_entry { + u32 ssid; + u32 tsid; + u16 tclass; + struct av_decision avd; + struct avc_xperms_node *xp_node; +}; + +struct avc_xperms_node { + struct extended_perms xp; + struct list_head xpd_head; +}; + +struct avc_node { + struct avc_entry ae; + struct hlist_node list; + struct callback_head rhead; +}; + +struct avc_xperms_decision_node { + struct extended_perms_decision xpd; + struct list_head xpd_list; +}; + +struct avc_callback_node { + int (*callback)(u32); + u32 events; + struct avc_callback_node *next; +}; + +typedef __u16 __sum16; + +enum sctp_endpoint_type { + SCTP_EP_TYPE_SOCKET = 0, + SCTP_EP_TYPE_ASSOCIATION = 1, +}; + +struct 
sctp_chunk; + +struct sctp_inq { + struct list_head in_chunk_list; + struct sctp_chunk *in_progress; + struct work_struct immediate; +}; + +struct sctp_bind_addr { + __u16 port; + struct list_head address_list; +}; + +struct sctp_ep_common { + struct hlist_node node; + int hashent; + enum sctp_endpoint_type type; + refcount_t refcnt; + bool dead; + struct sock *sk; + struct net *net; + struct sctp_inq inqueue; + struct sctp_bind_addr bind_addr; +}; + +struct sctp_hmac_algo_param; + +struct sctp_chunks_param; + +struct sctp_endpoint { + struct sctp_ep_common base; + struct list_head asocs; + __u8 secret_key[32]; + __u8 *digest; + __u32 sndbuf_policy; + __u32 rcvbuf_policy; + struct crypto_shash **auth_hmacs; + struct sctp_hmac_algo_param *auth_hmacs_list; + struct sctp_chunks_param *auth_chunk_list; + struct list_head endpoint_shared_keys; + __u16 active_key_id; + __u8 ecn_enable: 1; + __u8 auth_enable: 1; + __u8 intl_enable: 1; + __u8 prsctp_enable: 1; + __u8 asconf_enable: 1; + __u8 reconf_enable: 1; + __u8 strreset_enable; + u32 secid; + u32 peer_secid; +}; + +struct sockaddr_in6 { + short unsigned int sin6_family; + __be16 sin6_port; + __be32 sin6_flowinfo; + struct in6_addr sin6_addr; + __u32 sin6_scope_id; +}; + +struct in_addr { + __be32 s_addr; +}; + +struct sockaddr_in { + __kernel_sa_family_t sin_family; + __be16 sin_port; + struct in_addr sin_addr; + unsigned char __pad[8]; +}; + +struct nf_hook_state; + +typedef unsigned int nf_hookfn(void *, struct sk_buff *, const struct nf_hook_state *); + +struct nf_hook_entry { + nf_hookfn *hook; + void *priv; +}; + +struct nf_hook_entries { + u16 num_hook_entries; + struct nf_hook_entry hooks[0]; +}; + +struct nf_hook_state { + unsigned int hook; + u_int8_t pf; + struct net_device *in; + struct net_device *out; + struct sock *sk; + struct net *net; + int (*okfn)(struct net *, struct sock *, struct sk_buff *); +}; + +struct nf_hook_ops { + nf_hookfn *hook; + struct net_device *dev; + void *priv; + u_int8_t pf; + unsigned int hooknum; + int priority; +}; + +enum nf_ip_hook_priorities { + NF_IP_PRI_FIRST = 2147483648, + NF_IP_PRI_RAW_BEFORE_DEFRAG = 4294966846, + NF_IP_PRI_CONNTRACK_DEFRAG = 4294966896, + NF_IP_PRI_RAW = 4294966996, + NF_IP_PRI_SELINUX_FIRST = 4294967071, + NF_IP_PRI_CONNTRACK = 4294967096, + NF_IP_PRI_MANGLE = 4294967146, + NF_IP_PRI_NAT_DST = 4294967196, + NF_IP_PRI_FILTER = 0, + NF_IP_PRI_SECURITY = 50, + NF_IP_PRI_NAT_SRC = 100, + NF_IP_PRI_SELINUX_LAST = 225, + NF_IP_PRI_CONNTRACK_HELPER = 300, + NF_IP_PRI_CONNTRACK_CONFIRM = 2147483647, + NF_IP_PRI_LAST = 2147483647, +}; + +enum nf_ip6_hook_priorities { + NF_IP6_PRI_FIRST = 2147483648, + NF_IP6_PRI_RAW_BEFORE_DEFRAG = 4294966846, + NF_IP6_PRI_CONNTRACK_DEFRAG = 4294966896, + NF_IP6_PRI_RAW = 4294966996, + NF_IP6_PRI_SELINUX_FIRST = 4294967071, + NF_IP6_PRI_CONNTRACK = 4294967096, + NF_IP6_PRI_MANGLE = 4294967146, + NF_IP6_PRI_NAT_DST = 4294967196, + NF_IP6_PRI_FILTER = 0, + NF_IP6_PRI_SECURITY = 50, + NF_IP6_PRI_NAT_SRC = 100, + NF_IP6_PRI_SELINUX_LAST = 225, + NF_IP6_PRI_CONNTRACK_HELPER = 300, + NF_IP6_PRI_LAST = 2147483647, +}; + +struct socket_alloc { + struct socket socket; + struct inode vfs_inode; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct ip_options { + __be32 faddr; + __be32 nexthop; + unsigned char optlen; + unsigned char srr; + unsigned char rr; + unsigned char ts; + unsigned char is_strictroute: 1; + unsigned char srr_is_hit: 1; + unsigned char is_changed: 1; + unsigned char rr_needaddr: 1; + unsigned char 
ts_needtime: 1; + unsigned char ts_needaddr: 1; + unsigned char router_alert; + unsigned char cipso; + unsigned char __pad2; + unsigned char __data[0]; +}; + +struct ip_options_rcu { + struct callback_head rcu; + struct ip_options opt; +}; + +struct ipv6_opt_hdr; + +struct ipv6_rt_hdr; + +struct ipv6_txoptions { + refcount_t refcnt; + int tot_len; + __u16 opt_flen; + __u16 opt_nflen; + struct ipv6_opt_hdr *hopopt; + struct ipv6_opt_hdr *dst0opt; + struct ipv6_rt_hdr *srcrt; + struct ipv6_opt_hdr *dst1opt; + struct callback_head rcu; +}; + +struct inet_cork { + unsigned int flags; + __be32 addr; + struct ip_options *opt; + unsigned int fragsize; + int length; + struct dst_entry *dst; + u8 tx_flags; + __u8 ttl; + __s16 tos; + char priority; + __u16 gso_size; + u64 transmit_time; + u32 mark; +}; + +struct inet_cork_full { + struct inet_cork base; + struct flowi fl; +}; + +struct ipv6_pinfo; + +struct ip_mc_socklist; + +struct inet_sock { + struct sock sk; + struct ipv6_pinfo *pinet6; + __be32 inet_saddr; + __s16 uc_ttl; + __u16 cmsg_flags; + __be16 inet_sport; + __u16 inet_id; + struct ip_options_rcu *inet_opt; + int rx_dst_ifindex; + __u8 tos; + __u8 min_ttl; + __u8 mc_ttl; + __u8 pmtudisc; + __u8 recverr: 1; + __u8 is_icsk: 1; + __u8 freebind: 1; + __u8 hdrincl: 1; + __u8 mc_loop: 1; + __u8 transparent: 1; + __u8 mc_all: 1; + __u8 nodefrag: 1; + __u8 bind_address_no_port: 1; + __u8 recverr_rfc4884: 1; + __u8 defer_connect: 1; + __u8 rcv_tos; + __u8 convert_csum; + int uc_index; + int mc_index; + __be32 mc_addr; + struct ip_mc_socklist *mc_list; + struct inet_cork_full cork; +}; + +struct in6_pktinfo { + struct in6_addr ipi6_addr; + int ipi6_ifindex; +}; + +struct inet6_cork { + struct ipv6_txoptions *opt; + u8 hop_limit; + u8 tclass; +}; + +struct ipv6_mc_socklist; + +struct ipv6_ac_socklist; + +struct ipv6_fl_socklist; + +struct ipv6_pinfo { + struct in6_addr saddr; + struct in6_pktinfo sticky_pktinfo; + const struct in6_addr *daddr_cache; + const struct in6_addr *saddr_cache; + __be32 flow_label; + __u32 frag_size; + __u16 __unused_1: 7; + __s16 hop_limit: 9; + __u16 mc_loop: 1; + __u16 __unused_2: 6; + __s16 mcast_hops: 9; + int ucast_oif; + int mcast_oif; + union { + struct { + __u16 srcrt: 1; + __u16 osrcrt: 1; + __u16 rxinfo: 1; + __u16 rxoinfo: 1; + __u16 rxhlim: 1; + __u16 rxohlim: 1; + __u16 hopopts: 1; + __u16 ohopopts: 1; + __u16 dstopts: 1; + __u16 odstopts: 1; + __u16 rxflow: 1; + __u16 rxtclass: 1; + __u16 rxpmtu: 1; + __u16 rxorigdstaddr: 1; + __u16 recvfragsize: 1; + } bits; + __u16 all; + } rxopt; + __u16 recverr: 1; + __u16 sndflow: 1; + __u16 repflow: 1; + __u16 pmtudisc: 3; + __u16 padding: 1; + __u16 srcprefs: 3; + __u16 dontfrag: 1; + __u16 autoflowlabel: 1; + __u16 autoflowlabel_set: 1; + __u16 mc_all: 1; + __u16 recverr_rfc4884: 1; + __u16 rtalert_isolate: 1; + __u8 min_hopcount; + __u8 tclass; + __be32 rcv_flowinfo; + __u32 dst_cookie; + __u32 rx_dst_cookie; + struct ipv6_mc_socklist *ipv6_mc_list; + struct ipv6_ac_socklist *ipv6_ac_list; + struct ipv6_fl_socklist *ipv6_fl_list; + struct ipv6_txoptions *opt; + struct sk_buff *pktoptions; + struct sk_buff *rxpmtu; + struct inet6_cork cork; +}; + +struct tcphdr { + __be16 source; + __be16 dest; + __be32 seq; + __be32 ack_seq; + __u16 res1: 4; + __u16 doff: 4; + __u16 fin: 1; + __u16 syn: 1; + __u16 rst: 1; + __u16 psh: 1; + __u16 ack: 1; + __u16 urg: 1; + __u16 ece: 1; + __u16 cwr: 1; + __be16 window; + __sum16 check; + __be16 urg_ptr; +}; + +struct iphdr { + __u8 ihl: 4; + __u8 version: 4; + __u8 tos; + __be16 
tot_len; + __be16 id; + __be16 frag_off; + __u8 ttl; + __u8 protocol; + __sum16 check; + __be32 saddr; + __be32 daddr; +}; + +struct ipv6_rt_hdr { + __u8 nexthdr; + __u8 hdrlen; + __u8 type; + __u8 segments_left; +}; + +struct ipv6_opt_hdr { + __u8 nexthdr; + __u8 hdrlen; +}; + +struct ipv6hdr { + __u8 priority: 4; + __u8 version: 4; + __u8 flow_lbl[3]; + __be16 payload_len; + __u8 nexthdr; + __u8 hop_limit; + struct in6_addr saddr; + struct in6_addr daddr; +}; + +struct udphdr { + __be16 source; + __be16 dest; + __be16 len; + __sum16 check; +}; + +struct inet6_skb_parm { + int iif; + __be16 ra; + __u16 dst0; + __u16 srcrt; + __u16 dst1; + __u16 lastopt; + __u16 nhoff; + __u16 flags; + __u16 dsthao; + __u16 frag_max_size; +}; + +struct ip6_sf_socklist; + +struct ipv6_mc_socklist { + struct in6_addr addr; + int ifindex; + unsigned int sfmode; + struct ipv6_mc_socklist *next; + struct ip6_sf_socklist *sflist; + struct callback_head rcu; +}; + +struct ipv6_ac_socklist { + struct in6_addr acl_addr; + int acl_ifindex; + struct ipv6_ac_socklist *acl_next; +}; + +struct ip6_flowlabel; + +struct ipv6_fl_socklist { + struct ipv6_fl_socklist *next; + struct ip6_flowlabel *fl; + struct callback_head rcu; +}; + +struct ip6_sf_socklist { + unsigned int sl_max; + unsigned int sl_count; + struct callback_head rcu; + struct in6_addr sl_addr[0]; +}; + +struct ip6_flowlabel { + struct ip6_flowlabel *next; + __be32 label; + atomic_t users; + struct in6_addr dst; + struct ipv6_txoptions *opt; + long unsigned int linger; + struct callback_head rcu; + u8 share; + union { + struct pid *pid; + kuid_t uid; + } owner; + long unsigned int lastuse; + long unsigned int expires; + struct net *fl_net; +}; + +struct inet_skb_parm { + int iif; + struct ip_options opt; + u16 flags; + u16 frag_max_size; +}; + +struct tty_file_private { + struct tty_struct *tty; + struct file *file; + struct list_head list; +}; + +struct netlbl_lsm_cache { + refcount_t refcount; + void (*free)(const void *); + void *data; +}; + +struct netlbl_lsm_catmap { + u32 startbit; + u64 bitmap[4]; + struct netlbl_lsm_catmap *next; +}; + +struct netlbl_lsm_secattr { + u32 flags; + u32 type; + char *domain; + struct netlbl_lsm_cache *cache; + struct { + struct { + struct netlbl_lsm_catmap *cat; + u32 lvl; + } mls; + struct lsmblob lsmblob; + } attr; +}; + +struct dccp_hdr { + __be16 dccph_sport; + __be16 dccph_dport; + __u8 dccph_doff; + __u8 dccph_cscov: 4; + __u8 dccph_ccval: 4; + __sum16 dccph_checksum; + __u8 dccph_x: 1; + __u8 dccph_type: 4; + __u8 dccph_reserved: 3; + __u8 dccph_seq2; + __be16 dccph_seq; +}; + +enum dccp_state { + DCCP_OPEN = 1, + DCCP_REQUESTING = 2, + DCCP_LISTEN = 10, + DCCP_RESPOND = 3, + DCCP_ACTIVE_CLOSEREQ = 4, + DCCP_PASSIVE_CLOSE = 8, + DCCP_CLOSING = 11, + DCCP_TIME_WAIT = 6, + DCCP_CLOSED = 7, + DCCP_NEW_SYN_RECV = 12, + DCCP_PARTOPEN = 13, + DCCP_PASSIVE_CLOSEREQ = 14, + DCCP_MAX_STATES = 15, +}; + +typedef __s32 sctp_assoc_t; + +enum sctp_msg_flags { + MSG_NOTIFICATION = 32768, +}; + +struct sctp_initmsg { + __u16 sinit_num_ostreams; + __u16 sinit_max_instreams; + __u16 sinit_max_attempts; + __u16 sinit_max_init_timeo; +}; + +struct sctp_sndrcvinfo { + __u16 sinfo_stream; + __u16 sinfo_ssn; + __u16 sinfo_flags; + __u32 sinfo_ppid; + __u32 sinfo_context; + __u32 sinfo_timetolive; + __u32 sinfo_tsn; + __u32 sinfo_cumtsn; + sctp_assoc_t sinfo_assoc_id; +}; + +struct sctp_rtoinfo { + sctp_assoc_t srto_assoc_id; + __u32 srto_initial; + __u32 srto_max; + __u32 srto_min; +}; + +struct sctp_assocparams { + sctp_assoc_t 
sasoc_assoc_id; + __u16 sasoc_asocmaxrxt; + __u16 sasoc_number_peer_destinations; + __u32 sasoc_peer_rwnd; + __u32 sasoc_local_rwnd; + __u32 sasoc_cookie_life; +}; + +struct sctp_paddrparams { + sctp_assoc_t spp_assoc_id; + struct __kernel_sockaddr_storage spp_address; + __u32 spp_hbinterval; + __u16 spp_pathmaxrxt; + __u32 spp_pathmtu; + __u32 spp_sackdelay; + __u32 spp_flags; + __u32 spp_ipv6_flowlabel; + __u8 spp_dscp; + char: 8; +} __attribute__((packed)); + +struct sctphdr { + __be16 source; + __be16 dest; + __be32 vtag; + __le32 checksum; +}; + +struct sctp_chunkhdr { + __u8 type; + __u8 flags; + __be16 length; +}; + +enum sctp_cid { + SCTP_CID_DATA = 0, + SCTP_CID_INIT = 1, + SCTP_CID_INIT_ACK = 2, + SCTP_CID_SACK = 3, + SCTP_CID_HEARTBEAT = 4, + SCTP_CID_HEARTBEAT_ACK = 5, + SCTP_CID_ABORT = 6, + SCTP_CID_SHUTDOWN = 7, + SCTP_CID_SHUTDOWN_ACK = 8, + SCTP_CID_ERROR = 9, + SCTP_CID_COOKIE_ECHO = 10, + SCTP_CID_COOKIE_ACK = 11, + SCTP_CID_ECN_ECNE = 12, + SCTP_CID_ECN_CWR = 13, + SCTP_CID_SHUTDOWN_COMPLETE = 14, + SCTP_CID_AUTH = 15, + SCTP_CID_I_DATA = 64, + SCTP_CID_FWD_TSN = 192, + SCTP_CID_ASCONF = 193, + SCTP_CID_I_FWD_TSN = 194, + SCTP_CID_ASCONF_ACK = 128, + SCTP_CID_RECONF = 130, +}; + +struct sctp_paramhdr { + __be16 type; + __be16 length; +}; + +enum sctp_param { + SCTP_PARAM_HEARTBEAT_INFO = 256, + SCTP_PARAM_IPV4_ADDRESS = 1280, + SCTP_PARAM_IPV6_ADDRESS = 1536, + SCTP_PARAM_STATE_COOKIE = 1792, + SCTP_PARAM_UNRECOGNIZED_PARAMETERS = 2048, + SCTP_PARAM_COOKIE_PRESERVATIVE = 2304, + SCTP_PARAM_HOST_NAME_ADDRESS = 2816, + SCTP_PARAM_SUPPORTED_ADDRESS_TYPES = 3072, + SCTP_PARAM_ECN_CAPABLE = 128, + SCTP_PARAM_RANDOM = 640, + SCTP_PARAM_CHUNKS = 896, + SCTP_PARAM_HMAC_ALGO = 1152, + SCTP_PARAM_SUPPORTED_EXT = 2176, + SCTP_PARAM_FWD_TSN_SUPPORT = 192, + SCTP_PARAM_ADD_IP = 448, + SCTP_PARAM_DEL_IP = 704, + SCTP_PARAM_ERR_CAUSE = 960, + SCTP_PARAM_SET_PRIMARY = 1216, + SCTP_PARAM_SUCCESS_REPORT = 1472, + SCTP_PARAM_ADAPTATION_LAYER_IND = 1728, + SCTP_PARAM_RESET_OUT_REQUEST = 3328, + SCTP_PARAM_RESET_IN_REQUEST = 3584, + SCTP_PARAM_RESET_TSN_REQUEST = 3840, + SCTP_PARAM_RESET_RESPONSE = 4096, + SCTP_PARAM_RESET_ADD_OUT_STREAMS = 4352, + SCTP_PARAM_RESET_ADD_IN_STREAMS = 4608, +}; + +struct sctp_datahdr { + __be32 tsn; + __be16 stream; + __be16 ssn; + __u32 ppid; + __u8 payload[0]; +}; + +struct sctp_idatahdr { + __be32 tsn; + __be16 stream; + __be16 reserved; + __be32 mid; + union { + __u32 ppid; + __be32 fsn; + }; + __u8 payload[0]; +}; + +struct sctp_inithdr { + __be32 init_tag; + __be32 a_rwnd; + __be16 num_outbound_streams; + __be16 num_inbound_streams; + __be32 initial_tsn; + __u8 params[0]; +}; + +struct sctp_init_chunk { + struct sctp_chunkhdr chunk_hdr; + struct sctp_inithdr init_hdr; +}; + +struct sctp_ipv4addr_param { + struct sctp_paramhdr param_hdr; + struct in_addr addr; +}; + +struct sctp_ipv6addr_param { + struct sctp_paramhdr param_hdr; + struct in6_addr addr; +}; + +struct sctp_cookie_preserve_param { + struct sctp_paramhdr param_hdr; + __be32 lifespan_increment; +}; + +struct sctp_hostname_param { + struct sctp_paramhdr param_hdr; + uint8_t hostname[0]; +}; + +struct sctp_supported_addrs_param { + struct sctp_paramhdr param_hdr; + __be16 types[0]; +}; + +struct sctp_adaptation_ind_param { + struct sctp_paramhdr param_hdr; + __be32 adaptation_ind; +}; + +struct sctp_supported_ext_param { + struct sctp_paramhdr param_hdr; + __u8 chunks[0]; +}; + +struct sctp_random_param { + struct sctp_paramhdr param_hdr; + __u8 random_val[0]; +}; + +struct sctp_chunks_param { + 
struct sctp_paramhdr param_hdr; + __u8 chunks[0]; +}; + +struct sctp_hmac_algo_param { + struct sctp_paramhdr param_hdr; + __be16 hmac_ids[0]; +}; + +struct sctp_cookie_param { + struct sctp_paramhdr p; + __u8 body[0]; +}; + +struct sctp_gap_ack_block { + __be16 start; + __be16 end; +}; + +union sctp_sack_variable { + struct sctp_gap_ack_block gab; + __be32 dup; +}; + +struct sctp_sackhdr { + __be32 cum_tsn_ack; + __be32 a_rwnd; + __be16 num_gap_ack_blocks; + __be16 num_dup_tsns; + union sctp_sack_variable variable[0]; +}; + +struct sctp_heartbeathdr { + struct sctp_paramhdr info; +}; + +struct sctp_shutdownhdr { + __be32 cum_tsn_ack; +}; + +struct sctp_errhdr { + __be16 cause; + __be16 length; + __u8 variable[0]; +}; + +struct sctp_ecnehdr { + __be32 lowest_tsn; +}; + +struct sctp_cwrhdr { + __be32 lowest_tsn; +}; + +struct sctp_fwdtsn_skip { + __be16 stream; + __be16 ssn; +}; + +struct sctp_fwdtsn_hdr { + __be32 new_cum_tsn; + struct sctp_fwdtsn_skip skip[0]; +}; + +struct sctp_ifwdtsn_skip { + __be16 stream; + __u8 reserved; + __u8 flags; + __be32 mid; +}; + +struct sctp_ifwdtsn_hdr { + __be32 new_cum_tsn; + struct sctp_ifwdtsn_skip skip[0]; +}; + +struct sctp_addip_param { + struct sctp_paramhdr param_hdr; + __be32 crr_id; +}; + +struct sctp_addiphdr { + __be32 serial; + __u8 params[0]; +}; + +struct sctp_authhdr { + __be16 shkey_id; + __be16 hmac_id; + __u8 hmac[0]; +}; + +union sctp_addr { + struct sockaddr_in v4; + struct sockaddr_in6 v6; + struct sockaddr sa; +}; + +struct sctp_cookie { + __u32 my_vtag; + __u32 peer_vtag; + __u32 my_ttag; + __u32 peer_ttag; + ktime_t expiration; + __u16 sinit_num_ostreams; + __u16 sinit_max_instreams; + __u32 initial_tsn; + union sctp_addr peer_addr; + __u16 my_port; + __u8 prsctp_capable; + __u8 padding; + __u32 adaptation_ind; + __u8 auth_random[36]; + __u8 auth_hmacs[10]; + __u8 auth_chunks[20]; + __u32 raw_addr_list_len; + struct sctp_init_chunk peer_init[0]; +}; + +struct sctp_tsnmap { + long unsigned int *tsn_map; + __u32 base_tsn; + __u32 cumulative_tsn_ack_point; + __u32 max_tsn_seen; + __u16 len; + __u16 pending_data; + __u16 num_dup_tsns; + __be32 dup_tsns[16]; +}; + +struct sctp_inithdr_host { + __u32 init_tag; + __u32 a_rwnd; + __u16 num_outbound_streams; + __u16 num_inbound_streams; + __u32 initial_tsn; +}; + +enum sctp_state { + SCTP_STATE_CLOSED = 0, + SCTP_STATE_COOKIE_WAIT = 1, + SCTP_STATE_COOKIE_ECHOED = 2, + SCTP_STATE_ESTABLISHED = 3, + SCTP_STATE_SHUTDOWN_PENDING = 4, + SCTP_STATE_SHUTDOWN_SENT = 5, + SCTP_STATE_SHUTDOWN_RECEIVED = 6, + SCTP_STATE_SHUTDOWN_ACK_SENT = 7, +}; + +struct sctp_stream_out_ext; + +struct sctp_stream_out { + union { + __u32 mid; + __u16 ssn; + }; + __u32 mid_uo; + struct sctp_stream_out_ext *ext; + __u8 state; +}; + +struct sctp_stream_in { + union { + __u32 mid; + __u16 ssn; + }; + __u32 mid_uo; + __u32 fsn; + __u32 fsn_uo; + char pd_mode; + char pd_mode_uo; +}; + +struct sctp_stream_interleave; + +struct sctp_stream { + struct { + struct __genradix tree; + struct sctp_stream_out type[0]; + } out; + struct { + struct __genradix tree; + struct sctp_stream_in type[0]; + } in; + __u16 outcnt; + __u16 incnt; + struct sctp_stream_out *out_curr; + union { + struct { + struct list_head prio_list; + }; + struct { + struct list_head rr_list; + struct sctp_stream_out_ext *rr_next; + }; + }; + struct sctp_stream_interleave *si; +}; + +struct sctp_sched_ops; + +struct sctp_association; + +struct sctp_outq { + struct sctp_association *asoc; + struct list_head out_chunk_list; + struct sctp_sched_ops *sched; + 
unsigned int out_qlen; + unsigned int error; + struct list_head control_chunk_list; + struct list_head sacked; + struct list_head retransmit; + struct list_head abandoned; + __u32 outstanding_bytes; + char fast_rtx; + char cork; +}; + +struct sctp_ulpq { + char pd_mode; + struct sctp_association *asoc; + struct sk_buff_head reasm; + struct sk_buff_head reasm_uo; + struct sk_buff_head lobby; +}; + +struct sctp_priv_assoc_stats { + struct __kernel_sockaddr_storage obs_rto_ipaddr; + __u64 max_obs_rto; + __u64 isacks; + __u64 osacks; + __u64 opackets; + __u64 ipackets; + __u64 rtxchunks; + __u64 outofseqtsns; + __u64 idupchunks; + __u64 gapcnt; + __u64 ouodchunks; + __u64 iuodchunks; + __u64 oodchunks; + __u64 iodchunks; + __u64 octrlchunks; + __u64 ictrlchunks; +}; + +struct sctp_transport; + +struct sctp_auth_bytes; + +struct sctp_shared_key; + +struct sctp_association { + struct sctp_ep_common base; + struct list_head asocs; + sctp_assoc_t assoc_id; + struct sctp_endpoint *ep; + struct sctp_cookie c; + struct { + struct list_head transport_addr_list; + __u32 rwnd; + __u16 transport_count; + __u16 port; + struct sctp_transport *primary_path; + union sctp_addr primary_addr; + struct sctp_transport *active_path; + struct sctp_transport *retran_path; + struct sctp_transport *last_sent_to; + struct sctp_transport *last_data_from; + struct sctp_tsnmap tsn_map; + __be16 addip_disabled_mask; + __u16 ecn_capable: 1; + __u16 ipv4_address: 1; + __u16 ipv6_address: 1; + __u16 hostname_address: 1; + __u16 asconf_capable: 1; + __u16 prsctp_capable: 1; + __u16 reconf_capable: 1; + __u16 intl_capable: 1; + __u16 auth_capable: 1; + __u16 sack_needed: 1; + __u16 sack_generation: 1; + __u16 zero_window_announced: 1; + __u32 sack_cnt; + __u32 adaptation_ind; + struct sctp_inithdr_host i; + void *cookie; + int cookie_len; + __u32 addip_serial; + struct sctp_random_param *peer_random; + struct sctp_chunks_param *peer_chunks; + struct sctp_hmac_algo_param *peer_hmacs; + } peer; + enum sctp_state state; + int overall_error_count; + ktime_t cookie_life; + long unsigned int rto_initial; + long unsigned int rto_max; + long unsigned int rto_min; + int max_burst; + int max_retrans; + __u16 pf_retrans; + __u16 ps_retrans; + __u16 max_init_attempts; + __u16 init_retries; + long unsigned int max_init_timeo; + long unsigned int hbinterval; + __be16 encap_port; + __u16 pathmaxrxt; + __u32 flowlabel; + __u8 dscp; + __u8 pmtu_pending; + __u32 pathmtu; + __u32 param_flags; + __u32 sackfreq; + long unsigned int sackdelay; + long unsigned int timeouts[11]; + struct timer_list timers[11]; + struct sctp_transport *shutdown_last_sent_to; + struct sctp_transport *init_last_sent_to; + int shutdown_retries; + __u32 next_tsn; + __u32 ctsn_ack_point; + __u32 adv_peer_ack_point; + __u32 highest_sacked; + __u32 fast_recovery_exit; + __u8 fast_recovery; + __u16 unack_data; + __u32 rtx_data_chunks; + __u32 rwnd; + __u32 a_rwnd; + __u32 rwnd_over; + __u32 rwnd_press; + int sndbuf_used; + atomic_t rmem_alloc; + wait_queue_head_t wait; + __u32 frag_point; + __u32 user_frag; + int init_err_counter; + int init_cycle; + __u16 default_stream; + __u16 default_flags; + __u32 default_ppid; + __u32 default_context; + __u32 default_timetolive; + __u32 default_rcv_context; + struct sctp_stream stream; + struct sctp_outq outqueue; + struct sctp_ulpq ulpq; + __u32 last_ecne_tsn; + __u32 last_cwr_tsn; + int numduptsns; + struct sctp_chunk *addip_last_asconf; + struct list_head asconf_ack_list; + struct list_head addip_chunk_list; + __u32 addip_serial; + 
int src_out_of_asoc_ok; + union sctp_addr *asconf_addr_del_pending; + struct sctp_transport *new_transport; + struct list_head endpoint_shared_keys; + struct sctp_auth_bytes *asoc_shared_key; + struct sctp_shared_key *shkey; + __u16 default_hmac_id; + __u16 active_key_id; + __u8 need_ecne: 1; + __u8 temp: 1; + __u8 pf_expose: 2; + __u8 force_delay: 1; + __u8 strreset_enable; + __u8 strreset_outstanding; + __u32 strreset_outseq; + __u32 strreset_inseq; + __u32 strreset_result[2]; + struct sctp_chunk *strreset_chunk; + struct sctp_priv_assoc_stats stats; + int sent_cnt_removable; + __u16 subscribe; + __u64 abandoned_unsent[3]; + __u64 abandoned_sent[3]; + struct callback_head rcu; +}; + +struct sctp_auth_bytes { + refcount_t refcnt; + __u32 len; + __u8 data[0]; +}; + +struct sctp_shared_key { + struct list_head key_list; + struct sctp_auth_bytes *key; + refcount_t refcnt; + __u16 key_id; + __u8 deactivated; +}; + +enum { + SCTP_MAX_STREAM = 65535, +}; + +enum sctp_event_timeout { + SCTP_EVENT_TIMEOUT_NONE = 0, + SCTP_EVENT_TIMEOUT_T1_COOKIE = 1, + SCTP_EVENT_TIMEOUT_T1_INIT = 2, + SCTP_EVENT_TIMEOUT_T2_SHUTDOWN = 3, + SCTP_EVENT_TIMEOUT_T3_RTX = 4, + SCTP_EVENT_TIMEOUT_T4_RTO = 5, + SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD = 6, + SCTP_EVENT_TIMEOUT_HEARTBEAT = 7, + SCTP_EVENT_TIMEOUT_RECONF = 8, + SCTP_EVENT_TIMEOUT_SACK = 9, + SCTP_EVENT_TIMEOUT_AUTOCLOSE = 10, +}; + +enum { + SCTP_MAX_DUP_TSNS = 16, +}; + +enum sctp_scope { + SCTP_SCOPE_GLOBAL = 0, + SCTP_SCOPE_PRIVATE = 1, + SCTP_SCOPE_LINK = 2, + SCTP_SCOPE_LOOPBACK = 3, + SCTP_SCOPE_UNUSABLE = 4, +}; + +enum { + SCTP_AUTH_HMAC_ID_RESERVED_0 = 0, + SCTP_AUTH_HMAC_ID_SHA1 = 1, + SCTP_AUTH_HMAC_ID_RESERVED_2 = 2, + SCTP_AUTH_HMAC_ID_SHA256 = 3, + __SCTP_AUTH_HMAC_MAX = 4, +}; + +struct sctp_ulpevent { + struct sctp_association *asoc; + struct sctp_chunk *chunk; + unsigned int rmem_len; + union { + __u32 mid; + __u16 ssn; + }; + union { + __u32 ppid; + __u32 fsn; + }; + __u32 tsn; + __u32 cumtsn; + __u16 stream; + __u16 flags; + __u16 msg_flags; +} __attribute__((packed)); + +union sctp_addr_param; + +union sctp_params { + void *v; + struct sctp_paramhdr *p; + struct sctp_cookie_preserve_param *life; + struct sctp_hostname_param *dns; + struct sctp_cookie_param *cookie; + struct sctp_supported_addrs_param *sat; + struct sctp_ipv4addr_param *v4; + struct sctp_ipv6addr_param *v6; + union sctp_addr_param *addr; + struct sctp_adaptation_ind_param *aind; + struct sctp_supported_ext_param *ext; + struct sctp_random_param *random; + struct sctp_chunks_param *chunks; + struct sctp_hmac_algo_param *hmac_algo; + struct sctp_addip_param *addip; +}; + +struct sctp_sender_hb_info; + +struct sctp_signed_cookie; + +struct sctp_datamsg; + +struct sctp_chunk { + struct list_head list; + refcount_t refcnt; + int sent_count; + union { + struct list_head transmitted_list; + struct list_head stream_list; + }; + struct list_head frag_list; + struct sk_buff *skb; + union { + struct sk_buff *head_skb; + struct sctp_shared_key *shkey; + }; + union sctp_params param_hdr; + union { + __u8 *v; + struct sctp_datahdr *data_hdr; + struct sctp_inithdr *init_hdr; + struct sctp_sackhdr *sack_hdr; + struct sctp_heartbeathdr *hb_hdr; + struct sctp_sender_hb_info *hbs_hdr; + struct sctp_shutdownhdr *shutdown_hdr; + struct sctp_signed_cookie *cookie_hdr; + struct sctp_ecnehdr *ecne_hdr; + struct sctp_cwrhdr *ecn_cwr_hdr; + struct sctp_errhdr *err_hdr; + struct sctp_addiphdr *addip_hdr; + struct sctp_fwdtsn_hdr *fwdtsn_hdr; + struct sctp_authhdr *auth_hdr; + struct sctp_idatahdr 
*idata_hdr; + struct sctp_ifwdtsn_hdr *ifwdtsn_hdr; + } subh; + __u8 *chunk_end; + struct sctp_chunkhdr *chunk_hdr; + struct sctphdr *sctp_hdr; + struct sctp_sndrcvinfo sinfo; + struct sctp_association *asoc; + struct sctp_ep_common *rcvr; + long unsigned int sent_at; + union sctp_addr source; + union sctp_addr dest; + struct sctp_datamsg *msg; + struct sctp_transport *transport; + struct sk_buff *auth_chunk; + __u16 rtt_in_progress: 1; + __u16 has_tsn: 1; + __u16 has_ssn: 1; + __u16 singleton: 1; + __u16 end_of_packet: 1; + __u16 ecn_ce_done: 1; + __u16 pdiscard: 1; + __u16 tsn_gap_acked: 1; + __u16 data_accepted: 1; + __u16 auth: 1; + __u16 has_asconf: 1; + __u16 tsn_missing_report: 2; + __u16 fast_retransmit: 2; +}; + +struct sctp_stream_interleave { + __u16 data_chunk_len; + __u16 ftsn_chunk_len; + struct sctp_chunk * (*make_datafrag)(const struct sctp_association *, const struct sctp_sndrcvinfo *, int, __u8, gfp_t); + void (*assign_number)(struct sctp_chunk *); + bool (*validate_data)(struct sctp_chunk *); + int (*ulpevent_data)(struct sctp_ulpq *, struct sctp_chunk *, gfp_t); + int (*enqueue_event)(struct sctp_ulpq *, struct sctp_ulpevent *); + void (*renege_events)(struct sctp_ulpq *, struct sctp_chunk *, gfp_t); + void (*start_pd)(struct sctp_ulpq *, gfp_t); + void (*abort_pd)(struct sctp_ulpq *, gfp_t); + void (*generate_ftsn)(struct sctp_outq *, __u32); + bool (*validate_ftsn)(struct sctp_chunk *); + void (*report_ftsn)(struct sctp_ulpq *, __u32); + void (*handle_ftsn)(struct sctp_ulpq *, struct sctp_chunk *); +}; + +struct sctp_bind_bucket { + short unsigned int port; + signed char fastreuse; + signed char fastreuseport; + kuid_t fastuid; + struct hlist_node node; + struct hlist_head owner; + struct net *net; +}; + +enum sctp_socket_type { + SCTP_SOCKET_UDP = 0, + SCTP_SOCKET_UDP_HIGH_BANDWIDTH = 1, + SCTP_SOCKET_TCP = 2, +}; + +struct sctp_pf; + +struct sctp_sock { + struct inet_sock inet; + enum sctp_socket_type type; + struct sctp_pf *pf; + struct crypto_shash *hmac; + char *sctp_hmac_alg; + struct sctp_endpoint *ep; + struct sctp_bind_bucket *bind_hash; + __u16 default_stream; + __u32 default_ppid; + __u16 default_flags; + __u32 default_context; + __u32 default_timetolive; + __u32 default_rcv_context; + int max_burst; + __u32 hbinterval; + __be16 udp_port; + __be16 encap_port; + __u16 pathmaxrxt; + __u32 flowlabel; + __u8 dscp; + __u16 pf_retrans; + __u16 ps_retrans; + __u32 pathmtu; + __u32 sackdelay; + __u32 sackfreq; + __u32 param_flags; + __u32 default_ss; + struct sctp_rtoinfo rtoinfo; + struct sctp_paddrparams paddrparam; + struct sctp_assocparams assocparams; + __u16 subscribe; + struct sctp_initmsg initmsg; + int user_frag; + __u32 autoclose; + __u32 adaptation_ind; + __u32 pd_point; + __u16 nodelay: 1; + __u16 pf_expose: 2; + __u16 reuse: 1; + __u16 disable_fragments: 1; + __u16 v4mapped: 1; + __u16 frag_interleave: 1; + __u16 recvrcvinfo: 1; + __u16 recvnxtinfo: 1; + __u16 data_ready_signalled: 1; + atomic_t pd_mode; + struct sk_buff_head pd_lobby; + struct list_head auto_asconf_list; + int do_auto_asconf; +}; + +struct sctp_af; + +struct sctp_pf { + void (*event_msgname)(struct sctp_ulpevent *, char *, int *); + void (*skb_msgname)(struct sk_buff *, char *, int *); + int (*af_supported)(sa_family_t, struct sctp_sock *); + int (*cmp_addr)(const union sctp_addr *, const union sctp_addr *, struct sctp_sock *); + int (*bind_verify)(struct sctp_sock *, union sctp_addr *); + int (*send_verify)(struct sctp_sock *, union sctp_addr *); + int (*supported_addrs)(const struct 
sctp_sock *, __be16 *); + struct sock * (*create_accept_sk)(struct sock *, struct sctp_association *, bool); + int (*addr_to_user)(struct sctp_sock *, union sctp_addr *); + void (*to_sk_saddr)(union sctp_addr *, struct sock *); + void (*to_sk_daddr)(union sctp_addr *, struct sock *); + void (*copy_ip_options)(struct sock *, struct sock *); + struct sctp_af *af; +}; + +struct sctp_signed_cookie { + __u8 signature[32]; + __u32 __pad; + struct sctp_cookie c; +} __attribute__((packed)); + +union sctp_addr_param { + struct sctp_paramhdr p; + struct sctp_ipv4addr_param v4; + struct sctp_ipv6addr_param v6; +}; + +struct sctp_sender_hb_info { + struct sctp_paramhdr param_hdr; + union sctp_addr daddr; + long unsigned int sent_at; + __u64 hb_nonce; +}; + +struct sctp_af { + int (*sctp_xmit)(struct sk_buff *, struct sctp_transport *); + int (*setsockopt)(struct sock *, int, int, sockptr_t, unsigned int); + int (*getsockopt)(struct sock *, int, int, char *, int *); + void (*get_dst)(struct sctp_transport *, union sctp_addr *, struct flowi *, struct sock *); + void (*get_saddr)(struct sctp_sock *, struct sctp_transport *, struct flowi *); + void (*copy_addrlist)(struct list_head *, struct net_device *); + int (*cmp_addr)(const union sctp_addr *, const union sctp_addr *); + void (*addr_copy)(union sctp_addr *, union sctp_addr *); + void (*from_skb)(union sctp_addr *, struct sk_buff *, int); + void (*from_sk)(union sctp_addr *, struct sock *); + bool (*from_addr_param)(union sctp_addr *, union sctp_addr_param *, __be16, int); + int (*to_addr_param)(const union sctp_addr *, union sctp_addr_param *); + int (*addr_valid)(union sctp_addr *, struct sctp_sock *, const struct sk_buff *); + enum sctp_scope (*scope)(union sctp_addr *); + void (*inaddr_any)(union sctp_addr *, __be16); + int (*is_any)(const union sctp_addr *); + int (*available)(union sctp_addr *, struct sctp_sock *); + int (*skb_iif)(const struct sk_buff *); + int (*is_ce)(const struct sk_buff *); + void (*seq_dump_addr)(struct seq_file *, union sctp_addr *); + void (*ecn_capable)(struct sock *); + __u16 net_header_len; + int sockaddr_len; + int (*ip_options_len)(struct sock *); + sa_family_t sa_family; + struct list_head list; +}; + +struct sctp_packet { + __u16 source_port; + __u16 destination_port; + __u32 vtag; + struct list_head chunk_list; + size_t overhead; + size_t size; + size_t max_size; + struct sctp_transport *transport; + struct sctp_chunk *auth; + u8 has_cookie_echo: 1; + u8 has_sack: 1; + u8 has_auth: 1; + u8 has_data: 1; + u8 ipfragok: 1; +}; + +struct sctp_transport { + struct list_head transports; + struct rhlist_head node; + refcount_t refcnt; + __u32 rto_pending: 1; + __u32 hb_sent: 1; + __u32 pmtu_pending: 1; + __u32 dst_pending_confirm: 1; + __u32 sack_generation: 1; + u32 dst_cookie; + struct flowi fl; + union sctp_addr ipaddr; + struct sctp_af *af_specific; + struct sctp_association *asoc; + long unsigned int rto; + __u32 rtt; + __u32 rttvar; + __u32 srtt; + __u32 cwnd; + __u32 ssthresh; + __u32 partial_bytes_acked; + __u32 flight_size; + __u32 burst_limited; + struct dst_entry *dst; + union sctp_addr saddr; + long unsigned int hbinterval; + long unsigned int sackdelay; + __u32 sackfreq; + atomic_t mtu_info; + ktime_t last_time_heard; + long unsigned int last_time_sent; + long unsigned int last_time_ecne_reduced; + __be16 encap_port; + __u16 pathmaxrxt; + __u32 flowlabel; + __u8 dscp; + __u16 pf_retrans; + __u16 ps_retrans; + __u32 pathmtu; + __u32 param_flags; + int init_sent_count; + int state; + short unsigned int 
error_count; + struct timer_list T3_rtx_timer; + struct timer_list hb_timer; + struct timer_list proto_unreach_timer; + struct timer_list reconf_timer; + struct list_head transmitted; + struct sctp_packet packet; + struct list_head send_ready; + struct { + __u32 next_tsn_at_change; + char changeover_active; + char cycling_changeover; + char cacc_saw_newack; + } cacc; + __u64 hb_nonce; + struct callback_head rcu; +}; + +struct sctp_datamsg { + struct list_head chunks; + refcount_t refcnt; + long unsigned int expires_at; + int send_error; + u8 send_failed: 1; + u8 can_delay: 1; + u8 abandoned: 1; +}; + +struct sctp_stream_priorities { + struct list_head prio_sched; + struct list_head active; + struct sctp_stream_out_ext *next; + __u16 prio; +}; + +struct sctp_stream_out_ext { + __u64 abandoned_unsent[3]; + __u64 abandoned_sent[3]; + struct list_head outq; + union { + struct { + struct list_head prio_list; + struct sctp_stream_priorities *prio_head; + }; + struct { + struct list_head rr_list; + }; + }; +}; + +struct task_security_struct { + u32 osid; + u32 sid; + u32 exec_sid; + u32 create_sid; + u32 keycreate_sid; + u32 sockcreate_sid; +}; + +enum label_initialized { + LABEL_INVALID = 0, + LABEL_INITIALIZED = 1, + LABEL_PENDING = 2, +}; + +struct inode_security_struct { + struct inode *inode; + struct list_head list; + u32 task_sid; + u32 sid; + u16 sclass; + unsigned char initialized; + spinlock_t lock; +}; + +struct file_security_struct { + u32 sid; + u32 fown_sid; + u32 isid; + u32 pseqno; +}; + +struct superblock_security_struct { + u32 sid; + u32 def_sid; + u32 mntpoint_sid; + short unsigned int behavior; + short unsigned int flags; + struct mutex lock; + struct list_head isec_head; + spinlock_t isec_lock; +}; + +struct msg_security_struct { + u32 sid; +}; + +struct ipc_security_struct { + u16 sclass; + u32 sid; +}; + +struct sk_security_struct { + enum { + NLBL_UNSET = 0, + NLBL_REQUIRE = 1, + NLBL_LABELED = 2, + NLBL_REQSKB = 3, + NLBL_CONNLABELED = 4, + } nlbl_state; + struct netlbl_lsm_secattr *nlbl_secattr; + u32 sid; + u32 peer_sid; + u16 sclass; + enum { + SCTP_ASSOC_UNSET = 0, + SCTP_ASSOC_SET = 1, + } sctp_assoc_state; +}; + +struct tun_security_struct { + u32 sid; +}; + +struct key_security_struct { + u32 sid; +}; + +struct ib_security_struct { + u32 sid; +}; + +struct bpf_security_struct { + u32 sid; +}; + +struct perf_event_security_struct { + u32 sid; +}; + +struct selinux_mnt_opts { + const char *fscontext; + const char *context; + const char *rootcontext; + const char *defcontext; +}; + +enum { + Opt_error___2 = 4294967295, + Opt_context = 0, + Opt_defcontext = 1, + Opt_fscontext = 2, + Opt_rootcontext = 3, + Opt_seclabel = 4, +}; + +struct selinux_policy_convert_data; + +struct selinux_load_state { + struct selinux_policy *policy; + struct selinux_policy_convert_data *convert_data; +}; + +enum sel_inos { + SEL_ROOT_INO = 2, + SEL_LOAD = 3, + SEL_ENFORCE = 4, + SEL_CONTEXT = 5, + SEL_ACCESS = 6, + SEL_CREATE = 7, + SEL_RELABEL = 8, + SEL_USER = 9, + SEL_POLICYVERS = 10, + SEL_COMMIT_BOOLS = 11, + SEL_MLS = 12, + SEL_DISABLE = 13, + SEL_MEMBER = 14, + SEL_CHECKREQPROT = 15, + SEL_COMPAT_NET = 16, + SEL_REJECT_UNKNOWN = 17, + SEL_DENY_UNKNOWN = 18, + SEL_STATUS = 19, + SEL_POLICY = 20, + SEL_VALIDATE_TRANS = 21, + SEL_INO_NEXT = 22, +}; + +struct selinux_fs_info { + struct dentry *bool_dir; + unsigned int bool_num; + char **bool_pending_names; + unsigned int *bool_pending_values; + struct dentry *class_dir; + long unsigned int last_class_ino; + bool policy_opened; + struct 
dentry *policycap_dir; + long unsigned int last_ino; + struct selinux_state *state; + struct super_block *sb; +}; + +struct policy_load_memory { + size_t len; + void *data; +}; + +enum { + SELNL_MSG_SETENFORCE = 16, + SELNL_MSG_POLICYLOAD = 17, + SELNL_MSG_MAX = 18, +}; + +enum selinux_nlgroups { + SELNLGRP_NONE = 0, + SELNLGRP_AVC = 1, + __SELNLGRP_MAX = 2, +}; + +struct selnl_msg_setenforce { + __s32 val; +}; + +struct selnl_msg_policyload { + __u32 seqno; +}; + +enum { + XFRM_MSG_BASE = 16, + XFRM_MSG_NEWSA = 16, + XFRM_MSG_DELSA = 17, + XFRM_MSG_GETSA = 18, + XFRM_MSG_NEWPOLICY = 19, + XFRM_MSG_DELPOLICY = 20, + XFRM_MSG_GETPOLICY = 21, + XFRM_MSG_ALLOCSPI = 22, + XFRM_MSG_ACQUIRE = 23, + XFRM_MSG_EXPIRE = 24, + XFRM_MSG_UPDPOLICY = 25, + XFRM_MSG_UPDSA = 26, + XFRM_MSG_POLEXPIRE = 27, + XFRM_MSG_FLUSHSA = 28, + XFRM_MSG_FLUSHPOLICY = 29, + XFRM_MSG_NEWAE = 30, + XFRM_MSG_GETAE = 31, + XFRM_MSG_REPORT = 32, + XFRM_MSG_MIGRATE = 33, + XFRM_MSG_NEWSADINFO = 34, + XFRM_MSG_GETSADINFO = 35, + XFRM_MSG_NEWSPDINFO = 36, + XFRM_MSG_GETSPDINFO = 37, + XFRM_MSG_MAPPING = 38, + __XFRM_MSG_MAX = 39, +}; + +enum { + RTM_BASE = 16, + RTM_NEWLINK = 16, + RTM_DELLINK = 17, + RTM_GETLINK = 18, + RTM_SETLINK = 19, + RTM_NEWADDR = 20, + RTM_DELADDR = 21, + RTM_GETADDR = 22, + RTM_NEWROUTE = 24, + RTM_DELROUTE = 25, + RTM_GETROUTE = 26, + RTM_NEWNEIGH = 28, + RTM_DELNEIGH = 29, + RTM_GETNEIGH = 30, + RTM_NEWRULE = 32, + RTM_DELRULE = 33, + RTM_GETRULE = 34, + RTM_NEWQDISC = 36, + RTM_DELQDISC = 37, + RTM_GETQDISC = 38, + RTM_NEWTCLASS = 40, + RTM_DELTCLASS = 41, + RTM_GETTCLASS = 42, + RTM_NEWTFILTER = 44, + RTM_DELTFILTER = 45, + RTM_GETTFILTER = 46, + RTM_NEWACTION = 48, + RTM_DELACTION = 49, + RTM_GETACTION = 50, + RTM_NEWPREFIX = 52, + RTM_GETMULTICAST = 58, + RTM_GETANYCAST = 62, + RTM_NEWNEIGHTBL = 64, + RTM_GETNEIGHTBL = 66, + RTM_SETNEIGHTBL = 67, + RTM_NEWNDUSEROPT = 68, + RTM_NEWADDRLABEL = 72, + RTM_DELADDRLABEL = 73, + RTM_GETADDRLABEL = 74, + RTM_GETDCB = 78, + RTM_SETDCB = 79, + RTM_NEWNETCONF = 80, + RTM_DELNETCONF = 81, + RTM_GETNETCONF = 82, + RTM_NEWMDB = 84, + RTM_DELMDB = 85, + RTM_GETMDB = 86, + RTM_NEWNSID = 88, + RTM_DELNSID = 89, + RTM_GETNSID = 90, + RTM_NEWSTATS = 92, + RTM_GETSTATS = 94, + RTM_NEWCACHEREPORT = 96, + RTM_NEWCHAIN = 100, + RTM_DELCHAIN = 101, + RTM_GETCHAIN = 102, + RTM_NEWNEXTHOP = 104, + RTM_DELNEXTHOP = 105, + RTM_GETNEXTHOP = 106, + RTM_NEWLINKPROP = 108, + RTM_DELLINKPROP = 109, + RTM_GETLINKPROP = 110, + RTM_NEWVLAN = 112, + RTM_DELVLAN = 113, + RTM_GETVLAN = 114, + RTM_NEWNEXTHOPBUCKET = 116, + RTM_DELNEXTHOPBUCKET = 117, + RTM_GETNEXTHOPBUCKET = 118, + __RTM_MAX = 119, +}; + +struct nlmsg_perm { + u16 nlmsg_type; + u32 perm; +}; + +struct netif_security_struct { + struct net *ns; + int ifindex; + u32 sid; +}; + +struct sel_netif { + struct list_head list; + struct netif_security_struct nsec; + struct callback_head callback_head; +}; + +struct netnode_security_struct { + union { + __be32 ipv4; + struct in6_addr ipv6; + } addr; + u32 sid; + u16 family; +}; + +struct sel_netnode_bkt { + unsigned int size; + struct list_head list; +}; + +struct sel_netnode { + struct netnode_security_struct nsec; + struct list_head list; + struct callback_head rcu; +}; + +struct netport_security_struct { + u32 sid; + u16 port; + u8 protocol; +}; + +struct sel_netport_bkt { + int size; + struct list_head list; +}; + +struct sel_netport { + struct netport_security_struct psec; + struct list_head list; + struct callback_head rcu; +}; + +struct selinux_kernel_status { + u32 
version; + u32 sequence; + u32 enforcing; + u32 policyload; + u32 deny_unknown; +}; + +struct ebitmap_node { + struct ebitmap_node *next; + long unsigned int maps[6]; + u32 startbit; +}; + +struct ebitmap { + struct ebitmap_node *node; + u32 highbit; +}; + +struct policy_file { + char *data; + size_t len; +}; + +struct hashtab_node { + void *key; + void *datum; + struct hashtab_node *next; +}; + +struct hashtab { + struct hashtab_node **htable; + u32 size; + u32 nel; +}; + +struct hashtab_info { + u32 slots_used; + u32 max_chain_len; +}; + +struct hashtab_key_params { + u32 (*hash)(const void *); + int (*cmp)(const void *, const void *); +}; + +struct symtab { + struct hashtab table; + u32 nprim; +}; + +struct mls_level { + u32 sens; + struct ebitmap cat; +}; + +struct mls_range { + struct mls_level level[2]; +}; + +struct context___2 { + u32 user; + u32 role; + u32 type; + u32 len; + struct mls_range range; + char *str; +}; + +struct sidtab_str_cache; + +struct sidtab_entry { + u32 sid; + u32 hash; + struct context___2 context; + struct sidtab_str_cache *cache; + struct hlist_node list; +}; + +struct sidtab_str_cache { + struct callback_head rcu_member; + struct list_head lru_member; + struct sidtab_entry *parent; + u32 len; + char str[0]; +}; + +struct sidtab_node_inner; + +struct sidtab_node_leaf; + +union sidtab_entry_inner { + struct sidtab_node_inner *ptr_inner; + struct sidtab_node_leaf *ptr_leaf; +}; + +struct sidtab_node_inner { + union sidtab_entry_inner entries[512]; +}; + +struct sidtab_node_leaf { + struct sidtab_entry entries[39]; +}; + +struct sidtab_isid_entry { + int set; + struct sidtab_entry entry; +}; + +struct sidtab; + +struct sidtab_convert_params { + int (*func)(struct context___2 *, struct context___2 *, void *); + void *args; + struct sidtab *target; +}; + +struct sidtab { + union sidtab_entry_inner roots[4]; + u32 count; + struct sidtab_convert_params *convert; + bool frozen; + spinlock_t lock; + u32 cache_free_slots; + struct list_head cache_lru_list; + spinlock_t cache_lock; + struct sidtab_isid_entry isids[27]; + struct hlist_head context_to_sid[512]; +}; + +struct avtab_key { + u16 source_type; + u16 target_type; + u16 target_class; + u16 specified; +}; + +struct avtab_extended_perms { + u8 specified; + u8 driver; + struct extended_perms_data perms; +}; + +struct avtab_datum { + union { + u32 data; + struct avtab_extended_perms *xperms; + } u; +}; + +struct avtab_node { + struct avtab_key key; + struct avtab_datum datum; + struct avtab_node *next; +}; + +struct avtab { + struct avtab_node **htable; + u32 nel; + u32 nslot; + u32 mask; +}; + +struct type_set; + +struct constraint_expr { + u32 expr_type; + u32 attr; + u32 op; + struct ebitmap names; + struct type_set *type_names; + struct constraint_expr *next; +}; + +struct type_set { + struct ebitmap types; + struct ebitmap negset; + u32 flags; +}; + +struct constraint_node { + u32 permissions; + struct constraint_expr *expr; + struct constraint_node *next; +}; + +struct common_datum { + u32 value; + struct symtab permissions; +}; + +struct class_datum { + u32 value; + char *comkey; + struct common_datum *comdatum; + struct symtab permissions; + struct constraint_node *constraints; + struct constraint_node *validatetrans; + char default_user; + char default_role; + char default_type; + char default_range; +}; + +struct role_datum { + u32 value; + u32 bounds; + struct ebitmap dominates; + struct ebitmap types; +}; + +struct role_allow { + u32 role; + u32 new_role; + struct role_allow *next; +}; + +struct 
type_datum { + u32 value; + u32 bounds; + unsigned char primary; + unsigned char attribute; +}; + +struct user_datum { + u32 value; + u32 bounds; + struct ebitmap roles; + struct mls_range range; + struct mls_level dfltlevel; +}; + +struct cond_bool_datum { + __u32 value; + int state; +}; + +struct ocontext { + union { + char *name; + struct { + u8 protocol; + u16 low_port; + u16 high_port; + } port; + struct { + u32 addr; + u32 mask; + } node; + struct { + u32 addr[4]; + u32 mask[4]; + } node6; + struct { + u64 subnet_prefix; + u16 low_pkey; + u16 high_pkey; + } ibpkey; + struct { + char *dev_name; + u8 port; + } ibendport; + } u; + union { + u32 sclass; + u32 behavior; + } v; + struct context___2 context[2]; + u32 sid[2]; + struct ocontext *next; +}; + +struct genfs { + char *fstype; + struct ocontext *head; + struct genfs *next; +}; + +struct cond_node; + +struct policydb { + int mls_enabled; + struct symtab symtab[8]; + char **sym_val_to_name[8]; + struct class_datum **class_val_to_struct; + struct role_datum **role_val_to_struct; + struct user_datum **user_val_to_struct; + struct type_datum **type_val_to_struct; + struct avtab te_avtab; + struct hashtab role_tr; + struct ebitmap filename_trans_ttypes; + struct hashtab filename_trans; + u32 compat_filename_trans_count; + struct cond_bool_datum **bool_val_to_struct; + struct avtab te_cond_avtab; + struct cond_node *cond_list; + u32 cond_list_len; + struct role_allow *role_allow; + struct ocontext *ocontexts[9]; + struct genfs *genfs; + struct hashtab range_tr; + struct ebitmap *type_attr_map_array; + struct ebitmap policycaps; + struct ebitmap permissive_map; + size_t len; + unsigned int policyvers; + unsigned int reject_unknown: 1; + unsigned int allow_unknown: 1; + u16 process_class; + u32 process_trans_perms; +}; + +struct perm_datum { + u32 value; +}; + +struct role_trans_key { + u32 role; + u32 type; + u32 tclass; +}; + +struct role_trans_datum { + u32 new_role; +}; + +struct filename_trans_key { + u32 ttype; + u16 tclass; + const char *name; +}; + +struct filename_trans_datum { + struct ebitmap stypes; + u32 otype; + struct filename_trans_datum *next; +}; + +struct level_datum { + struct mls_level *level; + unsigned char isalias; +}; + +struct cat_datum { + u32 value; + unsigned char isalias; +}; + +struct range_trans { + u32 source_type; + u32 target_type; + u32 target_class; +}; + +struct cond_expr_node; + +struct cond_expr { + struct cond_expr_node *nodes; + u32 len; +}; + +struct cond_av_list { + struct avtab_node **nodes; + u32 len; +}; + +struct cond_node { + int cur_state; + struct cond_expr expr; + struct cond_av_list true_list; + struct cond_av_list false_list; +}; + +struct policy_data { + struct policydb *p; + void *fp; +}; + +struct cond_expr_node { + u32 expr_type; + u32 bool; +}; + +struct policydb_compat_info { + int version; + int sym_num; + int ocon_num; +}; + +struct selinux_mapping; + +struct selinux_map { + struct selinux_mapping *mapping; + u16 size; +}; + +struct selinux_policy { + struct sidtab *sidtab; + struct policydb policydb; + struct selinux_map map; + u32 latest_granting; +}; + +struct convert_context_args { + struct selinux_state *state; + struct policydb *oldp; + struct policydb *newp; +}; + +struct selinux_policy_convert_data { + struct convert_context_args args; + struct sidtab_convert_params sidtab_params; +}; + +struct selinux_mapping { + u16 value; + unsigned int num_perms; + u32 perms[32]; +}; + +struct selinux_audit_rule { + u32 au_seqno; + struct context___2 au_ctxt; +}; + +struct 
cond_insertf_data { + struct policydb *p; + struct avtab_node **dst; + struct cond_av_list *other; +}; + +struct rt6key { + struct in6_addr addr; + int plen; +}; + +struct rtable; + +struct fnhe_hash_bucket; + +struct fib_nh_common { + struct net_device *nhc_dev; + int nhc_oif; + unsigned char nhc_scope; + u8 nhc_family; + u8 nhc_gw_family; + unsigned char nhc_flags; + struct lwtunnel_state *nhc_lwtstate; + union { + __be32 ipv4; + struct in6_addr ipv6; + } nhc_gw; + int nhc_weight; + atomic_t nhc_upper_bound; + struct rtable **nhc_pcpu_rth_output; + struct rtable *nhc_rth_input; + struct fnhe_hash_bucket *nhc_exceptions; +}; + +struct rt6_exception_bucket; + +struct fib6_nh { + struct fib_nh_common nh_common; + long unsigned int last_probe; + struct rt6_info **rt6i_pcpu; + struct rt6_exception_bucket *rt6i_exception_bucket; +}; + +struct fib6_node; + +struct dst_metrics; + +struct nexthop; + +struct fib6_info { + struct fib6_table *fib6_table; + struct fib6_info *fib6_next; + struct fib6_node *fib6_node; + union { + struct list_head fib6_siblings; + struct list_head nh_list; + }; + unsigned int fib6_nsiblings; + refcount_t fib6_ref; + long unsigned int expires; + struct dst_metrics *fib6_metrics; + struct rt6key fib6_dst; + u32 fib6_flags; + struct rt6key fib6_src; + struct rt6key fib6_prefsrc; + u32 fib6_metric; + u8 fib6_protocol; + u8 fib6_type; + u8 should_flush: 1; + u8 dst_nocount: 1; + u8 dst_nopolicy: 1; + u8 fib6_destroying: 1; + u8 offload: 1; + u8 trap: 1; + u8 offload_failed: 1; + u8 unused: 1; + struct callback_head rcu; + struct nexthop *nh; + struct fib6_nh fib6_nh[0]; +}; + +struct uncached_list; + +struct rt6_info { + struct dst_entry dst; + struct fib6_info *from; + int sernum; + struct rt6key rt6i_dst; + struct rt6key rt6i_src; + struct in6_addr rt6i_gateway; + struct inet6_dev *rt6i_idev; + u32 rt6i_flags; + struct list_head rt6i_uncached; + struct uncached_list *rt6i_uncached_list; + short unsigned int rt6i_nfheader_len; +}; + +struct rt6_statistics { + __u32 fib_nodes; + __u32 fib_route_nodes; + __u32 fib_rt_entries; + __u32 fib_rt_cache; + __u32 fib_discarded_routes; + atomic_t fib_rt_alloc; + atomic_t fib_rt_uncache; +}; + +struct fib6_node { + struct fib6_node *parent; + struct fib6_node *left; + struct fib6_node *right; + struct fib6_node *subtree; + struct fib6_info *leaf; + __u16 fn_bit; + __u16 fn_flags; + int fn_sernum; + struct fib6_info *rr_ptr; + struct callback_head rcu; +}; + +struct fib6_table { + struct hlist_node tb6_hlist; + u32 tb6_id; + spinlock_t tb6_lock; + struct fib6_node tb6_root; + struct inet_peer_base tb6_peers; + unsigned int flags; + unsigned int fib_seq; +}; + +typedef union { + __be32 a4; + __be32 a6[4]; + struct in6_addr in6; +} xfrm_address_t; + +struct xfrm_id { + xfrm_address_t daddr; + __be32 spi; + __u8 proto; +}; + +struct xfrm_selector { + xfrm_address_t daddr; + xfrm_address_t saddr; + __be16 dport; + __be16 dport_mask; + __be16 sport; + __be16 sport_mask; + __u16 family; + __u8 prefixlen_d; + __u8 prefixlen_s; + __u8 proto; + int ifindex; + __kernel_uid32_t user; +}; + +struct xfrm_lifetime_cfg { + __u64 soft_byte_limit; + __u64 hard_byte_limit; + __u64 soft_packet_limit; + __u64 hard_packet_limit; + __u64 soft_add_expires_seconds; + __u64 hard_add_expires_seconds; + __u64 soft_use_expires_seconds; + __u64 hard_use_expires_seconds; +}; + +struct xfrm_lifetime_cur { + __u64 bytes; + __u64 packets; + __u64 add_time; + __u64 use_time; +}; + +struct xfrm_replay_state { + __u32 oseq; + __u32 seq; + __u32 bitmap; +}; + +struct 
xfrm_replay_state_esn { + unsigned int bmp_len; + __u32 oseq; + __u32 seq; + __u32 oseq_hi; + __u32 seq_hi; + __u32 replay_window; + __u32 bmp[0]; +}; + +struct xfrm_algo { + char alg_name[64]; + unsigned int alg_key_len; + char alg_key[0]; +}; + +struct xfrm_algo_auth { + char alg_name[64]; + unsigned int alg_key_len; + unsigned int alg_trunc_len; + char alg_key[0]; +}; + +struct xfrm_algo_aead { + char alg_name[64]; + unsigned int alg_key_len; + unsigned int alg_icv_len; + char alg_key[0]; +}; + +struct xfrm_stats { + __u32 replay_window; + __u32 replay; + __u32 integrity_failed; +}; + +enum { + XFRM_POLICY_TYPE_MAIN = 0, + XFRM_POLICY_TYPE_SUB = 1, + XFRM_POLICY_TYPE_MAX = 2, + XFRM_POLICY_TYPE_ANY = 255, +}; + +struct xfrm_encap_tmpl { + __u16 encap_type; + __be16 encap_sport; + __be16 encap_dport; + xfrm_address_t encap_oa; +}; + +enum xfrm_attr_type_t { + XFRMA_UNSPEC = 0, + XFRMA_ALG_AUTH = 1, + XFRMA_ALG_CRYPT = 2, + XFRMA_ALG_COMP = 3, + XFRMA_ENCAP = 4, + XFRMA_TMPL = 5, + XFRMA_SA = 6, + XFRMA_POLICY = 7, + XFRMA_SEC_CTX = 8, + XFRMA_LTIME_VAL = 9, + XFRMA_REPLAY_VAL = 10, + XFRMA_REPLAY_THRESH = 11, + XFRMA_ETIMER_THRESH = 12, + XFRMA_SRCADDR = 13, + XFRMA_COADDR = 14, + XFRMA_LASTUSED = 15, + XFRMA_POLICY_TYPE = 16, + XFRMA_MIGRATE = 17, + XFRMA_ALG_AEAD = 18, + XFRMA_KMADDRESS = 19, + XFRMA_ALG_AUTH_TRUNC = 20, + XFRMA_MARK = 21, + XFRMA_TFCPAD = 22, + XFRMA_REPLAY_ESN_VAL = 23, + XFRMA_SA_EXTRA_FLAGS = 24, + XFRMA_PROTO = 25, + XFRMA_ADDRESS_FILTER = 26, + XFRMA_PAD = 27, + XFRMA_OFFLOAD_DEV = 28, + XFRMA_SET_MARK = 29, + XFRMA_SET_MARK_MASK = 30, + XFRMA_IF_ID = 31, + __XFRMA_MAX = 32, +}; + +struct xfrm_mark { + __u32 v; + __u32 m; +}; + +struct xfrm_address_filter { + xfrm_address_t saddr; + xfrm_address_t daddr; + __u16 family; + __u8 splen; + __u8 dplen; +}; + +struct xfrm_state_walk { + struct list_head all; + u8 state; + u8 dying; + u8 proto; + u32 seq; + struct xfrm_address_filter *filter; +}; + +struct xfrm_state_offload { + struct net_device *dev; + struct net_device *real_dev; + long unsigned int offload_handle; + unsigned int num_exthdrs; + u8 flags; +}; + +struct xfrm_mode { + u8 encap; + u8 family; + u8 flags; +}; + +struct xfrm_replay; + +struct xfrm_type; + +struct xfrm_type_offload; + +struct xfrm_state { + possible_net_t xs_net; + union { + struct hlist_node gclist; + struct hlist_node bydst; + }; + struct hlist_node bysrc; + struct hlist_node byspi; + refcount_t refcnt; + spinlock_t lock; + struct xfrm_id id; + struct xfrm_selector sel; + struct xfrm_mark mark; + u32 if_id; + u32 tfcpad; + u32 genid; + struct xfrm_state_walk km; + struct { + u32 reqid; + u8 mode; + u8 replay_window; + u8 aalgo; + u8 ealgo; + u8 calgo; + u8 flags; + u16 family; + xfrm_address_t saddr; + int header_len; + int trailer_len; + u32 extra_flags; + struct xfrm_mark smark; + } props; + struct xfrm_lifetime_cfg lft; + struct xfrm_algo_auth *aalg; + struct xfrm_algo *ealg; + struct xfrm_algo *calg; + struct xfrm_algo_aead *aead; + const char *geniv; + struct xfrm_encap_tmpl *encap; + struct sock *encap_sk; + xfrm_address_t *coaddr; + struct xfrm_state *tunnel; + atomic_t tunnel_users; + struct xfrm_replay_state replay; + struct xfrm_replay_state_esn *replay_esn; + struct xfrm_replay_state preplay; + struct xfrm_replay_state_esn *preplay_esn; + const struct xfrm_replay *repl; + u32 xflags; + u32 replay_maxage; + u32 replay_maxdiff; + struct timer_list rtimer; + struct xfrm_stats stats; + struct xfrm_lifetime_cur curlft; + struct hrtimer mtimer; + struct xfrm_state_offload xso; + long 
int saved_tmo; + time64_t lastused; + struct page_frag xfrag; + const struct xfrm_type *type; + struct xfrm_mode inner_mode; + struct xfrm_mode inner_mode_iaf; + struct xfrm_mode outer_mode; + const struct xfrm_type_offload *type_offload; + struct xfrm_sec_ctx *security; + void *data; +}; + +struct dst_metrics { + u32 metrics[17]; + refcount_t refcnt; +}; + +struct xfrm_policy_walk_entry { + struct list_head all; + u8 dead; +}; + +struct xfrm_policy_queue { + struct sk_buff_head hold_queue; + struct timer_list hold_timer; + long unsigned int timeout; +}; + +struct xfrm_tmpl { + struct xfrm_id id; + xfrm_address_t saddr; + short unsigned int encap_family; + u32 reqid; + u8 mode; + u8 share; + u8 optional; + u8 allalgs; + u32 aalgos; + u32 ealgos; + u32 calgos; +}; + +struct xfrm_policy { + possible_net_t xp_net; + struct hlist_node bydst; + struct hlist_node byidx; + rwlock_t lock; + refcount_t refcnt; + u32 pos; + struct timer_list timer; + atomic_t genid; + u32 priority; + u32 index; + u32 if_id; + struct xfrm_mark mark; + struct xfrm_selector selector; + struct xfrm_lifetime_cfg lft; + struct xfrm_lifetime_cur curlft; + struct xfrm_policy_walk_entry walk; + struct xfrm_policy_queue polq; + bool bydst_reinsert; + u8 type; + u8 action; + u8 flags; + u8 xfrm_nr; + u16 family; + struct xfrm_sec_ctx *security; + struct xfrm_tmpl xfrm_vec[6]; + struct hlist_node bydst_inexact_list; + struct callback_head rcu; +}; + +struct udp_hslot; + +struct udp_table { + struct udp_hslot *hash; + struct udp_hslot *hash2; + unsigned int mask; + unsigned int log; +}; + +struct fib_nh_exception { + struct fib_nh_exception *fnhe_next; + int fnhe_genid; + __be32 fnhe_daddr; + u32 fnhe_pmtu; + bool fnhe_mtu_locked; + __be32 fnhe_gw; + long unsigned int fnhe_expires; + struct rtable *fnhe_rth_input; + struct rtable *fnhe_rth_output; + long unsigned int fnhe_stamp; + struct callback_head rcu; +}; + +struct rtable { + struct dst_entry dst; + int rt_genid; + unsigned int rt_flags; + __u16 rt_type; + __u8 rt_is_input; + __u8 rt_uses_gateway; + int rt_iif; + u8 rt_gw_family; + union { + __be32 rt_gw4; + struct in6_addr rt_gw6; + }; + u32 rt_mtu_locked: 1; + u32 rt_pmtu: 31; + struct list_head rt_uncached; + struct uncached_list *rt_uncached_list; +}; + +struct fnhe_hash_bucket { + struct fib_nh_exception *chain; +}; + +struct rt6_exception_bucket { + struct hlist_head chain; + int depth; +}; + +struct xfrm_replay { + void (*advance)(struct xfrm_state *, __be32); + int (*check)(struct xfrm_state *, struct sk_buff *, __be32); + int (*recheck)(struct xfrm_state *, struct sk_buff *, __be32); + void (*notify)(struct xfrm_state *, int); + int (*overflow)(struct xfrm_state *, struct sk_buff *); +}; + +struct xfrm_type { + char *description; + struct module *owner; + u8 proto; + u8 flags; + int (*init_state)(struct xfrm_state *); + void (*destructor)(struct xfrm_state *); + int (*input)(struct xfrm_state *, struct sk_buff *); + int (*output)(struct xfrm_state *, struct sk_buff *); + int (*reject)(struct xfrm_state *, struct sk_buff *, const struct flowi *); + int (*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **); +}; + +struct xfrm_type_offload { + char *description; + struct module *owner; + u8 proto; + void (*encap)(struct xfrm_state *, struct sk_buff *); + int (*input_tail)(struct xfrm_state *, struct sk_buff *); + int (*xmit)(struct xfrm_state *, struct sk_buff *, netdev_features_t); +}; + +struct xfrm_dst { + union { + struct dst_entry dst; + struct rtable rt; + struct rt6_info rt6; + } u; + struct dst_entry 
*route; + struct dst_entry *child; + struct dst_entry *path; + struct xfrm_policy *pols[2]; + int num_pols; + int num_xfrms; + u32 xfrm_genid; + u32 policy_genid; + u32 route_mtu_cached; + u32 child_mtu_cached; + u32 route_cookie; + u32 path_cookie; +}; + +struct xfrm_offload { + struct { + __u32 low; + __u32 hi; + } seq; + __u32 flags; + __u32 status; + __u8 proto; +}; + +struct sec_path { + int len; + int olen; + struct xfrm_state *xvec[6]; + struct xfrm_offload ovec[1]; +}; + +struct udp_hslot { + struct hlist_head head; + int count; + spinlock_t lock; +}; + +struct pkey_security_struct { + u64 subnet_prefix; + u16 pkey; + u32 sid; +}; + +struct sel_ib_pkey_bkt { + int size; + struct list_head list; +}; + +struct sel_ib_pkey { + struct pkey_security_struct psec; + struct list_head list; + struct callback_head rcu; +}; + +struct smack_audit_data { + const char *function; + char *subject; + char *object; + char *request; + int result; +}; + +struct smack_known { + struct list_head list; + struct hlist_node smk_hashed; + char *smk_known; + u32 smk_secid; + struct netlbl_lsm_secattr smk_netlabel; + struct list_head smk_rules; + struct mutex smk_rules_lock; +}; + +struct superblock_smack { + struct smack_known *smk_root; + struct smack_known *smk_floor; + struct smack_known *smk_hat; + struct smack_known *smk_default; + int smk_flags; +}; + +struct socket_smack { + struct smack_known *smk_out; + struct smack_known *smk_in; + struct smack_known *smk_packet; + int smk_state; +}; + +struct inode_smack { + struct smack_known *smk_inode; + struct smack_known *smk_task; + struct smack_known *smk_mmap; + int smk_flags; +}; + +struct task_smack { + struct smack_known *smk_task; + struct smack_known *smk_forked; + struct list_head smk_rules; + struct mutex smk_rules_lock; + struct list_head smk_relabel; +}; + +struct smack_rule { + struct list_head list; + struct smack_known *smk_subject; + struct smack_known *smk_object; + int smk_access; +}; + +struct smk_net4addr { + struct list_head list; + struct in_addr smk_host; + struct in_addr smk_mask; + int smk_masks; + struct smack_known *smk_label; +}; + +struct smk_net6addr { + struct list_head list; + struct in6_addr smk_host; + struct in6_addr smk_mask; + int smk_masks; + struct smack_known *smk_label; +}; + +struct smack_known_list_elem { + struct list_head list; + struct smack_known *smk_label; +}; + +enum { + Opt_error___3 = 4294967295, + Opt_fsdefault = 0, + Opt_fsfloor = 1, + Opt_fshat = 2, + Opt_fsroot = 3, + Opt_fstransmute = 4, +}; + +struct smk_audit_info { + struct common_audit_data a; + struct smack_audit_data sad; +}; + +struct smack_mnt_opts { + const char *fsdefault; + const char *fsfloor; + const char *fshat; + const char *fsroot; + const char *fstransmute; +}; + +struct netlbl_audit { + struct lsmblob lsmdata; + kuid_t loginuid; + unsigned int sessionid; +}; + +struct cipso_v4_std_map_tbl { + struct { + u32 *cipso; + u32 *local; + u32 cipso_size; + u32 local_size; + } lvl; + struct { + u32 *cipso; + u32 *local; + u32 cipso_size; + u32 local_size; + } cat; +}; + +struct cipso_v4_doi { + u32 doi; + u32 type; + union { + struct cipso_v4_std_map_tbl *std; + } map; + u8 tags[5]; + refcount_t refcount; + struct list_head list; + struct callback_head rcu; +}; + +enum smk_inos { + SMK_ROOT_INO = 2, + SMK_LOAD = 3, + SMK_CIPSO = 4, + SMK_DOI = 5, + SMK_DIRECT = 6, + SMK_AMBIENT = 7, + SMK_NET4ADDR = 8, + SMK_ONLYCAP = 9, + SMK_LOGGING = 10, + SMK_LOAD_SELF = 11, + SMK_ACCESSES = 12, + SMK_MAPPED = 13, + SMK_LOAD2 = 14, + SMK_LOAD_SELF2 = 15, + 
SMK_ACCESS2 = 16, + SMK_CIPSO2 = 17, + SMK_REVOKE_SUBJ = 18, + SMK_CHANGE_RULE = 19, + SMK_SYSLOG = 20, + SMK_PTRACE = 21, + SMK_NET6ADDR = 23, + SMK_RELABEL_SELF = 24, +}; + +struct smack_parsed_rule { + struct smack_known *smk_subject; + struct smack_known *smk_object; + int smk_access1; + int smk_access2; +}; + +struct sockaddr_un { + __kernel_sa_family_t sun_family; + char sun_path[108]; +}; + +struct unix_address { + refcount_t refcnt; + int len; + unsigned int hash; + struct sockaddr_un name[0]; +}; + +struct scm_stat { + atomic_t nr_fds; +}; + +struct unix_sock { + struct sock sk; + struct unix_address *addr; + struct path path; + struct mutex iolock; + struct mutex bindlock; + struct sock *peer; + struct list_head link; + atomic_long_t inflight; + spinlock_t lock; + long unsigned int gc_flags; + struct socket_wq peer_wq; + wait_queue_entry_t peer_wake; + struct scm_stat scm_stat; + long: 32; + long: 64; + long: 64; +}; + +enum tomoyo_conditions_index { + TOMOYO_TASK_UID = 0, + TOMOYO_TASK_EUID = 1, + TOMOYO_TASK_SUID = 2, + TOMOYO_TASK_FSUID = 3, + TOMOYO_TASK_GID = 4, + TOMOYO_TASK_EGID = 5, + TOMOYO_TASK_SGID = 6, + TOMOYO_TASK_FSGID = 7, + TOMOYO_TASK_PID = 8, + TOMOYO_TASK_PPID = 9, + TOMOYO_EXEC_ARGC = 10, + TOMOYO_EXEC_ENVC = 11, + TOMOYO_TYPE_IS_SOCKET = 12, + TOMOYO_TYPE_IS_SYMLINK = 13, + TOMOYO_TYPE_IS_FILE = 14, + TOMOYO_TYPE_IS_BLOCK_DEV = 15, + TOMOYO_TYPE_IS_DIRECTORY = 16, + TOMOYO_TYPE_IS_CHAR_DEV = 17, + TOMOYO_TYPE_IS_FIFO = 18, + TOMOYO_MODE_SETUID = 19, + TOMOYO_MODE_SETGID = 20, + TOMOYO_MODE_STICKY = 21, + TOMOYO_MODE_OWNER_READ = 22, + TOMOYO_MODE_OWNER_WRITE = 23, + TOMOYO_MODE_OWNER_EXECUTE = 24, + TOMOYO_MODE_GROUP_READ = 25, + TOMOYO_MODE_GROUP_WRITE = 26, + TOMOYO_MODE_GROUP_EXECUTE = 27, + TOMOYO_MODE_OTHERS_READ = 28, + TOMOYO_MODE_OTHERS_WRITE = 29, + TOMOYO_MODE_OTHERS_EXECUTE = 30, + TOMOYO_EXEC_REALPATH = 31, + TOMOYO_SYMLINK_TARGET = 32, + TOMOYO_PATH1_UID = 33, + TOMOYO_PATH1_GID = 34, + TOMOYO_PATH1_INO = 35, + TOMOYO_PATH1_MAJOR = 36, + TOMOYO_PATH1_MINOR = 37, + TOMOYO_PATH1_PERM = 38, + TOMOYO_PATH1_TYPE = 39, + TOMOYO_PATH1_DEV_MAJOR = 40, + TOMOYO_PATH1_DEV_MINOR = 41, + TOMOYO_PATH2_UID = 42, + TOMOYO_PATH2_GID = 43, + TOMOYO_PATH2_INO = 44, + TOMOYO_PATH2_MAJOR = 45, + TOMOYO_PATH2_MINOR = 46, + TOMOYO_PATH2_PERM = 47, + TOMOYO_PATH2_TYPE = 48, + TOMOYO_PATH2_DEV_MAJOR = 49, + TOMOYO_PATH2_DEV_MINOR = 50, + TOMOYO_PATH1_PARENT_UID = 51, + TOMOYO_PATH1_PARENT_GID = 52, + TOMOYO_PATH1_PARENT_INO = 53, + TOMOYO_PATH1_PARENT_PERM = 54, + TOMOYO_PATH2_PARENT_UID = 55, + TOMOYO_PATH2_PARENT_GID = 56, + TOMOYO_PATH2_PARENT_INO = 57, + TOMOYO_PATH2_PARENT_PERM = 58, + TOMOYO_MAX_CONDITION_KEYWORD = 59, + TOMOYO_NUMBER_UNION = 60, + TOMOYO_NAME_UNION = 61, + TOMOYO_ARGV_ENTRY = 62, + TOMOYO_ENVP_ENTRY = 63, +}; + +enum tomoyo_path_stat_index { + TOMOYO_PATH1 = 0, + TOMOYO_PATH1_PARENT = 1, + TOMOYO_PATH2 = 2, + TOMOYO_PATH2_PARENT = 3, + TOMOYO_MAX_PATH_STAT = 4, +}; + +enum tomoyo_mode_index { + TOMOYO_CONFIG_DISABLED = 0, + TOMOYO_CONFIG_LEARNING = 1, + TOMOYO_CONFIG_PERMISSIVE = 2, + TOMOYO_CONFIG_ENFORCING = 3, + TOMOYO_CONFIG_MAX_MODE = 4, + TOMOYO_CONFIG_WANT_REJECT_LOG = 64, + TOMOYO_CONFIG_WANT_GRANT_LOG = 128, + TOMOYO_CONFIG_USE_DEFAULT = 255, +}; + +enum tomoyo_policy_id { + TOMOYO_ID_GROUP = 0, + TOMOYO_ID_ADDRESS_GROUP = 1, + TOMOYO_ID_PATH_GROUP = 2, + TOMOYO_ID_NUMBER_GROUP = 3, + TOMOYO_ID_TRANSITION_CONTROL = 4, + TOMOYO_ID_AGGREGATOR = 5, + TOMOYO_ID_MANAGER = 6, + TOMOYO_ID_CONDITION = 7, + TOMOYO_ID_NAME = 8, + TOMOYO_ID_ACL = 
9, + TOMOYO_ID_DOMAIN = 10, + TOMOYO_MAX_POLICY = 11, +}; + +enum tomoyo_domain_info_flags_index { + TOMOYO_DIF_QUOTA_WARNED = 0, + TOMOYO_DIF_TRANSITION_FAILED = 1, + TOMOYO_MAX_DOMAIN_INFO_FLAGS = 2, +}; + +enum tomoyo_grant_log { + TOMOYO_GRANTLOG_AUTO = 0, + TOMOYO_GRANTLOG_NO = 1, + TOMOYO_GRANTLOG_YES = 2, +}; + +enum tomoyo_group_id { + TOMOYO_PATH_GROUP = 0, + TOMOYO_NUMBER_GROUP = 1, + TOMOYO_ADDRESS_GROUP = 2, + TOMOYO_MAX_GROUP = 3, +}; + +enum tomoyo_path_acl_index { + TOMOYO_TYPE_EXECUTE = 0, + TOMOYO_TYPE_READ = 1, + TOMOYO_TYPE_WRITE = 2, + TOMOYO_TYPE_APPEND = 3, + TOMOYO_TYPE_UNLINK = 4, + TOMOYO_TYPE_GETATTR = 5, + TOMOYO_TYPE_RMDIR = 6, + TOMOYO_TYPE_TRUNCATE = 7, + TOMOYO_TYPE_SYMLINK = 8, + TOMOYO_TYPE_CHROOT = 9, + TOMOYO_TYPE_UMOUNT = 10, + TOMOYO_MAX_PATH_OPERATION = 11, +}; + +enum tomoyo_memory_stat_type { + TOMOYO_MEMORY_POLICY = 0, + TOMOYO_MEMORY_AUDIT = 1, + TOMOYO_MEMORY_QUERY = 2, + TOMOYO_MAX_MEMORY_STAT = 3, +}; + +enum tomoyo_mkdev_acl_index { + TOMOYO_TYPE_MKBLOCK = 0, + TOMOYO_TYPE_MKCHAR = 1, + TOMOYO_MAX_MKDEV_OPERATION = 2, +}; + +enum tomoyo_network_acl_index { + TOMOYO_NETWORK_BIND = 0, + TOMOYO_NETWORK_LISTEN = 1, + TOMOYO_NETWORK_CONNECT = 2, + TOMOYO_NETWORK_SEND = 3, + TOMOYO_MAX_NETWORK_OPERATION = 4, +}; + +enum tomoyo_path2_acl_index { + TOMOYO_TYPE_LINK = 0, + TOMOYO_TYPE_RENAME = 1, + TOMOYO_TYPE_PIVOT_ROOT = 2, + TOMOYO_MAX_PATH2_OPERATION = 3, +}; + +enum tomoyo_path_number_acl_index { + TOMOYO_TYPE_CREATE = 0, + TOMOYO_TYPE_MKDIR = 1, + TOMOYO_TYPE_MKFIFO = 2, + TOMOYO_TYPE_MKSOCK = 3, + TOMOYO_TYPE_IOCTL = 4, + TOMOYO_TYPE_CHMOD = 5, + TOMOYO_TYPE_CHOWN = 6, + TOMOYO_TYPE_CHGRP = 7, + TOMOYO_MAX_PATH_NUMBER_OPERATION = 8, +}; + +enum tomoyo_securityfs_interface_index { + TOMOYO_DOMAINPOLICY = 0, + TOMOYO_EXCEPTIONPOLICY = 1, + TOMOYO_PROCESS_STATUS = 2, + TOMOYO_STAT = 3, + TOMOYO_AUDIT = 4, + TOMOYO_VERSION = 5, + TOMOYO_PROFILE = 6, + TOMOYO_QUERY = 7, + TOMOYO_MANAGER = 8, +}; + +enum tomoyo_mac_index { + TOMOYO_MAC_FILE_EXECUTE = 0, + TOMOYO_MAC_FILE_OPEN = 1, + TOMOYO_MAC_FILE_CREATE = 2, + TOMOYO_MAC_FILE_UNLINK = 3, + TOMOYO_MAC_FILE_GETATTR = 4, + TOMOYO_MAC_FILE_MKDIR = 5, + TOMOYO_MAC_FILE_RMDIR = 6, + TOMOYO_MAC_FILE_MKFIFO = 7, + TOMOYO_MAC_FILE_MKSOCK = 8, + TOMOYO_MAC_FILE_TRUNCATE = 9, + TOMOYO_MAC_FILE_SYMLINK = 10, + TOMOYO_MAC_FILE_MKBLOCK = 11, + TOMOYO_MAC_FILE_MKCHAR = 12, + TOMOYO_MAC_FILE_LINK = 13, + TOMOYO_MAC_FILE_RENAME = 14, + TOMOYO_MAC_FILE_CHMOD = 15, + TOMOYO_MAC_FILE_CHOWN = 16, + TOMOYO_MAC_FILE_CHGRP = 17, + TOMOYO_MAC_FILE_IOCTL = 18, + TOMOYO_MAC_FILE_CHROOT = 19, + TOMOYO_MAC_FILE_MOUNT = 20, + TOMOYO_MAC_FILE_UMOUNT = 21, + TOMOYO_MAC_FILE_PIVOT_ROOT = 22, + TOMOYO_MAC_NETWORK_INET_STREAM_BIND = 23, + TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN = 24, + TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT = 25, + TOMOYO_MAC_NETWORK_INET_DGRAM_BIND = 26, + TOMOYO_MAC_NETWORK_INET_DGRAM_SEND = 27, + TOMOYO_MAC_NETWORK_INET_RAW_BIND = 28, + TOMOYO_MAC_NETWORK_INET_RAW_SEND = 29, + TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND = 30, + TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN = 31, + TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT = 32, + TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND = 33, + TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND = 34, + TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND = 35, + TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN = 36, + TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT = 37, + TOMOYO_MAC_ENVIRON = 38, + TOMOYO_MAX_MAC_INDEX = 39, +}; + +enum tomoyo_mac_category_index { + TOMOYO_MAC_CATEGORY_FILE = 0, + TOMOYO_MAC_CATEGORY_NETWORK = 1, + 
TOMOYO_MAC_CATEGORY_MISC = 2, + TOMOYO_MAX_MAC_CATEGORY_INDEX = 3, +}; + +enum tomoyo_pref_index { + TOMOYO_PREF_MAX_AUDIT_LOG = 0, + TOMOYO_PREF_MAX_LEARNING_ENTRY = 1, + TOMOYO_MAX_PREF = 2, +}; + +struct tomoyo_shared_acl_head { + struct list_head list; + atomic_t users; +} __attribute__((packed)); + +struct tomoyo_path_info { + const char *name; + u32 hash; + u16 const_len; + bool is_dir; + bool is_patterned; +}; + +struct tomoyo_obj_info; + +struct tomoyo_execve; + +struct tomoyo_domain_info; + +struct tomoyo_acl_info; + +struct tomoyo_request_info { + struct tomoyo_obj_info *obj; + struct tomoyo_execve *ee; + struct tomoyo_domain_info *domain; + union { + struct { + const struct tomoyo_path_info *filename; + const struct tomoyo_path_info *matched_path; + u8 operation; + } path; + struct { + const struct tomoyo_path_info *filename1; + const struct tomoyo_path_info *filename2; + u8 operation; + } path2; + struct { + const struct tomoyo_path_info *filename; + unsigned int mode; + unsigned int major; + unsigned int minor; + u8 operation; + } mkdev; + struct { + const struct tomoyo_path_info *filename; + long unsigned int number; + u8 operation; + } path_number; + struct { + const struct tomoyo_path_info *name; + } environ; + struct { + const __be32 *address; + u16 port; + u8 protocol; + u8 operation; + bool is_ipv6; + } inet_network; + struct { + const struct tomoyo_path_info *address; + u8 protocol; + u8 operation; + } unix_network; + struct { + const struct tomoyo_path_info *type; + const struct tomoyo_path_info *dir; + const struct tomoyo_path_info *dev; + long unsigned int flags; + int need_dev; + } mount; + struct { + const struct tomoyo_path_info *domainname; + } task; + } param; + struct tomoyo_acl_info *matched_acl; + u8 param_type; + bool granted; + u8 retry; + u8 profile; + u8 mode; + u8 type; +}; + +struct tomoyo_mini_stat { + kuid_t uid; + kgid_t gid; + ino_t ino; + umode_t mode; + dev_t dev; + dev_t rdev; +}; + +struct tomoyo_obj_info { + bool validate_done; + bool stat_valid[4]; + struct path path1; + struct path path2; + struct tomoyo_mini_stat stat[4]; + struct tomoyo_path_info *symlink_target; +}; + +struct tomoyo_page_dump { + struct page *page; + char *data; +}; + +struct tomoyo_execve { + struct tomoyo_request_info r; + struct tomoyo_obj_info obj; + struct linux_binprm *bprm; + const struct tomoyo_path_info *transition; + struct tomoyo_page_dump dump; + char *tmp; +}; + +struct tomoyo_policy_namespace; + +struct tomoyo_domain_info { + struct list_head list; + struct list_head acl_info_list; + const struct tomoyo_path_info *domainname; + struct tomoyo_policy_namespace *ns; + long unsigned int group[4]; + u8 profile; + bool is_deleted; + bool flags[2]; + atomic_t users; +}; + +struct tomoyo_condition; + +struct tomoyo_acl_info { + struct list_head list; + struct tomoyo_condition *cond; + s8 is_deleted; + u8 type; +} __attribute__((packed)); + +struct tomoyo_condition { + struct tomoyo_shared_acl_head head; + u32 size; + u16 condc; + u16 numbers_count; + u16 names_count; + u16 argc; + u16 envc; + u8 grant_log; + const struct tomoyo_path_info *transit; +}; + +struct tomoyo_profile; + +struct tomoyo_policy_namespace { + struct tomoyo_profile *profile_ptr[256]; + struct list_head group_list[3]; + struct list_head policy_list[11]; + struct list_head acl_group[256]; + struct list_head namespace_list; + unsigned int profile_version; + const char *name; +}; + +struct tomoyo_io_buffer { + void (*read)(struct tomoyo_io_buffer *); + int (*write)(struct tomoyo_io_buffer *); + 
__poll_t (*poll)(struct file *, poll_table *); + struct mutex io_sem; + char *read_user_buf; + size_t read_user_buf_avail; + struct { + struct list_head *ns; + struct list_head *domain; + struct list_head *group; + struct list_head *acl; + size_t avail; + unsigned int step; + unsigned int query_index; + u16 index; + u16 cond_index; + u8 acl_group_index; + u8 cond_step; + u8 bit; + u8 w_pos; + bool eof; + bool print_this_domain_only; + bool print_transition_related_only; + bool print_cond_part; + const char *w[64]; + } r; + struct { + struct tomoyo_policy_namespace *ns; + struct tomoyo_domain_info *domain; + size_t avail; + bool is_delete; + } w; + char *read_buf; + size_t readbuf_size; + char *write_buf; + size_t writebuf_size; + enum tomoyo_securityfs_interface_index type; + u8 users; + struct list_head list; +}; + +struct tomoyo_preference { + unsigned int learning_max_entry; + bool enforcing_verbose; + bool learning_verbose; + bool permissive_verbose; +}; + +struct tomoyo_profile { + const struct tomoyo_path_info *comment; + struct tomoyo_preference *learning; + struct tomoyo_preference *permissive; + struct tomoyo_preference *enforcing; + struct tomoyo_preference preference; + u8 default_config; + u8 config[42]; + unsigned int pref[2]; +}; + +struct tomoyo_time { + u16 year; + u8 month; + u8 day; + u8 hour; + u8 min; + u8 sec; +}; + +struct tomoyo_log { + struct list_head list; + char *log; + int size; +}; + +enum tomoyo_value_type { + TOMOYO_VALUE_TYPE_INVALID = 0, + TOMOYO_VALUE_TYPE_DECIMAL = 1, + TOMOYO_VALUE_TYPE_OCTAL = 2, + TOMOYO_VALUE_TYPE_HEXADECIMAL = 3, +}; + +enum tomoyo_transition_type { + TOMOYO_TRANSITION_CONTROL_NO_RESET = 0, + TOMOYO_TRANSITION_CONTROL_RESET = 1, + TOMOYO_TRANSITION_CONTROL_NO_INITIALIZE = 2, + TOMOYO_TRANSITION_CONTROL_INITIALIZE = 3, + TOMOYO_TRANSITION_CONTROL_NO_KEEP = 4, + TOMOYO_TRANSITION_CONTROL_KEEP = 5, + TOMOYO_MAX_TRANSITION_TYPE = 6, +}; + +enum tomoyo_acl_entry_type_index { + TOMOYO_TYPE_PATH_ACL = 0, + TOMOYO_TYPE_PATH2_ACL = 1, + TOMOYO_TYPE_PATH_NUMBER_ACL = 2, + TOMOYO_TYPE_MKDEV_ACL = 3, + TOMOYO_TYPE_MOUNT_ACL = 4, + TOMOYO_TYPE_INET_ACL = 5, + TOMOYO_TYPE_UNIX_ACL = 6, + TOMOYO_TYPE_ENV_ACL = 7, + TOMOYO_TYPE_MANUAL_TASK_ACL = 8, +}; + +enum tomoyo_policy_stat_type { + TOMOYO_STAT_POLICY_UPDATES = 0, + TOMOYO_STAT_POLICY_LEARNING = 1, + TOMOYO_STAT_POLICY_PERMISSIVE = 2, + TOMOYO_STAT_POLICY_ENFORCING = 3, + TOMOYO_MAX_POLICY_STAT = 4, +}; + +struct tomoyo_acl_head { + struct list_head list; + s8 is_deleted; +} __attribute__((packed)); + +struct tomoyo_name { + struct tomoyo_shared_acl_head head; + struct tomoyo_path_info entry; +}; + +struct tomoyo_group; + +struct tomoyo_name_union { + const struct tomoyo_path_info *filename; + struct tomoyo_group *group; +}; + +struct tomoyo_group { + struct tomoyo_shared_acl_head head; + const struct tomoyo_path_info *group_name; + struct list_head member_list; +}; + +struct tomoyo_number_union { + long unsigned int values[2]; + struct tomoyo_group *group; + u8 value_type[2]; +}; + +struct tomoyo_ipaddr_union { + struct in6_addr ip[2]; + struct tomoyo_group *group; + bool is_ipv6; +}; + +struct tomoyo_path_group { + struct tomoyo_acl_head head; + const struct tomoyo_path_info *member_name; +}; + +struct tomoyo_number_group { + struct tomoyo_acl_head head; + struct tomoyo_number_union number; +}; + +struct tomoyo_address_group { + struct tomoyo_acl_head head; + struct tomoyo_ipaddr_union address; +}; + +struct tomoyo_argv { + long unsigned int index; + const struct tomoyo_path_info *value; + 
bool is_not; +}; + +struct tomoyo_envp { + const struct tomoyo_path_info *name; + const struct tomoyo_path_info *value; + bool is_not; +}; + +struct tomoyo_condition_element { + u8 left; + u8 right; + bool equals; +}; + +struct tomoyo_task_acl { + struct tomoyo_acl_info head; + const struct tomoyo_path_info *domainname; +}; + +struct tomoyo_path_acl { + struct tomoyo_acl_info head; + u16 perm; + struct tomoyo_name_union name; +}; + +struct tomoyo_path_number_acl { + struct tomoyo_acl_info head; + u8 perm; + struct tomoyo_name_union name; + struct tomoyo_number_union number; +}; + +struct tomoyo_mkdev_acl { + struct tomoyo_acl_info head; + u8 perm; + struct tomoyo_name_union name; + struct tomoyo_number_union mode; + struct tomoyo_number_union major; + struct tomoyo_number_union minor; +}; + +struct tomoyo_path2_acl { + struct tomoyo_acl_info head; + u8 perm; + struct tomoyo_name_union name1; + struct tomoyo_name_union name2; +}; + +struct tomoyo_mount_acl { + struct tomoyo_acl_info head; + struct tomoyo_name_union dev_name; + struct tomoyo_name_union dir_name; + struct tomoyo_name_union fs_type; + struct tomoyo_number_union flags; +}; + +struct tomoyo_env_acl { + struct tomoyo_acl_info head; + const struct tomoyo_path_info *env; +}; + +struct tomoyo_inet_acl { + struct tomoyo_acl_info head; + u8 protocol; + u8 perm; + struct tomoyo_ipaddr_union address; + struct tomoyo_number_union port; +}; + +struct tomoyo_unix_acl { + struct tomoyo_acl_info head; + u8 protocol; + u8 perm; + struct tomoyo_name_union name; +}; + +struct tomoyo_acl_param { + char *data; + struct list_head *list; + struct tomoyo_policy_namespace *ns; + bool is_delete; +}; + +struct tomoyo_transition_control { + struct tomoyo_acl_head head; + u8 type; + bool is_last_name; + const struct tomoyo_path_info *domainname; + const struct tomoyo_path_info *program; +}; + +struct tomoyo_aggregator { + struct tomoyo_acl_head head; + const struct tomoyo_path_info *original_name; + const struct tomoyo_path_info *aggregated_name; +}; + +struct tomoyo_manager { + struct tomoyo_acl_head head; + const struct tomoyo_path_info *manager; +}; + +struct tomoyo_task { + struct tomoyo_domain_info *domain_info; + struct tomoyo_domain_info *old_domain_info; +}; + +struct tomoyo_query { + struct list_head list; + struct tomoyo_domain_info *domain; + char *query; + size_t query_len; + unsigned int serial; + u8 timer; + u8 answer; + u8 retry; +}; + +enum tomoyo_special_mount { + TOMOYO_MOUNT_BIND = 0, + TOMOYO_MOUNT_MOVE = 1, + TOMOYO_MOUNT_REMOUNT = 2, + TOMOYO_MOUNT_MAKE_UNBINDABLE = 3, + TOMOYO_MOUNT_MAKE_PRIVATE = 4, + TOMOYO_MOUNT_MAKE_SLAVE = 5, + TOMOYO_MOUNT_MAKE_SHARED = 6, + TOMOYO_MAX_SPECIAL_MOUNT = 7, +}; + +struct tomoyo_inet_addr_info { + __be16 port; + const __be32 *address; + bool is_ipv6; +}; + +struct tomoyo_unix_addr_info { + u8 *addr; + unsigned int addr_len; +}; + +struct tomoyo_addr_info { + u8 protocol; + u8 operation; + struct tomoyo_inet_addr_info inet; + struct tomoyo_unix_addr_info unix0; +}; + +enum audit_mode { + AUDIT_NORMAL = 0, + AUDIT_QUIET_DENIED = 1, + AUDIT_QUIET = 2, + AUDIT_NOQUIET = 3, + AUDIT_ALL = 4, +}; + +enum aa_sfs_type { + AA_SFS_TYPE_BOOLEAN = 0, + AA_SFS_TYPE_STRING = 1, + AA_SFS_TYPE_U64 = 2, + AA_SFS_TYPE_FOPS = 3, + AA_SFS_TYPE_DIR = 4, +}; + +struct aa_sfs_entry { + const char *name; + struct dentry *dentry; + umode_t mode; + enum aa_sfs_type v_type; + union { + bool boolean; + char *string; + long unsigned int u64; + struct aa_sfs_entry *files; + } v; + const struct file_operations *file_ops; +}; + 
+enum aafs_ns_type { + AAFS_NS_DIR = 0, + AAFS_NS_PROFS = 1, + AAFS_NS_NS = 2, + AAFS_NS_RAW_DATA = 3, + AAFS_NS_LOAD = 4, + AAFS_NS_REPLACE = 5, + AAFS_NS_REMOVE = 6, + AAFS_NS_REVISION = 7, + AAFS_NS_COUNT = 8, + AAFS_NS_MAX_COUNT = 9, + AAFS_NS_SIZE = 10, + AAFS_NS_MAX_SIZE = 11, + AAFS_NS_OWNER = 12, + AAFS_NS_SIZEOF = 13, +}; + +enum aafs_prof_type { + AAFS_PROF_DIR = 0, + AAFS_PROF_PROFS = 1, + AAFS_PROF_NAME = 2, + AAFS_PROF_MODE = 3, + AAFS_PROF_ATTACH = 4, + AAFS_PROF_HASH = 5, + AAFS_PROF_RAW_DATA = 6, + AAFS_PROF_RAW_HASH = 7, + AAFS_PROF_RAW_ABI = 8, + AAFS_PROF_SIZEOF = 9, +}; + +struct table_header { + u16 td_id; + u16 td_flags; + u32 td_hilen; + u32 td_lolen; + char td_data[0]; +}; + +struct aa_dfa { + struct kref count; + u16 flags; + u32 max_oob; + struct table_header *tables[8]; +}; + +struct aa_policy { + const char *name; + char *hname; + struct list_head list; + struct list_head profiles; +}; + +struct aa_labelset { + rwlock_t lock; + struct rb_root root; +}; + +enum label_flags { + FLAG_HAT = 1, + FLAG_UNCONFINED = 2, + FLAG_NULL = 4, + FLAG_IX_ON_NAME_ERROR = 8, + FLAG_IMMUTIBLE = 16, + FLAG_USER_DEFINED = 32, + FLAG_NO_LIST_REF = 64, + FLAG_NS_COUNT = 128, + FLAG_IN_TREE = 256, + FLAG_PROFILE = 512, + FLAG_EXPLICIT = 1024, + FLAG_STALE = 2048, + FLAG_RENAMED = 4096, + FLAG_REVOKED = 8192, +}; + +struct aa_label; + +struct aa_proxy { + struct kref count; + struct aa_label *label; +}; + +struct aa_profile; + +struct aa_label { + struct kref count; + struct rb_node node; + struct callback_head rcu; + struct aa_proxy *proxy; + char *hname; + long int flags; + u32 secid; + int size; + struct aa_profile *vec[0]; +}; + +struct label_it { + int i; + int j; +}; + +struct aa_policydb { + struct aa_dfa *dfa; + unsigned int start[18]; +}; + +struct aa_domain { + int size; + char **table; +}; + +struct aa_file_rules { + unsigned int start; + struct aa_dfa *dfa; + struct aa_domain trans; +}; + +struct aa_caps { + kernel_cap_t allow; + kernel_cap_t audit; + kernel_cap_t denied; + kernel_cap_t quiet; + kernel_cap_t kill; + kernel_cap_t extended; +}; + +struct aa_rlimit { + unsigned int mask; + struct rlimit limits[16]; +}; + +struct aa_ns; + +struct aa_net_compat; + +struct aa_secmark; + +struct aa_loaddata; + +struct aa_profile { + struct aa_policy base; + struct aa_profile *parent; + struct aa_ns *ns; + const char *rename; + const char *attach; + struct aa_dfa *xmatch; + int xmatch_len; + enum audit_mode audit; + long int mode; + u32 path_flags; + const char *disconnected; + int size; + struct aa_policydb policy; + struct aa_file_rules file; + struct aa_caps caps; + struct aa_net_compat *net_compat; + int xattr_count; + char **xattrs; + struct aa_rlimit rlimits; + int secmark_count; + struct aa_secmark *secmark; + struct aa_loaddata *rawdata; + unsigned char *hash; + char *dirname; + struct dentry *dents[9]; + struct rhashtable *data; + struct aa_label label; +}; + +struct aa_perms { + u32 allow; + u32 audit; + u32 deny; + u32 quiet; + u32 kill; + u32 stop; + u32 complain; + u32 cond; + u32 hide; + u32 prompt; + u16 xindex; +}; + +struct path_cond { + kuid_t uid; + umode_t mode; +}; + +struct aa_net_compat { + u16 allow[45]; + u16 audit[45]; + u16 quiet[45]; +}; + +struct aa_secmark { + u8 audit; + u8 deny; + u32 secid; + char *label; +}; + +enum profile_mode { + APPARMOR_ENFORCE = 0, + APPARMOR_COMPLAIN = 1, + APPARMOR_KILL = 2, + APPARMOR_UNCONFINED = 3, +}; + +struct aa_data { + char *key; + u32 size; + char *data; + struct rhash_head head; +}; + +struct aa_ns_acct { + int 
max_size; + int max_count; + int size; + int count; +}; + +struct aa_ns { + struct aa_policy base; + struct aa_ns *parent; + struct mutex lock; + struct aa_ns_acct acct; + struct aa_profile *unconfined; + struct list_head sub_ns; + atomic_t uniq_null; + long int uniq_id; + int level; + long int revision; + wait_queue_head_t wait; + struct aa_labelset labels; + struct list_head rawdata_list; + struct dentry *dents[13]; +}; + +struct aa_loaddata { + struct kref count; + struct list_head list; + struct work_struct work; + struct dentry *dents[6]; + struct aa_ns *ns; + char *name; + size_t size; + size_t compressed_size; + long int revision; + int abi; + unsigned char *hash; + char *data; +}; + +enum { + AAFS_LOADDATA_ABI = 0, + AAFS_LOADDATA_REVISION = 1, + AAFS_LOADDATA_HASH = 2, + AAFS_LOADDATA_DATA = 3, + AAFS_LOADDATA_COMPRESSED_SIZE = 4, + AAFS_LOADDATA_DIR = 5, + AAFS_LOADDATA_NDENTS = 6, +}; + +struct rawdata_f_data { + struct aa_loaddata *loaddata; +}; + +struct aa_revision { + struct aa_ns *ns; + long int last_read; +}; + +struct multi_transaction { + struct kref count; + ssize_t size; + char data[0]; +}; + +struct apparmor_audit_data { + int error; + int type; + const char *op; + struct aa_label *label; + const char *name; + const char *info; + u32 request; + u32 denied; + union { + struct { + struct aa_label *peer; + union { + struct { + const char *target; + kuid_t ouid; + } fs; + struct { + int rlim; + long unsigned int max; + } rlim; + struct { + int signal; + int unmappedsig; + }; + struct { + int type; + int protocol; + struct sock *peer_sk; + void *addr; + int addrlen; + } net; + }; + }; + struct { + struct aa_profile *profile; + const char *ns; + long int pos; + } iface; + struct { + const char *src_name; + const char *type; + const char *trans; + const char *data; + long unsigned int flags; + } mnt; + }; +}; + +enum audit_type { + AUDIT_APPARMOR_AUDIT = 0, + AUDIT_APPARMOR_ALLOWED = 1, + AUDIT_APPARMOR_DENIED = 2, + AUDIT_APPARMOR_HINT = 3, + AUDIT_APPARMOR_STATUS = 4, + AUDIT_APPARMOR_ERROR = 5, + AUDIT_APPARMOR_KILL = 6, + AUDIT_APPARMOR_AUTO = 7, +}; + +struct aa_audit_rule { + struct aa_label *label; +}; + +struct audit_cache { + struct aa_profile *profile; + kernel_cap_t caps; +}; + +struct aa_task_ctx { + struct aa_label *nnp; + struct aa_label *onexec; + struct aa_label *previous; + u64 token; +}; + +struct counted_str { + struct kref count; + char name[0]; +}; + +struct match_workbuf { + unsigned int count; + unsigned int pos; + unsigned int len; + unsigned int size; + unsigned int history[24]; +}; + +enum path_flags { + PATH_IS_DIR = 1, + PATH_SOCK_COND = 2, + PATH_CONNECT_PATH = 4, + PATH_CHROOT_REL = 8, + PATH_CHROOT_NSCONNECT = 16, + PATH_DELEGATE_DELETED = 32768, + PATH_MEDIATE_DELETED = 65536, +}; + +struct aa_load_ent { + struct list_head list; + struct aa_profile *new; + struct aa_profile *old; + struct aa_profile *rename; + const char *ns_name; +}; + +enum aa_code { + AA_U8 = 0, + AA_U16 = 1, + AA_U32 = 2, + AA_U64 = 3, + AA_NAME = 4, + AA_STRING = 5, + AA_BLOB = 6, + AA_STRUCT = 7, + AA_STRUCTEND = 8, + AA_LIST = 9, + AA_LISTEND = 10, + AA_ARRAY = 11, + AA_ARRAYEND = 12, +}; + +struct aa_ext { + void *start; + void *end; + void *pos; + u32 version; +}; + +struct aa_file_ctx { + spinlock_t lock; + struct aa_label *label; + u32 allow; +}; + +struct aa_sk_ctx { + struct aa_label *label; + struct aa_label *peer; + struct path path; +}; + +union aa_buffer { + struct list_head list; + char buffer[1]; +}; + +struct ptrace_relation { + struct task_struct *tracer; 
+ struct task_struct *tracee; + bool invalid; + struct list_head node; + struct callback_head rcu; +}; + +struct access_report_info { + struct callback_head work; + const char *access; + struct task_struct *target; + struct task_struct *agent; +}; + +enum sid_policy_type { + SIDPOL_DEFAULT = 0, + SIDPOL_CONSTRAINED = 1, + SIDPOL_ALLOWED = 2, +}; + +typedef union { + kuid_t uid; + kgid_t gid; +} kid_t; + +enum setid_type { + UID = 0, + GID = 1, +}; + +struct setid_rule { + struct hlist_node next; + kid_t src_id; + kid_t dst_id; + enum setid_type type; +}; + +struct setid_ruleset { + struct hlist_head rules[256]; + char *policy_str; + struct callback_head rcu; + enum setid_type type; +}; + +enum devcg_behavior { + DEVCG_DEFAULT_NONE = 0, + DEVCG_DEFAULT_ALLOW = 1, + DEVCG_DEFAULT_DENY = 2, +}; + +struct dev_exception_item { + u32 major; + u32 minor; + short int type; + short int access; + struct list_head list; + struct callback_head rcu; +}; + +struct dev_cgroup { + struct cgroup_subsys_state css; + struct list_head exceptions; + enum devcg_behavior behavior; +}; + +struct landlock_ruleset_attr { + __u64 handled_access_fs; +}; + +enum landlock_rule_type { + LANDLOCK_RULE_PATH_BENEATH = 1, +}; + +struct landlock_path_beneath_attr { + __u64 allowed_access; + __s32 parent_fd; +} __attribute__((packed)); + +struct landlock_hierarchy { + struct landlock_hierarchy *parent; + refcount_t usage; +}; + +struct landlock_ruleset { + struct rb_root root; + struct landlock_hierarchy *hierarchy; + union { + struct work_struct work_free; + struct { + struct mutex lock; + refcount_t usage; + u32 num_rules; + u32 num_layers; + u16 fs_access_masks[0]; + }; + }; +}; + +struct landlock_cred_security { + struct landlock_ruleset *domain; +}; + +struct landlock_object; + +struct landlock_object_underops { + void (*release)(struct landlock_object * const); +}; + +struct landlock_object { + refcount_t usage; + spinlock_t lock; + void *underobj; + union { + struct callback_head rcu_free; + const struct landlock_object_underops *underops; + }; +}; + +struct landlock_layer { + u16 level; + u16 access; +}; + +struct landlock_rule { + struct rb_node node; + struct landlock_object *object; + u32 num_layers; + struct landlock_layer layers[0]; +}; + +struct landlock_inode_security { + struct landlock_object *object; +}; + +struct landlock_superblock_security { + atomic_long_t inode_refs; +}; + +enum integrity_status { + INTEGRITY_PASS = 0, + INTEGRITY_PASS_IMMUTABLE = 1, + INTEGRITY_FAIL = 2, + INTEGRITY_NOLABEL = 3, + INTEGRITY_NOXATTRS = 4, + INTEGRITY_UNKNOWN = 5, +}; + +struct ima_digest_data { + u8 algo; + u8 length; + union { + struct { + u8 unused; + u8 type; + } sha1; + struct { + u8 type; + u8 algo; + } ng; + u8 data[2]; + } xattr; + u8 digest[0]; +}; + +struct integrity_iint_cache { + struct rb_node rb_node; + struct mutex mutex; + struct inode *inode; + u64 version; + long unsigned int flags; + long unsigned int measured_pcrs; + long unsigned int atomic_flags; + enum integrity_status ima_file_status: 4; + enum integrity_status ima_mmap_status: 4; + enum integrity_status ima_bprm_status: 4; + enum integrity_status ima_read_status: 4; + enum integrity_status ima_creds_status: 4; + enum integrity_status evm_status: 4; + struct ima_digest_data *ima_hash; +}; + +struct modsig; + +struct public_key { + void *key; + u32 keylen; + enum OID algo; + void *params; + u32 paramlen; + bool key_is_private; + const char *id_type; + const char *pkey_algo; +}; + +struct asymmetric_key_id; + +struct public_key_signature { + struct 
asymmetric_key_id *auth_ids[2]; + u8 *s; + u32 s_size; + u8 *digest; + u8 digest_size; + const char *pkey_algo; + const char *hash_algo; + const char *encoding; + const void *data; + unsigned int data_size; +}; + +struct asymmetric_key_id { + short unsigned int len; + unsigned char data[0]; +}; + +enum asymmetric_payload_bits { + asym_crypto = 0, + asym_subtype = 1, + asym_key_ids = 2, + asym_auth = 3, +}; + +struct signature_v2_hdr { + uint8_t type; + uint8_t version; + uint8_t hash_algo; + __be32 keyid; + __be16 sig_size; + uint8_t sig[0]; +} __attribute__((packed)); + +typedef struct { + efi_guid_t signature_owner; + u8 signature_data[0]; +} efi_signature_data_t; + +typedef struct { + efi_guid_t signature_type; + u32 signature_list_size; + u32 signature_header_size; + u32 signature_size; + u8 signature_header[0]; +} efi_signature_list_t; + +typedef void (*efi_element_handler_t)(const char *, const void *, size_t); + +struct efi_mokvar_table_entry { + char name[256]; + u64 data_size; + u8 data[0]; +}; + +struct evm_ima_xattr_data { + u8 type; + u8 data[0]; +}; + +enum ima_show_type { + IMA_SHOW_BINARY = 0, + IMA_SHOW_BINARY_NO_FIELD_LEN = 1, + IMA_SHOW_BINARY_OLD_STRING_FMT = 2, + IMA_SHOW_ASCII = 3, +}; + +struct ima_event_data { + struct integrity_iint_cache *iint; + struct file *file; + const unsigned char *filename; + struct evm_ima_xattr_data *xattr_value; + int xattr_len; + const struct modsig *modsig; + const char *violation; + const void *buf; + int buf_len; +}; + +struct ima_field_data { + u8 *data; + u32 len; +}; + +struct ima_template_field { + const char field_id[16]; + int (*field_init)(struct ima_event_data *, struct ima_field_data *); + void (*field_show)(struct seq_file *, enum ima_show_type, struct ima_field_data *); +}; + +struct ima_template_desc { + struct list_head list; + char *name; + char *fmt; + int num_fields; + const struct ima_template_field **fields; +}; + +struct ima_template_entry { + int pcr; + struct tpm_digest *digests; + struct ima_template_desc *template_desc; + u32 template_data_len; + struct ima_field_data template_data[0]; +}; + +struct ima_queue_entry { + struct hlist_node hnext; + struct list_head later; + struct ima_template_entry *entry; +}; + +struct ima_h_table { + atomic_long_t len; + atomic_long_t violations; + struct hlist_head queue[1024]; +}; + +enum ima_fs_flags { + IMA_FS_BUSY = 0, +}; + +enum evm_ima_xattr_type { + IMA_XATTR_DIGEST = 1, + EVM_XATTR_HMAC = 2, + EVM_IMA_XATTR_DIGSIG = 3, + IMA_XATTR_DIGEST_NG = 4, + EVM_XATTR_PORTABLE_DIGSIG = 5, + IMA_XATTR_LAST = 6, +}; + +enum ima_hooks { + NONE___2 = 0, + FILE_CHECK = 1, + MMAP_CHECK = 2, + BPRM_CHECK = 3, + CREDS_CHECK = 4, + POST_SETATTR = 5, + MODULE_CHECK = 6, + FIRMWARE_CHECK = 7, + KEXEC_KERNEL_CHECK = 8, + KEXEC_INITRAMFS_CHECK = 9, + POLICY_CHECK = 10, + KEXEC_CMDLINE = 11, + KEY_CHECK = 12, + CRITICAL_DATA = 13, + MAX_CHECK = 14, +}; + +enum tpm_pcrs { + TPM_PCR0 = 0, + TPM_PCR8 = 8, + TPM_PCR10 = 10, +}; + +struct ima_algo_desc { + struct crypto_shash *tfm; + enum hash_algo algo; +}; + +enum lsm_rule_types { + LSM_OBJ_USER = 0, + LSM_OBJ_ROLE = 1, + LSM_OBJ_TYPE = 2, + LSM_SUBJ_USER = 3, + LSM_SUBJ_ROLE = 4, + LSM_SUBJ_TYPE = 5, +}; + +enum policy_types { + ORIGINAL_TCB = 1, + DEFAULT_TCB = 2, +}; + +enum policy_rule_list { + IMA_DEFAULT_POLICY = 1, + IMA_CUSTOM_POLICY = 2, +}; + +struct ima_rule_opt_list { + size_t count; + char *items[0]; +}; + +struct ima_rule_entry { + struct list_head list; + int action; + unsigned int flags; + enum ima_hooks func; + int mask; + long 
unsigned int fsmagic; + uuid_t fsuuid; + kuid_t uid; + kuid_t fowner; + bool (*uid_op)(kuid_t, kuid_t); + bool (*fowner_op)(kuid_t, kuid_t); + int pcr; + struct { + void *rules[4]; + char *args_p; + int type; + } lsm[6]; + char *fsname; + struct ima_rule_opt_list *keyrings; + struct ima_rule_opt_list *label; + struct ima_template_desc *template; +}; + +enum { + Opt_measure = 0, + Opt_dont_measure = 1, + Opt_appraise = 2, + Opt_dont_appraise = 3, + Opt_audit = 4, + Opt_hash___3 = 5, + Opt_dont_hash = 6, + Opt_obj_user = 7, + Opt_obj_role = 8, + Opt_obj_type = 9, + Opt_subj_user = 10, + Opt_subj_role = 11, + Opt_subj_type = 12, + Opt_func = 13, + Opt_mask = 14, + Opt_fsmagic = 15, + Opt_fsname = 16, + Opt_fsuuid = 17, + Opt_uid_eq = 18, + Opt_euid_eq = 19, + Opt_fowner_eq = 20, + Opt_uid_gt = 21, + Opt_euid_gt = 22, + Opt_fowner_gt = 23, + Opt_uid_lt = 24, + Opt_euid_lt = 25, + Opt_fowner_lt = 26, + Opt_appraise_type = 27, + Opt_appraise_flag = 28, + Opt_permit_directio = 29, + Opt_pcr = 30, + Opt_template = 31, + Opt_keyrings = 32, + Opt_label = 33, + Opt_err___10 = 34, +}; + +struct ima_kexec_hdr { + u16 version; + u16 _reserved0; + u32 _reserved1; + u64 buffer_size; + u64 count; +}; + +enum header_fields { + HDR_PCR = 0, + HDR_DIGEST = 1, + HDR_TEMPLATE_NAME = 2, + HDR_TEMPLATE_DATA = 3, + HDR__LAST = 4, +}; + +enum data_formats { + DATA_FMT_DIGEST = 0, + DATA_FMT_DIGEST_WITH_ALGO = 1, + DATA_FMT_STRING = 2, + DATA_FMT_HEX = 3, +}; + +struct modsig___2 { + struct pkcs7_message *pkcs7_msg; + enum hash_algo hash_algo; + const u8 *digest; + u32 digest_size; + int raw_pkcs7_len; + u8 raw_pkcs7[0]; +}; + +struct ima_key_entry { + struct list_head list; + void *payload; + size_t payload_len; + char *keyring_name; +}; + +struct evm_xattr { + struct evm_ima_xattr_data data; + u8 digest[20]; +}; + +struct xattr_list { + struct list_head list; + char *name; +}; + +struct evm_digest { + struct ima_digest_data hdr; + char digest[64]; +}; + +struct h_misc { + long unsigned int ino; + __u32 generation; + uid_t uid; + gid_t gid; + umode_t mode; +}; + +enum { + CRYPTO_MSG_ALG_REQUEST = 0, + CRYPTO_MSG_ALG_REGISTER = 1, + CRYPTO_MSG_ALG_LOADED = 2, +}; + +struct crypto_larval { + struct crypto_alg alg; + struct crypto_alg *adult; + struct completion completion; + u32 mask; +}; + +struct crypto_cipher { + struct crypto_tfm base; +}; + +enum { + CRYPTOA_UNSPEC = 0, + CRYPTOA_ALG = 1, + CRYPTOA_TYPE = 2, + CRYPTOA_U32 = 3, + __CRYPTOA_MAX = 4, +}; + +struct crypto_attr_alg { + char name[128]; +}; + +struct crypto_attr_type { + u32 type; + u32 mask; +}; + +struct crypto_attr_u32 { + u32 num; +}; + +struct rtattr { + short unsigned int rta_len; + short unsigned int rta_type; +}; + +struct crypto_queue { + struct list_head list; + struct list_head *backlog; + unsigned int qlen; + unsigned int max_qlen; +}; + +enum { + NAPI_STATE_SCHED = 0, + NAPI_STATE_MISSED = 1, + NAPI_STATE_DISABLE = 2, + NAPI_STATE_NPSVC = 3, + NAPI_STATE_LISTED = 4, + NAPI_STATE_NO_BUSY_POLL = 5, + NAPI_STATE_IN_BUSY_POLL = 6, + NAPI_STATE_PREFER_BUSY_POLL = 7, + NAPI_STATE_THREADED = 8, + NAPI_STATE_SCHED_THREADED = 9, +}; + +enum xps_map_type { + XPS_CPUS = 0, + XPS_RXQS = 1, + XPS_MAPS_MAX = 2, +}; + +enum bpf_xdp_mode { + XDP_MODE_SKB = 0, + XDP_MODE_DRV = 1, + XDP_MODE_HW = 2, + __MAX_XDP_MODE = 3, +}; + +enum { + NETIF_MSG_DRV_BIT = 0, + NETIF_MSG_PROBE_BIT = 1, + NETIF_MSG_LINK_BIT = 2, + NETIF_MSG_TIMER_BIT = 3, + NETIF_MSG_IFDOWN_BIT = 4, + NETIF_MSG_IFUP_BIT = 5, + NETIF_MSG_RX_ERR_BIT = 6, + NETIF_MSG_TX_ERR_BIT = 7, + 
NETIF_MSG_TX_QUEUED_BIT = 8, + NETIF_MSG_INTR_BIT = 9, + NETIF_MSG_TX_DONE_BIT = 10, + NETIF_MSG_RX_STATUS_BIT = 11, + NETIF_MSG_PKTDATA_BIT = 12, + NETIF_MSG_HW_BIT = 13, + NETIF_MSG_WOL_BIT = 14, + NETIF_MSG_CLASS_COUNT = 15, +}; + +struct scatter_walk { + struct scatterlist *sg; + unsigned int offset; +}; + +struct aead_request { + struct crypto_async_request base; + unsigned int assoclen; + unsigned int cryptlen; + u8 *iv; + struct scatterlist *src; + struct scatterlist *dst; + void *__ctx[0]; +}; + +struct crypto_aead; + +struct aead_alg { + int (*setkey)(struct crypto_aead *, const u8 *, unsigned int); + int (*setauthsize)(struct crypto_aead *, unsigned int); + int (*encrypt)(struct aead_request *); + int (*decrypt)(struct aead_request *); + int (*init)(struct crypto_aead *); + void (*exit)(struct crypto_aead *); + unsigned int ivsize; + unsigned int maxauthsize; + unsigned int chunksize; + struct crypto_alg base; +}; + +struct crypto_aead { + unsigned int authsize; + unsigned int reqsize; + struct crypto_tfm base; +}; + +struct aead_instance { + void (*free)(struct aead_instance *); + union { + struct { + char head[64]; + struct crypto_instance base; + } s; + struct aead_alg alg; + }; +}; + +struct crypto_aead_spawn { + struct crypto_spawn base; +}; + +enum crypto_attr_type_t { + CRYPTOCFGA_UNSPEC = 0, + CRYPTOCFGA_PRIORITY_VAL = 1, + CRYPTOCFGA_REPORT_LARVAL = 2, + CRYPTOCFGA_REPORT_HASH = 3, + CRYPTOCFGA_REPORT_BLKCIPHER = 4, + CRYPTOCFGA_REPORT_AEAD = 5, + CRYPTOCFGA_REPORT_COMPRESS = 6, + CRYPTOCFGA_REPORT_RNG = 7, + CRYPTOCFGA_REPORT_CIPHER = 8, + CRYPTOCFGA_REPORT_AKCIPHER = 9, + CRYPTOCFGA_REPORT_KPP = 10, + CRYPTOCFGA_REPORT_ACOMP = 11, + CRYPTOCFGA_STAT_LARVAL = 12, + CRYPTOCFGA_STAT_HASH = 13, + CRYPTOCFGA_STAT_BLKCIPHER = 14, + CRYPTOCFGA_STAT_AEAD = 15, + CRYPTOCFGA_STAT_COMPRESS = 16, + CRYPTOCFGA_STAT_RNG = 17, + CRYPTOCFGA_STAT_CIPHER = 18, + CRYPTOCFGA_STAT_AKCIPHER = 19, + CRYPTOCFGA_STAT_KPP = 20, + CRYPTOCFGA_STAT_ACOMP = 21, + __CRYPTOCFGA_MAX = 22, +}; + +struct crypto_report_aead { + char type[64]; + char geniv[64]; + unsigned int blocksize; + unsigned int maxauthsize; + unsigned int ivsize; +}; + +struct crypto_sync_skcipher; + +struct aead_geniv_ctx { + spinlock_t lock; + struct crypto_aead *child; + struct crypto_sync_skcipher *sknull; + u8 salt[0]; +}; + +struct crypto_rng; + +struct rng_alg { + int (*generate)(struct crypto_rng *, const u8 *, unsigned int, u8 *, unsigned int); + int (*seed)(struct crypto_rng *, const u8 *, unsigned int); + void (*set_ent)(struct crypto_rng *, const u8 *, unsigned int); + unsigned int seedsize; + struct crypto_alg base; +}; + +struct crypto_rng { + struct crypto_tfm base; +}; + +struct crypto_cipher_spawn { + struct crypto_spawn base; +}; + +struct crypto_sync_skcipher { + struct crypto_skcipher base; +}; + +struct skcipher_instance { + void (*free)(struct skcipher_instance *); + union { + struct { + char head[64]; + struct crypto_instance base; + } s; + struct skcipher_alg alg; + }; +}; + +struct crypto_skcipher_spawn { + struct crypto_spawn base; +}; + +struct skcipher_walk { + union { + struct { + struct page *page; + long unsigned int offset; + } phys; + struct { + u8 *page; + void *addr; + } virt; + } src; + union { + struct { + struct page *page; + long unsigned int offset; + } phys; + struct { + u8 *page; + void *addr; + } virt; + } dst; + struct scatter_walk in; + unsigned int nbytes; + struct scatter_walk out; + unsigned int total; + struct list_head buffers; + u8 *page; + u8 *buffer; + u8 *oiv; + void *iv; + 
unsigned int ivsize; + int flags; + unsigned int blocksize; + unsigned int stride; + unsigned int alignmask; +}; + +struct skcipher_ctx_simple { + struct crypto_cipher *cipher; +}; + +struct crypto_report_blkcipher { + char type[64]; + char geniv[64]; + unsigned int blocksize; + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; +}; + +enum { + SKCIPHER_WALK_PHYS = 1, + SKCIPHER_WALK_SLOW = 2, + SKCIPHER_WALK_COPY = 4, + SKCIPHER_WALK_DIFF = 8, + SKCIPHER_WALK_SLEEP = 16, +}; + +struct skcipher_walk_buffer { + struct list_head entry; + struct scatter_walk dst; + unsigned int len; + u8 *data; + u8 buffer[0]; +}; + +struct ahash_alg { + int (*init)(struct ahash_request *); + int (*update)(struct ahash_request *); + int (*final)(struct ahash_request *); + int (*finup)(struct ahash_request *); + int (*digest)(struct ahash_request *); + int (*export)(struct ahash_request *, void *); + int (*import)(struct ahash_request *, const void *); + int (*setkey)(struct crypto_ahash *, const u8 *, unsigned int); + int (*init_tfm)(struct crypto_ahash *); + void (*exit_tfm)(struct crypto_ahash *); + struct hash_alg_common halg; +}; + +struct crypto_hash_walk { + char *data; + unsigned int offset; + unsigned int alignmask; + struct page *pg; + unsigned int entrylen; + unsigned int total; + struct scatterlist *sg; + unsigned int flags; +}; + +struct ahash_instance { + void (*free)(struct ahash_instance *); + union { + struct { + char head[88]; + struct crypto_instance base; + } s; + struct ahash_alg alg; + }; +}; + +struct crypto_ahash_spawn { + struct crypto_spawn base; +}; + +struct crypto_report_hash { + char type[64]; + unsigned int blocksize; + unsigned int digestsize; +}; + +struct ahash_request_priv { + crypto_completion_t complete; + void *data; + u8 *result; + u32 flags; + void *ubuf[0]; +}; + +struct shash_instance { + void (*free)(struct shash_instance *); + union { + struct { + char head[96]; + struct crypto_instance base; + } s; + struct shash_alg alg; + }; +}; + +struct crypto_shash_spawn { + struct crypto_spawn base; +}; + +struct crypto_report_akcipher { + char type[64]; +}; + +struct akcipher_request { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int src_len; + unsigned int dst_len; + void *__ctx[0]; +}; + +struct crypto_akcipher { + struct crypto_tfm base; +}; + +struct akcipher_alg { + int (*sign)(struct akcipher_request *); + int (*verify)(struct akcipher_request *); + int (*encrypt)(struct akcipher_request *); + int (*decrypt)(struct akcipher_request *); + int (*set_pub_key)(struct crypto_akcipher *, const void *, unsigned int); + int (*set_priv_key)(struct crypto_akcipher *, const void *, unsigned int); + unsigned int (*max_size)(struct crypto_akcipher *); + int (*init)(struct crypto_akcipher *); + void (*exit)(struct crypto_akcipher *); + unsigned int reqsize; + struct crypto_alg base; +}; + +struct akcipher_instance { + void (*free)(struct akcipher_instance *); + union { + struct { + char head[80]; + struct crypto_instance base; + } s; + struct akcipher_alg alg; + }; +}; + +struct crypto_akcipher_spawn { + struct crypto_spawn base; +}; + +struct crypto_report_kpp { + char type[64]; +}; + +typedef long unsigned int mpi_limb_t; + +struct gcry_mpi { + int alloced; + int nlimbs; + int nbits; + int sign; + unsigned int flags; + mpi_limb_t *d; +}; + +typedef struct gcry_mpi *MPI; + +struct dh_ctx { + MPI p; + MPI q; + MPI g; + MPI xa; +}; + +enum { + CRYPTO_KPP_SECRET_TYPE_UNKNOWN = 0, + 
CRYPTO_KPP_SECRET_TYPE_DH = 1, + CRYPTO_KPP_SECRET_TYPE_ECDH = 2, +}; + +struct kpp_secret { + short unsigned int type; + short unsigned int len; +}; + +enum rsapubkey_actions { + ACT_rsa_get_e = 0, + ACT_rsa_get_n = 1, + NR__rsapubkey_actions = 2, +}; + +enum rsaprivkey_actions { + ACT_rsa_get_d = 0, + ACT_rsa_get_dp = 1, + ACT_rsa_get_dq = 2, + ACT_rsa_get_e___2 = 3, + ACT_rsa_get_n___2 = 4, + ACT_rsa_get_p = 5, + ACT_rsa_get_q = 6, + ACT_rsa_get_qinv = 7, + NR__rsaprivkey_actions = 8, +}; + +struct rsa_key { + const u8 *n; + const u8 *e; + const u8 *d; + const u8 *p; + const u8 *q; + const u8 *dp; + const u8 *dq; + const u8 *qinv; + size_t n_sz; + size_t e_sz; + size_t d_sz; + size_t p_sz; + size_t q_sz; + size_t dp_sz; + size_t dq_sz; + size_t qinv_sz; +}; + +struct rsa_mpi_key { + MPI n; + MPI e; + MPI d; +}; + +struct asn1_decoder___2; + +struct rsa_asn1_template { + const char *name; + const u8 *data; + size_t size; +}; + +struct pkcs1pad_ctx { + struct crypto_akcipher *child; + unsigned int key_size; +}; + +struct pkcs1pad_inst_ctx { + struct crypto_akcipher_spawn spawn; + const struct rsa_asn1_template *digest_info; +}; + +struct pkcs1pad_request { + struct scatterlist in_sg[2]; + struct scatterlist out_sg[1]; + uint8_t *in_buf; + uint8_t *out_buf; + struct akcipher_request child_req; +}; + +struct crypto_report_acomp { + char type[64]; +}; + +struct acomp_alg { + int (*compress)(struct acomp_req *); + int (*decompress)(struct acomp_req *); + void (*dst_free)(struct scatterlist *); + int (*init)(struct crypto_acomp *); + void (*exit)(struct crypto_acomp *); + unsigned int reqsize; + struct crypto_alg base; +}; + +struct crypto_report_comp { + char type[64]; +}; + +struct crypto_scomp { + struct crypto_tfm base; +}; + +struct scomp_alg { + void * (*alloc_ctx)(struct crypto_scomp *); + void (*free_ctx)(struct crypto_scomp *, void *); + int (*compress)(struct crypto_scomp *, const u8 *, unsigned int, u8 *, unsigned int *, void *); + int (*decompress)(struct crypto_scomp *, const u8 *, unsigned int, u8 *, unsigned int *, void *); + struct crypto_alg base; +}; + +struct scomp_scratch { + spinlock_t lock; + void *src; + void *dst; +}; + +struct cryptomgr_param { + struct rtattr *tb[34]; + struct { + struct rtattr attr; + struct crypto_attr_type data; + } type; + union { + struct rtattr attr; + struct { + struct rtattr attr; + struct crypto_attr_alg data; + } alg; + struct { + struct rtattr attr; + struct crypto_attr_u32 data; + } nu32; + } attrs[32]; + char template[128]; + struct crypto_larval *larval; + u32 otype; + u32 omask; +}; + +struct crypto_test_param { + char driver[128]; + char alg[128]; + u32 type; +}; + +struct hmac_ctx { + struct crypto_shash *hash; +}; + +struct md5_state { + u32 hash[4]; + u32 block[16]; + u64 byte_count; +}; + +struct sha1_state { + u32 state[5]; + u64 count; + u8 buffer[64]; +}; + +typedef void sha1_block_fn(struct sha1_state *, const u8 *, int); + +struct sha256_state { + u32 state[8]; + u64 count; + u8 buf[64]; +}; + +struct sha512_state { + u64 state[8]; + u64 count[2]; + u8 buf[128]; +}; + +typedef void sha512_block_fn(struct sha512_state *, const u8 *, int); + +typedef struct { + u64 a; + u64 b; +} u128; + +typedef struct { + __be64 a; + __be64 b; +} be128; + +typedef struct { + __le64 b; + __le64 a; +} le128; + +struct gf128mul_4k { + be128 t[256]; +}; + +struct gf128mul_64k { + struct gf128mul_4k *t[16]; +}; + +struct crypto_cts_ctx { + struct crypto_skcipher *child; +}; + +struct crypto_cts_reqctx { + struct scatterlist sg[2]; + unsigned int 
offset; + struct skcipher_request subreq; +}; + +struct xts_tfm_ctx { + struct crypto_skcipher *child; + struct crypto_cipher *tweak; +}; + +struct xts_instance_ctx { + struct crypto_skcipher_spawn spawn; + char name[128]; +}; + +struct xts_request_ctx { + le128 t; + struct scatterlist *tail; + struct scatterlist sg[2]; + struct skcipher_request subreq; +}; + +struct crypto_rfc3686_ctx { + struct crypto_skcipher *child; + u8 nonce[4]; +}; + +struct crypto_rfc3686_req_ctx { + u8 iv[16]; + struct skcipher_request subreq; +}; + +struct gcm_instance_ctx { + struct crypto_skcipher_spawn ctr; + struct crypto_ahash_spawn ghash; +}; + +struct crypto_gcm_ctx { + struct crypto_skcipher *ctr; + struct crypto_ahash *ghash; +}; + +struct crypto_rfc4106_ctx { + struct crypto_aead *child; + u8 nonce[4]; +}; + +struct crypto_rfc4106_req_ctx { + struct scatterlist src[3]; + struct scatterlist dst[3]; + struct aead_request subreq; +}; + +struct crypto_rfc4543_instance_ctx { + struct crypto_aead_spawn aead; +}; + +struct crypto_rfc4543_ctx { + struct crypto_aead *child; + struct crypto_sync_skcipher *null; + u8 nonce[4]; +}; + +struct crypto_rfc4543_req_ctx { + struct aead_request subreq; +}; + +struct crypto_gcm_ghash_ctx { + unsigned int cryptlen; + struct scatterlist *src; + int (*complete)(struct aead_request *, u32); +}; + +struct crypto_gcm_req_priv_ctx { + u8 iv[16]; + u8 auth_tag[16]; + u8 iauth_tag[16]; + struct scatterlist src[3]; + struct scatterlist dst[3]; + struct scatterlist sg; + struct crypto_gcm_ghash_ctx ghash_ctx; + union { + struct ahash_request ahreq; + struct skcipher_request skreq; + } u; +}; + +struct crypto_aes_ctx { + u32 key_enc[60]; + u32 key_dec[60]; + u32 key_length; +}; + +struct deflate_ctx { + struct z_stream_s comp_stream; + struct z_stream_s decomp_stream; +}; + +struct chksum_ctx { + u32 key; +}; + +struct chksum_desc_ctx { + u32 crc; +}; + +struct chksum_desc_ctx___2 { + __u16 crc; +}; + +struct lzo_ctx { + void *lzo_comp_mem; +}; + +struct lzorle_ctx { + void *lzorle_comp_mem; +}; + +struct crypto_report_rng { + char type[64]; + unsigned int seedsize; +}; + +struct random_ready_callback { + struct list_head list; + void (*func)(struct random_ready_callback *); + struct module *owner; +}; + +struct drbg_string { + const unsigned char *buf; + size_t len; + struct list_head list; +}; + +typedef uint32_t drbg_flag_t; + +struct drbg_core { + drbg_flag_t flags; + __u8 statelen; + __u8 blocklen_bytes; + char cra_name[128]; + char backend_cra_name[128]; +}; + +struct drbg_state; + +struct drbg_state_ops { + int (*update)(struct drbg_state *, struct list_head *, int); + int (*generate)(struct drbg_state *, unsigned char *, unsigned int, struct list_head *); + int (*crypto_init)(struct drbg_state *); + int (*crypto_fini)(struct drbg_state *); +}; + +struct drbg_state { + struct mutex drbg_mutex; + unsigned char *V; + unsigned char *Vbuf; + unsigned char *C; + unsigned char *Cbuf; + size_t reseed_ctr; + size_t reseed_threshold; + unsigned char *scratchpad; + unsigned char *scratchpadbuf; + void *priv_data; + struct crypto_skcipher *ctr_handle; + struct skcipher_request *ctr_req; + __u8 *outscratchpadbuf; + __u8 *outscratchpad; + struct crypto_wait ctr_wait; + struct scatterlist sg_in; + struct scatterlist sg_out; + bool seeded; + bool pr; + bool fips_primed; + unsigned char *prev; + struct work_struct seed_work; + struct crypto_rng *jent; + const struct drbg_state_ops *d_ops; + const struct drbg_core *core; + struct drbg_string test_data; + struct random_ready_callback 
random_ready; +}; + +enum drbg_prefixes { + DRBG_PREFIX0 = 0, + DRBG_PREFIX1 = 1, + DRBG_PREFIX2 = 2, + DRBG_PREFIX3 = 3, +}; + +struct s { + __be32 conv; +}; + +struct rand_data { + __u64 data; + __u64 old_data; + __u64 prev_time; + __u64 last_delta; + __s64 last_delta2; + unsigned int osr; + unsigned char *mem; + unsigned int memlocation; + unsigned int memblocks; + unsigned int memblocksize; + unsigned int memaccessloops; + int rct_count; + unsigned int apt_observations; + unsigned int apt_count; + unsigned int apt_base; + unsigned int apt_base_set: 1; + unsigned int health_failure: 1; +}; + +struct rand_data___2; + +struct jitterentropy { + spinlock_t jent_lock; + struct rand_data___2 *entropy_collector; + unsigned int reset_cnt; +}; + +struct ghash_ctx { + struct gf128mul_4k *gf128; +}; + +struct ghash_desc_ctx { + u8 buffer[16]; + u32 bytes; +}; + +struct asymmetric_key_ids { + void *id[2]; +}; + +struct asymmetric_key_subtype { + struct module *owner; + const char *name; + short unsigned int name_len; + void (*describe)(const struct key *, struct seq_file *); + void (*destroy)(void *, void *); + int (*query)(const struct kernel_pkey_params *, struct kernel_pkey_query *); + int (*eds_op)(struct kernel_pkey_params *, const void *, void *); + int (*verify_signature)(const struct key *, const struct public_key_signature *); +}; + +struct asymmetric_key_parser { + struct list_head link; + struct module *owner; + const char *name; + int (*parse)(struct key_preparsed_payload *); +}; + +enum x509_actions { + ACT_x509_extract_key_data = 0, + ACT_x509_extract_name_segment = 1, + ACT_x509_note_OID = 2, + ACT_x509_note_issuer = 3, + ACT_x509_note_not_after = 4, + ACT_x509_note_not_before = 5, + ACT_x509_note_params = 6, + ACT_x509_note_pkey_algo = 7, + ACT_x509_note_serial = 8, + ACT_x509_note_signature = 9, + ACT_x509_note_subject = 10, + ACT_x509_note_tbs_certificate = 11, + ACT_x509_process_extension = 12, + NR__x509_actions = 13, +}; + +enum x509_akid_actions { + ACT_x509_akid_note_kid = 0, + ACT_x509_akid_note_name = 1, + ACT_x509_akid_note_serial = 2, + ACT_x509_extract_name_segment___2 = 3, + ACT_x509_note_OID___2 = 4, + NR__x509_akid_actions = 5, +}; + +struct x509_certificate { + struct x509_certificate *next; + struct x509_certificate *signer; + struct public_key *pub; + struct public_key_signature *sig; + char *issuer; + char *subject; + struct asymmetric_key_id *id; + struct asymmetric_key_id *skid; + time64_t valid_from; + time64_t valid_to; + const void *tbs; + unsigned int tbs_size; + unsigned int raw_sig_size; + const void *raw_sig; + const void *raw_serial; + unsigned int raw_serial_size; + unsigned int raw_issuer_size; + const void *raw_issuer; + const void *raw_subject; + unsigned int raw_subject_size; + unsigned int raw_skid_size; + const void *raw_skid; + unsigned int index; + bool seen; + bool verified; + bool self_signed; + bool unsupported_key; + bool unsupported_sig; + bool blacklisted; +}; + +struct x509_parse_context { + struct x509_certificate *cert; + long unsigned int data; + const void *cert_start; + const void *key; + size_t key_size; + const void *params; + size_t params_size; + enum OID key_algo; + enum OID last_oid; + enum OID algo_oid; + unsigned char nr_mpi; + u8 o_size; + u8 cn_size; + u8 email_size; + u16 o_offset; + u16 cn_offset; + u16 email_offset; + unsigned int raw_akid_size; + const void *raw_akid; + const void *akid_raw_issuer; + unsigned int akid_raw_issuer_size; +}; + +enum pkcs7_actions { + ACT_pkcs7_check_content_type = 0, + 
ACT_pkcs7_extract_cert = 1, + ACT_pkcs7_note_OID = 2, + ACT_pkcs7_note_certificate_list = 3, + ACT_pkcs7_note_content = 4, + ACT_pkcs7_note_data = 5, + ACT_pkcs7_note_signed_info = 6, + ACT_pkcs7_note_signeddata_version = 7, + ACT_pkcs7_note_signerinfo_version = 8, + ACT_pkcs7_sig_note_authenticated_attr = 9, + ACT_pkcs7_sig_note_digest_algo = 10, + ACT_pkcs7_sig_note_issuer = 11, + ACT_pkcs7_sig_note_pkey_algo = 12, + ACT_pkcs7_sig_note_serial = 13, + ACT_pkcs7_sig_note_set_of_authattrs = 14, + ACT_pkcs7_sig_note_signature = 15, + ACT_pkcs7_sig_note_skid = 16, + NR__pkcs7_actions = 17, +}; + +struct pkcs7_signed_info { + struct pkcs7_signed_info *next; + struct x509_certificate *signer; + unsigned int index; + bool unsupported_crypto; + bool blacklisted; + const void *msgdigest; + unsigned int msgdigest_len; + unsigned int authattrs_len; + const void *authattrs; + long unsigned int aa_set; + time64_t signing_time; + struct public_key_signature *sig; +}; + +struct pkcs7_message___2 { + struct x509_certificate *certs; + struct x509_certificate *crl; + struct pkcs7_signed_info *signed_infos; + u8 version; + bool have_authattrs; + enum OID data_type; + size_t data_len; + size_t data_hdrlen; + const void *data; +}; + +struct pkcs7_parse_context { + struct pkcs7_message___2 *msg; + struct pkcs7_signed_info *sinfo; + struct pkcs7_signed_info **ppsinfo; + struct x509_certificate *certs; + struct x509_certificate **ppcerts; + long unsigned int data; + enum OID last_oid; + unsigned int x509_index; + unsigned int sinfo_index; + const void *raw_serial; + unsigned int raw_serial_size; + unsigned int raw_issuer_size; + const void *raw_issuer; + const void *raw_skid; + unsigned int raw_skid_size; + bool expect_skid; +}; + +struct mz_hdr { + uint16_t magic; + uint16_t lbsize; + uint16_t blocks; + uint16_t relocs; + uint16_t hdrsize; + uint16_t min_extra_pps; + uint16_t max_extra_pps; + uint16_t ss; + uint16_t sp; + uint16_t checksum; + uint16_t ip; + uint16_t cs; + uint16_t reloc_table_offset; + uint16_t overlay_num; + uint16_t reserved0[4]; + uint16_t oem_id; + uint16_t oem_info; + uint16_t reserved1[10]; + uint32_t peaddr; + char message[0]; +}; + +struct pe_hdr { + uint32_t magic; + uint16_t machine; + uint16_t sections; + uint32_t timestamp; + uint32_t symbol_table; + uint32_t symbols; + uint16_t opt_hdr_size; + uint16_t flags; +}; + +struct pe32_opt_hdr { + uint16_t magic; + uint8_t ld_major; + uint8_t ld_minor; + uint32_t text_size; + uint32_t data_size; + uint32_t bss_size; + uint32_t entry_point; + uint32_t code_base; + uint32_t data_base; + uint32_t image_base; + uint32_t section_align; + uint32_t file_align; + uint16_t os_major; + uint16_t os_minor; + uint16_t image_major; + uint16_t image_minor; + uint16_t subsys_major; + uint16_t subsys_minor; + uint32_t win32_version; + uint32_t image_size; + uint32_t header_size; + uint32_t csum; + uint16_t subsys; + uint16_t dll_flags; + uint32_t stack_size_req; + uint32_t stack_size; + uint32_t heap_size_req; + uint32_t heap_size; + uint32_t loader_flags; + uint32_t data_dirs; +}; + +struct pe32plus_opt_hdr { + uint16_t magic; + uint8_t ld_major; + uint8_t ld_minor; + uint32_t text_size; + uint32_t data_size; + uint32_t bss_size; + uint32_t entry_point; + uint32_t code_base; + uint64_t image_base; + uint32_t section_align; + uint32_t file_align; + uint16_t os_major; + uint16_t os_minor; + uint16_t image_major; + uint16_t image_minor; + uint16_t subsys_major; + uint16_t subsys_minor; + uint32_t win32_version; + uint32_t image_size; + uint32_t header_size; 
+ uint32_t csum; + uint16_t subsys; + uint16_t dll_flags; + uint64_t stack_size_req; + uint64_t stack_size; + uint64_t heap_size_req; + uint64_t heap_size; + uint32_t loader_flags; + uint32_t data_dirs; +}; + +struct data_dirent { + uint32_t virtual_address; + uint32_t size; +}; + +struct data_directory { + struct data_dirent exports; + struct data_dirent imports; + struct data_dirent resources; + struct data_dirent exceptions; + struct data_dirent certs; + struct data_dirent base_relocations; + struct data_dirent debug; + struct data_dirent arch; + struct data_dirent global_ptr; + struct data_dirent tls; + struct data_dirent load_config; + struct data_dirent bound_imports; + struct data_dirent import_addrs; + struct data_dirent delay_imports; + struct data_dirent clr_runtime_hdr; + struct data_dirent reserved; +}; + +struct section_header { + char name[8]; + uint32_t virtual_size; + uint32_t virtual_address; + uint32_t raw_data_size; + uint32_t data_addr; + uint32_t relocs; + uint32_t line_numbers; + uint16_t num_relocs; + uint16_t num_lin_numbers; + uint32_t flags; +}; + +struct win_certificate { + uint32_t length; + uint16_t revision; + uint16_t cert_type; +}; + +struct pefile_context { + unsigned int header_size; + unsigned int image_checksum_offset; + unsigned int cert_dirent_offset; + unsigned int n_data_dirents; + unsigned int n_sections; + unsigned int certs_size; + unsigned int sig_offset; + unsigned int sig_len; + const struct section_header *secs; + const void *digest; + unsigned int digest_len; + const char *digest_algo; +}; + +enum mscode_actions { + ACT_mscode_note_content_type = 0, + ACT_mscode_note_digest = 1, + ACT_mscode_note_digest_algo = 2, + NR__mscode_actions = 3, +}; + +enum rq_qos_id { + RQ_QOS_WBT = 0, + RQ_QOS_LATENCY = 1, + RQ_QOS_COST = 2, +}; + +struct rq_qos_ops; + +struct rq_qos { + struct rq_qos_ops *ops; + struct request_queue *q; + enum rq_qos_id id; + struct rq_qos *next; + struct dentry *debugfs_dir; +}; + +enum hctx_type { + HCTX_TYPE_DEFAULT = 0, + HCTX_TYPE_READ = 1, + HCTX_TYPE_POLL = 2, + HCTX_MAX_TYPES = 3, +}; + +struct rq_qos_ops { + void (*throttle)(struct rq_qos *, struct bio *); + void (*track)(struct rq_qos *, struct request *, struct bio *); + void (*merge)(struct rq_qos *, struct request *, struct bio *); + void (*issue)(struct rq_qos *, struct request *); + void (*requeue)(struct rq_qos *, struct request *); + void (*done)(struct rq_qos *, struct request *); + void (*done_bio)(struct rq_qos *, struct bio *); + void (*cleanup)(struct rq_qos *, struct bio *); + void (*queue_depth_changed)(struct rq_qos *); + void (*exit)(struct rq_qos *); + const struct blk_mq_debugfs_attr *debugfs_attrs; +}; + +struct biovec_slab { + int nr_vecs; + char *name; + struct kmem_cache *slab; +}; + +struct bio_slab { + struct kmem_cache *slab; + unsigned int slab_ref; + unsigned int slab_size; + char name[8]; +}; + +enum { + BLK_MQ_F_SHOULD_MERGE = 1, + BLK_MQ_F_TAG_QUEUE_SHARED = 2, + BLK_MQ_F_STACKING = 4, + BLK_MQ_F_TAG_HCTX_SHARED = 8, + BLK_MQ_F_BLOCKING = 32, + BLK_MQ_F_NO_SCHED = 64, + BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, + BLK_MQ_F_ALLOC_POLICY_BITS = 1, + BLK_MQ_S_STOPPED = 0, + BLK_MQ_S_TAG_ACTIVE = 1, + BLK_MQ_S_SCHED_RESTART = 2, + BLK_MQ_S_INACTIVE = 3, + BLK_MQ_MAX_DEPTH = 10240, + BLK_MQ_CPU_WORK_BATCH = 8, +}; + +enum { + WBT_RWQ_BG = 0, + WBT_RWQ_KSWAPD = 1, + WBT_RWQ_DISCARD = 2, + WBT_NUM_RWQ = 3, +}; + +struct blk_plug_cb; + +typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); + +struct blk_plug_cb { + struct list_head list; + 
blk_plug_cb_fn callback; + void *data; +}; + +enum { + BLK_MQ_REQ_NOWAIT = 1, + BLK_MQ_REQ_RESERVED = 2, + BLK_MQ_REQ_PM = 4, +}; + +struct trace_event_raw_block_buffer { + struct trace_entry ent; + dev_t dev; + sector_t sector; + size_t size; + char __data[0]; +}; + +struct trace_event_raw_block_rq_requeue { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + char rwbs[8]; + u32 __data_loc_cmd; + char __data[0]; +}; + +struct trace_event_raw_block_rq_complete { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + int error; + char rwbs[8]; + u32 __data_loc_cmd; + char __data[0]; +}; + +struct trace_event_raw_block_rq { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + unsigned int bytes; + char rwbs[8]; + char comm[16]; + u32 __data_loc_cmd; + char __data[0]; +}; + +struct trace_event_raw_block_bio_complete { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + int error; + char rwbs[8]; + char __data[0]; +}; + +struct trace_event_raw_block_bio { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + char rwbs[8]; + char comm[16]; + char __data[0]; +}; + +struct trace_event_raw_block_plug { + struct trace_entry ent; + char comm[16]; + char __data[0]; +}; + +struct trace_event_raw_block_unplug { + struct trace_entry ent; + int nr_rq; + char comm[16]; + char __data[0]; +}; + +struct trace_event_raw_block_split { + struct trace_entry ent; + dev_t dev; + sector_t sector; + sector_t new_sector; + char rwbs[8]; + char comm[16]; + char __data[0]; +}; + +struct trace_event_raw_block_bio_remap { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + dev_t old_dev; + sector_t old_sector; + char rwbs[8]; + char __data[0]; +}; + +struct trace_event_raw_block_rq_remap { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + dev_t old_dev; + sector_t old_sector; + unsigned int nr_bios; + char rwbs[8]; + char __data[0]; +}; + +struct trace_event_data_offsets_block_buffer {}; + +struct trace_event_data_offsets_block_rq_requeue { + u32 cmd; +}; + +struct trace_event_data_offsets_block_rq_complete { + u32 cmd; +}; + +struct trace_event_data_offsets_block_rq { + u32 cmd; +}; + +struct trace_event_data_offsets_block_bio_complete {}; + +struct trace_event_data_offsets_block_bio {}; + +struct trace_event_data_offsets_block_plug {}; + +struct trace_event_data_offsets_block_unplug {}; + +struct trace_event_data_offsets_block_split {}; + +struct trace_event_data_offsets_block_bio_remap {}; + +struct trace_event_data_offsets_block_rq_remap {}; + +typedef void (*btf_trace_block_touch_buffer)(void *, struct buffer_head *); + +typedef void (*btf_trace_block_dirty_buffer)(void *, struct buffer_head *); + +typedef void (*btf_trace_block_rq_requeue)(void *, struct request *); + +typedef void (*btf_trace_block_rq_complete)(void *, struct request *, int, unsigned int); + +typedef void (*btf_trace_block_rq_insert)(void *, struct request *); + +typedef void (*btf_trace_block_rq_issue)(void *, struct request *); + +typedef void (*btf_trace_block_rq_merge)(void *, struct request *); + +typedef void (*btf_trace_block_bio_complete)(void *, struct request_queue *, struct bio *); + +typedef void (*btf_trace_block_bio_bounce)(void *, struct bio *); + +typedef void (*btf_trace_block_bio_backmerge)(void *, struct bio *); + +typedef void (*btf_trace_block_bio_frontmerge)(void *, struct bio *); + +typedef void 
(*btf_trace_block_bio_queue)(void *, struct bio *); + +typedef void (*btf_trace_block_getrq)(void *, struct bio *); + +typedef void (*btf_trace_block_plug)(void *, struct request_queue *); + +typedef void (*btf_trace_block_unplug)(void *, struct request_queue *, unsigned int, bool); + +typedef void (*btf_trace_block_split)(void *, struct bio *, unsigned int); + +typedef void (*btf_trace_block_bio_remap)(void *, struct bio *, dev_t, sector_t); + +typedef void (*btf_trace_block_rq_remap)(void *, struct request *, dev_t, sector_t); + +enum { + BLK_MQ_NO_TAG = 4294967295, + BLK_MQ_TAG_MIN = 1, + BLK_MQ_TAG_MAX = 4294967294, +}; + +struct queue_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct request_queue *, char *); + ssize_t (*store)(struct request_queue *, const char *, size_t); +}; + +enum { + REQ_FSEQ_PREFLUSH = 1, + REQ_FSEQ_DATA = 2, + REQ_FSEQ_POSTFLUSH = 4, + REQ_FSEQ_DONE = 8, + REQ_FSEQ_ACTIONS = 7, + FLUSH_PENDING_TIMEOUT = 1250, +}; + +enum { + ICQ_EXITED = 4, + ICQ_DESTROYED = 8, +}; + +struct rq_map_data { + struct page **pages; + int page_order; + int nr_entries; + long unsigned int offset; + int null_mapped; + int from_user; +}; + +struct bio_map_data { + bool is_our_pages: 1; + bool is_null_mapped: 1; + struct iov_iter iter; + struct iovec iov[0]; +}; + +struct req_iterator { + struct bvec_iter iter; + struct bio *bio; +}; + +enum bio_merge_status { + BIO_MERGE_OK = 0, + BIO_MERGE_NONE = 1, + BIO_MERGE_FAILED = 2, +}; + +typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *); + +typedef bool busy_iter_fn(struct blk_mq_hw_ctx *, struct request *, void *, bool); + +typedef bool busy_tag_iter_fn(struct request *, void *, bool); + +enum { + BLK_MQ_UNIQUE_TAG_BITS = 16, + BLK_MQ_UNIQUE_TAG_MASK = 65535, +}; + +struct mq_inflight { + struct block_device *part; + unsigned int inflight[2]; +}; + +struct flush_busy_ctx_data { + struct blk_mq_hw_ctx *hctx; + struct list_head *list; +}; + +struct dispatch_rq_data { + struct blk_mq_hw_ctx *hctx; + struct request *rq; +}; + +enum prep_dispatch { + PREP_DISPATCH_OK = 0, + PREP_DISPATCH_NO_TAG = 1, + PREP_DISPATCH_NO_BUDGET = 2, +}; + +struct rq_iter_data { + struct blk_mq_hw_ctx *hctx; + bool has_rq; +}; + +struct blk_mq_qe_pair { + struct list_head node; + struct request_queue *q; + struct elevator_type *type; +}; + +struct sbq_wait { + struct sbitmap_queue *sbq; + struct wait_queue_entry wait; +}; + +struct bt_iter_data { + struct blk_mq_hw_ctx *hctx; + busy_iter_fn *fn; + void *data; + bool reserved; +}; + +struct bt_tags_iter_data { + struct blk_mq_tags *tags; + busy_tag_iter_fn *fn; + void *data; + unsigned int flags; +}; + +struct blk_queue_stats { + struct list_head callbacks; + spinlock_t lock; + bool enable_accounting; +}; + +struct blk_mq_ctx_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct blk_mq_ctx *, char *); + ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t); +}; + +struct blk_mq_hw_ctx_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct blk_mq_hw_ctx *, char *); + ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t); +}; + +typedef u32 compat_caddr_t; + +struct hd_geometry { + unsigned char heads; + unsigned char sectors; + short unsigned int cylinders; + long unsigned int start; +}; + +struct blkpg_ioctl_arg { + int op; + int flags; + int datalen; + void *data; +}; + +struct blkpg_partition { + long long int start; + long long int length; + int pno; + char devname[64]; + char volname[64]; +}; + +struct pr_reservation { + __u64 key; + __u32 type; 
+ __u32 flags; +}; + +struct pr_registration { + __u64 old_key; + __u64 new_key; + __u32 flags; + __u32 __pad; +}; + +struct pr_preempt { + __u64 old_key; + __u64 new_key; + __u32 type; + __u32 flags; +}; + +struct pr_clear { + __u64 key; + __u32 flags; + __u32 __pad; +}; + +struct compat_blkpg_ioctl_arg { + compat_int_t op; + compat_int_t flags; + compat_int_t datalen; + compat_caddr_t data; +}; + +struct compat_hd_geometry { + unsigned char heads; + unsigned char sectors; + short unsigned int cylinders; + u32 start; +}; + +struct klist_node; + +struct klist { + spinlock_t k_lock; + struct list_head k_list; + void (*get)(struct klist_node *); + void (*put)(struct klist_node *); +}; + +struct klist_node { + void *n_klist; + struct list_head n_node; + struct kref n_ref; +}; + +struct klist_iter { + struct klist *i_klist; + struct klist_node *i_cur; +}; + +struct class_dev_iter { + struct klist_iter ki; + const struct device_type *type; +}; + +enum { + DISK_EVENT_FLAG_POLL = 1, + DISK_EVENT_FLAG_UEVENT = 2, +}; + +struct disk_events { + struct list_head node; + struct gendisk *disk; + spinlock_t lock; + struct mutex block_mutex; + int block; + unsigned int pending; + unsigned int clearing; + long int poll_msecs; + struct delayed_work dwork; +}; + +struct badblocks { + struct device *dev; + int count; + int unacked_exist; + int shift; + u64 *page; + int changed; + seqlock_t lock; + sector_t sector; + sector_t size; +}; + +struct blk_major_name { + struct blk_major_name *next; + int major; + char name[16]; + void (*probe)(dev_t); +}; + +enum { + IOPRIO_WHO_PROCESS = 1, + IOPRIO_WHO_PGRP = 2, + IOPRIO_WHO_USER = 3, +}; + +struct parsed_partitions { + struct block_device *bdev; + char name[32]; + struct { + sector_t from; + sector_t size; + int flags; + bool has_info; + struct partition_meta_info info; + } *parts; + int next; + int limit; + bool access_beyond_eod; + char *pp_buf; +}; + +typedef struct { + struct page *v; +} Sector; + +struct RigidDiskBlock { + __u32 rdb_ID; + __be32 rdb_SummedLongs; + __s32 rdb_ChkSum; + __u32 rdb_HostID; + __be32 rdb_BlockBytes; + __u32 rdb_Flags; + __u32 rdb_BadBlockList; + __be32 rdb_PartitionList; + __u32 rdb_FileSysHeaderList; + __u32 rdb_DriveInit; + __u32 rdb_Reserved1[6]; + __u32 rdb_Cylinders; + __u32 rdb_Sectors; + __u32 rdb_Heads; + __u32 rdb_Interleave; + __u32 rdb_Park; + __u32 rdb_Reserved2[3]; + __u32 rdb_WritePreComp; + __u32 rdb_ReducedWrite; + __u32 rdb_StepRate; + __u32 rdb_Reserved3[5]; + __u32 rdb_RDBBlocksLo; + __u32 rdb_RDBBlocksHi; + __u32 rdb_LoCylinder; + __u32 rdb_HiCylinder; + __u32 rdb_CylBlocks; + __u32 rdb_AutoParkSeconds; + __u32 rdb_HighRDSKBlock; + __u32 rdb_Reserved4; + char rdb_DiskVendor[8]; + char rdb_DiskProduct[16]; + char rdb_DiskRevision[4]; + char rdb_ControllerVendor[8]; + char rdb_ControllerProduct[16]; + char rdb_ControllerRevision[4]; + __u32 rdb_Reserved5[10]; +}; + +struct PartitionBlock { + __be32 pb_ID; + __be32 pb_SummedLongs; + __s32 pb_ChkSum; + __u32 pb_HostID; + __be32 pb_Next; + __u32 pb_Flags; + __u32 pb_Reserved1[2]; + __u32 pb_DevFlags; + __u8 pb_DriveName[32]; + __u32 pb_Reserved2[15]; + __be32 pb_Environment[17]; + __u32 pb_EReserved[15]; +}; + +struct partition_info { + u8 flg; + char id[3]; + __be32 st; + __be32 siz; +}; + +struct rootsector { + char unused[342]; + struct partition_info icdpart[8]; + char unused2[12]; + u32 hd_siz; + struct partition_info part[4]; + u32 bsl_st; + u32 bsl_cnt; + u16 checksum; +} __attribute__((packed)); + +struct lvm_rec { + char lvm_id[4]; + char reserved4[16]; + 
__be32 lvmarea_len; + __be32 vgda_len; + __be32 vgda_psn[2]; + char reserved36[10]; + __be16 pp_size; + char reserved46[12]; + __be16 version; +}; + +struct vgda { + __be32 secs; + __be32 usec; + char reserved8[16]; + __be16 numlvs; + __be16 maxlvs; + __be16 pp_size; + __be16 numpvs; + __be16 total_vgdas; + __be16 vgda_size; +}; + +struct lvd { + __be16 lv_ix; + __be16 res2; + __be16 res4; + __be16 maxsize; + __be16 lv_state; + __be16 mirror; + __be16 mirror_policy; + __be16 num_lps; + __be16 res10[8]; +}; + +struct lvname { + char name[64]; +}; + +struct ppe { + __be16 lv_ix; + short unsigned int res2; + short unsigned int res4; + __be16 lp_ix; + short unsigned int res8[12]; +}; + +struct pvd { + char reserved0[16]; + __be16 pp_count; + char reserved18[2]; + __be32 psn_part1; + char reserved24[8]; + struct ppe ppe[1016]; +}; + +struct lv_info { + short unsigned int pps_per_lv; + short unsigned int pps_found; + unsigned char lv_is_contiguous; +}; + +struct cmdline_subpart { + char name[32]; + sector_t from; + sector_t size; + int flags; + struct cmdline_subpart *next_subpart; +}; + +struct cmdline_parts { + char name[32]; + unsigned int nr_subparts; + struct cmdline_subpart *subpart; + struct cmdline_parts *next_parts; +}; + +struct mac_partition { + __be16 signature; + __be16 res1; + __be32 map_count; + __be32 start_block; + __be32 block_count; + char name[32]; + char type[32]; + __be32 data_start; + __be32 data_count; + __be32 status; + __be32 boot_start; + __be32 boot_size; + __be32 boot_load; + __be32 boot_load2; + __be32 boot_entry; + __be32 boot_entry2; + __be32 boot_cksum; + char processor[16]; +}; + +struct mac_driver_desc { + __be16 signature; + __be16 block_size; + __be32 block_count; +}; + +struct msdos_partition { + u8 boot_ind; + u8 head; + u8 sector; + u8 cyl; + u8 sys_ind; + u8 end_head; + u8 end_sector; + u8 end_cyl; + __le32 start_sect; + __le32 nr_sects; +}; + +struct frag { + struct list_head list; + u32 group; + u8 num; + u8 rec; + u8 map; + u8 data[0]; +}; + +struct privhead { + u16 ver_major; + u16 ver_minor; + u64 logical_disk_start; + u64 logical_disk_size; + u64 config_start; + u64 config_size; + uuid_t disk_id; +}; + +struct tocblock { + u8 bitmap1_name[16]; + u64 bitmap1_start; + u64 bitmap1_size; + u8 bitmap2_name[16]; + u64 bitmap2_start; + u64 bitmap2_size; +}; + +struct vmdb { + u16 ver_major; + u16 ver_minor; + u32 vblk_size; + u32 vblk_offset; + u32 last_vblk_seq; +}; + +struct vblk_comp { + u8 state[16]; + u64 parent_id; + u8 type; + u8 children; + u16 chunksize; +}; + +struct vblk_dgrp { + u8 disk_id[64]; +}; + +struct vblk_disk { + uuid_t disk_id; + u8 alt_name[128]; +}; + +struct vblk_part { + u64 start; + u64 size; + u64 volume_offset; + u64 parent_id; + u64 disk_id; + u8 partnum; +}; + +struct vblk_volu { + u8 volume_type[16]; + u8 volume_state[16]; + u8 guid[16]; + u8 drive_hint[4]; + u64 size; + u8 partition_type; +}; + +struct vblk { + u8 name[64]; + u64 obj_id; + u32 sequence; + u8 flags; + u8 type; + union { + struct vblk_comp comp; + struct vblk_dgrp dgrp; + struct vblk_disk disk; + struct vblk_part part; + struct vblk_volu volu; + } vblk; + struct list_head list; +}; + +struct ldmdb { + struct privhead ph; + struct tocblock toc; + struct vmdb vm; + struct list_head v_dgrp; + struct list_head v_disk; + struct list_head v_volu; + struct list_head v_comp; + struct list_head v_part; +}; + +enum msdos_sys_ind { + DOS_EXTENDED_PARTITION = 5, + LINUX_EXTENDED_PARTITION = 133, + WIN98_EXTENDED_PARTITION = 15, + LINUX_DATA_PARTITION = 131, + 
LINUX_LVM_PARTITION = 142, + LINUX_RAID_PARTITION = 253, + SOLARIS_X86_PARTITION = 130, + NEW_SOLARIS_X86_PARTITION = 191, + DM6_AUX1PARTITION = 81, + DM6_AUX3PARTITION = 83, + DM6_PARTITION = 84, + EZD_PARTITION = 85, + FREEBSD_PARTITION = 165, + OPENBSD_PARTITION = 166, + NETBSD_PARTITION = 169, + BSDI_PARTITION = 183, + MINIX_PARTITION = 129, + UNIXWARE_PARTITION = 99, +}; + +struct solaris_x86_slice { + __le16 s_tag; + __le16 s_flag; + __le32 s_start; + __le32 s_size; +}; + +struct solaris_x86_vtoc { + unsigned int v_bootinfo[3]; + __le32 v_sanity; + __le32 v_version; + char v_volume[8]; + __le16 v_sectorsz; + __le16 v_nparts; + unsigned int v_reserved[10]; + struct solaris_x86_slice v_slice[16]; + unsigned int timestamp[16]; + char v_asciilabel[128]; +}; + +struct bsd_partition { + __le32 p_size; + __le32 p_offset; + __le32 p_fsize; + __u8 p_fstype; + __u8 p_frag; + __le16 p_cpg; +}; + +struct bsd_disklabel { + __le32 d_magic; + __s16 d_type; + __s16 d_subtype; + char d_typename[16]; + char d_packname[16]; + __u32 d_secsize; + __u32 d_nsectors; + __u32 d_ntracks; + __u32 d_ncylinders; + __u32 d_secpercyl; + __u32 d_secperunit; + __u16 d_sparespertrack; + __u16 d_sparespercyl; + __u32 d_acylinders; + __u16 d_rpm; + __u16 d_interleave; + __u16 d_trackskew; + __u16 d_cylskew; + __u32 d_headswitch; + __u32 d_trkseek; + __u32 d_flags; + __u32 d_drivedata[5]; + __u32 d_spare[5]; + __le32 d_magic2; + __le16 d_checksum; + __le16 d_npartitions; + __le32 d_bbsize; + __le32 d_sbsize; + struct bsd_partition d_partitions[16]; +}; + +struct unixware_slice { + __le16 s_label; + __le16 s_flags; + __le32 start_sect; + __le32 nr_sects; +}; + +struct unixware_vtoc { + __le32 v_magic; + __le32 v_version; + char v_name[8]; + __le16 v_nslices; + __le16 v_unknown1; + __le32 v_reserved[10]; + struct unixware_slice v_slice[16]; +}; + +struct unixware_disklabel { + __le32 d_type; + __le32 d_magic; + __le32 d_version; + char d_serial[12]; + __le32 d_ncylinders; + __le32 d_ntracks; + __le32 d_nsectors; + __le32 d_secsize; + __le32 d_part_start; + __le32 d_unknown1[12]; + __le32 d_alt_tbl; + __le32 d_alt_len; + __le32 d_phys_cyl; + __le32 d_phys_trk; + __le32 d_phys_sec; + __le32 d_phys_bytes; + __le32 d_unknown2; + __le32 d_unknown3; + __le32 d_pad[8]; + struct unixware_vtoc vtoc; +}; + +struct d_partition { + __le32 p_size; + __le32 p_offset; + __le32 p_fsize; + u8 p_fstype; + u8 p_frag; + __le16 p_cpg; +}; + +struct disklabel { + __le32 d_magic; + __le16 d_type; + __le16 d_subtype; + u8 d_typename[16]; + u8 d_packname[16]; + __le32 d_secsize; + __le32 d_nsectors; + __le32 d_ntracks; + __le32 d_ncylinders; + __le32 d_secpercyl; + __le32 d_secprtunit; + __le16 d_sparespertrack; + __le16 d_sparespercyl; + __le32 d_acylinders; + __le16 d_rpm; + __le16 d_interleave; + __le16 d_trackskew; + __le16 d_cylskew; + __le32 d_headswitch; + __le32 d_trkseek; + __le32 d_flags; + __le32 d_drivedata[5]; + __le32 d_spare[5]; + __le32 d_magic2; + __le16 d_checksum; + __le16 d_npartitions; + __le32 d_bbsize; + __le32 d_sbsize; + struct d_partition d_partitions[18]; +}; + +enum { + LINUX_RAID_PARTITION___2 = 253, +}; + +struct sgi_volume { + s8 name[8]; + __be32 block_num; + __be32 num_bytes; +}; + +struct sgi_partition { + __be32 num_blocks; + __be32 first_block; + __be32 type; +}; + +struct sgi_disklabel { + __be32 magic_mushroom; + __be16 root_part_num; + __be16 swap_part_num; + s8 boot_file[16]; + u8 _unused0[48]; + struct sgi_volume volume[15]; + struct sgi_partition partitions[16]; + __be32 csum; + __be32 _unused1; +}; + 
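/*
 * Illustrative user-space sketch, not part of the generated header above:
 * it shows how the struct msdos_partition layout defined earlier maps onto
 * a classic MBR sector (four 16-byte primary entries at byte offset 446,
 * 0x55AA boot signature at offset 510). The struct and function names here
 * are examples made up for this sketch, not kernel or project identifiers.
 */
#include <stdint.h>
#include <string.h>

struct msdos_partition_example {
	uint8_t  boot_ind;
	uint8_t  head, sector, cyl;
	uint8_t  sys_ind;
	uint8_t  end_head, end_sector, end_cyl;
	uint32_t start_sect;   /* little-endian LBA of first sector on disk */
	uint32_t nr_sects;     /* little-endian sector count on disk */
} __attribute__((packed));

/*
 * Counts non-empty primary partition entries in the first sector of a disk.
 * Returns -1 if the sector does not carry the 0x55AA MBR signature.
 */
int count_primary_partitions(const uint8_t sector0[512])
{
	if (sector0[510] != 0x55 || sector0[511] != 0xAA)
		return -1;

	struct msdos_partition_example parts[4];
	memcpy(parts, sector0 + 446, sizeof(parts));

	int used = 0;
	for (int i = 0; i < 4; i++)
		if (parts[i].sys_ind != 0)   /* sys_ind == 0 marks an unused slot */
			used++;
	return used;
}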
+enum { + SUN_WHOLE_DISK = 5, + LINUX_RAID_PARTITION___3 = 253, +}; + +struct sun_info { + __be16 id; + __be16 flags; +}; + +struct sun_vtoc { + __be32 version; + char volume[8]; + __be16 nparts; + struct sun_info infos[8]; + __be16 padding; + __be32 bootinfo[3]; + __be32 sanity; + __be32 reserved[10]; + __be32 timestamp[8]; +}; + +struct sun_partition { + __be32 start_cylinder; + __be32 num_sectors; +}; + +struct sun_disklabel { + unsigned char info[128]; + struct sun_vtoc vtoc; + __be32 write_reinstruct; + __be32 read_reinstruct; + unsigned char spare[148]; + __be16 rspeed; + __be16 pcylcount; + __be16 sparecyl; + __be16 obs1; + __be16 obs2; + __be16 ilfact; + __be16 ncyl; + __be16 nacyl; + __be16 ntrks; + __be16 nsect; + __be16 obs3; + __be16 obs4; + struct sun_partition partitions[8]; + __be16 magic; + __be16 csum; +}; + +struct pt_info { + s32 pi_nblocks; + u32 pi_blkoff; +}; + +struct ultrix_disklabel { + s32 pt_magic; + s32 pt_valid; + struct pt_info pt_part[8]; +}; + +struct _gpt_header { + __le64 signature; + __le32 revision; + __le32 header_size; + __le32 header_crc32; + __le32 reserved1; + __le64 my_lba; + __le64 alternate_lba; + __le64 first_usable_lba; + __le64 last_usable_lba; + efi_guid_t disk_guid; + __le64 partition_entry_lba; + __le32 num_partition_entries; + __le32 sizeof_partition_entry; + __le32 partition_entry_array_crc32; +} __attribute__((packed)); + +typedef struct _gpt_header gpt_header; + +struct _gpt_entry_attributes { + u64 required_to_function: 1; + u64 reserved: 47; + u64 type_guid_specific: 16; +}; + +typedef struct _gpt_entry_attributes gpt_entry_attributes; + +struct _gpt_entry { + efi_guid_t partition_type_guid; + efi_guid_t unique_partition_guid; + __le64 starting_lba; + __le64 ending_lba; + gpt_entry_attributes attributes; + __le16 partition_name[36]; +}; + +typedef struct _gpt_entry gpt_entry; + +struct _gpt_mbr_record { + u8 boot_indicator; + u8 start_head; + u8 start_sector; + u8 start_track; + u8 os_type; + u8 end_head; + u8 end_sector; + u8 end_track; + __le32 starting_lba; + __le32 size_in_lba; +}; + +typedef struct _gpt_mbr_record gpt_mbr_record; + +struct _legacy_mbr { + u8 boot_code[440]; + __le32 unique_mbr_signature; + __le16 unknown; + gpt_mbr_record partition_record[4]; + __le16 signature; +} __attribute__((packed)); + +typedef struct _legacy_mbr legacy_mbr; + +struct d_partition___2 { + __le32 p_res; + u8 p_fstype; + u8 p_res2[3]; + __le32 p_offset; + __le32 p_size; +}; + +struct disklabel___2 { + u8 d_reserved[270]; + struct d_partition___2 d_partitions[2]; + u8 d_blank[208]; + __le16 d_magic; +} __attribute__((packed)); + +struct volumeid { + u8 vid_unused[248]; + u8 vid_mac[8]; +}; + +struct dkconfig { + u8 ios_unused0[128]; + __be32 ios_slcblk; + __be16 ios_slccnt; + u8 ios_unused1[122]; +}; + +struct dkblk0 { + struct volumeid dk_vid; + struct dkconfig dk_ios; +}; + +struct slice { + __be32 nblocks; + __be32 blkoff; +}; + +struct rq_wait { + wait_queue_head_t wait; + atomic_t inflight; +}; + +struct rq_depth { + unsigned int max_depth; + int scale_step; + bool scaled_max; + unsigned int queue_depth; + unsigned int default_depth; +}; + +typedef bool acquire_inflight_cb_t(struct rq_wait *, void *); + +typedef void cleanup_cb_t(struct rq_wait *, void *); + +struct rq_qos_wait_data { + struct wait_queue_entry wq; + struct task_struct *task; + struct rq_wait *rqw; + acquire_inflight_cb_t *cb; + void *private_data; + bool got_token; +}; + +struct cdrom_device_ops; + +struct cdrom_device_info { + const struct cdrom_device_ops *ops; + struct 
list_head list; + struct gendisk *disk; + void *handle; + int mask; + int speed; + int capacity; + unsigned int options: 30; + unsigned int mc_flags: 2; + unsigned int vfs_events; + unsigned int ioctl_events; + int use_count; + char name[20]; + __u8 sanyo_slot: 2; + __u8 keeplocked: 1; + __u8 reserved: 5; + int cdda_method; + __u8 last_sense; + __u8 media_written; + short unsigned int mmc3_profile; + int for_data; + int (*exit)(struct cdrom_device_info *); + int mrw_mode_page; +}; + +struct scsi_sense_hdr { + u8 response_code; + u8 sense_key; + u8 asc; + u8 ascq; + u8 byte4; + u8 byte5; + u8 byte6; + u8 additional_length; +}; + +struct cdrom_msf0 { + __u8 minute; + __u8 second; + __u8 frame; +}; + +union cdrom_addr { + struct cdrom_msf0 msf; + int lba; +}; + +struct cdrom_multisession { + union cdrom_addr addr; + __u8 xa_flag; + __u8 addr_format; +}; + +struct cdrom_mcn { + __u8 medium_catalog_number[14]; +}; + +struct request_sense; + +struct cdrom_generic_command { + unsigned char cmd[12]; + unsigned char *buffer; + unsigned int buflen; + int stat; + struct request_sense *sense; + unsigned char data_direction; + int quiet; + int timeout; + union { + void *reserved[1]; + void *unused; + }; +}; + +struct request_sense { + __u8 error_code: 7; + __u8 valid: 1; + __u8 segment_number; + __u8 sense_key: 4; + __u8 reserved2: 1; + __u8 ili: 1; + __u8 reserved1: 2; + __u8 information[4]; + __u8 add_sense_len; + __u8 command_info[4]; + __u8 asc; + __u8 ascq; + __u8 fruc; + __u8 sks[3]; + __u8 asb[46]; +}; + +struct packet_command { + unsigned char cmd[12]; + unsigned char *buffer; + unsigned int buflen; + int stat; + struct scsi_sense_hdr *sshdr; + unsigned char data_direction; + int quiet; + int timeout; + void *reserved[1]; +}; + +struct cdrom_device_ops { + int (*open)(struct cdrom_device_info *, int); + void (*release)(struct cdrom_device_info *); + int (*drive_status)(struct cdrom_device_info *, int); + unsigned int (*check_events)(struct cdrom_device_info *, unsigned int, int); + int (*tray_move)(struct cdrom_device_info *, int); + int (*lock_door)(struct cdrom_device_info *, int); + int (*select_speed)(struct cdrom_device_info *, int); + int (*select_disc)(struct cdrom_device_info *, int); + int (*get_last_session)(struct cdrom_device_info *, struct cdrom_multisession *); + int (*get_mcn)(struct cdrom_device_info *, struct cdrom_mcn *); + int (*reset)(struct cdrom_device_info *); + int (*audio_ioctl)(struct cdrom_device_info *, unsigned int, void *); + const int capability; + int (*generic_packet)(struct cdrom_device_info *, struct packet_command *); +}; + +struct scsi_ioctl_command { + unsigned int inlen; + unsigned int outlen; + unsigned char data[0]; +}; + +enum scsi_device_event { + SDEV_EVT_MEDIA_CHANGE = 1, + SDEV_EVT_INQUIRY_CHANGE_REPORTED = 2, + SDEV_EVT_CAPACITY_CHANGE_REPORTED = 3, + SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED = 4, + SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED = 5, + SDEV_EVT_LUN_CHANGE_REPORTED = 6, + SDEV_EVT_ALUA_STATE_CHANGE_REPORTED = 7, + SDEV_EVT_POWER_ON_RESET_OCCURRED = 8, + SDEV_EVT_FIRST = 1, + SDEV_EVT_LAST = 8, + SDEV_EVT_MAXBITS = 9, +}; + +struct scsi_request { + unsigned char __cmd[16]; + unsigned char *cmd; + short unsigned int cmd_len; + int result; + unsigned int sense_len; + unsigned int resid_len; + int retries; + void *sense; +}; + +struct sg_io_hdr { + int interface_id; + int dxfer_direction; + unsigned char cmd_len; + unsigned char mx_sb_len; + short unsigned int iovec_count; + unsigned int dxfer_len; + void *dxferp; + unsigned char *cmdp; + void 
*sbp; + unsigned int timeout; + unsigned int flags; + int pack_id; + void *usr_ptr; + unsigned char status; + unsigned char masked_status; + unsigned char msg_status; + unsigned char sb_len_wr; + short unsigned int host_status; + short unsigned int driver_status; + int resid; + unsigned int duration; + unsigned int info; +}; + +struct compat_sg_io_hdr { + compat_int_t interface_id; + compat_int_t dxfer_direction; + unsigned char cmd_len; + unsigned char mx_sb_len; + short unsigned int iovec_count; + compat_uint_t dxfer_len; + compat_uint_t dxferp; + compat_uptr_t cmdp; + compat_uptr_t sbp; + compat_uint_t timeout; + compat_uint_t flags; + compat_int_t pack_id; + compat_uptr_t usr_ptr; + unsigned char status; + unsigned char masked_status; + unsigned char msg_status; + unsigned char sb_len_wr; + short unsigned int host_status; + short unsigned int driver_status; + compat_int_t resid; + compat_uint_t duration; + compat_uint_t info; +}; + +struct blk_cmd_filter { + long unsigned int read_ok[4]; + long unsigned int write_ok[4]; +}; + +struct compat_cdrom_generic_command { + unsigned char cmd[12]; + compat_caddr_t buffer; + compat_uint_t buflen; + compat_int_t stat; + compat_caddr_t sense; + unsigned char data_direction; + unsigned char pad[3]; + compat_int_t quiet; + compat_int_t timeout; + compat_caddr_t unused; +}; + +enum { + OMAX_SB_LEN = 16, +}; + +struct bsg_device { + struct request_queue *queue; + spinlock_t lock; + struct hlist_node dev_list; + refcount_t ref_count; + char name[20]; + int max_queue; +}; + +struct bsg_job; + +typedef int bsg_job_fn(struct bsg_job *); + +struct bsg_buffer { + unsigned int payload_len; + int sg_cnt; + struct scatterlist *sg_list; +}; + +struct bsg_job { + struct device *dev; + struct kref kref; + unsigned int timeout; + void *request; + void *reply; + unsigned int request_len; + unsigned int reply_len; + struct bsg_buffer request_payload; + struct bsg_buffer reply_payload; + int result; + unsigned int reply_payload_rcv_len; + struct request *bidi_rq; + struct bio *bidi_bio; + void *dd_data; +}; + +typedef enum blk_eh_timer_return bsg_timeout_fn(struct request *); + +struct bsg_set { + struct blk_mq_tag_set tag_set; + bsg_job_fn *job_fn; + bsg_timeout_fn *timeout_fn; +}; + +typedef struct blkcg_policy_data *blkcg_pol_alloc_cpd_fn(gfp_t); + +typedef void blkcg_pol_init_cpd_fn(struct blkcg_policy_data *); + +typedef void blkcg_pol_free_cpd_fn(struct blkcg_policy_data *); + +typedef void blkcg_pol_bind_cpd_fn(struct blkcg_policy_data *); + +typedef struct blkg_policy_data *blkcg_pol_alloc_pd_fn(gfp_t, struct request_queue *, struct blkcg *); + +typedef void blkcg_pol_init_pd_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_online_pd_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_offline_pd_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_free_pd_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_reset_pd_stats_fn(struct blkg_policy_data *); + +typedef size_t blkcg_pol_stat_pd_fn(struct blkg_policy_data *, char *, size_t); + +struct blkcg_policy { + int plid; + struct cftype *dfl_cftypes; + struct cftype *legacy_cftypes; + blkcg_pol_alloc_cpd_fn *cpd_alloc_fn; + blkcg_pol_init_cpd_fn *cpd_init_fn; + blkcg_pol_free_cpd_fn *cpd_free_fn; + blkcg_pol_bind_cpd_fn *cpd_bind_fn; + blkcg_pol_alloc_pd_fn *pd_alloc_fn; + blkcg_pol_init_pd_fn *pd_init_fn; + blkcg_pol_online_pd_fn *pd_online_fn; + blkcg_pol_offline_pd_fn *pd_offline_fn; + blkcg_pol_free_pd_fn *pd_free_fn; + blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn; + 
blkcg_pol_stat_pd_fn *pd_stat_fn; +}; + +struct blkg_conf_ctx { + struct block_device *bdev; + struct blkcg_gq *blkg; + char *body; +}; + +enum blkg_rwstat_type { + BLKG_RWSTAT_READ = 0, + BLKG_RWSTAT_WRITE = 1, + BLKG_RWSTAT_SYNC = 2, + BLKG_RWSTAT_ASYNC = 3, + BLKG_RWSTAT_DISCARD = 4, + BLKG_RWSTAT_NR = 5, + BLKG_RWSTAT_TOTAL = 5, +}; + +struct blkg_rwstat { + struct percpu_counter cpu_cnt[5]; + atomic64_t aux_cnt[5]; +}; + +struct blkg_rwstat_sample { + u64 cnt[5]; +}; + +struct throtl_service_queue { + struct throtl_service_queue *parent_sq; + struct list_head queued[2]; + unsigned int nr_queued[2]; + struct rb_root_cached pending_tree; + unsigned int nr_pending; + long unsigned int first_pending_disptime; + struct timer_list pending_timer; +}; + +struct latency_bucket { + long unsigned int total_latency; + int samples; +}; + +struct avg_latency_bucket { + long unsigned int latency; + bool valid; +}; + +struct throtl_data { + struct throtl_service_queue service_queue; + struct request_queue *queue; + unsigned int nr_queued[2]; + unsigned int throtl_slice; + struct work_struct dispatch_work; + unsigned int limit_index; + bool limit_valid[2]; + long unsigned int low_upgrade_time; + long unsigned int low_downgrade_time; + unsigned int scale; + struct latency_bucket tmp_buckets[18]; + struct avg_latency_bucket avg_buckets[18]; + struct latency_bucket *latency_buckets[2]; + long unsigned int last_calculate_time; + long unsigned int filtered_latency; + bool track_bio_latency; +}; + +struct throtl_grp; + +struct throtl_qnode { + struct list_head node; + struct bio_list bios; + struct throtl_grp *tg; +}; + +struct throtl_grp { + struct blkg_policy_data pd; + struct rb_node rb_node; + struct throtl_data *td; + struct throtl_service_queue service_queue; + struct throtl_qnode qnode_on_self[2]; + struct throtl_qnode qnode_on_parent[2]; + long unsigned int disptime; + unsigned int flags; + bool has_rules[2]; + uint64_t bps[4]; + uint64_t bps_conf[4]; + unsigned int iops[4]; + unsigned int iops_conf[4]; + uint64_t bytes_disp[2]; + unsigned int io_disp[2]; + long unsigned int last_low_overflow_time[2]; + uint64_t last_bytes_disp[2]; + unsigned int last_io_disp[2]; + long unsigned int last_check_time; + long unsigned int latency_target; + long unsigned int latency_target_conf; + long unsigned int slice_start[2]; + long unsigned int slice_end[2]; + long unsigned int last_finish_time; + long unsigned int checked_last_finish_time; + long unsigned int avg_idletime; + long unsigned int idletime_threshold; + long unsigned int idletime_threshold_conf; + unsigned int bio_cnt; + unsigned int bad_bio_cnt; + long unsigned int bio_cnt_reset_time; + atomic_t io_split_cnt[2]; + atomic_t last_io_split_cnt[2]; + struct blkg_rwstat stat_bytes; + struct blkg_rwstat stat_ios; +}; + +enum tg_state_flags { + THROTL_TG_PENDING = 1, + THROTL_TG_WAS_EMPTY = 2, +}; + +enum { + LIMIT_LOW = 0, + LIMIT_MAX = 1, + LIMIT_CNT = 2, +}; + +enum { + MILLION = 1000000, + MIN_PERIOD = 1000, + MAX_PERIOD = 1000000, + MARGIN_MIN_PCT = 10, + MARGIN_LOW_PCT = 20, + MARGIN_TARGET_PCT = 50, + INUSE_ADJ_STEP_PCT = 25, + TIMER_SLACK_PCT = 1, + WEIGHT_ONE = 65536, + VTIME_PER_SEC_SHIFT = 37, + VTIME_PER_SEC = 0, + VTIME_PER_USEC = 137438, + VTIME_PER_NSEC = 137, + VRATE_MIN_PPM = 10000, + VRATE_MAX_PPM = 100000000, + VRATE_MIN = 1374, + VRATE_CLAMP_ADJ_PCT = 4, + RQ_WAIT_BUSY_PCT = 5, + UNBUSY_THR_PCT = 75, + MIN_DELAY_THR_PCT = 500, + MAX_DELAY_THR_PCT = 25000, + MIN_DELAY = 250, + MAX_DELAY = 250000, + DFGV_USAGE_PCT = 50, + DFGV_PERIOD = 
100000, + MAX_LAGGING_PERIODS = 10, + AUTOP_CYCLE_NSEC = 1410065408, + IOC_PAGE_SHIFT = 12, + IOC_PAGE_SIZE = 4096, + IOC_SECT_TO_PAGE_SHIFT = 3, + LCOEF_RANDIO_PAGES = 4096, +}; + +enum ioc_running { + IOC_IDLE = 0, + IOC_RUNNING = 1, + IOC_STOP = 2, +}; + +enum { + QOS_ENABLE = 0, + QOS_CTRL = 1, + NR_QOS_CTRL_PARAMS = 2, +}; + +enum { + QOS_RPPM = 0, + QOS_RLAT = 1, + QOS_WPPM = 2, + QOS_WLAT = 3, + QOS_MIN = 4, + QOS_MAX = 5, + NR_QOS_PARAMS = 6, +}; + +enum { + COST_CTRL = 0, + COST_MODEL = 1, + NR_COST_CTRL_PARAMS = 2, +}; + +enum { + I_LCOEF_RBPS = 0, + I_LCOEF_RSEQIOPS = 1, + I_LCOEF_RRANDIOPS = 2, + I_LCOEF_WBPS = 3, + I_LCOEF_WSEQIOPS = 4, + I_LCOEF_WRANDIOPS = 5, + NR_I_LCOEFS = 6, +}; + +enum { + LCOEF_RPAGE = 0, + LCOEF_RSEQIO = 1, + LCOEF_RRANDIO = 2, + LCOEF_WPAGE = 3, + LCOEF_WSEQIO = 4, + LCOEF_WRANDIO = 5, + NR_LCOEFS = 6, +}; + +enum { + AUTOP_INVALID = 0, + AUTOP_HDD = 1, + AUTOP_SSD_QD1 = 2, + AUTOP_SSD_DFL = 3, + AUTOP_SSD_FAST = 4, +}; + +struct ioc_params { + u32 qos[6]; + u64 i_lcoefs[6]; + u64 lcoefs[6]; + u32 too_fast_vrate_pct; + u32 too_slow_vrate_pct; +}; + +struct ioc_margins { + s64 min; + s64 low; + s64 target; +}; + +struct ioc_missed { + local_t nr_met; + local_t nr_missed; + u32 last_met; + u32 last_missed; +}; + +struct ioc_pcpu_stat { + struct ioc_missed missed[2]; + local64_t rq_wait_ns; + u64 last_rq_wait_ns; +}; + +struct ioc { + struct rq_qos rqos; + bool enabled; + struct ioc_params params; + struct ioc_margins margins; + u32 period_us; + u32 timer_slack_ns; + u64 vrate_min; + u64 vrate_max; + spinlock_t lock; + struct timer_list timer; + struct list_head active_iocgs; + struct ioc_pcpu_stat *pcpu_stat; + enum ioc_running running; + atomic64_t vtime_rate; + u64 vtime_base_rate; + s64 vtime_err; + seqcount_spinlock_t period_seqcount; + u64 period_at; + u64 period_at_vtime; + atomic64_t cur_period; + int busy_level; + bool weights_updated; + atomic_t hweight_gen; + u64 dfgv_period_at; + u64 dfgv_period_rem; + u64 dfgv_usage_us_sum; + u64 autop_too_fast_at; + u64 autop_too_slow_at; + int autop_idx; + bool user_qos_params: 1; + bool user_cost_model: 1; +}; + +struct iocg_pcpu_stat { + local64_t abs_vusage; +}; + +struct iocg_stat { + u64 usage_us; + u64 wait_us; + u64 indebt_us; + u64 indelay_us; +}; + +struct ioc_gq { + struct blkg_policy_data pd; + struct ioc *ioc; + u32 cfg_weight; + u32 weight; + u32 active; + u32 inuse; + u32 last_inuse; + s64 saved_margin; + sector_t cursor; + atomic64_t vtime; + atomic64_t done_vtime; + u64 abs_vdebt; + u64 delay; + u64 delay_at; + atomic64_t active_period; + struct list_head active_list; + u64 child_active_sum; + u64 child_inuse_sum; + u64 child_adjusted_sum; + int hweight_gen; + u32 hweight_active; + u32 hweight_inuse; + u32 hweight_donating; + u32 hweight_after_donation; + struct list_head walk_list; + struct list_head surplus_list; + struct wait_queue_head waitq; + struct hrtimer waitq_timer; + u64 activated_at; + struct iocg_pcpu_stat *pcpu_stat; + struct iocg_stat local_stat; + struct iocg_stat desc_stat; + struct iocg_stat last_stat; + u64 last_stat_abs_vusage; + u64 usage_delta_us; + u64 wait_since; + u64 indebt_since; + u64 indelay_since; + int level; + struct ioc_gq *ancestors[0]; +}; + +struct ioc_cgrp { + struct blkcg_policy_data cpd; + unsigned int dfl_weight; +}; + +struct ioc_now { + u64 now_ns; + u64 now; + u64 vnow; + u64 vrate; +}; + +struct iocg_wait { + struct wait_queue_entry wait; + struct bio *bio; + u64 abs_cost; + bool committed; +}; + +struct iocg_wake_ctx { + struct ioc_gq *iocg; + 
u32 hw_inuse; + s64 vbudget; +}; + +struct trace_event_raw_iocost_iocg_state { + struct trace_entry ent; + u32 __data_loc_devname; + u32 __data_loc_cgroup; + u64 now; + u64 vnow; + u64 vrate; + u64 last_period; + u64 cur_period; + u64 vtime; + u32 weight; + u32 inuse; + u64 hweight_active; + u64 hweight_inuse; + char __data[0]; +}; + +struct trace_event_raw_iocg_inuse_update { + struct trace_entry ent; + u32 __data_loc_devname; + u32 __data_loc_cgroup; + u64 now; + u32 old_inuse; + u32 new_inuse; + u64 old_hweight_inuse; + u64 new_hweight_inuse; + char __data[0]; +}; + +struct trace_event_raw_iocost_ioc_vrate_adj { + struct trace_entry ent; + u32 __data_loc_devname; + u64 old_vrate; + u64 new_vrate; + int busy_level; + u32 read_missed_ppm; + u32 write_missed_ppm; + u32 rq_wait_pct; + int nr_lagging; + int nr_shortages; + char __data[0]; +}; + +struct trace_event_raw_iocost_iocg_forgive_debt { + struct trace_entry ent; + u32 __data_loc_devname; + u32 __data_loc_cgroup; + u64 now; + u64 vnow; + u32 usage_pct; + u64 old_debt; + u64 new_debt; + u64 old_delay; + u64 new_delay; + char __data[0]; +}; + +struct trace_event_data_offsets_iocost_iocg_state { + u32 devname; + u32 cgroup; +}; + +struct trace_event_data_offsets_iocg_inuse_update { + u32 devname; + u32 cgroup; +}; + +struct trace_event_data_offsets_iocost_ioc_vrate_adj { + u32 devname; +}; + +struct trace_event_data_offsets_iocost_iocg_forgive_debt { + u32 devname; + u32 cgroup; +}; + +typedef void (*btf_trace_iocost_iocg_activate)(void *, struct ioc_gq *, const char *, struct ioc_now *, u64, u64, u64); + +typedef void (*btf_trace_iocost_iocg_idle)(void *, struct ioc_gq *, const char *, struct ioc_now *, u64, u64, u64); + +typedef void (*btf_trace_iocost_inuse_shortage)(void *, struct ioc_gq *, const char *, struct ioc_now *, u32, u32, u64, u64); + +typedef void (*btf_trace_iocost_inuse_transfer)(void *, struct ioc_gq *, const char *, struct ioc_now *, u32, u32, u64, u64); + +typedef void (*btf_trace_iocost_inuse_adjust)(void *, struct ioc_gq *, const char *, struct ioc_now *, u32, u32, u64, u64); + +typedef void (*btf_trace_iocost_ioc_vrate_adj)(void *, struct ioc *, u64, u32 *, u32, int, int); + +typedef void (*btf_trace_iocost_iocg_forgive_debt)(void *, struct ioc_gq *, const char *, struct ioc_now *, u32, u64, u64, u64, u64); + +struct deadline_data { + struct rb_root sort_list[2]; + struct list_head fifo_list[2]; + struct request *next_rq[2]; + unsigned int batching; + unsigned int starved; + int fifo_expire[2]; + int fifo_batch; + int writes_starved; + int front_merges; + spinlock_t lock; + spinlock_t zone_lock; + struct list_head dispatch; +}; + +enum bip_flags { + BIP_BLOCK_INTEGRITY = 1, + BIP_MAPPED_INTEGRITY = 2, + BIP_CTRL_NOCHECK = 4, + BIP_DISK_NOCHECK = 8, + BIP_IP_CHECKSUM = 16, +}; + +enum blk_integrity_flags { + BLK_INTEGRITY_VERIFY = 1, + BLK_INTEGRITY_GENERATE = 2, + BLK_INTEGRITY_DEVICE_CAPABLE = 4, + BLK_INTEGRITY_IP_CHECKSUM = 8, +}; + +struct integrity_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct blk_integrity *, char *); + ssize_t (*store)(struct blk_integrity *, const char *, size_t); +}; + +enum t10_dif_type { + T10_PI_TYPE0_PROTECTION = 0, + T10_PI_TYPE1_PROTECTION = 1, + T10_PI_TYPE2_PROTECTION = 2, + T10_PI_TYPE3_PROTECTION = 3, +}; + +struct t10_pi_tuple { + __be16 guard_tag; + __be16 app_tag; + __be32 ref_tag; +}; + +typedef __be16 csum_fn(void *, unsigned int); + +struct virtio_device_id { + __u32 device; + __u32 vendor; +}; + +struct virtio_device; + +struct virtqueue { + struct 
list_head list; + void (*callback)(struct virtqueue *); + const char *name; + struct virtio_device *vdev; + unsigned int index; + unsigned int num_free; + void *priv; +}; + +struct vringh_config_ops; + +struct virtio_config_ops; + +struct virtio_device { + int index; + bool failed; + bool config_enabled; + bool config_change_pending; + spinlock_t config_lock; + spinlock_t vqs_list_lock; + struct device dev; + struct virtio_device_id id; + const struct virtio_config_ops *config; + const struct vringh_config_ops *vringh_config; + struct list_head vqs; + u64 features; + void *priv; +}; + +typedef void vq_callback_t(struct virtqueue *); + +struct virtio_shm_region; + +struct virtio_config_ops { + void (*get)(struct virtio_device *, unsigned int, void *, unsigned int); + void (*set)(struct virtio_device *, unsigned int, const void *, unsigned int); + u32 (*generation)(struct virtio_device *); + u8 (*get_status)(struct virtio_device *); + void (*set_status)(struct virtio_device *, u8); + void (*reset)(struct virtio_device *); + int (*find_vqs)(struct virtio_device *, unsigned int, struct virtqueue **, vq_callback_t **, const char * const *, const bool *, struct irq_affinity *); + void (*del_vqs)(struct virtio_device *); + u64 (*get_features)(struct virtio_device *); + int (*finalize_features)(struct virtio_device *); + const char * (*bus_name)(struct virtio_device *); + int (*set_vq_affinity)(struct virtqueue *, const struct cpumask *); + const struct cpumask * (*get_vq_affinity)(struct virtio_device *, int); + bool (*get_shm_region)(struct virtio_device *, struct virtio_shm_region *, u8); +}; + +struct virtio_shm_region { + u64 addr; + u64 len; +}; + +struct irq_poll; + +typedef int irq_poll_fn(struct irq_poll *, int); + +struct irq_poll { + struct list_head list; + long unsigned int state; + int weight; + irq_poll_fn *poll; +}; + +struct dim_sample { + ktime_t time; + u32 pkt_ctr; + u32 byte_ctr; + u16 event_ctr; + u32 comp_ctr; +}; + +struct dim_stats { + int ppms; + int bpms; + int epms; + int cpms; + int cpe_ratio; +}; + +struct dim { + u8 state; + struct dim_stats prev_stats; + struct dim_sample start_sample; + struct dim_sample measuring_sample; + struct work_struct work; + void *priv; + u8 profile_ix; + u8 mode; + u8 tune_state; + u8 steps_right; + u8 steps_left; + u8 tired; +}; + +enum rdma_nl_counter_mode { + RDMA_COUNTER_MODE_NONE = 0, + RDMA_COUNTER_MODE_AUTO = 1, + RDMA_COUNTER_MODE_MANUAL = 2, + RDMA_COUNTER_MODE_MAX = 3, +}; + +enum rdma_nl_counter_mask { + RDMA_COUNTER_MASK_QP_TYPE = 1, + RDMA_COUNTER_MASK_PID = 2, +}; + +enum rdma_restrack_type { + RDMA_RESTRACK_PD = 0, + RDMA_RESTRACK_CQ = 1, + RDMA_RESTRACK_QP = 2, + RDMA_RESTRACK_CM_ID = 3, + RDMA_RESTRACK_MR = 4, + RDMA_RESTRACK_CTX = 5, + RDMA_RESTRACK_COUNTER = 6, + RDMA_RESTRACK_SRQ = 7, + RDMA_RESTRACK_MAX = 8, +}; + +struct rdma_restrack_entry { + bool valid; + u8 no_track: 1; + struct kref kref; + struct completion comp; + struct task_struct *task; + const char *kern_name; + enum rdma_restrack_type type; + bool user; + u32 id; +}; + +struct rdma_link_ops { + struct list_head list; + const char *type; + int (*newlink)(const char *, struct net_device *); +}; + +struct auto_mode_param { + int qp_type; +}; + +struct rdma_counter_mode { + enum rdma_nl_counter_mode mode; + enum rdma_nl_counter_mask mask; + struct auto_mode_param param; +}; + +struct rdma_hw_stats; + +struct rdma_port_counter { + struct rdma_counter_mode mode; + struct rdma_hw_stats *hstats; + unsigned int num_counters; + struct mutex lock; +}; + +struct 
rdma_hw_stats { + struct mutex lock; + long unsigned int timestamp; + long unsigned int lifespan; + const char * const *names; + int num_counters; + u64 value[0]; +}; + +struct ib_device; + +struct rdma_counter { + struct rdma_restrack_entry res; + struct ib_device *device; + uint32_t id; + struct kref kref; + struct rdma_counter_mode mode; + struct mutex lock; + struct rdma_hw_stats *stats; + u32 port; +}; + +enum rdma_driver_id { + RDMA_DRIVER_UNKNOWN = 0, + RDMA_DRIVER_MLX5 = 1, + RDMA_DRIVER_MLX4 = 2, + RDMA_DRIVER_CXGB3 = 3, + RDMA_DRIVER_CXGB4 = 4, + RDMA_DRIVER_MTHCA = 5, + RDMA_DRIVER_BNXT_RE = 6, + RDMA_DRIVER_OCRDMA = 7, + RDMA_DRIVER_NES = 8, + RDMA_DRIVER_I40IW = 9, + RDMA_DRIVER_VMW_PVRDMA = 10, + RDMA_DRIVER_QEDR = 11, + RDMA_DRIVER_HNS = 12, + RDMA_DRIVER_USNIC = 13, + RDMA_DRIVER_RXE = 14, + RDMA_DRIVER_HFI1 = 15, + RDMA_DRIVER_QIB = 16, + RDMA_DRIVER_EFA = 17, + RDMA_DRIVER_SIW = 18, +}; + +enum ib_cq_notify_flags { + IB_CQ_SOLICITED = 1, + IB_CQ_NEXT_COMP = 2, + IB_CQ_SOLICITED_MASK = 3, + IB_CQ_REPORT_MISSED_EVENTS = 4, +}; + +struct ib_mad; + +enum rdma_link_layer { + IB_LINK_LAYER_UNSPECIFIED = 0, + IB_LINK_LAYER_INFINIBAND = 1, + IB_LINK_LAYER_ETHERNET = 2, +}; + +enum rdma_netdev_t { + RDMA_NETDEV_OPA_VNIC = 0, + RDMA_NETDEV_IPOIB = 1, +}; + +enum ib_srq_attr_mask { + IB_SRQ_MAX_WR = 1, + IB_SRQ_LIMIT = 2, +}; + +enum ib_mr_type { + IB_MR_TYPE_MEM_REG = 0, + IB_MR_TYPE_SG_GAPS = 1, + IB_MR_TYPE_DM = 2, + IB_MR_TYPE_USER = 3, + IB_MR_TYPE_DMA = 4, + IB_MR_TYPE_INTEGRITY = 5, +}; + +enum ib_uverbs_advise_mr_advice { + IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH = 0, + IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE = 1, + IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT = 2, +}; + +struct uverbs_attr_bundle; + +struct rdma_cm_id; + +struct iw_cm_id; + +struct iw_cm_conn_param; + +struct ib_qp; + +struct ib_send_wr; + +struct ib_recv_wr; + +struct ib_cq; + +struct ib_wc; + +struct ib_srq; + +struct ib_grh; + +struct ib_device_attr; + +struct ib_udata; + +struct ib_device_modify; + +struct ib_port_attr; + +struct ib_port_modify; + +struct ib_port_immutable; + +struct rdma_netdev_alloc_params; + +union ib_gid; + +struct ib_gid_attr; + +struct ib_ucontext; + +struct rdma_user_mmap_entry; + +struct ib_pd; + +struct ib_ah; + +struct rdma_ah_init_attr; + +struct rdma_ah_attr; + +struct ib_srq_init_attr; + +struct ib_srq_attr; + +struct ib_qp_init_attr; + +struct ib_qp_attr; + +struct ib_cq_init_attr; + +struct ib_mr; + +struct ib_sge; + +struct ib_mr_status; + +struct ib_mw; + +struct ib_xrcd; + +struct ib_flow; + +struct ib_flow_attr; + +struct ib_flow_action; + +struct ib_flow_action_attrs_esp; + +struct ib_wq; + +struct ib_wq_init_attr; + +struct ib_wq_attr; + +struct ib_rwq_ind_table; + +struct ib_rwq_ind_table_init_attr; + +struct ib_dm; + +struct ib_dm_alloc_attr; + +struct ib_dm_mr_attr; + +struct ib_counters; + +struct ib_counters_read_attr; + +struct ib_device_ops { + struct module *owner; + enum rdma_driver_id driver_id; + u32 uverbs_abi_ver; + unsigned int uverbs_no_driver_id_binding: 1; + int (*post_send)(struct ib_qp *, const struct ib_send_wr *, const struct ib_send_wr **); + int (*post_recv)(struct ib_qp *, const struct ib_recv_wr *, const struct ib_recv_wr **); + void (*drain_rq)(struct ib_qp *); + void (*drain_sq)(struct ib_qp *); + int (*poll_cq)(struct ib_cq *, int, struct ib_wc *); + int (*peek_cq)(struct ib_cq *, int); + int (*req_notify_cq)(struct ib_cq *, enum ib_cq_notify_flags); + int (*post_srq_recv)(struct ib_srq *, const struct ib_recv_wr *, const struct ib_recv_wr 
**); + int (*process_mad)(struct ib_device *, int, u32, const struct ib_wc *, const struct ib_grh *, const struct ib_mad *, struct ib_mad *, size_t *, u16 *); + int (*query_device)(struct ib_device *, struct ib_device_attr *, struct ib_udata *); + int (*modify_device)(struct ib_device *, int, struct ib_device_modify *); + void (*get_dev_fw_str)(struct ib_device *, char *); + const struct cpumask * (*get_vector_affinity)(struct ib_device *, int); + int (*query_port)(struct ib_device *, u32, struct ib_port_attr *); + int (*modify_port)(struct ib_device *, u32, int, struct ib_port_modify *); + int (*get_port_immutable)(struct ib_device *, u32, struct ib_port_immutable *); + enum rdma_link_layer (*get_link_layer)(struct ib_device *, u32); + struct net_device * (*get_netdev)(struct ib_device *, u32); + struct net_device * (*alloc_rdma_netdev)(struct ib_device *, u32, enum rdma_netdev_t, const char *, unsigned char, void (*)(struct net_device *)); + int (*rdma_netdev_get_params)(struct ib_device *, u32, enum rdma_netdev_t, struct rdma_netdev_alloc_params *); + int (*query_gid)(struct ib_device *, u32, int, union ib_gid *); + int (*add_gid)(const struct ib_gid_attr *, void **); + int (*del_gid)(const struct ib_gid_attr *, void **); + int (*query_pkey)(struct ib_device *, u32, u16, u16 *); + int (*alloc_ucontext)(struct ib_ucontext *, struct ib_udata *); + void (*dealloc_ucontext)(struct ib_ucontext *); + int (*mmap)(struct ib_ucontext *, struct vm_area_struct *); + void (*mmap_free)(struct rdma_user_mmap_entry *); + void (*disassociate_ucontext)(struct ib_ucontext *); + int (*alloc_pd)(struct ib_pd *, struct ib_udata *); + int (*dealloc_pd)(struct ib_pd *, struct ib_udata *); + int (*create_ah)(struct ib_ah *, struct rdma_ah_init_attr *, struct ib_udata *); + int (*create_user_ah)(struct ib_ah *, struct rdma_ah_init_attr *, struct ib_udata *); + int (*modify_ah)(struct ib_ah *, struct rdma_ah_attr *); + int (*query_ah)(struct ib_ah *, struct rdma_ah_attr *); + int (*destroy_ah)(struct ib_ah *, u32); + int (*create_srq)(struct ib_srq *, struct ib_srq_init_attr *, struct ib_udata *); + int (*modify_srq)(struct ib_srq *, struct ib_srq_attr *, enum ib_srq_attr_mask, struct ib_udata *); + int (*query_srq)(struct ib_srq *, struct ib_srq_attr *); + int (*destroy_srq)(struct ib_srq *, struct ib_udata *); + struct ib_qp * (*create_qp)(struct ib_pd *, struct ib_qp_init_attr *, struct ib_udata *); + int (*modify_qp)(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *); + int (*query_qp)(struct ib_qp *, struct ib_qp_attr *, int, struct ib_qp_init_attr *); + int (*destroy_qp)(struct ib_qp *, struct ib_udata *); + int (*create_cq)(struct ib_cq *, const struct ib_cq_init_attr *, struct ib_udata *); + int (*modify_cq)(struct ib_cq *, u16, u16); + int (*destroy_cq)(struct ib_cq *, struct ib_udata *); + int (*resize_cq)(struct ib_cq *, int, struct ib_udata *); + struct ib_mr * (*get_dma_mr)(struct ib_pd *, int); + struct ib_mr * (*reg_user_mr)(struct ib_pd *, u64, u64, u64, int, struct ib_udata *); + struct ib_mr * (*reg_user_mr_dmabuf)(struct ib_pd *, u64, u64, u64, int, int, struct ib_udata *); + struct ib_mr * (*rereg_user_mr)(struct ib_mr *, int, u64, u64, u64, int, struct ib_pd *, struct ib_udata *); + int (*dereg_mr)(struct ib_mr *, struct ib_udata *); + struct ib_mr * (*alloc_mr)(struct ib_pd *, enum ib_mr_type, u32); + struct ib_mr * (*alloc_mr_integrity)(struct ib_pd *, u32, u32); + int (*advise_mr)(struct ib_pd *, enum ib_uverbs_advise_mr_advice, u32, struct ib_sge *, u32, struct 
uverbs_attr_bundle *); + int (*map_mr_sg)(struct ib_mr *, struct scatterlist *, int, unsigned int *); + int (*check_mr_status)(struct ib_mr *, u32, struct ib_mr_status *); + int (*alloc_mw)(struct ib_mw *, struct ib_udata *); + int (*dealloc_mw)(struct ib_mw *); + int (*attach_mcast)(struct ib_qp *, union ib_gid *, u16); + int (*detach_mcast)(struct ib_qp *, union ib_gid *, u16); + int (*alloc_xrcd)(struct ib_xrcd *, struct ib_udata *); + int (*dealloc_xrcd)(struct ib_xrcd *, struct ib_udata *); + struct ib_flow * (*create_flow)(struct ib_qp *, struct ib_flow_attr *, struct ib_udata *); + int (*destroy_flow)(struct ib_flow *); + struct ib_flow_action * (*create_flow_action_esp)(struct ib_device *, const struct ib_flow_action_attrs_esp *, struct uverbs_attr_bundle *); + int (*destroy_flow_action)(struct ib_flow_action *); + int (*modify_flow_action_esp)(struct ib_flow_action *, const struct ib_flow_action_attrs_esp *, struct uverbs_attr_bundle *); + int (*set_vf_link_state)(struct ib_device *, int, u32, int); + int (*get_vf_config)(struct ib_device *, int, u32, struct ifla_vf_info *); + int (*get_vf_stats)(struct ib_device *, int, u32, struct ifla_vf_stats *); + int (*get_vf_guid)(struct ib_device *, int, u32, struct ifla_vf_guid *, struct ifla_vf_guid *); + int (*set_vf_guid)(struct ib_device *, int, u32, u64, int); + struct ib_wq * (*create_wq)(struct ib_pd *, struct ib_wq_init_attr *, struct ib_udata *); + int (*destroy_wq)(struct ib_wq *, struct ib_udata *); + int (*modify_wq)(struct ib_wq *, struct ib_wq_attr *, u32, struct ib_udata *); + int (*create_rwq_ind_table)(struct ib_rwq_ind_table *, struct ib_rwq_ind_table_init_attr *, struct ib_udata *); + int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *); + struct ib_dm * (*alloc_dm)(struct ib_device *, struct ib_ucontext *, struct ib_dm_alloc_attr *, struct uverbs_attr_bundle *); + int (*dealloc_dm)(struct ib_dm *, struct uverbs_attr_bundle *); + struct ib_mr * (*reg_dm_mr)(struct ib_pd *, struct ib_dm *, struct ib_dm_mr_attr *, struct uverbs_attr_bundle *); + int (*create_counters)(struct ib_counters *, struct uverbs_attr_bundle *); + int (*destroy_counters)(struct ib_counters *); + int (*read_counters)(struct ib_counters *, struct ib_counters_read_attr *, struct uverbs_attr_bundle *); + int (*map_mr_sg_pi)(struct ib_mr *, struct scatterlist *, int, unsigned int *, struct scatterlist *, int, unsigned int *); + struct rdma_hw_stats * (*alloc_hw_stats)(struct ib_device *, u32); + int (*get_hw_stats)(struct ib_device *, struct rdma_hw_stats *, u32, int); + int (*init_port)(struct ib_device *, u32, struct kobject *); + int (*fill_res_mr_entry)(struct sk_buff *, struct ib_mr *); + int (*fill_res_mr_entry_raw)(struct sk_buff *, struct ib_mr *); + int (*fill_res_cq_entry)(struct sk_buff *, struct ib_cq *); + int (*fill_res_cq_entry_raw)(struct sk_buff *, struct ib_cq *); + int (*fill_res_qp_entry)(struct sk_buff *, struct ib_qp *); + int (*fill_res_qp_entry_raw)(struct sk_buff *, struct ib_qp *); + int (*fill_res_cm_id_entry)(struct sk_buff *, struct rdma_cm_id *); + int (*enable_driver)(struct ib_device *); + void (*dealloc_driver)(struct ib_device *); + void (*iw_add_ref)(struct ib_qp *); + void (*iw_rem_ref)(struct ib_qp *); + struct ib_qp * (*iw_get_qp)(struct ib_device *, int); + int (*iw_connect)(struct iw_cm_id *, struct iw_cm_conn_param *); + int (*iw_accept)(struct iw_cm_id *, struct iw_cm_conn_param *); + int (*iw_reject)(struct iw_cm_id *, const void *, u8); + int (*iw_create_listen)(struct iw_cm_id *, int); + int 
(*iw_destroy_listen)(struct iw_cm_id *); + int (*counter_bind_qp)(struct rdma_counter *, struct ib_qp *); + int (*counter_unbind_qp)(struct ib_qp *); + int (*counter_dealloc)(struct rdma_counter *); + struct rdma_hw_stats * (*counter_alloc_stats)(struct rdma_counter *); + int (*counter_update_stats)(struct rdma_counter *); + int (*fill_stat_mr_entry)(struct sk_buff *, struct ib_mr *); + int (*query_ucontext)(struct ib_ucontext *, struct uverbs_attr_bundle *); + size_t size_ib_ah; + size_t size_ib_counters; + size_t size_ib_cq; + size_t size_ib_mw; + size_t size_ib_pd; + size_t size_ib_rwq_ind_table; + size_t size_ib_srq; + size_t size_ib_ucontext; + size_t size_ib_xrcd; +}; + +struct ib_core_device { + struct device dev; + possible_net_t rdma_net; + struct kobject *ports_kobj; + struct list_head port_list; + struct ib_device *owner; +}; + +enum ib_atomic_cap { + IB_ATOMIC_NONE = 0, + IB_ATOMIC_HCA = 1, + IB_ATOMIC_GLOB = 2, +}; + +struct ib_odp_caps { + uint64_t general_caps; + struct { + uint32_t rc_odp_caps; + uint32_t uc_odp_caps; + uint32_t ud_odp_caps; + uint32_t xrc_odp_caps; + } per_transport_caps; +}; + +struct ib_rss_caps { + u32 supported_qpts; + u32 max_rwq_indirection_tables; + u32 max_rwq_indirection_table_size; +}; + +struct ib_tm_caps { + u32 max_rndv_hdr_size; + u32 max_num_tags; + u32 flags; + u32 max_ops; + u32 max_sge; +}; + +struct ib_cq_caps { + u16 max_cq_moderation_count; + u16 max_cq_moderation_period; +}; + +struct ib_device_attr { + u64 fw_ver; + __be64 sys_image_guid; + u64 max_mr_size; + u64 page_size_cap; + u32 vendor_id; + u32 vendor_part_id; + u32 hw_ver; + int max_qp; + int max_qp_wr; + u64 device_cap_flags; + int max_send_sge; + int max_recv_sge; + int max_sge_rd; + int max_cq; + int max_cqe; + int max_mr; + int max_pd; + int max_qp_rd_atom; + int max_ee_rd_atom; + int max_res_rd_atom; + int max_qp_init_rd_atom; + int max_ee_init_rd_atom; + enum ib_atomic_cap atomic_cap; + enum ib_atomic_cap masked_atomic_cap; + int max_ee; + int max_rdd; + int max_mw; + int max_raw_ipv6_qp; + int max_raw_ethy_qp; + int max_mcast_grp; + int max_mcast_qp_attach; + int max_total_mcast_qp_attach; + int max_ah; + int max_srq; + int max_srq_wr; + int max_srq_sge; + unsigned int max_fast_reg_page_list_len; + unsigned int max_pi_fast_reg_page_list_len; + u16 max_pkeys; + u8 local_ca_ack_delay; + int sig_prot_cap; + int sig_guard_cap; + struct ib_odp_caps odp_caps; + uint64_t timestamp_mask; + uint64_t hca_core_clock; + struct ib_rss_caps rss_caps; + u32 max_wq_type_rq; + u32 raw_packet_caps; + struct ib_tm_caps tm_caps; + struct ib_cq_caps cq_caps; + u64 max_dm_size; + u32 max_sgl_rd; +}; + +struct rdma_restrack_root; + +struct uapi_definition; + +struct ib_port_data; + +struct ib_device { + struct device *dma_device; + struct ib_device_ops ops; + char name[64]; + struct callback_head callback_head; + struct list_head event_handler_list; + struct rw_semaphore event_handler_rwsem; + spinlock_t qp_open_list_lock; + struct rw_semaphore client_data_rwsem; + struct xarray client_data; + struct mutex unregistration_lock; + rwlock_t cache_lock; + struct ib_port_data *port_data; + int num_comp_vectors; + union { + struct device dev; + struct ib_core_device coredev; + }; + const struct attribute_group *groups[3]; + u64 uverbs_cmd_mask; + char node_desc[64]; + __be64 node_guid; + u32 local_dma_lkey; + u16 is_switch: 1; + u16 kverbs_provider: 1; + u16 use_cq_dim: 1; + u8 node_type; + u32 phys_port_cnt; + struct ib_device_attr attrs; + struct attribute_group *hw_stats_ag; + struct 
rdma_hw_stats *hw_stats; + struct rdmacg_device cg_device; + u32 index; + spinlock_t cq_pools_lock; + struct list_head cq_pools[3]; + struct rdma_restrack_root *res; + const struct uapi_definition *driver_def; + refcount_t refcount; + struct completion unreg_completion; + struct work_struct unregistration_work; + const struct rdma_link_ops *link_ops; + struct mutex compat_devs_mutex; + struct xarray compat_devs; + char iw_ifname[16]; + u32 iw_driver_flags; + u32 lag_flags; +}; + +enum ib_signature_type { + IB_SIG_TYPE_NONE = 0, + IB_SIG_TYPE_T10_DIF = 1, +}; + +enum ib_t10_dif_bg_type { + IB_T10DIF_CRC = 0, + IB_T10DIF_CSUM = 1, +}; + +struct ib_t10_dif_domain { + enum ib_t10_dif_bg_type bg_type; + u16 pi_interval; + u16 bg; + u16 app_tag; + u32 ref_tag; + bool ref_remap; + bool app_escape; + bool ref_escape; + u16 apptag_check_mask; +}; + +struct ib_sig_domain { + enum ib_signature_type sig_type; + union { + struct ib_t10_dif_domain dif; + } sig; +}; + +struct ib_sig_attrs { + u8 check_mask; + struct ib_sig_domain mem; + struct ib_sig_domain wire; + int meta_length; +}; + +enum ib_sig_err_type { + IB_SIG_BAD_GUARD = 0, + IB_SIG_BAD_REFTAG = 1, + IB_SIG_BAD_APPTAG = 2, +}; + +struct ib_sig_err { + enum ib_sig_err_type err_type; + u32 expected; + u32 actual; + u64 sig_err_offset; + u32 key; +}; + +enum ib_uverbs_flow_action_esp_keymat { + IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM = 0, +}; + +struct ib_uverbs_flow_action_esp_keymat_aes_gcm { + __u64 iv; + __u32 iv_algo; + __u32 salt; + __u32 icv_len; + __u32 key_len; + __u32 aes_key[8]; +}; + +enum ib_uverbs_flow_action_esp_replay { + IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE = 0, + IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP = 1, +}; + +struct ib_uverbs_flow_action_esp_replay_bmp { + __u32 size; +}; + +union ib_gid { + u8 raw[16]; + struct { + __be64 subnet_prefix; + __be64 interface_id; + } global; +}; + +enum ib_gid_type { + IB_GID_TYPE_IB = 0, + IB_GID_TYPE_ROCE = 1, + IB_GID_TYPE_ROCE_UDP_ENCAP = 2, + IB_GID_TYPE_SIZE = 3, +}; + +struct ib_gid_attr { + struct net_device *ndev; + struct ib_device *device; + union ib_gid gid; + enum ib_gid_type gid_type; + u16 index; + u32 port_num; +}; + +struct ib_cq_init_attr { + unsigned int cqe; + u32 comp_vector; + u32 flags; +}; + +struct ib_dm_mr_attr { + u64 length; + u64 offset; + u32 access_flags; +}; + +struct ib_dm_alloc_attr { + u64 length; + u32 alignment; + u32 flags; +}; + +enum ib_mtu { + IB_MTU_256 = 1, + IB_MTU_512 = 2, + IB_MTU_1024 = 3, + IB_MTU_2048 = 4, + IB_MTU_4096 = 5, +}; + +enum ib_port_state { + IB_PORT_NOP = 0, + IB_PORT_DOWN = 1, + IB_PORT_INIT = 2, + IB_PORT_ARMED = 3, + IB_PORT_ACTIVE = 4, + IB_PORT_ACTIVE_DEFER = 5, +}; + +struct ib_port_attr { + u64 subnet_prefix; + enum ib_port_state state; + enum ib_mtu max_mtu; + enum ib_mtu active_mtu; + u32 phys_mtu; + int gid_tbl_len; + unsigned int ip_gids: 1; + u32 port_cap_flags; + u32 max_msg_sz; + u32 bad_pkey_cntr; + u32 qkey_viol_cntr; + u16 pkey_tbl_len; + u32 sm_lid; + u32 lid; + u8 lmc; + u8 max_vl_num; + u8 sm_sl; + u8 subnet_timeout; + u8 init_type_reply; + u8 active_width; + u16 active_speed; + u8 phys_state; + u16 port_cap_flags2; +}; + +struct ib_device_modify { + u64 sys_image_guid; + char node_desc[64]; +}; + +struct ib_port_modify { + u32 set_port_cap_mask; + u32 clr_port_cap_mask; + u8 init_type; +}; + +enum ib_event_type { + IB_EVENT_CQ_ERR = 0, + IB_EVENT_QP_FATAL = 1, + IB_EVENT_QP_REQ_ERR = 2, + IB_EVENT_QP_ACCESS_ERR = 3, + IB_EVENT_COMM_EST = 4, + IB_EVENT_SQ_DRAINED = 5, + IB_EVENT_PATH_MIG = 6, + 
IB_EVENT_PATH_MIG_ERR = 7, + IB_EVENT_DEVICE_FATAL = 8, + IB_EVENT_PORT_ACTIVE = 9, + IB_EVENT_PORT_ERR = 10, + IB_EVENT_LID_CHANGE = 11, + IB_EVENT_PKEY_CHANGE = 12, + IB_EVENT_SM_CHANGE = 13, + IB_EVENT_SRQ_ERR = 14, + IB_EVENT_SRQ_LIMIT_REACHED = 15, + IB_EVENT_QP_LAST_WQE_REACHED = 16, + IB_EVENT_CLIENT_REREGISTER = 17, + IB_EVENT_GID_CHANGE = 18, + IB_EVENT_WQ_FATAL = 19, +}; + +struct ib_ucq_object; + +typedef void (*ib_comp_handler)(struct ib_cq *, void *); + +struct ib_event; + +struct ib_cq { + struct ib_device *device; + struct ib_ucq_object *uobject; + ib_comp_handler comp_handler; + void (*event_handler)(struct ib_event *, void *); + void *cq_context; + int cqe; + unsigned int cqe_used; + atomic_t usecnt; + enum ib_poll_context poll_ctx; + struct ib_wc *wc; + struct list_head pool_entry; + union { + struct irq_poll iop; + struct work_struct work; + }; + struct workqueue_struct *comp_wq; + struct dim *dim; + ktime_t timestamp; + u8 interrupt: 1; + u8 shared: 1; + unsigned int comp_vector; + struct rdma_restrack_entry res; +}; + +struct ib_uqp_object; + +enum ib_qp_type { + IB_QPT_SMI = 0, + IB_QPT_GSI = 1, + IB_QPT_RC = 2, + IB_QPT_UC = 3, + IB_QPT_UD = 4, + IB_QPT_RAW_IPV6 = 5, + IB_QPT_RAW_ETHERTYPE = 6, + IB_QPT_RAW_PACKET = 8, + IB_QPT_XRC_INI = 9, + IB_QPT_XRC_TGT = 10, + IB_QPT_MAX = 11, + IB_QPT_DRIVER = 255, + IB_QPT_RESERVED1 = 4096, + IB_QPT_RESERVED2 = 4097, + IB_QPT_RESERVED3 = 4098, + IB_QPT_RESERVED4 = 4099, + IB_QPT_RESERVED5 = 4100, + IB_QPT_RESERVED6 = 4101, + IB_QPT_RESERVED7 = 4102, + IB_QPT_RESERVED8 = 4103, + IB_QPT_RESERVED9 = 4104, + IB_QPT_RESERVED10 = 4105, +}; + +struct ib_qp_security; + +struct ib_qp { + struct ib_device *device; + struct ib_pd *pd; + struct ib_cq *send_cq; + struct ib_cq *recv_cq; + spinlock_t mr_lock; + int mrs_used; + struct list_head rdma_mrs; + struct list_head sig_mrs; + struct ib_srq *srq; + struct ib_xrcd *xrcd; + struct list_head xrcd_list; + atomic_t usecnt; + struct list_head open_list; + struct ib_qp *real_qp; + struct ib_uqp_object *uobject; + void (*event_handler)(struct ib_event *, void *); + void *qp_context; + const struct ib_gid_attr *av_sgid_attr; + const struct ib_gid_attr *alt_path_sgid_attr; + u32 qp_num; + u32 max_write_sge; + u32 max_read_sge; + enum ib_qp_type qp_type; + struct ib_rwq_ind_table *rwq_ind_tbl; + struct ib_qp_security *qp_sec; + u32 port; + bool integrity_en; + struct rdma_restrack_entry res; + struct rdma_counter *counter; +}; + +struct ib_usrq_object; + +enum ib_srq_type { + IB_SRQT_BASIC = 0, + IB_SRQT_XRC = 1, + IB_SRQT_TM = 2, +}; + +struct ib_srq { + struct ib_device *device; + struct ib_pd *pd; + struct ib_usrq_object *uobject; + void (*event_handler)(struct ib_event *, void *); + void *srq_context; + enum ib_srq_type srq_type; + atomic_t usecnt; + struct { + struct ib_cq *cq; + union { + struct { + struct ib_xrcd *xrcd; + u32 srq_num; + } xrc; + }; + } ext; + struct rdma_restrack_entry res; +}; + +struct ib_uwq_object; + +enum ib_wq_state { + IB_WQS_RESET = 0, + IB_WQS_RDY = 1, + IB_WQS_ERR = 2, +}; + +enum ib_wq_type { + IB_WQT_RQ = 0, +}; + +struct ib_wq { + struct ib_device *device; + struct ib_uwq_object *uobject; + void *wq_context; + void (*event_handler)(struct ib_event *, void *); + struct ib_pd *pd; + struct ib_cq *cq; + u32 wq_num; + enum ib_wq_state state; + enum ib_wq_type wq_type; + atomic_t usecnt; +}; + +struct ib_event { + struct ib_device *device; + union { + struct ib_cq *cq; + struct ib_qp *qp; + struct ib_srq *srq; + struct ib_wq *wq; + u32 port_num; + } element; + enum 
ib_event_type event; +}; + +struct ib_global_route { + const struct ib_gid_attr *sgid_attr; + union ib_gid dgid; + u32 flow_label; + u8 sgid_index; + u8 hop_limit; + u8 traffic_class; +}; + +struct ib_grh { + __be32 version_tclass_flow; + __be16 paylen; + u8 next_hdr; + u8 hop_limit; + union ib_gid sgid; + union ib_gid dgid; +}; + +struct ib_mr_status { + u32 fail_status; + struct ib_sig_err sig_err; +}; + +struct rdma_ah_init_attr { + struct rdma_ah_attr *ah_attr; + u32 flags; + struct net_device *xmit_slave; +}; + +enum rdma_ah_attr_type { + RDMA_AH_ATTR_TYPE_UNDEFINED = 0, + RDMA_AH_ATTR_TYPE_IB = 1, + RDMA_AH_ATTR_TYPE_ROCE = 2, + RDMA_AH_ATTR_TYPE_OPA = 3, +}; + +struct ib_ah_attr { + u16 dlid; + u8 src_path_bits; +}; + +struct roce_ah_attr { + u8 dmac[6]; +}; + +struct opa_ah_attr { + u32 dlid; + u8 src_path_bits; + bool make_grd; +}; + +struct rdma_ah_attr { + struct ib_global_route grh; + u8 sl; + u8 static_rate; + u32 port_num; + u8 ah_flags; + enum rdma_ah_attr_type type; + union { + struct ib_ah_attr ib; + struct roce_ah_attr roce; + struct opa_ah_attr opa; + }; +}; + +enum ib_wc_status { + IB_WC_SUCCESS = 0, + IB_WC_LOC_LEN_ERR = 1, + IB_WC_LOC_QP_OP_ERR = 2, + IB_WC_LOC_EEC_OP_ERR = 3, + IB_WC_LOC_PROT_ERR = 4, + IB_WC_WR_FLUSH_ERR = 5, + IB_WC_MW_BIND_ERR = 6, + IB_WC_BAD_RESP_ERR = 7, + IB_WC_LOC_ACCESS_ERR = 8, + IB_WC_REM_INV_REQ_ERR = 9, + IB_WC_REM_ACCESS_ERR = 10, + IB_WC_REM_OP_ERR = 11, + IB_WC_RETRY_EXC_ERR = 12, + IB_WC_RNR_RETRY_EXC_ERR = 13, + IB_WC_LOC_RDD_VIOL_ERR = 14, + IB_WC_REM_INV_RD_REQ_ERR = 15, + IB_WC_REM_ABORT_ERR = 16, + IB_WC_INV_EECN_ERR = 17, + IB_WC_INV_EEC_STATE_ERR = 18, + IB_WC_FATAL_ERR = 19, + IB_WC_RESP_TIMEOUT_ERR = 20, + IB_WC_GENERAL_ERR = 21, +}; + +enum ib_wc_opcode { + IB_WC_SEND = 0, + IB_WC_RDMA_WRITE = 1, + IB_WC_RDMA_READ = 2, + IB_WC_COMP_SWAP = 3, + IB_WC_FETCH_ADD = 4, + IB_WC_BIND_MW = 5, + IB_WC_LOCAL_INV = 6, + IB_WC_LSO = 7, + IB_WC_REG_MR = 8, + IB_WC_MASKED_COMP_SWAP = 9, + IB_WC_MASKED_FETCH_ADD = 10, + IB_WC_RECV = 128, + IB_WC_RECV_RDMA_WITH_IMM = 129, +}; + +struct ib_cqe { + void (*done)(struct ib_cq *, struct ib_wc *); +}; + +struct ib_wc { + union { + u64 wr_id; + struct ib_cqe *wr_cqe; + }; + enum ib_wc_status status; + enum ib_wc_opcode opcode; + u32 vendor_err; + u32 byte_len; + struct ib_qp *qp; + union { + __be32 imm_data; + u32 invalidate_rkey; + } ex; + u32 src_qp; + u32 slid; + int wc_flags; + u16 pkey_index; + u8 sl; + u8 dlid_path_bits; + u32 port_num; + u8 smac[6]; + u16 vlan_id; + u8 network_hdr_type; +}; + +struct ib_srq_attr { + u32 max_wr; + u32 max_sge; + u32 srq_limit; +}; + +struct ib_xrcd { + struct ib_device *device; + atomic_t usecnt; + struct inode *inode; + struct rw_semaphore tgt_qps_rwsem; + struct xarray tgt_qps; +}; + +struct ib_srq_init_attr { + void (*event_handler)(struct ib_event *, void *); + void *srq_context; + struct ib_srq_attr attr; + enum ib_srq_type srq_type; + struct { + struct ib_cq *cq; + union { + struct { + struct ib_xrcd *xrcd; + } xrc; + struct { + u32 max_num_tags; + } tag_matching; + }; + } ext; +}; + +struct ib_qp_cap { + u32 max_send_wr; + u32 max_recv_wr; + u32 max_send_sge; + u32 max_recv_sge; + u32 max_inline_data; + u32 max_rdma_ctxs; +}; + +enum ib_sig_type { + IB_SIGNAL_ALL_WR = 0, + IB_SIGNAL_REQ_WR = 1, +}; + +struct ib_qp_init_attr { + void (*event_handler)(struct ib_event *, void *); + void *qp_context; + struct ib_cq *send_cq; + struct ib_cq *recv_cq; + struct ib_srq *srq; + struct ib_xrcd *xrcd; + struct ib_qp_cap cap; + enum ib_sig_type sq_sig_type; + 
enum ib_qp_type qp_type; + u32 create_flags; + u32 port_num; + struct ib_rwq_ind_table *rwq_ind_tbl; + u32 source_qpn; +}; + +struct ib_uobject; + +struct ib_rwq_ind_table { + struct ib_device *device; + struct ib_uobject *uobject; + atomic_t usecnt; + u32 ind_tbl_num; + u32 log_ind_tbl_size; + struct ib_wq **ind_tbl; +}; + +enum ib_qp_state { + IB_QPS_RESET = 0, + IB_QPS_INIT = 1, + IB_QPS_RTR = 2, + IB_QPS_RTS = 3, + IB_QPS_SQD = 4, + IB_QPS_SQE = 5, + IB_QPS_ERR = 6, +}; + +enum ib_mig_state { + IB_MIG_MIGRATED = 0, + IB_MIG_REARM = 1, + IB_MIG_ARMED = 2, +}; + +enum ib_mw_type { + IB_MW_TYPE_1 = 1, + IB_MW_TYPE_2 = 2, +}; + +struct ib_qp_attr { + enum ib_qp_state qp_state; + enum ib_qp_state cur_qp_state; + enum ib_mtu path_mtu; + enum ib_mig_state path_mig_state; + u32 qkey; + u32 rq_psn; + u32 sq_psn; + u32 dest_qp_num; + int qp_access_flags; + struct ib_qp_cap cap; + struct rdma_ah_attr ah_attr; + struct rdma_ah_attr alt_ah_attr; + u16 pkey_index; + u16 alt_pkey_index; + u8 en_sqd_async_notify; + u8 sq_draining; + u8 max_rd_atomic; + u8 max_dest_rd_atomic; + u8 min_rnr_timer; + u32 port_num; + u8 timeout; + u8 retry_cnt; + u8 rnr_retry; + u32 alt_port_num; + u8 alt_timeout; + u32 rate_limit; + struct net_device *xmit_slave; +}; + +enum ib_wr_opcode { + IB_WR_RDMA_WRITE = 0, + IB_WR_RDMA_WRITE_WITH_IMM = 1, + IB_WR_SEND = 2, + IB_WR_SEND_WITH_IMM = 3, + IB_WR_RDMA_READ = 4, + IB_WR_ATOMIC_CMP_AND_SWP = 5, + IB_WR_ATOMIC_FETCH_AND_ADD = 6, + IB_WR_BIND_MW = 8, + IB_WR_LSO = 10, + IB_WR_SEND_WITH_INV = 9, + IB_WR_RDMA_READ_WITH_INV = 11, + IB_WR_LOCAL_INV = 7, + IB_WR_MASKED_ATOMIC_CMP_AND_SWP = 12, + IB_WR_MASKED_ATOMIC_FETCH_AND_ADD = 13, + IB_WR_REG_MR = 32, + IB_WR_REG_MR_INTEGRITY = 33, + IB_WR_RESERVED1 = 240, + IB_WR_RESERVED2 = 241, + IB_WR_RESERVED3 = 242, + IB_WR_RESERVED4 = 243, + IB_WR_RESERVED5 = 244, + IB_WR_RESERVED6 = 245, + IB_WR_RESERVED7 = 246, + IB_WR_RESERVED8 = 247, + IB_WR_RESERVED9 = 248, + IB_WR_RESERVED10 = 249, +}; + +struct ib_sge { + u64 addr; + u32 length; + u32 lkey; +}; + +struct ib_send_wr { + struct ib_send_wr *next; + union { + u64 wr_id; + struct ib_cqe *wr_cqe; + }; + struct ib_sge *sg_list; + int num_sge; + enum ib_wr_opcode opcode; + int send_flags; + union { + __be32 imm_data; + u32 invalidate_rkey; + } ex; +}; + +struct ib_ah { + struct ib_device *device; + struct ib_pd *pd; + struct ib_uobject *uobject; + const struct ib_gid_attr *sgid_attr; + enum rdma_ah_attr_type type; +}; + +struct ib_mr { + struct ib_device *device; + struct ib_pd *pd; + u32 lkey; + u32 rkey; + u64 iova; + u64 length; + unsigned int page_size; + enum ib_mr_type type; + bool need_inval; + union { + struct ib_uobject *uobject; + struct list_head qp_entry; + }; + struct ib_dm *dm; + struct ib_sig_attrs *sig_attrs; + struct rdma_restrack_entry res; +}; + +struct ib_recv_wr { + struct ib_recv_wr *next; + union { + u64 wr_id; + struct ib_cqe *wr_cqe; + }; + struct ib_sge *sg_list; + int num_sge; +}; + +struct ib_rdmacg_object { + struct rdma_cgroup *cg; +}; + +struct ib_uverbs_file; + +struct ib_ucontext { + struct ib_device *device; + struct ib_uverbs_file *ufile; + struct ib_rdmacg_object cg_obj; + struct rdma_restrack_entry res; + struct xarray mmap_xa; +}; + +struct uverbs_api_object; + +struct ib_uobject { + u64 user_handle; + struct ib_uverbs_file *ufile; + struct ib_ucontext *context; + void *object; + struct list_head list; + struct ib_rdmacg_object cg_obj; + int id; + struct kref ref; + atomic_t usecnt; + struct callback_head rcu; + const struct uverbs_api_object 
*uapi_object; +}; + +struct ib_udata { + const void *inbuf; + void *outbuf; + size_t inlen; + size_t outlen; +}; + +struct ib_pd { + u32 local_dma_lkey; + u32 flags; + struct ib_device *device; + struct ib_uobject *uobject; + atomic_t usecnt; + u32 unsafe_global_rkey; + struct ib_mr *__internal_mr; + struct rdma_restrack_entry res; +}; + +struct ib_wq_init_attr { + void *wq_context; + enum ib_wq_type wq_type; + u32 max_wr; + u32 max_sge; + struct ib_cq *cq; + void (*event_handler)(struct ib_event *, void *); + u32 create_flags; +}; + +struct ib_wq_attr { + enum ib_wq_state wq_state; + enum ib_wq_state curr_wq_state; + u32 flags; + u32 flags_mask; +}; + +struct ib_rwq_ind_table_init_attr { + u32 log_ind_tbl_size; + struct ib_wq **ind_tbl; +}; + +enum port_pkey_state { + IB_PORT_PKEY_NOT_VALID = 0, + IB_PORT_PKEY_VALID = 1, + IB_PORT_PKEY_LISTED = 2, +}; + +struct ib_port_pkey { + enum port_pkey_state state; + u16 pkey_index; + u32 port_num; + struct list_head qp_list; + struct list_head to_error_list; + struct ib_qp_security *sec; +}; + +struct ib_ports_pkeys; + +struct ib_qp_security { + struct ib_qp *qp; + struct ib_device *dev; + struct mutex mutex; + struct ib_ports_pkeys *ports_pkeys; + struct list_head shared_qp_list; + void *security; + bool destroying; + atomic_t error_list_count; + struct completion error_complete; + int error_comps_pending; +}; + +struct ib_ports_pkeys { + struct ib_port_pkey main; + struct ib_port_pkey alt; +}; + +struct ib_dm { + struct ib_device *device; + u32 length; + u32 flags; + struct ib_uobject *uobject; + atomic_t usecnt; +}; + +struct ib_mw { + struct ib_device *device; + struct ib_pd *pd; + struct ib_uobject *uobject; + u32 rkey; + enum ib_mw_type type; +}; + +enum ib_flow_attr_type { + IB_FLOW_ATTR_NORMAL = 0, + IB_FLOW_ATTR_ALL_DEFAULT = 1, + IB_FLOW_ATTR_MC_DEFAULT = 2, + IB_FLOW_ATTR_SNIFFER = 3, +}; + +enum ib_flow_spec_type { + IB_FLOW_SPEC_ETH = 32, + IB_FLOW_SPEC_IB = 34, + IB_FLOW_SPEC_IPV4 = 48, + IB_FLOW_SPEC_IPV6 = 49, + IB_FLOW_SPEC_ESP = 52, + IB_FLOW_SPEC_TCP = 64, + IB_FLOW_SPEC_UDP = 65, + IB_FLOW_SPEC_VXLAN_TUNNEL = 80, + IB_FLOW_SPEC_GRE = 81, + IB_FLOW_SPEC_MPLS = 96, + IB_FLOW_SPEC_INNER = 256, + IB_FLOW_SPEC_ACTION_TAG = 4096, + IB_FLOW_SPEC_ACTION_DROP = 4097, + IB_FLOW_SPEC_ACTION_HANDLE = 4098, + IB_FLOW_SPEC_ACTION_COUNT = 4099, +}; + +struct ib_flow_eth_filter { + u8 dst_mac[6]; + u8 src_mac[6]; + __be16 ether_type; + __be16 vlan_tag; + u8 real_sz[0]; +}; + +struct ib_flow_spec_eth { + u32 type; + u16 size; + struct ib_flow_eth_filter val; + struct ib_flow_eth_filter mask; +}; + +struct ib_flow_ib_filter { + __be16 dlid; + __u8 sl; + u8 real_sz[0]; +}; + +struct ib_flow_spec_ib { + u32 type; + u16 size; + struct ib_flow_ib_filter val; + struct ib_flow_ib_filter mask; +}; + +struct ib_flow_ipv4_filter { + __be32 src_ip; + __be32 dst_ip; + u8 proto; + u8 tos; + u8 ttl; + u8 flags; + u8 real_sz[0]; +}; + +struct ib_flow_spec_ipv4 { + u32 type; + u16 size; + struct ib_flow_ipv4_filter val; + struct ib_flow_ipv4_filter mask; +}; + +struct ib_flow_ipv6_filter { + u8 src_ip[16]; + u8 dst_ip[16]; + __be32 flow_label; + u8 next_hdr; + u8 traffic_class; + u8 hop_limit; + u8 real_sz[0]; +}; + +struct ib_flow_spec_ipv6 { + u32 type; + u16 size; + struct ib_flow_ipv6_filter val; + struct ib_flow_ipv6_filter mask; +}; + +struct ib_flow_tcp_udp_filter { + __be16 dst_port; + __be16 src_port; + u8 real_sz[0]; +}; + +struct ib_flow_spec_tcp_udp { + u32 type; + u16 size; + struct ib_flow_tcp_udp_filter val; + struct ib_flow_tcp_udp_filter mask; 
+}; + +struct ib_flow_tunnel_filter { + __be32 tunnel_id; + u8 real_sz[0]; +}; + +struct ib_flow_spec_tunnel { + u32 type; + u16 size; + struct ib_flow_tunnel_filter val; + struct ib_flow_tunnel_filter mask; +}; + +struct ib_flow_esp_filter { + __be32 spi; + __be32 seq; + u8 real_sz[0]; +}; + +struct ib_flow_spec_esp { + u32 type; + u16 size; + struct ib_flow_esp_filter val; + struct ib_flow_esp_filter mask; +}; + +struct ib_flow_gre_filter { + __be16 c_ks_res0_ver; + __be16 protocol; + __be32 key; + u8 real_sz[0]; +}; + +struct ib_flow_spec_gre { + u32 type; + u16 size; + struct ib_flow_gre_filter val; + struct ib_flow_gre_filter mask; +}; + +struct ib_flow_mpls_filter { + __be32 tag; + u8 real_sz[0]; +}; + +struct ib_flow_spec_mpls { + u32 type; + u16 size; + struct ib_flow_mpls_filter val; + struct ib_flow_mpls_filter mask; +}; + +struct ib_flow_spec_action_tag { + enum ib_flow_spec_type type; + u16 size; + u32 tag_id; +}; + +struct ib_flow_spec_action_drop { + enum ib_flow_spec_type type; + u16 size; +}; + +struct ib_flow_spec_action_handle { + enum ib_flow_spec_type type; + u16 size; + struct ib_flow_action *act; +}; + +enum ib_flow_action_type { + IB_FLOW_ACTION_UNSPECIFIED = 0, + IB_FLOW_ACTION_ESP = 1, +}; + +struct ib_flow_action { + struct ib_device *device; + struct ib_uobject *uobject; + enum ib_flow_action_type type; + atomic_t usecnt; +}; + +struct ib_flow_spec_action_count { + enum ib_flow_spec_type type; + u16 size; + struct ib_counters *counters; +}; + +struct ib_counters { + struct ib_device *device; + struct ib_uobject *uobject; + atomic_t usecnt; +}; + +union ib_flow_spec { + struct { + u32 type; + u16 size; + }; + struct ib_flow_spec_eth eth; + struct ib_flow_spec_ib ib; + struct ib_flow_spec_ipv4 ipv4; + struct ib_flow_spec_tcp_udp tcp_udp; + struct ib_flow_spec_ipv6 ipv6; + struct ib_flow_spec_tunnel tunnel; + struct ib_flow_spec_esp esp; + struct ib_flow_spec_gre gre; + struct ib_flow_spec_mpls mpls; + struct ib_flow_spec_action_tag flow_tag; + struct ib_flow_spec_action_drop drop; + struct ib_flow_spec_action_handle action; + struct ib_flow_spec_action_count flow_count; +}; + +struct ib_flow_attr { + enum ib_flow_attr_type type; + u16 size; + u16 priority; + u32 flags; + u8 num_of_specs; + u32 port; + union ib_flow_spec flows[0]; +}; + +struct ib_flow { + struct ib_qp *qp; + struct ib_device *device; + struct ib_uobject *uobject; +}; + +struct ib_flow_action_attrs_esp_keymats { + enum ib_uverbs_flow_action_esp_keymat protocol; + union { + struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm; + } keymat; +}; + +struct ib_flow_action_attrs_esp_replays { + enum ib_uverbs_flow_action_esp_replay protocol; + union { + struct ib_uverbs_flow_action_esp_replay_bmp bmp; + } replay; +}; + +struct ib_flow_spec_list { + struct ib_flow_spec_list *next; + union ib_flow_spec spec; +}; + +struct ib_flow_action_attrs_esp { + struct ib_flow_action_attrs_esp_keymats *keymat; + struct ib_flow_action_attrs_esp_replays *replay; + struct ib_flow_spec_list *encap; + u32 esn; + u32 spi; + u32 seq; + u32 tfc_pad; + u64 flags; + u64 hard_limit_pkts; +}; + +struct ib_pkey_cache; + +struct ib_gid_table; + +struct ib_port_cache { + u64 subnet_prefix; + struct ib_pkey_cache *pkey; + struct ib_gid_table *gid; + u8 lmc; + enum ib_port_state port_state; +}; + +struct ib_port_immutable { + int pkey_tbl_len; + int gid_tbl_len; + u32 core_cap_flags; + u32 max_mad_size; +}; + +struct ib_port_data { + struct ib_device *ib_dev; + struct ib_port_immutable immutable; + spinlock_t pkey_list_lock; + struct 
list_head pkey_list; + struct ib_port_cache cache; + spinlock_t netdev_lock; + struct net_device *netdev; + struct hlist_node ndev_hash_link; + struct rdma_port_counter port_counter; + struct rdma_hw_stats *hw_stats; +}; + +struct rdma_netdev_alloc_params { + size_t sizeof_priv; + unsigned int txqs; + unsigned int rxqs; + void *param; + int (*initialize_rdma_netdev)(struct ib_device *, u32, struct net_device *, void *); +}; + +struct ib_counters_read_attr { + u64 *counters_buff; + u32 ncounters; + u32 flags; +}; + +struct rdma_user_mmap_entry { + struct kref ref; + struct ib_ucontext *ucontext; + long unsigned int start_pgoff; + size_t npages; + bool driver_removed; +}; + +enum blk_zone_type { + BLK_ZONE_TYPE_CONVENTIONAL = 1, + BLK_ZONE_TYPE_SEQWRITE_REQ = 2, + BLK_ZONE_TYPE_SEQWRITE_PREF = 3, +}; + +enum blk_zone_cond { + BLK_ZONE_COND_NOT_WP = 0, + BLK_ZONE_COND_EMPTY = 1, + BLK_ZONE_COND_IMP_OPEN = 2, + BLK_ZONE_COND_EXP_OPEN = 3, + BLK_ZONE_COND_CLOSED = 4, + BLK_ZONE_COND_READONLY = 13, + BLK_ZONE_COND_FULL = 14, + BLK_ZONE_COND_OFFLINE = 15, +}; + +enum blk_zone_report_flags { + BLK_ZONE_REP_CAPACITY = 1, +}; + +struct blk_zone_report { + __u64 sector; + __u32 nr_zones; + __u32 flags; + struct blk_zone zones[0]; +}; + +struct blk_zone_range { + __u64 sector; + __u64 nr_sectors; +}; + +struct zone_report_args { + struct blk_zone *zones; +}; + +struct blk_revalidate_zone_args { + struct gendisk *disk; + long unsigned int *conv_zones_bitmap; + long unsigned int *seq_zones_wlock; + unsigned int nr_zones; + sector_t zone_sectors; + sector_t sector; +}; + +enum wbt_flags { + WBT_TRACKED = 1, + WBT_READ = 2, + WBT_KSWAPD = 4, + WBT_DISCARD = 8, + WBT_NR_BITS = 4, +}; + +enum { + WBT_STATE_ON_DEFAULT = 1, + WBT_STATE_ON_MANUAL = 2, + WBT_STATE_OFF_DEFAULT = 3, +}; + +struct rq_wb { + unsigned int wb_background; + unsigned int wb_normal; + short int enable_state; + unsigned int unknown_cnt; + u64 win_nsec; + u64 cur_win_nsec; + struct blk_stat_callback *cb; + u64 sync_issue; + void *sync_cookie; + unsigned int wc; + long unsigned int last_issue; + long unsigned int last_comp; + long unsigned int min_lat_nsec; + struct rq_qos rqos; + struct rq_wait rq_wait[3]; + struct rq_depth rq_depth; +}; + +struct trace_event_raw_wbt_stat { + struct trace_entry ent; + char name[32]; + s64 rmean; + u64 rmin; + u64 rmax; + s64 rnr_samples; + s64 rtime; + s64 wmean; + u64 wmin; + u64 wmax; + s64 wnr_samples; + s64 wtime; + char __data[0]; +}; + +struct trace_event_raw_wbt_lat { + struct trace_entry ent; + char name[32]; + long unsigned int lat; + char __data[0]; +}; + +struct trace_event_raw_wbt_step { + struct trace_entry ent; + char name[32]; + const char *msg; + int step; + long unsigned int window; + unsigned int bg; + unsigned int normal; + unsigned int max; + char __data[0]; +}; + +struct trace_event_raw_wbt_timer { + struct trace_entry ent; + char name[32]; + unsigned int status; + int step; + unsigned int inflight; + char __data[0]; +}; + +struct trace_event_data_offsets_wbt_stat {}; + +struct trace_event_data_offsets_wbt_lat {}; + +struct trace_event_data_offsets_wbt_step {}; + +struct trace_event_data_offsets_wbt_timer {}; + +typedef void (*btf_trace_wbt_stat)(void *, struct backing_dev_info *, struct blk_rq_stat *); + +typedef void (*btf_trace_wbt_lat)(void *, struct backing_dev_info *, long unsigned int); + +typedef void (*btf_trace_wbt_step)(void *, struct backing_dev_info *, const char *, int, long unsigned int, unsigned int, unsigned int, unsigned int); + +typedef void 
(*btf_trace_wbt_timer)(void *, struct backing_dev_info *, unsigned int, int, unsigned int); + +enum { + RWB_DEF_DEPTH = 16, + RWB_WINDOW_NSEC = 100000000, + RWB_MIN_WRITE_SAMPLES = 3, + RWB_UNKNOWN_BUMP = 5, +}; + +enum { + LAT_OK = 1, + LAT_UNKNOWN = 2, + LAT_UNKNOWN_WRITES = 3, + LAT_EXCEEDED = 4, +}; + +struct wbt_wait_data { + struct rq_wb *rwb; + enum wbt_flags wb_acct; + long unsigned int rw; +}; + +struct show_busy_params { + struct seq_file *m; + struct blk_mq_hw_ctx *hctx; +}; + +enum opal_mbr { + OPAL_MBR_ENABLE = 0, + OPAL_MBR_DISABLE = 1, +}; + +enum opal_mbr_done_flag { + OPAL_MBR_NOT_DONE = 0, + OPAL_MBR_DONE = 1, +}; + +enum opal_user { + OPAL_ADMIN1 = 0, + OPAL_USER1 = 1, + OPAL_USER2 = 2, + OPAL_USER3 = 3, + OPAL_USER4 = 4, + OPAL_USER5 = 5, + OPAL_USER6 = 6, + OPAL_USER7 = 7, + OPAL_USER8 = 8, + OPAL_USER9 = 9, +}; + +enum opal_lock_state { + OPAL_RO = 1, + OPAL_RW = 2, + OPAL_LK = 4, +}; + +struct opal_key { + __u8 lr; + __u8 key_len; + __u8 __align[6]; + __u8 key[256]; +}; + +struct opal_lr_act { + struct opal_key key; + __u32 sum; + __u8 num_lrs; + __u8 lr[9]; + __u8 align[2]; +}; + +struct opal_session_info { + __u32 sum; + __u32 who; + struct opal_key opal_key; +}; + +struct opal_user_lr_setup { + __u64 range_start; + __u64 range_length; + __u32 RLE; + __u32 WLE; + struct opal_session_info session; +}; + +struct opal_lock_unlock { + struct opal_session_info session; + __u32 l_state; + __u8 __align[4]; +}; + +struct opal_new_pw { + struct opal_session_info session; + struct opal_session_info new_user_pw; +}; + +struct opal_mbr_data { + struct opal_key key; + __u8 enable_disable; + __u8 __align[7]; +}; + +struct opal_mbr_done { + struct opal_key key; + __u8 done_flag; + __u8 __align[7]; +}; + +struct opal_shadow_mbr { + struct opal_key key; + const __u64 data; + __u64 offset; + __u64 size; +}; + +enum opal_table_ops { + OPAL_READ_TABLE = 0, + OPAL_WRITE_TABLE = 1, +}; + +struct opal_read_write_table { + struct opal_key key; + const __u64 data; + const __u8 table_uid[8]; + __u64 offset; + __u64 size; + __u64 flags; + __u64 priv; +}; + +typedef int sec_send_recv(void *, u16, u8, void *, size_t, bool); + +enum { + TCG_SECP_00 = 0, + TCG_SECP_01 = 1, +}; + +enum opal_response_token { + OPAL_DTA_TOKENID_BYTESTRING = 224, + OPAL_DTA_TOKENID_SINT = 225, + OPAL_DTA_TOKENID_UINT = 226, + OPAL_DTA_TOKENID_TOKEN = 227, + OPAL_DTA_TOKENID_INVALID = 0, +}; + +enum opal_uid { + OPAL_SMUID_UID = 0, + OPAL_THISSP_UID = 1, + OPAL_ADMINSP_UID = 2, + OPAL_LOCKINGSP_UID = 3, + OPAL_ENTERPRISE_LOCKINGSP_UID = 4, + OPAL_ANYBODY_UID = 5, + OPAL_SID_UID = 6, + OPAL_ADMIN1_UID = 7, + OPAL_USER1_UID = 8, + OPAL_USER2_UID = 9, + OPAL_PSID_UID = 10, + OPAL_ENTERPRISE_BANDMASTER0_UID = 11, + OPAL_ENTERPRISE_ERASEMASTER_UID = 12, + OPAL_TABLE_TABLE = 13, + OPAL_LOCKINGRANGE_GLOBAL = 14, + OPAL_LOCKINGRANGE_ACE_RDLOCKED = 15, + OPAL_LOCKINGRANGE_ACE_WRLOCKED = 16, + OPAL_MBRCONTROL = 17, + OPAL_MBR = 18, + OPAL_AUTHORITY_TABLE = 19, + OPAL_C_PIN_TABLE = 20, + OPAL_LOCKING_INFO_TABLE = 21, + OPAL_ENTERPRISE_LOCKING_INFO_TABLE = 22, + OPAL_DATASTORE = 23, + OPAL_C_PIN_MSID = 24, + OPAL_C_PIN_SID = 25, + OPAL_C_PIN_ADMIN1 = 26, + OPAL_HALF_UID_AUTHORITY_OBJ_REF = 27, + OPAL_HALF_UID_BOOLEAN_ACE = 28, + OPAL_UID_HEXFF = 29, +}; + +enum opal_method { + OPAL_PROPERTIES = 0, + OPAL_STARTSESSION = 1, + OPAL_REVERT = 2, + OPAL_ACTIVATE = 3, + OPAL_EGET = 4, + OPAL_ESET = 5, + OPAL_NEXT = 6, + OPAL_EAUTHENTICATE = 7, + OPAL_GETACL = 8, + OPAL_GENKEY = 9, + OPAL_REVERTSP = 10, + OPAL_GET = 11, + OPAL_SET = 12, 
+ OPAL_AUTHENTICATE = 13, + OPAL_RANDOM = 14, + OPAL_ERASE = 15, +}; + +enum opal_token { + OPAL_TRUE = 1, + OPAL_FALSE = 0, + OPAL_BOOLEAN_EXPR = 3, + OPAL_TABLE = 0, + OPAL_STARTROW = 1, + OPAL_ENDROW = 2, + OPAL_STARTCOLUMN = 3, + OPAL_ENDCOLUMN = 4, + OPAL_VALUES = 1, + OPAL_TABLE_UID = 0, + OPAL_TABLE_NAME = 1, + OPAL_TABLE_COMMON = 2, + OPAL_TABLE_TEMPLATE = 3, + OPAL_TABLE_KIND = 4, + OPAL_TABLE_COLUMN = 5, + OPAL_TABLE_COLUMNS = 6, + OPAL_TABLE_ROWS = 7, + OPAL_TABLE_ROWS_FREE = 8, + OPAL_TABLE_ROW_BYTES = 9, + OPAL_TABLE_LASTID = 10, + OPAL_TABLE_MIN = 11, + OPAL_TABLE_MAX = 12, + OPAL_PIN = 3, + OPAL_RANGESTART = 3, + OPAL_RANGELENGTH = 4, + OPAL_READLOCKENABLED = 5, + OPAL_WRITELOCKENABLED = 6, + OPAL_READLOCKED = 7, + OPAL_WRITELOCKED = 8, + OPAL_ACTIVEKEY = 10, + OPAL_LIFECYCLE = 6, + OPAL_MAXRANGES = 4, + OPAL_MBRENABLE = 1, + OPAL_MBRDONE = 2, + OPAL_HOSTPROPERTIES = 0, + OPAL_STARTLIST = 240, + OPAL_ENDLIST = 241, + OPAL_STARTNAME = 242, + OPAL_ENDNAME = 243, + OPAL_CALL = 248, + OPAL_ENDOFDATA = 249, + OPAL_ENDOFSESSION = 250, + OPAL_STARTTRANSACTON = 251, + OPAL_ENDTRANSACTON = 252, + OPAL_EMPTYATOM = 255, + OPAL_WHERE = 0, +}; + +enum opal_parameter { + OPAL_SUM_SET_LIST = 393216, +}; + +struct opal_compacket { + __be32 reserved0; + u8 extendedComID[4]; + __be32 outstandingData; + __be32 minTransfer; + __be32 length; +}; + +struct opal_packet { + __be32 tsn; + __be32 hsn; + __be32 seq_number; + __be16 reserved0; + __be16 ack_type; + __be32 acknowledgment; + __be32 length; +}; + +struct opal_data_subpacket { + u8 reserved0[6]; + __be16 kind; + __be32 length; +}; + +struct opal_header { + struct opal_compacket cp; + struct opal_packet pkt; + struct opal_data_subpacket subpkt; +}; + +struct d0_header { + __be32 length; + __be32 revision; + __be32 reserved01; + __be32 reserved02; + u8 ignored[32]; +}; + +struct d0_tper_features { + u8 supported_features; + u8 reserved01[3]; + __be32 reserved02; + __be32 reserved03; +}; + +struct d0_locking_features { + u8 supported_features; + u8 reserved01[3]; + __be32 reserved02; + __be32 reserved03; +}; + +struct d0_geometry_features { + u8 header[4]; + u8 reserved01; + u8 reserved02[7]; + __be32 logical_block_size; + __be64 alignment_granularity; + __be64 lowest_aligned_lba; +}; + +struct d0_opal_v100 { + __be16 baseComID; + __be16 numComIDs; +}; + +struct d0_single_user_mode { + __be32 num_locking_objects; + u8 reserved01; + u8 reserved02; + __be16 reserved03; + __be32 reserved04; +}; + +struct d0_opal_v200 { + __be16 baseComID; + __be16 numComIDs; + u8 range_crossing; + u8 num_locking_admin_auth[2]; + u8 num_locking_user_auth[2]; + u8 initialPIN; + u8 revertedPIN; + u8 reserved01; + __be32 reserved02; +}; + +struct d0_features { + __be16 code; + u8 r_version; + u8 length; + u8 features[0]; +}; + +struct opal_dev; + +struct opal_step { + int (*fn)(struct opal_dev *, void *); + void *data; +}; + +enum opal_atom_width { + OPAL_WIDTH_TINY = 0, + OPAL_WIDTH_SHORT = 1, + OPAL_WIDTH_MEDIUM = 2, + OPAL_WIDTH_LONG = 3, + OPAL_WIDTH_TOKEN = 4, +}; + +struct opal_resp_tok { + const u8 *pos; + size_t len; + enum opal_response_token type; + enum opal_atom_width width; + union { + u64 u; + s64 s; + } stored; +}; + +struct parsed_resp { + int num; + struct opal_resp_tok toks[64]; +}; + +struct opal_dev { + bool supported; + bool mbr_enabled; + void *data; + sec_send_recv *send_recv; + struct mutex dev_lock; + u16 comid; + u32 hsn; + u32 tsn; + u64 align; + u64 lowest_lba; + size_t pos; + u8 cmd[2048]; + u8 resp[2048]; + struct parsed_resp parsed; + 
size_t prev_d_len; + void *prev_data; + struct list_head unlk_lst; +}; + +typedef int cont_fn(struct opal_dev *); + +struct opal_suspend_data { + struct opal_lock_unlock unlk; + u8 lr; + struct list_head node; +}; + +struct blk_ksm_keyslot { + atomic_t slot_refs; + struct list_head idle_slot_node; + struct hlist_node hash_node; + const struct blk_crypto_key *key; + struct blk_keyslot_manager *ksm; +}; + +struct blk_ksm_ll_ops { + int (*keyslot_program)(struct blk_keyslot_manager *, const struct blk_crypto_key *, unsigned int); + int (*keyslot_evict)(struct blk_keyslot_manager *, const struct blk_crypto_key *, unsigned int); +}; + +struct blk_keyslot_manager { + struct blk_ksm_ll_ops ksm_ll_ops; + unsigned int max_dun_bytes_supported; + unsigned int crypto_modes_supported[4]; + struct device *dev; + unsigned int num_slots; + struct rw_semaphore lock; + wait_queue_head_t idle_slots_wait_queue; + struct list_head idle_slots; + spinlock_t idle_slots_lock; + struct hlist_head *slot_hashtable; + unsigned int log_slot_ht_size; + struct blk_ksm_keyslot *slots; +}; + +struct blk_crypto_mode { + const char *cipher_str; + unsigned int keysize; + unsigned int ivsize; +}; + +struct bio_fallback_crypt_ctx { + struct bio_crypt_ctx crypt_ctx; + struct bvec_iter crypt_iter; + union { + struct { + struct work_struct work; + struct bio *bio; + }; + struct { + void *bi_private_orig; + bio_end_io_t *bi_end_io_orig; + }; + }; +}; + +struct blk_crypto_keyslot { + enum blk_crypto_mode_num crypto_mode; + struct crypto_skcipher *tfms[4]; +}; + +union blk_crypto_iv { + __le64 dun[4]; + u8 bytes[32]; +}; + +typedef int (*cmp_r_func_t)(const void *, const void *, const void *); + +struct siprand_state { + long unsigned int v0; + long unsigned int v1; + long unsigned int v2; + long unsigned int v3; +}; + +typedef __kernel_long_t __kernel_ptrdiff_t; + +typedef __kernel_ptrdiff_t ptrdiff_t; + +struct region { + unsigned int start; + unsigned int off; + unsigned int group_len; + unsigned int end; + unsigned int nbits; +}; + +enum { + REG_OP_ISFREE = 0, + REG_OP_ALLOC = 1, + REG_OP_RELEASE = 2, +}; + +typedef struct scatterlist *sg_alloc_fn(unsigned int, gfp_t); + +typedef void sg_free_fn(struct scatterlist *, unsigned int); + +struct sg_page_iter { + struct scatterlist *sg; + unsigned int sg_pgoffset; + unsigned int __nents; + int __pg_advance; +}; + +struct sg_dma_page_iter { + struct sg_page_iter base; +}; + +struct sg_mapping_iter { + struct page *page; + void *addr; + size_t length; + size_t consumed; + struct sg_page_iter piter; + unsigned int __offset; + unsigned int __remaining; + unsigned int __flags; +}; + +struct csum_state { + __wsum csum; + size_t off; +}; + +struct rhltable { + struct rhashtable ht; +}; + +struct rhashtable_walker { + struct list_head list; + struct bucket_table *tbl; +}; + +struct rhashtable_iter { + struct rhashtable *ht; + struct rhash_head *p; + struct rhlist_head *list; + struct rhashtable_walker walker; + unsigned int slot; + unsigned int skip; + bool end_of_table; +}; + +union nested_table { + union nested_table *table; + struct rhash_lock_head *bucket; +}; + +struct once_work { + struct work_struct work; + struct static_key_true *key; + struct module *module; +}; + +struct genradix_iter { + size_t offset; + size_t pos; +}; + +struct genradix_node { + union { + struct genradix_node *children[512]; + u8 data[4096]; + }; +}; + +struct reciprocal_value_adv { + u32 m; + u8 sh; + u8 exp; + bool is_wide_m; +}; + +enum devm_ioremap_type { + DEVM_IOREMAP = 0, + DEVM_IOREMAP_UC = 1, + 
DEVM_IOREMAP_WC = 2, + DEVM_IOREMAP_NP = 3, +}; + +struct pcim_iomap_devres { + void *table[6]; +}; + +struct btree_head { + long unsigned int *node; + mempool_t *mempool; + int height; +}; + +struct btree_geo { + int keylen; + int no_pairs; + int no_longs; +}; + +typedef void (*visitor128_t)(void *, long unsigned int, u64, u64, size_t); + +typedef void (*visitorl_t)(void *, long unsigned int, long unsigned int, size_t); + +typedef void (*visitor32_t)(void *, long unsigned int, u32, size_t); + +typedef void (*visitor64_t)(void *, long unsigned int, u64, size_t); + +enum assoc_array_walk_status { + assoc_array_walk_tree_empty = 0, + assoc_array_walk_found_terminal_node = 1, + assoc_array_walk_found_wrong_shortcut = 2, +}; + +struct assoc_array_walk_result { + struct { + struct assoc_array_node *node; + int level; + int slot; + } terminal_node; + struct { + struct assoc_array_shortcut *shortcut; + int level; + int sc_level; + long unsigned int sc_segments; + long unsigned int dissimilarity; + } wrong_shortcut; +}; + +struct assoc_array_delete_collapse_context { + struct assoc_array_node *node; + const void *skip_leaf; + int slot; +}; + +struct linear_range { + unsigned int min; + unsigned int min_sel; + unsigned int max_sel; + unsigned int step; +}; + +enum packing_op { + PACK = 0, + UNPACK = 1, +}; + +struct xxh32_state { + uint32_t total_len_32; + uint32_t large_len; + uint32_t v1; + uint32_t v2; + uint32_t v3; + uint32_t v4; + uint32_t mem32[4]; + uint32_t memsize; +}; + +struct xxh64_state { + uint64_t total_len; + uint64_t v1; + uint64_t v2; + uint64_t v3; + uint64_t v4; + uint64_t mem64[4]; + uint32_t memsize; +}; + +struct gen_pool_chunk { + struct list_head next_chunk; + atomic_long_t avail; + phys_addr_t phys_addr; + void *owner; + long unsigned int start_addr; + long unsigned int end_addr; + long unsigned int bits[0]; +}; + +struct genpool_data_align { + int align; +}; + +struct genpool_data_fixed { + long unsigned int offset; +}; + +typedef struct { + unsigned char op; + unsigned char bits; + short unsigned int val; +} code; + +typedef enum { + HEAD = 0, + FLAGS = 1, + TIME = 2, + OS = 3, + EXLEN = 4, + EXTRA = 5, + NAME = 6, + COMMENT = 7, + HCRC = 8, + DICTID = 9, + DICT = 10, + TYPE = 11, + TYPEDO = 12, + STORED = 13, + COPY = 14, + TABLE = 15, + LENLENS = 16, + CODELENS = 17, + LEN = 18, + LENEXT = 19, + DIST = 20, + DISTEXT = 21, + MATCH = 22, + LIT = 23, + CHECK = 24, + LENGTH = 25, + DONE = 26, + BAD = 27, + MEM = 28, + SYNC = 29, +} inflate_mode; + +struct inflate_state { + inflate_mode mode; + int last; + int wrap; + int havedict; + int flags; + unsigned int dmax; + long unsigned int check; + long unsigned int total; + unsigned int wbits; + unsigned int wsize; + unsigned int whave; + unsigned int write; + unsigned char *window; + long unsigned int hold; + unsigned int bits; + unsigned int length; + unsigned int offset; + unsigned int extra; + const code *lencode; + const code *distcode; + unsigned int lenbits; + unsigned int distbits; + unsigned int ncode; + unsigned int nlen; + unsigned int ndist; + unsigned int have; + code *next; + short unsigned int lens[320]; + short unsigned int work[288]; + code codes[2048]; +}; + +union uu { + short unsigned int us; + unsigned char b[2]; +}; + +typedef unsigned int uInt; + +typedef enum { + CODES = 0, + LENS = 1, + DISTS = 2, +} codetype; + +struct inflate_workspace { + struct inflate_state inflate_state; + unsigned char working_window[32768]; +}; + +typedef unsigned char uch; + +typedef short unsigned int ush; + +typedef long 
unsigned int ulg; + +struct ct_data_s { + union { + ush freq; + ush code; + } fc; + union { + ush dad; + ush len; + } dl; +}; + +typedef struct ct_data_s ct_data; + +struct static_tree_desc_s { + const ct_data *static_tree; + const int *extra_bits; + int extra_base; + int elems; + int max_length; +}; + +typedef struct static_tree_desc_s static_tree_desc; + +struct tree_desc_s { + ct_data *dyn_tree; + int max_code; + static_tree_desc *stat_desc; +}; + +typedef ush Pos; + +typedef unsigned int IPos; + +struct deflate_state { + z_streamp strm; + int status; + Byte *pending_buf; + ulg pending_buf_size; + Byte *pending_out; + int pending; + int noheader; + Byte data_type; + Byte method; + int last_flush; + uInt w_size; + uInt w_bits; + uInt w_mask; + Byte *window; + ulg window_size; + Pos *prev; + Pos *head; + uInt ins_h; + uInt hash_size; + uInt hash_bits; + uInt hash_mask; + uInt hash_shift; + long int block_start; + uInt match_length; + IPos prev_match; + int match_available; + uInt strstart; + uInt match_start; + uInt lookahead; + uInt prev_length; + uInt max_chain_length; + uInt max_lazy_match; + int level; + int strategy; + uInt good_match; + int nice_match; + struct ct_data_s dyn_ltree[573]; + struct ct_data_s dyn_dtree[61]; + struct ct_data_s bl_tree[39]; + struct tree_desc_s l_desc; + struct tree_desc_s d_desc; + struct tree_desc_s bl_desc; + ush bl_count[16]; + int heap[573]; + int heap_len; + int heap_max; + uch depth[573]; + uch *l_buf; + uInt lit_bufsize; + uInt last_lit; + ush *d_buf; + ulg opt_len; + ulg static_len; + ulg compressed_len; + uInt matches; + int last_eob_len; + ush bi_buf; + int bi_valid; +}; + +typedef struct deflate_state deflate_state; + +typedef enum { + need_more = 0, + block_done = 1, + finish_started = 2, + finish_done = 3, +} block_state; + +typedef block_state (*compress_func)(deflate_state *, int); + +struct deflate_workspace { + deflate_state deflate_memory; + Byte *window_memory; + Pos *prev_memory; + Pos *head_memory; + char *overlay_memory; +}; + +typedef struct deflate_workspace deflate_workspace; + +struct config_s { + ush good_length; + ush max_lazy; + ush nice_length; + ush max_chain; + compress_func func; +}; + +typedef struct config_s config; + +typedef struct tree_desc_s tree_desc; + +typedef struct { + const uint8_t *externalDict; + size_t extDictSize; + const uint8_t *prefixEnd; + size_t prefixSize; +} LZ4_streamDecode_t_internal; + +typedef union { + long long unsigned int table[4]; + LZ4_streamDecode_t_internal internal_donotuse; +} LZ4_streamDecode_t; + +typedef uint8_t BYTE; + +typedef uint16_t U16; + +typedef uint32_t U32; + +typedef uint64_t U64; + +typedef uintptr_t uptrval; + +typedef enum { + noDict = 0, + withPrefix64k = 1, + usingExtDict = 2, +} dict_directive; + +typedef enum { + endOnOutputSize = 0, + endOnInputSize = 1, +} endCondition_directive; + +typedef enum { + decode_full_block = 0, + partial_decode = 1, +} earlyEnd_directive; + +typedef struct { + size_t bitContainer; + unsigned int bitsConsumed; + const char *ptr; + const char *start; +} BIT_DStream_t; + +typedef enum { + BIT_DStream_unfinished = 0, + BIT_DStream_endOfBuffer = 1, + BIT_DStream_completed = 2, + BIT_DStream_overflow = 3, +} BIT_DStream_status; + +typedef U32 HUF_DTable; + +typedef struct { + BYTE maxTableLog; + BYTE tableType; + BYTE tableLog; + BYTE reserved; +} DTableDesc; + +typedef struct { + BYTE byte; + BYTE nbBits; +} HUF_DEltX2; + +typedef struct { + U16 sequence; + BYTE nbBits; + BYTE length; +} HUF_DEltX4; + +typedef struct { + BYTE symbol; + BYTE 
weight; +} sortedSymbol_t; + +typedef U32 rankValCol_t[13]; + +typedef struct { + U32 tableTime; + U32 decode256Time; +} algo_time_t; + +typedef unsigned int FSE_DTable; + +typedef struct { + FSE_DTable LLTable[513]; + FSE_DTable OFTable[257]; + FSE_DTable MLTable[513]; + HUF_DTable hufTable[4097]; + U64 workspace[384]; + U32 rep[3]; +} ZSTD_entropyTables_t; + +typedef struct { + long long unsigned int frameContentSize; + unsigned int windowSize; + unsigned int dictID; + unsigned int checksumFlag; +} ZSTD_frameParams; + +typedef enum { + bt_raw = 0, + bt_rle = 1, + bt_compressed = 2, + bt_reserved = 3, +} blockType_e; + +typedef enum { + ZSTDds_getFrameHeaderSize = 0, + ZSTDds_decodeFrameHeader = 1, + ZSTDds_decodeBlockHeader = 2, + ZSTDds_decompressBlock = 3, + ZSTDds_decompressLastBlock = 4, + ZSTDds_checkChecksum = 5, + ZSTDds_decodeSkippableHeader = 6, + ZSTDds_skipFrame = 7, +} ZSTD_dStage; + +typedef void * (*ZSTD_allocFunction)(void *, size_t); + +typedef void (*ZSTD_freeFunction)(void *, void *); + +typedef struct { + ZSTD_allocFunction customAlloc; + ZSTD_freeFunction customFree; + void *opaque; +} ZSTD_customMem; + +struct ZSTD_DCtx_s { + const FSE_DTable *LLTptr; + const FSE_DTable *MLTptr; + const FSE_DTable *OFTptr; + const HUF_DTable *HUFptr; + ZSTD_entropyTables_t entropy; + const void *previousDstEnd; + const void *base; + const void *vBase; + const void *dictEnd; + size_t expected; + ZSTD_frameParams fParams; + blockType_e bType; + ZSTD_dStage stage; + U32 litEntropy; + U32 fseEntropy; + struct xxh64_state xxhState; + size_t headerSize; + U32 dictID; + const BYTE *litPtr; + ZSTD_customMem customMem; + size_t litSize; + size_t rleSize; + BYTE litBuffer[131080]; + BYTE headerBuffer[18]; +}; + +typedef struct ZSTD_DCtx_s ZSTD_DCtx; + +struct ZSTD_DDict_s { + void *dictBuffer; + const void *dictContent; + size_t dictSize; + ZSTD_entropyTables_t entropy; + U32 dictID; + U32 entropyPresent; + ZSTD_customMem cMem; +}; + +typedef struct ZSTD_DDict_s ZSTD_DDict; + +typedef enum { + zdss_init = 0, + zdss_loadHeader = 1, + zdss_read = 2, + zdss_load = 3, + zdss_flush = 4, +} ZSTD_dStreamStage; + +struct ZSTD_DStream_s___2 { + ZSTD_DCtx *dctx; + ZSTD_DDict *ddictLocal; + const ZSTD_DDict *ddict; + ZSTD_frameParams fParams; + ZSTD_dStreamStage stage; + char *inBuff; + size_t inBuffSize; + size_t inPos; + size_t maxWindowSize; + char *outBuff; + size_t outBuffSize; + size_t outStart; + size_t outEnd; + size_t blockSize; + BYTE headerBuffer[18]; + size_t lhSize; + ZSTD_customMem customMem; + void *legacyContext; + U32 previousLegacyVersion; + U32 legacyVersion; + U32 hostageByte; +}; + +typedef struct ZSTD_DStream_s___2 ZSTD_DStream___2; + +typedef enum { + ZSTDnit_frameHeader = 0, + ZSTDnit_blockHeader = 1, + ZSTDnit_block = 2, + ZSTDnit_lastBlock = 3, + ZSTDnit_checksum = 4, + ZSTDnit_skippableFrame = 5, +} ZSTD_nextInputType_e; + +typedef int16_t S16; + +typedef uintptr_t uPtrDiff; + +typedef struct { + size_t state; + const void *table; +} FSE_DState_t; + +typedef struct { + U16 tableLog; + U16 fastMode; +} FSE_DTableHeader; + +typedef struct { + short unsigned int newState; + unsigned char symbol; + unsigned char nbBits; +} FSE_decode_t; + +typedef enum { + set_basic = 0, + set_rle = 1, + set_compressed = 2, + set_repeat = 3, +} symbolEncodingType_e; + +typedef struct { + blockType_e blockType; + U32 lastBlock; + U32 origSize; +} blockProperties_t; + +typedef union { + FSE_decode_t realData; + U32 alignedBy4; +} FSE_decode_t4; + +typedef struct { + size_t litLength; + size_t 
matchLength; + size_t offset; + const BYTE *match; +} seq_t; + +typedef struct { + BIT_DStream_t DStream; + FSE_DState_t stateLL; + FSE_DState_t stateOffb; + FSE_DState_t stateML; + size_t prevOffset[3]; + const BYTE *base; + size_t pos; + uPtrDiff gotoDict; +} seqState_t; + +typedef struct { + void *ptr; + const void *end; +} ZSTD_stack; + +typedef uint64_t vli_type; + +enum xz_check { + XZ_CHECK_NONE = 0, + XZ_CHECK_CRC32 = 1, + XZ_CHECK_CRC64 = 4, + XZ_CHECK_SHA256 = 10, +}; + +struct xz_dec_hash { + vli_type unpadded; + vli_type uncompressed; + uint32_t crc32; +}; + +struct xz_dec_lzma2; + +struct xz_dec_bcj; + +struct xz_dec___2 { + enum { + SEQ_STREAM_HEADER = 0, + SEQ_BLOCK_START = 1, + SEQ_BLOCK_HEADER = 2, + SEQ_BLOCK_UNCOMPRESS = 3, + SEQ_BLOCK_PADDING = 4, + SEQ_BLOCK_CHECK = 5, + SEQ_INDEX = 6, + SEQ_INDEX_PADDING = 7, + SEQ_INDEX_CRC32 = 8, + SEQ_STREAM_FOOTER = 9, + } sequence; + uint32_t pos; + vli_type vli; + size_t in_start; + size_t out_start; + uint32_t crc32; + enum xz_check check_type; + enum xz_mode mode; + bool allow_buf_error; + struct { + vli_type compressed; + vli_type uncompressed; + uint32_t size; + } block_header; + struct { + vli_type compressed; + vli_type uncompressed; + vli_type count; + struct xz_dec_hash hash; + } block; + struct { + enum { + SEQ_INDEX_COUNT = 0, + SEQ_INDEX_UNPADDED = 1, + SEQ_INDEX_UNCOMPRESSED = 2, + } sequence; + vli_type size; + vli_type count; + struct xz_dec_hash hash; + } index; + struct { + size_t pos; + size_t size; + uint8_t buf[1024]; + } temp; + struct xz_dec_lzma2 *lzma2; + struct xz_dec_bcj *bcj; + bool bcj_active; +}; + +enum lzma_state { + STATE_LIT_LIT = 0, + STATE_MATCH_LIT_LIT = 1, + STATE_REP_LIT_LIT = 2, + STATE_SHORTREP_LIT_LIT = 3, + STATE_MATCH_LIT = 4, + STATE_REP_LIT = 5, + STATE_SHORTREP_LIT = 6, + STATE_LIT_MATCH = 7, + STATE_LIT_LONGREP = 8, + STATE_LIT_SHORTREP = 9, + STATE_NONLIT_MATCH = 10, + STATE_NONLIT_REP = 11, +}; + +struct dictionary { + uint8_t *buf; + size_t start; + size_t pos; + size_t full; + size_t limit; + size_t end; + uint32_t size; + uint32_t size_max; + uint32_t allocated; + enum xz_mode mode; +}; + +struct rc_dec { + uint32_t range; + uint32_t code; + uint32_t init_bytes_left; + const uint8_t *in; + size_t in_pos; + size_t in_limit; +}; + +struct lzma_len_dec { + uint16_t choice; + uint16_t choice2; + uint16_t low[128]; + uint16_t mid[128]; + uint16_t high[256]; +}; + +struct lzma_dec { + uint32_t rep0; + uint32_t rep1; + uint32_t rep2; + uint32_t rep3; + enum lzma_state state; + uint32_t len; + uint32_t lc; + uint32_t literal_pos_mask; + uint32_t pos_mask; + uint16_t is_match[192]; + uint16_t is_rep[12]; + uint16_t is_rep0[12]; + uint16_t is_rep1[12]; + uint16_t is_rep2[12]; + uint16_t is_rep0_long[192]; + uint16_t dist_slot[256]; + uint16_t dist_special[114]; + uint16_t dist_align[16]; + struct lzma_len_dec match_len_dec; + struct lzma_len_dec rep_len_dec; + uint16_t literal[12288]; +}; + +enum lzma2_seq { + SEQ_CONTROL = 0, + SEQ_UNCOMPRESSED_1 = 1, + SEQ_UNCOMPRESSED_2 = 2, + SEQ_COMPRESSED_0 = 3, + SEQ_COMPRESSED_1 = 4, + SEQ_PROPERTIES = 5, + SEQ_LZMA_PREPARE = 6, + SEQ_LZMA_RUN = 7, + SEQ_COPY = 8, +}; + +struct lzma2_dec { + enum lzma2_seq sequence; + enum lzma2_seq next_sequence; + uint32_t uncompressed; + uint32_t compressed; + bool need_dict_reset; + bool need_props; +}; + +struct xz_dec_lzma2___2 { + struct rc_dec rc; + struct dictionary dict; + struct lzma2_dec lzma2; + struct lzma_dec lzma; + struct { + uint32_t size; + uint8_t buf[63]; + } temp; +}; + +struct xz_dec_bcj___2 
{ + enum { + BCJ_X86 = 4, + BCJ_POWERPC = 5, + BCJ_IA64 = 6, + BCJ_ARM = 7, + BCJ_ARMTHUMB = 8, + BCJ_SPARC = 9, + } type; + enum xz_ret ret; + bool single_call; + uint32_t pos; + uint32_t x86_prev_mask; + uint8_t *out; + size_t out_pos; + size_t out_size; + struct { + size_t filtered; + size_t size; + uint8_t buf[16]; + } temp; +}; + +struct ts_state { + unsigned int offset; + char cb[48]; +}; + +struct ts_config; + +struct ts_ops { + const char *name; + struct ts_config * (*init)(const void *, unsigned int, gfp_t, int); + unsigned int (*find)(struct ts_config *, struct ts_state *); + void (*destroy)(struct ts_config *); + void * (*get_pattern)(struct ts_config *); + unsigned int (*get_pattern_len)(struct ts_config *); + struct module *owner; + struct list_head list; +}; + +struct ts_config { + struct ts_ops *ops; + int flags; + unsigned int (*get_next_block)(unsigned int, const u8 **, struct ts_config *, struct ts_state *); + void (*finish)(struct ts_config *, struct ts_state *); +}; + +struct ts_linear_state { + unsigned int len; + const void *data; +}; + +struct ei_entry { + struct list_head list; + long unsigned int start_addr; + long unsigned int end_addr; + int etype; + void *priv; +}; + +struct ddebug_table { + struct list_head link; + const char *mod_name; + unsigned int num_ddebugs; + struct _ddebug *ddebugs; +}; + +struct ddebug_query { + const char *filename; + const char *module; + const char *function; + const char *format; + unsigned int first_lineno; + unsigned int last_lineno; +}; + +struct ddebug_iter { + struct ddebug_table *table; + unsigned int idx; +}; + +struct flag_settings { + unsigned int flags; + unsigned int mask; +}; + +struct flagsbuf { + char buf[7]; +}; + +struct nla_bitfield32 { + __u32 value; + __u32 selector; +}; + +enum nla_policy_validation { + NLA_VALIDATE_NONE = 0, + NLA_VALIDATE_RANGE = 1, + NLA_VALIDATE_RANGE_WARN_TOO_LONG = 2, + NLA_VALIDATE_MIN = 3, + NLA_VALIDATE_MAX = 4, + NLA_VALIDATE_MASK = 5, + NLA_VALIDATE_RANGE_PTR = 6, + NLA_VALIDATE_FUNCTION = 7, +}; + +enum netlink_validation { + NL_VALIDATE_LIBERAL = 0, + NL_VALIDATE_TRAILING = 1, + NL_VALIDATE_MAXTYPE = 2, + NL_VALIDATE_UNSPEC = 4, + NL_VALIDATE_STRICT_ATTRS = 8, + NL_VALIDATE_NESTED = 16, +}; + +struct cpu_rmap { + struct kref refcount; + u16 size; + u16 used; + void **obj; + struct { + u16 index; + u16 dist; + } near[0]; +}; + +struct irq_glue { + struct irq_affinity_notify notify; + struct cpu_rmap *rmap; + u16 index; +}; + +typedef mpi_limb_t *mpi_ptr_t; + +typedef int mpi_size_t; + +typedef mpi_limb_t UWtype; + +typedef unsigned int UHWtype; + +enum gcry_mpi_constants { + MPI_C_ZERO = 0, + MPI_C_ONE = 1, + MPI_C_TWO = 2, + MPI_C_THREE = 3, + MPI_C_FOUR = 4, + MPI_C_EIGHT = 5, +}; + +struct barrett_ctx_s; + +typedef struct barrett_ctx_s *mpi_barrett_t; + +struct gcry_mpi_point { + MPI x; + MPI y; + MPI z; +}; + +typedef struct gcry_mpi_point *MPI_POINT; + +enum gcry_mpi_ec_models { + MPI_EC_WEIERSTRASS = 0, + MPI_EC_MONTGOMERY = 1, + MPI_EC_EDWARDS = 2, +}; + +enum ecc_dialects { + ECC_DIALECT_STANDARD = 0, + ECC_DIALECT_ED25519 = 1, + ECC_DIALECT_SAFECURVE = 2, +}; + +struct mpi_ec_ctx { + enum gcry_mpi_ec_models model; + enum ecc_dialects dialect; + int flags; + unsigned int nbits; + MPI p; + MPI a; + MPI b; + MPI_POINT G; + MPI n; + unsigned int h; + MPI_POINT Q; + MPI d; + const char *name; + struct { + struct { + unsigned int a_is_pminus3: 1; + unsigned int two_inv_p: 1; + } valid; + int a_is_pminus3; + MPI two_inv_p; + mpi_barrett_t p_barrett; + MPI scratch[11]; + } t; + void 
(*addm)(MPI, MPI, MPI, struct mpi_ec_ctx *); + void (*subm)(MPI, MPI, MPI, struct mpi_ec_ctx *); + void (*mulm)(MPI, MPI, MPI, struct mpi_ec_ctx *); + void (*pow2)(MPI, const MPI, struct mpi_ec_ctx *); + void (*mul2)(MPI, MPI, struct mpi_ec_ctx *); +}; + +struct field_table { + const char *p; + void (*addm)(MPI, MPI, MPI, struct mpi_ec_ctx *); + void (*subm)(MPI, MPI, MPI, struct mpi_ec_ctx *); + void (*mulm)(MPI, MPI, MPI, struct mpi_ec_ctx *); + void (*mul2)(MPI, MPI, struct mpi_ec_ctx *); + void (*pow2)(MPI, const MPI, struct mpi_ec_ctx *); +}; + +enum gcry_mpi_format { + GCRYMPI_FMT_NONE = 0, + GCRYMPI_FMT_STD = 1, + GCRYMPI_FMT_PGP = 2, + GCRYMPI_FMT_SSH = 3, + GCRYMPI_FMT_HEX = 4, + GCRYMPI_FMT_USG = 5, + GCRYMPI_FMT_OPAQUE = 8, +}; + +struct barrett_ctx_s___2; + +typedef struct barrett_ctx_s___2 *mpi_barrett_t___2; + +struct barrett_ctx_s___2 { + MPI m; + int m_copied; + int k; + MPI y; + MPI r1; + MPI r2; + MPI r3; +}; + +struct karatsuba_ctx { + struct karatsuba_ctx *next; + mpi_ptr_t tspace; + mpi_size_t tspace_size; + mpi_ptr_t tp; + mpi_size_t tp_size; +}; + +typedef long int mpi_limb_signed_t; + +enum dim_tune_state { + DIM_PARKING_ON_TOP = 0, + DIM_PARKING_TIRED = 1, + DIM_GOING_RIGHT = 2, + DIM_GOING_LEFT = 3, +}; + +struct dim_cq_moder { + u16 usec; + u16 pkts; + u16 comps; + u8 cq_period_mode; +}; + +enum dim_cq_period_mode { + DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0, + DIM_CQ_PERIOD_MODE_START_FROM_CQE = 1, + DIM_CQ_PERIOD_NUM_MODES = 2, +}; + +enum dim_state { + DIM_START_MEASURE = 0, + DIM_MEASURE_IN_PROGRESS = 1, + DIM_APPLY_NEW_PROFILE = 2, +}; + +enum dim_stats_state { + DIM_STATS_WORSE = 0, + DIM_STATS_SAME = 1, + DIM_STATS_BETTER = 2, +}; + +enum dim_step_result { + DIM_STEPPED = 0, + DIM_TOO_TIRED = 1, + DIM_ON_EDGE = 2, +}; + +enum pubkey_algo { + PUBKEY_ALGO_RSA = 0, + PUBKEY_ALGO_MAX = 1, +}; + +struct pubkey_hdr { + uint8_t version; + uint32_t timestamp; + uint8_t algo; + uint8_t nmpi; + char mpi[0]; +} __attribute__((packed)); + +struct signature_hdr { + uint8_t version; + uint32_t timestamp; + uint8_t algo; + uint8_t hash; + uint8_t keyid[8]; + uint8_t nmpi; + char mpi[0]; +} __attribute__((packed)); + +struct sg_pool { + size_t size; + char *name; + struct kmem_cache *slab; + mempool_t *pool; +}; + +enum { + IRQ_POLL_F_SCHED = 0, + IRQ_POLL_F_DISABLE = 1, +}; + +struct font_desc { + int idx; + const char *name; + unsigned int width; + unsigned int height; + unsigned int charcount; + const void *data; + int pref; +}; + +struct font_data { + unsigned int extra[4]; + const unsigned char data[0]; +}; + +struct pldmfw_record { + struct list_head entry; + struct list_head descs; + const u8 *version_string; + u8 version_type; + u8 version_len; + u16 package_data_len; + u32 device_update_flags; + const u8 *package_data; + long unsigned int *component_bitmap; + u16 component_bitmap_len; +}; + +struct pldmfw_desc_tlv { + struct list_head entry; + const u8 *data; + u16 type; + u16 size; +}; + +struct pldmfw_component { + struct list_head entry; + u16 classification; + u16 identifier; + u16 options; + u16 activation_method; + u32 comparison_stamp; + u32 component_size; + const u8 *component_data; + const u8 *version_string; + u8 version_type; + u8 version_len; + u8 index; +}; + +struct pldmfw_ops; + +struct pldmfw { + const struct pldmfw_ops *ops; + struct device *dev; +}; + +struct pldmfw_ops { + bool (*match_record)(struct pldmfw *, struct pldmfw_record *); + int (*send_package_data)(struct pldmfw *, const u8 *, u16); + int (*send_component_table)(struct pldmfw *, 
struct pldmfw_component *, u8); + int (*flash_component)(struct pldmfw *, struct pldmfw_component *); + int (*finalize_update)(struct pldmfw *); +}; + +struct __pldm_timestamp { + u8 b[13]; +}; + +struct __pldm_header { + uuid_t id; + u8 revision; + __le16 size; + struct __pldm_timestamp release_date; + __le16 component_bitmap_len; + u8 version_type; + u8 version_len; + u8 version_string[0]; +} __attribute__((packed)); + +struct __pldmfw_record_info { + __le16 record_len; + u8 descriptor_count; + __le32 device_update_flags; + u8 version_type; + u8 version_len; + __le16 package_data_len; + u8 variable_record_data[0]; +} __attribute__((packed)); + +struct __pldmfw_desc_tlv { + __le16 type; + __le16 size; + u8 data[0]; +}; + +struct __pldmfw_record_area { + u8 record_count; + u8 records[0]; +}; + +struct __pldmfw_component_info { + __le16 classification; + __le16 identifier; + __le32 comparison_stamp; + __le16 options; + __le16 activation_method; + __le32 location_offset; + __le32 size; + u8 version_type; + u8 version_len; + u8 version_string[0]; +} __attribute__((packed)); + +struct __pldmfw_component_area { + __le16 component_image_count; + u8 components[0]; +}; + +struct pldmfw_priv { + struct pldmfw *context; + const struct firmware *fw; + size_t offset; + struct list_head records; + struct list_head components; + const struct __pldm_header *header; + u16 total_header_size; + u16 component_bitmap_len; + u16 bitmap_size; + u16 component_count; + const u8 *component_start; + const u8 *record_start; + u8 record_count; + u32 header_crc; + struct pldmfw_record *matching_record; +}; + +struct pldm_pci_record_id { + int vendor; + int device; + int subsystem_vendor; + int subsystem_device; +}; + +struct msr { + union { + struct { + u32 l; + u32 h; + }; + u64 q; + }; +}; + +struct msr_info { + u32 msr_no; + struct msr reg; + struct msr *msrs; + int err; +}; + +struct msr_regs_info { + u32 *regs; + int err; +}; + +struct msr_info_completion { + struct msr_info msr; + struct completion done; +}; + +struct trace_event_raw_msr_trace_class { + struct trace_entry ent; + unsigned int msr; + u64 val; + int failed; + char __data[0]; +}; + +struct trace_event_data_offsets_msr_trace_class {}; + +typedef void (*btf_trace_read_msr)(void *, unsigned int, u64, int); + +typedef void (*btf_trace_write_msr)(void *, unsigned int, u64, int); + +typedef void (*btf_trace_rdpmc)(void *, unsigned int, u64, int); + +struct warn_args___2; + +struct compress_format { + unsigned char magic[2]; + const char *name; + decompress_fn decompressor; +}; + +struct group_data { + int limit[21]; + int base[20]; + int permute[258]; + int minLen; + int maxLen; +}; + +struct bunzip_data { + int writeCopies; + int writePos; + int writeRunCountdown; + int writeCount; + int writeCurrent; + long int (*fill)(void *, long unsigned int); + long int inbufCount; + long int inbufPos; + unsigned char *inbuf; + unsigned int inbufBitCount; + unsigned int inbufBits; + unsigned int crc32Table[256]; + unsigned int headerCRC; + unsigned int totalCRC; + unsigned int writeCRC; + unsigned int *dbuf; + unsigned int dbufSize; + unsigned char selectors[32768]; + struct group_data groups[6]; + int io_error; + int byteCount[256]; + unsigned char symToByte[256]; + unsigned char mtfSymbol[256]; +}; + +struct rc { + long int (*fill)(void *, long unsigned int); + uint8_t *ptr; + uint8_t *buffer; + uint8_t *buffer_end; + long int buffer_size; + uint32_t code; + uint32_t range; + uint32_t bound; + void (*error)(char *); +}; + +struct lzma_header { + uint8_t pos; + 
uint32_t dict_size; + uint64_t dst_size; +} __attribute__((packed)); + +struct writer { + uint8_t *buffer; + uint8_t previous_byte; + size_t buffer_pos; + int bufsize; + size_t global_pos; + long int (*flush)(void *, long unsigned int); + struct lzma_header *header; +}; + +struct cstate { + int state; + uint32_t rep0; + uint32_t rep1; + uint32_t rep2; + uint32_t rep3; +}; + +struct ZSTD_DCtx_s___2; + +typedef struct ZSTD_DCtx_s___2 ZSTD_DCtx___2; + +enum cpio_fields { + C_MAGIC = 0, + C_INO = 1, + C_MODE = 2, + C_UID = 3, + C_GID = 4, + C_NLINK = 5, + C_MTIME = 6, + C_FILESIZE = 7, + C_MAJ = 8, + C_MIN = 9, + C_RMAJ = 10, + C_RMIN = 11, + C_NAMESIZE = 12, + C_CHKSUM = 13, + C_NFIELDS = 14, +}; + +struct fprop_local_single { + long unsigned int events; + unsigned int period; + raw_spinlock_t lock; +}; + +struct ida_bitmap { + long unsigned int bitmap[16]; +}; + +struct klist_waiter { + struct list_head list; + struct klist_node *node; + struct task_struct *process; + int woken; +}; + +struct uevent_sock { + struct list_head list; + struct sock *sk; +}; + +enum { + LOGIC_PIO_INDIRECT = 0, + LOGIC_PIO_CPU_MMIO = 1, +}; + +struct logic_pio_host_ops; + +struct logic_pio_hwaddr { + struct list_head list; + struct fwnode_handle *fwnode; + resource_size_t hw_start; + resource_size_t io_start; + resource_size_t size; + long unsigned int flags; + void *hostdata; + const struct logic_pio_host_ops *ops; +}; + +struct logic_pio_host_ops { + u32 (*in)(void *, long unsigned int, size_t); + void (*out)(void *, long unsigned int, u32, size_t); + u32 (*ins)(void *, long unsigned int, void *, size_t, unsigned int); + void (*outs)(void *, long unsigned int, const void *, size_t, unsigned int); +}; + +struct radix_tree_preload { + unsigned int nr; + struct xa_node *nodes; +}; + +typedef struct { + long unsigned int key[2]; +} hsiphash_key_t; + +struct clk_core; + +struct clk { + struct clk_core *core; + struct device *dev; + const char *dev_id; + const char *con_id; + long unsigned int min_rate; + long unsigned int max_rate; + unsigned int exclusive_count; + struct hlist_node clks_node; +}; + +enum format_type { + FORMAT_TYPE_NONE = 0, + FORMAT_TYPE_WIDTH = 1, + FORMAT_TYPE_PRECISION = 2, + FORMAT_TYPE_CHAR = 3, + FORMAT_TYPE_STR = 4, + FORMAT_TYPE_PTR = 5, + FORMAT_TYPE_PERCENT_CHAR = 6, + FORMAT_TYPE_INVALID = 7, + FORMAT_TYPE_LONG_LONG = 8, + FORMAT_TYPE_ULONG = 9, + FORMAT_TYPE_LONG = 10, + FORMAT_TYPE_UBYTE = 11, + FORMAT_TYPE_BYTE = 12, + FORMAT_TYPE_USHORT = 13, + FORMAT_TYPE_SHORT = 14, + FORMAT_TYPE_UINT = 15, + FORMAT_TYPE_INT = 16, + FORMAT_TYPE_SIZE_T = 17, + FORMAT_TYPE_PTRDIFF = 18, +}; + +struct printf_spec { + unsigned int type: 8; + int field_width: 24; + unsigned int flags: 8; + unsigned int base: 8; + int precision: 16; +}; + +struct page_flags_fields { + int width; + int shift; + int mask; + const struct printf_spec *spec; + const char *name; +}; + +struct minmax_sample { + u32 t; + u32 v; +}; + +struct minmax { + struct minmax_sample s[3]; +}; + +enum { + st_wordstart = 0, + st_wordcmp = 1, + st_wordskip = 2, + st_bufcpy = 3, +}; + +enum { + st_wordstart___2 = 0, + st_wordcmp___2 = 1, + st_wordskip___2 = 2, +}; + +struct in6_addr___2; + +enum reg_type { + REG_TYPE_RM = 0, + REG_TYPE_REG = 1, + REG_TYPE_INDEX = 2, + REG_TYPE_BASE = 3, +}; + +enum device_link_state { + DL_STATE_NONE = 4294967295, + DL_STATE_DORMANT = 0, + DL_STATE_AVAILABLE = 1, + DL_STATE_CONSUMER_PROBE = 2, + DL_STATE_ACTIVE = 3, + DL_STATE_SUPPLIER_UNBIND = 4, +}; + +struct device_link { + struct device *supplier; + 
struct list_head s_node; + struct device *consumer; + struct list_head c_node; + struct device link_dev; + enum device_link_state status; + u32 flags; + refcount_t rpm_active; + struct kref kref; + struct work_struct rm_work; + bool supplier_preactivated; +}; + +struct phy_configure_opts_dp { + unsigned int link_rate; + unsigned int lanes; + unsigned int voltage[4]; + unsigned int pre[4]; + u8 ssc: 1; + u8 set_rate: 1; + u8 set_lanes: 1; + u8 set_voltages: 1; +}; + +struct phy_configure_opts_mipi_dphy { + unsigned int clk_miss; + unsigned int clk_post; + unsigned int clk_pre; + unsigned int clk_prepare; + unsigned int clk_settle; + unsigned int clk_term_en; + unsigned int clk_trail; + unsigned int clk_zero; + unsigned int d_term_en; + unsigned int eot; + unsigned int hs_exit; + unsigned int hs_prepare; + unsigned int hs_settle; + unsigned int hs_skip; + unsigned int hs_trail; + unsigned int hs_zero; + unsigned int init; + unsigned int lpx; + unsigned int ta_get; + unsigned int ta_go; + unsigned int ta_sure; + unsigned int wakeup; + long unsigned int hs_clk_rate; + long unsigned int lp_clk_rate; + unsigned char lanes; +}; + +enum phy_mode { + PHY_MODE_INVALID = 0, + PHY_MODE_USB_HOST = 1, + PHY_MODE_USB_HOST_LS = 2, + PHY_MODE_USB_HOST_FS = 3, + PHY_MODE_USB_HOST_HS = 4, + PHY_MODE_USB_HOST_SS = 5, + PHY_MODE_USB_DEVICE = 6, + PHY_MODE_USB_DEVICE_LS = 7, + PHY_MODE_USB_DEVICE_FS = 8, + PHY_MODE_USB_DEVICE_HS = 9, + PHY_MODE_USB_DEVICE_SS = 10, + PHY_MODE_USB_OTG = 11, + PHY_MODE_UFS_HS_A = 12, + PHY_MODE_UFS_HS_B = 13, + PHY_MODE_PCIE = 14, + PHY_MODE_ETHERNET = 15, + PHY_MODE_MIPI_DPHY = 16, + PHY_MODE_SATA = 17, + PHY_MODE_LVDS = 18, + PHY_MODE_DP = 19, +}; + +enum phy_media { + PHY_MEDIA_DEFAULT = 0, + PHY_MEDIA_SR = 1, + PHY_MEDIA_DAC = 2, +}; + +union phy_configure_opts { + struct phy_configure_opts_mipi_dphy mipi_dphy; + struct phy_configure_opts_dp dp; +}; + +struct phy; + +struct phy_ops { + int (*init)(struct phy *); + int (*exit)(struct phy *); + int (*power_on)(struct phy *); + int (*power_off)(struct phy *); + int (*set_mode)(struct phy *, enum phy_mode, int); + int (*set_media)(struct phy *, enum phy_media); + int (*set_speed)(struct phy *, int); + int (*configure)(struct phy *, union phy_configure_opts *); + int (*validate)(struct phy *, enum phy_mode, int, union phy_configure_opts *); + int (*reset)(struct phy *); + int (*calibrate)(struct phy *); + void (*release)(struct phy *); + struct module *owner; +}; + +struct phy_attrs { + u32 bus_width; + u32 max_link_rate; + enum phy_mode mode; +}; + +struct regulator; + +struct phy { + struct device dev; + int id; + const struct phy_ops *ops; + struct mutex mutex; + int init_count; + int power_count; + struct phy_attrs attrs; + struct regulator *pwr; +}; + +struct phy_provider { + struct device *dev; + struct device_node *children; + struct module *owner; + struct list_head list; + struct phy * (*of_xlate)(struct device *, struct of_phandle_args *); +}; + +struct phy_lookup { + struct list_head node; + const char *dev_id; + const char *con_id; + struct phy *phy; +}; + +struct pinctrl; + +struct pinctrl_state; + +struct dev_pin_info { + struct pinctrl *p; + struct pinctrl_state *default_state; + struct pinctrl_state *init_state; + struct pinctrl_state *sleep_state; + struct pinctrl_state *idle_state; +}; + +struct pinctrl { + struct list_head node; + struct device *dev; + struct list_head states; + struct pinctrl_state *state; + struct list_head dt_maps; + struct kref users; +}; + +struct pinctrl_state { + struct list_head node; + 
const char *name; + struct list_head settings; +}; + +struct pinctrl_pin_desc { + unsigned int number; + const char *name; + void *drv_data; +}; + +struct gpio_chip; + +struct pinctrl_gpio_range { + struct list_head node; + const char *name; + unsigned int id; + unsigned int base; + unsigned int pin_base; + unsigned int npins; + const unsigned int *pins; + struct gpio_chip *gc; +}; + +struct gpio_irq_chip { + struct irq_chip *chip; + struct irq_domain *domain; + const struct irq_domain_ops *domain_ops; + struct fwnode_handle *fwnode; + struct irq_domain *parent_domain; + int (*child_to_parent_hwirq)(struct gpio_chip *, unsigned int, unsigned int, unsigned int *, unsigned int *); + void * (*populate_parent_alloc_arg)(struct gpio_chip *, unsigned int, unsigned int); + unsigned int (*child_offset_to_irq)(struct gpio_chip *, unsigned int); + struct irq_domain_ops child_irq_domain_ops; + irq_flow_handler_t handler; + unsigned int default_type; + struct lock_class_key *lock_key; + struct lock_class_key *request_key; + irq_flow_handler_t parent_handler; + void *parent_handler_data; + unsigned int num_parents; + unsigned int *parents; + unsigned int *map; + bool threaded; + int (*init_hw)(struct gpio_chip *); + void (*init_valid_mask)(struct gpio_chip *, long unsigned int *, unsigned int); + long unsigned int *valid_mask; + unsigned int first; + void (*irq_enable)(struct irq_data *); + void (*irq_disable)(struct irq_data *); + void (*irq_unmask)(struct irq_data *); + void (*irq_mask)(struct irq_data *); +}; + +struct gpio_device; + +struct gpio_chip { + const char *label; + struct gpio_device *gpiodev; + struct device *parent; + struct module *owner; + int (*request)(struct gpio_chip *, unsigned int); + void (*free)(struct gpio_chip *, unsigned int); + int (*get_direction)(struct gpio_chip *, unsigned int); + int (*direction_input)(struct gpio_chip *, unsigned int); + int (*direction_output)(struct gpio_chip *, unsigned int, int); + int (*get)(struct gpio_chip *, unsigned int); + int (*get_multiple)(struct gpio_chip *, long unsigned int *, long unsigned int *); + void (*set)(struct gpio_chip *, unsigned int, int); + void (*set_multiple)(struct gpio_chip *, long unsigned int *, long unsigned int *); + int (*set_config)(struct gpio_chip *, unsigned int, long unsigned int); + int (*to_irq)(struct gpio_chip *, unsigned int); + void (*dbg_show)(struct seq_file *, struct gpio_chip *); + int (*init_valid_mask)(struct gpio_chip *, long unsigned int *, unsigned int); + int (*add_pin_ranges)(struct gpio_chip *); + int base; + u16 ngpio; + const char * const *names; + bool can_sleep; + long unsigned int (*read_reg)(void *); + void (*write_reg)(void *, long unsigned int); + bool be_bits; + void *reg_dat; + void *reg_set; + void *reg_clr; + void *reg_dir_out; + void *reg_dir_in; + bool bgpio_dir_unreadable; + int bgpio_bits; + spinlock_t bgpio_lock; + long unsigned int bgpio_data; + long unsigned int bgpio_dir; + struct gpio_irq_chip irq; + long unsigned int *valid_mask; +}; + +struct pinctrl_dev; + +struct pinctrl_map; + +struct pinctrl_ops { + int (*get_groups_count)(struct pinctrl_dev *); + const char * (*get_group_name)(struct pinctrl_dev *, unsigned int); + int (*get_group_pins)(struct pinctrl_dev *, unsigned int, const unsigned int **, unsigned int *); + void (*pin_dbg_show)(struct pinctrl_dev *, struct seq_file *, unsigned int); + int (*dt_node_to_map)(struct pinctrl_dev *, struct device_node *, struct pinctrl_map **, unsigned int *); + void (*dt_free_map)(struct pinctrl_dev *, struct pinctrl_map *, 
unsigned int); +}; + +struct pinctrl_desc; + +struct pinctrl_dev { + struct list_head node; + struct pinctrl_desc *desc; + struct xarray pin_desc_tree; + struct list_head gpio_ranges; + struct device *dev; + struct module *owner; + void *driver_data; + struct pinctrl *p; + struct pinctrl_state *hog_default; + struct pinctrl_state *hog_sleep; + struct mutex mutex; + struct dentry *device_root; +}; + +enum pinctrl_map_type { + PIN_MAP_TYPE_INVALID = 0, + PIN_MAP_TYPE_DUMMY_STATE = 1, + PIN_MAP_TYPE_MUX_GROUP = 2, + PIN_MAP_TYPE_CONFIGS_PIN = 3, + PIN_MAP_TYPE_CONFIGS_GROUP = 4, +}; + +struct pinctrl_map_mux { + const char *group; + const char *function; +}; + +struct pinctrl_map_configs { + const char *group_or_pin; + long unsigned int *configs; + unsigned int num_configs; +}; + +struct pinctrl_map { + const char *dev_name; + const char *name; + enum pinctrl_map_type type; + const char *ctrl_dev_name; + union { + struct pinctrl_map_mux mux; + struct pinctrl_map_configs configs; + } data; +}; + +struct pinmux_ops; + +struct pinconf_ops; + +struct pinconf_generic_params; + +struct pin_config_item; + +struct pinctrl_desc { + const char *name; + const struct pinctrl_pin_desc *pins; + unsigned int npins; + const struct pinctrl_ops *pctlops; + const struct pinmux_ops *pmxops; + const struct pinconf_ops *confops; + struct module *owner; + unsigned int num_custom_params; + const struct pinconf_generic_params *custom_params; + const struct pin_config_item *custom_conf_items; + bool link_consumers; +}; + +struct pinmux_ops { + int (*request)(struct pinctrl_dev *, unsigned int); + int (*free)(struct pinctrl_dev *, unsigned int); + int (*get_functions_count)(struct pinctrl_dev *); + const char * (*get_function_name)(struct pinctrl_dev *, unsigned int); + int (*get_function_groups)(struct pinctrl_dev *, unsigned int, const char * const **, unsigned int *); + int (*set_mux)(struct pinctrl_dev *, unsigned int, unsigned int); + int (*gpio_request_enable)(struct pinctrl_dev *, struct pinctrl_gpio_range *, unsigned int); + void (*gpio_disable_free)(struct pinctrl_dev *, struct pinctrl_gpio_range *, unsigned int); + int (*gpio_set_direction)(struct pinctrl_dev *, struct pinctrl_gpio_range *, unsigned int, bool); + bool strict; +}; + +struct pinconf_ops { + bool is_generic; + int (*pin_config_get)(struct pinctrl_dev *, unsigned int, long unsigned int *); + int (*pin_config_set)(struct pinctrl_dev *, unsigned int, long unsigned int *, unsigned int); + int (*pin_config_group_get)(struct pinctrl_dev *, unsigned int, long unsigned int *); + int (*pin_config_group_set)(struct pinctrl_dev *, unsigned int, long unsigned int *, unsigned int); + void (*pin_config_dbg_show)(struct pinctrl_dev *, struct seq_file *, unsigned int); + void (*pin_config_group_dbg_show)(struct pinctrl_dev *, struct seq_file *, unsigned int); + void (*pin_config_config_dbg_show)(struct pinctrl_dev *, struct seq_file *, long unsigned int); +}; + +enum pin_config_param { + PIN_CONFIG_BIAS_BUS_HOLD = 0, + PIN_CONFIG_BIAS_DISABLE = 1, + PIN_CONFIG_BIAS_HIGH_IMPEDANCE = 2, + PIN_CONFIG_BIAS_PULL_DOWN = 3, + PIN_CONFIG_BIAS_PULL_PIN_DEFAULT = 4, + PIN_CONFIG_BIAS_PULL_UP = 5, + PIN_CONFIG_DRIVE_OPEN_DRAIN = 6, + PIN_CONFIG_DRIVE_OPEN_SOURCE = 7, + PIN_CONFIG_DRIVE_PUSH_PULL = 8, + PIN_CONFIG_DRIVE_STRENGTH = 9, + PIN_CONFIG_DRIVE_STRENGTH_UA = 10, + PIN_CONFIG_INPUT_DEBOUNCE = 11, + PIN_CONFIG_INPUT_ENABLE = 12, + PIN_CONFIG_INPUT_SCHMITT = 13, + PIN_CONFIG_INPUT_SCHMITT_ENABLE = 14, + PIN_CONFIG_MODE_LOW_POWER = 15, + PIN_CONFIG_MODE_PWM = 16, + 
PIN_CONFIG_OUTPUT_ENABLE = 17, + PIN_CONFIG_OUTPUT = 18, + PIN_CONFIG_PERSIST_STATE = 19, + PIN_CONFIG_POWER_SOURCE = 20, + PIN_CONFIG_SLEEP_HARDWARE_STATE = 21, + PIN_CONFIG_SLEW_RATE = 22, + PIN_CONFIG_SKEW_DELAY = 23, + PIN_CONFIG_END = 127, + PIN_CONFIG_MAX = 255, +}; + +struct pinconf_generic_params { + const char * const property; + enum pin_config_param param; + u32 default_value; +}; + +struct pin_config_item { + const enum pin_config_param param; + const char * const display; + const char * const format; + bool has_arg; +}; + +struct gpio_desc___2; + +struct gpio_device { + int id; + struct device dev; + struct cdev chrdev; + struct device *mockdev; + struct module *owner; + struct gpio_chip *chip; + struct gpio_desc___2 *descs; + int base; + u16 ngpio; + const char *label; + void *data; + struct list_head list; + struct blocking_notifier_head notifier; + struct list_head pin_ranges; +}; + +struct gpio_desc___2 { + struct gpio_device *gdev; + long unsigned int flags; + const char *label; + const char *name; + unsigned int debounce_period_us; +}; + +struct pinctrl_setting_mux { + unsigned int group; + unsigned int func; +}; + +struct pinctrl_setting_configs { + unsigned int group_or_pin; + long unsigned int *configs; + unsigned int num_configs; +}; + +struct pinctrl_setting { + struct list_head node; + enum pinctrl_map_type type; + struct pinctrl_dev *pctldev; + const char *dev_name; + union { + struct pinctrl_setting_mux mux; + struct pinctrl_setting_configs configs; + } data; +}; + +struct pin_desc { + struct pinctrl_dev *pctldev; + const char *name; + bool dynamic_name; + void *drv_data; + unsigned int mux_usecount; + const char *mux_owner; + const struct pinctrl_setting_mux *mux_setting; + const char *gpio_owner; +}; + +struct pinctrl_maps { + struct list_head node; + const struct pinctrl_map *maps; + unsigned int num_maps; +}; + +struct pctldev; + +struct amd_pingroup { + const char *name; + const unsigned int *pins; + unsigned int npins; +}; + +struct amd_gpio { + raw_spinlock_t lock; + void *base; + const struct amd_pingroup *groups; + u32 ngroups; + struct pinctrl_dev *pctrl; + struct gpio_chip gc; + unsigned int hwbank_num; + struct resource *res; + struct platform_device *pdev; + u32 *saved_regs; +}; + +enum regcache_type { + REGCACHE_NONE = 0, + REGCACHE_RBTREE = 1, + REGCACHE_COMPRESSED = 2, + REGCACHE_FLAT = 3, +}; + +struct reg_default { + unsigned int reg; + unsigned int def; +}; + +enum regmap_endian { + REGMAP_ENDIAN_DEFAULT = 0, + REGMAP_ENDIAN_BIG = 1, + REGMAP_ENDIAN_LITTLE = 2, + REGMAP_ENDIAN_NATIVE = 3, +}; + +struct regmap_range { + unsigned int range_min; + unsigned int range_max; +}; + +struct regmap_access_table { + const struct regmap_range *yes_ranges; + unsigned int n_yes_ranges; + const struct regmap_range *no_ranges; + unsigned int n_no_ranges; +}; + +typedef void (*regmap_lock)(void *); + +typedef void (*regmap_unlock)(void *); + +struct regmap_range_cfg; + +struct regmap_config { + const char *name; + int reg_bits; + int reg_stride; + int pad_bits; + int val_bits; + bool (*writeable_reg)(struct device *, unsigned int); + bool (*readable_reg)(struct device *, unsigned int); + bool (*volatile_reg)(struct device *, unsigned int); + bool (*precious_reg)(struct device *, unsigned int); + bool (*writeable_noinc_reg)(struct device *, unsigned int); + bool (*readable_noinc_reg)(struct device *, unsigned int); + bool disable_locking; + regmap_lock lock; + regmap_unlock unlock; + void *lock_arg; + int (*reg_read)(void *, unsigned int, unsigned int *); + int 
(*reg_write)(void *, unsigned int, unsigned int); + bool fast_io; + unsigned int max_register; + const struct regmap_access_table *wr_table; + const struct regmap_access_table *rd_table; + const struct regmap_access_table *volatile_table; + const struct regmap_access_table *precious_table; + const struct regmap_access_table *wr_noinc_table; + const struct regmap_access_table *rd_noinc_table; + const struct reg_default *reg_defaults; + unsigned int num_reg_defaults; + enum regcache_type cache_type; + const void *reg_defaults_raw; + unsigned int num_reg_defaults_raw; + long unsigned int read_flag_mask; + long unsigned int write_flag_mask; + bool zero_flag_mask; + bool use_single_read; + bool use_single_write; + bool use_relaxed_mmio; + bool can_multi_write; + enum regmap_endian reg_format_endian; + enum regmap_endian val_format_endian; + const struct regmap_range_cfg *ranges; + unsigned int num_ranges; + bool use_hwlock; + unsigned int hwlock_id; + unsigned int hwlock_mode; + bool can_sleep; +}; + +struct regmap_range_cfg { + const char *name; + unsigned int range_min; + unsigned int range_max; + unsigned int selector_reg; + unsigned int selector_mask; + int selector_shift; + unsigned int window_start; + unsigned int window_len; +}; + +typedef int (*regmap_hw_write)(void *, const void *, size_t); + +typedef int (*regmap_hw_gather_write)(void *, const void *, size_t, const void *, size_t); + +struct regmap_async; + +typedef int (*regmap_hw_async_write)(void *, const void *, size_t, const void *, size_t, struct regmap_async *); + +typedef int (*regmap_hw_read)(void *, const void *, size_t, void *, size_t); + +typedef int (*regmap_hw_reg_read)(void *, unsigned int, unsigned int *); + +typedef int (*regmap_hw_reg_write)(void *, unsigned int, unsigned int); + +typedef int (*regmap_hw_reg_update_bits)(void *, unsigned int, unsigned int, unsigned int); + +typedef struct regmap_async * (*regmap_hw_async_alloc)(); + +typedef void (*regmap_hw_free_context)(void *); + +struct regmap_bus { + bool fast_io; + regmap_hw_write write; + regmap_hw_gather_write gather_write; + regmap_hw_async_write async_write; + regmap_hw_reg_write reg_write; + regmap_hw_reg_update_bits reg_update_bits; + regmap_hw_read read; + regmap_hw_reg_read reg_read; + regmap_hw_free_context free_context; + regmap_hw_async_alloc async_alloc; + u8 read_flag_mask; + enum regmap_endian reg_format_endian_default; + enum regmap_endian val_format_endian_default; + size_t max_raw_read; + size_t max_raw_write; +}; + +struct i2c_device_id { + char name[20]; + kernel_ulong_t driver_data; +}; + +struct software_node { + const char *name; + const struct software_node *parent; + const struct property_entry *properties; +}; + +struct i2c_msg { + __u16 addr; + __u16 flags; + __u16 len; + __u8 *buf; +}; + +union i2c_smbus_data { + __u8 byte; + __u16 word; + __u8 block[34]; +}; + +struct i2c_adapter; + +struct i2c_client { + short unsigned int flags; + short unsigned int addr; + char name[20]; + struct i2c_adapter *adapter; + struct device dev; + int init_irq; + int irq; + struct list_head detected; + void *devres_group_id; +}; + +enum i2c_alert_protocol { + I2C_PROTOCOL_SMBUS_ALERT = 0, + I2C_PROTOCOL_SMBUS_HOST_NOTIFY = 1, +}; + +struct i2c_board_info; + +struct i2c_driver { + unsigned int class; + int (*probe)(struct i2c_client *, const struct i2c_device_id *); + int (*remove)(struct i2c_client *); + int (*probe_new)(struct i2c_client *); + void (*shutdown)(struct i2c_client *); + void (*alert)(struct i2c_client *, enum i2c_alert_protocol, unsigned 
int); + int (*command)(struct i2c_client *, unsigned int, void *); + struct device_driver driver; + const struct i2c_device_id *id_table; + int (*detect)(struct i2c_client *, struct i2c_board_info *); + const short unsigned int *address_list; + struct list_head clients; +}; + +struct i2c_board_info { + char type[20]; + short unsigned int flags; + short unsigned int addr; + const char *dev_name; + void *platform_data; + struct device_node *of_node; + struct fwnode_handle *fwnode; + const struct software_node *swnode; + const struct resource *resources; + unsigned int num_resources; + int irq; +}; + +struct i2c_algorithm; + +struct i2c_lock_operations; + +struct i2c_bus_recovery_info; + +struct i2c_adapter_quirks; + +struct i2c_adapter { + struct module *owner; + unsigned int class; + const struct i2c_algorithm *algo; + void *algo_data; + const struct i2c_lock_operations *lock_ops; + struct rt_mutex bus_lock; + struct rt_mutex mux_lock; + int timeout; + int retries; + struct device dev; + long unsigned int locked_flags; + int nr; + char name[48]; + struct completion dev_released; + struct mutex userspace_clients_lock; + struct list_head userspace_clients; + struct i2c_bus_recovery_info *bus_recovery_info; + const struct i2c_adapter_quirks *quirks; + struct irq_domain *host_notify_domain; +}; + +struct i2c_algorithm { + int (*master_xfer)(struct i2c_adapter *, struct i2c_msg *, int); + int (*master_xfer_atomic)(struct i2c_adapter *, struct i2c_msg *, int); + int (*smbus_xfer)(struct i2c_adapter *, u16, short unsigned int, char, u8, int, union i2c_smbus_data *); + int (*smbus_xfer_atomic)(struct i2c_adapter *, u16, short unsigned int, char, u8, int, union i2c_smbus_data *); + u32 (*functionality)(struct i2c_adapter *); +}; + +struct i2c_lock_operations { + void (*lock_bus)(struct i2c_adapter *, unsigned int); + int (*trylock_bus)(struct i2c_adapter *, unsigned int); + void (*unlock_bus)(struct i2c_adapter *, unsigned int); +}; + +struct i2c_bus_recovery_info { + int (*recover_bus)(struct i2c_adapter *); + int (*get_scl)(struct i2c_adapter *); + void (*set_scl)(struct i2c_adapter *, int); + int (*get_sda)(struct i2c_adapter *); + void (*set_sda)(struct i2c_adapter *, int); + int (*get_bus_free)(struct i2c_adapter *); + void (*prepare_recovery)(struct i2c_adapter *); + void (*unprepare_recovery)(struct i2c_adapter *); + struct gpio_desc *scl_gpiod; + struct gpio_desc *sda_gpiod; + struct pinctrl *pinctrl; + struct pinctrl_state *pins_default; + struct pinctrl_state *pins_gpio; +}; + +struct i2c_adapter_quirks { + u64 flags; + int max_num_msgs; + u16 max_write_len; + u16 max_read_len; + u16 max_comb_1st_msg_len; + u16 max_comb_2nd_msg_len; +}; + +enum { + SX150X_123 = 0, + SX150X_456 = 1, + SX150X_789 = 2, +}; + +enum { + SX150X_789_REG_MISC_AUTOCLEAR_OFF = 1, + SX150X_MAX_REGISTER = 173, + SX150X_IRQ_TYPE_EDGE_RISING = 1, + SX150X_IRQ_TYPE_EDGE_FALLING = 2, + SX150X_789_RESET_KEY1 = 18, + SX150X_789_RESET_KEY2 = 52, +}; + +struct sx150x_123_pri { + u8 reg_pld_mode; + u8 reg_pld_table0; + u8 reg_pld_table1; + u8 reg_pld_table2; + u8 reg_pld_table3; + u8 reg_pld_table4; + u8 reg_advanced; +}; + +struct sx150x_456_pri { + u8 reg_pld_mode; + u8 reg_pld_table0; + u8 reg_pld_table1; + u8 reg_pld_table2; + u8 reg_pld_table3; + u8 reg_pld_table4; + u8 reg_advanced; +}; + +struct sx150x_789_pri { + u8 reg_drain; + u8 reg_polarity; + u8 reg_clock; + u8 reg_misc; + u8 reg_reset; + u8 ngpios; +}; + +struct sx150x_device_data { + u8 model; + u8 reg_pullup; + u8 reg_pulldn; + u8 reg_dir; + u8 reg_data; + u8 
reg_irq_mask; + u8 reg_irq_src; + u8 reg_sense; + u8 ngpios; + union { + struct sx150x_123_pri x123; + struct sx150x_456_pri x456; + struct sx150x_789_pri x789; + } pri; + const struct pinctrl_pin_desc *pins; + unsigned int npins; +}; + +struct regmap; + +struct sx150x_pinctrl { + struct device *dev; + struct i2c_client *client; + struct pinctrl_dev *pctldev; + struct pinctrl_desc pinctrl_desc; + struct gpio_chip gpio; + struct irq_chip irq_chip; + struct regmap *regmap; + struct { + u32 sense; + u32 masked; + } irq; + struct mutex lock; + const struct sx150x_device_data *data; +}; + +struct intel_pingroup { + const char *name; + const unsigned int *pins; + size_t npins; + short unsigned int mode; + const unsigned int *modes; +}; + +struct intel_function { + const char *name; + const char * const *groups; + size_t ngroups; +}; + +struct intel_padgroup { + unsigned int reg_num; + unsigned int base; + unsigned int size; + int gpio_base; + unsigned int padown_num; +}; + +struct intel_community { + unsigned int barno; + unsigned int padown_offset; + unsigned int padcfglock_offset; + unsigned int hostown_offset; + unsigned int is_offset; + unsigned int ie_offset; + unsigned int features; + unsigned int pin_base; + size_t npins; + unsigned int gpp_size; + unsigned int gpp_num_padown_regs; + const struct intel_padgroup *gpps; + size_t ngpps; + const unsigned int *pad_map; + short unsigned int nirqs; + short unsigned int acpi_space_id; + void *regs; + void *pad_regs; +}; + +struct intel_pinctrl_soc_data { + const char *uid; + const struct pinctrl_pin_desc *pins; + size_t npins; + const struct intel_pingroup *groups; + size_t ngroups; + const struct intel_function *functions; + size_t nfunctions; + const struct intel_community *communities; + size_t ncommunities; +}; + +struct intel_community_context; + +struct intel_pad_context; + +struct intel_pinctrl_context { + struct intel_pad_context *pads; + struct intel_community_context *communities; +}; + +struct intel_pad_context { + u32 conf0; + u32 val; +}; + +struct intel_pinctrl { + struct device *dev; + raw_spinlock_t lock; + struct pinctrl_desc pctldesc; + struct pinctrl_dev *pctldev; + struct gpio_chip chip; + struct irq_chip irqchip; + const struct intel_pinctrl_soc_data *soc; + struct intel_community *communities; + size_t ncommunities; + struct intel_pinctrl_context context; + int irq; +}; + +typedef acpi_status (*acpi_adr_space_handler)(u32, acpi_physical_address, u32, u64 *, void *, void *); + +typedef acpi_status (*acpi_adr_space_setup)(acpi_handle, u32, void *, void **); + +struct intel_pad_context___2; + +struct intel_community_context___2; + +struct intel_pinctrl_context___2 { + struct intel_pad_context___2 *pads; + struct intel_community_context___2 *communities; +}; + +struct intel_pad_context___2 { + u32 padctrl0; + u32 padctrl1; +}; + +struct intel_community_context___2 { + unsigned int intr_lines[16]; + u32 saved_intmask; +}; + +struct intel_pinctrl___2 { + struct device *dev; + raw_spinlock_t lock; + struct pinctrl_desc pctldesc; + struct pinctrl_dev *pctldev; + struct gpio_chip chip; + struct irq_chip irqchip; + const struct intel_pinctrl_soc_data *soc; + struct intel_community *communities; + size_t ncommunities; + struct intel_pinctrl_context___2 context; + int irq; +}; + +enum { + INTEL_GPIO_BASE_ZERO = 4294967294, + INTEL_GPIO_BASE_NOMAP = 4294967295, + INTEL_GPIO_BASE_MATCH = 0, +}; + +struct intel_pad_context___3; + +struct intel_community_context___3; + +struct intel_pinctrl_context___3 { + struct intel_pad_context___3 
*pads; + struct intel_community_context___3 *communities; +}; + +struct intel_pad_context___3 { + u32 padcfg0; + u32 padcfg1; + u32 padcfg2; +}; + +struct intel_community_context___3 { + u32 *intmask; + u32 *hostown; +}; + +struct intel_pinctrl___3 { + struct device *dev; + raw_spinlock_t lock; + struct pinctrl_desc pctldesc; + struct pinctrl_dev *pctldev; + struct gpio_chip chip; + struct irq_chip irqchip; + const struct intel_pinctrl_soc_data *soc; + struct intel_community *communities; + size_t ncommunities; + struct intel_pinctrl_context___3 context; + int irq; +}; + +enum { + PAD_UNLOCKED = 0, + PAD_LOCKED = 1, + PAD_LOCKED_TX = 2, + PAD_LOCKED_FULL = 3, +}; + +struct gpio_pin_range { + struct list_head node; + struct pinctrl_dev *pctldev; + struct pinctrl_gpio_range range; +}; + +struct gpio_array; + +struct gpio_descs { + struct gpio_array *info; + unsigned int ndescs; + struct gpio_desc___2 *desc[0]; +}; + +struct gpio_array { + struct gpio_desc___2 **desc; + unsigned int size; + struct gpio_chip *chip; + long unsigned int *get_mask; + long unsigned int *set_mask; + long unsigned int invert_mask[0]; +}; + +enum gpiod_flags { + GPIOD_ASIS = 0, + GPIOD_IN = 1, + GPIOD_OUT_LOW = 3, + GPIOD_OUT_HIGH = 7, + GPIOD_OUT_LOW_OPEN_DRAIN = 11, + GPIOD_OUT_HIGH_OPEN_DRAIN = 15, +}; + +enum gpio_lookup_flags { + GPIO_ACTIVE_HIGH = 0, + GPIO_ACTIVE_LOW = 1, + GPIO_OPEN_DRAIN = 2, + GPIO_OPEN_SOURCE = 4, + GPIO_PERSISTENT = 0, + GPIO_TRANSITORY = 8, + GPIO_PULL_UP = 16, + GPIO_PULL_DOWN = 32, + GPIO_LOOKUP_FLAGS_DEFAULT = 0, +}; + +struct gpiod_lookup { + const char *key; + u16 chip_hwnum; + const char *con_id; + unsigned int idx; + long unsigned int flags; +}; + +struct gpiod_lookup_table { + struct list_head list; + const char *dev_id; + struct gpiod_lookup table[0]; +}; + +struct gpiod_hog { + struct list_head list; + const char *chip_label; + u16 chip_hwnum; + const char *line_name; + long unsigned int lflags; + int dflags; +}; + +enum { + GPIOLINE_CHANGED_REQUESTED = 1, + GPIOLINE_CHANGED_RELEASED = 2, + GPIOLINE_CHANGED_CONFIG = 3, +}; + +struct acpi_gpio_info { + struct acpi_device *adev; + enum gpiod_flags flags; + bool gpioint; + int pin_config; + int polarity; + int triggering; + unsigned int debounce; + unsigned int quirks; +}; + +struct trace_event_raw_gpio_direction { + struct trace_entry ent; + unsigned int gpio; + int in; + int err; + char __data[0]; +}; + +struct trace_event_raw_gpio_value { + struct trace_entry ent; + unsigned int gpio; + int get; + int value; + char __data[0]; +}; + +struct trace_event_data_offsets_gpio_direction {}; + +struct trace_event_data_offsets_gpio_value {}; + +typedef void (*btf_trace_gpio_direction)(void *, unsigned int, int, int); + +typedef void (*btf_trace_gpio_value)(void *, unsigned int, int, int); + +struct devres; + +struct gpio { + unsigned int gpio; + long unsigned int flags; + const char *label; +}; + +struct gpiochip_info { + char name[32]; + char label[32]; + __u32 lines; +}; + +enum gpio_v2_line_flag { + GPIO_V2_LINE_FLAG_USED = 1, + GPIO_V2_LINE_FLAG_ACTIVE_LOW = 2, + GPIO_V2_LINE_FLAG_INPUT = 4, + GPIO_V2_LINE_FLAG_OUTPUT = 8, + GPIO_V2_LINE_FLAG_EDGE_RISING = 16, + GPIO_V2_LINE_FLAG_EDGE_FALLING = 32, + GPIO_V2_LINE_FLAG_OPEN_DRAIN = 64, + GPIO_V2_LINE_FLAG_OPEN_SOURCE = 128, + GPIO_V2_LINE_FLAG_BIAS_PULL_UP = 256, + GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN = 512, + GPIO_V2_LINE_FLAG_BIAS_DISABLED = 1024, + GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME = 2048, +}; + +struct gpio_v2_line_values { + __u64 bits; + __u64 mask; +}; + +enum 
gpio_v2_line_attr_id { + GPIO_V2_LINE_ATTR_ID_FLAGS = 1, + GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES = 2, + GPIO_V2_LINE_ATTR_ID_DEBOUNCE = 3, +}; + +struct gpio_v2_line_attribute { + __u32 id; + __u32 padding; + union { + __u64 flags; + __u64 values; + __u32 debounce_period_us; + }; +}; + +struct gpio_v2_line_config_attribute { + struct gpio_v2_line_attribute attr; + __u64 mask; +}; + +struct gpio_v2_line_config { + __u64 flags; + __u32 num_attrs; + __u32 padding[5]; + struct gpio_v2_line_config_attribute attrs[10]; +}; + +struct gpio_v2_line_request { + __u32 offsets[64]; + char consumer[32]; + struct gpio_v2_line_config config; + __u32 num_lines; + __u32 event_buffer_size; + __u32 padding[5]; + __s32 fd; +}; + +struct gpio_v2_line_info { + char name[32]; + char consumer[32]; + __u32 offset; + __u32 num_attrs; + __u64 flags; + struct gpio_v2_line_attribute attrs[10]; + __u32 padding[4]; +}; + +enum gpio_v2_line_changed_type { + GPIO_V2_LINE_CHANGED_REQUESTED = 1, + GPIO_V2_LINE_CHANGED_RELEASED = 2, + GPIO_V2_LINE_CHANGED_CONFIG = 3, +}; + +struct gpio_v2_line_info_changed { + struct gpio_v2_line_info info; + __u64 timestamp_ns; + __u32 event_type; + __u32 padding[5]; +}; + +enum gpio_v2_line_event_id { + GPIO_V2_LINE_EVENT_RISING_EDGE = 1, + GPIO_V2_LINE_EVENT_FALLING_EDGE = 2, +}; + +struct gpio_v2_line_event { + __u64 timestamp_ns; + __u32 id; + __u32 offset; + __u32 seqno; + __u32 line_seqno; + __u32 padding[6]; +}; + +struct linereq; + +struct line { + struct gpio_desc___2 *desc; + struct linereq *req; + unsigned int irq; + u64 eflags; + u64 timestamp_ns; + u32 req_seqno; + u32 line_seqno; + struct delayed_work work; + unsigned int sw_debounced; + unsigned int level; +}; + +struct linereq { + struct gpio_device *gdev; + const char *label; + u32 num_lines; + wait_queue_head_t wait; + u32 event_buffer_size; + struct { + union { + struct __kfifo kfifo; + struct gpio_v2_line_event *type; + const struct gpio_v2_line_event *const_type; + char (*rectype)[0]; + struct gpio_v2_line_event *ptr; + const struct gpio_v2_line_event *ptr_const; + }; + struct gpio_v2_line_event buf[0]; + } events; + atomic_t seqno; + struct mutex config_mutex; + struct line lines[0]; +}; + +struct gpio_chardev_data { + struct gpio_device *gdev; + wait_queue_head_t wait; + struct { + union { + struct __kfifo kfifo; + struct gpio_v2_line_info_changed *type; + const struct gpio_v2_line_info_changed *const_type; + char (*rectype)[0]; + struct gpio_v2_line_info_changed *ptr; + const struct gpio_v2_line_info_changed *ptr_const; + }; + struct gpio_v2_line_info_changed buf[32]; + } events; + struct notifier_block lineinfo_changed_nb; + long unsigned int *watched_lines; +}; + +struct class_attribute { + struct attribute attr; + ssize_t (*show)(struct class *, struct class_attribute *, char *); + ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t); +}; + +struct gpiod_data { + struct gpio_desc___2 *desc; + struct mutex mutex; + struct kernfs_node *value_kn; + int irq; + unsigned char irq_flags; + bool direction_can_change; +}; + +typedef u64 acpi_size; + +struct acpi_buffer { + acpi_size length; + void *pointer; +}; + +typedef void (*acpi_object_handler)(acpi_handle, void *); + +struct acpi_connection_info { + u8 *connection; + u16 length; + u8 access_length; +}; + +struct acpi_resource_irq { + u8 descriptor_length; + u8 triggering; + u8 polarity; + u8 shareable; + u8 wake_capable; + u8 interrupt_count; + u8 interrupts[1]; +}; + +struct acpi_resource_dma { + u8 type; + u8 bus_master; + u8 transfer; + u8 
channel_count; + u8 channels[1]; +}; + +struct acpi_resource_start_dependent { + u8 descriptor_length; + u8 compatibility_priority; + u8 performance_robustness; +}; + +struct acpi_resource_io { + u8 io_decode; + u8 alignment; + u8 address_length; + u16 minimum; + u16 maximum; +} __attribute__((packed)); + +struct acpi_resource_fixed_io { + u16 address; + u8 address_length; +} __attribute__((packed)); + +struct acpi_resource_fixed_dma { + u16 request_lines; + u16 channels; + u8 width; +} __attribute__((packed)); + +struct acpi_resource_vendor { + u16 byte_length; + u8 byte_data[1]; +} __attribute__((packed)); + +struct acpi_resource_vendor_typed { + u16 byte_length; + u8 uuid_subtype; + u8 uuid[16]; + u8 byte_data[1]; +}; + +struct acpi_resource_end_tag { + u8 checksum; +}; + +struct acpi_resource_memory24 { + u8 write_protect; + u16 minimum; + u16 maximum; + u16 alignment; + u16 address_length; +} __attribute__((packed)); + +struct acpi_resource_memory32 { + u8 write_protect; + u32 minimum; + u32 maximum; + u32 alignment; + u32 address_length; +} __attribute__((packed)); + +struct acpi_resource_fixed_memory32 { + u8 write_protect; + u32 address; + u32 address_length; +} __attribute__((packed)); + +struct acpi_memory_attribute { + u8 write_protect; + u8 caching; + u8 range_type; + u8 translation; +}; + +struct acpi_io_attribute { + u8 range_type; + u8 translation; + u8 translation_type; + u8 reserved1; +}; + +union acpi_resource_attribute { + struct acpi_memory_attribute mem; + struct acpi_io_attribute io; + u8 type_specific; +}; + +struct acpi_resource_label { + u16 string_length; + char *string_ptr; +} __attribute__((packed)); + +struct acpi_resource_source { + u8 index; + u16 string_length; + char *string_ptr; +} __attribute__((packed)); + +struct acpi_address16_attribute { + u16 granularity; + u16 minimum; + u16 maximum; + u16 translation_offset; + u16 address_length; +}; + +struct acpi_address32_attribute { + u32 granularity; + u32 minimum; + u32 maximum; + u32 translation_offset; + u32 address_length; +}; + +struct acpi_address64_attribute { + u64 granularity; + u64 minimum; + u64 maximum; + u64 translation_offset; + u64 address_length; +}; + +struct acpi_resource_address { + u8 resource_type; + u8 producer_consumer; + u8 decode; + u8 min_address_fixed; + u8 max_address_fixed; + union acpi_resource_attribute info; +}; + +struct acpi_resource_address16 { + u8 resource_type; + u8 producer_consumer; + u8 decode; + u8 min_address_fixed; + u8 max_address_fixed; + union acpi_resource_attribute info; + struct acpi_address16_attribute address; + struct acpi_resource_source resource_source; +} __attribute__((packed)); + +struct acpi_resource_address32 { + u8 resource_type; + u8 producer_consumer; + u8 decode; + u8 min_address_fixed; + u8 max_address_fixed; + union acpi_resource_attribute info; + struct acpi_address32_attribute address; + struct acpi_resource_source resource_source; +} __attribute__((packed)); + +struct acpi_resource_address64 { + u8 resource_type; + u8 producer_consumer; + u8 decode; + u8 min_address_fixed; + u8 max_address_fixed; + union acpi_resource_attribute info; + struct acpi_address64_attribute address; + struct acpi_resource_source resource_source; +} __attribute__((packed)); + +struct acpi_resource_extended_address64 { + u8 resource_type; + u8 producer_consumer; + u8 decode; + u8 min_address_fixed; + u8 max_address_fixed; + union acpi_resource_attribute info; + u8 revision_ID; + struct acpi_address64_attribute address; + u64 type_specific; +} __attribute__((packed)); 
+ +struct acpi_resource_extended_irq { + u8 producer_consumer; + u8 triggering; + u8 polarity; + u8 shareable; + u8 wake_capable; + u8 interrupt_count; + struct acpi_resource_source resource_source; + u32 interrupts[1]; +} __attribute__((packed)); + +struct acpi_resource_generic_register { + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 access_size; + u64 address; +} __attribute__((packed)); + +struct acpi_resource_gpio { + u8 revision_id; + u8 connection_type; + u8 producer_consumer; + u8 pin_config; + u8 shareable; + u8 wake_capable; + u8 io_restriction; + u8 triggering; + u8 polarity; + u16 drive_strength; + u16 debounce_timeout; + u16 pin_table_length; + u16 vendor_length; + struct acpi_resource_source resource_source; + u16 *pin_table; + u8 *vendor_data; +} __attribute__((packed)); + +struct acpi_resource_common_serialbus { + u8 revision_id; + u8 type; + u8 producer_consumer; + u8 slave_mode; + u8 connection_sharing; + u8 type_revision_id; + u16 type_data_length; + u16 vendor_length; + struct acpi_resource_source resource_source; + u8 *vendor_data; +} __attribute__((packed)); + +struct acpi_resource_i2c_serialbus { + u8 revision_id; + u8 type; + u8 producer_consumer; + u8 slave_mode; + u8 connection_sharing; + u8 type_revision_id; + u16 type_data_length; + u16 vendor_length; + struct acpi_resource_source resource_source; + u8 *vendor_data; + u8 access_mode; + u16 slave_address; + u32 connection_speed; +} __attribute__((packed)); + +struct acpi_resource_spi_serialbus { + u8 revision_id; + u8 type; + u8 producer_consumer; + u8 slave_mode; + u8 connection_sharing; + u8 type_revision_id; + u16 type_data_length; + u16 vendor_length; + struct acpi_resource_source resource_source; + u8 *vendor_data; + u8 wire_mode; + u8 device_polarity; + u8 data_bit_length; + u8 clock_phase; + u8 clock_polarity; + u16 device_selection; + u32 connection_speed; +} __attribute__((packed)); + +struct acpi_resource_uart_serialbus { + u8 revision_id; + u8 type; + u8 producer_consumer; + u8 slave_mode; + u8 connection_sharing; + u8 type_revision_id; + u16 type_data_length; + u16 vendor_length; + struct acpi_resource_source resource_source; + u8 *vendor_data; + u8 endian; + u8 data_bits; + u8 stop_bits; + u8 flow_control; + u8 parity; + u8 lines_enabled; + u16 rx_fifo_size; + u16 tx_fifo_size; + u32 default_baud_rate; +} __attribute__((packed)); + +struct acpi_resource_csi2_serialbus { + u8 revision_id; + u8 type; + u8 producer_consumer; + u8 slave_mode; + u8 connection_sharing; + u8 type_revision_id; + u16 type_data_length; + u16 vendor_length; + struct acpi_resource_source resource_source; + u8 *vendor_data; + u8 local_port_instance; + u8 phy_type; +} __attribute__((packed)); + +struct acpi_resource_pin_function { + u8 revision_id; + u8 pin_config; + u8 shareable; + u16 function_number; + u16 pin_table_length; + u16 vendor_length; + struct acpi_resource_source resource_source; + u16 *pin_table; + u8 *vendor_data; +} __attribute__((packed)); + +struct acpi_resource_pin_config { + u8 revision_id; + u8 producer_consumer; + u8 shareable; + u8 pin_config_type; + u32 pin_config_value; + u16 pin_table_length; + u16 vendor_length; + struct acpi_resource_source resource_source; + u16 *pin_table; + u8 *vendor_data; +} __attribute__((packed)); + +struct acpi_resource_pin_group { + u8 revision_id; + u8 producer_consumer; + u16 pin_table_length; + u16 vendor_length; + u16 *pin_table; + struct acpi_resource_label resource_label; + u8 *vendor_data; +} __attribute__((packed)); + +struct acpi_resource_pin_group_function { + 
u8 revision_id; + u8 producer_consumer; + u8 shareable; + u16 function_number; + u16 vendor_length; + struct acpi_resource_source resource_source; + struct acpi_resource_label resource_source_label; + u8 *vendor_data; +} __attribute__((packed)); + +struct acpi_resource_pin_group_config { + u8 revision_id; + u8 producer_consumer; + u8 shareable; + u8 pin_config_type; + u32 pin_config_value; + u16 vendor_length; + struct acpi_resource_source resource_source; + struct acpi_resource_label resource_source_label; + u8 *vendor_data; +} __attribute__((packed)); + +union acpi_resource_data { + struct acpi_resource_irq irq; + struct acpi_resource_dma dma; + struct acpi_resource_start_dependent start_dpf; + struct acpi_resource_io io; + struct acpi_resource_fixed_io fixed_io; + struct acpi_resource_fixed_dma fixed_dma; + struct acpi_resource_vendor vendor; + struct acpi_resource_vendor_typed vendor_typed; + struct acpi_resource_end_tag end_tag; + struct acpi_resource_memory24 memory24; + struct acpi_resource_memory32 memory32; + struct acpi_resource_fixed_memory32 fixed_memory32; + struct acpi_resource_address16 address16; + struct acpi_resource_address32 address32; + struct acpi_resource_address64 address64; + struct acpi_resource_extended_address64 ext_address64; + struct acpi_resource_extended_irq extended_irq; + struct acpi_resource_generic_register generic_reg; + struct acpi_resource_gpio gpio; + struct acpi_resource_i2c_serialbus i2c_serial_bus; + struct acpi_resource_spi_serialbus spi_serial_bus; + struct acpi_resource_uart_serialbus uart_serial_bus; + struct acpi_resource_csi2_serialbus csi2_serial_bus; + struct acpi_resource_common_serialbus common_serial_bus; + struct acpi_resource_pin_function pin_function; + struct acpi_resource_pin_config pin_config; + struct acpi_resource_pin_group pin_group; + struct acpi_resource_pin_group_function pin_group_function; + struct acpi_resource_pin_group_config pin_group_config; + struct acpi_resource_address address; +}; + +struct acpi_resource { + u32 type; + u32 length; + union acpi_resource_data data; +} __attribute__((packed)); + +typedef acpi_status (*acpi_walk_resource_callback)(struct acpi_resource *, void *); + +struct acpi_gpiolib_dmi_quirk { + bool no_edge_events_on_boot; + char *ignore_wake; +}; + +struct acpi_gpio_event { + struct list_head node; + acpi_handle handle; + irq_handler_t handler; + unsigned int pin; + unsigned int irq; + long unsigned int irqflags; + bool irq_is_wake; + bool irq_requested; + struct gpio_desc___2 *desc; +}; + +struct acpi_gpio_connection { + struct list_head node; + unsigned int pin; + struct gpio_desc___2 *desc; +}; + +struct acpi_gpio_chip { + struct acpi_connection_info conn_info; + struct list_head conns; + struct mutex conn_lock; + struct gpio_chip *chip; + struct list_head events; + struct list_head deferred_req_irqs_list_entry; +}; + +struct acpi_gpio_lookup { + struct acpi_gpio_info info; + int index; + u16 pin_index; + bool active_low; + struct gpio_desc___2 *desc; + int n; +}; + +struct regmap_irq_chip_data; + +struct intel_scu_ipc_dev; + +struct intel_soc_pmic { + int irq; + struct regmap *regmap; + struct regmap_irq_chip_data *irq_chip_data; + struct regmap_irq_chip_data *irq_chip_data_pwrbtn; + struct regmap_irq_chip_data *irq_chip_data_tmu; + struct regmap_irq_chip_data *irq_chip_data_bcu; + struct regmap_irq_chip_data *irq_chip_data_adc; + struct regmap_irq_chip_data *irq_chip_data_chgr; + struct regmap_irq_chip_data *irq_chip_data_crit; + struct device *dev; + struct intel_scu_ipc_dev *scu; +}; + 
+enum ctrl_register { + CTRL_IN = 0, + CTRL_OUT = 1, +}; + +struct crystalcove_gpio { + struct mutex buslock; + struct gpio_chip chip; + struct regmap *regmap; + int update; + int intcnt_value; + bool set_irq_mask; +}; + +struct extcon_dev; + +struct regulator_dev; + +struct regulator_ops { + int (*list_voltage)(struct regulator_dev *, unsigned int); + int (*set_voltage)(struct regulator_dev *, int, int, unsigned int *); + int (*map_voltage)(struct regulator_dev *, int, int); + int (*set_voltage_sel)(struct regulator_dev *, unsigned int); + int (*get_voltage)(struct regulator_dev *); + int (*get_voltage_sel)(struct regulator_dev *); + int (*set_current_limit)(struct regulator_dev *, int, int); + int (*get_current_limit)(struct regulator_dev *); + int (*set_input_current_limit)(struct regulator_dev *, int); + int (*set_over_current_protection)(struct regulator_dev *); + int (*set_active_discharge)(struct regulator_dev *, bool); + int (*enable)(struct regulator_dev *); + int (*disable)(struct regulator_dev *); + int (*is_enabled)(struct regulator_dev *); + int (*set_mode)(struct regulator_dev *, unsigned int); + unsigned int (*get_mode)(struct regulator_dev *); + int (*get_error_flags)(struct regulator_dev *, unsigned int *); + int (*enable_time)(struct regulator_dev *); + int (*set_ramp_delay)(struct regulator_dev *, int); + int (*set_voltage_time)(struct regulator_dev *, int, int); + int (*set_voltage_time_sel)(struct regulator_dev *, unsigned int, unsigned int); + int (*set_soft_start)(struct regulator_dev *); + int (*get_status)(struct regulator_dev *); + unsigned int (*get_optimum_mode)(struct regulator_dev *, int, int, int); + int (*set_load)(struct regulator_dev *, int); + int (*set_bypass)(struct regulator_dev *, bool); + int (*get_bypass)(struct regulator_dev *, bool *); + int (*set_suspend_voltage)(struct regulator_dev *, int); + int (*set_suspend_enable)(struct regulator_dev *); + int (*set_suspend_disable)(struct regulator_dev *); + int (*set_suspend_mode)(struct regulator_dev *, unsigned int); + int (*resume)(struct regulator_dev *); + int (*set_pull_down)(struct regulator_dev *); +}; + +struct regulator_coupler; + +struct coupling_desc { + struct regulator_dev **coupled_rdevs; + struct regulator_coupler *coupler; + int n_resolved; + int n_coupled; +}; + +struct regulator_desc; + +struct regulation_constraints; + +struct regulator_enable_gpio; + +struct regulator_dev { + const struct regulator_desc *desc; + int exclusive; + u32 use_count; + u32 open_count; + u32 bypass_count; + struct list_head list; + struct list_head consumer_list; + struct coupling_desc coupling_desc; + struct blocking_notifier_head notifier; + struct ww_mutex mutex; + struct task_struct *mutex_owner; + int ref_cnt; + struct module *owner; + struct device dev; + struct regulation_constraints *constraints; + struct regulator *supply; + const char *supply_name; + struct regmap *regmap; + struct delayed_work disable_work; + void *reg_data; + struct dentry *debugfs; + struct regulator_enable_gpio *ena_pin; + unsigned int ena_gpio_state: 1; + unsigned int is_switch: 1; + ktime_t last_off; +}; + +enum regulator_type { + REGULATOR_VOLTAGE = 0, + REGULATOR_CURRENT = 1, +}; + +struct regulator_config; + +struct regulator_desc { + const char *name; + const char *supply_name; + const char *of_match; + bool of_match_full_name; + const char *regulators_node; + int (*of_parse_cb)(struct device_node *, const struct regulator_desc *, struct regulator_config *); + int id; + unsigned int continuous_voltage_range: 1; + unsigned 
int n_voltages; + unsigned int n_current_limits; + const struct regulator_ops *ops; + int irq; + enum regulator_type type; + struct module *owner; + unsigned int min_uV; + unsigned int uV_step; + unsigned int linear_min_sel; + int fixed_uV; + unsigned int ramp_delay; + int min_dropout_uV; + const struct linear_range *linear_ranges; + const unsigned int *linear_range_selectors; + int n_linear_ranges; + const unsigned int *volt_table; + const unsigned int *curr_table; + unsigned int vsel_range_reg; + unsigned int vsel_range_mask; + unsigned int vsel_reg; + unsigned int vsel_mask; + unsigned int vsel_step; + unsigned int csel_reg; + unsigned int csel_mask; + unsigned int apply_reg; + unsigned int apply_bit; + unsigned int enable_reg; + unsigned int enable_mask; + unsigned int enable_val; + unsigned int disable_val; + bool enable_is_inverted; + unsigned int bypass_reg; + unsigned int bypass_mask; + unsigned int bypass_val_on; + unsigned int bypass_val_off; + unsigned int active_discharge_on; + unsigned int active_discharge_off; + unsigned int active_discharge_mask; + unsigned int active_discharge_reg; + unsigned int soft_start_reg; + unsigned int soft_start_mask; + unsigned int soft_start_val_on; + unsigned int pull_down_reg; + unsigned int pull_down_mask; + unsigned int pull_down_val_on; + unsigned int ramp_reg; + unsigned int ramp_mask; + const unsigned int *ramp_delay_table; + unsigned int n_ramp_values; + unsigned int enable_time; + unsigned int off_on_delay; + unsigned int poll_enabled_time; + unsigned int (*of_map_mode)(unsigned int); +}; + +struct regulator_init_data; + +struct regulator_config { + struct device *dev; + const struct regulator_init_data *init_data; + void *driver_data; + struct device_node *of_node; + struct regmap *regmap; + struct gpio_desc *ena_gpiod; +}; + +struct regulator_state { + int uV; + int min_uV; + int max_uV; + unsigned int mode; + int enabled; + bool changeable; +}; + +struct regulation_constraints { + const char *name; + int min_uV; + int max_uV; + int uV_offset; + int min_uA; + int max_uA; + int ilim_uA; + int system_load; + u32 *max_spread; + int max_uV_step; + unsigned int valid_modes_mask; + unsigned int valid_ops_mask; + int input_uV; + struct regulator_state state_disk; + struct regulator_state state_mem; + struct regulator_state state_standby; + suspend_state_t initial_state; + unsigned int initial_mode; + unsigned int ramp_delay; + unsigned int settling_time; + unsigned int settling_time_up; + unsigned int settling_time_down; + unsigned int enable_time; + unsigned int active_discharge; + unsigned int always_on: 1; + unsigned int boot_on: 1; + unsigned int apply_uV: 1; + unsigned int ramp_disable: 1; + unsigned int soft_start: 1; + unsigned int pull_down: 1; + unsigned int over_current_protection: 1; +}; + +struct regulator_consumer_supply; + +struct regulator_init_data { + const char *supply_regulator; + struct regulation_constraints constraints; + int num_consumer_supplies; + struct regulator_consumer_supply *consumer_supplies; + int (*regulator_init)(void *); + void *driver_data; +}; + +enum palmas_usb_state { + PALMAS_USB_STATE_DISCONNECT = 0, + PALMAS_USB_STATE_VBUS = 1, + PALMAS_USB_STATE_ID = 2, +}; + +struct palmas_gpadc; + +struct palmas_pmic_driver_data; + +struct palmas_pmic; + +struct palmas_resource; + +struct palmas_usb; + +struct palmas { + struct device *dev; + struct i2c_client *i2c_clients[3]; + struct regmap *regmap[3]; + int id; + unsigned int features; + int irq; + u32 irq_mask; + struct mutex irq_lock; + struct 
regmap_irq_chip_data *irq_data; + struct palmas_pmic_driver_data *pmic_ddata; + struct palmas_pmic *pmic; + struct palmas_gpadc *gpadc; + struct palmas_resource *resource; + struct palmas_usb *usb; + u8 gpio_muxed; + u8 led_muxed; + u8 pwm_muxed; +}; + +struct of_regulator_match; + +struct palmas_regs_info; + +struct palmas_sleep_requestor_info; + +struct palmas_pmic_platform_data; + +struct palmas_pmic_driver_data { + int smps_start; + int smps_end; + int ldo_begin; + int ldo_end; + int max_reg; + bool has_regen3; + struct palmas_regs_info *palmas_regs_info; + struct of_regulator_match *palmas_matches; + struct palmas_sleep_requestor_info *sleep_req_info; + int (*smps_register)(struct palmas_pmic *, struct palmas_pmic_driver_data *, struct palmas_pmic_platform_data *, const char *, struct regulator_config); + int (*ldo_register)(struct palmas_pmic *, struct palmas_pmic_driver_data *, struct palmas_pmic_platform_data *, const char *, struct regulator_config); +}; + +struct palmas_pmic { + struct palmas *palmas; + struct device *dev; + struct regulator_desc desc[27]; + struct mutex mutex; + int smps123; + int smps457; + int smps12; + int range[10]; + unsigned int ramp_delay[10]; + unsigned int current_reg_mode[10]; +}; + +struct palmas_resource { + struct palmas *palmas; + struct device *dev; +}; + +struct palmas_usb { + struct palmas *palmas; + struct device *dev; + struct extcon_dev *edev; + int id_otg_irq; + int id_irq; + int vbus_otg_irq; + int vbus_irq; + int gpio_id_irq; + int gpio_vbus_irq; + struct gpio_desc *id_gpiod; + struct gpio_desc *vbus_gpiod; + long unsigned int sw_debounce_jiffies; + struct delayed_work wq_detectid; + enum palmas_usb_state linkstat; + int wakeup; + bool enable_vbus_detection; + bool enable_id_detection; + bool enable_gpio_id_detection; + bool enable_gpio_vbus_detection; +}; + +struct palmas_sleep_requestor_info { + int id; + int reg_offset; + int bit_pos; +}; + +struct palmas_regs_info { + char *name; + char *sname; + u8 vsel_addr; + u8 ctrl_addr; + u8 tstep_addr; + int sleep_id; +}; + +struct palmas_reg_init; + +struct palmas_pmic_platform_data { + struct regulator_init_data *reg_data[27]; + struct palmas_reg_init *reg_init[27]; + int ldo6_vibrator; + bool enable_ldo8_tracking; +}; + +struct palmas_adc_wakeup_property { + int adc_channel_number; + int adc_high_threshold; + int adc_low_threshold; +}; + +struct palmas_gpadc_platform_data { + int ch3_current; + int ch0_current; + bool extended_delay; + int bat_removal; + int start_polarity; + int auto_conversion_period_ms; + struct palmas_adc_wakeup_property *adc_wakeup1_data; + struct palmas_adc_wakeup_property *adc_wakeup2_data; +}; + +struct palmas_reg_init { + int warm_reset; + int roof_floor; + int mode_sleep; + u8 vsel; +}; + +enum palmas_regulators { + PALMAS_REG_SMPS12 = 0, + PALMAS_REG_SMPS123 = 1, + PALMAS_REG_SMPS3 = 2, + PALMAS_REG_SMPS45 = 3, + PALMAS_REG_SMPS457 = 4, + PALMAS_REG_SMPS6 = 5, + PALMAS_REG_SMPS7 = 6, + PALMAS_REG_SMPS8 = 7, + PALMAS_REG_SMPS9 = 8, + PALMAS_REG_SMPS10_OUT2 = 9, + PALMAS_REG_SMPS10_OUT1 = 10, + PALMAS_REG_LDO1 = 11, + PALMAS_REG_LDO2 = 12, + PALMAS_REG_LDO3 = 13, + PALMAS_REG_LDO4 = 14, + PALMAS_REG_LDO5 = 15, + PALMAS_REG_LDO6 = 16, + PALMAS_REG_LDO7 = 17, + PALMAS_REG_LDO8 = 18, + PALMAS_REG_LDO9 = 19, + PALMAS_REG_LDOLN = 20, + PALMAS_REG_LDOUSB = 21, + PALMAS_REG_REGEN1 = 22, + PALMAS_REG_REGEN2 = 23, + PALMAS_REG_REGEN3 = 24, + PALMAS_REG_SYSEN1 = 25, + PALMAS_REG_SYSEN2 = 26, + PALMAS_NUM_REGS = 27, +}; + +struct palmas_usb_platform_data { + int wakeup; +}; + 
+struct palmas_resource_platform_data { + int regen1_mode_sleep; + int regen2_mode_sleep; + int sysen1_mode_sleep; + int sysen2_mode_sleep; + u8 nsleep_res; + u8 nsleep_smps; + u8 nsleep_ldo1; + u8 nsleep_ldo2; + u8 enable1_res; + u8 enable1_smps; + u8 enable1_ldo1; + u8 enable1_ldo2; + u8 enable2_res; + u8 enable2_smps; + u8 enable2_ldo1; + u8 enable2_ldo2; +}; + +struct palmas_clk_platform_data { + int clk32kg_mode_sleep; + int clk32kgaudio_mode_sleep; +}; + +struct palmas_platform_data { + int irq_flags; + int gpio_base; + u8 power_ctrl; + int mux_from_pdata; + u8 pad1; + u8 pad2; + bool pm_off; + struct palmas_pmic_platform_data *pmic_pdata; + struct palmas_gpadc_platform_data *gpadc_pdata; + struct palmas_usb_platform_data *usb_pdata; + struct palmas_resource_platform_data *resource_pdata; + struct palmas_clk_platform_data *clk_pdata; +}; + +enum palmas_irqs { + PALMAS_CHARG_DET_N_VBUS_OVV_IRQ = 0, + PALMAS_PWRON_IRQ = 1, + PALMAS_LONG_PRESS_KEY_IRQ = 2, + PALMAS_RPWRON_IRQ = 3, + PALMAS_PWRDOWN_IRQ = 4, + PALMAS_HOTDIE_IRQ = 5, + PALMAS_VSYS_MON_IRQ = 6, + PALMAS_VBAT_MON_IRQ = 7, + PALMAS_RTC_ALARM_IRQ = 8, + PALMAS_RTC_TIMER_IRQ = 9, + PALMAS_WDT_IRQ = 10, + PALMAS_BATREMOVAL_IRQ = 11, + PALMAS_RESET_IN_IRQ = 12, + PALMAS_FBI_BB_IRQ = 13, + PALMAS_SHORT_IRQ = 14, + PALMAS_VAC_ACOK_IRQ = 15, + PALMAS_GPADC_AUTO_0_IRQ = 16, + PALMAS_GPADC_AUTO_1_IRQ = 17, + PALMAS_GPADC_EOC_SW_IRQ = 18, + PALMAS_GPADC_EOC_RT_IRQ = 19, + PALMAS_ID_OTG_IRQ = 20, + PALMAS_ID_IRQ = 21, + PALMAS_VBUS_OTG_IRQ = 22, + PALMAS_VBUS_IRQ = 23, + PALMAS_GPIO_0_IRQ = 24, + PALMAS_GPIO_1_IRQ = 25, + PALMAS_GPIO_2_IRQ = 26, + PALMAS_GPIO_3_IRQ = 27, + PALMAS_GPIO_4_IRQ = 28, + PALMAS_GPIO_5_IRQ = 29, + PALMAS_GPIO_6_IRQ = 30, + PALMAS_GPIO_7_IRQ = 31, + PALMAS_NUM_IRQ = 32, +}; + +struct palmas_gpio { + struct gpio_chip gpio_chip; + struct palmas *palmas; +}; + +struct palmas_device_data { + int ngpio; +}; + +enum { + RC5T583_IRQ_ONKEY = 0, + RC5T583_IRQ_ACOK = 1, + RC5T583_IRQ_LIDOPEN = 2, + RC5T583_IRQ_PREOT = 3, + RC5T583_IRQ_CLKSTP = 4, + RC5T583_IRQ_ONKEY_OFF = 5, + RC5T583_IRQ_WD = 6, + RC5T583_IRQ_EN_PWRREQ1 = 7, + RC5T583_IRQ_EN_PWRREQ2 = 8, + RC5T583_IRQ_PRE_VINDET = 9, + RC5T583_IRQ_DC0LIM = 10, + RC5T583_IRQ_DC1LIM = 11, + RC5T583_IRQ_DC2LIM = 12, + RC5T583_IRQ_DC3LIM = 13, + RC5T583_IRQ_CTC = 14, + RC5T583_IRQ_YALE = 15, + RC5T583_IRQ_DALE = 16, + RC5T583_IRQ_WALE = 17, + RC5T583_IRQ_AIN1L = 18, + RC5T583_IRQ_AIN2L = 19, + RC5T583_IRQ_AIN3L = 20, + RC5T583_IRQ_VBATL = 21, + RC5T583_IRQ_VIN3L = 22, + RC5T583_IRQ_VIN8L = 23, + RC5T583_IRQ_AIN1H = 24, + RC5T583_IRQ_AIN2H = 25, + RC5T583_IRQ_AIN3H = 26, + RC5T583_IRQ_VBATH = 27, + RC5T583_IRQ_VIN3H = 28, + RC5T583_IRQ_VIN8H = 29, + RC5T583_IRQ_ADCEND = 30, + RC5T583_IRQ_GPIO0 = 31, + RC5T583_IRQ_GPIO1 = 32, + RC5T583_IRQ_GPIO2 = 33, + RC5T583_IRQ_GPIO3 = 34, + RC5T583_IRQ_GPIO4 = 35, + RC5T583_IRQ_GPIO5 = 36, + RC5T583_IRQ_GPIO6 = 37, + RC5T583_IRQ_GPIO7 = 38, + RC5T583_MAX_IRQS = 39, +}; + +enum { + RC5T583_GPIO0 = 0, + RC5T583_GPIO1 = 1, + RC5T583_GPIO2 = 2, + RC5T583_GPIO3 = 3, + RC5T583_GPIO4 = 4, + RC5T583_GPIO5 = 5, + RC5T583_GPIO6 = 6, + RC5T583_GPIO7 = 7, + RC5T583_MAX_GPIO = 8, +}; + +enum { + RC5T583_REGULATOR_DC0 = 0, + RC5T583_REGULATOR_DC1 = 1, + RC5T583_REGULATOR_DC2 = 2, + RC5T583_REGULATOR_DC3 = 3, + RC5T583_REGULATOR_LDO0 = 4, + RC5T583_REGULATOR_LDO1 = 5, + RC5T583_REGULATOR_LDO2 = 6, + RC5T583_REGULATOR_LDO3 = 7, + RC5T583_REGULATOR_LDO4 = 8, + RC5T583_REGULATOR_LDO5 = 9, + RC5T583_REGULATOR_LDO6 = 10, + RC5T583_REGULATOR_LDO7 = 11, + 
RC5T583_REGULATOR_LDO8 = 12, + RC5T583_REGULATOR_LDO9 = 13, + RC5T583_REGULATOR_MAX = 14, +}; + +struct rc5t583 { + struct device *dev; + struct regmap *regmap; + int chip_irq; + int irq_base; + struct mutex irq_lock; + long unsigned int group_irq_en[5]; + uint8_t intc_inten_reg; + uint8_t irq_en_reg[8]; + uint8_t gpedge_reg[2]; +}; + +struct rc5t583_platform_data { + int irq_base; + int gpio_base; + bool enable_shutdown; + int regulator_deepsleep_slot[14]; + long unsigned int regulator_ext_pwr_control[14]; + struct regulator_init_data *reg_init_data[14]; +}; + +struct rc5t583_gpio { + struct gpio_chip gpio_chip; + struct rc5t583 *rc5t583; +}; + +enum { + TPS6586X_ID_SYS = 0, + TPS6586X_ID_SM_0 = 1, + TPS6586X_ID_SM_1 = 2, + TPS6586X_ID_SM_2 = 3, + TPS6586X_ID_LDO_0 = 4, + TPS6586X_ID_LDO_1 = 5, + TPS6586X_ID_LDO_2 = 6, + TPS6586X_ID_LDO_3 = 7, + TPS6586X_ID_LDO_4 = 8, + TPS6586X_ID_LDO_5 = 9, + TPS6586X_ID_LDO_6 = 10, + TPS6586X_ID_LDO_7 = 11, + TPS6586X_ID_LDO_8 = 12, + TPS6586X_ID_LDO_9 = 13, + TPS6586X_ID_LDO_RTC = 14, + TPS6586X_ID_MAX_REGULATOR = 15, +}; + +enum { + TPS6586X_INT_PLDO_0 = 0, + TPS6586X_INT_PLDO_1 = 1, + TPS6586X_INT_PLDO_2 = 2, + TPS6586X_INT_PLDO_3 = 3, + TPS6586X_INT_PLDO_4 = 4, + TPS6586X_INT_PLDO_5 = 5, + TPS6586X_INT_PLDO_6 = 6, + TPS6586X_INT_PLDO_7 = 7, + TPS6586X_INT_COMP_DET = 8, + TPS6586X_INT_ADC = 9, + TPS6586X_INT_PLDO_8 = 10, + TPS6586X_INT_PLDO_9 = 11, + TPS6586X_INT_PSM_0 = 12, + TPS6586X_INT_PSM_1 = 13, + TPS6586X_INT_PSM_2 = 14, + TPS6586X_INT_PSM_3 = 15, + TPS6586X_INT_RTC_ALM1 = 16, + TPS6586X_INT_ACUSB_OVP = 17, + TPS6586X_INT_USB_DET = 18, + TPS6586X_INT_AC_DET = 19, + TPS6586X_INT_BAT_DET = 20, + TPS6586X_INT_CHG_STAT = 21, + TPS6586X_INT_CHG_TEMP = 22, + TPS6586X_INT_PP = 23, + TPS6586X_INT_RESUME = 24, + TPS6586X_INT_LOW_SYS = 25, + TPS6586X_INT_RTC_ALM2 = 26, +}; + +struct tps6586x_subdev_info { + int id; + const char *name; + void *platform_data; + struct device_node *of_node; +}; + +struct tps6586x_platform_data { + int num_subdevs; + struct tps6586x_subdev_info *subdevs; + int gpio_base; + int irq_base; + bool pm_off; + struct regulator_init_data *reg_init_data[15]; +}; + +struct tps6586x_gpio { + struct gpio_chip gpio_chip; + struct device *parent; +}; + +struct tps65910_sleep_keepon_data { + unsigned int therm_keepon: 1; + unsigned int clkout32k_keepon: 1; + unsigned int i2chs_keepon: 1; +}; + +struct tps65910_board { + int gpio_base; + int irq; + int irq_base; + int vmbch_threshold; + int vmbch2_threshold; + bool en_ck32k_xtal; + bool en_dev_slp; + bool pm_off; + struct tps65910_sleep_keepon_data slp_keepon; + bool en_gpio_sleep[9]; + long unsigned int regulator_ext_sleep_control[14]; + struct regulator_init_data *tps65910_pmic_init_data[14]; +}; + +struct tps65910 { + struct device *dev; + struct i2c_client *i2c_client; + struct regmap *regmap; + long unsigned int id; + struct tps65910_board *of_plat_data; + int chip_irq; + struct regmap_irq_chip_data *irq_data; +}; + +struct tps65910_gpio { + struct gpio_chip gpio_chip; + struct tps65910 *tps65910; +}; + +struct tps68470_gpio_data { + struct regmap *tps68470_regmap; + struct gpio_chip gc; +}; + +enum pwm_polarity { + PWM_POLARITY_NORMAL = 0, + PWM_POLARITY_INVERSED = 1, +}; + +struct pwm_args { + u64 period; + enum pwm_polarity polarity; +}; + +enum { + PWMF_REQUESTED = 1, + PWMF_EXPORTED = 2, +}; + +struct pwm_state { + u64 period; + u64 duty_cycle; + enum pwm_polarity polarity; + bool enabled; +}; + +struct pwm_chip; + +struct pwm_device { + const char *label; + long unsigned int 
flags; + unsigned int hwpwm; + unsigned int pwm; + struct pwm_chip *chip; + void *chip_data; + struct pwm_args args; + struct pwm_state state; + struct pwm_state last; +}; + +struct pwm_ops; + +struct pwm_chip { + struct device *dev; + const struct pwm_ops *ops; + int base; + unsigned int npwm; + struct pwm_device * (*of_xlate)(struct pwm_chip *, const struct of_phandle_args *); + unsigned int of_pwm_n_cells; + struct list_head list; + struct pwm_device *pwms; +}; + +struct pwm_capture; + +struct pwm_ops { + int (*request)(struct pwm_chip *, struct pwm_device *); + void (*free)(struct pwm_chip *, struct pwm_device *); + int (*capture)(struct pwm_chip *, struct pwm_device *, struct pwm_capture *, long unsigned int); + int (*apply)(struct pwm_chip *, struct pwm_device *, const struct pwm_state *); + void (*get_state)(struct pwm_chip *, struct pwm_device *, struct pwm_state *); + struct module *owner; + int (*config)(struct pwm_chip *, struct pwm_device *, int, int); + int (*set_polarity)(struct pwm_chip *, struct pwm_device *, enum pwm_polarity); + int (*enable)(struct pwm_chip *, struct pwm_device *); + void (*disable)(struct pwm_chip *, struct pwm_device *); +}; + +struct pwm_capture { + unsigned int period; + unsigned int duty_cycle; +}; + +struct pwm_lookup { + struct list_head list; + const char *provider; + unsigned int index; + const char *dev_id; + const char *con_id; + unsigned int period; + enum pwm_polarity polarity; + const char *module; +}; + +struct trace_event_raw_pwm { + struct trace_entry ent; + struct pwm_device *pwm; + u64 period; + u64 duty_cycle; + enum pwm_polarity polarity; + bool enabled; + char __data[0]; +}; + +struct trace_event_data_offsets_pwm {}; + +typedef void (*btf_trace_pwm_apply)(void *, struct pwm_device *, const struct pwm_state *); + +typedef void (*btf_trace_pwm_get)(void *, struct pwm_device *, const struct pwm_state *); + +struct pwm_export { + struct device child; + struct pwm_device *pwm; + struct mutex lock; + struct pwm_state suspend; +}; + +struct crystalcove_pwm { + struct pwm_chip chip; + struct regmap *regmap; +}; + +struct pwm_lpss_boardinfo; + +struct pwm_lpss_chip { + struct pwm_chip chip; + void *regs; + const struct pwm_lpss_boardinfo *info; +}; + +struct pwm_lpss_boardinfo { + long unsigned int clk_rate; + unsigned int npwm; + long unsigned int base_unit_bits; + bool bypass; + bool other_devices_aml_touches_pwm_regs; +}; + +enum { + pci_channel_io_normal = 1, + pci_channel_io_frozen = 2, + pci_channel_io_perm_failure = 3, +}; + +struct pci_sriov { + int pos; + int nres; + u32 cap; + u16 ctrl; + u16 total_VFs; + u16 initial_VFs; + u16 num_VFs; + u16 offset; + u16 stride; + u16 vf_device; + u32 pgsz; + u8 link; + u8 max_VF_buses; + u16 driver_max_VFs; + struct pci_dev *dev; + struct pci_dev *self; + u32 class; + u8 hdr_type; + u16 subsystem_vendor; + u16 subsystem_device; + resource_size_t barsz[6]; + bool drivers_autoprobe; +}; + +struct rcec_ea { + u8 nextbusn; + u8 lastbusn; + u32 bitmap; +}; + +struct pci_bus_resource { + struct list_head list; + struct resource *res; + unsigned int flags; +}; + +typedef u64 pci_bus_addr_t; + +struct pci_bus_region { + pci_bus_addr_t start; + pci_bus_addr_t end; +}; + +enum pci_fixup_pass { + pci_fixup_early = 0, + pci_fixup_header = 1, + pci_fixup_final = 2, + pci_fixup_enable = 3, + pci_fixup_resume = 4, + pci_fixup_suspend = 5, + pci_fixup_resume_early = 6, + pci_fixup_suspend_late = 7, +}; + +struct hotplug_slot_ops; + +struct hotplug_slot { + const struct hotplug_slot_ops *ops; + struct list_head 
slot_list; + struct pci_slot *pci_slot; + struct module *owner; + const char *mod_name; +}; + +enum pci_dev_flags { + PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = 1, + PCI_DEV_FLAGS_NO_D3 = 2, + PCI_DEV_FLAGS_ASSIGNED = 4, + PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = 8, + PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = 32, + PCI_DEV_FLAGS_NO_BUS_RESET = 64, + PCI_DEV_FLAGS_NO_PM_RESET = 128, + PCI_DEV_FLAGS_VPD_REF_F0 = 256, + PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = 512, + PCI_DEV_FLAGS_NO_FLR_RESET = 1024, + PCI_DEV_FLAGS_NO_RELAXED_ORDERING = 2048, + PCI_DEV_FLAGS_ENABLE_ASPM = 4096, +}; + +enum pci_bus_flags { + PCI_BUS_FLAGS_NO_MSI = 1, + PCI_BUS_FLAGS_NO_MMRBC = 2, + PCI_BUS_FLAGS_NO_AERSID = 4, + PCI_BUS_FLAGS_NO_EXTCFG = 8, +}; + +enum pci_bus_speed { + PCI_SPEED_33MHz = 0, + PCI_SPEED_66MHz = 1, + PCI_SPEED_66MHz_PCIX = 2, + PCI_SPEED_100MHz_PCIX = 3, + PCI_SPEED_133MHz_PCIX = 4, + PCI_SPEED_66MHz_PCIX_ECC = 5, + PCI_SPEED_100MHz_PCIX_ECC = 6, + PCI_SPEED_133MHz_PCIX_ECC = 7, + PCI_SPEED_66MHz_PCIX_266 = 9, + PCI_SPEED_100MHz_PCIX_266 = 10, + PCI_SPEED_133MHz_PCIX_266 = 11, + AGP_UNKNOWN = 12, + AGP_1X = 13, + AGP_2X = 14, + AGP_4X = 15, + AGP_8X = 16, + PCI_SPEED_66MHz_PCIX_533 = 17, + PCI_SPEED_100MHz_PCIX_533 = 18, + PCI_SPEED_133MHz_PCIX_533 = 19, + PCIE_SPEED_2_5GT = 20, + PCIE_SPEED_5_0GT = 21, + PCIE_SPEED_8_0GT = 22, + PCIE_SPEED_16_0GT = 23, + PCIE_SPEED_32_0GT = 24, + PCIE_SPEED_64_0GT = 25, + PCI_SPEED_UNKNOWN = 255, +}; + +struct pci_host_bridge { + struct device dev; + struct pci_bus *bus; + struct pci_ops *ops; + struct pci_ops *child_ops; + void *sysdata; + int busnr; + struct list_head windows; + struct list_head dma_ranges; + u8 (*swizzle_irq)(struct pci_dev *, u8 *); + int (*map_irq)(const struct pci_dev *, u8, u8); + void (*release_fn)(struct pci_host_bridge *); + void *release_data; + unsigned int ignore_reset_delay: 1; + unsigned int no_ext_tags: 1; + unsigned int native_aer: 1; + unsigned int native_pcie_hotplug: 1; + unsigned int native_shpc_hotplug: 1; + unsigned int native_pme: 1; + unsigned int native_ltr: 1; + unsigned int native_dpc: 1; + unsigned int preserve_config: 1; + unsigned int size_windows: 1; + unsigned int msi_domain: 1; + resource_size_t (*align_resource)(struct pci_dev *, const struct resource *, resource_size_t, resource_size_t, resource_size_t); + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long unsigned int private[0]; +}; + +enum { + PCI_REASSIGN_ALL_RSRC = 1, + PCI_REASSIGN_ALL_BUS = 2, + PCI_PROBE_ONLY = 4, + PCI_CAN_SKIP_ISA_ALIGN = 8, + PCI_ENABLE_PROC_DOMAINS = 16, + PCI_COMPAT_DOMAIN_0 = 32, + PCI_SCAN_ALL_PCIE_DEVS = 64, +}; + +enum pcie_bus_config_types { + PCIE_BUS_TUNE_OFF = 0, + PCIE_BUS_DEFAULT = 1, + PCIE_BUS_SAFE = 2, + PCIE_BUS_PERFORMANCE = 3, + PCIE_BUS_PEER2PEER = 4, +}; + +struct hotplug_slot_ops { + int (*enable_slot)(struct hotplug_slot *); + int (*disable_slot)(struct hotplug_slot *); + int (*set_attention_status)(struct hotplug_slot *, u8); + int (*hardware_test)(struct hotplug_slot *, u32); + int (*get_power_status)(struct hotplug_slot *, u8 *); + int (*get_attention_status)(struct hotplug_slot *, u8 *); + int (*get_latch_status)(struct hotplug_slot *, u8 *); + int (*get_adapter_status)(struct hotplug_slot *, u8 *); + int (*reset_slot)(struct hotplug_slot *, int); +}; + +enum pci_bar_type { + pci_bar_unknown = 0, + pci_bar_io = 1, + pci_bar_mem32 = 2, + pci_bar_mem64 = 3, +}; + +struct pci_domain_busn_res { + struct list_head list; + struct resource res; + int domain_nr; +}; + +struct bus_attribute { + struct attribute attr; + ssize_t 
(*show)(struct bus_type *, char *); + ssize_t (*store)(struct bus_type *, const char *, size_t); +}; + +enum pcie_reset_state { + pcie_deassert_reset = 1, + pcie_warm_reset = 2, + pcie_hot_reset = 3, +}; + +enum pcie_link_width { + PCIE_LNK_WIDTH_RESRV = 0, + PCIE_LNK_X1 = 1, + PCIE_LNK_X2 = 2, + PCIE_LNK_X4 = 4, + PCIE_LNK_X8 = 8, + PCIE_LNK_X12 = 12, + PCIE_LNK_X16 = 16, + PCIE_LNK_X32 = 32, + PCIE_LNK_WIDTH_UNKNOWN = 255, +}; + +struct pci_cap_saved_data { + u16 cap_nr; + bool cap_extended; + unsigned int size; + u32 data[0]; +}; + +struct pci_cap_saved_state { + struct hlist_node next; + struct pci_cap_saved_data cap; +}; + +struct pci_platform_pm_ops { + bool (*bridge_d3)(struct pci_dev *); + bool (*is_manageable)(struct pci_dev *); + int (*set_state)(struct pci_dev *, pci_power_t); + pci_power_t (*get_state)(struct pci_dev *); + void (*refresh_state)(struct pci_dev *); + pci_power_t (*choose_state)(struct pci_dev *); + int (*set_wakeup)(struct pci_dev *, bool); + bool (*need_resume)(struct pci_dev *); +}; + +struct pci_pme_device { + struct list_head list; + struct pci_dev *dev; +}; + +struct pci_saved_state { + u32 config_space[16]; + struct pci_cap_saved_data cap[0]; +}; + +struct pci_devres { + unsigned int enabled: 1; + unsigned int pinned: 1; + unsigned int orig_intx: 1; + unsigned int restore_intx: 1; + unsigned int mwi: 1; + u32 region_mask; +}; + +struct driver_attribute { + struct attribute attr; + ssize_t (*show)(struct device_driver *, char *); + ssize_t (*store)(struct device_driver *, const char *, size_t); +}; + +enum pci_ers_result { + PCI_ERS_RESULT_NONE = 1, + PCI_ERS_RESULT_CAN_RECOVER = 2, + PCI_ERS_RESULT_NEED_RESET = 3, + PCI_ERS_RESULT_DISCONNECT = 4, + PCI_ERS_RESULT_RECOVERED = 5, + PCI_ERS_RESULT_NO_AER_DRIVER = 6, +}; + +enum dev_dma_attr { + DEV_DMA_NOT_SUPPORTED = 0, + DEV_DMA_NON_COHERENT = 1, + DEV_DMA_COHERENT = 2, +}; + +struct pcie_device { + int irq; + struct pci_dev *port; + u32 service; + void *priv_data; + struct device device; +}; + +struct pcie_port_service_driver { + const char *name; + int (*probe)(struct pcie_device *); + void (*remove)(struct pcie_device *); + int (*suspend)(struct pcie_device *); + int (*resume_noirq)(struct pcie_device *); + int (*resume)(struct pcie_device *); + int (*runtime_suspend)(struct pcie_device *); + int (*runtime_resume)(struct pcie_device *); + void (*error_resume)(struct pci_dev *); + int port_type; + u32 service; + struct device_driver driver; +}; + +struct pci_dynid { + struct list_head node; + struct pci_device_id id; +}; + +struct drv_dev_and_id { + struct pci_driver *drv; + struct pci_dev *dev; + const struct pci_device_id *id; +}; + +enum pci_mmap_state { + pci_mmap_io = 0, + pci_mmap_mem = 1, +}; + +enum pci_mmap_api { + PCI_MMAP_SYSFS = 0, + PCI_MMAP_PROCFS = 1, +}; + +struct pci_vpd_ops; + +struct pci_vpd { + const struct pci_vpd_ops *ops; + struct mutex lock; + unsigned int len; + u16 flag; + u8 cap; + unsigned int busy: 1; + unsigned int valid: 1; +}; + +struct pci_vpd_ops { + ssize_t (*read)(struct pci_dev *, loff_t, size_t, void *); + ssize_t (*write)(struct pci_dev *, loff_t, size_t, const void *); +}; + +struct pci_dev_resource { + struct list_head list; + struct resource *res; + struct pci_dev *dev; + resource_size_t start; + resource_size_t end; + resource_size_t add_size; + resource_size_t min_align; + long unsigned int flags; +}; + +enum release_type { + leaf_only = 0, + whole_subtree = 1, +}; + +enum enable_type { + undefined = 4294967295, + user_disabled = 0, + auto_disabled = 1, + 
user_enabled = 2, + auto_enabled = 3, +}; + +struct msix_entry { + u32 vector; + u16 entry; +}; + +struct portdrv_service_data { + struct pcie_port_service_driver *drv; + struct device *dev; + u32 service; +}; + +typedef int (*pcie_pm_callback_t)(struct pcie_device *); + +struct walk_rcec_data { + struct pci_dev *rcec; + int (*user_callback)(struct pci_dev *, void *); + void *user_data; +}; + +struct aspm_latency { + u32 l0s; + u32 l1; +}; + +struct pcie_link_state { + struct pci_dev *pdev; + struct pci_dev *downstream; + struct pcie_link_state *root; + struct pcie_link_state *parent; + struct list_head sibling; + u32 aspm_support: 7; + u32 aspm_enabled: 7; + u32 aspm_capable: 7; + u32 aspm_default: 7; + char: 4; + u32 aspm_disable: 7; + u32 clkpm_capable: 1; + u32 clkpm_enabled: 1; + u32 clkpm_default: 1; + u32 clkpm_disable: 1; + struct aspm_latency latency_up; + struct aspm_latency latency_dw; + struct aspm_latency acceptable[8]; +}; + +struct aer_stats { + u64 dev_cor_errs[16]; + u64 dev_fatal_errs[27]; + u64 dev_nonfatal_errs[27]; + u64 dev_total_cor_errs; + u64 dev_total_fatal_errs; + u64 dev_total_nonfatal_errs; + u64 rootport_total_cor_errs; + u64 rootport_total_fatal_errs; + u64 rootport_total_nonfatal_errs; +}; + +struct aer_header_log_regs { + unsigned int dw0; + unsigned int dw1; + unsigned int dw2; + unsigned int dw3; +}; + +struct aer_capability_regs { + u32 header; + u32 uncor_status; + u32 uncor_mask; + u32 uncor_severity; + u32 cor_status; + u32 cor_mask; + u32 cap_control; + struct aer_header_log_regs header_log; + u32 root_command; + u32 root_status; + u16 cor_err_source; + u16 uncor_err_source; +}; + +struct aer_err_info { + struct pci_dev *dev[5]; + int error_dev_num; + unsigned int id: 16; + unsigned int severity: 2; + unsigned int __pad1: 5; + unsigned int multi_error_valid: 1; + unsigned int first_error: 5; + unsigned int __pad2: 2; + unsigned int tlp_header_valid: 1; + unsigned int status; + unsigned int mask; + struct aer_header_log_regs tlp; +}; + +struct aer_err_source { + unsigned int status; + unsigned int id; +}; + +struct aer_rpc { + struct pci_dev *rpd; + struct { + union { + struct __kfifo kfifo; + struct aer_err_source *type; + const struct aer_err_source *const_type; + char (*rectype)[0]; + struct aer_err_source *ptr; + const struct aer_err_source *ptr_const; + }; + struct aer_err_source buf[128]; + } aer_fifo; +}; + +struct aer_recover_entry { + u8 bus; + u8 devfn; + u16 domain; + int severity; + struct aer_capability_regs *regs; +}; + +struct pcie_pme_service_data { + spinlock_t lock; + struct pcie_device *srv; + struct work_struct work; + bool noirq; +}; + +struct pci_filp_private { + enum pci_mmap_state mmap_state; + int write_combine; +}; + +struct pci_slot_attribute { + struct attribute attr; + ssize_t (*show)(struct pci_slot *, char *); + ssize_t (*store)(struct pci_slot *, const char *, size_t); +}; + +struct acpi_bus_type { + struct list_head list; + const char *name; + bool (*match)(struct device *); + struct acpi_device * (*find_companion)(struct device *); + void (*setup)(struct device *); + void (*cleanup)(struct device *); +}; + +struct acpi_pci_root { + struct acpi_device *device; + struct pci_bus *bus; + u16 segment; + struct resource secondary; + u32 osc_support_set; + u32 osc_control_set; + phys_addr_t mcfg_addr; +}; + +enum pm_qos_flags_status { + PM_QOS_FLAGS_UNDEFINED = 4294967295, + PM_QOS_FLAGS_NONE = 0, + PM_QOS_FLAGS_SOME = 1, + PM_QOS_FLAGS_ALL = 2, +}; + +struct hpx_type0 { + u32 revision; + u8 cache_line_size; + u8 
latency_timer; + u8 enable_serr; + u8 enable_perr; +}; + +struct hpx_type1 { + u32 revision; + u8 max_mem_read; + u8 avg_max_split; + u16 tot_max_split; +}; + +struct hpx_type2 { + u32 revision; + u32 unc_err_mask_and; + u32 unc_err_mask_or; + u32 unc_err_sever_and; + u32 unc_err_sever_or; + u32 cor_err_mask_and; + u32 cor_err_mask_or; + u32 adv_err_cap_and; + u32 adv_err_cap_or; + u16 pci_exp_devctl_and; + u16 pci_exp_devctl_or; + u16 pci_exp_lnkctl_and; + u16 pci_exp_lnkctl_or; + u32 sec_unc_err_sever_and; + u32 sec_unc_err_sever_or; + u32 sec_unc_err_mask_and; + u32 sec_unc_err_mask_or; +}; + +struct hpx_type3 { + u16 device_type; + u16 function_type; + u16 config_space_location; + u16 pci_exp_cap_id; + u16 pci_exp_cap_ver; + u16 pci_exp_vendor_id; + u16 dvsec_id; + u16 dvsec_rev; + u16 match_offset; + u32 match_mask_and; + u32 match_value; + u16 reg_offset; + u32 reg_mask_and; + u32 reg_mask_or; +}; + +enum hpx_type3_dev_type { + HPX_TYPE_ENDPOINT = 1, + HPX_TYPE_LEG_END = 2, + HPX_TYPE_RC_END = 4, + HPX_TYPE_RC_EC = 8, + HPX_TYPE_ROOT_PORT = 16, + HPX_TYPE_UPSTREAM = 32, + HPX_TYPE_DOWNSTREAM = 64, + HPX_TYPE_PCI_BRIDGE = 128, + HPX_TYPE_PCIE_BRIDGE = 256, +}; + +enum hpx_type3_fn_type { + HPX_FN_NORMAL = 1, + HPX_FN_SRIOV_PHYS = 2, + HPX_FN_SRIOV_VIRT = 4, +}; + +enum hpx_type3_cfg_loc { + HPX_CFG_PCICFG = 0, + HPX_CFG_PCIE_CAP = 1, + HPX_CFG_PCIE_CAP_EXT = 2, + HPX_CFG_VEND_CAP = 3, + HPX_CFG_DVSEC = 4, + HPX_CFG_MAX = 5, +}; + +enum pci_irq_reroute_variant { + INTEL_IRQ_REROUTE_VARIANT = 1, + MAX_IRQ_REROUTE_VARIANTS = 3, +}; + +struct pci_fixup { + u16 vendor; + u16 device; + u32 class; + unsigned int class_shift; + int hook_offset; +}; + +enum { + NVME_REG_CAP = 0, + NVME_REG_VS = 8, + NVME_REG_INTMS = 12, + NVME_REG_INTMC = 16, + NVME_REG_CC = 20, + NVME_REG_CSTS = 28, + NVME_REG_NSSR = 32, + NVME_REG_AQA = 36, + NVME_REG_ASQ = 40, + NVME_REG_ACQ = 48, + NVME_REG_CMBLOC = 56, + NVME_REG_CMBSZ = 60, + NVME_REG_BPINFO = 64, + NVME_REG_BPRSEL = 68, + NVME_REG_BPMBL = 72, + NVME_REG_CMBMSC = 80, + NVME_REG_PMRCAP = 3584, + NVME_REG_PMRCTL = 3588, + NVME_REG_PMRSTS = 3592, + NVME_REG_PMREBS = 3596, + NVME_REG_PMRSWTP = 3600, + NVME_REG_DBS = 4096, +}; + +enum { + NVME_CC_ENABLE = 1, + NVME_CC_EN_SHIFT = 0, + NVME_CC_CSS_SHIFT = 4, + NVME_CC_MPS_SHIFT = 7, + NVME_CC_AMS_SHIFT = 11, + NVME_CC_SHN_SHIFT = 14, + NVME_CC_IOSQES_SHIFT = 16, + NVME_CC_IOCQES_SHIFT = 20, + NVME_CC_CSS_NVM = 0, + NVME_CC_CSS_CSI = 96, + NVME_CC_CSS_MASK = 112, + NVME_CC_AMS_RR = 0, + NVME_CC_AMS_WRRU = 2048, + NVME_CC_AMS_VS = 14336, + NVME_CC_SHN_NONE = 0, + NVME_CC_SHN_NORMAL = 16384, + NVME_CC_SHN_ABRUPT = 32768, + NVME_CC_SHN_MASK = 49152, + NVME_CC_IOSQES = 393216, + NVME_CC_IOCQES = 4194304, + NVME_CAP_CSS_NVM = 1, + NVME_CAP_CSS_CSI = 64, + NVME_CSTS_RDY = 1, + NVME_CSTS_CFS = 2, + NVME_CSTS_NSSRO = 16, + NVME_CSTS_PP = 32, + NVME_CSTS_SHST_NORMAL = 0, + NVME_CSTS_SHST_OCCUR = 4, + NVME_CSTS_SHST_CMPLT = 8, + NVME_CSTS_SHST_MASK = 12, + NVME_CMBMSC_CRE = 1, + NVME_CMBMSC_CMSE = 2, +}; + +enum { + NVME_AEN_BIT_NS_ATTR = 8, + NVME_AEN_BIT_FW_ACT = 9, + NVME_AEN_BIT_ANA_CHANGE = 11, + NVME_AEN_BIT_DISC_CHANGE = 31, +}; + +enum { + SWITCHTEC_GAS_MRPC_OFFSET = 0, + SWITCHTEC_GAS_TOP_CFG_OFFSET = 4096, + SWITCHTEC_GAS_SW_EVENT_OFFSET = 6144, + SWITCHTEC_GAS_SYS_INFO_OFFSET = 8192, + SWITCHTEC_GAS_FLASH_INFO_OFFSET = 8704, + SWITCHTEC_GAS_PART_CFG_OFFSET = 16384, + SWITCHTEC_GAS_NTB_OFFSET = 65536, + SWITCHTEC_GAS_PFF_CSR_OFFSET = 1261568, +}; + +enum { + SWITCHTEC_NTB_REG_INFO_OFFSET = 0, + 
SWITCHTEC_NTB_REG_CTRL_OFFSET = 16384, + SWITCHTEC_NTB_REG_DBMSG_OFFSET = 409600, +}; + +struct nt_partition_info { + u32 xlink_enabled; + u32 target_part_low; + u32 target_part_high; + u32 reserved; +}; + +struct ntb_info_regs { + u8 partition_count; + u8 partition_id; + u16 reserved1; + u64 ep_map; + u16 requester_id; + u16 reserved2; + u32 reserved3[4]; + struct nt_partition_info ntp_info[48]; +} __attribute__((packed)); + +struct ntb_ctrl_regs { + u32 partition_status; + u32 partition_op; + u32 partition_ctrl; + u32 bar_setup; + u32 bar_error; + u16 lut_table_entries; + u16 lut_table_offset; + u32 lut_error; + u16 req_id_table_size; + u16 req_id_table_offset; + u32 req_id_error; + u32 reserved1[7]; + struct { + u32 ctl; + u32 win_size; + u64 xlate_addr; + } bar_entry[6]; + struct { + u32 win_size; + u32 reserved[3]; + } bar_ext_entry[6]; + u32 reserved2[192]; + u32 req_id_table[512]; + u32 reserved3[256]; + u64 lut_entry[512]; +}; + +struct pci_dev_reset_methods { + u16 vendor; + u16 device; + int (*reset)(struct pci_dev *, int); +}; + +struct pci_dev_acs_enabled { + u16 vendor; + u16 device; + int (*acs_enabled)(struct pci_dev *, u16); +}; + +struct pci_dev_acs_ops { + u16 vendor; + u16 device; + int (*enable_acs)(struct pci_dev *); + int (*disable_acs_redir)(struct pci_dev *); +}; + +struct slot { + u8 number; + unsigned int devfn; + struct pci_bus *bus; + struct pci_dev *dev; + unsigned int latch_status: 1; + unsigned int adapter_status: 1; + unsigned int extracting; + struct hotplug_slot hotplug_slot; + struct list_head slot_list; +}; + +struct cpci_hp_controller_ops { + int (*query_enum)(); + int (*enable_irq)(); + int (*disable_irq)(); + int (*check_irq)(void *); + int (*hardware_test)(struct slot *, u32); + u8 (*get_power)(struct slot *); + int (*set_power)(struct slot *, int); +}; + +struct cpci_hp_controller { + unsigned int irq; + long unsigned int irq_flags; + char *devname; + void *dev_id; + char *name; + struct cpci_hp_controller_ops *ops; +}; + +typedef acpi_status (*acpi_walk_callback)(acpi_handle, u32, void *, void **); + +struct controller { + struct pcie_device *pcie; + u32 slot_cap; + unsigned int inband_presence_disabled: 1; + u16 slot_ctrl; + struct mutex ctrl_lock; + long unsigned int cmd_started; + unsigned int cmd_busy: 1; + wait_queue_head_t queue; + atomic_t pending_events; + unsigned int notification_enabled: 1; + unsigned int power_fault_detected; + struct task_struct *poll_thread; + u8 state; + struct mutex state_lock; + struct delayed_work button_work; + struct hotplug_slot hotplug_slot; + struct rw_semaphore reset_lock; + unsigned int ist_running; + int request_result; + wait_queue_head_t requester; +}; + +struct controller___2; + +struct hpc_ops; + +struct slot___2 { + u8 bus; + u8 device; + u16 status; + u32 number; + u8 is_a_board; + u8 state; + u8 attention_save; + u8 presence_save; + u8 latch_save; + u8 pwr_save; + struct controller___2 *ctrl; + const struct hpc_ops *hpc_ops; + struct hotplug_slot hotplug_slot; + struct list_head slot_list; + struct delayed_work work; + struct mutex lock; + struct workqueue_struct *wq; + u8 hp_slot; +}; + +struct controller___2 { + struct mutex crit_sect; + struct mutex cmd_lock; + int num_slots; + int slot_num_inc; + struct pci_dev *pci_dev; + struct list_head slot_list; + const struct hpc_ops *hpc_ops; + wait_queue_head_t queue; + u8 slot_device_offset; + u32 pcix_misc2_reg; + u32 first_slot; + u32 cap_offset; + long unsigned int mmio_base; + long unsigned int mmio_size; + void *creg; + struct timer_list 
poll_timer; +}; + +struct hpc_ops { + int (*power_on_slot)(struct slot___2 *); + int (*slot_enable)(struct slot___2 *); + int (*slot_disable)(struct slot___2 *); + int (*set_bus_speed_mode)(struct slot___2 *, enum pci_bus_speed); + int (*get_power_status)(struct slot___2 *, u8 *); + int (*get_attention_status)(struct slot___2 *, u8 *); + int (*set_attention_status)(struct slot___2 *, u8); + int (*get_latch_status)(struct slot___2 *, u8 *); + int (*get_adapter_status)(struct slot___2 *, u8 *); + int (*get_adapter_speed)(struct slot___2 *, enum pci_bus_speed *); + int (*get_mode1_ECC_cap)(struct slot___2 *, u8 *); + int (*get_prog_int)(struct slot___2 *, u8 *); + int (*query_power_fault)(struct slot___2 *); + void (*green_led_on)(struct slot___2 *); + void (*green_led_off)(struct slot___2 *); + void (*green_led_blink)(struct slot___2 *); + void (*release_ctlr)(struct controller___2 *); + int (*check_cmd_status)(struct controller___2 *); +}; + +struct event_info { + u32 event_type; + struct slot___2 *p_slot; + struct work_struct work; +}; + +struct pushbutton_work_info { + struct slot___2 *p_slot; + struct work_struct work; +}; + +enum ctrl_offsets { + BASE_OFFSET = 0, + SLOT_AVAIL1 = 4, + SLOT_AVAIL2 = 8, + SLOT_CONFIG = 12, + SEC_BUS_CONFIG = 16, + MSI_CTRL = 18, + PROG_INTERFACE = 19, + CMD = 20, + CMD_STATUS = 22, + INTR_LOC = 24, + SERR_LOC = 28, + SERR_INTR_ENABLE = 32, + SLOT1 = 36, +}; + +struct acpiphp_slot; + +struct slot___3 { + struct hotplug_slot hotplug_slot; + struct acpiphp_slot *acpi_slot; + unsigned int sun; +}; + +struct acpiphp_slot { + struct list_head node; + struct pci_bus *bus; + struct list_head funcs; + struct slot___3 *slot; + u8 device; + u32 flags; +}; + +struct acpiphp_attention_info { + int (*set_attn)(struct hotplug_slot *, u8); + int (*get_attn)(struct hotplug_slot *, u8 *); + struct module *owner; +}; + +struct acpiphp_context; + +struct acpiphp_bridge { + struct list_head list; + struct list_head slots; + struct kref ref; + struct acpiphp_context *context; + int nr_slots; + struct pci_bus *pci_bus; + struct pci_dev *pci_dev; + bool is_going_away; +}; + +struct acpiphp_func { + struct acpiphp_bridge *parent; + struct acpiphp_slot *slot; + struct list_head sibling; + u8 function; + u32 flags; +}; + +struct acpiphp_context { + struct acpi_hotplug_context hp; + struct acpiphp_func func; + struct acpiphp_bridge *bridge; + unsigned int refcount; +}; + +struct acpiphp_root_context { + struct acpi_hotplug_context hp; + struct acpiphp_bridge *root_bridge; +}; + +enum dmi_device_type { + DMI_DEV_TYPE_ANY = 0, + DMI_DEV_TYPE_OTHER = 1, + DMI_DEV_TYPE_UNKNOWN = 2, + DMI_DEV_TYPE_VIDEO = 3, + DMI_DEV_TYPE_SCSI = 4, + DMI_DEV_TYPE_ETHERNET = 5, + DMI_DEV_TYPE_TOKENRING = 6, + DMI_DEV_TYPE_SOUND = 7, + DMI_DEV_TYPE_PATA = 8, + DMI_DEV_TYPE_SATA = 9, + DMI_DEV_TYPE_SAS = 10, + DMI_DEV_TYPE_IPMI = 4294967295, + DMI_DEV_TYPE_OEM_STRING = 4294967294, + DMI_DEV_TYPE_DEV_ONBOARD = 4294967293, + DMI_DEV_TYPE_DEV_SLOT = 4294967292, +}; + +struct dmi_device { + struct list_head list; + int type; + const char *name; + void *device_data; +}; + +struct dmi_dev_onboard { + struct dmi_device dev; + int instance; + int segment; + int bus; + int devfn; +}; + +enum smbios_attr_enum { + SMBIOS_ATTR_NONE = 0, + SMBIOS_ATTR_LABEL_SHOW = 1, + SMBIOS_ATTR_INSTANCE_SHOW = 2, +}; + +enum acpi_attr_enum { + ACPI_ATTR_LABEL_SHOW = 0, + ACPI_ATTR_INDEX_SHOW = 1, +}; + +struct pci_epf_device_id { + char name[20]; + kernel_ulong_t driver_data; +}; + +enum pci_interrupt_pin { + PCI_INTERRUPT_UNKNOWN = 
0, + PCI_INTERRUPT_INTA = 1, + PCI_INTERRUPT_INTB = 2, + PCI_INTERRUPT_INTC = 3, + PCI_INTERRUPT_INTD = 4, +}; + +enum pci_barno { + NO_BAR = 4294967295, + BAR_0 = 0, + BAR_1 = 1, + BAR_2 = 2, + BAR_3 = 3, + BAR_4 = 4, + BAR_5 = 5, +}; + +struct pci_epf_header { + u16 vendorid; + u16 deviceid; + u8 revid; + u8 progif_code; + u8 subclass_code; + u8 baseclass_code; + u8 cache_line_size; + u16 subsys_vendor_id; + u16 subsys_id; + enum pci_interrupt_pin interrupt_pin; +}; + +struct pci_epf; + +struct pci_epf_ops { + int (*bind)(struct pci_epf *); + void (*unbind)(struct pci_epf *); + struct config_group * (*add_cfs)(struct pci_epf *, struct config_group *); +}; + +struct pci_epf_bar { + dma_addr_t phys_addr; + void *addr; + size_t size; + enum pci_barno barno; + int flags; +}; + +struct pci_epc; + +struct pci_epf_driver; + +struct pci_epf { + struct device dev; + const char *name; + struct pci_epf_header *header; + struct pci_epf_bar bar[6]; + u8 msi_interrupts; + u16 msix_interrupts; + u8 func_no; + struct pci_epc *epc; + struct pci_epf_driver *driver; + struct list_head list; + struct notifier_block nb; + struct mutex lock; + struct pci_epc *sec_epc; + struct list_head sec_epc_list; + struct pci_epf_bar sec_epc_bar[6]; + u8 sec_epc_func_no; + struct config_group *group; +}; + +struct pci_epf_driver { + int (*probe)(struct pci_epf *); + int (*remove)(struct pci_epf *); + struct device_driver driver; + struct pci_epf_ops *ops; + struct module *owner; + struct list_head epf_group; + const struct pci_epf_device_id *id_table; +}; + +struct pci_epc_ops; + +struct pci_epc_mem; + +struct pci_epc { + struct device dev; + struct list_head pci_epf; + const struct pci_epc_ops *ops; + struct pci_epc_mem **windows; + struct pci_epc_mem *mem; + unsigned int num_windows; + u8 max_functions; + struct config_group *group; + struct mutex lock; + long unsigned int function_num_map; + struct atomic_notifier_head notifier; +}; + +enum pci_epc_interface_type { + UNKNOWN_INTERFACE = 4294967295, + PRIMARY_INTERFACE = 0, + SECONDARY_INTERFACE = 1, +}; + +enum pci_epc_irq_type { + PCI_EPC_IRQ_UNKNOWN = 0, + PCI_EPC_IRQ_LEGACY = 1, + PCI_EPC_IRQ_MSI = 2, + PCI_EPC_IRQ_MSIX = 3, +}; + +struct pci_epc_features; + +struct pci_epc_ops { + int (*write_header)(struct pci_epc *, u8, struct pci_epf_header *); + int (*set_bar)(struct pci_epc *, u8, struct pci_epf_bar *); + void (*clear_bar)(struct pci_epc *, u8, struct pci_epf_bar *); + int (*map_addr)(struct pci_epc *, u8, phys_addr_t, u64, size_t); + void (*unmap_addr)(struct pci_epc *, u8, phys_addr_t); + int (*set_msi)(struct pci_epc *, u8, u8); + int (*get_msi)(struct pci_epc *, u8); + int (*set_msix)(struct pci_epc *, u8, u16, enum pci_barno, u32); + int (*get_msix)(struct pci_epc *, u8); + int (*raise_irq)(struct pci_epc *, u8, enum pci_epc_irq_type, u16); + int (*map_msi_irq)(struct pci_epc *, u8, phys_addr_t, u8, u32, u32 *, u32 *); + int (*start)(struct pci_epc *); + void (*stop)(struct pci_epc *); + const struct pci_epc_features * (*get_features)(struct pci_epc *, u8); + struct module *owner; +}; + +struct pci_epc_features { + unsigned int linkup_notifier: 1; + unsigned int core_init_notifier: 1; + unsigned int msi_capable: 1; + unsigned int msix_capable: 1; + u8 reserved_bar; + u8 bar_fixed_64bit; + u64 bar_fixed_size[6]; + size_t align; +}; + +struct pci_epc_mem_window { + phys_addr_t phys_base; + size_t size; + size_t page_size; +}; + +struct pci_epc_mem { + struct pci_epc_mem_window window; + long unsigned int *bitmap; + int pages; + struct mutex lock; +}; + 
+struct pci_epf_group { + struct config_group group; + struct config_group primary_epc_group; + struct config_group secondary_epc_group; + struct delayed_work cfs_work; + struct pci_epf *epf; + int index; +}; + +struct pci_epc_group { + struct config_group group; + struct pci_epc *epc; + bool start; +}; + +enum pci_notify_event { + CORE_INIT = 0, + LINK_UP = 1, +}; + +enum dw_pcie_region_type { + DW_PCIE_REGION_UNKNOWN = 0, + DW_PCIE_REGION_INBOUND = 1, + DW_PCIE_REGION_OUTBOUND = 2, +}; + +struct pcie_port; + +struct dw_pcie_host_ops { + int (*host_init)(struct pcie_port *); + int (*msi_host_init)(struct pcie_port *); +}; + +struct pcie_port { + bool has_msi_ctrl: 1; + u64 cfg0_base; + void *va_cfg0_base; + u32 cfg0_size; + resource_size_t io_base; + phys_addr_t io_bus_addr; + u32 io_size; + int irq; + const struct dw_pcie_host_ops *ops; + int msi_irq; + struct irq_domain *irq_domain; + struct irq_domain *msi_domain; + u16 msi_msg; + dma_addr_t msi_data; + struct irq_chip *msi_irq_chip; + u32 num_vectors; + u32 irq_mask[8]; + struct pci_host_bridge *bridge; + raw_spinlock_t lock; + long unsigned int msi_irq_in_use[4]; +}; + +enum dw_pcie_as_type { + DW_PCIE_AS_UNKNOWN = 0, + DW_PCIE_AS_MEM = 1, + DW_PCIE_AS_IO = 2, +}; + +struct dw_pcie_ep; + +struct dw_pcie_ep_ops { + void (*ep_init)(struct dw_pcie_ep *); + int (*raise_irq)(struct dw_pcie_ep *, u8, enum pci_epc_irq_type, u16); + const struct pci_epc_features * (*get_features)(struct dw_pcie_ep *); + unsigned int (*func_conf_select)(struct dw_pcie_ep *, u8); +}; + +struct dw_pcie_ep { + struct pci_epc *epc; + struct list_head func_list; + const struct dw_pcie_ep_ops *ops; + phys_addr_t phys_base; + size_t addr_size; + size_t page_size; + u8 bar_to_atu[6]; + phys_addr_t *outbound_addr; + long unsigned int *ib_window_map; + long unsigned int *ob_window_map; + void *msi_mem; + phys_addr_t msi_mem_phys; + struct pci_epf_bar *epf_bar[6]; +}; + +struct dw_pcie; + +struct dw_pcie_ops { + u64 (*cpu_addr_fixup)(struct dw_pcie *, u64); + u32 (*read_dbi)(struct dw_pcie *, void *, u32, size_t); + void (*write_dbi)(struct dw_pcie *, void *, u32, size_t, u32); + void (*write_dbi2)(struct dw_pcie *, void *, u32, size_t, u32); + int (*link_up)(struct dw_pcie *); + int (*start_link)(struct dw_pcie *); + void (*stop_link)(struct dw_pcie *); +}; + +struct dw_pcie { + struct device *dev; + void *dbi_base; + void *dbi_base2; + void *atu_base; + size_t atu_size; + u32 num_ib_windows; + u32 num_ob_windows; + struct pcie_port pp; + struct dw_pcie_ep ep; + const struct dw_pcie_ops *ops; + unsigned int version; + int num_lanes; + int link_gen; + u8 n_fts[2]; + bool iatu_unroll_enabled: 1; + bool io_cfg_atu_shared: 1; +}; + +struct pci_epf_msix_tbl { + u64 msg_addr; + u32 msg_data; + u32 vector_ctrl; +}; + +struct dw_pcie_ep_func { + struct list_head list; + u8 func_no; + u8 msi_cap; + u8 msix_cap; +}; + +enum dw_pcie_device_mode { + DW_PCIE_UNKNOWN_TYPE = 0, + DW_PCIE_EP_TYPE = 1, + DW_PCIE_LEG_EP_TYPE = 2, + DW_PCIE_RC_TYPE = 3, +}; + +struct dw_plat_pcie { + struct dw_pcie *pci; + struct regmap *regmap; + enum dw_pcie_device_mode mode; +}; + +struct dw_plat_pcie_of_data { + enum dw_pcie_device_mode mode; +}; + +struct rio_device_id { + __u16 did; + __u16 vid; + __u16 asm_did; + __u16 asm_vid; +}; + +typedef s32 dma_cookie_t; + +enum dma_status { + DMA_COMPLETE = 0, + DMA_IN_PROGRESS = 1, + DMA_PAUSED = 2, + DMA_ERROR = 3, + DMA_OUT_OF_ORDER = 4, +}; + +enum dma_transaction_type { + DMA_MEMCPY = 0, + DMA_XOR = 1, + DMA_PQ = 2, + DMA_XOR_VAL = 3, + DMA_PQ_VAL = 4, 
+ DMA_MEMSET = 5, + DMA_MEMSET_SG = 6, + DMA_INTERRUPT = 7, + DMA_PRIVATE = 8, + DMA_ASYNC_TX = 9, + DMA_SLAVE = 10, + DMA_CYCLIC = 11, + DMA_INTERLEAVE = 12, + DMA_COMPLETION_NO_ORDER = 13, + DMA_REPEAT = 14, + DMA_LOAD_EOT = 15, + DMA_TX_TYPE_END = 16, +}; + +enum dma_transfer_direction { + DMA_MEM_TO_MEM = 0, + DMA_MEM_TO_DEV = 1, + DMA_DEV_TO_MEM = 2, + DMA_DEV_TO_DEV = 3, + DMA_TRANS_NONE = 4, +}; + +struct data_chunk { + size_t size; + size_t icg; + size_t dst_icg; + size_t src_icg; +}; + +struct dma_interleaved_template { + dma_addr_t src_start; + dma_addr_t dst_start; + enum dma_transfer_direction dir; + bool src_inc; + bool dst_inc; + bool src_sgl; + bool dst_sgl; + size_t numf; + size_t frame_size; + struct data_chunk sgl[0]; +}; + +enum dma_ctrl_flags { + DMA_PREP_INTERRUPT = 1, + DMA_CTRL_ACK = 2, + DMA_PREP_PQ_DISABLE_P = 4, + DMA_PREP_PQ_DISABLE_Q = 8, + DMA_PREP_CONTINUE = 16, + DMA_PREP_FENCE = 32, + DMA_CTRL_REUSE = 64, + DMA_PREP_CMD = 128, + DMA_PREP_REPEAT = 256, + DMA_PREP_LOAD_EOT = 512, +}; + +enum sum_check_bits { + SUM_CHECK_P = 0, + SUM_CHECK_Q = 1, +}; + +enum sum_check_flags { + SUM_CHECK_P_RESULT = 1, + SUM_CHECK_Q_RESULT = 2, +}; + +typedef struct { + long unsigned int bits[1]; +} dma_cap_mask_t; + +enum dma_desc_metadata_mode { + DESC_METADATA_NONE = 0, + DESC_METADATA_CLIENT = 1, + DESC_METADATA_ENGINE = 2, +}; + +struct dma_chan_percpu { + long unsigned int memcpy_count; + long unsigned int bytes_transferred; +}; + +struct dma_router { + struct device *dev; + void (*route_free)(struct device *, void *); +}; + +struct dma_device; + +struct dma_chan_dev; + +struct dma_chan___2 { + struct dma_device *device; + struct device *slave; + dma_cookie_t cookie; + dma_cookie_t completed_cookie; + int chan_id; + struct dma_chan_dev *dev; + const char *name; + char *dbg_client_name; + struct list_head device_node; + struct dma_chan_percpu *local; + int client_count; + int table_count; + struct dma_router *router; + void *route_data; + void *private; +}; + +typedef bool (*dma_filter_fn)(struct dma_chan___2 *, void *); + +struct dma_slave_map; + +struct dma_filter { + dma_filter_fn fn; + int mapcnt; + const struct dma_slave_map *map; +}; + +enum dmaengine_alignment { + DMAENGINE_ALIGN_1_BYTE = 0, + DMAENGINE_ALIGN_2_BYTES = 1, + DMAENGINE_ALIGN_4_BYTES = 2, + DMAENGINE_ALIGN_8_BYTES = 3, + DMAENGINE_ALIGN_16_BYTES = 4, + DMAENGINE_ALIGN_32_BYTES = 5, + DMAENGINE_ALIGN_64_BYTES = 6, + DMAENGINE_ALIGN_128_BYTES = 7, + DMAENGINE_ALIGN_256_BYTES = 8, +}; + +enum dma_residue_granularity { + DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0, + DMA_RESIDUE_GRANULARITY_SEGMENT = 1, + DMA_RESIDUE_GRANULARITY_BURST = 2, +}; + +struct dma_async_tx_descriptor; + +struct dma_slave_caps; + +struct dma_slave_config; + +struct dma_tx_state; + +struct dma_device { + struct kref ref; + unsigned int chancnt; + unsigned int privatecnt; + struct list_head channels; + struct list_head global_node; + struct dma_filter filter; + dma_cap_mask_t cap_mask; + enum dma_desc_metadata_mode desc_metadata_modes; + short unsigned int max_xor; + short unsigned int max_pq; + enum dmaengine_alignment copy_align; + enum dmaengine_alignment xor_align; + enum dmaengine_alignment pq_align; + enum dmaengine_alignment fill_align; + int dev_id; + struct device *dev; + struct module *owner; + struct ida chan_ida; + struct mutex chan_mutex; + u32 src_addr_widths; + u32 dst_addr_widths; + u32 directions; + u32 min_burst; + u32 max_burst; + u32 max_sg_burst; + bool descriptor_reuse; + enum dma_residue_granularity 
residue_granularity; + int (*device_alloc_chan_resources)(struct dma_chan___2 *); + int (*device_router_config)(struct dma_chan___2 *); + void (*device_free_chan_resources)(struct dma_chan___2 *); + struct dma_async_tx_descriptor * (*device_prep_dma_memcpy)(struct dma_chan___2 *, dma_addr_t, dma_addr_t, size_t, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_xor)(struct dma_chan___2 *, dma_addr_t, dma_addr_t *, unsigned int, size_t, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_xor_val)(struct dma_chan___2 *, dma_addr_t *, unsigned int, size_t, enum sum_check_flags *, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_pq)(struct dma_chan___2 *, dma_addr_t *, dma_addr_t *, unsigned int, const unsigned char *, size_t, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_pq_val)(struct dma_chan___2 *, dma_addr_t *, dma_addr_t *, unsigned int, const unsigned char *, size_t, enum sum_check_flags *, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_memset)(struct dma_chan___2 *, dma_addr_t, int, size_t, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_memset_sg)(struct dma_chan___2 *, struct scatterlist *, unsigned int, int, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_interrupt)(struct dma_chan___2 *, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_slave_sg)(struct dma_chan___2 *, struct scatterlist *, unsigned int, enum dma_transfer_direction, long unsigned int, void *); + struct dma_async_tx_descriptor * (*device_prep_dma_cyclic)(struct dma_chan___2 *, dma_addr_t, size_t, size_t, enum dma_transfer_direction, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_interleaved_dma)(struct dma_chan___2 *, struct dma_interleaved_template *, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_imm_data)(struct dma_chan___2 *, dma_addr_t, u64, long unsigned int); + void (*device_caps)(struct dma_chan___2 *, struct dma_slave_caps *); + int (*device_config)(struct dma_chan___2 *, struct dma_slave_config *); + int (*device_pause)(struct dma_chan___2 *); + int (*device_resume)(struct dma_chan___2 *); + int (*device_terminate_all)(struct dma_chan___2 *); + void (*device_synchronize)(struct dma_chan___2 *); + enum dma_status (*device_tx_status)(struct dma_chan___2 *, dma_cookie_t, struct dma_tx_state *); + void (*device_issue_pending)(struct dma_chan___2 *); + void (*device_release)(struct dma_device *); + void (*dbg_summary_show)(struct seq_file *, struct dma_device *); + struct dentry *dbg_dev_root; +}; + +struct dma_chan_dev { + struct dma_chan___2 *chan; + struct device device; + int dev_id; + bool chan_dma_dev; +}; + +enum dma_slave_buswidth { + DMA_SLAVE_BUSWIDTH_UNDEFINED = 0, + DMA_SLAVE_BUSWIDTH_1_BYTE = 1, + DMA_SLAVE_BUSWIDTH_2_BYTES = 2, + DMA_SLAVE_BUSWIDTH_3_BYTES = 3, + DMA_SLAVE_BUSWIDTH_4_BYTES = 4, + DMA_SLAVE_BUSWIDTH_8_BYTES = 8, + DMA_SLAVE_BUSWIDTH_16_BYTES = 16, + DMA_SLAVE_BUSWIDTH_32_BYTES = 32, + DMA_SLAVE_BUSWIDTH_64_BYTES = 64, +}; + +struct dma_slave_config { + enum dma_transfer_direction direction; + phys_addr_t src_addr; + phys_addr_t dst_addr; + enum dma_slave_buswidth src_addr_width; + enum dma_slave_buswidth dst_addr_width; + u32 src_maxburst; + u32 dst_maxburst; + u32 src_port_window_size; + u32 dst_port_window_size; + bool device_fc; + unsigned int slave_id; + void *peripheral_config; + size_t peripheral_size; +}; + +struct dma_slave_caps { + u32 
src_addr_widths; + u32 dst_addr_widths; + u32 directions; + u32 min_burst; + u32 max_burst; + u32 max_sg_burst; + bool cmd_pause; + bool cmd_resume; + bool cmd_terminate; + enum dma_residue_granularity residue_granularity; + bool descriptor_reuse; +}; + +typedef void (*dma_async_tx_callback)(void *); + +enum dmaengine_tx_result { + DMA_TRANS_NOERROR = 0, + DMA_TRANS_READ_FAILED = 1, + DMA_TRANS_WRITE_FAILED = 2, + DMA_TRANS_ABORTED = 3, +}; + +struct dmaengine_result { + enum dmaengine_tx_result result; + u32 residue; +}; + +typedef void (*dma_async_tx_callback_result)(void *, const struct dmaengine_result *); + +struct dmaengine_unmap_data { + u16 map_cnt; + u8 to_cnt; + u8 from_cnt; + u8 bidi_cnt; + struct device *dev; + struct kref kref; + size_t len; + dma_addr_t addr[0]; +}; + +struct dma_descriptor_metadata_ops { + int (*attach)(struct dma_async_tx_descriptor *, void *, size_t); + void * (*get_ptr)(struct dma_async_tx_descriptor *, size_t *, size_t *); + int (*set_len)(struct dma_async_tx_descriptor *, size_t); +}; + +struct dma_async_tx_descriptor { + dma_cookie_t cookie; + enum dma_ctrl_flags flags; + dma_addr_t phys; + struct dma_chan___2 *chan; + dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *); + int (*desc_free)(struct dma_async_tx_descriptor *); + dma_async_tx_callback callback; + dma_async_tx_callback_result callback_result; + void *callback_param; + struct dmaengine_unmap_data *unmap; + enum dma_desc_metadata_mode desc_metadata_mode; + struct dma_descriptor_metadata_ops *metadata_ops; +}; + +struct dma_tx_state { + dma_cookie_t last; + dma_cookie_t used; + u32 residue; + u32 in_flight_bytes; +}; + +struct dma_slave_map { + const char *devname; + const char *slave; + void *param; +}; + +struct rio_switch_ops; + +struct rio_dev; + +struct rio_switch { + struct list_head node; + u8 *route_table; + u32 port_ok; + struct rio_switch_ops *ops; + spinlock_t lock; + struct rio_dev *nextdev[0]; +}; + +struct rio_mport; + +struct rio_switch_ops { + struct module *owner; + int (*add_entry)(struct rio_mport *, u16, u8, u16, u16, u8); + int (*get_entry)(struct rio_mport *, u16, u8, u16, u16, u8 *); + int (*clr_table)(struct rio_mport *, u16, u8, u16); + int (*set_domain)(struct rio_mport *, u16, u8, u8); + int (*get_domain)(struct rio_mport *, u16, u8, u8 *); + int (*em_init)(struct rio_dev *); + int (*em_handle)(struct rio_dev *, u8); +}; + +struct rio_net; + +struct rio_driver; + +union rio_pw_msg; + +struct rio_dev { + struct list_head global_list; + struct list_head net_list; + struct rio_net *net; + bool do_enum; + u16 did; + u16 vid; + u32 device_rev; + u16 asm_did; + u16 asm_vid; + u16 asm_rev; + u16 efptr; + u32 pef; + u32 swpinfo; + u32 src_ops; + u32 dst_ops; + u32 comp_tag; + u32 phys_efptr; + u32 phys_rmap; + u32 em_efptr; + u64 dma_mask; + struct rio_driver *driver; + struct device dev; + struct resource riores[16]; + int (*pwcback)(struct rio_dev *, union rio_pw_msg *, int); + u16 destid; + u8 hopcount; + struct rio_dev *prev; + atomic_t state; + struct rio_switch rswitch[0]; +}; + +struct rio_msg { + struct resource *res; + void (*mcback)(struct rio_mport *, void *, int, int); +}; + +struct rio_ops; + +struct rio_scan; + +struct rio_mport { + struct list_head dbells; + struct list_head pwrites; + struct list_head node; + struct list_head nnode; + struct rio_net *net; + struct mutex lock; + struct resource iores; + struct resource riores[16]; + struct rio_msg inb_msg[4]; + struct rio_msg outb_msg[4]; + int host_deviceid; + struct rio_ops *ops; + unsigned char id; 
+ unsigned char index; + unsigned int sys_size; + u32 phys_efptr; + u32 phys_rmap; + unsigned char name[40]; + struct device dev; + void *priv; + struct dma_device dma; + struct rio_scan *nscan; + atomic_t state; + unsigned int pwe_refcnt; +}; + +enum rio_device_state { + RIO_DEVICE_INITIALIZING = 0, + RIO_DEVICE_RUNNING = 1, + RIO_DEVICE_GONE = 2, + RIO_DEVICE_SHUTDOWN = 3, +}; + +struct rio_net { + struct list_head node; + struct list_head devices; + struct list_head switches; + struct list_head mports; + struct rio_mport *hport; + unsigned char id; + struct device dev; + void *enum_data; + void (*release)(struct rio_net *); +}; + +struct rio_driver { + struct list_head node; + char *name; + const struct rio_device_id *id_table; + int (*probe)(struct rio_dev *, const struct rio_device_id *); + void (*remove)(struct rio_dev *); + void (*shutdown)(struct rio_dev *); + int (*suspend)(struct rio_dev *, u32); + int (*resume)(struct rio_dev *); + int (*enable_wake)(struct rio_dev *, u32, int); + struct device_driver driver; +}; + +union rio_pw_msg { + struct { + u32 comptag; + u32 errdetect; + u32 is_port; + u32 ltlerrdet; + u32 padding[12]; + } em; + u32 raw[16]; +}; + +struct rio_dbell { + struct list_head node; + struct resource *res; + void (*dinb)(struct rio_mport *, void *, u16, u16, u16); + void *dev_id; +}; + +struct rio_mport_attr; + +struct rio_ops { + int (*lcread)(struct rio_mport *, int, u32, int, u32 *); + int (*lcwrite)(struct rio_mport *, int, u32, int, u32); + int (*cread)(struct rio_mport *, int, u16, u8, u32, int, u32 *); + int (*cwrite)(struct rio_mport *, int, u16, u8, u32, int, u32); + int (*dsend)(struct rio_mport *, int, u16, u16); + int (*pwenable)(struct rio_mport *, int); + int (*open_outb_mbox)(struct rio_mport *, void *, int, int); + void (*close_outb_mbox)(struct rio_mport *, int); + int (*open_inb_mbox)(struct rio_mport *, void *, int, int); + void (*close_inb_mbox)(struct rio_mport *, int); + int (*add_outb_message)(struct rio_mport *, struct rio_dev *, int, void *, size_t); + int (*add_inb_buffer)(struct rio_mport *, int, void *); + void * (*get_inb_message)(struct rio_mport *, int); + int (*map_inb)(struct rio_mport *, dma_addr_t, u64, u64, u32); + void (*unmap_inb)(struct rio_mport *, dma_addr_t); + int (*query_mport)(struct rio_mport *, struct rio_mport_attr *); + int (*map_outb)(struct rio_mport *, u16, u64, u32, u32, dma_addr_t *); + void (*unmap_outb)(struct rio_mport *, u16, u64); +}; + +struct rio_scan { + struct module *owner; + int (*enumerate)(struct rio_mport *, u32); + int (*discover)(struct rio_mport *, u32); +}; + +struct rio_mport_attr { + int flags; + int link_speed; + int link_width; + int dma_max_sge; + int dma_max_size; + int dma_align; +}; + +enum rio_write_type { + RDW_DEFAULT = 0, + RDW_ALL_NWRITE = 1, + RDW_ALL_NWRITE_R = 2, + RDW_LAST_NWRITE_R = 3, +}; + +struct rio_dma_ext { + u16 destid; + u64 rio_addr; + u8 rio_addr_u; + enum rio_write_type wr_type; +}; + +struct rio_dma_data { + struct scatterlist *sg; + unsigned int sg_len; + u64 rio_addr; + u8 rio_addr_u; + enum rio_write_type wr_type; +}; + +struct rio_scan_node { + int mport_id; + struct list_head node; + struct rio_scan *ops; +}; + +struct rio_pwrite { + struct list_head node; + int (*pwcback)(struct rio_mport *, void *, union rio_pw_msg *, int); + void *context; +}; + +struct rio_disc_work { + struct work_struct work; + struct rio_mport *mport; +}; + +enum hdmi_infoframe_type { + HDMI_INFOFRAME_TYPE_VENDOR = 129, + HDMI_INFOFRAME_TYPE_AVI = 130, + HDMI_INFOFRAME_TYPE_SPD = 
131, + HDMI_INFOFRAME_TYPE_AUDIO = 132, + HDMI_INFOFRAME_TYPE_DRM = 135, +}; + +struct hdmi_any_infoframe { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; +}; + +enum hdmi_colorspace { + HDMI_COLORSPACE_RGB = 0, + HDMI_COLORSPACE_YUV422 = 1, + HDMI_COLORSPACE_YUV444 = 2, + HDMI_COLORSPACE_YUV420 = 3, + HDMI_COLORSPACE_RESERVED4 = 4, + HDMI_COLORSPACE_RESERVED5 = 5, + HDMI_COLORSPACE_RESERVED6 = 6, + HDMI_COLORSPACE_IDO_DEFINED = 7, +}; + +enum hdmi_scan_mode { + HDMI_SCAN_MODE_NONE = 0, + HDMI_SCAN_MODE_OVERSCAN = 1, + HDMI_SCAN_MODE_UNDERSCAN = 2, + HDMI_SCAN_MODE_RESERVED = 3, +}; + +enum hdmi_colorimetry { + HDMI_COLORIMETRY_NONE = 0, + HDMI_COLORIMETRY_ITU_601 = 1, + HDMI_COLORIMETRY_ITU_709 = 2, + HDMI_COLORIMETRY_EXTENDED = 3, +}; + +enum hdmi_picture_aspect { + HDMI_PICTURE_ASPECT_NONE = 0, + HDMI_PICTURE_ASPECT_4_3 = 1, + HDMI_PICTURE_ASPECT_16_9 = 2, + HDMI_PICTURE_ASPECT_64_27 = 3, + HDMI_PICTURE_ASPECT_256_135 = 4, + HDMI_PICTURE_ASPECT_RESERVED = 5, +}; + +enum hdmi_active_aspect { + HDMI_ACTIVE_ASPECT_16_9_TOP = 2, + HDMI_ACTIVE_ASPECT_14_9_TOP = 3, + HDMI_ACTIVE_ASPECT_16_9_CENTER = 4, + HDMI_ACTIVE_ASPECT_PICTURE = 8, + HDMI_ACTIVE_ASPECT_4_3 = 9, + HDMI_ACTIVE_ASPECT_16_9 = 10, + HDMI_ACTIVE_ASPECT_14_9 = 11, + HDMI_ACTIVE_ASPECT_4_3_SP_14_9 = 13, + HDMI_ACTIVE_ASPECT_16_9_SP_14_9 = 14, + HDMI_ACTIVE_ASPECT_16_9_SP_4_3 = 15, +}; + +enum hdmi_extended_colorimetry { + HDMI_EXTENDED_COLORIMETRY_XV_YCC_601 = 0, + HDMI_EXTENDED_COLORIMETRY_XV_YCC_709 = 1, + HDMI_EXTENDED_COLORIMETRY_S_YCC_601 = 2, + HDMI_EXTENDED_COLORIMETRY_OPYCC_601 = 3, + HDMI_EXTENDED_COLORIMETRY_OPRGB = 4, + HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM = 5, + HDMI_EXTENDED_COLORIMETRY_BT2020 = 6, + HDMI_EXTENDED_COLORIMETRY_RESERVED = 7, +}; + +enum hdmi_quantization_range { + HDMI_QUANTIZATION_RANGE_DEFAULT = 0, + HDMI_QUANTIZATION_RANGE_LIMITED = 1, + HDMI_QUANTIZATION_RANGE_FULL = 2, + HDMI_QUANTIZATION_RANGE_RESERVED = 3, +}; + +enum hdmi_nups { + HDMI_NUPS_UNKNOWN = 0, + HDMI_NUPS_HORIZONTAL = 1, + HDMI_NUPS_VERTICAL = 2, + HDMI_NUPS_BOTH = 3, +}; + +enum hdmi_ycc_quantization_range { + HDMI_YCC_QUANTIZATION_RANGE_LIMITED = 0, + HDMI_YCC_QUANTIZATION_RANGE_FULL = 1, +}; + +enum hdmi_content_type { + HDMI_CONTENT_TYPE_GRAPHICS = 0, + HDMI_CONTENT_TYPE_PHOTO = 1, + HDMI_CONTENT_TYPE_CINEMA = 2, + HDMI_CONTENT_TYPE_GAME = 3, +}; + +enum hdmi_metadata_type { + HDMI_STATIC_METADATA_TYPE1 = 0, +}; + +enum hdmi_eotf { + HDMI_EOTF_TRADITIONAL_GAMMA_SDR = 0, + HDMI_EOTF_TRADITIONAL_GAMMA_HDR = 1, + HDMI_EOTF_SMPTE_ST2084 = 2, + HDMI_EOTF_BT_2100_HLG = 3, +}; + +struct hdmi_avi_infoframe { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; + enum hdmi_colorspace colorspace; + enum hdmi_scan_mode scan_mode; + enum hdmi_colorimetry colorimetry; + enum hdmi_picture_aspect picture_aspect; + enum hdmi_active_aspect active_aspect; + bool itc; + enum hdmi_extended_colorimetry extended_colorimetry; + enum hdmi_quantization_range quantization_range; + enum hdmi_nups nups; + unsigned char video_code; + enum hdmi_ycc_quantization_range ycc_quantization_range; + enum hdmi_content_type content_type; + unsigned char pixel_repeat; + short unsigned int top_bar; + short unsigned int bottom_bar; + short unsigned int left_bar; + short unsigned int right_bar; +}; + +struct hdmi_drm_infoframe { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; + enum hdmi_eotf eotf; + enum hdmi_metadata_type metadata_type; + struct { + u16 x; + u16 y; + } 
display_primaries[3]; + struct { + u16 x; + u16 y; + } white_point; + u16 max_display_mastering_luminance; + u16 min_display_mastering_luminance; + u16 max_cll; + u16 max_fall; +}; + +enum hdmi_spd_sdi { + HDMI_SPD_SDI_UNKNOWN = 0, + HDMI_SPD_SDI_DSTB = 1, + HDMI_SPD_SDI_DVDP = 2, + HDMI_SPD_SDI_DVHS = 3, + HDMI_SPD_SDI_HDDVR = 4, + HDMI_SPD_SDI_DVC = 5, + HDMI_SPD_SDI_DSC = 6, + HDMI_SPD_SDI_VCD = 7, + HDMI_SPD_SDI_GAME = 8, + HDMI_SPD_SDI_PC = 9, + HDMI_SPD_SDI_BD = 10, + HDMI_SPD_SDI_SACD = 11, + HDMI_SPD_SDI_HDDVD = 12, + HDMI_SPD_SDI_PMP = 13, +}; + +struct hdmi_spd_infoframe { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; + char vendor[8]; + char product[16]; + enum hdmi_spd_sdi sdi; +}; + +enum hdmi_audio_coding_type { + HDMI_AUDIO_CODING_TYPE_STREAM = 0, + HDMI_AUDIO_CODING_TYPE_PCM = 1, + HDMI_AUDIO_CODING_TYPE_AC3 = 2, + HDMI_AUDIO_CODING_TYPE_MPEG1 = 3, + HDMI_AUDIO_CODING_TYPE_MP3 = 4, + HDMI_AUDIO_CODING_TYPE_MPEG2 = 5, + HDMI_AUDIO_CODING_TYPE_AAC_LC = 6, + HDMI_AUDIO_CODING_TYPE_DTS = 7, + HDMI_AUDIO_CODING_TYPE_ATRAC = 8, + HDMI_AUDIO_CODING_TYPE_DSD = 9, + HDMI_AUDIO_CODING_TYPE_EAC3 = 10, + HDMI_AUDIO_CODING_TYPE_DTS_HD = 11, + HDMI_AUDIO_CODING_TYPE_MLP = 12, + HDMI_AUDIO_CODING_TYPE_DST = 13, + HDMI_AUDIO_CODING_TYPE_WMA_PRO = 14, + HDMI_AUDIO_CODING_TYPE_CXT = 15, +}; + +enum hdmi_audio_sample_size { + HDMI_AUDIO_SAMPLE_SIZE_STREAM = 0, + HDMI_AUDIO_SAMPLE_SIZE_16 = 1, + HDMI_AUDIO_SAMPLE_SIZE_20 = 2, + HDMI_AUDIO_SAMPLE_SIZE_24 = 3, +}; + +enum hdmi_audio_sample_frequency { + HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM = 0, + HDMI_AUDIO_SAMPLE_FREQUENCY_32000 = 1, + HDMI_AUDIO_SAMPLE_FREQUENCY_44100 = 2, + HDMI_AUDIO_SAMPLE_FREQUENCY_48000 = 3, + HDMI_AUDIO_SAMPLE_FREQUENCY_88200 = 4, + HDMI_AUDIO_SAMPLE_FREQUENCY_96000 = 5, + HDMI_AUDIO_SAMPLE_FREQUENCY_176400 = 6, + HDMI_AUDIO_SAMPLE_FREQUENCY_192000 = 7, +}; + +enum hdmi_audio_coding_type_ext { + HDMI_AUDIO_CODING_TYPE_EXT_CT = 0, + HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC = 1, + HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC_V2 = 2, + HDMI_AUDIO_CODING_TYPE_EXT_MPEG_SURROUND = 3, + HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC = 4, + HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_V2 = 5, + HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC = 6, + HDMI_AUDIO_CODING_TYPE_EXT_DRA = 7, + HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_SURROUND = 8, + HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC_SURROUND = 10, +}; + +struct hdmi_audio_infoframe { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; + unsigned char channels; + enum hdmi_audio_coding_type coding_type; + enum hdmi_audio_sample_size sample_size; + enum hdmi_audio_sample_frequency sample_frequency; + enum hdmi_audio_coding_type_ext coding_type_ext; + unsigned char channel_allocation; + unsigned char level_shift_value; + bool downmix_inhibit; +}; + +enum hdmi_3d_structure { + HDMI_3D_STRUCTURE_INVALID = 4294967295, + HDMI_3D_STRUCTURE_FRAME_PACKING = 0, + HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE = 1, + HDMI_3D_STRUCTURE_LINE_ALTERNATIVE = 2, + HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL = 3, + HDMI_3D_STRUCTURE_L_DEPTH = 4, + HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH = 5, + HDMI_3D_STRUCTURE_TOP_AND_BOTTOM = 6, + HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF = 8, +}; + +struct hdmi_vendor_infoframe { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; + unsigned int oui; + u8 vic; + enum hdmi_3d_structure s3d_struct; + unsigned int s3d_ext_data; +}; + +union hdmi_vendor_any_infoframe { + struct { + enum hdmi_infoframe_type type; + unsigned char version; 
+ unsigned char length; + unsigned int oui; + } any; + struct hdmi_vendor_infoframe hdmi; +}; + +union hdmi_infoframe { + struct hdmi_any_infoframe any; + struct hdmi_avi_infoframe avi; + struct hdmi_spd_infoframe spd; + union hdmi_vendor_any_infoframe vendor; + struct hdmi_audio_infoframe audio; + struct hdmi_drm_infoframe drm; +}; + +struct vc { + struct vc_data *d; + struct work_struct SAK_work; +}; + +struct vgastate { + void *vgabase; + long unsigned int membase; + __u32 memsize; + __u32 flags; + __u32 depth; + __u32 num_attr; + __u32 num_crtc; + __u32 num_gfx; + __u32 num_seq; + void *vidstate; +}; + +struct fb_fix_screeninfo { + char id[16]; + long unsigned int smem_start; + __u32 smem_len; + __u32 type; + __u32 type_aux; + __u32 visual; + __u16 xpanstep; + __u16 ypanstep; + __u16 ywrapstep; + __u32 line_length; + long unsigned int mmio_start; + __u32 mmio_len; + __u32 accel; + __u16 capabilities; + __u16 reserved[2]; +}; + +struct fb_bitfield { + __u32 offset; + __u32 length; + __u32 msb_right; +}; + +struct fb_var_screeninfo { + __u32 xres; + __u32 yres; + __u32 xres_virtual; + __u32 yres_virtual; + __u32 xoffset; + __u32 yoffset; + __u32 bits_per_pixel; + __u32 grayscale; + struct fb_bitfield red; + struct fb_bitfield green; + struct fb_bitfield blue; + struct fb_bitfield transp; + __u32 nonstd; + __u32 activate; + __u32 height; + __u32 width; + __u32 accel_flags; + __u32 pixclock; + __u32 left_margin; + __u32 right_margin; + __u32 upper_margin; + __u32 lower_margin; + __u32 hsync_len; + __u32 vsync_len; + __u32 sync; + __u32 vmode; + __u32 rotate; + __u32 colorspace; + __u32 reserved[4]; +}; + +struct fb_cmap { + __u32 start; + __u32 len; + __u16 *red; + __u16 *green; + __u16 *blue; + __u16 *transp; +}; + +enum { + FB_BLANK_UNBLANK = 0, + FB_BLANK_NORMAL = 1, + FB_BLANK_VSYNC_SUSPEND = 2, + FB_BLANK_HSYNC_SUSPEND = 3, + FB_BLANK_POWERDOWN = 4, +}; + +struct fb_copyarea { + __u32 dx; + __u32 dy; + __u32 width; + __u32 height; + __u32 sx; + __u32 sy; +}; + +struct fb_fillrect { + __u32 dx; + __u32 dy; + __u32 width; + __u32 height; + __u32 color; + __u32 rop; +}; + +struct fb_image { + __u32 dx; + __u32 dy; + __u32 width; + __u32 height; + __u32 fg_color; + __u32 bg_color; + __u8 depth; + const char *data; + struct fb_cmap cmap; +}; + +struct fbcurpos { + __u16 x; + __u16 y; +}; + +struct fb_cursor { + __u16 set; + __u16 enable; + __u16 rop; + const char *mask; + struct fbcurpos hot; + struct fb_image image; +}; + +struct fb_chroma { + __u32 redx; + __u32 greenx; + __u32 bluex; + __u32 whitex; + __u32 redy; + __u32 greeny; + __u32 bluey; + __u32 whitey; +}; + +struct fb_videomode; + +struct fb_monspecs { + struct fb_chroma chroma; + struct fb_videomode *modedb; + __u8 manufacturer[4]; + __u8 monitor[14]; + __u8 serial_no[14]; + __u8 ascii[14]; + __u32 modedb_len; + __u32 model; + __u32 serial; + __u32 year; + __u32 week; + __u32 hfmin; + __u32 hfmax; + __u32 dclkmin; + __u32 dclkmax; + __u16 input; + __u16 dpms; + __u16 signal; + __u16 vfmin; + __u16 vfmax; + __u16 gamma; + __u16 gtf: 1; + __u16 misc; + __u8 version; + __u8 revision; + __u8 max_x; + __u8 max_y; +}; + +struct fb_videomode { + const char *name; + u32 refresh; + u32 xres; + u32 yres; + u32 pixclock; + u32 left_margin; + u32 right_margin; + u32 upper_margin; + u32 lower_margin; + u32 hsync_len; + u32 vsync_len; + u32 sync; + u32 vmode; + u32 flag; +}; + +struct fb_info; + +struct fb_event { + struct fb_info *info; + void *data; +}; + +struct fb_pixmap { + u8 *addr; + u32 size; + u32 offset; + u32 buf_align; + u32 
scan_align; + u32 access_align; + u32 flags; + u32 blit_x; + u32 blit_y; + void (*writeio)(struct fb_info *, void *, void *, unsigned int); + void (*readio)(struct fb_info *, void *, void *, unsigned int); +}; + +struct backlight_device; + +struct fb_deferred_io; + +struct fb_ops; + +struct fb_tile_ops; + +struct apertures_struct; + +struct fb_info { + atomic_t count; + int node; + int flags; + int fbcon_rotate_hint; + struct mutex lock; + struct mutex mm_lock; + struct fb_var_screeninfo var; + struct fb_fix_screeninfo fix; + struct fb_monspecs monspecs; + struct work_struct queue; + struct fb_pixmap pixmap; + struct fb_pixmap sprite; + struct fb_cmap cmap; + struct list_head modelist; + struct fb_videomode *mode; + struct backlight_device *bl_dev; + struct mutex bl_curve_mutex; + u8 bl_curve[128]; + struct delayed_work deferred_work; + struct fb_deferred_io *fbdefio; + const struct fb_ops *fbops; + struct device *device; + struct device *dev; + int class_flag; + struct fb_tile_ops *tileops; + union { + char *screen_base; + char *screen_buffer; + }; + long unsigned int screen_size; + void *pseudo_palette; + u32 state; + void *fbcon_par; + void *par; + struct apertures_struct *apertures; + bool skip_vt_switch; +}; + +struct fb_blit_caps { + u32 x; + u32 y; + u32 len; + u32 flags; +}; + +struct fb_deferred_io { + long unsigned int delay; + struct mutex lock; + struct list_head pagelist; + void (*first_io)(struct fb_info *); + void (*deferred_io)(struct fb_info *, struct list_head *); +}; + +struct fb_ops { + struct module *owner; + int (*fb_open)(struct fb_info *, int); + int (*fb_release)(struct fb_info *, int); + ssize_t (*fb_read)(struct fb_info *, char *, size_t, loff_t *); + ssize_t (*fb_write)(struct fb_info *, const char *, size_t, loff_t *); + int (*fb_check_var)(struct fb_var_screeninfo *, struct fb_info *); + int (*fb_set_par)(struct fb_info *); + int (*fb_setcolreg)(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, struct fb_info *); + int (*fb_setcmap)(struct fb_cmap *, struct fb_info *); + int (*fb_blank)(int, struct fb_info *); + int (*fb_pan_display)(struct fb_var_screeninfo *, struct fb_info *); + void (*fb_fillrect)(struct fb_info *, const struct fb_fillrect *); + void (*fb_copyarea)(struct fb_info *, const struct fb_copyarea *); + void (*fb_imageblit)(struct fb_info *, const struct fb_image *); + int (*fb_cursor)(struct fb_info *, struct fb_cursor *); + int (*fb_sync)(struct fb_info *); + int (*fb_ioctl)(struct fb_info *, unsigned int, long unsigned int); + int (*fb_compat_ioctl)(struct fb_info *, unsigned int, long unsigned int); + int (*fb_mmap)(struct fb_info *, struct vm_area_struct *); + void (*fb_get_caps)(struct fb_info *, struct fb_blit_caps *, struct fb_var_screeninfo *); + void (*fb_destroy)(struct fb_info *); + int (*fb_debug_enter)(struct fb_info *); + int (*fb_debug_leave)(struct fb_info *); +}; + +struct fb_tilemap { + __u32 width; + __u32 height; + __u32 depth; + __u32 length; + const __u8 *data; +}; + +struct fb_tilerect { + __u32 sx; + __u32 sy; + __u32 width; + __u32 height; + __u32 index; + __u32 fg; + __u32 bg; + __u32 rop; +}; + +struct fb_tilearea { + __u32 sx; + __u32 sy; + __u32 dx; + __u32 dy; + __u32 width; + __u32 height; +}; + +struct fb_tileblit { + __u32 sx; + __u32 sy; + __u32 width; + __u32 height; + __u32 fg; + __u32 bg; + __u32 length; + __u32 *indices; +}; + +struct fb_tilecursor { + __u32 sx; + __u32 sy; + __u32 mode; + __u32 shape; + __u32 fg; + __u32 bg; +}; + +struct fb_tile_ops { + void (*fb_settile)(struct fb_info 
*, struct fb_tilemap *); + void (*fb_tilecopy)(struct fb_info *, struct fb_tilearea *); + void (*fb_tilefill)(struct fb_info *, struct fb_tilerect *); + void (*fb_tileblit)(struct fb_info *, struct fb_tileblit *); + void (*fb_tilecursor)(struct fb_info *, struct fb_tilecursor *); + int (*fb_get_tilemax)(struct fb_info *); +}; + +struct aperture { + resource_size_t base; + resource_size_t size; +}; + +struct apertures_struct { + unsigned int count; + struct aperture ranges[0]; +}; + +enum backlight_type { + BACKLIGHT_RAW = 1, + BACKLIGHT_PLATFORM = 2, + BACKLIGHT_FIRMWARE = 3, + BACKLIGHT_TYPE_MAX = 4, +}; + +enum backlight_scale { + BACKLIGHT_SCALE_UNKNOWN = 0, + BACKLIGHT_SCALE_LINEAR = 1, + BACKLIGHT_SCALE_NON_LINEAR = 2, +}; + +struct backlight_properties { + int brightness; + int max_brightness; + int power; + int fb_blank; + enum backlight_type type; + unsigned int state; + enum backlight_scale scale; +}; + +struct backlight_ops; + +struct backlight_device { + struct backlight_properties props; + struct mutex update_lock; + struct mutex ops_lock; + const struct backlight_ops *ops; + struct notifier_block fb_notif; + struct list_head entry; + struct device dev; + bool fb_bl_on[32]; + int use_count; +}; + +enum backlight_update_reason { + BACKLIGHT_UPDATE_HOTKEY = 0, + BACKLIGHT_UPDATE_SYSFS = 1, +}; + +enum backlight_notification { + BACKLIGHT_REGISTERED = 0, + BACKLIGHT_UNREGISTERED = 1, +}; + +struct backlight_ops { + unsigned int options; + int (*update_status)(struct backlight_device *); + int (*get_brightness)(struct backlight_device *); + int (*check_fb)(struct backlight_device *, struct fb_info *); +}; + +struct fb_cmap_user { + __u32 start; + __u32 len; + __u16 *red; + __u16 *green; + __u16 *blue; + __u16 *transp; +}; + +struct fb_modelist { + struct list_head list; + struct fb_videomode mode; +}; + +struct fb_fix_screeninfo32 { + char id[16]; + compat_caddr_t smem_start; + u32 smem_len; + u32 type; + u32 type_aux; + u32 visual; + u16 xpanstep; + u16 ypanstep; + u16 ywrapstep; + u32 line_length; + compat_caddr_t mmio_start; + u32 mmio_len; + u32 accel; + u16 reserved[3]; +}; + +struct fb_cmap32 { + u32 start; + u32 len; + compat_caddr_t red; + compat_caddr_t green; + compat_caddr_t blue; + compat_caddr_t transp; +}; + +struct dmt_videomode { + u32 dmt_id; + u32 std_2byte_code; + u32 cvt_3byte_code; + const struct fb_videomode *mode; +}; + +enum display_flags { + DISPLAY_FLAGS_HSYNC_LOW = 1, + DISPLAY_FLAGS_HSYNC_HIGH = 2, + DISPLAY_FLAGS_VSYNC_LOW = 4, + DISPLAY_FLAGS_VSYNC_HIGH = 8, + DISPLAY_FLAGS_DE_LOW = 16, + DISPLAY_FLAGS_DE_HIGH = 32, + DISPLAY_FLAGS_PIXDATA_POSEDGE = 64, + DISPLAY_FLAGS_PIXDATA_NEGEDGE = 128, + DISPLAY_FLAGS_INTERLACED = 256, + DISPLAY_FLAGS_DOUBLESCAN = 512, + DISPLAY_FLAGS_DOUBLECLK = 1024, + DISPLAY_FLAGS_SYNC_POSEDGE = 2048, + DISPLAY_FLAGS_SYNC_NEGEDGE = 4096, +}; + +struct videomode { + long unsigned int pixelclock; + u32 hactive; + u32 hfront_porch; + u32 hback_porch; + u32 hsync_len; + u32 vactive; + u32 vfront_porch; + u32 vback_porch; + u32 vsync_len; + enum display_flags flags; +}; + +struct broken_edid { + u8 manufacturer[4]; + u32 model; + u32 fix; +}; + +struct __fb_timings { + u32 dclk; + u32 hfreq; + u32 vfreq; + u32 hactive; + u32 vactive; + u32 hblank; + u32 vblank; + u32 htotal; + u32 vtotal; +}; + +struct fb_cvt_data { + u32 xres; + u32 yres; + u32 refresh; + u32 f_refresh; + u32 pixclock; + u32 hperiod; + u32 hblank; + u32 hfreq; + u32 htotal; + u32 vtotal; + u32 vsync; + u32 hsync; + u32 h_front_porch; + u32 h_back_porch; + u32 
v_front_porch; + u32 v_back_porch; + u32 h_margin; + u32 v_margin; + u32 interlace; + u32 aspect_ratio; + u32 active_pixels; + u32 flags; + u32 status; +}; + +typedef unsigned char u_char; + +struct fb_con2fbmap { + __u32 console; + __u32 framebuffer; +}; + +struct fbcon_display { + const u_char *fontdata; + int userfont; + u_short scrollmode; + u_short inverse; + short int yscroll; + int vrows; + int cursor_shape; + int con_rotate; + u32 xres_virtual; + u32 yres_virtual; + u32 height; + u32 width; + u32 bits_per_pixel; + u32 grayscale; + u32 nonstd; + u32 accel_flags; + u32 rotate; + struct fb_bitfield red; + struct fb_bitfield green; + struct fb_bitfield blue; + struct fb_bitfield transp; + const struct fb_videomode *mode; +}; + +struct fbcon_ops { + void (*bmove)(struct vc_data *, struct fb_info *, int, int, int, int, int, int); + void (*clear)(struct vc_data *, struct fb_info *, int, int, int, int); + void (*putcs)(struct vc_data *, struct fb_info *, const short unsigned int *, int, int, int, int, int); + void (*clear_margins)(struct vc_data *, struct fb_info *, int, int); + void (*cursor)(struct vc_data *, struct fb_info *, int, int, int); + int (*update_start)(struct fb_info *); + int (*rotate_font)(struct fb_info *, struct vc_data *); + struct fb_var_screeninfo var; + struct timer_list cursor_timer; + struct fb_cursor cursor_state; + struct fbcon_display *p; + struct fb_info *info; + int currcon; + int cur_blink_jiffies; + int cursor_flash; + int cursor_reset; + int blank_state; + int graphics; + int save_graphics; + int flags; + int rotate; + int cur_rotate; + char *cursor_data; + u8 *fontbuffer; + u8 *fontdata; + u8 *cursor_src; + u32 cursor_size; + u32 fd_size; +}; + +enum { + FBCON_LOGO_CANSHOW = 4294967295, + FBCON_LOGO_DRAW = 4294967294, + FBCON_LOGO_DONTSHOW = 4294967293, +}; + +typedef long unsigned int u_long; + +enum { + S1SA = 0, + S2SA = 1, + SP = 2, + DSA = 3, + CNT = 4, + DP_OCTL = 5, + CLR = 6, + BI = 8, + MBC = 9, + BLTCTL = 10, + HES = 12, + HEB = 13, + HSB = 14, + HT = 15, + VES = 16, + VEB = 17, + VSB = 18, + VT = 19, + HCIV = 20, + VCIV = 21, + TCDR = 22, + VIL = 23, + STGCTL = 24, + SSR = 25, + HRIR = 26, + SPR = 27, + CMR = 28, + SRGCTL = 29, + RRCIV = 30, + RRSC = 31, + RRCR = 34, + GIOE = 32, + GIO = 33, + SCR = 35, + SSTATUS = 36, + PRC = 37, +}; + +enum { + PADDRW = 0, + PDATA = 4, + PPMASK = 8, + PADDRR = 12, + PIDXLO = 16, + PIDXHI = 20, + PIDXDATA = 24, + PIDXCTL = 28, +}; + +enum { + CLKCTL = 2, + SYNCCTL = 3, + HSYNCPOS = 4, + PWRMNGMT = 5, + DACOP = 6, + PALETCTL = 7, + SYSCLKCTL = 8, + PIXFMT = 10, + BPP8 = 11, + BPP16 = 12, + BPP24 = 13, + BPP32 = 14, + PIXCTL1 = 16, + PIXCTL2 = 17, + SYSCLKN = 21, + SYSCLKM = 22, + SYSCLKP = 23, + SYSCLKC = 24, + PIXM0 = 32, + PIXN0 = 33, + PIXP0 = 34, + PIXC0 = 35, + CURSCTL = 48, + CURSXLO = 49, + CURSXHI = 50, + CURSYLO = 51, + CURSYHI = 52, + CURSHOTX = 53, + CURSHOTY = 54, + CURSACCTL = 55, + CURSACATTR = 56, + CURS1R = 64, + CURS1G = 65, + CURS1B = 66, + CURS2R = 67, + CURS2G = 68, + CURS2B = 69, + CURS3R = 70, + CURS3G = 71, + CURS3B = 72, + BORDR = 96, + BORDG = 97, + BORDB = 98, + MISCTL1 = 112, + MISCTL2 = 113, + MISCTL3 = 114, + KEYCTL = 120, +}; + +enum { + TVPADDRW = 0, + TVPPDATA = 4, + TVPPMASK = 8, + TVPPADRR = 12, + TVPCADRW = 16, + TVPCDATA = 20, + TVPCADRR = 28, + TVPDCCTL = 36, + TVPIDATA = 40, + TVPCRDAT = 44, + TVPCXPOL = 48, + TVPCXPOH = 52, + TVPCYPOL = 56, + TVPCYPOH = 60, +}; + +enum { + TVPIRREV = 1, + TVPIRICC = 6, + TVPIRBRC = 7, + TVPIRLAC = 15, + TVPIRTCC = 24, + TVPIRMXC = 25, + 
TVPIRCLS = 26, + TVPIRPPG = 28, + TVPIRGEC = 29, + TVPIRMIC = 30, + TVPIRPLA = 44, + TVPIRPPD = 45, + TVPIRMPD = 46, + TVPIRLPD = 47, + TVPIRCKL = 48, + TVPIRCKH = 49, + TVPIRCRL = 50, + TVPIRCRH = 51, + TVPIRCGL = 52, + TVPIRCGH = 53, + TVPIRCBL = 54, + TVPIRCBH = 55, + TVPIRCKC = 56, + TVPIRMLC = 57, + TVPIRSEN = 58, + TVPIRTMD = 59, + TVPIRRML = 60, + TVPIRRMM = 61, + TVPIRRMS = 62, + TVPIRDID = 63, + TVPIRRES = 255, +}; + +struct initvalues { + __u8 addr; + __u8 value; +}; + +struct imstt_regvals { + __u32 pitch; + __u16 hes; + __u16 heb; + __u16 hsb; + __u16 ht; + __u16 ves; + __u16 veb; + __u16 vsb; + __u16 vt; + __u16 vil; + __u8 pclk_m; + __u8 pclk_n; + __u8 pclk_p; + __u8 mlc[3]; + __u8 lckl_p[3]; +}; + +struct imstt_par { + struct imstt_regvals init; + __u32 *dc_regs; + long unsigned int cmap_regs_phys; + __u8 *cmap_regs; + __u32 ramdac; + __u32 palette[16]; +}; + +enum { + IBM = 0, + TVP = 1, +}; + +struct chips_init_reg { + unsigned char addr; + unsigned char data; +}; + +struct vesafb_par { + u32 pseudo_palette[256]; + int wc_cookie; + struct resource *region; +}; + +struct acpi_table_bgrt { + struct acpi_table_header header; + u16 version; + u8 status; + u8 image_type; + u64 image_address; + u32 image_offset_x; + u32 image_offset_y; +}; + +enum drm_panel_orientation { + DRM_MODE_PANEL_ORIENTATION_UNKNOWN = 4294967295, + DRM_MODE_PANEL_ORIENTATION_NORMAL = 0, + DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP = 1, + DRM_MODE_PANEL_ORIENTATION_LEFT_UP = 2, + DRM_MODE_PANEL_ORIENTATION_RIGHT_UP = 3, +}; + +struct bmp_file_header { + u16 id; + u32 file_size; + u32 reserved; + u32 bitmap_offset; +} __attribute__((packed)); + +struct bmp_dib_header { + u32 dib_header_size; + s32 width; + s32 height; + u16 planes; + u16 bpp; + u32 compression; + u32 bitmap_size; + u32 horz_resolution; + u32 vert_resolution; + u32 colors_used; + u32 colors_important; +}; + +struct simplefb_format { + const char *name; + u32 bits_per_pixel; + struct fb_bitfield red; + struct fb_bitfield green; + struct fb_bitfield blue; + struct fb_bitfield transp; + u32 fourcc; +}; + +struct simplefb_params { + u32 width; + u32 height; + u32 stride; + struct simplefb_format *format; +}; + +struct simplefb_par { + u32 palette[16]; +}; + +struct timing_entry { + u32 min; + u32 typ; + u32 max; +}; + +struct display_timing { + struct timing_entry pixelclock; + struct timing_entry hactive; + struct timing_entry hfront_porch; + struct timing_entry hback_porch; + struct timing_entry hsync_len; + struct timing_entry vactive; + struct timing_entry vfront_porch; + struct timing_entry vback_porch; + struct timing_entry vsync_len; + enum display_flags flags; +}; + +struct display_timings { + unsigned int num_timings; + unsigned int native_mode; + struct display_timing **timings; +}; + +struct thermal_cooling_device_ops; + +struct thermal_cooling_device { + int id; + char *type; + struct device device; + struct device_node *np; + void *devdata; + void *stats; + const struct thermal_cooling_device_ops *ops; + bool updated; + struct mutex lock; + struct list_head thermal_instances; + struct list_head node; +}; + +struct idle_cpu { + struct cpuidle_state *state_table; + long unsigned int auto_demotion_disable_flags; + bool byt_auto_demotion_disable_flag; + bool disable_promotion_to_c1e; + bool use_acpi; +}; + +struct thermal_cooling_device_ops { + int (*get_max_state)(struct thermal_cooling_device *, long unsigned int *); + int (*get_cur_state)(struct thermal_cooling_device *, long unsigned int *); + int (*set_cur_state)(struct 
thermal_cooling_device *, long unsigned int); + int (*get_requested_power)(struct thermal_cooling_device *, u32 *); + int (*state2power)(struct thermal_cooling_device *, long unsigned int, u32 *); + int (*power2state)(struct thermal_cooling_device *, u32, long unsigned int *); +}; + +struct acpi_lpi_state { + u32 min_residency; + u32 wake_latency; + u32 flags; + u32 arch_flags; + u32 res_cnt_freq; + u32 enable_parent_state; + u64 address; + u8 index; + u8 entry_method; + char desc[32]; +}; + +struct acpi_processor_power { + int count; + union { + struct acpi_processor_cx states[8]; + struct acpi_lpi_state lpi_states[8]; + }; + int timer_broadcast_on_state; +}; + +struct acpi_psd_package { + u64 num_entries; + u64 revision; + u64 domain; + u64 coord_type; + u64 num_processors; +}; + +struct acpi_pct_register { + u8 descriptor; + u16 length; + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 reserved; + u64 address; +} __attribute__((packed)); + +struct acpi_processor_px { + u64 core_frequency; + u64 power; + u64 transition_latency; + u64 bus_master_latency; + u64 control; + u64 status; +}; + +struct acpi_processor_performance { + unsigned int state; + unsigned int platform_limit; + struct acpi_pct_register control_register; + struct acpi_pct_register status_register; + short: 16; + unsigned int state_count; + int: 32; + struct acpi_processor_px *states; + struct acpi_psd_package domain_info; + cpumask_var_t shared_cpu_map; + unsigned int shared_type; + int: 32; +} __attribute__((packed)); + +struct acpi_tsd_package { + u64 num_entries; + u64 revision; + u64 domain; + u64 coord_type; + u64 num_processors; +}; + +struct acpi_processor_tx_tss { + u64 freqpercentage; + u64 power; + u64 transition_latency; + u64 control; + u64 status; +}; + +struct acpi_processor_tx { + u16 power; + u16 performance; +}; + +struct acpi_processor; + +struct acpi_processor_throttling { + unsigned int state; + unsigned int platform_limit; + struct acpi_pct_register control_register; + struct acpi_pct_register status_register; + short: 16; + unsigned int state_count; + int: 32; + struct acpi_processor_tx_tss *states_tss; + struct acpi_tsd_package domain_info; + cpumask_var_t shared_cpu_map; + int (*acpi_processor_get_throttling)(struct acpi_processor *); + int (*acpi_processor_set_throttling)(struct acpi_processor *, int, bool); + u32 address; + u8 duty_offset; + u8 duty_width; + u8 tsd_valid_flag; + char: 8; + unsigned int shared_type; + struct acpi_processor_tx states[16]; + int: 32; +} __attribute__((packed)); + +struct acpi_processor_lx { + int px; + int tx; +}; + +struct acpi_processor_limit { + struct acpi_processor_lx state; + struct acpi_processor_lx thermal; + struct acpi_processor_lx user; +}; + +struct acpi_processor { + acpi_handle handle; + u32 acpi_id; + phys_cpuid_t phys_id; + u32 id; + u32 pblk; + int performance_platform_limit; + int throttling_platform_limit; + struct acpi_processor_flags flags; + struct acpi_processor_power power; + struct acpi_processor_performance *performance; + struct acpi_processor_throttling throttling; + struct acpi_processor_limit limit; + struct thermal_cooling_device *cdev; + struct device *dev; + struct freq_qos_request perflib_req; + struct freq_qos_request thermal_req; +}; + +enum ipmi_addr_src { + SI_INVALID = 0, + SI_HOTMOD = 1, + SI_HARDCODED = 2, + SI_SPMI = 3, + SI_ACPI = 4, + SI_SMBIOS = 5, + SI_PCI = 6, + SI_DEVICETREE = 7, + SI_PLATFORM = 8, + SI_LAST = 9, +}; + +struct dmi_header { + u8 type; + u8 length; + u16 handle; +}; + +enum si_type { + SI_TYPE_INVALID 
= 0, + SI_KCS = 1, + SI_SMIC = 2, + SI_BT = 3, + SI_TYPE_MAX = 4, +}; + +enum ipmi_addr_space { + IPMI_IO_ADDR_SPACE = 0, + IPMI_MEM_ADDR_SPACE = 1, +}; + +enum ipmi_plat_interface_type { + IPMI_PLAT_IF_SI = 0, + IPMI_PLAT_IF_SSIF = 1, +}; + +struct ipmi_plat_data { + enum ipmi_plat_interface_type iftype; + unsigned int type; + unsigned int space; + long unsigned int addr; + unsigned int regspacing; + unsigned int regsize; + unsigned int regshift; + unsigned int irq; + unsigned int slave_addr; + enum ipmi_addr_src addr_source; +}; + +struct ipmi_dmi_info { + enum si_type si_type; + unsigned int space; + long unsigned int addr; + u8 slave_addr; + struct ipmi_dmi_info *next; +}; + +typedef u16 acpi_owner_id; + +union acpi_name_union { + u32 integer; + char ascii[4]; +}; + +struct acpi_table_desc { + acpi_physical_address address; + struct acpi_table_header *pointer; + u32 length; + union acpi_name_union signature; + acpi_owner_id owner_id; + u8 flags; + u16 validation_count; +}; + +struct acpi_madt_io_sapic { + struct acpi_subtable_header header; + u8 id; + u8 reserved; + u32 global_irq_base; + u64 address; +}; + +struct acpi_madt_interrupt_source { + struct acpi_subtable_header header; + u16 inti_flags; + u8 type; + u8 id; + u8 eid; + u8 io_sapic_vector; + u32 global_irq; + u32 flags; +}; + +struct acpi_madt_generic_interrupt { + struct acpi_subtable_header header; + u16 reserved; + u32 cpu_interface_number; + u32 uid; + u32 flags; + u32 parking_version; + u32 performance_interrupt; + u64 parked_address; + u64 base_address; + u64 gicv_base_address; + u64 gich_base_address; + u32 vgic_interrupt; + u64 gicr_base_address; + u64 arm_mpidr; + u8 efficiency_class; + u8 reserved2[1]; + u16 spe_interrupt; +} __attribute__((packed)); + +struct acpi_madt_generic_distributor { + struct acpi_subtable_header header; + u16 reserved; + u32 gic_id; + u64 base_address; + u32 global_irq_base; + u8 version; + u8 reserved2[3]; +}; + +enum acpi_subtable_type { + ACPI_SUBTABLE_COMMON = 0, + ACPI_SUBTABLE_HMAT = 1, +}; + +struct acpi_subtable_entry { + union acpi_subtable_headers *hdr; + enum acpi_subtable_type type; +}; + +enum acpi_predicate { + all_versions = 0, + less_than_or_equal = 1, + equal = 2, + greater_than_or_equal = 3, +}; + +struct acpi_platform_list { + char oem_id[7]; + char oem_table_id[9]; + u32 oem_revision; + char *table; + enum acpi_predicate pred; + char *reason; + u32 data; +}; + +typedef u32 (*acpi_interface_handler)(acpi_string, u32); + +struct acpi_osi_entry { + char string[64]; + bool enable; +}; + +struct acpi_osi_config { + u8 default_disabling; + unsigned int linux_enable: 1; + unsigned int linux_dmi: 1; + unsigned int linux_cmdline: 1; + unsigned int darwin_enable: 1; + unsigned int darwin_dmi: 1; + unsigned int darwin_cmdline: 1; +}; + +struct acpi_predefined_names { + const char *name; + u8 type; + char *val; +}; + +typedef u32 (*acpi_osd_handler)(void *); + +typedef void (*acpi_osd_exec_callback)(void *); + +typedef u32 (*acpi_gpe_handler)(acpi_handle, u32, void *); + +typedef void (*acpi_notify_handler)(acpi_handle, u32, void *); + +struct acpi_pci_id { + u16 segment; + u16 bus; + u16 device; + u16 function; +}; + +struct acpi_mem_mapping { + acpi_physical_address physical_address; + u8 *logical_address; + acpi_size length; + struct acpi_mem_mapping *next_mm; +}; + +struct acpi_mem_space_context { + u32 length; + acpi_physical_address address; + struct acpi_mem_mapping *cur_mm; + struct acpi_mem_mapping *first_mm; +}; + +typedef enum { + OSL_GLOBAL_LOCK_HANDLER = 0, + 
OSL_NOTIFY_HANDLER = 1, + OSL_GPE_HANDLER = 2, + OSL_DEBUGGER_MAIN_THREAD = 3, + OSL_DEBUGGER_EXEC_THREAD = 4, + OSL_EC_POLL_HANDLER = 5, + OSL_EC_BURST_HANDLER = 6, +} acpi_execute_type; + +struct acpi_debugger_ops { + int (*create_thread)(acpi_osd_exec_callback, void *); + ssize_t (*write_log)(const char *); + ssize_t (*read_cmd)(char *, size_t); + int (*wait_command_ready)(bool, char *, size_t); + int (*notify_command_complete)(); +}; + +struct acpi_debugger { + const struct acpi_debugger_ops *ops; + struct module *owner; + struct mutex lock; +}; + +union acpi_operand_object; + +struct acpi_namespace_node { + union acpi_operand_object *object; + u8 descriptor_type; + u8 type; + u16 flags; + union acpi_name_union name; + struct acpi_namespace_node *parent; + struct acpi_namespace_node *child; + struct acpi_namespace_node *peer; + acpi_owner_id owner_id; +}; + +struct acpi_object_common { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; +}; + +struct acpi_object_integer { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 fill[3]; + u64 value; +}; + +struct acpi_object_string { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + char *pointer; + u32 length; +}; + +struct acpi_object_buffer { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 *pointer; + u32 length; + u32 aml_length; + u8 *aml_start; + struct acpi_namespace_node *node; +}; + +struct acpi_object_package { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + struct acpi_namespace_node *node; + union acpi_operand_object **elements; + u8 *aml_start; + u32 aml_length; + u32 count; +}; + +struct acpi_object_event { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + void *os_semaphore; +}; + +struct acpi_walk_state; + +typedef acpi_status (*acpi_internal_method)(struct acpi_walk_state *); + +struct acpi_object_method { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 info_flags; + u8 param_count; + u8 sync_level; + union acpi_operand_object *mutex; + union acpi_operand_object *node; + u8 *aml_start; + union { + acpi_internal_method implementation; + union acpi_operand_object *handler; + } dispatch; + u32 aml_length; + acpi_owner_id owner_id; + u8 thread_count; +}; + +struct acpi_thread_state; + +struct acpi_object_mutex { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 sync_level; + u16 acquisition_depth; + void *os_mutex; + u64 thread_id; + struct acpi_thread_state *owner_thread; + union acpi_operand_object *prev; + union acpi_operand_object *next; + struct acpi_namespace_node *node; + u8 original_sync_level; +}; + +struct acpi_object_region { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 space_id; + struct acpi_namespace_node *node; + union acpi_operand_object *handler; + union acpi_operand_object *next; + acpi_physical_address address; + u32 length; +}; + +struct acpi_object_notify_common { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + union acpi_operand_object *notify_list[2]; + union 
acpi_operand_object *handler; +}; + +struct acpi_gpe_block_info; + +struct acpi_object_device { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + union acpi_operand_object *notify_list[2]; + union acpi_operand_object *handler; + struct acpi_gpe_block_info *gpe_block; +}; + +struct acpi_object_power_resource { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + union acpi_operand_object *notify_list[2]; + union acpi_operand_object *handler; + u32 system_level; + u32 resource_order; +}; + +struct acpi_object_processor { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 proc_id; + u8 length; + union acpi_operand_object *notify_list[2]; + union acpi_operand_object *handler; + acpi_io_address address; +}; + +struct acpi_object_thermal_zone { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + union acpi_operand_object *notify_list[2]; + union acpi_operand_object *handler; +}; + +struct acpi_object_field_common { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 field_flags; + u8 attribute; + u8 access_byte_width; + struct acpi_namespace_node *node; + u32 bit_length; + u32 base_byte_offset; + u32 value; + u8 start_field_bit_offset; + u8 access_length; + union acpi_operand_object *region_obj; +}; + +struct acpi_object_region_field { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 field_flags; + u8 attribute; + u8 access_byte_width; + struct acpi_namespace_node *node; + u32 bit_length; + u32 base_byte_offset; + u32 value; + u8 start_field_bit_offset; + u8 access_length; + u16 resource_length; + union acpi_operand_object *region_obj; + u8 *resource_buffer; + u16 pin_number_index; + u8 *internal_pcc_buffer; +}; + +struct acpi_object_buffer_field { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 field_flags; + u8 attribute; + u8 access_byte_width; + struct acpi_namespace_node *node; + u32 bit_length; + u32 base_byte_offset; + u32 value; + u8 start_field_bit_offset; + u8 access_length; + u8 is_create_field; + union acpi_operand_object *buffer_obj; +}; + +struct acpi_object_bank_field { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 field_flags; + u8 attribute; + u8 access_byte_width; + struct acpi_namespace_node *node; + u32 bit_length; + u32 base_byte_offset; + u32 value; + u8 start_field_bit_offset; + u8 access_length; + union acpi_operand_object *region_obj; + union acpi_operand_object *bank_obj; +}; + +struct acpi_object_index_field { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 field_flags; + u8 attribute; + u8 access_byte_width; + struct acpi_namespace_node *node; + u32 bit_length; + u32 base_byte_offset; + u32 value; + u8 start_field_bit_offset; + u8 access_length; + union acpi_operand_object *index_obj; + union acpi_operand_object *data_obj; +}; + +struct acpi_object_notify_handler { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + struct acpi_namespace_node *node; + u32 handler_type; + acpi_notify_handler handler; + void *context; + union acpi_operand_object 
*next[2]; +}; + +struct acpi_object_addr_handler { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 space_id; + u8 handler_flags; + acpi_adr_space_handler handler; + struct acpi_namespace_node *node; + void *context; + void *context_mutex; + acpi_adr_space_setup setup; + union acpi_operand_object *region_list; + union acpi_operand_object *next; +}; + +struct acpi_object_reference { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 class; + u8 target_type; + u8 resolved; + void *object; + struct acpi_namespace_node *node; + union acpi_operand_object **where; + u8 *index_pointer; + u8 *aml; + u32 value; +}; + +struct acpi_object_extra { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + struct acpi_namespace_node *method_REG; + struct acpi_namespace_node *scope_node; + void *region_context; + u8 *aml_start; + u32 aml_length; +}; + +struct acpi_object_data { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + acpi_object_handler handler; + void *pointer; +}; + +struct acpi_object_cache_list { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + union acpi_operand_object *next; +}; + +union acpi_operand_object { + struct acpi_object_common common; + struct acpi_object_integer integer; + struct acpi_object_string string; + struct acpi_object_buffer buffer; + struct acpi_object_package package; + struct acpi_object_event event; + struct acpi_object_method method; + struct acpi_object_mutex mutex; + struct acpi_object_region region; + struct acpi_object_notify_common common_notify; + struct acpi_object_device device; + struct acpi_object_power_resource power_resource; + struct acpi_object_processor processor; + struct acpi_object_thermal_zone thermal_zone; + struct acpi_object_field_common common_field; + struct acpi_object_region_field field; + struct acpi_object_buffer_field buffer_field; + struct acpi_object_bank_field bank_field; + struct acpi_object_index_field index_field; + struct acpi_object_notify_handler notify; + struct acpi_object_addr_handler address_space; + struct acpi_object_reference reference; + struct acpi_object_extra extra; + struct acpi_object_data data; + struct acpi_object_cache_list cache; + struct acpi_namespace_node node; +}; + +union acpi_parse_object; + +union acpi_generic_state; + +struct acpi_parse_state { + u8 *aml_start; + u8 *aml; + u8 *aml_end; + u8 *pkg_start; + u8 *pkg_end; + union acpi_parse_object *start_op; + struct acpi_namespace_node *start_node; + union acpi_generic_state *scope; + union acpi_parse_object *start_scope; + u32 aml_size; +}; + +typedef acpi_status (*acpi_parse_downwards)(struct acpi_walk_state *, union acpi_parse_object **); + +typedef acpi_status (*acpi_parse_upwards)(struct acpi_walk_state *); + +struct acpi_opcode_info; + +struct acpi_walk_state { + struct acpi_walk_state *next; + u8 descriptor_type; + u8 walk_type; + u16 opcode; + u8 next_op_info; + u8 num_operands; + u8 operand_index; + acpi_owner_id owner_id; + u8 last_predicate; + u8 current_result; + u8 return_used; + u8 scope_depth; + u8 pass_number; + u8 namespace_override; + u8 result_size; + u8 result_count; + u8 *aml; + u32 arg_types; + u32 method_breakpoint; + u32 user_breakpoint; + u32 parse_flags; + struct acpi_parse_state parser_state; + u32 prev_arg_types; + u32 
arg_count; + u16 method_nesting_depth; + u8 method_is_nested; + struct acpi_namespace_node arguments[7]; + struct acpi_namespace_node local_variables[8]; + union acpi_operand_object *operands[9]; + union acpi_operand_object **params; + u8 *aml_last_while; + union acpi_operand_object **caller_return_desc; + union acpi_generic_state *control_state; + struct acpi_namespace_node *deferred_node; + union acpi_operand_object *implicit_return_obj; + struct acpi_namespace_node *method_call_node; + union acpi_parse_object *method_call_op; + union acpi_operand_object *method_desc; + struct acpi_namespace_node *method_node; + char *method_pathname; + union acpi_parse_object *op; + const struct acpi_opcode_info *op_info; + union acpi_parse_object *origin; + union acpi_operand_object *result_obj; + union acpi_generic_state *results; + union acpi_operand_object *return_desc; + union acpi_generic_state *scope_info; + union acpi_parse_object *prev_op; + union acpi_parse_object *next_op; + struct acpi_thread_state *thread; + acpi_parse_downwards descending_callback; + acpi_parse_upwards ascending_callback; +}; + +struct acpi_gpe_handler_info { + acpi_gpe_handler address; + void *context; + struct acpi_namespace_node *method_node; + u8 original_flags; + u8 originally_enabled; +}; + +struct acpi_gpe_notify_info { + struct acpi_namespace_node *device_node; + struct acpi_gpe_notify_info *next; +}; + +union acpi_gpe_dispatch_info { + struct acpi_namespace_node *method_node; + struct acpi_gpe_handler_info *handler; + struct acpi_gpe_notify_info *notify_list; +}; + +struct acpi_gpe_register_info; + +struct acpi_gpe_event_info { + union acpi_gpe_dispatch_info dispatch; + struct acpi_gpe_register_info *register_info; + u8 flags; + u8 gpe_number; + u8 runtime_count; + u8 disable_for_dispatch; +}; + +struct acpi_gpe_address { + u8 space_id; + u64 address; +}; + +struct acpi_gpe_register_info { + struct acpi_gpe_address status_address; + struct acpi_gpe_address enable_address; + u16 base_gpe_number; + u8 enable_for_wake; + u8 enable_for_run; + u8 mask_for_run; + u8 enable_mask; +}; + +struct acpi_gpe_xrupt_info; + +struct acpi_gpe_block_info { + struct acpi_namespace_node *node; + struct acpi_gpe_block_info *previous; + struct acpi_gpe_block_info *next; + struct acpi_gpe_xrupt_info *xrupt_block; + struct acpi_gpe_register_info *register_info; + struct acpi_gpe_event_info *event_info; + u64 address; + u32 register_count; + u16 gpe_count; + u16 block_base_number; + u8 space_id; + u8 initialized; +}; + +struct acpi_gpe_xrupt_info { + struct acpi_gpe_xrupt_info *previous; + struct acpi_gpe_xrupt_info *next; + struct acpi_gpe_block_info *gpe_block_list_head; + u32 interrupt_number; +}; + +struct acpi_common_state { + void *next; + u8 descriptor_type; + u8 flags; + u16 value; + u16 state; +}; + +struct acpi_update_state { + void *next; + u8 descriptor_type; + u8 flags; + u16 value; + u16 state; + union acpi_operand_object *object; +}; + +struct acpi_pkg_state { + void *next; + u8 descriptor_type; + u8 flags; + u16 value; + u16 state; + u32 index; + union acpi_operand_object *source_object; + union acpi_operand_object *dest_object; + struct acpi_walk_state *walk_state; + void *this_target_obj; + u32 num_packages; +}; + +struct acpi_control_state { + void *next; + u8 descriptor_type; + u8 flags; + u16 value; + u16 state; + u16 opcode; + union acpi_parse_object *predicate_op; + u8 *aml_predicate_start; + u8 *package_end; + u64 loop_timeout; +}; + +union acpi_parse_value { + u64 integer; + u32 size; + char *string; + u8 *buffer; 
+ char *name; + union acpi_parse_object *arg; +}; + +struct acpi_parse_obj_common { + union acpi_parse_object *parent; + u8 descriptor_type; + u8 flags; + u16 aml_opcode; + u8 *aml; + union acpi_parse_object *next; + struct acpi_namespace_node *node; + union acpi_parse_value value; + u8 arg_list_length; + u16 disasm_flags; + u8 disasm_opcode; + char *operator_symbol; + char aml_op_name[16]; +}; + +struct acpi_parse_obj_named { + union acpi_parse_object *parent; + u8 descriptor_type; + u8 flags; + u16 aml_opcode; + u8 *aml; + union acpi_parse_object *next; + struct acpi_namespace_node *node; + union acpi_parse_value value; + u8 arg_list_length; + u16 disasm_flags; + u8 disasm_opcode; + char *operator_symbol; + char aml_op_name[16]; + char *path; + u8 *data; + u32 length; + u32 name; +}; + +struct acpi_parse_obj_asl { + union acpi_parse_object *parent; + u8 descriptor_type; + u8 flags; + u16 aml_opcode; + u8 *aml; + union acpi_parse_object *next; + struct acpi_namespace_node *node; + union acpi_parse_value value; + u8 arg_list_length; + u16 disasm_flags; + u8 disasm_opcode; + char *operator_symbol; + char aml_op_name[16]; + union acpi_parse_object *child; + union acpi_parse_object *parent_method; + char *filename; + u8 file_changed; + char *parent_filename; + char *external_name; + char *namepath; + char name_seg[4]; + u32 extra_value; + u32 column; + u32 line_number; + u32 logical_line_number; + u32 logical_byte_offset; + u32 end_line; + u32 end_logical_line; + u32 acpi_btype; + u32 aml_length; + u32 aml_subtree_length; + u32 final_aml_length; + u32 final_aml_offset; + u32 compile_flags; + u16 parse_opcode; + u8 aml_opcode_length; + u8 aml_pkg_len_bytes; + u8 extra; + char parse_op_name[20]; +}; + +union acpi_parse_object { + struct acpi_parse_obj_common common; + struct acpi_parse_obj_named named; + struct acpi_parse_obj_asl asl; +}; + +struct acpi_scope_state { + void *next; + u8 descriptor_type; + u8 flags; + u16 value; + u16 state; + struct acpi_namespace_node *node; +}; + +struct acpi_pscope_state { + void *next; + u8 descriptor_type; + u8 flags; + u16 value; + u16 state; + u32 arg_count; + union acpi_parse_object *op; + u8 *arg_end; + u8 *pkg_end; + u32 arg_list; +}; + +struct acpi_thread_state { + void *next; + u8 descriptor_type; + u8 flags; + u16 value; + u16 state; + u8 current_sync_level; + struct acpi_walk_state *walk_state_list; + union acpi_operand_object *acquired_mutex_list; + u64 thread_id; +}; + +struct acpi_result_values { + void *next; + u8 descriptor_type; + u8 flags; + u16 value; + u16 state; + union acpi_operand_object *obj_desc[8]; +}; + +struct acpi_global_notify_handler { + acpi_notify_handler handler; + void *context; +}; + +struct acpi_notify_info { + void *next; + u8 descriptor_type; + u8 flags; + u16 value; + u16 state; + u8 handler_list_id; + struct acpi_namespace_node *node; + union acpi_operand_object *handler_list_head; + struct acpi_global_notify_handler *global; +}; + +union acpi_generic_state { + struct acpi_common_state common; + struct acpi_control_state control; + struct acpi_update_state update; + struct acpi_scope_state scope; + struct acpi_pscope_state parse_scope; + struct acpi_pkg_state pkg; + struct acpi_thread_state thread; + struct acpi_result_values results; + struct acpi_notify_info notify; +}; + +struct acpi_opcode_info { + char *name; + u32 parse_args; + u32 runtime_args; + u16 flags; + u8 object_type; + u8 class; + u8 type; +}; + +struct acpi_os_dpc { + acpi_osd_exec_callback function; + void *context; + struct work_struct work; +}; + 
+struct acpi_ioremap { + struct list_head list; + void *virt; + acpi_physical_address phys; + acpi_size size; + union { + long unsigned int refcount; + struct rcu_work rwork; + } track; +}; + +struct acpi_hp_work { + struct work_struct work; + struct acpi_device *adev; + u32 src; +}; + +struct acpi_pld_info { + u8 revision; + u8 ignore_color; + u8 red; + u8 green; + u8 blue; + u16 width; + u16 height; + u8 user_visible; + u8 dock; + u8 lid; + u8 panel; + u8 vertical_position; + u8 horizontal_position; + u8 shape; + u8 group_orientation; + u8 group_token; + u8 group_position; + u8 bay; + u8 ejectable; + u8 ospm_eject_required; + u8 cabinet_number; + u8 card_cage_number; + u8 reference; + u8 rotation; + u8 order; + u8 reserved; + u16 vertical_offset; + u16 horizontal_offset; +}; + +struct acpi_handle_list { + u32 count; + acpi_handle handles[10]; +}; + +struct acpi_device_bus_id { + const char *bus_id; + struct ida instance_ida; + struct list_head node; +}; + +struct acpi_dev_match_info { + struct acpi_device_id hid[2]; + const char *uid; + s64 hrv; +}; + +struct nvs_region { + __u64 phys_start; + __u64 size; + struct list_head node; +}; + +struct nvs_page { + long unsigned int phys_start; + unsigned int size; + void *kaddr; + void *data; + bool unmap; + struct list_head node; +}; + +struct acpi_wakeup_handler { + struct list_head list_node; + bool (*wakeup)(void *); + void *context; +}; + +typedef u32 acpi_event_status; + +struct acpi_table_facs { + char signature[4]; + u32 length; + u32 hardware_signature; + u32 firmware_waking_vector; + u32 global_lock; + u32 flags; + u64 xfirmware_waking_vector; + u8 version; + u8 reserved[3]; + u32 ospm_flags; + u8 reserved1[24]; +}; + +struct acpi_hardware_id { + struct list_head list; + const char *id; +}; + +struct acpi_data_node { + const char *name; + acpi_handle handle; + struct fwnode_handle fwnode; + struct fwnode_handle *parent; + struct acpi_device_data data; + struct list_head sibling; + struct kobject kobj; + struct completion kobj_done; +}; + +struct acpi_data_node_attr { + struct attribute attr; + ssize_t (*show)(struct acpi_data_node *, char *); + ssize_t (*store)(struct acpi_data_node *, const char *, size_t); +}; + +struct pm_domain_data { + struct list_head list_node; + struct device *dev; +}; + +struct acpi_device_physical_node { + unsigned int node_id; + struct list_head node; + struct device *dev; + bool put_online: 1; +}; + +typedef u32 (*acpi_event_handler)(void *); + +typedef acpi_status (*acpi_table_handler)(u32, void *, void *); + +enum acpi_bus_device_type { + ACPI_BUS_TYPE_DEVICE = 0, + ACPI_BUS_TYPE_POWER = 1, + ACPI_BUS_TYPE_PROCESSOR = 2, + ACPI_BUS_TYPE_THERMAL = 3, + ACPI_BUS_TYPE_POWER_BUTTON = 4, + ACPI_BUS_TYPE_SLEEP_BUTTON = 5, + ACPI_BUS_TYPE_ECDT_EC = 6, + ACPI_BUS_DEVICE_TYPE_COUNT = 7, +}; + +struct acpi_osc_context { + char *uuid_str; + int rev; + struct acpi_buffer cap; + struct acpi_buffer ret; +}; + +struct acpi_pnp_device_id { + u32 length; + char *string; +}; + +struct acpi_pnp_device_id_list { + u32 count; + u32 list_size; + struct acpi_pnp_device_id ids[0]; +}; + +struct acpi_device_info { + u32 info_size; + u32 name; + acpi_object_type type; + u8 param_count; + u16 valid; + u8 flags; + u8 highest_dstates[4]; + u8 lowest_dstates[5]; + u64 address; + struct acpi_pnp_device_id hardware_id; + struct acpi_pnp_device_id unique_id; + struct acpi_pnp_device_id class_code; + struct acpi_pnp_device_id_list compatible_id_list; +}; + +struct acpi_table_spcr { + struct acpi_table_header header; + u8 interface_type; + 
u8 reserved[3]; + struct acpi_generic_address serial_port; + u8 interrupt_type; + u8 pc_interrupt; + u32 interrupt; + u8 baud_rate; + u8 parity; + u8 stop_bits; + u8 flow_control; + u8 terminal_type; + u8 reserved1; + u16 pci_device_id; + u16 pci_vendor_id; + u8 pci_bus; + u8 pci_device; + u8 pci_function; + u32 pci_flags; + u8 pci_segment; + u32 reserved2; +} __attribute__((packed)); + +struct acpi_table_stao { + struct acpi_table_header header; + u8 ignore_uart; +} __attribute__((packed)); + +enum acpi_reconfig_event { + ACPI_RECONFIG_DEVICE_ADD = 0, + ACPI_RECONFIG_DEVICE_REMOVE = 1, +}; + +struct acpi_probe_entry; + +typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *, struct acpi_probe_entry *); + +struct acpi_probe_entry { + __u8 id[5]; + __u8 type; + acpi_probe_entry_validate_subtbl subtable_valid; + union { + acpi_tbl_table_handler probe_table; + acpi_tbl_entry_handler probe_subtbl; + }; + kernel_ulong_t driver_data; +}; + +struct acpi_dep_data { + struct list_head node; + acpi_handle supplier; + acpi_handle consumer; +}; + +struct acpi_table_events_work { + struct work_struct work; + void *table; + u32 event; +}; + +struct resource_win { + struct resource res; + resource_size_t offset; +}; + +struct irq_override_cmp { + const struct dmi_system_id *system; + unsigned char irq; + unsigned char triggering; + unsigned char polarity; + unsigned char shareable; +}; + +struct res_proc_context { + struct list_head *list; + int (*preproc)(struct acpi_resource *, void *); + void *preproc_data; + int count; + int error; +}; + +struct acpi_processor_errata { + u8 smp; + struct { + u8 throttle: 1; + u8 fdma: 1; + u8 reserved: 6; + u32 bmisx; + } piix4; +}; + +struct acpi_table_ecdt { + struct acpi_table_header header; + struct acpi_generic_address control; + struct acpi_generic_address data; + u32 uid; + u8 gpe; + u8 id[1]; +} __attribute__((packed)); + +struct transaction; + +struct acpi_ec { + acpi_handle handle; + int gpe; + int irq; + long unsigned int command_addr; + long unsigned int data_addr; + bool global_lock; + long unsigned int flags; + long unsigned int reference_count; + struct mutex mutex; + wait_queue_head_t wait; + struct list_head list; + struct transaction *curr; + spinlock_t lock; + struct work_struct work; + long unsigned int timestamp; + long unsigned int nr_pending_queries; + bool busy_polling; + unsigned int polling_guard; +}; + +struct transaction { + const u8 *wdata; + u8 *rdata; + short unsigned int irq_count; + u8 command; + u8 wi; + u8 ri; + u8 wlen; + u8 rlen; + u8 flags; +}; + +typedef int (*acpi_ec_query_func)(void *); + +enum ec_command { + ACPI_EC_COMMAND_READ = 128, + ACPI_EC_COMMAND_WRITE = 129, + ACPI_EC_BURST_ENABLE = 130, + ACPI_EC_BURST_DISABLE = 131, + ACPI_EC_COMMAND_QUERY = 132, +}; + +enum { + EC_FLAGS_QUERY_ENABLED = 0, + EC_FLAGS_QUERY_PENDING = 1, + EC_FLAGS_QUERY_GUARDING = 2, + EC_FLAGS_EVENT_HANDLER_INSTALLED = 3, + EC_FLAGS_EC_HANDLER_INSTALLED = 4, + EC_FLAGS_QUERY_METHODS_INSTALLED = 5, + EC_FLAGS_STARTED = 6, + EC_FLAGS_STOPPED = 7, + EC_FLAGS_EVENTS_MASKED = 8, +}; + +struct acpi_ec_query_handler { + struct list_head node; + acpi_ec_query_func func; + acpi_handle handle; + void *data; + u8 query_bit; + struct kref kref; +}; + +struct acpi_ec_query { + struct transaction transaction; + struct work_struct work; + struct acpi_ec_query_handler *handler; +}; + +struct dock_station { + acpi_handle handle; + long unsigned int last_dock_time; + u32 flags; + struct list_head dependent_devices; + struct list_head sibling; 
+ struct platform_device *dock_device; +}; + +struct dock_dependent_device { + struct list_head list; + struct acpi_device *adev; +}; + +enum dock_callback_type { + DOCK_CALL_HANDLER = 0, + DOCK_CALL_FIXUP = 1, + DOCK_CALL_UEVENT = 2, +}; + +struct acpi_pci_root_ops; + +struct acpi_pci_root_info { + struct acpi_pci_root *root; + struct acpi_device *bridge; + struct acpi_pci_root_ops *ops; + struct list_head resources; + char name[16]; +}; + +struct acpi_pci_root_ops { + struct pci_ops *pci_ops; + int (*init_info)(struct acpi_pci_root_info *); + void (*release_info)(struct acpi_pci_root_info *); + int (*prepare_resources)(struct acpi_pci_root_info *); +}; + +struct pci_osc_bit_struct { + u32 bit; + char *desc; +}; + +struct acpi_handle_node { + struct list_head node; + acpi_handle handle; +}; + +struct acpi_pci_link_irq { + u32 active; + u8 triggering; + u8 polarity; + u8 resource_type; + u8 possible_count; + u32 possible[16]; + u8 initialized: 1; + u8 reserved: 7; +}; + +struct acpi_pci_link { + struct list_head list; + struct acpi_device *device; + struct acpi_pci_link_irq irq; + int refcnt; +}; + +struct acpi_pci_routing_table { + u32 length; + u32 pin; + u64 address; + u32 source_index; + char source[4]; +}; + +struct acpi_prt_entry { + struct acpi_pci_id id; + u8 pin; + acpi_handle link; + u32 index; +}; + +struct prt_quirk { + const struct dmi_system_id *system; + unsigned int segment; + unsigned int bus; + unsigned int device; + unsigned char pin; + const char *source; + const char *actual_source; +}; + +struct lpss_clk_data { + const char *name; + struct clk *clk; +}; + +struct lpss_private_data; + +struct lpss_device_desc { + unsigned int flags; + const char *clk_con_id; + unsigned int prv_offset; + size_t prv_size_override; + struct property_entry *properties; + void (*setup)(struct lpss_private_data *); + bool resume_from_noirq; +}; + +struct lpss_private_data { + struct acpi_device *adev; + void *mmio_base; + resource_size_t mmio_size; + unsigned int fixed_clk_rate; + struct clk *clk; + const struct lpss_device_desc *dev_desc; + u32 prv_reg_ctx[9]; +}; + +struct lpss_device_links { + const char *supplier_hid; + const char *supplier_uid; + const char *consumer_hid; + const char *consumer_uid; + u32 flags; + const struct dmi_system_id *dep_missing_ids; +}; + +struct hid_uid { + const char *hid; + const char *uid; +}; + +struct fch_clk_data { + void *base; + u32 is_rv; +}; + +struct apd_private_data; + +struct apd_device_desc { + unsigned int fixed_clk_rate; + struct property_entry *properties; + int (*setup)(struct apd_private_data *); +}; + +struct apd_private_data { + struct clk *clk; + struct acpi_device *adev; + const struct apd_device_desc *dev_desc; +}; + +struct acpi_power_dependent_device { + struct device *dev; + struct list_head node; +}; + +struct acpi_power_resource { + struct acpi_device device; + struct list_head list_node; + char *name; + u32 system_level; + u32 order; + unsigned int ref_count; + unsigned int users; + bool wakeup_enabled; + struct mutex resource_lock; + struct list_head dependents; +}; + +struct acpi_power_resource_entry { + struct list_head node; + struct acpi_power_resource *resource; +}; + +struct acpi_bus_event { + struct list_head node; + acpi_device_class device_class; + acpi_bus_id bus_id; + u32 type; + u32 data; +}; + +struct acpi_genl_event { + acpi_device_class device_class; + char bus_id[15]; + u32 type; + u32 data; +}; + +enum { + ACPI_GENL_ATTR_UNSPEC = 0, + ACPI_GENL_ATTR_EVENT = 1, + __ACPI_GENL_ATTR_MAX = 2, +}; + +enum { + 
ACPI_GENL_CMD_UNSPEC = 0, + ACPI_GENL_CMD_EVENT = 1, + __ACPI_GENL_CMD_MAX = 2, +}; + +struct acpi_ged_device { + struct device *dev; + struct list_head event_list; +}; + +struct acpi_ged_event { + struct list_head node; + struct device *dev; + unsigned int gsi; + unsigned int irq; + acpi_handle handle; +}; + +typedef void (*acpi_gbl_event_handler)(u32, acpi_handle, u32, void *); + +struct acpi_table_bert { + struct acpi_table_header header; + u32 region_length; + u64 address; +}; + +struct acpi_dlayer { + const char *name; + long unsigned int value; +}; + +struct acpi_dlevel { + const char *name; + long unsigned int value; +}; + +struct acpi_table_attr { + struct bin_attribute attr; + char name[4]; + int instance; + char filename[8]; + struct list_head node; +}; + +struct acpi_data_attr { + struct bin_attribute attr; + u64 addr; +}; + +struct acpi_data_obj { + char *name; + int (*fn)(void *, struct acpi_data_attr *); +}; + +struct event_counter { + u32 count; + u32 flags; +}; + +struct acpi_device_properties { + const guid_t *guid; + const union acpi_object *properties; + struct list_head list; +}; + +struct always_present_id { + struct acpi_device_id hid[2]; + struct x86_cpu_id cpu_ids[2]; + struct dmi_system_id dmi_ids[2]; + const char *uid; +}; + +struct lpi_device_info { + char *name; + int enabled; + union acpi_object *package; +}; + +struct lpi_device_constraint { + int uid; + int min_dstate; + int function_states; +}; + +struct lpi_constraints { + acpi_handle handle; + int min_dstate; +}; + +struct lpi_device_constraint_amd { + char *name; + int enabled; + int function_states; + int min_dstate; +}; + +struct acpi_lpat { + int temp; + int raw; +}; + +struct acpi_lpat_conversion_table { + struct acpi_lpat *lpat; + int lpat_count; +}; + +enum fpdt_subtable_type { + SUBTABLE_FBPT = 0, + SUBTABLE_S3PT = 1, +}; + +struct fpdt_subtable_entry { + u16 type; + u8 length; + u8 revision; + u32 reserved; + u64 address; +}; + +struct fpdt_subtable_header { + u32 signature; + u32 length; +}; + +enum fpdt_record_type { + RECORD_S3_RESUME = 0, + RECORD_S3_SUSPEND = 1, + RECORD_BOOT = 2, +}; + +struct fpdt_record_header { + u16 type; + u8 length; + u8 revision; +}; + +struct resume_performance_record { + struct fpdt_record_header header; + u32 resume_count; + u64 resume_prev; + u64 resume_avg; +}; + +struct boot_performance_record { + struct fpdt_record_header header; + u32 reserved; + u64 firmware_start; + u64 bootloader_load; + u64 bootloader_launch; + u64 exitbootservice_start; + u64 exitbootservice_end; +}; + +struct suspend_performance_record { + struct fpdt_record_header header; + u64 suspend_start; + u64 suspend_end; +} __attribute__((packed)); + +struct acpi_table_lpit { + struct acpi_table_header header; +}; + +struct acpi_lpit_header { + u32 type; + u32 length; + u16 unique_id; + u16 reserved; + u32 flags; +}; + +struct acpi_lpit_native { + struct acpi_lpit_header header; + struct acpi_generic_address entry_trigger; + u32 residency; + u32 latency; + struct acpi_generic_address residency_counter; + u64 counter_frequency; +} __attribute__((packed)); + +struct lpit_residency_info { + struct acpi_generic_address gaddr; + u64 frequency; + void *iomem_addr; +}; + +struct acpi_table_wdat { + struct acpi_table_header header; + u32 header_length; + u16 pci_segment; + u8 pci_bus; + u8 pci_device; + u8 pci_function; + u8 reserved[3]; + u32 timer_period; + u32 max_count; + u32 min_count; + u8 flags; + u8 reserved2[3]; + u32 entries; +}; + +struct acpi_wdat_entry { + u8 action; + u8 instruction; + u16 
reserved; + struct acpi_generic_address register_region; + u32 value; + u32 mask; +} __attribute__((packed)); + +struct acpi_name_info { + char name[4]; + u16 argument_list; + u8 expected_btypes; +} __attribute__((packed)); + +struct acpi_package_info { + u8 type; + u8 object_type1; + u8 count1; + u8 object_type2; + u8 count2; + u16 reserved; +} __attribute__((packed)); + +struct acpi_package_info2 { + u8 type; + u8 count; + u8 object_type[4]; + u8 reserved; +}; + +struct acpi_package_info3 { + u8 type; + u8 count; + u8 object_type[2]; + u8 tail_object_type; + u16 reserved; +} __attribute__((packed)); + +struct acpi_package_info4 { + u8 type; + u8 object_type1; + u8 count1; + u8 sub_object_types; + u8 pkg_count; + u16 reserved; +} __attribute__((packed)); + +union acpi_predefined_info { + struct acpi_name_info info; + struct acpi_package_info ret_info; + struct acpi_package_info2 ret_info2; + struct acpi_package_info3 ret_info3; + struct acpi_package_info4 ret_info4; +}; + +struct acpi_evaluate_info { + struct acpi_namespace_node *prefix_node; + const char *relative_pathname; + union acpi_operand_object **parameters; + struct acpi_namespace_node *node; + union acpi_operand_object *obj_desc; + char *full_pathname; + const union acpi_predefined_info *predefined; + union acpi_operand_object *return_object; + union acpi_operand_object *parent_package; + u32 return_flags; + u32 return_btype; + u16 param_count; + u16 node_flags; + u8 pass_number; + u8 return_object_type; + u8 flags; +}; + +enum { + ACPI_REFCLASS_LOCAL = 0, + ACPI_REFCLASS_ARG = 1, + ACPI_REFCLASS_REFOF = 2, + ACPI_REFCLASS_INDEX = 3, + ACPI_REFCLASS_TABLE = 4, + ACPI_REFCLASS_NAME = 5, + ACPI_REFCLASS_DEBUG = 6, + ACPI_REFCLASS_MAX = 6, +}; + +struct acpi_common_descriptor { + void *common_pointer; + u8 descriptor_type; +}; + +union acpi_descriptor { + struct acpi_common_descriptor common; + union acpi_operand_object object; + struct acpi_namespace_node node; + union acpi_parse_object op; +}; + +typedef enum { + ACPI_IMODE_LOAD_PASS1 = 1, + ACPI_IMODE_LOAD_PASS2 = 2, + ACPI_IMODE_EXECUTE = 3, +} acpi_interpreter_mode; + +struct acpi_create_field_info { + struct acpi_namespace_node *region_node; + struct acpi_namespace_node *field_node; + struct acpi_namespace_node *register_node; + struct acpi_namespace_node *data_register_node; + struct acpi_namespace_node *connection_node; + u8 *resource_buffer; + u32 bank_value; + u32 field_bit_position; + u32 field_bit_length; + u16 resource_length; + u16 pin_number_index; + u8 field_flags; + u8 attribute; + u8 field_type; + u8 access_length; +}; + +struct acpi_init_walk_info { + u32 table_index; + u32 object_count; + u32 method_count; + u32 serial_method_count; + u32 non_serial_method_count; + u32 serialized_method_count; + u32 device_count; + u32 op_region_count; + u32 field_count; + u32 buffer_count; + u32 package_count; + u32 op_region_init; + u32 field_init; + u32 buffer_init; + u32 package_init; + acpi_owner_id owner_id; +}; + +typedef u32 acpi_name; + +typedef acpi_status (*acpi_exception_handler)(acpi_status, acpi_name, u16, u32, void *); + +enum { + AML_FIELD_ACCESS_ANY = 0, + AML_FIELD_ACCESS_BYTE = 1, + AML_FIELD_ACCESS_WORD = 2, + AML_FIELD_ACCESS_DWORD = 3, + AML_FIELD_ACCESS_QWORD = 4, + AML_FIELD_ACCESS_BUFFER = 5, +}; + +typedef acpi_status (*acpi_execute_op)(struct acpi_walk_state *); + +struct acpi_fixed_event_handler { + acpi_event_handler handler; + void *context; +}; + +struct acpi_fixed_event_info { + u8 status_register_id; + u8 enable_register_id; + u16 
status_bit_mask; + u16 enable_bit_mask; +}; + +typedef u32 acpi_mutex_handle; + +struct acpi_gpe_walk_info { + struct acpi_namespace_node *gpe_device; + struct acpi_gpe_block_info *gpe_block; + u16 count; + acpi_owner_id owner_id; + u8 execute_by_owner_id; +}; + +struct acpi_gpe_device_info { + u32 index; + u32 next_block_base_index; + acpi_status status; + struct acpi_namespace_node *gpe_device; +}; + +typedef acpi_status (*acpi_gpe_callback)(struct acpi_gpe_xrupt_info *, struct acpi_gpe_block_info *, void *); + +struct acpi_reg_walk_info { + u32 function; + u32 reg_run_count; + acpi_adr_space_type space_id; +}; + +typedef u32 (*acpi_sci_handler)(void *); + +struct acpi_sci_handler_info { + struct acpi_sci_handler_info *next; + acpi_sci_handler address; + void *context; +}; + +struct acpi_exdump_info { + u8 opcode; + u8 offset; + const char *name; +} __attribute__((packed)); + +enum { + AML_FIELD_UPDATE_PRESERVE = 0, + AML_FIELD_UPDATE_WRITE_AS_ONES = 32, + AML_FIELD_UPDATE_WRITE_AS_ZEROS = 64, +}; + +struct acpi_signal_fatal_info { + u32 type; + u32 code; + u32 argument; +}; + +enum { + MATCH_MTR = 0, + MATCH_MEQ = 1, + MATCH_MLE = 2, + MATCH_MLT = 3, + MATCH_MGE = 4, + MATCH_MGT = 5, +}; + +enum { + AML_FIELD_ATTRIB_QUICK = 2, + AML_FIELD_ATTRIB_SEND_RECEIVE = 4, + AML_FIELD_ATTRIB_BYTE = 6, + AML_FIELD_ATTRIB_WORD = 8, + AML_FIELD_ATTRIB_BLOCK = 10, + AML_FIELD_ATTRIB_BYTES = 11, + AML_FIELD_ATTRIB_PROCESS_CALL = 12, + AML_FIELD_ATTRIB_BLOCK_PROCESS_CALL = 13, + AML_FIELD_ATTRIB_RAW_BYTES = 14, + AML_FIELD_ATTRIB_RAW_PROCESS_BYTES = 15, +}; + +typedef enum { + ACPI_TRACE_AML_METHOD = 0, + ACPI_TRACE_AML_OPCODE = 1, + ACPI_TRACE_AML_REGION = 2, +} acpi_trace_event_type; + +struct acpi_gpe_block_status_context { + struct acpi_gpe_register_info *gpe_skip_register_info; + u8 gpe_skip_mask; + u8 retval; +}; + +struct acpi_bit_register_info { + u8 parent_register; + u8 bit_position; + u16 access_bit_mask; +}; + +struct acpi_port_info { + char *name; + u16 start; + u16 end; + u8 osi_dependency; +}; + +struct acpi_pci_device { + acpi_handle device; + struct acpi_pci_device *next; +}; + +struct acpi_walk_info { + u32 debug_level; + u32 count; + acpi_owner_id owner_id; + u8 display_type; +}; + +typedef acpi_status (*acpi_init_handler)(acpi_handle, u32); + +struct acpi_device_walk_info { + struct acpi_table_desc *table_desc; + struct acpi_evaluate_info *evaluate_info; + u32 device_count; + u32 num_STA; + u32 num_INI; +}; + +typedef acpi_status (*acpi_pkg_callback)(u8, union acpi_operand_object *, union acpi_generic_state *, void *); + +struct acpi_table_list { + struct acpi_table_desc *tables; + u32 current_table_count; + u32 max_table_count; + u8 flags; +}; + +enum acpi_return_package_types { + ACPI_PTYPE1_FIXED = 1, + ACPI_PTYPE1_VAR = 2, + ACPI_PTYPE1_OPTION = 3, + ACPI_PTYPE2 = 4, + ACPI_PTYPE2_COUNT = 5, + ACPI_PTYPE2_PKG_COUNT = 6, + ACPI_PTYPE2_FIXED = 7, + ACPI_PTYPE2_MIN = 8, + ACPI_PTYPE2_REV_FIXED = 9, + ACPI_PTYPE2_FIX_VAR = 10, + ACPI_PTYPE2_VAR_VAR = 11, + ACPI_PTYPE2_UUID_PAIR = 12, + ACPI_PTYPE_CUSTOM = 13, +}; + +typedef acpi_status (*acpi_object_converter)(struct acpi_namespace_node *, union acpi_operand_object *, union acpi_operand_object **); + +struct acpi_simple_repair_info { + char name[4]; + u32 unexpected_btypes; + u32 package_index; + acpi_object_converter object_converter; +}; + +typedef acpi_status (*acpi_repair_function)(struct acpi_evaluate_info *, union acpi_operand_object **); + +struct acpi_repair_info { + char name[4]; + acpi_repair_function repair_function; +}; 
+ +struct acpi_namestring_info { + const char *external_name; + const char *next_external_char; + char *internal_name; + u32 length; + u32 num_segments; + u32 num_carats; + u8 fully_qualified; +}; + +struct acpi_rw_lock { + void *writer_mutex; + void *reader_mutex; + u32 num_readers; +}; + +struct acpi_get_devices_info { + acpi_walk_callback user_function; + void *context; + const char *hid; +}; + +struct aml_resource_small_header { + u8 descriptor_type; +}; + +struct aml_resource_irq { + u8 descriptor_type; + u16 irq_mask; + u8 flags; +} __attribute__((packed)); + +struct aml_resource_dma { + u8 descriptor_type; + u8 dma_channel_mask; + u8 flags; +}; + +struct aml_resource_start_dependent { + u8 descriptor_type; + u8 flags; +}; + +struct aml_resource_end_dependent { + u8 descriptor_type; +}; + +struct aml_resource_io { + u8 descriptor_type; + u8 flags; + u16 minimum; + u16 maximum; + u8 alignment; + u8 address_length; +}; + +struct aml_resource_fixed_io { + u8 descriptor_type; + u16 address; + u8 address_length; +} __attribute__((packed)); + +struct aml_resource_vendor_small { + u8 descriptor_type; +}; + +struct aml_resource_end_tag { + u8 descriptor_type; + u8 checksum; +}; + +struct aml_resource_fixed_dma { + u8 descriptor_type; + u16 request_lines; + u16 channels; + u8 width; +} __attribute__((packed)); + +struct aml_resource_large_header { + u8 descriptor_type; + u16 resource_length; +} __attribute__((packed)); + +struct aml_resource_memory24 { + u8 descriptor_type; + u16 resource_length; + u8 flags; + u16 minimum; + u16 maximum; + u16 alignment; + u16 address_length; +} __attribute__((packed)); + +struct aml_resource_vendor_large { + u8 descriptor_type; + u16 resource_length; +} __attribute__((packed)); + +struct aml_resource_memory32 { + u8 descriptor_type; + u16 resource_length; + u8 flags; + u32 minimum; + u32 maximum; + u32 alignment; + u32 address_length; +} __attribute__((packed)); + +struct aml_resource_fixed_memory32 { + u8 descriptor_type; + u16 resource_length; + u8 flags; + u32 address; + u32 address_length; +} __attribute__((packed)); + +struct aml_resource_address { + u8 descriptor_type; + u16 resource_length; + u8 resource_type; + u8 flags; + u8 specific_flags; +} __attribute__((packed)); + +struct aml_resource_extended_address64 { + u8 descriptor_type; + u16 resource_length; + u8 resource_type; + u8 flags; + u8 specific_flags; + u8 revision_ID; + u8 reserved; + u64 granularity; + u64 minimum; + u64 maximum; + u64 translation_offset; + u64 address_length; + u64 type_specific; +} __attribute__((packed)); + +struct aml_resource_address64 { + u8 descriptor_type; + u16 resource_length; + u8 resource_type; + u8 flags; + u8 specific_flags; + u64 granularity; + u64 minimum; + u64 maximum; + u64 translation_offset; + u64 address_length; +} __attribute__((packed)); + +struct aml_resource_address32 { + u8 descriptor_type; + u16 resource_length; + u8 resource_type; + u8 flags; + u8 specific_flags; + u32 granularity; + u32 minimum; + u32 maximum; + u32 translation_offset; + u32 address_length; +} __attribute__((packed)); + +struct aml_resource_address16 { + u8 descriptor_type; + u16 resource_length; + u8 resource_type; + u8 flags; + u8 specific_flags; + u16 granularity; + u16 minimum; + u16 maximum; + u16 translation_offset; + u16 address_length; +} __attribute__((packed)); + +struct aml_resource_extended_irq { + u8 descriptor_type; + u16 resource_length; + u8 flags; + u8 interrupt_count; + u32 interrupts[1]; +} __attribute__((packed)); + +struct aml_resource_generic_register { + 
u8 descriptor_type; + u16 resource_length; + u8 address_space_id; + u8 bit_width; + u8 bit_offset; + u8 access_size; + u64 address; +} __attribute__((packed)); + +struct aml_resource_gpio { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u8 connection_type; + u16 flags; + u16 int_flags; + u8 pin_config; + u16 drive_strength; + u16 debounce_timeout; + u16 pin_table_offset; + u8 res_source_index; + u16 res_source_offset; + u16 vendor_offset; + u16 vendor_length; +} __attribute__((packed)); + +struct aml_resource_common_serialbus { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u8 res_source_index; + u8 type; + u8 flags; + u16 type_specific_flags; + u8 type_revision_id; + u16 type_data_length; +} __attribute__((packed)); + +struct aml_resource_csi2_serialbus { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u8 res_source_index; + u8 type; + u8 flags; + u16 type_specific_flags; + u8 type_revision_id; + u16 type_data_length; +} __attribute__((packed)); + +struct aml_resource_i2c_serialbus { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u8 res_source_index; + u8 type; + u8 flags; + u16 type_specific_flags; + u8 type_revision_id; + u16 type_data_length; + u32 connection_speed; + u16 slave_address; +} __attribute__((packed)); + +struct aml_resource_spi_serialbus { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u8 res_source_index; + u8 type; + u8 flags; + u16 type_specific_flags; + u8 type_revision_id; + u16 type_data_length; + u32 connection_speed; + u8 data_bit_length; + u8 clock_phase; + u8 clock_polarity; + u16 device_selection; +} __attribute__((packed)); + +struct aml_resource_uart_serialbus { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u8 res_source_index; + u8 type; + u8 flags; + u16 type_specific_flags; + u8 type_revision_id; + u16 type_data_length; + u32 default_baud_rate; + u16 rx_fifo_size; + u16 tx_fifo_size; + u8 parity; + u8 lines_enabled; +} __attribute__((packed)); + +struct aml_resource_pin_function { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u16 flags; + u8 pin_config; + u16 function_number; + u16 pin_table_offset; + u8 res_source_index; + u16 res_source_offset; + u16 vendor_offset; + u16 vendor_length; +} __attribute__((packed)); + +struct aml_resource_pin_config { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u16 flags; + u8 pin_config_type; + u32 pin_config_value; + u16 pin_table_offset; + u8 res_source_index; + u16 res_source_offset; + u16 vendor_offset; + u16 vendor_length; +} __attribute__((packed)); + +struct aml_resource_pin_group { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u16 flags; + u16 pin_table_offset; + u16 label_offset; + u16 vendor_offset; + u16 vendor_length; +} __attribute__((packed)); + +struct aml_resource_pin_group_function { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u16 flags; + u16 function_number; + u8 res_source_index; + u16 res_source_offset; + u16 res_source_label_offset; + u16 vendor_offset; + u16 vendor_length; +} __attribute__((packed)); + +struct aml_resource_pin_group_config { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u16 flags; + u8 pin_config_type; + u32 pin_config_value; + u8 res_source_index; + u16 res_source_offset; + u16 res_source_label_offset; + u16 vendor_offset; + u16 vendor_length; +} __attribute__((packed)); + +union aml_resource { + u8 descriptor_type; + struct aml_resource_small_header small_header; + struct 
aml_resource_large_header large_header; + struct aml_resource_irq irq; + struct aml_resource_dma dma; + struct aml_resource_start_dependent start_dpf; + struct aml_resource_end_dependent end_dpf; + struct aml_resource_io io; + struct aml_resource_fixed_io fixed_io; + struct aml_resource_fixed_dma fixed_dma; + struct aml_resource_vendor_small vendor_small; + struct aml_resource_end_tag end_tag; + struct aml_resource_memory24 memory24; + struct aml_resource_generic_register generic_reg; + struct aml_resource_vendor_large vendor_large; + struct aml_resource_memory32 memory32; + struct aml_resource_fixed_memory32 fixed_memory32; + struct aml_resource_address16 address16; + struct aml_resource_address32 address32; + struct aml_resource_address64 address64; + struct aml_resource_extended_address64 ext_address64; + struct aml_resource_extended_irq extended_irq; + struct aml_resource_gpio gpio; + struct aml_resource_i2c_serialbus i2c_serial_bus; + struct aml_resource_spi_serialbus spi_serial_bus; + struct aml_resource_uart_serialbus uart_serial_bus; + struct aml_resource_csi2_serialbus csi2_serial_bus; + struct aml_resource_common_serialbus common_serial_bus; + struct aml_resource_pin_function pin_function; + struct aml_resource_pin_config pin_config; + struct aml_resource_pin_group pin_group; + struct aml_resource_pin_group_function pin_group_function; + struct aml_resource_pin_group_config pin_group_config; + struct aml_resource_address address; + u32 dword_item; + u16 word_item; + u8 byte_item; +}; + +struct acpi_rsconvert_info { + u8 opcode; + u8 resource_offset; + u8 aml_offset; + u8 value; +}; + +enum { + ACPI_RSC_INITGET = 0, + ACPI_RSC_INITSET = 1, + ACPI_RSC_FLAGINIT = 2, + ACPI_RSC_1BITFLAG = 3, + ACPI_RSC_2BITFLAG = 4, + ACPI_RSC_3BITFLAG = 5, + ACPI_RSC_6BITFLAG = 6, + ACPI_RSC_ADDRESS = 7, + ACPI_RSC_BITMASK = 8, + ACPI_RSC_BITMASK16 = 9, + ACPI_RSC_COUNT = 10, + ACPI_RSC_COUNT16 = 11, + ACPI_RSC_COUNT_GPIO_PIN = 12, + ACPI_RSC_COUNT_GPIO_RES = 13, + ACPI_RSC_COUNT_GPIO_VEN = 14, + ACPI_RSC_COUNT_SERIAL_RES = 15, + ACPI_RSC_COUNT_SERIAL_VEN = 16, + ACPI_RSC_DATA8 = 17, + ACPI_RSC_EXIT_EQ = 18, + ACPI_RSC_EXIT_LE = 19, + ACPI_RSC_EXIT_NE = 20, + ACPI_RSC_LENGTH = 21, + ACPI_RSC_MOVE_GPIO_PIN = 22, + ACPI_RSC_MOVE_GPIO_RES = 23, + ACPI_RSC_MOVE_SERIAL_RES = 24, + ACPI_RSC_MOVE_SERIAL_VEN = 25, + ACPI_RSC_MOVE8 = 26, + ACPI_RSC_MOVE16 = 27, + ACPI_RSC_MOVE32 = 28, + ACPI_RSC_MOVE64 = 29, + ACPI_RSC_SET8 = 30, + ACPI_RSC_SOURCE = 31, + ACPI_RSC_SOURCEX = 32, +}; + +typedef u16 acpi_rs_length; + +typedef acpi_status (*acpi_walk_aml_callback)(u8 *, u32, u32, u8, void **); + +struct acpi_rsdump_info { + u8 opcode; + u8 offset; + const char *name; + const char **pointer; +} __attribute__((packed)); + +enum { + ACPI_RSD_TITLE = 0, + ACPI_RSD_1BITFLAG = 1, + ACPI_RSD_2BITFLAG = 2, + ACPI_RSD_3BITFLAG = 3, + ACPI_RSD_6BITFLAG = 4, + ACPI_RSD_ADDRESS = 5, + ACPI_RSD_DWORDLIST = 6, + ACPI_RSD_LITERAL = 7, + ACPI_RSD_LONGLIST = 8, + ACPI_RSD_SHORTLIST = 9, + ACPI_RSD_SHORTLISTX = 10, + ACPI_RSD_SOURCE = 11, + ACPI_RSD_STRING = 12, + ACPI_RSD_UINT8 = 13, + ACPI_RSD_UINT16 = 14, + ACPI_RSD_UINT32 = 15, + ACPI_RSD_UINT64 = 16, + ACPI_RSD_WORDLIST = 17, + ACPI_RSD_LABEL = 18, + ACPI_RSD_SOURCE_LABEL = 19, +}; + +typedef u32 acpi_rsdesc_size; + +struct acpi_vendor_uuid { + u8 subtype; + u8 data[16]; +}; + +struct acpi_vendor_walk_info { + struct acpi_vendor_uuid *uuid; + struct acpi_buffer *buffer; + acpi_status status; +}; + +struct acpi_fadt_info { + const char *name; + u16 address64; + u16 address32; 
+ u16 length; + u8 default_length; + u8 flags; +}; + +struct acpi_fadt_pm_info { + struct acpi_generic_address *target; + u16 source; + u8 register_num; +}; + +struct acpi_table_rsdp { + char signature[8]; + u8 checksum; + char oem_id[6]; + u8 revision; + u32 rsdt_physical_address; + u32 length; + u64 xsdt_physical_address; + u8 extended_checksum; + u8 reserved[3]; +} __attribute__((packed)); + +struct acpi_address_range { + struct acpi_address_range *next; + struct acpi_namespace_node *region_node; + acpi_physical_address start_address; + acpi_physical_address end_address; +}; + +struct acpi_pkg_info { + u8 *free_space; + acpi_size length; + u32 object_space; + u32 num_packages; +}; + +struct acpi_exception_info { + char *name; +}; + +struct acpi_mutex_info { + void *mutex; + u32 use_count; + u64 thread_id; +}; + +struct acpi_comment_node { + char *comment; + struct acpi_comment_node *next; +}; + +struct acpi_interface_info { + char *name; + struct acpi_interface_info *next; + u8 flags; + u8 value; +}; + +struct acpi_handler_info { + void *handler; + char *name; +}; + +struct acpi_db_method_info { + acpi_handle method; + acpi_handle main_thread_gate; + acpi_handle thread_complete_gate; + acpi_handle info_gate; + u64 *threads; + u32 num_threads; + u32 num_created; + u32 num_completed; + char *name; + u32 flags; + u32 num_loops; + char pathname[512]; + char **args; + acpi_object_type *types; + char init_args; + acpi_object_type arg_types[7]; + char *arguments[7]; + char num_threads_str[11]; + char id_of_thread_str[11]; + char index_of_thread_str[11]; +}; + +struct history_info { + char *command; + u32 cmd_num; +}; + +typedef struct history_info HISTORY_INFO; + +struct acpi_db_command_info { + const char *name; + u8 min_args; +}; + +struct acpi_db_command_help { + u8 line_count; + char *invocation; + char *description; +}; + +enum acpi_ex_debugger_commands { + CMD_NOT_FOUND = 0, + CMD_NULL = 1, + CMD_ALL = 2, + CMD_ALLOCATIONS = 3, + CMD_ARGS = 4, + CMD_ARGUMENTS = 5, + CMD_BREAKPOINT = 6, + CMD_BUSINFO = 7, + CMD_CALL = 8, + CMD_DEBUG = 9, + CMD_DISASSEMBLE = 10, + CMD_DISASM = 11, + CMD_DUMP = 12, + CMD_EVALUATE = 13, + CMD_EXECUTE = 14, + CMD_EXIT = 15, + CMD_FIELDS = 16, + CMD_FIND = 17, + CMD_GO = 18, + CMD_HANDLERS = 19, + CMD_HELP = 20, + CMD_HELP2 = 21, + CMD_HISTORY = 22, + CMD_HISTORY_EXE = 23, + CMD_HISTORY_LAST = 24, + CMD_INFORMATION = 25, + CMD_INTEGRITY = 26, + CMD_INTO = 27, + CMD_LEVEL = 28, + CMD_LIST = 29, + CMD_LOCALS = 30, + CMD_LOCKS = 31, + CMD_METHODS = 32, + CMD_NAMESPACE = 33, + CMD_NOTIFY = 34, + CMD_OBJECTS = 35, + CMD_OSI = 36, + CMD_OWNER = 37, + CMD_PATHS = 38, + CMD_PREDEFINED = 39, + CMD_PREFIX = 40, + CMD_QUIT = 41, + CMD_REFERENCES = 42, + CMD_RESOURCES = 43, + CMD_RESULTS = 44, + CMD_SET = 45, + CMD_STATS = 46, + CMD_STOP = 47, + CMD_TABLES = 48, + CMD_TEMPLATE = 49, + CMD_TRACE = 50, + CMD_TREE = 51, + CMD_TYPE = 52, +}; + +struct acpi_db_execute_walk { + u32 count; + u32 max_count; + char name_seg[5]; +}; + +struct acpi_integrity_info { + u32 nodes; + u32 objects; +}; + +struct acpi_object_info { + u32 types[28]; +}; + +struct acpi_region_walk_info { + u32 debug_level; + u32 count; + acpi_owner_id owner_id; + u8 display_type; + u32 address_space_id; +}; + +struct acpi_db_argument_info { + const char *name; +}; + +enum led_brightness { + LED_OFF = 0, + LED_ON = 1, + LED_HALF = 127, + LED_FULL = 255, +}; + +struct led_hw_trigger_type { + int dummy; +}; + +struct led_pattern; + +struct led_trigger; + +struct led_classdev { + const char *name; + unsigned int 
brightness; + unsigned int max_brightness; + int flags; + long unsigned int work_flags; + void (*brightness_set)(struct led_classdev *, enum led_brightness); + int (*brightness_set_blocking)(struct led_classdev *, enum led_brightness); + enum led_brightness (*brightness_get)(struct led_classdev *); + int (*blink_set)(struct led_classdev *, long unsigned int *, long unsigned int *); + int (*pattern_set)(struct led_classdev *, struct led_pattern *, u32, int); + int (*pattern_clear)(struct led_classdev *); + struct device *dev; + const struct attribute_group **groups; + struct list_head node; + const char *default_trigger; + long unsigned int blink_delay_on; + long unsigned int blink_delay_off; + struct timer_list blink_timer; + int blink_brightness; + int new_blink_brightness; + void (*flash_resume)(struct led_classdev *); + struct work_struct set_brightness_work; + int delayed_set_value; + struct rw_semaphore trigger_lock; + struct led_trigger *trigger; + struct list_head trig_list; + void *trigger_data; + bool activated; + struct led_hw_trigger_type *trigger_type; + int brightness_hw_changed; + struct kernfs_node *brightness_hw_changed_kn; + struct mutex led_access; +}; + +struct led_pattern { + u32 delta_t; + int brightness; +}; + +struct led_trigger { + const char *name; + int (*activate)(struct led_classdev *); + void (*deactivate)(struct led_classdev *); + struct led_hw_trigger_type *trigger_type; + rwlock_t leddev_list_lock; + struct list_head led_cdevs; + struct list_head next_trig; + const struct attribute_group **groups; +}; + +enum power_supply_property { + POWER_SUPPLY_PROP_STATUS = 0, + POWER_SUPPLY_PROP_CHARGE_TYPE = 1, + POWER_SUPPLY_PROP_HEALTH = 2, + POWER_SUPPLY_PROP_PRESENT = 3, + POWER_SUPPLY_PROP_ONLINE = 4, + POWER_SUPPLY_PROP_AUTHENTIC = 5, + POWER_SUPPLY_PROP_TECHNOLOGY = 6, + POWER_SUPPLY_PROP_CYCLE_COUNT = 7, + POWER_SUPPLY_PROP_VOLTAGE_MAX = 8, + POWER_SUPPLY_PROP_VOLTAGE_MIN = 9, + POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN = 10, + POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN = 11, + POWER_SUPPLY_PROP_VOLTAGE_NOW = 12, + POWER_SUPPLY_PROP_VOLTAGE_AVG = 13, + POWER_SUPPLY_PROP_VOLTAGE_OCV = 14, + POWER_SUPPLY_PROP_VOLTAGE_BOOT = 15, + POWER_SUPPLY_PROP_CURRENT_MAX = 16, + POWER_SUPPLY_PROP_CURRENT_NOW = 17, + POWER_SUPPLY_PROP_CURRENT_AVG = 18, + POWER_SUPPLY_PROP_CURRENT_BOOT = 19, + POWER_SUPPLY_PROP_POWER_NOW = 20, + POWER_SUPPLY_PROP_POWER_AVG = 21, + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN = 22, + POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN = 23, + POWER_SUPPLY_PROP_CHARGE_FULL = 24, + POWER_SUPPLY_PROP_CHARGE_EMPTY = 25, + POWER_SUPPLY_PROP_CHARGE_NOW = 26, + POWER_SUPPLY_PROP_CHARGE_AVG = 27, + POWER_SUPPLY_PROP_CHARGE_COUNTER = 28, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT = 29, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX = 30, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE = 31, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX = 32, + POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT = 33, + POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX = 34, + POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD = 35, + POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD = 36, + POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT = 37, + POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT = 38, + POWER_SUPPLY_PROP_INPUT_POWER_LIMIT = 39, + POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN = 40, + POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN = 41, + POWER_SUPPLY_PROP_ENERGY_FULL = 42, + POWER_SUPPLY_PROP_ENERGY_EMPTY = 43, + POWER_SUPPLY_PROP_ENERGY_NOW = 44, + POWER_SUPPLY_PROP_ENERGY_AVG = 45, + POWER_SUPPLY_PROP_CAPACITY = 46, + 
POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN = 47, + POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX = 48, + POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN = 49, + POWER_SUPPLY_PROP_CAPACITY_LEVEL = 50, + POWER_SUPPLY_PROP_TEMP = 51, + POWER_SUPPLY_PROP_TEMP_MAX = 52, + POWER_SUPPLY_PROP_TEMP_MIN = 53, + POWER_SUPPLY_PROP_TEMP_ALERT_MIN = 54, + POWER_SUPPLY_PROP_TEMP_ALERT_MAX = 55, + POWER_SUPPLY_PROP_TEMP_AMBIENT = 56, + POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN = 57, + POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX = 58, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW = 59, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG = 60, + POWER_SUPPLY_PROP_TIME_TO_FULL_NOW = 61, + POWER_SUPPLY_PROP_TIME_TO_FULL_AVG = 62, + POWER_SUPPLY_PROP_TYPE = 63, + POWER_SUPPLY_PROP_USB_TYPE = 64, + POWER_SUPPLY_PROP_SCOPE = 65, + POWER_SUPPLY_PROP_PRECHARGE_CURRENT = 66, + POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT = 67, + POWER_SUPPLY_PROP_CALIBRATE = 68, + POWER_SUPPLY_PROP_MANUFACTURE_YEAR = 69, + POWER_SUPPLY_PROP_MANUFACTURE_MONTH = 70, + POWER_SUPPLY_PROP_MANUFACTURE_DAY = 71, + POWER_SUPPLY_PROP_MODEL_NAME = 72, + POWER_SUPPLY_PROP_MANUFACTURER = 73, + POWER_SUPPLY_PROP_SERIAL_NUMBER = 74, +}; + +enum power_supply_type { + POWER_SUPPLY_TYPE_UNKNOWN = 0, + POWER_SUPPLY_TYPE_BATTERY = 1, + POWER_SUPPLY_TYPE_UPS = 2, + POWER_SUPPLY_TYPE_MAINS = 3, + POWER_SUPPLY_TYPE_USB = 4, + POWER_SUPPLY_TYPE_USB_DCP = 5, + POWER_SUPPLY_TYPE_USB_CDP = 6, + POWER_SUPPLY_TYPE_USB_ACA = 7, + POWER_SUPPLY_TYPE_USB_TYPE_C = 8, + POWER_SUPPLY_TYPE_USB_PD = 9, + POWER_SUPPLY_TYPE_USB_PD_DRP = 10, + POWER_SUPPLY_TYPE_APPLE_BRICK_ID = 11, + POWER_SUPPLY_TYPE_WIRELESS = 12, +}; + +enum power_supply_usb_type { + POWER_SUPPLY_USB_TYPE_UNKNOWN = 0, + POWER_SUPPLY_USB_TYPE_SDP = 1, + POWER_SUPPLY_USB_TYPE_DCP = 2, + POWER_SUPPLY_USB_TYPE_CDP = 3, + POWER_SUPPLY_USB_TYPE_ACA = 4, + POWER_SUPPLY_USB_TYPE_C = 5, + POWER_SUPPLY_USB_TYPE_PD = 6, + POWER_SUPPLY_USB_TYPE_PD_DRP = 7, + POWER_SUPPLY_USB_TYPE_PD_PPS = 8, + POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID = 9, +}; + +union power_supply_propval { + int intval; + const char *strval; +}; + +struct power_supply_config { + struct device_node *of_node; + struct fwnode_handle *fwnode; + void *drv_data; + const struct attribute_group **attr_grp; + char **supplied_to; + size_t num_supplicants; +}; + +struct power_supply; + +struct power_supply_desc { + const char *name; + enum power_supply_type type; + const enum power_supply_usb_type *usb_types; + size_t num_usb_types; + const enum power_supply_property *properties; + size_t num_properties; + int (*get_property)(struct power_supply *, enum power_supply_property, union power_supply_propval *); + int (*set_property)(struct power_supply *, enum power_supply_property, const union power_supply_propval *); + int (*property_is_writeable)(struct power_supply *, enum power_supply_property); + void (*external_power_changed)(struct power_supply *); + void (*set_charged)(struct power_supply *); + bool no_thermal; + int use_for_apm; +}; + +struct thermal_zone_device; + +struct power_supply { + const struct power_supply_desc *desc; + char **supplied_to; + size_t num_supplicants; + char **supplied_from; + size_t num_supplies; + struct device_node *of_node; + void *drv_data; + struct device dev; + struct work_struct changed_work; + struct delayed_work deferred_register_work; + spinlock_t changed_lock; + bool changed; + bool initialized; + bool removing; + atomic_t use_cnt; + struct thermal_zone_device *tzd; + struct thermal_cooling_device *tcd; + struct led_trigger *charging_full_trig; + char 
*charging_full_trig_name; + struct led_trigger *charging_trig; + char *charging_trig_name; + struct led_trigger *full_trig; + char *full_trig_name; + struct led_trigger *online_trig; + char *online_trig_name; + struct led_trigger *charging_blink_full_solid_trig; + char *charging_blink_full_solid_trig_name; +}; + +struct acpi_ac_bl { + const char *hid; + int hrv; +}; + +struct acpi_ac { + struct power_supply *charger; + struct power_supply_desc charger_desc; + struct acpi_device *device; + long long unsigned int state; + struct notifier_block battery_nb; +}; + +struct input_id { + __u16 bustype; + __u16 vendor; + __u16 product; + __u16 version; +}; + +struct input_absinfo { + __s32 value; + __s32 minimum; + __s32 maximum; + __s32 fuzz; + __s32 flat; + __s32 resolution; +}; + +struct input_keymap_entry { + __u8 flags; + __u8 len; + __u16 index; + __u32 keycode; + __u8 scancode[32]; +}; + +struct ff_replay { + __u16 length; + __u16 delay; +}; + +struct ff_trigger { + __u16 button; + __u16 interval; +}; + +struct ff_envelope { + __u16 attack_length; + __u16 attack_level; + __u16 fade_length; + __u16 fade_level; +}; + +struct ff_constant_effect { + __s16 level; + struct ff_envelope envelope; +}; + +struct ff_ramp_effect { + __s16 start_level; + __s16 end_level; + struct ff_envelope envelope; +}; + +struct ff_condition_effect { + __u16 right_saturation; + __u16 left_saturation; + __s16 right_coeff; + __s16 left_coeff; + __u16 deadband; + __s16 center; +}; + +struct ff_periodic_effect { + __u16 waveform; + __u16 period; + __s16 magnitude; + __s16 offset; + __u16 phase; + struct ff_envelope envelope; + __u32 custom_len; + __s16 *custom_data; +}; + +struct ff_rumble_effect { + __u16 strong_magnitude; + __u16 weak_magnitude; +}; + +struct ff_effect { + __u16 type; + __s16 id; + __u16 direction; + struct ff_trigger trigger; + struct ff_replay replay; + union { + struct ff_constant_effect constant; + struct ff_ramp_effect ramp; + struct ff_periodic_effect periodic; + struct ff_condition_effect condition[2]; + struct ff_rumble_effect rumble; + } u; +}; + +struct input_device_id { + kernel_ulong_t flags; + __u16 bustype; + __u16 vendor; + __u16 product; + __u16 version; + kernel_ulong_t evbit[1]; + kernel_ulong_t keybit[12]; + kernel_ulong_t relbit[1]; + kernel_ulong_t absbit[1]; + kernel_ulong_t mscbit[1]; + kernel_ulong_t ledbit[1]; + kernel_ulong_t sndbit[1]; + kernel_ulong_t ffbit[2]; + kernel_ulong_t swbit[1]; + kernel_ulong_t propbit[1]; + kernel_ulong_t driver_info; +}; + +struct input_value { + __u16 type; + __u16 code; + __s32 value; +}; + +enum input_clock_type { + INPUT_CLK_REAL = 0, + INPUT_CLK_MONO = 1, + INPUT_CLK_BOOT = 2, + INPUT_CLK_MAX = 3, +}; + +struct ff_device; + +struct input_dev_poller; + +struct input_mt; + +struct input_handle; + +struct input_dev { + const char *name; + const char *phys; + const char *uniq; + struct input_id id; + long unsigned int propbit[1]; + long unsigned int evbit[1]; + long unsigned int keybit[12]; + long unsigned int relbit[1]; + long unsigned int absbit[1]; + long unsigned int mscbit[1]; + long unsigned int ledbit[1]; + long unsigned int sndbit[1]; + long unsigned int ffbit[2]; + long unsigned int swbit[1]; + unsigned int hint_events_per_packet; + unsigned int keycodemax; + unsigned int keycodesize; + void *keycode; + int (*setkeycode)(struct input_dev *, const struct input_keymap_entry *, unsigned int *); + int (*getkeycode)(struct input_dev *, struct input_keymap_entry *); + struct ff_device *ff; + struct input_dev_poller *poller; + unsigned int 
repeat_key; + struct timer_list timer; + int rep[2]; + struct input_mt *mt; + struct input_absinfo *absinfo; + long unsigned int key[12]; + long unsigned int led[1]; + long unsigned int snd[1]; + long unsigned int sw[1]; + int (*open)(struct input_dev *); + void (*close)(struct input_dev *); + int (*flush)(struct input_dev *, struct file *); + int (*event)(struct input_dev *, unsigned int, unsigned int, int); + struct input_handle *grab; + spinlock_t event_lock; + struct mutex mutex; + unsigned int users; + bool going_away; + struct device dev; + struct list_head h_list; + struct list_head node; + unsigned int num_vals; + unsigned int max_vals; + struct input_value *vals; + bool devres_managed; + ktime_t timestamp[3]; + bool inhibited; +}; + +struct ff_device { + int (*upload)(struct input_dev *, struct ff_effect *, struct ff_effect *); + int (*erase)(struct input_dev *, int); + int (*playback)(struct input_dev *, int, int); + void (*set_gain)(struct input_dev *, u16); + void (*set_autocenter)(struct input_dev *, u16); + void (*destroy)(struct ff_device *); + void *private; + long unsigned int ffbit[2]; + struct mutex mutex; + int max_effects; + struct ff_effect *effects; + struct file *effect_owners[0]; +}; + +struct input_handler; + +struct input_handle { + void *private; + int open; + const char *name; + struct input_dev *dev; + struct input_handler *handler; + struct list_head d_node; + struct list_head h_node; +}; + +struct input_handler { + void *private; + void (*event)(struct input_handle *, unsigned int, unsigned int, int); + void (*events)(struct input_handle *, const struct input_value *, unsigned int); + bool (*filter)(struct input_handle *, unsigned int, unsigned int, int); + bool (*match)(struct input_handler *, struct input_dev *); + int (*connect)(struct input_handler *, struct input_dev *, const struct input_device_id *); + void (*disconnect)(struct input_handle *); + void (*start)(struct input_handle *); + bool legacy_minors; + int minor; + const char *name; + const struct input_device_id *id_table; + struct list_head h_list; + struct list_head node; +}; + +enum { + ACPI_BUTTON_LID_INIT_IGNORE = 0, + ACPI_BUTTON_LID_INIT_OPEN = 1, + ACPI_BUTTON_LID_INIT_METHOD = 2, + ACPI_BUTTON_LID_INIT_DISABLED = 3, +}; + +struct acpi_button { + unsigned int type; + struct input_dev *input; + char phys[32]; + long unsigned int pushed; + int last_state; + ktime_t last_time; + bool suspended; + bool lid_state_initialized; +}; + +struct acpi_fan_fps { + u64 control; + u64 trip_point; + u64 speed; + u64 noise_level; + u64 power; + char name[20]; + struct device_attribute dev_attr; +}; + +struct acpi_fan_fif { + u64 revision; + u64 fine_grain_ctrl; + u64 step_size; + u64 low_speed_notification; +}; + +struct acpi_fan { + bool acpi4; + struct acpi_fan_fif fif; + struct acpi_fan_fps *fps; + int fps_count; + struct thermal_cooling_device *cdev; +}; + +struct acpi_pci_slot { + struct pci_slot *pci_slot; + struct list_head list; +}; + +struct acpi_lpi_states_array { + unsigned int size; + unsigned int composite_states_size; + struct acpi_lpi_state *entries; + struct acpi_lpi_state *composite_states[8]; +}; + +struct throttling_tstate { + unsigned int cpu; + int target_state; +}; + +struct acpi_processor_throttling_arg { + struct acpi_processor *pr; + int target_state; + bool force; +}; + +struct container_dev { + struct device dev; + int (*offline)(struct container_dev *); +}; + +enum thermal_device_mode { + THERMAL_DEVICE_DISABLED = 0, + THERMAL_DEVICE_ENABLED = 1, +}; + +enum thermal_trip_type 
{ + THERMAL_TRIP_ACTIVE = 0, + THERMAL_TRIP_PASSIVE = 1, + THERMAL_TRIP_HOT = 2, + THERMAL_TRIP_CRITICAL = 3, +}; + +enum thermal_trend { + THERMAL_TREND_STABLE = 0, + THERMAL_TREND_RAISING = 1, + THERMAL_TREND_DROPPING = 2, + THERMAL_TREND_RAISE_FULL = 3, + THERMAL_TREND_DROP_FULL = 4, +}; + +enum thermal_notify_event { + THERMAL_EVENT_UNSPECIFIED = 0, + THERMAL_EVENT_TEMP_SAMPLE = 1, + THERMAL_TRIP_VIOLATED = 2, + THERMAL_TRIP_CHANGED = 3, + THERMAL_DEVICE_DOWN = 4, + THERMAL_DEVICE_UP = 5, + THERMAL_DEVICE_POWER_CAPABILITY_CHANGED = 6, + THERMAL_TABLE_CHANGED = 7, + THERMAL_EVENT_KEEP_ALIVE = 8, +}; + +struct thermal_zone_device_ops { + int (*bind)(struct thermal_zone_device *, struct thermal_cooling_device *); + int (*unbind)(struct thermal_zone_device *, struct thermal_cooling_device *); + int (*get_temp)(struct thermal_zone_device *, int *); + int (*set_trips)(struct thermal_zone_device *, int, int); + int (*change_mode)(struct thermal_zone_device *, enum thermal_device_mode); + int (*get_trip_type)(struct thermal_zone_device *, int, enum thermal_trip_type *); + int (*get_trip_temp)(struct thermal_zone_device *, int, int *); + int (*set_trip_temp)(struct thermal_zone_device *, int, int); + int (*get_trip_hyst)(struct thermal_zone_device *, int, int *); + int (*set_trip_hyst)(struct thermal_zone_device *, int, int); + int (*get_crit_temp)(struct thermal_zone_device *, int *); + int (*set_emul_temp)(struct thermal_zone_device *, int); + int (*get_trend)(struct thermal_zone_device *, int, enum thermal_trend *); + void (*hot)(struct thermal_zone_device *); + void (*critical)(struct thermal_zone_device *); +}; + +struct thermal_attr; + +struct thermal_zone_params; + +struct thermal_governor; + +struct thermal_zone_device { + int id; + char type[20]; + struct device device; + struct attribute_group trips_attribute_group; + struct thermal_attr *trip_temp_attrs; + struct thermal_attr *trip_type_attrs; + struct thermal_attr *trip_hyst_attrs; + enum thermal_device_mode mode; + void *devdata; + int trips; + long unsigned int trips_disabled; + long unsigned int passive_delay_jiffies; + long unsigned int polling_delay_jiffies; + int temperature; + int last_temperature; + int emul_temperature; + int passive; + int prev_low_trip; + int prev_high_trip; + atomic_t need_update; + struct thermal_zone_device_ops *ops; + struct thermal_zone_params *tzp; + struct thermal_governor *governor; + void *governor_data; + struct list_head thermal_instances; + struct ida ida; + struct mutex lock; + struct list_head node; + struct delayed_work poll_queue; + enum thermal_notify_event notify_event; +}; + +struct thermal_bind_params; + +struct thermal_zone_params { + char governor_name[20]; + bool no_hwmon; + int num_tbps; + struct thermal_bind_params *tbp; + u32 sustainable_power; + s32 k_po; + s32 k_pu; + s32 k_i; + s32 k_d; + s32 integral_cutoff; + int slope; + int offset; +}; + +struct thermal_governor { + char name[20]; + int (*bind_to_tz)(struct thermal_zone_device *); + void (*unbind_from_tz)(struct thermal_zone_device *); + int (*throttle)(struct thermal_zone_device *, int); + struct list_head governor_list; +}; + +struct thermal_bind_params { + struct thermal_cooling_device *cdev; + int weight; + int trip_mask; + long unsigned int *binding_limits; + int (*match)(struct thermal_zone_device *, struct thermal_cooling_device *); +}; + +struct acpi_thermal_state { + u8 critical: 1; + u8 hot: 1; + u8 passive: 1; + u8 active: 1; + u8 reserved: 4; + int active_index; +}; + +struct acpi_thermal_state_flags { + u8 
valid: 1; + u8 enabled: 1; + u8 reserved: 6; +}; + +struct acpi_thermal_critical { + struct acpi_thermal_state_flags flags; + long unsigned int temperature; +}; + +struct acpi_thermal_hot { + struct acpi_thermal_state_flags flags; + long unsigned int temperature; +}; + +struct acpi_thermal_passive { + struct acpi_thermal_state_flags flags; + long unsigned int temperature; + long unsigned int tc1; + long unsigned int tc2; + long unsigned int tsp; + struct acpi_handle_list devices; +}; + +struct acpi_thermal_active { + struct acpi_thermal_state_flags flags; + long unsigned int temperature; + struct acpi_handle_list devices; +}; + +struct acpi_thermal_trips { + struct acpi_thermal_critical critical; + struct acpi_thermal_hot hot; + struct acpi_thermal_passive passive; + struct acpi_thermal_active active[10]; +}; + +struct acpi_thermal_flags { + u8 cooling_mode: 1; + u8 devices: 1; + u8 reserved: 6; +}; + +struct acpi_thermal { + struct acpi_device *device; + acpi_bus_id name; + long unsigned int temperature; + long unsigned int last_temperature; + long unsigned int polling_frequency; + volatile u8 zombie; + struct acpi_thermal_flags flags; + struct acpi_thermal_state state; + struct acpi_thermal_trips trips; + struct acpi_handle_list devices; + struct thermal_zone_device *thermal_zone; + int kelvin_offset; + struct work_struct thermal_check_work; + struct mutex thermal_check_lock; + refcount_t thermal_check_count; +}; + +struct acpi_table_slit { + struct acpi_table_header header; + u64 locality_count; + u8 entry[1]; +} __attribute__((packed)); + +struct acpi_table_srat { + struct acpi_table_header header; + u32 table_revision; + u64 reserved; +}; + +enum acpi_srat_type { + ACPI_SRAT_TYPE_CPU_AFFINITY = 0, + ACPI_SRAT_TYPE_MEMORY_AFFINITY = 1, + ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY = 2, + ACPI_SRAT_TYPE_GICC_AFFINITY = 3, + ACPI_SRAT_TYPE_GIC_ITS_AFFINITY = 4, + ACPI_SRAT_TYPE_GENERIC_AFFINITY = 5, + ACPI_SRAT_TYPE_RESERVED = 6, +}; + +struct acpi_srat_mem_affinity { + struct acpi_subtable_header header; + u32 proximity_domain; + u16 reserved; + u64 base_address; + u64 length; + u32 reserved1; + u32 flags; + u64 reserved2; +} __attribute__((packed)); + +struct acpi_srat_gicc_affinity { + struct acpi_subtable_header header; + u32 proximity_domain; + u32 acpi_processor_uid; + u32 flags; + u32 clock_domain; +} __attribute__((packed)); + +struct acpi_srat_generic_affinity { + struct acpi_subtable_header header; + u8 reserved; + u8 device_handle_type; + u32 proximity_domain; + u8 device_handle[16]; + u32 flags; + u32 reserved1; +}; + +enum acpi_hmat_type { + ACPI_HMAT_TYPE_PROXIMITY = 0, + ACPI_HMAT_TYPE_LOCALITY = 1, + ACPI_HMAT_TYPE_CACHE = 2, + ACPI_HMAT_TYPE_RESERVED = 3, +}; + +struct acpi_hmat_proximity_domain { + struct acpi_hmat_structure header; + u16 flags; + u16 reserved1; + u32 processor_PD; + u32 memory_PD; + u32 reserved2; + u64 reserved3; + u64 reserved4; +}; + +struct acpi_hmat_locality { + struct acpi_hmat_structure header; + u8 flags; + u8 data_type; + u8 min_transfer_size; + u8 reserved1; + u32 number_of_initiator_Pds; + u32 number_of_target_Pds; + u32 reserved2; + u64 entry_base_unit; +}; + +struct acpi_hmat_cache { + struct acpi_hmat_structure header; + u32 memory_PD; + u32 reserved1; + u64 cache_size; + u32 cache_attributes; + u16 reserved2; + u16 number_of_SMBIOShandles; +}; + +struct node_hmem_attrs { + unsigned int read_bandwidth; + unsigned int write_bandwidth; + unsigned int read_latency; + unsigned int write_latency; +}; + +enum cache_indexing { + NODE_CACHE_DIRECT_MAP = 
0, + NODE_CACHE_INDEXED = 1, + NODE_CACHE_OTHER = 2, +}; + +enum cache_write_policy { + NODE_CACHE_WRITE_BACK = 0, + NODE_CACHE_WRITE_THROUGH = 1, + NODE_CACHE_WRITE_OTHER = 2, +}; + +struct node_cache_attrs { + enum cache_indexing indexing; + enum cache_write_policy write_policy; + u64 size; + u16 line_size; + u8 level; +}; + +enum locality_types { + WRITE_LATENCY = 0, + READ_LATENCY = 1, + WRITE_BANDWIDTH = 2, + READ_BANDWIDTH = 3, +}; + +struct memory_locality { + struct list_head node; + struct acpi_hmat_locality *hmat_loc; +}; + +struct target_cache { + struct list_head node; + struct node_cache_attrs cache_attrs; +}; + +struct memory_target { + struct list_head node; + unsigned int memory_pxm; + unsigned int processor_pxm; + struct resource memregions; + struct node_hmem_attrs hmem_attrs[2]; + struct list_head caches; + struct node_cache_attrs cache_attrs; + bool registered; +}; + +struct memory_initiator { + struct list_head node; + unsigned int processor_pxm; + bool has_cpu; +}; + +struct acpi_memory_info { + struct list_head list; + u64 start_addr; + u64 length; + short unsigned int caching; + short unsigned int write_protect; + unsigned int enabled: 1; +}; + +struct acpi_memory_device { + struct acpi_device *device; + struct list_head res_list; +}; + +struct acpi_pci_ioapic { + acpi_handle root_handle; + acpi_handle handle; + u32 gsi_base; + struct resource res; + struct pci_dev *pdev; + struct list_head list; +}; + +enum dmi_entry_type { + DMI_ENTRY_BIOS = 0, + DMI_ENTRY_SYSTEM = 1, + DMI_ENTRY_BASEBOARD = 2, + DMI_ENTRY_CHASSIS = 3, + DMI_ENTRY_PROCESSOR = 4, + DMI_ENTRY_MEM_CONTROLLER = 5, + DMI_ENTRY_MEM_MODULE = 6, + DMI_ENTRY_CACHE = 7, + DMI_ENTRY_PORT_CONNECTOR = 8, + DMI_ENTRY_SYSTEM_SLOT = 9, + DMI_ENTRY_ONBOARD_DEVICE = 10, + DMI_ENTRY_OEMSTRINGS = 11, + DMI_ENTRY_SYSCONF = 12, + DMI_ENTRY_BIOS_LANG = 13, + DMI_ENTRY_GROUP_ASSOC = 14, + DMI_ENTRY_SYSTEM_EVENT_LOG = 15, + DMI_ENTRY_PHYS_MEM_ARRAY = 16, + DMI_ENTRY_MEM_DEVICE = 17, + DMI_ENTRY_32_MEM_ERROR = 18, + DMI_ENTRY_MEM_ARRAY_MAPPED_ADDR = 19, + DMI_ENTRY_MEM_DEV_MAPPED_ADDR = 20, + DMI_ENTRY_BUILTIN_POINTING_DEV = 21, + DMI_ENTRY_PORTABLE_BATTERY = 22, + DMI_ENTRY_SYSTEM_RESET = 23, + DMI_ENTRY_HW_SECURITY = 24, + DMI_ENTRY_SYSTEM_POWER_CONTROLS = 25, + DMI_ENTRY_VOLTAGE_PROBE = 26, + DMI_ENTRY_COOLING_DEV = 27, + DMI_ENTRY_TEMP_PROBE = 28, + DMI_ENTRY_ELECTRICAL_CURRENT_PROBE = 29, + DMI_ENTRY_OOB_REMOTE_ACCESS = 30, + DMI_ENTRY_BIS_ENTRY = 31, + DMI_ENTRY_SYSTEM_BOOT = 32, + DMI_ENTRY_MGMT_DEV = 33, + DMI_ENTRY_MGMT_DEV_COMPONENT = 34, + DMI_ENTRY_MGMT_DEV_THRES = 35, + DMI_ENTRY_MEM_CHANNEL = 36, + DMI_ENTRY_IPMI_DEV = 37, + DMI_ENTRY_SYS_POWER_SUPPLY = 38, + DMI_ENTRY_ADDITIONAL = 39, + DMI_ENTRY_ONBOARD_DEV_EXT = 40, + DMI_ENTRY_MGMT_CONTROLLER_HOST = 41, + DMI_ENTRY_INACTIVE = 126, + DMI_ENTRY_END_OF_TABLE = 127, +}; + +enum { + POWER_SUPPLY_STATUS_UNKNOWN = 0, + POWER_SUPPLY_STATUS_CHARGING = 1, + POWER_SUPPLY_STATUS_DISCHARGING = 2, + POWER_SUPPLY_STATUS_NOT_CHARGING = 3, + POWER_SUPPLY_STATUS_FULL = 4, +}; + +enum { + POWER_SUPPLY_TECHNOLOGY_UNKNOWN = 0, + POWER_SUPPLY_TECHNOLOGY_NiMH = 1, + POWER_SUPPLY_TECHNOLOGY_LION = 2, + POWER_SUPPLY_TECHNOLOGY_LIPO = 3, + POWER_SUPPLY_TECHNOLOGY_LiFe = 4, + POWER_SUPPLY_TECHNOLOGY_NiCd = 5, + POWER_SUPPLY_TECHNOLOGY_LiMn = 6, +}; + +enum { + POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN = 0, + POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL = 1, + POWER_SUPPLY_CAPACITY_LEVEL_LOW = 2, + POWER_SUPPLY_CAPACITY_LEVEL_NORMAL = 3, + POWER_SUPPLY_CAPACITY_LEVEL_HIGH = 4, + 
POWER_SUPPLY_CAPACITY_LEVEL_FULL = 5, +}; + +struct acpi_battery_hook { + const char *name; + int (*add_battery)(struct power_supply *); + int (*remove_battery)(struct power_supply *); + struct list_head list; +}; + +enum { + ACPI_BATTERY_ALARM_PRESENT = 0, + ACPI_BATTERY_XINFO_PRESENT = 1, + ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY = 2, + ACPI_BATTERY_QUIRK_THINKPAD_MAH = 3, + ACPI_BATTERY_QUIRK_DEGRADED_FULL_CHARGE = 4, +}; + +struct acpi_battery { + struct mutex lock; + struct mutex sysfs_lock; + struct power_supply *bat; + struct power_supply_desc bat_desc; + struct acpi_device *device; + struct notifier_block pm_nb; + struct list_head list; + long unsigned int update_time; + int revision; + int rate_now; + int capacity_now; + int voltage_now; + int design_capacity; + int full_charge_capacity; + int technology; + int design_voltage; + int design_capacity_warning; + int design_capacity_low; + int cycle_count; + int measurement_accuracy; + int max_sampling_time; + int min_sampling_time; + int max_averaging_interval; + int min_averaging_interval; + int capacity_granularity_1; + int capacity_granularity_2; + int alarm; + char model_number[32]; + char serial_number[32]; + char type[32]; + char oem_info[32]; + int state; + int power_unit; + long unsigned int flags; +}; + +struct acpi_offsets { + size_t offset; + u8 mode; +}; + +struct acpi_pcct_hw_reduced { + struct acpi_subtable_header header; + u32 platform_interrupt; + u8 flags; + u8 reserved; + u64 base_address; + u64 length; + struct acpi_generic_address doorbell_register; + u64 preserve_mask; + u64 write_mask; + u32 latency; + u32 max_access_rate; + u16 min_turnaround_time; +} __attribute__((packed)); + +struct acpi_pcct_shared_memory { + u32 signature; + u16 command; + u16 status; +}; + +struct mbox_chan; + +struct mbox_chan_ops { + int (*send_data)(struct mbox_chan *, void *); + int (*flush)(struct mbox_chan *, long unsigned int); + int (*startup)(struct mbox_chan *); + void (*shutdown)(struct mbox_chan *); + bool (*last_tx_done)(struct mbox_chan *); + bool (*peek_data)(struct mbox_chan *); +}; + +struct mbox_controller; + +struct mbox_client; + +struct mbox_chan { + struct mbox_controller *mbox; + unsigned int txdone_method; + struct mbox_client *cl; + struct completion tx_complete; + void *active_req; + unsigned int msg_count; + unsigned int msg_free; + void *msg_data[20]; + spinlock_t lock; + void *con_priv; +}; + +struct mbox_controller { + struct device *dev; + const struct mbox_chan_ops *ops; + struct mbox_chan *chans; + int num_chans; + bool txdone_irq; + bool txdone_poll; + unsigned int txpoll_period; + struct mbox_chan * (*of_xlate)(struct mbox_controller *, const struct of_phandle_args *); + struct hrtimer poll_hrt; + struct list_head node; +}; + +struct mbox_client { + struct device *dev; + bool tx_block; + long unsigned int tx_tout; + bool knows_txdone; + void (*rx_callback)(struct mbox_client *, void *); + void (*tx_prepare)(struct mbox_client *, void *); + void (*tx_done)(struct mbox_client *, void *, int); +}; + +struct cpc_register_resource { + acpi_object_type type; + u64 *sys_mem_vaddr; + union { + struct cpc_reg reg; + u64 int_value; + } cpc_entry; +}; + +struct cpc_desc { + int num_entries; + int version; + int cpu_id; + int write_cmd_status; + int write_cmd_id; + struct cpc_register_resource cpc_regs[21]; + struct acpi_psd_package domain_info; + struct kobject kobj; +}; + +enum cppc_regs { + HIGHEST_PERF = 0, + NOMINAL_PERF = 1, + LOW_NON_LINEAR_PERF = 2, + LOWEST_PERF = 3, + GUARANTEED_PERF = 4, + DESIRED_PERF = 
5, + MIN_PERF = 6, + MAX_PERF = 7, + PERF_REDUC_TOLERANCE = 8, + TIME_WINDOW = 9, + CTR_WRAP_TIME = 10, + REFERENCE_CTR = 11, + DELIVERED_CTR = 12, + PERF_LIMITED = 13, + ENABLE = 14, + AUTO_SEL_ENABLE = 15, + AUTO_ACT_WINDOW = 16, + ENERGY_PERF = 17, + REFERENCE_PERF = 18, + LOWEST_FREQ = 19, + NOMINAL_FREQ = 20, +}; + +struct cppc_perf_ctrls { + u32 max_perf; + u32 min_perf; + u32 desired_perf; +}; + +struct cppc_perf_fb_ctrs { + u64 reference; + u64 delivered; + u64 reference_perf; + u64 wraparound_time; +}; + +struct cppc_cpudata { + struct list_head node; + struct cppc_perf_caps perf_caps; + struct cppc_perf_ctrls perf_ctrls; + struct cppc_perf_fb_ctrs perf_fb_ctrs; + unsigned int shared_type; + cpumask_var_t shared_cpu_map; +}; + +struct cppc_pcc_data { + struct mbox_chan *pcc_channel; + void *pcc_comm_addr; + bool pcc_channel_acquired; + unsigned int deadline_us; + unsigned int pcc_mpar; + unsigned int pcc_mrtt; + unsigned int pcc_nominal; + bool pending_pcc_write_cmd; + bool platform_owns_pcc; + unsigned int pcc_write_cnt; + struct rw_semaphore pcc_lock; + wait_queue_head_t pcc_write_wait_q; + ktime_t last_cmd_cmpl_time; + ktime_t last_mpar_reset; + int mpar_count; + int refcount; +}; + +struct acpi_aml_io { + wait_queue_head_t wait; + long unsigned int flags; + long unsigned int users; + struct mutex lock; + struct task_struct *thread; + char out_buf[4096]; + struct circ_buf out_crc; + char in_buf[4096]; + struct circ_buf in_crc; + acpi_osd_exec_callback function; + void *context; + long unsigned int usages; +}; + +struct acpi_whea_header { + u8 action; + u8 instruction; + u8 flags; + u8 reserved; + struct acpi_generic_address register_region; + u64 value; + u64 mask; +} __attribute__((packed)); + +struct apei_exec_context; + +typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *, struct acpi_whea_header *); + +struct apei_exec_ins_type; + +struct apei_exec_context { + u32 ip; + u64 value; + u64 var1; + u64 var2; + u64 src_base; + u64 dst_base; + struct apei_exec_ins_type *ins_table; + u32 instructions; + struct acpi_whea_header *action_table; + u32 entries; +}; + +struct apei_exec_ins_type { + u32 flags; + apei_exec_ins_func_t run; +}; + +struct apei_resources { + struct list_head iomem; + struct list_head ioport; +}; + +typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *, struct acpi_whea_header *, void *); + +struct apei_res { + struct list_head list; + long unsigned int start; + long unsigned int end; +}; + +struct acpi_table_hest { + struct acpi_table_header header; + u32 error_source_count; +}; + +enum acpi_hest_types { + ACPI_HEST_TYPE_IA32_CHECK = 0, + ACPI_HEST_TYPE_IA32_CORRECTED_CHECK = 1, + ACPI_HEST_TYPE_IA32_NMI = 2, + ACPI_HEST_TYPE_NOT_USED3 = 3, + ACPI_HEST_TYPE_NOT_USED4 = 4, + ACPI_HEST_TYPE_NOT_USED5 = 5, + ACPI_HEST_TYPE_AER_ROOT_PORT = 6, + ACPI_HEST_TYPE_AER_ENDPOINT = 7, + ACPI_HEST_TYPE_AER_BRIDGE = 8, + ACPI_HEST_TYPE_GENERIC_ERROR = 9, + ACPI_HEST_TYPE_GENERIC_ERROR_V2 = 10, + ACPI_HEST_TYPE_IA32_DEFERRED_CHECK = 11, + ACPI_HEST_TYPE_RESERVED = 12, +}; + +struct acpi_hest_ia_machine_check { + struct acpi_hest_header header; + u16 reserved1; + u8 flags; + u8 enabled; + u32 records_to_preallocate; + u32 max_sections_per_record; + u64 global_capability_data; + u64 global_control_data; + u8 num_hardware_banks; + u8 reserved3[7]; +}; + +struct acpi_hest_generic { + struct acpi_hest_header header; + u16 related_source_id; + u8 reserved; + u8 enabled; + u32 records_to_preallocate; + u32 max_sections_per_record; + u32 max_raw_data_length; + 
struct acpi_generic_address error_status_address; + struct acpi_hest_notify notify; + u32 error_block_length; +} __attribute__((packed)); + +struct acpi_hest_ia_deferred_check { + struct acpi_hest_header header; + u16 reserved1; + u8 flags; + u8 enabled; + u32 records_to_preallocate; + u32 max_sections_per_record; + struct acpi_hest_notify notify; + u8 num_hardware_banks; + u8 reserved2[3]; +}; + +enum hest_status { + HEST_ENABLED = 0, + HEST_DISABLED = 1, + HEST_NOT_FOUND = 2, +}; + +typedef int (*apei_hest_func_t)(struct acpi_hest_header *, void *); + +struct ghes_arr { + struct platform_device **ghes_devs; + unsigned int count; +}; + +struct acpi_table_erst { + struct acpi_table_header header; + u32 header_length; + u32 reserved; + u32 entries; +}; + +enum acpi_erst_actions { + ACPI_ERST_BEGIN_WRITE = 0, + ACPI_ERST_BEGIN_READ = 1, + ACPI_ERST_BEGIN_CLEAR = 2, + ACPI_ERST_END = 3, + ACPI_ERST_SET_RECORD_OFFSET = 4, + ACPI_ERST_EXECUTE_OPERATION = 5, + ACPI_ERST_CHECK_BUSY_STATUS = 6, + ACPI_ERST_GET_COMMAND_STATUS = 7, + ACPI_ERST_GET_RECORD_ID = 8, + ACPI_ERST_SET_RECORD_ID = 9, + ACPI_ERST_GET_RECORD_COUNT = 10, + ACPI_ERST_BEGIN_DUMMY_WRIITE = 11, + ACPI_ERST_NOT_USED = 12, + ACPI_ERST_GET_ERROR_RANGE = 13, + ACPI_ERST_GET_ERROR_LENGTH = 14, + ACPI_ERST_GET_ERROR_ATTRIBUTES = 15, + ACPI_ERST_EXECUTE_TIMINGS = 16, + ACPI_ERST_ACTION_RESERVED = 17, +}; + +enum acpi_erst_instructions { + ACPI_ERST_READ_REGISTER = 0, + ACPI_ERST_READ_REGISTER_VALUE = 1, + ACPI_ERST_WRITE_REGISTER = 2, + ACPI_ERST_WRITE_REGISTER_VALUE = 3, + ACPI_ERST_NOOP = 4, + ACPI_ERST_LOAD_VAR1 = 5, + ACPI_ERST_LOAD_VAR2 = 6, + ACPI_ERST_STORE_VAR1 = 7, + ACPI_ERST_ADD = 8, + ACPI_ERST_SUBTRACT = 9, + ACPI_ERST_ADD_VALUE = 10, + ACPI_ERST_SUBTRACT_VALUE = 11, + ACPI_ERST_STALL = 12, + ACPI_ERST_STALL_WHILE_TRUE = 13, + ACPI_ERST_SKIP_NEXT_IF_TRUE = 14, + ACPI_ERST_GOTO = 15, + ACPI_ERST_SET_SRC_ADDRESS_BASE = 16, + ACPI_ERST_SET_DST_ADDRESS_BASE = 17, + ACPI_ERST_MOVE_DATA = 18, + ACPI_ERST_INSTRUCTION_RESERVED = 19, +}; + +struct erst_erange { + u64 base; + u64 size; + void *vaddr; + u32 attr; +}; + +struct erst_record_id_cache { + struct mutex lock; + u64 *entries; + int len; + int size; + int refcount; +}; + +struct cper_pstore_record { + struct cper_record_header hdr; + struct cper_section_descriptor sec_hdr; + char data[0]; +}; + +struct acpi_bert_region { + u32 block_status; + u32 raw_data_offset; + u32 raw_data_length; + u32 data_length; + u32 error_severity; +}; + +struct acpi_hest_generic_status { + u32 block_status; + u32 raw_data_offset; + u32 raw_data_length; + u32 data_length; + u32 error_severity; +}; + +enum acpi_hest_notify_types { + ACPI_HEST_NOTIFY_POLLED = 0, + ACPI_HEST_NOTIFY_EXTERNAL = 1, + ACPI_HEST_NOTIFY_LOCAL = 2, + ACPI_HEST_NOTIFY_SCI = 3, + ACPI_HEST_NOTIFY_NMI = 4, + ACPI_HEST_NOTIFY_CMCI = 5, + ACPI_HEST_NOTIFY_MCE = 6, + ACPI_HEST_NOTIFY_GPIO = 7, + ACPI_HEST_NOTIFY_SEA = 8, + ACPI_HEST_NOTIFY_SEI = 9, + ACPI_HEST_NOTIFY_GSIV = 10, + ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED = 11, + ACPI_HEST_NOTIFY_RESERVED = 12, +}; + +struct acpi_hest_generic_v2 { + struct acpi_hest_header header; + u16 related_source_id; + u8 reserved; + u8 enabled; + u32 records_to_preallocate; + u32 max_sections_per_record; + u32 max_raw_data_length; + struct acpi_generic_address error_status_address; + struct acpi_hest_notify notify; + u32 error_block_length; + struct acpi_generic_address read_ack_register; + u64 read_ack_preserve; + u64 read_ack_write; +} __attribute__((packed)); + +struct acpi_hest_generic_data { + 
u8 section_type[16]; + u32 error_severity; + u16 revision; + u8 validation_bits; + u8 flags; + u32 error_data_length; + u8 fru_id[16]; + u8 fru_text[20]; +}; + +struct acpi_hest_generic_data_v300 { + u8 section_type[16]; + u32 error_severity; + u16 revision; + u8 validation_bits; + u8 flags; + u32 error_data_length; + u8 fru_id[16]; + u8 fru_text[20]; + u64 time_stamp; +}; + +struct cper_sec_proc_arm { + u32 validation_bits; + u16 err_info_num; + u16 context_info_num; + u32 section_length; + u8 affinity_level; + u8 reserved[3]; + u64 mpidr; + u64 midr; + u32 running_state; + u32 psci_state; +}; + +struct cper_arm_err_info { + u8 version; + u8 length; + u16 validation_bits; + u8 type; + u16 multiple_error; + u8 flags; + u64 error_info; + u64 virt_fault_addr; + u64 physical_fault_addr; +} __attribute__((packed)); + +struct cper_sec_pcie { + u64 validation_bits; + u32 port_type; + struct { + u8 minor; + u8 major; + u8 reserved[2]; + } version; + u16 command; + u16 status; + u32 reserved; + struct { + u16 vendor_id; + u16 device_id; + u8 class_code[3]; + u8 function; + u8 device; + u16 segment; + u8 bus; + u8 secondary_bus; + u16 slot; + u8 reserved; + } __attribute__((packed)) device_id; + struct { + u32 lower; + u32 upper; + } serial_number; + struct { + u16 secondary_status; + u16 control; + } bridge; + u8 capability[60]; + u8 aer_info[96]; +}; + +struct ghes { + union { + struct acpi_hest_generic *generic; + struct acpi_hest_generic_v2 *generic_v2; + }; + struct acpi_hest_generic_status *estatus; + long unsigned int flags; + union { + struct list_head list; + struct timer_list timer; + unsigned int irq; + }; +}; + +struct ghes_estatus_node { + struct llist_node llnode; + struct acpi_hest_generic *generic; + struct ghes *ghes; + int task_work_cpu; + struct callback_head task_work; +}; + +struct ghes_estatus_cache { + u32 estatus_len; + atomic_t count; + struct acpi_hest_generic *generic; + long long unsigned int time_in; + struct callback_head rcu; +}; + +struct ghes_vendor_record_entry { + struct work_struct work; + int error_severity; + char vendor_record[0]; +}; + +struct pmic_table { + int address; + int reg; + int bit; +}; + +struct intel_pmic_opregion_data { + int (*get_power)(struct regmap *, int, int, u64 *); + int (*update_power)(struct regmap *, int, int, bool); + int (*get_raw_temp)(struct regmap *, int); + int (*update_aux)(struct regmap *, int, int); + int (*get_policy)(struct regmap *, int, int, u64 *); + int (*update_policy)(struct regmap *, int, int, int); + int (*exec_mipi_pmic_seq_element)(struct regmap *, u16, u32, u32, u32); + struct pmic_table *power_table; + int power_table_count; + struct pmic_table *thermal_table; + int thermal_table_count; + int pmic_i2c_address; +}; + +struct intel_pmic_regs_handler_ctx { + unsigned int val; + u16 addr; +}; + +struct intel_pmic_opregion { + struct mutex lock; + struct acpi_lpat_conversion_table *lpat_table; + struct regmap *regmap; + struct intel_pmic_opregion_data *data; + struct intel_pmic_regs_handler_ctx ctx; +}; + +struct regmap_irq_type { + unsigned int type_reg_offset; + unsigned int type_reg_mask; + unsigned int type_rising_val; + unsigned int type_falling_val; + unsigned int type_level_low_val; + unsigned int type_level_high_val; + unsigned int types_supported; +}; + +struct regmap_irq { + unsigned int reg_offset; + unsigned int mask; + struct regmap_irq_type type; +}; + +struct regmap_irq_sub_irq_map { + unsigned int num_regs; + unsigned int *offset; +}; + +struct regmap_irq_chip { + const char *name; + unsigned int 
main_status; + unsigned int num_main_status_bits; + struct regmap_irq_sub_irq_map *sub_reg_offsets; + int num_main_regs; + unsigned int status_base; + unsigned int mask_base; + unsigned int unmask_base; + unsigned int ack_base; + unsigned int wake_base; + unsigned int type_base; + unsigned int *virt_reg_base; + unsigned int irq_reg_stride; + bool mask_writeonly: 1; + bool init_ack_masked: 1; + bool mask_invert: 1; + bool use_ack: 1; + bool ack_invert: 1; + bool clear_ack: 1; + bool wake_invert: 1; + bool runtime_pm: 1; + bool type_invert: 1; + bool type_in_mask: 1; + bool clear_on_unmask: 1; + bool not_fixed_stride: 1; + int num_regs; + const struct regmap_irq *irqs; + int num_irqs; + int num_type_reg; + int num_virt_regs; + unsigned int type_reg_stride; + int (*handle_pre_irq)(void *); + int (*handle_post_irq)(void *); + int (*set_type_virt)(unsigned int **, unsigned int, long unsigned int, int); + void *irq_drv_data; +}; + +struct axp20x_dev { + struct device *dev; + int irq; + long unsigned int irq_flags; + struct regmap *regmap; + struct regmap_irq_chip_data *regmap_irqc; + long int variant; + int nr_cells; + const struct mfd_cell *cells; + const struct regmap_config *regmap_cfg; + const struct regmap_irq_chip *regmap_irq_chip; +}; + +struct mfd_cell_acpi_match; + +struct mfd_cell { + const char *name; + int id; + int level; + int (*enable)(struct platform_device *); + int (*disable)(struct platform_device *); + int (*suspend)(struct platform_device *); + int (*resume)(struct platform_device *); + void *platform_data; + size_t pdata_size; + const struct software_node *swnode; + const char *of_compatible; + const u64 of_reg; + bool use_of_reg; + const struct mfd_cell_acpi_match *acpi_match; + int num_resources; + const struct resource *resources; + bool ignore_resource_conflicts; + bool pm_runtime_no_callbacks; + const char * const *parent_supplies; + int num_parent_supplies; +}; + +struct tps68470_pmic_table { + u32 address; + u32 reg; + u32 bitmask; +}; + +struct tps68470_pmic_opregion { + struct mutex lock; + struct regmap *regmap; +}; + +struct pnp_resource { + struct list_head list; + struct resource res; +}; + +struct pnp_port { + resource_size_t min; + resource_size_t max; + resource_size_t align; + resource_size_t size; + unsigned char flags; +}; + +typedef struct { + long unsigned int bits[4]; +} pnp_irq_mask_t; + +struct pnp_irq { + pnp_irq_mask_t map; + unsigned char flags; +}; + +struct pnp_dma { + unsigned char map; + unsigned char flags; +}; + +struct pnp_mem { + resource_size_t min; + resource_size_t max; + resource_size_t align; + resource_size_t size; + unsigned char flags; +}; + +struct pnp_option { + struct list_head list; + unsigned int flags; + long unsigned int type; + union { + struct pnp_port port; + struct pnp_irq irq; + struct pnp_dma dma; + struct pnp_mem mem; + } u; +}; + +struct pnp_info_buffer { + char *buffer; + char *curr; + long unsigned int size; + long unsigned int len; + int stop; + int error; +}; + +typedef struct pnp_info_buffer pnp_info_buffer_t; + +struct pnp_fixup { + char id[7]; + void (*quirk_function)(struct pnp_dev *); +}; + +struct acpipnp_parse_option_s { + struct pnp_dev *dev; + unsigned int option_flags; +}; + +struct clk_bulk_data { + const char *id; + struct clk *clk; +}; + +struct clk_bulk_devres { + struct clk_bulk_data *clks; + int num_clks; +}; + +struct clk_hw; + +struct clk_lookup { + struct list_head node; + const char *dev_id; + const char *con_id; + struct clk *clk; + struct clk_hw *clk_hw; +}; + +struct clk_init_data; + 
+struct clk_hw { + struct clk_core *core; + struct clk *clk; + const struct clk_init_data *init; +}; + +struct clk_rate_request { + long unsigned int rate; + long unsigned int min_rate; + long unsigned int max_rate; + long unsigned int best_parent_rate; + struct clk_hw *best_parent_hw; +}; + +struct clk_duty { + unsigned int num; + unsigned int den; +}; + +struct clk_ops { + int (*prepare)(struct clk_hw *); + void (*unprepare)(struct clk_hw *); + int (*is_prepared)(struct clk_hw *); + void (*unprepare_unused)(struct clk_hw *); + int (*enable)(struct clk_hw *); + void (*disable)(struct clk_hw *); + int (*is_enabled)(struct clk_hw *); + void (*disable_unused)(struct clk_hw *); + int (*save_context)(struct clk_hw *); + void (*restore_context)(struct clk_hw *); + long unsigned int (*recalc_rate)(struct clk_hw *, long unsigned int); + long int (*round_rate)(struct clk_hw *, long unsigned int, long unsigned int *); + int (*determine_rate)(struct clk_hw *, struct clk_rate_request *); + int (*set_parent)(struct clk_hw *, u8); + u8 (*get_parent)(struct clk_hw *); + int (*set_rate)(struct clk_hw *, long unsigned int, long unsigned int); + int (*set_rate_and_parent)(struct clk_hw *, long unsigned int, long unsigned int, u8); + long unsigned int (*recalc_accuracy)(struct clk_hw *, long unsigned int); + int (*get_phase)(struct clk_hw *); + int (*set_phase)(struct clk_hw *, int); + int (*get_duty_cycle)(struct clk_hw *, struct clk_duty *); + int (*set_duty_cycle)(struct clk_hw *, struct clk_duty *); + int (*init)(struct clk_hw *); + void (*terminate)(struct clk_hw *); + void (*debug_init)(struct clk_hw *, struct dentry *); +}; + +struct clk_parent_data { + const struct clk_hw *hw; + const char *fw_name; + const char *name; + int index; +}; + +struct clk_init_data { + const char *name; + const struct clk_ops *ops; + const char * const *parent_names; + const struct clk_parent_data *parent_data; + const struct clk_hw **parent_hws; + u8 num_parents; + long unsigned int flags; +}; + +struct clk_lookup_alloc { + struct clk_lookup cl; + char dev_id[20]; + char con_id[16]; +}; + +struct clk_notifier { + struct clk *clk; + struct srcu_notifier_head notifier_head; + struct list_head node; +}; + +struct clk_notifier_data { + struct clk *clk; + long unsigned int old_rate; + long unsigned int new_rate; +}; + +struct clk_parent_map; + +struct clk_core { + const char *name; + const struct clk_ops *ops; + struct clk_hw *hw; + struct module *owner; + struct device *dev; + struct device_node *of_node; + struct clk_core *parent; + struct clk_parent_map *parents; + u8 num_parents; + u8 new_parent_index; + long unsigned int rate; + long unsigned int req_rate; + long unsigned int new_rate; + struct clk_core *new_parent; + struct clk_core *new_child; + long unsigned int flags; + bool orphan; + bool rpm_enabled; + unsigned int enable_count; + unsigned int prepare_count; + unsigned int protect_count; + long unsigned int min_rate; + long unsigned int max_rate; + long unsigned int accuracy; + int phase; + struct clk_duty duty; + struct hlist_head children; + struct hlist_node child_node; + struct hlist_head clks; + unsigned int notifier_count; + struct dentry *dentry; + struct hlist_node debug_node; + struct kref ref; +}; + +struct clk_parent_map { + const struct clk_hw *hw; + struct clk_core *core; + const char *fw_name; + const char *name; + int index; +}; + +struct trace_event_raw_clk { + struct trace_entry ent; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_clk_rate { + struct trace_entry ent; + u32 
__data_loc_name; + long unsigned int rate; + char __data[0]; +}; + +struct trace_event_raw_clk_rate_range { + struct trace_entry ent; + u32 __data_loc_name; + long unsigned int min; + long unsigned int max; + char __data[0]; +}; + +struct trace_event_raw_clk_parent { + struct trace_entry ent; + u32 __data_loc_name; + u32 __data_loc_pname; + char __data[0]; +}; + +struct trace_event_raw_clk_phase { + struct trace_entry ent; + u32 __data_loc_name; + int phase; + char __data[0]; +}; + +struct trace_event_raw_clk_duty_cycle { + struct trace_entry ent; + u32 __data_loc_name; + unsigned int num; + unsigned int den; + char __data[0]; +}; + +struct trace_event_data_offsets_clk { + u32 name; +}; + +struct trace_event_data_offsets_clk_rate { + u32 name; +}; + +struct trace_event_data_offsets_clk_rate_range { + u32 name; +}; + +struct trace_event_data_offsets_clk_parent { + u32 name; + u32 pname; +}; + +struct trace_event_data_offsets_clk_phase { + u32 name; +}; + +struct trace_event_data_offsets_clk_duty_cycle { + u32 name; +}; + +typedef void (*btf_trace_clk_enable)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_enable_complete)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_disable)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_disable_complete)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_prepare)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_prepare_complete)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_unprepare)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_unprepare_complete)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_set_rate)(void *, struct clk_core *, long unsigned int); + +typedef void (*btf_trace_clk_set_rate_complete)(void *, struct clk_core *, long unsigned int); + +typedef void (*btf_trace_clk_set_min_rate)(void *, struct clk_core *, long unsigned int); + +typedef void (*btf_trace_clk_set_max_rate)(void *, struct clk_core *, long unsigned int); + +typedef void (*btf_trace_clk_set_rate_range)(void *, struct clk_core *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_clk_set_parent)(void *, struct clk_core *, struct clk_core *); + +typedef void (*btf_trace_clk_set_parent_complete)(void *, struct clk_core *, struct clk_core *); + +typedef void (*btf_trace_clk_set_phase)(void *, struct clk_core *, int); + +typedef void (*btf_trace_clk_set_phase_complete)(void *, struct clk_core *, int); + +typedef void (*btf_trace_clk_set_duty_cycle)(void *, struct clk_core *, struct clk_duty *); + +typedef void (*btf_trace_clk_set_duty_cycle_complete)(void *, struct clk_core *, struct clk_duty *); + +struct clk_notifier_devres { + struct clk *clk; + struct notifier_block *nb; +}; + +struct clk_div_table { + unsigned int val; + unsigned int div; +}; + +struct clk_divider { + struct clk_hw hw; + void *reg; + u8 shift; + u8 width; + u8 flags; + const struct clk_div_table *table; + spinlock_t *lock; +}; + +struct clk_fixed_factor { + struct clk_hw hw; + unsigned int mult; + unsigned int div; +}; + +struct clk_fixed_rate { + struct clk_hw hw; + long unsigned int fixed_rate; + long unsigned int fixed_accuracy; + long unsigned int flags; +}; + +struct clk_gate { + struct clk_hw hw; + void *reg; + u8 bit_idx; + u8 flags; + spinlock_t *lock; +}; + +struct clk_multiplier { + struct clk_hw hw; + void *reg; + u8 shift; + u8 width; + u8 flags; + spinlock_t *lock; +}; + +struct clk_mux { + struct clk_hw hw; + void *reg; + u32 *table; + u32 mask; + u8 shift; + u8 flags; + spinlock_t 
*lock; +}; + +struct clk_composite { + struct clk_hw hw; + struct clk_ops ops; + struct clk_hw *mux_hw; + struct clk_hw *rate_hw; + struct clk_hw *gate_hw; + const struct clk_ops *mux_ops; + const struct clk_ops *rate_ops; + const struct clk_ops *gate_ops; +}; + +struct clk_fractional_divider { + struct clk_hw hw; + void *reg; + u8 mshift; + u8 mwidth; + u32 mmask; + u8 nshift; + u8 nwidth; + u32 nmask; + u8 flags; + void (*approximation)(struct clk_hw *, long unsigned int, long unsigned int *, long unsigned int *, long unsigned int *); + spinlock_t *lock; +}; + +struct clk_gpio { + struct clk_hw hw; + struct gpio_desc *gpiod; +}; + +struct pmc_clk { + const char *name; + long unsigned int freq; + const char *parent_name; +}; + +struct pmc_clk_data { + void *base; + const struct pmc_clk *clks; + bool critical; +}; + +struct clk_plt_fixed { + struct clk_hw *clk; + struct clk_lookup *lookup; +}; + +struct clk_plt { + struct clk_hw hw; + void *reg; + struct clk_lookup *lookup; + spinlock_t lock; +}; + +struct clk_plt_data { + struct clk_plt_fixed **parents; + u8 nparents; + struct clk_plt *clks[6]; + struct clk_lookup *mclk_lookup; + struct clk_lookup *ether_clk_lookup; +}; + +struct dma_chan_tbl_ent { + struct dma_chan___2 *chan; +}; + +struct dmaengine_unmap_pool { + struct kmem_cache *cache; + const char *name; + mempool_t *pool; + size_t size; +}; + +struct dmaengine_desc_callback { + dma_async_tx_callback callback; + dma_async_tx_callback_result callback_result; + void *callback_param; +}; + +struct virt_dma_desc { + struct dma_async_tx_descriptor tx; + struct dmaengine_result tx_result; + struct list_head node; +}; + +struct virt_dma_chan { + struct dma_chan___2 chan; + struct tasklet_struct task; + void (*desc_free)(struct virt_dma_desc *); + spinlock_t lock; + struct list_head desc_allocated; + struct list_head desc_submitted; + struct list_head desc_issued; + struct list_head desc_completed; + struct list_head desc_terminated; + struct virt_dma_desc *cyclic; +}; + +struct acpi_table_csrt { + struct acpi_table_header header; +}; + +struct acpi_csrt_group { + u32 length; + u32 vendor_id; + u32 subvendor_id; + u16 device_id; + u16 subdevice_id; + u16 revision; + u16 reserved; + u32 shared_info_length; +}; + +struct acpi_csrt_shared_info { + u16 major_version; + u16 minor_version; + u32 mmio_base_low; + u32 mmio_base_high; + u32 gsi_interrupt; + u8 interrupt_polarity; + u8 interrupt_mode; + u8 num_channels; + u8 dma_address_width; + u16 base_request_line; + u16 num_handshake_signals; + u32 max_block_size; +}; + +struct acpi_dma_spec { + int chan_id; + int slave_id; + struct device *dev; +}; + +struct acpi_dma { + struct list_head dma_controllers; + struct device *dev; + struct dma_chan___2 * (*acpi_dma_xlate)(struct acpi_dma_spec *, struct acpi_dma *); + void *data; + short unsigned int base_request_line; + short unsigned int end_request_line; +}; + +struct acpi_dma_filter_info { + dma_cap_mask_t dma_cap; + dma_filter_fn filter_fn; +}; + +struct acpi_dma_parser_data { + struct acpi_dma_spec dma_spec; + size_t index; + size_t n; +}; + +struct of_dma { + struct list_head of_dma_controllers; + struct device_node *of_node; + struct dma_chan___2 * (*of_dma_xlate)(struct of_phandle_args *, struct of_dma *); + void * (*of_dma_route_allocate)(struct of_phandle_args *, struct of_dma *); + struct dma_router *dma_router; + void *of_dma_data; +}; + +struct reset_control; + +enum ldma_chan_on_off { + DMA_CH_OFF = 0, + DMA_CH_ON = 1, +}; + +enum { + DMA_TYPE_TX = 0, + DMA_TYPE_RX = 1, + DMA_TYPE_MCPY 
= 2, +}; + +struct dma_pool___2; + +struct ldma_port; + +struct dw2_desc_sw; + +struct ldma_chan { + struct virt_dma_chan vchan; + struct ldma_port *port; + char name[8]; + int nr; + u32 flags; + enum ldma_chan_on_off onoff; + dma_addr_t desc_phys; + void *desc_base; + u32 desc_cnt; + int rst; + u32 hdrm_len; + bool hdrm_csum; + u32 boff_len; + u32 data_endian; + u32 desc_endian; + bool pden; + bool desc_rx_np; + bool data_endian_en; + bool desc_endian_en; + bool abc_en; + bool desc_init; + struct dma_pool___2 *desc_pool; + u32 desc_num; + struct dw2_desc_sw *ds; + struct work_struct work; + struct dma_slave_config config; +}; + +struct ldma_dev; + +struct ldma_port { + struct ldma_dev *ldev; + u32 portid; + u32 rxbl; + u32 txbl; + u32 rxendi; + u32 txendi; + u32 pkt_drop; +}; + +struct dw2_desc; + +struct dw2_desc_sw { + struct virt_dma_desc vdesc; + struct ldma_chan *chan; + dma_addr_t desc_phys; + size_t desc_cnt; + size_t size; + struct dw2_desc *desc_hw; +}; + +struct ldma_inst_data; + +struct ldma_dev { + struct device *dev; + void *base; + struct reset_control *rst; + struct clk *core_clk; + struct dma_device dma_dev; + u32 ver; + int irq; + struct ldma_port *ports; + struct ldma_chan *chans; + spinlock_t dev_lock; + u32 chan_nrs; + u32 port_nrs; + u32 channels_mask; + u32 flags; + u32 pollcnt; + const struct ldma_inst_data *inst; + struct workqueue_struct *wq; +}; + +struct ldma_inst_data { + bool desc_in_sram; + bool chan_fc; + bool desc_fod; + bool valid_desc_fetch_ack; + u32 orrc; + const char *name; + u32 type; +}; + +struct dw2_desc { + u32 field; + u32 addr; +}; + +struct virtio_driver { + struct device_driver driver; + const struct virtio_device_id *id_table; + const unsigned int *feature_table; + unsigned int feature_table_size; + const unsigned int *feature_table_legacy; + unsigned int feature_table_size_legacy; + int (*validate)(struct virtio_device *); + int (*probe)(struct virtio_device *); + void (*scan)(struct virtio_device *); + void (*remove)(struct virtio_device *); + void (*config_changed)(struct virtio_device *); + int (*freeze)(struct virtio_device *); + int (*restore)(struct virtio_device *); +}; + +typedef __u16 __virtio16; + +typedef __u32 __virtio32; + +typedef __u64 __virtio64; + +struct vring_desc { + __virtio64 addr; + __virtio32 len; + __virtio16 flags; + __virtio16 next; +}; + +struct vring_avail { + __virtio16 flags; + __virtio16 idx; + __virtio16 ring[0]; +}; + +struct vring_used_elem { + __virtio32 id; + __virtio32 len; +}; + +typedef struct vring_used_elem vring_used_elem_t; + +struct vring_used { + __virtio16 flags; + __virtio16 idx; + vring_used_elem_t ring[0]; +}; + +typedef struct vring_desc vring_desc_t; + +typedef struct vring_avail vring_avail_t; + +typedef struct vring_used vring_used_t; + +struct vring { + unsigned int num; + vring_desc_t *desc; + vring_avail_t *avail; + vring_used_t *used; +}; + +struct vring_packed_desc_event { + __le16 off_wrap; + __le16 flags; +}; + +struct vring_packed_desc { + __le64 addr; + __le32 len; + __le16 id; + __le16 flags; +}; + +struct vring_desc_state_split { + void *data; + struct vring_desc *indir_desc; +}; + +struct vring_desc_state_packed { + void *data; + struct vring_packed_desc *indir_desc; + u16 num; + u16 next; + u16 last; +}; + +struct vring_desc_extra_packed { + dma_addr_t addr; + u32 len; + u16 flags; +}; + +struct vring_virtqueue { + struct virtqueue vq; + bool packed_ring; + bool use_dma_api; + bool weak_barriers; + bool broken; + bool indirect; + bool event; + unsigned int free_head; + 
unsigned int num_added; + u16 last_used_idx; + union { + struct { + struct vring vring; + u16 avail_flags_shadow; + u16 avail_idx_shadow; + struct vring_desc_state_split *desc_state; + dma_addr_t queue_dma_addr; + size_t queue_size_in_bytes; + } split; + struct { + struct { + unsigned int num; + struct vring_packed_desc *desc; + struct vring_packed_desc_event *driver; + struct vring_packed_desc_event *device; + } vring; + bool avail_wrap_counter; + bool used_wrap_counter; + u16 avail_used_flags; + u16 next_avail_idx; + u16 event_flags_shadow; + struct vring_desc_state_packed *desc_state; + struct vring_desc_extra_packed *desc_extra; + dma_addr_t ring_dma_addr; + dma_addr_t driver_event_dma_addr; + dma_addr_t device_event_dma_addr; + size_t ring_size_in_bytes; + size_t event_size_in_bytes; + } packed; + }; + bool (*notify)(struct virtqueue *); + bool we_own_ring; +}; + +struct virtio_pci_common_cfg { + __le32 device_feature_select; + __le32 device_feature; + __le32 guest_feature_select; + __le32 guest_feature; + __le16 msix_config; + __le16 num_queues; + __u8 device_status; + __u8 config_generation; + __le16 queue_select; + __le16 queue_size; + __le16 queue_msix_vector; + __le16 queue_enable; + __le16 queue_notify_off; + __le32 queue_desc_lo; + __le32 queue_desc_hi; + __le32 queue_avail_lo; + __le32 queue_avail_hi; + __le32 queue_used_lo; + __le32 queue_used_hi; +}; + +struct virtio_pci_modern_device { + struct pci_dev *pci_dev; + struct virtio_pci_common_cfg *common; + void *device; + void *notify_base; + resource_size_t notify_pa; + u8 *isr; + size_t notify_len; + size_t device_len; + int notify_map_cap; + u32 notify_offset_multiplier; + int modern_bars; + struct virtio_device_id id; +}; + +struct virtio_mmio_device { + struct virtio_device vdev; + struct platform_device *pdev; + void *base; + long unsigned int version; + spinlock_t lock; + struct list_head virtqueues; +}; + +struct virtio_mmio_vq_info { + struct virtqueue *vq; + struct list_head node; +}; + +struct virtio_pci_vq_info { + struct virtqueue *vq; + struct list_head node; + unsigned int msix_vector; +}; + +struct virtio_pci_device { + struct virtio_device vdev; + struct pci_dev *pci_dev; + struct virtio_pci_modern_device mdev; + u8 *isr; + void *ioaddr; + spinlock_t lock; + struct list_head virtqueues; + struct virtio_pci_vq_info **vqs; + int msix_enabled; + int intx_enabled; + cpumask_var_t *msix_affinity_masks; + char (*msix_names)[256]; + unsigned int msix_vectors; + unsigned int msix_used_vectors; + bool per_vq_vectors; + struct virtqueue * (*setup_vq)(struct virtio_pci_device *, struct virtio_pci_vq_info *, unsigned int, void (*)(struct virtqueue *), const char *, bool, u16); + void (*del_vq)(struct virtio_pci_vq_info *); + u16 (*config_vector)(struct virtio_pci_device *, u16); +}; + +enum { + VP_MSIX_CONFIG_VECTOR = 0, + VP_MSIX_VQ_VECTOR = 1, +}; + +struct virtio_balloon_config { + __le32 num_pages; + __le32 actual; + union { + __le32 free_page_hint_cmd_id; + __le32 free_page_report_cmd_id; + }; + __le32 poison_val; +}; + +struct virtio_balloon_stat { + __virtio16 tag; + __virtio64 val; +} __attribute__((packed)); + +enum virtio_balloon_vq { + VIRTIO_BALLOON_VQ_INFLATE = 0, + VIRTIO_BALLOON_VQ_DEFLATE = 1, + VIRTIO_BALLOON_VQ_STATS = 2, + VIRTIO_BALLOON_VQ_FREE_PAGE = 3, + VIRTIO_BALLOON_VQ_REPORTING = 4, + VIRTIO_BALLOON_VQ_MAX = 5, +}; + +enum virtio_balloon_config_read { + VIRTIO_BALLOON_CONFIG_READ_CMD_ID = 0, +}; + +struct virtio_balloon { + struct virtio_device *vdev; + struct virtqueue *inflate_vq; + struct 
virtqueue *deflate_vq; + struct virtqueue *stats_vq; + struct virtqueue *free_page_vq; + struct workqueue_struct *balloon_wq; + struct work_struct report_free_page_work; + struct work_struct update_balloon_stats_work; + struct work_struct update_balloon_size_work; + spinlock_t stop_update_lock; + bool stop_update; + int: 24; + long unsigned int config_read_bitmap; + struct list_head free_page_list; + spinlock_t free_page_list_lock; + int: 32; + long unsigned int num_free_page_blocks; + u32 cmd_id_received_cache; + __virtio32 cmd_id_active; + __virtio32 cmd_id_stop; + int: 32; + wait_queue_head_t acked; + unsigned int num_pages; + int: 32; + struct balloon_dev_info vb_dev_info; + struct mutex balloon_lock; + unsigned int num_pfns; + __virtio32 pfns[256]; + struct virtio_balloon_stat stats[10]; + struct shrinker shrinker; + struct notifier_block oom_nb; + struct virtqueue *reporting_vq; + struct page_reporting_dev_info pr_dev_info; +} __attribute__((packed)); + +struct xsd_errors { + int errnum; + const char *errstring; +}; + +struct xenbus_watch { + struct list_head list; + const char *node; + unsigned int nr_pending; + bool (*will_handle)(struct xenbus_watch *, const char *, const char *); + void (*callback)(struct xenbus_watch *, const char *, const char *); +}; + +struct xenbus_transaction { + u32 id; +}; + +struct grant_entry_v1 { + uint16_t flags; + domid_t domid; + uint32_t frame; +}; + +struct grant_entry_header { + uint16_t flags; + domid_t domid; +}; + +union grant_entry_v2 { + struct grant_entry_header hdr; + struct { + struct grant_entry_header hdr; + uint32_t pad0; + uint64_t frame; + } full_page; + struct { + struct grant_entry_header hdr; + uint16_t page_off; + uint16_t length; + uint64_t frame; + } sub_page; + struct { + struct grant_entry_header hdr; + domid_t trans_domid; + uint16_t pad0; + grant_ref_t gref; + } transitive; + uint32_t __spacer[4]; +}; + +struct gnttab_setup_table { + domid_t dom; + uint32_t nr_frames; + int16_t status; + __guest_handle_xen_pfn_t frame_list; +}; + +struct gnttab_copy { + struct { + union { + grant_ref_t ref; + xen_pfn_t gmfn; + } u; + domid_t domid; + uint16_t offset; + } source; + struct { + union { + grant_ref_t ref; + xen_pfn_t gmfn; + } u; + domid_t domid; + uint16_t offset; + } dest; + uint16_t len; + uint16_t flags; + int16_t status; +}; + +struct gnttab_query_size { + domid_t dom; + uint32_t nr_frames; + uint32_t max_nr_frames; + int16_t status; +}; + +struct gnttab_set_version { + uint32_t version; +}; + +struct gnttab_get_status_frames { + uint32_t nr_frames; + domid_t dom; + int16_t status; + __guest_handle_uint64_t frame_list; +}; + +struct gnttab_free_callback { + struct gnttab_free_callback *next; + void (*fn)(void *); + void *arg; + u16 count; +}; + +struct gntab_unmap_queue_data; + +typedef void (*gnttab_unmap_refs_done)(int, struct gntab_unmap_queue_data *); + +struct gntab_unmap_queue_data { + struct delayed_work gnttab_work; + void *data; + gnttab_unmap_refs_done done; + struct gnttab_unmap_grant_ref *unmap_ops; + struct gnttab_unmap_grant_ref *kunmap_ops; + struct page **pages; + unsigned int count; + unsigned int age; +}; + +struct gnttab_page_cache { + spinlock_t lock; + struct page *pages; + unsigned int num_pages; +}; + +struct gnttab_dma_alloc_args { + struct device *dev; + bool coherent; + int nr_pages; + struct page **pages; + xen_pfn_t *frames; + void *vaddr; + dma_addr_t dev_bus_addr; +}; + +struct xen_page_foreign { + domid_t domid; + grant_ref_t gref; +}; + +typedef void (*xen_grant_fn_t)(long unsigned int, 
unsigned int, unsigned int, void *); + +struct gnttab_ops { + unsigned int version; + unsigned int grefs_per_grant_frame; + int (*map_frames)(xen_pfn_t *, unsigned int); + void (*unmap_frames)(); + void (*update_entry)(grant_ref_t, domid_t, long unsigned int, unsigned int); + int (*end_foreign_access_ref)(grant_ref_t, int); + long unsigned int (*end_foreign_transfer_ref)(grant_ref_t); + int (*query_foreign_access)(grant_ref_t); +}; + +struct unmap_refs_callback_data { + struct completion completion; + int result; +}; + +struct deferred_entry { + struct list_head list; + grant_ref_t ref; + bool ro; + uint16_t warn_delay; + struct page *page; +}; + +struct xen_feature_info { + unsigned int submap_idx; + uint32_t submap; +}; + +struct balloon_stats { + long unsigned int current_pages; + long unsigned int target_pages; + long unsigned int target_unpopulated; + long unsigned int balloon_low; + long unsigned int balloon_high; + long unsigned int total_pages; + long unsigned int schedule_delay; + long unsigned int max_schedule_delay; + long unsigned int retry_count; + long unsigned int max_retry_count; +}; + +enum bp_state { + BP_DONE = 0, + BP_WAIT = 1, + BP_EAGAIN = 2, + BP_ECANCELED = 3, +}; + +enum shutdown_state { + SHUTDOWN_INVALID = 4294967295, + SHUTDOWN_POWEROFF = 0, + SHUTDOWN_SUSPEND = 2, + SHUTDOWN_HALT = 4, +}; + +struct suspend_info { + int cancelled; +}; + +struct shutdown_handler { + const char command[11]; + bool flag; + void (*cb)(); +}; + +struct vcpu_runstate_info { + int state; + uint64_t state_entry_time; + uint64_t time[4]; +}; + +typedef struct vcpu_runstate_info *__guest_handle_vcpu_runstate_info; + +struct vcpu_register_runstate_memory_area { + union { + __guest_handle_vcpu_runstate_info h; + struct vcpu_runstate_info *v; + uint64_t p; + } addr; +}; + +typedef uint32_t evtchn_port_t; + +typedef evtchn_port_t *__guest_handle_evtchn_port_t; + +struct evtchn_bind_interdomain { + domid_t remote_dom; + evtchn_port_t remote_port; + evtchn_port_t local_port; +}; + +struct evtchn_bind_virq { + uint32_t virq; + uint32_t vcpu; + evtchn_port_t port; +}; + +struct evtchn_bind_pirq { + uint32_t pirq; + uint32_t flags; + evtchn_port_t port; +}; + +struct evtchn_bind_ipi { + uint32_t vcpu; + evtchn_port_t port; +}; + +struct evtchn_close { + evtchn_port_t port; +}; + +struct evtchn_send { + evtchn_port_t port; +}; + +struct evtchn_status { + domid_t dom; + evtchn_port_t port; + uint32_t status; + uint32_t vcpu; + union { + struct { + domid_t dom; + } unbound; + struct { + domid_t dom; + evtchn_port_t port; + } interdomain; + uint32_t pirq; + uint32_t virq; + } u; +}; + +struct evtchn_bind_vcpu { + evtchn_port_t port; + uint32_t vcpu; +}; + +struct evtchn_set_priority { + evtchn_port_t port; + uint32_t priority; +}; + +struct sched_poll { + __guest_handle_evtchn_port_t ports; + unsigned int nr_ports; + uint64_t timeout; +}; + +struct physdev_eoi { + uint32_t irq; +}; + +struct physdev_pirq_eoi_gmfn { + xen_ulong_t gmfn; +}; + +struct physdev_irq_status_query { + uint32_t irq; + uint32_t flags; +}; + +struct physdev_irq { + uint32_t irq; + uint32_t vector; +}; + +struct physdev_map_pirq { + domid_t domid; + int type; + int index; + int pirq; + int bus; + int devfn; + int entry_nr; + uint64_t table_base; +}; + +struct physdev_unmap_pirq { + domid_t domid; + int pirq; +}; + +struct physdev_get_free_pirq { + int type; + uint32_t pirq; +}; + +enum xenbus_state { + XenbusStateUnknown = 0, + XenbusStateInitialising = 1, + XenbusStateInitWait = 2, + XenbusStateInitialised = 3, + 
XenbusStateConnected = 4, + XenbusStateClosing = 5, + XenbusStateClosed = 6, + XenbusStateReconfiguring = 7, + XenbusStateReconfigured = 8, +}; + +struct xenbus_device { + const char *devicetype; + const char *nodename; + const char *otherend; + int otherend_id; + struct xenbus_watch otherend_watch; + struct device dev; + enum xenbus_state state; + struct completion down; + struct work_struct work; + struct semaphore reclaim_sem; + atomic_t event_channels; + atomic_t events; + atomic_t spurious_events; + atomic_t jiffies_eoi_delayed; + unsigned int spurious_threshold; +}; + +struct evtchn_loop_ctrl; + +struct evtchn_ops { + unsigned int (*max_channels)(); + unsigned int (*nr_channels)(); + int (*setup)(evtchn_port_t); + void (*remove)(evtchn_port_t, unsigned int); + void (*bind_to_cpu)(evtchn_port_t, unsigned int, unsigned int); + void (*clear_pending)(evtchn_port_t); + void (*set_pending)(evtchn_port_t); + bool (*is_pending)(evtchn_port_t); + void (*mask)(evtchn_port_t); + void (*unmask)(evtchn_port_t); + void (*handle_events)(unsigned int, struct evtchn_loop_ctrl *); + void (*resume)(); + int (*percpu_init)(unsigned int); + int (*percpu_deinit)(unsigned int); +}; + +struct evtchn_loop_ctrl { + ktime_t timeout; + unsigned int count; + bool defer_eoi; +}; + +enum xen_irq_type { + IRQT_UNBOUND = 0, + IRQT_PIRQ = 1, + IRQT_VIRQ = 2, + IRQT_IPI = 3, + IRQT_EVTCHN = 4, +}; + +struct irq_info { + struct list_head list; + struct list_head eoi_list; + short int refcnt; + u8 spurious_cnt; + u8 is_accounted; + short int type; + u8 mask_reason; + u8 is_active; + unsigned int irq; + evtchn_port_t evtchn; + short unsigned int cpu; + short unsigned int eoi_cpu; + unsigned int irq_epoch; + u64 eoi_time; + raw_spinlock_t lock; + union { + short unsigned int virq; + enum ipi_vector ipi; + struct { + short unsigned int pirq; + short unsigned int gsi; + unsigned char vector; + unsigned char flags; + uint16_t domid; + } pirq; + struct xenbus_device *interdomain; + } u; +}; + +struct lateeoi_work { + struct delayed_work delayed; + spinlock_t eoi_list_lock; + struct list_head eoi_list; +}; + +struct evtchn_unmask { + evtchn_port_t port; +}; + +struct evtchn_init_control { + uint64_t control_gfn; + uint32_t offset; + uint32_t vcpu; + uint8_t link_bits; + uint8_t _pad[7]; +}; + +struct evtchn_expand_array { + uint64_t array_gfn; +}; + +typedef uint32_t event_word_t; + +struct evtchn_fifo_control_block { + uint32_t ready; + uint32_t _rsvd; + event_word_t head[16]; +}; + +struct evtchn_fifo_queue { + uint32_t head[16]; +}; + +struct evtchn_alloc_unbound { + domid_t dom; + domid_t remote_dom; + evtchn_port_t port; +}; + +struct xenbus_map_node { + struct list_head next; + union { + struct { + struct vm_struct *area; + } pv; + struct { + struct page *pages[16]; + long unsigned int addrs[16]; + void *addr; + } hvm; + }; + grant_handle_t handles[16]; + unsigned int nr_handles; +}; + +struct map_ring_valloc { + struct xenbus_map_node *node; + long unsigned int addrs[16]; + phys_addr_t phys_addrs[16]; + struct gnttab_map_grant_ref map[16]; + struct gnttab_unmap_grant_ref unmap[16]; + unsigned int idx; +}; + +struct xenbus_ring_ops { + int (*map)(struct xenbus_device *, struct map_ring_valloc *, grant_ref_t *, unsigned int, void **); + int (*unmap)(struct xenbus_device *, void *); +}; + +struct unmap_ring_hvm { + unsigned int idx; + long unsigned int addrs[16]; +}; + +enum xsd_sockmsg_type { + XS_DEBUG = 0, + XS_DIRECTORY = 1, + XS_READ = 2, + XS_GET_PERMS = 3, + XS_WATCH = 4, + XS_UNWATCH = 5, + XS_TRANSACTION_START = 
6, + XS_TRANSACTION_END = 7, + XS_INTRODUCE = 8, + XS_RELEASE = 9, + XS_GET_DOMAIN_PATH = 10, + XS_WRITE = 11, + XS_MKDIR = 12, + XS_RM = 13, + XS_SET_PERMS = 14, + XS_WATCH_EVENT = 15, + XS_ERROR = 16, + XS_IS_DOMAIN_INTRODUCED = 17, + XS_RESUME = 18, + XS_SET_TARGET = 19, + XS_RESTRICT = 20, + XS_RESET_WATCHES = 21, +}; + +struct xsd_sockmsg { + uint32_t type; + uint32_t req_id; + uint32_t tx_id; + uint32_t len; +}; + +typedef uint32_t XENSTORE_RING_IDX; + +struct xenstore_domain_interface { + char req[1024]; + char rsp[1024]; + XENSTORE_RING_IDX req_cons; + XENSTORE_RING_IDX req_prod; + XENSTORE_RING_IDX rsp_cons; + XENSTORE_RING_IDX rsp_prod; +}; + +struct xs_watch_event { + struct list_head list; + unsigned int len; + struct xenbus_watch *handle; + const char *path; + const char *token; + char body[0]; +}; + +enum xb_req_state { + xb_req_state_queued = 0, + xb_req_state_wait_reply = 1, + xb_req_state_got_reply = 2, + xb_req_state_aborted = 3, +}; + +struct xb_req_data { + struct list_head list; + wait_queue_head_t wq; + struct xsd_sockmsg msg; + uint32_t caller_req_id; + enum xsd_sockmsg_type type; + char *body; + const struct kvec *vec; + int num_vecs; + int err; + enum xb_req_state state; + bool user_req; + void (*cb)(struct xb_req_data *); + void *par; +}; + +enum xenstore_init { + XS_UNKNOWN = 0, + XS_PV = 1, + XS_HVM = 2, + XS_LOCAL = 3, +}; + +struct xenbus_device_id { + char devicetype[32]; +}; + +struct xenbus_driver { + const char *name; + const struct xenbus_device_id *ids; + bool allow_rebind; + int (*probe)(struct xenbus_device *, const struct xenbus_device_id *); + void (*otherend_changed)(struct xenbus_device *, enum xenbus_state); + int (*remove)(struct xenbus_device *); + int (*suspend)(struct xenbus_device *); + int (*resume)(struct xenbus_device *); + int (*uevent)(struct xenbus_device *, struct kobj_uevent_env *); + struct device_driver driver; + int (*read_otherend_details)(struct xenbus_device *); + int (*is_ready)(struct xenbus_device *); + void (*reclaim_memory)(struct xenbus_device *); +}; + +struct xen_hvm_param { + domid_t domid; + uint32_t index; + uint64_t value; +}; + +struct xen_bus_type { + char *root; + unsigned int levels; + int (*get_bus_id)(char *, const char *); + int (*probe)(struct xen_bus_type *, const char *, const char *); + bool (*otherend_will_handle)(struct xenbus_watch *, const char *, const char *); + void (*otherend_changed)(struct xenbus_watch *, const char *, const char *); + struct bus_type bus; +}; + +struct xb_find_info { + struct xenbus_device *dev; + const char *nodename; +}; + +struct xenbus_transaction_holder { + struct list_head list; + struct xenbus_transaction handle; + unsigned int generation_id; +}; + +struct read_buffer { + struct list_head list; + unsigned int cons; + unsigned int len; + char msg[0]; +}; + +struct xenbus_file_priv { + struct mutex msgbuffer_mutex; + struct list_head transactions; + struct list_head watches; + unsigned int len; + union { + struct xsd_sockmsg msg; + char buffer[4096]; + } u; + struct mutex reply_mutex; + struct list_head read_buffers; + wait_queue_head_t read_waitq; + struct kref kref; + struct work_struct wq; +}; + +struct watch_adapter { + struct list_head list; + struct xenbus_watch watch; + struct xenbus_file_priv *dev_data; + char *token; +}; + +struct physdev_manage_pci { + uint8_t bus; + uint8_t devfn; +}; + +struct physdev_manage_pci_ext { + uint8_t bus; + uint8_t devfn; + unsigned int is_extfn; + unsigned int is_virtfn; + struct { + uint8_t bus; + uint8_t devfn; + } physfn; +}; + 
+struct physdev_pci_mmcfg_reserved { + uint64_t address; + uint16_t segment; + uint8_t start_bus; + uint8_t end_bus; + uint32_t flags; +}; + +struct physdev_pci_device_add { + uint16_t seg; + uint8_t bus; + uint8_t devfn; + uint32_t flags; + struct { + uint8_t bus; + uint8_t devfn; + } physfn; + uint32_t optarr[0]; +}; + +struct physdev_pci_device { + uint16_t seg; + uint8_t bus; + uint8_t devfn; +}; + +struct usb_device_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 bcdUSB; + __u8 bDeviceClass; + __u8 bDeviceSubClass; + __u8 bDeviceProtocol; + __u8 bMaxPacketSize0; + __le16 idVendor; + __le16 idProduct; + __le16 bcdDevice; + __u8 iManufacturer; + __u8 iProduct; + __u8 iSerialNumber; + __u8 bNumConfigurations; +}; + +struct usb_config_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 wTotalLength; + __u8 bNumInterfaces; + __u8 bConfigurationValue; + __u8 iConfiguration; + __u8 bmAttributes; + __u8 bMaxPower; +} __attribute__((packed)); + +struct usb_interface_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bInterfaceNumber; + __u8 bAlternateSetting; + __u8 bNumEndpoints; + __u8 bInterfaceClass; + __u8 bInterfaceSubClass; + __u8 bInterfaceProtocol; + __u8 iInterface; +}; + +struct usb_endpoint_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bEndpointAddress; + __u8 bmAttributes; + __le16 wMaxPacketSize; + __u8 bInterval; + __u8 bRefresh; + __u8 bSynchAddress; +} __attribute__((packed)); + +struct usb_ssp_isoc_ep_comp_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 wReseved; + __le32 dwBytesPerInterval; +}; + +struct usb_ss_ep_comp_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bMaxBurst; + __u8 bmAttributes; + __le16 wBytesPerInterval; +}; + +struct usb_interface_assoc_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bFirstInterface; + __u8 bInterfaceCount; + __u8 bFunctionClass; + __u8 bFunctionSubClass; + __u8 bFunctionProtocol; + __u8 iFunction; +}; + +struct usb_bos_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 wTotalLength; + __u8 bNumDeviceCaps; +} __attribute__((packed)); + +struct usb_ext_cap_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __le32 bmAttributes; +} __attribute__((packed)); + +struct usb_ss_cap_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __u8 bmAttributes; + __le16 wSpeedSupported; + __u8 bFunctionalitySupport; + __u8 bU1devExitLat; + __le16 bU2DevExitLat; +}; + +struct usb_ss_container_id_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __u8 bReserved; + __u8 ContainerID[16]; +}; + +struct usb_ssp_cap_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __u8 bReserved; + __le32 bmAttributes; + __le16 wFunctionalitySupport; + __le16 wReserved; + __le32 bmSublinkSpeedAttr[1]; +}; + +struct usb_ptm_cap_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; +}; + +enum usb_device_speed { + USB_SPEED_UNKNOWN = 0, + USB_SPEED_LOW = 1, + USB_SPEED_FULL = 2, + USB_SPEED_HIGH = 3, + USB_SPEED_WIRELESS = 4, + USB_SPEED_SUPER = 5, + USB_SPEED_SUPER_PLUS = 6, +}; + +enum usb_device_state { + USB_STATE_NOTATTACHED = 0, + USB_STATE_ATTACHED = 1, + USB_STATE_POWERED = 2, + USB_STATE_RECONNECTING = 3, + USB_STATE_UNAUTHENTICATED = 4, + USB_STATE_DEFAULT = 5, + USB_STATE_ADDRESS = 6, + USB_STATE_CONFIGURED = 7, + USB_STATE_SUSPENDED = 8, +}; + +enum usb3_link_state { + USB3_LPM_U0 = 0, + USB3_LPM_U1 = 1, + USB3_LPM_U2 = 2, + 
USB3_LPM_U3 = 3, +}; + +enum usb_ssp_rate { + USB_SSP_GEN_UNKNOWN = 0, + USB_SSP_GEN_2x1 = 1, + USB_SSP_GEN_1x2 = 2, + USB_SSP_GEN_2x2 = 3, +}; + +struct ep_device; + +struct usb_host_endpoint { + struct usb_endpoint_descriptor desc; + struct usb_ss_ep_comp_descriptor ss_ep_comp; + struct usb_ssp_isoc_ep_comp_descriptor ssp_isoc_ep_comp; + char: 8; + struct list_head urb_list; + void *hcpriv; + struct ep_device *ep_dev; + unsigned char *extra; + int extralen; + int enabled; + int streams; + int: 32; +} __attribute__((packed)); + +struct usb_host_interface { + struct usb_interface_descriptor desc; + int extralen; + unsigned char *extra; + struct usb_host_endpoint *endpoint; + char *string; +}; + +enum usb_interface_condition { + USB_INTERFACE_UNBOUND = 0, + USB_INTERFACE_BINDING = 1, + USB_INTERFACE_BOUND = 2, + USB_INTERFACE_UNBINDING = 3, +}; + +struct usb_interface { + struct usb_host_interface *altsetting; + struct usb_host_interface *cur_altsetting; + unsigned int num_altsetting; + struct usb_interface_assoc_descriptor *intf_assoc; + int minor; + enum usb_interface_condition condition; + unsigned int sysfs_files_created: 1; + unsigned int ep_devs_created: 1; + unsigned int unregistering: 1; + unsigned int needs_remote_wakeup: 1; + unsigned int needs_altsetting0: 1; + unsigned int needs_binding: 1; + unsigned int resetting_device: 1; + unsigned int authorized: 1; + struct device dev; + struct device *usb_dev; + struct work_struct reset_ws; +}; + +struct usb_interface_cache { + unsigned int num_altsetting; + struct kref ref; + struct usb_host_interface altsetting[0]; +}; + +struct usb_host_config { + struct usb_config_descriptor desc; + char *string; + struct usb_interface_assoc_descriptor *intf_assoc[16]; + struct usb_interface *interface[32]; + struct usb_interface_cache *intf_cache[32]; + unsigned char *extra; + int extralen; +}; + +struct usb_host_bos { + struct usb_bos_descriptor *desc; + struct usb_ext_cap_descriptor *ext_cap; + struct usb_ss_cap_descriptor *ss_cap; + struct usb_ssp_cap_descriptor *ssp_cap; + struct usb_ss_container_id_descriptor *ss_id; + struct usb_ptm_cap_descriptor *ptm_cap; +}; + +struct usb_devmap { + long unsigned int devicemap[2]; +}; + +struct mon_bus; + +struct usb_device; + +struct usb_bus { + struct device *controller; + struct device *sysdev; + int busnum; + const char *bus_name; + u8 uses_pio_for_control; + u8 otg_port; + unsigned int is_b_host: 1; + unsigned int b_hnp_enable: 1; + unsigned int no_stop_on_short: 1; + unsigned int no_sg_constraint: 1; + unsigned int sg_tablesize; + int devnum_next; + struct mutex devnum_next_mutex; + struct usb_devmap devmap; + struct usb_device *root_hub; + struct usb_bus *hs_companion; + int bandwidth_allocated; + int bandwidth_int_reqs; + int bandwidth_isoc_reqs; + unsigned int resuming_ports; + struct mon_bus *mon_bus; + int monitored; +}; + +struct wusb_dev; + +enum usb_device_removable { + USB_DEVICE_REMOVABLE_UNKNOWN = 0, + USB_DEVICE_REMOVABLE = 1, + USB_DEVICE_FIXED = 2, +}; + +struct usb2_lpm_parameters { + unsigned int besl; + int timeout; +}; + +struct usb3_lpm_parameters { + unsigned int mel; + unsigned int pel; + unsigned int sel; + int timeout; +}; + +struct usb_tt; + +struct usb_device { + int devnum; + char devpath[16]; + u32 route; + enum usb_device_state state; + enum usb_device_speed speed; + unsigned int rx_lanes; + unsigned int tx_lanes; + enum usb_ssp_rate ssp_rate; + struct usb_tt *tt; + int ttport; + unsigned int toggle[2]; + struct usb_device *parent; + struct usb_bus *bus; + struct 
usb_host_endpoint ep0; + struct device dev; + struct usb_device_descriptor descriptor; + struct usb_host_bos *bos; + struct usb_host_config *config; + struct usb_host_config *actconfig; + struct usb_host_endpoint *ep_in[16]; + struct usb_host_endpoint *ep_out[16]; + char **rawdescriptors; + short unsigned int bus_mA; + u8 portnum; + u8 level; + u8 devaddr; + unsigned int can_submit: 1; + unsigned int persist_enabled: 1; + unsigned int have_langid: 1; + unsigned int authorized: 1; + unsigned int authenticated: 1; + unsigned int wusb: 1; + unsigned int lpm_capable: 1; + unsigned int usb2_hw_lpm_capable: 1; + unsigned int usb2_hw_lpm_besl_capable: 1; + unsigned int usb2_hw_lpm_enabled: 1; + unsigned int usb2_hw_lpm_allowed: 1; + unsigned int usb3_lpm_u1_enabled: 1; + unsigned int usb3_lpm_u2_enabled: 1; + int string_langid; + char *product; + char *manufacturer; + char *serial; + struct list_head filelist; + int maxchild; + u32 quirks; + atomic_t urbnum; + long unsigned int active_duration; + long unsigned int connect_time; + unsigned int do_remote_wakeup: 1; + unsigned int reset_resume: 1; + unsigned int port_is_suspended: 1; + struct wusb_dev *wusb_dev; + int slot_id; + enum usb_device_removable removable; + struct usb2_lpm_parameters l1_params; + struct usb3_lpm_parameters u1_params; + struct usb3_lpm_parameters u2_params; + unsigned int lpm_disable_count; + u16 hub_delay; + unsigned int use_generic_driver: 1; +}; + +struct usb_tt { + struct usb_device *hub; + int multi; + unsigned int think_time; + void *hcpriv; + spinlock_t lock; + struct list_head clear_list; + struct work_struct clear_work; +}; + +struct usb_iso_packet_descriptor { + unsigned int offset; + unsigned int length; + unsigned int actual_length; + int status; +}; + +struct usb_anchor { + struct list_head urb_list; + wait_queue_head_t wait; + spinlock_t lock; + atomic_t suspend_wakeups; + unsigned int poisoned: 1; +}; + +struct urb; + +typedef void (*usb_complete_t)(struct urb *); + +struct urb { + struct kref kref; + int unlinked; + void *hcpriv; + atomic_t use_count; + atomic_t reject; + struct list_head urb_list; + struct list_head anchor_list; + struct usb_anchor *anchor; + struct usb_device *dev; + struct usb_host_endpoint *ep; + unsigned int pipe; + unsigned int stream_id; + int status; + unsigned int transfer_flags; + void *transfer_buffer; + dma_addr_t transfer_dma; + struct scatterlist *sg; + int num_mapped_sgs; + int num_sgs; + u32 transfer_buffer_length; + u32 actual_length; + unsigned char *setup_packet; + dma_addr_t setup_dma; + int start_frame; + int number_of_packets; + int interval; + int error_count; + void *context; + usb_complete_t complete; + struct usb_iso_packet_descriptor iso_frame_desc[0]; +}; + +struct giveback_urb_bh { + bool running; + spinlock_t lock; + struct list_head head; + struct tasklet_struct bh; + struct usb_host_endpoint *completing_ep; +}; + +enum usb_dev_authorize_policy { + USB_DEVICE_AUTHORIZE_NONE = 0, + USB_DEVICE_AUTHORIZE_ALL = 1, + USB_DEVICE_AUTHORIZE_INTERNAL = 2, +}; + +struct usb_phy_roothub; + +struct hc_driver; + +struct usb_phy; + +struct usb_hcd { + struct usb_bus self; + struct kref kref; + const char *product_desc; + int speed; + char irq_descr[24]; + struct timer_list rh_timer; + struct urb *status_urb; + struct work_struct wakeup_work; + struct work_struct died_work; + const struct hc_driver *driver; + struct usb_phy *usb_phy; + struct usb_phy_roothub *phy_roothub; + long unsigned int flags; + enum usb_dev_authorize_policy dev_policy; + unsigned int rh_registered: 1; + 
unsigned int rh_pollable: 1; + unsigned int msix_enabled: 1; + unsigned int msi_enabled: 1; + unsigned int skip_phy_initialization: 1; + unsigned int uses_new_polling: 1; + unsigned int wireless: 1; + unsigned int has_tt: 1; + unsigned int amd_resume_bug: 1; + unsigned int can_do_streams: 1; + unsigned int tpl_support: 1; + unsigned int cant_recv_wakeups: 1; + unsigned int irq; + void *regs; + resource_size_t rsrc_start; + resource_size_t rsrc_len; + unsigned int power_budget; + struct giveback_urb_bh high_prio_bh; + struct giveback_urb_bh low_prio_bh; + struct mutex *address0_mutex; + struct mutex *bandwidth_mutex; + struct usb_hcd *shared_hcd; + struct usb_hcd *primary_hcd; + struct dma_pool___2 *pool[4]; + int state; + struct gen_pool *localmem_pool; + long unsigned int hcd_priv[0]; +}; + +struct hc_driver { + const char *description; + const char *product_desc; + size_t hcd_priv_size; + irqreturn_t (*irq)(struct usb_hcd *); + int flags; + int (*reset)(struct usb_hcd *); + int (*start)(struct usb_hcd *); + int (*pci_suspend)(struct usb_hcd *, bool); + int (*pci_resume)(struct usb_hcd *, bool); + void (*stop)(struct usb_hcd *); + void (*shutdown)(struct usb_hcd *); + int (*get_frame_number)(struct usb_hcd *); + int (*urb_enqueue)(struct usb_hcd *, struct urb *, gfp_t); + int (*urb_dequeue)(struct usb_hcd *, struct urb *, int); + int (*map_urb_for_dma)(struct usb_hcd *, struct urb *, gfp_t); + void (*unmap_urb_for_dma)(struct usb_hcd *, struct urb *); + void (*endpoint_disable)(struct usb_hcd *, struct usb_host_endpoint *); + void (*endpoint_reset)(struct usb_hcd *, struct usb_host_endpoint *); + int (*hub_status_data)(struct usb_hcd *, char *); + int (*hub_control)(struct usb_hcd *, u16, u16, u16, char *, u16); + int (*bus_suspend)(struct usb_hcd *); + int (*bus_resume)(struct usb_hcd *); + int (*start_port_reset)(struct usb_hcd *, unsigned int); + long unsigned int (*get_resuming_ports)(struct usb_hcd *); + void (*relinquish_port)(struct usb_hcd *, int); + int (*port_handed_over)(struct usb_hcd *, int); + void (*clear_tt_buffer_complete)(struct usb_hcd *, struct usb_host_endpoint *); + int (*alloc_dev)(struct usb_hcd *, struct usb_device *); + void (*free_dev)(struct usb_hcd *, struct usb_device *); + int (*alloc_streams)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint **, unsigned int, unsigned int, gfp_t); + int (*free_streams)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint **, unsigned int, gfp_t); + int (*add_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); + int (*drop_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); + int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); + void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); + int (*address_device)(struct usb_hcd *, struct usb_device *); + int (*enable_device)(struct usb_hcd *, struct usb_device *); + int (*update_hub_device)(struct usb_hcd *, struct usb_device *, struct usb_tt *, gfp_t); + int (*reset_device)(struct usb_hcd *, struct usb_device *); + int (*update_device)(struct usb_hcd *, struct usb_device *); + int (*set_usb2_hw_lpm)(struct usb_hcd *, struct usb_device *, int); + int (*enable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state); + int (*disable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state); + int (*find_raw_port_number)(struct usb_hcd *, int); + int (*port_power)(struct usb_hcd *, int, bool); +}; + +struct physdev_dbgp_op { + uint8_t op; + 
uint8_t bus; + union { + struct physdev_pci_device pci; + } u; +}; + +struct pcpu { + struct list_head list; + struct device dev; + uint32_t cpu_id; + uint32_t flags; +}; + +typedef uint8_t xen_domain_handle_t[16]; + +struct xen_compile_info { + char compiler[64]; + char compile_by[16]; + char compile_domain[32]; + char compile_date[32]; +}; + +struct xen_platform_parameters { + xen_ulong_t virt_start; +}; + +struct xen_build_id { + uint32_t len; + unsigned char buf[0]; +}; + +struct hyp_sysfs_attr { + struct attribute attr; + ssize_t (*show)(struct hyp_sysfs_attr *, char *); + ssize_t (*store)(struct hyp_sysfs_attr *, const char *, size_t); + void *hyp_attr_data; +}; + +struct pmu_mode { + const char *name; + uint32_t mode; +}; + +enum xen_swiotlb_err { + XEN_SWIOTLB_UNKNOWN = 0, + XEN_SWIOTLB_ENOMEM = 1, + XEN_SWIOTLB_EFIXUP = 2, +}; + +struct mcinfo_common { + uint16_t type; + uint16_t size; +}; + +struct mcinfo_global { + struct mcinfo_common common; + uint16_t mc_domid; + uint16_t mc_vcpuid; + uint32_t mc_socketid; + uint16_t mc_coreid; + uint16_t mc_core_threadid; + uint32_t mc_apicid; + uint32_t mc_flags; + uint64_t mc_gstatus; +}; + +struct mcinfo_bank { + struct mcinfo_common common; + uint16_t mc_bank; + uint16_t mc_domid; + uint64_t mc_status; + uint64_t mc_addr; + uint64_t mc_misc; + uint64_t mc_ctrl2; + uint64_t mc_tsc; +}; + +struct mcinfo_msr { + uint64_t reg; + uint64_t value; +}; + +struct mc_info { + uint32_t mi_nentries; + uint32_t flags; + uint64_t mi_data[95]; +}; + +typedef struct mc_info *__guest_handle_mc_info; + +struct mcinfo_logical_cpu { + uint32_t mc_cpunr; + uint32_t mc_chipid; + uint16_t mc_coreid; + uint16_t mc_threadid; + uint32_t mc_apicid; + uint32_t mc_clusterid; + uint32_t mc_ncores; + uint32_t mc_ncores_active; + uint32_t mc_nthreads; + uint32_t mc_cpuid_level; + uint32_t mc_family; + uint32_t mc_vendor; + uint32_t mc_model; + uint32_t mc_step; + char mc_vendorid[16]; + char mc_brandid[64]; + uint32_t mc_cpu_caps[7]; + uint32_t mc_cache_size; + uint32_t mc_cache_alignment; + uint32_t mc_nmsrvals; + struct mcinfo_msr mc_msrvalues[8]; +}; + +typedef struct mcinfo_logical_cpu *__guest_handle_mcinfo_logical_cpu; + +struct xen_mc_fetch { + uint32_t flags; + uint32_t _pad0; + uint64_t fetch_id; + __guest_handle_mc_info data; +}; + +struct xen_mc_notifydomain { + uint16_t mc_domid; + uint16_t mc_vcpuid; + uint32_t flags; +}; + +struct xen_mc_physcpuinfo { + uint32_t ncpus; + uint32_t _pad0; + __guest_handle_mcinfo_logical_cpu info; +}; + +struct xen_mc_msrinject { + uint32_t mcinj_cpunr; + uint32_t mcinj_flags; + uint32_t mcinj_count; + uint32_t _pad0; + struct mcinfo_msr mcinj_msr[8]; +}; + +struct xen_mc_mceinject { + unsigned int mceinj_cpunr; +}; + +struct xen_mc { + uint32_t cmd; + uint32_t interface_version; + union { + struct xen_mc_fetch mc_fetch; + struct xen_mc_notifydomain mc_notifydomain; + struct xen_mc_physcpuinfo mc_physcpuinfo; + struct xen_mc_msrinject mc_msrinject; + struct xen_mc_mceinject mc_mceinject; + } u; +}; + +struct xen_mce { + __u64 status; + __u64 misc; + __u64 addr; + __u64 mcgstatus; + __u64 ip; + __u64 tsc; + __u64 time; + __u8 cpuvendor; + __u8 inject_flags; + __u16 pad; + __u32 cpuid; + __u8 cs; + __u8 bank; + __u8 cpu; + __u8 finished; + __u32 extcpu; + __u32 socketid; + __u32 apicid; + __u64 mcgcap; + __u64 synd; + __u64 ipid; + __u64 ppin; +}; + +struct xen_mce_log { + char signature[12]; + unsigned int len; + unsigned int next; + unsigned int flags; + unsigned int recordlen; + struct xen_mce entry[32]; +}; + +typedef int 
*__guest_handle_int; + +typedef xen_ulong_t *__guest_handle_xen_ulong_t; + +struct xen_add_to_physmap_range { + domid_t domid; + uint16_t space; + uint16_t size; + domid_t foreign_domid; + __guest_handle_xen_ulong_t idxs; + __guest_handle_xen_pfn_t gpfns; + __guest_handle_int errs; +}; + +struct xen_remove_from_physmap { + domid_t domid; + xen_pfn_t gpfn; +}; + +typedef void (*xen_gfn_fn_t)(long unsigned int, void *); + +struct xen_remap_gfn_info; + +struct remap_data___2 { + xen_pfn_t *fgfn; + int nr_fgfn; + pgprot_t prot; + domid_t domid; + struct vm_area_struct *vma; + int index; + struct page **pages; + struct xen_remap_gfn_info *info; + int *err_ptr; + int mapped; + int h_errs[1]; + xen_ulong_t h_idxs[1]; + xen_pfn_t h_gpfns[1]; + int h_iter; +}; + +struct map_balloon_pages { + xen_pfn_t *pfns; + unsigned int idx; +}; + +struct remap_pfn { + struct mm_struct *mm; + struct page **pages; + pgprot_t prot; + long unsigned int i; +}; + +struct ww_class { + atomic_long_t stamp; + struct lock_class_key acquire_key; + struct lock_class_key mutex_key; + const char *acquire_name; + const char *mutex_name; + unsigned int is_wait_die; +}; + +struct pre_voltage_change_data { + long unsigned int old_uV; + long unsigned int min_uV; + long unsigned int max_uV; +}; + +struct regulator_bulk_data { + const char *supply; + struct regulator *consumer; + int ret; +}; + +struct regulator_voltage { + int min_uV; + int max_uV; +}; + +struct regulator { + struct device *dev; + struct list_head list; + unsigned int always_on: 1; + unsigned int bypass: 1; + unsigned int device_link: 1; + int uA_load; + unsigned int enable_count; + unsigned int deferred_disables; + struct regulator_voltage voltage[5]; + const char *supply_name; + struct device_attribute dev_attr; + struct regulator_dev *rdev; + struct dentry *debugfs; +}; + +struct regulator_coupler { + struct list_head list; + int (*attach_regulator)(struct regulator_coupler *, struct regulator_dev *); + int (*detach_regulator)(struct regulator_coupler *, struct regulator_dev *); + int (*balance_voltage)(struct regulator_coupler *, struct regulator_dev *, suspend_state_t); +}; + +enum regulator_status { + REGULATOR_STATUS_OFF = 0, + REGULATOR_STATUS_ON = 1, + REGULATOR_STATUS_ERROR = 2, + REGULATOR_STATUS_FAST = 3, + REGULATOR_STATUS_NORMAL = 4, + REGULATOR_STATUS_IDLE = 5, + REGULATOR_STATUS_STANDBY = 6, + REGULATOR_STATUS_BYPASS = 7, + REGULATOR_STATUS_UNDEFINED = 8, +}; + +struct regulator_enable_gpio { + struct list_head list; + struct gpio_desc *gpiod; + u32 enable_count; + u32 request_count; +}; + +enum regulator_active_discharge { + REGULATOR_ACTIVE_DISCHARGE_DEFAULT = 0, + REGULATOR_ACTIVE_DISCHARGE_DISABLE = 1, + REGULATOR_ACTIVE_DISCHARGE_ENABLE = 2, +}; + +struct regulator_consumer_supply { + const char *dev_name; + const char *supply; +}; + +struct trace_event_raw_regulator_basic { + struct trace_entry ent; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_regulator_range { + struct trace_entry ent; + u32 __data_loc_name; + int min; + int max; + char __data[0]; +}; + +struct trace_event_raw_regulator_value { + struct trace_entry ent; + u32 __data_loc_name; + unsigned int val; + char __data[0]; +}; + +struct trace_event_data_offsets_regulator_basic { + u32 name; +}; + +struct trace_event_data_offsets_regulator_range { + u32 name; +}; + +struct trace_event_data_offsets_regulator_value { + u32 name; +}; + +typedef void (*btf_trace_regulator_enable)(void *, const char *); + +typedef void (*btf_trace_regulator_enable_delay)(void *, 
const char *); + +typedef void (*btf_trace_regulator_enable_complete)(void *, const char *); + +typedef void (*btf_trace_regulator_disable)(void *, const char *); + +typedef void (*btf_trace_regulator_disable_complete)(void *, const char *); + +typedef void (*btf_trace_regulator_bypass_enable)(void *, const char *); + +typedef void (*btf_trace_regulator_bypass_enable_complete)(void *, const char *); + +typedef void (*btf_trace_regulator_bypass_disable)(void *, const char *); + +typedef void (*btf_trace_regulator_bypass_disable_complete)(void *, const char *); + +typedef void (*btf_trace_regulator_set_voltage)(void *, const char *, int, int); + +typedef void (*btf_trace_regulator_set_voltage_complete)(void *, const char *, unsigned int); + +enum regulator_get_type { + NORMAL_GET = 0, + EXCLUSIVE_GET = 1, + OPTIONAL_GET = 2, + MAX_GET_TYPE = 3, +}; + +struct regulator_map { + struct list_head list; + const char *dev_name; + const char *supply; + struct regulator_dev *regulator; +}; + +struct regulator_supply_alias { + struct list_head list; + struct device *src_dev; + const char *src_supply; + struct device *alias_dev; + const char *alias_supply; +}; + +struct summary_data { + struct seq_file *s; + struct regulator_dev *parent; + int level; +}; + +struct summary_lock_data { + struct ww_acquire_ctx *ww_ctx; + struct regulator_dev **new_contended_rdev; + struct regulator_dev **old_contended_rdev; +}; + +struct fixed_voltage_config { + const char *supply_name; + const char *input_supply; + int microvolts; + unsigned int startup_delay; + unsigned int off_on_delay; + unsigned int enabled_at_boot: 1; + struct regulator_init_data *init_data; +}; + +struct fixed_regulator_data { + struct fixed_voltage_config cfg; + struct regulator_init_data init_data; + struct platform_device pdev; +}; + +struct regulator_bulk_devres { + struct regulator_bulk_data *consumers; + int num_consumers; +}; + +struct regulator_supply_alias_match { + struct device *dev; + const char *id; +}; + +struct regulator_notifier_match { + struct regulator *regulator; + struct notifier_block *nb; +}; + +struct reset_control___2; + +struct reset_control_bulk_data { + const char *id; + struct reset_control___2 *rstc; +}; + +struct reset_controller_dev; + +struct reset_control___2 { + struct reset_controller_dev *rcdev; + struct list_head list; + unsigned int id; + struct kref refcnt; + bool acquired; + bool shared; + bool array; + atomic_t deassert_count; + atomic_t triggered_count; +}; + +struct reset_control_ops { + int (*reset)(struct reset_controller_dev *, long unsigned int); + int (*assert)(struct reset_controller_dev *, long unsigned int); + int (*deassert)(struct reset_controller_dev *, long unsigned int); + int (*status)(struct reset_controller_dev *, long unsigned int); +}; + +struct reset_controller_dev { + const struct reset_control_ops *ops; + struct module *owner; + struct list_head list; + struct list_head reset_control_head; + struct device *dev; + struct device_node *of_node; + int of_reset_n_cells; + int (*of_xlate)(struct reset_controller_dev *, const struct of_phandle_args *); + unsigned int nr_resets; +}; + +struct reset_control_lookup { + struct list_head list; + const char *provider; + unsigned int index; + const char *dev_id; + const char *con_id; +}; + +struct reset_control_array { + struct reset_control___2 base; + unsigned int num_rstcs; + struct reset_control___2 *rstc[0]; +}; + +struct reset_control_bulk_devres { + int num_rstcs; + struct reset_control_bulk_data *rstcs; +}; + +struct serial_struct32 { + 
compat_int_t type; + compat_int_t line; + compat_uint_t port; + compat_int_t irq; + compat_int_t flags; + compat_int_t xmit_fifo_size; + compat_int_t custom_divisor; + compat_int_t baud_base; + short unsigned int close_delay; + char io_type; + char reserved_char; + compat_int_t hub6; + short unsigned int closing_wait; + short unsigned int closing_wait2; + compat_uint_t iomem_base; + short unsigned int iomem_reg_shift; + unsigned int port_high; + compat_int_t reserved; +}; + +struct n_tty_data { + size_t read_head; + size_t commit_head; + size_t canon_head; + size_t echo_head; + size_t echo_commit; + size_t echo_mark; + long unsigned int char_map[4]; + long unsigned int overrun_time; + int num_overrun; + bool no_room; + unsigned char lnext: 1; + unsigned char erasing: 1; + unsigned char raw: 1; + unsigned char real_raw: 1; + unsigned char icanon: 1; + unsigned char push: 1; + char read_buf[4096]; + long unsigned int read_flags[64]; + unsigned char echo_buf[4096]; + size_t read_tail; + size_t line_start; + unsigned int column; + unsigned int canon_column; + size_t echo_tail; + struct mutex atomic_read_lock; + struct mutex output_lock; +}; + +enum { + ERASE = 0, + WERASE = 1, + KILL = 2, +}; + +struct termios { + tcflag_t c_iflag; + tcflag_t c_oflag; + tcflag_t c_cflag; + tcflag_t c_lflag; + cc_t c_line; + cc_t c_cc[19]; +}; + +struct termios2 { + tcflag_t c_iflag; + tcflag_t c_oflag; + tcflag_t c_cflag; + tcflag_t c_lflag; + cc_t c_line; + cc_t c_cc[19]; + speed_t c_ispeed; + speed_t c_ospeed; +}; + +struct termio { + short unsigned int c_iflag; + short unsigned int c_oflag; + short unsigned int c_cflag; + short unsigned int c_lflag; + unsigned char c_line; + unsigned char c_cc[8]; +}; + +struct ldsem_waiter { + struct list_head list; + struct task_struct *task; +}; + +struct pts_fs_info___2; + +struct tty_audit_buf { + struct mutex mutex; + dev_t dev; + unsigned int icanon: 1; + size_t valid; + unsigned char *data; +}; + +struct sysrq_state { + struct input_handle handle; + struct work_struct reinject_work; + long unsigned int key_down[12]; + unsigned int alt; + unsigned int alt_use; + unsigned int shift; + unsigned int shift_use; + bool active; + bool need_reinject; + bool reinjecting; + bool reset_canceled; + bool reset_requested; + long unsigned int reset_keybit[12]; + int reset_seq_len; + int reset_seq_cnt; + int reset_seq_version; + struct timer_list keyreset_timer; +}; + +struct unipair { + short unsigned int unicode; + short unsigned int fontpos; +}; + +struct unimapdesc { + short unsigned int entry_ct; + struct unipair *entries; +}; + +struct kbentry { + unsigned char kb_table; + unsigned char kb_index; + short unsigned int kb_value; +}; + +struct kbsentry { + unsigned char kb_func; + unsigned char kb_string[512]; +}; + +struct kbkeycode { + unsigned int scancode; + unsigned int keycode; +}; + +struct kbd_repeat { + int delay; + int period; +}; + +struct console_font_op { + unsigned int op; + unsigned int flags; + unsigned int width; + unsigned int height; + unsigned int charcount; + unsigned char *data; +}; + +struct vt_stat { + short unsigned int v_active; + short unsigned int v_signal; + short unsigned int v_state; +}; + +struct vt_sizes { + short unsigned int v_rows; + short unsigned int v_cols; + short unsigned int v_scrollsize; +}; + +struct vt_consize { + short unsigned int v_rows; + short unsigned int v_cols; + short unsigned int v_vlin; + short unsigned int v_clin; + short unsigned int v_vcol; + short unsigned int v_ccol; +}; + +struct vt_event { + unsigned int event; + 
unsigned int oldev; + unsigned int newev; + unsigned int pad[4]; +}; + +struct vt_setactivate { + unsigned int console; + struct vt_mode mode; +}; + +struct vt_spawn_console { + spinlock_t lock; + struct pid *pid; + int sig; +}; + +struct vt_event_wait { + struct list_head list; + struct vt_event event; + int done; +}; + +struct compat_console_font_op { + compat_uint_t op; + compat_uint_t flags; + compat_uint_t width; + compat_uint_t height; + compat_uint_t charcount; + compat_caddr_t data; +}; + +struct compat_unimapdesc { + short unsigned int entry_ct; + compat_caddr_t entries; +}; + +struct vt_notifier_param { + struct vc_data *vc; + unsigned int c; +}; + +struct vcs_poll_data { + struct notifier_block notifier; + unsigned int cons_num; + int event; + wait_queue_head_t waitq; + struct fasync_struct *fasync; +}; + +struct tiocl_selection { + short unsigned int xs; + short unsigned int ys; + short unsigned int xe; + short unsigned int ye; + short unsigned int sel_mode; +}; + +struct vc_selection { + struct mutex lock; + struct vc_data *cons; + char *buffer; + unsigned int buf_len; + volatile int start; + int end; +}; + +struct kbdiacr { + unsigned char diacr; + unsigned char base; + unsigned char result; +}; + +struct kbdiacrs { + unsigned int kb_cnt; + struct kbdiacr kbdiacr[256]; +}; + +struct kbdiacruc { + unsigned int diacr; + unsigned int base; + unsigned int result; +}; + +struct kbdiacrsuc { + unsigned int kb_cnt; + struct kbdiacruc kbdiacruc[256]; +}; + +struct keyboard_notifier_param { + struct vc_data *vc; + int down; + int shift; + int ledstate; + unsigned int value; +}; + +struct kbd_struct { + unsigned char lockstate; + unsigned char slockstate; + unsigned char ledmode: 1; + unsigned char ledflagstate: 4; + char: 3; + unsigned char default_ledflagstate: 4; + unsigned char kbdmode: 3; + char: 1; + unsigned char modeflags: 5; +}; + +typedef void k_handler_fn(struct vc_data *, unsigned char, char); + +typedef void fn_handler_fn(struct vc_data *); + +struct getset_keycode_data { + struct input_keymap_entry ke; + int error; +}; + +struct kbd_led_trigger { + struct led_trigger trigger; + unsigned int mask; +}; + +struct uni_pagedir { + u16 **uni_pgdir[32]; + long unsigned int refcount; + long unsigned int sum; + unsigned char *inverse_translations[4]; + u16 *inverse_trans_unicode; +}; + +typedef uint32_t char32_t; + +struct uni_screen { + char32_t *lines[0]; +}; + +struct con_driver { + const struct consw *con; + const char *desc; + struct device *dev; + int node; + int first; + int last; + int flag; +}; + +enum { + blank_off = 0, + blank_normal_wait = 1, + blank_vesa_wait = 2, +}; + +enum { + EPecma = 0, + EPdec = 1, + EPeq = 2, + EPgt = 3, + EPlt = 4, +}; + +struct rgb { + u8 r; + u8 g; + u8 b; +}; + +enum { + ESnormal = 0, + ESesc = 1, + ESsquare = 2, + ESgetpars = 3, + ESfunckey = 4, + EShash = 5, + ESsetG0 = 6, + ESsetG1 = 7, + ESpercent = 8, + EScsiignore = 9, + ESnonstd = 10, + ESpalette = 11, + ESosc = 12, +}; + +struct interval { + uint32_t first; + uint32_t last; +}; + +struct vc_draw_region { + long unsigned int from; + long unsigned int to; + int x; +}; + +struct hv_ops; + +struct hvc_struct { + struct tty_port port; + spinlock_t lock; + int index; + int do_wakeup; + char *outbuf; + int outbuf_size; + int n_outbuf; + uint32_t vtermno; + const struct hv_ops *ops; + int irq_requested; + int data; + struct winsize ws; + struct work_struct tty_resize; + struct list_head next; + long unsigned int flags; +}; + +struct hv_ops { + int (*get_chars)(uint32_t, char *, int); + int 
(*put_chars)(uint32_t, const char *, int); + int (*flush)(uint32_t, bool); + int (*notifier_add)(struct hvc_struct *, int); + void (*notifier_del)(struct hvc_struct *, int); + void (*notifier_hangup)(struct hvc_struct *, int); + int (*tiocmget)(struct hvc_struct *); + int (*tiocmset)(struct hvc_struct *, unsigned int, unsigned int); + void (*dtr_rts)(struct hvc_struct *, int); +}; + +struct earlycon_device { + struct console *con; + struct uart_port port; + char options[16]; + unsigned int baud; +}; + +struct earlycon_id { + char name[15]; + char name_term; + char compatible[128]; + int (*setup)(struct earlycon_device *, const char *); +}; + +typedef uint32_t XENCONS_RING_IDX; + +struct xencons_interface { + char in[1024]; + char out[2048]; + XENCONS_RING_IDX in_cons; + XENCONS_RING_IDX in_prod; + XENCONS_RING_IDX out_cons; + XENCONS_RING_IDX out_prod; +}; + +struct xencons_info { + struct list_head list; + struct xenbus_device *xbdev; + struct xencons_interface *intf; + unsigned int evtchn; + struct hvc_struct *hvc; + int irq; + int vtermno; + grant_ref_t gntref; +}; + +struct uart_driver { + struct module *owner; + const char *driver_name; + const char *dev_name; + int major; + int minor; + int nr; + struct console *cons; + struct uart_state *state; + struct tty_driver *tty_driver; +}; + +struct uart_match { + struct uart_port *port; + struct uart_driver *driver; +}; + +enum hwparam_type { + hwparam_ioport = 0, + hwparam_iomem = 1, + hwparam_ioport_or_iomem = 2, + hwparam_irq = 3, + hwparam_dma = 4, + hwparam_dma_addr = 5, + hwparam_other = 6, +}; + +struct plat_serial8250_port { + long unsigned int iobase; + void *membase; + resource_size_t mapbase; + unsigned int irq; + long unsigned int irqflags; + unsigned int uartclk; + void *private_data; + unsigned char regshift; + unsigned char iotype; + unsigned char hub6; + unsigned char has_sysrq; + upf_t flags; + unsigned int type; + unsigned int (*serial_in)(struct uart_port *, int); + void (*serial_out)(struct uart_port *, int, int); + void (*set_termios)(struct uart_port *, struct ktermios *, struct ktermios *); + void (*set_ldisc)(struct uart_port *, struct ktermios *); + unsigned int (*get_mctrl)(struct uart_port *); + int (*handle_irq)(struct uart_port *); + void (*pm)(struct uart_port *, unsigned int, unsigned int); + void (*handle_break)(struct uart_port *); +}; + +enum { + PLAT8250_DEV_LEGACY = 4294967295, + PLAT8250_DEV_PLATFORM = 0, + PLAT8250_DEV_PLATFORM1 = 1, + PLAT8250_DEV_PLATFORM2 = 2, + PLAT8250_DEV_FOURPORT = 3, + PLAT8250_DEV_ACCENT = 4, + PLAT8250_DEV_BOCA = 5, + PLAT8250_DEV_EXAR_ST16C554 = 6, + PLAT8250_DEV_HUB6 = 7, + PLAT8250_DEV_AU1X00 = 8, + PLAT8250_DEV_SM501 = 9, +}; + +struct uart_8250_port; + +struct uart_8250_ops { + int (*setup_irq)(struct uart_8250_port *); + void (*release_irq)(struct uart_8250_port *); +}; + +struct mctrl_gpios; + +struct uart_8250_dma; + +struct uart_8250_em485; + +struct uart_8250_port { + struct uart_port port; + struct timer_list timer; + struct list_head list; + u32 capabilities; + short unsigned int bugs; + bool fifo_bug; + unsigned int tx_loadsz; + unsigned char acr; + unsigned char fcr; + unsigned char ier; + unsigned char lcr; + unsigned char mcr; + unsigned char mcr_mask; + unsigned char mcr_force; + unsigned char cur_iotype; + unsigned int rpm_tx_active; + unsigned char canary; + unsigned char probe; + struct mctrl_gpios *gpios; + unsigned char lsr_saved_flags; + unsigned char msr_saved_flags; + struct uart_8250_dma *dma; + const struct uart_8250_ops *ops; + int (*dl_read)(struct 
uart_8250_port *); + void (*dl_write)(struct uart_8250_port *, int); + struct uart_8250_em485 *em485; + void (*rs485_start_tx)(struct uart_8250_port *); + void (*rs485_stop_tx)(struct uart_8250_port *); + struct delayed_work overrun_backoff; + u32 overrun_backoff_time_ms; +}; + +struct uart_8250_em485 { + struct hrtimer start_tx_timer; + struct hrtimer stop_tx_timer; + struct hrtimer *active_timer; + struct uart_8250_port *port; + unsigned int tx_stopped: 1; +}; + +struct uart_8250_dma { + int (*tx_dma)(struct uart_8250_port *); + int (*rx_dma)(struct uart_8250_port *); + dma_filter_fn fn; + void *rx_param; + void *tx_param; + struct dma_slave_config rxconf; + struct dma_slave_config txconf; + struct dma_chan___2 *rxchan; + struct dma_chan___2 *txchan; + phys_addr_t rx_dma_addr; + phys_addr_t tx_dma_addr; + dma_addr_t rx_addr; + dma_addr_t tx_addr; + dma_cookie_t rx_cookie; + dma_cookie_t tx_cookie; + void *rx_buf; + size_t rx_size; + size_t tx_size; + unsigned char tx_running; + unsigned char tx_err; + unsigned char rx_running; +}; + +struct old_serial_port { + unsigned int uart; + unsigned int baud_base; + unsigned int port; + unsigned int irq; + upf_t flags; + unsigned char io_type; + unsigned char *iomem_base; + short unsigned int iomem_reg_shift; +}; + +struct irq_info___2 { + struct hlist_node node; + int irq; + spinlock_t lock; + struct list_head *head; +}; + +struct serial8250_config { + const char *name; + short unsigned int fifo_size; + short unsigned int tx_loadsz; + unsigned char fcr; + unsigned char rxtrig_bytes[4]; + unsigned int flags; +}; + +struct dw8250_port_data { + int line; + struct uart_8250_dma dma; + u8 dlf_size; +}; + +struct fintek_8250 { + u16 pid; + u16 base_port; + u8 index; + u8 key; +}; + +struct pciserial_board { + unsigned int flags; + unsigned int num_ports; + unsigned int base_baud; + unsigned int uart_offset; + unsigned int reg_shift; + unsigned int first_offset; +}; + +struct serial_private; + +struct pci_serial_quirk { + u32 vendor; + u32 device; + u32 subvendor; + u32 subdevice; + int (*probe)(struct pci_dev *); + int (*init)(struct pci_dev *); + int (*setup)(struct serial_private *, const struct pciserial_board *, struct uart_8250_port *, int); + void (*exit)(struct pci_dev *); +}; + +struct serial_private { + struct pci_dev *dev; + unsigned int nr; + struct pci_serial_quirk *quirk; + const struct pciserial_board *board; + int line[0]; +}; + +struct f815xxa_data { + spinlock_t lock; + int idx; +}; + +struct timedia_struct { + int num; + const short unsigned int *ids; +}; + +struct quatech_feature { + u16 devid; + bool amcc; +}; + +enum pci_board_num_t { + pbn_default = 0, + pbn_b0_1_115200 = 1, + pbn_b0_2_115200 = 2, + pbn_b0_4_115200 = 3, + pbn_b0_5_115200 = 4, + pbn_b0_8_115200 = 5, + pbn_b0_1_921600 = 6, + pbn_b0_2_921600 = 7, + pbn_b0_4_921600 = 8, + pbn_b0_2_1130000 = 9, + pbn_b0_4_1152000 = 10, + pbn_b0_4_1250000 = 11, + pbn_b0_2_1843200 = 12, + pbn_b0_4_1843200 = 13, + pbn_b0_1_4000000 = 14, + pbn_b0_bt_1_115200 = 15, + pbn_b0_bt_2_115200 = 16, + pbn_b0_bt_4_115200 = 17, + pbn_b0_bt_8_115200 = 18, + pbn_b0_bt_1_460800 = 19, + pbn_b0_bt_2_460800 = 20, + pbn_b0_bt_4_460800 = 21, + pbn_b0_bt_1_921600 = 22, + pbn_b0_bt_2_921600 = 23, + pbn_b0_bt_4_921600 = 24, + pbn_b0_bt_8_921600 = 25, + pbn_b1_1_115200 = 26, + pbn_b1_2_115200 = 27, + pbn_b1_4_115200 = 28, + pbn_b1_8_115200 = 29, + pbn_b1_16_115200 = 30, + pbn_b1_1_921600 = 31, + pbn_b1_2_921600 = 32, + pbn_b1_4_921600 = 33, + pbn_b1_8_921600 = 34, + pbn_b1_2_1250000 = 35, + pbn_b1_bt_1_115200 = 
36, + pbn_b1_bt_2_115200 = 37, + pbn_b1_bt_4_115200 = 38, + pbn_b1_bt_2_921600 = 39, + pbn_b1_1_1382400 = 40, + pbn_b1_2_1382400 = 41, + pbn_b1_4_1382400 = 42, + pbn_b1_8_1382400 = 43, + pbn_b2_1_115200 = 44, + pbn_b2_2_115200 = 45, + pbn_b2_4_115200 = 46, + pbn_b2_8_115200 = 47, + pbn_b2_1_460800 = 48, + pbn_b2_4_460800 = 49, + pbn_b2_8_460800 = 50, + pbn_b2_16_460800 = 51, + pbn_b2_1_921600 = 52, + pbn_b2_4_921600 = 53, + pbn_b2_8_921600 = 54, + pbn_b2_8_1152000 = 55, + pbn_b2_bt_1_115200 = 56, + pbn_b2_bt_2_115200 = 57, + pbn_b2_bt_4_115200 = 58, + pbn_b2_bt_2_921600 = 59, + pbn_b2_bt_4_921600 = 60, + pbn_b3_2_115200 = 61, + pbn_b3_4_115200 = 62, + pbn_b3_8_115200 = 63, + pbn_b4_bt_2_921600 = 64, + pbn_b4_bt_4_921600 = 65, + pbn_b4_bt_8_921600 = 66, + pbn_panacom = 67, + pbn_panacom2 = 68, + pbn_panacom4 = 69, + pbn_plx_romulus = 70, + pbn_endrun_2_4000000 = 71, + pbn_oxsemi = 72, + pbn_oxsemi_1_4000000 = 73, + pbn_oxsemi_2_4000000 = 74, + pbn_oxsemi_4_4000000 = 75, + pbn_oxsemi_8_4000000 = 76, + pbn_intel_i960 = 77, + pbn_sgi_ioc3 = 78, + pbn_computone_4 = 79, + pbn_computone_6 = 80, + pbn_computone_8 = 81, + pbn_sbsxrsio = 82, + pbn_pasemi_1682M = 83, + pbn_ni8430_2 = 84, + pbn_ni8430_4 = 85, + pbn_ni8430_8 = 86, + pbn_ni8430_16 = 87, + pbn_ADDIDATA_PCIe_1_3906250 = 88, + pbn_ADDIDATA_PCIe_2_3906250 = 89, + pbn_ADDIDATA_PCIe_4_3906250 = 90, + pbn_ADDIDATA_PCIe_8_3906250 = 91, + pbn_ce4100_1_115200 = 92, + pbn_omegapci = 93, + pbn_NETMOS9900_2s_115200 = 94, + pbn_brcm_trumanage = 95, + pbn_fintek_4 = 96, + pbn_fintek_8 = 97, + pbn_fintek_12 = 98, + pbn_fintek_F81504A = 99, + pbn_fintek_F81508A = 100, + pbn_fintek_F81512A = 101, + pbn_wch382_2 = 102, + pbn_wch384_4 = 103, + pbn_wch384_8 = 104, + pbn_pericom_PI7C9X7951 = 105, + pbn_pericom_PI7C9X7952 = 106, + pbn_pericom_PI7C9X7954 = 107, + pbn_pericom_PI7C9X7958 = 108, + pbn_sunix_pci_1s = 109, + pbn_sunix_pci_2s = 110, + pbn_sunix_pci_4s = 111, + pbn_sunix_pci_8s = 112, + pbn_sunix_pci_16s = 113, + pbn_moxa8250_2p = 114, + pbn_moxa8250_4p = 115, + pbn_moxa8250_8p = 116, +}; + +struct spi_device_id { + char name[32]; + kernel_ulong_t driver_data; +}; + +struct ptp_system_timestamp { + struct timespec64 pre_ts; + struct timespec64 post_ts; +}; + +struct spi_statistics { + spinlock_t lock; + long unsigned int messages; + long unsigned int transfers; + long unsigned int errors; + long unsigned int timedout; + long unsigned int spi_sync; + long unsigned int spi_sync_immediate; + long unsigned int spi_async; + long long unsigned int bytes; + long long unsigned int bytes_rx; + long long unsigned int bytes_tx; + long unsigned int transfer_bytes_histo[17]; + long unsigned int transfers_split_maxsize; +}; + +struct spi_delay { + u16 value; + u8 unit; +}; + +struct spi_controller; + +struct spi_device { + struct device dev; + struct spi_controller *controller; + struct spi_controller *master; + u32 max_speed_hz; + u8 chip_select; + u8 bits_per_word; + bool rt; + u32 mode; + int irq; + void *controller_state; + void *controller_data; + char modalias[32]; + const char *driver_override; + int cs_gpio; + struct gpio_desc *cs_gpiod; + struct spi_delay word_delay; + struct spi_statistics statistics; +}; + +struct spi_message; + +struct spi_transfer; + +struct spi_controller_mem_ops; + +struct spi_controller { + struct device dev; + struct list_head list; + s16 bus_num; + u16 num_chipselect; + u16 dma_alignment; + u32 mode_bits; + u32 buswidth_override_bits; + u32 bits_per_word_mask; + u32 min_speed_hz; + u32 max_speed_hz; + u16 flags; + bool 
devm_allocated; + bool slave; + size_t (*max_transfer_size)(struct spi_device *); + size_t (*max_message_size)(struct spi_device *); + struct mutex io_mutex; + spinlock_t bus_lock_spinlock; + struct mutex bus_lock_mutex; + bool bus_lock_flag; + int (*setup)(struct spi_device *); + int (*set_cs_timing)(struct spi_device *, struct spi_delay *, struct spi_delay *, struct spi_delay *); + int (*transfer)(struct spi_device *, struct spi_message *); + void (*cleanup)(struct spi_device *); + bool (*can_dma)(struct spi_controller *, struct spi_device *, struct spi_transfer *); + bool queued; + struct kthread_worker *kworker; + struct kthread_work pump_messages; + spinlock_t queue_lock; + struct list_head queue; + struct spi_message *cur_msg; + bool idling; + bool busy; + bool running; + bool rt; + bool auto_runtime_pm; + bool cur_msg_prepared; + bool cur_msg_mapped; + bool last_cs_enable; + bool last_cs_mode_high; + bool fallback; + struct completion xfer_completion; + size_t max_dma_len; + int (*prepare_transfer_hardware)(struct spi_controller *); + int (*transfer_one_message)(struct spi_controller *, struct spi_message *); + int (*unprepare_transfer_hardware)(struct spi_controller *); + int (*prepare_message)(struct spi_controller *, struct spi_message *); + int (*unprepare_message)(struct spi_controller *, struct spi_message *); + int (*slave_abort)(struct spi_controller *); + void (*set_cs)(struct spi_device *, bool); + int (*transfer_one)(struct spi_controller *, struct spi_device *, struct spi_transfer *); + void (*handle_err)(struct spi_controller *, struct spi_message *); + const struct spi_controller_mem_ops *mem_ops; + struct spi_delay cs_setup; + struct spi_delay cs_hold; + struct spi_delay cs_inactive; + int *cs_gpios; + struct gpio_desc **cs_gpiods; + bool use_gpio_descriptors; + s8 unused_native_cs; + s8 max_native_cs; + struct spi_statistics statistics; + struct dma_chan___2 *dma_tx; + struct dma_chan___2 *dma_rx; + void *dummy_rx; + void *dummy_tx; + int (*fw_translate_cs)(struct spi_controller *, unsigned int); + bool ptp_sts_supported; + long unsigned int irq_flags; +}; + +struct spi_driver { + const struct spi_device_id *id_table; + int (*probe)(struct spi_device *); + int (*remove)(struct spi_device *); + void (*shutdown)(struct spi_device *); + struct device_driver driver; +}; + +struct spi_message { + struct list_head transfers; + struct spi_device *spi; + unsigned int is_dma_mapped: 1; + void (*complete)(void *); + void *context; + unsigned int frame_length; + unsigned int actual_length; + int status; + struct list_head queue; + void *state; + struct list_head resources; +}; + +struct spi_transfer { + const void *tx_buf; + void *rx_buf; + unsigned int len; + dma_addr_t tx_dma; + dma_addr_t rx_dma; + struct sg_table tx_sg; + struct sg_table rx_sg; + unsigned int dummy_data: 1; + unsigned int cs_change: 1; + unsigned int tx_nbits: 3; + unsigned int rx_nbits: 3; + u8 bits_per_word; + struct spi_delay delay; + struct spi_delay cs_change_delay; + struct spi_delay word_delay; + u32 speed_hz; + u32 effective_speed_hz; + unsigned int ptp_sts_word_pre; + unsigned int ptp_sts_word_post; + struct ptp_system_timestamp *ptp_sts; + bool timestamped; + struct list_head transfer_list; + u16 error; +}; + +struct spi_mem; + +struct spi_mem_op; + +struct spi_mem_dirmap_desc; + +struct spi_controller_mem_ops { + int (*adjust_op_size)(struct spi_mem *, struct spi_mem_op *); + bool (*supports_op)(struct spi_mem *, const struct spi_mem_op *); + int (*exec_op)(struct spi_mem *, const struct 
spi_mem_op *); + const char * (*get_name)(struct spi_mem *); + int (*dirmap_create)(struct spi_mem_dirmap_desc *); + void (*dirmap_destroy)(struct spi_mem_dirmap_desc *); + ssize_t (*dirmap_read)(struct spi_mem_dirmap_desc *, u64, size_t, void *); + ssize_t (*dirmap_write)(struct spi_mem_dirmap_desc *, u64, size_t, const void *); +}; + +struct max310x_devtype { + char name[9]; + int nr; + u8 mode1; + int (*detect)(struct device *); + void (*power)(struct uart_port *, int); +}; + +struct max310x_one { + struct uart_port port; + struct work_struct tx_work; + struct work_struct md_work; + struct work_struct rs_work; + u8 wr_header; + u8 rd_header; + u8 rx_buf[128]; +}; + +struct max310x_port { + const struct max310x_devtype *devtype; + struct regmap *regmap; + struct clk *clk; + struct gpio_chip gpio; + struct max310x_one p[0]; +}; + +struct sccnxp_pdata { + const u8 reg_shift; + const u32 mctrl_cfg[2]; + const unsigned int poll_time_us; +}; + +struct sccnxp_chip { + const char *name; + unsigned int nr; + long unsigned int freq_min; + long unsigned int freq_std; + long unsigned int freq_max; + unsigned int flags; + unsigned int fifosize; + unsigned int trwd; +}; + +struct sccnxp_port { + struct uart_driver uart; + struct uart_port port[2]; + bool opened[2]; + int irq; + u8 imr; + struct sccnxp_chip *chip; + struct console console; + spinlock_t lock; + bool poll; + struct timer_list timer; + struct sccnxp_pdata pdata; + struct regulator *regulator; +}; + +struct gpio_array___2; + +enum mctrl_gpio_idx { + UART_GPIO_CTS = 0, + UART_GPIO_DSR = 1, + UART_GPIO_DCD = 2, + UART_GPIO_RNG = 3, + UART_GPIO_RI = 3, + UART_GPIO_RTS = 4, + UART_GPIO_DTR = 5, + UART_GPIO_MAX = 6, +}; + +struct mctrl_gpios___2 { + struct uart_port *port; + struct gpio_desc *gpio[6]; + int irq[6]; + unsigned int mctrl_prev; + bool mctrl_on; +}; + +typedef unsigned char unchar; + +struct kgdb_nmi_tty_priv { + struct tty_port port; + struct timer_list timer; + struct { + union { + struct __kfifo kfifo; + char *type; + const char *const_type; + char (*rectype)[0]; + char *ptr; + const char *ptr_const; + }; + char buf[64]; + } fifo; +}; + +struct serdev_device; + +struct serdev_device_ops { + int (*receive_buf)(struct serdev_device *, const unsigned char *, size_t); + void (*write_wakeup)(struct serdev_device *); +}; + +struct serdev_controller; + +struct serdev_device { + struct device dev; + int nr; + struct serdev_controller *ctrl; + const struct serdev_device_ops *ops; + struct completion write_comp; + struct mutex write_lock; +}; + +struct serdev_controller_ops; + +struct serdev_controller { + struct device dev; + unsigned int nr; + struct serdev_device *serdev; + const struct serdev_controller_ops *ops; +}; + +struct serdev_device_driver { + struct device_driver driver; + int (*probe)(struct serdev_device *); + void (*remove)(struct serdev_device *); +}; + +enum serdev_parity { + SERDEV_PARITY_NONE = 0, + SERDEV_PARITY_EVEN = 1, + SERDEV_PARITY_ODD = 2, +}; + +struct serdev_controller_ops { + int (*write_buf)(struct serdev_controller *, const unsigned char *, size_t); + void (*write_flush)(struct serdev_controller *); + int (*write_room)(struct serdev_controller *); + int (*open)(struct serdev_controller *); + void (*close)(struct serdev_controller *); + void (*set_flow_control)(struct serdev_controller *, bool); + int (*set_parity)(struct serdev_controller *, enum serdev_parity); + unsigned int (*set_baudrate)(struct serdev_controller *, unsigned int); + void (*wait_until_sent)(struct serdev_controller *, long int); + int 
(*get_tiocm)(struct serdev_controller *); + int (*set_tiocm)(struct serdev_controller *, unsigned int, unsigned int); +}; + +struct acpi_serdev_lookup { + acpi_handle device_handle; + acpi_handle controller_handle; + int n; + int index; +}; + +struct serport { + struct tty_port *port; + struct tty_struct *tty; + struct tty_driver *tty_drv; + int tty_idx; + long unsigned int flags; +}; + +struct memdev { + const char *name; + umode_t mode; + const struct file_operations *fops; + fmode_t fmode; +}; + +struct timer_rand_state { + cycles_t last_time; + long int last_delta; + long int last_delta2; +}; + +struct trace_event_raw_add_device_randomness { + struct trace_entry ent; + int bytes; + long unsigned int IP; + char __data[0]; +}; + +struct trace_event_raw_random__mix_pool_bytes { + struct trace_entry ent; + const char *pool_name; + int bytes; + long unsigned int IP; + char __data[0]; +}; + +struct trace_event_raw_credit_entropy_bits { + struct trace_entry ent; + const char *pool_name; + int bits; + int entropy_count; + long unsigned int IP; + char __data[0]; +}; + +struct trace_event_raw_debit_entropy { + struct trace_entry ent; + const char *pool_name; + int debit_bits; + char __data[0]; +}; + +struct trace_event_raw_add_input_randomness { + struct trace_entry ent; + int input_bits; + char __data[0]; +}; + +struct trace_event_raw_add_disk_randomness { + struct trace_entry ent; + dev_t dev; + int input_bits; + char __data[0]; +}; + +struct trace_event_raw_random__get_random_bytes { + struct trace_entry ent; + int nbytes; + long unsigned int IP; + char __data[0]; +}; + +struct trace_event_raw_random__extract_entropy { + struct trace_entry ent; + const char *pool_name; + int nbytes; + int entropy_count; + long unsigned int IP; + char __data[0]; +}; + +struct trace_event_raw_urandom_read { + struct trace_entry ent; + int got_bits; + int pool_left; + int input_left; + char __data[0]; +}; + +struct trace_event_raw_prandom_u32 { + struct trace_entry ent; + unsigned int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_add_device_randomness {}; + +struct trace_event_data_offsets_random__mix_pool_bytes {}; + +struct trace_event_data_offsets_credit_entropy_bits {}; + +struct trace_event_data_offsets_debit_entropy {}; + +struct trace_event_data_offsets_add_input_randomness {}; + +struct trace_event_data_offsets_add_disk_randomness {}; + +struct trace_event_data_offsets_random__get_random_bytes {}; + +struct trace_event_data_offsets_random__extract_entropy {}; + +struct trace_event_data_offsets_urandom_read {}; + +struct trace_event_data_offsets_prandom_u32 {}; + +typedef void (*btf_trace_add_device_randomness)(void *, int, long unsigned int); + +typedef void (*btf_trace_mix_pool_bytes)(void *, const char *, int, long unsigned int); + +typedef void (*btf_trace_mix_pool_bytes_nolock)(void *, const char *, int, long unsigned int); + +typedef void (*btf_trace_credit_entropy_bits)(void *, const char *, int, int, long unsigned int); + +typedef void (*btf_trace_debit_entropy)(void *, const char *, int); + +typedef void (*btf_trace_add_input_randomness)(void *, int); + +typedef void (*btf_trace_add_disk_randomness)(void *, dev_t, int); + +typedef void (*btf_trace_get_random_bytes)(void *, int, long unsigned int); + +typedef void (*btf_trace_get_random_bytes_arch)(void *, int, long unsigned int); + +typedef void (*btf_trace_extract_entropy)(void *, const char *, int, int, long unsigned int); + +typedef void (*btf_trace_urandom_read)(void *, int, int, int); + +typedef void 
(*btf_trace_prandom_u32)(void *, unsigned int); + +struct poolinfo { + int poolbitshift; + int poolwords; + int poolbytes; + int poolfracbits; + int tap1; + int tap2; + int tap3; + int tap4; + int tap5; +}; + +struct crng_state { + __u32 state[16]; + long unsigned int init_time; + spinlock_t lock; +}; + +struct entropy_store { + const struct poolinfo *poolinfo; + __u32 *pool; + const char *name; + spinlock_t lock; + short unsigned int add_ptr; + short unsigned int input_rotate; + int entropy_count; + unsigned int last_data_init: 1; + __u8 last_data[10]; +}; + +struct fast_pool { + __u32 pool[4]; + long unsigned int last; + short unsigned int reg_idx; + unsigned char count; +}; + +struct batched_entropy { + union { + u64 entropy_u64[8]; + u32 entropy_u32[16]; + }; + unsigned int position; + spinlock_t batch_lock; +}; + +struct ttyprintk_port { + struct tty_port port; + spinlock_t spinlock; +}; + +struct virtio_console_config { + __virtio16 cols; + __virtio16 rows; + __virtio32 max_nr_ports; + __virtio32 emerg_wr; +}; + +struct virtio_console_control { + __virtio32 id; + __virtio16 event; + __virtio16 value; +}; + +struct ports_driver_data { + struct class *class; + struct dentry *debugfs_dir; + struct list_head portdevs; + unsigned int next_vtermno; + struct list_head consoles; +}; + +struct console___2 { + struct list_head list; + struct hvc_struct *hvc; + struct winsize ws; + u32 vtermno; +}; + +struct port_buffer { + char *buf; + size_t size; + size_t len; + size_t offset; + dma_addr_t dma; + struct device *dev; + struct list_head list; + unsigned int sgpages; + struct scatterlist sg[0]; +}; + +struct ports_device { + struct list_head list; + struct work_struct control_work; + struct work_struct config_work; + struct list_head ports; + spinlock_t ports_lock; + spinlock_t c_ivq_lock; + spinlock_t c_ovq_lock; + u32 max_nr_ports; + struct virtio_device *vdev; + struct virtqueue *c_ivq; + struct virtqueue *c_ovq; + struct virtio_console_control cpkt; + struct virtqueue **in_vqs; + struct virtqueue **out_vqs; + int chr_major; +}; + +struct port_stats { + long unsigned int bytes_sent; + long unsigned int bytes_received; + long unsigned int bytes_discarded; +}; + +struct port { + struct list_head list; + struct ports_device *portdev; + struct port_buffer *inbuf; + spinlock_t inbuf_lock; + spinlock_t outvq_lock; + struct virtqueue *in_vq; + struct virtqueue *out_vq; + struct dentry *debugfs_file; + struct port_stats stats; + struct console___2 cons; + struct cdev *cdev; + struct device *dev; + struct kref kref; + wait_queue_head_t waitqueue; + char *name; + struct fasync_struct *async_queue; + u32 id; + bool outvq_full; + bool host_connected; + bool guest_connected; +}; + +struct sg_list { + unsigned int n; + unsigned int size; + size_t len; + struct scatterlist *sg; +}; + +struct hpet_info { + long unsigned int hi_ireqfreq; + long unsigned int hi_flags; + short unsigned int hi_hpet; + short unsigned int hi_timer; +}; + +struct hpet_timer { + u64 hpet_config; + union { + u64 _hpet_hc64; + u32 _hpet_hc32; + long unsigned int _hpet_compare; + } _u1; + u64 hpet_fsb[2]; +}; + +struct hpet { + u64 hpet_cap; + u64 res0; + u64 hpet_config; + u64 res1; + u64 hpet_isr; + u64 res2[25]; + union { + u64 _hpet_mc64; + u32 _hpet_mc32; + long unsigned int _hpet_mc; + } _u0; + u64 res3; + struct hpet_timer hpet_timers[1]; +}; + +struct hpets; + +struct hpet_dev { + struct hpets *hd_hpets; + struct hpet *hd_hpet; + struct hpet_timer *hd_timer; + long unsigned int hd_ireqfreq; + long unsigned int hd_irqdata; + 
wait_queue_head_t hd_waitqueue; + struct fasync_struct *hd_async_queue; + unsigned int hd_flags; + unsigned int hd_irq; + unsigned int hd_hdwirq; + char hd_name[7]; +}; + +struct hpets { + struct hpets *hp_next; + struct hpet *hp_hpet; + long unsigned int hp_hpet_phys; + struct clocksource *hp_clocksource; + long long unsigned int hp_tick_freq; + long unsigned int hp_delta; + unsigned int hp_ntimer; + unsigned int hp_which; + struct hpet_dev hp_dev[0]; +}; + +struct compat_hpet_info { + compat_ulong_t hi_ireqfreq; + compat_ulong_t hi_flags; + short unsigned int hi_hpet; + short unsigned int hi_timer; +}; + +struct agp_bridge_data___2; + +struct agp_memory { + struct agp_memory *next; + struct agp_memory *prev; + struct agp_bridge_data___2 *bridge; + struct page **pages; + size_t page_count; + int key; + int num_scratch_pages; + off_t pg_start; + u32 type; + u32 physical; + bool is_bound; + bool is_flushed; + struct list_head mapped_list; + struct scatterlist *sg_list; + int num_sg; +}; + +struct agp_bridge_driver; + +struct agp_bridge_data___2 { + const struct agp_version *version; + const struct agp_bridge_driver *driver; + const struct vm_operations_struct *vm_ops; + void *previous_size; + void *current_size; + void *dev_private_data; + struct pci_dev *dev; + u32 *gatt_table; + u32 *gatt_table_real; + long unsigned int scratch_page; + struct page *scratch_page_page; + dma_addr_t scratch_page_dma; + long unsigned int gart_bus_addr; + long unsigned int gatt_bus_addr; + u32 mode; + enum chipset_type type; + long unsigned int *key_list; + atomic_t current_memory_agp; + atomic_t agp_in_use; + int max_memory_agp; + int aperture_size_idx; + int capndx; + int flags; + char major_version; + char minor_version; + struct list_head list; + u32 apbase_config; + struct list_head mapped_list; + spinlock_t mapped_lock; +}; + +enum aper_size_type { + U8_APER_SIZE = 0, + U16_APER_SIZE = 1, + U32_APER_SIZE = 2, + LVL2_APER_SIZE = 3, + FIXED_APER_SIZE = 4, +}; + +struct gatt_mask { + long unsigned int mask; + u32 type; +}; + +struct agp_bridge_driver { + struct module *owner; + const void *aperture_sizes; + int num_aperture_sizes; + enum aper_size_type size_type; + bool cant_use_aperture; + bool needs_scratch_page; + const struct gatt_mask *masks; + int (*fetch_size)(); + int (*configure)(); + void (*agp_enable)(struct agp_bridge_data___2 *, u32); + void (*cleanup)(); + void (*tlb_flush)(struct agp_memory *); + long unsigned int (*mask_memory)(struct agp_bridge_data___2 *, dma_addr_t, int); + void (*cache_flush)(); + int (*create_gatt_table)(struct agp_bridge_data___2 *); + int (*free_gatt_table)(struct agp_bridge_data___2 *); + int (*insert_memory)(struct agp_memory *, off_t, int); + int (*remove_memory)(struct agp_memory *, off_t, int); + struct agp_memory * (*alloc_by_type)(size_t, int); + void (*free_by_type)(struct agp_memory *); + struct page * (*agp_alloc_page)(struct agp_bridge_data___2 *); + int (*agp_alloc_pages)(struct agp_bridge_data___2 *, struct agp_memory *, size_t); + void (*agp_destroy_page)(struct page *, int); + void (*agp_destroy_pages)(struct agp_memory *); + int (*agp_type_to_mask_type)(struct agp_bridge_data___2 *, int); +}; + +struct aper_size_info_8 { + int size; + int num_entries; + int page_order; + u8 size_value; +}; + +struct aper_size_info_16 { + int size; + int num_entries; + int page_order; + u16 size_value; +}; + +struct aper_size_info_32 { + int size; + int num_entries; + int page_order; + u32 size_value; +}; + +struct aper_size_info_lvl2 { + int size; + int num_entries; + 
u32 size_value; +}; + +struct aper_size_info_fixed { + int size; + int num_entries; + int page_order; +}; + +struct agp_3_5_dev { + struct list_head list; + u8 capndx; + u32 maxbw; + struct pci_dev *dev; +}; + +struct isoch_data { + u32 maxbw; + u32 n; + u32 y; + u32 l; + u32 rq; + struct agp_3_5_dev *dev; +}; + +struct intel_agp_driver_description { + unsigned int chip_id; + char *name; + const struct agp_bridge_driver *driver; +}; + +struct intel_gtt_driver { + unsigned int gen: 8; + unsigned int is_g33: 1; + unsigned int is_pineview: 1; + unsigned int is_ironlake: 1; + unsigned int has_pgtbl_enable: 1; + unsigned int dma_mask_size: 8; + int (*setup)(); + void (*cleanup)(); + void (*write_entry)(dma_addr_t, unsigned int, unsigned int); + bool (*check_flags)(unsigned int); + void (*chipset_flush)(); +}; + +struct _intel_private { + const struct intel_gtt_driver *driver; + struct pci_dev *pcidev; + struct pci_dev *bridge_dev; + u8 *registers; + phys_addr_t gtt_phys_addr; + u32 PGETBL_save; + u32 *gtt; + bool clear_fake_agp; + int num_dcache_entries; + void *i9xx_flush_page; + char *i81x_gtt_table; + struct resource ifp_resource; + int resource_valid; + struct page *scratch_page; + phys_addr_t scratch_page_dma; + int refcount; + unsigned int needs_dmar: 1; + phys_addr_t gma_bus_addr; + resource_size_t stolen_size; + unsigned int gtt_total_entries; + unsigned int gtt_mappable_entries; +}; + +struct intel_gtt_driver_description { + unsigned int gmch_chip_id; + char *name; + const struct intel_gtt_driver *gtt_driver; +}; + +struct agp_device_ids { + short unsigned int device_id; + enum chipset_type chipset; + const char *chipset_name; + int (*chipset_setup)(struct pci_dev *); +}; + +enum tpm2_startup_types { + TPM2_SU_CLEAR = 0, + TPM2_SU_STATE = 1, +}; + +enum tpm_chip_flags { + TPM_CHIP_FLAG_TPM2 = 2, + TPM_CHIP_FLAG_IRQ = 4, + TPM_CHIP_FLAG_VIRTUAL = 8, + TPM_CHIP_FLAG_HAVE_TIMEOUTS = 16, + TPM_CHIP_FLAG_ALWAYS_POWERED = 32, + TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED = 64, +}; + +struct file_priv { + struct tpm_chip *chip; + struct tpm_space *space; + struct mutex buffer_mutex; + struct timer_list user_read_timer; + struct work_struct timeout_work; + struct work_struct async_work; + wait_queue_head_t async_wait; + ssize_t response_length; + bool response_read; + bool command_enqueued; + u8 data_buffer[4096]; +}; + +enum TPM_OPS_FLAGS { + TPM_OPS_AUTO_STARTUP = 1, +}; + +enum tpm2_timeouts { + TPM2_TIMEOUT_A = 750, + TPM2_TIMEOUT_B = 2000, + TPM2_TIMEOUT_C = 200, + TPM2_TIMEOUT_D = 30, + TPM2_DURATION_SHORT = 20, + TPM2_DURATION_MEDIUM = 750, + TPM2_DURATION_LONG = 2000, + TPM2_DURATION_LONG_LONG = 300000, + TPM2_DURATION_DEFAULT = 120000, +}; + +enum tpm_timeout { + TPM_TIMEOUT = 5, + TPM_TIMEOUT_RETRY = 100, + TPM_TIMEOUT_RANGE_US = 300, + TPM_TIMEOUT_POLL = 1, + TPM_TIMEOUT_USECS_MIN = 100, + TPM_TIMEOUT_USECS_MAX = 500, +}; + +struct stclear_flags_t { + __be16 tag; + u8 deactivated; + u8 disableForceClear; + u8 physicalPresence; + u8 physicalPresenceLock; + u8 bGlobalLock; +} __attribute__((packed)); + +struct tpm1_version { + u8 major; + u8 minor; + u8 rev_major; + u8 rev_minor; +}; + +struct tpm1_version2 { + __be16 tag; + struct tpm1_version version; +}; + +struct timeout_t { + __be32 a; + __be32 b; + __be32 c; + __be32 d; +}; + +struct duration_t { + __be32 tpm_short; + __be32 tpm_medium; + __be32 tpm_long; +}; + +struct permanent_flags_t { + __be16 tag; + u8 disable; + u8 ownership; + u8 deactivated; + u8 readPubek; + u8 disableOwnerClear; + u8 allowMaintenance; + u8 
physicalPresenceLifetimeLock; + u8 physicalPresenceHWEnable; + u8 physicalPresenceCMDEnable; + u8 CEKPUsed; + u8 TPMpost; + u8 TPMpostLock; + u8 FIPS; + u8 operator; + u8 enableRevokeEK; + u8 nvLocked; + u8 readSRKPub; + u8 tpmEstablished; + u8 maintenanceDone; + u8 disableFullDALogicInfo; +}; + +typedef union { + struct permanent_flags_t perm_flags; + struct stclear_flags_t stclear_flags; + __u8 owned; + __be32 num_pcrs; + struct tpm1_version version1; + struct tpm1_version2 version2; + __be32 manufacturer_id; + struct timeout_t timeout; + struct duration_t duration; +} cap_t; + +enum tpm_capabilities { + TPM_CAP_FLAG = 4, + TPM_CAP_PROP = 5, + TPM_CAP_VERSION_1_1 = 6, + TPM_CAP_VERSION_1_2 = 26, +}; + +enum tpm_sub_capabilities { + TPM_CAP_PROP_PCR = 257, + TPM_CAP_PROP_MANUFACTURER = 259, + TPM_CAP_FLAG_PERM = 264, + TPM_CAP_FLAG_VOL = 265, + TPM_CAP_PROP_OWNER = 273, + TPM_CAP_PROP_TIS_TIMEOUT = 277, + TPM_CAP_PROP_TIS_DURATION = 288, +}; + +struct tpm1_get_random_out { + __be32 rng_data_len; + u8 rng_data[128]; +}; + +enum tpm2_const { + TPM2_PLATFORM_PCR = 24, + TPM2_PCR_SELECT_MIN = 3, +}; + +enum tpm2_capabilities { + TPM2_CAP_HANDLES = 1, + TPM2_CAP_COMMANDS = 2, + TPM2_CAP_PCRS = 5, + TPM2_CAP_TPM_PROPERTIES = 6, +}; + +enum tpm2_properties { + TPM_PT_TOTAL_COMMANDS = 297, +}; + +enum tpm2_cc_attrs { + TPM2_CC_ATTR_CHANDLES = 25, + TPM2_CC_ATTR_RHANDLE = 28, +}; + +struct tpm2_pcr_read_out { + __be32 update_cnt; + __be32 pcr_selects_cnt; + __be16 hash_alg; + u8 pcr_select_size; + u8 pcr_select[3]; + __be32 digests_cnt; + __be16 digest_size; + u8 digest[0]; +} __attribute__((packed)); + +struct tpm2_null_auth_area { + __be32 handle; + __be16 nonce_size; + u8 attributes; + __be16 auth_size; +} __attribute__((packed)); + +struct tpm2_get_random_out { + __be16 size; + u8 buffer[128]; +}; + +struct tpm2_get_cap_out { + u8 more_data; + __be32 subcap_id; + __be32 property_cnt; + __be32 property_id; + __be32 value; +} __attribute__((packed)); + +struct tpm2_pcr_selection { + __be16 hash_alg; + u8 size_of_select; + u8 pcr_select[3]; +}; + +struct tpmrm_priv { + struct file_priv priv; + struct tpm_space space; +}; + +enum tpm2_handle_types { + TPM2_HT_HMAC_SESSION = 33554432, + TPM2_HT_POLICY_SESSION = 50331648, + TPM2_HT_TRANSIENT = 2147483648, +}; + +struct tpm2_context { + __be64 sequence; + __be32 saved_handle; + __be32 hierarchy; + __be16 blob_size; +} __attribute__((packed)); + +struct tpm2_cap_handles { + u8 more_data; + __be32 capability; + __be32 count; + __be32 handles[0]; +} __attribute__((packed)); + +struct tpm_readpubek_out { + u8 algorithm[4]; + u8 encscheme[2]; + u8 sigscheme[2]; + __be32 paramsize; + u8 parameters[12]; + __be32 keysize; + u8 modulus[256]; + u8 checksum[20]; +}; + +struct tpm_pcr_attr { + int alg_id; + int pcr; + struct device_attribute attr; +}; + +struct tcpa_event { + u32 pcr_index; + u32 event_type; + u8 pcr_value[20]; + u32 event_size; + u8 event_data[0]; +}; + +enum tcpa_event_types { + PREBOOT = 0, + POST_CODE = 1, + UNUSED = 2, + NO_ACTION = 3, + SEPARATOR = 4, + ACTION = 5, + EVENT_TAG = 6, + SCRTM_CONTENTS = 7, + SCRTM_VERSION = 8, + CPU_MICROCODE = 9, + PLATFORM_CONFIG_FLAGS = 10, + TABLE_OF_DEVICES = 11, + COMPACT_HASH = 12, + IPL = 13, + IPL_PARTITION_DATA = 14, + NONHOST_CODE = 15, + NONHOST_CONFIG = 16, + NONHOST_INFO = 17, +}; + +struct tcpa_pc_event { + u32 event_id; + u32 event_size; + u8 event_data[0]; +}; + +enum tcpa_pc_event_ids { + SMBIOS = 1, + BIS_CERT = 2, + POST_BIOS_ROM = 3, + ESCD = 4, + CMOS = 5, + NVRAM = 6, + OPTION_ROM_EXEC 
= 7, + OPTION_ROM_CONFIG = 8, + OPTION_ROM_MICROCODE = 10, + S_CRTM_VERSION = 11, + S_CRTM_CONTENTS = 12, + POST_CONTENTS = 13, + HOST_TABLE_OF_DEVICES = 14, +}; + +struct tcg_efi_specid_event_algs { + u16 alg_id; + u16 digest_size; +}; + +struct tcg_efi_specid_event_head { + u8 signature[16]; + u32 platform_class; + u8 spec_version_minor; + u8 spec_version_major; + u8 spec_errata; + u8 uintnsize; + u32 num_algs; + struct tcg_efi_specid_event_algs digest_sizes[0]; +}; + +struct tcg_pcr_event { + u32 pcr_idx; + u32 event_type; + u8 digest[20]; + u32 event_size; + u8 event[0]; +}; + +struct tcg_event_field { + u32 event_size; + u8 event[0]; +}; + +struct tcg_pcr_event2_head { + u32 pcr_idx; + u32 event_type; + u32 count; + struct tpm_digest digests[0]; +}; + +struct acpi_table_tpm2 { + struct acpi_table_header header; + u16 platform_class; + u16 reserved; + u64 control_address; + u32 start_method; +} __attribute__((packed)); + +struct acpi_tpm2_phy { + u8 start_method_specific[12]; + u32 log_area_minimum_length; + u64 log_area_start_address; +}; + +enum bios_platform_class { + BIOS_CLIENT = 0, + BIOS_SERVER = 1, +}; + +struct client_hdr { + u32 log_max_len; + u64 log_start_addr; +} __attribute__((packed)); + +struct server_hdr { + u16 reserved; + u64 log_max_len; + u64 log_start_addr; +} __attribute__((packed)); + +struct acpi_tcpa { + struct acpi_table_header hdr; + u16 platform_class; + union { + struct client_hdr client; + struct server_hdr server; + }; +} __attribute__((packed)); + +struct linux_efi_tpm_eventlog { + u32 size; + u32 final_events_preboot_size; + u8 version; + u8 log[0]; +}; + +struct efi_tcg2_final_events_table { + u64 version; + u64 nr_events; + u8 events[0]; +}; + +enum tis_access { + TPM_ACCESS_VALID = 128, + TPM_ACCESS_ACTIVE_LOCALITY = 32, + TPM_ACCESS_REQUEST_PENDING = 4, + TPM_ACCESS_REQUEST_USE = 2, +}; + +enum tis_status { + TPM_STS_VALID = 128, + TPM_STS_COMMAND_READY = 64, + TPM_STS_GO = 32, + TPM_STS_DATA_AVAIL = 16, + TPM_STS_DATA_EXPECT = 8, + TPM_STS_READ_ZERO = 35, +}; + +enum tis_int_flags { + TPM_GLOBAL_INT_ENABLE = 2147483648, + TPM_INTF_BURST_COUNT_STATIC = 256, + TPM_INTF_CMD_READY_INT = 128, + TPM_INTF_INT_EDGE_FALLING = 64, + TPM_INTF_INT_EDGE_RISING = 32, + TPM_INTF_INT_LEVEL_LOW = 16, + TPM_INTF_INT_LEVEL_HIGH = 8, + TPM_INTF_LOCALITY_CHANGE_INT = 4, + TPM_INTF_STS_VALID_INT = 2, + TPM_INTF_DATA_AVAIL_INT = 1, +}; + +enum tis_defaults { + TIS_MEM_LEN = 20480, + TIS_SHORT_TIMEOUT = 750, + TIS_LONG_TIMEOUT = 2000, +}; + +enum tpm_tis_flags { + TPM_TIS_ITPM_WORKAROUND = 1, + TPM_TIS_INVALID_STATUS = 2, +}; + +struct tpm_tis_phy_ops; + +struct tpm_tis_data { + u16 manufacturer_id; + int locality; + int irq; + bool irq_tested; + long unsigned int flags; + void *ilb_base_addr; + u16 clkrun_enabled; + wait_queue_head_t int_queue; + wait_queue_head_t read_queue; + const struct tpm_tis_phy_ops *phy_ops; + short unsigned int rng_quality; +}; + +struct tpm_tis_phy_ops { + int (*read_bytes)(struct tpm_tis_data *, u32, u16, u8 *); + int (*write_bytes)(struct tpm_tis_data *, u32, u16, const u8 *); + int (*read16)(struct tpm_tis_data *, u32, u16 *); + int (*read32)(struct tpm_tis_data *, u32, u32 *); + int (*write32)(struct tpm_tis_data *, u32, u32); +}; + +struct tis_vendor_durations_override { + u32 did_vid; + struct tpm1_version version; + long unsigned int durations[3]; +}; + +struct tis_vendor_timeout_override { + u32 did_vid; + long unsigned int timeout_us[4]; +}; + +struct tpm_info { + struct resource res; + int irq; +}; + +struct tpm_tis_tcg_phy { + 
struct tpm_tis_data priv; + void *iobase; +}; + +enum crb_defaults { + CRB_ACPI_START_REVISION_ID = 1, + CRB_ACPI_START_INDEX = 1, +}; + +enum crb_loc_ctrl { + CRB_LOC_CTRL_REQUEST_ACCESS = 1, + CRB_LOC_CTRL_RELINQUISH = 2, +}; + +enum crb_loc_state { + CRB_LOC_STATE_LOC_ASSIGNED = 2, + CRB_LOC_STATE_TPM_REG_VALID_STS = 128, +}; + +enum crb_ctrl_req { + CRB_CTRL_REQ_CMD_READY = 1, + CRB_CTRL_REQ_GO_IDLE = 2, +}; + +enum crb_ctrl_sts { + CRB_CTRL_STS_ERROR = 1, + CRB_CTRL_STS_TPM_IDLE = 2, +}; + +enum crb_start { + CRB_START_INVOKE = 1, +}; + +enum crb_cancel { + CRB_CANCEL_INVOKE = 1, +}; + +struct crb_regs_head { + u32 loc_state; + u32 reserved1; + u32 loc_ctrl; + u32 loc_sts; + u8 reserved2[32]; + u64 intf_id; + u64 ctrl_ext; +}; + +struct crb_regs_tail { + u32 ctrl_req; + u32 ctrl_sts; + u32 ctrl_cancel; + u32 ctrl_start; + u32 ctrl_int_enable; + u32 ctrl_int_sts; + u32 ctrl_cmd_size; + u32 ctrl_cmd_pa_low; + u32 ctrl_cmd_pa_high; + u32 ctrl_rsp_size; + u64 ctrl_rsp_pa; +}; + +enum crb_status { + CRB_DRV_STS_COMPLETE = 1, +}; + +struct crb_priv { + u32 sm; + const char *hid; + struct crb_regs_head *regs_h; + struct crb_regs_tail *regs_t; + u8 *cmd; + u8 *rsp; + u32 cmd_size; + u32 smc_func_id; +}; + +struct tpm2_crb_smc { + u32 interrupt; + u8 interrupt_flags; + u8 op_flags; + u16 reserved2; + u32 smc_func_id; +}; + +struct vcpu_data; + +struct amd_iommu_pi_data { + u32 ga_tag; + u32 prev_ga_tag; + u64 base; + bool is_guest_mode; + struct vcpu_data *vcpu_data; + void *ir_data; +}; + +struct vcpu_data { + u64 pi_desc_addr; + u32 vector; +}; + +struct amd_iommu_device_info { + int max_pasids; + u32 flags; +}; + +enum io_pgtable_fmt { + ARM_32_LPAE_S1 = 0, + ARM_32_LPAE_S2 = 1, + ARM_64_LPAE_S1 = 2, + ARM_64_LPAE_S2 = 3, + ARM_V7S = 4, + ARM_MALI_LPAE = 5, + AMD_IOMMU_V1 = 6, + IO_PGTABLE_NUM_FMTS = 7, +}; + +struct iommu_flush_ops { + void (*tlb_flush_all)(void *); + void (*tlb_flush_walk)(long unsigned int, size_t, size_t, void *); + void (*tlb_add_page)(struct iommu_iotlb_gather *, long unsigned int, size_t, void *); +}; + +struct io_pgtable_cfg { + long unsigned int quirks; + long unsigned int pgsize_bitmap; + unsigned int ias; + unsigned int oas; + bool coherent_walk; + const struct iommu_flush_ops *tlb; + struct device *iommu_dev; + union { + struct { + u64 ttbr; + struct { + u32 ips: 3; + u32 tg: 2; + u32 sh: 2; + u32 orgn: 2; + u32 irgn: 2; + u32 tsz: 6; + } tcr; + u64 mair; + } arm_lpae_s1_cfg; + struct { + u64 vttbr; + struct { + u32 ps: 3; + u32 tg: 2; + u32 sh: 2; + u32 orgn: 2; + u32 irgn: 2; + u32 sl: 2; + u32 tsz: 6; + } vtcr; + } arm_lpae_s2_cfg; + struct { + u32 ttbr; + u32 tcr; + u32 nmrr; + u32 prrr; + } arm_v7s_cfg; + struct { + u64 transtab; + u64 memattr; + } arm_mali_lpae_cfg; + }; +}; + +struct io_pgtable_ops { + int (*map)(struct io_pgtable_ops *, long unsigned int, phys_addr_t, size_t, int, gfp_t); + size_t (*unmap)(struct io_pgtable_ops *, long unsigned int, size_t, struct iommu_iotlb_gather *); + phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *, long unsigned int); +}; + +struct io_pgtable { + enum io_pgtable_fmt fmt; + void *cookie; + struct io_pgtable_cfg cfg; + struct io_pgtable_ops ops; +}; + +struct irq_remap_table { + raw_spinlock_t lock; + unsigned int min_index; + u32 *table; +}; + +struct amd_iommu_fault { + u64 address; + u32 pasid; + u16 device_id; + u16 tag; + u16 flags; +}; + +struct amd_io_pgtable { + struct io_pgtable_cfg pgtbl_cfg; + struct io_pgtable iop; + int mode; + u64 *root; + atomic64_t pt_root; +}; + +struct protection_domain { + 
struct list_head dev_list; + struct iommu_domain domain; + struct amd_io_pgtable iop; + spinlock_t lock; + u16 id; + int glx; + u64 *gcr3_tbl; + long unsigned int flags; + unsigned int dev_cnt; + unsigned int dev_iommu[32]; +}; + +struct amd_irte_ops; + +struct amd_iommu___2 { + struct list_head list; + int index; + raw_spinlock_t lock; + struct pci_dev *dev; + struct pci_dev *root_pdev; + u64 mmio_phys; + u64 mmio_phys_end; + u8 *mmio_base; + u32 cap; + u8 acpi_flags; + u64 features; + bool is_iommu_v2; + u16 devid; + u16 cap_ptr; + u16 pci_seg; + u64 exclusion_start; + u64 exclusion_length; + u8 *cmd_buf; + u32 cmd_buf_head; + u32 cmd_buf_tail; + u8 *evt_buf; + u8 *ppr_log; + u8 *ga_log; + u8 *ga_log_tail; + bool int_enabled; + bool need_sync; + struct iommu_device iommu; + u32 stored_addr_lo; + u32 stored_addr_hi; + u32 stored_l1[108]; + u32 stored_l2[131]; + u8 max_banks; + u8 max_counters; + struct irq_domain *ir_domain; + struct irq_domain *msi_domain; + struct amd_irte_ops *irte_ops; + u32 flags; + volatile u64 *cmd_sem; + u64 cmd_sem_val; + struct irq_affinity_notify intcapxt_notify; +}; + +struct amd_irte_ops { + void (*prepare)(void *, u32, bool, u8, u32, int); + void (*activate)(void *, u16, u16); + void (*deactivate)(void *, u16, u16); + void (*set_affinity)(void *, u16, u16, u8, u32); + void * (*get)(struct irq_remap_table *, int); + void (*set_allocated)(struct irq_remap_table *, int); + bool (*is_allocated)(struct irq_remap_table *, int); + void (*clear_allocated)(struct irq_remap_table *, int); +}; + +struct acpihid_map_entry { + struct list_head list; + u8 uid[256]; + u8 hid[9]; + u16 devid; + u16 root_devid; + bool cmd_line; + struct iommu_group *group; +}; + +struct devid_map { + struct list_head list; + u8 id; + u16 devid; + bool cmd_line; +}; + +struct iommu_dev_data { + spinlock_t lock; + struct list_head list; + struct llist_node dev_data_list; + struct protection_domain *domain; + struct pci_dev *pdev; + u16 devid; + bool iommu_v2; + struct { + bool enabled; + int qdep; + } ats; + bool pri_tlp; + bool use_vapic; + bool defer_attach; + struct ratelimit_state rs; +}; + +struct dev_table_entry { + u64 data[4]; +}; + +struct unity_map_entry { + struct list_head list; + u16 devid_start; + u16 devid_end; + u64 address_start; + u64 address_end; + int prot; +}; + +enum amd_iommu_intr_mode_type { + AMD_IOMMU_GUEST_IR_LEGACY = 0, + AMD_IOMMU_GUEST_IR_LEGACY_GA = 1, + AMD_IOMMU_GUEST_IR_VAPIC = 2, +}; + +union irte { + u32 val; + struct { + u32 valid: 1; + u32 no_fault: 1; + u32 int_type: 3; + u32 rq_eoi: 1; + u32 dm: 1; + u32 rsvd_1: 1; + u32 destination: 8; + u32 vector: 8; + u32 rsvd_2: 8; + } fields; +}; + +union irte_ga_lo { + u64 val; + struct { + u64 valid: 1; + u64 no_fault: 1; + u64 int_type: 3; + u64 rq_eoi: 1; + u64 dm: 1; + u64 guest_mode: 1; + u64 destination: 24; + u64 ga_tag: 32; + } fields_remap; + struct { + u64 valid: 1; + u64 no_fault: 1; + u64 ga_log_intr: 1; + u64 rsvd1: 3; + u64 is_run: 1; + u64 guest_mode: 1; + u64 destination: 24; + u64 ga_tag: 32; + } fields_vapic; +}; + +union irte_ga_hi { + u64 val; + struct { + u64 vector: 8; + u64 rsvd_1: 4; + u64 ga_root_ptr: 40; + u64 rsvd_2: 4; + u64 destination: 8; + } fields; +}; + +struct irte_ga { + union irte_ga_lo lo; + union irte_ga_hi hi; +}; + +struct irq_2_irte { + u16 devid; + u16 index; +}; + +struct amd_ir_data { + u32 cached_ga_tag; + struct irq_2_irte irq_2_irte; + struct msi_msg msi_entry; + void *entry; + void *ref; + struct irq_cfg *cfg; + int ga_vector; + int ga_root_ptr; + int ga_tag; +}; + 
+struct irq_remap_ops { + int capability; + int (*prepare)(); + int (*enable)(); + void (*disable)(); + int (*reenable)(int); + int (*enable_faulting)(); +}; + +struct iommu_cmd { + u32 data[4]; +}; + +enum irq_remap_cap { + IRQ_POSTING_CAP = 0, +}; + +struct ivhd_header { + u8 type; + u8 flags; + u16 length; + u16 devid; + u16 cap_ptr; + u64 mmio_phys; + u16 pci_seg; + u16 info; + u32 efr_attr; + u64 efr_reg; + u64 res; +}; + +struct ivhd_entry { + u8 type; + u16 devid; + u8 flags; + u32 ext; + u32 hidh; + u64 cid; + u8 uidf; + u8 uidl; + u8 uid; +} __attribute__((packed)); + +struct ivmd_header { + u8 type; + u8 flags; + u16 length; + u16 devid; + u16 aux; + u64 resv; + u64 range_start; + u64 range_length; +}; + +enum iommu_init_state { + IOMMU_START_STATE = 0, + IOMMU_IVRS_DETECTED = 1, + IOMMU_ACPI_FINISHED = 2, + IOMMU_ENABLED = 3, + IOMMU_PCI_INIT = 4, + IOMMU_INTERRUPTS_EN = 5, + IOMMU_INITIALIZED = 6, + IOMMU_NOT_FOUND = 7, + IOMMU_INIT_ERROR = 8, + IOMMU_CMDLINE_DISABLED = 9, +}; + +union intcapxt { + u64 capxt; + struct { + u64 reserved_0: 2; + u64 dest_mode_logical: 1; + u64 reserved_1: 5; + u64 destid_0_23: 24; + u64 vector: 8; + u64 reserved_2: 16; + u64 destid_24_31: 8; + }; +}; + +struct ivrs_quirk_entry { + u8 id; + u16 devid; +}; + +enum { + DELL_INSPIRON_7375 = 0, + DELL_LATITUDE_5495 = 1, + LENOVO_IDEAPAD_330S_15ARR = 2, +}; + +struct io_pgtable_init_fns { + struct io_pgtable * (*alloc)(struct io_pgtable_cfg *, void *); + void (*free)(struct io_pgtable *); +}; + +struct acpi_table_dmar { + struct acpi_table_header header; + u8 width; + u8 flags; + u8 reserved[10]; +}; + +struct acpi_dmar_header { + u16 type; + u16 length; +}; + +enum acpi_dmar_type { + ACPI_DMAR_TYPE_HARDWARE_UNIT = 0, + ACPI_DMAR_TYPE_RESERVED_MEMORY = 1, + ACPI_DMAR_TYPE_ROOT_ATS = 2, + ACPI_DMAR_TYPE_HARDWARE_AFFINITY = 3, + ACPI_DMAR_TYPE_NAMESPACE = 4, + ACPI_DMAR_TYPE_SATC = 5, + ACPI_DMAR_TYPE_RESERVED = 6, +}; + +struct acpi_dmar_device_scope { + u8 entry_type; + u8 length; + u16 reserved; + u8 enumeration_id; + u8 bus; +}; + +enum acpi_dmar_scope_type { + ACPI_DMAR_SCOPE_TYPE_NOT_USED = 0, + ACPI_DMAR_SCOPE_TYPE_ENDPOINT = 1, + ACPI_DMAR_SCOPE_TYPE_BRIDGE = 2, + ACPI_DMAR_SCOPE_TYPE_IOAPIC = 3, + ACPI_DMAR_SCOPE_TYPE_HPET = 4, + ACPI_DMAR_SCOPE_TYPE_NAMESPACE = 5, + ACPI_DMAR_SCOPE_TYPE_RESERVED = 6, +}; + +struct acpi_dmar_pci_path { + u8 device; + u8 function; +}; + +struct acpi_dmar_hardware_unit { + struct acpi_dmar_header header; + u8 flags; + u8 reserved; + u16 segment; + u64 address; +}; + +struct acpi_dmar_reserved_memory { + struct acpi_dmar_header header; + u16 reserved; + u16 segment; + u64 base_address; + u64 end_address; +}; + +struct acpi_dmar_atsr { + struct acpi_dmar_header header; + u8 flags; + u8 reserved; + u16 segment; +}; + +struct acpi_dmar_rhsa { + struct acpi_dmar_header header; + u32 reserved; + u64 base_address; + u32 proximity_domain; +} __attribute__((packed)); + +struct acpi_dmar_andd { + struct acpi_dmar_header header; + u8 reserved[3]; + u8 device_number; + char device_name[1]; +} __attribute__((packed)); + +struct acpi_dmar_satc { + struct acpi_dmar_header header; + u8 flags; + u8 reserved; + u16 segment; +}; + +struct dmar_dev_scope { + struct device *dev; + u8 bus; + u8 devfn; +}; + +struct intel_iommu; + +struct dmar_drhd_unit { + struct list_head list; + struct acpi_dmar_header *hdr; + u64 reg_base_addr; + struct dmar_dev_scope *devices; + int devices_cnt; + u16 segment; + u8 ignored: 1; + u8 include_all: 1; + u8 gfx_dedicated: 1; + struct intel_iommu *iommu; 
+}; + +struct iommu_flush { + void (*flush_context)(struct intel_iommu *, u16, u16, u8, u64); + void (*flush_iotlb)(struct intel_iommu *, u16, u64, unsigned int, u64); +}; + +typedef unsigned int ioasid_t; + +typedef ioasid_t (*ioasid_alloc_fn_t)(ioasid_t, ioasid_t, void *); + +typedef void (*ioasid_free_fn_t)(ioasid_t, void *); + +struct ioasid_allocator_ops { + ioasid_alloc_fn_t alloc; + ioasid_free_fn_t free; + struct list_head list; + void *pdata; +}; + +struct dmar_domain; + +struct root_entry; + +struct page_req_dsc; + +struct q_inval; + +struct ir_table; + +struct intel_iommu { + void *reg; + u64 reg_phys; + u64 reg_size; + u64 cap; + u64 ecap; + u64 vccap; + u32 gcmd; + raw_spinlock_t register_lock; + int seq_id; + int agaw; + int msagaw; + unsigned int irq; + unsigned int pr_irq; + u16 segment; + unsigned char name[13]; + long unsigned int *domain_ids; + struct dmar_domain ***domains; + spinlock_t lock; + struct root_entry *root_entry; + struct iommu_flush flush; + struct page_req_dsc *prq; + unsigned char prq_name[16]; + struct completion prq_complete; + struct ioasid_allocator_ops pasid_allocator; + struct q_inval *qi; + u32 *iommu_state; + struct ir_table *ir_table; + struct irq_domain *ir_domain; + struct irq_domain *ir_msi_domain; + struct iommu_device iommu; + int node; + u32 flags; + struct dmar_drhd_unit *drhd; +}; + +struct dmar_pci_path { + u8 bus; + u8 device; + u8 function; +}; + +struct dmar_pci_notify_info { + struct pci_dev *dev; + long unsigned int event; + int bus; + u16 seg; + u16 level; + struct dmar_pci_path path[0]; +}; + +struct irte___2 { + union { + struct { + __u64 present: 1; + __u64 fpd: 1; + __u64 __res0: 6; + __u64 avail: 4; + __u64 __res1: 3; + __u64 pst: 1; + __u64 vector: 8; + __u64 __res2: 40; + }; + struct { + __u64 r_present: 1; + __u64 r_fpd: 1; + __u64 dst_mode: 1; + __u64 redir_hint: 1; + __u64 trigger_mode: 1; + __u64 dlvry_mode: 3; + __u64 r_avail: 4; + __u64 r_res0: 4; + __u64 r_vector: 8; + __u64 r_res1: 8; + __u64 dest_id: 32; + }; + struct { + __u64 p_present: 1; + __u64 p_fpd: 1; + __u64 p_res0: 6; + __u64 p_avail: 4; + __u64 p_res1: 2; + __u64 p_urgent: 1; + __u64 p_pst: 1; + __u64 p_vector: 8; + __u64 p_res2: 14; + __u64 pda_l: 26; + }; + __u64 low; + }; + union { + struct { + __u64 sid: 16; + __u64 sq: 2; + __u64 svt: 2; + __u64 __res3: 44; + }; + struct { + __u64 p_sid: 16; + __u64 p_sq: 2; + __u64 p_svt: 2; + __u64 p_res3: 12; + __u64 pda_h: 32; + }; + __u64 high; + }; +}; + +struct iova { + struct rb_node node; + long unsigned int pfn_hi; + long unsigned int pfn_lo; +}; + +struct iova_magazine; + +struct iova_cpu_rcache; + +struct iova_rcache { + spinlock_t lock; + long unsigned int depot_size; + struct iova_magazine *depot[32]; + struct iova_cpu_rcache *cpu_rcaches; +}; + +struct iova_domain; + +typedef void (*iova_flush_cb)(struct iova_domain *); + +typedef void (*iova_entry_dtor)(long unsigned int); + +struct iova_fq; + +struct iova_domain { + spinlock_t iova_rbtree_lock; + struct rb_root rbroot; + struct rb_node *cached_node; + struct rb_node *cached32_node; + long unsigned int granule; + long unsigned int start_pfn; + long unsigned int dma_32bit_pfn; + long unsigned int max32_alloc_size; + struct iova_fq *fq; + atomic64_t fq_flush_start_cnt; + atomic64_t fq_flush_finish_cnt; + struct iova anchor; + struct iova_rcache rcaches[6]; + iova_flush_cb flush_cb; + iova_entry_dtor entry_dtor; + struct timer_list fq_timer; + atomic_t fq_timer_on; + struct hlist_node cpuhp_dead; +}; + +struct iova_fq_entry { + long unsigned int 
iova_pfn; + long unsigned int pages; + long unsigned int data; + u64 counter; +}; + +struct iova_fq { + struct iova_fq_entry entries[256]; + unsigned int head; + unsigned int tail; + spinlock_t lock; +}; + +enum { + QI_FREE = 0, + QI_IN_USE = 1, + QI_DONE = 2, + QI_ABORT = 3, +}; + +struct qi_desc { + u64 qw0; + u64 qw1; + u64 qw2; + u64 qw3; +}; + +struct q_inval { + raw_spinlock_t q_lock; + void *desc; + int *desc_status; + int free_head; + int free_tail; + int free_cnt; +}; + +struct ir_table { + struct irte___2 *base; + long unsigned int *bitmap; +}; + +struct root_entry { + u64 lo; + u64 hi; +}; + +struct dma_pte; + +struct dmar_domain { + int nid; + unsigned int iommu_refcnt[128]; + u16 iommu_did[128]; + bool has_iotlb_device; + struct list_head devices; + struct list_head subdevices; + struct iova_domain iovad; + struct dma_pte *pgd; + int gaw; + int agaw; + int flags; + int iommu_coherency; + int iommu_snooping; + int iommu_count; + int iommu_superpage; + u64 max_addr; + u32 default_pasid; + struct iommu_domain domain; +}; + +struct dma_pte { + u64 val; +}; + +typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *); + +struct dmar_res_callback { + dmar_res_handler_t cb[6]; + void *arg[6]; + bool ignore_unhandled; + bool print_entry; +}; + +enum faulttype { + DMA_REMAP = 0, + INTR_REMAP = 1, + UNKNOWN = 2, +}; + +struct ioasid_set { + int dummy; +}; + +enum iommu_inv_granularity { + IOMMU_INV_GRANU_DOMAIN = 0, + IOMMU_INV_GRANU_PASID = 1, + IOMMU_INV_GRANU_ADDR = 2, + IOMMU_INV_GRANU_NR = 3, +}; + +enum { + SR_DMAR_FECTL_REG = 0, + SR_DMAR_FEDATA_REG = 1, + SR_DMAR_FEADDR_REG = 2, + SR_DMAR_FEUADDR_REG = 3, + MAX_SR_DMAR_REGS = 4, +}; + +struct context_entry { + u64 lo; + u64 hi; +}; + +struct subdev_domain_info { + struct list_head link_phys; + struct list_head link_domain; + struct device *pdev; + struct dmar_domain *domain; + int users; +}; + +struct pasid_table; + +struct device_domain_info { + struct list_head link; + struct list_head global; + struct list_head table; + struct list_head subdevices; + u32 segment; + u8 bus; + u8 devfn; + u16 pfsid; + u8 pasid_supported: 3; + u8 pasid_enabled: 1; + u8 pri_supported: 1; + u8 pri_enabled: 1; + u8 ats_supported: 1; + u8 ats_enabled: 1; + u8 auxd_enabled: 1; + u8 ats_qdep; + struct device *dev; + struct intel_iommu *iommu; + struct dmar_domain *domain; + struct pasid_table *pasid_table; +}; + +struct pasid_table { + void *table; + int order; + u32 max_pasid; + struct list_head dev; +}; + +enum cap_audit_type { + CAP_AUDIT_STATIC_DMAR = 0, + CAP_AUDIT_STATIC_IRQR = 1, + CAP_AUDIT_HOTPLUG_DMAR = 2, + CAP_AUDIT_HOTPLUG_IRQR = 3, +}; + +struct dmar_rmrr_unit { + struct list_head list; + struct acpi_dmar_header *hdr; + u64 base_address; + u64 end_address; + struct dmar_dev_scope *devices; + int devices_cnt; +}; + +struct dmar_atsr_unit { + struct list_head list; + struct acpi_dmar_header *hdr; + struct dmar_dev_scope *devices; + int devices_cnt; + u8 include_all: 1; +}; + +struct dmar_satc_unit { + struct list_head list; + struct acpi_dmar_header *hdr; + struct dmar_dev_scope *devices; + struct intel_iommu *iommu; + int devices_cnt; + u8 atc_required: 1; +}; + +struct domain_context_mapping_data { + struct dmar_domain *domain; + struct intel_iommu *iommu; + struct pasid_table *table; +}; + +struct pasid_dir_entry { + u64 val; +}; + +struct pasid_entry { + u64 val[8]; +}; + +struct pasid_table_opaque { + struct pasid_table **pasid_table; + int segment; + int bus; + int devfn; +}; + +struct trace_event_raw_qi_submit { + struct 
trace_entry ent; + u64 qw0; + u64 qw1; + u64 qw2; + u64 qw3; + u32 __data_loc_iommu; + char __data[0]; +}; + +struct trace_event_data_offsets_qi_submit { + u32 iommu; +}; + +typedef void (*btf_trace_qi_submit)(void *, struct intel_iommu *, u64, u64, u64, u64); + +enum iommu_fault_type { + IOMMU_FAULT_DMA_UNRECOV = 1, + IOMMU_FAULT_PAGE_REQ = 2, +}; + +struct page_req_dsc { + union { + struct { + u64 type: 8; + u64 pasid_present: 1; + u64 priv_data_present: 1; + u64 rsvd: 6; + u64 rid: 16; + u64 pasid: 20; + u64 exe_req: 1; + u64 pm_req: 1; + u64 rsvd2: 10; + }; + u64 qw_0; + }; + union { + struct { + u64 rd_req: 1; + u64 wr_req: 1; + u64 lpig: 1; + u64 prg_index: 9; + u64 addr: 52; + }; + u64 qw_1; + }; + u64 priv_data[2]; +}; + +struct intel_svm_dev { + struct list_head list; + struct callback_head rcu; + struct device *dev; + struct intel_iommu *iommu; + struct iommu_sva sva; + u32 pasid; + int users; + u16 did; + u16 dev_iotlb: 1; + u16 sid; + u16 qdep; +}; + +struct intel_svm { + struct mmu_notifier notifier; + struct mm_struct *mm; + unsigned int flags; + u32 pasid; + int gpasid; + struct list_head devs; + struct list_head list; +}; + +enum irq_mode { + IRQ_REMAPPING = 0, + IRQ_POSTING = 1, +}; + +struct ioapic_scope { + struct intel_iommu *iommu; + unsigned int id; + unsigned int bus; + unsigned int devfn; +}; + +struct hpet_scope { + struct intel_iommu *iommu; + u8 id; + unsigned int bus; + unsigned int devfn; +}; + +struct irq_2_iommu { + struct intel_iommu *iommu; + u16 irte_index; + u16 sub_handle; + u8 irte_mask; + enum irq_mode mode; +}; + +struct intel_ir_data { + struct irq_2_iommu irq_2_iommu; + struct irte___2 irte_entry; + union { + struct msi_msg msi_entry; + }; +}; + +struct set_msi_sid_data { + struct pci_dev *pdev; + u16 alias; + int count; + int busmatch_count; +}; + +struct iommu_group { + struct kobject kobj; + struct kobject *devices_kobj; + struct list_head devices; + struct mutex mutex; + struct blocking_notifier_head notifier; + void *iommu_data; + void (*iommu_data_release)(void *); + char *name; + int id; + struct iommu_domain *default_domain; + struct iommu_domain *domain; + struct list_head entry; +}; + +struct fsl_mc_obj_desc { + char type[16]; + int id; + u16 vendor; + u16 ver_major; + u16 ver_minor; + u8 irq_count; + u8 region_count; + u32 state; + char label[16]; + u16 flags; +}; + +struct fsl_mc_io; + +struct fsl_mc_device_irq; + +struct fsl_mc_resource; + +struct fsl_mc_device { + struct device dev; + u64 dma_mask; + u16 flags; + u32 icid; + u16 mc_handle; + struct fsl_mc_io *mc_io; + struct fsl_mc_obj_desc obj_desc; + struct resource *regions; + struct fsl_mc_device_irq **irqs; + struct fsl_mc_resource *resource; + struct device_link *consumer_link; + char *driver_override; +}; + +enum fsl_mc_pool_type { + FSL_MC_POOL_DPMCP = 0, + FSL_MC_POOL_DPBP = 1, + FSL_MC_POOL_DPCON = 2, + FSL_MC_POOL_IRQ = 3, + FSL_MC_NUM_POOL_TYPES = 4, +}; + +struct fsl_mc_resource_pool; + +struct fsl_mc_resource { + enum fsl_mc_pool_type type; + s32 id; + void *data; + struct fsl_mc_resource_pool *parent_pool; + struct list_head node; +}; + +struct fsl_mc_device_irq { + struct msi_desc *msi_desc; + struct fsl_mc_device *mc_dev; + u8 dev_irq_index; + struct fsl_mc_resource resource; +}; + +struct fsl_mc_io { + struct device *dev; + u16 flags; + u32 portal_size; + phys_addr_t portal_phys_addr; + void *portal_virt_addr; + struct fsl_mc_device *dpmcp_dev; + union { + struct mutex mutex; + raw_spinlock_t spinlock; + }; +}; + +struct group_device { + struct list_head list; + 
struct device *dev; + char *name; +}; + +struct iommu_group_attribute { + struct attribute attr; + ssize_t (*show)(struct iommu_group *, char *); + ssize_t (*store)(struct iommu_group *, const char *, size_t); +}; + +struct group_for_pci_data { + struct pci_dev *pdev; + struct iommu_group *group; +}; + +struct __group_domain_type { + struct device *dev; + unsigned int type; +}; + +struct trace_event_raw_iommu_group_event { + struct trace_entry ent; + int gid; + u32 __data_loc_device; + char __data[0]; +}; + +struct trace_event_raw_iommu_device_event { + struct trace_entry ent; + u32 __data_loc_device; + char __data[0]; +}; + +struct trace_event_raw_map { + struct trace_entry ent; + u64 iova; + u64 paddr; + size_t size; + char __data[0]; +}; + +struct trace_event_raw_unmap { + struct trace_entry ent; + u64 iova; + size_t size; + size_t unmapped_size; + char __data[0]; +}; + +struct trace_event_raw_iommu_error { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_driver; + u64 iova; + int flags; + char __data[0]; +}; + +struct trace_event_data_offsets_iommu_group_event { + u32 device; +}; + +struct trace_event_data_offsets_iommu_device_event { + u32 device; +}; + +struct trace_event_data_offsets_map {}; + +struct trace_event_data_offsets_unmap {}; + +struct trace_event_data_offsets_iommu_error { + u32 device; + u32 driver; +}; + +typedef void (*btf_trace_add_device_to_group)(void *, int, struct device *); + +typedef void (*btf_trace_remove_device_from_group)(void *, int, struct device *); + +typedef void (*btf_trace_attach_device_to_domain)(void *, struct device *); + +typedef void (*btf_trace_detach_device_from_domain)(void *, struct device *); + +typedef void (*btf_trace_map)(void *, long unsigned int, phys_addr_t, size_t); + +typedef void (*btf_trace_unmap)(void *, long unsigned int, size_t, size_t); + +typedef void (*btf_trace_io_page_fault)(void *, struct device *, long unsigned int, int); + +struct iommu_dma_msi_page { + struct list_head list; + dma_addr_t iova; + phys_addr_t phys; +}; + +enum iommu_dma_cookie_type { + IOMMU_DMA_IOVA_COOKIE = 0, + IOMMU_DMA_MSI_COOKIE = 1, +}; + +struct iommu_dma_cookie { + enum iommu_dma_cookie_type type; + union { + struct iova_domain iovad; + dma_addr_t msi_iova; + }; + struct list_head msi_page_list; + struct iommu_domain *fq_domain; +}; + +struct ioasid_data { + ioasid_t id; + struct ioasid_set *set; + void *private; + struct callback_head rcu; + refcount_t refs; +}; + +struct ioasid_allocator_data { + struct ioasid_allocator_ops *ops; + struct list_head list; + struct list_head slist; + long unsigned int flags; + struct xarray xa; + struct callback_head rcu; +}; + +struct iova_magazine { + long unsigned int size; + long unsigned int pfns[128]; +}; + +struct iova_cpu_rcache { + spinlock_t lock; + struct iova_magazine *loaded; + struct iova_magazine *prev; +}; + +struct hyperv_root_ir_data { + u8 ioapic_id; + bool is_level; + struct hv_interrupt_entry entry; +} __attribute__((packed)); + +struct cb_id { + __u32 idx; + __u32 val; +}; + +struct cn_msg { + struct cb_id id; + __u32 seq; + __u32 ack; + __u16 len; + __u16 flags; + __u8 data[0]; +}; + +struct cn_queue_dev { + atomic_t refcnt; + unsigned char name[32]; + struct list_head queue_list; + spinlock_t queue_lock; + struct sock *nls; +}; + +struct cn_callback_id { + unsigned char name[32]; + struct cb_id id; +}; + +struct cn_callback_entry { + struct list_head callback_entry; + refcount_t refcnt; + struct cn_queue_dev *pdev; + struct cn_callback_id id; + void (*callback)(struct 
cn_msg *, struct netlink_skb_parms *); + u32 seq; + u32 group; +}; + +struct cn_dev { + struct cb_id id; + u32 seq; + u32 groups; + struct sock *nls; + struct cn_queue_dev *cbdev; +}; + +enum proc_cn_mcast_op { + PROC_CN_MCAST_LISTEN = 1, + PROC_CN_MCAST_IGNORE = 2, +}; + +struct fork_proc_event { + __kernel_pid_t parent_pid; + __kernel_pid_t parent_tgid; + __kernel_pid_t child_pid; + __kernel_pid_t child_tgid; +}; + +struct exec_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; +}; + +struct id_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; + union { + __u32 ruid; + __u32 rgid; + } r; + union { + __u32 euid; + __u32 egid; + } e; +}; + +struct sid_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; +}; + +struct ptrace_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; + __kernel_pid_t tracer_pid; + __kernel_pid_t tracer_tgid; +}; + +struct comm_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; + char comm[16]; +}; + +struct coredump_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; + __kernel_pid_t parent_pid; + __kernel_pid_t parent_tgid; +}; + +struct exit_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; + __u32 exit_code; + __u32 exit_signal; + __kernel_pid_t parent_pid; + __kernel_pid_t parent_tgid; +}; + +struct proc_event { + enum what what; + __u32 cpu; + __u64 timestamp_ns; + union { + struct { + __u32 err; + } ack; + struct fork_proc_event fork; + struct exec_proc_event exec; + struct id_proc_event id; + struct sid_proc_event sid; + struct ptrace_proc_event ptrace; + struct comm_proc_event comm; + struct coredump_proc_event coredump; + struct exit_proc_event exit; + } event_data; +}; + +struct local_event { + local_lock_t lock; + __u32 count; +}; + +struct nvm_ioctl_info_tgt { + __u32 version[3]; + __u32 reserved; + char tgtname[48]; +}; + +struct nvm_ioctl_info { + __u32 version[3]; + __u16 tgtsize; + __u16 reserved16; + __u32 reserved[12]; + struct nvm_ioctl_info_tgt tgts[63]; +}; + +struct nvm_ioctl_device_info { + char devname[32]; + char bmname[48]; + __u32 bmversion[3]; + __u32 flags; + __u32 reserved[8]; +}; + +struct nvm_ioctl_get_devices { + __u32 nr_devices; + __u32 reserved[31]; + struct nvm_ioctl_device_info info[31]; +}; + +struct nvm_ioctl_create_simple { + __u32 lun_begin; + __u32 lun_end; +}; + +struct nvm_ioctl_create_extended { + __u16 lun_begin; + __u16 lun_end; + __u16 op; + __u16 rsv; +}; + +enum { + NVM_CONFIG_TYPE_SIMPLE = 0, + NVM_CONFIG_TYPE_EXTENDED = 1, +}; + +struct nvm_ioctl_create_conf { + __u32 type; + union { + struct nvm_ioctl_create_simple s; + struct nvm_ioctl_create_extended e; + }; +}; + +enum { + NVM_TARGET_FACTORY = 1, +}; + +struct nvm_ioctl_create { + char dev[32]; + char tgttype[48]; + char tgtname[32]; + __u32 flags; + struct nvm_ioctl_create_conf conf; +}; + +struct nvm_ioctl_remove { + char tgtname[32]; + __u32 flags; +}; + +struct nvm_ioctl_dev_init { + char dev[32]; + char mmtype[8]; + __u32 flags; +}; + +enum { + NVM_FACTORY_ERASE_ONLY_USER = 1, + NVM_FACTORY_RESET_HOST_BLKS = 2, + NVM_FACTORY_RESET_GRWN_BBLKS = 4, + NVM_FACTORY_NR_BITS = 8, +}; + +struct nvm_ioctl_dev_factory { + char dev[32]; + __u32 flags; +}; + +enum { + NVM_INFO_CMD = 32, + NVM_GET_DEVICES_CMD = 33, + NVM_DEV_CREATE_CMD = 34, + NVM_DEV_REMOVE_CMD = 35, + NVM_DEV_INIT_CMD = 36, + NVM_DEV_FACTORY_CMD = 37, + NVM_DEV_VIO_ADMIN_CMD = 65, + NVM_DEV_VIO_CMD = 66, + NVM_DEV_VIO_USER_CMD = 67, +}; + 
+enum { + NVM_OCSSD_SPEC_12 = 12, + NVM_OCSSD_SPEC_20 = 20, +}; + +struct ppa_addr { + union { + struct { + u64 ch: 8; + u64 lun: 8; + u64 blk: 16; + u64 reserved: 32; + } a; + struct { + u64 ch: 8; + u64 lun: 8; + u64 blk: 16; + u64 pg: 16; + u64 pl: 4; + u64 sec: 4; + u64 reserved: 8; + } g; + struct { + u64 grp: 8; + u64 pu: 8; + u64 chk: 16; + u64 sec: 24; + u64 reserved: 8; + } m; + struct { + u64 line: 63; + u64 is_cached: 1; + } c; + u64 ppa; + }; +}; + +struct nvm_dev; + +typedef int nvm_id_fn(struct nvm_dev *); + +struct nvm_addrf { + u8 ch_len; + u8 lun_len; + u8 chk_len; + u8 sec_len; + u8 rsv_len[2]; + u8 ch_offset; + u8 lun_offset; + u8 chk_offset; + u8 sec_offset; + u8 rsv_off[2]; + u64 ch_mask; + u64 lun_mask; + u64 chk_mask; + u64 sec_mask; + u64 rsv_mask[2]; +}; + +struct nvm_geo { + u8 major_ver_id; + u8 minor_ver_id; + u8 version; + int num_ch; + int num_lun; + int all_luns; + int all_chunks; + int op; + sector_t total_secs; + u32 num_chk; + u32 clba; + u16 csecs; + u16 sos; + bool ext; + u32 mdts; + u32 ws_min; + u32 ws_opt; + u32 mw_cunits; + u32 maxoc; + u32 maxocpu; + u32 mccap; + u32 trdt; + u32 trdm; + u32 tprt; + u32 tprm; + u32 tbet; + u32 tbem; + struct nvm_addrf addrf; + u8 vmnt; + u32 cap; + u32 dom; + u8 mtype; + u8 fmtype; + u16 cpar; + u32 mpos; + u8 num_pln; + u8 pln_mode; + u16 num_pg; + u16 fpg_sz; +}; + +struct nvm_dev_ops; + +struct nvm_dev { + struct nvm_dev_ops *ops; + struct list_head devices; + struct nvm_geo geo; + long unsigned int *lun_map; + void *dma_pool; + struct request_queue *q; + char name[32]; + void *private_data; + struct kref ref; + void *rmap; + struct mutex mlock; + spinlock_t lock; + struct list_head area_list; + struct list_head targets; +}; + +typedef int nvm_op_bb_tbl_fn(struct nvm_dev *, struct ppa_addr, u8 *); + +typedef int nvm_op_set_bb_fn(struct nvm_dev *, struct ppa_addr *, int, int); + +struct nvm_chk_meta; + +typedef int nvm_get_chk_meta_fn(struct nvm_dev *, sector_t, int, struct nvm_chk_meta *); + +struct nvm_chk_meta { + u8 state; + u8 type; + u8 wi; + u8 rsvd[5]; + u64 slba; + u64 cnlb; + u64 wp; +}; + +struct nvm_rq; + +typedef int nvm_submit_io_fn(struct nvm_dev *, struct nvm_rq *, void *); + +typedef void nvm_end_io_fn(struct nvm_rq *); + +struct nvm_tgt_dev; + +struct nvm_rq { + struct nvm_tgt_dev *dev; + struct bio *bio; + union { + struct ppa_addr ppa_addr; + dma_addr_t dma_ppa_list; + }; + struct ppa_addr *ppa_list; + void *meta_list; + dma_addr_t dma_meta_list; + nvm_end_io_fn *end_io; + uint8_t opcode; + uint16_t nr_ppas; + uint16_t flags; + u64 ppa_status; + int error; + int is_seq; + void *private; +}; + +typedef void *nvm_create_dma_pool_fn(struct nvm_dev *, char *, int); + +typedef void nvm_destroy_dma_pool_fn(void *); + +typedef void *nvm_dev_dma_alloc_fn(struct nvm_dev *, void *, gfp_t, dma_addr_t *); + +typedef void nvm_dev_dma_free_fn(void *, void *, dma_addr_t); + +struct nvm_dev_ops { + nvm_id_fn *identity; + nvm_op_bb_tbl_fn *get_bb_tbl; + nvm_op_set_bb_fn *set_bb_tbl; + nvm_get_chk_meta_fn *get_chk_meta; + nvm_submit_io_fn *submit_io; + nvm_create_dma_pool_fn *create_dma_pool; + nvm_destroy_dma_pool_fn *destroy_dma_pool; + nvm_dev_dma_alloc_fn *dev_dma_alloc; + nvm_dev_dma_free_fn *dev_dma_free; +}; + +enum { + NVM_RSP_L2P = 1, + NVM_RSP_ECC = 2, + NVM_ADDRMODE_LINEAR = 0, + NVM_ADDRMODE_CHANNEL = 1, + NVM_PLANE_SINGLE = 1, + NVM_PLANE_DOUBLE = 2, + NVM_PLANE_QUAD = 4, + NVM_RSP_SUCCESS = 0, + NVM_RSP_NOT_CHANGEABLE = 1, + NVM_RSP_ERR_FAILWRITE = 16639, + NVM_RSP_ERR_EMPTYPAGE = 17151, + 
NVM_RSP_ERR_FAILECC = 17025, + NVM_RSP_ERR_FAILCRC = 16388, + NVM_RSP_WARN_HIGHECC = 18176, + NVM_OP_PWRITE = 145, + NVM_OP_PREAD = 146, + NVM_OP_ERASE = 144, + NVM_IO_SNGL_ACCESS = 0, + NVM_IO_DUAL_ACCESS = 1, + NVM_IO_QUAD_ACCESS = 2, + NVM_IO_SUSPEND = 128, + NVM_IO_SLC_MODE = 256, + NVM_IO_SCRAMBLE_ENABLE = 512, + NVM_BLK_T_FREE = 0, + NVM_BLK_T_BAD = 1, + NVM_BLK_T_GRWN_BAD = 2, + NVM_BLK_T_DEV = 4, + NVM_BLK_T_HOST = 8, + NVM_ID_CAP_SLC = 1, + NVM_ID_CAP_CMD_SUSPEND = 2, + NVM_ID_CAP_SCRAMBLE = 4, + NVM_ID_CAP_ENCRYPT = 8, + NVM_ID_FMTYPE_SLC = 0, + NVM_ID_FMTYPE_MLC = 1, + NVM_ID_DCAP_BBLKMGMT = 1, + NVM_UD_DCAP_ECC = 2, +}; + +struct nvm_addrf_12 { + u8 ch_len; + u8 lun_len; + u8 blk_len; + u8 pg_len; + u8 pln_len; + u8 sec_len; + u8 ch_offset; + u8 lun_offset; + u8 blk_offset; + u8 pg_offset; + u8 pln_offset; + u8 sec_offset; + u64 ch_mask; + u64 lun_mask; + u64 blk_mask; + u64 pg_mask; + u64 pln_mask; + u64 sec_mask; +}; + +enum { + NVM_CHK_ST_FREE = 1, + NVM_CHK_ST_CLOSED = 2, + NVM_CHK_ST_OPEN = 4, + NVM_CHK_ST_OFFLINE = 8, + NVM_CHK_TP_W_SEQ = 1, + NVM_CHK_TP_W_RAN = 2, + NVM_CHK_TP_SZ_SPEC = 16, +}; + +struct nvm_tgt_type; + +struct nvm_target { + struct list_head list; + struct nvm_tgt_dev *dev; + struct nvm_tgt_type *type; + struct gendisk *disk; +}; + +struct nvm_tgt_dev { + struct nvm_geo geo; + struct ppa_addr *luns; + struct request_queue *q; + struct nvm_dev *parent; + void *map; +}; + +typedef sector_t nvm_tgt_capacity_fn(void *); + +typedef void *nvm_tgt_init_fn(struct nvm_tgt_dev *, struct gendisk *, int); + +typedef void nvm_tgt_exit_fn(void *, bool); + +typedef int nvm_tgt_sysfs_init_fn(struct gendisk *); + +typedef void nvm_tgt_sysfs_exit_fn(struct gendisk *); + +struct nvm_tgt_type { + const char *name; + unsigned int version[3]; + int flags; + const struct block_device_operations *bops; + nvm_tgt_capacity_fn *capacity; + nvm_tgt_init_fn *init; + nvm_tgt_exit_fn *exit; + nvm_tgt_sysfs_init_fn *sysfs_init; + nvm_tgt_sysfs_exit_fn *sysfs_exit; + struct list_head list; + struct module *owner; +}; + +enum { + NVM_TGT_F_DEV_L2P = 0, + NVM_TGT_F_HOST_L2P = 1, +}; + +struct nvm_ch_map { + int ch_off; + int num_lun; + int *lun_offs; +}; + +struct nvm_dev_map { + struct nvm_ch_map *chnls; + int num_ch; +}; + +struct component_ops { + int (*bind)(struct device *, struct device *, void *); + void (*unbind)(struct device *, struct device *, void *); +}; + +struct component_master_ops { + int (*bind)(struct device *); + void (*unbind)(struct device *); +}; + +struct component; + +struct component_match_array { + void *data; + int (*compare)(struct device *, void *); + int (*compare_typed)(struct device *, int, void *); + void (*release)(struct device *, void *); + struct component *component; + bool duplicate; +}; + +struct master; + +struct component { + struct list_head node; + struct master *master; + bool bound; + const struct component_ops *ops; + int subcomponent; + struct device *dev; +}; + +struct component_match { + size_t alloc; + size_t num; + struct component_match_array *compare; +}; + +struct master { + struct list_head node; + bool bound; + const struct component_master_ops *ops; + struct device *dev; + struct component_match *match; +}; + +struct fwnode_link { + struct fwnode_handle *supplier; + struct list_head s_hook; + struct fwnode_handle *consumer; + struct list_head c_hook; +}; + +struct wake_irq { + struct device *dev; + unsigned int status; + int irq; + const char *name; +}; + +enum dpm_order { + DPM_ORDER_NONE = 0, + DPM_ORDER_DEV_AFTER_PARENT = 1, + 
DPM_ORDER_PARENT_BEFORE_DEV = 2, + DPM_ORDER_DEV_LAST = 3, +}; + +struct subsys_private { + struct kset subsys; + struct kset *devices_kset; + struct list_head interfaces; + struct mutex mutex; + struct kset *drivers_kset; + struct klist klist_devices; + struct klist klist_drivers; + struct blocking_notifier_head bus_notifier; + unsigned int drivers_autoprobe: 1; + struct bus_type *bus; + struct kset glue_dirs; + struct class *class; +}; + +struct driver_private { + struct kobject kobj; + struct klist klist_devices; + struct klist_node knode_bus; + struct module_kobject *mkobj; + struct device_driver *driver; +}; + +struct device_private { + struct klist klist_children; + struct klist_node knode_parent; + struct klist_node knode_driver; + struct klist_node knode_bus; + struct klist_node knode_class; + struct list_head deferred_probe; + struct device_driver *async_driver; + char *deferred_probe_reason; + struct device *device; + u8 dead: 1; +}; + +union device_attr_group_devres { + const struct attribute_group *group; + const struct attribute_group **groups; +}; + +struct class_dir { + struct kobject kobj; + struct class *class; +}; + +struct root_device { + struct device dev; + struct module *owner; +}; + +struct subsys_dev_iter { + struct klist_iter ki; + const struct device_type *type; +}; + +struct device_attach_data { + struct device *dev; + bool check_async; + bool want_async; + bool have_async; +}; + +struct class_attribute_string { + struct class_attribute attr; + char *str; +}; + +struct class_compat { + struct kobject *kobj; +}; + +struct irq_affinity_devres { + unsigned int count; + unsigned int irq[0]; +}; + +struct platform_object { + struct platform_device pdev; + char name[0]; +}; + +struct cpu_attr { + struct device_attribute attr; + const struct cpumask * const map; +}; + +struct probe { + struct probe *next; + dev_t dev; + long unsigned int range; + struct module *owner; + kobj_probe_t *get; + int (*lock)(dev_t, void *); + void *data; +}; + +struct kobj_map___2 { + struct probe *probes[255]; + struct mutex *lock; +}; + +struct devres_node { + struct list_head entry; + dr_release_t release; +}; + +struct devres___2 { + struct devres_node node; + u8 data[0]; +}; + +struct devres_group { + struct devres_node node[2]; + void *id; + int color; +}; + +struct action_devres { + void *data; + void (*action)(void *); +}; + +struct pages_devres { + long unsigned int addr; + unsigned int order; +}; + +struct attribute_container { + struct list_head node; + struct klist containers; + struct class *class; + const struct attribute_group *grp; + struct device_attribute **attrs; + int (*match)(struct attribute_container *, struct device *); + long unsigned int flags; +}; + +struct internal_container { + struct klist_node node; + struct attribute_container *cont; + struct device classdev; +}; + +struct transport_container; + +struct transport_class { + struct class class; + int (*setup)(struct transport_container *, struct device *, struct device *); + int (*configure)(struct transport_container *, struct device *, struct device *); + int (*remove)(struct transport_container *, struct device *, struct device *); +}; + +struct transport_container { + struct attribute_container ac; + const struct attribute_group *statistics; +}; + +struct anon_transport_class { + struct transport_class tclass; + struct attribute_container container; +}; + +typedef void * (*devcon_match_fn_t)(struct fwnode_handle *, const char *, void *); + +struct mii_bus; + +struct mdio_device { + struct device dev; + struct 
mii_bus *bus; + char modalias[32]; + int (*bus_match)(struct device *, struct device_driver *); + void (*device_free)(struct mdio_device *); + void (*device_remove)(struct mdio_device *); + int addr; + int flags; + struct gpio_desc *reset_gpio; + struct reset_control *reset_ctrl; + unsigned int reset_assert_delay; + unsigned int reset_deassert_delay; +}; + +struct phy_c45_device_ids { + u32 devices_in_package; + u32 mmds_present; + u32 device_ids[32]; +}; + +enum phy_state { + PHY_DOWN = 0, + PHY_READY = 1, + PHY_HALTED = 2, + PHY_UP = 3, + PHY_RUNNING = 4, + PHY_NOLINK = 5, + PHY_CABLETEST = 6, +}; + +typedef enum { + PHY_INTERFACE_MODE_NA = 0, + PHY_INTERFACE_MODE_INTERNAL = 1, + PHY_INTERFACE_MODE_MII = 2, + PHY_INTERFACE_MODE_GMII = 3, + PHY_INTERFACE_MODE_SGMII = 4, + PHY_INTERFACE_MODE_TBI = 5, + PHY_INTERFACE_MODE_REVMII = 6, + PHY_INTERFACE_MODE_RMII = 7, + PHY_INTERFACE_MODE_RGMII = 8, + PHY_INTERFACE_MODE_RGMII_ID = 9, + PHY_INTERFACE_MODE_RGMII_RXID = 10, + PHY_INTERFACE_MODE_RGMII_TXID = 11, + PHY_INTERFACE_MODE_RTBI = 12, + PHY_INTERFACE_MODE_SMII = 13, + PHY_INTERFACE_MODE_XGMII = 14, + PHY_INTERFACE_MODE_XLGMII = 15, + PHY_INTERFACE_MODE_MOCA = 16, + PHY_INTERFACE_MODE_QSGMII = 17, + PHY_INTERFACE_MODE_TRGMII = 18, + PHY_INTERFACE_MODE_100BASEX = 19, + PHY_INTERFACE_MODE_1000BASEX = 20, + PHY_INTERFACE_MODE_2500BASEX = 21, + PHY_INTERFACE_MODE_5GBASER = 22, + PHY_INTERFACE_MODE_RXAUI = 23, + PHY_INTERFACE_MODE_XAUI = 24, + PHY_INTERFACE_MODE_10GBASER = 25, + PHY_INTERFACE_MODE_USXGMII = 26, + PHY_INTERFACE_MODE_10GKR = 27, + PHY_INTERFACE_MODE_MAX = 28, +} phy_interface_t; + +struct phylink; + +struct phy_driver; + +struct phy_led_trigger; + +struct phy_package_shared; + +struct mii_timestamper; + +struct phy_device { + struct mdio_device mdio; + struct phy_driver *drv; + u32 phy_id; + struct phy_c45_device_ids c45_ids; + unsigned int is_c45: 1; + unsigned int is_internal: 1; + unsigned int is_pseudo_fixed_link: 1; + unsigned int is_gigabit_capable: 1; + unsigned int has_fixups: 1; + unsigned int suspended: 1; + unsigned int suspended_by_mdio_bus: 1; + unsigned int sysfs_links: 1; + unsigned int loopback_enabled: 1; + unsigned int downshifted_rate: 1; + unsigned int is_on_sfp_module: 1; + unsigned int mac_managed_pm: 1; + unsigned int autoneg: 1; + unsigned int link: 1; + unsigned int autoneg_complete: 1; + unsigned int interrupts: 1; + enum phy_state state; + u32 dev_flags; + phy_interface_t interface; + int speed; + int duplex; + int port; + int pause; + int asym_pause; + u8 master_slave_get; + u8 master_slave_set; + u8 master_slave_state; + long unsigned int supported[2]; + long unsigned int advertising[2]; + long unsigned int lp_advertising[2]; + long unsigned int adv_old[2]; + u32 eee_broken_modes; + struct phy_led_trigger *phy_led_triggers; + unsigned int phy_num_led_triggers; + struct phy_led_trigger *last_triggered; + struct phy_led_trigger *led_link_trigger; + int irq; + void *priv; + struct phy_package_shared *shared; + struct sk_buff *skb; + void *ehdr; + struct nlattr *nest; + struct delayed_work state_queue; + struct mutex lock; + bool sfp_bus_attached; + struct sfp_bus *sfp_bus; + struct phylink *phylink; + struct net_device *attached_dev; + struct mii_timestamper *mii_ts; + u8 mdix; + u8 mdix_ctrl; + void (*phy_link_change)(struct phy_device *, bool); + void (*adjust_link)(struct net_device *); + const struct macsec_ops *macsec_ops; +}; + +struct phy_tdr_config { + u32 first; + u32 last; + u32 step; + s8 pair; +}; + +struct mdio_bus_stats { + u64_stats_t 
transfers; + u64_stats_t errors; + u64_stats_t writes; + u64_stats_t reads; + struct u64_stats_sync syncp; +}; + +struct mii_bus { + struct module *owner; + const char *name; + char id[61]; + void *priv; + int (*read)(struct mii_bus *, int, int); + int (*write)(struct mii_bus *, int, int, u16); + int (*reset)(struct mii_bus *); + struct mdio_bus_stats stats[32]; + struct mutex mdio_lock; + struct device *parent; + enum { + MDIOBUS_ALLOCATED = 1, + MDIOBUS_REGISTERED = 2, + MDIOBUS_UNREGISTERED = 3, + MDIOBUS_RELEASED = 4, + } state; + struct device dev; + struct mdio_device *mdio_map[32]; + u32 phy_mask; + u32 phy_ignore_ta_mask; + int irq[32]; + int reset_delay_us; + int reset_post_delay_us; + struct gpio_desc *reset_gpiod; + enum { + MDIOBUS_NO_CAP = 0, + MDIOBUS_C22 = 1, + MDIOBUS_C45 = 2, + MDIOBUS_C22_C45 = 3, + } probe_capabilities; + struct mutex shared_lock; + struct phy_package_shared *shared[32]; +}; + +struct mdio_driver_common { + struct device_driver driver; + int flags; +}; + +struct mii_timestamper { + bool (*rxtstamp)(struct mii_timestamper *, struct sk_buff *, int); + void (*txtstamp)(struct mii_timestamper *, struct sk_buff *, int); + int (*hwtstamp)(struct mii_timestamper *, struct ifreq *); + void (*link_state)(struct mii_timestamper *, struct phy_device *); + int (*ts_info)(struct mii_timestamper *, struct ethtool_ts_info *); + struct device *device; +}; + +struct phy_package_shared { + int addr; + refcount_t refcnt; + long unsigned int flags; + size_t priv_size; + void *priv; +}; + +struct phy_driver { + struct mdio_driver_common mdiodrv; + u32 phy_id; + char *name; + u32 phy_id_mask; + const long unsigned int * const features; + u32 flags; + const void *driver_data; + int (*soft_reset)(struct phy_device *); + int (*config_init)(struct phy_device *); + int (*probe)(struct phy_device *); + int (*get_features)(struct phy_device *); + int (*suspend)(struct phy_device *); + int (*resume)(struct phy_device *); + int (*config_aneg)(struct phy_device *); + int (*aneg_done)(struct phy_device *); + int (*read_status)(struct phy_device *); + int (*config_intr)(struct phy_device *); + irqreturn_t (*handle_interrupt)(struct phy_device *); + void (*remove)(struct phy_device *); + int (*match_phy_device)(struct phy_device *); + int (*set_wol)(struct phy_device *, struct ethtool_wolinfo *); + void (*get_wol)(struct phy_device *, struct ethtool_wolinfo *); + void (*link_change_notify)(struct phy_device *); + int (*read_mmd)(struct phy_device *, int, u16); + int (*write_mmd)(struct phy_device *, int, u16, u16); + int (*read_page)(struct phy_device *); + int (*write_page)(struct phy_device *, int); + int (*module_info)(struct phy_device *, struct ethtool_modinfo *); + int (*module_eeprom)(struct phy_device *, struct ethtool_eeprom *, u8 *); + int (*cable_test_start)(struct phy_device *); + int (*cable_test_tdr_start)(struct phy_device *, const struct phy_tdr_config *); + int (*cable_test_get_status)(struct phy_device *, bool *); + int (*get_sset_count)(struct phy_device *); + void (*get_strings)(struct phy_device *, u8 *); + void (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); + int (*get_tunable)(struct phy_device *, struct ethtool_tunable *, void *); + int (*set_tunable)(struct phy_device *, struct ethtool_tunable *, const void *); + int (*set_loopback)(struct phy_device *, bool); + int (*get_sqi)(struct phy_device *); + int (*get_sqi_max)(struct phy_device *); +}; + +struct software_node_ref_args { + const struct software_node *node; + unsigned int nargs; + u64 
args[8]; +}; + +struct swnode { + struct kobject kobj; + struct fwnode_handle fwnode; + const struct software_node *node; + int id; + struct ida child_ids; + struct list_head entry; + struct list_head children; + struct swnode *parent; + unsigned int allocated: 1; + unsigned int managed: 1; +}; + +struct auxiliary_device_id { + char name[32]; + kernel_ulong_t driver_data; +}; + +struct auxiliary_device { + struct device dev; + const char *name; + u32 id; +}; + +struct auxiliary_driver { + int (*probe)(struct auxiliary_device *, const struct auxiliary_device_id *); + void (*remove)(struct auxiliary_device *); + void (*shutdown)(struct auxiliary_device *); + int (*suspend)(struct auxiliary_device *, pm_message_t); + int (*resume)(struct auxiliary_device *); + const char *name; + struct device_driver driver; + const struct auxiliary_device_id *id_table; +}; + +struct req { + struct req *next; + struct completion done; + int err; + const char *name; + umode_t mode; + kuid_t uid; + kgid_t gid; + struct device *dev; +}; + +typedef int (*pm_callback_t)(struct device *); + +enum gpd_status { + GENPD_STATE_ON = 0, + GENPD_STATE_OFF = 1, +}; + +enum genpd_notication { + GENPD_NOTIFY_PRE_OFF = 0, + GENPD_NOTIFY_OFF = 1, + GENPD_NOTIFY_PRE_ON = 2, + GENPD_NOTIFY_ON = 3, +}; + +struct dev_power_governor { + bool (*power_down_ok)(struct dev_pm_domain *); + bool (*suspend_ok)(struct device *); +}; + +struct gpd_dev_ops { + int (*start)(struct device *); + int (*stop)(struct device *); +}; + +struct genpd_power_state { + s64 power_off_latency_ns; + s64 power_on_latency_ns; + s64 residency_ns; + u64 usage; + u64 rejected; + struct fwnode_handle *fwnode; + ktime_t idle_time; + void *data; +}; + +struct opp_table; + +struct dev_pm_opp; + +struct genpd_lock_ops; + +struct generic_pm_domain { + struct device dev; + struct dev_pm_domain domain; + struct list_head gpd_list_node; + struct list_head parent_links; + struct list_head child_links; + struct list_head dev_list; + struct dev_power_governor *gov; + struct work_struct power_off_work; + struct fwnode_handle *provider; + bool has_provider; + const char *name; + atomic_t sd_count; + enum gpd_status status; + unsigned int device_count; + unsigned int suspended_count; + unsigned int prepared_count; + unsigned int performance_state; + cpumask_var_t cpus; + int (*power_off)(struct generic_pm_domain *); + int (*power_on)(struct generic_pm_domain *); + struct raw_notifier_head power_notifiers; + struct opp_table *opp_table; + unsigned int (*opp_to_performance_state)(struct generic_pm_domain *, struct dev_pm_opp *); + int (*set_performance_state)(struct generic_pm_domain *, unsigned int); + struct gpd_dev_ops dev_ops; + s64 max_off_time_ns; + ktime_t next_wakeup; + bool max_off_time_changed; + bool cached_power_down_ok; + bool cached_power_down_state_idx; + int (*attach_dev)(struct generic_pm_domain *, struct device *); + void (*detach_dev)(struct generic_pm_domain *, struct device *); + unsigned int flags; + struct genpd_power_state *states; + void (*free_states)(struct genpd_power_state *, unsigned int); + unsigned int state_count; + unsigned int state_idx; + ktime_t on_time; + ktime_t accounting_time; + const struct genpd_lock_ops *lock_ops; + union { + struct mutex mlock; + struct { + spinlock_t slock; + long unsigned int lock_flags; + }; + }; +}; + +struct genpd_lock_ops { + void (*lock)(struct generic_pm_domain *); + void (*lock_nested)(struct generic_pm_domain *, int); + int (*lock_interruptible)(struct generic_pm_domain *); + void (*unlock)(struct 
generic_pm_domain *); +}; + +struct gpd_link { + struct generic_pm_domain *parent; + struct list_head parent_node; + struct generic_pm_domain *child; + struct list_head child_node; + unsigned int performance_state; + unsigned int prev_performance_state; +}; + +struct gpd_timing_data { + s64 suspend_latency_ns; + s64 resume_latency_ns; + s64 effective_constraint_ns; + bool constraint_changed; + bool cached_suspend_ok; +}; + +struct generic_pm_domain_data { + struct pm_domain_data base; + struct gpd_timing_data td; + struct notifier_block nb; + struct notifier_block *power_nb; + int cpu; + unsigned int performance_state; + ktime_t next_wakeup; + void *data; +}; + +struct pm_clk_notifier_block { + struct notifier_block nb; + struct dev_pm_domain *pm_domain; + char *con_ids[0]; +}; + +enum pce_status { + PCE_STATUS_NONE = 0, + PCE_STATUS_ACQUIRED = 1, + PCE_STATUS_PREPARED = 2, + PCE_STATUS_ENABLED = 3, + PCE_STATUS_ERROR = 4, +}; + +struct pm_clock_entry { + struct list_head node; + char *con_id; + struct clk *clk; + enum pce_status status; + bool enabled_when_prepared; +}; + +struct isa_driver { + int (*match)(struct device *, unsigned int); + int (*probe)(struct device *, unsigned int); + void (*remove)(struct device *, unsigned int); + void (*shutdown)(struct device *, unsigned int); + int (*suspend)(struct device *, unsigned int, pm_message_t); + int (*resume)(struct device *, unsigned int); + struct device_driver driver; + struct device *devices; +}; + +struct isa_dev { + struct device dev; + struct device *next; + unsigned int id; +}; + +struct firmware_fallback_config { + unsigned int force_sysfs_fallback; + unsigned int ignore_sysfs_fallback; + int old_timeout; + int loading_timeout; +}; + +enum fw_opt { + FW_OPT_UEVENT = 1, + FW_OPT_NOWAIT = 2, + FW_OPT_USERHELPER = 4, + FW_OPT_NO_WARN = 8, + FW_OPT_NOCACHE = 16, + FW_OPT_NOFALLBACK_SYSFS = 32, + FW_OPT_FALLBACK_PLATFORM = 64, + FW_OPT_PARTIAL = 128, +}; + +enum fw_status { + FW_STATUS_UNKNOWN = 0, + FW_STATUS_LOADING = 1, + FW_STATUS_DONE = 2, + FW_STATUS_ABORTED = 3, +}; + +struct fw_state { + struct completion completion; + enum fw_status status; +}; + +struct firmware_cache; + +struct fw_priv { + struct kref ref; + struct list_head list; + struct firmware_cache *fwc; + struct fw_state fw_st; + void *data; + size_t size; + size_t allocated_size; + size_t offset; + u32 opt_flags; + bool is_paged_buf; + struct page **pages; + int nr_pages; + int page_array_size; + bool need_uevent; + struct list_head pending_list; + const char *fw_name; +}; + +struct firmware_cache { + spinlock_t lock; + struct list_head head; + int state; + spinlock_t name_lock; + struct list_head fw_names; + struct delayed_work work; + struct notifier_block pm_notify; +}; + +struct fw_cache_entry { + struct list_head list; + const char *name; +}; + +struct fw_name_devm { + long unsigned int magic; + const char *name; +}; + +struct firmware_work { + struct work_struct work; + struct module *module; + const char *name; + struct device *device; + void *context; + void (*cont)(const struct firmware *, void *); + u32 opt_flags; +}; + +struct fw_sysfs { + bool nowait; + struct device dev; + struct fw_priv *fw_priv; + struct firmware *fw; +}; + +struct node_access_nodes { + struct device dev; + struct list_head list_node; + unsigned int access; + struct node_hmem_attrs hmem_attrs; +}; + +struct node_cache_info { + struct device dev; + struct list_head node; + struct node_cache_attrs cache_attrs; +}; + +struct node_attr { + struct device_attribute attr; + enum node_states 
state; +}; + +struct for_each_memory_block_cb_data { + walk_memory_blocks_func_t func; + void *arg; +}; + +struct reg_sequence { + unsigned int reg; + unsigned int def; + unsigned int delay_us; +}; + +struct regmap___2; + +struct regmap_async { + struct list_head list; + struct regmap___2 *map; + void *work_buf; +}; + +struct reg_field { + unsigned int reg; + unsigned int lsb; + unsigned int msb; + unsigned int id_size; + unsigned int id_offset; +}; + +struct regmap_format { + size_t buf_size; + size_t reg_bytes; + size_t pad_bytes; + size_t val_bytes; + void (*format_write)(struct regmap___2 *, unsigned int, unsigned int); + void (*format_reg)(void *, unsigned int, unsigned int); + void (*format_val)(void *, unsigned int, unsigned int); + unsigned int (*parse_val)(const void *); + void (*parse_inplace)(void *); +}; + +struct hwspinlock; + +struct regcache_ops; + +struct regmap___2 { + union { + struct mutex mutex; + struct { + spinlock_t spinlock; + long unsigned int spinlock_flags; + }; + }; + regmap_lock lock; + regmap_unlock unlock; + void *lock_arg; + gfp_t alloc_flags; + struct device *dev; + void *work_buf; + struct regmap_format format; + const struct regmap_bus *bus; + void *bus_context; + const char *name; + bool async; + spinlock_t async_lock; + wait_queue_head_t async_waitq; + struct list_head async_list; + struct list_head async_free; + int async_ret; + bool debugfs_disable; + struct dentry *debugfs; + const char *debugfs_name; + unsigned int debugfs_reg_len; + unsigned int debugfs_val_len; + unsigned int debugfs_tot_len; + struct list_head debugfs_off_cache; + struct mutex cache_lock; + unsigned int max_register; + bool (*writeable_reg)(struct device *, unsigned int); + bool (*readable_reg)(struct device *, unsigned int); + bool (*volatile_reg)(struct device *, unsigned int); + bool (*precious_reg)(struct device *, unsigned int); + bool (*writeable_noinc_reg)(struct device *, unsigned int); + bool (*readable_noinc_reg)(struct device *, unsigned int); + const struct regmap_access_table *wr_table; + const struct regmap_access_table *rd_table; + const struct regmap_access_table *volatile_table; + const struct regmap_access_table *precious_table; + const struct regmap_access_table *wr_noinc_table; + const struct regmap_access_table *rd_noinc_table; + int (*reg_read)(void *, unsigned int, unsigned int *); + int (*reg_write)(void *, unsigned int, unsigned int); + int (*reg_update_bits)(void *, unsigned int, unsigned int, unsigned int); + bool defer_caching; + long unsigned int read_flag_mask; + long unsigned int write_flag_mask; + int reg_shift; + int reg_stride; + int reg_stride_order; + const struct regcache_ops *cache_ops; + enum regcache_type cache_type; + unsigned int cache_size_raw; + unsigned int cache_word_size; + unsigned int num_reg_defaults; + unsigned int num_reg_defaults_raw; + bool cache_only; + bool cache_bypass; + bool cache_free; + struct reg_default *reg_defaults; + const void *reg_defaults_raw; + void *cache; + bool cache_dirty; + bool no_sync_defaults; + struct reg_sequence *patch; + int patch_regs; + bool use_single_read; + bool use_single_write; + bool can_multi_write; + size_t max_raw_read; + size_t max_raw_write; + struct rb_root range_tree; + void *selector_work_buf; + struct hwspinlock *hwlock; + bool can_sleep; +}; + +struct regcache_ops { + const char *name; + enum regcache_type type; + int (*init)(struct regmap___2 *); + int (*exit)(struct regmap___2 *); + void (*debugfs_init)(struct regmap___2 *); + int (*read)(struct regmap___2 *, unsigned int, 
unsigned int *); + int (*write)(struct regmap___2 *, unsigned int, unsigned int); + int (*sync)(struct regmap___2 *, unsigned int, unsigned int); + int (*drop)(struct regmap___2 *, unsigned int, unsigned int); +}; + +struct regmap_range_node { + struct rb_node node; + const char *name; + struct regmap___2 *map; + unsigned int range_min; + unsigned int range_max; + unsigned int selector_reg; + unsigned int selector_mask; + int selector_shift; + unsigned int window_start; + unsigned int window_len; +}; + +struct regmap_field { + struct regmap___2 *regmap; + unsigned int mask; + unsigned int shift; + unsigned int reg; + unsigned int id_size; + unsigned int id_offset; +}; + +struct trace_event_raw_regmap_reg { + struct trace_entry ent; + u32 __data_loc_name; + unsigned int reg; + unsigned int val; + char __data[0]; +}; + +struct trace_event_raw_regmap_block { + struct trace_entry ent; + u32 __data_loc_name; + unsigned int reg; + int count; + char __data[0]; +}; + +struct trace_event_raw_regcache_sync { + struct trace_entry ent; + u32 __data_loc_name; + u32 __data_loc_status; + u32 __data_loc_type; + char __data[0]; +}; + +struct trace_event_raw_regmap_bool { + struct trace_entry ent; + u32 __data_loc_name; + int flag; + char __data[0]; +}; + +struct trace_event_raw_regmap_async { + struct trace_entry ent; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_regcache_drop_region { + struct trace_entry ent; + u32 __data_loc_name; + unsigned int from; + unsigned int to; + char __data[0]; +}; + +struct trace_event_data_offsets_regmap_reg { + u32 name; +}; + +struct trace_event_data_offsets_regmap_block { + u32 name; +}; + +struct trace_event_data_offsets_regcache_sync { + u32 name; + u32 status; + u32 type; +}; + +struct trace_event_data_offsets_regmap_bool { + u32 name; +}; + +struct trace_event_data_offsets_regmap_async { + u32 name; +}; + +struct trace_event_data_offsets_regcache_drop_region { + u32 name; +}; + +typedef void (*btf_trace_regmap_reg_write)(void *, struct regmap___2 *, unsigned int, unsigned int); + +typedef void (*btf_trace_regmap_reg_read)(void *, struct regmap___2 *, unsigned int, unsigned int); + +typedef void (*btf_trace_regmap_reg_read_cache)(void *, struct regmap___2 *, unsigned int, unsigned int); + +typedef void (*btf_trace_regmap_hw_read_start)(void *, struct regmap___2 *, unsigned int, int); + +typedef void (*btf_trace_regmap_hw_read_done)(void *, struct regmap___2 *, unsigned int, int); + +typedef void (*btf_trace_regmap_hw_write_start)(void *, struct regmap___2 *, unsigned int, int); + +typedef void (*btf_trace_regmap_hw_write_done)(void *, struct regmap___2 *, unsigned int, int); + +typedef void (*btf_trace_regcache_sync)(void *, struct regmap___2 *, const char *, const char *); + +typedef void (*btf_trace_regmap_cache_only)(void *, struct regmap___2 *, bool); + +typedef void (*btf_trace_regmap_cache_bypass)(void *, struct regmap___2 *, bool); + +typedef void (*btf_trace_regmap_async_write_start)(void *, struct regmap___2 *, unsigned int, int); + +typedef void (*btf_trace_regmap_async_io_complete)(void *, struct regmap___2 *); + +typedef void (*btf_trace_regmap_async_complete_start)(void *, struct regmap___2 *); + +typedef void (*btf_trace_regmap_async_complete_done)(void *, struct regmap___2 *); + +typedef void (*btf_trace_regcache_drop_region)(void *, struct regmap___2 *, unsigned int, unsigned int); + +struct regcache_rbtree_node { + void *block; + long int *cache_present; + unsigned int base_reg; + unsigned int blklen; + struct rb_node node; +}; 
+ +struct regcache_rbtree_ctx { + struct rb_root root; + struct regcache_rbtree_node *cached_rbnode; +}; + +struct regmap_debugfs_off_cache { + struct list_head list; + off_t min; + off_t max; + unsigned int base_reg; + unsigned int max_reg; +}; + +struct regmap_debugfs_node { + struct regmap___2 *map; + struct list_head link; +}; + +struct regmap_async_spi { + struct regmap_async core; + struct spi_message m; + struct spi_transfer t[2]; +}; + +struct regmap_mmio_context { + void *regs; + unsigned int val_bytes; + bool relaxed_mmio; + bool attached_clk; + struct clk *clk; + void (*reg_write)(struct regmap_mmio_context *, unsigned int, unsigned int); + unsigned int (*reg_read)(struct regmap_mmio_context *, unsigned int); +}; + +struct regmap_irq_chip_data___2 { + struct mutex lock; + struct irq_chip irq_chip; + struct regmap___2 *map; + const struct regmap_irq_chip *chip; + int irq_base; + struct irq_domain *domain; + int irq; + int wake_count; + void *status_reg_buf; + unsigned int *main_status_buf; + unsigned int *status_buf; + unsigned int *mask_buf; + unsigned int *mask_buf_def; + unsigned int *wake_buf; + unsigned int *type_buf; + unsigned int *type_buf_def; + unsigned int **virt_buf; + unsigned int irq_reg_stride; + unsigned int type_reg_stride; + bool clear_status: 1; +}; + +struct devcd_entry { + struct device devcd_dev; + void *data; + size_t datalen; + struct module *owner; + ssize_t (*read)(char *, loff_t, size_t, void *, size_t); + void (*free)(void *); + struct delayed_work del_wk; + struct device *failing_dev; +}; + +typedef void (*irq_write_msi_msg_t)(struct msi_desc *, struct msi_msg *); + +struct platform_msi_priv_data { + struct device *dev; + void *host_data; + msi_alloc_info_t arg; + irq_write_msi_msg_t write_msg; + int devid; +}; + +typedef long unsigned int __kernel_old_dev_t; + +enum { + LO_FLAGS_READ_ONLY = 1, + LO_FLAGS_AUTOCLEAR = 4, + LO_FLAGS_PARTSCAN = 8, + LO_FLAGS_DIRECT_IO = 16, +}; + +struct loop_info { + int lo_number; + __kernel_old_dev_t lo_device; + long unsigned int lo_inode; + __kernel_old_dev_t lo_rdevice; + int lo_offset; + int lo_encrypt_type; + int lo_encrypt_key_size; + int lo_flags; + char lo_name[64]; + unsigned char lo_encrypt_key[32]; + long unsigned int lo_init[2]; + char reserved[4]; +}; + +struct loop_info64 { + __u64 lo_device; + __u64 lo_inode; + __u64 lo_rdevice; + __u64 lo_offset; + __u64 lo_sizelimit; + __u32 lo_number; + __u32 lo_encrypt_type; + __u32 lo_encrypt_key_size; + __u32 lo_flags; + __u8 lo_file_name[64]; + __u8 lo_crypt_name[64]; + __u8 lo_encrypt_key[32]; + __u64 lo_init[2]; +}; + +struct loop_config { + __u32 fd; + __u32 block_size; + struct loop_info64 info; + __u64 __reserved[8]; +}; + +enum { + Lo_unbound = 0, + Lo_bound = 1, + Lo_rundown = 2, + Lo_deleting = 3, +}; + +struct loop_func_table; + +struct loop_device { + int lo_number; + atomic_t lo_refcnt; + loff_t lo_offset; + loff_t lo_sizelimit; + int lo_flags; + int (*transfer)(struct loop_device *, int, struct page *, unsigned int, struct page *, unsigned int, int, sector_t); + char lo_file_name[64]; + char lo_crypt_name[64]; + char lo_encrypt_key[32]; + int lo_encrypt_key_size; + struct loop_func_table *lo_encryption; + __u32 lo_init[2]; + kuid_t lo_key_owner; + int (*ioctl)(struct loop_device *, int, long unsigned int); + struct file *lo_backing_file; + struct block_device *lo_device; + void *key_data; + gfp_t old_gfp_mask; + spinlock_t lo_lock; + int lo_state; + struct kthread_worker worker; + struct task_struct *worker_task; + bool use_dio; + bool sysfs_inited; + 
struct request_queue *lo_queue; + struct blk_mq_tag_set tag_set; + struct gendisk *lo_disk; + struct mutex lo_mutex; +}; + +struct loop_func_table { + int number; + int (*transfer)(struct loop_device *, int, struct page *, unsigned int, struct page *, unsigned int, int, sector_t); + int (*init)(struct loop_device *, const struct loop_info64 *); + int (*release)(struct loop_device *); + int (*ioctl)(struct loop_device *, int, long unsigned int); + struct module *owner; +}; + +struct loop_cmd { + struct kthread_work work; + bool use_aio; + atomic_t ref; + long int ret; + struct kiocb iocb; + struct bio_vec *bvec; + struct cgroup_subsys_state *css; +}; + +struct compat_loop_info { + compat_int_t lo_number; + compat_dev_t lo_device; + compat_ulong_t lo_inode; + compat_dev_t lo_rdevice; + compat_int_t lo_offset; + compat_int_t lo_encrypt_type; + compat_int_t lo_encrypt_key_size; + compat_int_t lo_flags; + char lo_name[64]; + unsigned char lo_encrypt_key[32]; + compat_ulong_t lo_init[2]; + char reserved[4]; +}; + +typedef unsigned int RING_IDX; + +typedef uint16_t blkif_vdev_t; + +typedef uint64_t blkif_sector_t; + +struct blkif_request_segment { + grant_ref_t gref; + uint8_t first_sect; + uint8_t last_sect; +}; + +struct blkif_request_rw { + uint8_t nr_segments; + blkif_vdev_t handle; + uint32_t _pad1; + uint64_t id; + blkif_sector_t sector_number; + struct blkif_request_segment seg[11]; +} __attribute__((packed)); + +struct blkif_request_discard { + uint8_t flag; + blkif_vdev_t _pad1; + uint32_t _pad2; + uint64_t id; + blkif_sector_t sector_number; + uint64_t nr_sectors; + uint8_t _pad3; +} __attribute__((packed)); + +struct blkif_request_other { + uint8_t _pad1; + blkif_vdev_t _pad2; + uint32_t _pad3; + uint64_t id; +} __attribute__((packed)); + +struct blkif_request_indirect { + uint8_t indirect_op; + uint16_t nr_segments; + uint32_t _pad1; + uint64_t id; + blkif_sector_t sector_number; + blkif_vdev_t handle; + uint16_t _pad2; + grant_ref_t indirect_grefs[8]; + uint32_t _pad3; +} __attribute__((packed)); + +struct blkif_request { + uint8_t operation; + union { + struct blkif_request_rw rw; + struct blkif_request_discard discard; + struct blkif_request_other other; + struct blkif_request_indirect indirect; + } u; +} __attribute__((packed)); + +struct blkif_response { + uint64_t id; + uint8_t operation; + int16_t status; +}; + +union blkif_sring_entry { + struct blkif_request req; + struct blkif_response rsp; +}; + +struct blkif_sring { + RING_IDX req_prod; + RING_IDX req_event; + RING_IDX rsp_prod; + RING_IDX rsp_event; + uint8_t pad[48]; + union blkif_sring_entry ring[1]; +}; + +struct blkif_front_ring { + RING_IDX req_prod_pvt; + RING_IDX rsp_cons; + unsigned int nr_ents; + struct blkif_sring *sring; +}; + +enum blkif_state { + BLKIF_STATE_DISCONNECTED = 0, + BLKIF_STATE_CONNECTED = 1, + BLKIF_STATE_SUSPENDED = 2, +}; + +struct grant { + grant_ref_t gref; + struct page *page; + struct list_head node; +}; + +enum blk_req_status { + REQ_WAITING = 0, + REQ_DONE = 1, + REQ_ERROR = 2, + REQ_EOPNOTSUPP = 3, +}; + +struct blk_shadow { + struct blkif_request req; + struct request *request; + struct grant **grants_used; + struct grant **indirect_grants; + struct scatterlist *sg; + unsigned int num_sg; + enum blk_req_status status; + long unsigned int associated_id; +}; + +struct blkif_req { + blk_status_t error; +}; + +struct blkfront_info; + +struct blkfront_ring_info { + spinlock_t ring_lock; + struct blkif_front_ring ring; + unsigned int ring_ref[16]; + unsigned int evtchn; + unsigned int irq; + 
struct work_struct work; + struct gnttab_free_callback callback; + struct list_head indirect_pages; + struct list_head grants; + unsigned int persistent_gnts_c; + long unsigned int shadow_free; + struct blkfront_info *dev_info; + struct blk_shadow shadow[0]; +}; + +struct blkfront_info { + struct mutex mutex; + struct xenbus_device *xbdev; + struct gendisk *gd; + u16 sector_size; + unsigned int physical_sector_size; + int vdevice; + blkif_vdev_t handle; + enum blkif_state connected; + unsigned int nr_ring_pages; + struct request_queue *rq; + unsigned int feature_flush: 1; + unsigned int feature_fua: 1; + unsigned int feature_discard: 1; + unsigned int feature_secdiscard: 1; + unsigned int feature_persistent: 1; + unsigned int discard_granularity; + unsigned int discard_alignment; + unsigned int max_indirect_segments; + int is_ready; + struct blk_mq_tag_set tag_set; + struct blkfront_ring_info *rinfo; + unsigned int nr_rings; + unsigned int rinfo_size; + struct list_head requests; + struct bio_list bio_list; + struct list_head info_list; +}; + +struct setup_rw_req { + unsigned int grant_idx; + struct blkif_request_segment *segments; + struct blkfront_ring_info *rinfo; + struct blkif_request *ring_req; + grant_ref_t gref_head; + unsigned int id; + bool need_copy; + unsigned int bvec_off; + char *bvec_data; + bool require_extra_req; + struct blkif_request *extra_ring_req; +}; + +struct copy_from_grant { + const struct blk_shadow *s; + unsigned int grant_idx; + unsigned int bvec_offset; + char *bvec_data; +}; + +struct sram_partition { + void *base; + struct gen_pool *pool; + struct bin_attribute battr; + struct mutex lock; + struct list_head list; +}; + +struct sram_dev { + struct device *dev; + void *virt_base; + struct gen_pool *pool; + struct clk *clk; + struct sram_partition *partition; + u32 partitions; +}; + +struct sram_reserve { + struct list_head list; + u32 start; + u32 size; + bool export; + bool pool; + bool protect_exec; + const char *label; +}; + +struct mfd_cell_acpi_match { + const char *pnpid; + const long long unsigned int adr; +}; + +enum { + CHIP_INVALID = 0, + CHIP_PM8606 = 1, + CHIP_PM8607 = 2, + CHIP_MAX = 3, +}; + +enum pm8606_ref_gp_and_osc_clients { + REF_GP_NO_CLIENTS = 0, + WLED1_DUTY = 1, + WLED2_DUTY = 2, + WLED3_DUTY = 4, + RGB1_ENABLE = 8, + RGB2_ENABLE = 16, + LDO_VBR_EN = 32, + REF_GP_MAX_CLIENT = 65535, +}; + +enum { + PM8607_IRQ_ONKEY = 0, + PM8607_IRQ_EXTON = 1, + PM8607_IRQ_CHG = 2, + PM8607_IRQ_BAT = 3, + PM8607_IRQ_RTC = 4, + PM8607_IRQ_CC = 5, + PM8607_IRQ_VBAT = 6, + PM8607_IRQ_VCHG = 7, + PM8607_IRQ_VSYS = 8, + PM8607_IRQ_TINT = 9, + PM8607_IRQ_GPADC0 = 10, + PM8607_IRQ_GPADC1 = 11, + PM8607_IRQ_GPADC2 = 12, + PM8607_IRQ_GPADC3 = 13, + PM8607_IRQ_AUDIO_SHORT = 14, + PM8607_IRQ_PEN = 15, + PM8607_IRQ_HEADSET = 16, + PM8607_IRQ_HOOK = 17, + PM8607_IRQ_MICIN = 18, + PM8607_IRQ_CHG_FAIL = 19, + PM8607_IRQ_CHG_DONE = 20, + PM8607_IRQ_CHG_FAULT = 21, +}; + +struct pm860x_chip { + struct device *dev; + struct mutex irq_lock; + struct mutex osc_lock; + struct i2c_client *client; + struct i2c_client *companion; + struct regmap *regmap; + struct regmap *regmap_companion; + int buck3_double; + int companion_addr; + short unsigned int osc_vote; + int id; + int irq_mode; + int irq_base; + int core_irq; + unsigned char chip_version; + unsigned char osc_status; + unsigned int wakeup_flag; +}; + +enum { + GI2C_PORT = 0, + PI2C_PORT = 1, +}; + +struct pm860x_backlight_pdata { + int pwm; + int iset; +}; + +struct pm860x_led_pdata { + int iset; +}; + +struct 
pm860x_rtc_pdata { + int (*sync)(unsigned int); + int vrtc; +}; + +struct pm860x_touch_pdata { + int gpadc_prebias; + int slot_cycle; + int off_scale; + int sw_cal; + int tsi_prebias; + int pen_prebias; + int pen_prechg; + int res_x; + long unsigned int flags; +}; + +struct pm860x_power_pdata { + int max_capacity; + int resistor; +}; + +struct charger_desc; + +struct pm860x_platform_data { + struct pm860x_backlight_pdata *backlight; + struct pm860x_led_pdata *led; + struct pm860x_rtc_pdata *rtc; + struct pm860x_touch_pdata *touch; + struct pm860x_power_pdata *power; + struct regulator_init_data *buck1; + struct regulator_init_data *buck2; + struct regulator_init_data *buck3; + struct regulator_init_data *ldo1; + struct regulator_init_data *ldo2; + struct regulator_init_data *ldo3; + struct regulator_init_data *ldo4; + struct regulator_init_data *ldo5; + struct regulator_init_data *ldo6; + struct regulator_init_data *ldo7; + struct regulator_init_data *ldo8; + struct regulator_init_data *ldo9; + struct regulator_init_data *ldo10; + struct regulator_init_data *ldo12; + struct regulator_init_data *ldo_vibrator; + struct regulator_init_data *ldo14; + struct charger_desc *chg_desc; + int companion_addr; + int i2c_port; + int irq_mode; + int irq_base; + int num_leds; + int num_backlights; +}; + +enum polling_modes { + CM_POLL_DISABLE = 0, + CM_POLL_ALWAYS = 1, + CM_POLL_EXTERNAL_POWER_ONLY = 2, + CM_POLL_CHARGING_ONLY = 3, +}; + +enum data_source { + CM_BATTERY_PRESENT = 0, + CM_NO_BATTERY = 1, + CM_FUEL_GAUGE = 2, + CM_CHARGER_STAT = 3, +}; + +struct charger_regulator; + +struct charger_desc { + const char *psy_name; + enum polling_modes polling_mode; + unsigned int polling_interval_ms; + unsigned int fullbatt_vchkdrop_uV; + unsigned int fullbatt_uV; + unsigned int fullbatt_soc; + unsigned int fullbatt_full_capacity; + enum data_source battery_present; + const char **psy_charger_stat; + int num_charger_regulators; + struct charger_regulator *charger_regulators; + const struct attribute_group **sysfs_groups; + const char *psy_fuel_gauge; + const char *thermal_zone; + int temp_min; + int temp_max; + int temp_diff; + bool measure_battery_temp; + u32 charging_max_duration_ms; + u32 discharging_max_duration_ms; +}; + +struct charger_manager; + +struct charger_cable { + const char *extcon_name; + const char *name; + struct extcon_dev *extcon_dev; + u64 extcon_type; + struct work_struct wq; + struct notifier_block nb; + bool attached; + struct charger_regulator *charger; + int min_uA; + int max_uA; + struct charger_manager *cm; +}; + +struct charger_regulator { + const char *regulator_name; + struct regulator *consumer; + int externally_control; + struct charger_cable *cables; + int num_cables; + struct attribute_group attr_grp; + struct device_attribute attr_name; + struct device_attribute attr_state; + struct device_attribute attr_externally_control; + struct attribute *attrs[4]; + struct charger_manager *cm; +}; + +struct charger_manager { + struct list_head entry; + struct device *dev; + struct charger_desc *desc; + struct thermal_zone_device *tzd_batt; + bool charger_enabled; + int emergency_stop; + char psy_name_buf[31]; + struct power_supply_desc charger_psy_desc; + struct power_supply *charger_psy; + u64 charging_start_time; + u64 charging_end_time; + int battery_status; +}; + +struct pm860x_irq_data { + int reg; + int mask_reg; + int enable; + int offs; +}; + +struct htcpld_chip_platform_data { + unsigned int addr; + unsigned int reset; + unsigned int num_gpios; + unsigned int gpio_out_base; 
+ unsigned int gpio_in_base; + unsigned int irq_base; + unsigned int num_irqs; +}; + +struct htcpld_core_platform_data { + unsigned int int_reset_gpio_hi; + unsigned int int_reset_gpio_lo; + unsigned int i2c_adapter_id; + struct htcpld_chip_platform_data *chip; + unsigned int num_chip; +}; + +struct htcpld_chip { + spinlock_t lock; + u8 reset; + u8 addr; + struct device *dev; + struct i2c_client *client; + u8 cache_out; + struct gpio_chip chip_out; + u8 cache_in; + struct gpio_chip chip_in; + u16 irqs_enabled; + uint irq_start; + int nirqs; + unsigned int flow_type; + struct work_struct set_val_work; +}; + +struct htcpld_data { + u16 irqs_enabled; + uint irq_start; + int nirqs; + uint chained_irq; + unsigned int int_reset_gpio_hi; + unsigned int int_reset_gpio_lo; + struct htcpld_chip *chip; + unsigned int nchips; +}; + +struct arizona_ldo1_pdata { + const struct regulator_init_data *init_data; +}; + +struct arizona_micsupp_pdata { + const struct regulator_init_data *init_data; +}; + +struct arizona_micbias { + int mV; + unsigned int ext_cap: 1; + unsigned int discharge: 1; + unsigned int soft_start: 1; + unsigned int bypass: 1; +}; + +struct arizona_micd_config { + unsigned int src; + unsigned int bias; + bool gpio; +}; + +struct arizona_micd_range { + int max; + int key; +}; + +struct arizona_pdata { + struct gpio_desc *reset; + struct arizona_micsupp_pdata micvdd; + struct arizona_ldo1_pdata ldo1; + int clk32k_src; + unsigned int irq_flags; + int gpio_base; + unsigned int gpio_defaults[5]; + unsigned int max_channels_clocked[3]; + bool jd_gpio5; + bool jd_gpio5_nopull; + bool jd_invert; + bool hpdet_acc_id; + bool hpdet_acc_id_line; + int hpdet_id_gpio; + unsigned int hpdet_channel; + bool micd_software_compare; + unsigned int micd_detect_debounce; + int micd_pol_gpio; + unsigned int micd_bias_start_time; + unsigned int micd_rate; + unsigned int micd_dbtime; + unsigned int micd_timeout; + bool micd_force_micbias; + const struct arizona_micd_range *micd_ranges; + int num_micd_ranges; + struct arizona_micd_config *micd_configs; + int num_micd_configs; + int dmic_ref[4]; + struct arizona_micbias micbias[3]; + int inmode[4]; + int out_mono[6]; + unsigned int out_vol_limit[12]; + unsigned int spk_mute[2]; + unsigned int spk_fmt[2]; + unsigned int hap_act; + int irq_gpio; + unsigned int gpsw; +}; + +enum { + ARIZONA_MCLK1 = 0, + ARIZONA_MCLK2 = 1, + ARIZONA_NUM_MCLK = 2, +}; + +enum arizona_type { + WM5102 = 1, + WM5110 = 2, + WM8997 = 3, + WM8280 = 4, + WM8998 = 5, + WM1814 = 6, + WM1831 = 7, + CS47L24 = 8, +}; + +struct snd_soc_dapm_context; + +struct arizona { + struct regmap *regmap; + struct device *dev; + enum arizona_type type; + unsigned int rev; + int num_core_supplies; + struct regulator_bulk_data core_supplies[2]; + struct regulator *dcvdd; + bool has_fully_powered_off; + struct arizona_pdata pdata; + unsigned int external_dcvdd: 1; + int irq; + struct irq_domain *virq; + struct regmap_irq_chip_data *aod_irq_chip; + struct regmap_irq_chip_data *irq_chip; + bool hpdet_clamp; + unsigned int hp_ena; + struct mutex clk_lock; + int clk32k_ref; + struct clk *mclk[2]; + bool ctrlif_error; + struct snd_soc_dapm_context *dapm; + int tdm_width[3]; + int tdm_slots[3]; + uint16_t dac_comp_coeff; + uint8_t dac_comp_enabled; + struct mutex dac_comp_lock; + struct blocking_notifier_head notifier; +}; + +struct arizona_sysclk_state { + unsigned int fll; + unsigned int sysclk; +}; + +struct wm8400_platform_data { + int (*platform_init)(struct device *); +}; + +struct wm8400 { + struct device *dev; 
+ struct regmap *regmap; + struct platform_device regulators[6]; +}; + +enum wm831x_auxadc { + WM831X_AUX_CAL = 15, + WM831X_AUX_BKUP_BATT = 10, + WM831X_AUX_WALL = 9, + WM831X_AUX_BATT = 8, + WM831X_AUX_USB = 7, + WM831X_AUX_SYSVDD = 6, + WM831X_AUX_BATT_TEMP = 5, + WM831X_AUX_CHIP_TEMP = 4, + WM831X_AUX_AUX4 = 3, + WM831X_AUX_AUX3 = 2, + WM831X_AUX_AUX2 = 1, + WM831X_AUX_AUX1 = 0, +}; + +struct wm831x_backlight_pdata { + int isink; + int max_uA; +}; + +struct wm831x_backup_pdata { + int charger_enable; + int no_constant_voltage; + int vlim; + int ilim; +}; + +struct wm831x_battery_pdata { + int enable; + int fast_enable; + int off_mask; + int trickle_ilim; + int vsel; + int eoc_iterm; + int fast_ilim; + int timeout; +}; + +enum wm831x_status_src { + WM831X_STATUS_PRESERVE = 0, + WM831X_STATUS_OTP = 1, + WM831X_STATUS_POWER = 2, + WM831X_STATUS_CHARGER = 3, + WM831X_STATUS_MANUAL = 4, +}; + +struct wm831x_status_pdata { + enum wm831x_status_src default_src; + const char *name; + const char *default_trigger; +}; + +struct wm831x_touch_pdata { + int fivewire; + int isel; + int rpu; + int pressure; + unsigned int data_irq; + int data_irqf; + unsigned int pd_irq; + int pd_irqf; +}; + +enum wm831x_watchdog_action { + WM831X_WDOG_NONE = 0, + WM831X_WDOG_INTERRUPT = 1, + WM831X_WDOG_RESET = 2, + WM831X_WDOG_WAKE = 3, +}; + +struct wm831x_watchdog_pdata { + enum wm831x_watchdog_action primary; + enum wm831x_watchdog_action secondary; + unsigned int software: 1; +}; + +struct wm831x; + +struct wm831x_pdata { + int wm831x_num; + int (*pre_init)(struct wm831x *); + int (*post_init)(struct wm831x *); + bool irq_cmos; + bool disable_touch; + bool soft_shutdown; + int irq_base; + int gpio_base; + int gpio_defaults[16]; + struct wm831x_backlight_pdata *backlight; + struct wm831x_backup_pdata *backup; + struct wm831x_battery_pdata *battery; + struct wm831x_touch_pdata *touch; + struct wm831x_watchdog_pdata *watchdog; + struct wm831x_status_pdata *status[2]; + struct regulator_init_data *dcdc[4]; + struct regulator_init_data *epe[2]; + struct regulator_init_data *ldo[11]; + struct regulator_init_data *isink[2]; +}; + +enum wm831x_parent { + WM8310 = 33552, + WM8311 = 33553, + WM8312 = 33554, + WM8320 = 33568, + WM8321 = 33569, + WM8325 = 33573, + WM8326 = 33574, +}; + +typedef int (*wm831x_auxadc_read_fn)(struct wm831x *, enum wm831x_auxadc); + +struct wm831x { + struct mutex io_lock; + struct device *dev; + struct regmap *regmap; + struct wm831x_pdata pdata; + enum wm831x_parent type; + int irq; + struct mutex irq_lock; + struct irq_domain *irq_domain; + int irq_masks_cur[5]; + int irq_masks_cache[5]; + bool soft_shutdown; + unsigned int has_gpio_ena: 1; + unsigned int has_cs_sts: 1; + unsigned int charger_irq_wake: 1; + int num_gpio; + int gpio_update[16]; + bool gpio_level_high[16]; + bool gpio_level_low[16]; + struct mutex auxadc_lock; + struct list_head auxadc_pending; + u16 auxadc_active; + wm831x_auxadc_read_fn auxadc_read; + struct mutex key_lock; + unsigned int locked: 1; +}; + +struct wm831x_irq_data { + int primary; + int reg; + int mask; +}; + +struct wm831x_auxadc_req { + struct list_head list; + enum wm831x_auxadc input; + int val; + struct completion done; +}; + +struct wm8350_audio_platform_data { + int vmid_discharge_msecs; + int drain_msecs; + int cap_discharge_msecs; + int vmid_charge_msecs; + u32 vmid_s_curve: 2; + u32 dis_out4: 2; + u32 dis_out3: 2; + u32 dis_out2: 2; + u32 dis_out1: 2; + u32 vroi_out4: 1; + u32 vroi_out3: 1; + u32 vroi_out2: 1; + u32 vroi_out1: 1; + u32 vroi_enable: 
1; + u32 codec_current_on: 2; + u32 codec_current_standby: 2; + u32 codec_current_charge: 2; +}; + +struct wm8350_codec { + struct platform_device *pdev; + struct wm8350_audio_platform_data *platform_data; +}; + +struct wm8350_gpio { + struct platform_device *pdev; +}; + +struct wm8350_led { + struct platform_device *pdev; + struct work_struct work; + spinlock_t value_lock; + enum led_brightness value; + struct led_classdev cdev; + int max_uA_index; + int enabled; + struct regulator *isink; + struct regulator_consumer_supply isink_consumer; + struct regulator_init_data isink_init; + struct regulator *dcdc; + struct regulator_consumer_supply dcdc_consumer; + struct regulator_init_data dcdc_init; +}; + +struct wm8350_pmic { + int max_dcdc; + int max_isink; + int isink_A_dcdc; + int isink_B_dcdc; + u16 dcdc1_hib_mode; + u16 dcdc3_hib_mode; + u16 dcdc4_hib_mode; + u16 dcdc6_hib_mode; + struct platform_device *pdev[12]; + struct wm8350_led led[2]; +}; + +struct rtc_device___2; + +struct wm8350_rtc { + struct platform_device *pdev; + struct rtc_device___2 *rtc; + int alarm_enabled; + int update_enabled; +}; + +struct wm8350_charger_policy { + int eoc_mA; + int charge_mV; + int fast_limit_mA; + int fast_limit_USB_mA; + int charge_timeout; + int trickle_start_mV; + int trickle_charge_mA; + int trickle_charge_USB_mA; +}; + +struct wm8350_power { + struct platform_device *pdev; + struct power_supply *battery; + struct power_supply *usb; + struct power_supply *ac; + struct wm8350_charger_policy *policy; + int rev_g_coeff; +}; + +struct wm8350_wdt { + struct platform_device *pdev; +}; + +struct wm8350_hwmon { + struct platform_device *pdev; + struct device *classdev; +}; + +struct wm8350 { + struct device *dev; + struct regmap *regmap; + bool unlocked; + struct mutex auxadc_mutex; + struct completion auxadc_done; + struct mutex irq_lock; + int chip_irq; + int irq_base; + u16 irq_masks[7]; + struct wm8350_codec codec; + struct wm8350_gpio gpio; + struct wm8350_hwmon hwmon; + struct wm8350_pmic pmic; + struct wm8350_power power; + struct wm8350_rtc rtc; + struct wm8350_wdt wdt; +}; + +struct wm8350_platform_data { + int (*init)(struct wm8350 *); + int irq_high; + int irq_base; + int gpio_base; +}; + +struct wm8350_reg_access { + u16 readable; + u16 writable; + u16 vol; +}; + +struct wm8350_irq_data { + int primary; + int reg; + int mask; + int primary_only; +}; + +struct tps65910_platform_data { + int irq; + int irq_base; +}; + +enum tps65912_irqs { + TPS65912_IRQ_PWRHOLD_F = 0, + TPS65912_IRQ_VMON = 1, + TPS65912_IRQ_PWRON = 2, + TPS65912_IRQ_PWRON_LP = 3, + TPS65912_IRQ_PWRHOLD_R = 4, + TPS65912_IRQ_HOTDIE = 5, + TPS65912_IRQ_GPIO1_R = 6, + TPS65912_IRQ_GPIO1_F = 7, + TPS65912_IRQ_GPIO2_R = 8, + TPS65912_IRQ_GPIO2_F = 9, + TPS65912_IRQ_GPIO3_R = 10, + TPS65912_IRQ_GPIO3_F = 11, + TPS65912_IRQ_GPIO4_R = 12, + TPS65912_IRQ_GPIO4_F = 13, + TPS65912_IRQ_GPIO5_R = 14, + TPS65912_IRQ_GPIO5_F = 15, + TPS65912_IRQ_PGOOD_DCDC1 = 16, + TPS65912_IRQ_PGOOD_DCDC2 = 17, + TPS65912_IRQ_PGOOD_DCDC3 = 18, + TPS65912_IRQ_PGOOD_DCDC4 = 19, + TPS65912_IRQ_PGOOD_LDO1 = 20, + TPS65912_IRQ_PGOOD_LDO2 = 21, + TPS65912_IRQ_PGOOD_LDO3 = 22, + TPS65912_IRQ_PGOOD_LDO4 = 23, + TPS65912_IRQ_PGOOD_LDO5 = 24, + TPS65912_IRQ_PGOOD_LDO6 = 25, + TPS65912_IRQ_PGOOD_LDO7 = 26, + TPS65912_IRQ_PGOOD_LDO8 = 27, + TPS65912_IRQ_PGOOD_LDO9 = 28, + TPS65912_IRQ_PGOOD_LDO10 = 29, +}; + +struct tps65912 { + struct device *dev; + struct regmap *regmap; + int irq; + struct regmap_irq_chip_data *irq_data; +}; + +enum chips { + TPS80031 = 1, + 
TPS80032 = 2, +}; + +enum { + TPS80031_INT_PWRON = 0, + TPS80031_INT_RPWRON = 1, + TPS80031_INT_SYS_VLOW = 2, + TPS80031_INT_RTC_ALARM = 3, + TPS80031_INT_RTC_PERIOD = 4, + TPS80031_INT_HOT_DIE = 5, + TPS80031_INT_VXX_SHORT = 6, + TPS80031_INT_SPDURATION = 7, + TPS80031_INT_WATCHDOG = 8, + TPS80031_INT_BAT = 9, + TPS80031_INT_SIM = 10, + TPS80031_INT_MMC = 11, + TPS80031_INT_RES = 12, + TPS80031_INT_GPADC_RT = 13, + TPS80031_INT_GPADC_SW2_EOC = 14, + TPS80031_INT_CC_AUTOCAL = 15, + TPS80031_INT_ID_WKUP = 16, + TPS80031_INT_VBUSS_WKUP = 17, + TPS80031_INT_ID = 18, + TPS80031_INT_VBUS = 19, + TPS80031_INT_CHRG_CTRL = 20, + TPS80031_INT_EXT_CHRG = 21, + TPS80031_INT_INT_CHRG = 22, + TPS80031_INT_RES2 = 23, + TPS80031_INT_BAT_TEMP_OVRANGE = 24, + TPS80031_INT_BAT_REMOVED = 25, + TPS80031_INT_VBUS_DET = 26, + TPS80031_INT_VAC_DET = 27, + TPS80031_INT_FAULT_WDG = 28, + TPS80031_INT_LINCH_GATED = 29, + TPS80031_INT_NR = 30, +}; + +enum { + TPS80031_REGULATOR_VIO = 0, + TPS80031_REGULATOR_SMPS1 = 1, + TPS80031_REGULATOR_SMPS2 = 2, + TPS80031_REGULATOR_SMPS3 = 3, + TPS80031_REGULATOR_SMPS4 = 4, + TPS80031_REGULATOR_VANA = 5, + TPS80031_REGULATOR_LDO1 = 6, + TPS80031_REGULATOR_LDO2 = 7, + TPS80031_REGULATOR_LDO3 = 8, + TPS80031_REGULATOR_LDO4 = 9, + TPS80031_REGULATOR_LDO5 = 10, + TPS80031_REGULATOR_LDO6 = 11, + TPS80031_REGULATOR_LDO7 = 12, + TPS80031_REGULATOR_LDOLN = 13, + TPS80031_REGULATOR_LDOUSB = 14, + TPS80031_REGULATOR_VBUS = 15, + TPS80031_REGULATOR_REGEN1 = 16, + TPS80031_REGULATOR_REGEN2 = 17, + TPS80031_REGULATOR_SYSEN = 18, + TPS80031_REGULATOR_MAX = 19, +}; + +enum tps80031_ext_control { + TPS80031_PWR_REQ_INPUT_NONE = 0, + TPS80031_PWR_REQ_INPUT_PREQ1 = 1, + TPS80031_PWR_REQ_INPUT_PREQ2 = 2, + TPS80031_PWR_REQ_INPUT_PREQ3 = 4, + TPS80031_PWR_OFF_ON_SLEEP = 8, + TPS80031_PWR_ON_ON_SLEEP = 16, +}; + +enum tps80031_pupd_pins { + TPS80031_PREQ1 = 0, + TPS80031_PREQ2A = 1, + TPS80031_PREQ2B = 2, + TPS80031_PREQ2C = 3, + TPS80031_PREQ3 = 4, + TPS80031_NRES_WARM = 5, + TPS80031_PWM_FORCE = 6, + TPS80031_CHRG_EXT_CHRG_STATZ = 7, + TPS80031_SIM = 8, + TPS80031_MMC = 9, + TPS80031_GPADC_START = 10, + TPS80031_DVSI2C_SCL = 11, + TPS80031_DVSI2C_SDA = 12, + TPS80031_CTLI2C_SCL = 13, + TPS80031_CTLI2C_SDA = 14, +}; + +enum tps80031_pupd_settings { + TPS80031_PUPD_NORMAL = 0, + TPS80031_PUPD_PULLDOWN = 1, + TPS80031_PUPD_PULLUP = 2, +}; + +struct tps80031 { + struct device *dev; + long unsigned int chip_info; + int es_version; + struct i2c_client *clients[4]; + struct regmap *regmap[4]; + struct regmap_irq_chip_data *irq_data; +}; + +struct tps80031_pupd_init_data { + int input_pin; + int setting; +}; + +struct tps80031_regulator_platform_data { + struct regulator_init_data *reg_init_data; + unsigned int ext_ctrl_flag; + unsigned int config_flags; +}; + +struct tps80031_platform_data { + int irq_base; + bool use_power_off; + struct tps80031_pupd_init_data *pupd_init_data; + int pupd_init_data_size; + struct tps80031_regulator_platform_data *regulator_pdata[19]; +}; + +struct tps80031_pupd_data { + u8 reg; + u8 pullup_bit; + u8 pulldown_bit; +}; + +struct of_dev_auxdata { + char *compatible; + resource_size_t phys_addr; + char *name; + void *platform_data; +}; + +struct matrix_keymap_data { + const uint32_t *keymap; + unsigned int keymap_size; +}; + +enum twl_module_ids { + TWL_MODULE_USB = 0, + TWL_MODULE_PIH = 1, + TWL_MODULE_MAIN_CHARGE = 2, + TWL_MODULE_PM_MASTER = 3, + TWL_MODULE_PM_RECEIVER = 4, + TWL_MODULE_RTC = 5, + TWL_MODULE_PWM = 6, + TWL_MODULE_LED = 7, + TWL_MODULE_SECURED_REG = 8, + 
TWL_MODULE_LAST = 9, +}; + +enum twl4030_module_ids { + TWL4030_MODULE_AUDIO_VOICE = 9, + TWL4030_MODULE_GPIO = 10, + TWL4030_MODULE_INTBR = 11, + TWL4030_MODULE_TEST = 12, + TWL4030_MODULE_KEYPAD = 13, + TWL4030_MODULE_MADC = 14, + TWL4030_MODULE_INTERRUPTS = 15, + TWL4030_MODULE_PRECHARGE = 16, + TWL4030_MODULE_BACKUP = 17, + TWL4030_MODULE_INT = 18, + TWL5031_MODULE_ACCESSORY = 19, + TWL5031_MODULE_INTERRUPTS = 20, + TWL4030_MODULE_LAST = 21, +}; + +enum twl6030_module_ids { + TWL6030_MODULE_ID0 = 9, + TWL6030_MODULE_ID1 = 10, + TWL6030_MODULE_ID2 = 11, + TWL6030_MODULE_GPADC = 12, + TWL6030_MODULE_GASGAUGE = 13, + TWL6030_MODULE_LAST = 14, +}; + +struct twl4030_clock_init_data { + bool ck32k_lowpwr_enable; +}; + +struct twl4030_bci_platform_data { + int *battery_tmp_tbl; + unsigned int tblsize; + int bb_uvolt; + int bb_uamp; +}; + +struct twl4030_gpio_platform_data { + bool use_leds; + u8 mmc_cd; + u32 debounce; + u32 pullups; + u32 pulldowns; + int (*setup)(struct device *, unsigned int, unsigned int); + int (*teardown)(struct device *, unsigned int, unsigned int); +}; + +struct twl4030_madc_platform_data { + int irq_line; +}; + +struct twl4030_keypad_data { + const struct matrix_keymap_data *keymap_data; + unsigned int rows; + unsigned int cols; + bool rep; +}; + +enum twl4030_usb_mode { + T2_USB_MODE_ULPI = 1, + T2_USB_MODE_CEA2011_3PIN = 2, +}; + +struct twl4030_usb_data { + enum twl4030_usb_mode usb_mode; + long unsigned int features; + int (*phy_init)(struct device *); + int (*phy_exit)(struct device *); + int (*phy_power)(struct device *, int, int); + int (*phy_set_clock)(struct device *, int); + int (*phy_suspend)(struct device *, int); +}; + +struct twl4030_ins { + u16 pmb_message; + u8 delay; +}; + +struct twl4030_script { + struct twl4030_ins *script; + unsigned int size; + u8 flags; +}; + +struct twl4030_resconfig { + u8 resource; + u8 devgroup; + u8 type; + u8 type2; + u8 remap_off; + u8 remap_sleep; +}; + +struct twl4030_power_data { + struct twl4030_script **scripts; + unsigned int num; + struct twl4030_resconfig *resource_config; + struct twl4030_resconfig *board_config; + bool use_poweroff; + bool ac_charger_quirk; +}; + +struct twl4030_codec_data { + unsigned int digimic_delay; + unsigned int ramp_delay_value; + unsigned int offset_cncl_path; + unsigned int hs_extmute: 1; + int hs_extmute_gpio; +}; + +struct twl4030_vibra_data { + unsigned int coexist; +}; + +struct twl4030_audio_data { + unsigned int audio_mclk; + struct twl4030_codec_data *codec; + struct twl4030_vibra_data *vibra; + int audpwron_gpio; + int naudint_irq; + unsigned int irq_base; +}; + +struct twl4030_platform_data { + struct twl4030_clock_init_data *clock; + struct twl4030_bci_platform_data *bci; + struct twl4030_gpio_platform_data *gpio; + struct twl4030_madc_platform_data *madc; + struct twl4030_keypad_data *keypad; + struct twl4030_usb_data *usb; + struct twl4030_power_data *power; + struct twl4030_audio_data *audio; + struct regulator_init_data *vdac; + struct regulator_init_data *vaux1; + struct regulator_init_data *vaux2; + struct regulator_init_data *vaux3; + struct regulator_init_data *vdd1; + struct regulator_init_data *vdd2; + struct regulator_init_data *vdd3; + struct regulator_init_data *vpll1; + struct regulator_init_data *vpll2; + struct regulator_init_data *vmmc1; + struct regulator_init_data *vmmc2; + struct regulator_init_data *vsim; + struct regulator_init_data *vaux4; + struct regulator_init_data *vio; + struct regulator_init_data *vintana1; + struct regulator_init_data *vintana2; 
+ struct regulator_init_data *vintdig; + struct regulator_init_data *vmmc; + struct regulator_init_data *vpp; + struct regulator_init_data *vusim; + struct regulator_init_data *vana; + struct regulator_init_data *vcxio; + struct regulator_init_data *vusb; + struct regulator_init_data *clk32kg; + struct regulator_init_data *v1v8; + struct regulator_init_data *v2v1; + struct regulator_init_data *ldo1; + struct regulator_init_data *ldo2; + struct regulator_init_data *ldo3; + struct regulator_init_data *ldo4; + struct regulator_init_data *ldo5; + struct regulator_init_data *ldo6; + struct regulator_init_data *ldo7; + struct regulator_init_data *ldoln; + struct regulator_init_data *ldousb; + struct regulator_init_data *smps3; + struct regulator_init_data *smps4; + struct regulator_init_data *vio6025; +}; + +struct twl_regulator_driver_data { + int (*set_voltage)(void *, int); + int (*get_voltage)(void *); + void *data; + long unsigned int features; +}; + +struct twl_client { + struct i2c_client *client; + struct regmap *regmap; +}; + +struct twl_mapping { + unsigned char sid; + unsigned char base; +}; + +struct twl_private { + bool ready; + u32 twl_idcode; + unsigned int twl_id; + struct twl_mapping *twl_map; + struct twl_client *twl_modules; +}; + +struct sih_irq_data { + u8 isr_offset; + u8 imr_offset; +}; + +struct sih { + char name[8]; + u8 module; + u8 control_offset; + bool set_cor; + u8 bits; + u8 bytes_ixr; + u8 edr_offset; + u8 bytes_edr; + u8 irq_lines; + struct sih_irq_data mask[2]; +}; + +struct sih_agent { + int irq_base; + const struct sih *sih; + u32 imr; + bool imr_change_pending; + u32 edge_change; + struct mutex irq_lock; + char *irq_name; +}; + +struct twl6030_irq { + unsigned int irq_base; + int twl_irq; + bool irq_wake_enabled; + atomic_t wakeirqs; + struct notifier_block pm_nb; + struct irq_chip irq_chip; + struct irq_domain *irq_domain; + const int *irq_mapping_tbl; +}; + +enum twl4030_audio_res { + TWL4030_AUDIO_RES_POWER = 0, + TWL4030_AUDIO_RES_APLL = 1, + TWL4030_AUDIO_RES_MAX = 2, +}; + +struct twl4030_audio_resource { + int request_count; + u8 reg; + u8 mask; +}; + +struct twl4030_audio { + unsigned int audio_mclk; + struct mutex mutex; + struct twl4030_audio_resource resource[2]; + struct mfd_cell cells[2]; +}; + +enum of_gpio_flags { + OF_GPIO_ACTIVE_LOW = 1, + OF_GPIO_SINGLE_ENDED = 2, + OF_GPIO_OPEN_DRAIN = 4, + OF_GPIO_TRANSITORY = 8, + OF_GPIO_PULL_UP = 16, + OF_GPIO_PULL_DOWN = 32, +}; + +struct twl6040 { + struct device *dev; + struct regmap *regmap; + struct regmap_irq_chip_data *irq_data; + struct regulator_bulk_data supplies[2]; + struct clk *clk32k; + struct clk *mclk; + struct mutex mutex; + struct mutex irq_mutex; + struct mfd_cell cells[4]; + struct completion ready; + int audpwron; + int power_count; + int rev; + int pll; + unsigned int sysclk_rate; + unsigned int mclk_rate; + unsigned int irq; + unsigned int irq_ready; + unsigned int irq_th; +}; + +struct mfd_of_node_entry { + struct list_head list; + struct device *dev; + struct device_node *np; +}; + +struct pcap_subdev { + int id; + const char *name; + void *platform_data; +}; + +struct pcap_platform_data { + unsigned int irq_base; + unsigned int config; + int gpio; + void (*init)(void *); + int num_subdevs; + struct pcap_subdev *subdevs; +}; + +struct pcap_adc_request { + u8 bank; + u8 ch[2]; + u32 flags; + void (*callback)(void *, u16 *); + void *data; +}; + +struct pcap_adc_sync_request { + u16 res[2]; + struct completion completion; +}; + +struct pcap_chip { + struct spi_device *spi; + u32 
buf; + spinlock_t io_lock; + unsigned int irq_base; + u32 msr; + struct work_struct isr_work; + struct work_struct msr_work; + struct workqueue_struct *workqueue; + struct pcap_adc_request *adc_queue[8]; + u8 adc_head; + u8 adc_tail; + spinlock_t adc_lock; +}; + +struct da903x_subdev_info { + int id; + const char *name; + void *platform_data; +}; + +struct da903x_platform_data { + int num_subdevs; + struct da903x_subdev_info *subdevs; +}; + +struct da903x_chip; + +struct da903x_chip_ops { + int (*init_chip)(struct da903x_chip *); + int (*unmask_events)(struct da903x_chip *, unsigned int); + int (*mask_events)(struct da903x_chip *, unsigned int); + int (*read_events)(struct da903x_chip *, unsigned int *); + int (*read_status)(struct da903x_chip *, unsigned int *); +}; + +struct da903x_chip { + struct i2c_client *client; + struct device *dev; + const struct da903x_chip_ops *ops; + int type; + uint32_t events_mask; + struct mutex lock; + struct work_struct irq_work; + struct blocking_notifier_head notifier_list; +}; + +struct da9052 { + struct device *dev; + struct regmap *regmap; + struct mutex auxadc_lock; + struct completion done; + int irq_base; + struct regmap_irq_chip_data *irq_data; + u8 chip_id; + int chip_irq; + int (*fix_io)(struct da9052 *, unsigned char); +}; + +struct led_platform_data; + +struct da9052_pdata { + struct led_platform_data *pled; + int (*init)(struct da9052 *); + int irq_base; + int gpio_base; + int use_for_apm; + struct regulator_init_data *regulators[14]; +}; + +enum da9052_chip_id { + DA9052 = 0, + DA9053_AA = 1, + DA9053_BA = 2, + DA9053_BB = 3, + DA9053_BC = 4, +}; + +enum lp8788_int_id { + LP8788_INT_TSDL = 0, + LP8788_INT_TSDH = 1, + LP8788_INT_UVLO = 2, + LP8788_INT_FLAGMON = 3, + LP8788_INT_PWRON_TIME = 4, + LP8788_INT_PWRON = 5, + LP8788_INT_COMP1 = 6, + LP8788_INT_COMP2 = 7, + LP8788_INT_CHG_INPUT_STATE = 8, + LP8788_INT_CHG_STATE = 9, + LP8788_INT_EOC = 10, + LP8788_INT_CHG_RESTART = 11, + LP8788_INT_RESTART_TIMEOUT = 12, + LP8788_INT_FULLCHG_TIMEOUT = 13, + LP8788_INT_PRECHG_TIMEOUT = 14, + LP8788_INT_RTC_ALARM1 = 17, + LP8788_INT_RTC_ALARM2 = 18, + LP8788_INT_ENTER_SYS_SUPPORT = 19, + LP8788_INT_EXIT_SYS_SUPPORT = 20, + LP8788_INT_BATT_LOW = 21, + LP8788_INT_NO_BATT = 22, + LP8788_INT_MAX = 24, +}; + +enum lp8788_dvs_sel { + DVS_SEL_V0 = 0, + DVS_SEL_V1 = 1, + DVS_SEL_V2 = 2, + DVS_SEL_V3 = 3, +}; + +enum lp8788_charger_event { + NO_CHARGER = 0, + CHARGER_DETECTED = 1, +}; + +enum lp8788_bl_ctrl_mode { + LP8788_BL_REGISTER_ONLY = 0, + LP8788_BL_COMB_PWM_BASED = 1, + LP8788_BL_COMB_REGISTER_BASED = 2, +}; + +enum lp8788_bl_dim_mode { + LP8788_DIM_EXPONENTIAL = 0, + LP8788_DIM_LINEAR = 1, +}; + +enum lp8788_bl_full_scale_current { + LP8788_FULLSCALE_5000uA = 0, + LP8788_FULLSCALE_8500uA = 1, + LP8788_FULLSCALE_1200uA = 2, + LP8788_FULLSCALE_1550uA = 3, + LP8788_FULLSCALE_1900uA = 4, + LP8788_FULLSCALE_2250uA = 5, + LP8788_FULLSCALE_2600uA = 6, + LP8788_FULLSCALE_2950uA = 7, +}; + +enum lp8788_bl_ramp_step { + LP8788_RAMP_8us = 0, + LP8788_RAMP_1024us = 1, + LP8788_RAMP_2048us = 2, + LP8788_RAMP_4096us = 3, + LP8788_RAMP_8192us = 4, + LP8788_RAMP_16384us = 5, + LP8788_RAMP_32768us = 6, + LP8788_RAMP_65538us = 7, +}; + +enum lp8788_isink_scale { + LP8788_ISINK_SCALE_100mA = 0, + LP8788_ISINK_SCALE_120mA = 1, +}; + +enum lp8788_isink_number { + LP8788_ISINK_1 = 0, + LP8788_ISINK_2 = 1, + LP8788_ISINK_3 = 2, +}; + +enum lp8788_alarm_sel { + LP8788_ALARM_1 = 0, + LP8788_ALARM_2 = 1, + LP8788_ALARM_MAX = 2, +}; + +struct lp8788_buck1_dvs { + int gpio; + enum 
lp8788_dvs_sel vsel; +}; + +struct lp8788_buck2_dvs { + int gpio[2]; + enum lp8788_dvs_sel vsel; +}; + +struct lp8788_chg_param { + u8 addr; + u8 val; +}; + +struct lp8788; + +struct lp8788_charger_platform_data { + const char *adc_vbatt; + const char *adc_batt_temp; + unsigned int max_vbatt_mv; + struct lp8788_chg_param *chg_params; + int num_chg_params; + void (*charger_event)(struct lp8788 *, enum lp8788_charger_event); +}; + +struct lp8788_platform_data; + +struct lp8788 { + struct device *dev; + struct regmap *regmap; + struct irq_domain *irqdm; + int irq; + struct lp8788_platform_data *pdata; +}; + +struct lp8788_backlight_platform_data { + char *name; + int initial_brightness; + enum lp8788_bl_ctrl_mode bl_mode; + enum lp8788_bl_dim_mode dim_mode; + enum lp8788_bl_full_scale_current full_scale; + enum lp8788_bl_ramp_step rise_time; + enum lp8788_bl_ramp_step fall_time; + enum pwm_polarity pwm_pol; + unsigned int period_ns; +}; + +struct lp8788_led_platform_data { + char *name; + enum lp8788_isink_scale scale; + enum lp8788_isink_number num; + int iout_code; +}; + +struct lp8788_vib_platform_data { + char *name; + enum lp8788_isink_scale scale; + enum lp8788_isink_number num; + int iout_code; + int pwm_code; +}; + +struct iio_map; + +struct lp8788_platform_data { + int (*init_func)(struct lp8788 *); + struct regulator_init_data *buck_data[4]; + struct regulator_init_data *dldo_data[12]; + struct regulator_init_data *aldo_data[10]; + struct lp8788_buck1_dvs *buck1_dvs; + struct lp8788_buck2_dvs *buck2_dvs; + struct lp8788_charger_platform_data *chg_pdata; + enum lp8788_alarm_sel alarm_sel; + struct lp8788_backlight_platform_data *bl_pdata; + struct lp8788_led_platform_data *led_pdata; + struct lp8788_vib_platform_data *vib_pdata; + struct iio_map *adc_pdata; +}; + +struct lp8788_irq_data { + struct lp8788 *lp; + struct mutex irq_lock; + struct irq_domain *domain; + int enabled[24]; +}; + +struct da9055 { + struct regmap *regmap; + struct regmap_irq_chip_data *irq_data; + struct device *dev; + struct i2c_client *i2c_client; + int irq_base; + int chip_irq; +}; + +enum gpio_select { + NO_GPIO = 0, + GPIO_1 = 1, + GPIO_2 = 2, +}; + +struct da9055_pdata { + int (*init)(struct da9055 *); + int irq_base; + int gpio_base; + struct regulator_init_data *regulators[8]; + bool reset_enable; + int *gpio_ren; + int *gpio_rsel; + enum gpio_select *reg_ren; + enum gpio_select *reg_rsel; + struct gpio_desc **ena_gpiods; +}; + +enum da9063_type { + PMIC_TYPE_DA9063 = 0, + PMIC_TYPE_DA9063L = 1, +}; + +enum da9063_irqs { + DA9063_IRQ_ONKEY = 0, + DA9063_IRQ_ALARM = 1, + DA9063_IRQ_TICK = 2, + DA9063_IRQ_ADC_RDY = 3, + DA9063_IRQ_SEQ_RDY = 4, + DA9063_IRQ_WAKE = 5, + DA9063_IRQ_TEMP = 6, + DA9063_IRQ_COMP_1V2 = 7, + DA9063_IRQ_LDO_LIM = 8, + DA9063_IRQ_REG_UVOV = 9, + DA9063_IRQ_DVC_RDY = 10, + DA9063_IRQ_VDD_MON = 11, + DA9063_IRQ_WARN = 12, + DA9063_IRQ_GPI0 = 13, + DA9063_IRQ_GPI1 = 14, + DA9063_IRQ_GPI2 = 15, + DA9063_IRQ_GPI3 = 16, + DA9063_IRQ_GPI4 = 17, + DA9063_IRQ_GPI5 = 18, + DA9063_IRQ_GPI6 = 19, + DA9063_IRQ_GPI7 = 20, + DA9063_IRQ_GPI8 = 21, + DA9063_IRQ_GPI9 = 22, + DA9063_IRQ_GPI10 = 23, + DA9063_IRQ_GPI11 = 24, + DA9063_IRQ_GPI12 = 25, + DA9063_IRQ_GPI13 = 26, + DA9063_IRQ_GPI14 = 27, + DA9063_IRQ_GPI15 = 28, +}; + +struct da9063 { + struct device *dev; + enum da9063_type type; + unsigned char variant_code; + unsigned int flags; + struct regmap *regmap; + int chip_irq; + unsigned int irq_base; + struct regmap_irq_chip_data *regmap_irq; +}; + +enum da9063_variant_codes { + PMIC_DA9063_AD = 
3, + PMIC_DA9063_BB = 5, + PMIC_DA9063_CA = 6, + PMIC_DA9063_DA = 7, +}; + +enum da9063_page_sel_buf_fmt { + DA9063_PAGE_SEL_BUF_PAGE_REG = 0, + DA9063_PAGE_SEL_BUF_PAGE_VAL = 1, + DA9063_PAGE_SEL_BUF_SIZE = 2, +}; + +enum da9063_paged_read_msgs { + DA9063_PAGED_READ_MSG_PAGE_SEL = 0, + DA9063_PAGED_READ_MSG_REG_SEL = 1, + DA9063_PAGED_READ_MSG_DATA = 2, + DA9063_PAGED_READ_MSG_CNT = 3, +}; + +enum { + DA9063_DEV_ID_REG = 0, + DA9063_VAR_ID_REG = 1, + DA9063_CHIP_ID_REGS = 2, +}; + +struct max14577_regulator_platform_data { + int id; + struct regulator_init_data *initdata; + struct device_node *of_node; +}; + +struct max14577_platform_data { + int irq_base; + int gpio_pogo_vbatt_en; + int gpio_pogo_vbus_en; + int (*set_gpio_pogo_vbatt_en)(int); + int (*set_gpio_pogo_vbus_en)(int); + int (*set_gpio_pogo_cb)(int); + struct max14577_regulator_platform_data *regulators; +}; + +struct maxim_charger_current { + unsigned int min; + unsigned int high_start; + unsigned int high_step; + unsigned int max; +}; + +enum maxim_device_type { + MAXIM_DEVICE_TYPE_UNKNOWN = 0, + MAXIM_DEVICE_TYPE_MAX14577 = 1, + MAXIM_DEVICE_TYPE_MAX77836 = 2, + MAXIM_DEVICE_TYPE_NUM = 3, +}; + +enum max14577_reg { + MAX14577_REG_DEVICEID = 0, + MAX14577_REG_INT1 = 1, + MAX14577_REG_INT2 = 2, + MAX14577_REG_INT3 = 3, + MAX14577_REG_STATUS1 = 4, + MAX14577_REG_STATUS2 = 5, + MAX14577_REG_STATUS3 = 6, + MAX14577_REG_INTMASK1 = 7, + MAX14577_REG_INTMASK2 = 8, + MAX14577_REG_INTMASK3 = 9, + MAX14577_REG_CDETCTRL1 = 10, + MAX14577_REG_RFU = 11, + MAX14577_REG_CONTROL1 = 12, + MAX14577_REG_CONTROL2 = 13, + MAX14577_REG_CONTROL3 = 14, + MAX14577_REG_CHGCTRL1 = 15, + MAX14577_REG_CHGCTRL2 = 16, + MAX14577_REG_CHGCTRL3 = 17, + MAX14577_REG_CHGCTRL4 = 18, + MAX14577_REG_CHGCTRL5 = 19, + MAX14577_REG_CHGCTRL6 = 20, + MAX14577_REG_CHGCTRL7 = 21, + MAX14577_REG_END = 22, +}; + +enum max77836_pmic_reg { + MAX77836_PMIC_REG_PMIC_ID = 32, + MAX77836_PMIC_REG_PMIC_REV = 33, + MAX77836_PMIC_REG_INTSRC = 34, + MAX77836_PMIC_REG_INTSRC_MASK = 35, + MAX77836_PMIC_REG_TOPSYS_INT = 36, + MAX77836_PMIC_REG_TOPSYS_INT_MASK = 38, + MAX77836_PMIC_REG_TOPSYS_STAT = 40, + MAX77836_PMIC_REG_MRSTB_CNTL = 42, + MAX77836_PMIC_REG_LSCNFG = 43, + MAX77836_LDO_REG_CNFG1_LDO1 = 81, + MAX77836_LDO_REG_CNFG2_LDO1 = 82, + MAX77836_LDO_REG_CNFG1_LDO2 = 83, + MAX77836_LDO_REG_CNFG2_LDO2 = 84, + MAX77836_LDO_REG_CNFG_LDO_BIAS = 85, + MAX77836_COMP_REG_COMP1 = 96, + MAX77836_PMIC_REG_END = 97, +}; + +enum max77836_fg_reg { + MAX77836_FG_REG_VCELL_MSB = 2, + MAX77836_FG_REG_VCELL_LSB = 3, + MAX77836_FG_REG_SOC_MSB = 4, + MAX77836_FG_REG_SOC_LSB = 5, + MAX77836_FG_REG_MODE_H = 6, + MAX77836_FG_REG_MODE_L = 7, + MAX77836_FG_REG_VERSION_MSB = 8, + MAX77836_FG_REG_VERSION_LSB = 9, + MAX77836_FG_REG_HIBRT_H = 10, + MAX77836_FG_REG_HIBRT_L = 11, + MAX77836_FG_REG_CONFIG_H = 12, + MAX77836_FG_REG_CONFIG_L = 13, + MAX77836_FG_REG_VALRT_MIN = 20, + MAX77836_FG_REG_VALRT_MAX = 21, + MAX77836_FG_REG_CRATE_MSB = 22, + MAX77836_FG_REG_CRATE_LSB = 23, + MAX77836_FG_REG_VRESET = 24, + MAX77836_FG_REG_FGID = 25, + MAX77836_FG_REG_STATUS_H = 26, + MAX77836_FG_REG_STATUS_L = 27, + MAX77836_FG_REG_END = 28, +}; + +struct max14577 { + struct device *dev; + struct i2c_client *i2c; + struct i2c_client *i2c_pmic; + enum maxim_device_type dev_type; + struct regmap *regmap; + struct regmap *regmap_pmic; + struct regmap_irq_chip_data *irq_data; + struct regmap_irq_chip_data *irq_data_pmic; + int irq; +}; + +enum max77693_types { + TYPE_MAX77693_UNKNOWN = 0, + TYPE_MAX77693 = 1, + TYPE_MAX77843 
= 2, + TYPE_MAX77693_NUM = 3, +}; + +struct max77693_dev { + struct device *dev; + struct i2c_client *i2c; + struct i2c_client *i2c_muic; + struct i2c_client *i2c_haptic; + struct i2c_client *i2c_chg; + enum max77693_types type; + struct regmap *regmap; + struct regmap *regmap_muic; + struct regmap *regmap_haptic; + struct regmap *regmap_chg; + struct regmap_irq_chip_data *irq_data_led; + struct regmap_irq_chip_data *irq_data_topsys; + struct regmap_irq_chip_data *irq_data_chg; + struct regmap_irq_chip_data *irq_data_muic; + int irq; +}; + +enum max77693_pmic_reg { + MAX77693_LED_REG_IFLASH1 = 0, + MAX77693_LED_REG_IFLASH2 = 1, + MAX77693_LED_REG_ITORCH = 2, + MAX77693_LED_REG_ITORCHTIMER = 3, + MAX77693_LED_REG_FLASH_TIMER = 4, + MAX77693_LED_REG_FLASH_EN = 5, + MAX77693_LED_REG_MAX_FLASH1 = 6, + MAX77693_LED_REG_MAX_FLASH2 = 7, + MAX77693_LED_REG_MAX_FLASH3 = 8, + MAX77693_LED_REG_MAX_FLASH4 = 9, + MAX77693_LED_REG_VOUT_CNTL = 10, + MAX77693_LED_REG_VOUT_FLASH1 = 11, + MAX77693_LED_REG_VOUT_FLASH2 = 12, + MAX77693_LED_REG_FLASH_INT = 14, + MAX77693_LED_REG_FLASH_INT_MASK = 15, + MAX77693_LED_REG_FLASH_STATUS = 16, + MAX77693_PMIC_REG_PMIC_ID1 = 32, + MAX77693_PMIC_REG_PMIC_ID2 = 33, + MAX77693_PMIC_REG_INTSRC = 34, + MAX77693_PMIC_REG_INTSRC_MASK = 35, + MAX77693_PMIC_REG_TOPSYS_INT = 36, + MAX77693_PMIC_REG_TOPSYS_INT_MASK = 38, + MAX77693_PMIC_REG_TOPSYS_STAT = 40, + MAX77693_PMIC_REG_MAINCTRL1 = 42, + MAX77693_PMIC_REG_LSCNFG = 43, + MAX77693_CHG_REG_CHG_INT = 176, + MAX77693_CHG_REG_CHG_INT_MASK = 177, + MAX77693_CHG_REG_CHG_INT_OK = 178, + MAX77693_CHG_REG_CHG_DETAILS_00 = 179, + MAX77693_CHG_REG_CHG_DETAILS_01 = 180, + MAX77693_CHG_REG_CHG_DETAILS_02 = 181, + MAX77693_CHG_REG_CHG_DETAILS_03 = 182, + MAX77693_CHG_REG_CHG_CNFG_00 = 183, + MAX77693_CHG_REG_CHG_CNFG_01 = 184, + MAX77693_CHG_REG_CHG_CNFG_02 = 185, + MAX77693_CHG_REG_CHG_CNFG_03 = 186, + MAX77693_CHG_REG_CHG_CNFG_04 = 187, + MAX77693_CHG_REG_CHG_CNFG_05 = 188, + MAX77693_CHG_REG_CHG_CNFG_06 = 189, + MAX77693_CHG_REG_CHG_CNFG_07 = 190, + MAX77693_CHG_REG_CHG_CNFG_08 = 191, + MAX77693_CHG_REG_CHG_CNFG_09 = 192, + MAX77693_CHG_REG_CHG_CNFG_10 = 193, + MAX77693_CHG_REG_CHG_CNFG_11 = 194, + MAX77693_CHG_REG_CHG_CNFG_12 = 195, + MAX77693_CHG_REG_CHG_CNFG_13 = 196, + MAX77693_CHG_REG_CHG_CNFG_14 = 197, + MAX77693_CHG_REG_SAFEOUT_CTRL = 198, + MAX77693_PMIC_REG_END = 199, +}; + +enum max77693_muic_reg { + MAX77693_MUIC_REG_ID = 0, + MAX77693_MUIC_REG_INT1 = 1, + MAX77693_MUIC_REG_INT2 = 2, + MAX77693_MUIC_REG_INT3 = 3, + MAX77693_MUIC_REG_STATUS1 = 4, + MAX77693_MUIC_REG_STATUS2 = 5, + MAX77693_MUIC_REG_STATUS3 = 6, + MAX77693_MUIC_REG_INTMASK1 = 7, + MAX77693_MUIC_REG_INTMASK2 = 8, + MAX77693_MUIC_REG_INTMASK3 = 9, + MAX77693_MUIC_REG_CDETCTRL1 = 10, + MAX77693_MUIC_REG_CDETCTRL2 = 11, + MAX77693_MUIC_REG_CTRL1 = 12, + MAX77693_MUIC_REG_CTRL2 = 13, + MAX77693_MUIC_REG_CTRL3 = 14, + MAX77693_MUIC_REG_END = 15, +}; + +enum max77693_haptic_reg { + MAX77693_HAPTIC_REG_STATUS = 0, + MAX77693_HAPTIC_REG_CONFIG1 = 1, + MAX77693_HAPTIC_REG_CONFIG2 = 2, + MAX77693_HAPTIC_REG_CONFIG_CHNL = 3, + MAX77693_HAPTIC_REG_CONFG_CYC1 = 4, + MAX77693_HAPTIC_REG_CONFG_CYC2 = 5, + MAX77693_HAPTIC_REG_CONFIG_PER1 = 6, + MAX77693_HAPTIC_REG_CONFIG_PER2 = 7, + MAX77693_HAPTIC_REG_CONFIG_PER3 = 8, + MAX77693_HAPTIC_REG_CONFIG_PER4 = 9, + MAX77693_HAPTIC_REG_CONFIG_DUTY1 = 10, + MAX77693_HAPTIC_REG_CONFIG_DUTY2 = 11, + MAX77693_HAPTIC_REG_CONFIG_PWM1 = 12, + MAX77693_HAPTIC_REG_CONFIG_PWM2 = 13, + MAX77693_HAPTIC_REG_CONFIG_PWM3 = 14, + 
MAX77693_HAPTIC_REG_CONFIG_PWM4 = 15, + MAX77693_HAPTIC_REG_REV = 16, + MAX77693_HAPTIC_REG_END = 17, +}; + +enum max77843_sys_reg { + MAX77843_SYS_REG_PMICID = 0, + MAX77843_SYS_REG_PMICREV = 1, + MAX77843_SYS_REG_MAINCTRL1 = 2, + MAX77843_SYS_REG_INTSRC = 34, + MAX77843_SYS_REG_INTSRCMASK = 35, + MAX77843_SYS_REG_SYSINTSRC = 36, + MAX77843_SYS_REG_SYSINTMASK = 38, + MAX77843_SYS_REG_TOPSYS_STAT = 40, + MAX77843_SYS_REG_SAFEOUTCTRL = 198, + MAX77843_SYS_REG_END = 199, +}; + +enum max77843_charger_reg { + MAX77843_CHG_REG_CHG_INT = 176, + MAX77843_CHG_REG_CHG_INT_MASK = 177, + MAX77843_CHG_REG_CHG_INT_OK = 178, + MAX77843_CHG_REG_CHG_DTLS_00 = 179, + MAX77843_CHG_REG_CHG_DTLS_01 = 180, + MAX77843_CHG_REG_CHG_DTLS_02 = 181, + MAX77843_CHG_REG_CHG_CNFG_00 = 183, + MAX77843_CHG_REG_CHG_CNFG_01 = 184, + MAX77843_CHG_REG_CHG_CNFG_02 = 185, + MAX77843_CHG_REG_CHG_CNFG_03 = 186, + MAX77843_CHG_REG_CHG_CNFG_04 = 187, + MAX77843_CHG_REG_CHG_CNFG_06 = 189, + MAX77843_CHG_REG_CHG_CNFG_07 = 190, + MAX77843_CHG_REG_CHG_CNFG_09 = 192, + MAX77843_CHG_REG_CHG_CNFG_10 = 193, + MAX77843_CHG_REG_CHG_CNFG_11 = 194, + MAX77843_CHG_REG_CHG_CNFG_12 = 195, + MAX77843_CHG_REG_END = 196, +}; + +enum { + MAX8925_IRQ_VCHG_DC_OVP = 0, + MAX8925_IRQ_VCHG_DC_F = 1, + MAX8925_IRQ_VCHG_DC_R = 2, + MAX8925_IRQ_VCHG_THM_OK_R = 3, + MAX8925_IRQ_VCHG_THM_OK_F = 4, + MAX8925_IRQ_VCHG_SYSLOW_F = 5, + MAX8925_IRQ_VCHG_SYSLOW_R = 6, + MAX8925_IRQ_VCHG_RST = 7, + MAX8925_IRQ_VCHG_DONE = 8, + MAX8925_IRQ_VCHG_TOPOFF = 9, + MAX8925_IRQ_VCHG_TMR_FAULT = 10, + MAX8925_IRQ_GPM_RSTIN = 11, + MAX8925_IRQ_GPM_MPL = 12, + MAX8925_IRQ_GPM_SW_3SEC = 13, + MAX8925_IRQ_GPM_EXTON_F = 14, + MAX8925_IRQ_GPM_EXTON_R = 15, + MAX8925_IRQ_GPM_SW_1SEC = 16, + MAX8925_IRQ_GPM_SW_F = 17, + MAX8925_IRQ_GPM_SW_R = 18, + MAX8925_IRQ_GPM_SYSCKEN_F = 19, + MAX8925_IRQ_GPM_SYSCKEN_R = 20, + MAX8925_IRQ_RTC_ALARM1 = 21, + MAX8925_IRQ_RTC_ALARM0 = 22, + MAX8925_IRQ_TSC_STICK = 23, + MAX8925_IRQ_TSC_NSTICK = 24, + MAX8925_NR_IRQS = 25, +}; + +struct max8925_chip { + struct device *dev; + struct i2c_client *i2c; + struct i2c_client *adc; + struct i2c_client *rtc; + struct mutex io_lock; + struct mutex irq_lock; + int irq_base; + int core_irq; + int tsc_irq; + unsigned int wakeup_flag; +}; + +struct max8925_backlight_pdata { + int lxw_scl; + int lxw_freq; + int dual_string; +}; + +struct max8925_touch_pdata { + unsigned int flags; +}; + +struct max8925_power_pdata { + int (*set_charger)(int); + unsigned int batt_detect: 1; + unsigned int topoff_threshold: 2; + unsigned int fast_charge: 3; + unsigned int no_temp_support: 1; + unsigned int no_insert_detect: 1; + char **supplied_to; + int num_supplicants; +}; + +struct max8925_platform_data { + struct max8925_backlight_pdata *backlight; + struct max8925_touch_pdata *touch; + struct max8925_power_pdata *power; + struct regulator_init_data *sd1; + struct regulator_init_data *sd2; + struct regulator_init_data *sd3; + struct regulator_init_data *ldo1; + struct regulator_init_data *ldo2; + struct regulator_init_data *ldo3; + struct regulator_init_data *ldo4; + struct regulator_init_data *ldo5; + struct regulator_init_data *ldo6; + struct regulator_init_data *ldo7; + struct regulator_init_data *ldo8; + struct regulator_init_data *ldo9; + struct regulator_init_data *ldo10; + struct regulator_init_data *ldo11; + struct regulator_init_data *ldo12; + struct regulator_init_data *ldo13; + struct regulator_init_data *ldo14; + struct regulator_init_data *ldo15; + struct regulator_init_data *ldo16; + struct regulator_init_data 
*ldo17; + struct regulator_init_data *ldo18; + struct regulator_init_data *ldo19; + struct regulator_init_data *ldo20; + int irq_base; + int tsc_irq; +}; + +enum { + FLAGS_ADC = 1, + FLAGS_RTC = 2, +}; + +struct max8925_irq_data { + int reg; + int mask_reg; + int enable; + int offs; + int flags; + int tsc_irq; +}; + +struct max8997_regulator_data { + int id; + struct regulator_init_data *initdata; + struct device_node *reg_node; +}; + +struct max8997_muic_reg_data { + u8 addr; + u8 data; +}; + +struct max8997_muic_platform_data { + struct max8997_muic_reg_data *init_data; + int num_init_data; + int detcable_delay_ms; + int path_usb; + int path_uart; +}; + +enum max8997_haptic_motor_type { + MAX8997_HAPTIC_ERM = 0, + MAX8997_HAPTIC_LRA = 1, +}; + +enum max8997_haptic_pulse_mode { + MAX8997_EXTERNAL_MODE = 0, + MAX8997_INTERNAL_MODE = 1, +}; + +enum max8997_haptic_pwm_divisor { + MAX8997_PWM_DIVISOR_32 = 0, + MAX8997_PWM_DIVISOR_64 = 1, + MAX8997_PWM_DIVISOR_128 = 2, + MAX8997_PWM_DIVISOR_256 = 3, +}; + +struct max8997_haptic_platform_data { + unsigned int pwm_channel_id; + unsigned int pwm_period; + enum max8997_haptic_motor_type type; + enum max8997_haptic_pulse_mode mode; + enum max8997_haptic_pwm_divisor pwm_divisor; + unsigned int internal_mode_pattern; + unsigned int pattern_cycle; + unsigned int pattern_signal_period; +}; + +enum max8997_led_mode { + MAX8997_NONE = 0, + MAX8997_FLASH_MODE = 1, + MAX8997_MOVIE_MODE = 2, + MAX8997_FLASH_PIN_CONTROL_MODE = 3, + MAX8997_MOVIE_PIN_CONTROL_MODE = 4, +}; + +struct max8997_led_platform_data { + enum max8997_led_mode mode[2]; + u8 brightness[2]; +}; + +struct max8997_platform_data { + int ono; + struct max8997_regulator_data *regulators; + int num_regulators; + bool ignore_gpiodvs_side_effect; + int buck125_gpios[3]; + int buck125_default_idx; + unsigned int buck1_voltage[8]; + bool buck1_gpiodvs; + unsigned int buck2_voltage[8]; + bool buck2_gpiodvs; + unsigned int buck5_voltage[8]; + bool buck5_gpiodvs; + int eoc_mA; + int timeout; + struct max8997_muic_platform_data *muic_pdata; + struct max8997_haptic_platform_data *haptic_pdata; + struct max8997_led_platform_data *led_pdata; +}; + +enum max8997_pmic_reg { + MAX8997_REG_PMIC_ID0 = 0, + MAX8997_REG_PMIC_ID1 = 1, + MAX8997_REG_INTSRC = 2, + MAX8997_REG_INT1 = 3, + MAX8997_REG_INT2 = 4, + MAX8997_REG_INT3 = 5, + MAX8997_REG_INT4 = 6, + MAX8997_REG_INT1MSK = 8, + MAX8997_REG_INT2MSK = 9, + MAX8997_REG_INT3MSK = 10, + MAX8997_REG_INT4MSK = 11, + MAX8997_REG_STATUS1 = 13, + MAX8997_REG_STATUS2 = 14, + MAX8997_REG_STATUS3 = 15, + MAX8997_REG_STATUS4 = 16, + MAX8997_REG_MAINCON1 = 19, + MAX8997_REG_MAINCON2 = 20, + MAX8997_REG_BUCKRAMP = 21, + MAX8997_REG_BUCK1CTRL = 24, + MAX8997_REG_BUCK1DVS1 = 25, + MAX8997_REG_BUCK1DVS2 = 26, + MAX8997_REG_BUCK1DVS3 = 27, + MAX8997_REG_BUCK1DVS4 = 28, + MAX8997_REG_BUCK1DVS5 = 29, + MAX8997_REG_BUCK1DVS6 = 30, + MAX8997_REG_BUCK1DVS7 = 31, + MAX8997_REG_BUCK1DVS8 = 32, + MAX8997_REG_BUCK2CTRL = 33, + MAX8997_REG_BUCK2DVS1 = 34, + MAX8997_REG_BUCK2DVS2 = 35, + MAX8997_REG_BUCK2DVS3 = 36, + MAX8997_REG_BUCK2DVS4 = 37, + MAX8997_REG_BUCK2DVS5 = 38, + MAX8997_REG_BUCK2DVS6 = 39, + MAX8997_REG_BUCK2DVS7 = 40, + MAX8997_REG_BUCK2DVS8 = 41, + MAX8997_REG_BUCK3CTRL = 42, + MAX8997_REG_BUCK3DVS = 43, + MAX8997_REG_BUCK4CTRL = 44, + MAX8997_REG_BUCK4DVS = 45, + MAX8997_REG_BUCK5CTRL = 46, + MAX8997_REG_BUCK5DVS1 = 47, + MAX8997_REG_BUCK5DVS2 = 48, + MAX8997_REG_BUCK5DVS3 = 49, + MAX8997_REG_BUCK5DVS4 = 50, + MAX8997_REG_BUCK5DVS5 = 51, + MAX8997_REG_BUCK5DVS6 = 52, + 
MAX8997_REG_BUCK5DVS7 = 53, + MAX8997_REG_BUCK5DVS8 = 54, + MAX8997_REG_BUCK6CTRL = 55, + MAX8997_REG_BUCK6BPSKIPCTRL = 56, + MAX8997_REG_BUCK7CTRL = 57, + MAX8997_REG_BUCK7DVS = 58, + MAX8997_REG_LDO1CTRL = 59, + MAX8997_REG_LDO2CTRL = 60, + MAX8997_REG_LDO3CTRL = 61, + MAX8997_REG_LDO4CTRL = 62, + MAX8997_REG_LDO5CTRL = 63, + MAX8997_REG_LDO6CTRL = 64, + MAX8997_REG_LDO7CTRL = 65, + MAX8997_REG_LDO8CTRL = 66, + MAX8997_REG_LDO9CTRL = 67, + MAX8997_REG_LDO10CTRL = 68, + MAX8997_REG_LDO11CTRL = 69, + MAX8997_REG_LDO12CTRL = 70, + MAX8997_REG_LDO13CTRL = 71, + MAX8997_REG_LDO14CTRL = 72, + MAX8997_REG_LDO15CTRL = 73, + MAX8997_REG_LDO16CTRL = 74, + MAX8997_REG_LDO17CTRL = 75, + MAX8997_REG_LDO18CTRL = 76, + MAX8997_REG_LDO21CTRL = 77, + MAX8997_REG_MBCCTRL1 = 80, + MAX8997_REG_MBCCTRL2 = 81, + MAX8997_REG_MBCCTRL3 = 82, + MAX8997_REG_MBCCTRL4 = 83, + MAX8997_REG_MBCCTRL5 = 84, + MAX8997_REG_MBCCTRL6 = 85, + MAX8997_REG_OTPCGHCVS = 86, + MAX8997_REG_SAFEOUTCTRL = 90, + MAX8997_REG_LBCNFG1 = 94, + MAX8997_REG_LBCNFG2 = 95, + MAX8997_REG_BBCCTRL = 96, + MAX8997_REG_FLASH1_CUR = 99, + MAX8997_REG_FLASH2_CUR = 100, + MAX8997_REG_MOVIE_CUR = 101, + MAX8997_REG_GSMB_CUR = 102, + MAX8997_REG_BOOST_CNTL = 103, + MAX8997_REG_LEN_CNTL = 104, + MAX8997_REG_FLASH_CNTL = 105, + MAX8997_REG_WDT_CNTL = 106, + MAX8997_REG_MAXFLASH1 = 107, + MAX8997_REG_MAXFLASH2 = 108, + MAX8997_REG_FLASHSTATUS = 109, + MAX8997_REG_FLASHSTATUSMASK = 110, + MAX8997_REG_GPIOCNTL1 = 112, + MAX8997_REG_GPIOCNTL2 = 113, + MAX8997_REG_GPIOCNTL3 = 114, + MAX8997_REG_GPIOCNTL4 = 115, + MAX8997_REG_GPIOCNTL5 = 116, + MAX8997_REG_GPIOCNTL6 = 117, + MAX8997_REG_GPIOCNTL7 = 118, + MAX8997_REG_GPIOCNTL8 = 119, + MAX8997_REG_GPIOCNTL9 = 120, + MAX8997_REG_GPIOCNTL10 = 121, + MAX8997_REG_GPIOCNTL11 = 122, + MAX8997_REG_GPIOCNTL12 = 123, + MAX8997_REG_LDO1CONFIG = 128, + MAX8997_REG_LDO2CONFIG = 129, + MAX8997_REG_LDO3CONFIG = 130, + MAX8997_REG_LDO4CONFIG = 131, + MAX8997_REG_LDO5CONFIG = 132, + MAX8997_REG_LDO6CONFIG = 133, + MAX8997_REG_LDO7CONFIG = 134, + MAX8997_REG_LDO8CONFIG = 135, + MAX8997_REG_LDO9CONFIG = 136, + MAX8997_REG_LDO10CONFIG = 137, + MAX8997_REG_LDO11CONFIG = 138, + MAX8997_REG_LDO12CONFIG = 139, + MAX8997_REG_LDO13CONFIG = 140, + MAX8997_REG_LDO14CONFIG = 141, + MAX8997_REG_LDO15CONFIG = 142, + MAX8997_REG_LDO16CONFIG = 143, + MAX8997_REG_LDO17CONFIG = 144, + MAX8997_REG_LDO18CONFIG = 145, + MAX8997_REG_LDO21CONFIG = 146, + MAX8997_REG_DVSOKTIMER1 = 151, + MAX8997_REG_DVSOKTIMER2 = 152, + MAX8997_REG_DVSOKTIMER4 = 153, + MAX8997_REG_DVSOKTIMER5 = 154, + MAX8997_REG_PMIC_END = 155, +}; + +enum max8997_muic_reg { + MAX8997_MUIC_REG_ID = 0, + MAX8997_MUIC_REG_INT1 = 1, + MAX8997_MUIC_REG_INT2 = 2, + MAX8997_MUIC_REG_INT3 = 3, + MAX8997_MUIC_REG_STATUS1 = 4, + MAX8997_MUIC_REG_STATUS2 = 5, + MAX8997_MUIC_REG_STATUS3 = 6, + MAX8997_MUIC_REG_INTMASK1 = 7, + MAX8997_MUIC_REG_INTMASK2 = 8, + MAX8997_MUIC_REG_INTMASK3 = 9, + MAX8997_MUIC_REG_CDETCTRL = 10, + MAX8997_MUIC_REG_CONTROL1 = 12, + MAX8997_MUIC_REG_CONTROL2 = 13, + MAX8997_MUIC_REG_CONTROL3 = 14, + MAX8997_MUIC_REG_END = 15, +}; + +enum max8997_haptic_reg { + MAX8997_HAPTIC_REG_GENERAL = 0, + MAX8997_HAPTIC_REG_CONF1 = 1, + MAX8997_HAPTIC_REG_CONF2 = 2, + MAX8997_HAPTIC_REG_DRVCONF = 3, + MAX8997_HAPTIC_REG_CYCLECONF1 = 4, + MAX8997_HAPTIC_REG_CYCLECONF2 = 5, + MAX8997_HAPTIC_REG_SIGCONF1 = 6, + MAX8997_HAPTIC_REG_SIGCONF2 = 7, + MAX8997_HAPTIC_REG_SIGCONF3 = 8, + MAX8997_HAPTIC_REG_SIGCONF4 = 9, + MAX8997_HAPTIC_REG_SIGDC1 = 10, + MAX8997_HAPTIC_REG_SIGDC2 = 11, + 
MAX8997_HAPTIC_REG_SIGPWMDC1 = 12, + MAX8997_HAPTIC_REG_SIGPWMDC2 = 13, + MAX8997_HAPTIC_REG_SIGPWMDC3 = 14, + MAX8997_HAPTIC_REG_SIGPWMDC4 = 15, + MAX8997_HAPTIC_REG_MTR_REV = 16, + MAX8997_HAPTIC_REG_END = 17, +}; + +enum max8997_irq_source { + PMIC_INT1 = 0, + PMIC_INT2 = 1, + PMIC_INT3 = 2, + PMIC_INT4 = 3, + FUEL_GAUGE = 4, + MUIC_INT1 = 5, + MUIC_INT2 = 6, + MUIC_INT3 = 7, + GPIO_LOW = 8, + GPIO_HI = 9, + FLASH_STATUS = 10, + MAX8997_IRQ_GROUP_NR = 11, +}; + +struct max8997_dev { + struct device *dev; + struct max8997_platform_data *pdata; + struct i2c_client *i2c; + struct i2c_client *rtc; + struct i2c_client *haptic; + struct i2c_client *muic; + struct mutex iolock; + long unsigned int type; + struct platform_device *battery; + int irq; + int ono; + struct irq_domain *irq_domain; + struct mutex irqlock; + int irq_masks_cur[11]; + int irq_masks_cache[11]; + u8 reg_dump[187]; + bool gpio_status[12]; +}; + +enum max8997_types { + TYPE_MAX8997 = 0, + TYPE_MAX8966 = 1, +}; + +enum max8997_irq { + MAX8997_PMICIRQ_PWRONR = 0, + MAX8997_PMICIRQ_PWRONF = 1, + MAX8997_PMICIRQ_PWRON1SEC = 2, + MAX8997_PMICIRQ_JIGONR = 3, + MAX8997_PMICIRQ_JIGONF = 4, + MAX8997_PMICIRQ_LOWBAT2 = 5, + MAX8997_PMICIRQ_LOWBAT1 = 6, + MAX8997_PMICIRQ_JIGR = 7, + MAX8997_PMICIRQ_JIGF = 8, + MAX8997_PMICIRQ_MR = 9, + MAX8997_PMICIRQ_DVS1OK = 10, + MAX8997_PMICIRQ_DVS2OK = 11, + MAX8997_PMICIRQ_DVS3OK = 12, + MAX8997_PMICIRQ_DVS4OK = 13, + MAX8997_PMICIRQ_CHGINS = 14, + MAX8997_PMICIRQ_CHGRM = 15, + MAX8997_PMICIRQ_DCINOVP = 16, + MAX8997_PMICIRQ_TOPOFFR = 17, + MAX8997_PMICIRQ_CHGRSTF = 18, + MAX8997_PMICIRQ_MBCHGTMEXPD = 19, + MAX8997_PMICIRQ_RTC60S = 20, + MAX8997_PMICIRQ_RTCA1 = 21, + MAX8997_PMICIRQ_RTCA2 = 22, + MAX8997_PMICIRQ_SMPL_INT = 23, + MAX8997_PMICIRQ_RTC1S = 24, + MAX8997_PMICIRQ_WTSR = 25, + MAX8997_MUICIRQ_ADCError = 26, + MAX8997_MUICIRQ_ADCLow = 27, + MAX8997_MUICIRQ_ADC = 28, + MAX8997_MUICIRQ_VBVolt = 29, + MAX8997_MUICIRQ_DBChg = 30, + MAX8997_MUICIRQ_DCDTmr = 31, + MAX8997_MUICIRQ_ChgDetRun = 32, + MAX8997_MUICIRQ_ChgTyp = 33, + MAX8997_MUICIRQ_OVP = 34, + MAX8997_IRQ_NR = 35, +}; + +struct max8997_irq_data { + int mask; + enum max8997_irq_source group; +}; + +struct max8998_regulator_data { + int id; + struct regulator_init_data *initdata; + struct device_node *reg_node; +}; + +struct max8998_platform_data { + struct max8998_regulator_data *regulators; + int num_regulators; + unsigned int irq_base; + int ono; + bool buck_voltage_lock; + int buck1_voltage[4]; + int buck2_voltage[2]; + int buck1_set1; + int buck1_set2; + int buck1_default_idx; + int buck2_set3; + int buck2_default_idx; + bool wakeup; + bool rtc_delay; + int eoc; + int restart; + int timeout; +}; + +enum { + MAX8998_REG_IRQ1 = 0, + MAX8998_REG_IRQ2 = 1, + MAX8998_REG_IRQ3 = 2, + MAX8998_REG_IRQ4 = 3, + MAX8998_REG_IRQM1 = 4, + MAX8998_REG_IRQM2 = 5, + MAX8998_REG_IRQM3 = 6, + MAX8998_REG_IRQM4 = 7, + MAX8998_REG_STATUS1 = 8, + MAX8998_REG_STATUS2 = 9, + MAX8998_REG_STATUSM1 = 10, + MAX8998_REG_STATUSM2 = 11, + MAX8998_REG_CHGR1 = 12, + MAX8998_REG_CHGR2 = 13, + MAX8998_REG_LDO_ACTIVE_DISCHARGE1 = 14, + MAX8998_REG_LDO_ACTIVE_DISCHARGE2 = 15, + MAX8998_REG_BUCK_ACTIVE_DISCHARGE3 = 16, + MAX8998_REG_ONOFF1 = 17, + MAX8998_REG_ONOFF2 = 18, + MAX8998_REG_ONOFF3 = 19, + MAX8998_REG_ONOFF4 = 20, + MAX8998_REG_BUCK1_VOLTAGE1 = 21, + MAX8998_REG_BUCK1_VOLTAGE2 = 22, + MAX8998_REG_BUCK1_VOLTAGE3 = 23, + MAX8998_REG_BUCK1_VOLTAGE4 = 24, + MAX8998_REG_BUCK2_VOLTAGE1 = 25, + MAX8998_REG_BUCK2_VOLTAGE2 = 26, + MAX8998_REG_BUCK3 = 27, + 
MAX8998_REG_BUCK4 = 28, + MAX8998_REG_LDO2_LDO3 = 29, + MAX8998_REG_LDO4 = 30, + MAX8998_REG_LDO5 = 31, + MAX8998_REG_LDO6 = 32, + MAX8998_REG_LDO7 = 33, + MAX8998_REG_LDO8_LDO9 = 34, + MAX8998_REG_LDO10_LDO11 = 35, + MAX8998_REG_LDO12 = 36, + MAX8998_REG_LDO13 = 37, + MAX8998_REG_LDO14 = 38, + MAX8998_REG_LDO15 = 39, + MAX8998_REG_LDO16 = 40, + MAX8998_REG_LDO17 = 41, + MAX8998_REG_BKCHR = 42, + MAX8998_REG_LBCNFG1 = 43, + MAX8998_REG_LBCNFG2 = 44, +}; + +enum { + TYPE_MAX8998 = 0, + TYPE_LP3974 = 1, + TYPE_LP3979 = 2, +}; + +struct max8998_dev { + struct device *dev; + struct max8998_platform_data *pdata; + struct i2c_client *i2c; + struct i2c_client *rtc; + struct mutex iolock; + struct mutex irqlock; + unsigned int irq_base; + struct irq_domain *irq_domain; + int irq; + int ono; + u8 irq_masks_cur[4]; + u8 irq_masks_cache[4]; + long unsigned int type; + bool wakeup; +}; + +struct max8998_reg_dump { + u8 addr; + u8 val; +}; + +enum { + MAX8998_IRQ_DCINF = 0, + MAX8998_IRQ_DCINR = 1, + MAX8998_IRQ_JIGF = 2, + MAX8998_IRQ_JIGR = 3, + MAX8998_IRQ_PWRONF = 4, + MAX8998_IRQ_PWRONR = 5, + MAX8998_IRQ_WTSREVNT = 6, + MAX8998_IRQ_SMPLEVNT = 7, + MAX8998_IRQ_ALARM1 = 8, + MAX8998_IRQ_ALARM0 = 9, + MAX8998_IRQ_ONKEY1S = 10, + MAX8998_IRQ_TOPOFFR = 11, + MAX8998_IRQ_DCINOVPR = 12, + MAX8998_IRQ_CHGRSTF = 13, + MAX8998_IRQ_DONER = 14, + MAX8998_IRQ_CHGFAULT = 15, + MAX8998_IRQ_LOBAT1 = 16, + MAX8998_IRQ_LOBAT2 = 17, + MAX8998_IRQ_NR = 18, +}; + +struct max8998_irq_data { + int reg; + int mask; +}; + +struct max8997_dev___2; + +struct adp5520_gpio_platform_data { + unsigned int gpio_start; + u8 gpio_en_mask; + u8 gpio_pullup_mask; +}; + +struct adp5520_keys_platform_data { + int rows_en_mask; + int cols_en_mask; + const short unsigned int *keymap; + short unsigned int keymapsize; + unsigned int repeat: 1; +}; + +struct led_info; + +struct adp5520_leds_platform_data { + int num_leds; + struct led_info *leds; + u8 fade_in; + u8 fade_out; + u8 led_on_time; +}; + +struct adp5520_backlight_platform_data { + u8 fade_in; + u8 fade_out; + u8 fade_led_law; + u8 en_ambl_sens; + u8 abml_filt; + u8 l1_daylight_max; + u8 l1_daylight_dim; + u8 l2_office_max; + u8 l2_office_dim; + u8 l3_dark_max; + u8 l3_dark_dim; + u8 l2_trip; + u8 l2_hyst; + u8 l3_trip; + u8 l3_hyst; +}; + +struct adp5520_platform_data { + struct adp5520_keys_platform_data *keys; + struct adp5520_gpio_platform_data *gpio; + struct adp5520_leds_platform_data *leds; + struct adp5520_backlight_platform_data *backlight; +}; + +struct adp5520_chip { + struct i2c_client *client; + struct device *dev; + struct mutex lock; + struct blocking_notifier_head notifier_list; + int irq; + long unsigned int id; + uint8_t mode; +}; + +struct tps6586x_irq_data { + u8 mask_reg; + u8 mask_mask; +}; + +struct tps6586x { + struct device *dev; + struct i2c_client *client; + struct regmap *regmap; + int version; + int irq; + struct irq_chip irq_chip; + struct mutex irq_lock; + int irq_base; + u32 irq_en; + u8 mask_reg[5]; + struct irq_domain *irq_domain; +}; + +enum { + TPS65090_IRQ_INTERRUPT = 0, + TPS65090_IRQ_VAC_STATUS_CHANGE = 1, + TPS65090_IRQ_VSYS_STATUS_CHANGE = 2, + TPS65090_IRQ_BAT_STATUS_CHANGE = 3, + TPS65090_IRQ_CHARGING_STATUS_CHANGE = 4, + TPS65090_IRQ_CHARGING_COMPLETE = 5, + TPS65090_IRQ_OVERLOAD_DCDC1 = 6, + TPS65090_IRQ_OVERLOAD_DCDC2 = 7, + TPS65090_IRQ_OVERLOAD_DCDC3 = 8, + TPS65090_IRQ_OVERLOAD_FET1 = 9, + TPS65090_IRQ_OVERLOAD_FET2 = 10, + TPS65090_IRQ_OVERLOAD_FET3 = 11, + TPS65090_IRQ_OVERLOAD_FET4 = 12, + TPS65090_IRQ_OVERLOAD_FET5 = 13, + 
TPS65090_IRQ_OVERLOAD_FET6 = 14, + TPS65090_IRQ_OVERLOAD_FET7 = 15, +}; + +enum { + TPS65090_REGULATOR_DCDC1 = 0, + TPS65090_REGULATOR_DCDC2 = 1, + TPS65090_REGULATOR_DCDC3 = 2, + TPS65090_REGULATOR_FET1 = 3, + TPS65090_REGULATOR_FET2 = 4, + TPS65090_REGULATOR_FET3 = 5, + TPS65090_REGULATOR_FET4 = 6, + TPS65090_REGULATOR_FET5 = 7, + TPS65090_REGULATOR_FET6 = 8, + TPS65090_REGULATOR_FET7 = 9, + TPS65090_REGULATOR_LDO1 = 10, + TPS65090_REGULATOR_LDO2 = 11, + TPS65090_REGULATOR_MAX = 12, +}; + +struct tps65090 { + struct device *dev; + struct regmap *rmap; + struct regmap_irq_chip_data *irq_data; +}; + +struct tps65090_regulator_plat_data { + struct regulator_init_data *reg_init_data; + bool enable_ext_control; + struct gpio_desc *gpiod; + bool overcurrent_wait_valid; + int overcurrent_wait; +}; + +struct tps65090_platform_data { + int irq_base; + char **supplied_to; + size_t num_supplicants; + int enable_low_current_chrg; + struct tps65090_regulator_plat_data *reg_pdata[12]; +}; + +enum tps65090_cells { + PMIC = 0, + CHARGER = 1, +}; + +enum aat2870_id { + AAT2870_ID_BL = 0, + AAT2870_ID_LDOA = 1, + AAT2870_ID_LDOB = 2, + AAT2870_ID_LDOC = 3, + AAT2870_ID_LDOD = 4, +}; + +struct aat2870_register { + bool readable; + bool writeable; + u8 value; +}; + +struct aat2870_data { + struct device *dev; + struct i2c_client *client; + struct mutex io_lock; + struct aat2870_register *reg_cache; + int en_pin; + bool is_enable; + int (*init)(struct aat2870_data *); + void (*uninit)(struct aat2870_data *); + int (*read)(struct aat2870_data *, u8, u8 *); + int (*write)(struct aat2870_data *, u8, u8); + int (*update)(struct aat2870_data *, u8, u8, u8); + struct dentry *dentry_root; +}; + +struct aat2870_subdev_info { + int id; + const char *name; + void *platform_data; +}; + +struct aat2870_platform_data { + int en_pin; + struct aat2870_subdev_info *subdevs; + int num_subdevs; + int (*init)(struct aat2870_data *); + void (*uninit)(struct aat2870_data *); +}; + +enum { + PALMAS_EXT_CONTROL_ENABLE1 = 1, + PALMAS_EXT_CONTROL_ENABLE2 = 2, + PALMAS_EXT_CONTROL_NSLEEP = 4, +}; + +enum palmas_external_requestor_id { + PALMAS_EXTERNAL_REQSTR_ID_REGEN1 = 0, + PALMAS_EXTERNAL_REQSTR_ID_REGEN2 = 1, + PALMAS_EXTERNAL_REQSTR_ID_SYSEN1 = 2, + PALMAS_EXTERNAL_REQSTR_ID_SYSEN2 = 3, + PALMAS_EXTERNAL_REQSTR_ID_CLK32KG = 4, + PALMAS_EXTERNAL_REQSTR_ID_CLK32KGAUDIO = 5, + PALMAS_EXTERNAL_REQSTR_ID_REGEN3 = 6, + PALMAS_EXTERNAL_REQSTR_ID_SMPS12 = 7, + PALMAS_EXTERNAL_REQSTR_ID_SMPS3 = 8, + PALMAS_EXTERNAL_REQSTR_ID_SMPS45 = 9, + PALMAS_EXTERNAL_REQSTR_ID_SMPS6 = 10, + PALMAS_EXTERNAL_REQSTR_ID_SMPS7 = 11, + PALMAS_EXTERNAL_REQSTR_ID_SMPS8 = 12, + PALMAS_EXTERNAL_REQSTR_ID_SMPS9 = 13, + PALMAS_EXTERNAL_REQSTR_ID_SMPS10 = 14, + PALMAS_EXTERNAL_REQSTR_ID_LDO1 = 15, + PALMAS_EXTERNAL_REQSTR_ID_LDO2 = 16, + PALMAS_EXTERNAL_REQSTR_ID_LDO3 = 17, + PALMAS_EXTERNAL_REQSTR_ID_LDO4 = 18, + PALMAS_EXTERNAL_REQSTR_ID_LDO5 = 19, + PALMAS_EXTERNAL_REQSTR_ID_LDO6 = 20, + PALMAS_EXTERNAL_REQSTR_ID_LDO7 = 21, + PALMAS_EXTERNAL_REQSTR_ID_LDO8 = 22, + PALMAS_EXTERNAL_REQSTR_ID_LDO9 = 23, + PALMAS_EXTERNAL_REQSTR_ID_LDOLN = 24, + PALMAS_EXTERNAL_REQSTR_ID_LDOUSB = 25, + PALMAS_EXTERNAL_REQSTR_ID_MAX = 26, +}; + +enum tps65917_irqs { + TPS65917_RESERVED1 = 0, + TPS65917_PWRON_IRQ = 1, + TPS65917_LONG_PRESS_KEY_IRQ = 2, + TPS65917_RESERVED2 = 3, + TPS65917_PWRDOWN_IRQ = 4, + TPS65917_HOTDIE_IRQ = 5, + TPS65917_VSYS_MON_IRQ = 6, + TPS65917_RESERVED3 = 7, + TPS65917_RESERVED4 = 8, + TPS65917_OTP_ERROR_IRQ = 9, + TPS65917_WDT_IRQ = 10, + 
TPS65917_RESERVED5 = 11, + TPS65917_RESET_IN_IRQ = 12, + TPS65917_FSD_IRQ = 13, + TPS65917_SHORT_IRQ = 14, + TPS65917_RESERVED6 = 15, + TPS65917_GPADC_AUTO_0_IRQ = 16, + TPS65917_GPADC_AUTO_1_IRQ = 17, + TPS65917_GPADC_EOC_SW_IRQ = 18, + TPS65917_RESREVED6 = 19, + TPS65917_RESERVED7 = 20, + TPS65917_RESERVED8 = 21, + TPS65917_RESERVED9 = 22, + TPS65917_VBUS_IRQ = 23, + TPS65917_GPIO_0_IRQ = 24, + TPS65917_GPIO_1_IRQ = 25, + TPS65917_GPIO_2_IRQ = 26, + TPS65917_GPIO_3_IRQ = 27, + TPS65917_GPIO_4_IRQ = 28, + TPS65917_GPIO_5_IRQ = 29, + TPS65917_GPIO_6_IRQ = 30, + TPS65917_RESERVED10 = 31, + TPS65917_NUM_IRQ = 32, +}; + +struct palmas_driver_data { + unsigned int *features; + struct regmap_irq_chip *irq_chip; +}; + +enum { + RC5T583_DS_NONE = 0, + RC5T583_DS_DC0 = 1, + RC5T583_DS_DC1 = 2, + RC5T583_DS_DC2 = 3, + RC5T583_DS_DC3 = 4, + RC5T583_DS_LDO0 = 5, + RC5T583_DS_LDO1 = 6, + RC5T583_DS_LDO2 = 7, + RC5T583_DS_LDO3 = 8, + RC5T583_DS_LDO4 = 9, + RC5T583_DS_LDO5 = 10, + RC5T583_DS_LDO6 = 11, + RC5T583_DS_LDO7 = 12, + RC5T583_DS_LDO8 = 13, + RC5T583_DS_LDO9 = 14, + RC5T583_DS_PSO0 = 15, + RC5T583_DS_PSO1 = 16, + RC5T583_DS_PSO2 = 17, + RC5T583_DS_PSO3 = 18, + RC5T583_DS_PSO4 = 19, + RC5T583_DS_PSO5 = 20, + RC5T583_DS_PSO6 = 21, + RC5T583_DS_PSO7 = 22, + RC5T583_DS_MAX = 23, +}; + +enum { + RC5T583_EXT_PWRREQ1_CONTROL = 1, + RC5T583_EXT_PWRREQ2_CONTROL = 2, +}; + +struct deepsleep_control_data { + u8 reg_add; + u8 ds_pos_bit; +}; + +enum int_type { + SYS_INT = 1, + DCDC_INT = 2, + RTC_INT = 4, + ADC_INT = 8, + GPIO_INT = 16, +}; + +struct rc5t583_irq_data { + u8 int_type; + u8 master_bit; + u8 int_en_bit; + u8 mask_reg_index; + int grp_index; +}; + +enum sec_device_type { + S5M8751X = 0, + S5M8763X = 1, + S5M8767X = 2, + S2MPA01 = 3, + S2MPS11X = 4, + S2MPS13X = 5, + S2MPS14X = 6, + S2MPS15X = 7, + S2MPU02 = 8, +}; + +struct sec_platform_data; + +struct sec_pmic_dev { + struct device *dev; + struct sec_platform_data *pdata; + struct regmap *regmap_pmic; + struct i2c_client *i2c; + long unsigned int device_type; + int irq_base; + int irq; + struct regmap_irq_chip_data *irq_data; + bool wakeup; +}; + +struct sec_regulator_data; + +struct sec_opmode_data; + +struct sec_platform_data { + struct sec_regulator_data *regulators; + struct sec_opmode_data *opmode; + int device_type; + int num_regulators; + int irq_base; + int (*cfg_pmic_irq)(); + bool wakeup; + bool buck_voltage_lock; + int buck_gpios[3]; + int buck_ds[3]; + unsigned int buck2_voltage[8]; + bool buck2_gpiodvs; + unsigned int buck3_voltage[8]; + bool buck3_gpiodvs; + unsigned int buck4_voltage[8]; + bool buck4_gpiodvs; + int buck_set1; + int buck_set2; + int buck_set3; + int buck2_enable; + int buck3_enable; + int buck4_enable; + int buck_default_idx; + int buck2_default_idx; + int buck3_default_idx; + int buck4_default_idx; + int buck_ramp_delay; + int buck2_ramp_delay; + int buck34_ramp_delay; + int buck5_ramp_delay; + int buck16_ramp_delay; + int buck7810_ramp_delay; + int buck9_ramp_delay; + int buck24_ramp_delay; + int buck3_ramp_delay; + int buck7_ramp_delay; + int buck8910_ramp_delay; + bool buck1_ramp_enable; + bool buck2_ramp_enable; + bool buck3_ramp_enable; + bool buck4_ramp_enable; + bool buck6_ramp_enable; + int buck2_init; + int buck3_init; + int buck4_init; + bool manual_poweroff; + bool disable_wrstbi; +}; + +struct sec_regulator_data { + int id; + struct regulator_init_data *initdata; + struct device_node *reg_node; + struct gpio_desc *ext_control_gpiod; +}; + +struct sec_opmode_data { + int id; + unsigned int mode; +}; 
+ +enum s2mpa01_reg { + S2MPA01_REG_ID = 0, + S2MPA01_REG_INT1 = 1, + S2MPA01_REG_INT2 = 2, + S2MPA01_REG_INT3 = 3, + S2MPA01_REG_INT1M = 4, + S2MPA01_REG_INT2M = 5, + S2MPA01_REG_INT3M = 6, + S2MPA01_REG_ST1 = 7, + S2MPA01_REG_ST2 = 8, + S2MPA01_REG_PWRONSRC = 9, + S2MPA01_REG_OFFSRC = 10, + S2MPA01_REG_RTC_BUF = 11, + S2MPA01_REG_CTRL1 = 12, + S2MPA01_REG_ETC_TEST = 13, + S2MPA01_REG_RSVD1 = 14, + S2MPA01_REG_BU_CHG = 15, + S2MPA01_REG_RAMP1 = 16, + S2MPA01_REG_RAMP2 = 17, + S2MPA01_REG_LDO_DSCH1 = 18, + S2MPA01_REG_LDO_DSCH2 = 19, + S2MPA01_REG_LDO_DSCH3 = 20, + S2MPA01_REG_LDO_DSCH4 = 21, + S2MPA01_REG_OTP_ADRL = 22, + S2MPA01_REG_OTP_ADRH = 23, + S2MPA01_REG_OTP_DATA = 24, + S2MPA01_REG_MON1SEL = 25, + S2MPA01_REG_MON2SEL = 26, + S2MPA01_REG_LEE = 27, + S2MPA01_REG_RSVD2 = 28, + S2MPA01_REG_RSVD3 = 29, + S2MPA01_REG_RSVD4 = 30, + S2MPA01_REG_RSVD5 = 31, + S2MPA01_REG_RSVD6 = 32, + S2MPA01_REG_TOP_RSVD = 33, + S2MPA01_REG_DVS_SEL = 34, + S2MPA01_REG_DVS_PTR = 35, + S2MPA01_REG_DVS_DATA = 36, + S2MPA01_REG_RSVD_NO = 37, + S2MPA01_REG_UVLO = 38, + S2MPA01_REG_LEE_NO = 39, + S2MPA01_REG_B1CTRL1 = 40, + S2MPA01_REG_B1CTRL2 = 41, + S2MPA01_REG_B2CTRL1 = 42, + S2MPA01_REG_B2CTRL2 = 43, + S2MPA01_REG_B3CTRL1 = 44, + S2MPA01_REG_B3CTRL2 = 45, + S2MPA01_REG_B4CTRL1 = 46, + S2MPA01_REG_B4CTRL2 = 47, + S2MPA01_REG_B5CTRL1 = 48, + S2MPA01_REG_B5CTRL2 = 49, + S2MPA01_REG_B5CTRL3 = 50, + S2MPA01_REG_B5CTRL4 = 51, + S2MPA01_REG_B5CTRL5 = 52, + S2MPA01_REG_B5CTRL6 = 53, + S2MPA01_REG_B6CTRL1 = 54, + S2MPA01_REG_B6CTRL2 = 55, + S2MPA01_REG_B7CTRL1 = 56, + S2MPA01_REG_B7CTRL2 = 57, + S2MPA01_REG_B8CTRL1 = 58, + S2MPA01_REG_B8CTRL2 = 59, + S2MPA01_REG_B9CTRL1 = 60, + S2MPA01_REG_B9CTRL2 = 61, + S2MPA01_REG_B10CTRL1 = 62, + S2MPA01_REG_B10CTRL2 = 63, + S2MPA01_REG_L1CTRL = 64, + S2MPA01_REG_L2CTRL = 65, + S2MPA01_REG_L3CTRL = 66, + S2MPA01_REG_L4CTRL = 67, + S2MPA01_REG_L5CTRL = 68, + S2MPA01_REG_L6CTRL = 69, + S2MPA01_REG_L7CTRL = 70, + S2MPA01_REG_L8CTRL = 71, + S2MPA01_REG_L9CTRL = 72, + S2MPA01_REG_L10CTRL = 73, + S2MPA01_REG_L11CTRL = 74, + S2MPA01_REG_L12CTRL = 75, + S2MPA01_REG_L13CTRL = 76, + S2MPA01_REG_L14CTRL = 77, + S2MPA01_REG_L15CTRL = 78, + S2MPA01_REG_L16CTRL = 79, + S2MPA01_REG_L17CTRL = 80, + S2MPA01_REG_L18CTRL = 81, + S2MPA01_REG_L19CTRL = 82, + S2MPA01_REG_L20CTRL = 83, + S2MPA01_REG_L21CTRL = 84, + S2MPA01_REG_L22CTRL = 85, + S2MPA01_REG_L23CTRL = 86, + S2MPA01_REG_L24CTRL = 87, + S2MPA01_REG_L25CTRL = 88, + S2MPA01_REG_L26CTRL = 89, + S2MPA01_REG_LDO_OVCB1 = 90, + S2MPA01_REG_LDO_OVCB2 = 91, + S2MPA01_REG_LDO_OVCB3 = 92, + S2MPA01_REG_LDO_OVCB4 = 93, +}; + +enum s2mps11_reg { + S2MPS11_REG_ID = 0, + S2MPS11_REG_INT1 = 1, + S2MPS11_REG_INT2 = 2, + S2MPS11_REG_INT3 = 3, + S2MPS11_REG_INT1M = 4, + S2MPS11_REG_INT2M = 5, + S2MPS11_REG_INT3M = 6, + S2MPS11_REG_ST1 = 7, + S2MPS11_REG_ST2 = 8, + S2MPS11_REG_OFFSRC = 9, + S2MPS11_REG_PWRONSRC = 10, + S2MPS11_REG_RTC_CTRL = 11, + S2MPS11_REG_CTRL1 = 12, + S2MPS11_REG_ETC_TEST = 13, + S2MPS11_REG_RSVD3 = 14, + S2MPS11_REG_BU_CHG = 15, + S2MPS11_REG_RAMP = 16, + S2MPS11_REG_RAMP_BUCK = 17, + S2MPS11_REG_LDO1_8 = 18, + S2MPS11_REG_LDO9_16 = 19, + S2MPS11_REG_LDO17_24 = 20, + S2MPS11_REG_LDO25_32 = 21, + S2MPS11_REG_LDO33_38 = 22, + S2MPS11_REG_LDO1_8_1 = 23, + S2MPS11_REG_LDO9_16_1 = 24, + S2MPS11_REG_LDO17_24_1 = 25, + S2MPS11_REG_LDO25_32_1 = 26, + S2MPS11_REG_LDO33_38_1 = 27, + S2MPS11_REG_OTP_ADRL = 28, + S2MPS11_REG_OTP_ADRH = 29, + S2MPS11_REG_OTP_DATA = 30, + S2MPS11_REG_MON1SEL = 31, + S2MPS11_REG_MON2SEL = 32, + S2MPS11_REG_LEE = 33, + 
S2MPS11_REG_RSVD_NO = 34, + S2MPS11_REG_UVLO = 35, + S2MPS11_REG_LEE_NO = 36, + S2MPS11_REG_B1CTRL1 = 37, + S2MPS11_REG_B1CTRL2 = 38, + S2MPS11_REG_B2CTRL1 = 39, + S2MPS11_REG_B2CTRL2 = 40, + S2MPS11_REG_B3CTRL1 = 41, + S2MPS11_REG_B3CTRL2 = 42, + S2MPS11_REG_B4CTRL1 = 43, + S2MPS11_REG_B4CTRL2 = 44, + S2MPS11_REG_B5CTRL1 = 45, + S2MPS11_REG_BUCK5_SW = 46, + S2MPS11_REG_B5CTRL2 = 47, + S2MPS11_REG_B5CTRL3 = 48, + S2MPS11_REG_B5CTRL4 = 49, + S2MPS11_REG_B5CTRL5 = 50, + S2MPS11_REG_B6CTRL1 = 51, + S2MPS11_REG_B6CTRL2 = 52, + S2MPS11_REG_B7CTRL1 = 53, + S2MPS11_REG_B7CTRL2 = 54, + S2MPS11_REG_B8CTRL1 = 55, + S2MPS11_REG_B8CTRL2 = 56, + S2MPS11_REG_B9CTRL1 = 57, + S2MPS11_REG_B9CTRL2 = 58, + S2MPS11_REG_B10CTRL1 = 59, + S2MPS11_REG_B10CTRL2 = 60, + S2MPS11_REG_L1CTRL = 61, + S2MPS11_REG_L2CTRL = 62, + S2MPS11_REG_L3CTRL = 63, + S2MPS11_REG_L4CTRL = 64, + S2MPS11_REG_L5CTRL = 65, + S2MPS11_REG_L6CTRL = 66, + S2MPS11_REG_L7CTRL = 67, + S2MPS11_REG_L8CTRL = 68, + S2MPS11_REG_L9CTRL = 69, + S2MPS11_REG_L10CTRL = 70, + S2MPS11_REG_L11CTRL = 71, + S2MPS11_REG_L12CTRL = 72, + S2MPS11_REG_L13CTRL = 73, + S2MPS11_REG_L14CTRL = 74, + S2MPS11_REG_L15CTRL = 75, + S2MPS11_REG_L16CTRL = 76, + S2MPS11_REG_L17CTRL = 77, + S2MPS11_REG_L18CTRL = 78, + S2MPS11_REG_L19CTRL = 79, + S2MPS11_REG_L20CTRL = 80, + S2MPS11_REG_L21CTRL = 81, + S2MPS11_REG_L22CTRL = 82, + S2MPS11_REG_L23CTRL = 83, + S2MPS11_REG_L24CTRL = 84, + S2MPS11_REG_L25CTRL = 85, + S2MPS11_REG_L26CTRL = 86, + S2MPS11_REG_L27CTRL = 87, + S2MPS11_REG_L28CTRL = 88, + S2MPS11_REG_L29CTRL = 89, + S2MPS11_REG_L30CTRL = 90, + S2MPS11_REG_L31CTRL = 91, + S2MPS11_REG_L32CTRL = 92, + S2MPS11_REG_L33CTRL = 93, + S2MPS11_REG_L34CTRL = 94, + S2MPS11_REG_L35CTRL = 95, + S2MPS11_REG_L36CTRL = 96, + S2MPS11_REG_L37CTRL = 97, + S2MPS11_REG_L38CTRL = 98, +}; + +enum s2mps13_reg { + S2MPS13_REG_ID = 0, + S2MPS13_REG_INT1 = 1, + S2MPS13_REG_INT2 = 2, + S2MPS13_REG_INT3 = 3, + S2MPS13_REG_INT1M = 4, + S2MPS13_REG_INT2M = 5, + S2MPS13_REG_INT3M = 6, + S2MPS13_REG_ST1 = 7, + S2MPS13_REG_ST2 = 8, + S2MPS13_REG_PWRONSRC = 9, + S2MPS13_REG_OFFSRC = 10, + S2MPS13_REG_BU_CHG = 11, + S2MPS13_REG_RTCCTRL = 12, + S2MPS13_REG_CTRL1 = 13, + S2MPS13_REG_CTRL2 = 14, + S2MPS13_REG_RSVD1 = 15, + S2MPS13_REG_RSVD2 = 16, + S2MPS13_REG_RSVD3 = 17, + S2MPS13_REG_RSVD4 = 18, + S2MPS13_REG_RSVD5 = 19, + S2MPS13_REG_RSVD6 = 20, + S2MPS13_REG_CTRL3 = 21, + S2MPS13_REG_RSVD7 = 22, + S2MPS13_REG_RSVD8 = 23, + S2MPS13_REG_WRSTBI = 24, + S2MPS13_REG_B1CTRL = 25, + S2MPS13_REG_B1OUT = 26, + S2MPS13_REG_B2CTRL = 27, + S2MPS13_REG_B2OUT = 28, + S2MPS13_REG_B3CTRL = 29, + S2MPS13_REG_B3OUT = 30, + S2MPS13_REG_B4CTRL = 31, + S2MPS13_REG_B4OUT = 32, + S2MPS13_REG_B5CTRL = 33, + S2MPS13_REG_B5OUT = 34, + S2MPS13_REG_B6CTRL = 35, + S2MPS13_REG_B6OUT = 36, + S2MPS13_REG_B7CTRL = 37, + S2MPS13_REG_B7SW = 38, + S2MPS13_REG_B7OUT = 39, + S2MPS13_REG_B8CTRL = 40, + S2MPS13_REG_B8OUT = 41, + S2MPS13_REG_B9CTRL = 42, + S2MPS13_REG_B9OUT = 43, + S2MPS13_REG_B10CTRL = 44, + S2MPS13_REG_B10OUT = 45, + S2MPS13_REG_BB1CTRL = 46, + S2MPS13_REG_BB1OUT = 47, + S2MPS13_REG_BUCK_RAMP1 = 48, + S2MPS13_REG_BUCK_RAMP2 = 49, + S2MPS13_REG_LDO_DVS1 = 50, + S2MPS13_REG_LDO_DVS2 = 51, + S2MPS13_REG_LDO_DVS3 = 52, + S2MPS13_REG_B6OUT2 = 53, + S2MPS13_REG_L1CTRL = 54, + S2MPS13_REG_L2CTRL = 55, + S2MPS13_REG_L3CTRL = 56, + S2MPS13_REG_L4CTRL = 57, + S2MPS13_REG_L5CTRL = 58, + S2MPS13_REG_L6CTRL = 59, + S2MPS13_REG_L7CTRL = 60, + S2MPS13_REG_L8CTRL = 61, + S2MPS13_REG_L9CTRL = 62, + S2MPS13_REG_L10CTRL = 63, + S2MPS13_REG_L11CTRL = 
64, + S2MPS13_REG_L12CTRL = 65, + S2MPS13_REG_L13CTRL = 66, + S2MPS13_REG_L14CTRL = 67, + S2MPS13_REG_L15CTRL = 68, + S2MPS13_REG_L16CTRL = 69, + S2MPS13_REG_L17CTRL = 70, + S2MPS13_REG_L18CTRL = 71, + S2MPS13_REG_L19CTRL = 72, + S2MPS13_REG_L20CTRL = 73, + S2MPS13_REG_L21CTRL = 74, + S2MPS13_REG_L22CTRL = 75, + S2MPS13_REG_L23CTRL = 76, + S2MPS13_REG_L24CTRL = 77, + S2MPS13_REG_L25CTRL = 78, + S2MPS13_REG_L26CTRL = 79, + S2MPS13_REG_L27CTRL = 80, + S2MPS13_REG_L28CTRL = 81, + S2MPS13_REG_L29CTRL = 82, + S2MPS13_REG_L30CTRL = 83, + S2MPS13_REG_L31CTRL = 84, + S2MPS13_REG_L32CTRL = 85, + S2MPS13_REG_L33CTRL = 86, + S2MPS13_REG_L34CTRL = 87, + S2MPS13_REG_L35CTRL = 88, + S2MPS13_REG_L36CTRL = 89, + S2MPS13_REG_L37CTRL = 90, + S2MPS13_REG_L38CTRL = 91, + S2MPS13_REG_L39CTRL = 92, + S2MPS13_REG_L40CTRL = 93, + S2MPS13_REG_LDODSCH1 = 94, + S2MPS13_REG_LDODSCH2 = 95, + S2MPS13_REG_LDODSCH3 = 96, + S2MPS13_REG_LDODSCH4 = 97, + S2MPS13_REG_LDODSCH5 = 98, +}; + +enum s2mps14_reg { + S2MPS14_REG_ID = 0, + S2MPS14_REG_INT1 = 1, + S2MPS14_REG_INT2 = 2, + S2MPS14_REG_INT3 = 3, + S2MPS14_REG_INT1M = 4, + S2MPS14_REG_INT2M = 5, + S2MPS14_REG_INT3M = 6, + S2MPS14_REG_ST1 = 7, + S2MPS14_REG_ST2 = 8, + S2MPS14_REG_PWRONSRC = 9, + S2MPS14_REG_OFFSRC = 10, + S2MPS14_REG_BU_CHG = 11, + S2MPS14_REG_RTCCTRL = 12, + S2MPS14_REG_CTRL1 = 13, + S2MPS14_REG_CTRL2 = 14, + S2MPS14_REG_RSVD1 = 15, + S2MPS14_REG_RSVD2 = 16, + S2MPS14_REG_RSVD3 = 17, + S2MPS14_REG_RSVD4 = 18, + S2MPS14_REG_RSVD5 = 19, + S2MPS14_REG_RSVD6 = 20, + S2MPS14_REG_CTRL3 = 21, + S2MPS14_REG_RSVD7 = 22, + S2MPS14_REG_RSVD8 = 23, + S2MPS14_REG_WRSTBI = 24, + S2MPS14_REG_B1CTRL1 = 25, + S2MPS14_REG_B1CTRL2 = 26, + S2MPS14_REG_B2CTRL1 = 27, + S2MPS14_REG_B2CTRL2 = 28, + S2MPS14_REG_B3CTRL1 = 29, + S2MPS14_REG_B3CTRL2 = 30, + S2MPS14_REG_B4CTRL1 = 31, + S2MPS14_REG_B4CTRL2 = 32, + S2MPS14_REG_B5CTRL1 = 33, + S2MPS14_REG_B5CTRL2 = 34, + S2MPS14_REG_L1CTRL = 35, + S2MPS14_REG_L2CTRL = 36, + S2MPS14_REG_L3CTRL = 37, + S2MPS14_REG_L4CTRL = 38, + S2MPS14_REG_L5CTRL = 39, + S2MPS14_REG_L6CTRL = 40, + S2MPS14_REG_L7CTRL = 41, + S2MPS14_REG_L8CTRL = 42, + S2MPS14_REG_L9CTRL = 43, + S2MPS14_REG_L10CTRL = 44, + S2MPS14_REG_L11CTRL = 45, + S2MPS14_REG_L12CTRL = 46, + S2MPS14_REG_L13CTRL = 47, + S2MPS14_REG_L14CTRL = 48, + S2MPS14_REG_L15CTRL = 49, + S2MPS14_REG_L16CTRL = 50, + S2MPS14_REG_L17CTRL = 51, + S2MPS14_REG_L18CTRL = 52, + S2MPS14_REG_L19CTRL = 53, + S2MPS14_REG_L20CTRL = 54, + S2MPS14_REG_L21CTRL = 55, + S2MPS14_REG_L22CTRL = 56, + S2MPS14_REG_L23CTRL = 57, + S2MPS14_REG_L24CTRL = 58, + S2MPS14_REG_L25CTRL = 59, + S2MPS14_REG_LDODSCH1 = 60, + S2MPS14_REG_LDODSCH2 = 61, + S2MPS14_REG_LDODSCH3 = 62, +}; + +enum s2mps15_reg { + S2MPS15_REG_ID = 0, + S2MPS15_REG_INT1 = 1, + S2MPS15_REG_INT2 = 2, + S2MPS15_REG_INT3 = 3, + S2MPS15_REG_INT1M = 4, + S2MPS15_REG_INT2M = 5, + S2MPS15_REG_INT3M = 6, + S2MPS15_REG_ST1 = 7, + S2MPS15_REG_ST2 = 8, + S2MPS15_REG_PWRONSRC = 9, + S2MPS15_REG_OFFSRC = 10, + S2MPS15_REG_BU_CHG = 11, + S2MPS15_REG_RTC_BUF = 12, + S2MPS15_REG_CTRL1 = 13, + S2MPS15_REG_CTRL2 = 14, + S2MPS15_REG_RSVD1 = 15, + S2MPS15_REG_RSVD2 = 16, + S2MPS15_REG_RSVD3 = 17, + S2MPS15_REG_RSVD4 = 18, + S2MPS15_REG_RSVD5 = 19, + S2MPS15_REG_RSVD6 = 20, + S2MPS15_REG_CTRL3 = 21, + S2MPS15_REG_RSVD7 = 22, + S2MPS15_REG_RSVD8 = 23, + S2MPS15_REG_RSVD9 = 24, + S2MPS15_REG_B1CTRL1 = 25, + S2MPS15_REG_B1CTRL2 = 26, + S2MPS15_REG_B2CTRL1 = 27, + S2MPS15_REG_B2CTRL2 = 28, + S2MPS15_REG_B3CTRL1 = 29, + S2MPS15_REG_B3CTRL2 = 30, + S2MPS15_REG_B4CTRL1 = 31, + 
S2MPS15_REG_B4CTRL2 = 32, + S2MPS15_REG_B5CTRL1 = 33, + S2MPS15_REG_B5CTRL2 = 34, + S2MPS15_REG_B6CTRL1 = 35, + S2MPS15_REG_B6CTRL2 = 36, + S2MPS15_REG_B7CTRL1 = 37, + S2MPS15_REG_B7CTRL2 = 38, + S2MPS15_REG_B8CTRL1 = 39, + S2MPS15_REG_B8CTRL2 = 40, + S2MPS15_REG_B9CTRL1 = 41, + S2MPS15_REG_B9CTRL2 = 42, + S2MPS15_REG_B10CTRL1 = 43, + S2MPS15_REG_B10CTRL2 = 44, + S2MPS15_REG_BBCTRL1 = 45, + S2MPS15_REG_BBCTRL2 = 46, + S2MPS15_REG_BRAMP = 47, + S2MPS15_REG_LDODVS1 = 48, + S2MPS15_REG_LDODVS2 = 49, + S2MPS15_REG_LDODVS3 = 50, + S2MPS15_REG_LDODVS4 = 51, + S2MPS15_REG_L1CTRL = 52, + S2MPS15_REG_L2CTRL = 53, + S2MPS15_REG_L3CTRL = 54, + S2MPS15_REG_L4CTRL = 55, + S2MPS15_REG_L5CTRL = 56, + S2MPS15_REG_L6CTRL = 57, + S2MPS15_REG_L7CTRL = 58, + S2MPS15_REG_L8CTRL = 59, + S2MPS15_REG_L9CTRL = 60, + S2MPS15_REG_L10CTRL = 61, + S2MPS15_REG_L11CTRL = 62, + S2MPS15_REG_L12CTRL = 63, + S2MPS15_REG_L13CTRL = 64, + S2MPS15_REG_L14CTRL = 65, + S2MPS15_REG_L15CTRL = 66, + S2MPS15_REG_L16CTRL = 67, + S2MPS15_REG_L17CTRL = 68, + S2MPS15_REG_L18CTRL = 69, + S2MPS15_REG_L19CTRL = 70, + S2MPS15_REG_L20CTRL = 71, + S2MPS15_REG_L21CTRL = 72, + S2MPS15_REG_L22CTRL = 73, + S2MPS15_REG_L23CTRL = 74, + S2MPS15_REG_L24CTRL = 75, + S2MPS15_REG_L25CTRL = 76, + S2MPS15_REG_L26CTRL = 77, + S2MPS15_REG_L27CTRL = 78, + S2MPS15_REG_LDODSCH1 = 79, + S2MPS15_REG_LDODSCH2 = 80, + S2MPS15_REG_LDODSCH3 = 81, + S2MPS15_REG_LDODSCH4 = 82, +}; + +enum S2MPU02_reg { + S2MPU02_REG_ID = 0, + S2MPU02_REG_INT1 = 1, + S2MPU02_REG_INT2 = 2, + S2MPU02_REG_INT3 = 3, + S2MPU02_REG_INT1M = 4, + S2MPU02_REG_INT2M = 5, + S2MPU02_REG_INT3M = 6, + S2MPU02_REG_ST1 = 7, + S2MPU02_REG_ST2 = 8, + S2MPU02_REG_PWRONSRC = 9, + S2MPU02_REG_OFFSRC = 10, + S2MPU02_REG_BU_CHG = 11, + S2MPU02_REG_RTCCTRL = 12, + S2MPU02_REG_PMCTRL1 = 13, + S2MPU02_REG_RSVD1 = 14, + S2MPU02_REG_RSVD2 = 15, + S2MPU02_REG_RSVD3 = 16, + S2MPU02_REG_RSVD4 = 17, + S2MPU02_REG_RSVD5 = 18, + S2MPU02_REG_RSVD6 = 19, + S2MPU02_REG_RSVD7 = 20, + S2MPU02_REG_WRSTEN = 21, + S2MPU02_REG_RSVD8 = 22, + S2MPU02_REG_RSVD9 = 23, + S2MPU02_REG_RSVD10 = 24, + S2MPU02_REG_B1CTRL1 = 25, + S2MPU02_REG_B1CTRL2 = 26, + S2MPU02_REG_B2CTRL1 = 27, + S2MPU02_REG_B2CTRL2 = 28, + S2MPU02_REG_B3CTRL1 = 29, + S2MPU02_REG_B3CTRL2 = 30, + S2MPU02_REG_B4CTRL1 = 31, + S2MPU02_REG_B4CTRL2 = 32, + S2MPU02_REG_B5CTRL1 = 33, + S2MPU02_REG_B5CTRL2 = 34, + S2MPU02_REG_B5CTRL3 = 35, + S2MPU02_REG_B5CTRL4 = 36, + S2MPU02_REG_B5CTRL5 = 37, + S2MPU02_REG_B6CTRL1 = 38, + S2MPU02_REG_B6CTRL2 = 39, + S2MPU02_REG_B7CTRL1 = 40, + S2MPU02_REG_B7CTRL2 = 41, + S2MPU02_REG_RAMP1 = 42, + S2MPU02_REG_RAMP2 = 43, + S2MPU02_REG_L1CTRL = 44, + S2MPU02_REG_L2CTRL1 = 45, + S2MPU02_REG_L2CTRL2 = 46, + S2MPU02_REG_L2CTRL3 = 47, + S2MPU02_REG_L2CTRL4 = 48, + S2MPU02_REG_L3CTRL = 49, + S2MPU02_REG_L4CTRL = 50, + S2MPU02_REG_L5CTRL = 51, + S2MPU02_REG_L6CTRL = 52, + S2MPU02_REG_L7CTRL = 53, + S2MPU02_REG_L8CTRL = 54, + S2MPU02_REG_L9CTRL = 55, + S2MPU02_REG_L10CTRL = 56, + S2MPU02_REG_L11CTRL = 57, + S2MPU02_REG_L12CTRL = 58, + S2MPU02_REG_L13CTRL = 59, + S2MPU02_REG_L14CTRL = 60, + S2MPU02_REG_L15CTRL = 61, + S2MPU02_REG_L16CTRL = 62, + S2MPU02_REG_L17CTRL = 63, + S2MPU02_REG_L18CTRL = 64, + S2MPU02_REG_L19CTRL = 65, + S2MPU02_REG_L20CTRL = 66, + S2MPU02_REG_L21CTRL = 67, + S2MPU02_REG_L22CTRL = 68, + S2MPU02_REG_L23CTRL = 69, + S2MPU02_REG_L24CTRL = 70, + S2MPU02_REG_L25CTRL = 71, + S2MPU02_REG_L26CTRL = 72, + S2MPU02_REG_L27CTRL = 73, + S2MPU02_REG_L28CTRL = 74, + S2MPU02_REG_LDODSCH1 = 75, + S2MPU02_REG_LDODSCH2 = 76, + 
S2MPU02_REG_LDODSCH3 = 77, + S2MPU02_REG_LDODSCH4 = 78, + S2MPU02_REG_SELMIF = 79, + S2MPU02_REG_RSVD11 = 80, + S2MPU02_REG_RSVD12 = 81, + S2MPU02_REG_RSVD13 = 82, + S2MPU02_REG_DVSSEL = 83, + S2MPU02_REG_DVSPTR = 84, + S2MPU02_REG_DVSDATA = 85, +}; + +enum s5m8763_reg { + S5M8763_REG_IRQ1 = 0, + S5M8763_REG_IRQ2 = 1, + S5M8763_REG_IRQ3 = 2, + S5M8763_REG_IRQ4 = 3, + S5M8763_REG_IRQM1 = 4, + S5M8763_REG_IRQM2 = 5, + S5M8763_REG_IRQM3 = 6, + S5M8763_REG_IRQM4 = 7, + S5M8763_REG_STATUS1 = 8, + S5M8763_REG_STATUS2 = 9, + S5M8763_REG_STATUSM1 = 10, + S5M8763_REG_STATUSM2 = 11, + S5M8763_REG_CHGR1 = 12, + S5M8763_REG_CHGR2 = 13, + S5M8763_REG_LDO_ACTIVE_DISCHARGE1 = 14, + S5M8763_REG_LDO_ACTIVE_DISCHARGE2 = 15, + S5M8763_REG_BUCK_ACTIVE_DISCHARGE3 = 16, + S5M8763_REG_ONOFF1 = 17, + S5M8763_REG_ONOFF2 = 18, + S5M8763_REG_ONOFF3 = 19, + S5M8763_REG_ONOFF4 = 20, + S5M8763_REG_BUCK1_VOLTAGE1 = 21, + S5M8763_REG_BUCK1_VOLTAGE2 = 22, + S5M8763_REG_BUCK1_VOLTAGE3 = 23, + S5M8763_REG_BUCK1_VOLTAGE4 = 24, + S5M8763_REG_BUCK2_VOLTAGE1 = 25, + S5M8763_REG_BUCK2_VOLTAGE2 = 26, + S5M8763_REG_BUCK3 = 27, + S5M8763_REG_BUCK4 = 28, + S5M8763_REG_LDO1_LDO2 = 29, + S5M8763_REG_LDO3 = 30, + S5M8763_REG_LDO4 = 31, + S5M8763_REG_LDO5 = 32, + S5M8763_REG_LDO6 = 33, + S5M8763_REG_LDO7 = 34, + S5M8763_REG_LDO7_LDO8 = 35, + S5M8763_REG_LDO9_LDO10 = 36, + S5M8763_REG_LDO11 = 37, + S5M8763_REG_LDO12 = 38, + S5M8763_REG_LDO13 = 39, + S5M8763_REG_LDO14 = 40, + S5M8763_REG_LDO15 = 41, + S5M8763_REG_LDO16 = 42, + S5M8763_REG_BKCHR = 43, + S5M8763_REG_LBCNFG1 = 44, + S5M8763_REG_LBCNFG2 = 45, +}; + +enum s5m8767_reg { + S5M8767_REG_ID = 0, + S5M8767_REG_INT1 = 1, + S5M8767_REG_INT2 = 2, + S5M8767_REG_INT3 = 3, + S5M8767_REG_INT1M = 4, + S5M8767_REG_INT2M = 5, + S5M8767_REG_INT3M = 6, + S5M8767_REG_STATUS1 = 7, + S5M8767_REG_STATUS2 = 8, + S5M8767_REG_STATUS3 = 9, + S5M8767_REG_CTRL1 = 10, + S5M8767_REG_CTRL2 = 11, + S5M8767_REG_LOWBAT1 = 12, + S5M8767_REG_LOWBAT2 = 13, + S5M8767_REG_BUCHG = 14, + S5M8767_REG_DVSRAMP = 15, + S5M8767_REG_DVSTIMER2 = 16, + S5M8767_REG_DVSTIMER3 = 17, + S5M8767_REG_DVSTIMER4 = 18, + S5M8767_REG_LDO1 = 19, + S5M8767_REG_LDO2 = 20, + S5M8767_REG_LDO3 = 21, + S5M8767_REG_LDO4 = 22, + S5M8767_REG_LDO5 = 23, + S5M8767_REG_LDO6 = 24, + S5M8767_REG_LDO7 = 25, + S5M8767_REG_LDO8 = 26, + S5M8767_REG_LDO9 = 27, + S5M8767_REG_LDO10 = 28, + S5M8767_REG_LDO11 = 29, + S5M8767_REG_LDO12 = 30, + S5M8767_REG_LDO13 = 31, + S5M8767_REG_LDO14 = 32, + S5M8767_REG_LDO15 = 33, + S5M8767_REG_LDO16 = 34, + S5M8767_REG_LDO17 = 35, + S5M8767_REG_LDO18 = 36, + S5M8767_REG_LDO19 = 37, + S5M8767_REG_LDO20 = 38, + S5M8767_REG_LDO21 = 39, + S5M8767_REG_LDO22 = 40, + S5M8767_REG_LDO23 = 41, + S5M8767_REG_LDO24 = 42, + S5M8767_REG_LDO25 = 43, + S5M8767_REG_LDO26 = 44, + S5M8767_REG_LDO27 = 45, + S5M8767_REG_LDO28 = 46, + S5M8767_REG_UVLO = 49, + S5M8767_REG_BUCK1CTRL1 = 50, + S5M8767_REG_BUCK1CTRL2 = 51, + S5M8767_REG_BUCK2CTRL = 52, + S5M8767_REG_BUCK2DVS1 = 53, + S5M8767_REG_BUCK2DVS2 = 54, + S5M8767_REG_BUCK2DVS3 = 55, + S5M8767_REG_BUCK2DVS4 = 56, + S5M8767_REG_BUCK2DVS5 = 57, + S5M8767_REG_BUCK2DVS6 = 58, + S5M8767_REG_BUCK2DVS7 = 59, + S5M8767_REG_BUCK2DVS8 = 60, + S5M8767_REG_BUCK3CTRL = 61, + S5M8767_REG_BUCK3DVS1 = 62, + S5M8767_REG_BUCK3DVS2 = 63, + S5M8767_REG_BUCK3DVS3 = 64, + S5M8767_REG_BUCK3DVS4 = 65, + S5M8767_REG_BUCK3DVS5 = 66, + S5M8767_REG_BUCK3DVS6 = 67, + S5M8767_REG_BUCK3DVS7 = 68, + S5M8767_REG_BUCK3DVS8 = 69, + S5M8767_REG_BUCK4CTRL = 70, + S5M8767_REG_BUCK4DVS1 = 71, + S5M8767_REG_BUCK4DVS2 = 72, + 
S5M8767_REG_BUCK4DVS3 = 73, + S5M8767_REG_BUCK4DVS4 = 74, + S5M8767_REG_BUCK4DVS5 = 75, + S5M8767_REG_BUCK4DVS6 = 76, + S5M8767_REG_BUCK4DVS7 = 77, + S5M8767_REG_BUCK4DVS8 = 78, + S5M8767_REG_BUCK5CTRL1 = 79, + S5M8767_REG_BUCK5CTRL2 = 80, + S5M8767_REG_BUCK5CTRL3 = 81, + S5M8767_REG_BUCK5CTRL4 = 82, + S5M8767_REG_BUCK5CTRL5 = 83, + S5M8767_REG_BUCK6CTRL1 = 84, + S5M8767_REG_BUCK6CTRL2 = 85, + S5M8767_REG_BUCK7CTRL1 = 86, + S5M8767_REG_BUCK7CTRL2 = 87, + S5M8767_REG_BUCK8CTRL1 = 88, + S5M8767_REG_BUCK8CTRL2 = 89, + S5M8767_REG_BUCK9CTRL1 = 90, + S5M8767_REG_BUCK9CTRL2 = 91, + S5M8767_REG_LDO1CTRL = 92, + S5M8767_REG_LDO2_1CTRL = 93, + S5M8767_REG_LDO2_2CTRL = 94, + S5M8767_REG_LDO2_3CTRL = 95, + S5M8767_REG_LDO2_4CTRL = 96, + S5M8767_REG_LDO3CTRL = 97, + S5M8767_REG_LDO4CTRL = 98, + S5M8767_REG_LDO5CTRL = 99, + S5M8767_REG_LDO6CTRL = 100, + S5M8767_REG_LDO7CTRL = 101, + S5M8767_REG_LDO8CTRL = 102, + S5M8767_REG_LDO9CTRL = 103, + S5M8767_REG_LDO10CTRL = 104, + S5M8767_REG_LDO11CTRL = 105, + S5M8767_REG_LDO12CTRL = 106, + S5M8767_REG_LDO13CTRL = 107, + S5M8767_REG_LDO14CTRL = 108, + S5M8767_REG_LDO15CTRL = 109, + S5M8767_REG_LDO16CTRL = 110, + S5M8767_REG_LDO17CTRL = 111, + S5M8767_REG_LDO18CTRL = 112, + S5M8767_REG_LDO19CTRL = 113, + S5M8767_REG_LDO20CTRL = 114, + S5M8767_REG_LDO21CTRL = 115, + S5M8767_REG_LDO22CTRL = 116, + S5M8767_REG_LDO23CTRL = 117, + S5M8767_REG_LDO24CTRL = 118, + S5M8767_REG_LDO25CTRL = 119, + S5M8767_REG_LDO26CTRL = 120, + S5M8767_REG_LDO27CTRL = 121, + S5M8767_REG_LDO28CTRL = 122, +}; + +enum s2mps11_irq { + S2MPS11_IRQ_PWRONF = 0, + S2MPS11_IRQ_PWRONR = 1, + S2MPS11_IRQ_JIGONBF = 2, + S2MPS11_IRQ_JIGONBR = 3, + S2MPS11_IRQ_ACOKBF = 4, + S2MPS11_IRQ_ACOKBR = 5, + S2MPS11_IRQ_PWRON1S = 6, + S2MPS11_IRQ_MRB = 7, + S2MPS11_IRQ_RTC60S = 8, + S2MPS11_IRQ_RTCA1 = 9, + S2MPS11_IRQ_RTCA0 = 10, + S2MPS11_IRQ_SMPL = 11, + S2MPS11_IRQ_RTC1S = 12, + S2MPS11_IRQ_WTSR = 13, + S2MPS11_IRQ_INT120C = 14, + S2MPS11_IRQ_INT140C = 15, + S2MPS11_IRQ_NR = 16, +}; + +enum s2mps14_irq { + S2MPS14_IRQ_PWRONF = 0, + S2MPS14_IRQ_PWRONR = 1, + S2MPS14_IRQ_JIGONBF = 2, + S2MPS14_IRQ_JIGONBR = 3, + S2MPS14_IRQ_ACOKBF = 4, + S2MPS14_IRQ_ACOKBR = 5, + S2MPS14_IRQ_PWRON1S = 6, + S2MPS14_IRQ_MRB = 7, + S2MPS14_IRQ_RTC60S = 8, + S2MPS14_IRQ_RTCA1 = 9, + S2MPS14_IRQ_RTCA0 = 10, + S2MPS14_IRQ_SMPL = 11, + S2MPS14_IRQ_RTC1S = 12, + S2MPS14_IRQ_WTSR = 13, + S2MPS14_IRQ_INT120C = 14, + S2MPS14_IRQ_INT140C = 15, + S2MPS14_IRQ_TSD = 16, + S2MPS14_IRQ_NR = 17, +}; + +enum s2mpu02_irq { + S2MPU02_IRQ_PWRONF = 0, + S2MPU02_IRQ_PWRONR = 1, + S2MPU02_IRQ_JIGONBF = 2, + S2MPU02_IRQ_JIGONBR = 3, + S2MPU02_IRQ_ACOKBF = 4, + S2MPU02_IRQ_ACOKBR = 5, + S2MPU02_IRQ_PWRON1S = 6, + S2MPU02_IRQ_MRB = 7, + S2MPU02_IRQ_RTC60S = 8, + S2MPU02_IRQ_RTCA1 = 9, + S2MPU02_IRQ_RTCA0 = 10, + S2MPU02_IRQ_SMPL = 11, + S2MPU02_IRQ_RTC1S = 12, + S2MPU02_IRQ_WTSR = 13, + S2MPU02_IRQ_INT120C = 14, + S2MPU02_IRQ_INT140C = 15, + S2MPU02_IRQ_TSD = 16, + S2MPU02_IRQ_NR = 17, +}; + +enum s5m8767_irq { + S5M8767_IRQ_PWRR = 0, + S5M8767_IRQ_PWRF = 1, + S5M8767_IRQ_PWR1S = 2, + S5M8767_IRQ_JIGR = 3, + S5M8767_IRQ_JIGF = 4, + S5M8767_IRQ_LOWBAT2 = 5, + S5M8767_IRQ_LOWBAT1 = 6, + S5M8767_IRQ_MRB = 7, + S5M8767_IRQ_DVSOK2 = 8, + S5M8767_IRQ_DVSOK3 = 9, + S5M8767_IRQ_DVSOK4 = 10, + S5M8767_IRQ_RTC60S = 11, + S5M8767_IRQ_RTCA1 = 12, + S5M8767_IRQ_RTCA2 = 13, + S5M8767_IRQ_SMPL = 14, + S5M8767_IRQ_RTC1S = 15, + S5M8767_IRQ_WTSR = 16, + S5M8767_IRQ_NR = 17, +}; + +enum s5m8763_irq { + S5M8763_IRQ_DCINF = 0, + S5M8763_IRQ_DCINR = 1, + S5M8763_IRQ_JIGF = 
2, + S5M8763_IRQ_JIGR = 3, + S5M8763_IRQ_PWRONF = 4, + S5M8763_IRQ_PWRONR = 5, + S5M8763_IRQ_WTSREVNT = 6, + S5M8763_IRQ_SMPLEVNT = 7, + S5M8763_IRQ_ALARM1 = 8, + S5M8763_IRQ_ALARM0 = 9, + S5M8763_IRQ_ONKEY1S = 10, + S5M8763_IRQ_TOPOFFR = 11, + S5M8763_IRQ_DCINOVPR = 12, + S5M8763_IRQ_CHGRSTF = 13, + S5M8763_IRQ_DONER = 14, + S5M8763_IRQ_CHGFAULT = 15, + S5M8763_IRQ_LOBAT1 = 16, + S5M8763_IRQ_LOBAT2 = 17, + S5M8763_IRQ_NR = 18, +}; + +struct syscon_platform_data { + const char *label; +}; + +struct syscon { + struct device_node *np; + struct regmap *regmap; + struct list_head list; +}; + +enum { + AS3711_REGULATOR_SD_1 = 0, + AS3711_REGULATOR_SD_2 = 1, + AS3711_REGULATOR_SD_3 = 2, + AS3711_REGULATOR_SD_4 = 3, + AS3711_REGULATOR_LDO_1 = 4, + AS3711_REGULATOR_LDO_2 = 5, + AS3711_REGULATOR_LDO_3 = 6, + AS3711_REGULATOR_LDO_4 = 7, + AS3711_REGULATOR_LDO_5 = 8, + AS3711_REGULATOR_LDO_6 = 9, + AS3711_REGULATOR_LDO_7 = 10, + AS3711_REGULATOR_LDO_8 = 11, + AS3711_REGULATOR_MAX = 12, +}; + +struct as3711 { + struct device *dev; + struct regmap *regmap; +}; + +enum as3711_su2_feedback { + AS3711_SU2_VOLTAGE = 0, + AS3711_SU2_CURR1 = 1, + AS3711_SU2_CURR2 = 2, + AS3711_SU2_CURR3 = 3, + AS3711_SU2_CURR_AUTO = 4, +}; + +enum as3711_su2_fbprot { + AS3711_SU2_LX_SD4 = 0, + AS3711_SU2_GPIO2 = 1, + AS3711_SU2_GPIO3 = 2, + AS3711_SU2_GPIO4 = 3, +}; + +struct as3711_regulator_pdata { + struct regulator_init_data *init_data[12]; +}; + +struct as3711_bl_pdata { + bool su1_fb; + int su1_max_uA; + bool su2_fb; + int su2_max_uA; + enum as3711_su2_feedback su2_feedback; + enum as3711_su2_fbprot su2_fbprot; + bool su2_auto_curr1; + bool su2_auto_curr2; + bool su2_auto_curr3; +}; + +struct as3711_platform_data { + struct as3711_regulator_pdata regulator; + struct as3711_bl_pdata backlight; +}; + +enum { + AS3711_REGULATOR = 0, + AS3711_BACKLIGHT = 1, +}; + +struct intel_soc_pmic_config { + long unsigned int irq_flags; + struct mfd_cell *cell_dev; + int n_cell_devs; + const struct regmap_config *regmap_config; + const struct regmap_irq_chip *irq_chip; +}; + +enum { + CHT_WC_PWRSRC_IRQ = 0, + CHT_WC_THRM_IRQ = 1, + CHT_WC_BCU_IRQ = 2, + CHT_WC_ADC_IRQ = 3, + CHT_WC_EXT_CHGR_IRQ = 4, + CHT_WC_GPIO_IRQ = 5, + CHT_WC_CRIT_IRQ = 7, +}; + +struct badrange { + struct list_head list; + spinlock_t lock; +}; + +enum { + NDD_ALIASING = 0, + NDD_UNARMED = 1, + NDD_LOCKED = 2, + NDD_SECURITY_OVERWRITE = 3, + NDD_WORK_PENDING = 4, + NDD_NOBLK = 5, + NDD_LABELING = 6, + ND_IOCTL_MAX_BUFLEN = 4194304, + ND_CMD_MAX_ELEM = 5, + ND_CMD_MAX_ENVELOPE = 256, + ND_MAX_MAPPINGS = 32, + ND_REGION_PAGEMAP = 0, + ND_REGION_PERSIST_CACHE = 1, + ND_REGION_PERSIST_MEMCTRL = 2, + ND_REGION_ASYNC = 3, + DPA_RESOURCE_ADJUSTED = 1, +}; + +struct nvdimm_bus_descriptor; + +struct nvdimm; + +typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *, struct nvdimm *, unsigned int, void *, unsigned int, int *); + +struct nvdimm_bus_fw_ops; + +struct nvdimm_bus_descriptor { + const struct attribute_group **attr_groups; + long unsigned int cmd_mask; + long unsigned int dimm_family_mask; + long unsigned int bus_family_mask; + struct module *module; + char *provider_name; + struct device_node *of_node; + ndctl_fn ndctl; + int (*flush_probe)(struct nvdimm_bus_descriptor *); + int (*clear_to_send)(struct nvdimm_bus_descriptor *, struct nvdimm *, unsigned int, void *); + const struct nvdimm_bus_fw_ops *fw_ops; +}; + +struct nvdimm_security_ops; + +struct nvdimm_fw_ops; + +struct nvdimm { + long unsigned int flags; + void *provider_data; + long unsigned int 
cmd_mask; + struct device dev; + atomic_t busy; + int id; + int num_flush; + struct resource *flush_wpq; + const char *dimm_id; + struct { + const struct nvdimm_security_ops *ops; + long unsigned int flags; + long unsigned int ext_flags; + unsigned int overwrite_tmo; + struct kernfs_node *overwrite_state; + } sec; + struct delayed_work dwork; + const struct nvdimm_fw_ops *fw_ops; +}; + +enum nvdimm_fwa_state { + NVDIMM_FWA_INVALID = 0, + NVDIMM_FWA_IDLE = 1, + NVDIMM_FWA_ARMED = 2, + NVDIMM_FWA_BUSY = 3, + NVDIMM_FWA_ARM_OVERFLOW = 4, +}; + +enum nvdimm_fwa_capability { + NVDIMM_FWA_CAP_INVALID = 0, + NVDIMM_FWA_CAP_NONE = 1, + NVDIMM_FWA_CAP_QUIESCE = 2, + NVDIMM_FWA_CAP_LIVE = 3, +}; + +struct nvdimm_bus_fw_ops { + enum nvdimm_fwa_state (*activate_state)(struct nvdimm_bus_descriptor *); + enum nvdimm_fwa_capability (*capability)(struct nvdimm_bus_descriptor *); + int (*activate)(struct nvdimm_bus_descriptor *); +}; + +struct nvdimm_bus { + struct nvdimm_bus_descriptor *nd_desc; + wait_queue_head_t wait; + struct list_head list; + struct device dev; + int id; + int probe_active; + atomic_t ioctl_active; + struct list_head mapping_list; + struct mutex reconfig_mutex; + struct badrange badrange; +}; + +struct nvdimm_key_data { + u8 data[32]; +}; + +enum nvdimm_passphrase_type { + NVDIMM_USER = 0, + NVDIMM_MASTER = 1, +}; + +struct nvdimm_security_ops { + long unsigned int (*get_flags)(struct nvdimm *, enum nvdimm_passphrase_type); + int (*freeze)(struct nvdimm *); + int (*change_key)(struct nvdimm *, const struct nvdimm_key_data *, const struct nvdimm_key_data *, enum nvdimm_passphrase_type); + int (*unlock)(struct nvdimm *, const struct nvdimm_key_data *); + int (*disable)(struct nvdimm *, const struct nvdimm_key_data *); + int (*erase)(struct nvdimm *, const struct nvdimm_key_data *, enum nvdimm_passphrase_type); + int (*overwrite)(struct nvdimm *, const struct nvdimm_key_data *); + int (*query_overwrite)(struct nvdimm *); +}; + +enum nvdimm_fwa_trigger { + NVDIMM_FWA_ARM = 0, + NVDIMM_FWA_DISARM = 1, +}; + +enum nvdimm_fwa_result { + NVDIMM_FWA_RESULT_INVALID = 0, + NVDIMM_FWA_RESULT_NONE = 1, + NVDIMM_FWA_RESULT_SUCCESS = 2, + NVDIMM_FWA_RESULT_NOTSTAGED = 3, + NVDIMM_FWA_RESULT_NEEDRESET = 4, + NVDIMM_FWA_RESULT_FAIL = 5, +}; + +struct nvdimm_fw_ops { + enum nvdimm_fwa_state (*activate_state)(struct nvdimm *); + enum nvdimm_fwa_result (*activate_result)(struct nvdimm *); + int (*arm)(struct nvdimm *, enum nvdimm_fwa_trigger); +}; + +enum { + ND_CMD_IMPLEMENTED = 0, + ND_CMD_ARS_CAP = 1, + ND_CMD_ARS_START = 2, + ND_CMD_ARS_STATUS = 3, + ND_CMD_CLEAR_ERROR = 4, + ND_CMD_SMART = 1, + ND_CMD_SMART_THRESHOLD = 2, + ND_CMD_DIMM_FLAGS = 3, + ND_CMD_GET_CONFIG_SIZE = 4, + ND_CMD_GET_CONFIG_DATA = 5, + ND_CMD_SET_CONFIG_DATA = 6, + ND_CMD_VENDOR_EFFECT_LOG_SIZE = 7, + ND_CMD_VENDOR_EFFECT_LOG = 8, + ND_CMD_VENDOR = 9, + ND_CMD_CALL = 10, +}; + +enum { + NSINDEX_SIG_LEN = 16, + NSINDEX_ALIGN = 256, + NSINDEX_SEQ_MASK = 3, + NSLABEL_UUID_LEN = 16, + NSLABEL_NAME_LEN = 64, + NSLABEL_FLAG_ROLABEL = 1, + NSLABEL_FLAG_LOCAL = 2, + NSLABEL_FLAG_BTT = 4, + NSLABEL_FLAG_UPDATING = 8, + BTT_ALIGN = 4096, + BTTINFO_SIG_LEN = 16, + BTTINFO_UUID_LEN = 16, + BTTINFO_FLAG_ERROR = 1, + BTTINFO_MAJOR_VERSION = 1, + ND_LABEL_MIN_SIZE = 1024, + ND_LABEL_ID_SIZE = 50, + ND_NSINDEX_INIT = 1, +}; + +struct nvdimm_map { + struct nvdimm_bus *nvdimm_bus; + struct list_head list; + resource_size_t offset; + long unsigned int flags; + size_t size; + union { + void *mem; + void *iomem; + }; + struct kref kref; +}; + 
+struct badrange_entry { + u64 start; + u64 length; + struct list_head list; +}; + +struct nd_cmd_desc { + int in_num; + int out_num; + u32 in_sizes[5]; + int out_sizes[5]; +}; + +struct nd_interleave_set { + u64 cookie1; + u64 cookie2; + u64 altcookie; + guid_t type_guid; +}; + +struct nvdimm_drvdata; + +struct nd_mapping { + struct nvdimm *nvdimm; + u64 start; + u64 size; + int position; + struct list_head labels; + struct mutex lock; + struct nvdimm_drvdata *ndd; +}; + +struct nd_percpu_lane; + +struct nd_region { + struct device dev; + struct ida ns_ida; + struct ida btt_ida; + struct ida pfn_ida; + struct ida dax_ida; + long unsigned int flags; + struct device *ns_seed; + struct device *btt_seed; + struct device *pfn_seed; + struct device *dax_seed; + long unsigned int align; + u16 ndr_mappings; + u64 ndr_size; + u64 ndr_start; + int id; + int num_lanes; + int ro; + int numa_node; + int target_node; + void *provider_data; + struct kernfs_node *bb_state; + struct badblocks bb; + struct nd_interleave_set *nd_set; + struct nd_percpu_lane *lane; + int (*flush)(struct nd_region *, struct bio *); + struct nd_mapping mapping[0]; +}; + +enum nvdimm_security_bits { + NVDIMM_SECURITY_DISABLED = 0, + NVDIMM_SECURITY_UNLOCKED = 1, + NVDIMM_SECURITY_LOCKED = 2, + NVDIMM_SECURITY_FROZEN = 3, + NVDIMM_SECURITY_OVERWRITE = 4, +}; + +struct nd_cmd_get_config_size { + __u32 status; + __u32 config_size; + __u32 max_xfer; +}; + +struct nd_cmd_set_config_hdr { + __u32 in_offset; + __u32 in_length; + __u8 in_buf[0]; +}; + +struct nd_cmd_vendor_hdr { + __u32 opcode; + __u32 in_length; + __u8 in_buf[0]; +}; + +struct nd_cmd_ars_cap { + __u64 address; + __u64 length; + __u32 status; + __u32 max_ars_out; + __u32 clear_err_unit; + __u16 flags; + __u16 reserved; +}; + +struct nd_cmd_clear_error { + __u64 address; + __u64 length; + __u32 status; + __u8 reserved[4]; + __u64 cleared; +}; + +struct nd_cmd_pkg { + __u64 nd_family; + __u64 nd_command; + __u32 nd_size_in; + __u32 nd_size_out; + __u32 nd_reserved2[9]; + __u32 nd_fw_size; + unsigned char nd_payload[0]; +}; + +enum nvdimm_event { + NVDIMM_REVALIDATE_POISON = 0, + NVDIMM_REVALIDATE_REGION = 1, +}; + +enum nvdimm_claim_class { + NVDIMM_CCLASS_NONE = 0, + NVDIMM_CCLASS_BTT = 1, + NVDIMM_CCLASS_BTT2 = 2, + NVDIMM_CCLASS_PFN = 3, + NVDIMM_CCLASS_DAX = 4, + NVDIMM_CCLASS_UNKNOWN = 5, +}; + +struct nd_device_driver { + struct device_driver drv; + long unsigned int type; + int (*probe)(struct device *); + void (*remove)(struct device *); + void (*shutdown)(struct device *); + void (*notify)(struct device *, enum nvdimm_event); +}; + +struct nd_namespace_common { + int force_raw; + struct device dev; + struct device *claim; + enum nvdimm_claim_class claim_class; + int (*rw_bytes)(struct nd_namespace_common *, resource_size_t, void *, size_t, int, long unsigned int); +}; + +struct nd_namespace_io { + struct nd_namespace_common common; + struct resource res; + resource_size_t size; + void *addr; + struct badblocks bb; +}; + +struct nvdimm_drvdata { + struct device *dev; + int nslabel_size; + struct nd_cmd_get_config_size nsarea; + void *data; + int ns_current; + int ns_next; + struct resource dpa; + struct kref kref; +}; + +struct nd_percpu_lane { + int count; + spinlock_t lock; +}; + +struct btt; + +struct nd_btt { + struct device dev; + struct nd_namespace_common *ndns; + struct btt *btt; + long unsigned int lbasize; + u64 size; + u8 *uuid; + int id; + int initial_offset; + u16 version_major; + u16 version_minor; +}; + +enum nd_pfn_mode { + PFN_MODE_NONE = 0, + 
PFN_MODE_RAM = 1, + PFN_MODE_PMEM = 2, +}; + +struct nd_pfn_sb; + +struct nd_pfn { + int id; + u8 *uuid; + struct device dev; + long unsigned int align; + long unsigned int npfns; + enum nd_pfn_mode mode; + struct nd_pfn_sb *pfn_sb; + struct nd_namespace_common *ndns; +}; + +struct nd_pfn_sb { + u8 signature[16]; + u8 uuid[16]; + u8 parent_uuid[16]; + __le32 flags; + __le16 version_major; + __le16 version_minor; + __le64 dataoff; + __le64 npfns; + __le32 mode; + __le32 start_pad; + __le32 end_trunc; + __le32 align; + __le32 page_size; + __le16 page_struct_size; + u8 padding[3994]; + __le64 checksum; +}; + +struct nd_dax { + struct nd_pfn nd_pfn; +}; + +enum nd_async_mode { + ND_SYNC = 0, + ND_ASYNC = 1, +}; + +struct clear_badblocks_context { + resource_size_t phys; + resource_size_t cleared; +}; + +enum nd_ioctl_mode { + BUS_IOCTL = 0, + DIMM_IOCTL = 1, +}; + +struct nd_cmd_get_config_data_hdr { + __u32 in_offset; + __u32 in_length; + __u32 status; + __u8 out_buf[0]; +}; + +struct nd_blk_region { + int (*enable)(struct nvdimm_bus *, struct device *); + int (*do_io)(struct nd_blk_region *, resource_size_t, void *, u64, int); + void *blk_provider_data; + struct nd_region nd_region; +}; + +struct nd_label_id { + char id[50]; +}; + +struct blk_alloc_info { + struct nd_mapping *nd_mapping; + resource_size_t available; + resource_size_t busy; + struct resource *res; +}; + +enum nd_driver_flags { + ND_DRIVER_DIMM = 2, + ND_DRIVER_REGION_PMEM = 4, + ND_DRIVER_REGION_BLK = 8, + ND_DRIVER_NAMESPACE_IO = 16, + ND_DRIVER_NAMESPACE_PMEM = 32, + ND_DRIVER_NAMESPACE_BLK = 64, + ND_DRIVER_DAX_PMEM = 128, +}; + +struct nd_mapping_desc { + struct nvdimm *nvdimm; + u64 start; + u64 size; + int position; +}; + +struct nd_region_desc { + struct resource *res; + struct nd_mapping_desc *mapping; + u16 num_mappings; + const struct attribute_group **attr_groups; + struct nd_interleave_set *nd_set; + void *provider_data; + int num_lanes; + int numa_node; + int target_node; + long unsigned int flags; + struct device_node *of_node; + int (*flush)(struct nd_region *, struct bio *); +}; + +struct nd_blk_region_desc { + int (*enable)(struct nvdimm_bus *, struct device *); + int (*do_io)(struct nd_blk_region *, resource_size_t, void *, u64, int); + struct nd_region_desc ndr_desc; +}; + +struct nd_namespace_index { + u8 sig[16]; + u8 flags[3]; + u8 labelsize; + __le32 seq; + __le64 myoff; + __le64 mysize; + __le64 otheroff; + __le64 labeloff; + __le32 nslot; + __le16 major; + __le16 minor; + __le64 checksum; + u8 free[0]; +}; + +struct nd_namespace_label { + u8 uuid[16]; + u8 name[64]; + __le32 flags; + __le16 nlabel; + __le16 position; + __le64 isetcookie; + __le64 lbasize; + __le64 dpa; + __le64 rawsize; + __le32 slot; + u8 align; + u8 reserved[3]; + guid_t type_guid; + guid_t abstraction_guid; + u8 reserved2[88]; + __le64 checksum; +}; + +enum { + ND_MAX_LANES = 256, + INT_LBASIZE_ALIGNMENT = 64, + NVDIMM_IO_ATOMIC = 1, +}; + +struct nd_region_data { + int ns_count; + int ns_active; + unsigned int hints_shift; + void *flush_wpq[0]; +}; + +struct nd_label_ent { + struct list_head list; + long unsigned int flags; + struct nd_namespace_label *label; +}; + +struct conflict_context { + struct nd_region *nd_region; + resource_size_t start; + resource_size_t size; +}; + +enum { + ND_MIN_NAMESPACE_SIZE = 4096, +}; + +struct nd_namespace_pmem { + struct nd_namespace_io nsio; + long unsigned int lbasize; + char *alt_name; + u8 *uuid; + int id; +}; + +struct nd_namespace_blk { + struct nd_namespace_common common; + char 
*alt_name; + u8 *uuid; + int id; + long unsigned int lbasize; + resource_size_t size; + int num_resources; + struct resource **res; +}; + +enum nd_label_flags { + ND_LABEL_REAP = 0, +}; + +enum alloc_loc { + ALLOC_ERR = 0, + ALLOC_BEFORE = 1, + ALLOC_MID = 2, + ALLOC_AFTER = 3, +}; + +struct btt { + struct gendisk *btt_disk; + struct request_queue *btt_queue; + struct list_head arena_list; + struct dentry *debugfs_dir; + struct nd_btt *nd_btt; + u64 nlba; + long long unsigned int rawsize; + u32 lbasize; + u32 sector_size; + struct nd_region *nd_region; + struct mutex init_lock; + int init_state; + int num_arenas; + struct badblocks *phys_bb; +}; + +struct nd_gen_sb { + char reserved[4088]; + __le64 checksum; +}; + +struct btt_sb { + u8 signature[16]; + u8 uuid[16]; + u8 parent_uuid[16]; + __le32 flags; + __le16 version_major; + __le16 version_minor; + __le32 external_lbasize; + __le32 external_nlba; + __le32 internal_lbasize; + __le32 internal_nlba; + __le32 nfree; + __le32 infosize; + __le64 nextoff; + __le64 dataoff; + __le64 mapoff; + __le64 logoff; + __le64 info2off; + u8 padding[3968]; + __le64 checksum; +}; + +enum nvdimmsec_op_ids { + OP_FREEZE = 0, + OP_DISABLE = 1, + OP_UPDATE = 2, + OP_ERASE = 3, + OP_OVERWRITE = 4, + OP_MASTER_UPDATE = 5, + OP_MASTER_ERASE = 6, +}; + +struct nvdimm_bus___2; + +struct dax_operations { + long int (*direct_access)(struct dax_device *, long unsigned int, long int, void **, pfn_t *); + bool (*dax_supported)(struct dax_device *, struct block_device *, int, sector_t, sector_t); + size_t (*copy_from_iter)(struct dax_device *, long unsigned int, void *, size_t, struct iov_iter *); + size_t (*copy_to_iter)(struct dax_device *, long unsigned int, void *, size_t, struct iov_iter *); + int (*zero_page_range)(struct dax_device *, long unsigned int, size_t); +}; + +struct dax_device { + struct hlist_node list; + struct inode inode; + struct cdev cdev; + const char *host; + void *private; + long unsigned int flags; + const struct dax_operations *ops; +}; + +enum dax_device_flags { + DAXDEV_ALIVE = 0, + DAXDEV_WRITE_CACHE = 1, + DAXDEV_SYNC = 2, +}; + +struct dax_region { + int id; + int target_node; + struct kref kref; + struct device *dev; + unsigned int align; + struct ida ida; + struct resource res; + struct device *seed; + struct device *youngest; +}; + +struct dax_mapping { + struct device dev; + int range_id; + int id; +}; + +struct dev_dax_range { + long unsigned int pgoff; + struct range range; + struct dax_mapping *mapping; +}; + +struct dev_dax { + struct dax_region *region; + struct dax_device *dax_dev; + unsigned int align; + int target_node; + int id; + struct ida ida; + struct device dev; + struct dev_pagemap *pgmap; + int nr_range; + struct dev_dax_range *ranges; +}; + +enum dev_dax_subsys { + DEV_DAX_BUS = 0, + DEV_DAX_CLASS = 1, +}; + +struct dev_dax_data { + struct dax_region *dax_region; + struct dev_pagemap *pgmap; + enum dev_dax_subsys subsys; + resource_size_t size; + int id; +}; + +struct dax_device_driver { + struct device_driver drv; + struct list_head ids; + int match_always; + int (*probe)(struct dev_dax *); + void (*remove)(struct dev_dax *); +}; + +struct dax_id { + struct list_head list; + char dev_name[30]; +}; + +enum id_action { + ID_REMOVE = 0, + ID_ADD = 1, +}; + +struct memregion_info { + int target_node; +}; + +struct seqcount_ww_mutex { + seqcount_t seqcount; +}; + +typedef struct seqcount_ww_mutex seqcount_ww_mutex_t; + +struct dma_buf_map { + union { + void *vaddr_iomem; + void *vaddr; + }; + bool is_iomem; +}; + +struct 
dma_fence_ops; + +struct dma_fence { + spinlock_t *lock; + const struct dma_fence_ops *ops; + union { + struct list_head cb_list; + ktime_t timestamp; + struct callback_head rcu; + }; + u64 context; + u64 seqno; + long unsigned int flags; + struct kref refcount; + int error; +}; + +struct dma_fence_ops { + bool use_64bit_seqno; + const char * (*get_driver_name)(struct dma_fence *); + const char * (*get_timeline_name)(struct dma_fence *); + bool (*enable_signaling)(struct dma_fence *); + bool (*signaled)(struct dma_fence *); + long int (*wait)(struct dma_fence *, bool, long int); + void (*release)(struct dma_fence *); + void (*fence_value_str)(struct dma_fence *, char *, int); + void (*timeline_value_str)(struct dma_fence *, char *, int); +}; + +enum dma_fence_flag_bits { + DMA_FENCE_FLAG_SIGNALED_BIT = 0, + DMA_FENCE_FLAG_TIMESTAMP_BIT = 1, + DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT = 2, + DMA_FENCE_FLAG_USER_BITS = 3, +}; + +struct dma_fence_cb; + +typedef void (*dma_fence_func_t)(struct dma_fence *, struct dma_fence_cb *); + +struct dma_fence_cb { + struct list_head node; + dma_fence_func_t func; +}; + +struct dma_buf; + +struct dma_buf_attachment; + +struct dma_buf_ops { + bool cache_sgt_mapping; + int (*attach)(struct dma_buf *, struct dma_buf_attachment *); + void (*detach)(struct dma_buf *, struct dma_buf_attachment *); + int (*pin)(struct dma_buf_attachment *); + void (*unpin)(struct dma_buf_attachment *); + struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *, enum dma_data_direction); + void (*unmap_dma_buf)(struct dma_buf_attachment *, struct sg_table *, enum dma_data_direction); + void (*release)(struct dma_buf *); + int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction); + int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); + int (*mmap)(struct dma_buf *, struct vm_area_struct *); + int (*vmap)(struct dma_buf *, struct dma_buf_map *); + void (*vunmap)(struct dma_buf *, struct dma_buf_map *); +}; + +struct dma_buf_poll_cb_t { + struct dma_fence_cb cb; + wait_queue_head_t *poll; + __poll_t active; +}; + +struct dma_resv; + +struct dma_buf { + size_t size; + struct file *file; + struct list_head attachments; + const struct dma_buf_ops *ops; + struct mutex lock; + unsigned int vmapping_counter; + struct dma_buf_map vmap_ptr; + const char *exp_name; + const char *name; + spinlock_t name_lock; + struct module *owner; + struct list_head list_node; + void *priv; + struct dma_resv *resv; + wait_queue_head_t poll; + struct dma_buf_poll_cb_t cb_excl; + struct dma_buf_poll_cb_t cb_shared; +}; + +struct dma_buf_attach_ops; + +struct dma_buf_attachment { + struct dma_buf *dmabuf; + struct device *dev; + struct list_head node; + struct sg_table *sgt; + enum dma_data_direction dir; + bool peer2peer; + const struct dma_buf_attach_ops *importer_ops; + void *importer_priv; + void *priv; +}; + +struct dma_resv_list; + +struct dma_resv { + struct ww_mutex lock; + seqcount_ww_mutex_t seq; + struct dma_fence *fence_excl; + struct dma_resv_list *fence; +}; + +struct dma_buf_attach_ops { + bool allow_peer2peer; + void (*move_notify)(struct dma_buf_attachment *); +}; + +struct dma_buf_export_info { + const char *exp_name; + struct module *owner; + const struct dma_buf_ops *ops; + size_t size; + int flags; + struct dma_resv *resv; + void *priv; +}; + +struct dma_resv_list { + struct callback_head rcu; + u32 shared_count; + u32 shared_max; + struct dma_fence *shared[0]; +}; + +struct dma_buf_sync { + __u64 flags; +}; + +struct dma_buf_list { + struct list_head head; + struct 
mutex lock; +}; + +struct trace_event_raw_dma_fence { + struct trace_entry ent; + u32 __data_loc_driver; + u32 __data_loc_timeline; + unsigned int context; + unsigned int seqno; + char __data[0]; +}; + +struct trace_event_data_offsets_dma_fence { + u32 driver; + u32 timeline; +}; + +typedef void (*btf_trace_dma_fence_emit)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_init)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_destroy)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_enable_signal)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_signaled)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_wait_start)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_wait_end)(void *, struct dma_fence *); + +struct default_wait_cb { + struct dma_fence_cb base; + struct task_struct *task; +}; + +struct dma_fence_array; + +struct dma_fence_array_cb { + struct dma_fence_cb cb; + struct dma_fence_array *array; +}; + +struct dma_fence_array { + struct dma_fence base; + spinlock_t lock; + unsigned int num_fences; + atomic_t num_pending; + struct dma_fence **fences; + struct irq_work work; +}; + +struct dma_fence_chain { + struct dma_fence base; + spinlock_t lock; + struct dma_fence *prev; + u64 prev_seqno; + struct dma_fence *fence; + struct dma_fence_cb cb; + struct irq_work work; +}; + +enum seqno_fence_condition { + SEQNO_FENCE_WAIT_GEQUAL = 0, + SEQNO_FENCE_WAIT_NONZERO = 1, +}; + +struct seqno_fence { + struct dma_fence base; + const struct dma_fence_ops *ops; + struct dma_buf *sync_buf; + uint32_t seqno_ofs; + enum seqno_fence_condition condition; +}; + +struct dma_heap; + +struct dma_heap_ops { + struct dma_buf * (*allocate)(struct dma_heap *, long unsigned int, long unsigned int, long unsigned int); +}; + +struct dma_heap { + const char *name; + const struct dma_heap_ops *ops; + void *priv; + dev_t heap_devt; + struct list_head list; + struct cdev heap_cdev; +}; + +struct dma_heap_export_info { + const char *name; + const struct dma_heap_ops *ops; + void *priv; +}; + +struct dma_heap_allocation_data { + __u64 len; + __u32 fd; + __u32 fd_flags; + __u64 heap_flags; +}; + +struct system_heap_buffer { + struct dma_heap *heap; + struct list_head attachments; + struct mutex lock; + long unsigned int len; + struct sg_table sg_table; + int vmap_cnt; + void *vaddr; +}; + +struct dma_heap_attachment { + struct device *dev; + struct sg_table *table; + struct list_head list; + bool mapped; +}; + +struct sync_file { + struct file *file; + char user_name[32]; + struct list_head sync_file_list; + wait_queue_head_t wq; + long unsigned int flags; + struct dma_fence *fence; + struct dma_fence_cb cb; +}; + +struct sync_merge_data { + char name[32]; + __s32 fd2; + __s32 fence; + __u32 flags; + __u32 pad; +}; + +struct sync_fence_info { + char obj_name[32]; + char driver_name[32]; + __s32 status; + __u32 flags; + __u64 timestamp_ns; +}; + +struct sync_file_info { + char name[32]; + __s32 status; + __u32 flags; + __u32 num_fences; + __u32 pad; + __u64 sync_fence_info; +}; + +struct sync_timeline { + struct kref kref; + char name[32]; + u64 context; + int value; + struct rb_root pt_tree; + struct list_head pt_list; + spinlock_t lock; + struct list_head sync_timeline_list; +}; + +struct sync_pt { + struct dma_fence base; + struct list_head link; + struct rb_node node; +}; + +struct trace_event_raw_sync_timeline { + struct trace_entry ent; + u32 __data_loc_name; + u32 value; + char __data[0]; +}; + +struct 
trace_event_data_offsets_sync_timeline { + u32 name; +}; + +typedef void (*btf_trace_sync_timeline)(void *, struct sync_timeline *); + +struct sw_sync_create_fence_data { + __u32 value; + char name[32]; + __s32 fence; +}; + +struct udmabuf_create { + __u32 memfd; + __u32 flags; + __u64 offset; + __u64 size; +}; + +struct udmabuf_create_item { + __u32 memfd; + __u32 __pad; + __u64 offset; + __u64 size; +}; + +struct udmabuf_create_list { + __u32 flags; + __u32 count; + struct udmabuf_create_item list[0]; +}; + +struct udmabuf { + long unsigned int pagecount; + struct page **pages; + struct sg_table *sg; + struct miscdevice *device; +}; + +enum scsi_disposition { + NEEDS_RETRY = 8193, + SUCCESS = 8194, + FAILED = 8195, + QUEUED = 8196, + SOFT_ERROR = 8197, + ADD_TO_MLQUEUE = 8198, + TIMEOUT_ERROR = 8199, + SCSI_RETURN_NOT_HANDLED = 8200, + FAST_IO_FAIL = 8201, +}; + +typedef __u64 blist_flags_t; + +enum scsi_device_state { + SDEV_CREATED = 1, + SDEV_RUNNING = 2, + SDEV_CANCEL = 3, + SDEV_DEL = 4, + SDEV_QUIESCE = 5, + SDEV_OFFLINE = 6, + SDEV_TRANSPORT_OFFLINE = 7, + SDEV_BLOCK = 8, + SDEV_CREATED_BLOCK = 9, +}; + +struct scsi_vpd { + struct callback_head rcu; + int len; + unsigned char data[0]; +}; + +struct Scsi_Host; + +struct scsi_target; + +struct scsi_device_handler; + +struct scsi_device { + struct Scsi_Host *host; + struct request_queue *request_queue; + struct list_head siblings; + struct list_head same_target_siblings; + struct sbitmap budget_map; + atomic_t device_blocked; + atomic_t restarts; + spinlock_t list_lock; + struct list_head starved_entry; + short unsigned int queue_depth; + short unsigned int max_queue_depth; + short unsigned int last_queue_full_depth; + short unsigned int last_queue_full_count; + long unsigned int last_queue_full_time; + long unsigned int queue_ramp_up_period; + long unsigned int last_queue_ramp_up; + unsigned int id; + unsigned int channel; + u64 lun; + unsigned int manufacturer; + unsigned int sector_size; + void *hostdata; + unsigned char type; + char scsi_level; + char inq_periph_qual; + struct mutex inquiry_mutex; + unsigned char inquiry_len; + unsigned char *inquiry; + const char *vendor; + const char *model; + const char *rev; + struct scsi_vpd *vpd_pg0; + struct scsi_vpd *vpd_pg83; + struct scsi_vpd *vpd_pg80; + struct scsi_vpd *vpd_pg89; + unsigned char current_tag; + struct scsi_target *sdev_target; + blist_flags_t sdev_bflags; + unsigned int eh_timeout; + unsigned int removable: 1; + unsigned int changed: 1; + unsigned int busy: 1; + unsigned int lockable: 1; + unsigned int locked: 1; + unsigned int borken: 1; + unsigned int disconnect: 1; + unsigned int soft_reset: 1; + unsigned int sdtr: 1; + unsigned int wdtr: 1; + unsigned int ppr: 1; + unsigned int tagged_supported: 1; + unsigned int simple_tags: 1; + unsigned int was_reset: 1; + unsigned int expecting_cc_ua: 1; + unsigned int use_10_for_rw: 1; + unsigned int use_10_for_ms: 1; + unsigned int set_dbd_for_ms: 1; + unsigned int no_report_opcodes: 1; + unsigned int no_write_same: 1; + unsigned int use_16_for_rw: 1; + unsigned int skip_ms_page_8: 1; + unsigned int skip_ms_page_3f: 1; + unsigned int skip_vpd_pages: 1; + unsigned int try_vpd_pages: 1; + unsigned int use_192_bytes_for_3f: 1; + unsigned int no_start_on_add: 1; + unsigned int allow_restart: 1; + unsigned int manage_start_stop: 1; + unsigned int start_stop_pwr_cond: 1; + unsigned int no_uld_attach: 1; + unsigned int select_no_atn: 1; + unsigned int fix_capacity: 1; + unsigned int guess_capacity: 1; + unsigned int retry_hwerror: 
1; + unsigned int last_sector_bug: 1; + unsigned int no_read_disc_info: 1; + unsigned int no_read_capacity_16: 1; + unsigned int try_rc_10_first: 1; + unsigned int security_supported: 1; + unsigned int is_visible: 1; + unsigned int wce_default_on: 1; + unsigned int no_dif: 1; + unsigned int broken_fua: 1; + unsigned int lun_in_cdb: 1; + unsigned int unmap_limit_for_ws: 1; + unsigned int rpm_autosuspend: 1; + bool offline_already; + atomic_t disk_events_disable_depth; + long unsigned int supported_events[1]; + long unsigned int pending_events[1]; + struct list_head event_list; + struct work_struct event_work; + unsigned int max_device_blocked; + atomic_t iorequest_cnt; + atomic_t iodone_cnt; + atomic_t ioerr_cnt; + struct device sdev_gendev; + struct device sdev_dev; + struct execute_work ew; + struct work_struct requeue_work; + struct scsi_device_handler *handler; + void *handler_data; + size_t dma_drain_len; + void *dma_drain_buf; + unsigned char access_state; + struct mutex state_mutex; + enum scsi_device_state sdev_state; + struct task_struct *quiesced_by; + long unsigned int sdev_data[0]; +}; + +enum scsi_host_state { + SHOST_CREATED = 1, + SHOST_RUNNING = 2, + SHOST_CANCEL = 3, + SHOST_DEL = 4, + SHOST_RECOVERY = 5, + SHOST_CANCEL_RECOVERY = 6, + SHOST_DEL_RECOVERY = 7, +}; + +struct scsi_host_template; + +struct scsi_transport_template; + +struct Scsi_Host { + struct list_head __devices; + struct list_head __targets; + struct list_head starved_list; + spinlock_t default_lock; + spinlock_t *host_lock; + struct mutex scan_mutex; + struct list_head eh_cmd_q; + struct task_struct *ehandler; + struct completion *eh_action; + wait_queue_head_t host_wait; + struct scsi_host_template *hostt; + struct scsi_transport_template *transportt; + struct blk_mq_tag_set tag_set; + atomic_t host_blocked; + unsigned int host_failed; + unsigned int host_eh_scheduled; + unsigned int host_no; + int eh_deadline; + long unsigned int last_reset; + unsigned int max_channel; + unsigned int max_id; + u64 max_lun; + unsigned int unique_id; + short unsigned int max_cmd_len; + int this_id; + int can_queue; + short int cmd_per_lun; + short unsigned int sg_tablesize; + short unsigned int sg_prot_tablesize; + unsigned int max_sectors; + unsigned int max_segment_size; + long unsigned int dma_boundary; + long unsigned int virt_boundary_mask; + unsigned int nr_hw_queues; + unsigned int nr_maps; + unsigned int active_mode: 2; + unsigned int host_self_blocked: 1; + unsigned int reverse_ordering: 1; + unsigned int tmf_in_progress: 1; + unsigned int async_scan: 1; + unsigned int eh_noresume: 1; + unsigned int no_write_same: 1; + unsigned int host_tagset: 1; + unsigned int short_inquiry: 1; + unsigned int no_scsi2_lun_in_cdb: 1; + char work_q_name[20]; + struct workqueue_struct *work_q; + struct workqueue_struct *tmf_work_q; + unsigned int max_host_blocked; + unsigned int prot_capabilities; + unsigned char prot_guard_type; + long unsigned int base; + long unsigned int io_port; + unsigned char n_io_port; + unsigned char dma_channel; + unsigned int irq; + enum scsi_host_state shost_state; + struct device shost_gendev; + struct device shost_dev; + void *shost_data; + struct device *dma_dev; + long unsigned int hostdata[0]; +}; + +enum scsi_target_state { + STARGET_CREATED = 1, + STARGET_RUNNING = 2, + STARGET_REMOVE = 3, + STARGET_CREATED_REMOVE = 4, + STARGET_DEL = 5, +}; + +struct scsi_target { + struct scsi_device *starget_sdev_user; + struct list_head siblings; + struct list_head devices; + struct device dev; + struct kref 
reap_ref; + unsigned int channel; + unsigned int id; + unsigned int create: 1; + unsigned int single_lun: 1; + unsigned int pdt_1f_for_no_lun: 1; + unsigned int no_report_luns: 1; + unsigned int expecting_lun_change: 1; + atomic_t target_busy; + atomic_t target_blocked; + unsigned int can_queue; + unsigned int max_target_blocked; + char scsi_level; + enum scsi_target_state state; + void *hostdata; + long unsigned int starget_data[0]; +}; + +struct scsi_host_cmd_pool; + +struct scsi_cmnd; + +struct scsi_host_template { + unsigned int cmd_size; + int (*queuecommand)(struct Scsi_Host *, struct scsi_cmnd *); + void (*commit_rqs)(struct Scsi_Host *, u16); + struct module *module; + const char *name; + const char * (*info)(struct Scsi_Host *); + int (*ioctl)(struct scsi_device *, unsigned int, void *); + int (*compat_ioctl)(struct scsi_device *, unsigned int, void *); + int (*init_cmd_priv)(struct Scsi_Host *, struct scsi_cmnd *); + int (*exit_cmd_priv)(struct Scsi_Host *, struct scsi_cmnd *); + int (*eh_abort_handler)(struct scsi_cmnd *); + int (*eh_device_reset_handler)(struct scsi_cmnd *); + int (*eh_target_reset_handler)(struct scsi_cmnd *); + int (*eh_bus_reset_handler)(struct scsi_cmnd *); + int (*eh_host_reset_handler)(struct scsi_cmnd *); + int (*slave_alloc)(struct scsi_device *); + int (*slave_configure)(struct scsi_device *); + void (*slave_destroy)(struct scsi_device *); + int (*target_alloc)(struct scsi_target *); + void (*target_destroy)(struct scsi_target *); + int (*scan_finished)(struct Scsi_Host *, long unsigned int); + void (*scan_start)(struct Scsi_Host *); + int (*change_queue_depth)(struct scsi_device *, int); + int (*map_queues)(struct Scsi_Host *); + int (*mq_poll)(struct Scsi_Host *, unsigned int); + bool (*dma_need_drain)(struct request *); + int (*bios_param)(struct scsi_device *, struct block_device *, sector_t, int *); + void (*unlock_native_capacity)(struct scsi_device *); + int (*show_info)(struct seq_file *, struct Scsi_Host *); + int (*write_info)(struct Scsi_Host *, char *, int); + enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *); + bool (*eh_should_retry_cmd)(struct scsi_cmnd *); + int (*host_reset)(struct Scsi_Host *, int); + const char *proc_name; + struct proc_dir_entry *proc_dir; + int can_queue; + int this_id; + short unsigned int sg_tablesize; + short unsigned int sg_prot_tablesize; + unsigned int max_sectors; + unsigned int max_segment_size; + long unsigned int dma_boundary; + long unsigned int virt_boundary_mask; + short int cmd_per_lun; + unsigned char present; + int tag_alloc_policy; + unsigned int track_queue_depth: 1; + unsigned int supported_mode: 2; + unsigned int emulated: 1; + unsigned int skip_settle_delay: 1; + unsigned int no_write_same: 1; + unsigned int host_tagset: 1; + unsigned int max_host_blocked; + struct device_attribute **shost_attrs; + struct device_attribute **sdev_attrs; + const struct attribute_group **sdev_groups; + u64 vendor_id; + struct scsi_host_cmd_pool *cmd_pool; + int rpm_autosuspend_delay; +}; + +struct scsi_data_buffer { + struct sg_table table; + unsigned int length; +}; + +struct scsi_pointer { + char *ptr; + int this_residual; + struct scatterlist *buffer; + int buffers_residual; + dma_addr_t dma_handle; + volatile int Status; + volatile int Message; + volatile int have_data_in; + volatile int sent_command; + volatile int phase; +}; + +struct scsi_cmnd { + struct scsi_request req; + struct scsi_device *device; + struct list_head eh_entry; + struct delayed_work abort_work; + struct callback_head rcu; + int 
eh_eflags; + int budget_token; + long unsigned int jiffies_at_alloc; + int retries; + int allowed; + unsigned char prot_op; + unsigned char prot_type; + unsigned char prot_flags; + short unsigned int cmd_len; + enum dma_data_direction sc_data_direction; + unsigned char *cmnd; + struct scsi_data_buffer sdb; + struct scsi_data_buffer *prot_sdb; + unsigned int underflow; + unsigned int transfersize; + struct request *request; + unsigned char *sense_buffer; + void (*scsi_done)(struct scsi_cmnd *); + struct scsi_pointer SCp; + unsigned char *host_scribble; + int result; + int flags; + long unsigned int state; + unsigned char tag; + unsigned int extra_len; +}; + +enum scsi_prot_operations { + SCSI_PROT_NORMAL = 0, + SCSI_PROT_READ_INSERT = 1, + SCSI_PROT_WRITE_STRIP = 2, + SCSI_PROT_READ_STRIP = 3, + SCSI_PROT_WRITE_INSERT = 4, + SCSI_PROT_READ_PASS = 5, + SCSI_PROT_WRITE_PASS = 6, +}; + +struct scsi_driver { + struct device_driver gendrv; + void (*rescan)(struct device *); + blk_status_t (*init_command)(struct scsi_cmnd *); + void (*uninit_command)(struct scsi_cmnd *); + int (*done)(struct scsi_cmnd *); + int (*eh_action)(struct scsi_cmnd *, int); + void (*eh_reset)(struct scsi_cmnd *); +}; + +struct trace_event_raw_scsi_dispatch_cmd_start { + struct trace_entry ent; + unsigned int host_no; + unsigned int channel; + unsigned int id; + unsigned int lun; + unsigned int opcode; + unsigned int cmd_len; + unsigned int data_sglen; + unsigned int prot_sglen; + unsigned char prot_op; + u32 __data_loc_cmnd; + char __data[0]; +}; + +struct trace_event_raw_scsi_dispatch_cmd_error { + struct trace_entry ent; + unsigned int host_no; + unsigned int channel; + unsigned int id; + unsigned int lun; + int rtn; + unsigned int opcode; + unsigned int cmd_len; + unsigned int data_sglen; + unsigned int prot_sglen; + unsigned char prot_op; + u32 __data_loc_cmnd; + char __data[0]; +}; + +struct trace_event_raw_scsi_cmd_done_timeout_template { + struct trace_entry ent; + unsigned int host_no; + unsigned int channel; + unsigned int id; + unsigned int lun; + int result; + unsigned int opcode; + unsigned int cmd_len; + unsigned int data_sglen; + unsigned int prot_sglen; + unsigned char prot_op; + u32 __data_loc_cmnd; + char __data[0]; +}; + +struct trace_event_raw_scsi_eh_wakeup { + struct trace_entry ent; + unsigned int host_no; + char __data[0]; +}; + +struct trace_event_data_offsets_scsi_dispatch_cmd_start { + u32 cmnd; +}; + +struct trace_event_data_offsets_scsi_dispatch_cmd_error { + u32 cmnd; +}; + +struct trace_event_data_offsets_scsi_cmd_done_timeout_template { + u32 cmnd; +}; + +struct trace_event_data_offsets_scsi_eh_wakeup {}; + +typedef void (*btf_trace_scsi_dispatch_cmd_start)(void *, struct scsi_cmnd *); + +typedef void (*btf_trace_scsi_dispatch_cmd_error)(void *, struct scsi_cmnd *, int); + +typedef void (*btf_trace_scsi_dispatch_cmd_done)(void *, struct scsi_cmnd *); + +typedef void (*btf_trace_scsi_dispatch_cmd_timeout)(void *, struct scsi_cmnd *); + +typedef void (*btf_trace_scsi_eh_wakeup)(void *, struct Scsi_Host *); + +struct scsi_transport_template { + struct transport_container host_attrs; + struct transport_container target_attrs; + struct transport_container device_attrs; + int (*user_scan)(struct Scsi_Host *, uint, uint, u64); + int device_size; + int device_private_offset; + int target_size; + int target_private_offset; + int host_size; + unsigned int create_work_queue: 1; + void (*eh_strategy_handler)(struct Scsi_Host *); +}; + +struct scsi_host_busy_iter_data { + bool (*fn)(struct scsi_cmnd *, 
void *, bool); + void *priv; +}; + +struct scsi_idlun { + __u32 dev_id; + __u32 host_unique_id; +}; + +typedef void (*activate_complete)(void *, int); + +struct scsi_device_handler { + struct list_head list; + struct module *module; + const char *name; + enum scsi_disposition (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *); + int (*attach)(struct scsi_device *); + void (*detach)(struct scsi_device *); + int (*activate)(struct scsi_device *, activate_complete, void *); + blk_status_t (*prep_fn)(struct scsi_device *, struct request *); + int (*set_params)(struct scsi_device *, const char *); + void (*rescan)(struct scsi_device *); +}; + +struct scsi_eh_save { + int result; + unsigned int resid_len; + int eh_eflags; + enum dma_data_direction data_direction; + unsigned int underflow; + unsigned char cmd_len; + unsigned char prot_op; + unsigned char *cmnd; + struct scsi_data_buffer sdb; + unsigned char eh_cmnd[16]; + struct scatterlist sense_sgl; +}; + +struct scsi_varlen_cdb_hdr { + __u8 opcode; + __u8 control; + __u8 misc[5]; + __u8 additional_cdb_length; + __be16 service_action; +}; + +struct scsi_mode_data { + __u32 length; + __u16 block_descriptor_length; + __u8 medium_type; + __u8 device_specific; + __u8 header_length; + __u8 longlba: 1; +}; + +struct scsi_event { + enum scsi_device_event evt_type; + struct list_head node; +}; + +enum scsi_host_prot_capabilities { + SHOST_DIF_TYPE1_PROTECTION = 1, + SHOST_DIF_TYPE2_PROTECTION = 2, + SHOST_DIF_TYPE3_PROTECTION = 4, + SHOST_DIX_TYPE0_PROTECTION = 8, + SHOST_DIX_TYPE1_PROTECTION = 16, + SHOST_DIX_TYPE2_PROTECTION = 32, + SHOST_DIX_TYPE3_PROTECTION = 64, +}; + +enum { + ACTION_FAIL = 0, + ACTION_REPREP = 1, + ACTION_RETRY = 2, + ACTION_DELAYED_RETRY = 3, +}; + +struct value_name_pair; + +struct sa_name_list { + int opcode; + const struct value_name_pair *arr; + int arr_sz; +}; + +struct value_name_pair { + int value; + const char *name; +}; + +struct error_info { + short unsigned int code12; + short unsigned int size; +}; + +struct error_info2 { + unsigned char code1; + unsigned char code2_min; + unsigned char code2_max; + const char *str; + const char *fmt; +}; + +struct scsi_lun { + __u8 scsi_lun[8]; +}; + +enum scsi_timeouts { + SCSI_DEFAULT_EH_TIMEOUT = 2500, +}; + +enum scsi_scan_mode { + SCSI_SCAN_INITIAL = 0, + SCSI_SCAN_RESCAN = 1, + SCSI_SCAN_MANUAL = 2, +}; + +struct async_scan_data { + struct list_head list; + struct Scsi_Host *shost; + struct completion prev_finished; +}; + +enum scsi_devinfo_key { + SCSI_DEVINFO_GLOBAL = 0, + SCSI_DEVINFO_SPI = 1, +}; + +struct scsi_dev_info_list { + struct list_head dev_info_list; + char vendor[8]; + char model[16]; + blist_flags_t flags; + unsigned int compatible; +}; + +struct scsi_dev_info_list_table { + struct list_head node; + struct list_head scsi_dev_info_list; + const char *name; + int key; +}; + +struct double_list { + struct list_head *top; + struct list_head *bottom; +}; + +struct scsi_nl_hdr { + __u8 version; + __u8 transport; + __u16 magic; + __u16 msgtype; + __u16 msglen; +}; + +enum { + SCSI_DH_OK = 0, + SCSI_DH_DEV_FAILED = 1, + SCSI_DH_DEV_TEMP_BUSY = 2, + SCSI_DH_DEV_UNSUPP = 3, + SCSI_DH_DEVICE_MAX = 4, + SCSI_DH_NOTCONN = 5, + SCSI_DH_CONN_FAILURE = 6, + SCSI_DH_TRANSPORT_MAX = 7, + SCSI_DH_IO = 8, + SCSI_DH_INVALID_IO = 9, + SCSI_DH_RETRY = 10, + SCSI_DH_IMM_RETRY = 11, + SCSI_DH_TIMED_OUT = 12, + SCSI_DH_RES_TEMP_UNAVAIL = 13, + SCSI_DH_DEV_OFFLINED = 14, + SCSI_DH_NOMEM = 15, + SCSI_DH_NOSYS = 16, + SCSI_DH_DRIVER_MAX = 17, +}; + +struct scsi_dh_blist { + 
const char *vendor; + const char *model; + const char *driver; +}; + +enum scsi_prot_flags { + SCSI_PROT_TRANSFER_PI = 1, + SCSI_PROT_GUARD_CHECK = 2, + SCSI_PROT_REF_CHECK = 4, + SCSI_PROT_REF_INCREMENT = 8, + SCSI_PROT_IP_CHECKSUM = 16, +}; + +enum { + SD_EXT_CDB_SIZE = 32, + SD_MEMPOOL_SIZE = 2, +}; + +enum { + SD_DEF_XFER_BLOCKS = 65535, + SD_MAX_XFER_BLOCKS = 4294967295, + SD_MAX_WS10_BLOCKS = 65535, + SD_MAX_WS16_BLOCKS = 8388607, +}; + +enum { + SD_LBP_FULL = 0, + SD_LBP_UNMAP = 1, + SD_LBP_WS16 = 2, + SD_LBP_WS10 = 3, + SD_LBP_ZERO = 4, + SD_LBP_DISABLE = 5, +}; + +enum { + SD_ZERO_WRITE = 0, + SD_ZERO_WS = 1, + SD_ZERO_WS16_UNMAP = 2, + SD_ZERO_WS10_UNMAP = 3, +}; + +struct opal_dev___2; + +struct scsi_disk { + struct scsi_driver *driver; + struct scsi_device *device; + struct device dev; + struct gendisk *disk; + struct opal_dev___2 *opal_dev; + u32 nr_zones; + u32 rev_nr_zones; + u32 zone_blocks; + u32 rev_zone_blocks; + u32 zones_optimal_open; + u32 zones_optimal_nonseq; + u32 zones_max_open; + u32 *zones_wp_offset; + spinlock_t zones_wp_offset_lock; + u32 *rev_wp_offset; + struct mutex rev_mutex; + struct work_struct zone_wp_offset_work; + char *zone_wp_update_buf; + atomic_t openers; + sector_t capacity; + int max_retries; + u32 max_xfer_blocks; + u32 opt_xfer_blocks; + u32 max_ws_blocks; + u32 max_unmap_blocks; + u32 unmap_granularity; + u32 unmap_alignment; + u32 index; + unsigned int physical_block_size; + unsigned int max_medium_access_timeouts; + unsigned int medium_access_timed_out; + u8 media_present; + u8 write_prot; + u8 protection_type; + u8 provisioning_mode; + u8 zeroing_mode; + unsigned int ATO: 1; + unsigned int cache_override: 1; + unsigned int WCE: 1; + unsigned int RCD: 1; + unsigned int DPOFUA: 1; + unsigned int first_scan: 1; + unsigned int lbpme: 1; + unsigned int lbprz: 1; + unsigned int lbpu: 1; + unsigned int lbpws: 1; + unsigned int lbpws10: 1; + unsigned int lbpvpd: 1; + unsigned int ws10: 1; + unsigned int ws16: 1; + unsigned int rc_basis: 2; + unsigned int zoned: 2; + unsigned int urswrz: 1; + unsigned int security: 1; + unsigned int ignore_medium_access_errors: 1; +}; + +enum scsi_host_guard_type { + SHOST_DIX_GUARD_CRC = 1, + SHOST_DIX_GUARD_IP = 2, +}; + +enum zbc_zone_type { + ZBC_ZONE_TYPE_CONV = 1, + ZBC_ZONE_TYPE_SEQWRITE_REQ = 2, + ZBC_ZONE_TYPE_SEQWRITE_PREF = 3, +}; + +enum zbc_zone_cond { + ZBC_ZONE_COND_NO_WP = 0, + ZBC_ZONE_COND_EMPTY = 1, + ZBC_ZONE_COND_IMP_OPEN = 2, + ZBC_ZONE_COND_EXP_OPEN = 3, + ZBC_ZONE_COND_CLOSED = 4, + ZBC_ZONE_COND_READONLY = 13, + ZBC_ZONE_COND_FULL = 14, + ZBC_ZONE_COND_OFFLINE = 15, +}; + +enum { + mechtype_caddy = 0, + mechtype_tray = 1, + mechtype_popup = 2, + mechtype_individual_changer = 4, + mechtype_cartridge_changer = 5, +}; + +struct event_header { + __be16 data_len; + __u8 notification_class: 3; + __u8 reserved1: 4; + __u8 nea: 1; + __u8 supp_event_class; +}; + +struct media_event_desc { + __u8 media_event_code: 4; + __u8 reserved1: 4; + __u8 door_open: 1; + __u8 media_present: 1; + __u8 reserved2: 6; + __u8 start_slot; + __u8 end_slot; +}; + +struct scsi_cd { + struct scsi_driver *driver; + unsigned int capacity; + struct scsi_device *device; + unsigned int vendor; + long unsigned int ms_offset; + unsigned int writeable: 1; + unsigned int use: 1; + unsigned int xa_flag: 1; + unsigned int readcd_known: 1; + unsigned int readcd_cdda: 1; + unsigned int media_present: 1; + int tur_mismatch; + bool tur_changed: 1; + bool get_event_changed: 1; + bool ignore_get_event: 1; + struct cdrom_device_info cdi; 
+ struct mutex lock; + struct kref kref; + struct gendisk *disk; +}; + +typedef struct scsi_cd Scsi_CD; + +struct cdrom_ti { + __u8 cdti_trk0; + __u8 cdti_ind0; + __u8 cdti_trk1; + __u8 cdti_ind1; +}; + +struct cdrom_tochdr { + __u8 cdth_trk0; + __u8 cdth_trk1; +}; + +struct cdrom_tocentry { + __u8 cdte_track; + __u8 cdte_adr: 4; + __u8 cdte_ctrl: 4; + __u8 cdte_format; + union cdrom_addr cdte_addr; + __u8 cdte_datamode; +}; + +struct ccs_modesel_head { + __u8 _r1; + __u8 medium; + __u8 _r2; + __u8 block_desc_length; + __u8 density; + __u8 number_blocks_hi; + __u8 number_blocks_med; + __u8 number_blocks_lo; + __u8 _r3; + __u8 block_length_hi; + __u8 block_length_med; + __u8 block_length_lo; +}; + +typedef struct sg_io_hdr sg_io_hdr_t; + +struct sg_scsi_id { + int host_no; + int channel; + int scsi_id; + int lun; + int scsi_type; + short int h_cmd_per_lun; + short int d_queue_depth; + int unused[2]; +}; + +typedef struct sg_scsi_id sg_scsi_id_t; + +struct sg_req_info { + char req_state; + char orphan; + char sg_io_owned; + char problem; + int pack_id; + void *usr_ptr; + unsigned int duration; + int unused; +}; + +typedef struct sg_req_info sg_req_info_t; + +struct sg_header { + int pack_len; + int reply_len; + int pack_id; + int result; + unsigned int twelve_byte: 1; + unsigned int target_status: 5; + unsigned int host_status: 8; + unsigned int driver_status: 8; + unsigned int other_flags: 10; + unsigned char sense_buffer[16]; +}; + +struct sg_scatter_hold { + short unsigned int k_use_sg; + unsigned int sglist_len; + unsigned int bufflen; + struct page **pages; + int page_order; + char dio_in_use; + unsigned char cmd_opcode; +}; + +typedef struct sg_scatter_hold Sg_scatter_hold; + +struct sg_fd; + +struct sg_request { + struct list_head entry; + struct sg_fd *parentfp; + Sg_scatter_hold data; + sg_io_hdr_t header; + unsigned char sense_b[96]; + char res_used; + char orphan; + char sg_io_owned; + char done; + struct request *rq; + struct bio *bio; + struct execute_work ew; +}; + +typedef struct sg_request Sg_request; + +struct sg_device; + +struct sg_fd { + struct list_head sfd_siblings; + struct sg_device *parentdp; + wait_queue_head_t read_wait; + rwlock_t rq_list_lock; + struct mutex f_mutex; + int timeout; + int timeout_user; + Sg_scatter_hold reserve; + struct list_head rq_list; + struct fasync_struct *async_qp; + Sg_request req_arr[16]; + char force_packid; + char cmd_q; + unsigned char next_cmd_len; + char keep_orphan; + char mmap_called; + char res_in_use; + struct kref f_ref; + struct execute_work ew; +}; + +struct sg_device { + struct scsi_device *device; + wait_queue_head_t open_wait; + struct mutex open_rel_lock; + int sg_tablesize; + u32 index; + struct list_head sfds; + rwlock_t sfd_lock; + atomic_t detaching; + bool exclude; + int open_cnt; + char sgdebug; + struct gendisk *disk; + struct cdev *cdev; + struct kref d_ref; +}; + +typedef struct sg_fd Sg_fd; + +typedef struct sg_device Sg_device; + +struct compat_sg_req_info { + char req_state; + char orphan; + char sg_io_owned; + char problem; + int pack_id; + compat_uptr_t usr_ptr; + unsigned int duration; + int unused; +}; + +struct sg_proc_deviter { + loff_t index; + size_t max; +}; + +enum { + ATA_MAX_DEVICES = 2, + ATA_MAX_PRD = 256, + ATA_SECT_SIZE = 512, + ATA_MAX_SECTORS_128 = 128, + ATA_MAX_SECTORS = 256, + ATA_MAX_SECTORS_1024 = 1024, + ATA_MAX_SECTORS_LBA48 = 65535, + ATA_MAX_SECTORS_TAPE = 65535, + ATA_MAX_TRIM_RNUM = 64, + ATA_ID_WORDS = 256, + ATA_ID_CONFIG = 0, + ATA_ID_CYLS = 1, + ATA_ID_HEADS = 3, + 
ATA_ID_SECTORS = 6, + ATA_ID_SERNO = 10, + ATA_ID_BUF_SIZE = 21, + ATA_ID_FW_REV = 23, + ATA_ID_PROD = 27, + ATA_ID_MAX_MULTSECT = 47, + ATA_ID_DWORD_IO = 48, + ATA_ID_TRUSTED = 48, + ATA_ID_CAPABILITY = 49, + ATA_ID_OLD_PIO_MODES = 51, + ATA_ID_OLD_DMA_MODES = 52, + ATA_ID_FIELD_VALID = 53, + ATA_ID_CUR_CYLS = 54, + ATA_ID_CUR_HEADS = 55, + ATA_ID_CUR_SECTORS = 56, + ATA_ID_MULTSECT = 59, + ATA_ID_LBA_CAPACITY = 60, + ATA_ID_SWDMA_MODES = 62, + ATA_ID_MWDMA_MODES = 63, + ATA_ID_PIO_MODES = 64, + ATA_ID_EIDE_DMA_MIN = 65, + ATA_ID_EIDE_DMA_TIME = 66, + ATA_ID_EIDE_PIO = 67, + ATA_ID_EIDE_PIO_IORDY = 68, + ATA_ID_ADDITIONAL_SUPP = 69, + ATA_ID_QUEUE_DEPTH = 75, + ATA_ID_SATA_CAPABILITY = 76, + ATA_ID_SATA_CAPABILITY_2 = 77, + ATA_ID_FEATURE_SUPP = 78, + ATA_ID_MAJOR_VER = 80, + ATA_ID_COMMAND_SET_1 = 82, + ATA_ID_COMMAND_SET_2 = 83, + ATA_ID_CFSSE = 84, + ATA_ID_CFS_ENABLE_1 = 85, + ATA_ID_CFS_ENABLE_2 = 86, + ATA_ID_CSF_DEFAULT = 87, + ATA_ID_UDMA_MODES = 88, + ATA_ID_HW_CONFIG = 93, + ATA_ID_SPG = 98, + ATA_ID_LBA_CAPACITY_2 = 100, + ATA_ID_SECTOR_SIZE = 106, + ATA_ID_WWN = 108, + ATA_ID_LOGICAL_SECTOR_SIZE = 117, + ATA_ID_COMMAND_SET_3 = 119, + ATA_ID_COMMAND_SET_4 = 120, + ATA_ID_LAST_LUN = 126, + ATA_ID_DLF = 128, + ATA_ID_CSFO = 129, + ATA_ID_CFA_POWER = 160, + ATA_ID_CFA_KEY_MGMT = 162, + ATA_ID_CFA_MODES = 163, + ATA_ID_DATA_SET_MGMT = 169, + ATA_ID_SCT_CMD_XPORT = 206, + ATA_ID_ROT_SPEED = 217, + ATA_ID_PIO4 = 2, + ATA_ID_SERNO_LEN = 20, + ATA_ID_FW_REV_LEN = 8, + ATA_ID_PROD_LEN = 40, + ATA_ID_WWN_LEN = 8, + ATA_PCI_CTL_OFS = 2, + ATA_PIO0 = 1, + ATA_PIO1 = 3, + ATA_PIO2 = 7, + ATA_PIO3 = 15, + ATA_PIO4 = 31, + ATA_PIO5 = 63, + ATA_PIO6 = 127, + ATA_PIO4_ONLY = 16, + ATA_SWDMA0 = 1, + ATA_SWDMA1 = 3, + ATA_SWDMA2 = 7, + ATA_SWDMA2_ONLY = 4, + ATA_MWDMA0 = 1, + ATA_MWDMA1 = 3, + ATA_MWDMA2 = 7, + ATA_MWDMA3 = 15, + ATA_MWDMA4 = 31, + ATA_MWDMA12_ONLY = 6, + ATA_MWDMA2_ONLY = 4, + ATA_UDMA0 = 1, + ATA_UDMA1 = 3, + ATA_UDMA2 = 7, + ATA_UDMA3 = 15, + ATA_UDMA4 = 31, + ATA_UDMA5 = 63, + ATA_UDMA6 = 127, + ATA_UDMA7 = 255, + ATA_UDMA24_ONLY = 20, + ATA_UDMA_MASK_40C = 7, + ATA_PRD_SZ = 8, + ATA_PRD_TBL_SZ = 2048, + ATA_PRD_EOT = 2147483648, + ATA_DMA_TABLE_OFS = 4, + ATA_DMA_STATUS = 2, + ATA_DMA_CMD = 0, + ATA_DMA_WR = 8, + ATA_DMA_START = 1, + ATA_DMA_INTR = 4, + ATA_DMA_ERR = 2, + ATA_DMA_ACTIVE = 1, + ATA_HOB = 128, + ATA_NIEN = 2, + ATA_LBA = 64, + ATA_DEV1 = 16, + ATA_DEVICE_OBS = 160, + ATA_DEVCTL_OBS = 8, + ATA_BUSY = 128, + ATA_DRDY = 64, + ATA_DF = 32, + ATA_DSC = 16, + ATA_DRQ = 8, + ATA_CORR = 4, + ATA_SENSE = 2, + ATA_ERR = 1, + ATA_SRST = 4, + ATA_ICRC = 128, + ATA_BBK = 128, + ATA_UNC = 64, + ATA_MC = 32, + ATA_IDNF = 16, + ATA_MCR = 8, + ATA_ABORTED = 4, + ATA_TRK0NF = 2, + ATA_AMNF = 1, + ATAPI_LFS = 240, + ATAPI_EOM = 2, + ATAPI_ILI = 1, + ATAPI_IO = 2, + ATAPI_COD = 1, + ATA_REG_DATA = 0, + ATA_REG_ERR = 1, + ATA_REG_NSECT = 2, + ATA_REG_LBAL = 3, + ATA_REG_LBAM = 4, + ATA_REG_LBAH = 5, + ATA_REG_DEVICE = 6, + ATA_REG_STATUS = 7, + ATA_REG_FEATURE = 1, + ATA_REG_CMD = 7, + ATA_REG_BYTEL = 4, + ATA_REG_BYTEH = 5, + ATA_REG_DEVSEL = 6, + ATA_REG_IRQ = 2, + ATA_CMD_DEV_RESET = 8, + ATA_CMD_CHK_POWER = 229, + ATA_CMD_STANDBY = 226, + ATA_CMD_IDLE = 227, + ATA_CMD_EDD = 144, + ATA_CMD_DOWNLOAD_MICRO = 146, + ATA_CMD_DOWNLOAD_MICRO_DMA = 147, + ATA_CMD_NOP = 0, + ATA_CMD_FLUSH = 231, + ATA_CMD_FLUSH_EXT = 234, + ATA_CMD_ID_ATA = 236, + ATA_CMD_ID_ATAPI = 161, + ATA_CMD_SERVICE = 162, + ATA_CMD_READ = 200, + ATA_CMD_READ_EXT = 37, + ATA_CMD_READ_QUEUED = 38, + 
ATA_CMD_READ_STREAM_EXT = 43, + ATA_CMD_READ_STREAM_DMA_EXT = 42, + ATA_CMD_WRITE = 202, + ATA_CMD_WRITE_EXT = 53, + ATA_CMD_WRITE_QUEUED = 54, + ATA_CMD_WRITE_STREAM_EXT = 59, + ATA_CMD_WRITE_STREAM_DMA_EXT = 58, + ATA_CMD_WRITE_FUA_EXT = 61, + ATA_CMD_WRITE_QUEUED_FUA_EXT = 62, + ATA_CMD_FPDMA_READ = 96, + ATA_CMD_FPDMA_WRITE = 97, + ATA_CMD_NCQ_NON_DATA = 99, + ATA_CMD_FPDMA_SEND = 100, + ATA_CMD_FPDMA_RECV = 101, + ATA_CMD_PIO_READ = 32, + ATA_CMD_PIO_READ_EXT = 36, + ATA_CMD_PIO_WRITE = 48, + ATA_CMD_PIO_WRITE_EXT = 52, + ATA_CMD_READ_MULTI = 196, + ATA_CMD_READ_MULTI_EXT = 41, + ATA_CMD_WRITE_MULTI = 197, + ATA_CMD_WRITE_MULTI_EXT = 57, + ATA_CMD_WRITE_MULTI_FUA_EXT = 206, + ATA_CMD_SET_FEATURES = 239, + ATA_CMD_SET_MULTI = 198, + ATA_CMD_PACKET = 160, + ATA_CMD_VERIFY = 64, + ATA_CMD_VERIFY_EXT = 66, + ATA_CMD_WRITE_UNCORR_EXT = 69, + ATA_CMD_STANDBYNOW1 = 224, + ATA_CMD_IDLEIMMEDIATE = 225, + ATA_CMD_SLEEP = 230, + ATA_CMD_INIT_DEV_PARAMS = 145, + ATA_CMD_READ_NATIVE_MAX = 248, + ATA_CMD_READ_NATIVE_MAX_EXT = 39, + ATA_CMD_SET_MAX = 249, + ATA_CMD_SET_MAX_EXT = 55, + ATA_CMD_READ_LOG_EXT = 47, + ATA_CMD_WRITE_LOG_EXT = 63, + ATA_CMD_READ_LOG_DMA_EXT = 71, + ATA_CMD_WRITE_LOG_DMA_EXT = 87, + ATA_CMD_TRUSTED_NONDATA = 91, + ATA_CMD_TRUSTED_RCV = 92, + ATA_CMD_TRUSTED_RCV_DMA = 93, + ATA_CMD_TRUSTED_SND = 94, + ATA_CMD_TRUSTED_SND_DMA = 95, + ATA_CMD_PMP_READ = 228, + ATA_CMD_PMP_READ_DMA = 233, + ATA_CMD_PMP_WRITE = 232, + ATA_CMD_PMP_WRITE_DMA = 235, + ATA_CMD_CONF_OVERLAY = 177, + ATA_CMD_SEC_SET_PASS = 241, + ATA_CMD_SEC_UNLOCK = 242, + ATA_CMD_SEC_ERASE_PREP = 243, + ATA_CMD_SEC_ERASE_UNIT = 244, + ATA_CMD_SEC_FREEZE_LOCK = 245, + ATA_CMD_SEC_DISABLE_PASS = 246, + ATA_CMD_CONFIG_STREAM = 81, + ATA_CMD_SMART = 176, + ATA_CMD_MEDIA_LOCK = 222, + ATA_CMD_MEDIA_UNLOCK = 223, + ATA_CMD_DSM = 6, + ATA_CMD_CHK_MED_CRD_TYP = 209, + ATA_CMD_CFA_REQ_EXT_ERR = 3, + ATA_CMD_CFA_WRITE_NE = 56, + ATA_CMD_CFA_TRANS_SECT = 135, + ATA_CMD_CFA_ERASE = 192, + ATA_CMD_CFA_WRITE_MULT_NE = 205, + ATA_CMD_REQ_SENSE_DATA = 11, + ATA_CMD_SANITIZE_DEVICE = 180, + ATA_CMD_ZAC_MGMT_IN = 74, + ATA_CMD_ZAC_MGMT_OUT = 159, + ATA_CMD_RESTORE = 16, + ATA_SUBCMD_FPDMA_RECV_RD_LOG_DMA_EXT = 1, + ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN = 2, + ATA_SUBCMD_FPDMA_SEND_DSM = 0, + ATA_SUBCMD_FPDMA_SEND_WR_LOG_DMA_EXT = 2, + ATA_SUBCMD_NCQ_NON_DATA_ABORT_QUEUE = 0, + ATA_SUBCMD_NCQ_NON_DATA_SET_FEATURES = 5, + ATA_SUBCMD_NCQ_NON_DATA_ZERO_EXT = 6, + ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT = 7, + ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES = 0, + ATA_SUBCMD_ZAC_MGMT_OUT_CLOSE_ZONE = 1, + ATA_SUBCMD_ZAC_MGMT_OUT_FINISH_ZONE = 2, + ATA_SUBCMD_ZAC_MGMT_OUT_OPEN_ZONE = 3, + ATA_SUBCMD_ZAC_MGMT_OUT_RESET_WRITE_POINTER = 4, + ATA_LOG_DIRECTORY = 0, + ATA_LOG_SATA_NCQ = 16, + ATA_LOG_NCQ_NON_DATA = 18, + ATA_LOG_NCQ_SEND_RECV = 19, + ATA_LOG_IDENTIFY_DEVICE = 48, + ATA_LOG_SECURITY = 6, + ATA_LOG_SATA_SETTINGS = 8, + ATA_LOG_ZONED_INFORMATION = 9, + ATA_LOG_DEVSLP_OFFSET = 48, + ATA_LOG_DEVSLP_SIZE = 8, + ATA_LOG_DEVSLP_MDAT = 0, + ATA_LOG_DEVSLP_MDAT_MASK = 31, + ATA_LOG_DEVSLP_DETO = 1, + ATA_LOG_DEVSLP_VALID = 7, + ATA_LOG_DEVSLP_VALID_MASK = 128, + ATA_LOG_NCQ_PRIO_OFFSET = 9, + ATA_LOG_NCQ_SEND_RECV_SUBCMDS_OFFSET = 0, + ATA_LOG_NCQ_SEND_RECV_SUBCMDS_DSM = 1, + ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET = 4, + ATA_LOG_NCQ_SEND_RECV_DSM_TRIM = 1, + ATA_LOG_NCQ_SEND_RECV_RD_LOG_OFFSET = 8, + ATA_LOG_NCQ_SEND_RECV_RD_LOG_SUPPORTED = 1, + ATA_LOG_NCQ_SEND_RECV_WR_LOG_OFFSET = 12, + ATA_LOG_NCQ_SEND_RECV_WR_LOG_SUPPORTED = 1, + 
ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OFFSET = 16, + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OUT_SUPPORTED = 1, + ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_IN_SUPPORTED = 2, + ATA_LOG_NCQ_SEND_RECV_SIZE = 20, + ATA_LOG_NCQ_NON_DATA_SUBCMDS_OFFSET = 0, + ATA_LOG_NCQ_NON_DATA_ABORT_OFFSET = 0, + ATA_LOG_NCQ_NON_DATA_ABORT_NCQ = 1, + ATA_LOG_NCQ_NON_DATA_ABORT_ALL = 2, + ATA_LOG_NCQ_NON_DATA_ABORT_STREAMING = 4, + ATA_LOG_NCQ_NON_DATA_ABORT_NON_STREAMING = 8, + ATA_LOG_NCQ_NON_DATA_ABORT_SELECTED = 16, + ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OFFSET = 28, + ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OUT = 1, + ATA_LOG_NCQ_NON_DATA_SIZE = 64, + ATA_CMD_READ_LONG = 34, + ATA_CMD_READ_LONG_ONCE = 35, + ATA_CMD_WRITE_LONG = 50, + ATA_CMD_WRITE_LONG_ONCE = 51, + SETFEATURES_XFER = 3, + XFER_UDMA_7 = 71, + XFER_UDMA_6 = 70, + XFER_UDMA_5 = 69, + XFER_UDMA_4 = 68, + XFER_UDMA_3 = 67, + XFER_UDMA_2 = 66, + XFER_UDMA_1 = 65, + XFER_UDMA_0 = 64, + XFER_MW_DMA_4 = 36, + XFER_MW_DMA_3 = 35, + XFER_MW_DMA_2 = 34, + XFER_MW_DMA_1 = 33, + XFER_MW_DMA_0 = 32, + XFER_SW_DMA_2 = 18, + XFER_SW_DMA_1 = 17, + XFER_SW_DMA_0 = 16, + XFER_PIO_6 = 14, + XFER_PIO_5 = 13, + XFER_PIO_4 = 12, + XFER_PIO_3 = 11, + XFER_PIO_2 = 10, + XFER_PIO_1 = 9, + XFER_PIO_0 = 8, + XFER_PIO_SLOW = 0, + SETFEATURES_WC_ON = 2, + SETFEATURES_WC_OFF = 130, + SETFEATURES_RA_ON = 170, + SETFEATURES_RA_OFF = 85, + SETFEATURES_AAM_ON = 66, + SETFEATURES_AAM_OFF = 194, + SETFEATURES_SPINUP = 7, + SETFEATURES_SPINUP_TIMEOUT = 30000, + SETFEATURES_SATA_ENABLE = 16, + SETFEATURES_SATA_DISABLE = 144, + SATA_FPDMA_OFFSET = 1, + SATA_FPDMA_AA = 2, + SATA_DIPM = 3, + SATA_FPDMA_IN_ORDER = 4, + SATA_AN = 5, + SATA_SSP = 6, + SATA_DEVSLP = 9, + SETFEATURE_SENSE_DATA = 195, + ATA_SET_MAX_ADDR = 0, + ATA_SET_MAX_PASSWD = 1, + ATA_SET_MAX_LOCK = 2, + ATA_SET_MAX_UNLOCK = 3, + ATA_SET_MAX_FREEZE_LOCK = 4, + ATA_SET_MAX_PASSWD_DMA = 5, + ATA_SET_MAX_UNLOCK_DMA = 6, + ATA_DCO_RESTORE = 192, + ATA_DCO_FREEZE_LOCK = 193, + ATA_DCO_IDENTIFY = 194, + ATA_DCO_SET = 195, + ATA_SMART_ENABLE = 216, + ATA_SMART_READ_VALUES = 208, + ATA_SMART_READ_THRESHOLDS = 209, + ATA_DSM_TRIM = 1, + ATA_SMART_LBAM_PASS = 79, + ATA_SMART_LBAH_PASS = 194, + ATAPI_PKT_DMA = 1, + ATAPI_DMADIR = 4, + ATAPI_CDB_LEN = 16, + SATA_PMP_MAX_PORTS = 15, + SATA_PMP_CTRL_PORT = 15, + SATA_PMP_GSCR_DWORDS = 128, + SATA_PMP_GSCR_PROD_ID = 0, + SATA_PMP_GSCR_REV = 1, + SATA_PMP_GSCR_PORT_INFO = 2, + SATA_PMP_GSCR_ERROR = 32, + SATA_PMP_GSCR_ERROR_EN = 33, + SATA_PMP_GSCR_FEAT = 64, + SATA_PMP_GSCR_FEAT_EN = 96, + SATA_PMP_PSCR_STATUS = 0, + SATA_PMP_PSCR_ERROR = 1, + SATA_PMP_PSCR_CONTROL = 2, + SATA_PMP_FEAT_BIST = 1, + SATA_PMP_FEAT_PMREQ = 2, + SATA_PMP_FEAT_DYNSSC = 4, + SATA_PMP_FEAT_NOTIFY = 8, + ATA_CBL_NONE = 0, + ATA_CBL_PATA40 = 1, + ATA_CBL_PATA80 = 2, + ATA_CBL_PATA40_SHORT = 3, + ATA_CBL_PATA_UNK = 4, + ATA_CBL_PATA_IGN = 5, + ATA_CBL_SATA = 6, + SCR_STATUS = 0, + SCR_ERROR = 1, + SCR_CONTROL = 2, + SCR_ACTIVE = 3, + SCR_NOTIFICATION = 4, + SERR_DATA_RECOVERED = 1, + SERR_COMM_RECOVERED = 2, + SERR_DATA = 256, + SERR_PERSISTENT = 512, + SERR_PROTOCOL = 1024, + SERR_INTERNAL = 2048, + SERR_PHYRDY_CHG = 65536, + SERR_PHY_INT_ERR = 131072, + SERR_COMM_WAKE = 262144, + SERR_10B_8B_ERR = 524288, + SERR_DISPARITY = 1048576, + SERR_CRC = 2097152, + SERR_HANDSHAKE = 4194304, + SERR_LINK_SEQ_ERR = 8388608, + SERR_TRANS_ST_ERROR = 16777216, + SERR_UNRECOG_FIS = 33554432, + SERR_DEV_XCHG = 67108864, +}; + +enum ata_prot_flags { + ATA_PROT_FLAG_PIO = 1, + ATA_PROT_FLAG_DMA = 2, + ATA_PROT_FLAG_NCQ = 4, + ATA_PROT_FLAG_ATAPI = 8, + 
ATA_PROT_UNKNOWN = 255, + ATA_PROT_NODATA = 0, + ATA_PROT_PIO = 1, + ATA_PROT_DMA = 2, + ATA_PROT_NCQ_NODATA = 4, + ATA_PROT_NCQ = 6, + ATAPI_PROT_NODATA = 8, + ATAPI_PROT_PIO = 9, + ATAPI_PROT_DMA = 10, +}; + +struct ata_bmdma_prd { + __le32 addr; + __le32 flags_len; +}; + +enum { + ATA_MSG_DRV = 1, + ATA_MSG_INFO = 2, + ATA_MSG_PROBE = 4, + ATA_MSG_WARN = 8, + ATA_MSG_MALLOC = 16, + ATA_MSG_CTL = 32, + ATA_MSG_INTR = 64, + ATA_MSG_ERR = 128, +}; + +enum { + LIBATA_MAX_PRD = 128, + LIBATA_DUMB_MAX_PRD = 64, + ATA_DEF_QUEUE = 1, + ATA_MAX_QUEUE = 32, + ATA_TAG_INTERNAL = 32, + ATA_SHORT_PAUSE = 16, + ATAPI_MAX_DRAIN = 16384, + ATA_ALL_DEVICES = 3, + ATA_SHT_EMULATED = 1, + ATA_SHT_THIS_ID = 4294967295, + ATA_TFLAG_LBA48 = 1, + ATA_TFLAG_ISADDR = 2, + ATA_TFLAG_DEVICE = 4, + ATA_TFLAG_WRITE = 8, + ATA_TFLAG_LBA = 16, + ATA_TFLAG_FUA = 32, + ATA_TFLAG_POLLING = 64, + ATA_DFLAG_LBA = 1, + ATA_DFLAG_LBA48 = 2, + ATA_DFLAG_CDB_INTR = 4, + ATA_DFLAG_NCQ = 8, + ATA_DFLAG_FLUSH_EXT = 16, + ATA_DFLAG_ACPI_PENDING = 32, + ATA_DFLAG_ACPI_FAILED = 64, + ATA_DFLAG_AN = 128, + ATA_DFLAG_TRUSTED = 256, + ATA_DFLAG_DMADIR = 1024, + ATA_DFLAG_CFG_MASK = 4095, + ATA_DFLAG_PIO = 4096, + ATA_DFLAG_NCQ_OFF = 8192, + ATA_DFLAG_SLEEPING = 32768, + ATA_DFLAG_DUBIOUS_XFER = 65536, + ATA_DFLAG_NO_UNLOAD = 131072, + ATA_DFLAG_UNLOCK_HPA = 262144, + ATA_DFLAG_NCQ_SEND_RECV = 524288, + ATA_DFLAG_NCQ_PRIO = 1048576, + ATA_DFLAG_NCQ_PRIO_ENABLE = 2097152, + ATA_DFLAG_INIT_MASK = 16777215, + ATA_DFLAG_DETACH = 16777216, + ATA_DFLAG_DETACHED = 33554432, + ATA_DFLAG_DA = 67108864, + ATA_DFLAG_DEVSLP = 134217728, + ATA_DFLAG_ACPI_DISABLED = 268435456, + ATA_DFLAG_D_SENSE = 536870912, + ATA_DFLAG_ZAC = 1073741824, + ATA_DEV_UNKNOWN = 0, + ATA_DEV_ATA = 1, + ATA_DEV_ATA_UNSUP = 2, + ATA_DEV_ATAPI = 3, + ATA_DEV_ATAPI_UNSUP = 4, + ATA_DEV_PMP = 5, + ATA_DEV_PMP_UNSUP = 6, + ATA_DEV_SEMB = 7, + ATA_DEV_SEMB_UNSUP = 8, + ATA_DEV_ZAC = 9, + ATA_DEV_ZAC_UNSUP = 10, + ATA_DEV_NONE = 11, + ATA_LFLAG_NO_HRST = 2, + ATA_LFLAG_NO_SRST = 4, + ATA_LFLAG_ASSUME_ATA = 8, + ATA_LFLAG_ASSUME_SEMB = 16, + ATA_LFLAG_ASSUME_CLASS = 24, + ATA_LFLAG_NO_RETRY = 32, + ATA_LFLAG_DISABLED = 64, + ATA_LFLAG_SW_ACTIVITY = 128, + ATA_LFLAG_NO_LPM = 256, + ATA_LFLAG_RST_ONCE = 512, + ATA_LFLAG_CHANGED = 1024, + ATA_LFLAG_NO_DB_DELAY = 2048, + ATA_FLAG_SLAVE_POSS = 1, + ATA_FLAG_SATA = 2, + ATA_FLAG_NO_LPM = 4, + ATA_FLAG_NO_LOG_PAGE = 32, + ATA_FLAG_NO_ATAPI = 64, + ATA_FLAG_PIO_DMA = 128, + ATA_FLAG_PIO_LBA48 = 256, + ATA_FLAG_PIO_POLLING = 512, + ATA_FLAG_NCQ = 1024, + ATA_FLAG_NO_POWEROFF_SPINDOWN = 2048, + ATA_FLAG_NO_HIBERNATE_SPINDOWN = 4096, + ATA_FLAG_DEBUGMSG = 8192, + ATA_FLAG_FPDMA_AA = 16384, + ATA_FLAG_IGN_SIMPLEX = 32768, + ATA_FLAG_NO_IORDY = 65536, + ATA_FLAG_ACPI_SATA = 131072, + ATA_FLAG_AN = 262144, + ATA_FLAG_PMP = 524288, + ATA_FLAG_FPDMA_AUX = 1048576, + ATA_FLAG_EM = 2097152, + ATA_FLAG_SW_ACTIVITY = 4194304, + ATA_FLAG_NO_DIPM = 8388608, + ATA_FLAG_SAS_HOST = 16777216, + ATA_PFLAG_EH_PENDING = 1, + ATA_PFLAG_EH_IN_PROGRESS = 2, + ATA_PFLAG_FROZEN = 4, + ATA_PFLAG_RECOVERED = 8, + ATA_PFLAG_LOADING = 16, + ATA_PFLAG_SCSI_HOTPLUG = 64, + ATA_PFLAG_INITIALIZING = 128, + ATA_PFLAG_RESETTING = 256, + ATA_PFLAG_UNLOADING = 512, + ATA_PFLAG_UNLOADED = 1024, + ATA_PFLAG_SUSPENDED = 131072, + ATA_PFLAG_PM_PENDING = 262144, + ATA_PFLAG_INIT_GTM_VALID = 524288, + ATA_PFLAG_PIO32 = 1048576, + ATA_PFLAG_PIO32CHANGE = 2097152, + ATA_PFLAG_EXTERNAL = 4194304, + ATA_QCFLAG_ACTIVE = 1, + ATA_QCFLAG_DMAMAP = 2, + ATA_QCFLAG_IO = 8, + 
ATA_QCFLAG_RESULT_TF = 16, + ATA_QCFLAG_CLEAR_EXCL = 32, + ATA_QCFLAG_QUIET = 64, + ATA_QCFLAG_RETRY = 128, + ATA_QCFLAG_FAILED = 65536, + ATA_QCFLAG_SENSE_VALID = 131072, + ATA_QCFLAG_EH_SCHEDULED = 262144, + ATA_HOST_SIMPLEX = 1, + ATA_HOST_STARTED = 2, + ATA_HOST_PARALLEL_SCAN = 4, + ATA_HOST_IGNORE_ATA = 8, + ATA_TMOUT_BOOT = 30000, + ATA_TMOUT_BOOT_QUICK = 7000, + ATA_TMOUT_INTERNAL_QUICK = 5000, + ATA_TMOUT_MAX_PARK = 30000, + ATA_TMOUT_FF_WAIT_LONG = 2000, + ATA_TMOUT_FF_WAIT = 800, + ATA_WAIT_AFTER_RESET = 150, + ATA_TMOUT_PMP_SRST_WAIT = 5000, + ATA_TMOUT_SPURIOUS_PHY = 10000, + BUS_UNKNOWN = 0, + BUS_DMA = 1, + BUS_IDLE = 2, + BUS_NOINTR = 3, + BUS_NODATA = 4, + BUS_TIMER = 5, + BUS_PIO = 6, + BUS_EDD = 7, + BUS_IDENTIFY = 8, + BUS_PACKET = 9, + PORT_UNKNOWN = 0, + PORT_ENABLED = 1, + PORT_DISABLED = 2, + ATA_NR_PIO_MODES = 7, + ATA_NR_MWDMA_MODES = 5, + ATA_NR_UDMA_MODES = 8, + ATA_SHIFT_PIO = 0, + ATA_SHIFT_MWDMA = 7, + ATA_SHIFT_UDMA = 12, + ATA_SHIFT_PRIO = 6, + ATA_PRIO_HIGH = 2, + ATA_DMA_PAD_SZ = 4, + ATA_ERING_SIZE = 32, + ATA_DEFER_LINK = 1, + ATA_DEFER_PORT = 2, + ATA_EH_DESC_LEN = 80, + ATA_EH_REVALIDATE = 1, + ATA_EH_SOFTRESET = 2, + ATA_EH_HARDRESET = 4, + ATA_EH_RESET = 6, + ATA_EH_ENABLE_LINK = 8, + ATA_EH_PARK = 32, + ATA_EH_PERDEV_MASK = 33, + ATA_EH_ALL_ACTIONS = 15, + ATA_EHI_HOTPLUGGED = 1, + ATA_EHI_NO_AUTOPSY = 4, + ATA_EHI_QUIET = 8, + ATA_EHI_NO_RECOVERY = 16, + ATA_EHI_DID_SOFTRESET = 65536, + ATA_EHI_DID_HARDRESET = 131072, + ATA_EHI_PRINTINFO = 262144, + ATA_EHI_SETMODE = 524288, + ATA_EHI_POST_SETMODE = 1048576, + ATA_EHI_DID_RESET = 196608, + ATA_EHI_TO_SLAVE_MASK = 12, + ATA_EH_MAX_TRIES = 5, + ATA_LINK_RESUME_TRIES = 5, + ATA_PROBE_MAX_TRIES = 3, + ATA_EH_DEV_TRIES = 3, + ATA_EH_PMP_TRIES = 5, + ATA_EH_PMP_LINK_TRIES = 3, + SATA_PMP_RW_TIMEOUT = 3000, + ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 6, + ATA_HORKAGE_DIAGNOSTIC = 1, + ATA_HORKAGE_NODMA = 2, + ATA_HORKAGE_NONCQ = 4, + ATA_HORKAGE_MAX_SEC_128 = 8, + ATA_HORKAGE_BROKEN_HPA = 16, + ATA_HORKAGE_DISABLE = 32, + ATA_HORKAGE_HPA_SIZE = 64, + ATA_HORKAGE_IVB = 256, + ATA_HORKAGE_STUCK_ERR = 512, + ATA_HORKAGE_BRIDGE_OK = 1024, + ATA_HORKAGE_ATAPI_MOD16_DMA = 2048, + ATA_HORKAGE_FIRMWARE_WARN = 4096, + ATA_HORKAGE_1_5_GBPS = 8192, + ATA_HORKAGE_NOSETXFER = 16384, + ATA_HORKAGE_BROKEN_FPDMA_AA = 32768, + ATA_HORKAGE_DUMP_ID = 65536, + ATA_HORKAGE_MAX_SEC_LBA48 = 131072, + ATA_HORKAGE_ATAPI_DMADIR = 262144, + ATA_HORKAGE_NO_NCQ_TRIM = 524288, + ATA_HORKAGE_NOLPM = 1048576, + ATA_HORKAGE_WD_BROKEN_LPM = 2097152, + ATA_HORKAGE_ZERO_AFTER_TRIM = 4194304, + ATA_HORKAGE_NO_DMA_LOG = 8388608, + ATA_HORKAGE_NOTRIM = 16777216, + ATA_HORKAGE_MAX_SEC_1024 = 33554432, + ATA_HORKAGE_MAX_TRIM_128M = 67108864, + ATA_DMA_MASK_ATA = 1, + ATA_DMA_MASK_ATAPI = 2, + ATA_DMA_MASK_CFA = 4, + ATAPI_READ = 0, + ATAPI_WRITE = 1, + ATAPI_READ_CD = 2, + ATAPI_PASS_THRU = 3, + ATAPI_MISC = 4, + ATA_TIMING_SETUP = 1, + ATA_TIMING_ACT8B = 2, + ATA_TIMING_REC8B = 4, + ATA_TIMING_CYC8B = 8, + ATA_TIMING_8BIT = 14, + ATA_TIMING_ACTIVE = 16, + ATA_TIMING_RECOVER = 32, + ATA_TIMING_DMACK_HOLD = 64, + ATA_TIMING_CYCLE = 128, + ATA_TIMING_UDMA = 256, + ATA_TIMING_ALL = 511, + ATA_ACPI_FILTER_SETXFER = 1, + ATA_ACPI_FILTER_LOCK = 2, + ATA_ACPI_FILTER_DIPM = 4, + ATA_ACPI_FILTER_FPDMA_OFFSET = 8, + ATA_ACPI_FILTER_FPDMA_AA = 16, + ATA_ACPI_FILTER_DEFAULT = 7, +}; + +enum ata_xfer_mask { + ATA_MASK_PIO = 127, + ATA_MASK_MWDMA = 3968, + ATA_MASK_UDMA = 1044480, +}; + +enum ata_completion_errors { + AC_ERR_OK = 0, + AC_ERR_DEV = 1, + AC_ERR_HSM = 2, 
+ AC_ERR_TIMEOUT = 4, + AC_ERR_MEDIA = 8, + AC_ERR_ATA_BUS = 16, + AC_ERR_HOST_BUS = 32, + AC_ERR_SYSTEM = 64, + AC_ERR_INVALID = 128, + AC_ERR_OTHER = 256, + AC_ERR_NODEV_HINT = 512, + AC_ERR_NCQ = 1024, +}; + +enum ata_lpm_policy { + ATA_LPM_UNKNOWN = 0, + ATA_LPM_MAX_POWER = 1, + ATA_LPM_MED_POWER = 2, + ATA_LPM_MED_POWER_WITH_DIPM = 3, + ATA_LPM_MIN_POWER_WITH_PARTIAL = 4, + ATA_LPM_MIN_POWER = 5, +}; + +struct ata_queued_cmd; + +typedef void (*ata_qc_cb_t)(struct ata_queued_cmd *); + +struct ata_taskfile { + long unsigned int flags; + u8 protocol; + u8 ctl; + u8 hob_feature; + u8 hob_nsect; + u8 hob_lbal; + u8 hob_lbam; + u8 hob_lbah; + u8 feature; + u8 nsect; + u8 lbal; + u8 lbam; + u8 lbah; + u8 device; + u8 command; + u32 auxiliary; +}; + +struct ata_port; + +struct ata_device; + +struct ata_queued_cmd { + struct ata_port *ap; + struct ata_device *dev; + struct scsi_cmnd *scsicmd; + void (*scsidone)(struct scsi_cmnd *); + struct ata_taskfile tf; + u8 cdb[16]; + long unsigned int flags; + unsigned int tag; + unsigned int hw_tag; + unsigned int n_elem; + unsigned int orig_n_elem; + int dma_dir; + unsigned int sect_size; + unsigned int nbytes; + unsigned int extrabytes; + unsigned int curbytes; + struct scatterlist sgent; + struct scatterlist *sg; + struct scatterlist *cursg; + unsigned int cursg_ofs; + unsigned int err_mask; + struct ata_taskfile result_tf; + ata_qc_cb_t complete_fn; + void *private_data; + void *lldd_task; +}; + +struct ata_link; + +typedef int (*ata_prereset_fn_t)(struct ata_link *, long unsigned int); + +struct ata_eh_info { + struct ata_device *dev; + u32 serror; + unsigned int err_mask; + unsigned int action; + unsigned int dev_action[2]; + unsigned int flags; + unsigned int probe_mask; + char desc[80]; + int desc_len; +}; + +struct ata_eh_context { + struct ata_eh_info i; + int tries[2]; + int cmd_timeout_idx[12]; + unsigned int classes[2]; + unsigned int did_probe_mask; + unsigned int unloaded_mask; + unsigned int saved_ncq_enabled; + u8 saved_xfer_mode[2]; + long unsigned int last_reset; +}; + +struct ata_ering_entry { + unsigned int eflags; + unsigned int err_mask; + u64 timestamp; +}; + +struct ata_ering { + int cursor; + struct ata_ering_entry ring[32]; +}; + +struct ata_device { + struct ata_link *link; + unsigned int devno; + unsigned int horkage; + long unsigned int flags; + struct scsi_device *sdev; + void *private_data; + union acpi_object *gtf_cache; + unsigned int gtf_filter; + void *zpodd; + struct device tdev; + u64 n_sectors; + u64 n_native_sectors; + unsigned int class; + long unsigned int unpark_deadline; + u8 pio_mode; + u8 dma_mode; + u8 xfer_mode; + unsigned int xfer_shift; + unsigned int multi_count; + unsigned int max_sectors; + unsigned int cdb_len; + long unsigned int pio_mask; + long unsigned int mwdma_mask; + long unsigned int udma_mask; + u16 cylinders; + u16 heads; + u16 sectors; + long: 16; + long: 64; + union { + u16 id[256]; + u32 gscr[128]; + }; + u8 devslp_timing[8]; + u8 ncq_send_recv_cmds[20]; + u8 ncq_non_data_cmds[64]; + u32 zac_zoned_cap; + u32 zac_zones_optimal_open; + u32 zac_zones_optimal_nonseq; + u32 zac_zones_max_open; + int spdn_cnt; + struct ata_ering ering; + long: 64; +}; + +struct ata_link { + struct ata_port *ap; + int pmp; + struct device tdev; + unsigned int active_tag; + u32 sactive; + unsigned int flags; + u32 saved_scontrol; + unsigned int hw_sata_spd_limit; + unsigned int sata_spd_limit; + unsigned int sata_spd; + enum ata_lpm_policy lpm_policy; + struct ata_eh_info eh_info; + struct ata_eh_context 
eh_context; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct ata_device device[2]; + long unsigned int last_lpm_change; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +typedef int (*ata_reset_fn_t)(struct ata_link *, unsigned int *, long unsigned int); + +typedef void (*ata_postreset_fn_t)(struct ata_link *, unsigned int *); + +enum sw_activity { + OFF = 0, + BLINK_ON = 1, + BLINK_OFF = 2, +}; + +struct ata_ioports { + void *cmd_addr; + void *data_addr; + void *error_addr; + void *feature_addr; + void *nsect_addr; + void *lbal_addr; + void *lbam_addr; + void *lbah_addr; + void *device_addr; + void *status_addr; + void *command_addr; + void *altstatus_addr; + void *ctl_addr; + void *bmdma_addr; + void *scr_addr; +}; + +struct ata_port_operations; + +struct ata_host { + spinlock_t lock; + struct device *dev; + void * const *iomap; + unsigned int n_ports; + unsigned int n_tags; + void *private_data; + struct ata_port_operations *ops; + long unsigned int flags; + struct kref kref; + struct mutex eh_mutex; + struct task_struct *eh_owner; + struct ata_port *simplex_claimed; + struct ata_port *ports[0]; +}; + +struct ata_port_operations { + int (*qc_defer)(struct ata_queued_cmd *); + int (*check_atapi_dma)(struct ata_queued_cmd *); + enum ata_completion_errors (*qc_prep)(struct ata_queued_cmd *); + unsigned int (*qc_issue)(struct ata_queued_cmd *); + bool (*qc_fill_rtf)(struct ata_queued_cmd *); + int (*cable_detect)(struct ata_port *); + long unsigned int (*mode_filter)(struct ata_device *, long unsigned int); + void (*set_piomode)(struct ata_port *, struct ata_device *); + void (*set_dmamode)(struct ata_port *, struct ata_device *); + int (*set_mode)(struct ata_link *, struct ata_device **); + unsigned int (*read_id)(struct ata_device *, struct ata_taskfile *, u16 *); + void (*dev_config)(struct ata_device *); + void (*freeze)(struct ata_port *); + void (*thaw)(struct ata_port *); + ata_prereset_fn_t prereset; + ata_reset_fn_t softreset; + ata_reset_fn_t hardreset; + ata_postreset_fn_t postreset; + ata_prereset_fn_t pmp_prereset; + ata_reset_fn_t pmp_softreset; + ata_reset_fn_t pmp_hardreset; + ata_postreset_fn_t pmp_postreset; + void (*error_handler)(struct ata_port *); + void (*lost_interrupt)(struct ata_port *); + void (*post_internal_cmd)(struct ata_queued_cmd *); + void (*sched_eh)(struct ata_port *); + void (*end_eh)(struct ata_port *); + int (*scr_read)(struct ata_link *, unsigned int, u32 *); + int (*scr_write)(struct ata_link *, unsigned int, u32); + void (*pmp_attach)(struct ata_port *); + void (*pmp_detach)(struct ata_port *); + int (*set_lpm)(struct ata_link *, enum ata_lpm_policy, unsigned int); + int (*port_suspend)(struct ata_port *, pm_message_t); + int (*port_resume)(struct ata_port *); + int (*port_start)(struct ata_port *); + void (*port_stop)(struct ata_port *); + void (*host_stop)(struct ata_host *); + void (*sff_dev_select)(struct ata_port *, unsigned int); + void (*sff_set_devctl)(struct ata_port *, u8); + u8 (*sff_check_status)(struct ata_port *); + u8 (*sff_check_altstatus)(struct ata_port *); + void (*sff_tf_load)(struct ata_port *, const struct ata_taskfile *); + void (*sff_tf_read)(struct ata_port *, struct ata_taskfile *); + void (*sff_exec_command)(struct ata_port *, const struct ata_taskfile *); + unsigned int (*sff_data_xfer)(struct ata_queued_cmd *, unsigned char *, unsigned int, int); + void (*sff_irq_on)(struct ata_port *); + bool (*sff_irq_check)(struct ata_port *); + void (*sff_irq_clear)(struct 
ata_port *); + void (*sff_drain_fifo)(struct ata_queued_cmd *); + void (*bmdma_setup)(struct ata_queued_cmd *); + void (*bmdma_start)(struct ata_queued_cmd *); + void (*bmdma_stop)(struct ata_queued_cmd *); + u8 (*bmdma_status)(struct ata_port *); + ssize_t (*em_show)(struct ata_port *, char *); + ssize_t (*em_store)(struct ata_port *, const char *, size_t); + ssize_t (*sw_activity_show)(struct ata_device *, char *); + ssize_t (*sw_activity_store)(struct ata_device *, enum sw_activity); + ssize_t (*transmit_led_message)(struct ata_port *, u32, ssize_t); + void (*phy_reset)(struct ata_port *); + void (*eng_timeout)(struct ata_port *); + const struct ata_port_operations *inherits; +}; + +struct ata_port_stats { + long unsigned int unhandled_irq; + long unsigned int idle_irq; + long unsigned int rw_reqbuf; +}; + +struct ata_acpi_drive { + u32 pio; + u32 dma; +}; + +struct ata_acpi_gtm { + struct ata_acpi_drive drive[2]; + u32 flags; +}; + +struct ata_port { + struct Scsi_Host *scsi_host; + struct ata_port_operations *ops; + spinlock_t *lock; + long unsigned int flags; + unsigned int pflags; + unsigned int print_id; + unsigned int local_port_no; + unsigned int port_no; + struct ata_ioports ioaddr; + u8 ctl; + u8 last_ctl; + struct ata_link *sff_pio_task_link; + struct delayed_work sff_pio_task; + struct ata_bmdma_prd *bmdma_prd; + dma_addr_t bmdma_prd_dma; + unsigned int pio_mask; + unsigned int mwdma_mask; + unsigned int udma_mask; + unsigned int cbl; + struct ata_queued_cmd qcmd[33]; + long unsigned int sas_tag_allocated; + u64 qc_active; + int nr_active_links; + unsigned int sas_last_tag; + long: 64; + struct ata_link link; + struct ata_link *slave_link; + int nr_pmp_links; + struct ata_link *pmp_link; + struct ata_link *excl_link; + struct ata_port_stats stats; + struct ata_host *host; + struct device *dev; + struct device tdev; + struct mutex scsi_scan_mutex; + struct delayed_work hotplug_task; + struct work_struct scsi_rescan_task; + unsigned int hsm_task_state; + u32 msg_enable; + struct list_head eh_done_q; + wait_queue_head_t eh_wait_q; + int eh_tries; + struct completion park_req_pending; + pm_message_t pm_mesg; + enum ata_lpm_policy target_lpm_policy; + struct timer_list fastdrain_timer; + long unsigned int fastdrain_cnt; + async_cookie_t cookie; + int em_message_type; + void *private_data; + struct ata_acpi_gtm __acpi_init_gtm; + int: 32; + u8 sector_buf[512]; +}; + +struct ata_port_info { + long unsigned int flags; + long unsigned int link_flags; + long unsigned int pio_mask; + long unsigned int mwdma_mask; + long unsigned int udma_mask; + struct ata_port_operations *port_ops; + void *private_data; +}; + +struct ata_timing { + short unsigned int mode; + short unsigned int setup; + short unsigned int act8b; + short unsigned int rec8b; + short unsigned int cyc8b; + short unsigned int active; + short unsigned int recover; + short unsigned int dmack_hold; + short unsigned int cycle; + short unsigned int udma; +}; + +struct pci_bits { + unsigned int reg; + unsigned int width; + long unsigned int mask; + long unsigned int val; +}; + +enum ata_link_iter_mode { + ATA_LITER_EDGE = 0, + ATA_LITER_HOST_FIRST = 1, + ATA_LITER_PMP_FIRST = 2, +}; + +enum ata_dev_iter_mode { + ATA_DITER_ENABLED = 0, + ATA_DITER_ENABLED_REVERSE = 1, + ATA_DITER_ALL = 2, + ATA_DITER_ALL_REVERSE = 3, +}; + +struct trace_event_raw_ata_qc_issue { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int tag; + unsigned char cmd; + unsigned char dev; + unsigned char lbal; + unsigned 
char lbam; + unsigned char lbah; + unsigned char nsect; + unsigned char feature; + unsigned char hob_lbal; + unsigned char hob_lbam; + unsigned char hob_lbah; + unsigned char hob_nsect; + unsigned char hob_feature; + unsigned char ctl; + unsigned char proto; + long unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_ata_qc_complete_template { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int tag; + unsigned char status; + unsigned char dev; + unsigned char lbal; + unsigned char lbam; + unsigned char lbah; + unsigned char nsect; + unsigned char error; + unsigned char hob_lbal; + unsigned char hob_lbam; + unsigned char hob_lbah; + unsigned char hob_nsect; + unsigned char hob_feature; + unsigned char ctl; + long unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_ata_eh_link_autopsy { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int eh_action; + unsigned int eh_err_mask; + char __data[0]; +}; + +struct trace_event_raw_ata_eh_link_autopsy_qc { + struct trace_entry ent; + unsigned int ata_port; + unsigned int ata_dev; + unsigned int tag; + unsigned int qc_flags; + unsigned int eh_err_mask; + char __data[0]; +}; + +struct trace_event_data_offsets_ata_qc_issue {}; + +struct trace_event_data_offsets_ata_qc_complete_template {}; + +struct trace_event_data_offsets_ata_eh_link_autopsy {}; + +struct trace_event_data_offsets_ata_eh_link_autopsy_qc {}; + +typedef void (*btf_trace_ata_qc_issue)(void *, struct ata_queued_cmd *); + +typedef void (*btf_trace_ata_qc_complete_internal)(void *, struct ata_queued_cmd *); + +typedef void (*btf_trace_ata_qc_complete_failed)(void *, struct ata_queued_cmd *); + +typedef void (*btf_trace_ata_qc_complete_done)(void *, struct ata_queued_cmd *); + +typedef void (*btf_trace_ata_eh_link_autopsy)(void *, struct ata_device *, unsigned int, unsigned int); + +typedef void (*btf_trace_ata_eh_link_autopsy_qc)(void *, struct ata_queued_cmd *); + +enum { + ATA_READID_POSTRESET = 1, + ATA_DNXFER_PIO = 0, + ATA_DNXFER_DMA = 1, + ATA_DNXFER_40C = 2, + ATA_DNXFER_FORCE_PIO = 3, + ATA_DNXFER_FORCE_PIO0 = 4, + ATA_DNXFER_QUIET = 2147483648, +}; + +struct ata_force_param { + const char *name; + u8 cbl; + u8 spd_limit; + long unsigned int xfer_mask; + unsigned int horkage_on; + unsigned int horkage_off; + u16 lflags; +}; + +struct ata_force_ent { + int port; + int device; + struct ata_force_param param; +}; + +struct ata_xfer_ent { + int shift; + int bits; + u8 base; +}; + +struct ata_blacklist_entry { + const char *model_num; + const char *model_rev; + long unsigned int horkage; +}; + +typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *); + +struct ata_scsi_args { + struct ata_device *dev; + u16 *id; + struct scsi_cmnd *cmd; +}; + +enum ata_lpm_hints { + ATA_LPM_EMPTY = 1, + ATA_LPM_HIPM = 2, + ATA_LPM_WAKE_ONLY = 4, +}; + +enum { + ATA_EH_SPDN_NCQ_OFF = 1, + ATA_EH_SPDN_SPEED_DOWN = 2, + ATA_EH_SPDN_FALLBACK_TO_PIO = 4, + ATA_EH_SPDN_KEEP_ERRORS = 8, + ATA_EFLAG_IS_IO = 1, + ATA_EFLAG_DUBIOUS_XFER = 2, + ATA_EFLAG_OLD_ER = 2147483648, + ATA_ECAT_NONE = 0, + ATA_ECAT_ATA_BUS = 1, + ATA_ECAT_TOUT_HSM = 2, + ATA_ECAT_UNK_DEV = 3, + ATA_ECAT_DUBIOUS_NONE = 4, + ATA_ECAT_DUBIOUS_ATA_BUS = 5, + ATA_ECAT_DUBIOUS_TOUT_HSM = 6, + ATA_ECAT_DUBIOUS_UNK_DEV = 7, + ATA_ECAT_NR = 8, + ATA_EH_CMD_DFL_TIMEOUT = 5000, + ATA_EH_RESET_COOL_DOWN = 5000, + ATA_EH_PRERESET_TIMEOUT = 10000, + ATA_EH_FASTDRAIN_INTERVAL = 3000, + ATA_EH_UA_TRIES = 5, + ATA_EH_PROBE_TRIAL_INTERVAL = 
60000, + ATA_EH_PROBE_TRIALS = 2, +}; + +struct ata_eh_cmd_timeout_ent { + const u8 *commands; + const long unsigned int *timeouts; +}; + +struct speed_down_verdict_arg { + u64 since; + int xfer_ok; + int nr_errors[8]; +}; + +struct ata_internal { + struct scsi_transport_template t; + struct device_attribute private_port_attrs[3]; + struct device_attribute private_link_attrs[3]; + struct device_attribute private_dev_attrs[9]; + struct transport_container link_attr_cont; + struct transport_container dev_attr_cont; + struct device_attribute *link_attrs[4]; + struct device_attribute *port_attrs[4]; + struct device_attribute *dev_attrs[10]; +}; + +struct ata_show_ering_arg { + char *buf; + int written; +}; + +enum hsm_task_states { + HSM_ST_IDLE = 0, + HSM_ST_FIRST = 1, + HSM_ST = 2, + HSM_ST_LAST = 3, + HSM_ST_ERR = 4, +}; + +struct ata_acpi_gtf { + u8 tf[7]; +}; + +struct ata_acpi_hotplug_context { + struct acpi_hotplug_context hp; + union { + struct ata_port *ap; + struct ata_device *dev; + } data; +}; + +struct rm_feature_desc { + __be16 feature_code; + __u8 curr: 1; + __u8 persistent: 1; + __u8 feature_version: 4; + __u8 reserved1: 2; + __u8 add_len; + __u8 lock: 1; + __u8 dbml: 1; + __u8 pvnt_jmpr: 1; + __u8 eject: 1; + __u8 load: 1; + __u8 mech_type: 3; + __u8 reserved2; + __u8 reserved3; + __u8 reserved4; +}; + +enum odd_mech_type { + ODD_MECH_TYPE_SLOT = 0, + ODD_MECH_TYPE_DRAWER = 1, + ODD_MECH_TYPE_UNSUPPORTED = 2, +}; + +struct zpodd { + enum odd_mech_type mech_type; + struct ata_device *dev; + bool from_notify; + bool zp_ready; + long unsigned int last_ready; + bool zp_sampled; + bool powered_off; +}; + +enum { + PIIX_IOCFG = 84, + ICH5_PMR = 144, + ICH5_PCS = 146, + PIIX_SIDPR_BAR = 5, + PIIX_SIDPR_LEN = 16, + PIIX_SIDPR_IDX = 0, + PIIX_SIDPR_DATA = 4, + PIIX_FLAG_CHECKINTR = 268435456, + PIIX_FLAG_SIDPR = 536870912, + PIIX_PATA_FLAGS = 1, + PIIX_SATA_FLAGS = 268435458, + PIIX_FLAG_PIO16 = 1073741824, + PIIX_80C_PRI = 48, + PIIX_80C_SEC = 192, + P0 = 0, + P1 = 1, + P2 = 2, + P3 = 3, + IDE = 4294967295, + NA = 4294967294, + RV = 4294967293, + PIIX_AHCI_DEVICE = 6, + PIIX_HOST_BROKEN_SUSPEND = 16777216, +}; + +enum piix_controller_ids { + piix_pata_mwdma = 0, + piix_pata_33 = 1, + ich_pata_33 = 2, + ich_pata_66 = 3, + ich_pata_100 = 4, + ich_pata_100_nomwdma1 = 5, + ich5_sata = 6, + ich6_sata = 7, + ich6m_sata = 8, + ich8_sata = 9, + ich8_2port_sata = 10, + ich8m_apple_sata = 11, + tolapai_sata = 12, + piix_pata_vmw = 13, + ich8_sata_snb = 14, + ich8_2port_sata_snb = 15, + ich8_2port_sata_byt = 16, +}; + +struct piix_map_db { + const u32 mask; + const u16 port_enable; + const int map[0]; +}; + +struct piix_host_priv { + const int *map; + u32 saved_iocfg; + void *sidpr; +}; + +struct ich_laptop { + u16 device; + u16 subvendor; + u16 subdevice; +}; + +struct sis_chipset { + u16 device; + const struct ata_port_info *info; +}; + +struct sis_laptop { + u16 device; + u16 subvendor; + u16 subdevice; +}; + +enum { + ATA_GEN_CLASS_MATCH = 1, + ATA_GEN_FORCE_DMA = 2, + ATA_GEN_INTEL_IDER = 4, +}; + +struct mipi_dsi_msg { + u8 channel; + u8 type; + u16 flags; + size_t tx_len; + const void *tx_buf; + size_t rx_len; + void *rx_buf; +}; + +struct mipi_dsi_packet { + size_t size; + u8 header[4]; + size_t payload_length; + const u8 *payload; +}; + +struct mipi_dsi_host; + +struct mipi_dsi_device; + +struct mipi_dsi_host_ops { + int (*attach)(struct mipi_dsi_host *, struct mipi_dsi_device *); + int (*detach)(struct mipi_dsi_host *, struct mipi_dsi_device *); + ssize_t (*transfer)(struct 
mipi_dsi_host *, const struct mipi_dsi_msg *); +}; + +struct mipi_dsi_host { + struct device *dev; + const struct mipi_dsi_host_ops *ops; + struct list_head list; +}; + +enum mipi_dsi_pixel_format { + MIPI_DSI_FMT_RGB888 = 0, + MIPI_DSI_FMT_RGB666 = 1, + MIPI_DSI_FMT_RGB666_PACKED = 2, + MIPI_DSI_FMT_RGB565 = 3, +}; + +struct mipi_dsi_device { + struct mipi_dsi_host *host; + struct device dev; + char name[20]; + unsigned int channel; + unsigned int lanes; + enum mipi_dsi_pixel_format format; + long unsigned int mode_flags; + long unsigned int hs_rate; + long unsigned int lp_rate; +}; + +struct mipi_dsi_device_info { + char type[20]; + u32 channel; + struct device_node *node; +}; + +enum mipi_dsi_dcs_tear_mode { + MIPI_DSI_DCS_TEAR_MODE_VBLANK = 0, + MIPI_DSI_DCS_TEAR_MODE_VHBLANK = 1, +}; + +struct mipi_dsi_driver { + struct device_driver driver; + int (*probe)(struct mipi_dsi_device *); + int (*remove)(struct mipi_dsi_device *); + void (*shutdown)(struct mipi_dsi_device *); +}; + +struct drm_dsc_picture_parameter_set { + u8 dsc_version; + u8 pps_identifier; + u8 pps_reserved; + u8 pps_3; + u8 pps_4; + u8 bits_per_pixel_low; + __be16 pic_height; + __be16 pic_width; + __be16 slice_height; + __be16 slice_width; + __be16 chunk_size; + u8 initial_xmit_delay_high; + u8 initial_xmit_delay_low; + __be16 initial_dec_delay; + u8 pps20_reserved; + u8 initial_scale_value; + __be16 scale_increment_interval; + u8 scale_decrement_interval_high; + u8 scale_decrement_interval_low; + u8 pps26_reserved; + u8 first_line_bpg_offset; + __be16 nfl_bpg_offset; + __be16 slice_bpg_offset; + __be16 initial_offset; + __be16 final_offset; + u8 flatness_min_qp; + u8 flatness_max_qp; + __be16 rc_model_size; + u8 rc_edge_factor; + u8 rc_quant_incr_limit0; + u8 rc_quant_incr_limit1; + u8 rc_tgt_offset; + u8 rc_buf_thresh[14]; + __be16 rc_range_parameters[15]; + u8 native_422_420; + u8 second_line_bpg_offset; + __be16 nsl_bpg_offset; + __be16 second_line_offset_adj; + u32 pps_long_94_reserved; + u32 pps_long_98_reserved; + u32 pps_long_102_reserved; + u32 pps_long_106_reserved; + u32 pps_long_110_reserved; + u32 pps_long_114_reserved; + u32 pps_long_118_reserved; + u32 pps_long_122_reserved; + __be16 pps_short_126_reserved; +} __attribute__((packed)); + +enum { + MIPI_DSI_V_SYNC_START = 1, + MIPI_DSI_V_SYNC_END = 17, + MIPI_DSI_H_SYNC_START = 33, + MIPI_DSI_H_SYNC_END = 49, + MIPI_DSI_COMPRESSION_MODE = 7, + MIPI_DSI_END_OF_TRANSMISSION = 8, + MIPI_DSI_COLOR_MODE_OFF = 2, + MIPI_DSI_COLOR_MODE_ON = 18, + MIPI_DSI_SHUTDOWN_PERIPHERAL = 34, + MIPI_DSI_TURN_ON_PERIPHERAL = 50, + MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM = 3, + MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM = 19, + MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM = 35, + MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM = 4, + MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM = 20, + MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM = 36, + MIPI_DSI_DCS_SHORT_WRITE = 5, + MIPI_DSI_DCS_SHORT_WRITE_PARAM = 21, + MIPI_DSI_DCS_READ = 6, + MIPI_DSI_EXECUTE_QUEUE = 22, + MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE = 55, + MIPI_DSI_NULL_PACKET = 9, + MIPI_DSI_BLANKING_PACKET = 25, + MIPI_DSI_GENERIC_LONG_WRITE = 41, + MIPI_DSI_DCS_LONG_WRITE = 57, + MIPI_DSI_PICTURE_PARAMETER_SET = 10, + MIPI_DSI_COMPRESSED_PIXEL_STREAM = 11, + MIPI_DSI_LOOSELY_PACKED_PIXEL_STREAM_YCBCR20 = 12, + MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR24 = 28, + MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16 = 44, + MIPI_DSI_PACKED_PIXEL_STREAM_30 = 13, + MIPI_DSI_PACKED_PIXEL_STREAM_36 = 29, + MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR12 = 61, + MIPI_DSI_PACKED_PIXEL_STREAM_16 = 14, + 
MIPI_DSI_PACKED_PIXEL_STREAM_18 = 30, + MIPI_DSI_PIXEL_STREAM_3BYTE_18 = 46, + MIPI_DSI_PACKED_PIXEL_STREAM_24 = 62, +}; + +enum { + MIPI_DCS_NOP = 0, + MIPI_DCS_SOFT_RESET = 1, + MIPI_DCS_GET_COMPRESSION_MODE = 3, + MIPI_DCS_GET_DISPLAY_ID = 4, + MIPI_DCS_GET_ERROR_COUNT_ON_DSI = 5, + MIPI_DCS_GET_RED_CHANNEL = 6, + MIPI_DCS_GET_GREEN_CHANNEL = 7, + MIPI_DCS_GET_BLUE_CHANNEL = 8, + MIPI_DCS_GET_DISPLAY_STATUS = 9, + MIPI_DCS_GET_POWER_MODE = 10, + MIPI_DCS_GET_ADDRESS_MODE = 11, + MIPI_DCS_GET_PIXEL_FORMAT = 12, + MIPI_DCS_GET_DISPLAY_MODE = 13, + MIPI_DCS_GET_SIGNAL_MODE = 14, + MIPI_DCS_GET_DIAGNOSTIC_RESULT = 15, + MIPI_DCS_ENTER_SLEEP_MODE = 16, + MIPI_DCS_EXIT_SLEEP_MODE = 17, + MIPI_DCS_ENTER_PARTIAL_MODE = 18, + MIPI_DCS_ENTER_NORMAL_MODE = 19, + MIPI_DCS_GET_IMAGE_CHECKSUM_RGB = 20, + MIPI_DCS_GET_IMAGE_CHECKSUM_CT = 21, + MIPI_DCS_EXIT_INVERT_MODE = 32, + MIPI_DCS_ENTER_INVERT_MODE = 33, + MIPI_DCS_SET_GAMMA_CURVE = 38, + MIPI_DCS_SET_DISPLAY_OFF = 40, + MIPI_DCS_SET_DISPLAY_ON = 41, + MIPI_DCS_SET_COLUMN_ADDRESS = 42, + MIPI_DCS_SET_PAGE_ADDRESS = 43, + MIPI_DCS_WRITE_MEMORY_START = 44, + MIPI_DCS_WRITE_LUT = 45, + MIPI_DCS_READ_MEMORY_START = 46, + MIPI_DCS_SET_PARTIAL_ROWS = 48, + MIPI_DCS_SET_PARTIAL_COLUMNS = 49, + MIPI_DCS_SET_SCROLL_AREA = 51, + MIPI_DCS_SET_TEAR_OFF = 52, + MIPI_DCS_SET_TEAR_ON = 53, + MIPI_DCS_SET_ADDRESS_MODE = 54, + MIPI_DCS_SET_SCROLL_START = 55, + MIPI_DCS_EXIT_IDLE_MODE = 56, + MIPI_DCS_ENTER_IDLE_MODE = 57, + MIPI_DCS_SET_PIXEL_FORMAT = 58, + MIPI_DCS_WRITE_MEMORY_CONTINUE = 60, + MIPI_DCS_SET_3D_CONTROL = 61, + MIPI_DCS_READ_MEMORY_CONTINUE = 62, + MIPI_DCS_GET_3D_CONTROL = 63, + MIPI_DCS_SET_VSYNC_TIMING = 64, + MIPI_DCS_SET_TEAR_SCANLINE = 68, + MIPI_DCS_GET_SCANLINE = 69, + MIPI_DCS_SET_DISPLAY_BRIGHTNESS = 81, + MIPI_DCS_GET_DISPLAY_BRIGHTNESS = 82, + MIPI_DCS_WRITE_CONTROL_DISPLAY = 83, + MIPI_DCS_GET_CONTROL_DISPLAY = 84, + MIPI_DCS_WRITE_POWER_SAVE = 85, + MIPI_DCS_GET_POWER_SAVE = 86, + MIPI_DCS_SET_CABC_MIN_BRIGHTNESS = 94, + MIPI_DCS_GET_CABC_MIN_BRIGHTNESS = 95, + MIPI_DCS_READ_DDB_START = 161, + MIPI_DCS_READ_PPS_START = 162, + MIPI_DCS_READ_DDB_CONTINUE = 168, + MIPI_DCS_READ_PPS_CONTINUE = 169, +}; + +struct drm_dmi_panel_orientation_data { + int width; + int height; + const char * const *bios_dates; + int orientation; +}; + +struct vga_device { + struct list_head list; + struct pci_dev *pdev; + unsigned int decodes; + unsigned int owns; + unsigned int locks; + unsigned int io_lock_cnt; + unsigned int mem_lock_cnt; + unsigned int io_norm_cnt; + unsigned int mem_norm_cnt; + bool bridge_has_one_vga; + void *cookie; + void (*irq_set_state)(void *, bool); + unsigned int (*set_vga_decode)(void *, bool); +}; + +struct vga_arb_user_card { + struct pci_dev *pdev; + unsigned int mem_cnt; + unsigned int io_cnt; +}; + +struct vga_arb_private { + struct list_head list; + struct pci_dev *target; + struct vga_arb_user_card cards[16]; + spinlock_t lock; +}; + +enum vga_switcheroo_handler_flags_t { + VGA_SWITCHEROO_CAN_SWITCH_DDC = 1, + VGA_SWITCHEROO_NEEDS_EDP_CONFIG = 2, +}; + +enum vga_switcheroo_state { + VGA_SWITCHEROO_OFF = 0, + VGA_SWITCHEROO_ON = 1, + VGA_SWITCHEROO_NOT_FOUND = 2, +}; + +enum vga_switcheroo_client_id { + VGA_SWITCHEROO_UNKNOWN_ID = 4096, + VGA_SWITCHEROO_IGD = 0, + VGA_SWITCHEROO_DIS = 1, + VGA_SWITCHEROO_MAX_CLIENTS = 2, +}; + +struct vga_switcheroo_handler { + int (*init)(); + int (*switchto)(enum vga_switcheroo_client_id); + int (*switch_ddc)(enum vga_switcheroo_client_id); + int (*power_state)(enum 
vga_switcheroo_client_id, enum vga_switcheroo_state); + enum vga_switcheroo_client_id (*get_client_id)(struct pci_dev *); +}; + +struct vga_switcheroo_client_ops { + void (*set_gpu_state)(struct pci_dev *, enum vga_switcheroo_state); + void (*reprobe)(struct pci_dev *); + bool (*can_switch)(struct pci_dev *); + void (*gpu_bound)(struct pci_dev *, enum vga_switcheroo_client_id); +}; + +struct vga_switcheroo_client { + struct pci_dev *pdev; + struct fb_info *fb_info; + enum vga_switcheroo_state pwr_state; + const struct vga_switcheroo_client_ops *ops; + enum vga_switcheroo_client_id id; + bool active; + bool driver_power_control; + struct list_head list; + struct pci_dev *vga_dev; +}; + +struct vgasr_priv { + bool active; + bool delayed_switch_active; + enum vga_switcheroo_client_id delayed_client_id; + struct dentry *debugfs_root; + int registered_clients; + struct list_head clients; + const struct vga_switcheroo_handler *handler; + enum vga_switcheroo_handler_flags_t handler_flags; + struct mutex mux_hw_lock; + int old_ddc_owner; +}; + +typedef void (*spi_res_release_t)(struct spi_controller *, struct spi_message *, void *); + +struct spi_res { + struct list_head entry; + spi_res_release_t release; + long long unsigned int data[0]; +}; + +struct spi_replaced_transfers; + +typedef void (*spi_replaced_release_t)(struct spi_controller *, struct spi_message *, struct spi_replaced_transfers *); + +struct spi_replaced_transfers { + spi_replaced_release_t release; + void *extradata; + struct list_head replaced_transfers; + struct list_head *replaced_after; + size_t inserted; + struct spi_transfer inserted_transfers[0]; +}; + +struct spi_board_info { + char modalias[32]; + const void *platform_data; + const struct software_node *swnode; + void *controller_data; + int irq; + u32 max_speed_hz; + u16 bus_num; + u16 chip_select; + u32 mode; +}; + +enum spi_mem_data_dir { + SPI_MEM_NO_DATA = 0, + SPI_MEM_DATA_IN = 1, + SPI_MEM_DATA_OUT = 2, +}; + +struct spi_mem_op { + struct { + u8 nbytes; + u8 buswidth; + u8 dtr: 1; + u16 opcode; + } cmd; + struct { + u8 nbytes; + u8 buswidth; + u8 dtr: 1; + u64 val; + } addr; + struct { + u8 nbytes; + u8 buswidth; + u8 dtr: 1; + } dummy; + struct { + u8 buswidth; + u8 dtr: 1; + enum spi_mem_data_dir dir; + unsigned int nbytes; + union { + void *in; + const void *out; + } buf; + } data; +}; + +struct spi_mem_dirmap_info { + struct spi_mem_op op_tmpl; + u64 offset; + u64 length; +}; + +struct spi_mem_dirmap_desc { + struct spi_mem *mem; + struct spi_mem_dirmap_info info; + unsigned int nodirmap; + void *priv; +}; + +struct spi_mem { + struct spi_device *spi; + void *drvpriv; + const char *name; +}; + +struct trace_event_raw_spi_controller { + struct trace_entry ent; + int bus_num; + char __data[0]; +}; + +struct trace_event_raw_spi_message { + struct trace_entry ent; + int bus_num; + int chip_select; + struct spi_message *msg; + char __data[0]; +}; + +struct trace_event_raw_spi_message_done { + struct trace_entry ent; + int bus_num; + int chip_select; + struct spi_message *msg; + unsigned int frame; + unsigned int actual; + char __data[0]; +}; + +struct trace_event_raw_spi_transfer { + struct trace_entry ent; + int bus_num; + int chip_select; + struct spi_transfer *xfer; + int len; + u32 __data_loc_rx_buf; + u32 __data_loc_tx_buf; + char __data[0]; +}; + +struct trace_event_data_offsets_spi_controller {}; + +struct trace_event_data_offsets_spi_message {}; + +struct trace_event_data_offsets_spi_message_done {}; + +struct trace_event_data_offsets_spi_transfer { + u32 
rx_buf; + u32 tx_buf; +}; + +typedef void (*btf_trace_spi_controller_idle)(void *, struct spi_controller *); + +typedef void (*btf_trace_spi_controller_busy)(void *, struct spi_controller *); + +typedef void (*btf_trace_spi_message_submit)(void *, struct spi_message *); + +typedef void (*btf_trace_spi_message_start)(void *, struct spi_message *); + +typedef void (*btf_trace_spi_message_done)(void *, struct spi_message *); + +typedef void (*btf_trace_spi_transfer_start)(void *, struct spi_message *, struct spi_transfer *); + +typedef void (*btf_trace_spi_transfer_stop)(void *, struct spi_message *, struct spi_transfer *); + +struct boardinfo { + struct list_head list; + struct spi_board_info board_info; +}; + +struct acpi_spi_lookup { + struct spi_controller *ctlr; + u32 max_speed_hz; + u32 mode; + int irq; + u8 bits_per_word; + u8 chip_select; +}; + +struct spi_mem_driver { + struct spi_driver spidrv; + int (*probe)(struct spi_mem *); + int (*remove)(struct spi_mem *); + void (*shutdown)(struct spi_mem *); +}; + +struct devprobe2 { + struct net_device * (*probe)(int); + int status; +}; + +enum { + NETIF_F_SG_BIT = 0, + NETIF_F_IP_CSUM_BIT = 1, + __UNUSED_NETIF_F_1 = 2, + NETIF_F_HW_CSUM_BIT = 3, + NETIF_F_IPV6_CSUM_BIT = 4, + NETIF_F_HIGHDMA_BIT = 5, + NETIF_F_FRAGLIST_BIT = 6, + NETIF_F_HW_VLAN_CTAG_TX_BIT = 7, + NETIF_F_HW_VLAN_CTAG_RX_BIT = 8, + NETIF_F_HW_VLAN_CTAG_FILTER_BIT = 9, + NETIF_F_VLAN_CHALLENGED_BIT = 10, + NETIF_F_GSO_BIT = 11, + NETIF_F_LLTX_BIT = 12, + NETIF_F_NETNS_LOCAL_BIT = 13, + NETIF_F_GRO_BIT = 14, + NETIF_F_LRO_BIT = 15, + NETIF_F_GSO_SHIFT = 16, + NETIF_F_TSO_BIT = 16, + NETIF_F_GSO_ROBUST_BIT = 17, + NETIF_F_TSO_ECN_BIT = 18, + NETIF_F_TSO_MANGLEID_BIT = 19, + NETIF_F_TSO6_BIT = 20, + NETIF_F_FSO_BIT = 21, + NETIF_F_GSO_GRE_BIT = 22, + NETIF_F_GSO_GRE_CSUM_BIT = 23, + NETIF_F_GSO_IPXIP4_BIT = 24, + NETIF_F_GSO_IPXIP6_BIT = 25, + NETIF_F_GSO_UDP_TUNNEL_BIT = 26, + NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT = 27, + NETIF_F_GSO_PARTIAL_BIT = 28, + NETIF_F_GSO_TUNNEL_REMCSUM_BIT = 29, + NETIF_F_GSO_SCTP_BIT = 30, + NETIF_F_GSO_ESP_BIT = 31, + NETIF_F_GSO_UDP_BIT = 32, + NETIF_F_GSO_UDP_L4_BIT = 33, + NETIF_F_GSO_FRAGLIST_BIT = 34, + NETIF_F_GSO_LAST = 34, + NETIF_F_FCOE_CRC_BIT = 35, + NETIF_F_SCTP_CRC_BIT = 36, + NETIF_F_FCOE_MTU_BIT = 37, + NETIF_F_NTUPLE_BIT = 38, + NETIF_F_RXHASH_BIT = 39, + NETIF_F_RXCSUM_BIT = 40, + NETIF_F_NOCACHE_COPY_BIT = 41, + NETIF_F_LOOPBACK_BIT = 42, + NETIF_F_RXFCS_BIT = 43, + NETIF_F_RXALL_BIT = 44, + NETIF_F_HW_VLAN_STAG_TX_BIT = 45, + NETIF_F_HW_VLAN_STAG_RX_BIT = 46, + NETIF_F_HW_VLAN_STAG_FILTER_BIT = 47, + NETIF_F_HW_L2FW_DOFFLOAD_BIT = 48, + NETIF_F_HW_TC_BIT = 49, + NETIF_F_HW_ESP_BIT = 50, + NETIF_F_HW_ESP_TX_CSUM_BIT = 51, + NETIF_F_RX_UDP_TUNNEL_PORT_BIT = 52, + NETIF_F_HW_TLS_TX_BIT = 53, + NETIF_F_HW_TLS_RX_BIT = 54, + NETIF_F_GRO_HW_BIT = 55, + NETIF_F_HW_TLS_RECORD_BIT = 56, + NETIF_F_GRO_FRAGLIST_BIT = 57, + NETIF_F_HW_MACSEC_BIT = 58, + NETIF_F_GRO_UDP_FWD_BIT = 59, + NETIF_F_HW_HSR_TAG_INS_BIT = 60, + NETIF_F_HW_HSR_TAG_RM_BIT = 61, + NETIF_F_HW_HSR_FWD_BIT = 62, + NETIF_F_HW_HSR_DUP_BIT = 63, + NETDEV_FEATURE_COUNT = 64, +}; + +typedef struct bio_vec skb_frag_t; + +struct skb_shared_hwtstamps { + ktime_t hwtstamp; +}; + +enum { + SKBTX_HW_TSTAMP = 1, + SKBTX_SW_TSTAMP = 2, + SKBTX_IN_PROGRESS = 4, + SKBTX_WIFI_STATUS = 16, + SKBTX_SCHED_TSTAMP = 64, +}; + +struct skb_shared_info { + __u8 flags; + __u8 meta_len; + __u8 nr_frags; + __u8 tx_flags; + short unsigned int gso_size; + short unsigned int gso_segs; + struct sk_buff 
*frag_list; + struct skb_shared_hwtstamps hwtstamps; + unsigned int gso_type; + u32 tskey; + atomic_t dataref; + void *destructor_arg; + skb_frag_t frags[17]; +}; + +enum netdev_priv_flags { + IFF_802_1Q_VLAN = 1, + IFF_EBRIDGE = 2, + IFF_BONDING = 4, + IFF_ISATAP = 8, + IFF_WAN_HDLC = 16, + IFF_XMIT_DST_RELEASE = 32, + IFF_DONT_BRIDGE = 64, + IFF_DISABLE_NETPOLL = 128, + IFF_MACVLAN_PORT = 256, + IFF_BRIDGE_PORT = 512, + IFF_OVS_DATAPATH = 1024, + IFF_TX_SKB_SHARING = 2048, + IFF_UNICAST_FLT = 4096, + IFF_TEAM_PORT = 8192, + IFF_SUPP_NOFCS = 16384, + IFF_LIVE_ADDR_CHANGE = 32768, + IFF_MACVLAN = 65536, + IFF_XMIT_DST_RELEASE_PERM = 131072, + IFF_L3MDEV_MASTER = 262144, + IFF_NO_QUEUE = 524288, + IFF_OPENVSWITCH = 1048576, + IFF_L3MDEV_SLAVE = 2097152, + IFF_TEAM = 4194304, + IFF_RXFH_CONFIGURED = 8388608, + IFF_PHONY_HEADROOM = 16777216, + IFF_MACSEC = 33554432, + IFF_NO_RX_HANDLER = 67108864, + IFF_FAILOVER = 134217728, + IFF_FAILOVER_SLAVE = 268435456, + IFF_L3MDEV_RX_HANDLER = 536870912, + IFF_LIVE_RENAME_OK = 1073741824, + IFF_TX_SKB_NO_LINEAR = 2147483648, +}; + +struct mdio_board_info { + const char *bus_id; + char modalias[32]; + int mdio_addr; + const void *platform_data; +}; + +struct mdio_board_entry { + struct list_head list; + struct mdio_board_info board_info; +}; + +struct mdiobus_devres { + struct mii_bus *mii; +}; + +enum netdev_state_t { + __LINK_STATE_START = 0, + __LINK_STATE_PRESENT = 1, + __LINK_STATE_NOCARRIER = 2, + __LINK_STATE_LINKWATCH_PENDING = 3, + __LINK_STATE_DORMANT = 4, + __LINK_STATE_TESTING = 5, +}; + +struct mii_ioctl_data { + __u16 phy_id; + __u16 reg_num; + __u16 val_in; + __u16 val_out; +}; + +enum { + ETHTOOL_MSG_KERNEL_NONE = 0, + ETHTOOL_MSG_STRSET_GET_REPLY = 1, + ETHTOOL_MSG_LINKINFO_GET_REPLY = 2, + ETHTOOL_MSG_LINKINFO_NTF = 3, + ETHTOOL_MSG_LINKMODES_GET_REPLY = 4, + ETHTOOL_MSG_LINKMODES_NTF = 5, + ETHTOOL_MSG_LINKSTATE_GET_REPLY = 6, + ETHTOOL_MSG_DEBUG_GET_REPLY = 7, + ETHTOOL_MSG_DEBUG_NTF = 8, + ETHTOOL_MSG_WOL_GET_REPLY = 9, + ETHTOOL_MSG_WOL_NTF = 10, + ETHTOOL_MSG_FEATURES_GET_REPLY = 11, + ETHTOOL_MSG_FEATURES_SET_REPLY = 12, + ETHTOOL_MSG_FEATURES_NTF = 13, + ETHTOOL_MSG_PRIVFLAGS_GET_REPLY = 14, + ETHTOOL_MSG_PRIVFLAGS_NTF = 15, + ETHTOOL_MSG_RINGS_GET_REPLY = 16, + ETHTOOL_MSG_RINGS_NTF = 17, + ETHTOOL_MSG_CHANNELS_GET_REPLY = 18, + ETHTOOL_MSG_CHANNELS_NTF = 19, + ETHTOOL_MSG_COALESCE_GET_REPLY = 20, + ETHTOOL_MSG_COALESCE_NTF = 21, + ETHTOOL_MSG_PAUSE_GET_REPLY = 22, + ETHTOOL_MSG_PAUSE_NTF = 23, + ETHTOOL_MSG_EEE_GET_REPLY = 24, + ETHTOOL_MSG_EEE_NTF = 25, + ETHTOOL_MSG_TSINFO_GET_REPLY = 26, + ETHTOOL_MSG_CABLE_TEST_NTF = 27, + ETHTOOL_MSG_CABLE_TEST_TDR_NTF = 28, + ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY = 29, + ETHTOOL_MSG_FEC_GET_REPLY = 30, + ETHTOOL_MSG_FEC_NTF = 31, + ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY = 32, + ETHTOOL_MSG_STATS_GET_REPLY = 33, + __ETHTOOL_MSG_KERNEL_CNT = 34, + ETHTOOL_MSG_KERNEL_MAX = 33, +}; + +enum { + ETHTOOL_A_STATS_UNSPEC = 0, + ETHTOOL_A_STATS_PAD = 1, + ETHTOOL_A_STATS_HEADER = 2, + ETHTOOL_A_STATS_GROUPS = 3, + ETHTOOL_A_STATS_GRP = 4, + __ETHTOOL_A_STATS_CNT = 5, + ETHTOOL_A_STATS_MAX = 4, +}; + +struct phy_led_trigger { + struct led_trigger trigger; + char name[76]; + unsigned int speed; +}; + +struct phy_setting { + u32 speed; + u8 duplex; + u8 bit; +}; + +struct ethtool_phy_ops { + int (*get_sset_count)(struct phy_device *); + int (*get_strings)(struct phy_device *, u8 *); + int (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); + int (*start_cable_test)(struct phy_device *, struct 
netlink_ext_ack *); + int (*start_cable_test_tdr)(struct phy_device *, struct netlink_ext_ack *, const struct phy_tdr_config *); +}; + +struct phy_fixup { + struct list_head list; + char bus_id[64]; + u32 phy_uid; + u32 phy_uid_mask; + int (*run)(struct phy_device *); +}; + +struct sfp_eeprom_base { + u8 phys_id; + u8 phys_ext_id; + u8 connector; + u8 if_1x_copper_passive: 1; + u8 if_1x_copper_active: 1; + u8 if_1x_lx: 1; + u8 if_1x_sx: 1; + u8 e10g_base_sr: 1; + u8 e10g_base_lr: 1; + u8 e10g_base_lrm: 1; + u8 e10g_base_er: 1; + u8 sonet_oc3_short_reach: 1; + u8 sonet_oc3_smf_intermediate_reach: 1; + u8 sonet_oc3_smf_long_reach: 1; + u8 unallocated_5_3: 1; + u8 sonet_oc12_short_reach: 1; + u8 sonet_oc12_smf_intermediate_reach: 1; + u8 sonet_oc12_smf_long_reach: 1; + u8 unallocated_5_7: 1; + u8 sonet_oc48_short_reach: 1; + u8 sonet_oc48_intermediate_reach: 1; + u8 sonet_oc48_long_reach: 1; + u8 sonet_reach_bit2: 1; + u8 sonet_reach_bit1: 1; + u8 sonet_oc192_short_reach: 1; + u8 escon_smf_1310_laser: 1; + u8 escon_mmf_1310_led: 1; + u8 e1000_base_sx: 1; + u8 e1000_base_lx: 1; + u8 e1000_base_cx: 1; + u8 e1000_base_t: 1; + u8 e100_base_lx: 1; + u8 e100_base_fx: 1; + u8 e_base_bx10: 1; + u8 e_base_px: 1; + u8 fc_tech_electrical_inter_enclosure: 1; + u8 fc_tech_lc: 1; + u8 fc_tech_sa: 1; + u8 fc_ll_m: 1; + u8 fc_ll_l: 1; + u8 fc_ll_i: 1; + u8 fc_ll_s: 1; + u8 fc_ll_v: 1; + u8 unallocated_8_0: 1; + u8 unallocated_8_1: 1; + u8 sfp_ct_passive: 1; + u8 sfp_ct_active: 1; + u8 fc_tech_ll: 1; + u8 fc_tech_sl: 1; + u8 fc_tech_sn: 1; + u8 fc_tech_electrical_intra_enclosure: 1; + u8 fc_media_sm: 1; + u8 unallocated_9_1: 1; + u8 fc_media_m5: 1; + u8 fc_media_m6: 1; + u8 fc_media_tv: 1; + u8 fc_media_mi: 1; + u8 fc_media_tp: 1; + u8 fc_media_tw: 1; + u8 fc_speed_100: 1; + u8 unallocated_10_1: 1; + u8 fc_speed_200: 1; + u8 fc_speed_3200: 1; + u8 fc_speed_400: 1; + u8 fc_speed_1600: 1; + u8 fc_speed_800: 1; + u8 fc_speed_1200: 1; + u8 encoding; + u8 br_nominal; + u8 rate_id; + u8 link_len[6]; + char vendor_name[16]; + u8 extended_cc; + char vendor_oui[3]; + char vendor_pn[16]; + char vendor_rev[4]; + union { + __be16 optical_wavelength; + __be16 cable_compliance; + struct { + u8 sff8431_app_e: 1; + u8 fc_pi_4_app_h: 1; + u8 reserved60_2: 6; + u8 reserved61: 8; + } passive; + struct { + u8 sff8431_app_e: 1; + u8 fc_pi_4_app_h: 1; + u8 sff8431_lim: 1; + u8 fc_pi_4_lim: 1; + u8 reserved60_4: 4; + u8 reserved61: 8; + } active; + }; + u8 reserved62; + u8 cc_base; +}; + +struct sfp_eeprom_ext { + __be16 options; + u8 br_max; + u8 br_min; + char vendor_sn[16]; + char datecode[8]; + u8 diagmon; + u8 enhopts; + u8 sff8472_compliance; + u8 cc_ext; +}; + +struct sfp_eeprom_id { + struct sfp_eeprom_base base; + struct sfp_eeprom_ext ext; +}; + +struct sfp_upstream_ops { + void (*attach)(void *, struct sfp_bus *); + void (*detach)(void *, struct sfp_bus *); + int (*module_insert)(void *, const struct sfp_eeprom_id *); + void (*module_remove)(void *); + int (*module_start)(void *); + void (*module_stop)(void *); + void (*link_down)(void *); + void (*link_up)(void *); + int (*connect_phy)(void *, struct phy_device *); + void (*disconnect_phy)(void *); +}; + +struct trace_event_raw_mdio_access { + struct trace_entry ent; + char busid[61]; + char read; + u8 addr; + u16 val; + unsigned int regnum; + char __data[0]; +}; + +struct trace_event_data_offsets_mdio_access {}; + +typedef void (*btf_trace_mdio_access)(void *, struct mii_bus *, char, u8, unsigned int, u16, int); + +struct mdio_bus_stat_attr { + int addr; + unsigned int 
field_offset; +}; + +struct mdio_driver { + struct mdio_driver_common mdiodrv; + int (*probe)(struct mdio_device *); + void (*remove)(struct mdio_device *); +}; + +struct fixed_phy_status { + int link; + int speed; + int duplex; + int pause; + int asym_pause; +}; + +struct swmii_regs { + u16 bmsr; + u16 lpa; + u16 lpagb; + u16 estat; +}; + +enum { + SWMII_SPEED_10 = 0, + SWMII_SPEED_100 = 1, + SWMII_SPEED_1000 = 2, + SWMII_DUPLEX_HALF = 0, + SWMII_DUPLEX_FULL = 1, +}; + +struct mii_timestamping_ctrl { + struct mii_timestamper * (*probe_channel)(struct device *, unsigned int); + void (*release_channel)(struct device *, struct mii_timestamper *); +}; + +struct mii_timestamping_desc { + struct list_head list; + struct mii_timestamping_ctrl *ctrl; + struct device *device; +}; + +struct sfp; + +struct sfp_socket_ops; + +struct sfp_quirk; + +struct sfp_bus { + struct kref kref; + struct list_head node; + struct fwnode_handle *fwnode; + const struct sfp_socket_ops *socket_ops; + struct device *sfp_dev; + struct sfp *sfp; + const struct sfp_quirk *sfp_quirk; + const struct sfp_upstream_ops *upstream_ops; + void *upstream; + struct phy_device *phydev; + bool registered; + bool started; +}; + +enum { + SFF8024_ID_UNK = 0, + SFF8024_ID_SFF_8472 = 2, + SFF8024_ID_SFP = 3, + SFF8024_ID_DWDM_SFP = 11, + SFF8024_ID_QSFP_8438 = 12, + SFF8024_ID_QSFP_8436_8636 = 13, + SFF8024_ID_QSFP28_8636 = 17, + SFF8024_ENCODING_UNSPEC = 0, + SFF8024_ENCODING_8B10B = 1, + SFF8024_ENCODING_4B5B = 2, + SFF8024_ENCODING_NRZ = 3, + SFF8024_ENCODING_8472_MANCHESTER = 4, + SFF8024_ENCODING_8472_SONET = 5, + SFF8024_ENCODING_8472_64B66B = 6, + SFF8024_ENCODING_8436_MANCHESTER = 6, + SFF8024_ENCODING_8436_SONET = 4, + SFF8024_ENCODING_8436_64B66B = 5, + SFF8024_ENCODING_256B257B = 7, + SFF8024_ENCODING_PAM4 = 8, + SFF8024_CONNECTOR_UNSPEC = 0, + SFF8024_CONNECTOR_SC = 1, + SFF8024_CONNECTOR_FIBERJACK = 6, + SFF8024_CONNECTOR_LC = 7, + SFF8024_CONNECTOR_MT_RJ = 8, + SFF8024_CONNECTOR_MU = 9, + SFF8024_CONNECTOR_SG = 10, + SFF8024_CONNECTOR_OPTICAL_PIGTAIL = 11, + SFF8024_CONNECTOR_MPO_1X12 = 12, + SFF8024_CONNECTOR_MPO_2X16 = 13, + SFF8024_CONNECTOR_HSSDC_II = 32, + SFF8024_CONNECTOR_COPPER_PIGTAIL = 33, + SFF8024_CONNECTOR_RJ45 = 34, + SFF8024_CONNECTOR_NOSEPARATE = 35, + SFF8024_CONNECTOR_MXC_2X16 = 36, + SFF8024_ECC_UNSPEC = 0, + SFF8024_ECC_100G_25GAUI_C2M_AOC = 1, + SFF8024_ECC_100GBASE_SR4_25GBASE_SR = 2, + SFF8024_ECC_100GBASE_LR4_25GBASE_LR = 3, + SFF8024_ECC_100GBASE_ER4_25GBASE_ER = 4, + SFF8024_ECC_100GBASE_SR10 = 5, + SFF8024_ECC_100GBASE_CR4 = 11, + SFF8024_ECC_25GBASE_CR_S = 12, + SFF8024_ECC_25GBASE_CR_N = 13, + SFF8024_ECC_10GBASE_T_SFI = 22, + SFF8024_ECC_10GBASE_T_SR = 28, + SFF8024_ECC_5GBASE_T = 29, + SFF8024_ECC_2_5GBASE_T = 30, +}; + +struct sfp_socket_ops { + void (*attach)(struct sfp *); + void (*detach)(struct sfp *); + void (*start)(struct sfp *); + void (*stop)(struct sfp *); + int (*module_info)(struct sfp *, struct ethtool_modinfo *); + int (*module_eeprom)(struct sfp *, struct ethtool_eeprom *, u8 *); + int (*module_eeprom_by_page)(struct sfp *, const struct ethtool_module_eeprom *, struct netlink_ext_ack *); +}; + +struct sfp_quirk { + const char *vendor; + const char *part; + void (*modes)(const struct sfp_eeprom_id *, long unsigned int *); +}; + +struct mdio_device_id { + __u32 phy_id; + __u32 phy_id_mask; +}; + +enum { + MDIO_AN_C22 = 65504, +}; + +struct fixed_mdio_bus { + struct mii_bus *mii_bus; + struct list_head phys; +}; + +struct fixed_phy { + int addr; + struct phy_device *phydev; + 
struct fixed_phy_status status; + bool no_carrier; + int (*link_update)(struct net_device *, struct fixed_phy_status *); + struct list_head node; + struct gpio_desc *link_gpiod; +}; + +struct flow_dissector_key_control { + u16 thoff; + u16 addr_type; + u32 flags; +}; + +struct flow_dissector_key_basic { + __be16 n_proto; + u8 ip_proto; + u8 padding; +}; + +struct flow_dissector { + unsigned int used_keys; + short unsigned int offset[28]; +}; + +struct flow_keys_basic { + struct flow_dissector_key_control control; + struct flow_dissector_key_basic basic; +}; + +struct nf_conntrack { + atomic_t use; +}; + +enum { + SKBFL_ZEROCOPY_ENABLE = 1, + SKBFL_SHARED_FRAG = 2, +}; + +struct mmpin { + struct user_struct *user; + unsigned int num_pg; +}; + +struct ubuf_info { + void (*callback)(struct sk_buff *, struct ubuf_info *, bool); + union { + struct { + long unsigned int desc; + void *ctx; + }; + struct { + u32 id; + u16 len; + u16 zerocopy: 1; + u32 bytelen; + }; + }; + refcount_t refcnt; + u8 flags; + struct mmpin mmp; +}; + +enum { + SKB_GSO_TCPV4 = 1, + SKB_GSO_DODGY = 2, + SKB_GSO_TCP_ECN = 4, + SKB_GSO_TCP_FIXEDID = 8, + SKB_GSO_TCPV6 = 16, + SKB_GSO_FCOE = 32, + SKB_GSO_GRE = 64, + SKB_GSO_GRE_CSUM = 128, + SKB_GSO_IPXIP4 = 256, + SKB_GSO_IPXIP6 = 512, + SKB_GSO_UDP_TUNNEL = 1024, + SKB_GSO_UDP_TUNNEL_CSUM = 2048, + SKB_GSO_PARTIAL = 4096, + SKB_GSO_TUNNEL_REMCSUM = 8192, + SKB_GSO_SCTP = 16384, + SKB_GSO_ESP = 32768, + SKB_GSO_UDP = 65536, + SKB_GSO_UDP_L4 = 131072, + SKB_GSO_FRAGLIST = 262144, +}; + +enum { + IFLA_TUN_UNSPEC = 0, + IFLA_TUN_OWNER = 1, + IFLA_TUN_GROUP = 2, + IFLA_TUN_TYPE = 3, + IFLA_TUN_PI = 4, + IFLA_TUN_VNET_HDR = 5, + IFLA_TUN_PERSIST = 6, + IFLA_TUN_MULTI_QUEUE = 7, + IFLA_TUN_NUM_QUEUES = 8, + IFLA_TUN_NUM_DISABLED_QUEUES = 9, + __IFLA_TUN_MAX = 10, +}; + +struct gro_list { + struct list_head list; + int count; +}; + +struct napi_struct { + struct list_head poll_list; + long unsigned int state; + int weight; + int defer_hard_irqs_count; + long unsigned int gro_bitmask; + int (*poll)(struct napi_struct *, int); + int poll_owner; + struct net_device *dev; + struct gro_list gro_hash[8]; + struct sk_buff *skb; + struct list_head rx_list; + int rx_count; + struct hrtimer timer; + struct list_head dev_list; + struct hlist_node napi_hash_node; + unsigned int napi_id; + struct task_struct *thread; +}; + +enum gro_result { + GRO_MERGED = 0, + GRO_MERGED_FREE = 1, + GRO_HELD = 2, + GRO_NORMAL = 3, + GRO_CONSUMED = 4, +}; + +typedef enum gro_result gro_result_t; + +enum netdev_queue_state_t { + __QUEUE_STATE_DRV_XOFF = 0, + __QUEUE_STATE_STACK_XOFF = 1, + __QUEUE_STATE_FROZEN = 2, +}; + +struct rps_sock_flow_table { + u32 mask; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + u32 ents[0]; +}; + +struct ip_tunnel_parm { + char name[16]; + int link; + __be16 i_flags; + __be16 o_flags; + __be32 i_key; + __be32 o_key; + struct iphdr iph; +}; + +struct wpan_phy; + +struct wpan_dev_header_ops; + +struct wpan_dev { + struct wpan_phy *wpan_phy; + int iftype; + struct list_head list; + struct net_device *netdev; + const struct wpan_dev_header_ops *header_ops; + struct net_device *lowpan_dev; + u32 identifier; + __le16 pan_id; + __le16 short_addr; + __le64 extended_addr; + atomic_t bsn; + atomic_t dsn; + u8 min_be; + u8 max_be; + u8 csma_retries; + s8 frame_retries; + bool lbt; + bool promiscuous_mode; + bool ackreq; +}; + +struct tun_pi { + __u16 flags; + __be16 proto; +}; + +struct tun_filter { + __u16 flags; + __u16 count; + __u8 
addr[0]; +}; + +struct virtio_net_hdr { + __u8 flags; + __u8 gso_type; + __virtio16 hdr_len; + __virtio16 gso_size; + __virtio16 csum_start; + __virtio16 csum_offset; +}; + +struct tun_msg_ctl { + short unsigned int type; + short unsigned int num; + void *ptr; +}; + +struct tun_xdp_hdr { + int buflen; + struct virtio_net_hdr gso; +}; + +struct fib_info; + +struct fib_nh { + struct fib_nh_common nh_common; + struct hlist_node nh_hash; + struct fib_info *nh_parent; + __u32 nh_tclassid; + __be32 nh_saddr; + int nh_saddr_genid; +}; + +struct fib_info { + struct hlist_node fib_hash; + struct hlist_node fib_lhash; + struct list_head nh_list; + struct net *fib_net; + int fib_treeref; + refcount_t fib_clntref; + unsigned int fib_flags; + unsigned char fib_dead; + unsigned char fib_protocol; + unsigned char fib_scope; + unsigned char fib_type; + __be32 fib_prefsrc; + u32 fib_tb_id; + u32 fib_priority; + struct dst_metrics *fib_metrics; + int fib_nhs; + bool fib_nh_is_v6; + bool nh_updated; + struct nexthop *nh; + struct callback_head rcu; + struct fib_nh fib_nh[0]; +}; + +struct nh_info; + +struct nh_group; + +struct nexthop { + struct rb_node rb_node; + struct list_head fi_list; + struct list_head f6i_list; + struct list_head fdb_list; + struct list_head grp_list; + struct net *net; + u32 id; + u8 protocol; + u8 nh_flags; + bool is_group; + refcount_t refcnt; + struct callback_head rcu; + union { + struct nh_info *nh_info; + struct nh_group *nh_grp; + }; +}; + +struct nh_info { + struct hlist_node dev_hash; + struct nexthop *nh_parent; + u8 family; + bool reject_nh; + bool fdb_nh; + union { + struct fib_nh_common fib_nhc; + struct fib_nh fib_nh; + struct fib6_nh fib6_nh; + }; +}; + +struct nh_grp_entry; + +struct nh_res_bucket { + struct nh_grp_entry *nh_entry; + atomic_long_t used_time; + long unsigned int migrated_time; + bool occupied; + u8 nh_flags; +}; + +struct nh_grp_entry { + struct nexthop *nh; + u8 weight; + union { + struct { + atomic_t upper_bound; + } hthr; + struct { + struct list_head uw_nh_entry; + u16 count_buckets; + u16 wants_buckets; + } res; + }; + struct list_head nh_list; + struct nexthop *nh_parent; +}; + +struct nh_res_table { + struct net *net; + u32 nhg_id; + struct delayed_work upkeep_dw; + struct list_head uw_nh_entries; + long unsigned int unbalanced_since; + u32 idle_timer; + u32 unbalanced_timer; + u16 num_nh_buckets; + struct nh_res_bucket nh_buckets[0]; +}; + +struct nh_group { + struct nh_group *spare; + u16 num_nh; + bool is_multipath; + bool hash_threshold; + bool resilient; + bool fdb_nh; + bool has_v4; + struct nh_res_table *res_table; + struct nh_grp_entry nh_entries[0]; +}; + +enum { + AX25_VALUES_IPDEFMODE = 0, + AX25_VALUES_AXDEFMODE = 1, + AX25_VALUES_BACKOFF = 2, + AX25_VALUES_CONMODE = 3, + AX25_VALUES_WINDOW = 4, + AX25_VALUES_EWINDOW = 5, + AX25_VALUES_T1 = 6, + AX25_VALUES_T2 = 7, + AX25_VALUES_T3 = 8, + AX25_VALUES_IDLE = 9, + AX25_VALUES_N2 = 10, + AX25_VALUES_PACLEN = 11, + AX25_VALUES_PROTOCOL = 12, + AX25_VALUES_DS_TIMEOUT = 13, + AX25_MAX_VALUES = 14, +}; + +enum nl802154_cca_modes { + __NL802154_CCA_INVALID = 0, + NL802154_CCA_ENERGY = 1, + NL802154_CCA_CARRIER = 2, + NL802154_CCA_ENERGY_CARRIER = 3, + NL802154_CCA_ALOHA = 4, + NL802154_CCA_UWB_SHR = 5, + NL802154_CCA_UWB_MULTIPLEXED = 6, + __NL802154_CCA_ATTR_AFTER_LAST = 7, + NL802154_CCA_ATTR_MAX = 6, +}; + +enum nl802154_cca_opts { + NL802154_CCA_OPT_ENERGY_CARRIER_AND = 0, + NL802154_CCA_OPT_ENERGY_CARRIER_OR = 1, + __NL802154_CCA_OPT_ATTR_AFTER_LAST = 2, + NL802154_CCA_OPT_ATTR_MAX = 1, 
+}; + +enum nl802154_supported_bool_states { + NL802154_SUPPORTED_BOOL_FALSE = 0, + NL802154_SUPPORTED_BOOL_TRUE = 1, + __NL802154_SUPPORTED_BOOL_INVALD = 2, + NL802154_SUPPORTED_BOOL_BOTH = 3, + __NL802154_SUPPORTED_BOOL_AFTER_LAST = 4, + NL802154_SUPPORTED_BOOL_MAX = 3, +}; + +struct wpan_phy_supported { + u32 channels[32]; + u32 cca_modes; + u32 cca_opts; + u32 iftypes; + enum nl802154_supported_bool_states lbt; + u8 min_minbe; + u8 max_minbe; + u8 min_maxbe; + u8 max_maxbe; + u8 min_csma_backoffs; + u8 max_csma_backoffs; + s8 min_frame_retries; + s8 max_frame_retries; + size_t tx_powers_size; + size_t cca_ed_levels_size; + const s32 *tx_powers; + const s32 *cca_ed_levels; +}; + +struct wpan_phy_cca { + enum nl802154_cca_modes mode; + enum nl802154_cca_opts opt; +}; + +struct wpan_phy { + const void *privid; + u32 flags; + u8 current_channel; + u8 current_page; + struct wpan_phy_supported supported; + s32 transmit_power; + struct wpan_phy_cca cca; + __le64 perm_extended_addr; + s32 cca_ed_level; + u8 symbol_duration; + u16 lifs_period; + u16 sifs_period; + struct device dev; + possible_net_t _net; + long: 64; + char priv[0]; +}; + +struct ieee802154_addr { + u8 mode; + __le16 pan_id; + union { + __le16 short_addr; + __le64 extended_addr; + }; +}; + +struct wpan_dev_header_ops { + int (*create)(struct sk_buff *, struct net_device *, const struct ieee802154_addr *, const struct ieee802154_addr *, unsigned int); +}; + +struct tap_filter { + unsigned int count; + u32 mask[2]; + unsigned char addr[48]; +}; + +struct tun_struct; + +struct tun_file { + struct sock sk; + long: 64; + struct socket socket; + struct tun_struct *tun; + struct fasync_struct *fasync; + unsigned int flags; + union { + u16 queue_index; + unsigned int ifindex; + }; + struct napi_struct napi; + bool napi_enabled; + bool napi_frags_enabled; + struct mutex napi_mutex; + struct list_head next; + struct tun_struct *detached; + long: 64; + long: 64; + long: 64; + struct ptr_ring tx_ring; + struct xdp_rxq_info xdp_rxq; +}; + +struct tun_prog; + +struct tun_struct { + struct tun_file *tfiles[256]; + unsigned int numqueues; + unsigned int flags; + kuid_t owner; + kgid_t group; + struct net_device *dev; + netdev_features_t set_features; + int align; + int vnet_hdr_sz; + int sndbuf; + struct tap_filter txflt; + struct sock_fprog fprog; + bool filter_attached; + u32 msg_enable; + spinlock_t lock; + struct hlist_head flows[1024]; + struct timer_list flow_gc_timer; + long unsigned int ageing_time; + unsigned int numdisabled; + struct list_head disabled; + void *security; + u32 flow_count; + u32 rx_batched; + atomic_long_t rx_frame_errors; + struct bpf_prog *xdp_prog; + struct tun_prog *steering_prog; + struct tun_prog *filter_prog; + struct ethtool_link_ksettings link_ksettings; +}; + +struct tun_page { + struct page *page; + int count; +}; + +struct tun_flow_entry { + struct hlist_node hash_link; + struct callback_head rcu; + struct tun_struct *tun; + u32 rxhash; + u32 rps_rxhash; + int queue_index; + long: 32; + long: 64; + long unsigned int updated; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct tun_prog { + struct callback_head rcu; + struct bpf_prog *prog; +}; + +struct veth { + __be16 h_vlan_proto; + __be16 h_vlan_TCI; +}; + +enum { + IFLA_UNSPEC = 0, + IFLA_ADDRESS = 1, + IFLA_BROADCAST = 2, + IFLA_IFNAME = 3, + IFLA_MTU = 4, + IFLA_LINK = 5, + IFLA_QDISC = 6, + IFLA_STATS = 7, + IFLA_COST = 8, + IFLA_PRIORITY = 9, + IFLA_MASTER = 10, + IFLA_WIRELESS = 11, + IFLA_PROTINFO = 
12, + IFLA_TXQLEN = 13, + IFLA_MAP = 14, + IFLA_WEIGHT = 15, + IFLA_OPERSTATE = 16, + IFLA_LINKMODE = 17, + IFLA_LINKINFO = 18, + IFLA_NET_NS_PID = 19, + IFLA_IFALIAS = 20, + IFLA_NUM_VF = 21, + IFLA_VFINFO_LIST = 22, + IFLA_STATS64 = 23, + IFLA_VF_PORTS = 24, + IFLA_PORT_SELF = 25, + IFLA_AF_SPEC = 26, + IFLA_GROUP = 27, + IFLA_NET_NS_FD = 28, + IFLA_EXT_MASK = 29, + IFLA_PROMISCUITY = 30, + IFLA_NUM_TX_QUEUES = 31, + IFLA_NUM_RX_QUEUES = 32, + IFLA_CARRIER = 33, + IFLA_PHYS_PORT_ID = 34, + IFLA_CARRIER_CHANGES = 35, + IFLA_PHYS_SWITCH_ID = 36, + IFLA_LINK_NETNSID = 37, + IFLA_PHYS_PORT_NAME = 38, + IFLA_PROTO_DOWN = 39, + IFLA_GSO_MAX_SEGS = 40, + IFLA_GSO_MAX_SIZE = 41, + IFLA_PAD = 42, + IFLA_XDP = 43, + IFLA_EVENT = 44, + IFLA_NEW_NETNSID = 45, + IFLA_IF_NETNSID = 46, + IFLA_TARGET_NETNSID = 46, + IFLA_CARRIER_UP_COUNT = 47, + IFLA_CARRIER_DOWN_COUNT = 48, + IFLA_NEW_IFINDEX = 49, + IFLA_MIN_MTU = 50, + IFLA_MAX_MTU = 51, + IFLA_PROP_LIST = 52, + IFLA_ALT_IFNAME = 53, + IFLA_PERM_ADDRESS = 54, + IFLA_PROTO_DOWN_REASON = 55, + __IFLA_MAX = 56, +}; + +enum { + IFLA_PPP_UNSPEC = 0, + IFLA_PPP_DEV_FD = 1, + __IFLA_PPP_MAX = 2, +}; + +enum NPmode { + NPMODE_PASS = 0, + NPMODE_DROP = 1, + NPMODE_ERROR = 2, + NPMODE_QUEUE = 3, +}; + +struct pppstat { + __u32 ppp_discards; + __u32 ppp_ibytes; + __u32 ppp_ioctects; + __u32 ppp_ipackets; + __u32 ppp_ierrors; + __u32 ppp_ilqrs; + __u32 ppp_obytes; + __u32 ppp_ooctects; + __u32 ppp_opackets; + __u32 ppp_oerrors; + __u32 ppp_olqrs; +}; + +struct vjstat { + __u32 vjs_packets; + __u32 vjs_compressed; + __u32 vjs_searches; + __u32 vjs_misses; + __u32 vjs_uncompressedin; + __u32 vjs_compressedin; + __u32 vjs_errorin; + __u32 vjs_tossed; +}; + +struct compstat { + __u32 unc_bytes; + __u32 unc_packets; + __u32 comp_bytes; + __u32 comp_packets; + __u32 inc_bytes; + __u32 inc_packets; + __u32 in_count; + __u32 bytes_out; + double ratio; +}; + +struct ppp_stats { + struct pppstat p; + struct vjstat vj; +}; + +struct ppp_comp_stats { + struct compstat c; + struct compstat d; +}; + +struct ppp_idle32 { + __s32 xmit_idle; + __s32 recv_idle; +}; + +struct ppp_idle64 { + __s64 xmit_idle; + __s64 recv_idle; +}; + +struct npioctl { + int protocol; + enum NPmode mode; +}; + +struct ppp_option_data { + __u8 *ptr; + __u32 length; + int transmit; +}; + +struct ppp_channel; + +struct ppp_channel_ops { + int (*start_xmit)(struct ppp_channel *, struct sk_buff *); + int (*ioctl)(struct ppp_channel *, unsigned int, long unsigned int); + int (*fill_forward_path)(struct net_device_path_ctx *, struct net_device_path *, const struct ppp_channel *); +}; + +struct ppp_channel { + void *private; + const struct ppp_channel_ops *ops; + int mtu; + int hdrlen; + void *ppp; + int speed; + int latency; +}; + +struct compressor { + int compress_proto; + void * (*comp_alloc)(unsigned char *, int); + void (*comp_free)(void *); + int (*comp_init)(void *, unsigned char *, int, int, int, int); + void (*comp_reset)(void *); + int (*compress)(void *, unsigned char *, unsigned char *, int, int); + void (*comp_stat)(void *, struct compstat *); + void * (*decomp_alloc)(unsigned char *, int); + void (*decomp_free)(void *); + int (*decomp_init)(void *, unsigned char *, int, int, int, int, int); + void (*decomp_reset)(void *); + int (*decompress)(void *, unsigned char *, int, unsigned char *, int); + void (*incomp)(void *, unsigned char *, int); + void (*decomp_stat)(void *, struct compstat *); + struct module *owner; + unsigned int comp_extra; +}; + +typedef __u8 byte_t; + +typedef __u32 int32; + 
+struct cstate___2 { + byte_t cs_this; + bool initialized; + struct cstate___2 *next; + struct iphdr cs_ip; + struct tcphdr cs_tcp; + unsigned char cs_ipopt[64]; + unsigned char cs_tcpopt[64]; + int cs_hsize; +}; + +struct slcompress { + struct cstate___2 *tstate; + struct cstate___2 *rstate; + byte_t tslot_limit; + byte_t rslot_limit; + byte_t xmit_oldest; + byte_t xmit_current; + byte_t recv_current; + byte_t flags; + int32 sls_o_nontcp; + int32 sls_o_tcp; + int32 sls_o_uncompressed; + int32 sls_o_compressed; + int32 sls_o_searches; + int32 sls_o_misses; + int32 sls_i_uncompressed; + int32 sls_i_compressed; + int32 sls_i_error; + int32 sls_i_tossed; + int32 sls_i_runt; + int32 sls_i_badcheck; +}; + +struct ppp_file { + enum { + INTERFACE = 1, + CHANNEL = 2, + } kind; + struct sk_buff_head xq; + struct sk_buff_head rq; + wait_queue_head_t rwait; + refcount_t refcnt; + int hdrlen; + int index; + int dead; +}; + +struct ppp_link_stats { + u64 rx_packets; + u64 tx_packets; + u64 rx_bytes; + u64 tx_bytes; +}; + +struct ppp { + struct ppp_file file; + struct file *owner; + struct list_head channels; + int n_channels; + spinlock_t rlock; + spinlock_t wlock; + int *xmit_recursion; + int mru; + unsigned int flags; + unsigned int xstate; + unsigned int rstate; + int debug; + struct slcompress *vj; + enum NPmode npmode[6]; + struct sk_buff *xmit_pending; + struct compressor *xcomp; + void *xc_state; + struct compressor *rcomp; + void *rc_state; + long unsigned int last_xmit; + long unsigned int last_recv; + struct net_device *dev; + int closing; + int nxchan; + u32 nxseq; + int mrru; + u32 nextseq; + u32 minseq; + struct sk_buff_head mrq; + struct bpf_prog *pass_filter; + struct bpf_prog *active_filter; + struct net *ppp_net; + struct ppp_link_stats stats64; +}; + +struct channel { + struct ppp_file file; + struct list_head list; + struct ppp_channel *chan; + struct rw_semaphore chan_sem; + spinlock_t downl; + struct ppp *ppp; + struct net *chan_net; + struct list_head clist; + rwlock_t upl; + struct channel *bridge; + u8 avail; + u8 had_frag; + u32 lastseq; + int speed; +}; + +struct ppp_config { + struct file *file; + s32 unit; + bool ifname_is_set; +}; + +struct ppp_net { + struct idr units_idr; + struct mutex all_ppp_mutex; + struct list_head all_channels; + struct list_head new_channels; + int last_channel_index; + spinlock_t all_channels_lock; +}; + +struct sock_fprog32 { + short unsigned int len; + compat_caddr_t filter; +}; + +struct ppp_option_data32 { + compat_uptr_t ptr; + u32 length; + compat_int_t transmit; +}; + +struct ppp_mp_skb_parm { + u32 sequence; + u8 BEbits; +}; + +struct compressor_entry { + struct list_head list; + struct compressor *comp; +}; + +struct wl1251_platform_data { + int power_gpio; + int irq; + bool use_eeprom; +}; + +enum wwan_port_type { + WWAN_PORT_AT = 0, + WWAN_PORT_MBIM = 1, + WWAN_PORT_QMI = 2, + WWAN_PORT_QCDM = 3, + WWAN_PORT_FIREHOSE = 4, + WWAN_PORT_MAX = 5, +}; + +struct wwan_port; + +struct wwan_port_ops { + int (*start)(struct wwan_port *); + void (*stop)(struct wwan_port *); + int (*tx)(struct wwan_port *, struct sk_buff *); +}; + +struct wwan_port { + enum wwan_port_type type; + unsigned int start_count; + long unsigned int flags; + const struct wwan_port_ops *ops; + struct mutex ops_lock; + struct device dev; + struct sk_buff_head rxq; + wait_queue_head_t waitqueue; +}; + +struct wwan_device { + unsigned int id; + struct device dev; + atomic_t port_id; +}; + +enum skb_free_reason { + SKB_REASON_CONSUMED = 0, + SKB_REASON_DROPPED = 1, +}; + +enum 
ethtool_stringset { + ETH_SS_TEST = 0, + ETH_SS_STATS = 1, + ETH_SS_PRIV_FLAGS = 2, + ETH_SS_NTUPLE_FILTERS = 3, + ETH_SS_FEATURES = 4, + ETH_SS_RSS_HASH_FUNCS = 5, + ETH_SS_TUNABLES = 6, + ETH_SS_PHY_STATS = 7, + ETH_SS_PHY_TUNABLES = 8, + ETH_SS_LINK_MODES = 9, + ETH_SS_MSG_CLASSES = 10, + ETH_SS_WOL_MODES = 11, + ETH_SS_SOF_TIMESTAMPING = 12, + ETH_SS_TS_TX_TYPES = 13, + ETH_SS_TS_RX_FILTERS = 14, + ETH_SS_UDP_TUNNEL_TYPES = 15, + ETH_SS_STATS_STD = 16, + ETH_SS_STATS_ETH_PHY = 17, + ETH_SS_STATS_ETH_MAC = 18, + ETH_SS_STATS_ETH_CTRL = 19, + ETH_SS_STATS_RMON = 20, + ETH_SS_COUNT = 21, +}; + +struct pp_alloc_cache { + u32 count; + struct page *cache[128]; +}; + +struct page_pool_params { + unsigned int flags; + unsigned int order; + unsigned int pool_size; + int nid; + struct device *dev; + enum dma_data_direction dma_dir; + unsigned int max_len; + unsigned int offset; +}; + +struct page_pool { + struct page_pool_params p; + struct delayed_work release_dw; + void (*disconnect)(void *); + long unsigned int defer_start; + long unsigned int defer_warn; + u32 pages_state_hold_cnt; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + struct pp_alloc_cache alloc; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct ptr_ring ring; + atomic_t pages_state_release_cnt; + refcount_t user_cnt; + u64 destroy_cnt; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct xen_netif_tx_request { + grant_ref_t gref; + uint16_t offset; + uint16_t flags; + uint16_t id; + uint16_t size; +}; + +struct xen_netif_extra_info { + uint8_t type; + uint8_t flags; + union { + struct { + uint16_t size; + uint8_t type; + uint8_t pad; + uint16_t features; + } gso; + struct { + uint8_t addr[6]; + } mcast; + struct { + uint8_t type; + uint8_t algorithm; + uint8_t value[4]; + } hash; + struct { + uint16_t headroom; + uint16_t pad[2]; + } xdp; + uint16_t pad[3]; + } u; +}; + +struct xen_netif_tx_response { + uint16_t id; + int16_t status; +}; + +struct xen_netif_rx_request { + uint16_t id; + uint16_t pad; + grant_ref_t gref; +}; + +struct xen_netif_rx_response { + uint16_t id; + uint16_t offset; + uint16_t flags; + int16_t status; +}; + +union xen_netif_tx_sring_entry { + struct xen_netif_tx_request req; + struct xen_netif_tx_response rsp; +}; + +struct xen_netif_tx_sring { + RING_IDX req_prod; + RING_IDX req_event; + RING_IDX rsp_prod; + RING_IDX rsp_event; + uint8_t pad[48]; + union xen_netif_tx_sring_entry ring[1]; +}; + +struct xen_netif_tx_front_ring { + RING_IDX req_prod_pvt; + RING_IDX rsp_cons; + unsigned int nr_ents; + struct xen_netif_tx_sring *sring; +}; + +union xen_netif_rx_sring_entry { + struct xen_netif_rx_request req; + struct xen_netif_rx_response rsp; +}; + +struct xen_netif_rx_sring { + RING_IDX req_prod; + RING_IDX req_event; + RING_IDX rsp_prod; + RING_IDX rsp_event; + uint8_t pad[48]; + union xen_netif_rx_sring_entry ring[1]; +}; + +struct xen_netif_rx_front_ring { + RING_IDX req_prod_pvt; + RING_IDX rsp_cons; + unsigned int nr_ents; + struct xen_netif_rx_sring *sring; +}; + +struct netfront_cb { + int pull_to; +}; + +struct netfront_stats { + u64 packets; + u64 bytes; + struct u64_stats_sync syncp; +}; + +union skb_entry { + struct sk_buff *skb; + long unsigned int link; +}; + +struct netfront_info; + +struct netfront_queue { + unsigned int id; + char name[22]; + struct netfront_info *info; + struct bpf_prog *xdp_prog; + struct napi_struct napi; + unsigned int tx_evtchn; + unsigned int rx_evtchn; + unsigned int tx_irq; + 
unsigned int rx_irq; + char tx_irq_name[25]; + char rx_irq_name[25]; + spinlock_t tx_lock; + struct xen_netif_tx_front_ring tx; + int tx_ring_ref; + union skb_entry tx_skbs[256]; + grant_ref_t gref_tx_head; + grant_ref_t grant_tx_ref[256]; + struct page *grant_tx_page[256]; + unsigned int tx_skb_freelist; + long: 32; + long: 64; + spinlock_t rx_lock; + struct xen_netif_rx_front_ring rx; + int rx_ring_ref; + struct timer_list rx_refill_timer; + struct sk_buff *rx_skbs[256]; + grant_ref_t gref_rx_head; + grant_ref_t grant_rx_ref[256]; + struct page_pool *page_pool; + long: 64; + long: 64; + long: 64; + long: 64; + struct xdp_rxq_info xdp_rxq; +}; + +struct netfront_info { + struct list_head list; + struct net_device *netdev; + struct xenbus_device *xbdev; + struct netfront_queue *queues; + struct netfront_stats *rx_stats; + struct netfront_stats *tx_stats; + bool netback_has_xdp_headroom; + bool netfront_xdp_enabled; + atomic_t rx_gso_checksum_fixup; +}; + +struct netfront_rx_info { + struct xen_netif_rx_response rx; + struct xen_netif_extra_info extras[5]; +}; + +struct xennet_gnttab_make_txreq { + struct netfront_queue *queue; + struct sk_buff *skb; + struct page *page; + struct xen_netif_tx_request *tx; + unsigned int size; +}; + +struct xennet_stat { + char name[32]; + u16 offset; +}; + +struct vfio_info_cap_header { + __u16 id; + __u16 version; + __u32 next; +}; + +struct vfio_group_status { + __u32 argsz; + __u32 flags; +}; + +struct vfio_irq_set { + __u32 argsz; + __u32 flags; + __u32 index; + __u32 start; + __u32 count; + __u8 data[0]; +}; + +struct vfio_device_ops; + +struct vfio_group; + +struct vfio_device { + struct device *dev; + const struct vfio_device_ops *ops; + struct vfio_group *group; + refcount_t refcount; + struct completion comp; + struct list_head group_next; +}; + +struct vfio_device_ops { + char *name; + int (*open)(struct vfio_device *); + void (*release)(struct vfio_device *); + ssize_t (*read)(struct vfio_device *, char *, size_t, loff_t *); + ssize_t (*write)(struct vfio_device *, const char *, size_t, loff_t *); + long int (*ioctl)(struct vfio_device *, unsigned int, long unsigned int); + int (*mmap)(struct vfio_device *, struct vm_area_struct *); + void (*request)(struct vfio_device *, unsigned int); + int (*match)(struct vfio_device *, char *); +}; + +struct kvm___2; + +struct vfio_container; + +struct vfio_group { + struct kref kref; + int minor; + atomic_t container_users; + struct iommu_group *iommu_group; + struct vfio_container *container; + struct list_head device_list; + struct mutex device_lock; + struct device *dev; + struct notifier_block nb; + struct list_head vfio_next; + struct list_head container_next; + struct list_head unbound_list; + struct mutex unbound_lock; + atomic_t opened; + wait_queue_head_t container_q; + bool noiommu; + unsigned int dev_counter; + struct kvm___2 *kvm; + struct blocking_notifier_head notifier; +}; + +enum vfio_iommu_notify_type { + VFIO_IOMMU_CONTAINER_CLOSE = 0, +}; + +struct vfio_iommu_driver_ops { + char *name; + struct module *owner; + void * (*open)(long unsigned int); + void (*release)(void *); + ssize_t (*read)(void *, char *, size_t, loff_t *); + ssize_t (*write)(void *, const char *, size_t, loff_t *); + long int (*ioctl)(void *, unsigned int, long unsigned int); + int (*mmap)(void *, struct vm_area_struct *); + int (*attach_group)(void *, struct iommu_group *); + void (*detach_group)(void *, struct iommu_group *); + int (*pin_pages)(void *, struct iommu_group *, long unsigned int *, int, int, long unsigned 
int *); + int (*unpin_pages)(void *, long unsigned int *, int); + int (*register_notifier)(void *, long unsigned int *, struct notifier_block *); + int (*unregister_notifier)(void *, struct notifier_block *); + int (*dma_rw)(void *, dma_addr_t, void *, size_t, bool); + struct iommu_domain * (*group_iommu_domain)(void *, struct iommu_group *); + void (*notify)(void *, enum vfio_iommu_notify_type); +}; + +enum vfio_notify_type { + VFIO_IOMMU_NOTIFY = 0, + VFIO_GROUP_NOTIFY = 1, +}; + +struct vfio_info_cap { + struct vfio_info_cap_header *buf; + size_t size; +}; + +struct vfio { + struct class *class; + struct list_head iommu_drivers_list; + struct mutex iommu_drivers_lock; + struct list_head group_list; + struct idr group_idr; + struct mutex group_lock; + struct cdev group_cdev; + dev_t group_devt; +}; + +struct vfio_iommu_driver { + const struct vfio_iommu_driver_ops *ops; + struct list_head vfio_next; +}; + +struct vfio_container { + struct kref kref; + struct list_head group_list; + struct rw_semaphore group_lock; + struct vfio_iommu_driver *iommu_driver; + void *iommu_data; + bool noiommu; +}; + +struct vfio_unbound_dev { + struct device *dev; + struct list_head unbound_next; +}; + +struct vfio_group_put_work { + struct work_struct work; + struct vfio_group *group; +}; + +struct virqfd { + void *opaque; + struct eventfd_ctx *eventfd; + int (*handler)(void *, void *); + void (*thread)(void *, void *); + void *data; + struct work_struct inject; + wait_queue_entry_t wait; + poll_table pt; + struct work_struct shutdown; + struct virqfd **pvirqfd; +}; + +struct vfio_iommu_type1_info { + __u32 argsz; + __u32 flags; + __u64 iova_pgsizes; + __u32 cap_offset; +}; + +struct vfio_iova_range { + __u64 start; + __u64 end; +}; + +struct vfio_iommu_type1_info_cap_iova_range { + struct vfio_info_cap_header header; + __u32 nr_iovas; + __u32 reserved; + struct vfio_iova_range iova_ranges[0]; +}; + +struct vfio_iommu_type1_info_cap_migration { + struct vfio_info_cap_header header; + __u32 flags; + __u64 pgsize_bitmap; + __u64 max_dirty_bitmap_size; +}; + +struct vfio_iommu_type1_info_dma_avail { + struct vfio_info_cap_header header; + __u32 avail; +}; + +struct vfio_iommu_type1_dma_map { + __u32 argsz; + __u32 flags; + __u64 vaddr; + __u64 iova; + __u64 size; +}; + +struct vfio_bitmap { + __u64 pgsize; + __u64 size; + __u64 *data; +}; + +struct vfio_iommu_type1_dma_unmap { + __u32 argsz; + __u32 flags; + __u64 iova; + __u64 size; + __u8 data[0]; +}; + +struct vfio_iommu_type1_dirty_bitmap { + __u32 argsz; + __u32 flags; + __u8 data[0]; +}; + +struct vfio_iommu_type1_dirty_bitmap_get { + __u64 iova; + __u64 size; + struct vfio_bitmap bitmap; +}; + +struct vfio_group___2 { + struct iommu_group *iommu_group; + struct list_head next; + bool mdev_group; + bool pinned_page_dirty_scope; +}; + +struct mdev_type; + +struct mdev_device { + struct device dev; + guid_t uuid; + void *driver_data; + struct list_head next; + struct mdev_type *type; + struct device *iommu_device; + bool active; +}; + +struct vfio_domain; + +struct vfio_iommu { + struct list_head domain_list; + struct list_head iova_list; + struct vfio_domain *external_domain; + struct mutex lock; + struct rb_root dma_list; + struct blocking_notifier_head notifier; + unsigned int dma_avail; + unsigned int vaddr_invalid_count; + uint64_t pgsize_bitmap; + uint64_t num_non_pinned_groups; + wait_queue_head_t vaddr_wait; + bool v2; + bool nesting; + bool dirty_page_tracking; + bool container_open; +}; + +struct vfio_domain { + struct iommu_domain *domain; + 
struct list_head next; + struct list_head group_list; + int prot; + bool fgsp; +}; + +struct vfio_dma { + struct rb_node node; + dma_addr_t iova; + long unsigned int vaddr; + size_t size; + int prot; + bool iommu_mapped; + bool lock_cap; + bool vaddr_invalid; + struct task_struct *task; + struct rb_root pfn_list; + long unsigned int *bitmap; +}; + +struct vfio_batch { + struct page **pages; + struct page *fallback_page; + int capacity; + int size; + int offset; +}; + +struct vfio_iova { + struct list_head list; + dma_addr_t start; + dma_addr_t end; +}; + +struct vfio_pfn { + struct rb_node node; + dma_addr_t iova; + long unsigned int pfn; + unsigned int ref_count; +}; + +struct vfio_regions { + struct list_head list; + dma_addr_t iova; + phys_addr_t phys; + size_t len; +}; + +struct vfio_device_info { + __u32 argsz; + __u32 flags; + __u32 num_regions; + __u32 num_irqs; + __u32 cap_offset; +}; + +struct vfio_region_info { + __u32 argsz; + __u32 flags; + __u32 index; + __u32 cap_offset; + __u64 size; + __u64 offset; +}; + +struct vfio_region_info_cap_type { + struct vfio_info_cap_header header; + __u32 type; + __u32 subtype; +}; + +struct vfio_irq_info { + __u32 argsz; + __u32 flags; + __u32 index; + __u32 count; +}; + +enum { + VFIO_PCI_BAR0_REGION_INDEX = 0, + VFIO_PCI_BAR1_REGION_INDEX = 1, + VFIO_PCI_BAR2_REGION_INDEX = 2, + VFIO_PCI_BAR3_REGION_INDEX = 3, + VFIO_PCI_BAR4_REGION_INDEX = 4, + VFIO_PCI_BAR5_REGION_INDEX = 5, + VFIO_PCI_ROM_REGION_INDEX = 6, + VFIO_PCI_CONFIG_REGION_INDEX = 7, + VFIO_PCI_VGA_REGION_INDEX = 8, + VFIO_PCI_NUM_REGIONS = 9, +}; + +enum { + VFIO_PCI_INTX_IRQ_INDEX = 0, + VFIO_PCI_MSI_IRQ_INDEX = 1, + VFIO_PCI_MSIX_IRQ_INDEX = 2, + VFIO_PCI_ERR_IRQ_INDEX = 3, + VFIO_PCI_REQ_IRQ_INDEX = 4, + VFIO_PCI_NUM_IRQS = 5, +}; + +struct vfio_pci_dependent_device { + __u32 group_id; + __u16 segment; + __u8 bus; + __u8 devfn; +}; + +struct vfio_pci_hot_reset_info { + __u32 argsz; + __u32 flags; + __u32 count; + struct vfio_pci_dependent_device devices[0]; +}; + +struct vfio_pci_hot_reset { + __u32 argsz; + __u32 flags; + __u32 count; + __s32 group_fds[0]; +}; + +struct vfio_device_ioeventfd { + __u32 argsz; + __u32 flags; + __u64 offset; + __u64 data; + __s32 fd; +}; + +struct vfio_device_feature { + __u32 argsz; + __u32 flags; + __u8 data[0]; +}; + +struct irq_bypass_consumer; + +struct irq_bypass_producer { + struct list_head node; + void *token; + int irq; + int (*add_consumer)(struct irq_bypass_producer *, struct irq_bypass_consumer *); + void (*del_consumer)(struct irq_bypass_producer *, struct irq_bypass_consumer *); + void (*stop)(struct irq_bypass_producer *); + void (*start)(struct irq_bypass_producer *); +}; + +struct irq_bypass_consumer { + struct list_head node; + void *token; + int (*add_producer)(struct irq_bypass_consumer *, struct irq_bypass_producer *); + void (*del_producer)(struct irq_bypass_consumer *, struct irq_bypass_producer *); + void (*stop)(struct irq_bypass_consumer *); + void (*start)(struct irq_bypass_consumer *); +}; + +struct vfio_pci_device; + +struct vfio_pci_ioeventfd { + struct list_head next; + struct vfio_pci_device *vdev; + struct virqfd *virqfd; + void *addr; + uint64_t data; + loff_t pos; + int bar; + int count; + bool test_mem; +}; + +struct pci_saved_state___2; + +struct perm_bits; + +struct vfio_pci_irq_ctx; + +struct vfio_pci_region; + +struct vfio_pci_reflck; + +struct vfio_pci_vf_token; + +struct vfio_pci_device { + struct vfio_device vdev; + struct pci_dev *pdev; + void *barmap[6]; + bool bar_mmap_supported[6]; + u8 
*pci_config_map; + u8 *vconfig; + struct perm_bits *msi_perm; + spinlock_t irqlock; + struct mutex igate; + struct vfio_pci_irq_ctx *ctx; + int num_ctx; + int irq_type; + int num_regions; + struct vfio_pci_region *region; + u8 msi_qmax; + u8 msix_bar; + u16 msix_size; + u32 msix_offset; + u32 rbar[7]; + bool pci_2_3; + bool virq_disabled; + bool reset_works; + bool extended_caps; + bool bardirty; + bool has_vga; + bool needs_reset; + bool nointx; + bool needs_pm_restore; + struct pci_saved_state___2 *pci_saved_state; + struct pci_saved_state___2 *pm_save; + struct vfio_pci_reflck *reflck; + int refcnt; + int ioeventfds_nr; + struct eventfd_ctx *err_trigger; + struct eventfd_ctx *req_trigger; + struct list_head dummy_resources_list; + struct mutex ioeventfds_lock; + struct list_head ioeventfds_list; + struct vfio_pci_vf_token *vf_token; + struct notifier_block nb; + struct mutex vma_lock; + struct list_head vma_list; + struct rw_semaphore memory_lock; +}; + +struct vfio_pci_irq_ctx { + struct eventfd_ctx *trigger; + struct virqfd *unmask; + struct virqfd *mask; + char *name; + bool masked; + struct irq_bypass_producer producer; +}; + +struct vfio_pci_regops { + size_t (*rw)(struct vfio_pci_device *, char *, size_t, loff_t *, bool); + void (*release)(struct vfio_pci_device *, struct vfio_pci_region *); + int (*mmap)(struct vfio_pci_device *, struct vfio_pci_region *, struct vm_area_struct *); + int (*add_capability)(struct vfio_pci_device *, struct vfio_pci_region *, struct vfio_info_cap *); +}; + +struct vfio_pci_region { + u32 type; + u32 subtype; + const struct vfio_pci_regops *ops; + void *data; + size_t size; + u32 flags; +}; + +struct vfio_pci_dummy_resource { + struct resource resource; + int index; + struct list_head res_next; +}; + +struct vfio_pci_reflck { + struct kref kref; + struct mutex lock; +}; + +struct vfio_pci_vf_token { + struct mutex lock; + uuid_t uuid; + int users; +}; + +struct vfio_pci_mmap_vma { + struct vm_area_struct *vma; + struct list_head vma_next; +}; + +struct vfio_pci_fill_info { + int max; + int cur; + struct vfio_pci_dependent_device *devices; +}; + +struct vfio_pci_group_entry { + struct vfio_group *group; + int id; +}; + +struct vfio_pci_group_info { + int count; + struct vfio_pci_group_entry *groups; +}; + +struct vfio_pci_walk_info { + int (*fn)(struct pci_dev *, void *); + void *data; + struct pci_dev *pdev; + bool slot; + int ret; +}; + +struct vfio_devices { + struct vfio_pci_device **devices; + int cur_index; + int max_index; +}; + +struct perm_bits { + u8 *virt; + u8 *write; + int (*readfn)(struct vfio_pci_device *, int, int, struct perm_bits *, int, __le32 *); + int (*writefn)(struct vfio_pci_device *, int, int, struct perm_bits *, int, __le32); +}; + +struct cdrom_msf { + __u8 cdmsf_min0; + __u8 cdmsf_sec0; + __u8 cdmsf_frame0; + __u8 cdmsf_min1; + __u8 cdmsf_sec1; + __u8 cdmsf_frame1; +}; + +struct cdrom_volctrl { + __u8 channel0; + __u8 channel1; + __u8 channel2; + __u8 channel3; +}; + +struct cdrom_subchnl { + __u8 cdsc_format; + __u8 cdsc_audiostatus; + __u8 cdsc_adr: 4; + __u8 cdsc_ctrl: 4; + __u8 cdsc_trk; + __u8 cdsc_ind; + union cdrom_addr cdsc_absaddr; + union cdrom_addr cdsc_reladdr; +}; + +struct cdrom_read_audio { + union cdrom_addr addr; + __u8 addr_format; + int nframes; + __u8 *buf; +}; + +struct cdrom_blk { + unsigned int from; + short unsigned int len; +}; + +struct dvd_layer { + __u8 book_version: 4; + __u8 book_type: 4; + __u8 min_rate: 4; + __u8 disc_size: 4; + __u8 layer_type: 4; + __u8 track_path: 1; + __u8 nlayers: 2; + 
char: 1; + __u8 track_density: 4; + __u8 linear_density: 4; + __u8 bca: 1; + __u32 start_sector; + __u32 end_sector; + __u32 end_sector_l0; +}; + +struct dvd_physical { + __u8 type; + __u8 layer_num; + struct dvd_layer layer[4]; +}; + +struct dvd_copyright { + __u8 type; + __u8 layer_num; + __u8 cpst; + __u8 rmi; +}; + +struct dvd_disckey { + __u8 type; + unsigned int agid: 2; + __u8 value[2048]; +}; + +struct dvd_bca { + __u8 type; + int len; + __u8 value[188]; +}; + +struct dvd_manufact { + __u8 type; + __u8 layer_num; + int len; + __u8 value[2048]; +}; + +typedef union { + __u8 type; + struct dvd_physical physical; + struct dvd_copyright copyright; + struct dvd_disckey disckey; + struct dvd_bca bca; + struct dvd_manufact manufact; +} dvd_struct; + +typedef __u8 dvd_key[5]; + +typedef __u8 dvd_challenge[10]; + +struct dvd_lu_send_agid { + __u8 type; + unsigned int agid: 2; +}; + +struct dvd_host_send_challenge { + __u8 type; + unsigned int agid: 2; + dvd_challenge chal; +}; + +struct dvd_send_key { + __u8 type; + unsigned int agid: 2; + dvd_key key; +}; + +struct dvd_lu_send_challenge { + __u8 type; + unsigned int agid: 2; + dvd_challenge chal; +}; + +struct dvd_lu_send_title_key { + __u8 type; + unsigned int agid: 2; + dvd_key title_key; + int lba; + unsigned int cpm: 1; + unsigned int cp_sec: 1; + unsigned int cgms: 2; +}; + +struct dvd_lu_send_asf { + __u8 type; + unsigned int agid: 2; + unsigned int asf: 1; +}; + +struct dvd_host_send_rpcstate { + __u8 type; + __u8 pdrc; +}; + +struct dvd_lu_send_rpcstate { + __u8 type: 2; + __u8 vra: 3; + __u8 ucca: 3; + __u8 region_mask; + __u8 rpc_scheme; +}; + +typedef union { + __u8 type; + struct dvd_lu_send_agid lsa; + struct dvd_host_send_challenge hsc; + struct dvd_send_key lsk; + struct dvd_lu_send_challenge lsc; + struct dvd_send_key hsk; + struct dvd_lu_send_title_key lstk; + struct dvd_lu_send_asf lsasf; + struct dvd_host_send_rpcstate hrpcs; + struct dvd_lu_send_rpcstate lrpcs; +} dvd_authinfo; + +struct mrw_feature_desc { + __be16 feature_code; + __u8 curr: 1; + __u8 persistent: 1; + __u8 feature_version: 4; + __u8 reserved1: 2; + __u8 add_len; + __u8 write: 1; + __u8 reserved2: 7; + __u8 reserved3; + __u8 reserved4; + __u8 reserved5; +}; + +struct rwrt_feature_desc { + __be16 feature_code; + __u8 curr: 1; + __u8 persistent: 1; + __u8 feature_version: 4; + __u8 reserved1: 2; + __u8 add_len; + __u32 last_lba; + __u32 block_size; + __u16 blocking; + __u8 page_present: 1; + __u8 reserved2: 7; + __u8 reserved3; +}; + +typedef struct { + __be16 disc_information_length; + __u8 disc_status: 2; + __u8 border_status: 2; + __u8 erasable: 1; + __u8 reserved1: 3; + __u8 n_first_track; + __u8 n_sessions_lsb; + __u8 first_track_lsb; + __u8 last_track_lsb; + __u8 mrw_status: 2; + __u8 dbit: 1; + __u8 reserved2: 2; + __u8 uru: 1; + __u8 dbc_v: 1; + __u8 did_v: 1; + __u8 disc_type; + __u8 n_sessions_msb; + __u8 first_track_msb; + __u8 last_track_msb; + __u32 disc_id; + __u32 lead_in; + __u32 lead_out; + __u8 disc_bar_code[8]; + __u8 reserved3; + __u8 n_opc; +} disc_information; + +typedef struct { + __be16 track_information_length; + __u8 track_lsb; + __u8 session_lsb; + __u8 reserved1; + __u8 track_mode: 4; + __u8 copy: 1; + __u8 damage: 1; + __u8 reserved2: 2; + __u8 data_mode: 4; + __u8 fp: 1; + __u8 packet: 1; + __u8 blank: 1; + __u8 rt: 1; + __u8 nwa_v: 1; + __u8 lra_v: 1; + __u8 reserved3: 6; + __be32 track_start; + __be32 next_writable; + __be32 free_blocks; + __be32 fixed_packet_size; + __be32 track_size; + __be32 last_rec_address; +} 
track_information; + +struct mode_page_header { + __be16 mode_data_length; + __u8 medium_type; + __u8 reserved1; + __u8 reserved2; + __u8 reserved3; + __be16 desc_length; +}; + +typedef struct { + int data; + int audio; + int cdi; + int xa; + long int error; +} tracktype; + +struct cdrom_mechstat_header { + __u8 curslot: 5; + __u8 changer_state: 2; + __u8 fault: 1; + __u8 reserved1: 4; + __u8 door_open: 1; + __u8 mech_state: 3; + __u8 curlba[3]; + __u8 nslots; + __u16 slot_tablelen; +}; + +struct cdrom_slot { + __u8 change: 1; + __u8 reserved1: 6; + __u8 disc_present: 1; + __u8 reserved2[3]; +}; + +struct cdrom_changer_info { + struct cdrom_mechstat_header hdr; + struct cdrom_slot slots[256]; +}; + +struct modesel_head { + __u8 reserved1; + __u8 medium; + __u8 reserved2; + __u8 block_desc_length; + __u8 density; + __u8 number_of_blocks_hi; + __u8 number_of_blocks_med; + __u8 number_of_blocks_lo; + __u8 reserved3; + __u8 block_length_hi; + __u8 block_length_med; + __u8 block_length_lo; +}; + +typedef struct { + __u16 report_key_length; + __u8 reserved1; + __u8 reserved2; + __u8 ucca: 3; + __u8 vra: 3; + __u8 type_code: 2; + __u8 region_mask; + __u8 rpc_scheme; + __u8 reserved3; +} rpc_state_t; + +struct cdrom_sysctl_settings { + char info[1000]; + int autoclose; + int autoeject; + int debug; + int lock; + int check; +}; + +enum cdrom_print_option { + CTL_NAME = 0, + CTL_SPEED = 1, + CTL_SLOTS = 2, + CTL_CAPABILITY = 3, +}; + +struct compat_cdrom_read_audio { + union cdrom_addr addr; + u8 addr_format; + compat_int_t nframes; + compat_caddr_t buf; +}; + +enum usb_otg_state { + OTG_STATE_UNDEFINED = 0, + OTG_STATE_B_IDLE = 1, + OTG_STATE_B_SRP_INIT = 2, + OTG_STATE_B_PERIPHERAL = 3, + OTG_STATE_B_WAIT_ACON = 4, + OTG_STATE_B_HOST = 5, + OTG_STATE_A_IDLE = 6, + OTG_STATE_A_WAIT_VRISE = 7, + OTG_STATE_A_WAIT_BCON = 8, + OTG_STATE_A_HOST = 9, + OTG_STATE_A_SUSPEND = 10, + OTG_STATE_A_PERIPHERAL = 11, + OTG_STATE_A_WAIT_VFALL = 12, + OTG_STATE_A_VBUS_ERR = 13, +}; + +enum usb_dr_mode { + USB_DR_MODE_UNKNOWN = 0, + USB_DR_MODE_HOST = 1, + USB_DR_MODE_PERIPHERAL = 2, + USB_DR_MODE_OTG = 3, +}; + +enum usb_led_event { + USB_LED_EVENT_HOST = 0, + USB_LED_EVENT_GADGET = 1, +}; + +struct usb_device_id { + __u16 match_flags; + __u16 idVendor; + __u16 idProduct; + __u16 bcdDevice_lo; + __u16 bcdDevice_hi; + __u8 bDeviceClass; + __u8 bDeviceSubClass; + __u8 bDeviceProtocol; + __u8 bInterfaceClass; + __u8 bInterfaceSubClass; + __u8 bInterfaceProtocol; + __u8 bInterfaceNumber; + kernel_ulong_t driver_info; +}; + +struct usb_descriptor_header { + __u8 bLength; + __u8 bDescriptorType; +}; + +enum usb_port_connect_type { + USB_PORT_CONNECT_TYPE_UNKNOWN = 0, + USB_PORT_CONNECT_TYPE_HOT_PLUG = 1, + USB_PORT_CONNECT_TYPE_HARD_WIRED = 2, + USB_PORT_NOT_USED = 3, +}; + +struct usb_dynids { + spinlock_t lock; + struct list_head list; +}; + +struct usbdrv_wrap { + struct device_driver driver; + int for_devices; +}; + +struct usb_driver { + const char *name; + int (*probe)(struct usb_interface *, const struct usb_device_id *); + void (*disconnect)(struct usb_interface *); + int (*unlocked_ioctl)(struct usb_interface *, unsigned int, void *); + int (*suspend)(struct usb_interface *, pm_message_t); + int (*resume)(struct usb_interface *); + int (*reset_resume)(struct usb_interface *); + int (*pre_reset)(struct usb_interface *); + int (*post_reset)(struct usb_interface *); + const struct usb_device_id *id_table; + const struct attribute_group **dev_groups; + struct usb_dynids dynids; + struct usbdrv_wrap drvwrap; + 
unsigned int no_dynamic_id: 1; + unsigned int supports_autosuspend: 1; + unsigned int disable_hub_initiated_lpm: 1; + unsigned int soft_unbind: 1; +}; + +struct usb_device_driver { + const char *name; + bool (*match)(struct usb_device *); + int (*probe)(struct usb_device *); + void (*disconnect)(struct usb_device *); + int (*suspend)(struct usb_device *, pm_message_t); + int (*resume)(struct usb_device *, pm_message_t); + const struct attribute_group **dev_groups; + struct usbdrv_wrap drvwrap; + const struct usb_device_id *id_table; + unsigned int supports_autosuspend: 1; + unsigned int generic_subclass: 1; +}; + +enum usb_phy_type { + USB_PHY_TYPE_UNDEFINED = 0, + USB_PHY_TYPE_USB2 = 1, + USB_PHY_TYPE_USB3 = 2, +}; + +enum usb_phy_events { + USB_EVENT_NONE = 0, + USB_EVENT_VBUS = 1, + USB_EVENT_ID = 2, + USB_EVENT_CHARGER = 3, + USB_EVENT_ENUMERATED = 4, +}; + +enum usb_charger_type { + UNKNOWN_TYPE = 0, + SDP_TYPE = 1, + DCP_TYPE = 2, + CDP_TYPE = 3, + ACA_TYPE = 4, +}; + +enum usb_charger_state { + USB_CHARGER_DEFAULT = 0, + USB_CHARGER_PRESENT = 1, + USB_CHARGER_ABSENT = 2, +}; + +struct usb_charger_current { + unsigned int sdp_min; + unsigned int sdp_max; + unsigned int dcp_min; + unsigned int dcp_max; + unsigned int cdp_min; + unsigned int cdp_max; + unsigned int aca_min; + unsigned int aca_max; +}; + +struct usb_otg; + +struct usb_phy_io_ops; + +struct usb_phy { + struct device *dev; + const char *label; + unsigned int flags; + enum usb_phy_type type; + enum usb_phy_events last_event; + struct usb_otg *otg; + struct device *io_dev; + struct usb_phy_io_ops *io_ops; + void *io_priv; + struct extcon_dev *edev; + struct extcon_dev *id_edev; + struct notifier_block vbus_nb; + struct notifier_block id_nb; + struct notifier_block type_nb; + enum usb_charger_type chg_type; + enum usb_charger_state chg_state; + struct usb_charger_current chg_cur; + struct work_struct chg_work; + struct atomic_notifier_head notifier; + u16 port_status; + u16 port_change; + struct list_head head; + int (*init)(struct usb_phy *); + void (*shutdown)(struct usb_phy *); + int (*set_vbus)(struct usb_phy *, int); + int (*set_power)(struct usb_phy *, unsigned int); + int (*set_suspend)(struct usb_phy *, int); + int (*set_wakeup)(struct usb_phy *, bool); + int (*notify_connect)(struct usb_phy *, enum usb_device_speed); + int (*notify_disconnect)(struct usb_phy *, enum usb_device_speed); + enum usb_charger_type (*charger_detect)(struct usb_phy *); +}; + +struct usb_port_status { + __le16 wPortStatus; + __le16 wPortChange; + __le32 dwExtPortStatus; +}; + +struct usb_hub_status { + __le16 wHubStatus; + __le16 wHubChange; +}; + +struct usb_hub_descriptor { + __u8 bDescLength; + __u8 bDescriptorType; + __u8 bNbrPorts; + __le16 wHubCharacteristics; + __u8 bPwrOn2PwrGood; + __u8 bHubContrCurrent; + union { + struct { + __u8 DeviceRemovable[4]; + __u8 PortPwrCtrlMask[4]; + } hs; + struct { + __u8 bHubHdrDecLat; + __le16 wHubDelay; + __le16 DeviceRemovable; + } __attribute__((packed)) ss; + } u; +} __attribute__((packed)); + +struct usb_phy_io_ops { + int (*read)(struct usb_phy *, u32); + int (*write)(struct usb_phy *, u32, u32); +}; + +struct usb_gadget; + +struct usb_otg { + u8 default_a; + struct phy *phy; + struct usb_phy *usb_phy; + struct usb_bus *host; + struct usb_gadget *gadget; + enum usb_otg_state state; + int (*set_host)(struct usb_otg *, struct usb_bus *); + int (*set_peripheral)(struct usb_otg *, struct usb_gadget *); + int (*set_vbus)(struct usb_otg *, bool); + int (*start_srp)(struct usb_otg *); + int 
(*start_hnp)(struct usb_otg *); +}; + +typedef u32 usb_port_location_t; + +struct usb_port; + +struct usb_hub { + struct device *intfdev; + struct usb_device *hdev; + struct kref kref; + struct urb *urb; + u8 (*buffer)[8]; + union { + struct usb_hub_status hub; + struct usb_port_status port; + } *status; + struct mutex status_mutex; + int error; + int nerrors; + long unsigned int event_bits[1]; + long unsigned int change_bits[1]; + long unsigned int removed_bits[1]; + long unsigned int wakeup_bits[1]; + long unsigned int power_bits[1]; + long unsigned int child_usage_bits[1]; + long unsigned int warm_reset_bits[1]; + struct usb_hub_descriptor *descriptor; + struct usb_tt tt; + unsigned int mA_per_port; + unsigned int wakeup_enabled_descendants; + unsigned int limited_power: 1; + unsigned int quiescing: 1; + unsigned int disconnected: 1; + unsigned int in_reset: 1; + unsigned int quirk_disable_autosuspend: 1; + unsigned int quirk_check_port_auto_suspend: 1; + unsigned int has_indicators: 1; + u8 indicator[31]; + struct delayed_work leds; + struct delayed_work init_work; + struct work_struct events; + spinlock_t irq_urb_lock; + struct timer_list irq_urb_retry; + struct usb_port **ports; +}; + +struct usb_dev_state; + +struct usb_port { + struct usb_device *child; + struct device dev; + struct usb_dev_state *port_owner; + struct usb_port *peer; + struct dev_pm_qos_request *req; + enum usb_port_connect_type connect_type; + usb_port_location_t location; + struct mutex status_lock; + u32 over_current_count; + u8 portnum; + u32 quirks; + unsigned int is_superspeed: 1; + unsigned int usb3_lpm_u1_permit: 1; + unsigned int usb3_lpm_u2_permit: 1; +}; + +struct find_interface_arg { + int minor; + struct device_driver *drv; +}; + +struct each_dev_arg { + void *data; + int (*fn)(struct usb_device *, void *); +}; + +struct each_hub_arg { + void *data; + int (*fn)(struct device *, void *); +}; + +struct usb_qualifier_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 bcdUSB; + __u8 bDeviceClass; + __u8 bDeviceSubClass; + __u8 bDeviceProtocol; + __u8 bMaxPacketSize0; + __u8 bNumConfigurations; + __u8 bRESERVED; +}; + +struct usb_set_sel_req { + __u8 u1_sel; + __u8 u1_pel; + __le16 u2_sel; + __le16 u2_pel; +}; + +struct usbdevfs_hub_portinfo { + char nports; + char port[127]; +}; + +enum hub_led_mode { + INDICATOR_AUTO = 0, + INDICATOR_CYCLE = 1, + INDICATOR_GREEN_BLINK = 2, + INDICATOR_GREEN_BLINK_OFF = 3, + INDICATOR_AMBER_BLINK = 4, + INDICATOR_AMBER_BLINK_OFF = 5, + INDICATOR_ALT_BLINK = 6, + INDICATOR_ALT_BLINK_OFF = 7, +}; + +struct usb_tt_clear { + struct list_head clear_list; + unsigned int tt; + u16 devinfo; + struct usb_hcd *hcd; + struct usb_host_endpoint *ep; +}; + +enum hub_activation_type { + HUB_INIT = 0, + HUB_INIT2 = 1, + HUB_INIT3 = 2, + HUB_POST_RESET = 3, + HUB_RESUME = 4, + HUB_RESET_RESUME = 5, +}; + +enum hub_quiescing_type { + HUB_DISCONNECT = 0, + HUB_PRE_RESET = 1, + HUB_SUSPEND = 2, +}; + +struct usb_ctrlrequest { + __u8 bRequestType; + __u8 bRequest; + __le16 wValue; + __le16 wIndex; + __le16 wLength; +}; + +struct usb_mon_operations { + void (*urb_submit)(struct usb_bus *, struct urb *); + void (*urb_submit_error)(struct usb_bus *, struct urb *, int); + void (*urb_complete)(struct usb_bus *, struct urb *, int); +}; + +struct usb_sg_request { + int status; + size_t bytes; + spinlock_t lock; + struct usb_device *dev; + int pipe; + int entries; + struct urb **urbs; + int count; + struct completion complete; +}; + +struct usb_cdc_header_desc { + __u8 bLength; + __u8 
bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdCDC; +} __attribute__((packed)); + +struct usb_cdc_call_mgmt_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bmCapabilities; + __u8 bDataInterface; +}; + +struct usb_cdc_acm_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bmCapabilities; +}; + +struct usb_cdc_union_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bMasterInterface0; + __u8 bSlaveInterface0; +}; + +struct usb_cdc_country_functional_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 iCountryCodeRelDate; + __le16 wCountyCode0; +}; + +struct usb_cdc_network_terminal_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bEntityId; + __u8 iName; + __u8 bChannelIndex; + __u8 bPhysicalInterface; +}; + +struct usb_cdc_ether_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 iMACAddress; + __le32 bmEthernetStatistics; + __le16 wMaxSegmentSize; + __le16 wNumberMCFilters; + __u8 bNumberPowerFilters; +} __attribute__((packed)); + +struct usb_cdc_dmm_desc { + __u8 bFunctionLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u16 bcdVersion; + __le16 wMaxCommand; +} __attribute__((packed)); + +struct usb_cdc_mdlm_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdVersion; + __u8 bGUID[16]; +} __attribute__((packed)); + +struct usb_cdc_mdlm_detail_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bGuidDescriptorType; + __u8 bDetailData[0]; +}; + +struct usb_cdc_obex_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdVersion; +} __attribute__((packed)); + +struct usb_cdc_ncm_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdNcmVersion; + __u8 bmNetworkCapabilities; +} __attribute__((packed)); + +struct usb_cdc_mbim_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdMBIMVersion; + __le16 wMaxControlMessage; + __u8 bNumberFilters; + __u8 bMaxFilterSize; + __le16 wMaxSegmentSize; + __u8 bmNetworkCapabilities; +} __attribute__((packed)); + +struct usb_cdc_mbim_extended_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdMBIMExtendedVersion; + __u8 bMaxOutstandingCommandMessages; + __le16 wMTU; +} __attribute__((packed)); + +struct usb_cdc_parsed_header { + struct usb_cdc_union_desc *usb_cdc_union_desc; + struct usb_cdc_header_desc *usb_cdc_header_desc; + struct usb_cdc_call_mgmt_descriptor *usb_cdc_call_mgmt_descriptor; + struct usb_cdc_acm_descriptor *usb_cdc_acm_descriptor; + struct usb_cdc_country_functional_desc *usb_cdc_country_functional_desc; + struct usb_cdc_network_terminal_desc *usb_cdc_network_terminal_desc; + struct usb_cdc_ether_desc *usb_cdc_ether_desc; + struct usb_cdc_dmm_desc *usb_cdc_dmm_desc; + struct usb_cdc_mdlm_desc *usb_cdc_mdlm_desc; + struct usb_cdc_mdlm_detail_desc *usb_cdc_mdlm_detail_desc; + struct usb_cdc_obex_desc *usb_cdc_obex_desc; + struct usb_cdc_ncm_desc *usb_cdc_ncm_desc; + struct usb_cdc_mbim_desc *usb_cdc_mbim_desc; + struct usb_cdc_mbim_extended_desc *usb_cdc_mbim_extended_desc; + bool phonet_magic_present; +}; + +struct api_context { + struct completion done; + int status; +}; + +struct set_config_request { + struct usb_device *udev; + int config; + struct work_struct work; + struct list_head node; +}; + +struct usb_dynid { + struct list_head 
node; + struct usb_device_id id; +}; + +struct usb_dev_cap_header { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; +}; + +struct usb_class_driver { + char *name; + char * (*devnode)(struct device *, umode_t *); + const struct file_operations *fops; + int minor_base; +}; + +struct usb_class { + struct kref kref; + struct class *class; +}; + +struct ep_device { + struct usb_endpoint_descriptor *desc; + struct usb_device *udev; + struct device dev; +}; + +struct usbdevfs_ctrltransfer { + __u8 bRequestType; + __u8 bRequest; + __u16 wValue; + __u16 wIndex; + __u16 wLength; + __u32 timeout; + void *data; +}; + +struct usbdevfs_bulktransfer { + unsigned int ep; + unsigned int len; + unsigned int timeout; + void *data; +}; + +struct usbdevfs_setinterface { + unsigned int interface; + unsigned int altsetting; +}; + +struct usbdevfs_disconnectsignal { + unsigned int signr; + void *context; +}; + +struct usbdevfs_getdriver { + unsigned int interface; + char driver[256]; +}; + +struct usbdevfs_connectinfo { + unsigned int devnum; + unsigned char slow; +}; + +struct usbdevfs_conninfo_ex { + __u32 size; + __u32 busnum; + __u32 devnum; + __u32 speed; + __u8 num_ports; + __u8 ports[7]; +}; + +struct usbdevfs_iso_packet_desc { + unsigned int length; + unsigned int actual_length; + unsigned int status; +}; + +struct usbdevfs_urb { + unsigned char type; + unsigned char endpoint; + int status; + unsigned int flags; + void *buffer; + int buffer_length; + int actual_length; + int start_frame; + union { + int number_of_packets; + unsigned int stream_id; + }; + int error_count; + unsigned int signr; + void *usercontext; + struct usbdevfs_iso_packet_desc iso_frame_desc[0]; +}; + +struct usbdevfs_ioctl { + int ifno; + int ioctl_code; + void *data; +}; + +struct usbdevfs_disconnect_claim { + unsigned int interface; + unsigned int flags; + char driver[256]; +}; + +struct usbdevfs_streams { + unsigned int num_streams; + unsigned int num_eps; + unsigned char eps[0]; +}; + +struct usbdevfs_ctrltransfer32 { + u8 bRequestType; + u8 bRequest; + u16 wValue; + u16 wIndex; + u16 wLength; + u32 timeout; + compat_caddr_t data; +}; + +struct usbdevfs_bulktransfer32 { + compat_uint_t ep; + compat_uint_t len; + compat_uint_t timeout; + compat_caddr_t data; +}; + +struct usbdevfs_disconnectsignal32 { + compat_int_t signr; + compat_caddr_t context; +}; + +struct usbdevfs_urb32 { + unsigned char type; + unsigned char endpoint; + compat_int_t status; + compat_uint_t flags; + compat_caddr_t buffer; + compat_int_t buffer_length; + compat_int_t actual_length; + compat_int_t start_frame; + compat_int_t number_of_packets; + compat_int_t error_count; + compat_uint_t signr; + compat_caddr_t usercontext; + struct usbdevfs_iso_packet_desc iso_frame_desc[0]; +}; + +struct usbdevfs_ioctl32 { + s32 ifno; + s32 ioctl_code; + compat_caddr_t data; +}; + +struct usb_dev_state___2 { + struct list_head list; + struct usb_device *dev; + struct file *file; + spinlock_t lock; + struct list_head async_pending; + struct list_head async_completed; + struct list_head memory_list; + wait_queue_head_t wait; + wait_queue_head_t wait_for_resume; + unsigned int discsignr; + struct pid *disc_pid; + const struct cred *cred; + sigval_t disccontext; + long unsigned int ifclaimed; + u32 disabled_bulk_eps; + long unsigned int interface_allowed_mask; + int not_yet_resumed; + bool suspend_allowed; + bool privileges_dropped; +}; + +struct usb_memory { + struct list_head memlist; + int vma_use_count; + int urb_use_count; + u32 size; + void *mem; + 
dma_addr_t dma_handle; + long unsigned int vm_start; + struct usb_dev_state___2 *ps; +}; + +struct async { + struct list_head asynclist; + struct usb_dev_state___2 *ps; + struct pid *pid; + const struct cred *cred; + unsigned int signr; + unsigned int ifnum; + void *userbuffer; + void *userurb; + sigval_t userurb_sigval; + struct urb *urb; + struct usb_memory *usbm; + unsigned int mem_usage; + int status; + u8 bulk_addr; + u8 bulk_status; +}; + +enum snoop_when { + SUBMIT = 0, + COMPLETE = 1, +}; + +struct quirk_entry { + u16 vid; + u16 pid; + u32 flags; +}; + +struct class_info { + int class; + char *class_name; +}; + +struct usb_phy_roothub___2 { + struct phy *phy; + struct list_head list; +}; + +typedef void (*companion_fn)(struct pci_dev *, struct usb_hcd *, struct pci_dev *, struct usb_hcd *); + +struct phy_devm { + struct usb_phy *phy; + struct notifier_block *nb; +}; + +struct usb_ep; + +struct usb_request { + void *buf; + unsigned int length; + dma_addr_t dma; + struct scatterlist *sg; + unsigned int num_sgs; + unsigned int num_mapped_sgs; + unsigned int stream_id: 16; + unsigned int is_last: 1; + unsigned int no_interrupt: 1; + unsigned int zero: 1; + unsigned int short_not_ok: 1; + unsigned int dma_mapped: 1; + void (*complete)(struct usb_ep *, struct usb_request *); + void *context; + struct list_head list; + unsigned int frame_number; + int status; + unsigned int actual; +}; + +struct usb_ep_caps { + unsigned int type_control: 1; + unsigned int type_iso: 1; + unsigned int type_bulk: 1; + unsigned int type_int: 1; + unsigned int dir_in: 1; + unsigned int dir_out: 1; +}; + +struct usb_ep_ops; + +struct usb_ep { + void *driver_data; + const char *name; + const struct usb_ep_ops *ops; + struct list_head ep_list; + struct usb_ep_caps caps; + bool claimed; + bool enabled; + unsigned int maxpacket: 16; + unsigned int maxpacket_limit: 16; + unsigned int max_streams: 16; + unsigned int mult: 2; + unsigned int maxburst: 5; + u8 address; + const struct usb_endpoint_descriptor *desc; + const struct usb_ss_ep_comp_descriptor *comp_desc; +}; + +struct usb_ep_ops { + int (*enable)(struct usb_ep *, const struct usb_endpoint_descriptor *); + int (*disable)(struct usb_ep *); + void (*dispose)(struct usb_ep *); + struct usb_request * (*alloc_request)(struct usb_ep *, gfp_t); + void (*free_request)(struct usb_ep *, struct usb_request *); + int (*queue)(struct usb_ep *, struct usb_request *, gfp_t); + int (*dequeue)(struct usb_ep *, struct usb_request *); + int (*set_halt)(struct usb_ep *, int); + int (*set_wedge)(struct usb_ep *); + int (*fifo_status)(struct usb_ep *); + void (*fifo_flush)(struct usb_ep *); +}; + +struct usb_dcd_config_params { + __u8 bU1devExitLat; + __le16 bU2DevExitLat; + __u8 besl_baseline; + __u8 besl_deep; +}; + +struct usb_gadget_driver; + +struct usb_gadget_ops { + int (*get_frame)(struct usb_gadget *); + int (*wakeup)(struct usb_gadget *); + int (*set_selfpowered)(struct usb_gadget *, int); + int (*vbus_session)(struct usb_gadget *, int); + int (*vbus_draw)(struct usb_gadget *, unsigned int); + int (*pullup)(struct usb_gadget *, int); + int (*ioctl)(struct usb_gadget *, unsigned int, long unsigned int); + void (*get_config_params)(struct usb_gadget *, struct usb_dcd_config_params *); + int (*udc_start)(struct usb_gadget *, struct usb_gadget_driver *); + int (*udc_stop)(struct usb_gadget *); + void (*udc_set_speed)(struct usb_gadget *, enum usb_device_speed); + void (*udc_set_ssp_rate)(struct usb_gadget *, enum usb_ssp_rate); + struct usb_ep * (*match_ep)(struct usb_gadget 
*, struct usb_endpoint_descriptor *, struct usb_ss_ep_comp_descriptor *); +}; + +struct usb_udc; + +struct usb_otg_caps; + +struct usb_gadget { + struct work_struct work; + struct usb_udc *udc; + const struct usb_gadget_ops *ops; + struct usb_ep *ep0; + struct list_head ep_list; + enum usb_device_speed speed; + enum usb_device_speed max_speed; + enum usb_ssp_rate ssp_rate; + enum usb_ssp_rate max_ssp_rate; + enum usb_device_state state; + const char *name; + struct device dev; + unsigned int isoch_delay; + unsigned int out_epnum; + unsigned int in_epnum; + unsigned int mA; + struct usb_otg_caps *otg_caps; + unsigned int sg_supported: 1; + unsigned int is_otg: 1; + unsigned int is_a_peripheral: 1; + unsigned int b_hnp_enable: 1; + unsigned int a_hnp_support: 1; + unsigned int a_alt_hnp_support: 1; + unsigned int hnp_polling_support: 1; + unsigned int host_request_flag: 1; + unsigned int quirk_ep_out_aligned_size: 1; + unsigned int quirk_altset_not_supp: 1; + unsigned int quirk_stall_not_supp: 1; + unsigned int quirk_zlp_not_supp: 1; + unsigned int quirk_avoids_skb_reserve: 1; + unsigned int is_selfpowered: 1; + unsigned int deactivated: 1; + unsigned int connected: 1; + unsigned int lpm_capable: 1; + int irq; +}; + +struct usb_gadget_driver { + char *function; + enum usb_device_speed max_speed; + int (*bind)(struct usb_gadget *, struct usb_gadget_driver *); + void (*unbind)(struct usb_gadget *); + int (*setup)(struct usb_gadget *, const struct usb_ctrlrequest *); + void (*disconnect)(struct usb_gadget *); + void (*suspend)(struct usb_gadget *); + void (*resume)(struct usb_gadget *); + void (*reset)(struct usb_gadget *); + struct device_driver driver; + char *udc_name; + struct list_head pending; + unsigned int match_existing_only: 1; +}; + +struct usb_otg_caps { + u16 otg_rev; + bool hnp_support; + bool srp_support; + bool adp_support; +}; + +struct dwc2_dma_desc { + u32 status; + u32 buf; +}; + +struct dwc2_hw_params { + unsigned int op_mode: 3; + unsigned int arch: 2; + unsigned int dma_desc_enable: 1; + unsigned int enable_dynamic_fifo: 1; + unsigned int en_multiple_tx_fifo: 1; + unsigned int rx_fifo_size: 16; + char: 8; + unsigned int host_nperio_tx_fifo_size: 16; + unsigned int dev_nperio_tx_fifo_size: 16; + unsigned int host_perio_tx_fifo_size: 16; + unsigned int nperio_tx_q_depth: 3; + unsigned int host_perio_tx_q_depth: 3; + unsigned int dev_token_q_depth: 5; + char: 5; + unsigned int max_transfer_size: 26; + char: 6; + unsigned int max_packet_count: 11; + unsigned int host_channels: 5; + unsigned int hs_phy_type: 2; + unsigned int fs_phy_type: 2; + unsigned int i2c_enable: 1; + unsigned int acg_enable: 1; + unsigned int num_dev_ep: 4; + unsigned int num_dev_in_eps: 4; + char: 2; + unsigned int num_dev_perio_in_ep: 4; + unsigned int total_fifo_size: 16; + unsigned int power_optimized: 1; + unsigned int hibernation: 1; + unsigned int utmi_phy_data_width: 2; + unsigned int lpm_mode: 1; + unsigned int ipg_isoc_en: 1; + unsigned int service_interval_mode: 1; + u32 snpsid; + u32 dev_ep_dirs; + u32 g_tx_fifo_size[16]; +}; + +struct dwc2_core_params { + u8 otg_cap; + u8 phy_type; + u8 speed; + u8 phy_utmi_width; + bool phy_ulpi_ddr; + bool phy_ulpi_ext_vbus; + bool enable_dynamic_fifo; + bool en_multiple_tx_fifo; + bool i2c_enable; + bool acg_enable; + bool ulpi_fs_ls; + bool ts_dline; + bool reload_ctl; + bool uframe_sched; + bool external_id_pin_ctl; + int power_down; + bool no_clock_gating; + bool lpm; + bool lpm_clock_gating; + bool besl; + bool hird_threshold_en; + bool 
service_interval; + u8 hird_threshold; + bool activate_stm_fs_transceiver; + bool activate_stm_id_vb_detection; + bool ipg_isoc_en; + u16 max_packet_count; + u32 max_transfer_size; + u32 ahbcfg; + u32 ref_clk_per; + u16 sof_cnt_wkup_alert; + bool host_dma; + bool dma_desc_enable; + bool dma_desc_fs_enable; + bool host_support_fs_ls_low_power; + bool host_ls_low_power_phy_clk; + bool oc_disable; + u8 host_channels; + u16 host_rx_fifo_size; + u16 host_nperio_tx_fifo_size; + u16 host_perio_tx_fifo_size; + bool g_dma; + bool g_dma_desc; + u32 g_rx_fifo_size; + u32 g_np_tx_fifo_size; + u32 g_tx_fifo_size[16]; + bool change_speed_quirk; +}; + +enum dwc2_lx_state { + DWC2_L0 = 0, + DWC2_L1 = 1, + DWC2_L2 = 2, + DWC2_L3 = 3, +}; + +struct dwc2_gregs_backup { + u32 gotgctl; + u32 gintmsk; + u32 gahbcfg; + u32 gusbcfg; + u32 grxfsiz; + u32 gnptxfsiz; + u32 gi2cctl; + u32 glpmcfg; + u32 pcgcctl; + u32 pcgcctl1; + u32 gdfifocfg; + u32 gpwrdn; + bool valid; +}; + +struct dwc2_dregs_backup { + u32 dcfg; + u32 dctl; + u32 daintmsk; + u32 diepmsk; + u32 doepmsk; + u32 diepctl[16]; + u32 dieptsiz[16]; + u32 diepdma[16]; + u32 doepctl[16]; + u32 doeptsiz[16]; + u32 doepdma[16]; + u32 dtxfsiz[16]; + bool valid; +}; + +struct dwc2_hregs_backup { + u32 hcfg; + u32 haintmsk; + u32 hcintmsk[16]; + u32 hprt0; + u32 hfir; + u32 hptxfsiz; + bool valid; +}; + +union dwc2_hcd_internal_flags { + u32 d32; + struct { + unsigned int port_connect_status_change: 1; + unsigned int port_connect_status: 1; + unsigned int port_reset_change: 1; + unsigned int port_enable_change: 1; + unsigned int port_suspend_change: 1; + unsigned int port_over_current_change: 1; + unsigned int port_l1_change: 1; + unsigned int reserved: 25; + } b; +}; + +struct usb_role_switch; + +struct dwc2_hsotg_plat; + +struct dwc2_host_chan; + +struct dwc2_hsotg { + struct device *dev; + void *regs; + struct dwc2_hw_params hw_params; + struct dwc2_core_params params; + enum usb_otg_state op_state; + enum usb_dr_mode dr_mode; + struct usb_role_switch *role_sw; + unsigned int hcd_enabled: 1; + unsigned int gadget_enabled: 1; + unsigned int ll_hw_enabled: 1; + unsigned int hibernated: 1; + unsigned int in_ppd: 1; + bool bus_suspended; + unsigned int reset_phy_on_wake: 1; + unsigned int need_phy_for_wake: 1; + unsigned int phy_off_for_suspend: 1; + u16 frame_number; + struct phy *phy; + struct usb_phy *uphy; + struct dwc2_hsotg_plat *plat; + struct regulator_bulk_data supplies[2]; + struct regulator *vbus_supply; + struct regulator *usb33d; + spinlock_t lock; + void *priv; + int irq; + struct clk *clk; + struct reset_control *reset; + struct reset_control *reset_ecc; + unsigned int queuing_high_bandwidth: 1; + unsigned int srp_success: 1; + struct workqueue_struct *wq_otg; + struct work_struct wf_otg; + struct timer_list wkp_timer; + enum dwc2_lx_state lx_state; + struct dwc2_gregs_backup gr_backup; + struct dwc2_dregs_backup dr_backup; + struct dwc2_hregs_backup hr_backup; + struct dentry *debug_root; + struct debugfs_regset32 *regset; + bool needs_byte_swap; + union dwc2_hcd_internal_flags flags; + struct list_head non_periodic_sched_inactive; + struct list_head non_periodic_sched_waiting; + struct list_head non_periodic_sched_active; + struct list_head *non_periodic_qh_ptr; + struct list_head periodic_sched_inactive; + struct list_head periodic_sched_ready; + struct list_head periodic_sched_assigned; + struct list_head periodic_sched_queued; + struct list_head split_order; + u16 periodic_usecs; + long unsigned int hs_periodic_bitmap[13]; + u16 
periodic_qh_count; + bool new_connection; + u16 last_frame_num; + struct list_head free_hc_list; + int periodic_channels; + int non_periodic_channels; + int available_host_channels; + struct dwc2_host_chan *hc_ptr_array[16]; + u8 *status_buf; + dma_addr_t status_buf_dma; + struct delayed_work start_work; + struct delayed_work reset_work; + struct work_struct phy_reset_work; + u8 otg_port; + u32 *frame_list; + dma_addr_t frame_list_dma; + u32 frame_list_sz; + struct kmem_cache *desc_gen_cache; + struct kmem_cache *desc_hsisoc_cache; + struct kmem_cache *unaligned_cache; +}; + +enum dwc2_halt_status { + DWC2_HC_XFER_NO_HALT_STATUS = 0, + DWC2_HC_XFER_COMPLETE = 1, + DWC2_HC_XFER_URB_COMPLETE = 2, + DWC2_HC_XFER_ACK = 3, + DWC2_HC_XFER_NAK = 4, + DWC2_HC_XFER_NYET = 5, + DWC2_HC_XFER_STALL = 6, + DWC2_HC_XFER_XACT_ERR = 7, + DWC2_HC_XFER_FRAME_OVERRUN = 8, + DWC2_HC_XFER_BABBLE_ERR = 9, + DWC2_HC_XFER_DATA_TOGGLE_ERR = 10, + DWC2_HC_XFER_AHB_ERR = 11, + DWC2_HC_XFER_PERIODIC_INCOMPLETE = 12, + DWC2_HC_XFER_URB_DEQUEUE = 13, +}; + +struct dwc2_qh; + +struct dwc2_host_chan { + u8 hc_num; + unsigned int dev_addr: 7; + unsigned int ep_num: 4; + unsigned int ep_is_in: 1; + unsigned int speed: 4; + unsigned int ep_type: 2; + char: 6; + unsigned int max_packet: 11; + unsigned int data_pid_start: 2; + unsigned int multi_count: 2; + u8 *xfer_buf; + dma_addr_t xfer_dma; + dma_addr_t align_buf; + u32 xfer_len; + u32 xfer_count; + u16 start_pkt_count; + u8 xfer_started; + u8 do_ping; + u8 error_state; + u8 halt_on_queue; + u8 halt_pending; + u8 do_split; + u8 complete_split; + u8 hub_addr; + u8 hub_port; + u8 xact_pos; + u8 requests; + u8 schinfo; + u16 ntd; + enum dwc2_halt_status halt_status; + u32 hcint; + struct dwc2_qh *qh; + struct list_head hc_list_entry; + dma_addr_t desc_list_addr; + u32 desc_list_sz; + struct list_head split_order_list_entry; +}; + +struct dwc2_hs_transfer_time { + u32 start_schedule_us; + u16 duration_us; +}; + +struct dwc2_tt; + +struct dwc2_qh { + struct dwc2_hsotg *hsotg; + u8 ep_type; + u8 ep_is_in; + u16 maxp; + u16 maxp_mult; + u8 dev_speed; + u8 data_toggle; + u8 ping_state; + u8 do_split; + u8 td_first; + u8 td_last; + u16 host_us; + u16 device_us; + u16 host_interval; + u16 device_interval; + u16 next_active_frame; + u16 start_active_frame; + s16 num_hs_transfers; + struct dwc2_hs_transfer_time hs_transfers[8]; + u32 ls_start_schedule_slice; + u16 ntd; + u8 *dw_align_buf; + dma_addr_t dw_align_buf_dma; + struct list_head qtd_list; + struct dwc2_host_chan *channel; + struct list_head qh_list_entry; + struct dwc2_dma_desc *desc_list; + dma_addr_t desc_list_dma; + u32 desc_list_sz; + u32 *n_bytes; + struct timer_list unreserve_timer; + struct hrtimer wait_timer; + struct dwc2_tt *dwc_tt; + int ttport; + unsigned int tt_buffer_dirty: 1; + unsigned int unreserve_pending: 1; + unsigned int schedule_low_speed: 1; + unsigned int want_wait: 1; + unsigned int wait_timer_cancel: 1; +}; + +struct dwc2_tt { + int refcount; + struct usb_tt *usb_tt; + long unsigned int periodic_bitmaps[0]; +}; + +enum dwc2_hsotg_dmamode { + S3C_HSOTG_DMA_NONE = 0, + S3C_HSOTG_DMA_ONLY = 1, + S3C_HSOTG_DMA_DRV = 2, +}; + +struct dwc2_hsotg_plat { + enum dwc2_hsotg_dmamode dma; + unsigned int is_osc: 1; + int phy_type; + int (*phy_init)(struct platform_device *, int); + int (*phy_exit)(struct platform_device *, int); +}; + +enum usb_role { + USB_ROLE_NONE = 0, + USB_ROLE_HOST = 1, + USB_ROLE_DEVICE = 2, +}; + +typedef int (*usb_role_switch_set_t)(struct usb_role_switch *, enum usb_role); + +typedef 
enum usb_role (*usb_role_switch_get_t)(struct usb_role_switch *); + +struct usb_role_switch_desc { + struct fwnode_handle *fwnode; + struct device *usb2_port; + struct device *usb3_port; + struct device *udc; + usb_role_switch_set_t set; + usb_role_switch_get_t get; + bool allow_userspace_control; + void *driver_data; + const char *name; +}; + +typedef void (*set_params_cb)(struct dwc2_hsotg *); + +struct dwc2_hcd_pipe_info { + u8 dev_addr; + u8 ep_num; + u8 pipe_type; + u8 pipe_dir; + u16 maxp; + u16 maxp_mult; +}; + +struct dwc2_hcd_iso_packet_desc { + u32 offset; + u32 length; + u32 actual_length; + u32 status; +}; + +struct dwc2_qtd; + +struct dwc2_hcd_urb { + void *priv; + struct dwc2_qtd *qtd; + void *buf; + dma_addr_t dma; + void *setup_packet; + dma_addr_t setup_dma; + u32 length; + u32 actual_length; + u32 status; + u32 error_count; + u32 packet_count; + u32 flags; + u16 interval; + struct dwc2_hcd_pipe_info pipe_info; + struct dwc2_hcd_iso_packet_desc iso_descs[0]; +}; + +enum dwc2_control_phase { + DWC2_CONTROL_SETUP = 0, + DWC2_CONTROL_DATA = 1, + DWC2_CONTROL_STATUS = 2, +}; + +struct dwc2_qtd { + enum dwc2_control_phase control_phase; + u8 in_process; + u8 data_toggle; + u8 complete_split; + u8 isoc_split_pos; + u16 isoc_frame_index; + u16 isoc_split_offset; + u16 isoc_td_last; + u16 isoc_td_first; + u32 ssplit_out_xfer_count; + u8 error_count; + u8 n_desc; + u16 isoc_frame_index_last; + u16 num_naks; + struct dwc2_hcd_urb *urb; + struct dwc2_qh *qh; + struct list_head qtd_list_entry; +}; + +enum dwc2_transaction_type { + DWC2_TRANSACTION_NONE = 0, + DWC2_TRANSACTION_PERIODIC = 1, + DWC2_TRANSACTION_NON_PERIODIC = 2, + DWC2_TRANSACTION_ALL = 3, +}; + +struct wrapper_priv_data { + struct dwc2_hsotg *hsotg; +}; + +enum amd_chipset_gen { + NOT_AMD_CHIPSET = 0, + AMD_CHIPSET_SB600 = 1, + AMD_CHIPSET_SB700 = 2, + AMD_CHIPSET_SB800 = 3, + AMD_CHIPSET_HUDSON2 = 4, + AMD_CHIPSET_BOLTON = 5, + AMD_CHIPSET_YANGTZE = 6, + AMD_CHIPSET_TAISHAN = 7, + AMD_CHIPSET_UNKNOWN = 8, +}; + +struct amd_chipset_type { + enum amd_chipset_gen gen; + u8 rev; +}; + +struct amd_chipset_info { + struct pci_dev *nb_dev; + struct pci_dev *smbus_dev; + int nb_type; + struct amd_chipset_type sb_type; + int isoc_reqs; + int probe_count; + bool need_pll_quirk; +}; + +struct ehci_stats { + long unsigned int normal; + long unsigned int error; + long unsigned int iaa; + long unsigned int lost_iaa; + long unsigned int complete; + long unsigned int unlink; +}; + +struct ehci_per_sched { + struct usb_device *udev; + struct usb_host_endpoint *ep; + struct list_head ps_list; + u16 tt_usecs; + u16 cs_mask; + u16 period; + u16 phase; + u8 bw_phase; + u8 phase_uf; + u8 usecs; + u8 c_usecs; + u8 bw_uperiod; + u8 bw_period; +}; + +enum ehci_rh_state { + EHCI_RH_HALTED = 0, + EHCI_RH_SUSPENDED = 1, + EHCI_RH_RUNNING = 2, + EHCI_RH_STOPPING = 3, +}; + +enum ehci_hrtimer_event { + EHCI_HRTIMER_POLL_ASS = 0, + EHCI_HRTIMER_POLL_PSS = 1, + EHCI_HRTIMER_POLL_DEAD = 2, + EHCI_HRTIMER_UNLINK_INTR = 3, + EHCI_HRTIMER_FREE_ITDS = 4, + EHCI_HRTIMER_ACTIVE_UNLINK = 5, + EHCI_HRTIMER_START_UNLINK_INTR = 6, + EHCI_HRTIMER_ASYNC_UNLINKS = 7, + EHCI_HRTIMER_IAA_WATCHDOG = 8, + EHCI_HRTIMER_DISABLE_PERIODIC = 9, + EHCI_HRTIMER_DISABLE_ASYNC = 10, + EHCI_HRTIMER_IO_WATCHDOG = 11, + EHCI_HRTIMER_NUM_EVENTS = 12, +}; + +struct ehci_caps; + +struct ehci_regs; + +struct ehci_dbg_port; + +struct ehci_qh; + +union ehci_shadow; + +struct ehci_itd; + +struct ehci_sitd; + +struct ehci_hcd { + enum ehci_hrtimer_event next_hrtimer_event; + unsigned int 
enabled_hrtimer_events; + ktime_t hr_timeouts[12]; + struct hrtimer hrtimer; + int PSS_poll_count; + int ASS_poll_count; + int died_poll_count; + struct ehci_caps *caps; + struct ehci_regs *regs; + struct ehci_dbg_port *debug; + __u32 hcs_params; + spinlock_t lock; + enum ehci_rh_state rh_state; + bool scanning: 1; + bool need_rescan: 1; + bool intr_unlinking: 1; + bool iaa_in_progress: 1; + bool async_unlinking: 1; + bool shutdown: 1; + struct ehci_qh *qh_scan_next; + struct ehci_qh *async; + struct ehci_qh *dummy; + struct list_head async_unlink; + struct list_head async_idle; + unsigned int async_unlink_cycle; + unsigned int async_count; + __le32 old_current; + __le32 old_token; + unsigned int periodic_size; + __le32 *periodic; + dma_addr_t periodic_dma; + struct list_head intr_qh_list; + unsigned int i_thresh; + union ehci_shadow *pshadow; + struct list_head intr_unlink_wait; + struct list_head intr_unlink; + unsigned int intr_unlink_wait_cycle; + unsigned int intr_unlink_cycle; + unsigned int now_frame; + unsigned int last_iso_frame; + unsigned int intr_count; + unsigned int isoc_count; + unsigned int periodic_count; + unsigned int uframe_periodic_max; + struct list_head cached_itd_list; + struct ehci_itd *last_itd_to_free; + struct list_head cached_sitd_list; + struct ehci_sitd *last_sitd_to_free; + long unsigned int reset_done[15]; + long unsigned int bus_suspended; + long unsigned int companion_ports; + long unsigned int owned_ports; + long unsigned int port_c_suspend; + long unsigned int suspended_ports; + long unsigned int resuming_ports; + struct dma_pool___2 *qh_pool; + struct dma_pool___2 *qtd_pool; + struct dma_pool___2 *itd_pool; + struct dma_pool___2 *sitd_pool; + unsigned int random_frame; + long unsigned int next_statechange; + ktime_t last_periodic_enable; + u32 command; + unsigned int no_selective_suspend: 1; + unsigned int has_fsl_port_bug: 1; + unsigned int has_fsl_hs_errata: 1; + unsigned int has_fsl_susp_errata: 1; + unsigned int big_endian_mmio: 1; + unsigned int big_endian_desc: 1; + unsigned int big_endian_capbase: 1; + unsigned int has_amcc_usb23: 1; + unsigned int need_io_watchdog: 1; + unsigned int amd_pll_fix: 1; + unsigned int use_dummy_qh: 1; + unsigned int has_synopsys_hc_bug: 1; + unsigned int frame_index_bug: 1; + unsigned int need_oc_pp_cycle: 1; + unsigned int imx28_write_fix: 1; + unsigned int spurious_oc: 1; + __le32 *ohci_hcctrl_reg; + unsigned int has_hostpc: 1; + unsigned int has_tdi_phy_lpm: 1; + unsigned int has_ppcd: 1; + u8 sbrn; + struct ehci_stats stats; + struct dentry *debug_dir; + u8 bandwidth[64]; + u8 tt_budget[64]; + struct list_head tt_list; + long unsigned int priv[0]; +}; + +struct ehci_caps { + u32 hc_capbase; + u32 hcs_params; + u32 hcc_params; + u8 portroute[8]; +}; + +struct ehci_regs { + u32 command; + u32 status; + u32 intr_enable; + u32 frame_index; + u32 segment; + u32 frame_list; + u32 async_next; + u32 reserved1[2]; + u32 txfill_tuning; + u32 reserved2[6]; + u32 configured_flag; + u32 port_status[0]; + u32 reserved3[9]; + u32 usbmode; + u32 reserved4[6]; + u32 hostpc[0]; + u32 reserved5[17]; + u32 usbmode_ex; +}; + +struct ehci_dbg_port { + u32 control; + u32 pids; + u32 data03; + u32 data47; + u32 address; +}; + +struct ehci_fstn; + +union ehci_shadow { + struct ehci_qh *qh; + struct ehci_itd *itd; + struct ehci_sitd *sitd; + struct ehci_fstn *fstn; + __le32 *hw_next; + void *ptr; +}; + +struct ehci_qh_hw; + +struct ehci_qtd; + +struct ehci_qh { + struct ehci_qh_hw *hw; + dma_addr_t qh_dma; + union ehci_shadow qh_next; + 
struct list_head qtd_list; + struct list_head intr_node; + struct ehci_qtd *dummy; + struct list_head unlink_node; + struct ehci_per_sched ps; + unsigned int unlink_cycle; + u8 qh_state; + u8 xacterrs; + u8 unlink_reason; + u8 gap_uf; + unsigned int is_out: 1; + unsigned int clearing_tt: 1; + unsigned int dequeue_during_giveback: 1; + unsigned int should_be_inactive: 1; +}; + +struct ehci_iso_stream; + +struct ehci_itd { + __le32 hw_next; + __le32 hw_transaction[8]; + __le32 hw_bufp[7]; + __le32 hw_bufp_hi[7]; + dma_addr_t itd_dma; + union ehci_shadow itd_next; + struct urb *urb; + struct ehci_iso_stream *stream; + struct list_head itd_list; + unsigned int frame; + unsigned int pg; + unsigned int index[8]; + long: 64; +}; + +struct ehci_sitd { + __le32 hw_next; + __le32 hw_fullspeed_ep; + __le32 hw_uframe; + __le32 hw_results; + __le32 hw_buf[2]; + __le32 hw_backpointer; + __le32 hw_buf_hi[2]; + dma_addr_t sitd_dma; + union ehci_shadow sitd_next; + struct urb *urb; + struct ehci_iso_stream *stream; + struct list_head sitd_list; + unsigned int frame; + unsigned int index; +}; + +struct ehci_qtd { + __le32 hw_next; + __le32 hw_alt_next; + __le32 hw_token; + __le32 hw_buf[5]; + __le32 hw_buf_hi[5]; + dma_addr_t qtd_dma; + struct list_head qtd_list; + struct urb *urb; + size_t length; +}; + +struct ehci_fstn { + __le32 hw_next; + __le32 hw_prev; + dma_addr_t fstn_dma; + union ehci_shadow fstn_next; + long: 64; +}; + +struct ehci_qh_hw { + __le32 hw_next; + __le32 hw_info1; + __le32 hw_info2; + __le32 hw_current; + __le32 hw_qtd_next; + __le32 hw_alt_next; + __le32 hw_token; + __le32 hw_buf[5]; + __le32 hw_buf_hi[5]; + long: 32; + long: 64; + long: 64; + long: 64; +}; + +struct ehci_iso_packet { + u64 bufp; + __le32 transaction; + u8 cross; + u32 buf1; +}; + +struct ehci_iso_sched { + struct list_head td_list; + unsigned int span; + unsigned int first_packet; + struct ehci_iso_packet packet[0]; +}; + +struct ehci_iso_stream { + struct ehci_qh_hw *hw; + u8 bEndpointAddress; + u8 highspeed; + struct list_head td_list; + struct list_head free_list; + struct ehci_per_sched ps; + unsigned int next_uframe; + __le32 splits; + u16 uperiod; + u16 maxp; + unsigned int bandwidth; + __le32 buf0; + __le32 buf1; + __le32 buf2; + __le32 address; +}; + +struct ehci_tt { + u16 bandwidth[8]; + struct list_head tt_list; + struct list_head ps_list; + struct usb_tt *usb_tt; + int tt_port; +}; + +struct ehci_driver_overrides { + size_t extra_priv_size; + int (*reset)(struct usb_hcd *); + int (*port_power)(struct usb_hcd *, int, bool); +}; + +struct debug_buffer { + ssize_t (*fill_func)(struct debug_buffer *); + struct usb_bus *bus; + struct mutex mutex; + size_t count; + char *output_buf; + size_t alloc_size; +}; + +struct soc_device_attribute { + const char *machine; + const char *family; + const char *revision; + const char *serial_number; + const char *soc_id; + const void *data; + const struct attribute_group *custom_attr_group; +}; + +struct usb_ehci_pdata { + int caps_offset; + unsigned int has_tt: 1; + unsigned int has_synopsys_hc_bug: 1; + unsigned int big_endian_desc: 1; + unsigned int big_endian_mmio: 1; + unsigned int no_io_watchdog: 1; + unsigned int reset_on_resume: 1; + unsigned int dma_mask_64: 1; + unsigned int spurious_oc: 1; + int (*power_on)(struct platform_device *); + void (*power_off)(struct platform_device *); + void (*power_suspend)(struct platform_device *); + int (*pre_setup)(struct usb_hcd *); +}; + +struct ehci_platform_priv { + struct clk *clks[4]; + struct reset_control *rsts; + bool 
reset_on_resume; + bool quirk_poll; + struct timer_list poll_timer; + struct delayed_work poll_work; +}; + +typedef __u32 __hc32; + +typedef __u16 __hc16; + +struct td; + +struct ed { + __hc32 hwINFO; + __hc32 hwTailP; + __hc32 hwHeadP; + __hc32 hwNextED; + dma_addr_t dma; + struct td *dummy; + struct ed *ed_next; + struct ed *ed_prev; + struct list_head td_list; + struct list_head in_use_list; + u8 state; + u8 type; + u8 branch; + u16 interval; + u16 load; + u16 last_iso; + u16 tick; + unsigned int takeback_wdh_cnt; + struct td *pending_td; + long: 64; +}; + +struct td { + __hc32 hwINFO; + __hc32 hwCBP; + __hc32 hwNextTD; + __hc32 hwBE; + __hc16 hwPSW[2]; + __u8 index; + struct ed *ed; + struct td *td_hash; + struct td *next_dl_td; + struct urb *urb; + dma_addr_t td_dma; + dma_addr_t data_dma; + struct list_head td_list; + long: 64; +}; + +struct ohci_hcca { + __hc32 int_table[32]; + __hc32 frame_no; + __hc32 done_head; + u8 reserved_for_hc[116]; + u8 what[4]; +}; + +struct ohci_roothub_regs { + __hc32 a; + __hc32 b; + __hc32 status; + __hc32 portstatus[15]; +}; + +struct ohci_regs { + __hc32 revision; + __hc32 control; + __hc32 cmdstatus; + __hc32 intrstatus; + __hc32 intrenable; + __hc32 intrdisable; + __hc32 hcca; + __hc32 ed_periodcurrent; + __hc32 ed_controlhead; + __hc32 ed_controlcurrent; + __hc32 ed_bulkhead; + __hc32 ed_bulkcurrent; + __hc32 donehead; + __hc32 fminterval; + __hc32 fmremaining; + __hc32 fmnumber; + __hc32 periodicstart; + __hc32 lsthresh; + struct ohci_roothub_regs roothub; + long: 64; + long: 64; +}; + +struct urb_priv { + struct ed *ed; + u16 length; + u16 td_cnt; + struct list_head pending; + struct td *td[0]; +}; + +typedef struct urb_priv urb_priv_t; + +enum ohci_rh_state { + OHCI_RH_HALTED = 0, + OHCI_RH_SUSPENDED = 1, + OHCI_RH_RUNNING = 2, +}; + +struct ohci_hcd { + spinlock_t lock; + struct ohci_regs *regs; + struct ohci_hcca *hcca; + dma_addr_t hcca_dma; + struct ed *ed_rm_list; + struct ed *ed_bulktail; + struct ed *ed_controltail; + struct ed *periodic[32]; + void (*start_hnp)(struct ohci_hcd *); + struct dma_pool___2 *td_cache; + struct dma_pool___2 *ed_cache; + struct td *td_hash[64]; + struct td *dl_start; + struct td *dl_end; + struct list_head pending; + struct list_head eds_in_use; + enum ohci_rh_state rh_state; + int num_ports; + int load[32]; + u32 hc_control; + long unsigned int next_statechange; + u32 fminterval; + unsigned int autostop: 1; + unsigned int working: 1; + unsigned int restart_work: 1; + long unsigned int flags; + unsigned int prev_frame_no; + unsigned int wdh_cnt; + unsigned int prev_wdh_cnt; + u32 prev_donehead; + struct timer_list io_watchdog; + struct work_struct nec_work; + struct dentry *debug_dir; + long unsigned int priv[0]; +}; + +struct ohci_driver_overrides { + const char *product_desc; + size_t extra_priv_size; + int (*reset)(struct usb_hcd *); +}; + +struct debug_buffer___2 { + ssize_t (*fill_func)(struct debug_buffer___2 *); + struct ohci_hcd *ohci; + struct mutex mutex; + size_t count; + char *page; +}; + +struct usb_ohci_pdata { + unsigned int big_endian_desc: 1; + unsigned int big_endian_mmio: 1; + unsigned int no_big_frame_no: 1; + unsigned int num_ports; + int (*power_on)(struct platform_device *); + void (*power_off)(struct platform_device *); + void (*power_suspend)(struct platform_device *); +}; + +struct ohci_platform_priv { + struct clk *clks[3]; + struct reset_control *resets; +}; + +struct uhci_td; + +struct uhci_qh { + __le32 link; + __le32 element; + dma_addr_t dma_handle; + struct list_head node; + 
struct usb_host_endpoint *hep; + struct usb_device *udev; + struct list_head queue; + struct uhci_td *dummy_td; + struct uhci_td *post_td; + struct usb_iso_packet_descriptor *iso_packet_desc; + long unsigned int advance_jiffies; + unsigned int unlink_frame; + unsigned int period; + short int phase; + short int load; + unsigned int iso_frame; + int state; + int type; + int skel; + unsigned int initial_toggle: 1; + unsigned int needs_fixup: 1; + unsigned int is_stopped: 1; + unsigned int wait_expired: 1; + unsigned int bandwidth_reserved: 1; +}; + +struct uhci_td { + __le32 link; + __le32 status; + __le32 token; + __le32 buffer; + dma_addr_t dma_handle; + struct list_head list; + int frame; + struct list_head fl_list; +}; + +enum uhci_rh_state { + UHCI_RH_RESET = 0, + UHCI_RH_SUSPENDED = 1, + UHCI_RH_AUTO_STOPPED = 2, + UHCI_RH_RESUMING = 3, + UHCI_RH_SUSPENDING = 4, + UHCI_RH_RUNNING = 5, + UHCI_RH_RUNNING_NODEVS = 6, +}; + +struct uhci_hcd { + long unsigned int io_addr; + void *regs; + struct dma_pool___2 *qh_pool; + struct dma_pool___2 *td_pool; + struct uhci_td *term_td; + struct uhci_qh *skelqh[11]; + struct uhci_qh *next_qh; + spinlock_t lock; + dma_addr_t frame_dma_handle; + __le32 *frame; + void **frame_cpu; + enum uhci_rh_state rh_state; + long unsigned int auto_stop_time; + unsigned int frame_number; + unsigned int is_stopped; + unsigned int last_iso_frame; + unsigned int cur_iso_frame; + unsigned int scan_in_progress: 1; + unsigned int need_rescan: 1; + unsigned int dead: 1; + unsigned int RD_enable: 1; + unsigned int is_initialized: 1; + unsigned int fsbr_is_on: 1; + unsigned int fsbr_is_wanted: 1; + unsigned int fsbr_expiring: 1; + struct timer_list fsbr_timer; + unsigned int oc_low: 1; + unsigned int wait_for_hp: 1; + unsigned int big_endian_mmio: 1; + unsigned int big_endian_desc: 1; + unsigned int is_aspeed: 1; + long unsigned int port_c_suspend; + long unsigned int resuming_ports; + long unsigned int ports_timeout; + struct list_head idle_qh_list; + int rh_numports; + wait_queue_head_t waitqh; + int num_waiting; + int total_load; + short int load[32]; + struct clk *clk; + void (*reset_hc)(struct uhci_hcd *); + int (*check_and_reset_hc)(struct uhci_hcd *); + void (*configure_hc)(struct uhci_hcd *); + int (*resume_detect_interrupts_are_broken)(struct uhci_hcd *); + int (*global_suspend_mode_is_broken)(struct uhci_hcd *); +}; + +struct urb_priv___2 { + struct list_head node; + struct urb *urb; + struct uhci_qh *qh; + struct list_head td_list; + unsigned int fsbr: 1; +}; + +struct uhci_debug { + int size; + char *data; +}; + +struct xhci_cap_regs { + __le32 hc_capbase; + __le32 hcs_params1; + __le32 hcs_params2; + __le32 hcs_params3; + __le32 hcc_params; + __le32 db_off; + __le32 run_regs_off; + __le32 hcc_params2; +}; + +struct xhci_op_regs { + __le32 command; + __le32 status; + __le32 page_size; + __le32 reserved1; + __le32 reserved2; + __le32 dev_notification; + __le64 cmd_ring; + __le32 reserved3[4]; + __le64 dcbaa_ptr; + __le32 config_reg; + __le32 reserved4[241]; + __le32 port_status_base; + __le32 port_power_base; + __le32 port_link_base; + __le32 reserved5; + __le32 reserved6[1016]; +}; + +struct xhci_intr_reg { + __le32 irq_pending; + __le32 irq_control; + __le32 erst_size; + __le32 rsvd; + __le64 erst_base; + __le64 erst_dequeue; +}; + +struct xhci_run_regs { + __le32 microframe_index; + __le32 rsvd[7]; + struct xhci_intr_reg ir_set[128]; +}; + +struct xhci_doorbell_array { + __le32 doorbell[256]; +}; + +struct xhci_container_ctx { + unsigned int type; + int size; + u8 
*bytes; + dma_addr_t dma; +}; + +struct xhci_slot_ctx { + __le32 dev_info; + __le32 dev_info2; + __le32 tt_info; + __le32 dev_state; + __le32 reserved[4]; +}; + +struct xhci_ep_ctx { + __le32 ep_info; + __le32 ep_info2; + __le64 deq; + __le32 tx_info; + __le32 reserved[3]; +}; + +struct xhci_input_control_ctx { + __le32 drop_flags; + __le32 add_flags; + __le32 rsvd2[6]; +}; + +union xhci_trb; + +struct xhci_command { + struct xhci_container_ctx *in_ctx; + u32 status; + int slot_id; + struct completion *completion; + union xhci_trb *command_trb; + struct list_head cmd_list; +}; + +struct xhci_link_trb { + __le64 segment_ptr; + __le32 intr_target; + __le32 control; +}; + +struct xhci_transfer_event { + __le64 buffer; + __le32 transfer_len; + __le32 flags; +}; + +struct xhci_event_cmd { + __le64 cmd_trb; + __le32 status; + __le32 flags; +}; + +struct xhci_generic_trb { + __le32 field[4]; +}; + +union xhci_trb { + struct xhci_link_trb link; + struct xhci_transfer_event trans_event; + struct xhci_event_cmd event_cmd; + struct xhci_generic_trb generic; +}; + +struct xhci_stream_ctx { + __le64 stream_ring; + __le32 reserved[2]; +}; + +struct xhci_ring; + +struct xhci_stream_info { + struct xhci_ring **stream_rings; + unsigned int num_streams; + struct xhci_stream_ctx *stream_ctx_array; + unsigned int num_stream_ctxs; + dma_addr_t ctx_array_dma; + struct xarray trb_address_map; + struct xhci_command *free_streams_command; +}; + +enum xhci_ring_type { + TYPE_CTRL = 0, + TYPE_ISOC = 1, + TYPE_BULK = 2, + TYPE_INTR = 3, + TYPE_STREAM = 4, + TYPE_COMMAND = 5, + TYPE_EVENT = 6, +}; + +struct xhci_segment; + +struct xhci_ring { + struct xhci_segment *first_seg; + struct xhci_segment *last_seg; + union xhci_trb *enqueue; + struct xhci_segment *enq_seg; + union xhci_trb *dequeue; + struct xhci_segment *deq_seg; + struct list_head td_list; + u32 cycle_state; + unsigned int err_count; + unsigned int stream_id; + unsigned int num_segs; + unsigned int num_trbs_free; + unsigned int num_trbs_free_temp; + unsigned int bounce_buf_len; + enum xhci_ring_type type; + bool last_td_was_short; + struct xarray *trb_address_map; +}; + +struct xhci_bw_info { + unsigned int ep_interval; + unsigned int mult; + unsigned int num_packets; + unsigned int max_packet_size; + unsigned int max_esit_payload; + unsigned int type; +}; + +struct xhci_virt_device; + +struct xhci_hcd; + +struct xhci_virt_ep { + struct xhci_virt_device *vdev; + unsigned int ep_index; + struct xhci_ring *ring; + struct xhci_stream_info *stream_info; + struct xhci_ring *new_ring; + unsigned int ep_state; + struct list_head cancelled_td_list; + struct timer_list stop_cmd_timer; + struct xhci_hcd *xhci; + struct xhci_segment *queued_deq_seg; + union xhci_trb *queued_deq_ptr; + bool skip; + struct xhci_bw_info bw_info; + struct list_head bw_endpoint_list; + int next_frame_id; + bool use_extended_tbc; +}; + +struct xhci_interval_bw_table; + +struct xhci_tt_bw_info; + +struct xhci_virt_device { + int slot_id; + struct usb_device *udev; + struct xhci_container_ctx *out_ctx; + struct xhci_container_ctx *in_ctx; + struct xhci_virt_ep eps[31]; + u8 fake_port; + u8 real_port; + struct xhci_interval_bw_table *bw_table; + struct xhci_tt_bw_info *tt_info; + long unsigned int flags; + u16 current_mel; + void *debugfs_private; +}; + +struct xhci_erst_entry; + +struct xhci_erst { + struct xhci_erst_entry *entries; + unsigned int num_entries; + dma_addr_t erst_dma_addr; + unsigned int erst_size; +}; + +struct s3_save { + u32 command; + u32 dev_nt; + u64 dcbaa_ptr; + u32 
config_reg; + u32 irq_pending; + u32 irq_control; + u32 erst_size; + u64 erst_base; + u64 erst_dequeue; +}; + +struct xhci_bus_state { + long unsigned int bus_suspended; + long unsigned int next_statechange; + u32 port_c_suspend; + u32 suspended_ports; + u32 port_remote_wakeup; + long unsigned int resume_done[31]; + long unsigned int resuming_ports; + long unsigned int rexit_ports; + struct completion rexit_done[31]; + struct completion u3exit_done[31]; +}; + +struct xhci_port; + +struct xhci_hub { + struct xhci_port **ports; + unsigned int num_ports; + struct usb_hcd *hcd; + struct xhci_bus_state bus_state; + u8 maj_rev; + u8 min_rev; +}; + +struct xhci_device_context_array; + +struct xhci_scratchpad; + +struct xhci_root_port_bw_info; + +struct xhci_port_cap; + +struct xhci_hcd { + struct usb_hcd *main_hcd; + struct usb_hcd *shared_hcd; + struct xhci_cap_regs *cap_regs; + struct xhci_op_regs *op_regs; + struct xhci_run_regs *run_regs; + struct xhci_doorbell_array *dba; + struct xhci_intr_reg *ir_set; + __u32 hcs_params1; + __u32 hcs_params2; + __u32 hcs_params3; + __u32 hcc_params; + __u32 hcc_params2; + spinlock_t lock; + u8 sbrn; + u16 hci_version; + u8 max_slots; + u8 max_interrupters; + u8 max_ports; + u8 isoc_threshold; + u32 imod_interval; + int event_ring_max; + int page_size; + int page_shift; + int msix_count; + struct clk *clk; + struct clk *reg_clk; + struct reset_control *reset; + struct xhci_device_context_array *dcbaa; + struct xhci_ring *cmd_ring; + unsigned int cmd_ring_state; + struct list_head cmd_list; + unsigned int cmd_ring_reserved_trbs; + struct delayed_work cmd_timer; + struct completion cmd_ring_stop_completion; + struct xhci_command *current_cmd; + struct xhci_ring *event_ring; + struct xhci_erst erst; + struct xhci_scratchpad *scratchpad; + struct list_head lpm_failed_devs; + struct mutex mutex; + struct xhci_command *lpm_command; + struct xhci_virt_device *devs[256]; + struct xhci_root_port_bw_info *rh_bw; + struct dma_pool___2 *device_pool; + struct dma_pool___2 *segment_pool; + struct dma_pool___2 *small_streams_pool; + struct dma_pool___2 *medium_streams_pool; + unsigned int xhc_state; + u32 command; + struct s3_save s3; + long long unsigned int quirks; + unsigned int num_active_eps; + unsigned int limit_active_eps; + struct xhci_port *hw_ports; + struct xhci_hub usb2_rhub; + struct xhci_hub usb3_rhub; + unsigned int hw_lpm_support: 1; + unsigned int broken_suspend: 1; + u32 *ext_caps; + unsigned int num_ext_caps; + struct xhci_port_cap *port_caps; + unsigned int num_port_caps; + struct timer_list comp_mode_recovery_timer; + u32 port_status_u0; + u16 test_mode; + struct dentry *debugfs_root; + struct dentry *debugfs_slots; + struct list_head regset_list; + void *dbc; + long unsigned int priv[0]; +}; + +struct xhci_segment { + union xhci_trb *trbs; + struct xhci_segment *next; + dma_addr_t dma; + dma_addr_t bounce_dma; + void *bounce_buf; + unsigned int bounce_offs; + unsigned int bounce_len; +}; + +enum xhci_overhead_type { + LS_OVERHEAD_TYPE = 0, + FS_OVERHEAD_TYPE = 1, + HS_OVERHEAD_TYPE = 2, +}; + +struct xhci_interval_bw { + unsigned int num_packets; + struct list_head endpoints; + unsigned int overhead[3]; +}; + +struct xhci_interval_bw_table { + unsigned int interval0_esit_payload; + struct xhci_interval_bw interval_bw[16]; + unsigned int bw_used; + unsigned int ss_bw_in; + unsigned int ss_bw_out; +}; + +struct xhci_tt_bw_info { + struct list_head tt_list; + int slot_id; + int ttport; + struct xhci_interval_bw_table bw_table; + int active_eps; +}; + 
+struct xhci_root_port_bw_info { + struct list_head tts; + unsigned int num_active_tts; + struct xhci_interval_bw_table bw_table; +}; + +struct xhci_device_context_array { + __le64 dev_context_ptrs[256]; + dma_addr_t dma; +}; + +enum xhci_setup_dev { + SETUP_CONTEXT_ONLY = 0, + SETUP_CONTEXT_ADDRESS = 1, +}; + +enum xhci_cancelled_td_status { + TD_DIRTY = 0, + TD_HALTED = 1, + TD_CLEARING_CACHE = 2, + TD_CLEARED = 3, +}; + +struct xhci_td { + struct list_head td_list; + struct list_head cancelled_td_list; + int status; + enum xhci_cancelled_td_status cancel_status; + struct urb *urb; + struct xhci_segment *start_seg; + union xhci_trb *first_trb; + union xhci_trb *last_trb; + struct xhci_segment *last_trb_seg; + struct xhci_segment *bounce_seg; + bool urb_length_set; + unsigned int num_trbs; +}; + +struct xhci_erst_entry { + __le64 seg_addr; + __le32 seg_size; + __le32 rsvd; +}; + +struct xhci_scratchpad { + u64 *sp_array; + dma_addr_t sp_dma; + void **sp_buffers; +}; + +struct urb_priv___3 { + int num_tds; + int num_tds_done; + struct xhci_td td[0]; +}; + +struct xhci_port_cap { + u32 *psi; + u8 psi_count; + u8 psi_uid_count; + u8 maj_rev; + u8 min_rev; +}; + +struct xhci_port { + __le32 *addr; + int hw_portnum; + int hcd_portnum; + struct xhci_hub *rhub; + struct xhci_port_cap *port_cap; +}; + +struct xhci_driver_overrides { + size_t extra_priv_size; + int (*reset)(struct usb_hcd *); + int (*start)(struct usb_hcd *); + int (*add_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); + int (*drop_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); + int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); + void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); +}; + +typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *); + +enum xhci_ep_reset_type { + EP_HARD_RESET = 0, + EP_SOFT_RESET = 1, +}; + +struct dbc_regs { + __le32 capability; + __le32 doorbell; + __le32 ersts; + __le32 __reserved_0; + __le64 erstba; + __le64 erdp; + __le32 control; + __le32 status; + __le32 portsc; + __le32 __reserved_1; + __le64 dccp; + __le32 devinfo1; + __le32 devinfo2; +}; + +struct dbc_str_descs { + char string0[64]; + char manufacturer[64]; + char product[64]; + char serial[64]; +}; + +enum dbc_state { + DS_DISABLED = 0, + DS_INITIALIZED = 1, + DS_ENABLED = 2, + DS_CONNECTED = 3, + DS_CONFIGURED = 4, + DS_STALLED = 5, +}; + +struct xhci_dbc; + +struct dbc_ep { + struct xhci_dbc *dbc; + struct list_head list_pending; + struct xhci_ring *ring; + unsigned int direction: 1; +}; + +struct dbc_driver; + +struct xhci_dbc { + spinlock_t lock; + struct device *dev; + struct xhci_hcd *xhci; + struct dbc_regs *regs; + struct xhci_ring *ring_evt; + struct xhci_ring *ring_in; + struct xhci_ring *ring_out; + struct xhci_erst erst; + struct xhci_container_ctx *ctx; + struct dbc_str_descs *string; + dma_addr_t string_dma; + size_t string_size; + enum dbc_state state; + struct delayed_work event_work; + unsigned int resume_required: 1; + struct dbc_ep eps[2]; + const struct dbc_driver *driver; + void *priv; +}; + +struct dbc_driver { + int (*configure)(struct xhci_dbc *); + void (*disconnect)(struct xhci_dbc *); +}; + +struct dbc_request { + void *buf; + unsigned int length; + dma_addr_t dma; + void (*complete)(struct xhci_dbc *, struct dbc_request *); + struct list_head list_pool; + int status; + unsigned int actual; + struct xhci_dbc *dbc; + struct list_head list_pending; + dma_addr_t trb_dma; + union xhci_trb *trb; + unsigned int direction: 
1; +}; + +struct trace_event_raw_xhci_log_msg { + struct trace_entry ent; + u32 __data_loc_msg; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_ctx { + struct trace_entry ent; + int ctx_64; + unsigned int ctx_type; + dma_addr_t ctx_dma; + u8 *ctx_va; + unsigned int ctx_ep_num; + int slot_id; + u32 __data_loc_ctx_data; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_trb { + struct trace_entry ent; + u32 type; + u32 field0; + u32 field1; + u32 field2; + u32 field3; + u32 __data_loc_str; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_free_virt_dev { + struct trace_entry ent; + void *vdev; + long long unsigned int out_ctx; + long long unsigned int in_ctx; + u8 fake_port; + u8 real_port; + u16 current_mel; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_virt_dev { + struct trace_entry ent; + void *vdev; + long long unsigned int out_ctx; + long long unsigned int in_ctx; + int devnum; + int state; + int speed; + u8 portnum; + u8 level; + int slot_id; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_urb { + struct trace_entry ent; + void *urb; + unsigned int pipe; + unsigned int stream; + int status; + unsigned int flags; + int num_mapped_sgs; + int num_sgs; + int length; + int actual; + int epnum; + int dir_in; + int type; + int slot_id; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_ep_ctx { + struct trace_entry ent; + u32 info; + u32 info2; + u64 deq; + u32 tx_info; + u32 __data_loc_str; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_slot_ctx { + struct trace_entry ent; + u32 info; + u32 info2; + u32 tt_info; + u32 state; + u32 __data_loc_str; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_ctrl_ctx { + struct trace_entry ent; + u32 drop; + u32 add; + u32 __data_loc_str; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_ring { + struct trace_entry ent; + u32 type; + void *ring; + dma_addr_t enq; + dma_addr_t deq; + dma_addr_t enq_seg; + dma_addr_t deq_seg; + unsigned int num_segs; + unsigned int stream_id; + unsigned int cycle_state; + unsigned int num_trbs_free; + unsigned int bounce_buf_len; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_portsc { + struct trace_entry ent; + u32 portnum; + u32 portsc; + u32 __data_loc_str; + char __data[0]; +}; + +struct trace_event_raw_xhci_log_doorbell { + struct trace_entry ent; + u32 slot; + u32 doorbell; + u32 __data_loc_str; + char __data[0]; +}; + +struct trace_event_raw_xhci_dbc_log_request { + struct trace_entry ent; + struct dbc_request *req; + bool dir; + unsigned int actual; + unsigned int length; + int status; + char __data[0]; +}; + +struct trace_event_data_offsets_xhci_log_msg { + u32 msg; +}; + +struct trace_event_data_offsets_xhci_log_ctx { + u32 ctx_data; +}; + +struct trace_event_data_offsets_xhci_log_trb { + u32 str; +}; + +struct trace_event_data_offsets_xhci_log_free_virt_dev {}; + +struct trace_event_data_offsets_xhci_log_virt_dev {}; + +struct trace_event_data_offsets_xhci_log_urb {}; + +struct trace_event_data_offsets_xhci_log_ep_ctx { + u32 str; +}; + +struct trace_event_data_offsets_xhci_log_slot_ctx { + u32 str; +}; + +struct trace_event_data_offsets_xhci_log_ctrl_ctx { + u32 str; +}; + +struct trace_event_data_offsets_xhci_log_ring {}; + +struct trace_event_data_offsets_xhci_log_portsc { + u32 str; +}; + +struct trace_event_data_offsets_xhci_log_doorbell { + u32 str; +}; + +struct trace_event_data_offsets_xhci_dbc_log_request {}; + +typedef void (*btf_trace_xhci_dbg_address)(void *, struct va_format *); + +typedef void 
(*btf_trace_xhci_dbg_context_change)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_dbg_quirks)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_dbg_reset_ep)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_dbg_cancel_urb)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_dbg_init)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_dbg_ring_expansion)(void *, struct va_format *); + +typedef void (*btf_trace_xhci_address_ctx)(void *, struct xhci_hcd *, struct xhci_container_ctx *, unsigned int); + +typedef void (*btf_trace_xhci_handle_event)(void *, struct xhci_ring *, struct xhci_generic_trb *); + +typedef void (*btf_trace_xhci_handle_command)(void *, struct xhci_ring *, struct xhci_generic_trb *); + +typedef void (*btf_trace_xhci_handle_transfer)(void *, struct xhci_ring *, struct xhci_generic_trb *); + +typedef void (*btf_trace_xhci_queue_trb)(void *, struct xhci_ring *, struct xhci_generic_trb *); + +typedef void (*btf_trace_xhci_dbc_handle_event)(void *, struct xhci_ring *, struct xhci_generic_trb *); + +typedef void (*btf_trace_xhci_dbc_handle_transfer)(void *, struct xhci_ring *, struct xhci_generic_trb *); + +typedef void (*btf_trace_xhci_dbc_gadget_ep_queue)(void *, struct xhci_ring *, struct xhci_generic_trb *); + +typedef void (*btf_trace_xhci_free_virt_device)(void *, struct xhci_virt_device *); + +typedef void (*btf_trace_xhci_alloc_virt_device)(void *, struct xhci_virt_device *); + +typedef void (*btf_trace_xhci_setup_device)(void *, struct xhci_virt_device *); + +typedef void (*btf_trace_xhci_setup_addressable_virt_device)(void *, struct xhci_virt_device *); + +typedef void (*btf_trace_xhci_stop_device)(void *, struct xhci_virt_device *); + +typedef void (*btf_trace_xhci_urb_enqueue)(void *, struct urb *); + +typedef void (*btf_trace_xhci_urb_giveback)(void *, struct urb *); + +typedef void (*btf_trace_xhci_urb_dequeue)(void *, struct urb *); + +typedef void (*btf_trace_xhci_handle_cmd_stop_ep)(void *, struct xhci_ep_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_set_deq_ep)(void *, struct xhci_ep_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_reset_ep)(void *, struct xhci_ep_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_config_ep)(void *, struct xhci_ep_ctx *); + +typedef void (*btf_trace_xhci_add_endpoint)(void *, struct xhci_ep_ctx *); + +typedef void (*btf_trace_xhci_alloc_dev)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_free_dev)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_disable_slot)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_discover_or_reset_device)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_setup_device_slot)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_addr_dev)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_reset_dev)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_handle_cmd_set_deq)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_configure_endpoint)(void *, struct xhci_slot_ctx *); + +typedef void (*btf_trace_xhci_address_ctrl_ctx)(void *, struct xhci_input_control_ctx *); + +typedef void (*btf_trace_xhci_configure_endpoint_ctrl_ctx)(void *, struct xhci_input_control_ctx *); + +typedef void (*btf_trace_xhci_ring_alloc)(void *, struct xhci_ring *); + +typedef void (*btf_trace_xhci_ring_free)(void *, struct xhci_ring *); + +typedef void (*btf_trace_xhci_ring_expansion)(void *, struct 
xhci_ring *); + +typedef void (*btf_trace_xhci_inc_enq)(void *, struct xhci_ring *); + +typedef void (*btf_trace_xhci_inc_deq)(void *, struct xhci_ring *); + +typedef void (*btf_trace_xhci_handle_port_status)(void *, u32, u32); + +typedef void (*btf_trace_xhci_get_port_status)(void *, u32, u32); + +typedef void (*btf_trace_xhci_hub_status_data)(void *, u32, u32); + +typedef void (*btf_trace_xhci_ring_ep_doorbell)(void *, u32, u32); + +typedef void (*btf_trace_xhci_ring_host_doorbell)(void *, u32, u32); + +typedef void (*btf_trace_xhci_dbc_alloc_request)(void *, struct dbc_request *); + +typedef void (*btf_trace_xhci_dbc_free_request)(void *, struct dbc_request *); + +typedef void (*btf_trace_xhci_dbc_queue_request)(void *, struct dbc_request *); + +typedef void (*btf_trace_xhci_dbc_giveback_request)(void *, struct dbc_request *); + +struct usb_string_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 wData[1]; +}; + +struct dbc_info_context { + __le64 string0; + __le64 manufacturer; + __le64 product; + __le64 serial; + __le32 length; + __le32 __reserved_0[7]; +}; + +enum evtreturn { + EVT_ERR = 4294967295, + EVT_DONE = 0, + EVT_GSER = 1, + EVT_DISC = 2, +}; + +struct kfifo { + union { + struct __kfifo kfifo; + unsigned char *type; + const unsigned char *const_type; + char (*rectype)[0]; + void *ptr; + const void *ptr_const; + }; + unsigned char buf[0]; +}; + +struct dbc_port { + struct tty_port port; + spinlock_t port_lock; + struct list_head read_pool; + struct list_head read_queue; + unsigned int n_read; + struct tasklet_struct push; + struct list_head write_pool; + struct kfifo write_fifo; + bool registered; +}; + +struct xhci_regset { + char name[32]; + struct debugfs_regset32 regset; + size_t nregs; + struct list_head list; +}; + +struct xhci_file_map { + const char *name; + int (*show)(struct seq_file *, void *); +}; + +struct xhci_ep_priv { + char name[32]; + struct dentry *root; + struct xhci_stream_info *stream_info; + struct xhci_ring *show_ring; + unsigned int stream_id; +}; + +struct xhci_slot_priv { + char name[32]; + struct dentry *root; + struct xhci_ep_priv *eps[31]; + struct xhci_virt_device *dev; +}; + +struct usb_debug_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDebugInEndpoint; + __u8 bDebugOutEndpoint; +}; + +struct ehci_dev { + u32 bus; + u32 slot; + u32 func; +}; + +typedef void (*set_debug_port_t)(int); + +struct usb_hcd___2; + +struct xdbc_regs { + __le32 capability; + __le32 doorbell; + __le32 ersts; + __le32 __reserved_0; + __le64 erstba; + __le64 erdp; + __le32 control; + __le32 status; + __le32 portsc; + __le32 __reserved_1; + __le64 dccp; + __le32 devinfo1; + __le32 devinfo2; +}; + +struct xdbc_trb { + __le32 field[4]; +}; + +struct xdbc_erst_entry { + __le64 seg_addr; + __le32 seg_size; + __le32 __reserved_0; +}; + +struct xdbc_info_context { + __le64 string0; + __le64 manufacturer; + __le64 product; + __le64 serial; + __le32 length; + __le32 __reserved_0[7]; +}; + +struct xdbc_ep_context { + __le32 ep_info1; + __le32 ep_info2; + __le64 deq; + __le32 tx_info; + __le32 __reserved_0[11]; +}; + +struct xdbc_context { + struct xdbc_info_context info; + struct xdbc_ep_context out; + struct xdbc_ep_context in; +}; + +struct xdbc_strings { + char string0[64]; + char manufacturer[64]; + char product[64]; + char serial[64]; +}; + +struct xdbc_segment { + struct xdbc_trb *trbs; + dma_addr_t dma; +}; + +struct xdbc_ring { + struct xdbc_segment *segment; + struct xdbc_trb *enqueue; + struct xdbc_trb *dequeue; + u32 cycle_state; +}; + +struct 
xdbc_state { + u16 vendor; + u16 device; + u32 bus; + u32 dev; + u32 func; + void *xhci_base; + u64 xhci_start; + size_t xhci_length; + int port_number; + struct xdbc_regs *xdbc_reg; + dma_addr_t table_dma; + void *table_base; + dma_addr_t erst_dma; + size_t erst_size; + void *erst_base; + struct xdbc_ring evt_ring; + struct xdbc_segment evt_seg; + dma_addr_t dbcc_dma; + size_t dbcc_size; + void *dbcc_base; + dma_addr_t string_dma; + size_t string_size; + void *string_base; + struct xdbc_ring out_ring; + struct xdbc_segment out_seg; + void *out_buf; + dma_addr_t out_dma; + struct xdbc_ring in_ring; + struct xdbc_segment in_seg; + void *in_buf; + dma_addr_t in_dma; + u32 flags; + raw_spinlock_t lock; +}; + +struct usb_role_switch { + struct device dev; + struct mutex lock; + enum usb_role role; + struct device *usb2_port; + struct device *usb3_port; + struct device *udc; + usb_role_switch_set_t set; + usb_role_switch_get_t get; + bool allow_userspace_control; +}; + +struct serio_device_id { + __u8 type; + __u8 extra; + __u8 id; + __u8 proto; +}; + +struct serio_driver; + +struct serio { + void *port_data; + char name[32]; + char phys[32]; + char firmware_id[128]; + bool manual_bind; + struct serio_device_id id; + spinlock_t lock; + int (*write)(struct serio *, unsigned char); + int (*open)(struct serio *); + void (*close)(struct serio *); + int (*start)(struct serio *); + void (*stop)(struct serio *); + struct serio *parent; + struct list_head child_node; + struct list_head children; + unsigned int depth; + struct serio_driver *drv; + struct mutex drv_mutex; + struct device dev; + struct list_head node; + struct mutex *ps2_cmd_mutex; +}; + +struct serio_driver { + const char *description; + const struct serio_device_id *id_table; + bool manual_bind; + void (*write_wakeup)(struct serio *); + irqreturn_t (*interrupt)(struct serio *, unsigned char, unsigned int); + int (*connect)(struct serio *, struct serio_driver *); + int (*reconnect)(struct serio *); + int (*fast_reconnect)(struct serio *); + void (*disconnect)(struct serio *); + void (*cleanup)(struct serio *); + struct device_driver driver; +}; + +enum serio_event_type { + SERIO_RESCAN_PORT = 0, + SERIO_RECONNECT_PORT = 1, + SERIO_RECONNECT_SUBTREE = 2, + SERIO_REGISTER_PORT = 3, + SERIO_ATTACH_DRIVER = 4, +}; + +struct serio_event { + enum serio_event_type type; + void *object; + struct module *owner; + struct list_head node; +}; + +enum i8042_controller_reset_mode { + I8042_RESET_NEVER = 0, + I8042_RESET_ALWAYS = 1, + I8042_RESET_ON_S2RAM = 2, +}; + +struct i8042_port { + struct serio *serio; + int irq; + bool exists; + bool driver_bound; + signed char mux; +}; + +struct ps2dev { + struct serio *serio; + struct mutex cmd_mutex; + wait_queue_head_t wait; + long unsigned int flags; + u8 cmdbuf[8]; + u8 cmdcnt; + u8 nak; +}; + +struct input_mt_slot { + int abs[14]; + unsigned int frame; + unsigned int key; +}; + +struct input_mt { + int trkid; + int num_slots; + int slot; + unsigned int flags; + unsigned int frame; + int *red; + struct input_mt_slot slots[0]; +}; + +union input_seq_state { + struct { + short unsigned int pos; + bool mutex_acquired; + }; + void *p; +}; + +struct input_devres { + struct input_dev *input; +}; + +struct input_event { + __kernel_ulong_t __sec; + __kernel_ulong_t __usec; + __u16 type; + __u16 code; + __s32 value; +}; + +struct input_event_compat { + compat_ulong_t sec; + compat_ulong_t usec; + __u16 type; + __u16 code; + __s32 value; +}; + +struct ff_periodic_effect_compat { + __u16 waveform; + __u16 period; + 
__s16 magnitude; + __s16 offset; + __u16 phase; + struct ff_envelope envelope; + __u32 custom_len; + compat_uptr_t custom_data; +}; + +struct ff_effect_compat { + __u16 type; + __s16 id; + __u16 direction; + struct ff_trigger trigger; + struct ff_replay replay; + union { + struct ff_constant_effect constant; + struct ff_ramp_effect ramp; + struct ff_periodic_effect_compat periodic; + struct ff_condition_effect condition[2]; + struct ff_rumble_effect rumble; + } u; +}; + +struct input_mt_pos { + s16 x; + s16 y; +}; + +struct input_dev_poller { + void (*poll)(struct input_dev *); + unsigned int poll_interval; + unsigned int poll_interval_max; + unsigned int poll_interval_min; + struct input_dev *input; + struct delayed_work work; +}; + +struct touchscreen_properties { + unsigned int max_x; + unsigned int max_y; + bool invert_x; + bool invert_y; + bool swap_x_y; +}; + +struct mousedev_hw_data { + int dx; + int dy; + int dz; + int x; + int y; + int abs_event; + long unsigned int buttons; +}; + +struct mousedev { + int open; + struct input_handle handle; + wait_queue_head_t wait; + struct list_head client_list; + spinlock_t client_lock; + struct mutex mutex; + struct device dev; + struct cdev cdev; + bool exist; + struct list_head mixdev_node; + bool opened_by_mixdev; + struct mousedev_hw_data packet; + unsigned int pkt_count; + int old_x[4]; + int old_y[4]; + int frac_dx; + int frac_dy; + long unsigned int touch; + int (*open_device)(struct mousedev *); + void (*close_device)(struct mousedev *); +}; + +enum mousedev_emul { + MOUSEDEV_EMUL_PS2 = 0, + MOUSEDEV_EMUL_IMPS = 1, + MOUSEDEV_EMUL_EXPS = 2, +}; + +struct mousedev_motion { + int dx; + int dy; + int dz; + long unsigned int buttons; +}; + +struct mousedev_client { + struct fasync_struct *fasync; + struct mousedev *mousedev; + struct list_head node; + struct mousedev_motion packets[16]; + unsigned int head; + unsigned int tail; + spinlock_t packet_lock; + int pos_x; + int pos_y; + u8 ps2[6]; + unsigned char ready; + unsigned char buffer; + unsigned char bufsiz; + unsigned char imexseq; + unsigned char impsseq; + enum mousedev_emul mode; + long unsigned int last_buttons; +}; + +enum { + FRACTION_DENOM = 128, +}; + +struct input_mask { + __u32 type; + __u32 codes_size; + __u64 codes_ptr; +}; + +struct evdev_client; + +struct evdev { + int open; + struct input_handle handle; + struct evdev_client *grab; + struct list_head client_list; + spinlock_t client_lock; + struct mutex mutex; + struct device dev; + struct cdev cdev; + bool exist; +}; + +struct evdev_client { + unsigned int head; + unsigned int tail; + unsigned int packet_head; + spinlock_t buffer_lock; + wait_queue_head_t wait; + struct fasync_struct *fasync; + struct evdev *evdev; + struct list_head node; + enum input_clock_type clk_type; + bool revoked; + long unsigned int *evmasks[32]; + unsigned int bufsize; + struct input_event buffer[0]; +}; + +struct atkbd { + struct ps2dev ps2dev; + struct input_dev *dev; + char name[64]; + char phys[32]; + short unsigned int id; + short unsigned int keycode[512]; + long unsigned int force_release_mask[8]; + unsigned char set; + bool translated; + bool extra; + bool write; + bool softrepeat; + bool softraw; + bool scroll; + bool enabled; + unsigned char emul; + bool resend; + bool release; + long unsigned int xl_bit; + unsigned int last; + long unsigned int time; + long unsigned int err_count; + struct delayed_work event_work; + long unsigned int event_jiffies; + long unsigned int event_mask; + struct mutex mutex; + u32 function_row_physmap[24]; 
+ int num_function_row_keys; +}; + +enum elants_chip_id { + EKTH3500 = 0, + EKTF3624 = 1, +}; + +enum elants_state { + ELAN_STATE_NORMAL = 0, + ELAN_WAIT_QUEUE_HEADER = 1, + ELAN_WAIT_RECALIBRATION = 2, +}; + +enum elants_iap_mode { + ELAN_IAP_OPERATIONAL = 0, + ELAN_IAP_RECOVERY = 1, +}; + +struct elants_data { + struct i2c_client *client; + struct input_dev *input; + struct regulator *vcc33; + struct regulator *vccio; + struct gpio_desc *reset_gpio; + u16 fw_version; + u8 test_version; + u8 solution_version; + u8 bc_version; + u8 iap_version; + u16 hw_version; + u8 major_res; + unsigned int x_res; + unsigned int y_res; + unsigned int x_max; + unsigned int y_max; + unsigned int phy_x; + unsigned int phy_y; + struct touchscreen_properties prop; + enum elants_state state; + enum elants_chip_id chip_id; + enum elants_iap_mode iap_mode; + struct mutex sysfs_mutex; + u8 cmd_resp[4]; + struct completion cmd_done; + bool wake_irq_enabled; + bool keep_power_in_suspend; + long: 48; + long: 64; + u8 buf[169]; + long: 56; + long: 64; + long: 64; +}; + +struct elants_version_attribute { + struct device_attribute dattr; + size_t field_offset; + size_t field_size; +}; + +struct uinput_ff_upload { + __u32 request_id; + __s32 retval; + struct ff_effect effect; + struct ff_effect old; +}; + +struct uinput_ff_erase { + __u32 request_id; + __s32 retval; + __u32 effect_id; +}; + +struct uinput_setup { + struct input_id id; + char name[80]; + __u32 ff_effects_max; +}; + +struct uinput_abs_setup { + __u16 code; + struct input_absinfo absinfo; +}; + +struct uinput_user_dev { + char name[80]; + struct input_id id; + __u32 ff_effects_max; + __s32 absmax[64]; + __s32 absmin[64]; + __s32 absfuzz[64]; + __s32 absflat[64]; +}; + +enum uinput_state { + UIST_NEW_DEVICE = 0, + UIST_SETUP_COMPLETE = 1, + UIST_CREATED = 2, +}; + +struct uinput_request { + unsigned int id; + unsigned int code; + int retval; + struct completion done; + union { + unsigned int effect_id; + struct { + struct ff_effect *effect; + struct ff_effect *old; + } upload; + } u; +}; + +struct uinput_device { + struct input_dev *dev; + struct mutex mutex; + enum uinput_state state; + wait_queue_head_t waitq; + unsigned char ready; + unsigned char head; + unsigned char tail; + struct input_event buff[16]; + unsigned int ff_effects_max; + struct uinput_request *requests[16]; + wait_queue_head_t requests_waitq; + spinlock_t requests_lock; +}; + +struct uinput_ff_upload_compat { + __u32 request_id; + __s32 retval; + struct ff_effect_compat effect; + struct ff_effect_compat old; +}; + +struct trace_event_raw_rtc_time_alarm_class { + struct trace_entry ent; + time64_t secs; + int err; + char __data[0]; +}; + +struct trace_event_raw_rtc_irq_set_freq { + struct trace_entry ent; + int freq; + int err; + char __data[0]; +}; + +struct trace_event_raw_rtc_irq_set_state { + struct trace_entry ent; + int enabled; + int err; + char __data[0]; +}; + +struct trace_event_raw_rtc_alarm_irq_enable { + struct trace_entry ent; + unsigned int enabled; + int err; + char __data[0]; +}; + +struct trace_event_raw_rtc_offset_class { + struct trace_entry ent; + long int offset; + int err; + char __data[0]; +}; + +struct trace_event_raw_rtc_timer_class { + struct trace_entry ent; + struct rtc_timer *timer; + ktime_t expires; + ktime_t period; + char __data[0]; +}; + +struct trace_event_data_offsets_rtc_time_alarm_class {}; + +struct trace_event_data_offsets_rtc_irq_set_freq {}; + +struct trace_event_data_offsets_rtc_irq_set_state {}; + +struct 
trace_event_data_offsets_rtc_alarm_irq_enable {}; + +struct trace_event_data_offsets_rtc_offset_class {}; + +struct trace_event_data_offsets_rtc_timer_class {}; + +typedef void (*btf_trace_rtc_set_time)(void *, time64_t, int); + +typedef void (*btf_trace_rtc_read_time)(void *, time64_t, int); + +typedef void (*btf_trace_rtc_set_alarm)(void *, time64_t, int); + +typedef void (*btf_trace_rtc_read_alarm)(void *, time64_t, int); + +typedef void (*btf_trace_rtc_irq_set_freq)(void *, int, int); + +typedef void (*btf_trace_rtc_irq_set_state)(void *, int, int); + +typedef void (*btf_trace_rtc_alarm_irq_enable)(void *, unsigned int, int); + +typedef void (*btf_trace_rtc_set_offset)(void *, long int, int); + +typedef void (*btf_trace_rtc_read_offset)(void *, long int, int); + +typedef void (*btf_trace_rtc_timer_enqueue)(void *, struct rtc_timer *); + +typedef void (*btf_trace_rtc_timer_dequeue)(void *, struct rtc_timer *); + +typedef void (*btf_trace_rtc_timer_fired)(void *, struct rtc_timer *); + +enum { + none = 0, + day = 1, + month = 2, + year = 3, +}; + +struct nvmem_cell_info { + const char *name; + unsigned int offset; + unsigned int bytes; + unsigned int bit_offset; + unsigned int nbits; +}; + +typedef int (*nvmem_reg_read_t)(void *, unsigned int, void *, size_t); + +typedef int (*nvmem_reg_write_t)(void *, unsigned int, void *, size_t); + +enum nvmem_type { + NVMEM_TYPE_UNKNOWN = 0, + NVMEM_TYPE_EEPROM = 1, + NVMEM_TYPE_OTP = 2, + NVMEM_TYPE_BATTERY_BACKED = 3, +}; + +struct nvmem_keepout { + unsigned int start; + unsigned int end; + unsigned char value; +}; + +struct nvmem_config { + struct device *dev; + const char *name; + int id; + struct module *owner; + struct gpio_desc *wp_gpio; + const struct nvmem_cell_info *cells; + int ncells; + const struct nvmem_keepout *keepout; + unsigned int nkeepout; + enum nvmem_type type; + bool read_only; + bool root_only; + bool no_of_node; + nvmem_reg_read_t reg_read; + nvmem_reg_write_t reg_write; + int size; + int word_size; + int stride; + void *priv; + bool compat; + struct device *base_dev; +}; + +struct nvmem_device; + +struct cmos_rtc_board_info { + void (*wake_on)(struct device *); + void (*wake_off)(struct device *); + u32 flags; + int address_space; + u8 rtc_day_alarm; + u8 rtc_mon_alarm; + u8 rtc_century; +}; + +struct cmos_rtc { + struct rtc_device *rtc; + struct device *dev; + int irq; + struct resource *iomem; + time64_t alarm_expires; + void (*wake_on)(struct device *); + void (*wake_off)(struct device *); + u8 enabled_wake; + u8 suspend_ctrl; + u8 day_alrm; + u8 mon_alrm; + u8 century; + struct rtc_wkalrm saved_wkalrm; +}; + +struct i2c_devinfo { + struct list_head list; + int busnum; + struct i2c_board_info board_info; +}; + +struct i2c_device_identity { + u16 manufacturer_id; + u16 part_id; + u8 die_revision; +}; + +struct i2c_timings { + u32 bus_freq_hz; + u32 scl_rise_ns; + u32 scl_fall_ns; + u32 scl_int_delay_ns; + u32 sda_fall_ns; + u32 sda_hold_ns; + u32 digital_filter_width_ns; + u32 analog_filter_cutoff_freq_hz; +}; + +struct trace_event_raw_i2c_write { + struct trace_entry ent; + int adapter_nr; + __u16 msg_nr; + __u16 addr; + __u16 flags; + __u16 len; + u32 __data_loc_buf; + char __data[0]; +}; + +struct trace_event_raw_i2c_read { + struct trace_entry ent; + int adapter_nr; + __u16 msg_nr; + __u16 addr; + __u16 flags; + __u16 len; + char __data[0]; +}; + +struct trace_event_raw_i2c_reply { + struct trace_entry ent; + int adapter_nr; + __u16 msg_nr; + __u16 addr; + __u16 flags; + __u16 len; + u32 __data_loc_buf; + char 
__data[0]; +}; + +struct trace_event_raw_i2c_result { + struct trace_entry ent; + int adapter_nr; + __u16 nr_msgs; + __s16 ret; + char __data[0]; +}; + +struct trace_event_data_offsets_i2c_write { + u32 buf; +}; + +struct trace_event_data_offsets_i2c_read {}; + +struct trace_event_data_offsets_i2c_reply { + u32 buf; +}; + +struct trace_event_data_offsets_i2c_result {}; + +typedef void (*btf_trace_i2c_write)(void *, const struct i2c_adapter *, const struct i2c_msg *, int); + +typedef void (*btf_trace_i2c_read)(void *, const struct i2c_adapter *, const struct i2c_msg *, int); + +typedef void (*btf_trace_i2c_reply)(void *, const struct i2c_adapter *, const struct i2c_msg *, int); + +typedef void (*btf_trace_i2c_result)(void *, const struct i2c_adapter *, int, int); + +struct class_compat___2; + +struct i2c_cmd_arg { + unsigned int cmd; + void *arg; +}; + +struct i2c_smbus_alert_setup { + int irq; +}; + +struct trace_event_raw_smbus_write { + struct trace_entry ent; + int adapter_nr; + __u16 addr; + __u16 flags; + __u8 command; + __u8 len; + __u32 protocol; + __u8 buf[34]; + char __data[0]; +}; + +struct trace_event_raw_smbus_read { + struct trace_entry ent; + int adapter_nr; + __u16 flags; + __u16 addr; + __u8 command; + __u32 protocol; + __u8 buf[34]; + char __data[0]; +}; + +struct trace_event_raw_smbus_reply { + struct trace_entry ent; + int adapter_nr; + __u16 addr; + __u16 flags; + __u8 command; + __u8 len; + __u32 protocol; + __u8 buf[34]; + char __data[0]; +}; + +struct trace_event_raw_smbus_result { + struct trace_entry ent; + int adapter_nr; + __u16 addr; + __u16 flags; + __u8 read_write; + __u8 command; + __s16 res; + __u32 protocol; + char __data[0]; +}; + +struct trace_event_data_offsets_smbus_write {}; + +struct trace_event_data_offsets_smbus_read {}; + +struct trace_event_data_offsets_smbus_reply {}; + +struct trace_event_data_offsets_smbus_result {}; + +typedef void (*btf_trace_smbus_write)(void *, const struct i2c_adapter *, u16, short unsigned int, char, u8, int, const union i2c_smbus_data *); + +typedef void (*btf_trace_smbus_read)(void *, const struct i2c_adapter *, u16, short unsigned int, char, u8, int); + +typedef void (*btf_trace_smbus_reply)(void *, const struct i2c_adapter *, u16, short unsigned int, char, u8, int, const union i2c_smbus_data *, int); + +typedef void (*btf_trace_smbus_result)(void *, const struct i2c_adapter *, u16, short unsigned int, char, u8, int, int); + +struct i2c_acpi_handler_data { + struct acpi_connection_info info; + struct i2c_adapter *adapter; +}; + +struct gsb_buffer { + u8 status; + u8 len; + union { + u16 wdata; + u8 bdata; + u8 data[0]; + }; +}; + +struct i2c_acpi_lookup { + struct i2c_board_info *info; + acpi_handle adapter_handle; + acpi_handle device_handle; + acpi_handle search_handle; + int n; + int index; + u32 speed; + u32 min_speed; + u32 force_speed; +}; + +struct i2c_smbus_ioctl_data { + __u8 read_write; + __u8 command; + __u32 size; + union i2c_smbus_data *data; +}; + +struct i2c_rdwr_ioctl_data { + struct i2c_msg *msgs; + __u32 nmsgs; +}; + +struct i2c_dev { + struct list_head list; + struct i2c_adapter *adap; + struct device dev; + struct cdev cdev; +}; + +struct i2c_smbus_ioctl_data32 { + u8 read_write; + u8 command; + u32 size; + compat_caddr_t data; +}; + +struct i2c_msg32 { + u16 addr; + u16 flags; + u16 len; + compat_caddr_t buf; +}; + +struct i2c_rdwr_ioctl_data32 { + compat_caddr_t msgs; + u32 nmsgs; +}; + +struct dw_i2c_dev { + struct device *dev; + struct regmap *map; + struct regmap *sysmap; + void *base; + void 
*ext; + struct completion cmd_complete; + struct clk *clk; + struct clk *pclk; + struct reset_control *rst; + struct i2c_client *slave; + u32 (*get_clk_rate_khz)(struct dw_i2c_dev *); + int cmd_err; + struct i2c_msg *msgs; + int msgs_num; + int msg_write_idx; + u32 tx_buf_len; + u8 *tx_buf; + int msg_read_idx; + u32 rx_buf_len; + u8 *rx_buf; + int msg_err; + unsigned int status; + u32 abort_source; + int irq; + u32 flags; + struct i2c_adapter adapter; + u32 functionality; + u32 master_cfg; + u32 slave_cfg; + unsigned int tx_fifo_depth; + unsigned int rx_fifo_depth; + int rx_outstanding; + struct i2c_timings timings; + u32 sda_hold_time; + u16 ss_hcnt; + u16 ss_lcnt; + u16 fs_hcnt; + u16 fs_lcnt; + u16 fp_hcnt; + u16 fp_lcnt; + u16 hs_hcnt; + u16 hs_lcnt; + int (*acquire_lock)(); + void (*release_lock)(); + bool shared_with_punit; + void (*disable)(struct dw_i2c_dev *); + void (*disable_int)(struct dw_i2c_dev *); + int (*init)(struct dw_i2c_dev *); + int (*set_sda_hold_time)(struct dw_i2c_dev *); + int mode; + struct i2c_bus_recovery_info rinfo; + bool suspended; +}; + +struct pps_ktime { + __s64 sec; + __s32 nsec; + __u32 flags; +}; + +struct pps_ktime_compat { + __s64 sec; + __s32 nsec; + __u32 flags; +}; + +struct pps_kinfo { + __u32 assert_sequence; + __u32 clear_sequence; + struct pps_ktime assert_tu; + struct pps_ktime clear_tu; + int current_mode; +}; + +struct pps_kinfo_compat { + __u32 assert_sequence; + __u32 clear_sequence; + struct pps_ktime_compat assert_tu; + struct pps_ktime_compat clear_tu; + int current_mode; +} __attribute__((packed)); + +struct pps_kparams { + int api_version; + int mode; + struct pps_ktime assert_off_tu; + struct pps_ktime clear_off_tu; +}; + +struct pps_fdata { + struct pps_kinfo info; + struct pps_ktime timeout; +}; + +struct pps_fdata_compat { + struct pps_kinfo_compat info; + struct pps_ktime_compat timeout; +} __attribute__((packed)); + +struct pps_bind_args { + int tsformat; + int edge; + int consumer; +}; + +struct pps_device; + +struct pps_source_info { + char name[32]; + char path[32]; + int mode; + void (*echo)(struct pps_device *, int, void *); + struct module *owner; + struct device *dev; +}; + +struct pps_device { + struct pps_source_info info; + struct pps_kparams params; + __u32 assert_sequence; + __u32 clear_sequence; + struct pps_ktime assert_tu; + struct pps_ktime clear_tu; + int current_mode; + unsigned int last_ev; + wait_queue_head_t queue; + unsigned int id; + const void *lookup_cookie; + struct cdev cdev; + struct device *dev; + struct fasync_struct *async_queue; + spinlock_t lock; +}; + +struct pps_event_time { + struct timespec64 ts_real; +}; + +struct ptp_clock_time { + __s64 sec; + __u32 nsec; + __u32 reserved; +}; + +struct ptp_extts_request { + unsigned int index; + unsigned int flags; + unsigned int rsv[2]; +}; + +struct ptp_perout_request { + union { + struct ptp_clock_time start; + struct ptp_clock_time phase; + }; + struct ptp_clock_time period; + unsigned int index; + unsigned int flags; + union { + struct ptp_clock_time on; + unsigned int rsv[4]; + }; +}; + +enum ptp_pin_function { + PTP_PF_NONE = 0, + PTP_PF_EXTTS = 1, + PTP_PF_PEROUT = 2, + PTP_PF_PHYSYNC = 3, +}; + +struct ptp_pin_desc { + char name[64]; + unsigned int index; + unsigned int func; + unsigned int chan; + unsigned int rsv[5]; +}; + +struct ptp_extts_event { + struct ptp_clock_time t; + unsigned int index; + unsigned int flags; + unsigned int rsv[2]; +}; + +struct ptp_clock_request { + enum { + PTP_CLK_REQ_EXTTS = 0, + PTP_CLK_REQ_PEROUT = 1, + 
PTP_CLK_REQ_PPS = 2, + } type; + union { + struct ptp_extts_request extts; + struct ptp_perout_request perout; + }; +}; + +struct ptp_clock_info { + struct module *owner; + char name[16]; + s32 max_adj; + int n_alarm; + int n_ext_ts; + int n_per_out; + int n_pins; + int pps; + struct ptp_pin_desc *pin_config; + int (*adjfine)(struct ptp_clock_info *, long int); + int (*adjfreq)(struct ptp_clock_info *, s32); + int (*adjphase)(struct ptp_clock_info *, s32); + int (*adjtime)(struct ptp_clock_info *, s64); + int (*gettime64)(struct ptp_clock_info *, struct timespec64 *); + int (*gettimex64)(struct ptp_clock_info *, struct timespec64 *, struct ptp_system_timestamp *); + int (*getcrosststamp)(struct ptp_clock_info *, struct system_device_crosststamp *); + int (*settime64)(struct ptp_clock_info *, const struct timespec64 *); + int (*enable)(struct ptp_clock_info *, struct ptp_clock_request *, int); + int (*verify)(struct ptp_clock_info *, unsigned int, enum ptp_pin_function, unsigned int); + long int (*do_aux_work)(struct ptp_clock_info *); +}; + +enum ptp_clock_events { + PTP_CLOCK_ALARM = 0, + PTP_CLOCK_EXTTS = 1, + PTP_CLOCK_PPS = 2, + PTP_CLOCK_PPSUSR = 3, +}; + +struct ptp_clock_event { + int type; + int index; + union { + u64 timestamp; + struct pps_event_time pps_times; + }; +}; + +struct timestamp_event_queue { + struct ptp_extts_event buf[128]; + int head; + int tail; + spinlock_t lock; +}; + +struct ptp_clock { + struct posix_clock clock; + struct device dev; + struct ptp_clock_info *info; + dev_t devid; + int index; + struct pps_device *pps_source; + long int dialed_frequency; + struct timestamp_event_queue tsevq; + struct mutex tsevq_mux; + struct mutex pincfg_mux; + wait_queue_head_t tsev_wq; + int defunct; + struct device_attribute *pin_dev_attr; + struct attribute **pin_attr; + struct attribute_group pin_attr_group; + const struct attribute_group *pin_attr_groups[2]; + struct kthread_worker *kworker; + struct kthread_delayed_work aux_work; +}; + +struct ptp_clock_caps { + int max_adj; + int n_alarm; + int n_ext_ts; + int n_per_out; + int pps; + int n_pins; + int cross_timestamping; + int adjust_phase; + int rsv[12]; +}; + +struct ptp_sys_offset { + unsigned int n_samples; + unsigned int rsv[3]; + struct ptp_clock_time ts[51]; +}; + +struct ptp_sys_offset_extended { + unsigned int n_samples; + unsigned int rsv[3]; + struct ptp_clock_time ts[75]; +}; + +struct ptp_sys_offset_precise { + struct ptp_clock_time device; + struct ptp_clock_time sys_realtime; + struct ptp_clock_time sys_monoraw; + unsigned int rsv[4]; +}; + +struct mt6397_chip { + struct device *dev; + struct regmap *regmap; + struct notifier_block pm_nb; + int irq; + struct irq_domain *irq_domain; + struct mutex irqlock; + u16 wake_mask[2]; + u16 irq_masks_cur[2]; + u16 irq_masks_cache[2]; + u16 int_con[2]; + u16 int_status[2]; + u16 chip_id; + void *irq_data; +}; + +struct mt6323_pwrc { + struct device *dev; + struct regmap *regmap; + u32 base; +}; + +enum power_supply_notifier_events { + PSY_EVENT_PROP_CHANGED = 0, +}; + +struct power_supply_battery_ocv_table { + int ocv; + int capacity; +}; + +struct power_supply_resistance_temp_table { + int temp; + int resistance; +}; + +struct power_supply_battery_info { + int energy_full_design_uwh; + int charge_full_design_uah; + int voltage_min_design_uv; + int voltage_max_design_uv; + int tricklecharge_current_ua; + int precharge_current_ua; + int precharge_voltage_max_uv; + int charge_term_current_ua; + int charge_restart_voltage_uv; + int overvoltage_limit_uv; + int 
constant_charge_current_max_ua; + int constant_charge_voltage_max_uv; + int factory_internal_resistance_uohm; + int ocv_temp[20]; + int temp_ambient_alert_min; + int temp_ambient_alert_max; + int temp_alert_min; + int temp_alert_max; + int temp_min; + int temp_max; + struct power_supply_battery_ocv_table *ocv_table[20]; + int ocv_table_size[20]; + struct power_supply_resistance_temp_table *resist_table; + int resist_table_size; +}; + +struct psy_am_i_supplied_data { + struct power_supply *psy; + unsigned int count; +}; + +enum { + POWER_SUPPLY_CHARGE_TYPE_UNKNOWN = 0, + POWER_SUPPLY_CHARGE_TYPE_NONE = 1, + POWER_SUPPLY_CHARGE_TYPE_TRICKLE = 2, + POWER_SUPPLY_CHARGE_TYPE_FAST = 3, + POWER_SUPPLY_CHARGE_TYPE_STANDARD = 4, + POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE = 5, + POWER_SUPPLY_CHARGE_TYPE_CUSTOM = 6, + POWER_SUPPLY_CHARGE_TYPE_LONGLIFE = 7, +}; + +enum { + POWER_SUPPLY_HEALTH_UNKNOWN = 0, + POWER_SUPPLY_HEALTH_GOOD = 1, + POWER_SUPPLY_HEALTH_OVERHEAT = 2, + POWER_SUPPLY_HEALTH_DEAD = 3, + POWER_SUPPLY_HEALTH_OVERVOLTAGE = 4, + POWER_SUPPLY_HEALTH_UNSPEC_FAILURE = 5, + POWER_SUPPLY_HEALTH_COLD = 6, + POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE = 7, + POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE = 8, + POWER_SUPPLY_HEALTH_OVERCURRENT = 9, + POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED = 10, + POWER_SUPPLY_HEALTH_WARM = 11, + POWER_SUPPLY_HEALTH_COOL = 12, + POWER_SUPPLY_HEALTH_HOT = 13, +}; + +enum { + POWER_SUPPLY_SCOPE_UNKNOWN = 0, + POWER_SUPPLY_SCOPE_SYSTEM = 1, + POWER_SUPPLY_SCOPE_DEVICE = 2, +}; + +struct power_supply_attr { + const char *prop_name; + char attr_name[31]; + struct device_attribute dev_attr; + const char * const *text_values; + int text_values_len; +}; + +enum hwmon_sensor_types { + hwmon_chip = 0, + hwmon_temp = 1, + hwmon_in = 2, + hwmon_curr = 3, + hwmon_power = 4, + hwmon_energy = 5, + hwmon_humidity = 6, + hwmon_fan = 7, + hwmon_pwm = 8, + hwmon_intrusion = 9, + hwmon_max = 10, +}; + +enum hwmon_temp_attributes { + hwmon_temp_enable = 0, + hwmon_temp_input = 1, + hwmon_temp_type = 2, + hwmon_temp_lcrit = 3, + hwmon_temp_lcrit_hyst = 4, + hwmon_temp_min = 5, + hwmon_temp_min_hyst = 6, + hwmon_temp_max = 7, + hwmon_temp_max_hyst = 8, + hwmon_temp_crit = 9, + hwmon_temp_crit_hyst = 10, + hwmon_temp_emergency = 11, + hwmon_temp_emergency_hyst = 12, + hwmon_temp_alarm = 13, + hwmon_temp_lcrit_alarm = 14, + hwmon_temp_min_alarm = 15, + hwmon_temp_max_alarm = 16, + hwmon_temp_crit_alarm = 17, + hwmon_temp_emergency_alarm = 18, + hwmon_temp_fault = 19, + hwmon_temp_offset = 20, + hwmon_temp_label = 21, + hwmon_temp_lowest = 22, + hwmon_temp_highest = 23, + hwmon_temp_reset_history = 24, + hwmon_temp_rated_min = 25, + hwmon_temp_rated_max = 26, +}; + +enum hwmon_in_attributes { + hwmon_in_enable = 0, + hwmon_in_input = 1, + hwmon_in_min = 2, + hwmon_in_max = 3, + hwmon_in_lcrit = 4, + hwmon_in_crit = 5, + hwmon_in_average = 6, + hwmon_in_lowest = 7, + hwmon_in_highest = 8, + hwmon_in_reset_history = 9, + hwmon_in_label = 10, + hwmon_in_alarm = 11, + hwmon_in_min_alarm = 12, + hwmon_in_max_alarm = 13, + hwmon_in_lcrit_alarm = 14, + hwmon_in_crit_alarm = 15, + hwmon_in_rated_min = 16, + hwmon_in_rated_max = 17, +}; + +enum hwmon_curr_attributes { + hwmon_curr_enable = 0, + hwmon_curr_input = 1, + hwmon_curr_min = 2, + hwmon_curr_max = 3, + hwmon_curr_lcrit = 4, + hwmon_curr_crit = 5, + hwmon_curr_average = 6, + hwmon_curr_lowest = 7, + hwmon_curr_highest = 8, + hwmon_curr_reset_history = 9, + hwmon_curr_label = 10, + hwmon_curr_alarm = 11, + hwmon_curr_min_alarm = 12, + 
hwmon_curr_max_alarm = 13, + hwmon_curr_lcrit_alarm = 14, + hwmon_curr_crit_alarm = 15, + hwmon_curr_rated_min = 16, + hwmon_curr_rated_max = 17, +}; + +struct hwmon_ops { + umode_t (*is_visible)(const void *, enum hwmon_sensor_types, u32, int); + int (*read)(struct device *, enum hwmon_sensor_types, u32, int, long int *); + int (*read_string)(struct device *, enum hwmon_sensor_types, u32, int, const char **); + int (*write)(struct device *, enum hwmon_sensor_types, u32, int, long int); +}; + +struct hwmon_channel_info { + enum hwmon_sensor_types type; + const u32 *config; +}; + +struct hwmon_chip_info { + const struct hwmon_ops *ops; + const struct hwmon_channel_info **info; +}; + +struct power_supply_hwmon { + struct power_supply *psy; + long unsigned int *props; +}; + +struct hwmon_type_attr_list { + const u32 *attrs; + size_t n_attrs; +}; + +enum cm_batt_temp { + CM_BATT_OK = 0, + CM_BATT_OVERHEAT = 1, + CM_BATT_COLD = 2, +}; + +enum hwmon_chip_attributes { + hwmon_chip_temp_reset_history = 0, + hwmon_chip_in_reset_history = 1, + hwmon_chip_curr_reset_history = 2, + hwmon_chip_power_reset_history = 3, + hwmon_chip_register_tz = 4, + hwmon_chip_update_interval = 5, + hwmon_chip_alarms = 6, + hwmon_chip_samples = 7, + hwmon_chip_curr_samples = 8, + hwmon_chip_in_samples = 9, + hwmon_chip_power_samples = 10, + hwmon_chip_temp_samples = 11, +}; + +enum hwmon_power_attributes { + hwmon_power_enable = 0, + hwmon_power_average = 1, + hwmon_power_average_interval = 2, + hwmon_power_average_interval_max = 3, + hwmon_power_average_interval_min = 4, + hwmon_power_average_highest = 5, + hwmon_power_average_lowest = 6, + hwmon_power_average_max = 7, + hwmon_power_average_min = 8, + hwmon_power_input = 9, + hwmon_power_input_highest = 10, + hwmon_power_input_lowest = 11, + hwmon_power_reset_history = 12, + hwmon_power_accuracy = 13, + hwmon_power_cap = 14, + hwmon_power_cap_hyst = 15, + hwmon_power_cap_max = 16, + hwmon_power_cap_min = 17, + hwmon_power_min = 18, + hwmon_power_max = 19, + hwmon_power_crit = 20, + hwmon_power_lcrit = 21, + hwmon_power_label = 22, + hwmon_power_alarm = 23, + hwmon_power_cap_alarm = 24, + hwmon_power_min_alarm = 25, + hwmon_power_max_alarm = 26, + hwmon_power_lcrit_alarm = 27, + hwmon_power_crit_alarm = 28, + hwmon_power_rated_min = 29, + hwmon_power_rated_max = 30, +}; + +enum hwmon_energy_attributes { + hwmon_energy_enable = 0, + hwmon_energy_input = 1, + hwmon_energy_label = 2, +}; + +enum hwmon_humidity_attributes { + hwmon_humidity_enable = 0, + hwmon_humidity_input = 1, + hwmon_humidity_label = 2, + hwmon_humidity_min = 3, + hwmon_humidity_min_hyst = 4, + hwmon_humidity_max = 5, + hwmon_humidity_max_hyst = 6, + hwmon_humidity_alarm = 7, + hwmon_humidity_fault = 8, + hwmon_humidity_rated_min = 9, + hwmon_humidity_rated_max = 10, +}; + +enum hwmon_fan_attributes { + hwmon_fan_enable = 0, + hwmon_fan_input = 1, + hwmon_fan_label = 2, + hwmon_fan_min = 3, + hwmon_fan_max = 4, + hwmon_fan_div = 5, + hwmon_fan_pulses = 6, + hwmon_fan_target = 7, + hwmon_fan_alarm = 8, + hwmon_fan_min_alarm = 9, + hwmon_fan_max_alarm = 10, + hwmon_fan_fault = 11, +}; + +enum hwmon_pwm_attributes { + hwmon_pwm_input = 0, + hwmon_pwm_enable = 1, + hwmon_pwm_mode = 2, + hwmon_pwm_freq = 3, +}; + +enum hwmon_intrusion_attributes { + hwmon_intrusion_alarm = 0, + hwmon_intrusion_beep = 1, +}; + +struct trace_event_raw_hwmon_attr_class { + struct trace_entry ent; + int index; + u32 __data_loc_attr_name; + long int val; + char __data[0]; +}; + +struct trace_event_raw_hwmon_attr_show_string { + 
struct trace_entry ent; + int index; + u32 __data_loc_attr_name; + u32 __data_loc_label; + char __data[0]; +}; + +struct trace_event_data_offsets_hwmon_attr_class { + u32 attr_name; +}; + +struct trace_event_data_offsets_hwmon_attr_show_string { + u32 attr_name; + u32 label; +}; + +typedef void (*btf_trace_hwmon_attr_show)(void *, int, const char *, long int); + +typedef void (*btf_trace_hwmon_attr_store)(void *, int, const char *, long int); + +typedef void (*btf_trace_hwmon_attr_show_string)(void *, int, const char *, const char *); + +struct hwmon_device { + const char *name; + struct device dev; + const struct hwmon_chip_info *chip; + struct list_head tzdata; + struct attribute_group group; + const struct attribute_group **groups; +}; + +struct hwmon_device_attribute { + struct device_attribute dev_attr; + const struct hwmon_ops *ops; + enum hwmon_sensor_types type; + u32 attr; + int index; + char name[32]; +}; + +struct thermal_attr { + struct device_attribute attr; + char name[20]; +}; + +struct devfreq_dev_status { + long unsigned int total_time; + long unsigned int busy_time; + long unsigned int current_frequency; + void *private_data; +}; + +struct trace_event_raw_thermal_temperature { + struct trace_entry ent; + u32 __data_loc_thermal_zone; + int id; + int temp_prev; + int temp; + char __data[0]; +}; + +struct trace_event_raw_cdev_update { + struct trace_entry ent; + u32 __data_loc_type; + long unsigned int target; + char __data[0]; +}; + +struct trace_event_raw_thermal_zone_trip { + struct trace_entry ent; + u32 __data_loc_thermal_zone; + int id; + int trip; + enum thermal_trip_type trip_type; + char __data[0]; +}; + +struct trace_event_raw_thermal_power_devfreq_get_power { + struct trace_entry ent; + u32 __data_loc_type; + long unsigned int freq; + u32 busy_time; + u32 total_time; + u32 power; + char __data[0]; +}; + +struct trace_event_raw_thermal_power_devfreq_limit { + struct trace_entry ent; + u32 __data_loc_type; + unsigned int freq; + long unsigned int cdev_state; + u32 power; + char __data[0]; +}; + +struct trace_event_data_offsets_thermal_temperature { + u32 thermal_zone; +}; + +struct trace_event_data_offsets_cdev_update { + u32 type; +}; + +struct trace_event_data_offsets_thermal_zone_trip { + u32 thermal_zone; +}; + +struct trace_event_data_offsets_thermal_power_devfreq_get_power { + u32 type; +}; + +struct trace_event_data_offsets_thermal_power_devfreq_limit { + u32 type; +}; + +typedef void (*btf_trace_thermal_temperature)(void *, struct thermal_zone_device *); + +typedef void (*btf_trace_cdev_update)(void *, struct thermal_cooling_device *, long unsigned int); + +typedef void (*btf_trace_thermal_zone_trip)(void *, struct thermal_zone_device *, int, enum thermal_trip_type); + +typedef void (*btf_trace_thermal_power_devfreq_get_power)(void *, struct thermal_cooling_device *, struct devfreq_dev_status *, long unsigned int, u32); + +typedef void (*btf_trace_thermal_power_devfreq_limit)(void *, struct thermal_cooling_device *, long unsigned int, long unsigned int, u32); + +struct thermal_instance { + int id; + char name[20]; + struct thermal_zone_device *tz; + struct thermal_cooling_device *cdev; + int trip; + bool initialized; + long unsigned int upper; + long unsigned int lower; + long unsigned int target; + char attr_name[20]; + struct device_attribute attr; + char weight_attr_name[20]; + struct device_attribute weight_attr; + struct list_head tz_node; + struct list_head cdev_node; + unsigned int weight; +}; + +struct cooling_dev_stats { + spinlock_t lock; + 
unsigned int total_trans; + long unsigned int state; + long unsigned int max_states; + ktime_t last_time; + ktime_t *time_in_state; + unsigned int *trans_table; +}; + +struct genl_dumpit_info { + const struct genl_family *family; + struct genl_ops op; + struct nlattr **attrs; +}; + +enum thermal_genl_attr { + THERMAL_GENL_ATTR_UNSPEC = 0, + THERMAL_GENL_ATTR_TZ = 1, + THERMAL_GENL_ATTR_TZ_ID = 2, + THERMAL_GENL_ATTR_TZ_TEMP = 3, + THERMAL_GENL_ATTR_TZ_TRIP = 4, + THERMAL_GENL_ATTR_TZ_TRIP_ID = 5, + THERMAL_GENL_ATTR_TZ_TRIP_TYPE = 6, + THERMAL_GENL_ATTR_TZ_TRIP_TEMP = 7, + THERMAL_GENL_ATTR_TZ_TRIP_HYST = 8, + THERMAL_GENL_ATTR_TZ_MODE = 9, + THERMAL_GENL_ATTR_TZ_NAME = 10, + THERMAL_GENL_ATTR_TZ_CDEV_WEIGHT = 11, + THERMAL_GENL_ATTR_TZ_GOV = 12, + THERMAL_GENL_ATTR_TZ_GOV_NAME = 13, + THERMAL_GENL_ATTR_CDEV = 14, + THERMAL_GENL_ATTR_CDEV_ID = 15, + THERMAL_GENL_ATTR_CDEV_CUR_STATE = 16, + THERMAL_GENL_ATTR_CDEV_MAX_STATE = 17, + THERMAL_GENL_ATTR_CDEV_NAME = 18, + THERMAL_GENL_ATTR_GOV_NAME = 19, + __THERMAL_GENL_ATTR_MAX = 20, +}; + +enum thermal_genl_sampling { + THERMAL_GENL_SAMPLING_TEMP = 0, + __THERMAL_GENL_SAMPLING_MAX = 1, +}; + +enum thermal_genl_event { + THERMAL_GENL_EVENT_UNSPEC = 0, + THERMAL_GENL_EVENT_TZ_CREATE = 1, + THERMAL_GENL_EVENT_TZ_DELETE = 2, + THERMAL_GENL_EVENT_TZ_DISABLE = 3, + THERMAL_GENL_EVENT_TZ_ENABLE = 4, + THERMAL_GENL_EVENT_TZ_TRIP_UP = 5, + THERMAL_GENL_EVENT_TZ_TRIP_DOWN = 6, + THERMAL_GENL_EVENT_TZ_TRIP_CHANGE = 7, + THERMAL_GENL_EVENT_TZ_TRIP_ADD = 8, + THERMAL_GENL_EVENT_TZ_TRIP_DELETE = 9, + THERMAL_GENL_EVENT_CDEV_ADD = 10, + THERMAL_GENL_EVENT_CDEV_DELETE = 11, + THERMAL_GENL_EVENT_CDEV_STATE_UPDATE = 12, + THERMAL_GENL_EVENT_TZ_GOV_CHANGE = 13, + __THERMAL_GENL_EVENT_MAX = 14, +}; + +enum thermal_genl_cmd { + THERMAL_GENL_CMD_UNSPEC = 0, + THERMAL_GENL_CMD_TZ_GET_ID = 1, + THERMAL_GENL_CMD_TZ_GET_TRIP = 2, + THERMAL_GENL_CMD_TZ_GET_TEMP = 3, + THERMAL_GENL_CMD_TZ_GET_GOV = 4, + THERMAL_GENL_CMD_TZ_GET_MODE = 5, + THERMAL_GENL_CMD_CDEV_GET = 6, + __THERMAL_GENL_CMD_MAX = 7, +}; + +struct param { + struct nlattr **attrs; + struct sk_buff *msg; + const char *name; + int tz_id; + int cdev_id; + int trip_id; + int trip_temp; + int trip_type; + int trip_hyst; + int temp; + int cdev_state; + int cdev_max_state; +}; + +typedef int (*cb_t)(struct param *); + +struct thermal_hwmon_device { + char type[20]; + struct device *device; + int count; + struct list_head tz_list; + struct list_head node; +}; + +struct thermal_hwmon_attr { + struct device_attribute attr; + char name[16]; +}; + +struct thermal_hwmon_temp { + struct list_head hwmon_node; + struct thermal_zone_device *tz; + struct thermal_hwmon_attr temp_input; + struct thermal_hwmon_attr temp_crit; +}; + +struct trace_event_raw_thermal_power_allocator { + struct trace_entry ent; + int tz_id; + u32 __data_loc_req_power; + u32 total_req_power; + u32 __data_loc_granted_power; + u32 total_granted_power; + size_t num_actors; + u32 power_range; + u32 max_allocatable_power; + int current_temp; + s32 delta_temp; + char __data[0]; +}; + +struct trace_event_raw_thermal_power_allocator_pid { + struct trace_entry ent; + int tz_id; + s32 err; + s32 err_integral; + s64 p; + s64 i; + s64 d; + s32 output; + char __data[0]; +}; + +struct trace_event_data_offsets_thermal_power_allocator { + u32 req_power; + u32 granted_power; +}; + +struct trace_event_data_offsets_thermal_power_allocator_pid {}; + +typedef void (*btf_trace_thermal_power_allocator)(void *, struct thermal_zone_device *, u32 *, u32, u32 *, u32, size_t, 
u32, u32, int, s32); + +typedef void (*btf_trace_thermal_power_allocator_pid)(void *, struct thermal_zone_device *, s32, s32, s64, s64, s64, s32); + +struct power_allocator_params { + bool allocated_tzp; + s64 err_integral; + s32 prev_err; + int trip_switch_on; + int trip_max_desired_temperature; + u32 sustainable_power; +}; + +enum devfreq_timer { + DEVFREQ_TIMER_DEFERRABLE = 0, + DEVFREQ_TIMER_DELAYED = 1, + DEVFREQ_TIMER_NUM = 2, +}; + +struct devfreq_dev_profile { + long unsigned int initial_freq; + unsigned int polling_ms; + enum devfreq_timer timer; + bool is_cooling_device; + int (*target)(struct device *, long unsigned int *, u32); + int (*get_dev_status)(struct device *, struct devfreq_dev_status *); + int (*get_cur_freq)(struct device *, long unsigned int *); + void (*exit)(struct device *); + long unsigned int *freq_table; + unsigned int max_state; +}; + +struct devfreq_stats { + unsigned int total_trans; + unsigned int *trans_table; + u64 *time_in_state; + u64 last_update; +}; + +struct devfreq_governor; + +struct devfreq { + struct list_head node; + struct mutex lock; + struct device dev; + struct devfreq_dev_profile *profile; + const struct devfreq_governor *governor; + struct opp_table *opp_table; + struct notifier_block nb; + struct delayed_work work; + long unsigned int previous_freq; + struct devfreq_dev_status last_status; + void *data; + struct dev_pm_qos_request user_min_freq_req; + struct dev_pm_qos_request user_max_freq_req; + long unsigned int scaling_min_freq; + long unsigned int scaling_max_freq; + bool stop_polling; + long unsigned int suspend_freq; + long unsigned int resume_freq; + atomic_t suspend_count; + struct devfreq_stats stats; + struct srcu_notifier_head transition_notifier_list; + struct thermal_cooling_device *cdev; + struct notifier_block nb_min; + struct notifier_block nb_max; +}; + +struct devfreq_governor { + struct list_head node; + const char name[16]; + const u64 attrs; + const u64 flags; + int (*get_target_freq)(struct devfreq *, long unsigned int *); + int (*event_handler)(struct devfreq *, unsigned int, void *); +}; + +struct devfreq_cooling_power { + int (*get_real_power)(struct devfreq *, u32 *, long unsigned int, long unsigned int); +}; + +struct devfreq_cooling_device { + struct thermal_cooling_device *cdev; + struct devfreq *devfreq; + long unsigned int cooling_state; + u32 *freq_table; + size_t max_state; + struct devfreq_cooling_power *power_ops; + u32 res_util; + int capped_state; + struct dev_pm_qos_request req_max_freq; + struct em_perf_domain *em_pd; +}; + +struct _thermal_state { + u64 next_check; + u64 last_interrupt_time; + struct delayed_work therm_work; + long unsigned int count; + long unsigned int last_count; + long unsigned int max_time_ms; + long unsigned int total_time_ms; + bool rate_control_active; + bool new_event; + u8 level; + u8 sample_index; + u8 sample_count; + u8 average; + u8 baseline_temp; + u8 temp_samples[3]; +}; + +struct thermal_state { + struct _thermal_state core_throttle; + struct _thermal_state core_power_limit; + struct _thermal_state package_throttle; + struct _thermal_state package_power_limit; + struct _thermal_state core_thresh0; + struct _thermal_state core_thresh1; + struct _thermal_state pkg_thresh0; + struct _thermal_state pkg_thresh1; +}; + +struct watchdog_info { + __u32 options; + __u32 firmware_version; + __u8 identity[32]; +}; + +struct watchdog_device; + +struct watchdog_ops { + struct module *owner; + int (*start)(struct watchdog_device *); + int (*stop)(struct watchdog_device *); + int 
(*ping)(struct watchdog_device *); + unsigned int (*status)(struct watchdog_device *); + int (*set_timeout)(struct watchdog_device *, unsigned int); + int (*set_pretimeout)(struct watchdog_device *, unsigned int); + unsigned int (*get_timeleft)(struct watchdog_device *); + int (*restart)(struct watchdog_device *, long unsigned int, void *); + long int (*ioctl)(struct watchdog_device *, unsigned int, long unsigned int); +}; + +struct watchdog_governor; + +struct watchdog_core_data; + +struct watchdog_device { + int id; + struct device *parent; + const struct attribute_group **groups; + const struct watchdog_info *info; + const struct watchdog_ops *ops; + const struct watchdog_governor *gov; + unsigned int bootstatus; + unsigned int timeout; + unsigned int pretimeout; + unsigned int min_timeout; + unsigned int max_timeout; + unsigned int min_hw_heartbeat_ms; + unsigned int max_hw_heartbeat_ms; + struct notifier_block reboot_nb; + struct notifier_block restart_nb; + void *driver_data; + struct watchdog_core_data *wd_data; + long unsigned int status; + struct list_head deferred; +}; + +struct watchdog_governor { + const char name[20]; + void (*pretimeout)(struct watchdog_device *); +}; + +struct watchdog_core_data { + struct device dev; + struct cdev cdev; + struct watchdog_device *wdd; + struct mutex lock; + ktime_t last_keepalive; + ktime_t last_hw_keepalive; + ktime_t open_deadline; + struct hrtimer timer; + struct kthread_work work; + long unsigned int status; +}; + +struct watchdog_pretimeout { + struct watchdog_device *wdd; + struct list_head entry; +}; + +struct governor_priv { + struct watchdog_governor *gov; + struct list_head entry; +}; + +struct mdp_device_descriptor_s { + __u32 number; + __u32 major; + __u32 minor; + __u32 raid_disk; + __u32 state; + __u32 reserved[27]; +}; + +typedef struct mdp_device_descriptor_s mdp_disk_t; + +struct mdp_superblock_s { + __u32 md_magic; + __u32 major_version; + __u32 minor_version; + __u32 patch_version; + __u32 gvalid_words; + __u32 set_uuid0; + __u32 ctime; + __u32 level; + __u32 size; + __u32 nr_disks; + __u32 raid_disks; + __u32 md_minor; + __u32 not_persistent; + __u32 set_uuid1; + __u32 set_uuid2; + __u32 set_uuid3; + __u32 gstate_creserved[16]; + __u32 utime; + __u32 state; + __u32 active_disks; + __u32 working_disks; + __u32 failed_disks; + __u32 spare_disks; + __u32 sb_csum; + __u32 events_lo; + __u32 events_hi; + __u32 cp_events_lo; + __u32 cp_events_hi; + __u32 recovery_cp; + __u64 reshape_position; + __u32 new_level; + __u32 delta_disks; + __u32 new_layout; + __u32 new_chunk; + __u32 gstate_sreserved[14]; + __u32 layout; + __u32 chunk_size; + __u32 root_pv; + __u32 root_block; + __u32 pstate_reserved[60]; + mdp_disk_t disks[27]; + __u32 reserved[0]; + mdp_disk_t this_disk; +}; + +typedef struct mdp_superblock_s mdp_super_t; + +struct mdp_superblock_1 { + __le32 magic; + __le32 major_version; + __le32 feature_map; + __le32 pad0; + __u8 set_uuid[16]; + char set_name[32]; + __le64 ctime; + __le32 level; + __le32 layout; + __le64 size; + __le32 chunksize; + __le32 raid_disks; + union { + __le32 bitmap_offset; + struct { + __le16 offset; + __le16 size; + } ppl; + }; + __le32 new_level; + __le64 reshape_position; + __le32 delta_disks; + __le32 new_layout; + __le32 new_chunk; + __le32 new_offset; + __le64 data_offset; + __le64 data_size; + __le64 super_offset; + union { + __le64 recovery_offset; + __le64 journal_tail; + }; + __le32 dev_number; + __le32 cnt_corrected_read; + __u8 device_uuid[16]; + __u8 devflags; + __u8 bblog_shift; + __le16 
bblog_size; + __le32 bblog_offset; + __le64 utime; + __le64 events; + __le64 resync_offset; + __le32 sb_csum; + __le32 max_dev; + __u8 pad3[32]; + __le16 dev_roles[0]; +}; + +struct mdu_version_s { + int major; + int minor; + int patchlevel; +}; + +typedef struct mdu_version_s mdu_version_t; + +struct mdu_array_info_s { + int major_version; + int minor_version; + int patch_version; + unsigned int ctime; + int level; + int size; + int nr_disks; + int raid_disks; + int md_minor; + int not_persistent; + unsigned int utime; + int state; + int active_disks; + int working_disks; + int failed_disks; + int spare_disks; + int layout; + int chunk_size; +}; + +typedef struct mdu_array_info_s mdu_array_info_t; + +struct mdu_disk_info_s { + int number; + int major; + int minor; + int raid_disk; + int state; +}; + +typedef struct mdu_disk_info_s mdu_disk_info_t; + +struct mdu_bitmap_file_s { + char pathname[4096]; +}; + +typedef struct mdu_bitmap_file_s mdu_bitmap_file_t; + +struct mddev; + +struct md_rdev; + +struct md_cluster_operations { + int (*join)(struct mddev *, int); + int (*leave)(struct mddev *); + int (*slot_number)(struct mddev *); + int (*resync_info_update)(struct mddev *, sector_t, sector_t); + void (*resync_info_get)(struct mddev *, sector_t *, sector_t *); + int (*metadata_update_start)(struct mddev *); + int (*metadata_update_finish)(struct mddev *); + void (*metadata_update_cancel)(struct mddev *); + int (*resync_start)(struct mddev *); + int (*resync_finish)(struct mddev *); + int (*area_resyncing)(struct mddev *, int, sector_t, sector_t); + int (*add_new_disk)(struct mddev *, struct md_rdev *); + void (*add_new_disk_cancel)(struct mddev *); + int (*new_disk_ack)(struct mddev *, bool); + int (*remove_disk)(struct mddev *, struct md_rdev *); + void (*load_bitmaps)(struct mddev *, int); + int (*gather_bitmaps)(struct md_rdev *); + int (*resize_bitmaps)(struct mddev *, sector_t, sector_t); + int (*lock_all_bitmaps)(struct mddev *); + void (*unlock_all_bitmaps)(struct mddev *); + void (*update_size)(struct mddev *, sector_t); +}; + +struct md_cluster_info; + +struct md_personality; + +struct md_thread; + +struct bitmap; + +struct mddev { + void *private; + struct md_personality *pers; + dev_t unit; + int md_minor; + struct list_head disks; + long unsigned int flags; + long unsigned int sb_flags; + int suspended; + atomic_t active_io; + int ro; + int sysfs_active; + struct gendisk *gendisk; + struct kobject kobj; + int hold_active; + int major_version; + int minor_version; + int patch_version; + int persistent; + int external; + char metadata_type[17]; + int chunk_sectors; + time64_t ctime; + time64_t utime; + int level; + int layout; + char clevel[16]; + int raid_disks; + int max_disks; + sector_t dev_sectors; + sector_t array_sectors; + int external_size; + __u64 events; + int can_decrease_events; + char uuid[16]; + sector_t reshape_position; + int delta_disks; + int new_level; + int new_layout; + int new_chunk_sectors; + int reshape_backwards; + struct md_thread *thread; + struct md_thread *sync_thread; + char *last_sync_action; + sector_t curr_resync; + sector_t curr_resync_completed; + long unsigned int resync_mark; + sector_t resync_mark_cnt; + sector_t curr_mark_cnt; + sector_t resync_max_sectors; + atomic64_t resync_mismatches; + sector_t suspend_lo; + sector_t suspend_hi; + int sync_speed_min; + int sync_speed_max; + int parallel_resync; + int ok_start_degraded; + long unsigned int recovery; + int recovery_disabled; + int in_sync; + struct mutex open_mutex; + struct mutex 
reconfig_mutex; + atomic_t active; + atomic_t openers; + int changed; + int degraded; + atomic_t recovery_active; + wait_queue_head_t recovery_wait; + sector_t recovery_cp; + sector_t resync_min; + sector_t resync_max; + struct kernfs_node *sysfs_state; + struct kernfs_node *sysfs_action; + struct kernfs_node *sysfs_completed; + struct kernfs_node *sysfs_degraded; + struct kernfs_node *sysfs_level; + struct work_struct del_work; + spinlock_t lock; + wait_queue_head_t sb_wait; + atomic_t pending_writes; + unsigned int safemode; + unsigned int safemode_delay; + struct timer_list safemode_timer; + struct percpu_ref writes_pending; + int sync_checkers; + struct request_queue *queue; + struct bitmap *bitmap; + struct { + struct file *file; + loff_t offset; + long unsigned int space; + loff_t default_offset; + long unsigned int default_space; + struct mutex mutex; + long unsigned int chunksize; + long unsigned int daemon_sleep; + long unsigned int max_write_behind; + int external; + int nodes; + char cluster_name[64]; + } bitmap_info; + atomic_t max_corr_read_errors; + struct list_head all_mddevs; + struct attribute_group *to_remove; + struct bio_set bio_set; + struct bio_set sync_set; + struct bio *flush_bio; + atomic_t flush_pending; + ktime_t start_flush; + ktime_t prev_flush_start; + struct work_struct flush_work; + struct work_struct event_work; + mempool_t *serial_info_pool; + void (*sync_super)(struct mddev *, struct md_rdev *); + struct md_cluster_info *cluster_info; + unsigned int good_device_nr; + unsigned int noio_flag; + bool has_superblocks: 1; + bool fail_last_dev: 1; + bool serialize_policy: 1; +}; + +struct serial_in_rdev; + +struct md_rdev { + struct list_head same_set; + sector_t sectors; + struct mddev *mddev; + int last_events; + struct block_device *meta_bdev; + struct block_device *bdev; + struct page *sb_page; + struct page *bb_page; + int sb_loaded; + __u64 sb_events; + sector_t data_offset; + sector_t new_data_offset; + sector_t sb_start; + int sb_size; + int preferred_minor; + struct kobject kobj; + long unsigned int flags; + wait_queue_head_t blocked_wait; + int desc_nr; + int raid_disk; + int new_raid_disk; + int saved_raid_disk; + union { + sector_t recovery_offset; + sector_t journal_tail; + }; + atomic_t nr_pending; + atomic_t read_errors; + time64_t last_read_error; + atomic_t corrected_errors; + struct serial_in_rdev *serial; + struct work_struct del_work; + struct kernfs_node *sysfs_state; + struct kernfs_node *sysfs_unack_badblocks; + struct kernfs_node *sysfs_badblocks; + struct badblocks badblocks; + struct { + short int offset; + unsigned int size; + sector_t sector; + } ppl; +}; + +struct serial_in_rdev { + struct rb_root_cached serial_rb; + spinlock_t serial_lock; + wait_queue_head_t serial_io_wait; +}; + +enum flag_bits { + Faulty = 0, + In_sync = 1, + Bitmap_sync = 2, + WriteMostly = 3, + AutoDetected = 4, + Blocked = 5, + WriteErrorSeen = 6, + FaultRecorded = 7, + BlockedBadBlocks = 8, + WantReplacement = 9, + Replacement = 10, + Candidate = 11, + Journal = 12, + ClusterRemove = 13, + RemoveSynchronized = 14, + ExternalBbl = 15, + FailFast = 16, + LastDev = 17, + CollisionCheck = 18, +}; + +enum mddev_flags { + MD_ARRAY_FIRST_USE = 0, + MD_CLOSING = 1, + MD_JOURNAL_CLEAN = 2, + MD_HAS_JOURNAL = 3, + MD_CLUSTER_RESYNC_LOCKED = 4, + MD_FAILFAST_SUPPORTED = 5, + MD_HAS_PPL = 6, + MD_HAS_MULTIPLE_PPLS = 7, + MD_ALLOW_SB_UPDATE = 8, + MD_UPDATING_SB = 9, + MD_NOT_READY = 10, + MD_BROKEN = 11, +}; + +enum mddev_sb_flags { + MD_SB_CHANGE_DEVS = 0, + 
MD_SB_CHANGE_CLEAN = 1, + MD_SB_CHANGE_PENDING = 2, + MD_SB_NEED_REWRITE = 3, +}; + +struct md_personality { + char *name; + int level; + struct list_head list; + struct module *owner; + bool (*make_request)(struct mddev *, struct bio *); + int (*run)(struct mddev *); + int (*start)(struct mddev *); + void (*free)(struct mddev *, void *); + void (*status)(struct seq_file *, struct mddev *); + void (*error_handler)(struct mddev *, struct md_rdev *); + int (*hot_add_disk)(struct mddev *, struct md_rdev *); + int (*hot_remove_disk)(struct mddev *, struct md_rdev *); + int (*spare_active)(struct mddev *); + sector_t (*sync_request)(struct mddev *, sector_t, int *); + int (*resize)(struct mddev *, sector_t); + sector_t (*size)(struct mddev *, sector_t, int); + int (*check_reshape)(struct mddev *); + int (*start_reshape)(struct mddev *); + void (*finish_reshape)(struct mddev *); + void (*update_reshape_pos)(struct mddev *); + void (*quiesce)(struct mddev *, int); + void * (*takeover)(struct mddev *); + int (*change_consistency_policy)(struct mddev *, const char *); +}; + +struct md_thread { + void (*run)(struct md_thread *); + struct mddev *mddev; + wait_queue_head_t wqueue; + long unsigned int flags; + struct task_struct *tsk; + long unsigned int timeout; + void *private; +}; + +struct bitmap_page; + +struct bitmap_counts { + spinlock_t lock; + struct bitmap_page *bp; + long unsigned int pages; + long unsigned int missing_pages; + long unsigned int chunkshift; + long unsigned int chunks; +}; + +struct bitmap_storage { + struct file *file; + struct page *sb_page; + struct page **filemap; + long unsigned int *filemap_attr; + long unsigned int file_pages; + long unsigned int bytes; +}; + +struct bitmap { + struct bitmap_counts counts; + struct mddev *mddev; + __u64 events_cleared; + int need_sync; + struct bitmap_storage storage; + long unsigned int flags; + int allclean; + atomic_t behind_writes; + long unsigned int behind_writes_used; + long unsigned int daemon_lastrun; + long unsigned int last_end_sync; + atomic_t pending_writes; + wait_queue_head_t write_wait; + wait_queue_head_t overflow_wait; + wait_queue_head_t behind_wait; + struct kernfs_node *sysfs_can_clear; + int cluster_slot; +}; + +enum recovery_flags { + MD_RECOVERY_RUNNING = 0, + MD_RECOVERY_SYNC = 1, + MD_RECOVERY_RECOVER = 2, + MD_RECOVERY_INTR = 3, + MD_RECOVERY_DONE = 4, + MD_RECOVERY_NEEDED = 5, + MD_RECOVERY_REQUESTED = 6, + MD_RECOVERY_CHECK = 7, + MD_RECOVERY_RESHAPE = 8, + MD_RECOVERY_FROZEN = 9, + MD_RECOVERY_ERROR = 10, + MD_RECOVERY_WAIT = 11, + MD_RESYNCING_REMOTE = 12, +}; + +struct md_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct mddev *, char *); + ssize_t (*store)(struct mddev *, const char *, size_t); +}; + +struct bitmap_page { + char *map; + unsigned int hijacked: 1; + unsigned int pending: 1; + unsigned int count: 30; +}; + +struct super_type { + char *name; + struct module *owner; + int (*load_super)(struct md_rdev *, struct md_rdev *, int); + int (*validate_super)(struct mddev *, struct md_rdev *); + void (*sync_super)(struct mddev *, struct md_rdev *); + long long unsigned int (*rdev_size_change)(struct md_rdev *, sector_t); + int (*allow_new_offset)(struct md_rdev *, long long unsigned int); +}; + +struct rdev_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct md_rdev *, char *); + ssize_t (*store)(struct md_rdev *, const char *, size_t); +}; + +enum array_state { + clear = 0, + inactive = 1, + suspended = 2, + readonly = 3, + read_auto = 4, + clean = 5, + active = 6, + 
write_pending = 7, + active_idle = 8, + broken = 9, + bad_word = 10, +}; + +struct detected_devices_node { + struct list_head list; + dev_t dev; +}; + +typedef __u16 bitmap_counter_t; + +enum bitmap_state { + BITMAP_STALE = 1, + BITMAP_WRITE_ERROR = 2, + BITMAP_HOSTENDIAN = 15, +}; + +struct bitmap_super_s { + __le32 magic; + __le32 version; + __u8 uuid[16]; + __le64 events; + __le64 events_cleared; + __le64 sync_size; + __le32 state; + __le32 chunksize; + __le32 daemon_sleep; + __le32 write_behind; + __le32 sectors_reserved; + __le32 nodes; + __u8 cluster_name[64]; + __u8 pad[120]; +}; + +typedef struct bitmap_super_s bitmap_super_t; + +enum bitmap_page_attr { + BITMAP_PAGE_DIRTY = 0, + BITMAP_PAGE_PENDING = 1, + BITMAP_PAGE_NEEDWRITE = 2, +}; + +struct md_setup_args { + int minor; + int partitioned; + int level; + int chunk; + char *device_names; +}; + +struct dm_ioctl { + __u32 version[3]; + __u32 data_size; + __u32 data_start; + __u32 target_count; + __s32 open_count; + __u32 flags; + __u32 event_nr; + __u32 padding; + __u64 dev; + char name[128]; + char uuid[129]; + char data[7]; +}; + +struct dm_target_spec { + __u64 sector_start; + __u64 length; + __s32 status; + __u32 next; + char target_type[16]; +}; + +struct dm_device { + struct dm_ioctl dmi; + struct dm_target_spec *table[256]; + char *target_args_array[256]; + struct list_head list; +}; + +typedef enum { + STATUSTYPE_INFO = 0, + STATUSTYPE_TABLE = 1, +} status_type_t; + +union map_info___2 { + void *ptr; +}; + +struct dm_target; + +typedef int (*dm_ctr_fn)(struct dm_target *, unsigned int, char **); + +struct dm_table; + +struct target_type; + +struct dm_target { + struct dm_table *table; + struct target_type *type; + sector_t begin; + sector_t len; + uint32_t max_io_len; + unsigned int num_flush_bios; + unsigned int num_discard_bios; + unsigned int num_secure_erase_bios; + unsigned int num_write_same_bios; + unsigned int num_write_zeroes_bios; + unsigned int per_io_data_size; + void *private; + char *error; + bool flush_supported: 1; + bool discards_supported: 1; + bool limit_swap_bios: 1; +}; + +typedef void (*dm_dtr_fn)(struct dm_target *); + +typedef int (*dm_map_fn)(struct dm_target *, struct bio *); + +typedef int (*dm_clone_and_map_request_fn)(struct dm_target *, struct request *, union map_info___2 *, struct request **); + +typedef void (*dm_release_clone_request_fn)(struct request *, union map_info___2 *); + +typedef int (*dm_endio_fn)(struct dm_target *, struct bio *, blk_status_t *); + +typedef int (*dm_request_endio_fn)(struct dm_target *, struct request *, blk_status_t, union map_info___2 *); + +typedef void (*dm_presuspend_fn)(struct dm_target *); + +typedef void (*dm_presuspend_undo_fn)(struct dm_target *); + +typedef void (*dm_postsuspend_fn)(struct dm_target *); + +typedef int (*dm_preresume_fn)(struct dm_target *); + +typedef void (*dm_resume_fn)(struct dm_target *); + +typedef void (*dm_status_fn)(struct dm_target *, status_type_t, unsigned int, char *, unsigned int); + +typedef int (*dm_message_fn)(struct dm_target *, unsigned int, char **, char *, unsigned int); + +typedef int (*dm_prepare_ioctl_fn)(struct dm_target *, struct block_device **); + +struct dm_report_zones_args; + +typedef int (*dm_report_zones_fn)(struct dm_target *, struct dm_report_zones_args *, unsigned int); + +struct dm_report_zones_args { + struct dm_target *tgt; + sector_t next_sector; + void *orig_data; + report_zones_cb orig_cb; + unsigned int zone_idx; + sector_t start; +}; + +struct dm_dev; + +typedef int 
(*iterate_devices_callout_fn)(struct dm_target *, struct dm_dev *, sector_t, sector_t, void *); + +struct dm_dev { + struct block_device *bdev; + struct dax_device *dax_dev; + fmode_t mode; + char name[16]; +}; + +typedef int (*dm_iterate_devices_fn)(struct dm_target *, iterate_devices_callout_fn, void *); + +typedef void (*dm_io_hints_fn)(struct dm_target *, struct queue_limits *); + +typedef int (*dm_busy_fn)(struct dm_target *); + +typedef long int (*dm_dax_direct_access_fn)(struct dm_target *, long unsigned int, long int, void **, pfn_t *); + +typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *, long unsigned int, void *, size_t, struct iov_iter *); + +typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *, long unsigned int, size_t); + +struct target_type { + uint64_t features; + const char *name; + struct module *module; + unsigned int version[3]; + dm_ctr_fn ctr; + dm_dtr_fn dtr; + dm_map_fn map; + dm_clone_and_map_request_fn clone_and_map_rq; + dm_release_clone_request_fn release_clone_rq; + dm_endio_fn end_io; + dm_request_endio_fn rq_end_io; + dm_presuspend_fn presuspend; + dm_presuspend_undo_fn presuspend_undo; + dm_postsuspend_fn postsuspend; + dm_preresume_fn preresume; + dm_resume_fn resume; + dm_status_fn status; + dm_message_fn message; + dm_prepare_ioctl_fn prepare_ioctl; + dm_report_zones_fn report_zones; + dm_busy_fn busy; + dm_iterate_devices_fn iterate_devices; + dm_io_hints_fn io_hints; + dm_dax_direct_access_fn direct_access; + dm_dax_copy_iter_fn dax_copy_from_iter; + dm_dax_copy_iter_fn dax_copy_to_iter; + dm_dax_zero_page_range_fn dax_zero_page_range; + struct list_head list; +}; + +enum dm_uevent_type { + DM_UEVENT_PATH_FAILED = 0, + DM_UEVENT_PATH_REINSTATED = 1, +}; + +struct mapped_device; + +struct dm_uevent { + struct mapped_device *md; + enum kobject_action action; + struct kobj_uevent_env ku_env; + struct list_head elist; + char name[128]; + char uuid[129]; +}; + +enum dm_queue_mode { + DM_TYPE_NONE = 0, + DM_TYPE_BIO_BASED = 1, + DM_TYPE_REQUEST_BASED = 2, + DM_TYPE_DAX_BIO_BASED = 3, +}; + +struct mapped_device___2; + +struct dm_md_mempools; + +struct dm_table { + struct mapped_device___2 *md; + enum dm_queue_mode type; + unsigned int depth; + unsigned int counts[16]; + sector_t *index[16]; + unsigned int num_targets; + unsigned int num_allocated; + sector_t *highs; + struct dm_target *targets; + struct target_type *immutable_target_type; + bool integrity_supported: 1; + bool singleton: 1; + unsigned int integrity_added: 1; + fmode_t mode; + struct list_head devices; + void (*event_fn)(void *); + void *event_context; + struct dm_md_mempools *mempools; + struct blk_keyslot_manager *ksm; +}; + +struct dm_stats_last_position; + +struct dm_stats { + struct mutex mutex; + struct list_head list; + struct dm_stats_last_position *last; + sector_t last_sector; + unsigned int last_rw; +}; + +struct dm_stats_aux { + bool merged; + long long unsigned int duration_ns; +}; + +struct dm_kobject_holder { + struct kobject kobj; + struct completion completion; +}; + +struct mapped_device___2 { + struct mutex suspend_lock; + struct mutex table_devices_lock; + struct list_head table_devices; + void *map; + long unsigned int flags; + struct mutex type_lock; + enum dm_queue_mode type; + int numa_node_id; + struct request_queue *queue; + atomic_t holders; + atomic_t open_count; + struct dm_target *immutable_target; + struct target_type *immutable_target_type; + char name[16]; + struct gendisk *disk; + struct dax_device *dax_dev; + struct work_struct work; + 
wait_queue_head_t wait; + spinlock_t deferred_lock; + struct bio_list deferred; + void *interface_ptr; + wait_queue_head_t eventq; + atomic_t event_nr; + atomic_t uevent_seq; + struct list_head uevent_list; + spinlock_t uevent_lock; + unsigned int internal_suspend_count; + struct bio_set io_bs; + struct bio_set bs; + struct workqueue_struct *wq; + struct hd_geometry geometry; + struct dm_kobject_holder kobj_holder; + int swap_bios; + struct semaphore swap_bios_semaphore; + struct mutex swap_bios_lock; + struct dm_stats stats; + struct blk_mq_tag_set *tag_set; + bool init_tio_pdu: 1; + struct srcu_struct io_barrier; +}; + +struct dm_md_mempools { + struct bio_set bs; + struct bio_set io_bs; +}; + +struct dm_io; + +struct clone_info { + struct dm_table *map; + struct bio *bio; + struct dm_io *io; + sector_t sector; + unsigned int sector_count; +}; + +struct dm_target_io { + unsigned int magic; + struct dm_io *io; + struct dm_target *ti; + unsigned int target_bio_nr; + unsigned int *len_ptr; + bool inside_dm_io; + struct bio clone; +}; + +struct dm_io { + unsigned int magic; + struct mapped_device___2 *md; + blk_status_t status; + atomic_t io_count; + struct bio *orig_bio; + long unsigned int start_time; + spinlock_t endio_lock; + struct dm_stats_aux stats_aux; + struct dm_target_io tio; +}; + +struct table_device { + struct list_head list; + refcount_t count; + struct dm_dev dm_dev; +}; + +struct dm_pr { + u64 old_key; + u64 new_key; + u32 flags; + bool fail_early; +}; + +struct dm_arg_set { + unsigned int argc; + char **argv; +}; + +struct dm_arg { + unsigned int min; + unsigned int max; + char *error; +}; + +struct dm_dev_internal { + struct list_head list; + refcount_t count; + struct dm_dev *dm_dev; +}; + +struct dm_keyslot_manager { + struct blk_keyslot_manager ksm; + struct mapped_device___2 *md; +}; + +struct dm_keyslot_evict_args { + const struct blk_crypto_key *key; + int err; +}; + +enum suspend_mode { + PRESUSPEND = 0, + PRESUSPEND_UNDO = 1, + POSTSUSPEND = 2, +}; + +struct linear_c { + struct dm_dev *dev; + sector_t start; +}; + +struct stripe { + struct dm_dev *dev; + sector_t physical_start; + atomic_t error_count; +}; + +struct stripe_c { + uint32_t stripes; + int stripes_shift; + sector_t stripe_width; + uint32_t chunk_size; + int chunk_size_shift; + struct dm_target *ti; + struct work_struct trigger_event; + struct stripe stripe[0]; +}; + +struct dm_target_deps { + __u32 count; + __u32 padding; + __u64 dev[0]; +}; + +struct dm_name_list { + __u64 dev; + __u32 next; + char name[0]; +}; + +struct dm_target_versions { + __u32 next; + __u32 version[3]; + char name[0]; +}; + +struct dm_target_msg { + __u64 sector; + char message[0]; +}; + +enum { + DM_VERSION_CMD = 0, + DM_REMOVE_ALL_CMD = 1, + DM_LIST_DEVICES_CMD = 2, + DM_DEV_CREATE_CMD = 3, + DM_DEV_REMOVE_CMD = 4, + DM_DEV_RENAME_CMD = 5, + DM_DEV_SUSPEND_CMD = 6, + DM_DEV_STATUS_CMD = 7, + DM_DEV_WAIT_CMD = 8, + DM_TABLE_LOAD_CMD = 9, + DM_TABLE_CLEAR_CMD = 10, + DM_TABLE_DEPS_CMD = 11, + DM_TABLE_STATUS_CMD = 12, + DM_LIST_VERSIONS_CMD = 13, + DM_TARGET_MSG_CMD = 14, + DM_DEV_SET_GEOMETRY_CMD = 15, + DM_DEV_ARM_POLL_CMD = 16, + DM_GET_TARGET_VERSION_CMD = 17, +}; + +struct dm_file { + volatile unsigned int global_event_nr; +}; + +struct hash_cell { + struct rb_node name_node; + struct rb_node uuid_node; + bool name_set; + bool uuid_set; + char *name; + char *uuid; + struct mapped_device___2 *md; + struct dm_table *new_map; +}; + +struct vers_iter { + size_t param_size; + struct dm_target_versions *vers; + struct 
dm_target_versions *old_vers; + char *end; + uint32_t flags; +}; + +typedef int (*ioctl_fn)(struct file *, struct dm_ioctl *, size_t); + +struct dm_io_region { + struct block_device *bdev; + sector_t sector; + sector_t count; +}; + +struct page_list { + struct page_list *next; + struct page *page; +}; + +typedef void (*io_notify_fn)(long unsigned int, void *); + +enum dm_io_mem_type { + DM_IO_PAGE_LIST = 0, + DM_IO_BIO = 1, + DM_IO_VMA = 2, + DM_IO_KMEM = 3, +}; + +struct dm_io_memory { + enum dm_io_mem_type type; + unsigned int offset; + union { + struct page_list *pl; + struct bio *bio; + void *vma; + void *addr; + } ptr; +}; + +struct dm_io_notify { + io_notify_fn fn; + void *context; +}; + +struct dm_io_client; + +struct dm_io_request { + int bi_op; + int bi_op_flags; + struct dm_io_memory mem; + struct dm_io_notify notify; + struct dm_io_client *client; +}; + +struct dm_io_client { + mempool_t pool; + struct bio_set bios; +}; + +struct io { + long unsigned int error_bits; + atomic_t count; + struct dm_io_client *client; + io_notify_fn callback; + void *context; + void *vma_invalidate_address; + long unsigned int vma_invalidate_size; + long: 64; +}; + +struct dpages { + void (*get_page)(struct dpages *, struct page **, long unsigned int *, unsigned int *); + void (*next_page)(struct dpages *); + union { + unsigned int context_u; + struct bvec_iter context_bi; + }; + void *context_ptr; + void *vma_invalidate_address; + long unsigned int vma_invalidate_size; +}; + +struct sync_io { + long unsigned int error_bits; + struct completion wait; +}; + +struct dm_kcopyd_throttle { + unsigned int throttle; + unsigned int num_io_jobs; + unsigned int io_period; + unsigned int total_period; + unsigned int last_jiffies; +}; + +typedef void (*dm_kcopyd_notify_fn)(int, long unsigned int, void *); + +struct dm_kcopyd_client { + struct page_list *pages; + unsigned int nr_reserved_pages; + unsigned int nr_free_pages; + unsigned int sub_job_size; + struct dm_io_client *io_client; + wait_queue_head_t destroyq; + mempool_t job_pool; + struct workqueue_struct *kcopyd_wq; + struct work_struct kcopyd_work; + struct dm_kcopyd_throttle *throttle; + atomic_t nr_jobs; + spinlock_t job_lock; + struct list_head callback_jobs; + struct list_head complete_jobs; + struct list_head io_jobs; + struct list_head pages_jobs; +}; + +struct kcopyd_job { + struct dm_kcopyd_client *kc; + struct list_head list; + long unsigned int flags; + int read_err; + long unsigned int write_err; + int rw; + struct dm_io_region source; + unsigned int num_dests; + struct dm_io_region dests[8]; + struct page_list *pages; + dm_kcopyd_notify_fn fn; + void *context; + struct mutex lock; + atomic_t sub_jobs; + sector_t progress; + sector_t write_offset; + struct kcopyd_job *master_job; +}; + +struct dm_sysfs_attr { + struct attribute attr; + ssize_t (*show)(struct mapped_device___2 *, char *); + ssize_t (*store)(struct mapped_device___2 *, const char *, size_t); +}; + +struct dm_stats_last_position { + sector_t last_sector; + unsigned int last_rw; +}; + +struct dm_stat_percpu { + long long unsigned int sectors[2]; + long long unsigned int ios[2]; + long long unsigned int merges[2]; + long long unsigned int ticks[2]; + long long unsigned int io_ticks[2]; + long long unsigned int io_ticks_total; + long long unsigned int time_in_queue; + long long unsigned int *histogram; +}; + +struct dm_stat_shared { + atomic_t in_flight[2]; + long long unsigned int stamp; + struct dm_stat_percpu tmp; +}; + +struct dm_stat { + struct list_head list_entry; + int id; 
+ unsigned int stat_flags; + size_t n_entries; + sector_t start; + sector_t end; + sector_t step; + unsigned int n_histogram_entries; + long long unsigned int *histogram_boundaries; + const char *program_id; + const char *aux_data; + struct callback_head callback_head; + size_t shared_alloc_size; + size_t percpu_alloc_size; + size_t histogram_alloc_size; + struct dm_stat_percpu *stat_percpu[8192]; + struct dm_stat_shared stat_shared[0]; +}; + +struct dm_rq_target_io; + +struct dm_rq_clone_bio_info { + struct bio *orig; + struct dm_rq_target_io *tio; + struct bio clone; +}; + +struct dm_rq_target_io { + struct mapped_device___2 *md; + struct dm_target *ti; + struct request *orig; + struct request *clone; + struct kthread_work work; + blk_status_t error; + union map_info___2 info; + struct dm_stats_aux stats_aux; + long unsigned int duration_jiffies; + unsigned int n_sectors; + unsigned int completed; +}; + +enum dev_type { + DEV_UNKNOWN = 0, + DEV_X1 = 1, + DEV_X2 = 2, + DEV_X4 = 3, + DEV_X8 = 4, + DEV_X16 = 5, + DEV_X32 = 6, + DEV_X64 = 7, +}; + +enum hw_event_mc_err_type { + HW_EVENT_ERR_CORRECTED = 0, + HW_EVENT_ERR_UNCORRECTED = 1, + HW_EVENT_ERR_DEFERRED = 2, + HW_EVENT_ERR_FATAL = 3, + HW_EVENT_ERR_INFO = 4, +}; + +enum mem_type { + MEM_EMPTY = 0, + MEM_RESERVED = 1, + MEM_UNKNOWN = 2, + MEM_FPM = 3, + MEM_EDO = 4, + MEM_BEDO = 5, + MEM_SDR = 6, + MEM_RDR = 7, + MEM_DDR = 8, + MEM_RDDR = 9, + MEM_RMBS = 10, + MEM_DDR2 = 11, + MEM_FB_DDR2 = 12, + MEM_RDDR2 = 13, + MEM_XDR = 14, + MEM_DDR3 = 15, + MEM_RDDR3 = 16, + MEM_LRDDR3 = 17, + MEM_LPDDR3 = 18, + MEM_DDR4 = 19, + MEM_RDDR4 = 20, + MEM_LRDDR4 = 21, + MEM_LPDDR4 = 22, + MEM_DDR5 = 23, + MEM_NVDIMM = 24, + MEM_WIO2 = 25, +}; + +enum edac_type { + EDAC_UNKNOWN = 0, + EDAC_NONE = 1, + EDAC_RESERVED = 2, + EDAC_PARITY = 3, + EDAC_EC = 4, + EDAC_SECDED = 5, + EDAC_S2ECD2ED = 6, + EDAC_S4ECD4ED = 7, + EDAC_S8ECD8ED = 8, + EDAC_S16ECD16ED = 9, +}; + +enum scrub_type { + SCRUB_UNKNOWN = 0, + SCRUB_NONE = 1, + SCRUB_SW_PROG = 2, + SCRUB_SW_SRC = 3, + SCRUB_SW_PROG_SRC = 4, + SCRUB_SW_TUNABLE = 5, + SCRUB_HW_PROG = 6, + SCRUB_HW_SRC = 7, + SCRUB_HW_PROG_SRC = 8, + SCRUB_HW_TUNABLE = 9, +}; + +enum edac_mc_layer_type { + EDAC_MC_LAYER_BRANCH = 0, + EDAC_MC_LAYER_CHANNEL = 1, + EDAC_MC_LAYER_SLOT = 2, + EDAC_MC_LAYER_CHIP_SELECT = 3, + EDAC_MC_LAYER_ALL_MEM = 4, +}; + +struct edac_mc_layer { + enum edac_mc_layer_type type; + unsigned int size; + bool is_virt_csrow; +}; + +struct mem_ctl_info; + +struct dimm_info { + struct device dev; + char label[32]; + unsigned int location[3]; + struct mem_ctl_info *mci; + unsigned int idx; + u32 grain; + enum dev_type dtype; + enum mem_type mtype; + enum edac_type edac_mode; + u32 nr_pages; + unsigned int csrow; + unsigned int cschannel; + u16 smbios_handle; + u32 ce_count; + u32 ue_count; +}; + +struct mcidev_sysfs_attribute; + +struct edac_raw_error_desc { + char location[256]; + char label[296]; + long int grain; + u16 error_count; + enum hw_event_mc_err_type type; + int top_layer; + int mid_layer; + int low_layer; + long unsigned int page_frame_number; + long unsigned int offset_in_page; + long unsigned int syndrome; + const char *msg; + const char *other_detail; +}; + +struct csrow_info; + +struct mem_ctl_info { + struct device dev; + struct bus_type *bus; + struct list_head link; + struct module *owner; + long unsigned int mtype_cap; + long unsigned int edac_ctl_cap; + long unsigned int edac_cap; + long unsigned int scrub_cap; + enum scrub_type scrub_mode; + int (*set_sdram_scrub_rate)(struct 
mem_ctl_info *, u32); + int (*get_sdram_scrub_rate)(struct mem_ctl_info *); + void (*edac_check)(struct mem_ctl_info *); + long unsigned int (*ctl_page_to_phys)(struct mem_ctl_info *, long unsigned int); + int mc_idx; + struct csrow_info **csrows; + unsigned int nr_csrows; + unsigned int num_cschannel; + unsigned int n_layers; + struct edac_mc_layer *layers; + bool csbased; + unsigned int tot_dimms; + struct dimm_info **dimms; + struct device *pdev; + const char *mod_name; + const char *ctl_name; + const char *dev_name; + void *pvt_info; + long unsigned int start_time; + u32 ce_noinfo_count; + u32 ue_noinfo_count; + u32 ue_mc; + u32 ce_mc; + struct completion complete; + const struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes; + struct delayed_work work; + struct edac_raw_error_desc error_desc; + int op_state; + struct dentry *debugfs; + u8 fake_inject_layer[3]; + bool fake_inject_ue; + u16 fake_inject_count; +}; + +struct rank_info { + int chan_idx; + struct csrow_info *csrow; + struct dimm_info *dimm; + u32 ce_count; +}; + +struct csrow_info { + struct device dev; + long unsigned int first_page; + long unsigned int last_page; + long unsigned int page_mask; + int csrow_idx; + u32 ue_count; + u32 ce_count; + struct mem_ctl_info *mci; + u32 nr_channels; + struct rank_info **channels; +}; + +struct edac_device_counter { + u32 ue_count; + u32 ce_count; +}; + +struct edac_device_ctl_info; + +struct edac_dev_sysfs_attribute { + struct attribute attr; + ssize_t (*show)(struct edac_device_ctl_info *, char *); + ssize_t (*store)(struct edac_device_ctl_info *, const char *, size_t); +}; + +struct edac_device_instance; + +struct edac_device_ctl_info { + struct list_head link; + struct module *owner; + int dev_idx; + int log_ue; + int log_ce; + int panic_on_ue; + unsigned int poll_msec; + long unsigned int delay; + struct edac_dev_sysfs_attribute *sysfs_attributes; + struct bus_type *edac_subsys; + int op_state; + struct delayed_work work; + void (*edac_check)(struct edac_device_ctl_info *); + struct device *dev; + const char *mod_name; + const char *ctl_name; + const char *dev_name; + void *pvt_info; + long unsigned int start_time; + struct completion removal_complete; + char name[32]; + u32 nr_instances; + struct edac_device_instance *instances; + struct edac_device_counter counters; + struct kobject kobj; +}; + +struct edac_device_block; + +struct edac_dev_sysfs_block_attribute { + struct attribute attr; + ssize_t (*show)(struct kobject *, struct attribute *, char *); + ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t); + struct edac_device_block *block; + unsigned int value; +}; + +struct edac_device_block { + struct edac_device_instance *instance; + char name[32]; + struct edac_device_counter counters; + int nr_attribs; + struct edac_dev_sysfs_block_attribute *block_attributes; + struct kobject kobj; +}; + +struct edac_device_instance { + struct edac_device_ctl_info *ctl; + char name[35]; + struct edac_device_counter counters; + u32 nr_blocks; + struct edac_device_block *blocks; + struct kobject kobj; +}; + +struct ctl_info_attribute { + struct attribute attr; + ssize_t (*show)(struct edac_device_ctl_info *, char *); + ssize_t (*store)(struct edac_device_ctl_info *, const char *, size_t); +}; + +struct instance_attribute { + struct attribute attr; + ssize_t (*show)(struct edac_device_instance *, char *); + ssize_t (*store)(struct edac_device_instance *, const char *, size_t); +}; + +struct edac_pci_counter { + atomic_t pe_count; + atomic_t npe_count; +}; + 
+struct edac_pci_ctl_info { + struct list_head link; + int pci_idx; + struct bus_type *edac_subsys; + int op_state; + struct delayed_work work; + void (*edac_check)(struct edac_pci_ctl_info *); + struct device *dev; + const char *mod_name; + const char *ctl_name; + const char *dev_name; + void *pvt_info; + long unsigned int start_time; + struct completion complete; + char name[32]; + struct edac_pci_counter counters; + struct kobject kobj; +}; + +struct edac_pci_gen_data { + int edac_idx; +}; + +struct instance_attribute___2 { + struct attribute attr; + ssize_t (*show)(struct edac_pci_ctl_info *, char *); + ssize_t (*store)(struct edac_pci_ctl_info *, const char *, size_t); +}; + +struct edac_pci_dev_attribute { + struct attribute attr; + void *value; + ssize_t (*show)(void *, char *); + ssize_t (*store)(void *, const char *, size_t); +}; + +typedef void (*pci_parity_check_fn_t)(struct pci_dev *); + +struct ghes_pvt { + struct mem_ctl_info *mci; + char other_detail[400]; + char msg[80]; +}; + +struct ghes_hw_desc { + int num_dimms; + struct dimm_info *dimms; +}; + +struct memdev_dmi_entry { + u8 type; + u8 length; + u16 handle; + u16 phys_mem_array_handle; + u16 mem_err_info_handle; + u16 total_width; + u16 data_width; + u16 size; + u8 form_factor; + u8 device_set; + u8 device_locator; + u8 bank_locator; + u8 memory_type; + u16 type_detail; + u16 speed; + u8 manufacturer; + u8 serial_number; + u8 asset_tag; + u8 part_number; + u8 attributes; + u32 extended_size; + u16 conf_mem_clk_speed; +} __attribute__((packed)); + +struct eisa_device_id { + char sig[8]; + kernel_ulong_t driver_data; +}; + +struct eisa_device { + struct eisa_device_id id; + int slot; + int state; + long unsigned int base_addr; + struct resource res[4]; + u64 dma_mask; + struct device dev; + char pretty_name[50]; +}; + +struct eisa_driver { + const struct eisa_device_id *id_table; + struct device_driver driver; +}; + +struct eisa_root_device { + struct device *dev; + struct resource *res; + long unsigned int bus_base_addr; + int slots; + int force_probe; + u64 dma_mask; + int bus_nr; + struct resource eisa_root_res; +}; + +struct eisa_device_info { + struct eisa_device_id id; + char name[50]; +}; + +enum opp_table_access { + OPP_TABLE_ACCESS_UNKNOWN = 0, + OPP_TABLE_ACCESS_EXCLUSIVE = 1, + OPP_TABLE_ACCESS_SHARED = 2, +}; + +struct icc_path; + +struct dev_pm_opp___2; + +struct dev_pm_set_opp_data; + +struct dev_pm_opp_supply; + +struct opp_table___2 { + struct list_head node; + struct list_head lazy; + struct blocking_notifier_head head; + struct list_head dev_list; + struct list_head opp_list; + struct kref kref; + struct mutex lock; + struct device_node *np; + long unsigned int clock_latency_ns_max; + unsigned int voltage_tolerance_v1; + unsigned int parsed_static_opps; + enum opp_table_access shared_opp; + long unsigned int current_rate; + struct dev_pm_opp___2 *current_opp; + struct dev_pm_opp___2 *suspend_opp; + struct mutex genpd_virt_dev_lock; + struct device **genpd_virt_devs; + struct opp_table___2 **required_opp_tables; + unsigned int required_opp_count; + unsigned int *supported_hw; + unsigned int supported_hw_count; + const char *prop_name; + struct clk *clk; + struct regulator **regulators; + int regulator_count; + struct icc_path **paths; + unsigned int path_count; + bool enabled; + bool genpd_performance_state; + bool is_genpd; + int (*set_opp)(struct dev_pm_set_opp_data *); + struct dev_pm_opp_supply *sod_supplies; + struct dev_pm_set_opp_data *set_opp_data; + struct dentry *dentry; + char 
dentry_name[255]; +}; + +struct dev_pm_opp_icc_bw; + +struct dev_pm_opp___2 { + struct list_head node; + struct kref kref; + bool available; + bool dynamic; + bool turbo; + bool suspend; + bool removed; + unsigned int pstate; + long unsigned int rate; + unsigned int level; + struct dev_pm_opp_supply *supplies; + struct dev_pm_opp_icc_bw *bandwidth; + long unsigned int clock_latency_ns; + struct dev_pm_opp___2 **required_opps; + struct opp_table___2 *opp_table; + struct device_node *np; + struct dentry *dentry; +}; + +enum dev_pm_opp_event { + OPP_EVENT_ADD = 0, + OPP_EVENT_REMOVE = 1, + OPP_EVENT_ENABLE = 2, + OPP_EVENT_DISABLE = 3, + OPP_EVENT_ADJUST_VOLTAGE = 4, +}; + +struct dev_pm_opp_supply { + long unsigned int u_volt; + long unsigned int u_volt_min; + long unsigned int u_volt_max; + long unsigned int u_amp; +}; + +struct dev_pm_opp_icc_bw { + u32 avg; + u32 peak; +}; + +struct dev_pm_opp_info { + long unsigned int rate; + struct dev_pm_opp_supply *supplies; +}; + +struct dev_pm_set_opp_data { + struct dev_pm_opp_info old_opp; + struct dev_pm_opp_info new_opp; + struct regulator **regulators; + unsigned int regulator_count; + struct clk *clk; + struct device *dev; +}; + +struct opp_device { + struct list_head node; + const struct device *dev; + struct dentry *dentry; +}; + +struct cpufreq_policy_data { + struct cpufreq_cpuinfo cpuinfo; + struct cpufreq_frequency_table *freq_table; + unsigned int cpu; + unsigned int min; + unsigned int max; +}; + +struct freq_attr { + struct attribute attr; + ssize_t (*show)(struct cpufreq_policy *, char *); + ssize_t (*store)(struct cpufreq_policy *, const char *, size_t); +}; + +struct cpufreq_driver { + char name[16]; + u16 flags; + void *driver_data; + int (*init)(struct cpufreq_policy *); + int (*verify)(struct cpufreq_policy_data *); + int (*setpolicy)(struct cpufreq_policy *); + int (*target)(struct cpufreq_policy *, unsigned int, unsigned int); + int (*target_index)(struct cpufreq_policy *, unsigned int); + unsigned int (*fast_switch)(struct cpufreq_policy *, unsigned int); + void (*adjust_perf)(unsigned int, long unsigned int, long unsigned int, long unsigned int); + unsigned int (*resolve_freq)(struct cpufreq_policy *, unsigned int); + unsigned int (*get_intermediate)(struct cpufreq_policy *, unsigned int); + int (*target_intermediate)(struct cpufreq_policy *, unsigned int); + unsigned int (*get)(unsigned int); + void (*update_limits)(unsigned int); + int (*bios_limit)(int, unsigned int *); + int (*online)(struct cpufreq_policy *); + int (*offline)(struct cpufreq_policy *); + int (*exit)(struct cpufreq_policy *); + void (*stop_cpu)(struct cpufreq_policy *); + int (*suspend)(struct cpufreq_policy *); + int (*resume)(struct cpufreq_policy *); + void (*ready)(struct cpufreq_policy *); + struct freq_attr **attr; + bool boost_enabled; + int (*set_boost)(struct cpufreq_policy *, int); +}; + +struct cpufreq_stats { + unsigned int total_trans; + long long unsigned int last_time; + unsigned int max_state; + unsigned int state_num; + unsigned int last_index; + u64 *time_in_state; + unsigned int *freq_table; + unsigned int *trans_table; + unsigned int reset_pending; + long long unsigned int reset_time; +}; + +enum { + OD_NORMAL_SAMPLE = 0, + OD_SUB_SAMPLE = 1, +}; + +struct dbs_data { + struct gov_attr_set attr_set; + void *tuners; + unsigned int ignore_nice_load; + unsigned int sampling_rate; + unsigned int sampling_down_factor; + unsigned int up_threshold; + unsigned int io_is_busy; +}; + +struct policy_dbs_info { + struct cpufreq_policy *policy; + 
struct mutex update_mutex; + u64 last_sample_time; + s64 sample_delay_ns; + atomic_t work_count; + struct irq_work irq_work; + struct work_struct work; + struct dbs_data *dbs_data; + struct list_head list; + unsigned int rate_mult; + unsigned int idle_periods; + bool is_shared; + bool work_in_progress; +}; + +struct dbs_governor { + struct cpufreq_governor gov; + struct kobj_type kobj_type; + struct dbs_data *gdbs_data; + unsigned int (*gov_dbs_update)(struct cpufreq_policy *); + struct policy_dbs_info * (*alloc)(); + void (*free)(struct policy_dbs_info *); + int (*init)(struct dbs_data *); + void (*exit)(struct dbs_data *); + void (*start)(struct cpufreq_policy *); +}; + +struct od_ops { + unsigned int (*powersave_bias_target)(struct cpufreq_policy *, unsigned int, unsigned int); +}; + +struct od_policy_dbs_info { + struct policy_dbs_info policy_dbs; + unsigned int freq_lo; + unsigned int freq_lo_delay_us; + unsigned int freq_hi_delay_us; + unsigned int sample_type: 1; +}; + +struct od_dbs_tuners { + unsigned int powersave_bias; +}; + +struct cs_policy_dbs_info { + struct policy_dbs_info policy_dbs; + unsigned int down_skip; + unsigned int requested_freq; +}; + +struct cs_dbs_tuners { + unsigned int down_threshold; + unsigned int freq_step; +}; + +struct cpu_dbs_info { + u64 prev_cpu_idle; + u64 prev_update_time; + u64 prev_cpu_nice; + unsigned int prev_load; + struct update_util_data update_util; + struct policy_dbs_info *policy_dbs; +}; + +enum { + UNDEFINED_CAPABLE = 0, + SYSTEM_INTEL_MSR_CAPABLE = 1, + SYSTEM_AMD_MSR_CAPABLE = 2, + SYSTEM_IO_CAPABLE = 3, +}; + +struct acpi_cpufreq_data { + unsigned int resume; + unsigned int cpu_feature; + unsigned int acpi_perf_cpu; + cpumask_var_t freqdomain_cpus; + void (*cpu_freq_write)(struct acpi_pct_register *, u32); + u32 (*cpu_freq_read)(struct acpi_pct_register *); +}; + +struct drv_cmd { + struct acpi_pct_register *reg; + u32 val; + union { + void (*write)(struct acpi_pct_register *, u32); + u32 (*read)(struct acpi_pct_register *); + } func; +}; + +struct powernow_k8_data { + unsigned int cpu; + u32 numps; + u32 batps; + u32 rvo; + u32 irt; + u32 vidmvs; + u32 vstable; + u32 plllock; + u32 exttype; + u32 currvid; + u32 currfid; + struct cpufreq_frequency_table *powernow_table; + struct acpi_processor_performance acpi_data; + struct cpumask *available_cores; +}; + +struct psb_s { + u8 signature[10]; + u8 tableversion; + u8 flags1; + u16 vstable; + u8 flags2; + u8 num_tables; + u32 cpuid; + u8 plllocktime; + u8 maxfid; + u8 maxvid; + u8 numps; +}; + +struct pst_s { + u8 fid; + u8 vid; +}; + +struct powernowk8_target_arg { + struct cpufreq_policy *pol; + unsigned int newstate; +}; + +struct init_on_cpu { + struct powernow_k8_data *data; + int rc; +}; + +struct pcc_register_resource { + u8 descriptor; + u16 length; + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 access_size; + u64 address; +} __attribute__((packed)); + +struct pcc_memory_resource { + u8 descriptor; + u16 length; + u8 space_id; + u8 resource_usage; + u8 type_specific; + u64 granularity; + u64 minimum; + u64 maximum; + u64 translation_offset; + u64 address_length; +} __attribute__((packed)); + +struct pcc_header { + u32 signature; + u16 length; + u8 major; + u8 minor; + u32 features; + u16 command; + u16 status; + u32 latency; + u32 minimum_time; + u32 maximum_time; + u32 nominal; + u32 throttled_frequency; + u32 minimum_frequency; +}; + +struct pcc_cpu { + u32 input_offset; + u32 output_offset; +}; + +struct cpu_id { + __u8 x86; + __u8 x86_model; + __u8 x86_stepping; +}; + 
+enum { + CPU_BANIAS = 0, + CPU_DOTHAN_A1 = 1, + CPU_DOTHAN_A2 = 2, + CPU_DOTHAN_B0 = 3, + CPU_MP4HT_D0 = 4, + CPU_MP4HT_E0 = 5, +}; + +struct cpu_model { + const struct cpu_id *cpu_id; + const char *model_name; + unsigned int max_freq; + struct cpufreq_frequency_table *op_points; +}; + +enum acpi_preferred_pm_profiles { + PM_UNSPECIFIED = 0, + PM_DESKTOP = 1, + PM_MOBILE = 2, + PM_WORKSTATION = 3, + PM_ENTERPRISE_SERVER = 4, + PM_SOHO_SERVER = 5, + PM_APPLIANCE_PC = 6, + PM_PERFORMANCE_SERVER = 7, + PM_TABLET = 8, +}; + +struct sample { + int32_t core_avg_perf; + int32_t busy_scaled; + u64 aperf; + u64 mperf; + u64 tsc; + u64 time; +}; + +struct pstate_data { + int current_pstate; + int min_pstate; + int max_pstate; + int max_pstate_physical; + int scaling; + int turbo_pstate; + unsigned int max_freq; + unsigned int turbo_freq; +}; + +struct vid_data { + int min; + int max; + int turbo; + int32_t ratio; +}; + +struct global_params { + bool no_turbo; + bool turbo_disabled; + bool turbo_disabled_mf; + int max_perf_pct; + int min_perf_pct; +}; + +struct cpudata { + int cpu; + unsigned int policy; + struct update_util_data update_util; + bool update_util_set; + struct pstate_data pstate; + struct vid_data vid; + u64 last_update; + u64 last_sample_time; + u64 aperf_mperf_shift; + u64 prev_aperf; + u64 prev_mperf; + u64 prev_tsc; + u64 prev_cummulative_iowait; + struct sample sample; + int32_t min_perf_ratio; + int32_t max_perf_ratio; + struct acpi_processor_performance acpi_perf_data; + bool valid_pss_table; + unsigned int iowait_boost; + s16 epp_powersave; + s16 epp_policy; + s16 epp_default; + s16 epp_cached; + u64 hwp_req_cached; + u64 hwp_cap_cached; + u64 last_io_update; + unsigned int sched_flags; + u32 hwp_boost_min; + bool suspended; +}; + +struct pstate_funcs { + int (*get_max)(); + int (*get_max_physical)(); + int (*get_min)(); + int (*get_turbo)(); + int (*get_scaling)(); + int (*get_aperf_mperf_shift)(); + u64 (*get_val)(struct cpudata *, int); + void (*get_vid)(struct cpudata *); +}; + +enum { + PSS = 0, + PPC = 1, +}; + +struct cpuidle_governor { + char name[16]; + struct list_head governor_list; + unsigned int rating; + int (*enable)(struct cpuidle_driver *, struct cpuidle_device *); + void (*disable)(struct cpuidle_driver *, struct cpuidle_device *); + int (*select)(struct cpuidle_driver *, struct cpuidle_device *, bool *); + void (*reflect)(struct cpuidle_device *, int); +}; + +struct cpuidle_state_kobj { + struct cpuidle_state *state; + struct cpuidle_state_usage *state_usage; + struct completion kobj_unregister; + struct kobject kobj; + struct cpuidle_device *device; +}; + +struct cpuidle_device_kobj { + struct cpuidle_device *dev; + struct completion kobj_unregister; + struct kobject kobj; +}; + +struct cpuidle_attr { + struct attribute attr; + ssize_t (*show)(struct cpuidle_device *, char *); + ssize_t (*store)(struct cpuidle_device *, const char *, size_t); +}; + +struct cpuidle_state_attr { + struct attribute attr; + ssize_t (*show)(struct cpuidle_state *, struct cpuidle_state_usage *, char *); + ssize_t (*store)(struct cpuidle_state *, struct cpuidle_state_usage *, const char *, size_t); +}; + +struct ladder_device_state { + struct { + u32 promotion_count; + u32 demotion_count; + u64 promotion_time_ns; + u64 demotion_time_ns; + } threshold; + struct { + int promotion_count; + int demotion_count; + } stats; +}; + +struct ladder_device { + struct ladder_device_state states[10]; +}; + +struct menu_device { + int needs_update; + int tick_wakeup; + u64 next_timer_ns; + 
unsigned int bucket; + unsigned int correction_factor[12]; + unsigned int intervals[8]; + int interval_ptr; +}; + +struct teo_idle_state { + unsigned int early_hits; + unsigned int hits; + unsigned int misses; +}; + +struct teo_cpu { + s64 time_span_ns; + s64 sleep_length_ns; + struct teo_idle_state states[10]; + int interval_idx; + u64 intervals[8]; +}; + +struct mmc_cid { + unsigned int manfid; + char prod_name[8]; + unsigned char prv; + unsigned int serial; + short unsigned int oemid; + short unsigned int year; + unsigned char hwrev; + unsigned char fwrev; + unsigned char month; +}; + +struct mmc_csd { + unsigned char structure; + unsigned char mmca_vsn; + short unsigned int cmdclass; + short unsigned int taac_clks; + unsigned int taac_ns; + unsigned int c_size; + unsigned int r2w_factor; + unsigned int max_dtr; + unsigned int erase_size; + unsigned int read_blkbits; + unsigned int write_blkbits; + unsigned int capacity; + unsigned int read_partial: 1; + unsigned int read_misalign: 1; + unsigned int write_partial: 1; + unsigned int write_misalign: 1; + unsigned int dsr_imp: 1; +}; + +struct mmc_ext_csd { + u8 rev; + u8 erase_group_def; + u8 sec_feature_support; + u8 rel_sectors; + u8 rel_param; + bool enhanced_rpmb_supported; + u8 part_config; + u8 cache_ctrl; + u8 rst_n_function; + u8 max_packed_writes; + u8 max_packed_reads; + u8 packed_event_en; + unsigned int part_time; + unsigned int sa_timeout; + unsigned int generic_cmd6_time; + unsigned int power_off_longtime; + u8 power_off_notification; + unsigned int hs_max_dtr; + unsigned int hs200_max_dtr; + unsigned int sectors; + unsigned int hc_erase_size; + unsigned int hc_erase_timeout; + unsigned int sec_trim_mult; + unsigned int sec_erase_mult; + unsigned int trim_timeout; + bool partition_setting_completed; + long long unsigned int enhanced_area_offset; + unsigned int enhanced_area_size; + unsigned int cache_size; + bool hpi_en; + bool hpi; + unsigned int hpi_cmd; + bool bkops; + bool man_bkops_en; + bool auto_bkops_en; + unsigned int data_sector_size; + unsigned int data_tag_unit_size; + unsigned int boot_ro_lock; + bool boot_ro_lockable; + bool ffu_capable; + bool cmdq_en; + bool cmdq_support; + unsigned int cmdq_depth; + u8 fwrev[8]; + u8 raw_exception_status; + u8 raw_partition_support; + u8 raw_rpmb_size_mult; + u8 raw_erased_mem_count; + u8 strobe_support; + u8 raw_ext_csd_structure; + u8 raw_card_type; + u8 raw_driver_strength; + u8 out_of_int_time; + u8 raw_pwr_cl_52_195; + u8 raw_pwr_cl_26_195; + u8 raw_pwr_cl_52_360; + u8 raw_pwr_cl_26_360; + u8 raw_s_a_timeout; + u8 raw_hc_erase_gap_size; + u8 raw_erase_timeout_mult; + u8 raw_hc_erase_grp_size; + u8 raw_sec_trim_mult; + u8 raw_sec_erase_mult; + u8 raw_sec_feature_support; + u8 raw_trim_mult; + u8 raw_pwr_cl_200_195; + u8 raw_pwr_cl_200_360; + u8 raw_pwr_cl_ddr_52_195; + u8 raw_pwr_cl_ddr_52_360; + u8 raw_pwr_cl_ddr_200_360; + u8 raw_bkops_status; + u8 raw_sectors[4]; + u8 pre_eol_info; + u8 device_life_time_est_typ_a; + u8 device_life_time_est_typ_b; + unsigned int feature_support; +}; + +struct sd_scr { + unsigned char sda_vsn; + unsigned char sda_spec3; + unsigned char sda_spec4; + unsigned char sda_specx; + unsigned char bus_widths; + unsigned char cmds; +}; + +struct sd_ssr { + unsigned int au; + unsigned int erase_timeout; + unsigned int erase_offset; +}; + +struct sd_switch_caps { + unsigned int hs_max_dtr; + unsigned int uhs_max_dtr; + unsigned int sd3_bus_mode; + unsigned int sd3_drv_type; + unsigned int sd3_curr_limit; +}; + +struct sdio_cccr { + unsigned int 
sdio_vsn; + unsigned int sd_vsn; + unsigned int multi_block: 1; + unsigned int low_speed: 1; + unsigned int wide_bus: 1; + unsigned int high_power: 1; + unsigned int high_speed: 1; + unsigned int disable_cd: 1; +}; + +struct sdio_cis { + short unsigned int vendor; + short unsigned int device; + short unsigned int blksize; + unsigned int max_dtr; +}; + +struct mmc_part { + u64 size; + unsigned int part_cfg; + char name[20]; + bool force_ro; + unsigned int area_type; +}; + +struct mmc_host; + +struct sdio_func; + +struct sdio_func_tuple; + +struct mmc_card { + struct mmc_host *host; + struct device dev; + u32 ocr; + unsigned int rca; + unsigned int type; + unsigned int state; + unsigned int quirks; + unsigned int quirk_max_rate; + bool reenable_cmdq; + unsigned int erase_size; + unsigned int erase_shift; + unsigned int pref_erase; + unsigned int eg_boundary; + unsigned int erase_arg; + u8 erased_byte; + u32 raw_cid[4]; + u32 raw_csd[4]; + u32 raw_scr[2]; + u32 raw_ssr[16]; + struct mmc_cid cid; + struct mmc_csd csd; + struct mmc_ext_csd ext_csd; + struct sd_scr scr; + struct sd_ssr ssr; + struct sd_switch_caps sw_caps; + unsigned int sdio_funcs; + atomic_t sdio_funcs_probed; + struct sdio_cccr cccr; + struct sdio_cis cis; + struct sdio_func *sdio_func[7]; + struct sdio_func *sdio_single_irq; + u8 major_rev; + u8 minor_rev; + unsigned int num_info; + const char **info; + struct sdio_func_tuple *tuples; + unsigned int sd_bus_speed; + unsigned int mmc_avail_type; + unsigned int drive_strength; + struct dentry *debugfs_root; + struct mmc_part part[7]; + unsigned int nr_parts; + struct workqueue_struct *complete_wq; +}; + +typedef unsigned int mmc_pm_flag_t; + +struct mmc_ios { + unsigned int clock; + short unsigned int vdd; + unsigned int power_delay_ms; + unsigned char bus_mode; + unsigned char chip_select; + unsigned char power_mode; + unsigned char bus_width; + unsigned char timing; + unsigned char signal_voltage; + unsigned char drv_type; + bool enhanced_strobe; +}; + +struct mmc_ctx { + struct task_struct *task; +}; + +struct mmc_slot { + int cd_irq; + bool cd_wake_enabled; + void *handler_priv; +}; + +struct mmc_supply { + struct regulator *vmmc; + struct regulator *vqmmc; +}; + +struct mmc_host_ops; + +struct mmc_pwrseq; + +struct mmc_bus_ops; + +struct mmc_request; + +struct mmc_cqe_ops; + +struct mmc_host { + struct device *parent; + struct device class_dev; + int index; + const struct mmc_host_ops *ops; + struct mmc_pwrseq *pwrseq; + unsigned int f_min; + unsigned int f_max; + unsigned int f_init; + u32 ocr_avail; + u32 ocr_avail_sdio; + u32 ocr_avail_sd; + u32 ocr_avail_mmc; + struct wakeup_source *ws; + u32 max_current_330; + u32 max_current_300; + u32 max_current_180; + u32 caps; + u32 caps2; + int fixed_drv_type; + mmc_pm_flag_t pm_caps; + unsigned int max_seg_size; + short unsigned int max_segs; + short unsigned int unused; + unsigned int max_req_size; + unsigned int max_blk_size; + unsigned int max_blk_count; + unsigned int max_busy_timeout; + spinlock_t lock; + struct mmc_ios ios; + unsigned int use_spi_crc: 1; + unsigned int claimed: 1; + unsigned int doing_init_tune: 1; + unsigned int can_retune: 1; + unsigned int doing_retune: 1; + unsigned int retune_now: 1; + unsigned int retune_paused: 1; + unsigned int retune_crc_disable: 1; + unsigned int can_dma_map_merge: 1; + int rescan_disable; + int rescan_entered; + int need_retune; + int hold_retune; + unsigned int retune_period; + struct timer_list retune_timer; + bool trigger_card_event; + struct mmc_card *card; + 
wait_queue_head_t wq; + struct mmc_ctx *claimer; + int claim_cnt; + struct mmc_ctx default_ctx; + struct delayed_work detect; + int detect_change; + struct mmc_slot slot; + const struct mmc_bus_ops *bus_ops; + unsigned int sdio_irqs; + struct task_struct *sdio_irq_thread; + struct delayed_work sdio_irq_work; + bool sdio_irq_pending; + atomic_t sdio_irq_thread_abort; + mmc_pm_flag_t pm_flags; + struct led_trigger *led; + bool regulator_enabled; + struct mmc_supply supply; + struct dentry *debugfs_root; + struct mmc_request *ongoing_mrq; + unsigned int actual_clock; + unsigned int slotno; + int dsr_req; + u32 dsr; + const struct mmc_cqe_ops *cqe_ops; + void *cqe_private; + int cqe_qdepth; + bool cqe_enabled; + bool cqe_on; + struct blk_keyslot_manager ksm; + bool hsq_enabled; + long: 56; + long: 64; + long: 64; + long unsigned int private[0]; +}; + +struct mmc_data; + +struct mmc_command { + u32 opcode; + u32 arg; + u32 resp[4]; + unsigned int flags; + unsigned int retries; + int error; + unsigned int busy_timeout; + struct mmc_data *data; + struct mmc_request *mrq; +}; + +struct mmc_data { + unsigned int timeout_ns; + unsigned int timeout_clks; + unsigned int blksz; + unsigned int blocks; + unsigned int blk_addr; + int error; + unsigned int flags; + unsigned int bytes_xfered; + struct mmc_command *stop; + struct mmc_request *mrq; + unsigned int sg_len; + int sg_count; + struct scatterlist *sg; + s32 host_cookie; +}; + +struct mmc_request { + struct mmc_command *sbc; + struct mmc_command *cmd; + struct mmc_data *data; + struct mmc_command *stop; + struct completion completion; + struct completion cmd_completion; + void (*done)(struct mmc_request *); + void (*recovery_notifier)(struct mmc_request *); + struct mmc_host *host; + bool cap_cmd_during_tfr; + int tag; + bool crypto_enabled; + int crypto_key_slot; + u32 data_unit_num; +}; + +struct mmc_host_ops { + void (*post_req)(struct mmc_host *, struct mmc_request *, int); + void (*pre_req)(struct mmc_host *, struct mmc_request *); + void (*request)(struct mmc_host *, struct mmc_request *); + int (*request_atomic)(struct mmc_host *, struct mmc_request *); + void (*set_ios)(struct mmc_host *, struct mmc_ios *); + int (*get_ro)(struct mmc_host *); + int (*get_cd)(struct mmc_host *); + void (*enable_sdio_irq)(struct mmc_host *, int); + void (*ack_sdio_irq)(struct mmc_host *); + void (*init_card)(struct mmc_host *, struct mmc_card *); + int (*start_signal_voltage_switch)(struct mmc_host *, struct mmc_ios *); + int (*card_busy)(struct mmc_host *); + int (*execute_tuning)(struct mmc_host *, u32); + int (*prepare_hs400_tuning)(struct mmc_host *, struct mmc_ios *); + int (*hs400_prepare_ddr)(struct mmc_host *); + void (*hs400_downgrade)(struct mmc_host *); + void (*hs400_complete)(struct mmc_host *); + void (*hs400_enhanced_strobe)(struct mmc_host *, struct mmc_ios *); + int (*select_drive_strength)(struct mmc_card *, unsigned int, int, int, int *); + void (*hw_reset)(struct mmc_host *); + void (*card_event)(struct mmc_host *); + int (*multi_io_quirk)(struct mmc_card *, unsigned int, int); + int (*init_sd_express)(struct mmc_host *, struct mmc_ios *); +}; + +struct mmc_cqe_ops { + int (*cqe_enable)(struct mmc_host *, struct mmc_card *); + void (*cqe_disable)(struct mmc_host *); + int (*cqe_request)(struct mmc_host *, struct mmc_request *); + void (*cqe_post_req)(struct mmc_host *, struct mmc_request *); + void (*cqe_off)(struct mmc_host *); + int (*cqe_wait_for_idle)(struct mmc_host *); + bool (*cqe_timeout)(struct mmc_host *, struct mmc_request *, 
bool *); + void (*cqe_recovery_start)(struct mmc_host *); + void (*cqe_recovery_finish)(struct mmc_host *); +}; + +struct mmc_pwrseq_ops; + +struct mmc_pwrseq { + const struct mmc_pwrseq_ops *ops; + struct device *dev; + struct list_head pwrseq_node; + struct module *owner; +}; + +struct mmc_bus_ops { + void (*remove)(struct mmc_host *); + void (*detect)(struct mmc_host *); + int (*pre_suspend)(struct mmc_host *); + int (*suspend)(struct mmc_host *); + int (*resume)(struct mmc_host *); + int (*runtime_suspend)(struct mmc_host *); + int (*runtime_resume)(struct mmc_host *); + int (*alive)(struct mmc_host *); + int (*shutdown)(struct mmc_host *); + int (*hw_reset)(struct mmc_host *); + int (*sw_reset)(struct mmc_host *); + bool (*cache_enabled)(struct mmc_host *); +}; + +struct trace_event_raw_mmc_request_start { + struct trace_entry ent; + u32 cmd_opcode; + u32 cmd_arg; + unsigned int cmd_flags; + unsigned int cmd_retries; + u32 stop_opcode; + u32 stop_arg; + unsigned int stop_flags; + unsigned int stop_retries; + u32 sbc_opcode; + u32 sbc_arg; + unsigned int sbc_flags; + unsigned int sbc_retries; + unsigned int blocks; + unsigned int blk_addr; + unsigned int blksz; + unsigned int data_flags; + int tag; + unsigned int can_retune; + unsigned int doing_retune; + unsigned int retune_now; + int need_retune; + int hold_retune; + unsigned int retune_period; + struct mmc_request *mrq; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_mmc_request_done { + struct trace_entry ent; + u32 cmd_opcode; + int cmd_err; + u32 cmd_resp[4]; + unsigned int cmd_retries; + u32 stop_opcode; + int stop_err; + u32 stop_resp[4]; + unsigned int stop_retries; + u32 sbc_opcode; + int sbc_err; + u32 sbc_resp[4]; + unsigned int sbc_retries; + unsigned int bytes_xfered; + int data_err; + int tag; + unsigned int can_retune; + unsigned int doing_retune; + unsigned int retune_now; + int need_retune; + int hold_retune; + unsigned int retune_period; + struct mmc_request *mrq; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_data_offsets_mmc_request_start { + u32 name; +}; + +struct trace_event_data_offsets_mmc_request_done { + u32 name; +}; + +typedef void (*btf_trace_mmc_request_start)(void *, struct mmc_host *, struct mmc_request *); + +typedef void (*btf_trace_mmc_request_done)(void *, struct mmc_host *, struct mmc_request *); + +struct mmc_pwrseq_ops { + void (*pre_power_on)(struct mmc_host *); + void (*post_power_on)(struct mmc_host *); + void (*power_off)(struct mmc_host *); + void (*reset)(struct mmc_host *); +}; + +enum mmc_busy_cmd { + MMC_BUSY_CMD6 = 0, + MMC_BUSY_ERASE = 1, + MMC_BUSY_HPI = 2, +}; + +struct mmc_driver { + struct device_driver drv; + int (*probe)(struct mmc_card *); + void (*remove)(struct mmc_card *); + void (*shutdown)(struct mmc_card *); +}; + +struct mmc_clk_phase { + bool valid; + u16 in_deg; + u16 out_deg; +}; + +struct mmc_clk_phase_map { + struct mmc_clk_phase phase[11]; +}; + +struct mmc_fixup { + const char *name; + u64 rev_start; + u64 rev_end; + unsigned int manfid; + short unsigned int oemid; + u16 cis_vendor; + u16 cis_device; + unsigned int ext_csd_rev; + void (*vendor_fixup)(struct mmc_card *, int); + int data; +}; + +typedef void sdio_irq_handler_t(struct sdio_func *); + +struct sdio_func { + struct mmc_card *card; + struct device dev; + sdio_irq_handler_t *irq_handler; + unsigned int num; + unsigned char class; + short unsigned int vendor; + short unsigned int device; + unsigned int max_blksize; + unsigned int cur_blksize; + unsigned int 
enable_timeout; + unsigned int state; + u8 *tmpbuf; + u8 major_rev; + u8 minor_rev; + unsigned int num_info; + const char **info; + struct sdio_func_tuple *tuples; +}; + +struct sdio_func_tuple { + struct sdio_func_tuple *next; + unsigned char code; + unsigned char size; + unsigned char data[0]; +}; + +struct sdio_device_id { + __u8 class; + __u16 vendor; + __u16 device; + kernel_ulong_t driver_data; +}; + +struct sdio_driver { + char *name; + const struct sdio_device_id *id_table; + int (*probe)(struct sdio_func *, const struct sdio_device_id *); + void (*remove)(struct sdio_func *); + struct device_driver drv; +}; + +typedef int tpl_parse_t(struct mmc_card *, struct sdio_func *, const unsigned char *, unsigned int); + +struct cis_tpl { + unsigned char code; + unsigned char min_size; + tpl_parse_t *parse; +}; + +struct mmc_gpio { + struct gpio_desc *ro_gpio; + struct gpio_desc *cd_gpio; + irqreturn_t (*cd_gpio_isr)(int, void *); + char *ro_label; + char *cd_label; + u32 cd_debounce_delay_ms; +}; + +enum mmc_issue_type { + MMC_ISSUE_SYNC = 0, + MMC_ISSUE_DCMD = 1, + MMC_ISSUE_ASYNC = 2, + MMC_ISSUE_MAX = 3, +}; + +struct mmc_blk_request { + struct mmc_request mrq; + struct mmc_command sbc; + struct mmc_command cmd; + struct mmc_command stop; + struct mmc_data data; +}; + +enum mmc_drv_op { + MMC_DRV_OP_IOCTL = 0, + MMC_DRV_OP_IOCTL_RPMB = 1, + MMC_DRV_OP_BOOT_WP = 2, + MMC_DRV_OP_GET_CARD_STATUS = 3, + MMC_DRV_OP_GET_EXT_CSD = 4, +}; + +struct mmc_queue_req { + struct mmc_blk_request brq; + struct scatterlist *sg; + enum mmc_drv_op drv_op; + int drv_op_result; + void *drv_op_data; + unsigned int ioc_count; + int retries; +}; + +struct pci_dev___2; + +struct sdhci_pci_data { + struct pci_dev___2 *pdev; + int slotno; + int rst_n_gpio; + int cd_gpio; + int (*setup)(struct sdhci_pci_data *); + void (*cleanup)(struct sdhci_pci_data *); +}; + +struct led_init_data { + struct fwnode_handle *fwnode; + const char *default_label; + const char *devicename; + bool devname_mandatory; +}; + +struct led_properties { + u32 color; + bool color_present; + const char *function; + u32 func_enum; + bool func_enum_present; + const char *label; +}; + +enum cpu_led_event { + CPU_LED_IDLE_START = 0, + CPU_LED_IDLE_END = 1, + CPU_LED_START = 2, + CPU_LED_STOP = 3, + CPU_LED_HALTED = 4, +}; + +struct led_trigger_cpu { + bool is_active; + char name[8]; + struct led_trigger *_trig; +}; + +struct dmi_memdev_info { + const char *device; + const char *bank; + u64 size; + u16 handle; + u8 type; +}; + +struct edd_device { + unsigned int index; + unsigned int mbr_signature; + struct edd_info *info; + struct kobject kobj; +}; + +struct edd_attribute { + struct attribute attr; + ssize_t (*show)(struct edd_device *, char *); + int (*test)(struct edd_device *); +}; + +struct dmi_device_attribute { + struct device_attribute dev_attr; + int field; +}; + +struct mafield { + const char *prefix; + int field; +}; + +struct acpi_table_ibft { + struct acpi_table_header header; + u8 reserved[12]; +}; + +struct firmware_map_entry { + u64 start; + u64 end; + const char *type; + struct list_head list; + struct kobject kobj; +}; + +struct memmap_attribute { + struct attribute attr; + ssize_t (*show)(struct firmware_map_entry *, char *); +}; + +struct bmp_header { + u16 id; + u32 size; +} __attribute__((packed)); + +typedef efi_status_t efi_query_variable_store_t(u32, long unsigned int, bool); + +typedef struct { + u16 version; + u16 length; + u32 runtime_services_supported; +} efi_rt_properties_table_t; + +struct efivar_operations { + 
efi_get_variable_t *get_variable; + efi_get_next_variable_t *get_next_variable; + efi_set_variable_t *set_variable; + efi_set_variable_t *set_variable_nonblocking; + efi_query_variable_store_t *query_variable_store; +}; + +struct efivars { + struct kset *kset; + struct kobject *kobject; + const struct efivar_operations *ops; +}; + +struct linux_efi_random_seed { + u32 size; + u8 bits[0]; +}; + +struct linux_efi_memreserve { + int size; + atomic_t count; + phys_addr_t next; + struct { + phys_addr_t base; + phys_addr_t size; + } entry[0]; +}; + +struct efi_error_code { + efi_status_t status; + int errno; + const char *description; +}; + +struct efi_generic_dev_path { + u8 type; + u8 sub_type; + u16 length; +}; + +struct variable_validate { + efi_guid_t vendor; + char *name; + bool (*validate)(efi_char16_t *, int, u8 *, long unsigned int); +}; + +typedef struct { + u32 version; + u32 num_entries; + u32 desc_size; + u32 reserved; + efi_memory_desc_t entry[0]; +} efi_memory_attributes_table_t; + +typedef u64 efi_physical_addr_t; + +typedef struct { + u64 length; + u64 data; +} efi_capsule_block_desc_t; + +struct compat_efi_variable { + efi_char16_t VariableName[512]; + efi_guid_t VendorGuid; + __u32 DataSize; + __u8 Data[1024]; + __u32 Status; + __u32 Attributes; +}; + +struct efivar_attribute { + struct attribute attr; + ssize_t (*show)(struct efivar_entry *, char *); + ssize_t (*store)(struct efivar_entry *, const char *, size_t); +}; + +struct efi_system_resource_entry_v1 { + efi_guid_t fw_class; + u32 fw_type; + u32 fw_version; + u32 lowest_supported_fw_version; + u32 capsule_flags; + u32 last_attempt_version; + u32 last_attempt_status; +}; + +struct efi_system_resource_table { + u32 fw_resource_count; + u32 fw_resource_count_max; + u64 fw_resource_version; + u8 entries[0]; +}; + +struct esre_entry { + union { + struct efi_system_resource_entry_v1 *esre1; + } esre; + struct kobject kobj; + struct list_head list; +}; + +struct esre_attribute { + struct attribute attr; + ssize_t (*show)(struct esre_entry *, char *); + ssize_t (*store)(struct esre_entry *, const char *, size_t); +}; + +struct cper_sec_proc_generic { + u64 validation_bits; + u8 proc_type; + u8 proc_isa; + u8 proc_error_type; + u8 operation; + u8 flags; + u8 level; + u16 reserved; + u64 cpu_version; + char cpu_brand[128]; + u64 proc_id; + u64 target_addr; + u64 requestor_id; + u64 responder_id; + u64 ip; +}; + +struct cper_sec_proc_ia { + u64 validation_bits; + u64 lapic_id; + u8 cpuid[48]; +}; + +struct cper_mem_err_compact { + u64 validation_bits; + u16 node; + u16 card; + u16 module; + u16 bank; + u16 device; + u16 row; + u16 column; + u16 bit_pos; + u64 requestor_id; + u64 responder_id; + u64 target_id; + u16 rank; + u16 mem_array_handle; + u16 mem_dev_handle; + u8 extended; +} __attribute__((packed)); + +struct cper_sec_fw_err_rec_ref { + u8 record_type; + u8 revision; + u8 reserved[6]; + u64 record_identifier; + guid_t record_identifier_guid; +}; + +struct efi_runtime_map_entry { + efi_memory_desc_t md; + struct kobject kobj; +}; + +struct map_attribute { + struct attribute attr; + ssize_t (*show)(struct efi_runtime_map_entry *, char *); +}; + +struct efi_acpi_dev_path { + struct efi_generic_dev_path header; + u32 hid; + u32 uid; +}; + +struct efi_pci_dev_path { + struct efi_generic_dev_path header; + u8 fn; + u8 dev; +}; + +struct efi_vendor_dev_path { + struct efi_generic_dev_path header; + efi_guid_t vendorguid; + u8 vendordata[0]; +}; + +struct efi_dev_path { + union { + struct efi_generic_dev_path header; + struct 
efi_acpi_dev_path acpi; + struct efi_pci_dev_path pci; + struct efi_vendor_dev_path vendor; + }; +}; + +struct dev_header { + u32 len; + u32 prop_count; + struct efi_dev_path path[0]; +}; + +struct properties_header { + u32 len; + u32 version; + u32 dev_count; + struct dev_header dev_header[0]; +}; + +struct efi_embedded_fw { + struct list_head list; + const char *name; + const u8 *data; + size_t length; +}; + +struct efi_embedded_fw_desc { + const char *name; + u8 prefix[8]; + u32 length; + u8 sha256[32]; +}; + +struct efi_mokvar_sysfs_attr { + struct bin_attribute bin_attr; + struct list_head node; +}; + +struct cper_ia_err_info { + guid_t err_type; + u64 validation_bits; + u64 check_info; + u64 target_id; + u64 requestor_id; + u64 responder_id; + u64 ip; +}; + +enum err_types { + ERR_TYPE_CACHE = 0, + ERR_TYPE_TLB = 1, + ERR_TYPE_BUS = 2, + ERR_TYPE_MS = 3, + N_ERR_TYPES = 4, +}; + +struct ts_dmi_data { + struct efi_embedded_fw_desc embedded_fw; + const char *acpi_name; + const struct property_entry *properties; +}; + +enum ppfear_regs { + SPT_PMC_XRAM_PPFEAR0A = 1424, + SPT_PMC_XRAM_PPFEAR0B = 1425, + SPT_PMC_XRAM_PPFEAR0C = 1426, + SPT_PMC_XRAM_PPFEAR0D = 1427, + SPT_PMC_XRAM_PPFEAR1A = 1428, +}; + +struct pmc_bit_map { + const char *name; + u32 bit_mask; +}; + +struct pmc_reg_map { + const struct pmc_bit_map **pfear_sts; + const struct pmc_bit_map *mphy_sts; + const struct pmc_bit_map *pll_sts; + const struct pmc_bit_map **slps0_dbg_maps; + const struct pmc_bit_map *ltr_show_sts; + const struct pmc_bit_map *msr_sts; + const struct pmc_bit_map **lpm_sts; + const u32 slp_s0_offset; + const int slp_s0_res_counter_step; + const u32 ltr_ignore_offset; + const int regmap_length; + const u32 ppfear0_offset; + const int ppfear_buckets; + const u32 pm_cfg_offset; + const int pm_read_disable_bit; + const u32 slps0_dbg_offset; + const u32 ltr_ignore_max; + const u32 pm_vric1_offset; + const int lpm_num_maps; + const int lpm_res_counter_step_x2; + const u32 lpm_sts_latch_en_offset; + const u32 lpm_en_offset; + const u32 lpm_priority_offset; + const u32 lpm_residency_offset; + const u32 lpm_status_offset; + const u32 lpm_live_status_offset; + const u32 etr3_offset; +}; + +struct pmc_dev { + u32 base_addr; + void *regbase; + const struct pmc_reg_map *map; + struct dentry *dbgfs_dir; + int pmc_xram_read_bit; + struct mutex lock; + bool check_counters; + u64 pc10_counter; + u64 s0ix_counter; + int num_lpm_modes; + int lpm_en_modes[8]; + u32 *lpm_req_regs; +}; + +struct intel_scu_ipc_data { + struct resource mem; + int irq; +}; + +struct intel_scu_ipc_dev___2 { + struct device dev; + struct resource mem; + struct module *owner; + int irq; + void *ipc_base; + struct completion cmd_complete; +}; + +struct intel_scu_ipc_devres { + struct intel_scu_ipc_dev___2 *scu; +}; + +struct pmc_reg_map___2 { + const struct pmc_bit_map *d3_sts_0; + const struct pmc_bit_map *d3_sts_1; + const struct pmc_bit_map *func_dis; + const struct pmc_bit_map *func_dis_2; + const struct pmc_bit_map *pss; +}; + +struct pmc_data { + const struct pmc_reg_map___2 *map; + const struct pmc_clk *clks; +}; + +struct pmc_dev___2 { + u32 base_addr; + void *regmap; + const struct pmc_reg_map___2 *map; + struct dentry *dbgfs_dir; + bool init; +}; + +enum ec_status { + EC_RES_SUCCESS = 0, + EC_RES_INVALID_COMMAND = 1, + EC_RES_ERROR = 2, + EC_RES_INVALID_PARAM = 3, + EC_RES_ACCESS_DENIED = 4, + EC_RES_INVALID_RESPONSE = 5, + EC_RES_INVALID_VERSION = 6, + EC_RES_INVALID_CHECKSUM = 7, + EC_RES_IN_PROGRESS = 8, + EC_RES_UNAVAILABLE = 9, + 
EC_RES_TIMEOUT = 10, + EC_RES_OVERFLOW = 11, + EC_RES_INVALID_HEADER = 12, + EC_RES_REQUEST_TRUNCATED = 13, + EC_RES_RESPONSE_TOO_BIG = 14, + EC_RES_BUS_ERROR = 15, + EC_RES_BUSY = 16, + EC_RES_INVALID_HEADER_VERSION = 17, + EC_RES_INVALID_HEADER_CRC = 18, + EC_RES_INVALID_DATA_CRC = 19, + EC_RES_DUP_UNAVAILABLE = 20, +}; + +enum host_event_code { + EC_HOST_EVENT_LID_CLOSED = 1, + EC_HOST_EVENT_LID_OPEN = 2, + EC_HOST_EVENT_POWER_BUTTON = 3, + EC_HOST_EVENT_AC_CONNECTED = 4, + EC_HOST_EVENT_AC_DISCONNECTED = 5, + EC_HOST_EVENT_BATTERY_LOW = 6, + EC_HOST_EVENT_BATTERY_CRITICAL = 7, + EC_HOST_EVENT_BATTERY = 8, + EC_HOST_EVENT_THERMAL_THRESHOLD = 9, + EC_HOST_EVENT_DEVICE = 10, + EC_HOST_EVENT_THERMAL = 11, + EC_HOST_EVENT_USB_CHARGER = 12, + EC_HOST_EVENT_KEY_PRESSED = 13, + EC_HOST_EVENT_INTERFACE_READY = 14, + EC_HOST_EVENT_KEYBOARD_RECOVERY = 15, + EC_HOST_EVENT_THERMAL_SHUTDOWN = 16, + EC_HOST_EVENT_BATTERY_SHUTDOWN = 17, + EC_HOST_EVENT_THROTTLE_START = 18, + EC_HOST_EVENT_THROTTLE_STOP = 19, + EC_HOST_EVENT_HANG_DETECT = 20, + EC_HOST_EVENT_HANG_REBOOT = 21, + EC_HOST_EVENT_PD_MCU = 22, + EC_HOST_EVENT_BATTERY_STATUS = 23, + EC_HOST_EVENT_PANIC = 24, + EC_HOST_EVENT_KEYBOARD_FASTBOOT = 25, + EC_HOST_EVENT_RTC = 26, + EC_HOST_EVENT_MKBP = 27, + EC_HOST_EVENT_USB_MUX = 28, + EC_HOST_EVENT_MODE_CHANGE = 29, + EC_HOST_EVENT_KEYBOARD_RECOVERY_HW_REINIT = 30, + EC_HOST_EVENT_WOV = 31, + EC_HOST_EVENT_INVALID = 32, +}; + +struct ec_host_request { + uint8_t struct_version; + uint8_t checksum; + uint16_t command; + uint8_t command_version; + uint8_t reserved; + uint16_t data_len; +}; + +struct ec_params_hello { + uint32_t in_data; +}; + +struct ec_response_hello { + uint32_t out_data; +}; + +struct ec_params_get_cmd_versions { + uint8_t cmd; +}; + +struct ec_response_get_cmd_versions { + uint32_t version_mask; +}; + +enum ec_comms_status { + EC_COMMS_STATUS_PROCESSING = 1, +}; + +struct ec_response_get_comms_status { + uint32_t flags; +}; + +struct ec_response_get_protocol_info { + uint32_t protocol_versions; + uint16_t max_request_packet_size; + uint16_t max_response_packet_size; + uint32_t flags; +}; + +enum ec_led_colors { + EC_LED_COLOR_RED = 0, + EC_LED_COLOR_GREEN = 1, + EC_LED_COLOR_BLUE = 2, + EC_LED_COLOR_YELLOW = 3, + EC_LED_COLOR_WHITE = 4, + EC_LED_COLOR_AMBER = 5, + EC_LED_COLOR_COUNT = 6, +}; + +enum motionsense_command { + MOTIONSENSE_CMD_DUMP = 0, + MOTIONSENSE_CMD_INFO = 1, + MOTIONSENSE_CMD_EC_RATE = 2, + MOTIONSENSE_CMD_SENSOR_ODR = 3, + MOTIONSENSE_CMD_SENSOR_RANGE = 4, + MOTIONSENSE_CMD_KB_WAKE_ANGLE = 5, + MOTIONSENSE_CMD_DATA = 6, + MOTIONSENSE_CMD_FIFO_INFO = 7, + MOTIONSENSE_CMD_FIFO_FLUSH = 8, + MOTIONSENSE_CMD_FIFO_READ = 9, + MOTIONSENSE_CMD_PERFORM_CALIB = 10, + MOTIONSENSE_CMD_SENSOR_OFFSET = 11, + MOTIONSENSE_CMD_LIST_ACTIVITIES = 12, + MOTIONSENSE_CMD_SET_ACTIVITY = 13, + MOTIONSENSE_CMD_LID_ANGLE = 14, + MOTIONSENSE_CMD_FIFO_INT_ENABLE = 15, + MOTIONSENSE_CMD_SPOOF = 16, + MOTIONSENSE_CMD_TABLET_MODE_LID_ANGLE = 17, + MOTIONSENSE_CMD_SENSOR_SCALE = 18, + MOTIONSENSE_NUM_CMDS = 19, +}; + +struct ec_response_motion_sensor_data { + uint8_t flags; + uint8_t sensor_num; + union { + int16_t data[3]; + struct { + uint16_t reserved; + uint32_t timestamp; + } __attribute__((packed)); + struct { + uint8_t activity; + uint8_t state; + int16_t add_info[2]; + }; + }; +} __attribute__((packed)); + +struct ec_response_motion_sense_fifo_info { + uint16_t size; + uint16_t count; + uint32_t timestamp; + uint16_t total_lost; + uint16_t lost[0]; +} __attribute__((packed)); + 
+struct ec_response_motion_sense_fifo_data { + uint32_t number_data; + struct ec_response_motion_sensor_data data[0]; +}; + +struct ec_motion_sense_activity { + uint8_t sensor_num; + uint8_t activity; + uint8_t enable; + uint8_t reserved; + uint16_t parameters[3]; +}; + +struct ec_params_motion_sense { + uint8_t cmd; + union { + struct { + uint8_t max_sensor_count; + } dump; + struct { + int16_t data; + } kb_wake_angle; + struct { + uint8_t sensor_num; + } info; + struct { + uint8_t sensor_num; + } info_3; + struct { + uint8_t sensor_num; + } data; + struct { + uint8_t sensor_num; + } fifo_flush; + struct { + uint8_t sensor_num; + } perform_calib; + struct { + uint8_t sensor_num; + } list_activities; + struct { + uint8_t sensor_num; + uint8_t roundup; + uint16_t reserved; + int32_t data; + } ec_rate; + struct { + uint8_t sensor_num; + uint8_t roundup; + uint16_t reserved; + int32_t data; + } sensor_odr; + struct { + uint8_t sensor_num; + uint8_t roundup; + uint16_t reserved; + int32_t data; + } sensor_range; + struct { + uint8_t sensor_num; + uint16_t flags; + int16_t temp; + int16_t offset[3]; + } __attribute__((packed)) sensor_offset; + struct { + uint8_t sensor_num; + uint16_t flags; + int16_t temp; + uint16_t scale[3]; + } __attribute__((packed)) sensor_scale; + struct { + uint32_t max_data_vector; + } fifo_read; + struct ec_motion_sense_activity set_activity; + struct { + int8_t enable; + } fifo_int_enable; + struct { + uint8_t sensor_id; + uint8_t spoof_enable; + uint8_t reserved; + int16_t components[3]; + } __attribute__((packed)) spoof; + struct { + int16_t lid_angle; + int16_t hys_degree; + } tablet_mode_threshold; + }; +} __attribute__((packed)); + +struct ec_response_motion_sense { + union { + struct { + uint8_t module_flags; + uint8_t sensor_count; + struct ec_response_motion_sensor_data sensor[0]; + } __attribute__((packed)) dump; + struct { + uint8_t type; + uint8_t location; + uint8_t chip; + } info; + struct { + uint8_t type; + uint8_t location; + uint8_t chip; + uint32_t min_frequency; + uint32_t max_frequency; + uint32_t fifo_max_event_count; + } info_3; + struct ec_response_motion_sensor_data data; + struct { + int32_t ret; + } ec_rate; + struct { + int32_t ret; + } sensor_odr; + struct { + int32_t ret; + } sensor_range; + struct { + int32_t ret; + } kb_wake_angle; + struct { + int32_t ret; + } fifo_int_enable; + struct { + int32_t ret; + } spoof; + struct { + int16_t temp; + int16_t offset[3]; + } sensor_offset; + struct { + int16_t temp; + int16_t offset[3]; + } perform_calib; + struct { + int16_t temp; + uint16_t scale[3]; + } sensor_scale; + struct ec_response_motion_sense_fifo_info fifo_info; + struct ec_response_motion_sense_fifo_info fifo_flush; + struct ec_response_motion_sense_fifo_data fifo_read; + struct { + uint16_t reserved; + uint32_t enabled; + uint32_t disabled; + } __attribute__((packed)) list_activities; + struct { + uint16_t value; + } lid_angle; + struct { + uint16_t lid_angle; + uint16_t hys_degree; + } tablet_mode_threshold; + }; +}; + +enum ec_temp_thresholds { + EC_TEMP_THRESH_WARN = 0, + EC_TEMP_THRESH_HIGH = 1, + EC_TEMP_THRESH_HALT = 2, + EC_TEMP_THRESH_COUNT = 3, +}; + +enum ec_mkbp_event { + EC_MKBP_EVENT_KEY_MATRIX = 0, + EC_MKBP_EVENT_HOST_EVENT = 1, + EC_MKBP_EVENT_SENSOR_FIFO = 2, + EC_MKBP_EVENT_BUTTON = 3, + EC_MKBP_EVENT_SWITCH = 4, + EC_MKBP_EVENT_FINGERPRINT = 5, + EC_MKBP_EVENT_SYSRQ = 6, + EC_MKBP_EVENT_HOST_EVENT64 = 7, + EC_MKBP_EVENT_CEC_EVENT = 8, + EC_MKBP_EVENT_CEC_MESSAGE = 9, + EC_MKBP_EVENT_COUNT = 10, +}; + +union 
ec_response_get_next_data_v1 { + uint8_t key_matrix[16]; + uint32_t host_event; + uint64_t host_event64; + struct { + uint8_t reserved[3]; + struct ec_response_motion_sense_fifo_info info; + } __attribute__((packed)) sensor_fifo; + uint32_t buttons; + uint32_t switches; + uint32_t fp_events; + uint32_t sysrq; + uint32_t cec_events; + uint8_t cec_message[16]; +}; + +struct ec_response_get_next_event_v1 { + uint8_t event_type; + union ec_response_get_next_data_v1 data; +} __attribute__((packed)); + +struct ec_response_host_event_mask { + uint32_t mask; +}; + +enum { + EC_MSG_TX_HEADER_BYTES = 3, + EC_MSG_TX_TRAILER_BYTES = 1, + EC_MSG_TX_PROTO_BYTES = 4, + EC_MSG_RX_PROTO_BYTES = 3, + EC_PROTO2_MSG_BYTES = 256, + EC_MAX_MSG_BYTES = 65536, +}; + +struct cros_ec_command { + uint32_t version; + uint32_t command; + uint32_t outsize; + uint32_t insize; + uint32_t result; + uint8_t data[0]; +}; + +struct cros_ec_device { + const char *phys_name; + struct device *dev; + bool was_wake_device; + struct class *cros_class; + int (*cmd_readmem)(struct cros_ec_device *, unsigned int, unsigned int, void *); + u16 max_request; + u16 max_response; + u16 max_passthru; + u16 proto_version; + void *priv; + int irq; + u8 *din; + u8 *dout; + int din_size; + int dout_size; + bool wake_enabled; + bool suspended; + int (*cmd_xfer)(struct cros_ec_device *, struct cros_ec_command *); + int (*pkt_xfer)(struct cros_ec_device *, struct cros_ec_command *); + struct mutex lock; + u8 mkbp_event_supported; + bool host_sleep_v1; + struct blocking_notifier_head event_notifier; + struct ec_response_get_next_event_v1 event_data; + int event_size; + u32 host_event_wake_mask; + u32 last_resume_result; + ktime_t last_event_time; + struct notifier_block notifier_ready; + struct platform_device *ec; + struct platform_device *pd; +}; + +struct cros_ec_debugfs; + +struct cros_ec_dev { + struct device class_dev; + struct cros_ec_device *ec_dev; + struct device *dev; + struct cros_ec_debugfs *debug_info; + bool has_kb_wake_angle; + u16 cmd_offset; + u32 features[2]; +}; + +struct trace_event_raw_cros_ec_request_start { + struct trace_entry ent; + uint32_t version; + uint32_t offset; + uint32_t command; + uint32_t outsize; + uint32_t insize; + char __data[0]; +}; + +struct trace_event_raw_cros_ec_request_done { + struct trace_entry ent; + uint32_t version; + uint32_t offset; + uint32_t command; + uint32_t outsize; + uint32_t insize; + uint32_t result; + int retval; + char __data[0]; +}; + +struct trace_event_data_offsets_cros_ec_request_start {}; + +struct trace_event_data_offsets_cros_ec_request_done {}; + +typedef void (*btf_trace_cros_ec_request_start)(void *, struct cros_ec_command *); + +typedef void (*btf_trace_cros_ec_request_done)(void *, struct cros_ec_command *, int); + +struct acpi_table_pcct { + struct acpi_table_header header; + u32 flags; + u64 reserved; +}; + +enum acpi_pcct_type { + ACPI_PCCT_TYPE_GENERIC_SUBSPACE = 0, + ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE = 1, + ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2 = 2, + ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE = 3, + ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE = 4, + ACPI_PCCT_TYPE_HW_REG_COMM_SUBSPACE = 5, + ACPI_PCCT_TYPE_RESERVED = 6, +}; + +struct acpi_pcct_subspace { + struct acpi_subtable_header header; + u8 reserved[6]; + u64 base_address; + u64 length; + struct acpi_generic_address doorbell_register; + u64 preserve_mask; + u64 write_mask; + u32 latency; + u32 max_access_rate; + u16 min_turnaround_time; +} __attribute__((packed)); + +struct acpi_pcct_hw_reduced_type2 { + struct 
acpi_subtable_header header; + u32 platform_interrupt; + u8 flags; + u8 reserved; + u64 base_address; + u64 length; + struct acpi_generic_address doorbell_register; + u64 preserve_mask; + u64 write_mask; + u32 latency; + u32 max_access_rate; + u16 min_turnaround_time; + struct acpi_generic_address platform_ack_register; + u64 ack_preserve_mask; + u64 ack_write_mask; +} __attribute__((packed)); + +struct hwspinlock___2; + +struct hwspinlock_ops { + int (*trylock)(struct hwspinlock___2 *); + void (*unlock)(struct hwspinlock___2 *); + void (*relax)(struct hwspinlock___2 *); +}; + +struct hwspinlock_device; + +struct hwspinlock___2 { + struct hwspinlock_device *bank; + spinlock_t lock; + void *priv; +}; + +struct hwspinlock_device { + struct device *dev; + const struct hwspinlock_ops *ops; + int base_id; + int num_locks; + struct hwspinlock___2 lock[0]; +}; + +struct resource_table { + u32 ver; + u32 num; + u32 reserved[2]; + u32 offset[0]; +}; + +struct fw_rsc_hdr { + u32 type; + u8 data[0]; +}; + +enum fw_resource_type { + RSC_CARVEOUT = 0, + RSC_DEVMEM = 1, + RSC_TRACE = 2, + RSC_VDEV = 3, + RSC_LAST = 4, + RSC_VENDOR_START = 128, + RSC_VENDOR_END = 512, +}; + +struct fw_rsc_carveout { + u32 da; + u32 pa; + u32 len; + u32 flags; + u32 reserved; + u8 name[32]; +}; + +struct fw_rsc_devmem { + u32 da; + u32 pa; + u32 len; + u32 flags; + u32 reserved; + u8 name[32]; +}; + +struct fw_rsc_trace { + u32 da; + u32 len; + u32 reserved; + u8 name[32]; +}; + +struct fw_rsc_vdev_vring { + u32 da; + u32 align; + u32 num; + u32 notifyid; + u32 pa; +}; + +struct fw_rsc_vdev { + u32 id; + u32 notifyid; + u32 dfeatures; + u32 gfeatures; + u32 config_len; + u8 status; + u8 num_of_vrings; + u8 reserved[2]; + struct fw_rsc_vdev_vring vring[0]; +}; + +struct rproc; + +struct rproc_mem_entry { + void *va; + bool is_iomem; + dma_addr_t dma; + size_t len; + u32 da; + void *priv; + char name[32]; + struct list_head node; + u32 rsc_offset; + u32 flags; + u32 of_resm_idx; + int (*alloc)(struct rproc *, struct rproc_mem_entry *); + int (*release)(struct rproc *, struct rproc_mem_entry *); +}; + +enum rproc_dump_mechanism { + RPROC_COREDUMP_DISABLED = 0, + RPROC_COREDUMP_ENABLED = 1, + RPROC_COREDUMP_INLINE = 2, +}; + +struct rproc_ops; + +struct rproc { + struct list_head node; + struct iommu_domain *domain; + const char *name; + const char *firmware; + void *priv; + struct rproc_ops *ops; + struct device dev; + atomic_t power; + unsigned int state; + enum rproc_dump_mechanism dump_conf; + struct mutex lock; + struct dentry *dbg_dir; + struct list_head traces; + int num_traces; + struct list_head carveouts; + struct list_head mappings; + u64 bootaddr; + struct list_head rvdevs; + struct list_head subdevs; + struct idr notifyids; + int index; + struct work_struct crash_handler; + unsigned int crash_cnt; + bool recovery_disabled; + int max_notifyid; + struct resource_table *table_ptr; + struct resource_table *clean_table; + struct resource_table *cached_table; + size_t table_sz; + bool has_iommu; + bool auto_boot; + struct list_head dump_segments; + int nb_vdev; + u8 elf_class; + u16 elf_machine; + struct cdev cdev; + bool cdev_put_on_release; +}; + +enum rsc_handling_status { + RSC_HANDLED = 0, + RSC_IGNORED = 1, +}; + +struct rproc_ops { + int (*prepare)(struct rproc *); + int (*unprepare)(struct rproc *); + int (*start)(struct rproc *); + int (*stop)(struct rproc *); + int (*attach)(struct rproc *); + int (*detach)(struct rproc *); + void (*kick)(struct rproc *, int); + void * (*da_to_va)(struct rproc *, u64, size_t, 
bool *); + int (*parse_fw)(struct rproc *, const struct firmware *); + int (*handle_rsc)(struct rproc *, u32, void *, int, int); + struct resource_table * (*find_loaded_rsc_table)(struct rproc *, const struct firmware *); + struct resource_table * (*get_loaded_rsc_table)(struct rproc *, size_t *); + int (*load)(struct rproc *, const struct firmware *); + int (*sanity_check)(struct rproc *, const struct firmware *); + u64 (*get_boot_addr)(struct rproc *, const struct firmware *); + long unsigned int (*panic)(struct rproc *); + void (*coredump)(struct rproc *); +}; + +enum rproc_state { + RPROC_OFFLINE = 0, + RPROC_SUSPENDED = 1, + RPROC_RUNNING = 2, + RPROC_CRASHED = 3, + RPROC_DELETED = 4, + RPROC_ATTACHED = 5, + RPROC_DETACHED = 6, + RPROC_LAST = 7, +}; + +enum rproc_crash_type { + RPROC_MMUFAULT = 0, + RPROC_WATCHDOG = 1, + RPROC_FATAL_ERROR = 2, +}; + +struct rproc_subdev { + struct list_head node; + int (*prepare)(struct rproc_subdev *); + int (*start)(struct rproc_subdev *); + void (*stop)(struct rproc_subdev *, bool); + void (*unprepare)(struct rproc_subdev *); +}; + +struct rproc_vdev; + +struct rproc_vring { + void *va; + int len; + u32 da; + u32 align; + int notifyid; + struct rproc_vdev *rvdev; + struct virtqueue *vq; +}; + +struct rproc_vdev { + struct kref refcount; + struct rproc_subdev subdev; + struct device dev; + unsigned int id; + struct list_head node; + struct rproc *rproc; + struct rproc_vring vring[2]; + u32 rsc_offset; + u32 index; +}; + +struct rproc_debug_trace { + struct rproc *rproc; + struct dentry *tfile; + struct list_head node; + struct rproc_mem_entry trace_mem; +}; + +typedef int (*rproc_handle_resource_t)(struct rproc *, void *, int, int); + +struct rproc_dump_segment { + struct list_head node; + dma_addr_t da; + size_t size; + void *priv; + void (*dump)(struct rproc *, struct rproc_dump_segment *, void *, size_t, size_t); + loff_t offset; +}; + +struct rproc_coredump_state { + struct rproc *rproc; + void *header; + struct completion dump_done; +}; + +struct devfreq_freqs { + long unsigned int old; + long unsigned int new; +}; + +struct devfreq_passive_data { + struct devfreq *parent; + int (*get_target_freq)(struct devfreq *, long unsigned int *); + struct devfreq *this; + struct notifier_block nb; +}; + +struct trace_event_raw_devfreq_frequency { + struct trace_entry ent; + u32 __data_loc_dev_name; + long unsigned int freq; + long unsigned int prev_freq; + long unsigned int busy_time; + long unsigned int total_time; + char __data[0]; +}; + +struct trace_event_raw_devfreq_monitor { + struct trace_entry ent; + long unsigned int freq; + long unsigned int busy_time; + long unsigned int total_time; + unsigned int polling_ms; + u32 __data_loc_dev_name; + char __data[0]; +}; + +struct trace_event_data_offsets_devfreq_frequency { + u32 dev_name; +}; + +struct trace_event_data_offsets_devfreq_monitor { + u32 dev_name; +}; + +typedef void (*btf_trace_devfreq_frequency)(void *, struct devfreq *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_devfreq_monitor)(void *, struct devfreq *); + +struct devfreq_notifier_devres { + struct devfreq *devfreq; + struct notifier_block *nb; + unsigned int list; +}; + +struct devfreq_event_desc; + +struct devfreq_event_dev { + struct list_head node; + struct device dev; + struct mutex lock; + u32 enable_count; + const struct devfreq_event_desc *desc; +}; + +struct devfreq_event_ops; + +struct devfreq_event_desc { + const char *name; + u32 event_type; + void *driver_data; + const struct devfreq_event_ops *ops; 
+}; + +struct devfreq_event_data { + long unsigned int load_count; + long unsigned int total_count; +}; + +struct devfreq_event_ops { + int (*enable)(struct devfreq_event_dev *); + int (*disable)(struct devfreq_event_dev *); + int (*reset)(struct devfreq_event_dev *); + int (*set_event)(struct devfreq_event_dev *); + int (*get_event)(struct devfreq_event_dev *, struct devfreq_event_data *); +}; + +struct devfreq_simple_ondemand_data { + unsigned int upthreshold; + unsigned int downdifferential; +}; + +struct userspace_data { + long unsigned int user_frequency; + bool valid; +}; + +union extcon_property_value { + int intval; +}; + +struct extcon_cable; + +struct extcon_dev___2 { + const char *name; + const unsigned int *supported_cable; + const u32 *mutually_exclusive; + struct device dev; + struct raw_notifier_head nh_all; + struct raw_notifier_head *nh; + struct list_head entry; + int max_supported; + spinlock_t lock; + u32 state; + struct device_type extcon_dev_type; + struct extcon_cable *cables; + struct attribute_group attr_g_muex; + struct attribute **attrs_muex; + struct device_attribute *d_attrs_muex; +}; + +struct extcon_cable { + struct extcon_dev___2 *edev; + int cable_index; + struct attribute_group attr_g; + struct device_attribute attr_name; + struct device_attribute attr_state; + struct attribute *attrs[3]; + union extcon_property_value usb_propval[3]; + union extcon_property_value chg_propval[1]; + union extcon_property_value jack_propval[1]; + union extcon_property_value disp_propval[2]; + long unsigned int usb_bits[1]; + long unsigned int chg_bits[1]; + long unsigned int jack_bits[1]; + long unsigned int disp_bits[1]; +}; + +struct __extcon_info { + unsigned int type; + unsigned int id; + const char *name; +}; + +struct extcon_dev_notifier_devres { + struct extcon_dev___2 *edev; + unsigned int id; + struct notifier_block *nb; +}; + +enum vme_resource_type { + VME_MASTER = 0, + VME_SLAVE = 1, + VME_DMA = 2, + VME_LM = 3, +}; + +struct vme_dma_attr { + u32 type; + void *private; +}; + +struct vme_resource { + enum vme_resource_type type; + struct list_head *entry; +}; + +struct vme_bridge; + +struct vme_dev { + int num; + struct vme_bridge *bridge; + struct device dev; + struct list_head drv_list; + struct list_head bridge_list; +}; + +struct vme_callback { + void (*func)(int, int, void *); + void *priv_data; +}; + +struct vme_irq { + int count; + struct vme_callback callback[256]; +}; + +struct vme_slave_resource; + +struct vme_master_resource; + +struct vme_dma_list; + +struct vme_lm_resource; + +struct vme_bridge { + char name[16]; + int num; + struct list_head master_resources; + struct list_head slave_resources; + struct list_head dma_resources; + struct list_head lm_resources; + struct list_head vme_error_handlers; + struct list_head devices; + struct device *parent; + void *driver_priv; + struct list_head bus_list; + struct vme_irq irq[7]; + struct mutex irq_mtx; + int (*slave_get)(struct vme_slave_resource *, int *, long long unsigned int *, long long unsigned int *, dma_addr_t *, u32 *, u32 *); + int (*slave_set)(struct vme_slave_resource *, int, long long unsigned int, long long unsigned int, dma_addr_t, u32, u32); + int (*master_get)(struct vme_master_resource *, int *, long long unsigned int *, long long unsigned int *, u32 *, u32 *, u32 *); + int (*master_set)(struct vme_master_resource *, int, long long unsigned int, long long unsigned int, u32, u32, u32); + ssize_t (*master_read)(struct vme_master_resource *, void *, size_t, loff_t); + ssize_t 
(*master_write)(struct vme_master_resource *, void *, size_t, loff_t); + unsigned int (*master_rmw)(struct vme_master_resource *, unsigned int, unsigned int, unsigned int, loff_t); + int (*dma_list_add)(struct vme_dma_list *, struct vme_dma_attr *, struct vme_dma_attr *, size_t); + int (*dma_list_exec)(struct vme_dma_list *); + int (*dma_list_empty)(struct vme_dma_list *); + void (*irq_set)(struct vme_bridge *, int, int, int); + int (*irq_generate)(struct vme_bridge *, int, int); + int (*lm_set)(struct vme_lm_resource *, long long unsigned int, u32, u32); + int (*lm_get)(struct vme_lm_resource *, long long unsigned int *, u32 *, u32 *); + int (*lm_attach)(struct vme_lm_resource *, int, void (*)(void *), void *); + int (*lm_detach)(struct vme_lm_resource *, int); + int (*slot_get)(struct vme_bridge *); + void * (*alloc_consistent)(struct device *, size_t, dma_addr_t *); + void (*free_consistent)(struct device *, size_t, void *, dma_addr_t); +}; + +struct vme_driver { + const char *name; + int (*match)(struct vme_dev *); + int (*probe)(struct vme_dev *); + void (*remove)(struct vme_dev *); + struct device_driver driver; + struct list_head devices; +}; + +struct vme_master_resource { + struct list_head list; + struct vme_bridge *parent; + spinlock_t lock; + int locked; + int number; + u32 address_attr; + u32 cycle_attr; + u32 width_attr; + struct resource bus_resource; + void *kern_base; +}; + +struct vme_slave_resource { + struct list_head list; + struct vme_bridge *parent; + struct mutex mtx; + int locked; + int number; + u32 address_attr; + u32 cycle_attr; +}; + +struct vme_dma_pattern { + u32 pattern; + u32 type; +}; + +struct vme_dma_pci { + dma_addr_t address; +}; + +struct vme_dma_vme { + long long unsigned int address; + u32 aspace; + u32 cycle; + u32 dwidth; +}; + +struct vme_dma_resource; + +struct vme_dma_list { + struct list_head list; + struct vme_dma_resource *parent; + struct list_head entries; + struct mutex mtx; +}; + +struct vme_dma_resource { + struct list_head list; + struct vme_bridge *parent; + struct mutex mtx; + int locked; + int number; + struct list_head pending; + struct list_head running; + u32 route_attr; +}; + +struct vme_lm_resource { + struct list_head list; + struct vme_bridge *parent; + struct mutex mtx; + int locked; + int number; + int monitors; +}; + +struct vme_error_handler { + struct list_head list; + long long unsigned int start; + long long unsigned int end; + long long unsigned int first_error; + u32 aspace; + unsigned int num_errors; +}; + +struct powercap_control_type; + +struct powercap_control_type_ops { + int (*set_enable)(struct powercap_control_type *, bool); + int (*get_enable)(struct powercap_control_type *, bool *); + int (*release)(struct powercap_control_type *); +}; + +struct powercap_control_type { + struct device dev; + struct idr idr; + int nr_zones; + const struct powercap_control_type_ops *ops; + struct mutex lock; + bool allocated; + struct list_head node; +}; + +struct powercap_zone; + +struct powercap_zone_ops { + int (*get_max_energy_range_uj)(struct powercap_zone *, u64 *); + int (*get_energy_uj)(struct powercap_zone *, u64 *); + int (*reset_energy_uj)(struct powercap_zone *); + int (*get_max_power_range_uw)(struct powercap_zone *, u64 *); + int (*get_power_uw)(struct powercap_zone *, u64 *); + int (*set_enable)(struct powercap_zone *, bool); + int (*get_enable)(struct powercap_zone *, bool *); + int (*release)(struct powercap_zone *); +}; + +struct powercap_zone_constraint; + +struct powercap_zone { + int id; + char *name; + 
void *control_type_inst; + const struct powercap_zone_ops *ops; + struct device dev; + int const_id_cnt; + struct idr idr; + struct idr *parent_idr; + void *private_data; + struct attribute **zone_dev_attrs; + int zone_attr_count; + struct attribute_group dev_zone_attr_group; + const struct attribute_group *dev_attr_groups[2]; + bool allocated; + struct powercap_zone_constraint *constraints; +}; + +struct powercap_zone_constraint_ops; + +struct powercap_zone_constraint { + int id; + struct powercap_zone *power_zone; + const struct powercap_zone_constraint_ops *ops; +}; + +struct powercap_zone_constraint_ops { + int (*set_power_limit_uw)(struct powercap_zone *, int, u64); + int (*get_power_limit_uw)(struct powercap_zone *, int, u64 *); + int (*set_time_window_us)(struct powercap_zone *, int, u64); + int (*get_time_window_us)(struct powercap_zone *, int, u64 *); + int (*get_max_power_uw)(struct powercap_zone *, int, u64 *); + int (*get_min_power_uw)(struct powercap_zone *, int, u64 *); + int (*get_max_time_window_us)(struct powercap_zone *, int, u64 *); + int (*get_min_time_window_us)(struct powercap_zone *, int, u64 *); + const char * (*get_name)(struct powercap_zone *, int); +}; + +struct dtpm_ops; + +struct dtpm { + struct powercap_zone zone; + struct dtpm *parent; + struct list_head sibling; + struct list_head children; + struct dtpm_ops *ops; + long unsigned int flags; + u64 power_limit; + u64 power_max; + u64 power_min; + int weight; + void *private; +}; + +struct dtpm_ops { + u64 (*set_power_uw)(struct dtpm *, u64); + u64 (*get_power_uw)(struct dtpm *); + void (*release)(struct dtpm *); +}; + +struct dtpm_descr; + +typedef int (*dtpm_init_t)(struct dtpm_descr *); + +struct dtpm_descr { + struct dtpm *parent; + const char *name; + dtpm_init_t init; +}; + +struct dtpm_cpu { + struct freq_qos_request qos_req; + int cpu; +}; + +struct powercap_constraint_attr { + struct device_attribute power_limit_attr; + struct device_attribute time_window_attr; + struct device_attribute max_power_attr; + struct device_attribute min_power_attr; + struct device_attribute max_time_window_attr; + struct device_attribute min_time_window_attr; + struct device_attribute name_attr; +}; + +struct idle_inject_thread { + struct task_struct *tsk; + int should_run; +}; + +struct idle_inject_device { + struct hrtimer timer; + unsigned int idle_duration_us; + unsigned int run_duration_us; + unsigned int latency_us; + long unsigned int cpumask[0]; +}; + +struct trace_event_raw_extlog_mem_event { + struct trace_entry ent; + u32 err_seq; + u8 etype; + u8 sev; + u64 pa; + u8 pa_mask_lsb; + guid_t fru_id; + u32 __data_loc_fru_text; + struct cper_mem_err_compact data; + char __data[0]; +}; + +struct trace_event_raw_mc_event { + struct trace_entry ent; + unsigned int error_type; + u32 __data_loc_msg; + u32 __data_loc_label; + u16 error_count; + u8 mc_index; + s8 top_layer; + s8 middle_layer; + s8 lower_layer; + long int address; + u8 grain_bits; + long int syndrome; + u32 __data_loc_driver_detail; + char __data[0]; +}; + +struct trace_event_raw_arm_event { + struct trace_entry ent; + u64 mpidr; + u64 midr; + u32 running_state; + u32 psci_state; + u8 affinity; + char __data[0]; +}; + +struct trace_event_raw_non_standard_event { + struct trace_entry ent; + char sec_type[16]; + char fru_id[16]; + u32 __data_loc_fru_text; + u8 sev; + u32 len; + u32 __data_loc_buf; + char __data[0]; +}; + +struct trace_event_raw_aer_event { + struct trace_entry ent; + u32 __data_loc_dev_name; + u32 status; + u8 severity; + u8 tlp_header_valid; + 
u32 tlp_header[4]; + char __data[0]; +}; + +struct trace_event_raw_memory_failure_event { + struct trace_entry ent; + long unsigned int pfn; + int type; + int result; + char __data[0]; +}; + +struct trace_event_data_offsets_extlog_mem_event { + u32 fru_text; +}; + +struct trace_event_data_offsets_mc_event { + u32 msg; + u32 label; + u32 driver_detail; +}; + +struct trace_event_data_offsets_arm_event {}; + +struct trace_event_data_offsets_non_standard_event { + u32 fru_text; + u32 buf; +}; + +struct trace_event_data_offsets_aer_event { + u32 dev_name; +}; + +struct trace_event_data_offsets_memory_failure_event {}; + +typedef void (*btf_trace_extlog_mem_event)(void *, struct cper_sec_mem_err *, u32, const guid_t *, const char *, u8); + +typedef void (*btf_trace_mc_event)(void *, const unsigned int, const char *, const char *, const int, const u8, const s8, const s8, const s8, long unsigned int, const u8, long unsigned int, const char *); + +typedef void (*btf_trace_arm_event)(void *, const struct cper_sec_proc_arm *); + +typedef void (*btf_trace_non_standard_event)(void *, const guid_t *, const guid_t *, const char *, const u8, const u8 *, const u32); + +typedef void (*btf_trace_aer_event)(void *, const char *, const u32, const u8, const u8, struct aer_header_log_regs *); + +typedef void (*btf_trace_memory_failure_event)(void *, long unsigned int, int, int); + +struct ce_array { + u64 *array; + unsigned int n; + unsigned int decay_count; + u64 pfns_poisoned; + u64 ces_entered; + u64 decays_done; + union { + struct { + __u32 disabled: 1; + __u32 __resv: 31; + }; + __u32 flags; + }; +}; + +struct nvmem_cell_lookup { + const char *nvmem_name; + const char *cell_name; + const char *dev_id; + const char *con_id; + struct list_head node; +}; + +enum { + NVMEM_ADD = 1, + NVMEM_REMOVE = 2, + NVMEM_CELL_ADD = 3, + NVMEM_CELL_REMOVE = 4, +}; + +struct nvmem_cell_table { + const char *nvmem_name; + const struct nvmem_cell_info *cells; + size_t ncells; + struct list_head node; +}; + +struct nvmem_device___2 { + struct module *owner; + struct device dev; + int stride; + int word_size; + int id; + struct kref refcnt; + size_t size; + bool read_only; + bool root_only; + int flags; + enum nvmem_type type; + struct bin_attribute eeprom; + struct device *base_dev; + struct list_head cells; + const struct nvmem_keepout *keepout; + unsigned int nkeepout; + nvmem_reg_read_t reg_read; + nvmem_reg_write_t reg_write; + struct gpio_desc *wp_gpio; + void *priv; +}; + +struct nvmem_cell { + const char *name; + int offset; + int bytes; + int bit_offset; + int nbits; + struct device_node *np; + struct nvmem_device___2 *nvmem; + struct list_head node; +}; + +struct icc_node; + +struct icc_req { + struct hlist_node req_node; + struct icc_node *node; + struct device *dev; + bool enabled; + u32 tag; + u32 avg_bw; + u32 peak_bw; +}; + +struct icc_path___2 { + const char *name; + size_t num_nodes; + struct icc_req reqs[0]; +}; + +struct icc_node_data { + struct icc_node *node; + u32 tag; +}; + +struct icc_provider; + +struct icc_node { + int id; + const char *name; + struct icc_node **links; + size_t num_links; + struct icc_provider *provider; + struct list_head node_list; + struct list_head search_list; + struct icc_node *reverse; + u8 is_traversed: 1; + struct hlist_head req_list; + u32 avg_bw; + u32 peak_bw; + u32 init_avg; + u32 init_peak; + void *data; +}; + +struct icc_onecell_data { + unsigned int num_nodes; + struct icc_node *nodes[0]; +}; + +struct icc_provider { + struct list_head provider_list; + struct list_head 
nodes; + int (*set)(struct icc_node *, struct icc_node *); + int (*aggregate)(struct icc_node *, u32, u32, u32, u32 *, u32 *); + void (*pre_aggregate)(struct icc_node *); + int (*get_bw)(struct icc_node *, u32 *, u32 *); + struct icc_node * (*xlate)(struct of_phandle_args *, void *); + struct icc_node_data * (*xlate_extended)(struct of_phandle_args *, void *); + struct device *dev; + int users; + bool inter_set; + void *data; +}; + +struct trace_event_raw_icc_set_bw { + struct trace_entry ent; + u32 __data_loc_path_name; + u32 __data_loc_dev; + u32 __data_loc_node_name; + u32 avg_bw; + u32 peak_bw; + u32 node_avg_bw; + u32 node_peak_bw; + char __data[0]; +}; + +struct trace_event_raw_icc_set_bw_end { + struct trace_entry ent; + u32 __data_loc_path_name; + u32 __data_loc_dev; + int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_icc_set_bw { + u32 path_name; + u32 dev; + u32 node_name; +}; + +struct trace_event_data_offsets_icc_set_bw_end { + u32 path_name; + u32 dev; +}; + +typedef void (*btf_trace_icc_set_bw)(void *, struct icc_path___2 *, struct icc_node *, int, u32, u32); + +typedef void (*btf_trace_icc_set_bw_end)(void *, struct icc_path___2 *, int); + +struct icc_bulk_data { + struct icc_path *path; + const char *name; + u32 avg_bw; + u32 peak_bw; +}; + +struct net_device_devres { + struct net_device *ndev; +}; + +struct __kernel_old_timespec { + __kernel_old_time_t tv_sec; + long int tv_nsec; +}; + +struct __kernel_sock_timeval { + __s64 tv_sec; + __s64 tv_usec; +}; + +struct mmsghdr { + struct user_msghdr msg_hdr; + unsigned int msg_len; +}; + +struct scm_timestamping_internal { + struct timespec64 ts[3]; +}; + +struct ifconf { + int ifc_len; + union { + char *ifcu_buf; + struct ifreq *ifcu_req; + } ifc_ifcu; +}; + +struct compat_ifmap { + compat_ulong_t mem_start; + compat_ulong_t mem_end; + short unsigned int base_addr; + unsigned char irq; + unsigned char dma; + unsigned char port; +}; + +struct compat_if_settings { + unsigned int type; + unsigned int size; + compat_uptr_t ifs_ifsu; +}; + +struct compat_ifreq { + union { + char ifrn_name[16]; + } ifr_ifrn; + union { + struct sockaddr ifru_addr; + struct sockaddr ifru_dstaddr; + struct sockaddr ifru_broadaddr; + struct sockaddr ifru_netmask; + struct sockaddr ifru_hwaddr; + short int ifru_flags; + compat_int_t ifru_ivalue; + compat_int_t ifru_mtu; + struct compat_ifmap ifru_map; + char ifru_slave[16]; + char ifru_newname[16]; + compat_caddr_t ifru_data; + struct compat_if_settings ifru_settings; + } ifr_ifru; +}; + +struct compat_ifconf { + compat_int_t ifc_len; + compat_caddr_t ifcbuf; +}; + +enum sock_shutdown_cmd { + SHUT_RD = 0, + SHUT_WR = 1, + SHUT_RDWR = 2, +}; + +struct net_proto_family { + int family; + int (*create)(struct net *, struct socket *, int, int); + struct module *owner; +}; + +enum { + SOCK_WAKE_IO = 0, + SOCK_WAKE_WAITD = 1, + SOCK_WAKE_SPACE = 2, + SOCK_WAKE_URG = 3, +}; + +struct compat_ethtool_rx_flow_spec { + u32 flow_type; + union ethtool_flow_union h_u; + struct ethtool_flow_ext h_ext; + union ethtool_flow_union m_u; + struct ethtool_flow_ext m_ext; + compat_u64 ring_cookie; + u32 location; +} __attribute__((packed)); + +struct compat_ethtool_rxnfc { + u32 cmd; + u32 flow_type; + compat_u64 data; + struct compat_ethtool_rx_flow_spec fs; + u32 rule_cnt; + u32 rule_locs[0]; +} __attribute__((packed)); + +struct libipw_device; + +struct iw_spy_data; + +struct iw_public_data { + struct iw_spy_data *spy_data; + struct libipw_device *libipw; +}; + +struct iw_param { + __s32 value; + __u8 fixed; 
+ __u8 disabled; + __u16 flags; +}; + +struct iw_point { + void *pointer; + __u16 length; + __u16 flags; +}; + +struct iw_freq { + __s32 m; + __s16 e; + __u8 i; + __u8 flags; +}; + +struct iw_quality { + __u8 qual; + __u8 level; + __u8 noise; + __u8 updated; +}; + +struct iw_discarded { + __u32 nwid; + __u32 code; + __u32 fragment; + __u32 retries; + __u32 misc; +}; + +struct iw_missed { + __u32 beacon; +}; + +struct iw_statistics { + __u16 status; + struct iw_quality qual; + struct iw_discarded discard; + struct iw_missed miss; +}; + +union iwreq_data { + char name[16]; + struct iw_point essid; + struct iw_param nwid; + struct iw_freq freq; + struct iw_param sens; + struct iw_param bitrate; + struct iw_param txpower; + struct iw_param rts; + struct iw_param frag; + __u32 mode; + struct iw_param retry; + struct iw_point encoding; + struct iw_param power; + struct iw_quality qual; + struct sockaddr ap_addr; + struct sockaddr addr; + struct iw_param param; + struct iw_point data; +}; + +struct iw_priv_args { + __u32 cmd; + __u16 set_args; + __u16 get_args; + char name[16]; +}; + +struct compat_mmsghdr { + struct compat_msghdr msg_hdr; + compat_uint_t msg_len; +}; + +struct iw_request_info { + __u16 cmd; + __u16 flags; +}; + +struct iw_spy_data { + int spy_number; + u_char spy_address[48]; + struct iw_quality spy_stat[8]; + struct iw_quality spy_thr_low; + struct iw_quality spy_thr_high; + u_char spy_thr_under[8]; +}; + +enum { + SOF_TIMESTAMPING_TX_HARDWARE = 1, + SOF_TIMESTAMPING_TX_SOFTWARE = 2, + SOF_TIMESTAMPING_RX_HARDWARE = 4, + SOF_TIMESTAMPING_RX_SOFTWARE = 8, + SOF_TIMESTAMPING_SOFTWARE = 16, + SOF_TIMESTAMPING_SYS_HARDWARE = 32, + SOF_TIMESTAMPING_RAW_HARDWARE = 64, + SOF_TIMESTAMPING_OPT_ID = 128, + SOF_TIMESTAMPING_TX_SCHED = 256, + SOF_TIMESTAMPING_TX_ACK = 512, + SOF_TIMESTAMPING_OPT_CMSG = 1024, + SOF_TIMESTAMPING_OPT_TSONLY = 2048, + SOF_TIMESTAMPING_OPT_STATS = 4096, + SOF_TIMESTAMPING_OPT_PKTINFO = 8192, + SOF_TIMESTAMPING_OPT_TX_SWHW = 16384, + SOF_TIMESTAMPING_LAST = 16384, + SOF_TIMESTAMPING_MASK = 32767, +}; + +struct scm_ts_pktinfo { + __u32 if_index; + __u32 pkt_length; + __u32 reserved[2]; +}; + +struct sock_skb_cb { + u32 dropcount; +}; + +struct sock_ee_data_rfc4884 { + __u16 len; + __u8 flags; + __u8 reserved; +}; + +struct sock_extended_err { + __u32 ee_errno; + __u8 ee_origin; + __u8 ee_type; + __u8 ee_code; + __u8 ee_pad; + __u32 ee_info; + union { + __u32 ee_data; + struct sock_ee_data_rfc4884 ee_rfc4884; + }; +}; + +struct sock_exterr_skb { + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + } header; + struct sock_extended_err ee; + u16 addr_offset; + __be16 port; + u8 opt_stats: 1; + u8 unused: 7; +}; + +struct used_address { + struct __kernel_sockaddr_storage name; + unsigned int name_len; +}; + +struct linger { + int l_onoff; + int l_linger; +}; + +struct cmsghdr { + __kernel_size_t cmsg_len; + int cmsg_level; + int cmsg_type; +}; + +struct ucred { + __u32 pid; + __u32 uid; + __u32 gid; +}; + +struct prot_inuse { + int val[64]; +}; + +struct sd_flow_limit { + u64 count; + unsigned int num_buckets; + unsigned int history_head; + u16 history[128]; + u8 buckets[0]; +}; + +struct softnet_data { + struct list_head poll_list; + struct sk_buff_head process_queue; + unsigned int processed; + unsigned int time_squeeze; + unsigned int received_rps; + struct softnet_data *rps_ipi_list; + struct sd_flow_limit *flow_limit; + struct Qdisc *output_queue; + struct Qdisc **output_queue_tailp; + struct sk_buff *completion_queue; + struct sk_buff_head 
xfrm_backlog; + struct { + u16 recursion; + u8 more; + } xmit; + int: 32; + unsigned int input_queue_head; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + call_single_data_t csd; + struct softnet_data *rps_ipi_next; + unsigned int cpu; + unsigned int input_queue_tail; + unsigned int dropped; + struct sk_buff_head input_pkt_queue; + struct napi_struct backlog; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +enum txtime_flags { + SOF_TXTIME_DEADLINE_MODE = 1, + SOF_TXTIME_REPORT_ERRORS = 2, + SOF_TXTIME_FLAGS_LAST = 2, + SOF_TXTIME_FLAGS_MASK = 3, +}; + +struct sock_txtime { + __kernel_clockid_t clockid; + __u32 flags; +}; + +enum sk_pacing { + SK_PACING_NONE = 0, + SK_PACING_NEEDED = 1, + SK_PACING_FQ = 2, +}; + +struct sockcm_cookie { + u64 transmit_time; + u32 mark; + u16 tsflags; +}; + +struct fastopen_queue { + struct request_sock *rskq_rst_head; + struct request_sock *rskq_rst_tail; + spinlock_t lock; + int qlen; + int max_qlen; + struct tcp_fastopen_context *ctx; +}; + +struct request_sock_queue { + spinlock_t rskq_lock; + u8 rskq_defer_accept; + u32 synflood_warned; + atomic_t qlen; + atomic_t young; + struct request_sock *rskq_accept_head; + struct request_sock *rskq_accept_tail; + struct fastopen_queue fastopenq; +}; + +struct inet_connection_sock_af_ops { + int (*queue_xmit)(struct sock *, struct sk_buff *, struct flowi *); + void (*send_check)(struct sock *, struct sk_buff *); + int (*rebuild_header)(struct sock *); + void (*sk_rx_dst_set)(struct sock *, const struct sk_buff *); + int (*conn_request)(struct sock *, struct sk_buff *); + struct sock * (*syn_recv_sock)(const struct sock *, struct sk_buff *, struct request_sock *, struct dst_entry *, struct request_sock *, bool *); + u16 net_header_len; + u16 net_frag_header_len; + u16 sockaddr_len; + int (*setsockopt)(struct sock *, int, int, sockptr_t, unsigned int); + int (*getsockopt)(struct sock *, int, int, char *, int *); + void (*addr2sockaddr)(struct sock *, struct sockaddr *); + void (*mtu_reduced)(struct sock *); +}; + +struct inet_bind_bucket; + +struct tcp_ulp_ops; + +struct inet_connection_sock { + struct inet_sock icsk_inet; + struct request_sock_queue icsk_accept_queue; + struct inet_bind_bucket *icsk_bind_hash; + long unsigned int icsk_timeout; + struct timer_list icsk_retransmit_timer; + struct timer_list icsk_delack_timer; + __u32 icsk_rto; + __u32 icsk_rto_min; + __u32 icsk_delack_max; + __u32 icsk_pmtu_cookie; + const struct tcp_congestion_ops *icsk_ca_ops; + const struct inet_connection_sock_af_ops *icsk_af_ops; + const struct tcp_ulp_ops *icsk_ulp_ops; + void *icsk_ulp_data; + void (*icsk_clean_acked)(struct sock *, u32); + struct hlist_node icsk_listen_portaddr_node; + unsigned int (*icsk_sync_mss)(struct sock *, u32); + __u8 icsk_ca_state: 5; + __u8 icsk_ca_initialized: 1; + __u8 icsk_ca_setsockopt: 1; + __u8 icsk_ca_dst_locked: 1; + __u8 icsk_retransmits; + __u8 icsk_pending; + __u8 icsk_backoff; + __u8 icsk_syn_retries; + __u8 icsk_probes_out; + __u16 icsk_ext_hdr_len; + struct { + __u8 pending; + __u8 quick; + __u8 pingpong; + __u8 retry; + __u32 ato; + long unsigned int timeout; + __u32 lrcvtime; + __u16 last_seg_size; + __u16 rcv_mss; + } icsk_ack; + struct { + int search_high; + int search_low; + u32 probe_size: 31; + u32 enabled: 1; + u32 probe_timestamp; + } icsk_mtup; + u32 icsk_probes_tstamp; + u32 icsk_user_timeout; + u64 icsk_ca_priv[13]; +}; + +struct inet_bind_bucket { + possible_net_t ib_net; + int l3mdev; + short unsigned int port; + 
signed char fastreuse; + signed char fastreuseport; + kuid_t fastuid; + struct in6_addr fast_v6_rcv_saddr; + __be32 fast_rcv_saddr; + short unsigned int fast_sk_family; + bool fast_ipv6_only; + struct hlist_node node; + struct hlist_head owners; +}; + +struct tcp_ulp_ops { + struct list_head list; + int (*init)(struct sock *); + void (*update)(struct sock *, struct proto *, void (*)(struct sock *)); + void (*release)(struct sock *); + int (*get_info)(const struct sock *, struct sk_buff *); + size_t (*get_info_size)(const struct sock *); + void (*clone)(const struct request_sock *, struct sock *, const gfp_t); + char name[16]; + struct module *owner; +}; + +struct tcp_fastopen_cookie { + __le64 val[2]; + s8 len; + bool exp; +}; + +struct tcp_sack_block { + u32 start_seq; + u32 end_seq; +}; + +struct tcp_options_received { + int ts_recent_stamp; + u32 ts_recent; + u32 rcv_tsval; + u32 rcv_tsecr; + u16 saw_tstamp: 1; + u16 tstamp_ok: 1; + u16 dsack: 1; + u16 wscale_ok: 1; + u16 sack_ok: 3; + u16 smc_ok: 1; + u16 snd_wscale: 4; + u16 rcv_wscale: 4; + u8 saw_unknown: 1; + u8 unused: 7; + u8 num_sacks; + u16 user_mss; + u16 mss_clamp; +}; + +struct tcp_rack { + u64 mstamp; + u32 rtt_us; + u32 end_seq; + u32 last_delivered; + u8 reo_wnd_steps; + u8 reo_wnd_persist: 5; + u8 dsack_seen: 1; + u8 advanced: 1; +}; + +struct tcp_sock_af_ops; + +struct tcp_md5sig_info; + +struct tcp_fastopen_request; + +struct tcp_sock { + struct inet_connection_sock inet_conn; + u16 tcp_header_len; + u16 gso_segs; + __be32 pred_flags; + u64 bytes_received; + u32 segs_in; + u32 data_segs_in; + u32 rcv_nxt; + u32 copied_seq; + u32 rcv_wup; + u32 snd_nxt; + u32 segs_out; + u32 data_segs_out; + u64 bytes_sent; + u64 bytes_acked; + u32 dsack_dups; + u32 snd_una; + u32 snd_sml; + u32 rcv_tstamp; + u32 lsndtime; + u32 last_oow_ack_time; + u32 compressed_ack_rcv_nxt; + u32 tsoffset; + struct list_head tsq_node; + struct list_head tsorted_sent_queue; + u32 snd_wl1; + u32 snd_wnd; + u32 max_window; + u32 mss_cache; + u32 window_clamp; + u32 rcv_ssthresh; + struct tcp_rack rack; + u16 advmss; + u8 compressed_ack; + u8 dup_ack_counter: 2; + u8 tlp_retrans: 1; + u8 unused: 5; + u32 chrono_start; + u32 chrono_stat[3]; + u8 chrono_type: 2; + u8 rate_app_limited: 1; + u8 fastopen_connect: 1; + u8 fastopen_no_cookie: 1; + u8 is_sack_reneg: 1; + u8 fastopen_client_fail: 2; + u8 nonagle: 4; + u8 thin_lto: 1; + u8 recvmsg_inq: 1; + u8 repair: 1; + u8 frto: 1; + u8 repair_queue; + u8 save_syn: 2; + u8 syn_data: 1; + u8 syn_fastopen: 1; + u8 syn_fastopen_exp: 1; + u8 syn_fastopen_ch: 1; + u8 syn_data_acked: 1; + u8 is_cwnd_limited: 1; + u32 tlp_high_seq; + u32 tcp_tx_delay; + u64 tcp_wstamp_ns; + u64 tcp_clock_cache; + u64 tcp_mstamp; + u32 srtt_us; + u32 mdev_us; + u32 mdev_max_us; + u32 rttvar_us; + u32 rtt_seq; + struct minmax rtt_min; + u32 packets_out; + u32 retrans_out; + u32 max_packets_out; + u32 max_packets_seq; + u16 urg_data; + u8 ecn_flags; + u8 keepalive_probes; + u32 reordering; + u32 reord_seen; + u32 snd_up; + struct tcp_options_received rx_opt; + u32 snd_ssthresh; + u32 snd_cwnd; + u32 snd_cwnd_cnt; + u32 snd_cwnd_clamp; + u32 snd_cwnd_used; + u32 snd_cwnd_stamp; + u32 prior_cwnd; + u32 prr_delivered; + u32 prr_out; + u32 delivered; + u32 delivered_ce; + u32 lost; + u32 app_limited; + u64 first_tx_mstamp; + u64 delivered_mstamp; + u32 rate_delivered; + u32 rate_interval_us; + u32 rcv_wnd; + u32 write_seq; + u32 notsent_lowat; + u32 pushed_seq; + u32 lost_out; + u32 sacked_out; + struct hrtimer pacing_timer; + struct hrtimer 
compressed_ack_timer; + struct sk_buff *lost_skb_hint; + struct sk_buff *retransmit_skb_hint; + struct rb_root out_of_order_queue; + struct sk_buff *ooo_last_skb; + struct tcp_sack_block duplicate_sack[1]; + struct tcp_sack_block selective_acks[4]; + struct tcp_sack_block recv_sack_cache[4]; + struct sk_buff *highest_sack; + int lost_cnt_hint; + u32 prior_ssthresh; + u32 high_seq; + u32 retrans_stamp; + u32 undo_marker; + int undo_retrans; + u64 bytes_retrans; + u32 total_retrans; + u32 urg_seq; + unsigned int keepalive_time; + unsigned int keepalive_intvl; + int linger2; + u8 bpf_sock_ops_cb_flags; + u16 timeout_rehash; + u32 rcv_ooopack; + u32 rcv_rtt_last_tsecr; + struct { + u32 rtt_us; + u32 seq; + u64 time; + } rcv_rtt_est; + struct { + u32 space; + u32 seq; + u64 time; + } rcvq_space; + struct { + u32 probe_seq_start; + u32 probe_seq_end; + } mtu_probe; + u32 mtu_info; + bool is_mptcp; + bool syn_smc; + const struct tcp_sock_af_ops *af_specific; + struct tcp_md5sig_info *md5sig_info; + struct tcp_fastopen_request *fastopen_req; + struct request_sock *fastopen_rsk; + struct saved_syn *saved_syn; +}; + +struct tcp_md5sig_key; + +struct tcp_sock_af_ops { + struct tcp_md5sig_key * (*md5_lookup)(const struct sock *, const struct sock *); + int (*calc_md5_hash)(char *, const struct tcp_md5sig_key *, const struct sock *, const struct sk_buff *); + int (*md5_parse)(struct sock *, int, sockptr_t, int); +}; + +struct tcp_md5sig_info { + struct hlist_head head; + struct callback_head rcu; +}; + +struct tcp_fastopen_request { + struct tcp_fastopen_cookie cookie; + struct msghdr *data; + size_t size; + int copied; + struct ubuf_info *uarg; +}; + +union tcp_md5_addr { + struct in_addr a4; + struct in6_addr a6; +}; + +struct tcp_md5sig_key { + struct hlist_node node; + u8 keylen; + u8 family; + u8 prefixlen; + union tcp_md5_addr addr; + int l3index; + u8 key[80]; + struct callback_head rcu; +}; + +struct net_protocol { + int (*early_demux)(struct sk_buff *); + int (*early_demux_handler)(struct sk_buff *); + int (*handler)(struct sk_buff *); + int (*err_handler)(struct sk_buff *, u32); + unsigned int no_policy: 1; + unsigned int netns_ok: 1; + unsigned int icmp_strict_tag_validation: 1; +}; + +struct cgroup_cls_state { + struct cgroup_subsys_state css; + u32 classid; +}; + +enum { + SK_MEMINFO_RMEM_ALLOC = 0, + SK_MEMINFO_RCVBUF = 1, + SK_MEMINFO_WMEM_ALLOC = 2, + SK_MEMINFO_SNDBUF = 3, + SK_MEMINFO_FWD_ALLOC = 4, + SK_MEMINFO_WMEM_QUEUED = 5, + SK_MEMINFO_OPTMEM = 6, + SK_MEMINFO_BACKLOG = 7, + SK_MEMINFO_DROPS = 8, + SK_MEMINFO_VARS = 9, +}; + +enum sknetlink_groups { + SKNLGRP_NONE = 0, + SKNLGRP_INET_TCP_DESTROY = 1, + SKNLGRP_INET_UDP_DESTROY = 2, + SKNLGRP_INET6_TCP_DESTROY = 3, + SKNLGRP_INET6_UDP_DESTROY = 4, + __SKNLGRP_MAX = 5, +}; + +struct inet_request_sock { + struct request_sock req; + u16 snd_wscale: 4; + u16 rcv_wscale: 4; + u16 tstamp_ok: 1; + u16 sack_ok: 1; + u16 wscale_ok: 1; + u16 ecn_ok: 1; + u16 acked: 1; + u16 no_srccheck: 1; + u16 smc_ok: 1; + u32 ir_mark; + union { + struct ip_options_rcu *ireq_opt; + struct { + struct ipv6_txoptions *ipv6_opt; + struct sk_buff *pktopts; + }; + }; +}; + +struct tcp_request_sock_ops; + +struct tcp_request_sock { + struct inet_request_sock req; + const struct tcp_request_sock_ops *af_specific; + u64 snt_synack; + bool tfo_listener; + bool is_mptcp; + bool drop_req; + u32 txhash; + u32 rcv_isn; + u32 snt_isn; + u32 ts_off; + u32 last_oow_ack_time; + u32 rcv_nxt; + u8 syn_tos; +}; + +enum tcp_synack_type { + TCP_SYNACK_NORMAL = 0, + 
TCP_SYNACK_FASTOPEN = 1, + TCP_SYNACK_COOKIE = 2, +}; + +struct tcp_request_sock_ops { + u16 mss_clamp; + struct tcp_md5sig_key * (*req_md5_lookup)(const struct sock *, const struct sock *); + int (*calc_md5_hash)(char *, const struct tcp_md5sig_key *, const struct sock *, const struct sk_buff *); + __u32 (*cookie_init_seq)(const struct sk_buff *, __u16 *); + struct dst_entry * (*route_req)(const struct sock *, struct sk_buff *, struct flowi *, struct request_sock *); + u32 (*init_seq)(const struct sk_buff *); + u32 (*init_ts_off)(const struct net *, const struct sk_buff *); + int (*send_synack)(const struct sock *, struct dst_entry *, struct flowi *, struct request_sock *, struct tcp_fastopen_cookie *, enum tcp_synack_type, struct sk_buff *); +}; + +enum { + SKB_FCLONE_UNAVAILABLE = 0, + SKB_FCLONE_ORIG = 1, + SKB_FCLONE_CLONE = 2, +}; + +struct sk_buff_fclones { + struct sk_buff skb1; + struct sk_buff skb2; + refcount_t fclone_ref; +}; + +struct skb_seq_state { + __u32 lower_offset; + __u32 upper_offset; + __u32 frag_idx; + __u32 stepped_offset; + struct sk_buff *root_skb; + struct sk_buff *cur_skb; + __u8 *frag_data; + __u32 frag_off; +}; + +struct skb_checksum_ops { + __wsum (*update)(const void *, int, __wsum); + __wsum (*combine)(__wsum, __wsum, int, int); +}; + +struct skb_gso_cb { + union { + int mac_offset; + int data_offset; + }; + int encap_level; + __wsum csum; + __u16 csum_start; +}; + +struct napi_gro_cb { + void *frag0; + unsigned int frag0_len; + int data_offset; + u16 flush; + u16 flush_id; + u16 count; + u16 gro_remcsum_start; + long unsigned int age; + u16 proto; + u8 same_flow: 1; + u8 encap_mark: 1; + u8 csum_valid: 1; + u8 csum_cnt: 3; + u8 free: 2; + u8 is_ipv6: 1; + u8 is_fou: 1; + u8 is_atomic: 1; + u8 recursion_counter: 4; + u8 is_flist: 1; + __wsum csum; + struct sk_buff *last; +}; + +struct vlan_hdr { + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; + +struct vlan_ethhdr { + unsigned char h_dest[6]; + unsigned char h_source[6]; + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; + +struct qdisc_walker { + int stop; + int skip; + int count; + int (*fn)(struct Qdisc *, long unsigned int, struct qdisc_walker *); +}; + +struct ip_auth_hdr { + __u8 nexthdr; + __u8 hdrlen; + __be16 reserved; + __be32 spi; + __be32 seq_no; + __u8 auth_data[0]; +}; + +struct frag_hdr { + __u8 nexthdr; + __u8 reserved; + __be16 frag_off; + __be32 identification; +}; + +enum { + SCM_TSTAMP_SND = 0, + SCM_TSTAMP_SCHED = 1, + SCM_TSTAMP_ACK = 2, +}; + +struct mpls_shim_hdr { + __be32 label_stack_entry; +}; + +struct napi_alloc_cache { + struct page_frag_cache page; + unsigned int skb_count; + void *skb_cache[64]; +}; + +typedef int (*sendmsg_func)(struct sock *, struct msghdr *, struct kvec *, size_t, size_t); + +typedef int (*sendpage_func)(struct sock *, struct page *, int, size_t, int); + +struct ahash_request___2; + +struct scm_cookie { + struct pid *pid; + struct scm_fp_list *fp; + struct scm_creds creds; + struct lsmblob lsmblob; +}; + +struct scm_timestamping { + struct __kernel_old_timespec ts[3]; +}; + +struct scm_timestamping64 { + struct __kernel_timespec ts[3]; +}; + +enum { + TCA_STATS_UNSPEC = 0, + TCA_STATS_BASIC = 1, + TCA_STATS_RATE_EST = 2, + TCA_STATS_QUEUE = 3, + TCA_STATS_APP = 4, + TCA_STATS_RATE_EST64 = 5, + TCA_STATS_PAD = 6, + TCA_STATS_BASIC_HW = 7, + TCA_STATS_PKT64 = 8, + __TCA_STATS_MAX = 9, +}; + +struct gnet_stats_basic { + __u64 bytes; + __u32 packets; +}; + +struct gnet_stats_rate_est { + __u32 bps; + __u32 pps; 
+}; + +struct gnet_stats_rate_est64 { + __u64 bps; + __u64 pps; +}; + +struct gnet_estimator { + signed char interval; + unsigned char ewma_log; +}; + +struct net_rate_estimator { + struct gnet_stats_basic_packed *bstats; + spinlock_t *stats_lock; + seqcount_t *running; + struct gnet_stats_basic_cpu *cpu_bstats; + u8 ewma_log; + u8 intvl_log; + seqcount_t seq; + u64 last_packets; + u64 last_bytes; + u64 avpps; + u64 avbps; + long unsigned int next_jiffies; + struct timer_list timer; + struct callback_head rcu; +}; + +struct rtgenmsg { + unsigned char rtgen_family; +}; + +enum rtnetlink_groups { + RTNLGRP_NONE = 0, + RTNLGRP_LINK = 1, + RTNLGRP_NOTIFY = 2, + RTNLGRP_NEIGH = 3, + RTNLGRP_TC = 4, + RTNLGRP_IPV4_IFADDR = 5, + RTNLGRP_IPV4_MROUTE = 6, + RTNLGRP_IPV4_ROUTE = 7, + RTNLGRP_IPV4_RULE = 8, + RTNLGRP_IPV6_IFADDR = 9, + RTNLGRP_IPV6_MROUTE = 10, + RTNLGRP_IPV6_ROUTE = 11, + RTNLGRP_IPV6_IFINFO = 12, + RTNLGRP_DECnet_IFADDR = 13, + RTNLGRP_NOP2 = 14, + RTNLGRP_DECnet_ROUTE = 15, + RTNLGRP_DECnet_RULE = 16, + RTNLGRP_NOP4 = 17, + RTNLGRP_IPV6_PREFIX = 18, + RTNLGRP_IPV6_RULE = 19, + RTNLGRP_ND_USEROPT = 20, + RTNLGRP_PHONET_IFADDR = 21, + RTNLGRP_PHONET_ROUTE = 22, + RTNLGRP_DCB = 23, + RTNLGRP_IPV4_NETCONF = 24, + RTNLGRP_IPV6_NETCONF = 25, + RTNLGRP_MDB = 26, + RTNLGRP_MPLS_ROUTE = 27, + RTNLGRP_NSID = 28, + RTNLGRP_MPLS_NETCONF = 29, + RTNLGRP_IPV4_MROUTE_R = 30, + RTNLGRP_IPV6_MROUTE_R = 31, + RTNLGRP_NEXTHOP = 32, + RTNLGRP_BRVLAN = 33, + __RTNLGRP_MAX = 34, +}; + +enum { + NETNSA_NONE = 0, + NETNSA_NSID = 1, + NETNSA_PID = 2, + NETNSA_FD = 3, + NETNSA_TARGET_NSID = 4, + NETNSA_CURRENT_NSID = 5, + __NETNSA_MAX = 6, +}; + +struct pcpu_gen_cookie { + local_t nesting; + u64 last; +}; + +struct gen_cookie { + struct pcpu_gen_cookie *local; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + atomic64_t forward_last; + atomic64_t reverse_last; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, struct netlink_ext_ack *); + +typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *); + +enum rtnl_link_flags { + RTNL_FLAG_DOIT_UNLOCKED = 1, +}; + +struct net_fill_args { + u32 portid; + u32 seq; + int flags; + int cmd; + int nsid; + bool add_ref; + int ref_nsid; +}; + +struct rtnl_net_dump_cb { + struct net *tgt_net; + struct net *ref_net; + struct sk_buff *skb; + struct net_fill_args fillargs; + int idx; + int s_idx; +}; + +typedef u16 u_int16_t; + +typedef u32 u_int32_t; + +typedef u64 u_int64_t; + +enum flow_dissect_ret { + FLOW_DISSECT_RET_OUT_GOOD = 0, + FLOW_DISSECT_RET_OUT_BAD = 1, + FLOW_DISSECT_RET_PROTO_AGAIN = 2, + FLOW_DISSECT_RET_IPPROTO_AGAIN = 3, + FLOW_DISSECT_RET_CONTINUE = 4, +}; + +struct flow_dissector_key_tags { + u32 flow_label; +}; + +struct flow_dissector_key_vlan { + union { + struct { + u16 vlan_id: 12; + u16 vlan_dei: 1; + u16 vlan_priority: 3; + }; + __be16 vlan_tci; + }; + __be16 vlan_tpid; +}; + +struct flow_dissector_mpls_lse { + u32 mpls_ttl: 8; + u32 mpls_bos: 1; + u32 mpls_tc: 3; + u32 mpls_label: 20; +}; + +struct flow_dissector_key_mpls { + struct flow_dissector_mpls_lse ls[7]; + u8 used_lses; +}; + +struct flow_dissector_key_enc_opts { + u8 data[255]; + u8 len; + __be16 dst_opt_type; +}; + +struct flow_dissector_key_keyid { + __be32 keyid; +}; + +struct flow_dissector_key_ipv4_addrs { + __be32 src; + __be32 dst; +}; + +struct flow_dissector_key_ipv6_addrs { + struct in6_addr src; + struct in6_addr dst; +}; + 
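Several of the key structures defined just above (for example struct flow_dissector_key_vlan) overlay a packed set of bitfields with the raw on-wire halfword through an anonymous union, a pattern this header uses repeatedly. The standalone sketch below illustrates only that overlay; it uses a local mirror of the layout with uint16_t standing in for __be16, and the names vlan_key_mirror and the demo values are illustrative assumptions, not anything defined in this diff.

#include <stdint.h>
#include <stdio.h>

/* Local mirror of the flow_dissector_key_vlan layout above, for illustration
 * only: uint16_t stands in for __be16 and byte order is ignored here. */
struct vlan_key_mirror {
        union {
                struct {
                        uint16_t vlan_id: 12;
                        uint16_t vlan_dei: 1;
                        uint16_t vlan_priority: 3;
                };
                uint16_t vlan_tci;
        };
        uint16_t vlan_tpid;
};

int main(void)
{
        struct vlan_key_mirror key = { .vlan_tci = 0x2064 };

        /* The bitfields share storage with vlan_tci, so writing the raw TCI
         * makes the id/dei/priority views readable without extra parsing.
         * Bitfield placement is compiler- and endianness-dependent, which is
         * why the real definition pins the raw member to a fixed __be16. */
        printf("id=%u dei=%u prio=%u tpid=0x%04x\n",
               key.vlan_id, key.vlan_dei, key.vlan_priority, key.vlan_tpid);
        return 0;
}

Built with a typical GCC or Clang toolchain this prints the three sub-fields decoded from the raw TCI value; the same union trick lets kernel-side users fill whichever view of the key is convenient without conversion helpers.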
+struct flow_dissector_key_tipc { + __be32 key; +}; + +struct flow_dissector_key_addrs { + union { + struct flow_dissector_key_ipv4_addrs v4addrs; + struct flow_dissector_key_ipv6_addrs v6addrs; + struct flow_dissector_key_tipc tipckey; + }; +}; + +struct flow_dissector_key_arp { + __u32 sip; + __u32 tip; + __u8 op; + unsigned char sha[6]; + unsigned char tha[6]; +}; + +struct flow_dissector_key_ports { + union { + __be32 ports; + struct { + __be16 src; + __be16 dst; + }; + }; +}; + +struct flow_dissector_key_icmp { + struct { + u8 type; + u8 code; + }; + u16 id; +}; + +struct flow_dissector_key_eth_addrs { + unsigned char dst[6]; + unsigned char src[6]; +}; + +struct flow_dissector_key_tcp { + __be16 flags; +}; + +struct flow_dissector_key_ip { + __u8 tos; + __u8 ttl; +}; + +struct flow_dissector_key_meta { + int ingress_ifindex; + u16 ingress_iftype; +}; + +struct flow_dissector_key_ct { + u16 ct_state; + u16 ct_zone; + u32 ct_mark; + u32 ct_labels[4]; +}; + +struct flow_dissector_key_hash { + u32 hash; +}; + +struct flow_dissector_key { + enum flow_dissector_key_id key_id; + size_t offset; +}; + +struct flow_keys { + struct flow_dissector_key_control control; + struct flow_dissector_key_basic basic; + struct flow_dissector_key_tags tags; + struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_vlan cvlan; + struct flow_dissector_key_keyid keyid; + struct flow_dissector_key_ports ports; + struct flow_dissector_key_icmp icmp; + struct flow_dissector_key_addrs addrs; + int: 32; +}; + +struct flow_keys_digest { + u8 data[16]; +}; + +enum ip_conntrack_info { + IP_CT_ESTABLISHED = 0, + IP_CT_RELATED = 1, + IP_CT_NEW = 2, + IP_CT_IS_REPLY = 3, + IP_CT_ESTABLISHED_REPLY = 3, + IP_CT_RELATED_REPLY = 4, + IP_CT_NUMBER = 5, + IP_CT_UNTRACKED = 7, +}; + +union nf_inet_addr { + __u32 all[4]; + __be32 ip; + __be32 ip6[4]; + struct in_addr in; + struct in6_addr in6; +}; + +struct ip_ct_tcp_state { + u_int32_t td_end; + u_int32_t td_maxend; + u_int32_t td_maxwin; + u_int32_t td_maxack; + u_int8_t td_scale; + u_int8_t flags; +}; + +struct ip_ct_tcp { + struct ip_ct_tcp_state seen[2]; + u_int8_t state; + u_int8_t last_dir; + u_int8_t retrans; + u_int8_t last_index; + u_int32_t last_seq; + u_int32_t last_ack; + u_int32_t last_end; + u_int16_t last_win; + u_int8_t last_wscale; + u_int8_t last_flags; +}; + +union nf_conntrack_man_proto { + __be16 all; + struct { + __be16 port; + } tcp; + struct { + __be16 port; + } udp; + struct { + __be16 id; + } icmp; + struct { + __be16 port; + } dccp; + struct { + __be16 port; + } sctp; + struct { + __be16 key; + } gre; +}; + +struct nf_ct_dccp { + u_int8_t role[2]; + u_int8_t state; + u_int8_t last_pkt; + u_int8_t last_dir; + u_int64_t handshake_seq; +}; + +struct ip_ct_sctp { + enum sctp_conntrack state; + __be32 vtag[2]; + u8 last_dir; + u8 flags; +}; + +struct nf_ct_event; + +struct nf_ct_event_notifier { + int (*fcn)(unsigned int, struct nf_ct_event *); +}; + +struct nf_exp_event; + +struct nf_exp_event_notifier { + int (*fcn)(unsigned int, struct nf_exp_event *); +}; + +enum bpf_ret_code { + BPF_OK = 0, + BPF_DROP = 2, + BPF_REDIRECT = 7, + BPF_LWT_REROUTE = 128, +}; + +enum { + BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 1, + BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 2, + BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 4, +}; + +enum { + TCA_FLOWER_KEY_CT_FLAGS_NEW = 1, + TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED = 2, + TCA_FLOWER_KEY_CT_FLAGS_RELATED = 4, + TCA_FLOWER_KEY_CT_FLAGS_TRACKED = 8, + TCA_FLOWER_KEY_CT_FLAGS_INVALID = 16, + TCA_FLOWER_KEY_CT_FLAGS_REPLY = 32, + 
__TCA_FLOWER_KEY_CT_FLAGS_MAX = 33, +}; + +enum devlink_port_type { + DEVLINK_PORT_TYPE_NOTSET = 0, + DEVLINK_PORT_TYPE_AUTO = 1, + DEVLINK_PORT_TYPE_ETH = 2, + DEVLINK_PORT_TYPE_IB = 3, +}; + +enum devlink_port_flavour { + DEVLINK_PORT_FLAVOUR_PHYSICAL = 0, + DEVLINK_PORT_FLAVOUR_CPU = 1, + DEVLINK_PORT_FLAVOUR_DSA = 2, + DEVLINK_PORT_FLAVOUR_PCI_PF = 3, + DEVLINK_PORT_FLAVOUR_PCI_VF = 4, + DEVLINK_PORT_FLAVOUR_VIRTUAL = 5, + DEVLINK_PORT_FLAVOUR_UNUSED = 6, + DEVLINK_PORT_FLAVOUR_PCI_SF = 7, +}; + +struct devlink_port_phys_attrs { + u32 port_number; + u32 split_subport_number; +}; + +struct devlink_port_pci_pf_attrs { + u32 controller; + u16 pf; + u8 external: 1; +}; + +struct devlink_port_pci_vf_attrs { + u32 controller; + u16 pf; + u16 vf; + u8 external: 1; +}; + +struct devlink_port_pci_sf_attrs { + u32 controller; + u32 sf; + u16 pf; + u8 external: 1; +}; + +struct devlink_port_attrs { + u8 split: 1; + u8 splittable: 1; + u32 lanes; + enum devlink_port_flavour flavour; + struct netdev_phys_item_id switch_id; + union { + struct devlink_port_phys_attrs phys; + struct devlink_port_pci_pf_attrs pci_pf; + struct devlink_port_pci_vf_attrs pci_vf; + struct devlink_port_pci_sf_attrs pci_sf; + }; +}; + +struct devlink; + +struct devlink_port { + struct list_head list; + struct list_head param_list; + struct list_head region_list; + struct devlink *devlink; + unsigned int index; + bool registered; + spinlock_t type_lock; + enum devlink_port_type type; + enum devlink_port_type desired_type; + void *type_dev; + struct devlink_port_attrs attrs; + u8 attrs_set: 1; + u8 switch_port: 1; + struct delayed_work type_warn_dw; + struct list_head reporter_list; + struct mutex reporters_lock; +}; + +enum phylink_op_type { + PHYLINK_NETDEV = 0, + PHYLINK_DEV = 1, +}; + +struct phylink_link_state; + +struct phylink_config { + struct device *dev; + enum phylink_op_type type; + bool pcs_poll; + bool poll_fixed_state; + bool ovr_an_inband; + void (*get_fixed_state)(struct phylink_config *, struct phylink_link_state *); +}; + +struct dsa_device_ops; + +struct dsa_switch_tree; + +struct packet_type; + +struct dsa_switch; + +struct dsa_netdevice_ops; + +struct dsa_port { + union { + struct net_device *master; + struct net_device *slave; + }; + const struct dsa_device_ops *tag_ops; + struct dsa_switch_tree *dst; + struct sk_buff * (*rcv)(struct sk_buff *, struct net_device *, struct packet_type *); + bool (*filter)(const struct sk_buff *, struct net_device *); + enum { + DSA_PORT_TYPE_UNUSED = 0, + DSA_PORT_TYPE_CPU = 1, + DSA_PORT_TYPE_DSA = 2, + DSA_PORT_TYPE_USER = 3, + } type; + struct dsa_switch *ds; + unsigned int index; + const char *name; + struct dsa_port *cpu_dp; + u8 mac[6]; + struct device_node *dn; + unsigned int ageing_time; + bool vlan_filtering; + u8 stp_state; + struct net_device *bridge_dev; + struct devlink_port devlink_port; + bool devlink_port_setup; + struct phylink *pl; + struct phylink_config pl_config; + struct net_device *lag_dev; + bool lag_tx_enabled; + struct net_device *hsr_dev; + struct list_head list; + void *priv; + const struct ethtool_ops *orig_ethtool_ops; + const struct dsa_netdevice_ops *netdev_ops; + bool setup; +}; + +struct packet_type { + __be16 type; + bool ignore_outgoing; + struct net_device *dev; + int (*func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); + void (*list_func)(struct list_head *, struct packet_type *, struct net_device *); + bool (*id_match)(struct packet_type *, struct sock *); + void *af_packet_priv; + struct 
list_head list; +}; + +enum netdev_lag_tx_type { + NETDEV_LAG_TX_TYPE_UNKNOWN = 0, + NETDEV_LAG_TX_TYPE_RANDOM = 1, + NETDEV_LAG_TX_TYPE_BROADCAST = 2, + NETDEV_LAG_TX_TYPE_ROUNDROBIN = 3, + NETDEV_LAG_TX_TYPE_ACTIVEBACKUP = 4, + NETDEV_LAG_TX_TYPE_HASH = 5, +}; + +enum netdev_lag_hash { + NETDEV_LAG_HASH_NONE = 0, + NETDEV_LAG_HASH_L2 = 1, + NETDEV_LAG_HASH_L34 = 2, + NETDEV_LAG_HASH_L23 = 3, + NETDEV_LAG_HASH_E23 = 4, + NETDEV_LAG_HASH_E34 = 5, + NETDEV_LAG_HASH_VLAN_SRCMAC = 6, + NETDEV_LAG_HASH_UNKNOWN = 7, +}; + +struct netdev_lag_upper_info { + enum netdev_lag_tx_type tx_type; + enum netdev_lag_hash hash_type; +}; + +struct netdev_notifier_changeupper_info { + struct netdev_notifier_info info; + struct net_device *upper_dev; + bool master; + bool linking; + void *upper_info; +}; + +struct flow_match { + struct flow_dissector *dissector; + void *mask; + void *key; +}; + +enum flow_action_id { + FLOW_ACTION_ACCEPT = 0, + FLOW_ACTION_DROP = 1, + FLOW_ACTION_TRAP = 2, + FLOW_ACTION_GOTO = 3, + FLOW_ACTION_REDIRECT = 4, + FLOW_ACTION_MIRRED = 5, + FLOW_ACTION_REDIRECT_INGRESS = 6, + FLOW_ACTION_MIRRED_INGRESS = 7, + FLOW_ACTION_VLAN_PUSH = 8, + FLOW_ACTION_VLAN_POP = 9, + FLOW_ACTION_VLAN_MANGLE = 10, + FLOW_ACTION_TUNNEL_ENCAP = 11, + FLOW_ACTION_TUNNEL_DECAP = 12, + FLOW_ACTION_MANGLE = 13, + FLOW_ACTION_ADD = 14, + FLOW_ACTION_CSUM = 15, + FLOW_ACTION_MARK = 16, + FLOW_ACTION_PTYPE = 17, + FLOW_ACTION_PRIORITY = 18, + FLOW_ACTION_WAKE = 19, + FLOW_ACTION_QUEUE = 20, + FLOW_ACTION_SAMPLE = 21, + FLOW_ACTION_POLICE = 22, + FLOW_ACTION_CT = 23, + FLOW_ACTION_CT_METADATA = 24, + FLOW_ACTION_MPLS_PUSH = 25, + FLOW_ACTION_MPLS_POP = 26, + FLOW_ACTION_MPLS_MANGLE = 27, + FLOW_ACTION_GATE = 28, + FLOW_ACTION_PPPOE_PUSH = 29, + NUM_FLOW_ACTIONS = 30, +}; + +enum flow_action_mangle_base { + FLOW_ACT_MANGLE_UNSPEC = 0, + FLOW_ACT_MANGLE_HDR_TYPE_ETH = 1, + FLOW_ACT_MANGLE_HDR_TYPE_IP4 = 2, + FLOW_ACT_MANGLE_HDR_TYPE_IP6 = 3, + FLOW_ACT_MANGLE_HDR_TYPE_TCP = 4, + FLOW_ACT_MANGLE_HDR_TYPE_UDP = 5, +}; + +enum flow_action_hw_stats { + FLOW_ACTION_HW_STATS_IMMEDIATE = 1, + FLOW_ACTION_HW_STATS_DELAYED = 2, + FLOW_ACTION_HW_STATS_ANY = 3, + FLOW_ACTION_HW_STATS_DISABLED = 4, + FLOW_ACTION_HW_STATS_DONT_CARE = 7, +}; + +typedef void (*action_destr)(void *); + +struct flow_action_cookie { + u32 cookie_len; + u8 cookie[0]; +}; + +struct nf_flowtable; + +struct ip_tunnel_key { + __be64 tun_id; + union { + struct { + __be32 src; + __be32 dst; + } ipv4; + struct { + struct in6_addr src; + struct in6_addr dst; + } ipv6; + } u; + __be16 tun_flags; + u8 tos; + u8 ttl; + __be32 label; + __be16 tp_src; + __be16 tp_dst; +}; + +struct dst_cache_pcpu; + +struct dst_cache { + struct dst_cache_pcpu *cache; + long unsigned int reset_ts; +}; + +struct ip_tunnel_info { + struct ip_tunnel_key key; + struct dst_cache dst_cache; + u8 options_len; + u8 mode; +}; + +struct psample_group; + +struct action_gate_entry; + +struct flow_action_entry { + enum flow_action_id id; + enum flow_action_hw_stats hw_stats; + action_destr destructor; + void *destructor_priv; + union { + u32 chain_index; + struct net_device *dev; + struct { + u16 vid; + __be16 proto; + u8 prio; + } vlan; + struct { + enum flow_action_mangle_base htype; + u32 offset; + u32 mask; + u32 val; + } mangle; + struct ip_tunnel_info *tunnel; + u32 csum_flags; + u32 mark; + u16 ptype; + u32 priority; + struct { + u32 ctx; + u32 index; + u8 vf; + } queue; + struct { + struct psample_group *psample_group; + u32 rate; + u32 trunc_size; + bool truncate; + } sample; + 
struct { + u32 index; + u32 burst; + u64 rate_bytes_ps; + u64 burst_pkt; + u64 rate_pkt_ps; + u32 mtu; + } police; + struct { + int action; + u16 zone; + struct nf_flowtable *flow_table; + } ct; + struct { + long unsigned int cookie; + u32 mark; + u32 labels[4]; + bool orig_dir; + } ct_metadata; + struct { + u32 label; + __be16 proto; + u8 tc; + u8 bos; + u8 ttl; + } mpls_push; + struct { + __be16 proto; + } mpls_pop; + struct { + u32 label; + u8 tc; + u8 bos; + u8 ttl; + } mpls_mangle; + struct { + u32 index; + s32 prio; + u64 basetime; + u64 cycletime; + u64 cycletimeext; + u32 num_entries; + struct action_gate_entry *entries; + } gate; + struct { + u16 sid; + } pppoe; + }; + struct flow_action_cookie *cookie; +}; + +struct flow_action { + unsigned int num_entries; + struct flow_action_entry entries[0]; +}; + +struct flow_rule { + struct flow_match match; + struct flow_action action; +}; + +struct flow_stats { + u64 pkts; + u64 bytes; + u64 drops; + u64 lastused; + enum flow_action_hw_stats used_hw_stats; + bool used_hw_stats_valid; +}; + +enum flow_cls_command { + FLOW_CLS_REPLACE = 0, + FLOW_CLS_DESTROY = 1, + FLOW_CLS_STATS = 2, + FLOW_CLS_TMPLT_CREATE = 3, + FLOW_CLS_TMPLT_DESTROY = 4, +}; + +struct flow_cls_common_offload { + u32 chain_index; + __be16 protocol; + u32 prio; + struct netlink_ext_ack *extack; +}; + +struct flow_cls_offload { + struct flow_cls_common_offload common; + enum flow_cls_command command; + long unsigned int cookie; + struct flow_rule *rule; + struct flow_stats stats; + u32 classid; +}; + +union tcp_word_hdr { + struct tcphdr hdr; + __be32 words[5]; +}; + +struct dsa_chip_data { + struct device *host_dev; + int sw_addr; + struct device *netdev[12]; + int eeprom_len; + struct device_node *of_node; + char *port_names[12]; + struct device_node *port_dn[12]; + s8 rtable[4]; +}; + +struct dsa_platform_data { + struct device *netdev; + struct net_device *of_netdev; + int nr_chips; + struct dsa_chip_data *chip; +}; + +struct phylink_link_state { + long unsigned int advertising[2]; + long unsigned int lp_advertising[2]; + phy_interface_t interface; + int speed; + int duplex; + int pause; + unsigned int link: 1; + unsigned int an_enabled: 1; + unsigned int an_complete: 1; +}; + +enum devlink_sb_pool_type { + DEVLINK_SB_POOL_TYPE_INGRESS = 0, + DEVLINK_SB_POOL_TYPE_EGRESS = 1, +}; + +enum devlink_sb_threshold_type { + DEVLINK_SB_THRESHOLD_TYPE_STATIC = 0, + DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC = 1, +}; + +enum devlink_eswitch_encap_mode { + DEVLINK_ESWITCH_ENCAP_MODE_NONE = 0, + DEVLINK_ESWITCH_ENCAP_MODE_BASIC = 1, +}; + +enum devlink_param_cmode { + DEVLINK_PARAM_CMODE_RUNTIME = 0, + DEVLINK_PARAM_CMODE_DRIVERINIT = 1, + DEVLINK_PARAM_CMODE_PERMANENT = 2, + __DEVLINK_PARAM_CMODE_MAX = 3, + DEVLINK_PARAM_CMODE_MAX = 2, +}; + +enum devlink_trap_action { + DEVLINK_TRAP_ACTION_DROP = 0, + DEVLINK_TRAP_ACTION_TRAP = 1, + DEVLINK_TRAP_ACTION_MIRROR = 2, +}; + +enum devlink_trap_type { + DEVLINK_TRAP_TYPE_DROP = 0, + DEVLINK_TRAP_TYPE_EXCEPTION = 1, + DEVLINK_TRAP_TYPE_CONTROL = 2, +}; + +enum devlink_reload_action { + DEVLINK_RELOAD_ACTION_UNSPEC = 0, + DEVLINK_RELOAD_ACTION_DRIVER_REINIT = 1, + DEVLINK_RELOAD_ACTION_FW_ACTIVATE = 2, + __DEVLINK_RELOAD_ACTION_MAX = 3, + DEVLINK_RELOAD_ACTION_MAX = 2, +}; + +enum devlink_reload_limit { + DEVLINK_RELOAD_LIMIT_UNSPEC = 0, + DEVLINK_RELOAD_LIMIT_NO_RESET = 1, + __DEVLINK_RELOAD_LIMIT_MAX = 2, + DEVLINK_RELOAD_LIMIT_MAX = 1, +}; + +enum devlink_dpipe_field_mapping_type { + DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0, + 
DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 1, +}; + +enum devlink_port_fn_state { + DEVLINK_PORT_FN_STATE_INACTIVE = 0, + DEVLINK_PORT_FN_STATE_ACTIVE = 1, +}; + +enum devlink_port_fn_opstate { + DEVLINK_PORT_FN_OPSTATE_DETACHED = 0, + DEVLINK_PORT_FN_OPSTATE_ATTACHED = 1, +}; + +struct devlink_dev_stats { + u32 reload_stats[6]; + u32 remote_reload_stats[6]; +}; + +struct devlink_dpipe_headers; + +struct devlink_ops; + +struct devlink { + struct list_head list; + struct list_head port_list; + struct list_head sb_list; + struct list_head dpipe_table_list; + struct list_head resource_list; + struct list_head param_list; + struct list_head region_list; + struct list_head reporter_list; + struct mutex reporters_lock; + struct devlink_dpipe_headers *dpipe_headers; + struct list_head trap_list; + struct list_head trap_group_list; + struct list_head trap_policer_list; + const struct devlink_ops *ops; + struct xarray snapshot_ids; + struct devlink_dev_stats stats; + struct device *dev; + possible_net_t _net; + struct mutex lock; + u8 reload_failed: 1; + u8 reload_enabled: 1; + u8 registered: 1; + long: 61; + long: 64; + char priv[0]; +}; + +struct devlink_dpipe_header; + +struct devlink_dpipe_headers { + struct devlink_dpipe_header **headers; + unsigned int headers_count; +}; + +struct devlink_sb_pool_info; + +struct devlink_info_req; + +struct devlink_flash_update_params; + +struct devlink_trap; + +struct devlink_trap_group; + +struct devlink_trap_policer; + +struct devlink_port_new_attrs; + +struct devlink_ops { + u32 supported_flash_update_params; + long unsigned int reload_actions; + long unsigned int reload_limits; + int (*reload_down)(struct devlink *, bool, enum devlink_reload_action, enum devlink_reload_limit, struct netlink_ext_ack *); + int (*reload_up)(struct devlink *, enum devlink_reload_action, enum devlink_reload_limit, u32 *, struct netlink_ext_ack *); + int (*port_type_set)(struct devlink_port *, enum devlink_port_type); + int (*port_split)(struct devlink *, unsigned int, unsigned int, struct netlink_ext_ack *); + int (*port_unsplit)(struct devlink *, unsigned int, struct netlink_ext_ack *); + int (*sb_pool_get)(struct devlink *, unsigned int, u16, struct devlink_sb_pool_info *); + int (*sb_pool_set)(struct devlink *, unsigned int, u16, u32, enum devlink_sb_threshold_type, struct netlink_ext_ack *); + int (*sb_port_pool_get)(struct devlink_port *, unsigned int, u16, u32 *); + int (*sb_port_pool_set)(struct devlink_port *, unsigned int, u16, u32, struct netlink_ext_ack *); + int (*sb_tc_pool_bind_get)(struct devlink_port *, unsigned int, u16, enum devlink_sb_pool_type, u16 *, u32 *); + int (*sb_tc_pool_bind_set)(struct devlink_port *, unsigned int, u16, enum devlink_sb_pool_type, u16, u32, struct netlink_ext_ack *); + int (*sb_occ_snapshot)(struct devlink *, unsigned int); + int (*sb_occ_max_clear)(struct devlink *, unsigned int); + int (*sb_occ_port_pool_get)(struct devlink_port *, unsigned int, u16, u32 *, u32 *); + int (*sb_occ_tc_port_bind_get)(struct devlink_port *, unsigned int, u16, enum devlink_sb_pool_type, u32 *, u32 *); + int (*eswitch_mode_get)(struct devlink *, u16 *); + int (*eswitch_mode_set)(struct devlink *, u16, struct netlink_ext_ack *); + int (*eswitch_inline_mode_get)(struct devlink *, u8 *); + int (*eswitch_inline_mode_set)(struct devlink *, u8, struct netlink_ext_ack *); + int (*eswitch_encap_mode_get)(struct devlink *, enum devlink_eswitch_encap_mode *); + int (*eswitch_encap_mode_set)(struct devlink *, enum devlink_eswitch_encap_mode, struct netlink_ext_ack 
*); + int (*info_get)(struct devlink *, struct devlink_info_req *, struct netlink_ext_ack *); + int (*flash_update)(struct devlink *, struct devlink_flash_update_params *, struct netlink_ext_ack *); + int (*trap_init)(struct devlink *, const struct devlink_trap *, void *); + void (*trap_fini)(struct devlink *, const struct devlink_trap *, void *); + int (*trap_action_set)(struct devlink *, const struct devlink_trap *, enum devlink_trap_action, struct netlink_ext_ack *); + int (*trap_group_init)(struct devlink *, const struct devlink_trap_group *); + int (*trap_group_set)(struct devlink *, const struct devlink_trap_group *, const struct devlink_trap_policer *, struct netlink_ext_ack *); + int (*trap_group_action_set)(struct devlink *, const struct devlink_trap_group *, enum devlink_trap_action, struct netlink_ext_ack *); + int (*trap_policer_init)(struct devlink *, const struct devlink_trap_policer *); + void (*trap_policer_fini)(struct devlink *, const struct devlink_trap_policer *); + int (*trap_policer_set)(struct devlink *, const struct devlink_trap_policer *, u64, u64, struct netlink_ext_ack *); + int (*trap_policer_counter_get)(struct devlink *, const struct devlink_trap_policer *, u64 *); + int (*port_function_hw_addr_get)(struct devlink *, struct devlink_port *, u8 *, int *, struct netlink_ext_ack *); + int (*port_function_hw_addr_set)(struct devlink *, struct devlink_port *, const u8 *, int, struct netlink_ext_ack *); + int (*port_new)(struct devlink *, const struct devlink_port_new_attrs *, struct netlink_ext_ack *, unsigned int *); + int (*port_del)(struct devlink *, unsigned int, struct netlink_ext_ack *); + int (*port_fn_state_get)(struct devlink *, struct devlink_port *, enum devlink_port_fn_state *, enum devlink_port_fn_opstate *, struct netlink_ext_ack *); + int (*port_fn_state_set)(struct devlink *, struct devlink_port *, enum devlink_port_fn_state, struct netlink_ext_ack *); +}; + +struct devlink_port_new_attrs { + enum devlink_port_flavour flavour; + unsigned int port_index; + u32 controller; + u32 sfnum; + u16 pfnum; + u8 port_index_valid: 1; + u8 controller_valid: 1; + u8 sfnum_valid: 1; +}; + +struct devlink_sb_pool_info { + enum devlink_sb_pool_type pool_type; + u32 size; + enum devlink_sb_threshold_type threshold_type; + u32 cell_size; +}; + +struct devlink_dpipe_field { + const char *name; + unsigned int id; + unsigned int bitwidth; + enum devlink_dpipe_field_mapping_type mapping_type; +}; + +struct devlink_dpipe_header { + const char *name; + unsigned int id; + struct devlink_dpipe_field *fields; + unsigned int fields_count; + bool global; +}; + +union devlink_param_value { + u8 vu8; + u16 vu16; + u32 vu32; + char vstr[32]; + bool vbool; +}; + +struct devlink_param_gset_ctx { + union devlink_param_value val; + enum devlink_param_cmode cmode; +}; + +struct devlink_flash_update_params { + const struct firmware *fw; + const char *component; + u32 overwrite_mask; +}; + +struct devlink_trap_policer { + u32 id; + u64 init_rate; + u64 init_burst; + u64 max_rate; + u64 min_rate; + u64 max_burst; + u64 min_burst; +}; + +struct devlink_trap_group { + const char *name; + u16 id; + bool generic; + u32 init_policer_id; +}; + +struct devlink_trap { + enum devlink_trap_type type; + enum devlink_trap_action init_action; + bool generic; + u16 id; + const char *name; + u16 init_group_id; + u32 metadata_cap; +}; + +struct arphdr { + __be16 ar_hrd; + __be16 ar_pro; + unsigned char ar_hln; + unsigned char ar_pln; + __be16 ar_op; +}; + +struct switchdev_brport_flags { + long unsigned 
int val; + long unsigned int mask; +}; + +enum switchdev_obj_id { + SWITCHDEV_OBJ_ID_UNDEFINED = 0, + SWITCHDEV_OBJ_ID_PORT_VLAN = 1, + SWITCHDEV_OBJ_ID_PORT_MDB = 2, + SWITCHDEV_OBJ_ID_HOST_MDB = 3, + SWITCHDEV_OBJ_ID_MRP = 4, + SWITCHDEV_OBJ_ID_RING_TEST_MRP = 5, + SWITCHDEV_OBJ_ID_RING_ROLE_MRP = 6, + SWITCHDEV_OBJ_ID_RING_STATE_MRP = 7, + SWITCHDEV_OBJ_ID_IN_TEST_MRP = 8, + SWITCHDEV_OBJ_ID_IN_ROLE_MRP = 9, + SWITCHDEV_OBJ_ID_IN_STATE_MRP = 10, +}; + +struct switchdev_obj { + struct list_head list; + struct net_device *orig_dev; + enum switchdev_obj_id id; + u32 flags; + void *complete_priv; + void (*complete)(struct net_device *, int, void *); +}; + +struct switchdev_obj_port_vlan { + struct switchdev_obj obj; + u16 flags; + u16 vid; +}; + +struct switchdev_obj_port_mdb { + struct switchdev_obj obj; + unsigned char addr[6]; + u16 vid; +}; + +struct switchdev_obj_mrp { + struct switchdev_obj obj; + struct net_device *p_port; + struct net_device *s_port; + u32 ring_id; + u16 prio; +}; + +struct switchdev_obj_ring_role_mrp { + struct switchdev_obj obj; + u8 ring_role; + u32 ring_id; + u8 sw_backup; +}; + +enum dsa_tag_protocol { + DSA_TAG_PROTO_NONE = 0, + DSA_TAG_PROTO_BRCM = 1, + DSA_TAG_PROTO_BRCM_LEGACY = 22, + DSA_TAG_PROTO_BRCM_PREPEND = 2, + DSA_TAG_PROTO_DSA = 3, + DSA_TAG_PROTO_EDSA = 4, + DSA_TAG_PROTO_GSWIP = 5, + DSA_TAG_PROTO_KSZ9477 = 6, + DSA_TAG_PROTO_KSZ9893 = 7, + DSA_TAG_PROTO_LAN9303 = 8, + DSA_TAG_PROTO_MTK = 9, + DSA_TAG_PROTO_QCA = 10, + DSA_TAG_PROTO_TRAILER = 11, + DSA_TAG_PROTO_8021Q = 12, + DSA_TAG_PROTO_SJA1105 = 13, + DSA_TAG_PROTO_KSZ8795 = 14, + DSA_TAG_PROTO_OCELOT = 15, + DSA_TAG_PROTO_AR9331 = 16, + DSA_TAG_PROTO_RTL4_A = 17, + DSA_TAG_PROTO_HELLCREEK = 18, + DSA_TAG_PROTO_XRS700X = 19, + DSA_TAG_PROTO_OCELOT_8021Q = 20, + DSA_TAG_PROTO_SEVILLE = 21, +}; + +struct dsa_device_ops { + struct sk_buff * (*xmit)(struct sk_buff *, struct net_device *); + struct sk_buff * (*rcv)(struct sk_buff *, struct net_device *, struct packet_type *); + void (*flow_dissect)(const struct sk_buff *, __be16 *, int *); + bool (*filter)(const struct sk_buff *, struct net_device *); + unsigned int overhead; + const char *name; + enum dsa_tag_protocol proto; + bool promisc_on_master; + bool tail_tag; +}; + +struct dsa_netdevice_ops { + int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int); +}; + +struct dsa_switch_tree { + struct list_head list; + struct raw_notifier_head nh; + unsigned int index; + struct kref refcount; + bool setup; + const struct dsa_device_ops *tag_ops; + enum dsa_tag_protocol default_proto; + struct dsa_platform_data *pd; + struct list_head ports; + struct list_head rtable; + struct net_device **lags; + unsigned int lags_len; +}; + +struct dsa_mall_mirror_tc_entry { + u8 to_local_port; + bool ingress; +}; + +struct dsa_mall_policer_tc_entry { + u32 burst; + u64 rate_bytes_per_sec; +}; + +struct dsa_switch_ops; + +struct dsa_switch { + bool setup; + struct device *dev; + struct dsa_switch_tree *dst; + unsigned int index; + struct notifier_block nb; + void *priv; + struct dsa_chip_data *cd; + const struct dsa_switch_ops *ops; + u32 phys_mii_mask; + struct mii_bus *slave_mii_bus; + unsigned int ageing_time_min; + unsigned int ageing_time_max; + struct devlink *devlink; + unsigned int num_tx_queues; + bool vlan_filtering_is_global; + bool configure_vlan_while_not_filtering; + bool untag_bridge_pvid; + bool assisted_learning_on_cpu_port; + bool vlan_filtering; + bool pcs_poll; + bool mtu_enforcement_ingress; + unsigned int num_lag_ids; + size_t num_ports; 
+}; + +struct fixed_phy_status___2; + +typedef int dsa_fdb_dump_cb_t(const unsigned char *, u16, bool, void *); + +struct dsa_switch_ops { + enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *, int, enum dsa_tag_protocol); + int (*change_tag_protocol)(struct dsa_switch *, int, enum dsa_tag_protocol); + int (*setup)(struct dsa_switch *); + void (*teardown)(struct dsa_switch *); + u32 (*get_phy_flags)(struct dsa_switch *, int); + int (*phy_read)(struct dsa_switch *, int, int); + int (*phy_write)(struct dsa_switch *, int, int, u16); + void (*adjust_link)(struct dsa_switch *, int, struct phy_device *); + void (*fixed_link_update)(struct dsa_switch *, int, struct fixed_phy_status___2 *); + void (*phylink_validate)(struct dsa_switch *, int, long unsigned int *, struct phylink_link_state *); + int (*phylink_mac_link_state)(struct dsa_switch *, int, struct phylink_link_state *); + void (*phylink_mac_config)(struct dsa_switch *, int, unsigned int, const struct phylink_link_state *); + void (*phylink_mac_an_restart)(struct dsa_switch *, int); + void (*phylink_mac_link_down)(struct dsa_switch *, int, unsigned int, phy_interface_t); + void (*phylink_mac_link_up)(struct dsa_switch *, int, unsigned int, phy_interface_t, struct phy_device *, int, int, bool, bool); + void (*phylink_fixed_state)(struct dsa_switch *, int, struct phylink_link_state *); + void (*get_strings)(struct dsa_switch *, int, u32, uint8_t *); + void (*get_ethtool_stats)(struct dsa_switch *, int, uint64_t *); + int (*get_sset_count)(struct dsa_switch *, int, int); + void (*get_ethtool_phy_stats)(struct dsa_switch *, int, uint64_t *); + void (*get_stats64)(struct dsa_switch *, int, struct rtnl_link_stats64 *); + void (*self_test)(struct dsa_switch *, int, struct ethtool_test *, u64 *); + void (*get_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *); + int (*set_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *); + int (*get_ts_info)(struct dsa_switch *, int, struct ethtool_ts_info *); + int (*suspend)(struct dsa_switch *); + int (*resume)(struct dsa_switch *); + int (*port_enable)(struct dsa_switch *, int, struct phy_device *); + void (*port_disable)(struct dsa_switch *, int); + int (*set_mac_eee)(struct dsa_switch *, int, struct ethtool_eee *); + int (*get_mac_eee)(struct dsa_switch *, int, struct ethtool_eee *); + int (*get_eeprom_len)(struct dsa_switch *); + int (*get_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *); + int (*set_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *); + int (*get_regs_len)(struct dsa_switch *, int); + void (*get_regs)(struct dsa_switch *, int, struct ethtool_regs *, void *); + int (*port_prechangeupper)(struct dsa_switch *, int, struct netdev_notifier_changeupper_info *); + int (*set_ageing_time)(struct dsa_switch *, unsigned int); + int (*port_bridge_join)(struct dsa_switch *, int, struct net_device *); + void (*port_bridge_leave)(struct dsa_switch *, int, struct net_device *); + void (*port_stp_state_set)(struct dsa_switch *, int, u8); + void (*port_fast_age)(struct dsa_switch *, int); + int (*port_pre_bridge_flags)(struct dsa_switch *, int, struct switchdev_brport_flags, struct netlink_ext_ack *); + int (*port_bridge_flags)(struct dsa_switch *, int, struct switchdev_brport_flags, struct netlink_ext_ack *); + int (*port_vlan_filtering)(struct dsa_switch *, int, bool, struct netlink_ext_ack *); + int (*port_vlan_add)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *, struct netlink_ext_ack *); + int (*port_vlan_del)(struct dsa_switch *, int, const 
struct switchdev_obj_port_vlan *); + int (*port_fdb_add)(struct dsa_switch *, int, const unsigned char *, u16); + int (*port_fdb_del)(struct dsa_switch *, int, const unsigned char *, u16); + int (*port_fdb_dump)(struct dsa_switch *, int, dsa_fdb_dump_cb_t *, void *); + int (*port_mdb_add)(struct dsa_switch *, int, const struct switchdev_obj_port_mdb *); + int (*port_mdb_del)(struct dsa_switch *, int, const struct switchdev_obj_port_mdb *); + int (*get_rxnfc)(struct dsa_switch *, int, struct ethtool_rxnfc *, u32 *); + int (*set_rxnfc)(struct dsa_switch *, int, struct ethtool_rxnfc *); + int (*cls_flower_add)(struct dsa_switch *, int, struct flow_cls_offload *, bool); + int (*cls_flower_del)(struct dsa_switch *, int, struct flow_cls_offload *, bool); + int (*cls_flower_stats)(struct dsa_switch *, int, struct flow_cls_offload *, bool); + int (*port_mirror_add)(struct dsa_switch *, int, struct dsa_mall_mirror_tc_entry *, bool); + void (*port_mirror_del)(struct dsa_switch *, int, struct dsa_mall_mirror_tc_entry *); + int (*port_policer_add)(struct dsa_switch *, int, struct dsa_mall_policer_tc_entry *); + void (*port_policer_del)(struct dsa_switch *, int); + int (*port_setup_tc)(struct dsa_switch *, int, enum tc_setup_type, void *); + int (*crosschip_bridge_join)(struct dsa_switch *, int, int, int, struct net_device *); + void (*crosschip_bridge_leave)(struct dsa_switch *, int, int, int, struct net_device *); + int (*crosschip_lag_change)(struct dsa_switch *, int, int); + int (*crosschip_lag_join)(struct dsa_switch *, int, int, struct net_device *, struct netdev_lag_upper_info *); + int (*crosschip_lag_leave)(struct dsa_switch *, int, int, struct net_device *); + int (*port_hwtstamp_get)(struct dsa_switch *, int, struct ifreq *); + int (*port_hwtstamp_set)(struct dsa_switch *, int, struct ifreq *); + void (*port_txtstamp)(struct dsa_switch *, int, struct sk_buff *); + bool (*port_rxtstamp)(struct dsa_switch *, int, struct sk_buff *, unsigned int); + int (*devlink_param_get)(struct dsa_switch *, u32, struct devlink_param_gset_ctx *); + int (*devlink_param_set)(struct dsa_switch *, u32, struct devlink_param_gset_ctx *); + int (*devlink_info_get)(struct dsa_switch *, struct devlink_info_req *, struct netlink_ext_ack *); + int (*devlink_sb_pool_get)(struct dsa_switch *, unsigned int, u16, struct devlink_sb_pool_info *); + int (*devlink_sb_pool_set)(struct dsa_switch *, unsigned int, u16, u32, enum devlink_sb_threshold_type, struct netlink_ext_ack *); + int (*devlink_sb_port_pool_get)(struct dsa_switch *, int, unsigned int, u16, u32 *); + int (*devlink_sb_port_pool_set)(struct dsa_switch *, int, unsigned int, u16, u32, struct netlink_ext_ack *); + int (*devlink_sb_tc_pool_bind_get)(struct dsa_switch *, int, unsigned int, u16, enum devlink_sb_pool_type, u16 *, u32 *); + int (*devlink_sb_tc_pool_bind_set)(struct dsa_switch *, int, unsigned int, u16, enum devlink_sb_pool_type, u16, u32, struct netlink_ext_ack *); + int (*devlink_sb_occ_snapshot)(struct dsa_switch *, unsigned int); + int (*devlink_sb_occ_max_clear)(struct dsa_switch *, unsigned int); + int (*devlink_sb_occ_port_pool_get)(struct dsa_switch *, int, unsigned int, u16, u32 *, u32 *); + int (*devlink_sb_occ_tc_port_bind_get)(struct dsa_switch *, int, unsigned int, u16, enum devlink_sb_pool_type, u32 *, u32 *); + int (*port_change_mtu)(struct dsa_switch *, int, int); + int (*port_max_mtu)(struct dsa_switch *, int); + int (*port_lag_change)(struct dsa_switch *, int); + int (*port_lag_join)(struct dsa_switch *, int, struct net_device *, struct 
netdev_lag_upper_info *); + int (*port_lag_leave)(struct dsa_switch *, int, struct net_device *); + int (*port_hsr_join)(struct dsa_switch *, int, struct net_device *); + int (*port_hsr_leave)(struct dsa_switch *, int, struct net_device *); + int (*port_mrp_add)(struct dsa_switch *, int, const struct switchdev_obj_mrp *); + int (*port_mrp_del)(struct dsa_switch *, int, const struct switchdev_obj_mrp *); + int (*port_mrp_add_ring_role)(struct dsa_switch *, int, const struct switchdev_obj_ring_role_mrp *); + int (*port_mrp_del_ring_role)(struct dsa_switch *, int, const struct switchdev_obj_ring_role_mrp *); +}; + +enum lwtunnel_encap_types { + LWTUNNEL_ENCAP_NONE = 0, + LWTUNNEL_ENCAP_MPLS = 1, + LWTUNNEL_ENCAP_IP = 2, + LWTUNNEL_ENCAP_ILA = 3, + LWTUNNEL_ENCAP_IP6 = 4, + LWTUNNEL_ENCAP_SEG6 = 5, + LWTUNNEL_ENCAP_BPF = 6, + LWTUNNEL_ENCAP_SEG6_LOCAL = 7, + LWTUNNEL_ENCAP_RPL = 8, + __LWTUNNEL_ENCAP_MAX = 9, +}; + +enum metadata_type { + METADATA_IP_TUNNEL = 0, + METADATA_HW_PORT_MUX = 1, +}; + +struct hw_port_info { + struct net_device *lower_dev; + u32 port_id; +}; + +struct metadata_dst { + struct dst_entry dst; + enum metadata_type type; + union { + struct ip_tunnel_info tun_info; + struct hw_port_info port_info; + } u; +}; + +struct gre_base_hdr { + __be16 flags; + __be16 protocol; +}; + +struct gre_full_hdr { + struct gre_base_hdr fixed_header; + __be16 csum; + __be16 reserved1; + __be32 key; + __be32 seq; +}; + +struct pptp_gre_header { + struct gre_base_hdr gre_hd; + __be16 payload_len; + __be16 call_id; + __be32 seq; + __be32 ack; +}; + +struct tipc_basic_hdr { + __be32 w[4]; +}; + +struct icmphdr { + __u8 type; + __u8 code; + __sum16 checksum; + union { + struct { + __be16 id; + __be16 sequence; + } echo; + __be32 gateway; + struct { + __be16 __unused; + __be16 mtu; + } frag; + __u8 reserved[4]; + } un; +}; + +enum l2tp_debug_flags { + L2TP_MSG_DEBUG = 1, + L2TP_MSG_CONTROL = 2, + L2TP_MSG_SEQ = 4, + L2TP_MSG_DATA = 8, +}; + +struct pppoe_tag { + __be16 tag_type; + __be16 tag_len; + char tag_data[0]; +}; + +struct pppoe_hdr { + __u8 type: 4; + __u8 ver: 4; + __u8 code; + __be16 sid; + __be16 length; + struct pppoe_tag tag[0]; +}; + +struct mpls_label { + __be32 entry; +}; + +struct clock_identity { + u8 id[8]; +}; + +struct port_identity { + struct clock_identity clock_identity; + __be16 port_number; +}; + +struct ptp_header { + u8 tsmt; + u8 ver; + __be16 message_length; + u8 domain_number; + u8 reserved1; + u8 flag_field[2]; + __be64 correction; + __be32 reserved2; + struct port_identity source_port_identity; + __be16 sequence_id; + u8 control; + u8 log_message_interval; +} __attribute__((packed)); + +enum batadv_packettype { + BATADV_IV_OGM = 0, + BATADV_BCAST = 1, + BATADV_CODED = 2, + BATADV_ELP = 3, + BATADV_OGM2 = 4, + BATADV_UNICAST = 64, + BATADV_UNICAST_FRAG = 65, + BATADV_UNICAST_4ADDR = 66, + BATADV_ICMP = 67, + BATADV_UNICAST_TVLV = 68, +}; + +struct batadv_unicast_packet { + __u8 packet_type; + __u8 version; + __u8 ttl; + __u8 ttvn; + __u8 dest[6]; +}; + +struct nf_conntrack_zone { + u16 id; + u8 flags; + u8 dir; +}; + +struct nf_conntrack_man { + union nf_inet_addr u3; + union nf_conntrack_man_proto u; + u_int16_t l3num; +}; + +struct nf_conntrack_tuple { + struct nf_conntrack_man src; + struct { + union nf_inet_addr u3; + union { + __be16 all; + struct { + __be16 port; + } tcp; + struct { + __be16 port; + } udp; + struct { + u_int8_t type; + u_int8_t code; + } icmp; + struct { + __be16 port; + } dccp; + struct { + __be16 port; + } sctp; + struct { + __be16 key; + } 
gre; + } u; + u_int8_t protonum; + u_int8_t dir; + } dst; +}; + +struct nf_conntrack_tuple_hash { + struct hlist_nulls_node hnnode; + struct nf_conntrack_tuple tuple; +}; + +struct nf_ct_udp { + long unsigned int stream_ts; +}; + +struct nf_ct_gre { + unsigned int stream_timeout; + unsigned int timeout; +}; + +union nf_conntrack_proto { + struct nf_ct_dccp dccp; + struct ip_ct_sctp sctp; + struct ip_ct_tcp tcp; + struct nf_ct_udp udp; + struct nf_ct_gre gre; + unsigned int tmpl_padto; +}; + +struct nf_ct_ext; + +struct nf_conn { + struct nf_conntrack ct_general; + spinlock_t lock; + u32 timeout; + struct nf_conntrack_zone zone; + struct nf_conntrack_tuple_hash tuplehash[2]; + long unsigned int status; + u16 cpu; + possible_net_t ct_net; + struct hlist_node nat_bysource; + struct { } __nfct_init_offset; + struct nf_conn *master; + u_int32_t mark; + u_int32_t secmark; + struct nf_ct_ext *ext; + union nf_conntrack_proto proto; +}; + +struct nf_conntrack_tuple_mask { + struct { + union nf_inet_addr u3; + union nf_conntrack_man_proto u; + } src; +}; + +struct nf_ct_ext { + u8 offset[9]; + u8 len; + char data[0]; +}; + +struct nf_conntrack_helper; + +struct nf_conntrack_expect { + struct hlist_node lnode; + struct hlist_node hnode; + struct nf_conntrack_tuple tuple; + struct nf_conntrack_tuple_mask mask; + void (*expectfn)(struct nf_conn *, struct nf_conntrack_expect *); + struct nf_conntrack_helper *helper; + struct nf_conn *master; + struct timer_list timeout; + refcount_t use; + unsigned int flags; + unsigned int class; + union nf_inet_addr saved_addr; + union nf_conntrack_man_proto saved_proto; + enum ip_conntrack_dir dir; + struct callback_head rcu; +}; + +enum nf_ct_ext_id { + NF_CT_EXT_HELPER = 0, + NF_CT_EXT_NAT = 1, + NF_CT_EXT_SEQADJ = 2, + NF_CT_EXT_ACCT = 3, + NF_CT_EXT_ECACHE = 4, + NF_CT_EXT_TSTAMP = 5, + NF_CT_EXT_TIMEOUT = 6, + NF_CT_EXT_LABELS = 7, + NF_CT_EXT_SYNPROXY = 8, + NF_CT_EXT_NUM = 9, +}; + +struct nf_ct_event { + struct nf_conn *ct; + u32 portid; + int report; +}; + +struct nf_exp_event { + struct nf_conntrack_expect *exp; + u32 portid; + int report; +}; + +struct nf_conn_labels { + long unsigned int bits[2]; +}; + +struct _flow_keys_digest_data { + __be16 n_proto; + u8 ip_proto; + u8 padding; + __be32 ports; + __be32 src; + __be32 dst; +}; + +struct tc_skb_ext { + __u32 chain; + __u16 mru; + bool post_ct; +}; + +struct ipv4_devconf { + void *sysctl; + int data[32]; + long unsigned int state[1]; +}; + +enum nf_dev_hooks { + NF_NETDEV_INGRESS = 0, + NF_NETDEV_NUMHOOKS = 1, +}; + +enum { + IF_OPER_UNKNOWN = 0, + IF_OPER_NOTPRESENT = 1, + IF_OPER_DOWN = 2, + IF_OPER_LOWERLAYERDOWN = 3, + IF_OPER_TESTING = 4, + IF_OPER_DORMANT = 5, + IF_OPER_UP = 6, +}; + +struct ifbond { + __s32 bond_mode; + __s32 num_slaves; + __s32 miimon; +}; + +typedef struct ifbond ifbond; + +struct ifslave { + __s32 slave_id; + char slave_name[16]; + __s8 link; + __s8 state; + __u32 link_failure_count; +}; + +typedef struct ifslave ifslave; + +struct netdev_boot_setup { + char name[16]; + struct ifmap map; +}; + +enum { + NAPIF_STATE_SCHED = 1, + NAPIF_STATE_MISSED = 2, + NAPIF_STATE_DISABLE = 4, + NAPIF_STATE_NPSVC = 8, + NAPIF_STATE_LISTED = 16, + NAPIF_STATE_NO_BUSY_POLL = 32, + NAPIF_STATE_IN_BUSY_POLL = 64, + NAPIF_STATE_PREFER_BUSY_POLL = 128, + NAPIF_STATE_THREADED = 256, + NAPIF_STATE_SCHED_THREADED = 512, +}; + +struct net_device_path_stack { + int num_paths; + struct net_device_path path[5]; +}; + +struct bpf_xdp_link { + struct bpf_link link; + struct net_device *dev; + int flags; +}; + 
+struct netdev_net_notifier { + struct list_head list; + struct notifier_block *nb; +}; + +struct netpoll; + +struct netpoll_info { + refcount_t refcnt; + struct semaphore dev_lock; + struct sk_buff_head txq; + struct delayed_work tx_work; + struct netpoll *netpoll; + struct callback_head rcu; +}; + +struct in_ifaddr; + +struct ip_mc_list; + +struct in_device { + struct net_device *dev; + refcount_t refcnt; + int dead; + struct in_ifaddr *ifa_list; + struct ip_mc_list *mc_list; + struct ip_mc_list **mc_hash; + int mc_count; + spinlock_t mc_tomb_lock; + struct ip_mc_list *mc_tomb; + long unsigned int mr_v1_seen; + long unsigned int mr_v2_seen; + long unsigned int mr_maxdelay; + long unsigned int mr_qi; + long unsigned int mr_qri; + unsigned char mr_qrv; + unsigned char mr_gq_running; + u32 mr_ifc_count; + struct timer_list mr_gq_timer; + struct timer_list mr_ifc_timer; + struct neigh_parms *arp_parms; + struct ipv4_devconf cnf; + struct callback_head callback_head; +}; + +struct offload_callbacks { + struct sk_buff * (*gso_segment)(struct sk_buff *, netdev_features_t); + struct sk_buff * (*gro_receive)(struct list_head *, struct sk_buff *); + int (*gro_complete)(struct sk_buff *, int); +}; + +struct packet_offload { + __be16 type; + u16 priority; + struct offload_callbacks callbacks; + struct list_head list; +}; + +struct netdev_notifier_info_ext { + struct netdev_notifier_info info; + union { + u32 mtu; + } ext; +}; + +struct netdev_notifier_change_info { + struct netdev_notifier_info info; + unsigned int flags_changed; +}; + +struct netdev_notifier_changelowerstate_info { + struct netdev_notifier_info info; + void *lower_state_info; +}; + +struct netdev_notifier_pre_changeaddr_info { + struct netdev_notifier_info info; + const unsigned char *dev_addr; +}; + +typedef int (*bpf_op_t)(struct net_device *, struct netdev_bpf *); + +enum { + NESTED_SYNC_IMM_BIT = 0, + NESTED_SYNC_TODO_BIT = 1, +}; + +struct netdev_nested_priv { + unsigned char flags; + void *data; +}; + +struct netdev_bonding_info { + ifslave slave; + ifbond master; +}; + +struct netdev_notifier_bonding_info { + struct netdev_notifier_info info; + struct netdev_bonding_info bonding_info; +}; + +union inet_addr { + __u32 all[4]; + __be32 ip; + __be32 ip6[4]; + struct in_addr in; + struct in6_addr in6; +}; + +struct netpoll { + struct net_device *dev; + char dev_name[16]; + const char *name; + union inet_addr local_ip; + union inet_addr remote_ip; + bool ipv6; + u16 local_port; + u16 remote_port; + u8 remote_mac[6]; +}; + +enum qdisc_state_t { + __QDISC_STATE_SCHED = 0, + __QDISC_STATE_DEACTIVATED = 1, + __QDISC_STATE_MISSED = 2, +}; + +struct tcf_walker { + int stop; + int skip; + int count; + bool nonempty; + long unsigned int cookie; + int (*fn)(struct tcf_proto *, void *, struct tcf_walker *); +}; + +enum { + IPV4_DEVCONF_FORWARDING = 1, + IPV4_DEVCONF_MC_FORWARDING = 2, + IPV4_DEVCONF_PROXY_ARP = 3, + IPV4_DEVCONF_ACCEPT_REDIRECTS = 4, + IPV4_DEVCONF_SECURE_REDIRECTS = 5, + IPV4_DEVCONF_SEND_REDIRECTS = 6, + IPV4_DEVCONF_SHARED_MEDIA = 7, + IPV4_DEVCONF_RP_FILTER = 8, + IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE = 9, + IPV4_DEVCONF_BOOTP_RELAY = 10, + IPV4_DEVCONF_LOG_MARTIANS = 11, + IPV4_DEVCONF_TAG = 12, + IPV4_DEVCONF_ARPFILTER = 13, + IPV4_DEVCONF_MEDIUM_ID = 14, + IPV4_DEVCONF_NOXFRM = 15, + IPV4_DEVCONF_NOPOLICY = 16, + IPV4_DEVCONF_FORCE_IGMP_VERSION = 17, + IPV4_DEVCONF_ARP_ANNOUNCE = 18, + IPV4_DEVCONF_ARP_IGNORE = 19, + IPV4_DEVCONF_PROMOTE_SECONDARIES = 20, + IPV4_DEVCONF_ARP_ACCEPT = 21, + IPV4_DEVCONF_ARP_NOTIFY = 22, + 
IPV4_DEVCONF_ACCEPT_LOCAL = 23, + IPV4_DEVCONF_SRC_VMARK = 24, + IPV4_DEVCONF_PROXY_ARP_PVLAN = 25, + IPV4_DEVCONF_ROUTE_LOCALNET = 26, + IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL = 27, + IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL = 28, + IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN = 29, + IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST = 30, + IPV4_DEVCONF_DROP_GRATUITOUS_ARP = 31, + IPV4_DEVCONF_BC_FORWARDING = 32, + __IPV4_DEVCONF_MAX = 33, +}; + +struct in_ifaddr { + struct hlist_node hash; + struct in_ifaddr *ifa_next; + struct in_device *ifa_dev; + struct callback_head callback_head; + __be32 ifa_local; + __be32 ifa_address; + __be32 ifa_mask; + __u32 ifa_rt_priority; + __be32 ifa_broadcast; + unsigned char ifa_scope; + unsigned char ifa_prefixlen; + __u32 ifa_flags; + char ifa_label[16]; + __u32 ifa_valid_lft; + __u32 ifa_preferred_lft; + long unsigned int ifa_cstamp; + long unsigned int ifa_tstamp; +}; + +struct udp_tunnel_info { + short unsigned int type; + sa_family_t sa_family; + __be16 port; + u8 hw_priv; +}; + +struct udp_tunnel_nic_shared { + struct udp_tunnel_nic *udp_tunnel_nic_info; + struct list_head devices; +}; + +struct dev_kfree_skb_cb { + enum skb_free_reason reason; +}; + +struct netdev_adjacent { + struct net_device *dev; + bool master; + bool ignore; + u16 ref_nr; + void *private; + struct list_head list; + struct callback_head rcu; +}; + +struct netdev_hw_addr { + struct list_head list; + unsigned char addr[32]; + unsigned char type; + bool global_use; + int sync_cnt; + int refcount; + int synced; + struct callback_head callback_head; +}; + +enum { + NDA_UNSPEC = 0, + NDA_DST = 1, + NDA_LLADDR = 2, + NDA_CACHEINFO = 3, + NDA_PROBES = 4, + NDA_VLAN = 5, + NDA_PORT = 6, + NDA_VNI = 7, + NDA_IFINDEX = 8, + NDA_MASTER = 9, + NDA_LINK_NETNSID = 10, + NDA_SRC_VNI = 11, + NDA_PROTOCOL = 12, + NDA_NH_ID = 13, + NDA_FDB_EXT_ATTRS = 14, + __NDA_MAX = 15, +}; + +struct nda_cacheinfo { + __u32 ndm_confirmed; + __u32 ndm_used; + __u32 ndm_updated; + __u32 ndm_refcnt; +}; + +struct ndt_stats { + __u64 ndts_allocs; + __u64 ndts_destroys; + __u64 ndts_hash_grows; + __u64 ndts_res_failed; + __u64 ndts_lookups; + __u64 ndts_hits; + __u64 ndts_rcv_probes_mcast; + __u64 ndts_rcv_probes_ucast; + __u64 ndts_periodic_gc_runs; + __u64 ndts_forced_gc_runs; + __u64 ndts_table_fulls; +}; + +enum { + NDTPA_UNSPEC = 0, + NDTPA_IFINDEX = 1, + NDTPA_REFCNT = 2, + NDTPA_REACHABLE_TIME = 3, + NDTPA_BASE_REACHABLE_TIME = 4, + NDTPA_RETRANS_TIME = 5, + NDTPA_GC_STALETIME = 6, + NDTPA_DELAY_PROBE_TIME = 7, + NDTPA_QUEUE_LEN = 8, + NDTPA_APP_PROBES = 9, + NDTPA_UCAST_PROBES = 10, + NDTPA_MCAST_PROBES = 11, + NDTPA_ANYCAST_DELAY = 12, + NDTPA_PROXY_DELAY = 13, + NDTPA_PROXY_QLEN = 14, + NDTPA_LOCKTIME = 15, + NDTPA_QUEUE_LENBYTES = 16, + NDTPA_MCAST_REPROBES = 17, + NDTPA_PAD = 18, + __NDTPA_MAX = 19, +}; + +struct ndtmsg { + __u8 ndtm_family; + __u8 ndtm_pad1; + __u16 ndtm_pad2; +}; + +struct ndt_config { + __u16 ndtc_key_len; + __u16 ndtc_entry_size; + __u32 ndtc_entries; + __u32 ndtc_last_flush; + __u32 ndtc_last_rand; + __u32 ndtc_hash_rnd; + __u32 ndtc_hash_mask; + __u32 ndtc_hash_chain_gc; + __u32 ndtc_proxy_qlen; +}; + +enum { + NDTA_UNSPEC = 0, + NDTA_NAME = 1, + NDTA_THRESH1 = 2, + NDTA_THRESH2 = 3, + NDTA_THRESH3 = 4, + NDTA_CONFIG = 5, + NDTA_PARMS = 6, + NDTA_STATS = 7, + NDTA_GC_INTERVAL = 8, + NDTA_PAD = 9, + __NDTA_MAX = 10, +}; + +enum { + RTN_UNSPEC = 0, + RTN_UNICAST = 1, + RTN_LOCAL = 2, + RTN_BROADCAST = 3, + RTN_ANYCAST = 4, + RTN_MULTICAST = 5, + RTN_BLACKHOLE = 6, + 
RTN_UNREACHABLE = 7, + RTN_PROHIBIT = 8, + RTN_THROW = 9, + RTN_NAT = 10, + RTN_XRESOLVE = 11, + __RTN_MAX = 12, +}; + +enum { + NEIGH_ARP_TABLE = 0, + NEIGH_ND_TABLE = 1, + NEIGH_DN_TABLE = 2, + NEIGH_NR_TABLES = 3, + NEIGH_LINK_TABLE = 3, +}; + +struct neigh_seq_state { + struct seq_net_private p; + struct neigh_table *tbl; + struct neigh_hash_table *nht; + void * (*neigh_sub_iter)(struct neigh_seq_state *, struct neighbour *, loff_t *); + unsigned int bucket; + unsigned int flags; +}; + +struct neighbour_cb { + long unsigned int sched_next; + unsigned int flags; +}; + +enum netevent_notif_type { + NETEVENT_NEIGH_UPDATE = 1, + NETEVENT_REDIRECT = 2, + NETEVENT_DELAY_PROBE_TIME_UPDATE = 3, + NETEVENT_IPV4_MPATH_HASH_UPDATE = 4, + NETEVENT_IPV6_MPATH_HASH_UPDATE = 5, + NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE = 6, +}; + +struct neigh_dump_filter { + int master_idx; + int dev_idx; +}; + +struct neigh_sysctl_table { + struct ctl_table_header *sysctl_header; + struct ctl_table neigh_vars[21]; +}; + +struct netlink_dump_control { + int (*start)(struct netlink_callback *); + int (*dump)(struct sk_buff *, struct netlink_callback *); + int (*done)(struct netlink_callback *); + void *data; + struct module *module; + u32 min_dump_alloc; +}; + +struct rtnl_link_stats { + __u32 rx_packets; + __u32 tx_packets; + __u32 rx_bytes; + __u32 tx_bytes; + __u32 rx_errors; + __u32 tx_errors; + __u32 rx_dropped; + __u32 tx_dropped; + __u32 multicast; + __u32 collisions; + __u32 rx_length_errors; + __u32 rx_over_errors; + __u32 rx_crc_errors; + __u32 rx_frame_errors; + __u32 rx_fifo_errors; + __u32 rx_missed_errors; + __u32 tx_aborted_errors; + __u32 tx_carrier_errors; + __u32 tx_fifo_errors; + __u32 tx_heartbeat_errors; + __u32 tx_window_errors; + __u32 rx_compressed; + __u32 tx_compressed; + __u32 rx_nohandler; +}; + +struct rtnl_link_ifmap { + __u64 mem_start; + __u64 mem_end; + __u64 base_addr; + __u16 irq; + __u8 dma; + __u8 port; +}; + +enum { + IFLA_PROTO_DOWN_REASON_UNSPEC = 0, + IFLA_PROTO_DOWN_REASON_MASK = 1, + IFLA_PROTO_DOWN_REASON_VALUE = 2, + __IFLA_PROTO_DOWN_REASON_CNT = 3, + IFLA_PROTO_DOWN_REASON_MAX = 2, +}; + +enum { + IFLA_BRPORT_UNSPEC = 0, + IFLA_BRPORT_STATE = 1, + IFLA_BRPORT_PRIORITY = 2, + IFLA_BRPORT_COST = 3, + IFLA_BRPORT_MODE = 4, + IFLA_BRPORT_GUARD = 5, + IFLA_BRPORT_PROTECT = 6, + IFLA_BRPORT_FAST_LEAVE = 7, + IFLA_BRPORT_LEARNING = 8, + IFLA_BRPORT_UNICAST_FLOOD = 9, + IFLA_BRPORT_PROXYARP = 10, + IFLA_BRPORT_LEARNING_SYNC = 11, + IFLA_BRPORT_PROXYARP_WIFI = 12, + IFLA_BRPORT_ROOT_ID = 13, + IFLA_BRPORT_BRIDGE_ID = 14, + IFLA_BRPORT_DESIGNATED_PORT = 15, + IFLA_BRPORT_DESIGNATED_COST = 16, + IFLA_BRPORT_ID = 17, + IFLA_BRPORT_NO = 18, + IFLA_BRPORT_TOPOLOGY_CHANGE_ACK = 19, + IFLA_BRPORT_CONFIG_PENDING = 20, + IFLA_BRPORT_MESSAGE_AGE_TIMER = 21, + IFLA_BRPORT_FORWARD_DELAY_TIMER = 22, + IFLA_BRPORT_HOLD_TIMER = 23, + IFLA_BRPORT_FLUSH = 24, + IFLA_BRPORT_MULTICAST_ROUTER = 25, + IFLA_BRPORT_PAD = 26, + IFLA_BRPORT_MCAST_FLOOD = 27, + IFLA_BRPORT_MCAST_TO_UCAST = 28, + IFLA_BRPORT_VLAN_TUNNEL = 29, + IFLA_BRPORT_BCAST_FLOOD = 30, + IFLA_BRPORT_GROUP_FWD_MASK = 31, + IFLA_BRPORT_NEIGH_SUPPRESS = 32, + IFLA_BRPORT_ISOLATED = 33, + IFLA_BRPORT_BACKUP_PORT = 34, + IFLA_BRPORT_MRP_RING_OPEN = 35, + IFLA_BRPORT_MRP_IN_OPEN = 36, + IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT = 37, + IFLA_BRPORT_MCAST_EHT_HOSTS_CNT = 38, + __IFLA_BRPORT_MAX = 39, +}; + +enum { + IFLA_INFO_UNSPEC = 0, + IFLA_INFO_KIND = 1, + IFLA_INFO_DATA = 2, + IFLA_INFO_XSTATS = 3, + IFLA_INFO_SLAVE_KIND = 4, + 
IFLA_INFO_SLAVE_DATA = 5, + __IFLA_INFO_MAX = 6, +}; + +enum { + IFLA_VF_INFO_UNSPEC = 0, + IFLA_VF_INFO = 1, + __IFLA_VF_INFO_MAX = 2, +}; + +enum { + IFLA_VF_UNSPEC = 0, + IFLA_VF_MAC = 1, + IFLA_VF_VLAN = 2, + IFLA_VF_TX_RATE = 3, + IFLA_VF_SPOOFCHK = 4, + IFLA_VF_LINK_STATE = 5, + IFLA_VF_RATE = 6, + IFLA_VF_RSS_QUERY_EN = 7, + IFLA_VF_STATS = 8, + IFLA_VF_TRUST = 9, + IFLA_VF_IB_NODE_GUID = 10, + IFLA_VF_IB_PORT_GUID = 11, + IFLA_VF_VLAN_LIST = 12, + IFLA_VF_BROADCAST = 13, + __IFLA_VF_MAX = 14, +}; + +struct ifla_vf_mac { + __u32 vf; + __u8 mac[32]; +}; + +struct ifla_vf_broadcast { + __u8 broadcast[32]; +}; + +struct ifla_vf_vlan { + __u32 vf; + __u32 vlan; + __u32 qos; +}; + +enum { + IFLA_VF_VLAN_INFO_UNSPEC = 0, + IFLA_VF_VLAN_INFO = 1, + __IFLA_VF_VLAN_INFO_MAX = 2, +}; + +struct ifla_vf_vlan_info { + __u32 vf; + __u32 vlan; + __u32 qos; + __be16 vlan_proto; +}; + +struct ifla_vf_tx_rate { + __u32 vf; + __u32 rate; +}; + +struct ifla_vf_rate { + __u32 vf; + __u32 min_tx_rate; + __u32 max_tx_rate; +}; + +struct ifla_vf_spoofchk { + __u32 vf; + __u32 setting; +}; + +struct ifla_vf_link_state { + __u32 vf; + __u32 link_state; +}; + +struct ifla_vf_rss_query_en { + __u32 vf; + __u32 setting; +}; + +enum { + IFLA_VF_STATS_RX_PACKETS = 0, + IFLA_VF_STATS_TX_PACKETS = 1, + IFLA_VF_STATS_RX_BYTES = 2, + IFLA_VF_STATS_TX_BYTES = 3, + IFLA_VF_STATS_BROADCAST = 4, + IFLA_VF_STATS_MULTICAST = 5, + IFLA_VF_STATS_PAD = 6, + IFLA_VF_STATS_RX_DROPPED = 7, + IFLA_VF_STATS_TX_DROPPED = 8, + __IFLA_VF_STATS_MAX = 9, +}; + +struct ifla_vf_trust { + __u32 vf; + __u32 setting; +}; + +enum { + IFLA_VF_PORT_UNSPEC = 0, + IFLA_VF_PORT = 1, + __IFLA_VF_PORT_MAX = 2, +}; + +enum { + IFLA_PORT_UNSPEC = 0, + IFLA_PORT_VF = 1, + IFLA_PORT_PROFILE = 2, + IFLA_PORT_VSI_TYPE = 3, + IFLA_PORT_INSTANCE_UUID = 4, + IFLA_PORT_HOST_UUID = 5, + IFLA_PORT_REQUEST = 6, + IFLA_PORT_RESPONSE = 7, + __IFLA_PORT_MAX = 8, +}; + +struct if_stats_msg { + __u8 family; + __u8 pad1; + __u16 pad2; + __u32 ifindex; + __u32 filter_mask; +}; + +enum { + IFLA_STATS_UNSPEC = 0, + IFLA_STATS_LINK_64 = 1, + IFLA_STATS_LINK_XSTATS = 2, + IFLA_STATS_LINK_XSTATS_SLAVE = 3, + IFLA_STATS_LINK_OFFLOAD_XSTATS = 4, + IFLA_STATS_AF_SPEC = 5, + __IFLA_STATS_MAX = 6, +}; + +enum { + IFLA_OFFLOAD_XSTATS_UNSPEC = 0, + IFLA_OFFLOAD_XSTATS_CPU_HIT = 1, + __IFLA_OFFLOAD_XSTATS_MAX = 2, +}; + +enum { + XDP_ATTACHED_NONE = 0, + XDP_ATTACHED_DRV = 1, + XDP_ATTACHED_SKB = 2, + XDP_ATTACHED_HW = 3, + XDP_ATTACHED_MULTI = 4, +}; + +enum { + IFLA_XDP_UNSPEC = 0, + IFLA_XDP_FD = 1, + IFLA_XDP_ATTACHED = 2, + IFLA_XDP_FLAGS = 3, + IFLA_XDP_PROG_ID = 4, + IFLA_XDP_DRV_PROG_ID = 5, + IFLA_XDP_SKB_PROG_ID = 6, + IFLA_XDP_HW_PROG_ID = 7, + IFLA_XDP_EXPECTED_FD = 8, + __IFLA_XDP_MAX = 9, +}; + +enum { + IFLA_EVENT_NONE = 0, + IFLA_EVENT_REBOOT = 1, + IFLA_EVENT_FEATURES = 2, + IFLA_EVENT_BONDING_FAILOVER = 3, + IFLA_EVENT_NOTIFY_PEERS = 4, + IFLA_EVENT_IGMP_RESEND = 5, + IFLA_EVENT_BONDING_OPTIONS = 6, +}; + +enum { + IFLA_BRIDGE_FLAGS = 0, + IFLA_BRIDGE_MODE = 1, + IFLA_BRIDGE_VLAN_INFO = 2, + IFLA_BRIDGE_VLAN_TUNNEL_INFO = 3, + IFLA_BRIDGE_MRP = 4, + IFLA_BRIDGE_CFM = 5, + __IFLA_BRIDGE_MAX = 6, +}; + +enum { + BR_MCAST_DIR_RX = 0, + BR_MCAST_DIR_TX = 1, + BR_MCAST_DIR_SIZE = 2, +}; + +enum rtattr_type_t { + RTA_UNSPEC = 0, + RTA_DST = 1, + RTA_SRC = 2, + RTA_IIF = 3, + RTA_OIF = 4, + RTA_GATEWAY = 5, + RTA_PRIORITY = 6, + RTA_PREFSRC = 7, + RTA_METRICS = 8, + RTA_MULTIPATH = 9, + RTA_PROTOINFO = 10, + RTA_FLOW = 11, + RTA_CACHEINFO = 12, + RTA_SESSION = 13, + 
RTA_MP_ALGO = 14, + RTA_TABLE = 15, + RTA_MARK = 16, + RTA_MFC_STATS = 17, + RTA_VIA = 18, + RTA_NEWDST = 19, + RTA_PREF = 20, + RTA_ENCAP_TYPE = 21, + RTA_ENCAP = 22, + RTA_EXPIRES = 23, + RTA_PAD = 24, + RTA_UID = 25, + RTA_TTL_PROPAGATE = 26, + RTA_IP_PROTO = 27, + RTA_SPORT = 28, + RTA_DPORT = 29, + RTA_NH_ID = 30, + __RTA_MAX = 31, +}; + +struct rta_cacheinfo { + __u32 rta_clntref; + __u32 rta_lastuse; + __s32 rta_expires; + __u32 rta_error; + __u32 rta_used; + __u32 rta_id; + __u32 rta_ts; + __u32 rta_tsage; +}; + +struct ifinfomsg { + unsigned char ifi_family; + unsigned char __ifi_pad; + short unsigned int ifi_type; + int ifi_index; + unsigned int ifi_flags; + unsigned int ifi_change; +}; + +struct rtnl_af_ops { + struct list_head list; + int family; + int (*fill_link_af)(struct sk_buff *, const struct net_device *, u32); + size_t (*get_link_af_size)(const struct net_device *, u32); + int (*validate_link_af)(const struct net_device *, const struct nlattr *); + int (*set_link_af)(struct net_device *, const struct nlattr *, struct netlink_ext_ack *); + int (*fill_stats_af)(struct sk_buff *, const struct net_device *); + size_t (*get_stats_af_size)(const struct net_device *); +}; + +struct rtnl_link { + rtnl_doit_func doit; + rtnl_dumpit_func dumpit; + struct module *owner; + unsigned int flags; + struct callback_head rcu; +}; + +enum { + IF_LINK_MODE_DEFAULT = 0, + IF_LINK_MODE_DORMANT = 1, + IF_LINK_MODE_TESTING = 2, +}; + +enum lw_bits { + LW_URGENT = 0, +}; + +struct seg6_pernet_data { + struct mutex lock; + struct in6_addr *tun_src; + struct rhashtable hmac_infos; +}; + +enum { + BPF_F_RECOMPUTE_CSUM = 1, + BPF_F_INVALIDATE_HASH = 2, +}; + +enum { + BPF_F_HDR_FIELD_MASK = 15, +}; + +enum { + BPF_F_PSEUDO_HDR = 16, + BPF_F_MARK_MANGLED_0 = 32, + BPF_F_MARK_ENFORCE = 64, +}; + +enum { + BPF_F_INGRESS = 1, +}; + +enum { + BPF_F_TUNINFO_IPV6 = 1, +}; + +enum { + BPF_F_ZERO_CSUM_TX = 2, + BPF_F_DONT_FRAGMENT = 4, + BPF_F_SEQ_NUMBER = 8, +}; + +enum { + BPF_CSUM_LEVEL_QUERY = 0, + BPF_CSUM_LEVEL_INC = 1, + BPF_CSUM_LEVEL_DEC = 2, + BPF_CSUM_LEVEL_RESET = 3, +}; + +enum { + BPF_F_ADJ_ROOM_FIXED_GSO = 1, + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 2, + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 4, + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 8, + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 16, + BPF_F_ADJ_ROOM_NO_CSUM_RESET = 32, + BPF_F_ADJ_ROOM_ENCAP_L2_ETH = 64, +}; + +enum { + BPF_ADJ_ROOM_ENCAP_L2_MASK = 255, + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56, +}; + +enum { + BPF_SK_LOOKUP_F_REPLACE = 1, + BPF_SK_LOOKUP_F_NO_REUSEPORT = 2, +}; + +enum bpf_adj_room_mode { + BPF_ADJ_ROOM_NET = 0, + BPF_ADJ_ROOM_MAC = 1, +}; + +enum bpf_hdr_start_off { + BPF_HDR_START_MAC = 0, + BPF_HDR_START_NET = 1, +}; + +enum bpf_lwt_encap_mode { + BPF_LWT_ENCAP_SEG6 = 0, + BPF_LWT_ENCAP_SEG6_INLINE = 1, + BPF_LWT_ENCAP_IP = 2, +}; + +struct bpf_tunnel_key { + __u32 tunnel_id; + union { + __u32 remote_ipv4; + __u32 remote_ipv6[4]; + }; + __u8 tunnel_tos; + __u8 tunnel_ttl; + __u16 tunnel_ext; + __u32 tunnel_label; +}; + +struct bpf_xfrm_state { + __u32 reqid; + __u32 spi; + __u16 family; + __u16 ext; + union { + __u32 remote_ipv4; + __u32 remote_ipv6[4]; + }; +}; + +struct bpf_tcp_sock { + __u32 snd_cwnd; + __u32 srtt_us; + __u32 rtt_min; + __u32 snd_ssthresh; + __u32 rcv_nxt; + __u32 snd_nxt; + __u32 snd_una; + __u32 mss_cache; + __u32 ecn_flags; + __u32 rate_delivered; + __u32 rate_interval_us; + __u32 packets_out; + __u32 retrans_out; + __u32 total_retrans; + __u32 segs_in; + __u32 data_segs_in; + __u32 segs_out; + __u32 data_segs_out; + __u32 lost_out; + 
__u32 sacked_out; + __u64 bytes_received; + __u64 bytes_acked; + __u32 dsack_dups; + __u32 delivered; + __u32 delivered_ce; + __u32 icsk_retransmits; +}; + +struct bpf_sock_tuple { + union { + struct { + __be32 saddr; + __be32 daddr; + __be16 sport; + __be16 dport; + } ipv4; + struct { + __be32 saddr[4]; + __be32 daddr[4]; + __be16 sport; + __be16 dport; + } ipv6; + }; +}; + +struct bpf_xdp_sock { + __u32 queue_id; +}; + +enum { + BPF_SOCK_OPS_RTO_CB_FLAG = 1, + BPF_SOCK_OPS_RETRANS_CB_FLAG = 2, + BPF_SOCK_OPS_STATE_CB_FLAG = 4, + BPF_SOCK_OPS_RTT_CB_FLAG = 8, + BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = 16, + BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = 32, + BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = 64, + BPF_SOCK_OPS_ALL_CB_FLAGS = 127, +}; + +enum { + BPF_SOCK_OPS_VOID = 0, + BPF_SOCK_OPS_TIMEOUT_INIT = 1, + BPF_SOCK_OPS_RWND_INIT = 2, + BPF_SOCK_OPS_TCP_CONNECT_CB = 3, + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 4, + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 5, + BPF_SOCK_OPS_NEEDS_ECN = 6, + BPF_SOCK_OPS_BASE_RTT = 7, + BPF_SOCK_OPS_RTO_CB = 8, + BPF_SOCK_OPS_RETRANS_CB = 9, + BPF_SOCK_OPS_STATE_CB = 10, + BPF_SOCK_OPS_TCP_LISTEN_CB = 11, + BPF_SOCK_OPS_RTT_CB = 12, + BPF_SOCK_OPS_PARSE_HDR_OPT_CB = 13, + BPF_SOCK_OPS_HDR_OPT_LEN_CB = 14, + BPF_SOCK_OPS_WRITE_HDR_OPT_CB = 15, +}; + +enum { + TCP_BPF_IW = 1001, + TCP_BPF_SNDCWND_CLAMP = 1002, + TCP_BPF_DELACK_MAX = 1003, + TCP_BPF_RTO_MIN = 1004, + TCP_BPF_SYN = 1005, + TCP_BPF_SYN_IP = 1006, + TCP_BPF_SYN_MAC = 1007, +}; + +enum { + BPF_LOAD_HDR_OPT_TCP_SYN = 1, +}; + +enum { + BPF_FIB_LOOKUP_DIRECT = 1, + BPF_FIB_LOOKUP_OUTPUT = 2, +}; + +enum { + BPF_FIB_LKUP_RET_SUCCESS = 0, + BPF_FIB_LKUP_RET_BLACKHOLE = 1, + BPF_FIB_LKUP_RET_UNREACHABLE = 2, + BPF_FIB_LKUP_RET_PROHIBIT = 3, + BPF_FIB_LKUP_RET_NOT_FWDED = 4, + BPF_FIB_LKUP_RET_FWD_DISABLED = 5, + BPF_FIB_LKUP_RET_UNSUPP_LWT = 6, + BPF_FIB_LKUP_RET_NO_NEIGH = 7, + BPF_FIB_LKUP_RET_FRAG_NEEDED = 8, +}; + +struct bpf_fib_lookup { + __u8 family; + __u8 l4_protocol; + __be16 sport; + __be16 dport; + union { + __u16 tot_len; + __u16 mtu_result; + }; + __u32 ifindex; + union { + __u8 tos; + __be32 flowinfo; + __u32 rt_metric; + }; + union { + __be32 ipv4_src; + __u32 ipv6_src[4]; + }; + union { + __be32 ipv4_dst; + __u32 ipv6_dst[4]; + }; + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + __u8 smac[6]; + __u8 dmac[6]; +}; + +struct bpf_redir_neigh { + __u32 nh_family; + union { + __be32 ipv4_nh; + __u32 ipv6_nh[4]; + }; +}; + +enum bpf_check_mtu_flags { + BPF_MTU_CHK_SEGS = 1, +}; + +enum bpf_check_mtu_ret { + BPF_MTU_CHK_RET_SUCCESS = 0, + BPF_MTU_CHK_RET_FRAG_NEEDED = 1, + BPF_MTU_CHK_RET_SEGS_TOOBIG = 2, +}; + +enum rt_scope_t { + RT_SCOPE_UNIVERSE = 0, + RT_SCOPE_SITE = 200, + RT_SCOPE_LINK = 253, + RT_SCOPE_HOST = 254, + RT_SCOPE_NOWHERE = 255, +}; + +enum rt_class_t { + RT_TABLE_UNSPEC = 0, + RT_TABLE_COMPAT = 252, + RT_TABLE_DEFAULT = 253, + RT_TABLE_MAIN = 254, + RT_TABLE_LOCAL = 255, + RT_TABLE_MAX = 4294967295, +}; + +struct nl_info { + struct nlmsghdr *nlh; + struct net *nl_net; + u32 portid; + u8 skip_notify: 1; + u8 skip_notify_kernel: 1; +}; + +struct inet_timewait_sock { + struct sock_common __tw_common; + __u32 tw_mark; + volatile unsigned char tw_substate; + unsigned char tw_rcv_wscale; + __be16 tw_sport; + unsigned int tw_kill: 1; + unsigned int tw_transparent: 1; + unsigned int tw_flowlabel: 20; + unsigned int tw_pad: 2; + unsigned int tw_tos: 8; + u32 tw_txhash; + u32 tw_priority; + struct timer_list tw_timer; + struct inet_bind_bucket *tw_tb; +}; + +struct tcp_timewait_sock { + struct 
inet_timewait_sock tw_sk; + u32 tw_rcv_wnd; + u32 tw_ts_offset; + u32 tw_ts_recent; + u32 tw_last_oow_ack_time; + int tw_ts_recent_stamp; + u32 tw_tx_delay; + struct tcp_md5sig_key *tw_md5_key; +}; + +struct udp_sock { + struct inet_sock inet; + int pending; + unsigned int corkflag; + __u8 encap_type; + unsigned char no_check6_tx: 1; + unsigned char no_check6_rx: 1; + unsigned char encap_enabled: 1; + unsigned char gro_enabled: 1; + unsigned char accept_udp_l4: 1; + unsigned char accept_udp_fraglist: 1; + __u16 len; + __u16 gso_size; + __u16 pcslen; + __u16 pcrlen; + __u8 pcflag; + __u8 unused[3]; + int (*encap_rcv)(struct sock *, struct sk_buff *); + int (*encap_err_lookup)(struct sock *, struct sk_buff *); + void (*encap_destroy)(struct sock *); + struct sk_buff * (*gro_receive)(struct sock *, struct list_head *, struct sk_buff *); + int (*gro_complete)(struct sock *, struct sk_buff *, int); + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct sk_buff_head reader_queue; + int forward_deficit; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct udp6_sock { + struct udp_sock udp; + struct ipv6_pinfo inet6; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct tcp6_sock { + struct tcp_sock tcp; + struct ipv6_pinfo inet6; +}; + +struct fib6_result; + +struct fib6_config; + +struct ipv6_stub { + int (*ipv6_sock_mc_join)(struct sock *, int, const struct in6_addr *); + int (*ipv6_sock_mc_drop)(struct sock *, int, const struct in6_addr *); + struct dst_entry * (*ipv6_dst_lookup_flow)(struct net *, const struct sock *, struct flowi6 *, const struct in6_addr *); + int (*ipv6_route_input)(struct sk_buff *); + struct fib6_table * (*fib6_get_table)(struct net *, u32); + int (*fib6_lookup)(struct net *, int, struct flowi6 *, struct fib6_result *, int); + int (*fib6_table_lookup)(struct net *, struct fib6_table *, int, struct flowi6 *, struct fib6_result *, int); + void (*fib6_select_path)(const struct net *, struct fib6_result *, struct flowi6 *, int, bool, const struct sk_buff *, int); + u32 (*ip6_mtu_from_fib6)(const struct fib6_result *, const struct in6_addr *, const struct in6_addr *); + int (*fib6_nh_init)(struct net *, struct fib6_nh *, struct fib6_config *, gfp_t, struct netlink_ext_ack *); + void (*fib6_nh_release)(struct fib6_nh *); + void (*fib6_update_sernum)(struct net *, struct fib6_info *); + int (*ip6_del_rt)(struct net *, struct fib6_info *, bool); + void (*fib6_rt_update)(struct net *, struct fib6_info *, struct nl_info *); + void (*udpv6_encap_enable)(); + void (*ndisc_send_na)(struct net_device *, const struct in6_addr *, const struct in6_addr *, bool, bool, bool, bool); + void (*xfrm6_local_rxpmtu)(struct sk_buff *, u32); + int (*xfrm6_udp_encap_rcv)(struct sock *, struct sk_buff *); + int (*xfrm6_rcv_encap)(struct sk_buff *, int, __be32, int); + struct neigh_table *nd_tbl; + int (*ipv6_fragment)(struct net *, struct sock *, struct sk_buff *, int (*)(struct net *, struct sock *, struct sk_buff *)); + struct net_device * (*ipv6_dev_find)(struct net *, const struct in6_addr *, struct net_device *); +}; + +struct fib6_result { + struct fib6_nh *nh; + struct fib6_info *f6i; + u32 fib6_flags; + u8 fib6_type; + struct rt6_info *rt6; +}; + +struct fib6_config { + u32 fc_table; + u32 fc_metric; + int fc_dst_len; + int fc_src_len; + int fc_ifindex; + u32 fc_flags; + u32 fc_protocol; + u16 fc_type; + u16 fc_delete_all_nh: 1; + u16 fc_ignore_dev_down: 1; + u16 __unused: 14; + u32 fc_nh_id; + struct in6_addr fc_dst; + 
struct in6_addr fc_src; + struct in6_addr fc_prefsrc; + struct in6_addr fc_gateway; + long unsigned int fc_expires; + struct nlattr *fc_mx; + int fc_mx_len; + int fc_mp_len; + struct nlattr *fc_mp; + struct nl_info fc_nlinfo; + struct nlattr *fc_encap; + u16 fc_encap_type; + bool fc_is_fdb; +}; + +struct ipv6_bpf_stub { + int (*inet6_bind)(struct sock *, struct sockaddr *, int, u32); + struct sock * (*udp6_lib_lookup)(struct net *, const struct in6_addr *, __be16, const struct in6_addr *, __be16, int, int, struct udp_table *, struct sk_buff *); +}; + +struct fib_result { + __be32 prefix; + unsigned char prefixlen; + unsigned char nh_sel; + unsigned char type; + unsigned char scope; + u32 tclassid; + struct fib_nh_common *nhc; + struct fib_info *fi; + struct fib_table *table; + struct hlist_head *fa_head; +}; + +enum { + INET_ECN_NOT_ECT = 0, + INET_ECN_ECT_1 = 1, + INET_ECN_ECT_0 = 2, + INET_ECN_CE = 3, + INET_ECN_MASK = 3, +}; + +struct tcp_skb_cb { + __u32 seq; + __u32 end_seq; + union { + __u32 tcp_tw_isn; + struct { + u16 tcp_gso_segs; + u16 tcp_gso_size; + }; + }; + __u8 tcp_flags; + __u8 sacked; + __u8 ip_dsfield; + __u8 txstamp_ack: 1; + __u8 eor: 1; + __u8 has_rxtstamp: 1; + __u8 unused: 5; + __u32 ack_seq; + union { + struct { + __u32 in_flight: 30; + __u32 is_app_limited: 1; + __u32 unused: 1; + __u32 delivered; + u64 first_tx_mstamp; + u64 delivered_mstamp; + } tx; + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + } header; + }; +}; + +struct strp_msg { + int full_len; + int offset; +}; + +struct xdp_umem { + void *addrs; + u64 size; + u32 headroom; + u32 chunk_size; + u32 chunks; + u32 npgs; + struct user_struct *user; + refcount_t users; + u8 flags; + bool zc; + struct page **pgs; + int id; + struct list_head xsk_dma_list; + struct work_struct work; +}; + +struct xsk_queue; + +struct xdp_sock { + struct sock sk; + long: 64; + struct xsk_queue *rx; + struct net_device *dev; + struct xdp_umem *umem; + struct list_head flush_node; + struct xsk_buff_pool *pool; + u16 queue_id; + bool zc; + enum { + XSK_READY = 0, + XSK_BOUND = 1, + XSK_UNBOUND = 2, + } state; + long: 64; + struct xsk_queue *tx; + struct list_head tx_list; + spinlock_t rx_lock; + u64 rx_dropped; + u64 rx_queue_full; + struct list_head map_list; + spinlock_t map_list_lock; + struct mutex mutex; + struct xsk_queue *fq_tmp; + struct xsk_queue *cq_tmp; + long: 64; +}; + +struct ipv6_sr_hdr { + __u8 nexthdr; + __u8 hdrlen; + __u8 type; + __u8 segments_left; + __u8 first_segment; + __u8 flags; + __u16 tag; + struct in6_addr segments[0]; +}; + +enum { + SEG6_LOCAL_ACTION_UNSPEC = 0, + SEG6_LOCAL_ACTION_END = 1, + SEG6_LOCAL_ACTION_END_X = 2, + SEG6_LOCAL_ACTION_END_T = 3, + SEG6_LOCAL_ACTION_END_DX2 = 4, + SEG6_LOCAL_ACTION_END_DX6 = 5, + SEG6_LOCAL_ACTION_END_DX4 = 6, + SEG6_LOCAL_ACTION_END_DT6 = 7, + SEG6_LOCAL_ACTION_END_DT4 = 8, + SEG6_LOCAL_ACTION_END_B6 = 9, + SEG6_LOCAL_ACTION_END_B6_ENCAP = 10, + SEG6_LOCAL_ACTION_END_BM = 11, + SEG6_LOCAL_ACTION_END_S = 12, + SEG6_LOCAL_ACTION_END_AS = 13, + SEG6_LOCAL_ACTION_END_AM = 14, + SEG6_LOCAL_ACTION_END_BPF = 15, + __SEG6_LOCAL_ACTION_MAX = 16, +}; + +struct seg6_bpf_srh_state { + struct ipv6_sr_hdr *srh; + u16 hdrlen; + bool valid; +}; + +struct tls_crypto_info { + __u16 version; + __u16 cipher_type; +}; + +struct tls12_crypto_info_aes_gcm_128 { + struct tls_crypto_info info; + unsigned char iv[8]; + unsigned char key[16]; + unsigned char salt[4]; + unsigned char rec_seq[8]; +}; + +struct tls12_crypto_info_aes_gcm_256 { + struct tls_crypto_info info; + 
unsigned char iv[8]; + unsigned char key[32]; + unsigned char salt[4]; + unsigned char rec_seq[8]; +}; + +struct tls12_crypto_info_chacha20_poly1305 { + struct tls_crypto_info info; + unsigned char iv[12]; + unsigned char key[32]; + unsigned char salt[0]; + unsigned char rec_seq[8]; +}; + +struct tls_sw_context_rx { + struct crypto_aead *aead_recv; + struct crypto_wait async_wait; + struct strparser strp; + struct sk_buff_head rx_list; + void (*saved_data_ready)(struct sock *); + struct sk_buff *recv_pkt; + u8 control; + u8 async_capable: 1; + u8 decrypted: 1; + atomic_t decrypt_pending; + spinlock_t decrypt_compl_lock; + bool async_notify; +}; + +struct cipher_context { + char *iv; + char *rec_seq; +}; + +union tls_crypto_context { + struct tls_crypto_info info; + union { + struct tls12_crypto_info_aes_gcm_128 aes_gcm_128; + struct tls12_crypto_info_aes_gcm_256 aes_gcm_256; + struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305; + }; +}; + +struct tls_prot_info { + u16 version; + u16 cipher_type; + u16 prepend_size; + u16 tag_size; + u16 overhead_size; + u16 iv_size; + u16 salt_size; + u16 rec_seq_size; + u16 aad_size; + u16 tail_size; +}; + +struct tls_context { + struct tls_prot_info prot_info; + u8 tx_conf: 3; + u8 rx_conf: 3; + int (*push_pending_record)(struct sock *, int); + void (*sk_write_space)(struct sock *); + void *priv_ctx_tx; + void *priv_ctx_rx; + struct net_device *netdev; + struct cipher_context tx; + struct cipher_context rx; + struct scatterlist *partially_sent_record; + u16 partially_sent_offset; + bool in_tcp_sendpages; + bool pending_open_record_frags; + struct mutex tx_lock; + long unsigned int flags; + struct proto *sk_proto; + struct sock *sk; + void (*sk_destruct)(struct sock *); + union tls_crypto_context crypto_send; + union tls_crypto_context crypto_recv; + struct list_head list; + refcount_t refcount; + struct callback_head rcu; +}; + +typedef u64 (*btf_bpf_skb_get_pay_offset)(struct sk_buff *); + +typedef u64 (*btf_bpf_skb_get_nlattr)(struct sk_buff *, u32, u32); + +typedef u64 (*btf_bpf_skb_get_nlattr_nest)(struct sk_buff *, u32, u32); + +typedef u64 (*btf_bpf_skb_load_helper_8)(const struct sk_buff *, const void *, int, int); + +typedef u64 (*btf_bpf_skb_load_helper_8_no_cache)(const struct sk_buff *, int); + +typedef u64 (*btf_bpf_skb_load_helper_16)(const struct sk_buff *, const void *, int, int); + +typedef u64 (*btf_bpf_skb_load_helper_16_no_cache)(const struct sk_buff *, int); + +typedef u64 (*btf_bpf_skb_load_helper_32)(const struct sk_buff *, const void *, int, int); + +typedef u64 (*btf_bpf_skb_load_helper_32_no_cache)(const struct sk_buff *, int); + +struct bpf_scratchpad { + union { + __be32 diff[128]; + u8 buff[512]; + }; +}; + +typedef u64 (*btf_bpf_skb_store_bytes)(struct sk_buff *, u32, const void *, u32, u64); + +typedef u64 (*btf_bpf_skb_load_bytes)(const struct sk_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_flow_dissector_load_bytes)(const struct bpf_flow_dissector *, u32, void *, u32); + +typedef u64 (*btf_bpf_skb_load_bytes_relative)(const struct sk_buff *, u32, void *, u32, u32); + +typedef u64 (*btf_bpf_skb_pull_data)(struct sk_buff *, u32); + +typedef u64 (*btf_bpf_sk_fullsock)(struct sock *); + +typedef u64 (*btf_sk_skb_pull_data)(struct sk_buff *, u32); + +typedef u64 (*btf_bpf_l3_csum_replace)(struct sk_buff *, u32, u64, u64, u64); + +typedef u64 (*btf_bpf_l4_csum_replace)(struct sk_buff *, u32, u64, u64, u64); + +typedef u64 (*btf_bpf_csum_diff)(__be32 *, u32, __be32 *, u32, __wsum); + +typedef u64 
(*btf_bpf_csum_update)(struct sk_buff *, __wsum); + +typedef u64 (*btf_bpf_csum_level)(struct sk_buff *, u64); + +enum { + BPF_F_NEIGH = 2, + BPF_F_PEER = 4, + BPF_F_NEXTHOP = 8, +}; + +typedef u64 (*btf_bpf_clone_redirect)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_bpf_redirect)(u32, u64); + +typedef u64 (*btf_bpf_redirect_peer)(u32, u64); + +typedef u64 (*btf_bpf_redirect_neigh)(u32, struct bpf_redir_neigh *, int, u64); + +typedef u64 (*btf_bpf_msg_apply_bytes)(struct sk_msg *, u32); + +typedef u64 (*btf_bpf_msg_cork_bytes)(struct sk_msg *, u32); + +typedef u64 (*btf_bpf_msg_pull_data)(struct sk_msg *, u32, u32, u64); + +typedef u64 (*btf_bpf_msg_push_data)(struct sk_msg *, u32, u32, u64); + +typedef u64 (*btf_bpf_msg_pop_data)(struct sk_msg *, u32, u32, u64); + +typedef u64 (*btf_bpf_get_cgroup_classid_curr)(); + +typedef u64 (*btf_bpf_skb_cgroup_classid)(const struct sk_buff *); + +typedef u64 (*btf_bpf_get_cgroup_classid)(const struct sk_buff *); + +typedef u64 (*btf_bpf_get_route_realm)(const struct sk_buff *); + +typedef u64 (*btf_bpf_get_hash_recalc)(struct sk_buff *); + +typedef u64 (*btf_bpf_set_hash_invalid)(struct sk_buff *); + +typedef u64 (*btf_bpf_set_hash)(struct sk_buff *, u32); + +typedef u64 (*btf_bpf_skb_vlan_push)(struct sk_buff *, __be16, u16); + +typedef u64 (*btf_bpf_skb_vlan_pop)(struct sk_buff *); + +typedef u64 (*btf_bpf_skb_change_proto)(struct sk_buff *, __be16, u64); + +typedef u64 (*btf_bpf_skb_change_type)(struct sk_buff *, u32); + +typedef u64 (*btf_sk_skb_adjust_room)(struct sk_buff *, s32, u32, u64); + +typedef u64 (*btf_bpf_skb_adjust_room)(struct sk_buff *, s32, u32, u64); + +typedef u64 (*btf_bpf_skb_change_tail)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_sk_skb_change_tail)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_bpf_skb_change_head)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_sk_skb_change_head)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_bpf_xdp_adjust_head)(struct xdp_buff *, int); + +typedef u64 (*btf_bpf_xdp_adjust_tail)(struct xdp_buff *, int); + +typedef u64 (*btf_bpf_xdp_adjust_meta)(struct xdp_buff *, int); + +typedef u64 (*btf_bpf_xdp_redirect)(u32, u64); + +typedef u64 (*btf_bpf_xdp_redirect_map)(struct bpf_map *, u32, u64); + +typedef u64 (*btf_bpf_skb_event_output)(struct sk_buff *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_skb_get_tunnel_key)(struct sk_buff *, struct bpf_tunnel_key *, u32, u64); + +typedef u64 (*btf_bpf_skb_get_tunnel_opt)(struct sk_buff *, u8 *, u32); + +typedef u64 (*btf_bpf_skb_set_tunnel_key)(struct sk_buff *, const struct bpf_tunnel_key *, u32, u64); + +typedef u64 (*btf_bpf_skb_set_tunnel_opt)(struct sk_buff *, const u8 *, u32); + +typedef u64 (*btf_bpf_skb_under_cgroup)(struct sk_buff *, struct bpf_map *, u32); + +typedef u64 (*btf_bpf_skb_cgroup_id)(const struct sk_buff *); + +typedef u64 (*btf_bpf_skb_ancestor_cgroup_id)(const struct sk_buff *, int); + +typedef u64 (*btf_bpf_sk_cgroup_id)(struct sock *); + +typedef u64 (*btf_bpf_sk_ancestor_cgroup_id)(struct sock *, int); + +typedef u64 (*btf_bpf_xdp_event_output)(struct xdp_buff *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_get_socket_cookie)(struct sk_buff *); + +typedef u64 (*btf_bpf_get_socket_cookie_sock_addr)(struct bpf_sock_addr_kern *); + +typedef u64 (*btf_bpf_get_socket_cookie_sock)(struct sock *); + +typedef u64 (*btf_bpf_get_socket_ptr_cookie)(struct sock *); + +typedef u64 (*btf_bpf_get_socket_cookie_sock_ops)(struct bpf_sock_ops_kern *); + +typedef u64 
(*btf_bpf_get_netns_cookie_sock)(struct sock *); + +typedef u64 (*btf_bpf_get_netns_cookie_sock_addr)(struct bpf_sock_addr_kern *); + +typedef u64 (*btf_bpf_get_socket_uid)(struct sk_buff *); + +typedef u64 (*btf_bpf_sock_addr_setsockopt)(struct bpf_sock_addr_kern *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_addr_getsockopt)(struct bpf_sock_addr_kern *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_ops_setsockopt)(struct bpf_sock_ops_kern *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_ops_getsockopt)(struct bpf_sock_ops_kern *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_ops_cb_flags_set)(struct bpf_sock_ops_kern *, int); + +typedef u64 (*btf_bpf_bind)(struct bpf_sock_addr_kern *, struct sockaddr *, int); + +typedef u64 (*btf_bpf_skb_get_xfrm_state)(struct sk_buff *, u32, struct bpf_xfrm_state *, u32, u64); + +typedef u64 (*btf_bpf_xdp_fib_lookup)(struct xdp_buff *, struct bpf_fib_lookup *, int, u32); + +typedef u64 (*btf_bpf_skb_fib_lookup)(struct sk_buff *, struct bpf_fib_lookup *, int, u32); + +typedef u64 (*btf_bpf_skb_check_mtu)(struct sk_buff *, u32, u32 *, s32, u64); + +typedef u64 (*btf_bpf_xdp_check_mtu)(struct xdp_buff *, u32, u32 *, s32, u64); + +typedef u64 (*btf_bpf_lwt_in_push_encap)(struct sk_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_lwt_xmit_push_encap)(struct sk_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_lwt_seg6_store_bytes)(struct sk_buff *, u32, const void *, u32); + +typedef u64 (*btf_bpf_lwt_seg6_action)(struct sk_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_lwt_seg6_adjust_srh)(struct sk_buff *, u32, s32); + +typedef u64 (*btf_bpf_skc_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sk_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sk_lookup_udp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sk_release)(struct sock *); + +typedef u64 (*btf_bpf_xdp_sk_lookup_udp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, u32, u64); + +typedef u64 (*btf_bpf_xdp_skc_lookup_tcp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, u32, u64); + +typedef u64 (*btf_bpf_xdp_sk_lookup_tcp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, u32, u64); + +typedef u64 (*btf_bpf_sock_addr_skc_lookup_tcp)(struct bpf_sock_addr_kern *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sock_addr_sk_lookup_tcp)(struct bpf_sock_addr_kern *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sock_addr_sk_lookup_udp)(struct bpf_sock_addr_kern *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_tcp_sock)(struct sock *); + +typedef u64 (*btf_bpf_get_listener_sock)(struct sock *); + +typedef u64 (*btf_bpf_skb_ecn_set_ce)(struct sk_buff *); + +typedef u64 (*btf_bpf_tcp_check_syncookie)(struct sock *, void *, u32, struct tcphdr *, u32); + +typedef u64 (*btf_bpf_tcp_gen_syncookie)(struct sock *, void *, u32, struct tcphdr *, u32); + +typedef u64 (*btf_bpf_sk_assign)(struct sk_buff *, struct sock *, u64); + +typedef u64 (*btf_bpf_sock_ops_load_hdr_opt)(struct bpf_sock_ops_kern *, void *, u32, u64); + +typedef u64 (*btf_bpf_sock_ops_store_hdr_opt)(struct bpf_sock_ops_kern *, const void *, u32, u64); + +typedef u64 (*btf_bpf_sock_ops_reserve_hdr_opt)(struct bpf_sock_ops_kern *, u32, u64); + +typedef u64 (*btf_sk_select_reuseport)(struct sk_reuseport_kern *, struct bpf_map *, void *, u32); + +typedef u64 (*btf_sk_reuseport_load_bytes)(const struct sk_reuseport_kern *, 
u32, void *, u32); + +typedef u64 (*btf_sk_reuseport_load_bytes_relative)(const struct sk_reuseport_kern *, u32, void *, u32, u32); + +typedef u64 (*btf_bpf_sk_lookup_assign)(struct bpf_sk_lookup_kern *, struct sock *, u64); + +typedef u64 (*btf_bpf_skc_to_tcp6_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_tcp_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_tcp_timewait_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_tcp_request_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_udp6_sock)(struct sock *); + +typedef u64 (*btf_bpf_sock_from_file)(struct file *); + +struct bpf_dtab_netdev___2; + +struct bpf_cpu_map_entry___2; + +enum { + INET_DIAG_REQ_NONE = 0, + INET_DIAG_REQ_BYTECODE = 1, + INET_DIAG_REQ_SK_BPF_STORAGES = 2, + INET_DIAG_REQ_PROTOCOL = 3, + __INET_DIAG_REQ_MAX = 4, +}; + +struct sock_diag_req { + __u8 sdiag_family; + __u8 sdiag_protocol; +}; + +struct sock_diag_handler { + __u8 family; + int (*dump)(struct sk_buff *, struct nlmsghdr *); + int (*get_info)(struct sk_buff *, struct sock *); + int (*destroy)(struct sk_buff *, struct nlmsghdr *); +}; + +struct broadcast_sk { + struct sock *sk; + struct work_struct work; +}; + +typedef int gifconf_func_t(struct net_device *, char *, int, int); + +struct hwtstamp_config { + int flags; + int tx_type; + int rx_filter; +}; + +enum hwtstamp_tx_types { + HWTSTAMP_TX_OFF = 0, + HWTSTAMP_TX_ON = 1, + HWTSTAMP_TX_ONESTEP_SYNC = 2, + HWTSTAMP_TX_ONESTEP_P2P = 3, + __HWTSTAMP_TX_CNT = 4, +}; + +enum hwtstamp_rx_filters { + HWTSTAMP_FILTER_NONE = 0, + HWTSTAMP_FILTER_ALL = 1, + HWTSTAMP_FILTER_SOME = 2, + HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 3, + HWTSTAMP_FILTER_PTP_V1_L4_SYNC = 4, + HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ = 5, + HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 6, + HWTSTAMP_FILTER_PTP_V2_L4_SYNC = 7, + HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ = 8, + HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 9, + HWTSTAMP_FILTER_PTP_V2_L2_SYNC = 10, + HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ = 11, + HWTSTAMP_FILTER_PTP_V2_EVENT = 12, + HWTSTAMP_FILTER_PTP_V2_SYNC = 13, + HWTSTAMP_FILTER_PTP_V2_DELAY_REQ = 14, + HWTSTAMP_FILTER_NTP_ALL = 15, + __HWTSTAMP_FILTER_CNT = 16, +}; + +struct tso_t { + int next_frag_idx; + int size; + void *data; + u16 ip_id; + u8 tlen; + bool ipv6; + u32 tcp_seq; +}; + +struct fib_notifier_info { + int family; + struct netlink_ext_ack *extack; +}; + +enum fib_event_type { + FIB_EVENT_ENTRY_REPLACE = 0, + FIB_EVENT_ENTRY_APPEND = 1, + FIB_EVENT_ENTRY_ADD = 2, + FIB_EVENT_ENTRY_DEL = 3, + FIB_EVENT_RULE_ADD = 4, + FIB_EVENT_RULE_DEL = 5, + FIB_EVENT_NH_ADD = 6, + FIB_EVENT_NH_DEL = 7, + FIB_EVENT_VIF_ADD = 8, + FIB_EVENT_VIF_DEL = 9, +}; + +struct fib_notifier_net { + struct list_head fib_notifier_ops; + struct atomic_notifier_head fib_chain; +}; + +struct xdp_frame_bulk { + int count; + void *xa; + void *q[16]; +}; + +struct xdp_attachment_info { + struct bpf_prog *prog; + u32 flags; +}; + +struct xdp_buff_xsk; + +struct xsk_buff_pool { + struct device *dev; + struct net_device *netdev; + struct list_head xsk_tx_list; + spinlock_t xsk_tx_list_lock; + refcount_t users; + struct xdp_umem *umem; + struct work_struct work; + struct list_head free_list; + u32 heads_cnt; + u16 queue_id; + long: 16; + long: 64; + long: 64; + long: 64; + struct xsk_queue *fq; + struct xsk_queue *cq; + dma_addr_t *dma_pages; + struct xdp_buff_xsk *heads; + u64 chunk_mask; + u64 addrs_cnt; + u32 free_list_cnt; + u32 dma_pages_cnt; + u32 free_heads_cnt; + u32 headroom; + u32 chunk_size; + u32 frame_len; + u8 cached_need_wakeup; + bool uses_need_wakeup; + bool 
dma_need_sync; + bool unaligned; + void *addrs; + spinlock_t cq_lock; + struct xdp_buff_xsk *free_heads[0]; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct xdp_buff_xsk { + struct xdp_buff xdp; + dma_addr_t dma; + dma_addr_t frame_dma; + struct xsk_buff_pool *pool; + bool unaligned; + u64 orig_addr; + struct list_head free_list_node; +}; + +struct flow_match_meta { + struct flow_dissector_key_meta *key; + struct flow_dissector_key_meta *mask; +}; + +struct flow_match_basic { + struct flow_dissector_key_basic *key; + struct flow_dissector_key_basic *mask; +}; + +struct flow_match_control { + struct flow_dissector_key_control *key; + struct flow_dissector_key_control *mask; +}; + +struct flow_match_eth_addrs { + struct flow_dissector_key_eth_addrs *key; + struct flow_dissector_key_eth_addrs *mask; +}; + +struct flow_match_vlan { + struct flow_dissector_key_vlan *key; + struct flow_dissector_key_vlan *mask; +}; + +struct flow_match_ipv4_addrs { + struct flow_dissector_key_ipv4_addrs *key; + struct flow_dissector_key_ipv4_addrs *mask; +}; + +struct flow_match_ipv6_addrs { + struct flow_dissector_key_ipv6_addrs *key; + struct flow_dissector_key_ipv6_addrs *mask; +}; + +struct flow_match_ip { + struct flow_dissector_key_ip *key; + struct flow_dissector_key_ip *mask; +}; + +struct flow_match_ports { + struct flow_dissector_key_ports *key; + struct flow_dissector_key_ports *mask; +}; + +struct flow_match_icmp { + struct flow_dissector_key_icmp *key; + struct flow_dissector_key_icmp *mask; +}; + +struct flow_match_tcp { + struct flow_dissector_key_tcp *key; + struct flow_dissector_key_tcp *mask; +}; + +struct flow_match_mpls { + struct flow_dissector_key_mpls *key; + struct flow_dissector_key_mpls *mask; +}; + +struct flow_match_enc_keyid { + struct flow_dissector_key_keyid *key; + struct flow_dissector_key_keyid *mask; +}; + +struct flow_match_enc_opts { + struct flow_dissector_key_enc_opts *key; + struct flow_dissector_key_enc_opts *mask; +}; + +struct flow_match_ct { + struct flow_dissector_key_ct *key; + struct flow_dissector_key_ct *mask; +}; + +enum flow_block_command { + FLOW_BLOCK_BIND = 0, + FLOW_BLOCK_UNBIND = 1, +}; + +enum flow_block_binder_type { + FLOW_BLOCK_BINDER_TYPE_UNSPEC = 0, + FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS = 1, + FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS = 2, + FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP = 3, + FLOW_BLOCK_BINDER_TYPE_RED_MARK = 4, +}; + +struct flow_block_offload { + enum flow_block_command command; + enum flow_block_binder_type binder_type; + bool block_shared; + bool unlocked_driver_cb; + struct net *net; + struct flow_block *block; + struct list_head cb_list; + struct list_head *driver_block_list; + struct netlink_ext_ack *extack; + struct Qdisc *sch; +}; + +struct flow_block_cb; + +struct flow_block_indr { + struct list_head list; + struct net_device *dev; + struct Qdisc *sch; + enum flow_block_binder_type binder_type; + void *data; + void *cb_priv; + void (*cleanup)(struct flow_block_cb *); +}; + +struct flow_block_cb { + struct list_head driver_list; + struct list_head list; + flow_setup_cb_t *cb; + void *cb_ident; + void *cb_priv; + void (*release)(void *); + struct flow_block_indr indr; + unsigned int refcnt; +}; + +typedef int flow_indr_block_bind_cb_t(struct net_device *, struct Qdisc *, void *, enum tc_setup_type, void *, void *, void (*)(struct flow_block_cb *)); + +struct flow_indr_dev { + struct list_head list; + flow_indr_block_bind_cb_t *cb; + void *cb_priv; + refcount_t refcnt; + struct callback_head rcu; +}; + +struct 
rx_queue_attribute { + struct attribute attr; + ssize_t (*show)(struct netdev_rx_queue *, char *); + ssize_t (*store)(struct netdev_rx_queue *, const char *, size_t); +}; + +struct netdev_queue_attribute { + struct attribute attr; + ssize_t (*show)(struct netdev_queue *, char *); + ssize_t (*store)(struct netdev_queue *, const char *, size_t); +}; + +struct inet6_ifaddr { + struct in6_addr addr; + __u32 prefix_len; + __u32 rt_priority; + __u32 valid_lft; + __u32 prefered_lft; + refcount_t refcnt; + spinlock_t lock; + int state; + __u32 flags; + __u8 dad_probes; + __u8 stable_privacy_retry; + __u16 scope; + __u64 dad_nonce; + long unsigned int cstamp; + long unsigned int tstamp; + struct delayed_work dad_work; + struct inet6_dev *idev; + struct fib6_info *rt; + struct hlist_node addr_lst; + struct list_head if_list; + struct list_head tmp_list; + struct inet6_ifaddr *ifpub; + int regen_count; + bool tokenized; + struct callback_head rcu; + struct in6_addr peer_addr; +}; + +struct fib_rule_uid_range { + __u32 start; + __u32 end; +}; + +enum { + FRA_UNSPEC = 0, + FRA_DST = 1, + FRA_SRC = 2, + FRA_IIFNAME = 3, + FRA_GOTO = 4, + FRA_UNUSED2 = 5, + FRA_PRIORITY = 6, + FRA_UNUSED3 = 7, + FRA_UNUSED4 = 8, + FRA_UNUSED5 = 9, + FRA_FWMARK = 10, + FRA_FLOW = 11, + FRA_TUN_ID = 12, + FRA_SUPPRESS_IFGROUP = 13, + FRA_SUPPRESS_PREFIXLEN = 14, + FRA_TABLE = 15, + FRA_FWMASK = 16, + FRA_OIFNAME = 17, + FRA_PAD = 18, + FRA_L3MDEV = 19, + FRA_UID_RANGE = 20, + FRA_PROTOCOL = 21, + FRA_IP_PROTO = 22, + FRA_SPORT_RANGE = 23, + FRA_DPORT_RANGE = 24, + __FRA_MAX = 25, +}; + +enum { + FR_ACT_UNSPEC = 0, + FR_ACT_TO_TBL = 1, + FR_ACT_GOTO = 2, + FR_ACT_NOP = 3, + FR_ACT_RES3 = 4, + FR_ACT_RES4 = 5, + FR_ACT_BLACKHOLE = 6, + FR_ACT_UNREACHABLE = 7, + FR_ACT_PROHIBIT = 8, + __FR_ACT_MAX = 9, +}; + +struct fib_rule_notifier_info { + struct fib_notifier_info info; + struct fib_rule *rule; +}; + +struct trace_event_raw_kfree_skb { + struct trace_entry ent; + void *skbaddr; + void *location; + short unsigned int protocol; + char __data[0]; +}; + +struct trace_event_raw_consume_skb { + struct trace_entry ent; + void *skbaddr; + char __data[0]; +}; + +struct trace_event_raw_skb_copy_datagram_iovec { + struct trace_entry ent; + const void *skbaddr; + int len; + char __data[0]; +}; + +struct trace_event_data_offsets_kfree_skb {}; + +struct trace_event_data_offsets_consume_skb {}; + +struct trace_event_data_offsets_skb_copy_datagram_iovec {}; + +typedef void (*btf_trace_kfree_skb)(void *, struct sk_buff *, void *); + +typedef void (*btf_trace_consume_skb)(void *, struct sk_buff *); + +typedef void (*btf_trace_skb_copy_datagram_iovec)(void *, const struct sk_buff *, int); + +struct trace_event_raw_net_dev_start_xmit { + struct trace_entry ent; + u32 __data_loc_name; + u16 queue_mapping; + const void *skbaddr; + bool vlan_tagged; + u16 vlan_proto; + u16 vlan_tci; + u16 protocol; + u8 ip_summed; + unsigned int len; + unsigned int data_len; + int network_offset; + bool transport_offset_valid; + int transport_offset; + u8 tx_flags; + u16 gso_size; + u16 gso_segs; + u16 gso_type; + char __data[0]; +}; + +struct trace_event_raw_net_dev_xmit { + struct trace_entry ent; + void *skbaddr; + unsigned int len; + int rc; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_net_dev_xmit_timeout { + struct trace_entry ent; + u32 __data_loc_name; + u32 __data_loc_driver; + int queue_index; + char __data[0]; +}; + +struct trace_event_raw_net_dev_template { + struct trace_entry ent; + void *skbaddr; + unsigned int len; + u32 
__data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_net_dev_rx_verbose_template { + struct trace_entry ent; + u32 __data_loc_name; + unsigned int napi_id; + u16 queue_mapping; + const void *skbaddr; + bool vlan_tagged; + u16 vlan_proto; + u16 vlan_tci; + u16 protocol; + u8 ip_summed; + u32 hash; + bool l4_hash; + unsigned int len; + unsigned int data_len; + unsigned int truesize; + bool mac_header_valid; + int mac_header; + unsigned char nr_frags; + u16 gso_size; + u16 gso_type; + char __data[0]; +}; + +struct trace_event_raw_net_dev_rx_exit_template { + struct trace_entry ent; + int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_net_dev_start_xmit { + u32 name; +}; + +struct trace_event_data_offsets_net_dev_xmit { + u32 name; +}; + +struct trace_event_data_offsets_net_dev_xmit_timeout { + u32 name; + u32 driver; +}; + +struct trace_event_data_offsets_net_dev_template { + u32 name; +}; + +struct trace_event_data_offsets_net_dev_rx_verbose_template { + u32 name; +}; + +struct trace_event_data_offsets_net_dev_rx_exit_template {}; + +typedef void (*btf_trace_net_dev_start_xmit)(void *, const struct sk_buff *, const struct net_device *); + +typedef void (*btf_trace_net_dev_xmit)(void *, struct sk_buff *, int, struct net_device *, unsigned int); + +typedef void (*btf_trace_net_dev_xmit_timeout)(void *, struct net_device *, int); + +typedef void (*btf_trace_net_dev_queue)(void *, struct sk_buff *); + +typedef void (*btf_trace_netif_receive_skb)(void *, struct sk_buff *); + +typedef void (*btf_trace_netif_rx)(void *, struct sk_buff *); + +typedef void (*btf_trace_napi_gro_frags_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_napi_gro_receive_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_netif_receive_skb_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_netif_receive_skb_list_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_netif_rx_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_netif_rx_ni_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_napi_gro_frags_exit)(void *, int); + +typedef void (*btf_trace_napi_gro_receive_exit)(void *, int); + +typedef void (*btf_trace_netif_receive_skb_exit)(void *, int); + +typedef void (*btf_trace_netif_rx_exit)(void *, int); + +typedef void (*btf_trace_netif_rx_ni_exit)(void *, int); + +typedef void (*btf_trace_netif_receive_skb_list_exit)(void *, int); + +struct trace_event_raw_napi_poll { + struct trace_entry ent; + struct napi_struct *napi; + u32 __data_loc_dev_name; + int work; + int budget; + char __data[0]; +}; + +struct trace_event_data_offsets_napi_poll { + u32 dev_name; +}; + +typedef void (*btf_trace_napi_poll)(void *, struct napi_struct *, int, int); + +enum tcp_ca_state { + TCP_CA_Open = 0, + TCP_CA_Disorder = 1, + TCP_CA_CWR = 2, + TCP_CA_Recovery = 3, + TCP_CA_Loss = 4, +}; + +struct trace_event_raw_sock_rcvqueue_full { + struct trace_entry ent; + int rmem_alloc; + unsigned int truesize; + int sk_rcvbuf; + char __data[0]; +}; + +struct trace_event_raw_sock_exceed_buf_limit { + struct trace_entry ent; + char name[32]; + long int *sysctl_mem; + long int allocated; + int sysctl_rmem; + int rmem_alloc; + int sysctl_wmem; + int wmem_alloc; + int wmem_queued; + int kind; + char __data[0]; +}; + +struct trace_event_raw_inet_sock_set_state { + struct trace_entry ent; + const void *skaddr; + int oldstate; + int newstate; + __u16 sport; + __u16 dport; + __u16 family; + __u16 protocol; + __u8 saddr[4]; + __u8 
daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + char __data[0]; +}; + +struct trace_event_data_offsets_sock_rcvqueue_full {}; + +struct trace_event_data_offsets_sock_exceed_buf_limit {}; + +struct trace_event_data_offsets_inet_sock_set_state {}; + +typedef void (*btf_trace_sock_rcvqueue_full)(void *, struct sock *, struct sk_buff *); + +typedef void (*btf_trace_sock_exceed_buf_limit)(void *, struct sock *, struct proto *, long int, int); + +typedef void (*btf_trace_inet_sock_set_state)(void *, const struct sock *, const int, const int); + +struct trace_event_raw_udp_fail_queue_rcv_skb { + struct trace_entry ent; + int rc; + __u16 lport; + char __data[0]; +}; + +struct trace_event_data_offsets_udp_fail_queue_rcv_skb {}; + +typedef void (*btf_trace_udp_fail_queue_rcv_skb)(void *, int, struct sock *); + +struct trace_event_raw_tcp_event_sk_skb { + struct trace_entry ent; + const void *skbaddr; + const void *skaddr; + int state; + __u16 sport; + __u16 dport; + __u16 family; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + char __data[0]; +}; + +struct trace_event_raw_tcp_event_sk { + struct trace_entry ent; + const void *skaddr; + __u16 sport; + __u16 dport; + __u16 family; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + __u64 sock_cookie; + char __data[0]; +}; + +struct trace_event_raw_tcp_retransmit_synack { + struct trace_entry ent; + const void *skaddr; + const void *req; + __u16 sport; + __u16 dport; + __u16 family; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + char __data[0]; +}; + +struct trace_event_raw_tcp_probe { + struct trace_entry ent; + __u8 saddr[28]; + __u8 daddr[28]; + __u16 sport; + __u16 dport; + __u16 family; + __u32 mark; + __u16 data_len; + __u32 snd_nxt; + __u32 snd_una; + __u32 snd_cwnd; + __u32 ssthresh; + __u32 snd_wnd; + __u32 srtt; + __u32 rcv_wnd; + __u64 sock_cookie; + char __data[0]; +}; + +struct trace_event_data_offsets_tcp_event_sk_skb {}; + +struct trace_event_data_offsets_tcp_event_sk {}; + +struct trace_event_data_offsets_tcp_retransmit_synack {}; + +struct trace_event_data_offsets_tcp_probe {}; + +typedef void (*btf_trace_tcp_retransmit_skb)(void *, const struct sock *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_send_reset)(void *, const struct sock *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_receive_reset)(void *, struct sock *); + +typedef void (*btf_trace_tcp_destroy_sock)(void *, struct sock *); + +typedef void (*btf_trace_tcp_rcv_space_adjust)(void *, struct sock *); + +typedef void (*btf_trace_tcp_retransmit_synack)(void *, const struct sock *, const struct request_sock *); + +typedef void (*btf_trace_tcp_probe)(void *, struct sock *, struct sk_buff *); + +struct trace_event_raw_fib_table_lookup { + struct trace_entry ent; + u32 tb_id; + int err; + int oif; + int iif; + u8 proto; + __u8 tos; + __u8 scope; + __u8 flags; + __u8 src[4]; + __u8 dst[4]; + __u8 gw4[4]; + __u8 gw6[16]; + u16 sport; + u16 dport; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_data_offsets_fib_table_lookup { + u32 name; +}; + +typedef void (*btf_trace_fib_table_lookup)(void *, u32, const struct flowi4 *, const struct fib_nh_common *, int); + +struct trace_event_raw_qdisc_dequeue { + struct trace_entry ent; + struct Qdisc *qdisc; + const struct netdev_queue *txq; + int packets; + void *skbaddr; + int ifindex; + u32 handle; + u32 parent; + long unsigned int txq_state; + char __data[0]; +}; + +struct trace_event_raw_qdisc_reset { + 
struct trace_entry ent; + u32 __data_loc_dev; + u32 __data_loc_kind; + u32 parent; + u32 handle; + char __data[0]; +}; + +struct trace_event_raw_qdisc_destroy { + struct trace_entry ent; + u32 __data_loc_dev; + u32 __data_loc_kind; + u32 parent; + u32 handle; + char __data[0]; +}; + +struct trace_event_raw_qdisc_create { + struct trace_entry ent; + u32 __data_loc_dev; + u32 __data_loc_kind; + u32 parent; + char __data[0]; +}; + +struct trace_event_data_offsets_qdisc_dequeue {}; + +struct trace_event_data_offsets_qdisc_reset { + u32 dev; + u32 kind; +}; + +struct trace_event_data_offsets_qdisc_destroy { + u32 dev; + u32 kind; +}; + +struct trace_event_data_offsets_qdisc_create { + u32 dev; + u32 kind; +}; + +typedef void (*btf_trace_qdisc_dequeue)(void *, struct Qdisc *, const struct netdev_queue *, int, struct sk_buff *); + +typedef void (*btf_trace_qdisc_reset)(void *, struct Qdisc *); + +typedef void (*btf_trace_qdisc_destroy)(void *, struct Qdisc *); + +typedef void (*btf_trace_qdisc_create)(void *, const struct Qdisc_ops *, struct net_device *, u32); + +struct bridge_stp_xstats { + __u64 transition_blk; + __u64 transition_fwd; + __u64 rx_bpdu; + __u64 tx_bpdu; + __u64 rx_tcn; + __u64 tx_tcn; +}; + +struct br_mcast_stats { + __u64 igmp_v1queries[2]; + __u64 igmp_v2queries[2]; + __u64 igmp_v3queries[2]; + __u64 igmp_leaves[2]; + __u64 igmp_v1reports[2]; + __u64 igmp_v2reports[2]; + __u64 igmp_v3reports[2]; + __u64 igmp_parse_errors; + __u64 mld_v1queries[2]; + __u64 mld_v2queries[2]; + __u64 mld_leaves[2]; + __u64 mld_v1reports[2]; + __u64 mld_v2reports[2]; + __u64 mld_parse_errors; + __u64 mcast_bytes[2]; + __u64 mcast_packets[2]; +}; + +struct br_ip { + union { + __be32 ip4; + struct in6_addr ip6; + } src; + union { + __be32 ip4; + struct in6_addr ip6; + unsigned char mac_addr[6]; + } dst; + __be16 proto; + __u16 vid; +}; + +struct bridge_id { + unsigned char prio[2]; + unsigned char addr[6]; +}; + +typedef struct bridge_id bridge_id; + +struct mac_addr { + unsigned char addr[6]; +}; + +typedef struct mac_addr mac_addr; + +typedef __u16 port_id; + +struct bridge_mcast_own_query { + struct timer_list timer; + u32 startup_sent; +}; + +struct bridge_mcast_other_query { + struct timer_list timer; + long unsigned int delay_time; +}; + +struct net_bridge_port; + +struct bridge_mcast_querier { + struct br_ip addr; + struct net_bridge_port *port; +}; + +struct net_bridge; + +struct net_bridge_vlan_group; + +struct bridge_mcast_stats; + +struct net_bridge_port { + struct net_bridge *br; + struct net_device *dev; + struct list_head list; + long unsigned int flags; + struct net_bridge_vlan_group *vlgrp; + struct net_bridge_port *backup_port; + u8 priority; + u8 state; + u16 port_no; + unsigned char topology_change_ack; + unsigned char config_pending; + port_id port_id; + port_id designated_port; + bridge_id designated_root; + bridge_id designated_bridge; + u32 path_cost; + u32 designated_cost; + long unsigned int designated_age; + struct timer_list forward_delay_timer; + struct timer_list hold_timer; + struct timer_list message_age_timer; + struct kobject kobj; + struct callback_head rcu; + struct bridge_mcast_own_query ip4_own_query; + struct bridge_mcast_own_query ip6_own_query; + u32 multicast_eht_hosts_limit; + u32 multicast_eht_hosts_cnt; + unsigned char multicast_router; + struct bridge_mcast_stats *mcast_stats; + struct timer_list multicast_router_timer; + struct hlist_head mglist; + struct hlist_node rlist; + char sysfs_name[16]; + struct netpoll *np; + int offload_fwd_mark; + u16 
group_fwd_mask; + u16 backup_redirected_cnt; + struct bridge_stp_xstats stp_xstats; +}; + +struct bridge_mcast_stats { + struct br_mcast_stats mstats; + struct u64_stats_sync syncp; +}; + +struct net_bridge { + spinlock_t lock; + spinlock_t hash_lock; + struct hlist_head frame_type_list; + struct net_device *dev; + long unsigned int options; + __be16 vlan_proto; + u16 default_pvid; + struct net_bridge_vlan_group *vlgrp; + struct rhashtable fdb_hash_tbl; + struct list_head port_list; + union { + struct rtable fake_rtable; + struct rt6_info fake_rt6_info; + }; + u16 group_fwd_mask; + u16 group_fwd_mask_required; + bridge_id designated_root; + bridge_id bridge_id; + unsigned char topology_change; + unsigned char topology_change_detected; + u16 root_port; + long unsigned int max_age; + long unsigned int hello_time; + long unsigned int forward_delay; + long unsigned int ageing_time; + long unsigned int bridge_max_age; + long unsigned int bridge_hello_time; + long unsigned int bridge_forward_delay; + long unsigned int bridge_ageing_time; + u32 root_path_cost; + u8 group_addr[6]; + enum { + BR_NO_STP = 0, + BR_KERNEL_STP = 1, + BR_USER_STP = 2, + } stp_enabled; + u32 hash_max; + u32 multicast_last_member_count; + u32 multicast_startup_query_count; + u8 multicast_igmp_version; + u8 multicast_router; + u8 multicast_mld_version; + spinlock_t multicast_lock; + long unsigned int multicast_last_member_interval; + long unsigned int multicast_membership_interval; + long unsigned int multicast_querier_interval; + long unsigned int multicast_query_interval; + long unsigned int multicast_query_response_interval; + long unsigned int multicast_startup_query_interval; + struct rhashtable mdb_hash_tbl; + struct rhashtable sg_port_tbl; + struct hlist_head mcast_gc_list; + struct hlist_head mdb_list; + struct hlist_head router_list; + struct timer_list multicast_router_timer; + struct bridge_mcast_other_query ip4_other_query; + struct bridge_mcast_own_query ip4_own_query; + struct bridge_mcast_querier ip4_querier; + struct bridge_mcast_stats *mcast_stats; + struct bridge_mcast_other_query ip6_other_query; + struct bridge_mcast_own_query ip6_own_query; + struct bridge_mcast_querier ip6_querier; + struct work_struct mcast_gc_work; + struct timer_list hello_timer; + struct timer_list tcn_timer; + struct timer_list topology_change_timer; + struct delayed_work gc_work; + struct kobject *ifobj; + u32 auto_cnt; + int offload_fwd_mark; + struct hlist_head fdb_list; + struct hlist_head mrp_list; + struct hlist_head mep_list; +}; + +struct net_bridge_vlan_group { + struct rhashtable vlan_hash; + struct rhashtable tunnel_hash; + struct list_head vlan_list; + u16 num_vlans; + u16 pvid; + u8 pvid_state; +}; + +struct net_bridge_fdb_key { + mac_addr addr; + u16 vlan_id; +}; + +struct net_bridge_fdb_entry { + struct rhash_head rhnode; + struct net_bridge_port *dst; + struct net_bridge_fdb_key key; + struct hlist_node fdb_node; + long unsigned int flags; + long: 64; + long: 64; + long unsigned int updated; + long unsigned int used; + struct callback_head rcu; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct trace_event_raw_br_fdb_add { + struct trace_entry ent; + u8 ndm_flags; + u32 __data_loc_dev; + unsigned char addr[6]; + u16 vid; + u16 nlh_flags; + char __data[0]; +}; + +struct trace_event_raw_br_fdb_external_learn_add { + struct trace_entry ent; + u32 __data_loc_br_dev; + u32 __data_loc_dev; + unsigned char addr[6]; + u16 vid; + char __data[0]; +}; + +struct trace_event_raw_fdb_delete { + struct trace_entry 
ent; + u32 __data_loc_br_dev; + u32 __data_loc_dev; + unsigned char addr[6]; + u16 vid; + char __data[0]; +}; + +struct trace_event_raw_br_fdb_update { + struct trace_entry ent; + u32 __data_loc_br_dev; + u32 __data_loc_dev; + unsigned char addr[6]; + u16 vid; + long unsigned int flags; + char __data[0]; +}; + +struct trace_event_data_offsets_br_fdb_add { + u32 dev; +}; + +struct trace_event_data_offsets_br_fdb_external_learn_add { + u32 br_dev; + u32 dev; +}; + +struct trace_event_data_offsets_fdb_delete { + u32 br_dev; + u32 dev; +}; + +struct trace_event_data_offsets_br_fdb_update { + u32 br_dev; + u32 dev; +}; + +typedef void (*btf_trace_br_fdb_add)(void *, struct ndmsg *, struct net_device *, const unsigned char *, u16, u16); + +typedef void (*btf_trace_br_fdb_external_learn_add)(void *, struct net_bridge *, struct net_bridge_port *, const unsigned char *, u16); + +typedef void (*btf_trace_fdb_delete)(void *, struct net_bridge *, struct net_bridge_fdb_entry *); + +typedef void (*btf_trace_br_fdb_update)(void *, struct net_bridge *, struct net_bridge_port *, const unsigned char *, u16, long unsigned int); + +struct trace_event_raw_page_pool_release { + struct trace_entry ent; + const struct page_pool *pool; + s32 inflight; + u32 hold; + u32 release; + u64 cnt; + char __data[0]; +}; + +struct trace_event_raw_page_pool_state_release { + struct trace_entry ent; + const struct page_pool *pool; + const struct page *page; + u32 release; + long unsigned int pfn; + char __data[0]; +}; + +struct trace_event_raw_page_pool_state_hold { + struct trace_entry ent; + const struct page_pool *pool; + const struct page *page; + u32 hold; + long unsigned int pfn; + char __data[0]; +}; + +struct trace_event_raw_page_pool_update_nid { + struct trace_entry ent; + const struct page_pool *pool; + int pool_nid; + int new_nid; + char __data[0]; +}; + +struct trace_event_data_offsets_page_pool_release {}; + +struct trace_event_data_offsets_page_pool_state_release {}; + +struct trace_event_data_offsets_page_pool_state_hold {}; + +struct trace_event_data_offsets_page_pool_update_nid {}; + +typedef void (*btf_trace_page_pool_release)(void *, const struct page_pool *, s32, u32, u32); + +typedef void (*btf_trace_page_pool_state_release)(void *, const struct page_pool *, const struct page *, u32); + +typedef void (*btf_trace_page_pool_state_hold)(void *, const struct page_pool *, const struct page *, u32); + +typedef void (*btf_trace_page_pool_update_nid)(void *, const struct page_pool *, int); + +struct trace_event_raw_neigh_create { + struct trace_entry ent; + u32 family; + u32 __data_loc_dev; + int entries; + u8 created; + u8 gc_exempt; + u8 primary_key4[4]; + u8 primary_key6[16]; + char __data[0]; +}; + +struct trace_event_raw_neigh_update { + struct trace_entry ent; + u32 family; + u32 __data_loc_dev; + u8 lladdr[32]; + u8 lladdr_len; + u8 flags; + u8 nud_state; + u8 type; + u8 dead; + int refcnt; + __u8 primary_key4[4]; + __u8 primary_key6[16]; + long unsigned int confirmed; + long unsigned int updated; + long unsigned int used; + u8 new_lladdr[32]; + u8 new_state; + u32 update_flags; + u32 pid; + char __data[0]; +}; + +struct trace_event_raw_neigh__update { + struct trace_entry ent; + u32 family; + u32 __data_loc_dev; + u8 lladdr[32]; + u8 lladdr_len; + u8 flags; + u8 nud_state; + u8 type; + u8 dead; + int refcnt; + __u8 primary_key4[4]; + __u8 primary_key6[16]; + long unsigned int confirmed; + long unsigned int updated; + long unsigned int used; + u32 err; + char __data[0]; +}; + +struct 
trace_event_data_offsets_neigh_create { + u32 dev; +}; + +struct trace_event_data_offsets_neigh_update { + u32 dev; +}; + +struct trace_event_data_offsets_neigh__update { + u32 dev; +}; + +typedef void (*btf_trace_neigh_create)(void *, struct neigh_table *, struct net_device *, const void *, const struct neighbour *, bool); + +typedef void (*btf_trace_neigh_update)(void *, struct neighbour *, const u8 *, u8, u32, u32); + +typedef void (*btf_trace_neigh_update_done)(void *, struct neighbour *, int); + +typedef void (*btf_trace_neigh_timer_handler)(void *, struct neighbour *, int); + +typedef void (*btf_trace_neigh_event_send_done)(void *, struct neighbour *, int); + +typedef void (*btf_trace_neigh_event_send_dead)(void *, struct neighbour *, int); + +typedef void (*btf_trace_neigh_cleanup_and_release)(void *, struct neighbour *, int); + +struct net_dm_drop_point { + __u8 pc[8]; + __u32 count; +}; + +struct net_dm_alert_msg { + __u32 entries; + struct net_dm_drop_point points[0]; +}; + +enum { + NET_DM_CMD_UNSPEC = 0, + NET_DM_CMD_ALERT = 1, + NET_DM_CMD_CONFIG = 2, + NET_DM_CMD_START = 3, + NET_DM_CMD_STOP = 4, + NET_DM_CMD_PACKET_ALERT = 5, + NET_DM_CMD_CONFIG_GET = 6, + NET_DM_CMD_CONFIG_NEW = 7, + NET_DM_CMD_STATS_GET = 8, + NET_DM_CMD_STATS_NEW = 9, + _NET_DM_CMD_MAX = 10, +}; + +enum net_dm_attr { + NET_DM_ATTR_UNSPEC = 0, + NET_DM_ATTR_ALERT_MODE = 1, + NET_DM_ATTR_PC = 2, + NET_DM_ATTR_SYMBOL = 3, + NET_DM_ATTR_IN_PORT = 4, + NET_DM_ATTR_TIMESTAMP = 5, + NET_DM_ATTR_PROTO = 6, + NET_DM_ATTR_PAYLOAD = 7, + NET_DM_ATTR_PAD = 8, + NET_DM_ATTR_TRUNC_LEN = 9, + NET_DM_ATTR_ORIG_LEN = 10, + NET_DM_ATTR_QUEUE_LEN = 11, + NET_DM_ATTR_STATS = 12, + NET_DM_ATTR_HW_STATS = 13, + NET_DM_ATTR_ORIGIN = 14, + NET_DM_ATTR_HW_TRAP_GROUP_NAME = 15, + NET_DM_ATTR_HW_TRAP_NAME = 16, + NET_DM_ATTR_HW_ENTRIES = 17, + NET_DM_ATTR_HW_ENTRY = 18, + NET_DM_ATTR_HW_TRAP_COUNT = 19, + NET_DM_ATTR_SW_DROPS = 20, + NET_DM_ATTR_HW_DROPS = 21, + NET_DM_ATTR_FLOW_ACTION_COOKIE = 22, + __NET_DM_ATTR_MAX = 23, + NET_DM_ATTR_MAX = 22, +}; + +enum net_dm_alert_mode { + NET_DM_ALERT_MODE_SUMMARY = 0, + NET_DM_ALERT_MODE_PACKET = 1, +}; + +enum { + NET_DM_ATTR_PORT_NETDEV_IFINDEX = 0, + NET_DM_ATTR_PORT_NETDEV_NAME = 1, + __NET_DM_ATTR_PORT_MAX = 2, + NET_DM_ATTR_PORT_MAX = 1, +}; + +enum { + NET_DM_ATTR_STATS_DROPPED = 0, + __NET_DM_ATTR_STATS_MAX = 1, + NET_DM_ATTR_STATS_MAX = 0, +}; + +enum net_dm_origin { + NET_DM_ORIGIN_SW = 0, + NET_DM_ORIGIN_HW = 1, +}; + +struct devlink_trap_metadata { + const char *trap_name; + const char *trap_group_name; + struct net_device *input_dev; + const struct flow_action_cookie *fa_cookie; + enum devlink_trap_type trap_type; +}; + +struct net_dm_stats { + u64 dropped; + struct u64_stats_sync syncp; +}; + +struct net_dm_hw_entry { + char trap_name[40]; + u32 count; +}; + +struct net_dm_hw_entries { + u32 num_entries; + struct net_dm_hw_entry entries[0]; +}; + +struct per_cpu_dm_data { + spinlock_t lock; + union { + struct sk_buff *skb; + struct net_dm_hw_entries *hw_entries; + }; + struct sk_buff_head drop_queue; + struct work_struct dm_alert_work; + struct timer_list send_timer; + struct net_dm_stats stats; +}; + +struct dm_hw_stat_delta { + struct net_device *dev; + long unsigned int last_rx; + struct list_head list; + struct callback_head rcu; + long unsigned int last_drop_val; +}; + +struct net_dm_alert_ops { + void (*kfree_skb_probe)(void *, struct sk_buff *, void *); + void (*napi_poll_probe)(void *, struct napi_struct *, int, int); + void (*work_item_func)(struct work_struct *); + 
void (*hw_work_item_func)(struct work_struct *); + void (*hw_trap_probe)(void *, const struct devlink *, struct sk_buff *, const struct devlink_trap_metadata *); +}; + +struct net_dm_skb_cb { + union { + struct devlink_trap_metadata *hw_metadata; + void *pc; + }; +}; + +enum ethtool_test_flags { + ETH_TEST_FL_OFFLINE = 1, + ETH_TEST_FL_FAILED = 2, + ETH_TEST_FL_EXTERNAL_LB = 4, + ETH_TEST_FL_EXTERNAL_LB_DONE = 8, +}; + +struct net_packet_attrs { + unsigned char *src; + unsigned char *dst; + u32 ip_src; + u32 ip_dst; + bool tcp; + u16 sport; + u16 dport; + int timeout; + int size; + int max_size; + u8 id; + u16 queue_mapping; +}; + +struct net_test_priv { + struct net_packet_attrs *packet; + struct packet_type pt; + struct completion comp; + int double_vlan; + int vlan_id; + int ok; +}; + +struct netsfhdr { + __be32 version; + __be64 magic; + u8 id; +} __attribute__((packed)); + +struct net_test { + char name[32]; + int (*fn)(struct net_device *); +}; + +struct update_classid_context { + u32 classid; + unsigned int batch; +}; + +struct rtnexthop { + short unsigned int rtnh_len; + unsigned char rtnh_flags; + unsigned char rtnh_hops; + int rtnh_ifindex; +}; + +struct lwtunnel_encap_ops { + int (*build_state)(struct net *, struct nlattr *, unsigned int, const void *, struct lwtunnel_state **, struct netlink_ext_ack *); + void (*destroy_state)(struct lwtunnel_state *); + int (*output)(struct net *, struct sock *, struct sk_buff *); + int (*input)(struct sk_buff *); + int (*fill_encap)(struct sk_buff *, struct lwtunnel_state *); + int (*get_encap_size)(struct lwtunnel_state *); + int (*cmp_encap)(struct lwtunnel_state *, struct lwtunnel_state *); + int (*xmit)(struct sk_buff *); + struct module *owner; +}; + +enum { + LWT_BPF_PROG_UNSPEC = 0, + LWT_BPF_PROG_FD = 1, + LWT_BPF_PROG_NAME = 2, + __LWT_BPF_PROG_MAX = 3, +}; + +enum { + LWT_BPF_UNSPEC = 0, + LWT_BPF_IN = 1, + LWT_BPF_OUT = 2, + LWT_BPF_XMIT = 3, + LWT_BPF_XMIT_HEADROOM = 4, + __LWT_BPF_MAX = 5, +}; + +enum { + LWTUNNEL_XMIT_DONE = 0, + LWTUNNEL_XMIT_CONTINUE = 1, +}; + +struct bpf_lwt_prog { + struct bpf_prog *prog; + char *name; +}; + +struct bpf_lwt { + struct bpf_lwt_prog in; + struct bpf_lwt_prog out; + struct bpf_lwt_prog xmit; + int family; +}; + +struct dst_cache_pcpu { + long unsigned int refresh_ts; + struct dst_entry *dst; + u32 cookie; + union { + struct in_addr in_saddr; + struct in6_addr in6_saddr; + }; +}; + +enum devlink_command { + DEVLINK_CMD_UNSPEC = 0, + DEVLINK_CMD_GET = 1, + DEVLINK_CMD_SET = 2, + DEVLINK_CMD_NEW = 3, + DEVLINK_CMD_DEL = 4, + DEVLINK_CMD_PORT_GET = 5, + DEVLINK_CMD_PORT_SET = 6, + DEVLINK_CMD_PORT_NEW = 7, + DEVLINK_CMD_PORT_DEL = 8, + DEVLINK_CMD_PORT_SPLIT = 9, + DEVLINK_CMD_PORT_UNSPLIT = 10, + DEVLINK_CMD_SB_GET = 11, + DEVLINK_CMD_SB_SET = 12, + DEVLINK_CMD_SB_NEW = 13, + DEVLINK_CMD_SB_DEL = 14, + DEVLINK_CMD_SB_POOL_GET = 15, + DEVLINK_CMD_SB_POOL_SET = 16, + DEVLINK_CMD_SB_POOL_NEW = 17, + DEVLINK_CMD_SB_POOL_DEL = 18, + DEVLINK_CMD_SB_PORT_POOL_GET = 19, + DEVLINK_CMD_SB_PORT_POOL_SET = 20, + DEVLINK_CMD_SB_PORT_POOL_NEW = 21, + DEVLINK_CMD_SB_PORT_POOL_DEL = 22, + DEVLINK_CMD_SB_TC_POOL_BIND_GET = 23, + DEVLINK_CMD_SB_TC_POOL_BIND_SET = 24, + DEVLINK_CMD_SB_TC_POOL_BIND_NEW = 25, + DEVLINK_CMD_SB_TC_POOL_BIND_DEL = 26, + DEVLINK_CMD_SB_OCC_SNAPSHOT = 27, + DEVLINK_CMD_SB_OCC_MAX_CLEAR = 28, + DEVLINK_CMD_ESWITCH_GET = 29, + DEVLINK_CMD_ESWITCH_SET = 30, + DEVLINK_CMD_DPIPE_TABLE_GET = 31, + DEVLINK_CMD_DPIPE_ENTRIES_GET = 32, + DEVLINK_CMD_DPIPE_HEADERS_GET = 33, + 
DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET = 34, + DEVLINK_CMD_RESOURCE_SET = 35, + DEVLINK_CMD_RESOURCE_DUMP = 36, + DEVLINK_CMD_RELOAD = 37, + DEVLINK_CMD_PARAM_GET = 38, + DEVLINK_CMD_PARAM_SET = 39, + DEVLINK_CMD_PARAM_NEW = 40, + DEVLINK_CMD_PARAM_DEL = 41, + DEVLINK_CMD_REGION_GET = 42, + DEVLINK_CMD_REGION_SET = 43, + DEVLINK_CMD_REGION_NEW = 44, + DEVLINK_CMD_REGION_DEL = 45, + DEVLINK_CMD_REGION_READ = 46, + DEVLINK_CMD_PORT_PARAM_GET = 47, + DEVLINK_CMD_PORT_PARAM_SET = 48, + DEVLINK_CMD_PORT_PARAM_NEW = 49, + DEVLINK_CMD_PORT_PARAM_DEL = 50, + DEVLINK_CMD_INFO_GET = 51, + DEVLINK_CMD_HEALTH_REPORTER_GET = 52, + DEVLINK_CMD_HEALTH_REPORTER_SET = 53, + DEVLINK_CMD_HEALTH_REPORTER_RECOVER = 54, + DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE = 55, + DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET = 56, + DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR = 57, + DEVLINK_CMD_FLASH_UPDATE = 58, + DEVLINK_CMD_FLASH_UPDATE_END = 59, + DEVLINK_CMD_FLASH_UPDATE_STATUS = 60, + DEVLINK_CMD_TRAP_GET = 61, + DEVLINK_CMD_TRAP_SET = 62, + DEVLINK_CMD_TRAP_NEW = 63, + DEVLINK_CMD_TRAP_DEL = 64, + DEVLINK_CMD_TRAP_GROUP_GET = 65, + DEVLINK_CMD_TRAP_GROUP_SET = 66, + DEVLINK_CMD_TRAP_GROUP_NEW = 67, + DEVLINK_CMD_TRAP_GROUP_DEL = 68, + DEVLINK_CMD_TRAP_POLICER_GET = 69, + DEVLINK_CMD_TRAP_POLICER_SET = 70, + DEVLINK_CMD_TRAP_POLICER_NEW = 71, + DEVLINK_CMD_TRAP_POLICER_DEL = 72, + DEVLINK_CMD_HEALTH_REPORTER_TEST = 73, + __DEVLINK_CMD_MAX = 74, + DEVLINK_CMD_MAX = 73, +}; + +enum devlink_eswitch_mode { + DEVLINK_ESWITCH_MODE_LEGACY = 0, + DEVLINK_ESWITCH_MODE_SWITCHDEV = 1, +}; + +enum { + DEVLINK_ATTR_STATS_RX_PACKETS = 0, + DEVLINK_ATTR_STATS_RX_BYTES = 1, + DEVLINK_ATTR_STATS_RX_DROPPED = 2, + __DEVLINK_ATTR_STATS_MAX = 3, + DEVLINK_ATTR_STATS_MAX = 2, +}; + +enum { + DEVLINK_FLASH_OVERWRITE_SETTINGS_BIT = 0, + DEVLINK_FLASH_OVERWRITE_IDENTIFIERS_BIT = 1, + __DEVLINK_FLASH_OVERWRITE_MAX_BIT = 2, + DEVLINK_FLASH_OVERWRITE_MAX_BIT = 1, +}; + +enum { + DEVLINK_ATTR_TRAP_METADATA_TYPE_IN_PORT = 0, + DEVLINK_ATTR_TRAP_METADATA_TYPE_FA_COOKIE = 1, +}; + +enum devlink_attr { + DEVLINK_ATTR_UNSPEC = 0, + DEVLINK_ATTR_BUS_NAME = 1, + DEVLINK_ATTR_DEV_NAME = 2, + DEVLINK_ATTR_PORT_INDEX = 3, + DEVLINK_ATTR_PORT_TYPE = 4, + DEVLINK_ATTR_PORT_DESIRED_TYPE = 5, + DEVLINK_ATTR_PORT_NETDEV_IFINDEX = 6, + DEVLINK_ATTR_PORT_NETDEV_NAME = 7, + DEVLINK_ATTR_PORT_IBDEV_NAME = 8, + DEVLINK_ATTR_PORT_SPLIT_COUNT = 9, + DEVLINK_ATTR_PORT_SPLIT_GROUP = 10, + DEVLINK_ATTR_SB_INDEX = 11, + DEVLINK_ATTR_SB_SIZE = 12, + DEVLINK_ATTR_SB_INGRESS_POOL_COUNT = 13, + DEVLINK_ATTR_SB_EGRESS_POOL_COUNT = 14, + DEVLINK_ATTR_SB_INGRESS_TC_COUNT = 15, + DEVLINK_ATTR_SB_EGRESS_TC_COUNT = 16, + DEVLINK_ATTR_SB_POOL_INDEX = 17, + DEVLINK_ATTR_SB_POOL_TYPE = 18, + DEVLINK_ATTR_SB_POOL_SIZE = 19, + DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE = 20, + DEVLINK_ATTR_SB_THRESHOLD = 21, + DEVLINK_ATTR_SB_TC_INDEX = 22, + DEVLINK_ATTR_SB_OCC_CUR = 23, + DEVLINK_ATTR_SB_OCC_MAX = 24, + DEVLINK_ATTR_ESWITCH_MODE = 25, + DEVLINK_ATTR_ESWITCH_INLINE_MODE = 26, + DEVLINK_ATTR_DPIPE_TABLES = 27, + DEVLINK_ATTR_DPIPE_TABLE = 28, + DEVLINK_ATTR_DPIPE_TABLE_NAME = 29, + DEVLINK_ATTR_DPIPE_TABLE_SIZE = 30, + DEVLINK_ATTR_DPIPE_TABLE_MATCHES = 31, + DEVLINK_ATTR_DPIPE_TABLE_ACTIONS = 32, + DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED = 33, + DEVLINK_ATTR_DPIPE_ENTRIES = 34, + DEVLINK_ATTR_DPIPE_ENTRY = 35, + DEVLINK_ATTR_DPIPE_ENTRY_INDEX = 36, + DEVLINK_ATTR_DPIPE_ENTRY_MATCH_VALUES = 37, + DEVLINK_ATTR_DPIPE_ENTRY_ACTION_VALUES = 38, + DEVLINK_ATTR_DPIPE_ENTRY_COUNTER = 39, + 
DEVLINK_ATTR_DPIPE_MATCH = 40, + DEVLINK_ATTR_DPIPE_MATCH_VALUE = 41, + DEVLINK_ATTR_DPIPE_MATCH_TYPE = 42, + DEVLINK_ATTR_DPIPE_ACTION = 43, + DEVLINK_ATTR_DPIPE_ACTION_VALUE = 44, + DEVLINK_ATTR_DPIPE_ACTION_TYPE = 45, + DEVLINK_ATTR_DPIPE_VALUE = 46, + DEVLINK_ATTR_DPIPE_VALUE_MASK = 47, + DEVLINK_ATTR_DPIPE_VALUE_MAPPING = 48, + DEVLINK_ATTR_DPIPE_HEADERS = 49, + DEVLINK_ATTR_DPIPE_HEADER = 50, + DEVLINK_ATTR_DPIPE_HEADER_NAME = 51, + DEVLINK_ATTR_DPIPE_HEADER_ID = 52, + DEVLINK_ATTR_DPIPE_HEADER_FIELDS = 53, + DEVLINK_ATTR_DPIPE_HEADER_GLOBAL = 54, + DEVLINK_ATTR_DPIPE_HEADER_INDEX = 55, + DEVLINK_ATTR_DPIPE_FIELD = 56, + DEVLINK_ATTR_DPIPE_FIELD_NAME = 57, + DEVLINK_ATTR_DPIPE_FIELD_ID = 58, + DEVLINK_ATTR_DPIPE_FIELD_BITWIDTH = 59, + DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE = 60, + DEVLINK_ATTR_PAD = 61, + DEVLINK_ATTR_ESWITCH_ENCAP_MODE = 62, + DEVLINK_ATTR_RESOURCE_LIST = 63, + DEVLINK_ATTR_RESOURCE = 64, + DEVLINK_ATTR_RESOURCE_NAME = 65, + DEVLINK_ATTR_RESOURCE_ID = 66, + DEVLINK_ATTR_RESOURCE_SIZE = 67, + DEVLINK_ATTR_RESOURCE_SIZE_NEW = 68, + DEVLINK_ATTR_RESOURCE_SIZE_VALID = 69, + DEVLINK_ATTR_RESOURCE_SIZE_MIN = 70, + DEVLINK_ATTR_RESOURCE_SIZE_MAX = 71, + DEVLINK_ATTR_RESOURCE_SIZE_GRAN = 72, + DEVLINK_ATTR_RESOURCE_UNIT = 73, + DEVLINK_ATTR_RESOURCE_OCC = 74, + DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID = 75, + DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS = 76, + DEVLINK_ATTR_PORT_FLAVOUR = 77, + DEVLINK_ATTR_PORT_NUMBER = 78, + DEVLINK_ATTR_PORT_SPLIT_SUBPORT_NUMBER = 79, + DEVLINK_ATTR_PARAM = 80, + DEVLINK_ATTR_PARAM_NAME = 81, + DEVLINK_ATTR_PARAM_GENERIC = 82, + DEVLINK_ATTR_PARAM_TYPE = 83, + DEVLINK_ATTR_PARAM_VALUES_LIST = 84, + DEVLINK_ATTR_PARAM_VALUE = 85, + DEVLINK_ATTR_PARAM_VALUE_DATA = 86, + DEVLINK_ATTR_PARAM_VALUE_CMODE = 87, + DEVLINK_ATTR_REGION_NAME = 88, + DEVLINK_ATTR_REGION_SIZE = 89, + DEVLINK_ATTR_REGION_SNAPSHOTS = 90, + DEVLINK_ATTR_REGION_SNAPSHOT = 91, + DEVLINK_ATTR_REGION_SNAPSHOT_ID = 92, + DEVLINK_ATTR_REGION_CHUNKS = 93, + DEVLINK_ATTR_REGION_CHUNK = 94, + DEVLINK_ATTR_REGION_CHUNK_DATA = 95, + DEVLINK_ATTR_REGION_CHUNK_ADDR = 96, + DEVLINK_ATTR_REGION_CHUNK_LEN = 97, + DEVLINK_ATTR_INFO_DRIVER_NAME = 98, + DEVLINK_ATTR_INFO_SERIAL_NUMBER = 99, + DEVLINK_ATTR_INFO_VERSION_FIXED = 100, + DEVLINK_ATTR_INFO_VERSION_RUNNING = 101, + DEVLINK_ATTR_INFO_VERSION_STORED = 102, + DEVLINK_ATTR_INFO_VERSION_NAME = 103, + DEVLINK_ATTR_INFO_VERSION_VALUE = 104, + DEVLINK_ATTR_SB_POOL_CELL_SIZE = 105, + DEVLINK_ATTR_FMSG = 106, + DEVLINK_ATTR_FMSG_OBJ_NEST_START = 107, + DEVLINK_ATTR_FMSG_PAIR_NEST_START = 108, + DEVLINK_ATTR_FMSG_ARR_NEST_START = 109, + DEVLINK_ATTR_FMSG_NEST_END = 110, + DEVLINK_ATTR_FMSG_OBJ_NAME = 111, + DEVLINK_ATTR_FMSG_OBJ_VALUE_TYPE = 112, + DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA = 113, + DEVLINK_ATTR_HEALTH_REPORTER = 114, + DEVLINK_ATTR_HEALTH_REPORTER_NAME = 115, + DEVLINK_ATTR_HEALTH_REPORTER_STATE = 116, + DEVLINK_ATTR_HEALTH_REPORTER_ERR_COUNT = 117, + DEVLINK_ATTR_HEALTH_REPORTER_RECOVER_COUNT = 118, + DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS = 119, + DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD = 120, + DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER = 121, + DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME = 122, + DEVLINK_ATTR_FLASH_UPDATE_COMPONENT = 123, + DEVLINK_ATTR_FLASH_UPDATE_STATUS_MSG = 124, + DEVLINK_ATTR_FLASH_UPDATE_STATUS_DONE = 125, + DEVLINK_ATTR_FLASH_UPDATE_STATUS_TOTAL = 126, + DEVLINK_ATTR_PORT_PCI_PF_NUMBER = 127, + DEVLINK_ATTR_PORT_PCI_VF_NUMBER = 128, + DEVLINK_ATTR_STATS = 129, + DEVLINK_ATTR_TRAP_NAME = 130, + DEVLINK_ATTR_TRAP_ACTION 
= 131, + DEVLINK_ATTR_TRAP_TYPE = 132, + DEVLINK_ATTR_TRAP_GENERIC = 133, + DEVLINK_ATTR_TRAP_METADATA = 134, + DEVLINK_ATTR_TRAP_GROUP_NAME = 135, + DEVLINK_ATTR_RELOAD_FAILED = 136, + DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS_NS = 137, + DEVLINK_ATTR_NETNS_FD = 138, + DEVLINK_ATTR_NETNS_PID = 139, + DEVLINK_ATTR_NETNS_ID = 140, + DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP = 141, + DEVLINK_ATTR_TRAP_POLICER_ID = 142, + DEVLINK_ATTR_TRAP_POLICER_RATE = 143, + DEVLINK_ATTR_TRAP_POLICER_BURST = 144, + DEVLINK_ATTR_PORT_FUNCTION = 145, + DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER = 146, + DEVLINK_ATTR_PORT_LANES = 147, + DEVLINK_ATTR_PORT_SPLITTABLE = 148, + DEVLINK_ATTR_PORT_EXTERNAL = 149, + DEVLINK_ATTR_PORT_CONTROLLER_NUMBER = 150, + DEVLINK_ATTR_FLASH_UPDATE_STATUS_TIMEOUT = 151, + DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK = 152, + DEVLINK_ATTR_RELOAD_ACTION = 153, + DEVLINK_ATTR_RELOAD_ACTIONS_PERFORMED = 154, + DEVLINK_ATTR_RELOAD_LIMITS = 155, + DEVLINK_ATTR_DEV_STATS = 156, + DEVLINK_ATTR_RELOAD_STATS = 157, + DEVLINK_ATTR_RELOAD_STATS_ENTRY = 158, + DEVLINK_ATTR_RELOAD_STATS_LIMIT = 159, + DEVLINK_ATTR_RELOAD_STATS_VALUE = 160, + DEVLINK_ATTR_REMOTE_RELOAD_STATS = 161, + DEVLINK_ATTR_RELOAD_ACTION_INFO = 162, + DEVLINK_ATTR_RELOAD_ACTION_STATS = 163, + DEVLINK_ATTR_PORT_PCI_SF_NUMBER = 164, + __DEVLINK_ATTR_MAX = 165, + DEVLINK_ATTR_MAX = 164, +}; + +enum devlink_dpipe_match_type { + DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0, +}; + +enum devlink_dpipe_action_type { + DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY = 0, +}; + +enum devlink_dpipe_field_ethernet_id { + DEVLINK_DPIPE_FIELD_ETHERNET_DST_MAC = 0, +}; + +enum devlink_dpipe_field_ipv4_id { + DEVLINK_DPIPE_FIELD_IPV4_DST_IP = 0, +}; + +enum devlink_dpipe_field_ipv6_id { + DEVLINK_DPIPE_FIELD_IPV6_DST_IP = 0, +}; + +enum devlink_dpipe_header_id { + DEVLINK_DPIPE_HEADER_ETHERNET = 0, + DEVLINK_DPIPE_HEADER_IPV4 = 1, + DEVLINK_DPIPE_HEADER_IPV6 = 2, +}; + +enum devlink_resource_unit { + DEVLINK_RESOURCE_UNIT_ENTRY = 0, +}; + +enum devlink_port_function_attr { + DEVLINK_PORT_FUNCTION_ATTR_UNSPEC = 0, + DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR = 1, + DEVLINK_PORT_FN_ATTR_STATE = 2, + DEVLINK_PORT_FN_ATTR_OPSTATE = 3, + __DEVLINK_PORT_FUNCTION_ATTR_MAX = 4, + DEVLINK_PORT_FUNCTION_ATTR_MAX = 3, +}; + +struct devlink_dpipe_match { + enum devlink_dpipe_match_type type; + unsigned int header_index; + struct devlink_dpipe_header *header; + unsigned int field_id; +}; + +struct devlink_dpipe_action { + enum devlink_dpipe_action_type type; + unsigned int header_index; + struct devlink_dpipe_header *header; + unsigned int field_id; +}; + +struct devlink_dpipe_value { + union { + struct devlink_dpipe_action *action; + struct devlink_dpipe_match *match; + }; + unsigned int mapping_value; + bool mapping_valid; + unsigned int value_size; + void *value; + void *mask; +}; + +struct devlink_dpipe_entry { + u64 index; + struct devlink_dpipe_value *match_values; + unsigned int match_values_count; + struct devlink_dpipe_value *action_values; + unsigned int action_values_count; + u64 counter; + bool counter_valid; +}; + +struct devlink_dpipe_dump_ctx { + struct genl_info *info; + enum devlink_command cmd; + struct sk_buff *skb; + struct nlattr *nest; + void *hdr; +}; + +struct devlink_dpipe_table_ops; + +struct devlink_dpipe_table { + void *priv; + struct list_head list; + const char *name; + bool counters_enabled; + bool counter_control_extern; + bool resource_valid; + u64 resource_id; + u64 resource_units; + struct devlink_dpipe_table_ops *table_ops; + struct callback_head rcu; 
+}; + +struct devlink_dpipe_table_ops { + int (*actions_dump)(void *, struct sk_buff *); + int (*matches_dump)(void *, struct sk_buff *); + int (*entries_dump)(void *, bool, struct devlink_dpipe_dump_ctx *); + int (*counters_set_update)(void *, bool); + u64 (*size_get)(void *); +}; + +struct devlink_resource_size_params { + u64 size_min; + u64 size_max; + u64 size_granularity; + enum devlink_resource_unit unit; +}; + +typedef u64 devlink_resource_occ_get_t(void *); + +struct devlink_resource { + const char *name; + u64 id; + u64 size; + u64 size_new; + bool size_valid; + struct devlink_resource *parent; + struct devlink_resource_size_params size_params; + struct list_head list; + struct list_head resource_list; + devlink_resource_occ_get_t *occ_get; + void *occ_get_priv; +}; + +enum devlink_param_type { + DEVLINK_PARAM_TYPE_U8 = 0, + DEVLINK_PARAM_TYPE_U16 = 1, + DEVLINK_PARAM_TYPE_U32 = 2, + DEVLINK_PARAM_TYPE_STRING = 3, + DEVLINK_PARAM_TYPE_BOOL = 4, +}; + +struct devlink_flash_notify { + const char *status_msg; + const char *component; + long unsigned int done; + long unsigned int total; + long unsigned int timeout; +}; + +struct devlink_param { + u32 id; + const char *name; + bool generic; + enum devlink_param_type type; + long unsigned int supported_cmodes; + int (*get)(struct devlink *, u32, struct devlink_param_gset_ctx *); + int (*set)(struct devlink *, u32, struct devlink_param_gset_ctx *); + int (*validate)(struct devlink *, u32, union devlink_param_value, struct netlink_ext_ack *); +}; + +struct devlink_param_item { + struct list_head list; + const struct devlink_param *param; + union devlink_param_value driverinit_value; + bool driverinit_value_valid; + bool published; +}; + +enum devlink_param_generic_id { + DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET = 0, + DEVLINK_PARAM_GENERIC_ID_MAX_MACS = 1, + DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV = 2, + DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT = 3, + DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI = 4, + DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX = 5, + DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN = 6, + DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY = 7, + DEVLINK_PARAM_GENERIC_ID_RESET_DEV_ON_DRV_PROBE = 8, + DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE = 9, + DEVLINK_PARAM_GENERIC_ID_ENABLE_REMOTE_DEV_RESET = 10, + __DEVLINK_PARAM_GENERIC_ID_MAX = 11, + DEVLINK_PARAM_GENERIC_ID_MAX = 10, +}; + +struct devlink_region_ops { + const char *name; + void (*destructor)(const void *); + int (*snapshot)(struct devlink *, const struct devlink_region_ops *, struct netlink_ext_ack *, u8 **); + void *priv; +}; + +struct devlink_port_region_ops { + const char *name; + void (*destructor)(const void *); + int (*snapshot)(struct devlink_port *, const struct devlink_port_region_ops *, struct netlink_ext_ack *, u8 **); + void *priv; +}; + +enum devlink_health_reporter_state { + DEVLINK_HEALTH_REPORTER_STATE_HEALTHY = 0, + DEVLINK_HEALTH_REPORTER_STATE_ERROR = 1, +}; + +struct devlink_health_reporter; + +struct devlink_fmsg; + +struct devlink_health_reporter_ops { + char *name; + int (*recover)(struct devlink_health_reporter *, void *, struct netlink_ext_ack *); + int (*dump)(struct devlink_health_reporter *, struct devlink_fmsg *, void *, struct netlink_ext_ack *); + int (*diagnose)(struct devlink_health_reporter *, struct devlink_fmsg *, struct netlink_ext_ack *); + int (*test)(struct devlink_health_reporter *, struct netlink_ext_ack *); +}; + +struct devlink_health_reporter { + struct list_head list; + void *priv; + const struct devlink_health_reporter_ops *ops; + struct devlink 
*devlink; + struct devlink_port *devlink_port; + struct devlink_fmsg *dump_fmsg; + struct mutex dump_lock; + u64 graceful_period; + bool auto_recover; + bool auto_dump; + u8 health_state; + u64 dump_ts; + u64 dump_real_ts; + u64 error_count; + u64 recovery_count; + u64 last_recovery_ts; + refcount_t refcount; +}; + +struct devlink_fmsg { + struct list_head item_list; + bool putting_binary; +}; + +enum devlink_trap_generic_id { + DEVLINK_TRAP_GENERIC_ID_SMAC_MC = 0, + DEVLINK_TRAP_GENERIC_ID_VLAN_TAG_MISMATCH = 1, + DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER = 2, + DEVLINK_TRAP_GENERIC_ID_INGRESS_STP_FILTER = 3, + DEVLINK_TRAP_GENERIC_ID_EMPTY_TX_LIST = 4, + DEVLINK_TRAP_GENERIC_ID_PORT_LOOPBACK_FILTER = 5, + DEVLINK_TRAP_GENERIC_ID_BLACKHOLE_ROUTE = 6, + DEVLINK_TRAP_GENERIC_ID_TTL_ERROR = 7, + DEVLINK_TRAP_GENERIC_ID_TAIL_DROP = 8, + DEVLINK_TRAP_GENERIC_ID_NON_IP_PACKET = 9, + DEVLINK_TRAP_GENERIC_ID_UC_DIP_MC_DMAC = 10, + DEVLINK_TRAP_GENERIC_ID_DIP_LB = 11, + DEVLINK_TRAP_GENERIC_ID_SIP_MC = 12, + DEVLINK_TRAP_GENERIC_ID_SIP_LB = 13, + DEVLINK_TRAP_GENERIC_ID_CORRUPTED_IP_HDR = 14, + DEVLINK_TRAP_GENERIC_ID_IPV4_SIP_BC = 15, + DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_RESERVED_SCOPE = 16, + DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE = 17, + DEVLINK_TRAP_GENERIC_ID_MTU_ERROR = 18, + DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH = 19, + DEVLINK_TRAP_GENERIC_ID_RPF = 20, + DEVLINK_TRAP_GENERIC_ID_REJECT_ROUTE = 21, + DEVLINK_TRAP_GENERIC_ID_IPV4_LPM_UNICAST_MISS = 22, + DEVLINK_TRAP_GENERIC_ID_IPV6_LPM_UNICAST_MISS = 23, + DEVLINK_TRAP_GENERIC_ID_NON_ROUTABLE = 24, + DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR = 25, + DEVLINK_TRAP_GENERIC_ID_OVERLAY_SMAC_MC = 26, + DEVLINK_TRAP_GENERIC_ID_INGRESS_FLOW_ACTION_DROP = 27, + DEVLINK_TRAP_GENERIC_ID_EGRESS_FLOW_ACTION_DROP = 28, + DEVLINK_TRAP_GENERIC_ID_STP = 29, + DEVLINK_TRAP_GENERIC_ID_LACP = 30, + DEVLINK_TRAP_GENERIC_ID_LLDP = 31, + DEVLINK_TRAP_GENERIC_ID_IGMP_QUERY = 32, + DEVLINK_TRAP_GENERIC_ID_IGMP_V1_REPORT = 33, + DEVLINK_TRAP_GENERIC_ID_IGMP_V2_REPORT = 34, + DEVLINK_TRAP_GENERIC_ID_IGMP_V3_REPORT = 35, + DEVLINK_TRAP_GENERIC_ID_IGMP_V2_LEAVE = 36, + DEVLINK_TRAP_GENERIC_ID_MLD_QUERY = 37, + DEVLINK_TRAP_GENERIC_ID_MLD_V1_REPORT = 38, + DEVLINK_TRAP_GENERIC_ID_MLD_V2_REPORT = 39, + DEVLINK_TRAP_GENERIC_ID_MLD_V1_DONE = 40, + DEVLINK_TRAP_GENERIC_ID_IPV4_DHCP = 41, + DEVLINK_TRAP_GENERIC_ID_IPV6_DHCP = 42, + DEVLINK_TRAP_GENERIC_ID_ARP_REQUEST = 43, + DEVLINK_TRAP_GENERIC_ID_ARP_RESPONSE = 44, + DEVLINK_TRAP_GENERIC_ID_ARP_OVERLAY = 45, + DEVLINK_TRAP_GENERIC_ID_IPV6_NEIGH_SOLICIT = 46, + DEVLINK_TRAP_GENERIC_ID_IPV6_NEIGH_ADVERT = 47, + DEVLINK_TRAP_GENERIC_ID_IPV4_BFD = 48, + DEVLINK_TRAP_GENERIC_ID_IPV6_BFD = 49, + DEVLINK_TRAP_GENERIC_ID_IPV4_OSPF = 50, + DEVLINK_TRAP_GENERIC_ID_IPV6_OSPF = 51, + DEVLINK_TRAP_GENERIC_ID_IPV4_BGP = 52, + DEVLINK_TRAP_GENERIC_ID_IPV6_BGP = 53, + DEVLINK_TRAP_GENERIC_ID_IPV4_VRRP = 54, + DEVLINK_TRAP_GENERIC_ID_IPV6_VRRP = 55, + DEVLINK_TRAP_GENERIC_ID_IPV4_PIM = 56, + DEVLINK_TRAP_GENERIC_ID_IPV6_PIM = 57, + DEVLINK_TRAP_GENERIC_ID_UC_LB = 58, + DEVLINK_TRAP_GENERIC_ID_LOCAL_ROUTE = 59, + DEVLINK_TRAP_GENERIC_ID_EXTERNAL_ROUTE = 60, + DEVLINK_TRAP_GENERIC_ID_IPV6_UC_DIP_LINK_LOCAL_SCOPE = 61, + DEVLINK_TRAP_GENERIC_ID_IPV6_DIP_ALL_NODES = 62, + DEVLINK_TRAP_GENERIC_ID_IPV6_DIP_ALL_ROUTERS = 63, + DEVLINK_TRAP_GENERIC_ID_IPV6_ROUTER_SOLICIT = 64, + DEVLINK_TRAP_GENERIC_ID_IPV6_ROUTER_ADVERT = 65, + DEVLINK_TRAP_GENERIC_ID_IPV6_REDIRECT = 66, + DEVLINK_TRAP_GENERIC_ID_IPV4_ROUTER_ALERT = 67, + 
DEVLINK_TRAP_GENERIC_ID_IPV6_ROUTER_ALERT = 68, + DEVLINK_TRAP_GENERIC_ID_PTP_EVENT = 69, + DEVLINK_TRAP_GENERIC_ID_PTP_GENERAL = 70, + DEVLINK_TRAP_GENERIC_ID_FLOW_ACTION_SAMPLE = 71, + DEVLINK_TRAP_GENERIC_ID_FLOW_ACTION_TRAP = 72, + DEVLINK_TRAP_GENERIC_ID_EARLY_DROP = 73, + DEVLINK_TRAP_GENERIC_ID_VXLAN_PARSING = 74, + DEVLINK_TRAP_GENERIC_ID_LLC_SNAP_PARSING = 75, + DEVLINK_TRAP_GENERIC_ID_VLAN_PARSING = 76, + DEVLINK_TRAP_GENERIC_ID_PPPOE_PPP_PARSING = 77, + DEVLINK_TRAP_GENERIC_ID_MPLS_PARSING = 78, + DEVLINK_TRAP_GENERIC_ID_ARP_PARSING = 79, + DEVLINK_TRAP_GENERIC_ID_IP_1_PARSING = 80, + DEVLINK_TRAP_GENERIC_ID_IP_N_PARSING = 81, + DEVLINK_TRAP_GENERIC_ID_GRE_PARSING = 82, + DEVLINK_TRAP_GENERIC_ID_UDP_PARSING = 83, + DEVLINK_TRAP_GENERIC_ID_TCP_PARSING = 84, + DEVLINK_TRAP_GENERIC_ID_IPSEC_PARSING = 85, + DEVLINK_TRAP_GENERIC_ID_SCTP_PARSING = 86, + DEVLINK_TRAP_GENERIC_ID_DCCP_PARSING = 87, + DEVLINK_TRAP_GENERIC_ID_GTP_PARSING = 88, + DEVLINK_TRAP_GENERIC_ID_ESP_PARSING = 89, + DEVLINK_TRAP_GENERIC_ID_BLACKHOLE_NEXTHOP = 90, + DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER = 91, + __DEVLINK_TRAP_GENERIC_ID_MAX = 92, + DEVLINK_TRAP_GENERIC_ID_MAX = 91, +}; + +enum devlink_trap_group_generic_id { + DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS = 0, + DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS = 1, + DEVLINK_TRAP_GROUP_GENERIC_ID_L3_EXCEPTIONS = 2, + DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS = 3, + DEVLINK_TRAP_GROUP_GENERIC_ID_TUNNEL_DROPS = 4, + DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_DROPS = 5, + DEVLINK_TRAP_GROUP_GENERIC_ID_STP = 6, + DEVLINK_TRAP_GROUP_GENERIC_ID_LACP = 7, + DEVLINK_TRAP_GROUP_GENERIC_ID_LLDP = 8, + DEVLINK_TRAP_GROUP_GENERIC_ID_MC_SNOOPING = 9, + DEVLINK_TRAP_GROUP_GENERIC_ID_DHCP = 10, + DEVLINK_TRAP_GROUP_GENERIC_ID_NEIGH_DISCOVERY = 11, + DEVLINK_TRAP_GROUP_GENERIC_ID_BFD = 12, + DEVLINK_TRAP_GROUP_GENERIC_ID_OSPF = 13, + DEVLINK_TRAP_GROUP_GENERIC_ID_BGP = 14, + DEVLINK_TRAP_GROUP_GENERIC_ID_VRRP = 15, + DEVLINK_TRAP_GROUP_GENERIC_ID_PIM = 16, + DEVLINK_TRAP_GROUP_GENERIC_ID_UC_LB = 17, + DEVLINK_TRAP_GROUP_GENERIC_ID_LOCAL_DELIVERY = 18, + DEVLINK_TRAP_GROUP_GENERIC_ID_EXTERNAL_DELIVERY = 19, + DEVLINK_TRAP_GROUP_GENERIC_ID_IPV6 = 20, + DEVLINK_TRAP_GROUP_GENERIC_ID_PTP_EVENT = 21, + DEVLINK_TRAP_GROUP_GENERIC_ID_PTP_GENERAL = 22, + DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_SAMPLE = 23, + DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_TRAP = 24, + DEVLINK_TRAP_GROUP_GENERIC_ID_PARSER_ERROR_DROPS = 25, + __DEVLINK_TRAP_GROUP_GENERIC_ID_MAX = 26, + DEVLINK_TRAP_GROUP_GENERIC_ID_MAX = 25, +}; + +struct devlink_info_req { + struct sk_buff *msg; +}; + +struct trace_event_raw_devlink_hwmsg { + struct trace_entry ent; + u32 __data_loc_bus_name; + u32 __data_loc_dev_name; + u32 __data_loc_driver_name; + bool incoming; + long unsigned int type; + u32 __data_loc_buf; + size_t len; + char __data[0]; +}; + +struct trace_event_raw_devlink_hwerr { + struct trace_entry ent; + u32 __data_loc_bus_name; + u32 __data_loc_dev_name; + u32 __data_loc_driver_name; + int err; + u32 __data_loc_msg; + char __data[0]; +}; + +struct trace_event_raw_devlink_health_report { + struct trace_entry ent; + u32 __data_loc_bus_name; + u32 __data_loc_dev_name; + u32 __data_loc_driver_name; + u32 __data_loc_reporter_name; + u32 __data_loc_msg; + char __data[0]; +}; + +struct trace_event_raw_devlink_health_recover_aborted { + struct trace_entry ent; + u32 __data_loc_bus_name; + u32 __data_loc_dev_name; + u32 __data_loc_driver_name; + u32 __data_loc_reporter_name; + bool health_state; + u64 time_since_last_recover; + char __data[0]; +}; + 
+struct trace_event_raw_devlink_health_reporter_state_update { + struct trace_entry ent; + u32 __data_loc_bus_name; + u32 __data_loc_dev_name; + u32 __data_loc_driver_name; + u32 __data_loc_reporter_name; + u8 new_state; + char __data[0]; +}; + +struct trace_event_raw_devlink_trap_report { + struct trace_entry ent; + u32 __data_loc_bus_name; + u32 __data_loc_dev_name; + u32 __data_loc_driver_name; + u32 __data_loc_trap_name; + u32 __data_loc_trap_group_name; + u32 __data_loc_input_dev_name; + char __data[0]; +}; + +struct trace_event_data_offsets_devlink_hwmsg { + u32 bus_name; + u32 dev_name; + u32 driver_name; + u32 buf; +}; + +struct trace_event_data_offsets_devlink_hwerr { + u32 bus_name; + u32 dev_name; + u32 driver_name; + u32 msg; +}; + +struct trace_event_data_offsets_devlink_health_report { + u32 bus_name; + u32 dev_name; + u32 driver_name; + u32 reporter_name; + u32 msg; +}; + +struct trace_event_data_offsets_devlink_health_recover_aborted { + u32 bus_name; + u32 dev_name; + u32 driver_name; + u32 reporter_name; +}; + +struct trace_event_data_offsets_devlink_health_reporter_state_update { + u32 bus_name; + u32 dev_name; + u32 driver_name; + u32 reporter_name; +}; + +struct trace_event_data_offsets_devlink_trap_report { + u32 bus_name; + u32 dev_name; + u32 driver_name; + u32 trap_name; + u32 trap_group_name; + u32 input_dev_name; +}; + +typedef void (*btf_trace_devlink_hwmsg)(void *, const struct devlink *, bool, long unsigned int, const u8 *, size_t); + +typedef void (*btf_trace_devlink_hwerr)(void *, const struct devlink *, int, const char *); + +typedef void (*btf_trace_devlink_health_report)(void *, const struct devlink *, const char *, const char *); + +typedef void (*btf_trace_devlink_health_recover_aborted)(void *, const struct devlink *, const char *, bool, u64); + +typedef void (*btf_trace_devlink_health_reporter_state_update)(void *, const struct devlink *, const char *, bool); + +typedef void (*btf_trace_devlink_trap_report)(void *, const struct devlink *, struct sk_buff *, const struct devlink_trap_metadata *); + +struct devlink_sb { + struct list_head list; + unsigned int index; + u32 size; + u16 ingress_pools_count; + u16 egress_pools_count; + u16 ingress_tc_count; + u16 egress_tc_count; +}; + +struct devlink_region { + struct devlink *devlink; + struct devlink_port *port; + struct list_head list; + union { + const struct devlink_region_ops *ops; + const struct devlink_port_region_ops *port_ops; + }; + struct list_head snapshot_list; + u32 max_snapshots; + u32 cur_snapshots; + u64 size; +}; + +struct devlink_snapshot { + struct list_head list; + struct devlink_region *region; + u8 *data; + u32 id; +}; + +enum devlink_multicast_groups { + DEVLINK_MCGRP_CONFIG = 0, +}; + +struct devlink_reload_combination { + enum devlink_reload_action action; + enum devlink_reload_limit limit; +}; + +struct devlink_fmsg_item { + struct list_head list; + int attrtype; + u8 nla_type; + u16 len; + int value[0]; +}; + +struct devlink_stats { + u64 rx_bytes; + u64 rx_packets; + struct u64_stats_sync syncp; +}; + +struct devlink_trap_policer_item { + const struct devlink_trap_policer *policer; + u64 rate; + u64 burst; + struct list_head list; +}; + +struct devlink_trap_group_item { + const struct devlink_trap_group *group; + struct devlink_trap_policer_item *policer_item; + struct list_head list; + struct devlink_stats *stats; +}; + +struct devlink_trap_item { + const struct devlink_trap *trap; + struct devlink_trap_group_item *group_item; + struct list_head list; + enum 
devlink_trap_action action; + struct devlink_stats *stats; + void *priv; +}; + +struct gro_cell; + +struct gro_cells { + struct gro_cell *cells; +}; + +struct gro_cell { + struct sk_buff_head napi_skbs; + struct napi_struct napi; +}; + +enum __sk_action { + __SK_DROP = 0, + __SK_PASS = 1, + __SK_REDIRECT = 2, + __SK_NONE = 3, +}; + +enum sk_psock_state_bits { + SK_PSOCK_TX_ENABLED = 0, +}; + +struct sk_psock_link { + struct list_head list; + struct bpf_map *map; + void *link_raw; +}; + +struct bpf_stab { + struct bpf_map map; + struct sock **sks; + struct sk_psock_progs progs; + raw_spinlock_t lock; + long: 32; + long: 64; + long: 64; +}; + +typedef u64 (*btf_bpf_sock_map_update)(struct bpf_sock_ops_kern *, struct bpf_map *, void *, u64); + +typedef u64 (*btf_bpf_sk_redirect_map)(struct sk_buff *, struct bpf_map *, u32, u64); + +typedef u64 (*btf_bpf_msg_redirect_map)(struct sk_msg *, struct bpf_map *, u32, u64); + +struct sock_map_seq_info { + struct bpf_map *map; + struct sock *sk; + u32 index; +}; + +struct bpf_iter__sockmap { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_map *map; + }; + union { + void *key; + }; + union { + struct sock *sk; + }; +}; + +struct bpf_shtab_elem { + struct callback_head rcu; + u32 hash; + struct sock *sk; + struct hlist_node node; + u8 key[0]; +}; + +struct bpf_shtab_bucket { + struct hlist_head head; + raw_spinlock_t lock; +}; + +struct bpf_shtab { + struct bpf_map map; + struct bpf_shtab_bucket *buckets; + u32 buckets_num; + u32 elem_size; + struct sk_psock_progs progs; + atomic_t count; + long: 32; + long: 64; +}; + +typedef u64 (*btf_bpf_sock_hash_update)(struct bpf_sock_ops_kern *, struct bpf_map *, void *, u64); + +typedef u64 (*btf_bpf_sk_redirect_hash)(struct sk_buff *, struct bpf_map *, void *, u64); + +typedef u64 (*btf_bpf_msg_redirect_hash)(struct sk_msg *, struct bpf_map *, void *, u64); + +struct sock_hash_seq_info { + struct bpf_map *map; + struct bpf_shtab *htab; + u32 bucket_id; +}; + +enum { + SK_DIAG_BPF_STORAGE_REQ_NONE = 0, + SK_DIAG_BPF_STORAGE_REQ_MAP_FD = 1, + __SK_DIAG_BPF_STORAGE_REQ_MAX = 2, +}; + +enum { + SK_DIAG_BPF_STORAGE_REP_NONE = 0, + SK_DIAG_BPF_STORAGE = 1, + __SK_DIAG_BPF_STORAGE_REP_MAX = 2, +}; + +enum { + SK_DIAG_BPF_STORAGE_NONE = 0, + SK_DIAG_BPF_STORAGE_PAD = 1, + SK_DIAG_BPF_STORAGE_MAP_ID = 2, + SK_DIAG_BPF_STORAGE_MAP_VALUE = 3, + __SK_DIAG_BPF_STORAGE_MAX = 4, +}; + +typedef u64 (*btf_bpf_sk_storage_get)(struct bpf_map *, struct sock *, void *, u64); + +typedef u64 (*btf_bpf_sk_storage_delete)(struct bpf_map *, struct sock *); + +typedef u64 (*btf_bpf_sk_storage_get_tracing)(struct bpf_map *, struct sock *, void *, u64); + +typedef u64 (*btf_bpf_sk_storage_delete_tracing)(struct bpf_map *, struct sock *); + +struct bpf_sk_storage_diag { + u32 nr_maps; + struct bpf_map *maps[0]; +}; + +struct bpf_iter_seq_sk_storage_map_info { + struct bpf_map *map; + unsigned int bucket_id; + unsigned int skip_elems; +}; + +struct bpf_iter__bpf_sk_storage_map { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_map *map; + }; + union { + struct sock *sk; + }; + union { + void *value; + }; +}; + +struct compat_cmsghdr { + compat_size_t cmsg_len; + compat_int_t cmsg_level; + compat_int_t cmsg_type; +}; + +struct nvmem_cell___2; + +struct fch_hdr { + __u8 daddr[6]; + __u8 saddr[6]; +}; + +struct fcllc { + __u8 dsap; + __u8 ssap; + __u8 llc; + __u8 protid[3]; + __be16 ethertype; +}; + +struct fddi_8022_1_hdr { + __u8 dsap; + __u8 ssap; + __u8 ctrl; +}; + +struct fddi_8022_2_hdr { + __u8 
dsap; + __u8 ssap; + __u8 ctrl_1; + __u8 ctrl_2; +}; + +struct fddi_snap_hdr { + __u8 dsap; + __u8 ssap; + __u8 ctrl; + __u8 oui[3]; + __be16 ethertype; +}; + +struct fddihdr { + __u8 fc; + __u8 daddr[6]; + __u8 saddr[6]; + union { + struct fddi_8022_1_hdr llc_8022_1; + struct fddi_8022_2_hdr llc_8022_2; + struct fddi_snap_hdr llc_snap; + } hdr; +} __attribute__((packed)); + +enum macvlan_mode { + MACVLAN_MODE_PRIVATE = 1, + MACVLAN_MODE_VEPA = 2, + MACVLAN_MODE_BRIDGE = 4, + MACVLAN_MODE_PASSTHRU = 8, + MACVLAN_MODE_SOURCE = 16, +}; + +struct tc_ratespec { + unsigned char cell_log; + __u8 linklayer; + short unsigned int overhead; + short int cell_align; + short unsigned int mpu; + __u32 rate; +}; + +struct tc_prio_qopt { + int bands; + __u8 priomap[16]; +}; + +enum { + TCA_UNSPEC = 0, + TCA_KIND = 1, + TCA_OPTIONS = 2, + TCA_STATS = 3, + TCA_XSTATS = 4, + TCA_RATE = 5, + TCA_FCNT = 6, + TCA_STATS2 = 7, + TCA_STAB = 8, + TCA_PAD = 9, + TCA_DUMP_INVISIBLE = 10, + TCA_CHAIN = 11, + TCA_HW_OFFLOAD = 12, + TCA_INGRESS_BLOCK = 13, + TCA_EGRESS_BLOCK = 14, + TCA_DUMP_FLAGS = 15, + __TCA_MAX = 16, +}; + +struct vlan_pcpu_stats { + u64 rx_packets; + u64 rx_bytes; + u64 rx_multicast; + u64 tx_packets; + u64 tx_bytes; + struct u64_stats_sync syncp; + u32 rx_errors; + u32 tx_dropped; +}; + +struct netpoll___2; + +struct skb_array { + struct ptr_ring ring; +}; + +struct macvlan_port; + +struct macvlan_dev { + struct net_device *dev; + struct list_head list; + struct hlist_node hlist; + struct macvlan_port *port; + struct net_device *lowerdev; + void *accel_priv; + struct vlan_pcpu_stats *pcpu_stats; + long unsigned int mc_filter[4]; + netdev_features_t set_features; + enum macvlan_mode mode; + u16 flags; + unsigned int macaddr_count; + u32 bc_queue_len_req; + struct netpoll___2 *netpoll; +}; + +struct psched_ratecfg { + u64 rate_bytes_ps; + u32 mult; + u16 overhead; + u8 linklayer; + u8 shift; +}; + +struct psched_pktrate { + u64 rate_pkts_ps; + u32 mult; + u8 shift; +}; + +struct mini_Qdisc_pair { + struct mini_Qdisc miniq1; + struct mini_Qdisc miniq2; + struct mini_Qdisc **p_miniq; +}; + +struct pfifo_fast_priv { + struct skb_array q[3]; +}; + +struct tc_qopt_offload_stats { + struct gnet_stats_basic_packed *bstats; + struct gnet_stats_queue *qstats; +}; + +enum tc_mq_command { + TC_MQ_CREATE = 0, + TC_MQ_DESTROY = 1, + TC_MQ_STATS = 2, + TC_MQ_GRAFT = 3, +}; + +struct tc_mq_opt_offload_graft_params { + long unsigned int queue; + u32 child_handle; +}; + +struct tc_mq_qopt_offload { + enum tc_mq_command command; + u32 handle; + union { + struct tc_qopt_offload_stats stats; + struct tc_mq_opt_offload_graft_params graft_params; + }; +}; + +struct mq_sched { + struct Qdisc **qdiscs; +}; + +struct sch_frag_data { + long unsigned int dst; + struct qdisc_skb_cb cb; + __be16 inner_protocol; + u16 vlan_tci; + __be16 vlan_proto; + unsigned int l2_len; + u8 l2_data[18]; + int (*xmit)(struct sk_buff *); +}; + +enum tc_link_layer { + TC_LINKLAYER_UNAWARE = 0, + TC_LINKLAYER_ETHERNET = 1, + TC_LINKLAYER_ATM = 2, +}; + +enum { + TCA_STAB_UNSPEC = 0, + TCA_STAB_BASE = 1, + TCA_STAB_DATA = 2, + __TCA_STAB_MAX = 3, +}; + +struct qdisc_rate_table { + struct tc_ratespec rate; + u32 data[256]; + struct qdisc_rate_table *next; + int refcnt; +}; + +struct Qdisc_class_common { + u32 classid; + struct hlist_node hnode; +}; + +struct Qdisc_class_hash { + struct hlist_head *hash; + unsigned int hashsize; + unsigned int hashmask; + unsigned int hashelems; +}; + +struct qdisc_watchdog { + u64 last_expires; + struct hrtimer 
timer; + struct Qdisc *qdisc; +}; + +enum tc_root_command { + TC_ROOT_GRAFT = 0, +}; + +struct tc_root_qopt_offload { + enum tc_root_command command; + u32 handle; + bool ingress; +}; + +struct check_loop_arg { + struct qdisc_walker w; + struct Qdisc *p; + int depth; +}; + +struct tcf_bind_args { + struct tcf_walker w; + long unsigned int base; + long unsigned int cl; + u32 classid; +}; + +struct tc_bind_class_args { + struct qdisc_walker w; + long unsigned int new_cl; + u32 portid; + u32 clid; +}; + +struct qdisc_dump_args { + struct qdisc_walker w; + struct sk_buff *skb; + struct netlink_callback *cb; +}; + +enum net_xmit_qdisc_t { + __NET_XMIT_STOLEN = 65536, + __NET_XMIT_BYPASS = 131072, +}; + +enum { + TCA_ACT_UNSPEC = 0, + TCA_ACT_KIND = 1, + TCA_ACT_OPTIONS = 2, + TCA_ACT_INDEX = 3, + TCA_ACT_STATS = 4, + TCA_ACT_PAD = 5, + TCA_ACT_COOKIE = 6, + TCA_ACT_FLAGS = 7, + TCA_ACT_HW_STATS = 8, + TCA_ACT_USED_HW_STATS = 9, + __TCA_ACT_MAX = 10, +}; + +enum tca_id { + TCA_ID_UNSPEC = 0, + TCA_ID_POLICE = 1, + TCA_ID_GACT = 5, + TCA_ID_IPT = 6, + TCA_ID_PEDIT = 7, + TCA_ID_MIRRED = 8, + TCA_ID_NAT = 9, + TCA_ID_XT = 10, + TCA_ID_SKBEDIT = 11, + TCA_ID_VLAN = 12, + TCA_ID_BPF = 13, + TCA_ID_CONNMARK = 14, + TCA_ID_SKBMOD = 15, + TCA_ID_CSUM = 16, + TCA_ID_TUNNEL_KEY = 17, + TCA_ID_SIMP = 22, + TCA_ID_IFE = 25, + TCA_ID_SAMPLE = 26, + TCA_ID_CTINFO = 27, + TCA_ID_MPLS = 28, + TCA_ID_CT = 29, + TCA_ID_GATE = 30, + __TCA_ID_MAX = 255, +}; + +struct tcf_t { + __u64 install; + __u64 lastuse; + __u64 expires; + __u64 firstuse; +}; + +struct psample_group { + struct list_head list; + struct net *net; + u32 group_num; + u32 refcount; + u32 seq; + struct callback_head rcu; +}; + +struct action_gate_entry { + u8 gate_state; + u32 interval; + s32 ipv; + s32 maxoctets; +}; + +enum qdisc_class_ops_flags { + QDISC_CLASS_OPS_DOIT_UNLOCKED = 1, +}; + +enum tcf_proto_ops_flags { + TCF_PROTO_OPS_DOIT_UNLOCKED = 1, +}; + +typedef void tcf_chain_head_change_t(struct tcf_proto *, void *); + +struct tcf_idrinfo { + struct mutex lock; + struct idr action_idr; + struct net *net; +}; + +struct tc_action_ops; + +struct tc_cookie; + +struct tc_action { + const struct tc_action_ops *ops; + __u32 type; + struct tcf_idrinfo *idrinfo; + u32 tcfa_index; + refcount_t tcfa_refcnt; + atomic_t tcfa_bindcnt; + int tcfa_action; + struct tcf_t tcfa_tm; + struct gnet_stats_basic_packed tcfa_bstats; + struct gnet_stats_basic_packed tcfa_bstats_hw; + struct gnet_stats_queue tcfa_qstats; + struct net_rate_estimator *tcfa_rate_est; + spinlock_t tcfa_lock; + struct gnet_stats_basic_cpu *cpu_bstats; + struct gnet_stats_basic_cpu *cpu_bstats_hw; + struct gnet_stats_queue *cpu_qstats; + struct tc_cookie *act_cookie; + struct tcf_chain *goto_chain; + u32 tcfa_flags; + u8 hw_stats; + u8 used_hw_stats; + bool used_hw_stats_valid; +}; + +typedef void (*tc_action_priv_destructor)(void *); + +struct tc_action_ops { + struct list_head head; + char kind[16]; + enum tca_id id; + size_t size; + struct module *owner; + int (*act)(struct sk_buff *, const struct tc_action *, struct tcf_result *); + int (*dump)(struct sk_buff *, struct tc_action *, int, int); + void (*cleanup)(struct tc_action *); + int (*lookup)(struct net *, struct tc_action **, u32); + int (*init)(struct net *, struct nlattr *, struct nlattr *, struct tc_action **, int, int, bool, struct tcf_proto *, u32, struct netlink_ext_ack *); + int (*walk)(struct net *, struct sk_buff *, struct netlink_callback *, int, const struct tc_action_ops *, struct netlink_ext_ack *); + void 
(*stats_update)(struct tc_action *, u64, u64, u64, u64, bool); + size_t (*get_fill_size)(const struct tc_action *); + struct net_device * (*get_dev)(const struct tc_action *, tc_action_priv_destructor *); + struct psample_group * (*get_psample_group)(const struct tc_action *, tc_action_priv_destructor *); +}; + +struct tc_cookie { + u8 *data; + u32 len; + struct callback_head rcu; +}; + +struct tcf_block_ext_info { + enum flow_block_binder_type binder_type; + tcf_chain_head_change_t *chain_head_change; + void *chain_head_change_priv; + u32 block_index; +}; + +struct tcf_qevent { + struct tcf_block *block; + struct tcf_block_ext_info info; + struct tcf_proto *filter_chain; +}; + +struct tcf_exts { + __u32 type; + int nr_actions; + struct tc_action **actions; + struct net *net; + int action; + int police; +}; + +enum pedit_header_type { + TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK = 0, + TCA_PEDIT_KEY_EX_HDR_TYPE_ETH = 1, + TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 = 2, + TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 = 3, + TCA_PEDIT_KEY_EX_HDR_TYPE_TCP = 4, + TCA_PEDIT_KEY_EX_HDR_TYPE_UDP = 5, + __PEDIT_HDR_TYPE_MAX = 6, +}; + +enum pedit_cmd { + TCA_PEDIT_KEY_EX_CMD_SET = 0, + TCA_PEDIT_KEY_EX_CMD_ADD = 1, + __PEDIT_CMD_MAX = 2, +}; + +struct tc_pedit_key { + __u32 mask; + __u32 val; + __u32 off; + __u32 at; + __u32 offmask; + __u32 shift; +}; + +struct tcf_pedit_key_ex { + enum pedit_header_type htype; + enum pedit_cmd cmd; +}; + +struct tcf_pedit { + struct tc_action common; + unsigned char tcfp_nkeys; + unsigned char tcfp_flags; + struct tc_pedit_key *tcfp_keys; + struct tcf_pedit_key_ex *tcfp_keys_ex; +}; + +struct tcf_mirred { + struct tc_action common; + int tcfm_eaction; + bool tcfm_mac_header_xmit; + struct net_device *tcfm_dev; + struct list_head tcfm_list; +}; + +struct tcf_vlan_params { + int tcfv_action; + unsigned char tcfv_push_dst[6]; + unsigned char tcfv_push_src[6]; + u16 tcfv_push_vid; + __be16 tcfv_push_proto; + u8 tcfv_push_prio; + bool tcfv_push_prio_exists; + struct callback_head rcu; +}; + +struct tcf_vlan { + struct tc_action common; + struct tcf_vlan_params *vlan_p; +}; + +struct tcf_tunnel_key_params { + struct callback_head rcu; + int tcft_action; + struct metadata_dst *tcft_enc_metadata; +}; + +struct tcf_tunnel_key { + struct tc_action common; + struct tcf_tunnel_key_params *params; +}; + +struct tcf_csum_params { + u32 update_flags; + struct callback_head rcu; +}; + +struct tcf_csum { + struct tc_action common; + struct tcf_csum_params *params; +}; + +struct tcf_gact { + struct tc_action common; + u16 tcfg_ptype; + u16 tcfg_pval; + int tcfg_paction; + atomic_t packets; +}; + +struct tcf_police_params { + int tcfp_result; + u32 tcfp_ewma_rate; + s64 tcfp_burst; + u32 tcfp_mtu; + s64 tcfp_mtu_ptoks; + s64 tcfp_pkt_burst; + struct psched_ratecfg rate; + bool rate_present; + struct psched_ratecfg peak; + bool peak_present; + struct psched_pktrate ppsrate; + bool pps_present; + struct callback_head rcu; +}; + +struct tcf_police { + struct tc_action common; + struct tcf_police_params *params; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + spinlock_t tcfp_lock; + s64 tcfp_toks; + s64 tcfp_ptoks; + s64 tcfp_pkttoks; + s64 tcfp_t_c; + long: 64; + long: 64; + long: 64; +}; + +struct tcf_sample { + struct tc_action common; + u32 rate; + bool truncate; + u32 trunc_size; + struct psample_group *psample_group; + u32 psample_group_num; + struct list_head tcfm_list; +}; + +struct tcf_skbedit_params { + u32 flags; + u32 priority; + u32 mark; + u32 mask; + u16 queue_mapping; 
+ u16 ptype; + struct callback_head rcu; +}; + +struct tcf_skbedit { + struct tc_action common; + struct tcf_skbedit_params *params; +}; + +struct nf_nat_range2 { + unsigned int flags; + union nf_inet_addr min_addr; + union nf_inet_addr max_addr; + union nf_conntrack_man_proto min_proto; + union nf_conntrack_man_proto max_proto; + union nf_conntrack_man_proto base_proto; +}; + +struct tcf_ct_flow_table; + +struct tcf_ct_params { + struct nf_conn *tmpl; + u16 zone; + u32 mark; + u32 mark_mask; + u32 labels[4]; + u32 labels_mask[4]; + struct nf_nat_range2 range; + bool ipv4_range; + u16 ct_action; + struct callback_head rcu; + struct tcf_ct_flow_table *ct_ft; + struct nf_flowtable *nf_ft; +}; + +struct tcf_ct { + struct tc_action common; + struct tcf_ct_params *params; +}; + +struct tcf_mpls_params { + int tcfm_action; + u32 tcfm_label; + u8 tcfm_tc; + u8 tcfm_ttl; + u8 tcfm_bos; + __be16 tcfm_proto; + struct callback_head rcu; +}; + +struct tcf_mpls { + struct tc_action common; + struct tcf_mpls_params *mpls_p; +}; + +struct tcfg_gate_entry { + int index; + u8 gate_state; + u32 interval; + s32 ipv; + s32 maxoctets; + struct list_head list; +}; + +struct tcf_gate_params { + s32 tcfg_priority; + u64 tcfg_basetime; + u64 tcfg_cycletime; + u64 tcfg_cycletime_ext; + u32 tcfg_flags; + s32 tcfg_clockid; + size_t num_entries; + struct list_head entries; +}; + +struct tcf_gate { + struct tc_action common; + struct tcf_gate_params param; + u8 current_gate_status; + ktime_t current_close_time; + u32 current_entry_octets; + s32 current_max_octets; + struct tcfg_gate_entry *next_entry; + struct hrtimer hitimer; + enum tk_offsets tk_offset; +}; + +struct tcf_filter_chain_list_item { + struct list_head list; + tcf_chain_head_change_t *chain_head_change; + void *chain_head_change_priv; +}; + +struct tcf_net { + spinlock_t idr_lock; + struct idr idr; +}; + +struct tcf_block_owner_item { + struct list_head list; + struct Qdisc *q; + enum flow_block_binder_type binder_type; +}; + +struct tcf_chain_info { + struct tcf_proto **pprev; + struct tcf_proto *next; +}; + +struct tcf_dump_args { + struct tcf_walker w; + struct sk_buff *skb; + struct netlink_callback *cb; + struct tcf_block *block; + struct Qdisc *q; + u32 parent; + bool terse_dump; +}; + +struct tcamsg { + unsigned char tca_family; + unsigned char tca__pad1; + short unsigned int tca__pad2; +}; + +enum { + TCA_ROOT_UNSPEC = 0, + TCA_ROOT_TAB = 1, + TCA_ROOT_FLAGS = 2, + TCA_ROOT_COUNT = 3, + TCA_ROOT_TIME_DELTA = 4, + __TCA_ROOT_MAX = 5, +}; + +struct tc_action_net { + struct tcf_idrinfo *idrinfo; + const struct tc_action_ops *ops; +}; + +struct tc_fifo_qopt { + __u32 limit; +}; + +enum tc_fifo_command { + TC_FIFO_REPLACE = 0, + TC_FIFO_DESTROY = 1, + TC_FIFO_STATS = 2, +}; + +struct tc_fifo_qopt_offload { + enum tc_fifo_command command; + u32 handle; + u32 parent; + union { + struct tc_qopt_offload_stats stats; + }; +}; + +struct tcf_ematch_tree_hdr { + __u16 nmatches; + __u16 progid; +}; + +enum { + TCA_EMATCH_TREE_UNSPEC = 0, + TCA_EMATCH_TREE_HDR = 1, + TCA_EMATCH_TREE_LIST = 2, + __TCA_EMATCH_TREE_MAX = 3, +}; + +struct tcf_ematch_hdr { + __u16 matchid; + __u16 kind; + __u16 flags; + __u16 pad; +}; + +struct tcf_pkt_info { + unsigned char *ptr; + int nexthdr; +}; + +struct tcf_ematch_ops; + +struct tcf_ematch { + struct tcf_ematch_ops *ops; + long unsigned int data; + unsigned int datalen; + u16 matchid; + u16 flags; + struct net *net; +}; + +struct tcf_ematch_ops { + int kind; + int datalen; + int (*change)(struct net *, void *, int, struct 
tcf_ematch *); + int (*match)(struct sk_buff *, struct tcf_ematch *, struct tcf_pkt_info *); + void (*destroy)(struct tcf_ematch *); + int (*dump)(struct sk_buff *, struct tcf_ematch *); + struct module *owner; + struct list_head link; +}; + +struct tcf_ematch_tree { + struct tcf_ematch_tree_hdr hdr; + struct tcf_ematch *matches; +}; + +struct sockaddr_nl { + __kernel_sa_family_t nl_family; + short unsigned int nl_pad; + __u32 nl_pid; + __u32 nl_groups; +}; + +struct nlmsgerr { + int error; + struct nlmsghdr msg; +}; + +enum nlmsgerr_attrs { + NLMSGERR_ATTR_UNUSED = 0, + NLMSGERR_ATTR_MSG = 1, + NLMSGERR_ATTR_OFFS = 2, + NLMSGERR_ATTR_COOKIE = 3, + NLMSGERR_ATTR_POLICY = 4, + __NLMSGERR_ATTR_MAX = 5, + NLMSGERR_ATTR_MAX = 4, +}; + +struct nl_pktinfo { + __u32 group; +}; + +enum { + NETLINK_UNCONNECTED = 0, + NETLINK_CONNECTED = 1, +}; + +enum netlink_skb_flags { + NETLINK_SKB_DST = 8, +}; + +struct netlink_notify { + struct net *net; + u32 portid; + int protocol; +}; + +struct netlink_tap { + struct net_device *dev; + struct module *module; + struct list_head list; +}; + +struct trace_event_raw_netlink_extack { + struct trace_entry ent; + u32 __data_loc_msg; + char __data[0]; +}; + +struct trace_event_data_offsets_netlink_extack { + u32 msg; +}; + +typedef void (*btf_trace_netlink_extack)(void *, const char *); + +struct netlink_sock { + struct sock sk; + u32 portid; + u32 dst_portid; + u32 dst_group; + u32 flags; + u32 subscriptions; + u32 ngroups; + long unsigned int *groups; + long unsigned int state; + size_t max_recvmsg_len; + wait_queue_head_t wait; + bool bound; + bool cb_running; + int dump_done_errno; + struct netlink_callback cb; + struct mutex *cb_mutex; + struct mutex cb_def_mutex; + void (*netlink_rcv)(struct sk_buff *); + int (*netlink_bind)(struct net *, int); + void (*netlink_unbind)(struct net *, int); + struct module *module; + struct rhash_head node; + struct callback_head rcu; + struct work_struct work; +}; + +struct listeners; + +struct netlink_table { + struct rhashtable hash; + struct hlist_head mc_list; + struct listeners *listeners; + unsigned int flags; + unsigned int groups; + struct mutex *cb_mutex; + struct module *module; + int (*bind)(struct net *, int); + void (*unbind)(struct net *, int); + bool (*compare)(struct net *, struct sock *); + int registered; +}; + +struct listeners { + struct callback_head rcu; + long unsigned int masks[0]; +}; + +struct netlink_tap_net { + struct list_head netlink_tap_all; + struct mutex netlink_tap_lock; +}; + +struct netlink_compare_arg { + possible_net_t pnet; + u32 portid; +}; + +struct netlink_broadcast_data { + struct sock *exclude_sk; + struct net *net; + u32 portid; + u32 group; + int failure; + int delivery_failure; + int congested; + int delivered; + gfp_t allocation; + struct sk_buff *skb; + struct sk_buff *skb2; + int (*tx_filter)(struct sock *, struct sk_buff *, void *); + void *tx_data; +}; + +struct netlink_set_err_data { + struct sock *exclude_sk; + u32 portid; + u32 group; + int code; +}; + +struct nl_seq_iter { + struct seq_net_private p; + struct rhashtable_iter hti; + int link; +}; + +struct bpf_iter__netlink { + union { + struct bpf_iter_meta *meta; + }; + union { + struct netlink_sock *sk; + }; +}; + +enum { + CTRL_CMD_UNSPEC = 0, + CTRL_CMD_NEWFAMILY = 1, + CTRL_CMD_DELFAMILY = 2, + CTRL_CMD_GETFAMILY = 3, + CTRL_CMD_NEWOPS = 4, + CTRL_CMD_DELOPS = 5, + CTRL_CMD_GETOPS = 6, + CTRL_CMD_NEWMCAST_GRP = 7, + CTRL_CMD_DELMCAST_GRP = 8, + CTRL_CMD_GETMCAST_GRP = 9, + CTRL_CMD_GETPOLICY = 10, + __CTRL_CMD_MAX = 
11, +}; + +enum { + CTRL_ATTR_UNSPEC = 0, + CTRL_ATTR_FAMILY_ID = 1, + CTRL_ATTR_FAMILY_NAME = 2, + CTRL_ATTR_VERSION = 3, + CTRL_ATTR_HDRSIZE = 4, + CTRL_ATTR_MAXATTR = 5, + CTRL_ATTR_OPS = 6, + CTRL_ATTR_MCAST_GROUPS = 7, + CTRL_ATTR_POLICY = 8, + CTRL_ATTR_OP_POLICY = 9, + CTRL_ATTR_OP = 10, + __CTRL_ATTR_MAX = 11, +}; + +enum { + CTRL_ATTR_OP_UNSPEC = 0, + CTRL_ATTR_OP_ID = 1, + CTRL_ATTR_OP_FLAGS = 2, + __CTRL_ATTR_OP_MAX = 3, +}; + +enum { + CTRL_ATTR_MCAST_GRP_UNSPEC = 0, + CTRL_ATTR_MCAST_GRP_NAME = 1, + CTRL_ATTR_MCAST_GRP_ID = 2, + __CTRL_ATTR_MCAST_GRP_MAX = 3, +}; + +enum { + CTRL_ATTR_POLICY_UNSPEC = 0, + CTRL_ATTR_POLICY_DO = 1, + CTRL_ATTR_POLICY_DUMP = 2, + __CTRL_ATTR_POLICY_DUMP_MAX = 3, + CTRL_ATTR_POLICY_DUMP_MAX = 2, +}; + +struct genl_start_context { + const struct genl_family *family; + struct nlmsghdr *nlh; + struct netlink_ext_ack *extack; + const struct genl_ops *ops; + int hdrlen; +}; + +struct netlink_policy_dump_state; + +struct ctrl_dump_policy_ctx { + struct netlink_policy_dump_state *state; + const struct genl_family *rt; + unsigned int opidx; + u32 op; + u16 fam_id; + u8 policies: 1; + u8 single_op: 1; +}; + +enum netlink_attribute_type { + NL_ATTR_TYPE_INVALID = 0, + NL_ATTR_TYPE_FLAG = 1, + NL_ATTR_TYPE_U8 = 2, + NL_ATTR_TYPE_U16 = 3, + NL_ATTR_TYPE_U32 = 4, + NL_ATTR_TYPE_U64 = 5, + NL_ATTR_TYPE_S8 = 6, + NL_ATTR_TYPE_S16 = 7, + NL_ATTR_TYPE_S32 = 8, + NL_ATTR_TYPE_S64 = 9, + NL_ATTR_TYPE_BINARY = 10, + NL_ATTR_TYPE_STRING = 11, + NL_ATTR_TYPE_NUL_STRING = 12, + NL_ATTR_TYPE_NESTED = 13, + NL_ATTR_TYPE_NESTED_ARRAY = 14, + NL_ATTR_TYPE_BITFIELD32 = 15, +}; + +enum netlink_policy_type_attr { + NL_POLICY_TYPE_ATTR_UNSPEC = 0, + NL_POLICY_TYPE_ATTR_TYPE = 1, + NL_POLICY_TYPE_ATTR_MIN_VALUE_S = 2, + NL_POLICY_TYPE_ATTR_MAX_VALUE_S = 3, + NL_POLICY_TYPE_ATTR_MIN_VALUE_U = 4, + NL_POLICY_TYPE_ATTR_MAX_VALUE_U = 5, + NL_POLICY_TYPE_ATTR_MIN_LENGTH = 6, + NL_POLICY_TYPE_ATTR_MAX_LENGTH = 7, + NL_POLICY_TYPE_ATTR_POLICY_IDX = 8, + NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE = 9, + NL_POLICY_TYPE_ATTR_BITFIELD32_MASK = 10, + NL_POLICY_TYPE_ATTR_PAD = 11, + NL_POLICY_TYPE_ATTR_MASK = 12, + __NL_POLICY_TYPE_ATTR_MAX = 13, + NL_POLICY_TYPE_ATTR_MAX = 12, +}; + +struct netlink_policy_dump_state___2 { + unsigned int policy_idx; + unsigned int attr_idx; + unsigned int n_alloc; + struct { + const struct nla_policy *policy; + unsigned int maxtype; + } policies[0]; +}; + +struct trace_event_raw_bpf_test_finish { + struct trace_entry ent; + int err; + char __data[0]; +}; + +struct trace_event_data_offsets_bpf_test_finish {}; + +typedef void (*btf_trace_bpf_test_finish)(void *, int *); + +struct bpf_test_timer { + enum { + NO_PREEMPT = 0, + NO_MIGRATE = 1, + } mode; + u32 i; + u64 time_start; + u64 time_spent; +}; + +struct bpf_fentry_test_t { + struct bpf_fentry_test_t *a; +}; + +struct bpf_raw_tp_test_run_info { + struct bpf_prog *prog; + void *ctx; + u32 retval; +}; + +struct ethtool_cmd { + __u32 cmd; + __u32 supported; + __u32 advertising; + __u16 speed; + __u8 duplex; + __u8 port; + __u8 phy_address; + __u8 transceiver; + __u8 autoneg; + __u8 mdio_support; + __u32 maxtxpkt; + __u32 maxrxpkt; + __u16 speed_hi; + __u8 eth_tp_mdix; + __u8 eth_tp_mdix_ctrl; + __u32 lp_advertising; + __u32 reserved[2]; +}; + +struct ethtool_value { + __u32 cmd; + __u32 data; +}; + +enum tunable_id { + ETHTOOL_ID_UNSPEC = 0, + ETHTOOL_RX_COPYBREAK = 1, + ETHTOOL_TX_COPYBREAK = 2, + ETHTOOL_PFC_PREVENTION_TOUT = 3, + __ETHTOOL_TUNABLE_COUNT = 4, +}; + +enum tunable_type_id { + ETHTOOL_TUNABLE_UNSPEC = 
0, + ETHTOOL_TUNABLE_U8 = 1, + ETHTOOL_TUNABLE_U16 = 2, + ETHTOOL_TUNABLE_U32 = 3, + ETHTOOL_TUNABLE_U64 = 4, + ETHTOOL_TUNABLE_STRING = 5, + ETHTOOL_TUNABLE_S8 = 6, + ETHTOOL_TUNABLE_S16 = 7, + ETHTOOL_TUNABLE_S32 = 8, + ETHTOOL_TUNABLE_S64 = 9, +}; + +enum phy_tunable_id { + ETHTOOL_PHY_ID_UNSPEC = 0, + ETHTOOL_PHY_DOWNSHIFT = 1, + ETHTOOL_PHY_FAST_LINK_DOWN = 2, + ETHTOOL_PHY_EDPD = 3, + __ETHTOOL_PHY_TUNABLE_COUNT = 4, +}; + +struct ethtool_gstrings { + __u32 cmd; + __u32 string_set; + __u32 len; + __u8 data[0]; +}; + +struct ethtool_sset_info { + __u32 cmd; + __u32 reserved; + __u64 sset_mask; + __u32 data[0]; +}; + +struct ethtool_perm_addr { + __u32 cmd; + __u32 size; + __u8 data[0]; +}; + +enum ethtool_flags { + ETH_FLAG_TXVLAN = 128, + ETH_FLAG_RXVLAN = 256, + ETH_FLAG_LRO = 32768, + ETH_FLAG_NTUPLE = 134217728, + ETH_FLAG_RXHASH = 268435456, +}; + +struct ethtool_rxfh { + __u32 cmd; + __u32 rss_context; + __u32 indir_size; + __u32 key_size; + __u8 hfunc; + __u8 rsvd8[3]; + __u32 rsvd32; + __u32 rss_config[0]; +}; + +struct ethtool_get_features_block { + __u32 available; + __u32 requested; + __u32 active; + __u32 never_changed; +}; + +struct ethtool_gfeatures { + __u32 cmd; + __u32 size; + struct ethtool_get_features_block features[0]; +}; + +struct ethtool_set_features_block { + __u32 valid; + __u32 requested; +}; + +struct ethtool_sfeatures { + __u32 cmd; + __u32 size; + struct ethtool_set_features_block features[0]; +}; + +enum ethtool_sfeatures_retval_bits { + ETHTOOL_F_UNSUPPORTED__BIT = 0, + ETHTOOL_F_WISH__BIT = 1, + ETHTOOL_F_COMPAT__BIT = 2, +}; + +struct ethtool_per_queue_op { + __u32 cmd; + __u32 sub_command; + __u32 queue_mask[128]; + char data[0]; +}; + +enum ethtool_fec_config_bits { + ETHTOOL_FEC_NONE_BIT = 0, + ETHTOOL_FEC_AUTO_BIT = 1, + ETHTOOL_FEC_OFF_BIT = 2, + ETHTOOL_FEC_RS_BIT = 3, + ETHTOOL_FEC_BASER_BIT = 4, + ETHTOOL_FEC_LLRS_BIT = 5, +}; + +enum { + ETH_RSS_HASH_TOP_BIT = 0, + ETH_RSS_HASH_XOR_BIT = 1, + ETH_RSS_HASH_CRC32_BIT = 2, + ETH_RSS_HASH_FUNCS_COUNT = 3, +}; + +struct ethtool_rx_flow_rule { + struct flow_rule *rule; + long unsigned int priv[0]; +}; + +struct ethtool_rx_flow_spec_input { + const struct ethtool_rx_flow_spec *fs; + u32 rss_ctx; +}; + +struct ethtool_link_usettings { + struct ethtool_link_settings base; + struct { + __u32 supported[3]; + __u32 advertising[3]; + __u32 lp_advertising[3]; + } link_modes; +}; + +struct ethtool_rx_flow_key { + struct flow_dissector_key_basic basic; + union { + struct flow_dissector_key_ipv4_addrs ipv4; + struct flow_dissector_key_ipv6_addrs ipv6; + }; + struct flow_dissector_key_ports tp; + struct flow_dissector_key_ip ip; + struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_eth_addrs eth_addrs; + long: 48; +}; + +struct ethtool_rx_flow_match { + struct flow_dissector dissector; + int: 32; + struct ethtool_rx_flow_key key; + struct ethtool_rx_flow_key mask; +}; + +enum { + ETHTOOL_UDP_TUNNEL_TYPE_VXLAN = 0, + ETHTOOL_UDP_TUNNEL_TYPE_GENEVE = 1, + ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE = 2, + __ETHTOOL_UDP_TUNNEL_TYPE_CNT = 3, +}; + +struct link_mode_info { + int speed; + u8 lanes; + u8 duplex; +}; + +enum { + ETHTOOL_MSG_USER_NONE = 0, + ETHTOOL_MSG_STRSET_GET = 1, + ETHTOOL_MSG_LINKINFO_GET = 2, + ETHTOOL_MSG_LINKINFO_SET = 3, + ETHTOOL_MSG_LINKMODES_GET = 4, + ETHTOOL_MSG_LINKMODES_SET = 5, + ETHTOOL_MSG_LINKSTATE_GET = 6, + ETHTOOL_MSG_DEBUG_GET = 7, + ETHTOOL_MSG_DEBUG_SET = 8, + ETHTOOL_MSG_WOL_GET = 9, + ETHTOOL_MSG_WOL_SET = 10, + ETHTOOL_MSG_FEATURES_GET = 11, + ETHTOOL_MSG_FEATURES_SET 
= 12, + ETHTOOL_MSG_PRIVFLAGS_GET = 13, + ETHTOOL_MSG_PRIVFLAGS_SET = 14, + ETHTOOL_MSG_RINGS_GET = 15, + ETHTOOL_MSG_RINGS_SET = 16, + ETHTOOL_MSG_CHANNELS_GET = 17, + ETHTOOL_MSG_CHANNELS_SET = 18, + ETHTOOL_MSG_COALESCE_GET = 19, + ETHTOOL_MSG_COALESCE_SET = 20, + ETHTOOL_MSG_PAUSE_GET = 21, + ETHTOOL_MSG_PAUSE_SET = 22, + ETHTOOL_MSG_EEE_GET = 23, + ETHTOOL_MSG_EEE_SET = 24, + ETHTOOL_MSG_TSINFO_GET = 25, + ETHTOOL_MSG_CABLE_TEST_ACT = 26, + ETHTOOL_MSG_CABLE_TEST_TDR_ACT = 27, + ETHTOOL_MSG_TUNNEL_INFO_GET = 28, + ETHTOOL_MSG_FEC_GET = 29, + ETHTOOL_MSG_FEC_SET = 30, + ETHTOOL_MSG_MODULE_EEPROM_GET = 31, + ETHTOOL_MSG_STATS_GET = 32, + __ETHTOOL_MSG_USER_CNT = 33, + ETHTOOL_MSG_USER_MAX = 32, +}; + +enum { + ETHTOOL_A_HEADER_UNSPEC = 0, + ETHTOOL_A_HEADER_DEV_INDEX = 1, + ETHTOOL_A_HEADER_DEV_NAME = 2, + ETHTOOL_A_HEADER_FLAGS = 3, + __ETHTOOL_A_HEADER_CNT = 4, + ETHTOOL_A_HEADER_MAX = 3, +}; + +enum { + ETHTOOL_A_STRSET_UNSPEC = 0, + ETHTOOL_A_STRSET_HEADER = 1, + ETHTOOL_A_STRSET_STRINGSETS = 2, + ETHTOOL_A_STRSET_COUNTS_ONLY = 3, + __ETHTOOL_A_STRSET_CNT = 4, + ETHTOOL_A_STRSET_MAX = 3, +}; + +enum { + ETHTOOL_A_LINKINFO_UNSPEC = 0, + ETHTOOL_A_LINKINFO_HEADER = 1, + ETHTOOL_A_LINKINFO_PORT = 2, + ETHTOOL_A_LINKINFO_PHYADDR = 3, + ETHTOOL_A_LINKINFO_TP_MDIX = 4, + ETHTOOL_A_LINKINFO_TP_MDIX_CTRL = 5, + ETHTOOL_A_LINKINFO_TRANSCEIVER = 6, + __ETHTOOL_A_LINKINFO_CNT = 7, + ETHTOOL_A_LINKINFO_MAX = 6, +}; + +enum { + ETHTOOL_A_LINKMODES_UNSPEC = 0, + ETHTOOL_A_LINKMODES_HEADER = 1, + ETHTOOL_A_LINKMODES_AUTONEG = 2, + ETHTOOL_A_LINKMODES_OURS = 3, + ETHTOOL_A_LINKMODES_PEER = 4, + ETHTOOL_A_LINKMODES_SPEED = 5, + ETHTOOL_A_LINKMODES_DUPLEX = 6, + ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG = 7, + ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE = 8, + ETHTOOL_A_LINKMODES_LANES = 9, + __ETHTOOL_A_LINKMODES_CNT = 10, + ETHTOOL_A_LINKMODES_MAX = 9, +}; + +enum { + ETHTOOL_A_LINKSTATE_UNSPEC = 0, + ETHTOOL_A_LINKSTATE_HEADER = 1, + ETHTOOL_A_LINKSTATE_LINK = 2, + ETHTOOL_A_LINKSTATE_SQI = 3, + ETHTOOL_A_LINKSTATE_SQI_MAX = 4, + ETHTOOL_A_LINKSTATE_EXT_STATE = 5, + ETHTOOL_A_LINKSTATE_EXT_SUBSTATE = 6, + __ETHTOOL_A_LINKSTATE_CNT = 7, + ETHTOOL_A_LINKSTATE_MAX = 6, +}; + +enum { + ETHTOOL_A_DEBUG_UNSPEC = 0, + ETHTOOL_A_DEBUG_HEADER = 1, + ETHTOOL_A_DEBUG_MSGMASK = 2, + __ETHTOOL_A_DEBUG_CNT = 3, + ETHTOOL_A_DEBUG_MAX = 2, +}; + +enum { + ETHTOOL_A_WOL_UNSPEC = 0, + ETHTOOL_A_WOL_HEADER = 1, + ETHTOOL_A_WOL_MODES = 2, + ETHTOOL_A_WOL_SOPASS = 3, + __ETHTOOL_A_WOL_CNT = 4, + ETHTOOL_A_WOL_MAX = 3, +}; + +enum { + ETHTOOL_A_FEATURES_UNSPEC = 0, + ETHTOOL_A_FEATURES_HEADER = 1, + ETHTOOL_A_FEATURES_HW = 2, + ETHTOOL_A_FEATURES_WANTED = 3, + ETHTOOL_A_FEATURES_ACTIVE = 4, + ETHTOOL_A_FEATURES_NOCHANGE = 5, + __ETHTOOL_A_FEATURES_CNT = 6, + ETHTOOL_A_FEATURES_MAX = 5, +}; + +enum { + ETHTOOL_A_PRIVFLAGS_UNSPEC = 0, + ETHTOOL_A_PRIVFLAGS_HEADER = 1, + ETHTOOL_A_PRIVFLAGS_FLAGS = 2, + __ETHTOOL_A_PRIVFLAGS_CNT = 3, + ETHTOOL_A_PRIVFLAGS_MAX = 2, +}; + +enum { + ETHTOOL_A_RINGS_UNSPEC = 0, + ETHTOOL_A_RINGS_HEADER = 1, + ETHTOOL_A_RINGS_RX_MAX = 2, + ETHTOOL_A_RINGS_RX_MINI_MAX = 3, + ETHTOOL_A_RINGS_RX_JUMBO_MAX = 4, + ETHTOOL_A_RINGS_TX_MAX = 5, + ETHTOOL_A_RINGS_RX = 6, + ETHTOOL_A_RINGS_RX_MINI = 7, + ETHTOOL_A_RINGS_RX_JUMBO = 8, + ETHTOOL_A_RINGS_TX = 9, + __ETHTOOL_A_RINGS_CNT = 10, + ETHTOOL_A_RINGS_MAX = 9, +}; + +enum { + ETHTOOL_A_CHANNELS_UNSPEC = 0, + ETHTOOL_A_CHANNELS_HEADER = 1, + ETHTOOL_A_CHANNELS_RX_MAX = 2, + ETHTOOL_A_CHANNELS_TX_MAX = 3, + ETHTOOL_A_CHANNELS_OTHER_MAX = 4, + 
ETHTOOL_A_CHANNELS_COMBINED_MAX = 5, + ETHTOOL_A_CHANNELS_RX_COUNT = 6, + ETHTOOL_A_CHANNELS_TX_COUNT = 7, + ETHTOOL_A_CHANNELS_OTHER_COUNT = 8, + ETHTOOL_A_CHANNELS_COMBINED_COUNT = 9, + __ETHTOOL_A_CHANNELS_CNT = 10, + ETHTOOL_A_CHANNELS_MAX = 9, +}; + +enum { + ETHTOOL_A_COALESCE_UNSPEC = 0, + ETHTOOL_A_COALESCE_HEADER = 1, + ETHTOOL_A_COALESCE_RX_USECS = 2, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES = 3, + ETHTOOL_A_COALESCE_RX_USECS_IRQ = 4, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ = 5, + ETHTOOL_A_COALESCE_TX_USECS = 6, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES = 7, + ETHTOOL_A_COALESCE_TX_USECS_IRQ = 8, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ = 9, + ETHTOOL_A_COALESCE_STATS_BLOCK_USECS = 10, + ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX = 11, + ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX = 12, + ETHTOOL_A_COALESCE_PKT_RATE_LOW = 13, + ETHTOOL_A_COALESCE_RX_USECS_LOW = 14, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW = 15, + ETHTOOL_A_COALESCE_TX_USECS_LOW = 16, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW = 17, + ETHTOOL_A_COALESCE_PKT_RATE_HIGH = 18, + ETHTOOL_A_COALESCE_RX_USECS_HIGH = 19, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH = 20, + ETHTOOL_A_COALESCE_TX_USECS_HIGH = 21, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH = 22, + ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 23, + __ETHTOOL_A_COALESCE_CNT = 24, + ETHTOOL_A_COALESCE_MAX = 23, +}; + +enum { + ETHTOOL_A_PAUSE_UNSPEC = 0, + ETHTOOL_A_PAUSE_HEADER = 1, + ETHTOOL_A_PAUSE_AUTONEG = 2, + ETHTOOL_A_PAUSE_RX = 3, + ETHTOOL_A_PAUSE_TX = 4, + ETHTOOL_A_PAUSE_STATS = 5, + __ETHTOOL_A_PAUSE_CNT = 6, + ETHTOOL_A_PAUSE_MAX = 5, +}; + +enum { + ETHTOOL_A_EEE_UNSPEC = 0, + ETHTOOL_A_EEE_HEADER = 1, + ETHTOOL_A_EEE_MODES_OURS = 2, + ETHTOOL_A_EEE_MODES_PEER = 3, + ETHTOOL_A_EEE_ACTIVE = 4, + ETHTOOL_A_EEE_ENABLED = 5, + ETHTOOL_A_EEE_TX_LPI_ENABLED = 6, + ETHTOOL_A_EEE_TX_LPI_TIMER = 7, + __ETHTOOL_A_EEE_CNT = 8, + ETHTOOL_A_EEE_MAX = 7, +}; + +enum { + ETHTOOL_A_TSINFO_UNSPEC = 0, + ETHTOOL_A_TSINFO_HEADER = 1, + ETHTOOL_A_TSINFO_TIMESTAMPING = 2, + ETHTOOL_A_TSINFO_TX_TYPES = 3, + ETHTOOL_A_TSINFO_RX_FILTERS = 4, + ETHTOOL_A_TSINFO_PHC_INDEX = 5, + __ETHTOOL_A_TSINFO_CNT = 6, + ETHTOOL_A_TSINFO_MAX = 5, +}; + +enum { + ETHTOOL_A_CABLE_TEST_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_HEADER = 1, + __ETHTOOL_A_CABLE_TEST_CNT = 2, + ETHTOOL_A_CABLE_TEST_MAX = 1, +}; + +enum { + ETHTOOL_A_CABLE_TEST_TDR_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_TDR_HEADER = 1, + ETHTOOL_A_CABLE_TEST_TDR_CFG = 2, + __ETHTOOL_A_CABLE_TEST_TDR_CNT = 3, + ETHTOOL_A_CABLE_TEST_TDR_MAX = 2, +}; + +enum { + ETHTOOL_A_TUNNEL_INFO_UNSPEC = 0, + ETHTOOL_A_TUNNEL_INFO_HEADER = 1, + ETHTOOL_A_TUNNEL_INFO_UDP_PORTS = 2, + __ETHTOOL_A_TUNNEL_INFO_CNT = 3, + ETHTOOL_A_TUNNEL_INFO_MAX = 2, +}; + +enum { + ETHTOOL_A_FEC_UNSPEC = 0, + ETHTOOL_A_FEC_HEADER = 1, + ETHTOOL_A_FEC_MODES = 2, + ETHTOOL_A_FEC_AUTO = 3, + ETHTOOL_A_FEC_ACTIVE = 4, + ETHTOOL_A_FEC_STATS = 5, + __ETHTOOL_A_FEC_CNT = 6, + ETHTOOL_A_FEC_MAX = 5, +}; + +enum { + ETHTOOL_A_MODULE_EEPROM_UNSPEC = 0, + ETHTOOL_A_MODULE_EEPROM_HEADER = 1, + ETHTOOL_A_MODULE_EEPROM_OFFSET = 2, + ETHTOOL_A_MODULE_EEPROM_LENGTH = 3, + ETHTOOL_A_MODULE_EEPROM_PAGE = 4, + ETHTOOL_A_MODULE_EEPROM_BANK = 5, + ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS = 6, + ETHTOOL_A_MODULE_EEPROM_DATA = 7, + __ETHTOOL_A_MODULE_EEPROM_CNT = 8, + ETHTOOL_A_MODULE_EEPROM_MAX = 7, +}; + +enum { + ETHTOOL_STATS_ETH_PHY = 0, + ETHTOOL_STATS_ETH_MAC = 1, + ETHTOOL_STATS_ETH_CTRL = 2, + ETHTOOL_STATS_RMON = 3, + __ETHTOOL_STATS_CNT = 4, +}; + +enum { + ETHTOOL_A_STATS_ETH_PHY_5_SYM_ERR = 0, + __ETHTOOL_A_STATS_ETH_PHY_CNT = 1, + 
ETHTOOL_A_STATS_ETH_PHY_MAX = 0, +}; + +enum { + ETHTOOL_A_STATS_ETH_MAC_2_TX_PKT = 0, + ETHTOOL_A_STATS_ETH_MAC_3_SINGLE_COL = 1, + ETHTOOL_A_STATS_ETH_MAC_4_MULTI_COL = 2, + ETHTOOL_A_STATS_ETH_MAC_5_RX_PKT = 3, + ETHTOOL_A_STATS_ETH_MAC_6_FCS_ERR = 4, + ETHTOOL_A_STATS_ETH_MAC_7_ALIGN_ERR = 5, + ETHTOOL_A_STATS_ETH_MAC_8_TX_BYTES = 6, + ETHTOOL_A_STATS_ETH_MAC_9_TX_DEFER = 7, + ETHTOOL_A_STATS_ETH_MAC_10_LATE_COL = 8, + ETHTOOL_A_STATS_ETH_MAC_11_XS_COL = 9, + ETHTOOL_A_STATS_ETH_MAC_12_TX_INT_ERR = 10, + ETHTOOL_A_STATS_ETH_MAC_13_CS_ERR = 11, + ETHTOOL_A_STATS_ETH_MAC_14_RX_BYTES = 12, + ETHTOOL_A_STATS_ETH_MAC_15_RX_INT_ERR = 13, + ETHTOOL_A_STATS_ETH_MAC_18_TX_MCAST = 14, + ETHTOOL_A_STATS_ETH_MAC_19_TX_BCAST = 15, + ETHTOOL_A_STATS_ETH_MAC_20_XS_DEFER = 16, + ETHTOOL_A_STATS_ETH_MAC_21_RX_MCAST = 17, + ETHTOOL_A_STATS_ETH_MAC_22_RX_BCAST = 18, + ETHTOOL_A_STATS_ETH_MAC_23_IR_LEN_ERR = 19, + ETHTOOL_A_STATS_ETH_MAC_24_OOR_LEN = 20, + ETHTOOL_A_STATS_ETH_MAC_25_TOO_LONG_ERR = 21, + __ETHTOOL_A_STATS_ETH_MAC_CNT = 22, + ETHTOOL_A_STATS_ETH_MAC_MAX = 21, +}; + +enum { + ETHTOOL_A_STATS_ETH_CTRL_3_TX = 0, + ETHTOOL_A_STATS_ETH_CTRL_4_RX = 1, + ETHTOOL_A_STATS_ETH_CTRL_5_RX_UNSUP = 2, + __ETHTOOL_A_STATS_ETH_CTRL_CNT = 3, + ETHTOOL_A_STATS_ETH_CTRL_MAX = 2, +}; + +enum { + ETHTOOL_A_STATS_RMON_UNDERSIZE = 0, + ETHTOOL_A_STATS_RMON_OVERSIZE = 1, + ETHTOOL_A_STATS_RMON_FRAG = 2, + ETHTOOL_A_STATS_RMON_JABBER = 3, + __ETHTOOL_A_STATS_RMON_CNT = 4, + ETHTOOL_A_STATS_RMON_MAX = 3, +}; + +enum ethtool_multicast_groups { + ETHNL_MCGRP_MONITOR = 0, +}; + +struct ethnl_req_info { + struct net_device *dev; + u32 flags; +}; + +struct ethnl_reply_data { + struct net_device *dev; +}; + +struct ethnl_request_ops { + u8 request_cmd; + u8 reply_cmd; + u16 hdr_attr; + unsigned int req_info_size; + unsigned int reply_data_size; + bool allow_nodev_do; + int (*parse_request)(struct ethnl_req_info *, struct nlattr **, struct netlink_ext_ack *); + int (*prepare_data)(const struct ethnl_req_info *, struct ethnl_reply_data *, struct genl_info *); + int (*reply_size)(const struct ethnl_req_info *, const struct ethnl_reply_data *); + int (*fill_reply)(struct sk_buff *, const struct ethnl_req_info *, const struct ethnl_reply_data *); + void (*cleanup_data)(struct ethnl_reply_data *); +}; + +struct ethnl_dump_ctx { + const struct ethnl_request_ops *ops; + struct ethnl_req_info *req_info; + struct ethnl_reply_data *reply_data; + int pos_hash; + int pos_idx; +}; + +typedef void (*ethnl_notify_handler_t)(struct net_device *, unsigned int, const void *); + +enum { + ETHTOOL_A_BITSET_BIT_UNSPEC = 0, + ETHTOOL_A_BITSET_BIT_INDEX = 1, + ETHTOOL_A_BITSET_BIT_NAME = 2, + ETHTOOL_A_BITSET_BIT_VALUE = 3, + __ETHTOOL_A_BITSET_BIT_CNT = 4, + ETHTOOL_A_BITSET_BIT_MAX = 3, +}; + +enum { + ETHTOOL_A_BITSET_BITS_UNSPEC = 0, + ETHTOOL_A_BITSET_BITS_BIT = 1, + __ETHTOOL_A_BITSET_BITS_CNT = 2, + ETHTOOL_A_BITSET_BITS_MAX = 1, +}; + +enum { + ETHTOOL_A_BITSET_UNSPEC = 0, + ETHTOOL_A_BITSET_NOMASK = 1, + ETHTOOL_A_BITSET_SIZE = 2, + ETHTOOL_A_BITSET_BITS = 3, + ETHTOOL_A_BITSET_VALUE = 4, + ETHTOOL_A_BITSET_MASK = 5, + __ETHTOOL_A_BITSET_CNT = 6, + ETHTOOL_A_BITSET_MAX = 5, +}; + +typedef const char (* const ethnl_string_array_t)[32]; + +enum { + ETHTOOL_A_STRING_UNSPEC = 0, + ETHTOOL_A_STRING_INDEX = 1, + ETHTOOL_A_STRING_VALUE = 2, + __ETHTOOL_A_STRING_CNT = 3, + ETHTOOL_A_STRING_MAX = 2, +}; + +enum { + ETHTOOL_A_STRINGS_UNSPEC = 0, + ETHTOOL_A_STRINGS_STRING = 1, + __ETHTOOL_A_STRINGS_CNT = 2, + ETHTOOL_A_STRINGS_MAX = 1, +}; + 
+enum { + ETHTOOL_A_STRINGSET_UNSPEC = 0, + ETHTOOL_A_STRINGSET_ID = 1, + ETHTOOL_A_STRINGSET_COUNT = 2, + ETHTOOL_A_STRINGSET_STRINGS = 3, + __ETHTOOL_A_STRINGSET_CNT = 4, + ETHTOOL_A_STRINGSET_MAX = 3, +}; + +enum { + ETHTOOL_A_STRINGSETS_UNSPEC = 0, + ETHTOOL_A_STRINGSETS_STRINGSET = 1, + __ETHTOOL_A_STRINGSETS_CNT = 2, + ETHTOOL_A_STRINGSETS_MAX = 1, +}; + +struct strset_info { + bool per_dev; + bool free_strings; + unsigned int count; + const char (*strings)[32]; +}; + +struct strset_req_info { + struct ethnl_req_info base; + u32 req_ids; + bool counts_only; +}; + +struct strset_reply_data { + struct ethnl_reply_data base; + struct strset_info sets[21]; +}; + +struct linkinfo_reply_data { + struct ethnl_reply_data base; + struct ethtool_link_ksettings ksettings; + struct ethtool_link_settings *lsettings; +}; + +struct linkmodes_reply_data { + struct ethnl_reply_data base; + struct ethtool_link_ksettings ksettings; + struct ethtool_link_settings *lsettings; + bool peer_empty; +}; + +struct linkstate_reply_data { + struct ethnl_reply_data base; + int link; + int sqi; + int sqi_max; + bool link_ext_state_provided; + struct ethtool_link_ext_state_info ethtool_link_ext_state_info; +}; + +struct debug_reply_data { + struct ethnl_reply_data base; + u32 msg_mask; +}; + +struct wol_reply_data { + struct ethnl_reply_data base; + struct ethtool_wolinfo wol; + bool show_sopass; +}; + +struct features_reply_data { + struct ethnl_reply_data base; + u32 hw[2]; + u32 wanted[2]; + u32 active[2]; + u32 nochange[2]; + u32 all[2]; +}; + +struct privflags_reply_data { + struct ethnl_reply_data base; + const char (*priv_flag_names)[32]; + unsigned int n_priv_flags; + u32 priv_flags; +}; + +struct rings_reply_data { + struct ethnl_reply_data base; + struct ethtool_ringparam ringparam; +}; + +struct channels_reply_data { + struct ethnl_reply_data base; + struct ethtool_channels channels; +}; + +struct coalesce_reply_data { + struct ethnl_reply_data base; + struct ethtool_coalesce coalesce; + u32 supported_params; +}; + +enum { + ETHTOOL_A_PAUSE_STAT_UNSPEC = 0, + ETHTOOL_A_PAUSE_STAT_PAD = 1, + ETHTOOL_A_PAUSE_STAT_TX_FRAMES = 2, + ETHTOOL_A_PAUSE_STAT_RX_FRAMES = 3, + __ETHTOOL_A_PAUSE_STAT_CNT = 4, + ETHTOOL_A_PAUSE_STAT_MAX = 3, +}; + +struct pause_reply_data { + struct ethnl_reply_data base; + struct ethtool_pauseparam pauseparam; + struct ethtool_pause_stats pausestat; +}; + +struct eee_reply_data { + struct ethnl_reply_data base; + struct ethtool_eee eee; +}; + +struct tsinfo_reply_data { + struct ethnl_reply_data base; + struct ethtool_ts_info ts_info; +}; + +enum { + ETHTOOL_A_CABLE_PAIR_A = 0, + ETHTOOL_A_CABLE_PAIR_B = 1, + ETHTOOL_A_CABLE_PAIR_C = 2, + ETHTOOL_A_CABLE_PAIR_D = 3, +}; + +enum { + ETHTOOL_A_CABLE_RESULT_UNSPEC = 0, + ETHTOOL_A_CABLE_RESULT_PAIR = 1, + ETHTOOL_A_CABLE_RESULT_CODE = 2, + __ETHTOOL_A_CABLE_RESULT_CNT = 3, + ETHTOOL_A_CABLE_RESULT_MAX = 2, +}; + +enum { + ETHTOOL_A_CABLE_FAULT_LENGTH_UNSPEC = 0, + ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR = 1, + ETHTOOL_A_CABLE_FAULT_LENGTH_CM = 2, + __ETHTOOL_A_CABLE_FAULT_LENGTH_CNT = 3, + ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 2, +}; + +enum { + ETHTOOL_A_CABLE_TEST_NTF_STATUS_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_NTF_STATUS_STARTED = 1, + ETHTOOL_A_CABLE_TEST_NTF_STATUS_COMPLETED = 2, +}; + +enum { + ETHTOOL_A_CABLE_NEST_UNSPEC = 0, + ETHTOOL_A_CABLE_NEST_RESULT = 1, + ETHTOOL_A_CABLE_NEST_FAULT_LENGTH = 2, + __ETHTOOL_A_CABLE_NEST_CNT = 3, + ETHTOOL_A_CABLE_NEST_MAX = 2, +}; + +enum { + ETHTOOL_A_CABLE_TEST_NTF_UNSPEC = 0, + 
ETHTOOL_A_CABLE_TEST_NTF_HEADER = 1, + ETHTOOL_A_CABLE_TEST_NTF_STATUS = 2, + ETHTOOL_A_CABLE_TEST_NTF_NEST = 3, + __ETHTOOL_A_CABLE_TEST_NTF_CNT = 4, + ETHTOOL_A_CABLE_TEST_NTF_MAX = 3, +}; + +enum { + ETHTOOL_A_CABLE_TEST_TDR_CFG_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST = 1, + ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST = 2, + ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP = 3, + ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR = 4, + __ETHTOOL_A_CABLE_TEST_TDR_CFG_CNT = 5, + ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX = 4, +}; + +enum { + ETHTOOL_A_CABLE_AMPLITUDE_UNSPEC = 0, + ETHTOOL_A_CABLE_AMPLITUDE_PAIR = 1, + ETHTOOL_A_CABLE_AMPLITUDE_mV = 2, + __ETHTOOL_A_CABLE_AMPLITUDE_CNT = 3, + ETHTOOL_A_CABLE_AMPLITUDE_MAX = 2, +}; + +enum { + ETHTOOL_A_CABLE_PULSE_UNSPEC = 0, + ETHTOOL_A_CABLE_PULSE_mV = 1, + __ETHTOOL_A_CABLE_PULSE_CNT = 2, + ETHTOOL_A_CABLE_PULSE_MAX = 1, +}; + +enum { + ETHTOOL_A_CABLE_STEP_UNSPEC = 0, + ETHTOOL_A_CABLE_STEP_FIRST_DISTANCE = 1, + ETHTOOL_A_CABLE_STEP_LAST_DISTANCE = 2, + ETHTOOL_A_CABLE_STEP_STEP_DISTANCE = 3, + __ETHTOOL_A_CABLE_STEP_CNT = 4, + ETHTOOL_A_CABLE_STEP_MAX = 3, +}; + +enum { + ETHTOOL_A_CABLE_TDR_NEST_UNSPEC = 0, + ETHTOOL_A_CABLE_TDR_NEST_STEP = 1, + ETHTOOL_A_CABLE_TDR_NEST_AMPLITUDE = 2, + ETHTOOL_A_CABLE_TDR_NEST_PULSE = 3, + __ETHTOOL_A_CABLE_TDR_NEST_CNT = 4, + ETHTOOL_A_CABLE_TDR_NEST_MAX = 3, +}; + +enum { + ETHTOOL_A_TUNNEL_UDP_ENTRY_UNSPEC = 0, + ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT = 1, + ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE = 2, + __ETHTOOL_A_TUNNEL_UDP_ENTRY_CNT = 3, + ETHTOOL_A_TUNNEL_UDP_ENTRY_MAX = 2, +}; + +enum { + ETHTOOL_A_TUNNEL_UDP_TABLE_UNSPEC = 0, + ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE = 1, + ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES = 2, + ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY = 3, + __ETHTOOL_A_TUNNEL_UDP_TABLE_CNT = 4, + ETHTOOL_A_TUNNEL_UDP_TABLE_MAX = 3, +}; + +enum { + ETHTOOL_A_TUNNEL_UDP_UNSPEC = 0, + ETHTOOL_A_TUNNEL_UDP_TABLE = 1, + __ETHTOOL_A_TUNNEL_UDP_CNT = 2, + ETHTOOL_A_TUNNEL_UDP_MAX = 1, +}; + +enum udp_parsable_tunnel_type { + UDP_TUNNEL_TYPE_VXLAN = 1, + UDP_TUNNEL_TYPE_GENEVE = 2, + UDP_TUNNEL_TYPE_VXLAN_GPE = 4, +}; + +enum udp_tunnel_nic_info_flags { + UDP_TUNNEL_NIC_INFO_MAY_SLEEP = 1, + UDP_TUNNEL_NIC_INFO_OPEN_ONLY = 2, + UDP_TUNNEL_NIC_INFO_IPV4_ONLY = 4, + UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN = 8, +}; + +struct udp_tunnel_nic_ops { + void (*get_port)(struct net_device *, unsigned int, unsigned int, struct udp_tunnel_info *); + void (*set_port_priv)(struct net_device *, unsigned int, unsigned int, u8); + void (*add_port)(struct net_device *, struct udp_tunnel_info *); + void (*del_port)(struct net_device *, struct udp_tunnel_info *); + void (*reset_ntf)(struct net_device *); + size_t (*dump_size)(struct net_device *, unsigned int); + int (*dump_write)(struct net_device *, unsigned int, struct sk_buff *); +}; + +struct ethnl_tunnel_info_dump_ctx { + struct ethnl_req_info req_info; + int pos_hash; + int pos_idx; +}; + +enum { + ETHTOOL_A_FEC_STAT_UNSPEC = 0, + ETHTOOL_A_FEC_STAT_PAD = 1, + ETHTOOL_A_FEC_STAT_CORRECTED = 2, + ETHTOOL_A_FEC_STAT_UNCORR = 3, + ETHTOOL_A_FEC_STAT_CORR_BITS = 4, + __ETHTOOL_A_FEC_STAT_CNT = 5, + ETHTOOL_A_FEC_STAT_MAX = 4, +}; + +struct fec_stat_grp { + u64 stats[9]; + u8 cnt; +}; + +struct fec_reply_data { + struct ethnl_reply_data base; + long unsigned int fec_link_modes[2]; + u32 active_fec; + u8 fec_auto; + struct fec_stat_grp corr; + struct fec_stat_grp uncorr; + struct fec_stat_grp corr_bits; +}; + +struct eeprom_req_info { + struct ethnl_req_info base; + u32 offset; + u32 length; + u8 page; + u8 bank; + u8 i2c_address; +}; + 
+struct eeprom_reply_data { + struct ethnl_reply_data base; + u32 length; + u8 *data; +}; + +enum { + ETHTOOL_A_STATS_GRP_UNSPEC = 0, + ETHTOOL_A_STATS_GRP_PAD = 1, + ETHTOOL_A_STATS_GRP_ID = 2, + ETHTOOL_A_STATS_GRP_SS_ID = 3, + ETHTOOL_A_STATS_GRP_STAT = 4, + ETHTOOL_A_STATS_GRP_HIST_RX = 5, + ETHTOOL_A_STATS_GRP_HIST_TX = 6, + ETHTOOL_A_STATS_GRP_HIST_BKT_LOW = 7, + ETHTOOL_A_STATS_GRP_HIST_BKT_HI = 8, + ETHTOOL_A_STATS_GRP_HIST_VAL = 9, + __ETHTOOL_A_STATS_GRP_CNT = 10, + ETHTOOL_A_STATS_GRP_MAX = 4, +}; + +struct stats_req_info { + struct ethnl_req_info base; + long unsigned int stat_mask[1]; +}; + +struct stats_reply_data { + struct ethnl_reply_data base; + struct ethtool_eth_phy_stats phy_stats; + struct ethtool_eth_mac_stats mac_stats; + struct ethtool_eth_ctrl_stats ctrl_stats; + struct ethtool_rmon_stats rmon_stats; + const struct ethtool_rmon_hist_range *rmon_ranges; +}; + +struct nf_hook_entries_rcu_head { + struct callback_head head; + void *allocation; +}; + +struct nf_conn___2; + +enum nf_nat_manip_type; + +struct nf_nat_hook { + int (*parse_nat_setup)(struct nf_conn___2 *, enum nf_nat_manip_type, const struct nlattr *); + void (*decode_session)(struct sk_buff *, struct flowi *); + unsigned int (*manip_pkt)(struct sk_buff *, struct nf_conn___2 *, enum nf_nat_manip_type, enum ip_conntrack_dir); +}; + +struct nf_conntrack_tuple___2; + +struct nf_ct_hook { + int (*update)(struct net *, struct sk_buff *); + void (*destroy)(struct nf_conntrack *); + bool (*get_tuple_skb)(struct nf_conntrack_tuple___2 *, const struct sk_buff *); +}; + +struct nfnl_ct_hook { + size_t (*build_size)(const struct nf_conn___2 *); + int (*build)(struct sk_buff *, struct nf_conn___2 *, enum ip_conntrack_info, u_int16_t, u_int16_t); + int (*parse)(const struct nlattr *, struct nf_conn___2 *); + int (*attach_expect)(const struct nlattr *, struct nf_conn___2 *, u32, u32); + void (*seq_adjust)(struct sk_buff *, struct nf_conn___2 *, enum ip_conntrack_info, s32); +}; + +struct nf_ipv6_ops { + void (*route_input)(struct sk_buff *); + int (*fragment)(struct net *, struct sock *, struct sk_buff *, int (*)(struct net *, struct sock *, struct sk_buff *)); + int (*reroute)(struct sk_buff *, const struct nf_queue_entry *); +}; + +struct nf_queue_entry { + struct list_head list; + struct sk_buff *skb; + unsigned int id; + unsigned int hook_index; + struct net_device *physin; + struct net_device *physout; + struct nf_hook_state state; + u16 size; +}; + +struct nf_loginfo { + u_int8_t type; + union { + struct { + u_int32_t copy_len; + u_int16_t group; + u_int16_t qthreshold; + u_int16_t flags; + } ulog; + struct { + u_int8_t level; + u_int8_t logflags; + } log; + } u; +}; + +struct nf_log_buf { + unsigned int count; + char buf[1020]; +}; + +struct nf_bridge_info { + enum { + BRNF_PROTO_UNCHANGED = 0, + BRNF_PROTO_8021Q = 1, + BRNF_PROTO_PPPOE = 2, + } orig_proto: 8; + u8 pkt_otherhost: 1; + u8 in_prerouting: 1; + u8 bridged_dnat: 1; + __u16 frag_max_size; + struct net_device *physindev; + struct net_device *physoutdev; + union { + __be32 ipv4_daddr; + struct in6_addr ipv6_daddr; + char neigh_header[8]; + }; +}; + +struct ip_rt_info { + __be32 daddr; + __be32 saddr; + u_int8_t tos; + u_int32_t mark; +}; + +struct ip6_rt_info { + struct in6_addr daddr; + struct in6_addr saddr; + u_int32_t mark; +}; + +struct nf_sockopt_ops { + struct list_head list; + u_int8_t pf; + int set_optmin; + int set_optmax; + int (*set)(struct sock *, int, sockptr_t, unsigned int); + int get_optmin; + int get_optmax; + int (*get)(struct sock *, 
int, void *, int *); + struct module *owner; +}; + +struct ip_mreqn { + struct in_addr imr_multiaddr; + struct in_addr imr_address; + int imr_ifindex; +}; + +struct rtmsg { + unsigned char rtm_family; + unsigned char rtm_dst_len; + unsigned char rtm_src_len; + unsigned char rtm_tos; + unsigned char rtm_table; + unsigned char rtm_protocol; + unsigned char rtm_scope; + unsigned char rtm_type; + unsigned int rtm_flags; +}; + +struct rtvia { + __kernel_sa_family_t rtvia_family; + __u8 rtvia_addr[0]; +}; + +struct ip_sf_list; + +struct ip_mc_list { + struct in_device *interface; + __be32 multiaddr; + unsigned int sfmode; + struct ip_sf_list *sources; + struct ip_sf_list *tomb; + long unsigned int sfcount[2]; + union { + struct ip_mc_list *next; + struct ip_mc_list *next_rcu; + }; + struct ip_mc_list *next_hash; + struct timer_list timer; + int users; + refcount_t refcnt; + spinlock_t lock; + char tm_running; + char reporter; + char unsolicit_count; + char loaded; + unsigned char gsquery; + unsigned char crcount; + struct callback_head rcu; +}; + +struct ip_sf_socklist { + unsigned int sl_max; + unsigned int sl_count; + struct callback_head rcu; + __be32 sl_addr[0]; +}; + +struct ip_mc_socklist { + struct ip_mc_socklist *next_rcu; + struct ip_mreqn multi; + unsigned int sfmode; + struct ip_sf_socklist *sflist; + struct callback_head rcu; +}; + +struct ip_sf_list { + struct ip_sf_list *sf_next; + long unsigned int sf_count[2]; + __be32 sf_inaddr; + unsigned char sf_gsresp; + unsigned char sf_oldin; + unsigned char sf_crcount; +}; + +struct ipv4_addr_key { + __be32 addr; + int vif; +}; + +struct inetpeer_addr { + union { + struct ipv4_addr_key a4; + struct in6_addr a6; + u32 key[4]; + }; + __u16 family; +}; + +struct inet_peer { + struct rb_node rb_node; + struct inetpeer_addr daddr; + u32 metrics[17]; + u32 rate_tokens; + u32 n_redirects; + long unsigned int rate_last; + union { + struct { + atomic_t rid; + }; + struct callback_head rcu; + }; + __u32 dtime; + refcount_t refcnt; +}; + +struct fib_rt_info { + struct fib_info *fi; + u32 tb_id; + __be32 dst; + int dst_len; + u8 tos; + u8 type; + u8 offload: 1; + u8 trap: 1; + u8 offload_failed: 1; + u8 unused: 5; +}; + +struct uncached_list { + spinlock_t lock; + struct list_head head; +}; + +struct ip_rt_acct { + __u32 o_bytes; + __u32 o_packets; + __u32 i_bytes; + __u32 i_packets; +}; + +struct rt_cache_stat { + unsigned int in_slow_tot; + unsigned int in_slow_mc; + unsigned int in_no_route; + unsigned int in_brd; + unsigned int in_martian_dst; + unsigned int in_martian_src; + unsigned int out_slow_tot; + unsigned int out_slow_mc; +}; + +struct fib_alias { + struct hlist_node fa_list; + struct fib_info *fa_info; + u8 fa_tos; + u8 fa_type; + u8 fa_state; + u8 fa_slen; + u32 tb_id; + s16 fa_default; + u8 offload: 1; + u8 trap: 1; + u8 offload_failed: 1; + u8 unused: 5; + struct callback_head rcu; +}; + +struct fib_prop { + int error; + u8 scope; +}; + +struct net_offload { + struct offload_callbacks callbacks; + unsigned int flags; +}; + +struct raw_hashinfo { + rwlock_t lock; + struct hlist_head ht[256]; +}; + +enum ip_defrag_users { + IP_DEFRAG_LOCAL_DELIVER = 0, + IP_DEFRAG_CALL_RA_CHAIN = 1, + IP_DEFRAG_CONNTRACK_IN = 2, + __IP_DEFRAG_CONNTRACK_IN_END = 65537, + IP_DEFRAG_CONNTRACK_OUT = 65538, + __IP_DEFRAG_CONNTRACK_OUT_END = 131073, + IP_DEFRAG_CONNTRACK_BRIDGE_IN = 131074, + __IP_DEFRAG_CONNTRACK_BRIDGE_IN = 196609, + IP_DEFRAG_VS_IN = 196610, + IP_DEFRAG_VS_OUT = 196611, + IP_DEFRAG_VS_FWD = 196612, + IP_DEFRAG_AF_PACKET = 196613, + 
IP_DEFRAG_MACVLAN = 196614, +}; + +enum { + INET_FRAG_FIRST_IN = 1, + INET_FRAG_LAST_IN = 2, + INET_FRAG_COMPLETE = 4, + INET_FRAG_HASH_DEAD = 8, +}; + +struct ipq { + struct inet_frag_queue q; + u8 ecn; + u16 max_df_size; + int iif; + unsigned int rid; + struct inet_peer *peer; +}; + +struct ip_options_data { + struct ip_options_rcu opt; + char data[40]; +}; + +struct ipcm_cookie { + struct sockcm_cookie sockc; + __be32 addr; + int oif; + struct ip_options_rcu *opt; + __u8 ttl; + __s16 tos; + char priority; + __u16 gso_size; +}; + +struct ip_fraglist_iter { + struct sk_buff *frag; + struct iphdr *iph; + int offset; + unsigned int hlen; +}; + +struct ip_frag_state { + bool DF; + unsigned int hlen; + unsigned int ll_rs; + unsigned int mtu; + unsigned int left; + int offset; + int ptr; + __be16 not_last_frag; +}; + +struct ip_reply_arg { + struct kvec iov[1]; + int flags; + __wsum csum; + int csumoffset; + int bound_dev_if; + u8 tos; + kuid_t uid; +}; + +struct ip_mreq_source { + __be32 imr_multiaddr; + __be32 imr_interface; + __be32 imr_sourceaddr; +}; + +struct ip_msfilter { + __be32 imsf_multiaddr; + __be32 imsf_interface; + __u32 imsf_fmode; + __u32 imsf_numsrc; + __be32 imsf_slist[1]; +}; + +struct group_req { + __u32 gr_interface; + struct __kernel_sockaddr_storage gr_group; +}; + +struct group_source_req { + __u32 gsr_interface; + struct __kernel_sockaddr_storage gsr_group; + struct __kernel_sockaddr_storage gsr_source; +}; + +struct group_filter { + __u32 gf_interface; + struct __kernel_sockaddr_storage gf_group; + __u32 gf_fmode; + __u32 gf_numsrc; + struct __kernel_sockaddr_storage gf_slist[1]; +}; + +struct in_pktinfo { + int ipi_ifindex; + struct in_addr ipi_spec_dst; + struct in_addr ipi_addr; +}; + +struct compat_group_req { + __u32 gr_interface; + struct __kernel_sockaddr_storage gr_group; +} __attribute__((packed)); + +struct compat_group_source_req { + __u32 gsr_interface; + struct __kernel_sockaddr_storage gsr_group; + struct __kernel_sockaddr_storage gsr_source; +} __attribute__((packed)); + +struct compat_group_filter { + __u32 gf_interface; + struct __kernel_sockaddr_storage gf_group; + __u32 gf_fmode; + __u32 gf_numsrc; + struct __kernel_sockaddr_storage gf_slist[1]; +} __attribute__((packed)); + +enum { + BPFILTER_IPT_SO_SET_REPLACE = 64, + BPFILTER_IPT_SO_SET_ADD_COUNTERS = 65, + BPFILTER_IPT_SET_MAX = 66, +}; + +enum { + BPFILTER_IPT_SO_GET_INFO = 64, + BPFILTER_IPT_SO_GET_ENTRIES = 65, + BPFILTER_IPT_SO_GET_REVISION_MATCH = 66, + BPFILTER_IPT_SO_GET_REVISION_TARGET = 67, + BPFILTER_IPT_GET_MAX = 68, +}; + +struct tcpvegas_info { + __u32 tcpv_enabled; + __u32 tcpv_rttcnt; + __u32 tcpv_rtt; + __u32 tcpv_minrtt; +}; + +struct tcp_dctcp_info { + __u16 dctcp_enabled; + __u16 dctcp_ce_state; + __u32 dctcp_alpha; + __u32 dctcp_ab_ecn; + __u32 dctcp_ab_tot; +}; + +struct tcp_bbr_info { + __u32 bbr_bw_lo; + __u32 bbr_bw_hi; + __u32 bbr_min_rtt; + __u32 bbr_pacing_gain; + __u32 bbr_cwnd_gain; +}; + +union tcp_cc_info { + struct tcpvegas_info vegas; + struct tcp_dctcp_info dctcp; + struct tcp_bbr_info bbr; +}; + +enum { + BPF_TCP_ESTABLISHED = 1, + BPF_TCP_SYN_SENT = 2, + BPF_TCP_SYN_RECV = 3, + BPF_TCP_FIN_WAIT1 = 4, + BPF_TCP_FIN_WAIT2 = 5, + BPF_TCP_TIME_WAIT = 6, + BPF_TCP_CLOSE = 7, + BPF_TCP_CLOSE_WAIT = 8, + BPF_TCP_LAST_ACK = 9, + BPF_TCP_LISTEN = 10, + BPF_TCP_CLOSING = 11, + BPF_TCP_NEW_SYN_RECV = 12, + BPF_TCP_MAX_STATES = 13, +}; + +enum inet_csk_ack_state_t { + ICSK_ACK_SCHED = 1, + ICSK_ACK_TIMER = 2, + ICSK_ACK_PUSHED = 4, + ICSK_ACK_PUSHED2 = 8, + ICSK_ACK_NOW 
= 16, +}; + +enum { + TCP_FLAG_CWR = 32768, + TCP_FLAG_ECE = 16384, + TCP_FLAG_URG = 8192, + TCP_FLAG_ACK = 4096, + TCP_FLAG_PSH = 2048, + TCP_FLAG_RST = 1024, + TCP_FLAG_SYN = 512, + TCP_FLAG_FIN = 256, + TCP_RESERVED_BITS = 15, + TCP_DATA_OFFSET = 240, +}; + +struct tcp_repair_opt { + __u32 opt_code; + __u32 opt_val; +}; + +struct tcp_repair_window { + __u32 snd_wl1; + __u32 snd_wnd; + __u32 max_window; + __u32 rcv_wnd; + __u32 rcv_wup; +}; + +enum { + TCP_NO_QUEUE = 0, + TCP_RECV_QUEUE = 1, + TCP_SEND_QUEUE = 2, + TCP_QUEUES_NR = 3, +}; + +struct tcp_info { + __u8 tcpi_state; + __u8 tcpi_ca_state; + __u8 tcpi_retransmits; + __u8 tcpi_probes; + __u8 tcpi_backoff; + __u8 tcpi_options; + __u8 tcpi_snd_wscale: 4; + __u8 tcpi_rcv_wscale: 4; + __u8 tcpi_delivery_rate_app_limited: 1; + __u8 tcpi_fastopen_client_fail: 2; + __u32 tcpi_rto; + __u32 tcpi_ato; + __u32 tcpi_snd_mss; + __u32 tcpi_rcv_mss; + __u32 tcpi_unacked; + __u32 tcpi_sacked; + __u32 tcpi_lost; + __u32 tcpi_retrans; + __u32 tcpi_fackets; + __u32 tcpi_last_data_sent; + __u32 tcpi_last_ack_sent; + __u32 tcpi_last_data_recv; + __u32 tcpi_last_ack_recv; + __u32 tcpi_pmtu; + __u32 tcpi_rcv_ssthresh; + __u32 tcpi_rtt; + __u32 tcpi_rttvar; + __u32 tcpi_snd_ssthresh; + __u32 tcpi_snd_cwnd; + __u32 tcpi_advmss; + __u32 tcpi_reordering; + __u32 tcpi_rcv_rtt; + __u32 tcpi_rcv_space; + __u32 tcpi_total_retrans; + __u64 tcpi_pacing_rate; + __u64 tcpi_max_pacing_rate; + __u64 tcpi_bytes_acked; + __u64 tcpi_bytes_received; + __u32 tcpi_segs_out; + __u32 tcpi_segs_in; + __u32 tcpi_notsent_bytes; + __u32 tcpi_min_rtt; + __u32 tcpi_data_segs_in; + __u32 tcpi_data_segs_out; + __u64 tcpi_delivery_rate; + __u64 tcpi_busy_time; + __u64 tcpi_rwnd_limited; + __u64 tcpi_sndbuf_limited; + __u32 tcpi_delivered; + __u32 tcpi_delivered_ce; + __u64 tcpi_bytes_sent; + __u64 tcpi_bytes_retrans; + __u32 tcpi_dsack_dups; + __u32 tcpi_reord_seen; + __u32 tcpi_rcv_ooopack; + __u32 tcpi_snd_wnd; +}; + +enum { + TCP_NLA_PAD = 0, + TCP_NLA_BUSY = 1, + TCP_NLA_RWND_LIMITED = 2, + TCP_NLA_SNDBUF_LIMITED = 3, + TCP_NLA_DATA_SEGS_OUT = 4, + TCP_NLA_TOTAL_RETRANS = 5, + TCP_NLA_PACING_RATE = 6, + TCP_NLA_DELIVERY_RATE = 7, + TCP_NLA_SND_CWND = 8, + TCP_NLA_REORDERING = 9, + TCP_NLA_MIN_RTT = 10, + TCP_NLA_RECUR_RETRANS = 11, + TCP_NLA_DELIVERY_RATE_APP_LMT = 12, + TCP_NLA_SNDQ_SIZE = 13, + TCP_NLA_CA_STATE = 14, + TCP_NLA_SND_SSTHRESH = 15, + TCP_NLA_DELIVERED = 16, + TCP_NLA_DELIVERED_CE = 17, + TCP_NLA_BYTES_SENT = 18, + TCP_NLA_BYTES_RETRANS = 19, + TCP_NLA_DSACK_DUPS = 20, + TCP_NLA_REORD_SEEN = 21, + TCP_NLA_SRTT = 22, + TCP_NLA_TIMEOUT_REHASH = 23, + TCP_NLA_BYTES_NOTSENT = 24, + TCP_NLA_EDT = 25, + TCP_NLA_TTL = 26, +}; + +struct tcp_zerocopy_receive { + __u64 address; + __u32 length; + __u32 recv_skip_hint; + __u32 inq; + __s32 err; + __u64 copybuf_address; + __s32 copybuf_len; + __u32 flags; + __u64 msg_control; + __u64 msg_controllen; + __u32 msg_flags; + __u32 reserved; +}; + +struct tcp_md5sig_pool { + struct ahash_request *md5_req; + void *scratch; +}; + +enum tcp_chrono { + TCP_CHRONO_UNSPEC = 0, + TCP_CHRONO_BUSY = 1, + TCP_CHRONO_RWND_LIMITED = 2, + TCP_CHRONO_SNDBUF_LIMITED = 3, + __TCP_CHRONO_MAX = 4, +}; + +enum { + TCP_CMSG_INQ = 1, + TCP_CMSG_TS = 2, +}; + +struct tcp_splice_state { + struct pipe_inode_info *pipe; + size_t len; + unsigned int flags; +}; + +enum tcp_fastopen_client_fail { + TFO_STATUS_UNSPEC = 0, + TFO_COOKIE_UNAVAILABLE = 1, + TFO_DATA_NOT_ACKED = 2, + TFO_SYN_RETRANSMITTED = 3, +}; + +struct tcp_sack_block_wire { + __be32 start_seq; 
+ __be32 end_seq; +}; + +struct static_key_false_deferred { + struct static_key_false key; + long unsigned int timeout; + struct delayed_work work; +}; + +struct mptcp_ext { + union { + u64 data_ack; + u32 data_ack32; + }; + u64 data_seq; + u32 subflow_seq; + u16 data_len; + u8 use_map: 1; + u8 dsn64: 1; + u8 data_fin: 1; + u8 use_ack: 1; + u8 ack64: 1; + u8 mpc_map: 1; + u8 frozen: 1; + u8 reset_transient: 1; + u8 reset_reason: 4; +}; + +enum tcp_queue { + TCP_FRAG_IN_WRITE_QUEUE = 0, + TCP_FRAG_IN_RTX_QUEUE = 1, +}; + +enum tcp_ca_ack_event_flags { + CA_ACK_SLOWPATH = 1, + CA_ACK_WIN_UPDATE = 2, + CA_ACK_ECE = 4, +}; + +struct tcp_sacktag_state { + u64 first_sackt; + u64 last_sackt; + u32 reord; + u32 sack_delivered; + int flag; + unsigned int mss_now; + struct rate_sample *rate; +}; + +enum pkt_hash_types { + PKT_HASH_TYPE_NONE = 0, + PKT_HASH_TYPE_L2 = 1, + PKT_HASH_TYPE_L3 = 2, + PKT_HASH_TYPE_L4 = 3, +}; + +enum { + BPF_WRITE_HDR_TCP_CURRENT_MSS = 1, + BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2, +}; + +enum tsq_flags { + TSQF_THROTTLED = 1, + TSQF_QUEUED = 2, + TCPF_TSQ_DEFERRED = 4, + TCPF_WRITE_TIMER_DEFERRED = 8, + TCPF_DELACK_TIMER_DEFERRED = 16, + TCPF_MTU_REDUCED_DEFERRED = 32, +}; + +struct mptcp_rm_list { + u8 ids[8]; + u8 nr; +}; + +struct mptcp_addr_info { + u8 id; + sa_family_t family; + __be16 port; + union { + struct in_addr addr; + struct in6_addr addr6; + }; +}; + +struct mptcp_out_options { + u16 suboptions; + u64 sndr_key; + u64 rcvr_key; + u64 ahmac; + struct mptcp_addr_info addr; + struct mptcp_rm_list rm_list; + u8 join_id; + u8 backup; + u8 reset_reason: 4; + u8 reset_transient: 1; + u32 nonce; + u64 thmac; + u32 token; + u8 hmac[20]; + struct mptcp_ext ext_copy; +}; + +struct tcp_out_options { + u16 options; + u16 mss; + u8 ws; + u8 num_sack_blocks; + u8 hash_size; + u8 bpf_opt_len; + __u8 *hash_location; + __u32 tsval; + __u32 tsecr; + struct tcp_fastopen_cookie *fastopen_cookie; + struct mptcp_out_options mptcp; +}; + +struct tsq_tasklet { + struct tasklet_struct tasklet; + struct list_head head; +}; + +struct tcp_md5sig { + struct __kernel_sockaddr_storage tcpm_addr; + __u8 tcpm_flags; + __u8 tcpm_prefixlen; + __u16 tcpm_keylen; + int tcpm_ifindex; + __u8 tcpm_key[80]; +}; + +struct icmp_err { + int errno; + unsigned int fatal: 1; +}; + +enum tcp_tw_status { + TCP_TW_SUCCESS = 0, + TCP_TW_RST = 1, + TCP_TW_ACK = 2, + TCP_TW_SYN = 3, +}; + +struct tcp4_pseudohdr { + __be32 saddr; + __be32 daddr; + __u8 pad; + __u8 protocol; + __be16 len; +}; + +enum tcp_seq_states { + TCP_SEQ_STATE_LISTENING = 0, + TCP_SEQ_STATE_ESTABLISHED = 1, +}; + +struct tcp_seq_afinfo { + sa_family_t family; +}; + +struct tcp_iter_state { + struct seq_net_private p; + enum tcp_seq_states state; + struct sock *syn_wait_sk; + struct tcp_seq_afinfo *bpf_seq_afinfo; + int bucket; + int offset; + int sbucket; + int num; + loff_t last_pos; +}; + +struct bpf_iter__tcp { + union { + struct bpf_iter_meta *meta; + }; + union { + struct sock_common *sk_common; + }; + uid_t uid; +}; + +enum tcp_metric_index { + TCP_METRIC_RTT = 0, + TCP_METRIC_RTTVAR = 1, + TCP_METRIC_SSTHRESH = 2, + TCP_METRIC_CWND = 3, + TCP_METRIC_REORDERING = 4, + TCP_METRIC_RTT_US = 5, + TCP_METRIC_RTTVAR_US = 6, + __TCP_METRIC_MAX = 7, +}; + +enum { + TCP_METRICS_ATTR_UNSPEC = 0, + TCP_METRICS_ATTR_ADDR_IPV4 = 1, + TCP_METRICS_ATTR_ADDR_IPV6 = 2, + TCP_METRICS_ATTR_AGE = 3, + TCP_METRICS_ATTR_TW_TSVAL = 4, + TCP_METRICS_ATTR_TW_TS_STAMP = 5, + TCP_METRICS_ATTR_VALS = 6, + TCP_METRICS_ATTR_FOPEN_MSS = 7, + 
TCP_METRICS_ATTR_FOPEN_SYN_DROPS = 8, + TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS = 9, + TCP_METRICS_ATTR_FOPEN_COOKIE = 10, + TCP_METRICS_ATTR_SADDR_IPV4 = 11, + TCP_METRICS_ATTR_SADDR_IPV6 = 12, + TCP_METRICS_ATTR_PAD = 13, + __TCP_METRICS_ATTR_MAX = 14, +}; + +enum { + TCP_METRICS_CMD_UNSPEC = 0, + TCP_METRICS_CMD_GET = 1, + TCP_METRICS_CMD_DEL = 2, + __TCP_METRICS_CMD_MAX = 3, +}; + +struct tcp_fastopen_metrics { + u16 mss; + u16 syn_loss: 10; + u16 try_exp: 2; + long unsigned int last_syn_loss; + struct tcp_fastopen_cookie cookie; +}; + +struct tcp_metrics_block { + struct tcp_metrics_block *tcpm_next; + possible_net_t tcpm_net; + struct inetpeer_addr tcpm_saddr; + struct inetpeer_addr tcpm_daddr; + long unsigned int tcpm_stamp; + u32 tcpm_lock; + u32 tcpm_vals[5]; + struct tcp_fastopen_metrics tcpm_fastopen; + struct callback_head callback_head; +}; + +struct tcpm_hash_bucket { + struct tcp_metrics_block *chain; +}; + +struct icmp_filter { + __u32 data; +}; + +struct raw_iter_state { + struct seq_net_private p; + int bucket; +}; + +struct raw_sock { + struct inet_sock inet; + struct icmp_filter filter; + u32 ipmr_table; +}; + +struct raw_frag_vec { + struct msghdr *msg; + union { + struct icmphdr icmph; + char c[1]; + } hdr; + int hlen; +}; + +struct ip_tunnel_encap { + u16 type; + u16 flags; + __be16 sport; + __be16 dport; +}; + +struct ip_tunnel_encap_ops { + size_t (*encap_hlen)(struct ip_tunnel_encap *); + int (*build_header)(struct sk_buff *, struct ip_tunnel_encap *, u8 *, struct flowi4 *); + int (*err_handler)(struct sk_buff *, u32); +}; + +struct udp_skb_cb { + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + } header; + __u16 cscov; + __u8 partial_cov; +}; + +struct udp_dev_scratch { + u32 _tsize_state; + u16 len; + bool is_linear; + bool csum_unnecessary; +}; + +struct udp_seq_afinfo { + sa_family_t family; + struct udp_table *udp_table; +}; + +struct udp_iter_state { + struct seq_net_private p; + int bucket; + struct udp_seq_afinfo *bpf_seq_afinfo; +}; + +struct bpf_iter__udp { + union { + struct bpf_iter_meta *meta; + }; + union { + struct udp_sock *udp_sk; + }; + uid_t uid; + int: 32; + int bucket; +}; + +struct inet_protosw { + struct list_head list; + short unsigned int type; + short unsigned int protocol; + struct proto *prot; + const struct proto_ops *ops; + unsigned char flags; +}; + +typedef struct sk_buff * (*gro_receive_t)(struct list_head *, struct sk_buff *); + +typedef struct sk_buff * (*gro_receive_sk_t)(struct sock *, struct list_head *, struct sk_buff *); + +typedef struct sock * (*udp_lookup_t)(const struct sk_buff *, __be16, __be16); + +struct arpreq { + struct sockaddr arp_pa; + struct sockaddr arp_ha; + int arp_flags; + struct sockaddr arp_netmask; + char arp_dev[16]; +}; + +typedef struct { + char ax25_call[7]; +} ax25_address; + +enum ip_conntrack_status { + IPS_EXPECTED_BIT = 0, + IPS_EXPECTED = 1, + IPS_SEEN_REPLY_BIT = 1, + IPS_SEEN_REPLY = 2, + IPS_ASSURED_BIT = 2, + IPS_ASSURED = 4, + IPS_CONFIRMED_BIT = 3, + IPS_CONFIRMED = 8, + IPS_SRC_NAT_BIT = 4, + IPS_SRC_NAT = 16, + IPS_DST_NAT_BIT = 5, + IPS_DST_NAT = 32, + IPS_NAT_MASK = 48, + IPS_SEQ_ADJUST_BIT = 6, + IPS_SEQ_ADJUST = 64, + IPS_SRC_NAT_DONE_BIT = 7, + IPS_SRC_NAT_DONE = 128, + IPS_DST_NAT_DONE_BIT = 8, + IPS_DST_NAT_DONE = 256, + IPS_NAT_DONE_MASK = 384, + IPS_DYING_BIT = 9, + IPS_DYING = 512, + IPS_FIXED_TIMEOUT_BIT = 10, + IPS_FIXED_TIMEOUT = 1024, + IPS_TEMPLATE_BIT = 11, + IPS_TEMPLATE = 2048, + IPS_UNTRACKED_BIT = 12, + IPS_UNTRACKED = 4096, + IPS_NAT_CLASH_BIT = 12, + 
IPS_NAT_CLASH = 4096, + IPS_HELPER_BIT = 13, + IPS_HELPER = 8192, + IPS_OFFLOAD_BIT = 14, + IPS_OFFLOAD = 16384, + IPS_HW_OFFLOAD_BIT = 15, + IPS_HW_OFFLOAD = 32768, + IPS_UNCHANGEABLE_MASK = 56313, + __IPS_MAX_BIT = 16, +}; + +enum { + XFRM_LOOKUP_ICMP = 1, + XFRM_LOOKUP_QUEUE = 2, + XFRM_LOOKUP_KEEP_DST_REF = 4, +}; + +struct icmp_ext_hdr { + __u8 reserved1: 4; + __u8 version: 4; + __u8 reserved2; + __sum16 checksum; +}; + +struct icmp_extobj_hdr { + __be16 length; + __u8 class_num; + __u8 class_type; +}; + +struct icmp_ext_echo_ctype3_hdr { + __be16 afi; + __u8 addrlen; + __u8 reserved; +}; + +struct icmp_ext_echo_iio { + struct icmp_extobj_hdr extobj_hdr; + union { + char name[16]; + __be32 ifindex; + struct { + struct icmp_ext_echo_ctype3_hdr ctype3_hdr; + union { + __be32 ipv4_addr; + struct in6_addr ipv6_addr; + } ip_addr; + } addr; + } ident; +}; + +struct icmp_bxm { + struct sk_buff *skb; + int offset; + int data_len; + struct { + struct icmphdr icmph; + __be32 times[3]; + } data; + int head_len; + struct ip_options_data replyopts; +}; + +struct icmp_control { + bool (*handler)(struct sk_buff *); + short int error; +}; + +struct ifaddrmsg { + __u8 ifa_family; + __u8 ifa_prefixlen; + __u8 ifa_flags; + __u8 ifa_scope; + __u32 ifa_index; +}; + +enum { + IFA_UNSPEC = 0, + IFA_ADDRESS = 1, + IFA_LOCAL = 2, + IFA_LABEL = 3, + IFA_BROADCAST = 4, + IFA_ANYCAST = 5, + IFA_CACHEINFO = 6, + IFA_MULTICAST = 7, + IFA_FLAGS = 8, + IFA_RT_PRIORITY = 9, + IFA_TARGET_NETNSID = 10, + __IFA_MAX = 11, +}; + +struct ifa_cacheinfo { + __u32 ifa_prefered; + __u32 ifa_valid; + __u32 cstamp; + __u32 tstamp; +}; + +enum { + IFLA_INET_UNSPEC = 0, + IFLA_INET_CONF = 1, + __IFLA_INET_MAX = 2, +}; + +struct in_validator_info { + __be32 ivi_addr; + struct in_device *ivi_dev; + struct netlink_ext_ack *extack; +}; + +struct netconfmsg { + __u8 ncm_family; +}; + +enum { + NETCONFA_UNSPEC = 0, + NETCONFA_IFINDEX = 1, + NETCONFA_FORWARDING = 2, + NETCONFA_RP_FILTER = 3, + NETCONFA_MC_FORWARDING = 4, + NETCONFA_PROXY_NEIGH = 5, + NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN = 6, + NETCONFA_INPUT = 7, + NETCONFA_BC_FORWARDING = 8, + __NETCONFA_MAX = 9, +}; + +struct inet_fill_args { + u32 portid; + u32 seq; + int event; + unsigned int flags; + int netnsid; + int ifindex; +}; + +struct devinet_sysctl_table { + struct ctl_table_header *sysctl_header; + struct ctl_table devinet_vars[33]; +}; + +struct rtentry { + long unsigned int rt_pad1; + struct sockaddr rt_dst; + struct sockaddr rt_gateway; + struct sockaddr rt_genmask; + short unsigned int rt_flags; + short int rt_pad2; + long unsigned int rt_pad3; + void *rt_pad4; + short int rt_metric; + char *rt_dev; + long unsigned int rt_mtu; + long unsigned int rt_window; + short unsigned int rt_irtt; +}; + +struct pingv6_ops { + int (*ipv6_recv_error)(struct sock *, struct msghdr *, int, int *); + void (*ip6_datagram_recv_common_ctl)(struct sock *, struct msghdr *, struct sk_buff *); + void (*ip6_datagram_recv_specific_ctl)(struct sock *, struct msghdr *, struct sk_buff *); + int (*icmpv6_err_convert)(u8, u8, int *); + void (*ipv6_icmp_error)(struct sock *, struct sk_buff *, int, __be16, u32, u8 *); + int (*ipv6_chk_addr)(struct net *, const struct in6_addr *, const struct net_device *, int); +}; + +struct compat_rtentry { + u32 rt_pad1; + struct sockaddr rt_dst; + struct sockaddr rt_gateway; + struct sockaddr rt_genmask; + short unsigned int rt_flags; + short int rt_pad2; + u32 rt_pad3; + unsigned char rt_tos; + unsigned char rt_class; + short int rt_pad4; + short int rt_metric; + 
compat_uptr_t rt_dev; + u32 rt_mtu; + u32 rt_window; + short unsigned int rt_irtt; +}; + +struct igmphdr { + __u8 type; + __u8 code; + __sum16 csum; + __be32 group; +}; + +struct igmpv3_grec { + __u8 grec_type; + __u8 grec_auxwords; + __be16 grec_nsrcs; + __be32 grec_mca; + __be32 grec_src[0]; +}; + +struct igmpv3_report { + __u8 type; + __u8 resv1; + __sum16 csum; + __be16 resv2; + __be16 ngrec; + struct igmpv3_grec grec[0]; +}; + +struct igmpv3_query { + __u8 type; + __u8 code; + __sum16 csum; + __be32 group; + __u8 qrv: 3; + __u8 suppress: 1; + __u8 resv: 4; + __u8 qqic; + __be16 nsrcs; + __be32 srcs[0]; +}; + +struct igmp_mc_iter_state { + struct seq_net_private p; + struct net_device *dev; + struct in_device *in_dev; +}; + +struct igmp_mcf_iter_state { + struct seq_net_private p; + struct net_device *dev; + struct in_device *idev; + struct ip_mc_list *im; +}; + +struct fib_config { + u8 fc_dst_len; + u8 fc_tos; + u8 fc_protocol; + u8 fc_scope; + u8 fc_type; + u8 fc_gw_family; + u32 fc_table; + __be32 fc_dst; + union { + __be32 fc_gw4; + struct in6_addr fc_gw6; + }; + int fc_oif; + u32 fc_flags; + u32 fc_priority; + __be32 fc_prefsrc; + u32 fc_nh_id; + struct nlattr *fc_mx; + struct rtnexthop *fc_mp; + int fc_mx_len; + int fc_mp_len; + u32 fc_flow; + u32 fc_nlflags; + struct nl_info fc_nlinfo; + struct nlattr *fc_encap; + u16 fc_encap_type; +}; + +struct fib_result_nl { + __be32 fl_addr; + u32 fl_mark; + unsigned char fl_tos; + unsigned char fl_scope; + unsigned char tb_id_in; + unsigned char tb_id; + unsigned char prefixlen; + unsigned char nh_sel; + unsigned char type; + unsigned char scope; + int err; +}; + +struct fib_dump_filter { + u32 table_id; + bool filter_set; + bool dump_routes; + bool dump_exceptions; + unsigned char protocol; + unsigned char rt_type; + unsigned int flags; + struct net_device *dev; +}; + +struct fib_nh_notifier_info { + struct fib_notifier_info info; + struct fib_nh *fib_nh; +}; + +struct fib_entry_notifier_info { + struct fib_notifier_info info; + u32 dst; + int dst_len; + struct fib_info *fi; + u8 tos; + u8 type; + u32 tb_id; +}; + +typedef unsigned int t_key; + +struct key_vector { + t_key key; + unsigned char pos; + unsigned char bits; + unsigned char slen; + union { + struct hlist_head leaf; + struct key_vector *tnode[0]; + }; +}; + +struct tnode { + struct callback_head rcu; + t_key empty_children; + t_key full_children; + struct key_vector *parent; + struct key_vector kv[1]; +}; + +struct trie_use_stats { + unsigned int gets; + unsigned int backtrack; + unsigned int semantic_match_passed; + unsigned int semantic_match_miss; + unsigned int null_node_hit; + unsigned int resize_node_skipped; +}; + +struct trie_stat { + unsigned int totdepth; + unsigned int maxdepth; + unsigned int tnodes; + unsigned int leaves; + unsigned int nullpointers; + unsigned int prefixes; + unsigned int nodesizes[32]; +}; + +struct trie { + struct key_vector kv[1]; + struct trie_use_stats *stats; +}; + +struct fib_trie_iter { + struct seq_net_private p; + struct fib_table *tb; + struct key_vector *tnode; + unsigned int index; + unsigned int depth; +}; + +struct fib_route_iter { + struct seq_net_private p; + struct fib_table *main_tb; + struct key_vector *tnode; + loff_t pos; + t_key key; +}; + +struct ipfrag_skb_cb { + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + }; + struct sk_buff *next_frag; + int frag_run_len; +}; + +struct icmpv6_echo { + __be16 identifier; + __be16 sequence; +}; + +struct icmpv6_nd_advt { + __u32 reserved: 5; + __u32 override: 1; + 
__u32 solicited: 1; + __u32 router: 1; + __u32 reserved2: 24; +}; + +struct icmpv6_nd_ra { + __u8 hop_limit; + __u8 reserved: 3; + __u8 router_pref: 2; + __u8 home_agent: 1; + __u8 other: 1; + __u8 managed: 1; + __be16 rt_lifetime; +}; + +struct icmp6hdr { + __u8 icmp6_type; + __u8 icmp6_code; + __sum16 icmp6_cksum; + union { + __be32 un_data32[1]; + __be16 un_data16[2]; + __u8 un_data8[4]; + struct icmpv6_echo u_echo; + struct icmpv6_nd_advt u_nd_advt; + struct icmpv6_nd_ra u_nd_ra; + } icmp6_dataun; +}; + +struct ping_iter_state { + struct seq_net_private p; + int bucket; + sa_family_t family; +}; + +struct pingfakehdr { + struct icmphdr icmph; + struct msghdr *msg; + sa_family_t family; + __wsum wcheck; +}; + +struct ping_table { + struct hlist_nulls_head hash[64]; + rwlock_t lock; +}; + +enum lwtunnel_ip_t { + LWTUNNEL_IP_UNSPEC = 0, + LWTUNNEL_IP_ID = 1, + LWTUNNEL_IP_DST = 2, + LWTUNNEL_IP_SRC = 3, + LWTUNNEL_IP_TTL = 4, + LWTUNNEL_IP_TOS = 5, + LWTUNNEL_IP_FLAGS = 6, + LWTUNNEL_IP_PAD = 7, + LWTUNNEL_IP_OPTS = 8, + __LWTUNNEL_IP_MAX = 9, +}; + +enum lwtunnel_ip6_t { + LWTUNNEL_IP6_UNSPEC = 0, + LWTUNNEL_IP6_ID = 1, + LWTUNNEL_IP6_DST = 2, + LWTUNNEL_IP6_SRC = 3, + LWTUNNEL_IP6_HOPLIMIT = 4, + LWTUNNEL_IP6_TC = 5, + LWTUNNEL_IP6_FLAGS = 6, + LWTUNNEL_IP6_PAD = 7, + LWTUNNEL_IP6_OPTS = 8, + __LWTUNNEL_IP6_MAX = 9, +}; + +enum { + LWTUNNEL_IP_OPTS_UNSPEC = 0, + LWTUNNEL_IP_OPTS_GENEVE = 1, + LWTUNNEL_IP_OPTS_VXLAN = 2, + LWTUNNEL_IP_OPTS_ERSPAN = 3, + __LWTUNNEL_IP_OPTS_MAX = 4, +}; + +enum { + LWTUNNEL_IP_OPT_GENEVE_UNSPEC = 0, + LWTUNNEL_IP_OPT_GENEVE_CLASS = 1, + LWTUNNEL_IP_OPT_GENEVE_TYPE = 2, + LWTUNNEL_IP_OPT_GENEVE_DATA = 3, + __LWTUNNEL_IP_OPT_GENEVE_MAX = 4, +}; + +enum { + LWTUNNEL_IP_OPT_VXLAN_UNSPEC = 0, + LWTUNNEL_IP_OPT_VXLAN_GBP = 1, + __LWTUNNEL_IP_OPT_VXLAN_MAX = 2, +}; + +enum { + LWTUNNEL_IP_OPT_ERSPAN_UNSPEC = 0, + LWTUNNEL_IP_OPT_ERSPAN_VER = 1, + LWTUNNEL_IP_OPT_ERSPAN_INDEX = 2, + LWTUNNEL_IP_OPT_ERSPAN_DIR = 3, + LWTUNNEL_IP_OPT_ERSPAN_HWID = 4, + __LWTUNNEL_IP_OPT_ERSPAN_MAX = 5, +}; + +struct ip6_tnl_encap_ops { + size_t (*encap_hlen)(struct ip_tunnel_encap *); + int (*build_header)(struct sk_buff *, struct ip_tunnel_encap *, u8 *, struct flowi6 *); + int (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32); +}; + +struct geneve_opt { + __be16 opt_class; + u8 type; + u8 length: 5; + u8 r3: 1; + u8 r2: 1; + u8 r1: 1; + u8 opt_data[0]; +}; + +struct vxlan_metadata { + u32 gbp; +}; + +struct erspan_md2 { + __be32 timestamp; + __be16 sgt; + __u8 hwid_upper: 2; + __u8 ft: 5; + __u8 p: 1; + __u8 o: 1; + __u8 gra: 2; + __u8 dir: 1; + __u8 hwid: 4; +}; + +struct erspan_metadata { + int version; + union { + __be32 index; + struct erspan_md2 md2; + } u; +}; + +struct nhmsg { + unsigned char nh_family; + unsigned char nh_scope; + unsigned char nh_protocol; + unsigned char resvd; + unsigned int nh_flags; +}; + +struct nexthop_grp { + __u32 id; + __u8 weight; + __u8 resvd1; + __u16 resvd2; +}; + +enum { + NEXTHOP_GRP_TYPE_MPATH = 0, + NEXTHOP_GRP_TYPE_RES = 1, + __NEXTHOP_GRP_TYPE_MAX = 2, +}; + +enum { + NHA_UNSPEC = 0, + NHA_ID = 1, + NHA_GROUP = 2, + NHA_GROUP_TYPE = 3, + NHA_BLACKHOLE = 4, + NHA_OIF = 5, + NHA_GATEWAY = 6, + NHA_ENCAP_TYPE = 7, + NHA_ENCAP = 8, + NHA_GROUPS = 9, + NHA_MASTER = 10, + NHA_FDB = 11, + NHA_RES_GROUP = 12, + NHA_RES_BUCKET = 13, + __NHA_MAX = 14, +}; + +enum { + NHA_RES_GROUP_UNSPEC = 0, + NHA_RES_GROUP_PAD = 0, + NHA_RES_GROUP_BUCKETS = 1, + NHA_RES_GROUP_IDLE_TIMER = 2, + NHA_RES_GROUP_UNBALANCED_TIMER = 3, + 
NHA_RES_GROUP_UNBALANCED_TIME = 4, + __NHA_RES_GROUP_MAX = 5, +}; + +enum { + NHA_RES_BUCKET_UNSPEC = 0, + NHA_RES_BUCKET_PAD = 0, + NHA_RES_BUCKET_INDEX = 1, + NHA_RES_BUCKET_IDLE_TIME = 2, + NHA_RES_BUCKET_NH_ID = 3, + __NHA_RES_BUCKET_MAX = 4, +}; + +struct nh_config { + u32 nh_id; + u8 nh_family; + u8 nh_protocol; + u8 nh_blackhole; + u8 nh_fdb; + u32 nh_flags; + int nh_ifindex; + struct net_device *dev; + union { + __be32 ipv4; + struct in6_addr ipv6; + } gw; + struct nlattr *nh_grp; + u16 nh_grp_type; + u16 nh_grp_res_num_buckets; + long unsigned int nh_grp_res_idle_timer; + long unsigned int nh_grp_res_unbalanced_timer; + bool nh_grp_res_has_num_buckets; + bool nh_grp_res_has_idle_timer; + bool nh_grp_res_has_unbalanced_timer; + struct nlattr *nh_encap; + u16 nh_encap_type; + u32 nlflags; + struct nl_info nlinfo; +}; + +enum nexthop_event_type { + NEXTHOP_EVENT_DEL = 0, + NEXTHOP_EVENT_REPLACE = 1, + NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE = 2, + NEXTHOP_EVENT_BUCKET_REPLACE = 3, +}; + +enum nh_notifier_info_type { + NH_NOTIFIER_INFO_TYPE_SINGLE = 0, + NH_NOTIFIER_INFO_TYPE_GRP = 1, + NH_NOTIFIER_INFO_TYPE_RES_TABLE = 2, + NH_NOTIFIER_INFO_TYPE_RES_BUCKET = 3, +}; + +struct nh_notifier_single_info { + struct net_device *dev; + u8 gw_family; + union { + __be32 ipv4; + struct in6_addr ipv6; + }; + u8 is_reject: 1; + u8 is_fdb: 1; + u8 has_encap: 1; +}; + +struct nh_notifier_grp_entry_info { + u8 weight; + u32 id; + struct nh_notifier_single_info nh; +}; + +struct nh_notifier_grp_info { + u16 num_nh; + bool is_fdb; + struct nh_notifier_grp_entry_info nh_entries[0]; +}; + +struct nh_notifier_res_bucket_info { + u16 bucket_index; + unsigned int idle_timer_ms; + bool force; + struct nh_notifier_single_info old_nh; + struct nh_notifier_single_info new_nh; +}; + +struct nh_notifier_res_table_info { + u16 num_nh_buckets; + struct nh_notifier_single_info nhs[0]; +}; + +struct nh_notifier_info { + struct net *net; + struct netlink_ext_ack *extack; + u32 id; + enum nh_notifier_info_type type; + union { + struct nh_notifier_single_info *nh; + struct nh_notifier_grp_info *nh_grp; + struct nh_notifier_res_table_info *nh_res_table; + struct nh_notifier_res_bucket_info *nh_res_bucket; + }; +}; + +struct nh_dump_filter { + u32 nh_id; + int dev_idx; + int master_idx; + bool group_filter; + bool fdb_filter; + u32 res_bucket_nh_id; +}; + +struct rtm_dump_nh_ctx { + u32 idx; +}; + +struct rtm_dump_res_bucket_ctx { + struct rtm_dump_nh_ctx nh; + u16 bucket_index; + u32 done_nh_idx; +}; + +struct rtm_dump_nexthop_bucket_data { + struct rtm_dump_res_bucket_ctx *ctx; + struct nh_dump_filter filter; +}; + +struct bpfilter_umh_ops { + struct umd_info info; + struct mutex lock; + int (*sockopt)(struct sock *, int, sockptr_t, unsigned int, bool); + int (*start)(); +}; + +struct inet6_protocol { + void (*early_demux)(struct sk_buff *); + void (*early_demux_handler)(struct sk_buff *); + int (*handler)(struct sk_buff *); + int (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32); + unsigned int flags; +}; + +struct snmp_mib { + const char *name; + int entry; +}; + +struct fib4_rule { + struct fib_rule common; + u8 dst_len; + u8 src_len; + u8 tos; + __be32 src; + __be32 srcmask; + __be32 dst; + __be32 dstmask; + u32 tclassid; +}; + +enum { + PIM_TYPE_HELLO = 0, + PIM_TYPE_REGISTER = 1, + PIM_TYPE_REGISTER_STOP = 2, + PIM_TYPE_JOIN_PRUNE = 3, + PIM_TYPE_BOOTSTRAP = 4, + PIM_TYPE_ASSERT = 5, + PIM_TYPE_GRAFT = 6, + PIM_TYPE_GRAFT_ACK = 7, + PIM_TYPE_CANDIDATE_RP_ADV = 8, +}; + +struct pimreghdr { 
+ __u8 type; + __u8 reserved; + __be16 csum; + __be32 flags; +}; + +typedef short unsigned int vifi_t; + +struct vifctl { + vifi_t vifc_vifi; + unsigned char vifc_flags; + unsigned char vifc_threshold; + unsigned int vifc_rate_limit; + union { + struct in_addr vifc_lcl_addr; + int vifc_lcl_ifindex; + }; + struct in_addr vifc_rmt_addr; +}; + +struct mfcctl { + struct in_addr mfcc_origin; + struct in_addr mfcc_mcastgrp; + vifi_t mfcc_parent; + unsigned char mfcc_ttls[32]; + unsigned int mfcc_pkt_cnt; + unsigned int mfcc_byte_cnt; + unsigned int mfcc_wrong_if; + int mfcc_expire; +}; + +struct sioc_sg_req { + struct in_addr src; + struct in_addr grp; + long unsigned int pktcnt; + long unsigned int bytecnt; + long unsigned int wrong_if; +}; + +struct sioc_vif_req { + vifi_t vifi; + long unsigned int icount; + long unsigned int ocount; + long unsigned int ibytes; + long unsigned int obytes; +}; + +struct igmpmsg { + __u32 unused1; + __u32 unused2; + unsigned char im_msgtype; + unsigned char im_mbz; + unsigned char im_vif; + unsigned char im_vif_hi; + struct in_addr im_src; + struct in_addr im_dst; +}; + +enum { + IPMRA_TABLE_UNSPEC = 0, + IPMRA_TABLE_ID = 1, + IPMRA_TABLE_CACHE_RES_QUEUE_LEN = 2, + IPMRA_TABLE_MROUTE_REG_VIF_NUM = 3, + IPMRA_TABLE_MROUTE_DO_ASSERT = 4, + IPMRA_TABLE_MROUTE_DO_PIM = 5, + IPMRA_TABLE_VIFS = 6, + IPMRA_TABLE_MROUTE_DO_WRVIFWHOLE = 7, + __IPMRA_TABLE_MAX = 8, +}; + +enum { + IPMRA_VIF_UNSPEC = 0, + IPMRA_VIF = 1, + __IPMRA_VIF_MAX = 2, +}; + +enum { + IPMRA_VIFA_UNSPEC = 0, + IPMRA_VIFA_IFINDEX = 1, + IPMRA_VIFA_VIF_ID = 2, + IPMRA_VIFA_FLAGS = 3, + IPMRA_VIFA_BYTES_IN = 4, + IPMRA_VIFA_BYTES_OUT = 5, + IPMRA_VIFA_PACKETS_IN = 6, + IPMRA_VIFA_PACKETS_OUT = 7, + IPMRA_VIFA_LOCAL_ADDR = 8, + IPMRA_VIFA_REMOTE_ADDR = 9, + IPMRA_VIFA_PAD = 10, + __IPMRA_VIFA_MAX = 11, +}; + +enum { + IPMRA_CREPORT_UNSPEC = 0, + IPMRA_CREPORT_MSGTYPE = 1, + IPMRA_CREPORT_VIF_ID = 2, + IPMRA_CREPORT_SRC_ADDR = 3, + IPMRA_CREPORT_DST_ADDR = 4, + IPMRA_CREPORT_PKT = 5, + IPMRA_CREPORT_TABLE = 6, + __IPMRA_CREPORT_MAX = 7, +}; + +struct vif_device { + struct net_device *dev; + long unsigned int bytes_in; + long unsigned int bytes_out; + long unsigned int pkt_in; + long unsigned int pkt_out; + long unsigned int rate_limit; + unsigned char threshold; + short unsigned int flags; + int link; + struct netdev_phys_item_id dev_parent_id; + __be32 local; + __be32 remote; +}; + +struct vif_entry_notifier_info { + struct fib_notifier_info info; + struct net_device *dev; + short unsigned int vif_index; + short unsigned int vif_flags; + u32 tb_id; +}; + +enum { + MFC_STATIC = 1, + MFC_OFFLOAD = 2, +}; + +struct mr_mfc { + struct rhlist_head mnode; + short unsigned int mfc_parent; + int mfc_flags; + union { + struct { + long unsigned int expires; + struct sk_buff_head unresolved; + } unres; + struct { + long unsigned int last_assert; + int minvif; + int maxvif; + long unsigned int bytes; + long unsigned int pkt; + long unsigned int wrong_if; + long unsigned int lastuse; + unsigned char ttls[32]; + refcount_t refcount; + } res; + } mfc_un; + struct list_head list; + struct callback_head rcu; + void (*free)(struct callback_head *); +}; + +struct mfc_entry_notifier_info { + struct fib_notifier_info info; + struct mr_mfc *mfc; + u32 tb_id; +}; + +struct mr_table_ops { + const struct rhashtable_params *rht_params; + void *cmparg_any; +}; + +struct mr_table { + struct list_head list; + possible_net_t net; + struct mr_table_ops ops; + u32 id; + struct sock *mroute_sk; + struct timer_list ipmr_expire_timer; + 
struct list_head mfc_unres_queue; + struct vif_device vif_table[32]; + struct rhltable mfc_hash; + struct list_head mfc_cache_list; + int maxvif; + atomic_t cache_resolve_queue_len; + bool mroute_do_assert; + bool mroute_do_pim; + bool mroute_do_wrvifwhole; + int mroute_reg_vif_num; +}; + +struct mr_vif_iter { + struct seq_net_private p; + struct mr_table *mrt; + int ct; +}; + +struct mr_mfc_iter { + struct seq_net_private p; + struct mr_table *mrt; + struct list_head *cache; + spinlock_t *lock; +}; + +struct mfc_cache_cmp_arg { + __be32 mfc_mcastgrp; + __be32 mfc_origin; +}; + +struct mfc_cache { + struct mr_mfc _c; + union { + struct { + __be32 mfc_mcastgrp; + __be32 mfc_origin; + }; + struct mfc_cache_cmp_arg cmparg; + }; +}; + +struct ipmr_result { + struct mr_table *mrt; +}; + +struct compat_sioc_sg_req { + struct in_addr src; + struct in_addr grp; + compat_ulong_t pktcnt; + compat_ulong_t bytecnt; + compat_ulong_t wrong_if; +}; + +struct compat_sioc_vif_req { + vifi_t vifi; + compat_ulong_t icount; + compat_ulong_t ocount; + compat_ulong_t ibytes; + compat_ulong_t obytes; +}; + +struct rta_mfc_stats { + __u64 mfcs_packets; + __u64 mfcs_bytes; + __u64 mfcs_wrong_if; +}; + +struct bictcp { + u32 cnt; + u32 last_max_cwnd; + u32 last_cwnd; + u32 last_time; + u32 bic_origin_point; + u32 bic_K; + u32 delay_min; + u32 epoch_start; + u32 ack_cnt; + u32 tcp_cwnd; + u16 unused; + u8 sample_cnt; + u8 found; + u32 round_start; + u32 end_seq; + u32 last_ack; + u32 curr_rtt; +}; + +struct tls_rec { + struct list_head list; + int tx_ready; + int tx_flags; + struct sk_msg msg_plaintext; + struct sk_msg msg_encrypted; + struct scatterlist sg_aead_in[2]; + struct scatterlist sg_aead_out[2]; + char content_type; + struct scatterlist sg_content_type; + char aad_space[13]; + u8 iv_data[16]; + struct aead_request aead_req; + u8 aead_req_ctx[0]; +}; + +struct tx_work { + struct delayed_work work; + struct sock *sk; +}; + +struct tls_sw_context_tx { + struct crypto_aead *aead_send; + struct crypto_wait async_wait; + struct tx_work tx_work; + struct tls_rec *open_rec; + struct list_head tx_list; + atomic_t encrypt_pending; + spinlock_t encrypt_compl_lock; + int async_notify; + u8 async_capable: 1; + long unsigned int tx_bitmask; +}; + +enum { + TCP_BPF_IPV4 = 0, + TCP_BPF_IPV6 = 1, + TCP_BPF_NUM_PROTS = 2, +}; + +enum { + TCP_BPF_BASE = 0, + TCP_BPF_TX = 1, + TCP_BPF_NUM_CFGS = 2, +}; + +enum { + UDP_BPF_IPV4 = 0, + UDP_BPF_IPV6 = 1, + UDP_BPF_NUM_PROTS = 2, +}; + +struct cipso_v4_map_cache_bkt { + spinlock_t lock; + u32 size; + struct list_head list; +}; + +struct cipso_v4_map_cache_entry { + u32 hash; + unsigned char *key; + size_t key_len; + struct netlbl_lsm_cache *lsm_data; + u32 activity; + struct list_head list; +}; + +struct xfrm_policy_afinfo { + struct dst_ops *dst_ops; + struct dst_entry * (*dst_lookup)(struct net *, int, int, const xfrm_address_t *, const xfrm_address_t *, u32); + int (*get_saddr)(struct net *, int, xfrm_address_t *, xfrm_address_t *, u32); + int (*fill_dst)(struct xfrm_dst *, struct net_device *, const struct flowi *); + struct dst_entry * (*blackhole_route)(struct net *, struct dst_entry *); +}; + +struct xfrm_state_afinfo { + u8 family; + u8 proto; + const struct xfrm_type_offload *type_offload_esp; + const struct xfrm_type *type_esp; + const struct xfrm_type *type_ipip; + const struct xfrm_type *type_ipip6; + const struct xfrm_type *type_comp; + const struct xfrm_type *type_ah; + const struct xfrm_type *type_routing; + const struct xfrm_type *type_dstopts; + int 
(*output)(struct net *, struct sock *, struct sk_buff *); + int (*transport_finish)(struct sk_buff *, int); + void (*local_error)(struct sk_buff *, u32); +}; + +struct ip_tunnel; + +struct ip6_tnl; + +struct xfrm_tunnel_skb_cb { + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + } header; + union { + struct ip_tunnel *ip4; + struct ip6_tnl *ip6; + } tunnel; +}; + +struct xfrm_mode_skb_cb { + struct xfrm_tunnel_skb_cb header; + __be16 id; + __be16 frag_off; + u8 ihl; + u8 tos; + u8 ttl; + u8 protocol; + u8 optlen; + u8 flow_lbl[3]; +}; + +struct xfrm_spi_skb_cb { + struct xfrm_tunnel_skb_cb header; + unsigned int daddroff; + unsigned int family; + __be32 seq; +}; + +struct xfrm_input_afinfo { + u8 family; + bool is_ipip; + int (*callback)(struct sk_buff *, u8, int); +}; + +struct xfrm4_protocol { + int (*handler)(struct sk_buff *); + int (*input_handler)(struct sk_buff *, int, __be32, int); + int (*cb_handler)(struct sk_buff *, int); + int (*err_handler)(struct sk_buff *, u32); + struct xfrm4_protocol *next; + int priority; +}; + +typedef u64 (*btf_bpf_tcp_send_ack)(struct tcp_sock *, u32); + +enum { + XFRM_STATE_VOID = 0, + XFRM_STATE_ACQ = 1, + XFRM_STATE_VALID = 2, + XFRM_STATE_ERROR = 3, + XFRM_STATE_EXPIRED = 4, + XFRM_STATE_DEAD = 5, +}; + +struct xfrm_if; + +struct xfrm_if_cb { + struct xfrm_if * (*decode_session)(struct sk_buff *, short unsigned int); +}; + +struct xfrm_if_parms { + int link; + u32 if_id; +}; + +struct xfrm_if { + struct xfrm_if *next; + struct net_device *dev; + struct net *net; + struct xfrm_if_parms p; + struct gro_cells gro_cells; +}; + +struct xfrm_policy_walk { + struct xfrm_policy_walk_entry walk; + u8 type; + u32 seq; +}; + +struct xfrmk_spdinfo { + u32 incnt; + u32 outcnt; + u32 fwdcnt; + u32 inscnt; + u32 outscnt; + u32 fwdscnt; + u32 spdhcnt; + u32 spdhmcnt; +}; + +struct ip6_mh { + __u8 ip6mh_proto; + __u8 ip6mh_hdrlen; + __u8 ip6mh_type; + __u8 ip6mh_reserved; + __u16 ip6mh_cksum; + __u8 data[0]; +}; + +struct xfrm_flo { + struct dst_entry *dst_orig; + u8 flags; +}; + +struct xfrm_pol_inexact_node { + struct rb_node node; + union { + xfrm_address_t addr; + struct callback_head rcu; + }; + u8 prefixlen; + struct rb_root root; + struct hlist_head hhead; +}; + +struct xfrm_pol_inexact_key { + possible_net_t net; + u32 if_id; + u16 family; + u8 dir; + u8 type; +}; + +struct xfrm_pol_inexact_bin { + struct xfrm_pol_inexact_key k; + struct rhash_head head; + struct hlist_head hhead; + seqcount_spinlock_t count; + struct rb_root root_d; + struct rb_root root_s; + struct list_head inexact_bins; + struct callback_head rcu; +}; + +enum xfrm_pol_inexact_candidate_type { + XFRM_POL_CAND_BOTH = 0, + XFRM_POL_CAND_SADDR = 1, + XFRM_POL_CAND_DADDR = 2, + XFRM_POL_CAND_ANY = 3, + XFRM_POL_CAND_MAX = 4, +}; + +struct xfrm_pol_inexact_candidates { + struct hlist_head *res[4]; +}; + +enum xfrm_ae_ftype_t { + XFRM_AE_UNSPEC = 0, + XFRM_AE_RTHR = 1, + XFRM_AE_RVAL = 2, + XFRM_AE_LVAL = 4, + XFRM_AE_ETHR = 8, + XFRM_AE_CR = 16, + XFRM_AE_CE = 32, + XFRM_AE_CU = 64, + __XFRM_AE_MAX = 65, +}; + +enum xfrm_nlgroups { + XFRMNLGRP_NONE = 0, + XFRMNLGRP_ACQUIRE = 1, + XFRMNLGRP_EXPIRE = 2, + XFRMNLGRP_SA = 3, + XFRMNLGRP_POLICY = 4, + XFRMNLGRP_AEVENTS = 5, + XFRMNLGRP_REPORT = 6, + XFRMNLGRP_MIGRATE = 7, + XFRMNLGRP_MAPPING = 8, + __XFRMNLGRP_MAX = 9, +}; + +enum { + XFRM_MODE_FLAG_TUNNEL = 1, +}; + +struct km_event { + union { + u32 hard; + u32 proto; + u32 byid; + u32 aevent; + u32 type; + } data; + u32 seq; + u32 portid; + u32 event; + struct net *net; +}; + 
+struct xfrm_kmaddress { + xfrm_address_t local; + xfrm_address_t remote; + u32 reserved; + u16 family; +}; + +struct xfrm_migrate { + xfrm_address_t old_daddr; + xfrm_address_t old_saddr; + xfrm_address_t new_daddr; + xfrm_address_t new_saddr; + u8 proto; + u8 mode; + u16 reserved; + u32 reqid; + u16 old_family; + u16 new_family; +}; + +struct xfrm_mgr { + struct list_head list; + int (*notify)(struct xfrm_state *, const struct km_event *); + int (*acquire)(struct xfrm_state *, struct xfrm_tmpl *, struct xfrm_policy *); + struct xfrm_policy * (*compile_policy)(struct sock *, int, u8 *, int, int *); + int (*new_mapping)(struct xfrm_state *, xfrm_address_t *, __be16); + int (*notify_policy)(struct xfrm_policy *, int, const struct km_event *); + int (*report)(struct net *, u8, struct xfrm_selector *, xfrm_address_t *); + int (*migrate)(const struct xfrm_selector *, u8, u8, const struct xfrm_migrate *, int, const struct xfrm_kmaddress *, const struct xfrm_encap_tmpl *); + bool (*is_alive)(const struct km_event *); +}; + +struct xfrmk_sadinfo { + u32 sadhcnt; + u32 sadhmcnt; + u32 sadcnt; +}; + +struct xfrm_translator { + int (*alloc_compat)(struct sk_buff *, const struct nlmsghdr *); + struct nlmsghdr * (*rcv_msg_compat)(const struct nlmsghdr *, int, const struct nla_policy *, struct netlink_ext_ack *); + int (*xlate_user_policy_sockptr)(u8 **, int); + struct module *owner; +}; + +struct ip_beet_phdr { + __u8 nexthdr; + __u8 hdrlen; + __u8 padlen; + __u8 reserved; +}; + +struct ip_tunnel_6rd_parm { + struct in6_addr prefix; + __be32 relay_prefix; + u16 prefixlen; + u16 relay_prefixlen; +}; + +struct ip_tunnel_fan { + struct list_head fan_maps; +}; + +struct ip_tunnel_prl_entry; + +struct ip_tunnel { + struct ip_tunnel *next; + struct hlist_node hash_node; + struct net_device *dev; + struct net *net; + long unsigned int err_time; + int err_count; + u32 i_seqno; + u32 o_seqno; + int tun_hlen; + u32 index; + u8 erspan_ver; + u8 dir; + u16 hwid; + struct dst_cache dst_cache; + struct ip_tunnel_parm parms; + int mlink; + int encap_hlen; + int hlen; + struct ip_tunnel_encap encap; + struct ip_tunnel_6rd_parm ip6rd; + struct ip_tunnel_prl_entry *prl; + unsigned int prl_count; + struct ip_tunnel_fan fan; + unsigned int ip_tnl_net_id; + struct gro_cells gro_cells; + __u32 fwmark; + bool collect_md; + bool ignore_df; +}; + +struct __ip6_tnl_parm { + char name[16]; + int link; + __u8 proto; + __u8 encap_limit; + __u8 hop_limit; + bool collect_md; + __be32 flowinfo; + __u32 flags; + struct in6_addr laddr; + struct in6_addr raddr; + __be16 i_flags; + __be16 o_flags; + __be32 i_key; + __be32 o_key; + __u32 fwmark; + __u32 index; + __u8 erspan_ver; + __u8 dir; + __u16 hwid; +}; + +struct ip6_tnl { + struct ip6_tnl *next; + struct net_device *dev; + struct net *net; + struct __ip6_tnl_parm parms; + struct flowi fl; + struct dst_cache dst_cache; + struct gro_cells gro_cells; + int err_count; + long unsigned int err_time; + __u32 i_seqno; + __u32 o_seqno; + int hlen; + int tun_hlen; + int encap_hlen; + struct ip_tunnel_encap encap; + int mlink; +}; + +struct xfrm_skb_cb { + struct xfrm_tunnel_skb_cb header; + union { + struct { + __u32 low; + __u32 hi; + } output; + struct { + __be32 low; + __be32 hi; + } input; + } seq; +}; + +struct ip_tunnel_prl_entry { + struct ip_tunnel_prl_entry *next; + __be32 addr; + u16 flags; + struct callback_head callback_head; +}; + +struct xfrm_trans_tasklet { + struct tasklet_struct tasklet; + struct sk_buff_head queue; +}; + +struct xfrm_trans_cb { + union { + struct 
inet_skb_parm h4; + struct inet6_skb_parm h6; + } header; + int (*finish)(struct net *, struct sock *, struct sk_buff *); + struct net *net; +}; + +struct xfrm_user_offload { + int ifindex; + __u8 flags; +}; + +struct espintcp_msg { + struct sk_buff *skb; + struct sk_msg skmsg; + int offset; + int len; +}; + +struct espintcp_ctx { + struct strparser strp; + struct sk_buff_head ike_queue; + struct sk_buff_head out_queue; + struct espintcp_msg partial; + void (*saved_data_ready)(struct sock *); + void (*saved_write_space)(struct sock *); + void (*saved_destruct)(struct sock *); + struct work_struct work; + bool tx_running; +}; + +struct unix_stream_read_state { + int (*recv_actor)(struct sk_buff *, int, int, struct unix_stream_read_state *); + struct socket *socket; + struct msghdr *msg; + struct pipe_inode_info *pipe; + size_t size; + int flags; + unsigned int splice_flags; +}; + +struct ipv6_params { + __s32 disable_ipv6; + __s32 autoconf; +}; + +enum flowlabel_reflect { + FLOWLABEL_REFLECT_ESTABLISHED = 1, + FLOWLABEL_REFLECT_TCP_RESET = 2, + FLOWLABEL_REFLECT_ICMPV6_ECHO_REPLIES = 4, +}; + +struct in6_rtmsg { + struct in6_addr rtmsg_dst; + struct in6_addr rtmsg_src; + struct in6_addr rtmsg_gateway; + __u32 rtmsg_type; + __u16 rtmsg_dst_len; + __u16 rtmsg_src_len; + __u32 rtmsg_metric; + long unsigned int rtmsg_info; + __u32 rtmsg_flags; + int rtmsg_ifindex; +}; + +struct compat_in6_rtmsg { + struct in6_addr rtmsg_dst; + struct in6_addr rtmsg_src; + struct in6_addr rtmsg_gateway; + u32 rtmsg_type; + u16 rtmsg_dst_len; + u16 rtmsg_src_len; + u32 rtmsg_metric; + u32 rtmsg_info; + u32 rtmsg_flags; + s32 rtmsg_ifindex; +}; + +struct ac6_iter_state { + struct seq_net_private p; + struct net_device *dev; + struct inet6_dev *idev; +}; + +struct ip6_fraglist_iter { + struct ipv6hdr *tmp_hdr; + struct sk_buff *frag; + int offset; + unsigned int hlen; + __be32 frag_id; + u8 nexthdr; +}; + +struct ip6_frag_state { + u8 *prevhdr; + unsigned int hlen; + unsigned int mtu; + unsigned int left; + int offset; + int ptr; + int hroom; + int troom; + __be32 frag_id; + u8 nexthdr; +}; + +struct ip6_ra_chain { + struct ip6_ra_chain *next; + struct sock *sk; + int sel; + void (*destructor)(struct sock *); +}; + +struct ipcm6_cookie { + struct sockcm_cookie sockc; + __s16 hlimit; + __s16 tclass; + __s8 dontfrag; + struct ipv6_txoptions *opt; + __u16 gso_size; +}; + +enum { + IFLA_INET6_UNSPEC = 0, + IFLA_INET6_FLAGS = 1, + IFLA_INET6_CONF = 2, + IFLA_INET6_STATS = 3, + IFLA_INET6_MCAST = 4, + IFLA_INET6_CACHEINFO = 5, + IFLA_INET6_ICMP6STATS = 6, + IFLA_INET6_TOKEN = 7, + IFLA_INET6_ADDR_GEN_MODE = 8, + __IFLA_INET6_MAX = 9, +}; + +enum in6_addr_gen_mode { + IN6_ADDR_GEN_MODE_EUI64 = 0, + IN6_ADDR_GEN_MODE_NONE = 1, + IN6_ADDR_GEN_MODE_STABLE_PRIVACY = 2, + IN6_ADDR_GEN_MODE_RANDOM = 3, +}; + +struct ifla_cacheinfo { + __u32 max_reasm_len; + __u32 tstamp; + __u32 reachable_time; + __u32 retrans_time; +}; + +struct prefixmsg { + unsigned char prefix_family; + unsigned char prefix_pad1; + short unsigned int prefix_pad2; + int prefix_ifindex; + unsigned char prefix_type; + unsigned char prefix_len; + unsigned char prefix_flags; + unsigned char prefix_pad3; +}; + +enum { + PREFIX_UNSPEC = 0, + PREFIX_ADDRESS = 1, + PREFIX_CACHEINFO = 2, + __PREFIX_MAX = 3, +}; + +struct prefix_cacheinfo { + __u32 preferred_time; + __u32 valid_time; +}; + +struct in6_ifreq { + struct in6_addr ifr6_addr; + __u32 ifr6_prefixlen; + int ifr6_ifindex; +}; + +enum { + DEVCONF_FORWARDING = 0, + DEVCONF_HOPLIMIT = 1, + DEVCONF_MTU6 = 2, + 
DEVCONF_ACCEPT_RA = 3, + DEVCONF_ACCEPT_REDIRECTS = 4, + DEVCONF_AUTOCONF = 5, + DEVCONF_DAD_TRANSMITS = 6, + DEVCONF_RTR_SOLICITS = 7, + DEVCONF_RTR_SOLICIT_INTERVAL = 8, + DEVCONF_RTR_SOLICIT_DELAY = 9, + DEVCONF_USE_TEMPADDR = 10, + DEVCONF_TEMP_VALID_LFT = 11, + DEVCONF_TEMP_PREFERED_LFT = 12, + DEVCONF_REGEN_MAX_RETRY = 13, + DEVCONF_MAX_DESYNC_FACTOR = 14, + DEVCONF_MAX_ADDRESSES = 15, + DEVCONF_FORCE_MLD_VERSION = 16, + DEVCONF_ACCEPT_RA_DEFRTR = 17, + DEVCONF_ACCEPT_RA_PINFO = 18, + DEVCONF_ACCEPT_RA_RTR_PREF = 19, + DEVCONF_RTR_PROBE_INTERVAL = 20, + DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN = 21, + DEVCONF_PROXY_NDP = 22, + DEVCONF_OPTIMISTIC_DAD = 23, + DEVCONF_ACCEPT_SOURCE_ROUTE = 24, + DEVCONF_MC_FORWARDING = 25, + DEVCONF_DISABLE_IPV6 = 26, + DEVCONF_ACCEPT_DAD = 27, + DEVCONF_FORCE_TLLAO = 28, + DEVCONF_NDISC_NOTIFY = 29, + DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL = 30, + DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL = 31, + DEVCONF_SUPPRESS_FRAG_NDISC = 32, + DEVCONF_ACCEPT_RA_FROM_LOCAL = 33, + DEVCONF_USE_OPTIMISTIC = 34, + DEVCONF_ACCEPT_RA_MTU = 35, + DEVCONF_STABLE_SECRET = 36, + DEVCONF_USE_OIF_ADDRS_ONLY = 37, + DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT = 38, + DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN = 39, + DEVCONF_DROP_UNICAST_IN_L2_MULTICAST = 40, + DEVCONF_DROP_UNSOLICITED_NA = 41, + DEVCONF_KEEP_ADDR_ON_DOWN = 42, + DEVCONF_RTR_SOLICIT_MAX_INTERVAL = 43, + DEVCONF_SEG6_ENABLED = 44, + DEVCONF_SEG6_REQUIRE_HMAC = 45, + DEVCONF_ENHANCED_DAD = 46, + DEVCONF_ADDR_GEN_MODE = 47, + DEVCONF_DISABLE_POLICY = 48, + DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN = 49, + DEVCONF_NDISC_TCLASS = 50, + DEVCONF_RPL_SEG_ENABLED = 51, + DEVCONF_RA_DEFRTR_METRIC = 52, + DEVCONF_MAX = 53, +}; + +enum { + INET6_IFADDR_STATE_PREDAD = 0, + INET6_IFADDR_STATE_DAD = 1, + INET6_IFADDR_STATE_POSTDAD = 2, + INET6_IFADDR_STATE_ERRDAD = 3, + INET6_IFADDR_STATE_DEAD = 4, +}; + +union fwnet_hwaddr { + u8 u[16]; + struct { + __be64 uniq_id; + u8 max_rec; + u8 sspd; + __be16 fifo_hi; + __be32 fifo_lo; + } uc; +}; + +struct in6_validator_info { + struct in6_addr i6vi_addr; + struct inet6_dev *i6vi_dev; + struct netlink_ext_ack *extack; +}; + +struct ifa6_config { + const struct in6_addr *pfx; + unsigned int plen; + const struct in6_addr *peer_pfx; + u32 rt_priority; + u32 ifa_flags; + u32 preferred_lft; + u32 valid_lft; + u16 scope; +}; + +enum cleanup_prefix_rt_t { + CLEANUP_PREFIX_RT_NOP = 0, + CLEANUP_PREFIX_RT_DEL = 1, + CLEANUP_PREFIX_RT_EXPIRE = 2, +}; + +enum { + IPV6_SADDR_RULE_INIT = 0, + IPV6_SADDR_RULE_LOCAL = 1, + IPV6_SADDR_RULE_SCOPE = 2, + IPV6_SADDR_RULE_PREFERRED = 3, + IPV6_SADDR_RULE_OIF = 4, + IPV6_SADDR_RULE_LABEL = 5, + IPV6_SADDR_RULE_PRIVACY = 6, + IPV6_SADDR_RULE_ORCHID = 7, + IPV6_SADDR_RULE_PREFIX = 8, + IPV6_SADDR_RULE_MAX = 9, +}; + +struct ipv6_saddr_score { + int rule; + int addr_type; + struct inet6_ifaddr *ifa; + long unsigned int scorebits[1]; + int scopedist; + int matchlen; +}; + +struct ipv6_saddr_dst { + const struct in6_addr *addr; + int ifindex; + int scope; + int label; + unsigned int prefs; +}; + +struct if6_iter_state { + struct seq_net_private p; + int bucket; + int offset; +}; + +enum addr_type_t { + UNICAST_ADDR = 0, + MULTICAST_ADDR = 1, + ANYCAST_ADDR = 2, +}; + +struct inet6_fill_args { + u32 portid; + u32 seq; + int event; + unsigned int flags; + int netnsid; + int ifindex; + enum addr_type_t type; +}; + +enum { + DAD_PROCESS = 0, + DAD_BEGIN = 1, + DAD_ABORT = 2, +}; + +struct ifaddrlblmsg { + __u8 ifal_family; + __u8 __ifal_reserved; + __u8 ifal_prefixlen; + __u8 ifal_flags; + 
__u32 ifal_index; + __u32 ifal_seq; +}; + +enum { + IFAL_ADDRESS = 1, + IFAL_LABEL = 2, + __IFAL_MAX = 3, +}; + +struct ip6addrlbl_entry { + struct in6_addr prefix; + int prefixlen; + int ifindex; + int addrtype; + u32 label; + struct hlist_node list; + struct callback_head rcu; +}; + +struct ip6addrlbl_init_table { + const struct in6_addr *prefix; + int prefixlen; + u32 label; +}; + +struct rd_msg { + struct icmp6hdr icmph; + struct in6_addr target; + struct in6_addr dest; + __u8 opt[0]; +}; + +struct fib6_gc_args { + int timeout; + int more; +}; + +struct rt6_exception { + struct hlist_node hlist; + struct rt6_info *rt6i; + long unsigned int stamp; + struct callback_head rcu; +}; + +typedef struct rt6_info * (*pol_lookup_t)(struct net *, struct fib6_table *, struct flowi6 *, const struct sk_buff *, int); + +struct route_info { + __u8 type; + __u8 length; + __u8 prefix_len; + __u8 reserved_l: 3; + __u8 route_pref: 2; + __u8 reserved_h: 3; + __be32 lifetime; + __u8 prefix[0]; +}; + +struct rt6_rtnl_dump_arg { + struct sk_buff *skb; + struct netlink_callback *cb; + struct net *net; + struct fib_dump_filter filter; +}; + +struct netevent_redirect { + struct dst_entry *old; + struct dst_entry *new; + struct neighbour *neigh; + const void *daddr; +}; + +struct trace_event_raw_fib6_table_lookup { + struct trace_entry ent; + u32 tb_id; + int err; + int oif; + int iif; + __u8 tos; + __u8 scope; + __u8 flags; + __u8 src[16]; + __u8 dst[16]; + u16 sport; + u16 dport; + u8 proto; + u8 rt_type; + u32 __data_loc_name; + __u8 gw[16]; + char __data[0]; +}; + +struct trace_event_data_offsets_fib6_table_lookup { + u32 name; +}; + +typedef void (*btf_trace_fib6_table_lookup)(void *, const struct net *, const struct fib6_result *, struct fib6_table *, const struct flowi6 *); + +enum rt6_nud_state { + RT6_NUD_FAIL_HARD = 4294967293, + RT6_NUD_FAIL_PROBE = 4294967294, + RT6_NUD_FAIL_DO_RR = 4294967295, + RT6_NUD_SUCCEED = 1, +}; + +struct fib6_nh_dm_arg { + struct net *net; + const struct in6_addr *saddr; + int oif; + int flags; + struct fib6_nh *nh; +}; + +struct __rt6_probe_work { + struct work_struct work; + struct in6_addr target; + struct net_device *dev; +}; + +struct fib6_nh_frl_arg { + u32 flags; + int oif; + int strict; + int *mpri; + bool *do_rr; + struct fib6_nh *nh; +}; + +struct fib6_nh_excptn_arg { + struct rt6_info *rt; + int plen; +}; + +struct fib6_nh_match_arg { + const struct net_device *dev; + const struct in6_addr *gw; + struct fib6_nh *match; +}; + +struct fib6_nh_age_excptn_arg { + struct fib6_gc_args *gc_args; + long unsigned int now; +}; + +struct fib6_nh_rd_arg { + struct fib6_result *res; + struct flowi6 *fl6; + const struct in6_addr *gw; + struct rt6_info **ret; +}; + +struct ip6rd_flowi { + struct flowi6 fl6; + struct in6_addr gateway; +}; + +struct fib6_nh_del_cached_rt_arg { + struct fib6_config *cfg; + struct fib6_info *f6i; +}; + +struct arg_dev_net_ip { + struct net_device *dev; + struct net *net; + struct in6_addr *addr; +}; + +struct arg_netdev_event { + const struct net_device *dev; + union { + unsigned char nh_flags; + long unsigned int event; + }; +}; + +struct rt6_mtu_change_arg { + struct net_device *dev; + unsigned int mtu; + struct fib6_info *f6i; +}; + +struct rt6_nh { + struct fib6_info *fib6_info; + struct fib6_config r_cfg; + struct list_head next; +}; + +struct fib6_nh_exception_dump_walker { + struct rt6_rtnl_dump_arg *dump; + struct fib6_info *rt; + unsigned int flags; + unsigned int skip; + unsigned int count; +}; + +enum fib6_walk_state { + FWS_S = 0, + 
FWS_L = 1, + FWS_R = 2, + FWS_C = 3, + FWS_U = 4, +}; + +struct fib6_walker { + struct list_head lh; + struct fib6_node *root; + struct fib6_node *node; + struct fib6_info *leaf; + enum fib6_walk_state state; + unsigned int skip; + unsigned int count; + unsigned int skip_in_node; + int (*func)(struct fib6_walker *); + void *args; +}; + +struct fib6_entry_notifier_info { + struct fib_notifier_info info; + struct fib6_info *rt; + unsigned int nsiblings; +}; + +struct ipv6_route_iter { + struct seq_net_private p; + struct fib6_walker w; + loff_t skip; + struct fib6_table *tbl; + int sernum; +}; + +struct bpf_iter__ipv6_route { + union { + struct bpf_iter_meta *meta; + }; + union { + struct fib6_info *rt; + }; +}; + +struct fib6_cleaner { + struct fib6_walker w; + struct net *net; + int (*func)(struct fib6_info *, void *); + int sernum; + void *arg; + bool skip_notify; +}; + +enum { + FIB6_NO_SERNUM_CHANGE = 0, +}; + +struct fib6_dump_arg { + struct net *net; + struct notifier_block *nb; + struct netlink_ext_ack *extack; +}; + +struct fib6_nh_pcpu_arg { + struct fib6_info *from; + const struct fib6_table *table; +}; + +struct lookup_args { + int offset; + const struct in6_addr *addr; +}; + +struct ipv6_mreq { + struct in6_addr ipv6mr_multiaddr; + int ipv6mr_ifindex; +}; + +struct in6_flowlabel_req { + struct in6_addr flr_dst; + __be32 flr_label; + __u8 flr_action; + __u8 flr_share; + __u16 flr_flags; + __u16 flr_expires; + __u16 flr_linger; + __u32 __flr_pad; +}; + +struct ip6_mtuinfo { + struct sockaddr_in6 ip6m_addr; + __u32 ip6m_mtu; +}; + +struct nduseroptmsg { + unsigned char nduseropt_family; + unsigned char nduseropt_pad1; + short unsigned int nduseropt_opts_len; + int nduseropt_ifindex; + __u8 nduseropt_icmp_type; + __u8 nduseropt_icmp_code; + short unsigned int nduseropt_pad2; + unsigned int nduseropt_pad3; +}; + +enum { + NDUSEROPT_UNSPEC = 0, + NDUSEROPT_SRCADDR = 1, + __NDUSEROPT_MAX = 2, +}; + +struct nd_msg { + struct icmp6hdr icmph; + struct in6_addr target; + __u8 opt[0]; +}; + +struct rs_msg { + struct icmp6hdr icmph; + __u8 opt[0]; +}; + +struct ra_msg { + struct icmp6hdr icmph; + __be32 reachable_time; + __be32 retrans_timer; +}; + +struct icmp6_filter { + __u32 data[8]; +}; + +struct raw6_sock { + struct inet_sock inet; + __u32 checksum; + __u32 offset; + struct icmp6_filter filter; + __u32 ip6mr_table; + struct ipv6_pinfo inet6; +}; + +typedef int mh_filter_t(struct sock *, struct sk_buff *); + +struct raw6_frag_vec { + struct msghdr *msg; + int hlen; + char c[4]; +}; + +struct ipv6_destopt_hao { + __u8 type; + __u8 length; + struct in6_addr addr; +} __attribute__((packed)); + +typedef void ip6_icmp_send_t(struct sk_buff *, u8, u8, __u32, const struct in6_addr *, const struct inet6_skb_parm *); + +struct icmpv6_msg { + struct sk_buff *skb; + int offset; + uint8_t type; +}; + +struct icmp6_err { + int err; + int fatal; +}; + +struct mld_msg { + struct icmp6hdr mld_hdr; + struct in6_addr mld_mca; +}; + +struct mld2_grec { + __u8 grec_type; + __u8 grec_auxwords; + __be16 grec_nsrcs; + struct in6_addr grec_mca; + struct in6_addr grec_src[0]; +}; + +struct mld2_report { + struct icmp6hdr mld2r_hdr; + struct mld2_grec mld2r_grec[0]; +}; + +struct mld2_query { + struct icmp6hdr mld2q_hdr; + struct in6_addr mld2q_mca; + __u8 mld2q_qrv: 3; + __u8 mld2q_suppress: 1; + __u8 mld2q_resv2: 4; + __u8 mld2q_qqic; + __be16 mld2q_nsrcs; + struct in6_addr mld2q_srcs[0]; +}; + +struct igmp6_mc_iter_state { + struct seq_net_private p; + struct net_device *dev; + struct inet6_dev *idev; +}; + 
+struct igmp6_mcf_iter_state { + struct seq_net_private p; + struct net_device *dev; + struct inet6_dev *idev; + struct ifmcaddr6 *im; +}; + +enum ip6_defrag_users { + IP6_DEFRAG_LOCAL_DELIVER = 0, + IP6_DEFRAG_CONNTRACK_IN = 1, + __IP6_DEFRAG_CONNTRACK_IN = 65536, + IP6_DEFRAG_CONNTRACK_OUT = 65537, + __IP6_DEFRAG_CONNTRACK_OUT = 131072, + IP6_DEFRAG_CONNTRACK_BRIDGE_IN = 131073, + __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = 196608, +}; + +struct frag_queue { + struct inet_frag_queue q; + int iif; + __u16 nhoffset; + u8 ecn; +}; + +struct tcp6_pseudohdr { + struct in6_addr saddr; + struct in6_addr daddr; + __be32 len; + __be32 protocol; +}; + +struct rt0_hdr { + struct ipv6_rt_hdr rt_hdr; + __u32 reserved; + struct in6_addr addr[0]; +}; + +struct ipv6_rpl_sr_hdr { + __u8 nexthdr; + __u8 hdrlen; + __u8 type; + __u8 segments_left; + __u32 cmpre: 4; + __u32 cmpri: 4; + __u32 reserved: 4; + __u32 pad: 4; + __u32 reserved1: 16; + union { + struct in6_addr addr[0]; + __u8 data[0]; + } segments; +}; + +struct tlvtype_proc { + int type; + bool (*func)(struct sk_buff *, int); +}; + +struct ip6fl_iter_state { + struct seq_net_private p; + struct pid_namespace *pid_ns; + int bucket; +}; + +struct sr6_tlv { + __u8 type; + __u8 len; + __u8 data[0]; +}; + +enum { + SEG6_ATTR_UNSPEC = 0, + SEG6_ATTR_DST = 1, + SEG6_ATTR_DSTLEN = 2, + SEG6_ATTR_HMACKEYID = 3, + SEG6_ATTR_SECRET = 4, + SEG6_ATTR_SECRETLEN = 5, + SEG6_ATTR_ALGID = 6, + SEG6_ATTR_HMACINFO = 7, + __SEG6_ATTR_MAX = 8, +}; + +enum { + SEG6_CMD_UNSPEC = 0, + SEG6_CMD_SETHMAC = 1, + SEG6_CMD_DUMPHMAC = 2, + SEG6_CMD_SET_TUNSRC = 3, + SEG6_CMD_GET_TUNSRC = 4, + __SEG6_CMD_MAX = 5, +}; + +struct seg6_hmac_info { + struct rhash_head node; + struct callback_head rcu; + u32 hmackeyid; + char secret[64]; + u8 slen; + u8 alg_id; +}; + +typedef short unsigned int mifi_t; + +typedef __u32 if_mask; + +struct if_set { + if_mask ifs_bits[8]; +}; + +struct mif6ctl { + mifi_t mif6c_mifi; + unsigned char mif6c_flags; + unsigned char vifc_threshold; + __u16 mif6c_pifi; + unsigned int vifc_rate_limit; +}; + +struct mf6cctl { + struct sockaddr_in6 mf6cc_origin; + struct sockaddr_in6 mf6cc_mcastgrp; + mifi_t mf6cc_parent; + struct if_set mf6cc_ifset; +}; + +struct sioc_sg_req6 { + struct sockaddr_in6 src; + struct sockaddr_in6 grp; + long unsigned int pktcnt; + long unsigned int bytecnt; + long unsigned int wrong_if; +}; + +struct sioc_mif_req6 { + mifi_t mifi; + long unsigned int icount; + long unsigned int ocount; + long unsigned int ibytes; + long unsigned int obytes; +}; + +struct mrt6msg { + __u8 im6_mbz; + __u8 im6_msgtype; + __u16 im6_mif; + __u32 im6_pad; + struct in6_addr im6_src; + struct in6_addr im6_dst; +}; + +enum { + IP6MRA_CREPORT_UNSPEC = 0, + IP6MRA_CREPORT_MSGTYPE = 1, + IP6MRA_CREPORT_MIF_ID = 2, + IP6MRA_CREPORT_SRC_ADDR = 3, + IP6MRA_CREPORT_DST_ADDR = 4, + IP6MRA_CREPORT_PKT = 5, + __IP6MRA_CREPORT_MAX = 6, +}; + +struct mfc6_cache_cmp_arg { + struct in6_addr mf6c_mcastgrp; + struct in6_addr mf6c_origin; +}; + +struct mfc6_cache { + struct mr_mfc _c; + union { + struct { + struct in6_addr mf6c_mcastgrp; + struct in6_addr mf6c_origin; + }; + struct mfc6_cache_cmp_arg cmparg; + }; +}; + +struct ip6mr_result { + struct mr_table *mrt; +}; + +struct compat_sioc_sg_req6 { + struct sockaddr_in6 src; + struct sockaddr_in6 grp; + compat_ulong_t pktcnt; + compat_ulong_t bytecnt; + compat_ulong_t wrong_if; +}; + +struct compat_sioc_mif_req6 { + mifi_t mifi; + compat_ulong_t icount; + compat_ulong_t ocount; + compat_ulong_t ibytes; + compat_ulong_t obytes; +}; + 
+struct xfrm6_protocol { + int (*handler)(struct sk_buff *); + int (*input_handler)(struct sk_buff *, int, __be32, int); + int (*cb_handler)(struct sk_buff *, int); + int (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32); + struct xfrm6_protocol *next; + int priority; +}; + +struct br_input_skb_cb { + struct net_device *brdev; + u16 frag_max_size; + u8 igmp; + u8 mrouters_only: 1; + u8 proxyarp_replied: 1; + u8 src_port_isolated: 1; + u8 vlan_filtered: 1; + u8 br_netfilter_broute: 1; + int offload_fwd_mark; +}; + +struct nf_bridge_frag_data; + +struct fib6_rule { + struct fib_rule common; + struct rt6key src; + struct rt6key dst; + u8 tclass; +}; + +struct calipso_doi; + +struct netlbl_calipso_ops { + int (*doi_add)(struct calipso_doi *, struct netlbl_audit *); + void (*doi_free)(struct calipso_doi *); + int (*doi_remove)(u32, struct netlbl_audit *); + struct calipso_doi * (*doi_getdef)(u32); + void (*doi_putdef)(struct calipso_doi *); + int (*doi_walk)(u32 *, int (*)(struct calipso_doi *, void *), void *); + int (*sock_getattr)(struct sock *, struct netlbl_lsm_secattr *); + int (*sock_setattr)(struct sock *, const struct calipso_doi *, const struct netlbl_lsm_secattr *); + void (*sock_delattr)(struct sock *); + int (*req_setattr)(struct request_sock *, const struct calipso_doi *, const struct netlbl_lsm_secattr *); + void (*req_delattr)(struct request_sock *); + int (*opt_getattr)(const unsigned char *, struct netlbl_lsm_secattr *); + unsigned char * (*skbuff_optptr)(const struct sk_buff *); + int (*skbuff_setattr)(struct sk_buff *, const struct calipso_doi *, const struct netlbl_lsm_secattr *); + int (*skbuff_delattr)(struct sk_buff *); + void (*cache_invalidate)(); + int (*cache_add)(const unsigned char *, const struct netlbl_lsm_secattr *); +}; + +struct calipso_doi { + u32 doi; + u32 type; + refcount_t refcount; + struct list_head list; + struct callback_head rcu; +}; + +struct calipso_map_cache_bkt { + spinlock_t lock; + u32 size; + struct list_head list; +}; + +struct calipso_map_cache_entry { + u32 hash; + unsigned char *key; + size_t key_len; + struct netlbl_lsm_cache *lsm_data; + u32 activity; + struct list_head list; +}; + +enum { + SEG6_IPTUNNEL_UNSPEC = 0, + SEG6_IPTUNNEL_SRH = 1, + __SEG6_IPTUNNEL_MAX = 2, +}; + +struct seg6_iptunnel_encap { + int mode; + struct ipv6_sr_hdr srh[0]; +}; + +enum { + SEG6_IPTUN_MODE_INLINE = 0, + SEG6_IPTUN_MODE_ENCAP = 1, + SEG6_IPTUN_MODE_L2ENCAP = 2, +}; + +struct seg6_lwt { + struct dst_cache cache; + struct seg6_iptunnel_encap tuninfo[0]; +}; + +enum l3mdev_type { + L3MDEV_TYPE_UNSPEC = 0, + L3MDEV_TYPE_VRF = 1, + __L3MDEV_TYPE_MAX = 2, +}; + +enum { + IP6_FH_F_FRAG = 1, + IP6_FH_F_AUTH = 2, + IP6_FH_F_SKIP_RH = 4, +}; + +enum { + SEG6_LOCAL_UNSPEC = 0, + SEG6_LOCAL_ACTION = 1, + SEG6_LOCAL_SRH = 2, + SEG6_LOCAL_TABLE = 3, + SEG6_LOCAL_NH4 = 4, + SEG6_LOCAL_NH6 = 5, + SEG6_LOCAL_IIF = 6, + SEG6_LOCAL_OIF = 7, + SEG6_LOCAL_BPF = 8, + SEG6_LOCAL_VRFTABLE = 9, + SEG6_LOCAL_COUNTERS = 10, + __SEG6_LOCAL_MAX = 11, +}; + +enum { + SEG6_LOCAL_BPF_PROG_UNSPEC = 0, + SEG6_LOCAL_BPF_PROG = 1, + SEG6_LOCAL_BPF_PROG_NAME = 2, + __SEG6_LOCAL_BPF_PROG_MAX = 3, +}; + +enum { + SEG6_LOCAL_CNT_UNSPEC = 0, + SEG6_LOCAL_CNT_PAD = 1, + SEG6_LOCAL_CNT_PACKETS = 2, + SEG6_LOCAL_CNT_BYTES = 3, + SEG6_LOCAL_CNT_ERRORS = 4, + __SEG6_LOCAL_CNT_MAX = 5, +}; + +struct seg6_local_lwt; + +struct seg6_local_lwtunnel_ops { + int (*build_state)(struct seg6_local_lwt *, const void *, struct netlink_ext_ack *); + void (*destroy_state)(struct 
seg6_local_lwt *); +}; + +enum seg6_end_dt_mode { + DT_INVALID_MODE = 4294967274, + DT_LEGACY_MODE = 0, + DT_VRF_MODE = 1, +}; + +struct seg6_end_dt_info { + enum seg6_end_dt_mode mode; + struct net *net; + int vrf_ifindex; + int vrf_table; + __be16 proto; + u16 family; + int hdrlen; +}; + +struct pcpu_seg6_local_counters; + +struct seg6_action_desc; + +struct seg6_local_lwt { + int action; + struct ipv6_sr_hdr *srh; + int table; + struct in_addr nh4; + struct in6_addr nh6; + int iif; + int oif; + struct bpf_lwt_prog bpf; + struct seg6_end_dt_info dt_info; + struct pcpu_seg6_local_counters *pcpu_counters; + int headroom; + struct seg6_action_desc *desc; + long unsigned int parsed_optattrs; +}; + +struct seg6_action_desc { + int action; + long unsigned int attrs; + long unsigned int optattrs; + int (*input)(struct sk_buff *, struct seg6_local_lwt *); + int static_headroom; + struct seg6_local_lwtunnel_ops slwt_ops; +}; + +struct pcpu_seg6_local_counters { + u64_stats_t packets; + u64_stats_t bytes; + u64_stats_t errors; + struct u64_stats_sync syncp; +}; + +struct seg6_local_counters { + __u64 packets; + __u64 bytes; + __u64 errors; +}; + +struct seg6_action_param { + int (*parse)(struct nlattr **, struct seg6_local_lwt *); + int (*put)(struct sk_buff *, struct seg6_local_lwt *); + int (*cmp)(struct seg6_local_lwt *, struct seg6_local_lwt *); + void (*destroy)(struct seg6_local_lwt *); +}; + +struct sr6_tlv_hmac { + struct sr6_tlv tlvhdr; + __u16 reserved; + __be32 hmackeyid; + __u8 hmac[32]; +}; + +enum { + SEG6_HMAC_ALGO_SHA1 = 1, + SEG6_HMAC_ALGO_SHA256 = 2, +}; + +struct seg6_hmac_algo { + u8 alg_id; + char name[64]; + struct crypto_shash **tfms; + struct shash_desc **shashs; +}; + +struct sockaddr_pkt { + short unsigned int spkt_family; + unsigned char spkt_device[14]; + __be16 spkt_protocol; +}; + +struct sockaddr_ll { + short unsigned int sll_family; + __be16 sll_protocol; + int sll_ifindex; + short unsigned int sll_hatype; + unsigned char sll_pkttype; + unsigned char sll_halen; + unsigned char sll_addr[8]; +}; + +struct tpacket_stats { + unsigned int tp_packets; + unsigned int tp_drops; +}; + +struct tpacket_stats_v3 { + unsigned int tp_packets; + unsigned int tp_drops; + unsigned int tp_freeze_q_cnt; +}; + +struct tpacket_rollover_stats { + __u64 tp_all; + __u64 tp_huge; + __u64 tp_failed; +}; + +union tpacket_stats_u { + struct tpacket_stats stats1; + struct tpacket_stats_v3 stats3; +}; + +struct tpacket_auxdata { + __u32 tp_status; + __u32 tp_len; + __u32 tp_snaplen; + __u16 tp_mac; + __u16 tp_net; + __u16 tp_vlan_tci; + __u16 tp_vlan_tpid; +}; + +struct tpacket_hdr { + long unsigned int tp_status; + unsigned int tp_len; + unsigned int tp_snaplen; + short unsigned int tp_mac; + short unsigned int tp_net; + unsigned int tp_sec; + unsigned int tp_usec; +}; + +struct tpacket2_hdr { + __u32 tp_status; + __u32 tp_len; + __u32 tp_snaplen; + __u16 tp_mac; + __u16 tp_net; + __u32 tp_sec; + __u32 tp_nsec; + __u16 tp_vlan_tci; + __u16 tp_vlan_tpid; + __u8 tp_padding[4]; +}; + +struct tpacket_hdr_variant1 { + __u32 tp_rxhash; + __u32 tp_vlan_tci; + __u16 tp_vlan_tpid; + __u16 tp_padding; +}; + +struct tpacket3_hdr { + __u32 tp_next_offset; + __u32 tp_sec; + __u32 tp_nsec; + __u32 tp_snaplen; + __u32 tp_len; + __u32 tp_status; + __u16 tp_mac; + __u16 tp_net; + union { + struct tpacket_hdr_variant1 hv1; + }; + __u8 tp_padding[8]; +}; + +struct tpacket_bd_ts { + unsigned int ts_sec; + union { + unsigned int ts_usec; + unsigned int ts_nsec; + }; +}; + +struct tpacket_hdr_v1 { + __u32 
block_status; + __u32 num_pkts; + __u32 offset_to_first_pkt; + __u32 blk_len; + __u64 seq_num; + struct tpacket_bd_ts ts_first_pkt; + struct tpacket_bd_ts ts_last_pkt; +}; + +union tpacket_bd_header_u { + struct tpacket_hdr_v1 bh1; +}; + +struct tpacket_block_desc { + __u32 version; + __u32 offset_to_priv; + union tpacket_bd_header_u hdr; +}; + +enum tpacket_versions { + TPACKET_V1 = 0, + TPACKET_V2 = 1, + TPACKET_V3 = 2, +}; + +struct tpacket_req { + unsigned int tp_block_size; + unsigned int tp_block_nr; + unsigned int tp_frame_size; + unsigned int tp_frame_nr; +}; + +struct tpacket_req3 { + unsigned int tp_block_size; + unsigned int tp_block_nr; + unsigned int tp_frame_size; + unsigned int tp_frame_nr; + unsigned int tp_retire_blk_tov; + unsigned int tp_sizeof_priv; + unsigned int tp_feature_req_word; +}; + +union tpacket_req_u { + struct tpacket_req req; + struct tpacket_req3 req3; +}; + +struct fanout_args { + __u16 id; + __u16 type_flags; + __u32 max_num_members; +}; + +struct packet_mclist { + struct packet_mclist *next; + int ifindex; + int count; + short unsigned int type; + short unsigned int alen; + unsigned char addr[32]; +}; + +struct pgv; + +struct tpacket_kbdq_core { + struct pgv *pkbdq; + unsigned int feature_req_word; + unsigned int hdrlen; + unsigned char reset_pending_on_curr_blk; + unsigned char delete_blk_timer; + short unsigned int kactive_blk_num; + short unsigned int blk_sizeof_priv; + short unsigned int last_kactive_blk_num; + char *pkblk_start; + char *pkblk_end; + int kblk_size; + unsigned int max_frame_len; + unsigned int knum_blocks; + uint64_t knxt_seq_num; + char *prev; + char *nxt_offset; + struct sk_buff *skb; + rwlock_t blk_fill_in_prog_lock; + short unsigned int retire_blk_tov; + short unsigned int version; + long unsigned int tov_in_jiffies; + struct timer_list retire_blk_timer; +}; + +struct pgv { + char *buffer; +}; + +struct packet_ring_buffer { + struct pgv *pg_vec; + unsigned int head; + unsigned int frames_per_block; + unsigned int frame_size; + unsigned int frame_max; + unsigned int pg_vec_order; + unsigned int pg_vec_pages; + unsigned int pg_vec_len; + unsigned int *pending_refcnt; + union { + long unsigned int *rx_owner_map; + struct tpacket_kbdq_core prb_bdqc; + }; +}; + +struct packet_fanout { + possible_net_t net; + unsigned int num_members; + u32 max_num_members; + u16 id; + u8 type; + u8 flags; + union { + atomic_t rr_cur; + struct bpf_prog *bpf_prog; + }; + struct list_head list; + spinlock_t lock; + refcount_t sk_ref; + long: 64; + struct packet_type prot_hook; + struct sock *arr[0]; +}; + +struct packet_rollover { + int sock; + atomic_long_t num; + atomic_long_t num_huge; + atomic_long_t num_failed; + long: 64; + long: 64; + long: 64; + long: 64; + u32 history[16]; +}; + +struct packet_sock { + struct sock sk; + struct packet_fanout *fanout; + union tpacket_stats_u stats; + struct packet_ring_buffer rx_ring; + struct packet_ring_buffer tx_ring; + int copy_thresh; + spinlock_t bind_lock; + struct mutex pg_vec_lock; + unsigned int running; + unsigned int auxdata: 1; + unsigned int origdev: 1; + unsigned int has_vnet_hdr: 1; + unsigned int tp_loss: 1; + unsigned int tp_tx_has_off: 1; + int pressure; + int ifindex; + __be16 num; + struct packet_rollover *rollover; + struct packet_mclist *mclist; + atomic_t mapped; + enum tpacket_versions tp_version; + unsigned int tp_hdrlen; + unsigned int tp_reserve; + unsigned int tp_tstamp; + struct completion skb_completion; + struct net_device *cached_dev; + int (*xmit)(struct sk_buff *); + long: 64; + 
struct packet_type prot_hook; + atomic_t tp_drops; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct packet_mreq_max { + int mr_ifindex; + short unsigned int mr_type; + short unsigned int mr_alen; + unsigned char mr_address[32]; +}; + +union tpacket_uhdr { + struct tpacket_hdr *h1; + struct tpacket2_hdr *h2; + struct tpacket3_hdr *h3; + void *raw; +}; + +struct packet_skb_cb { + union { + struct sockaddr_pkt pkt; + union { + unsigned int origlen; + struct sockaddr_ll ll; + }; + } sa; +}; + +struct _strp_msg { + struct strp_msg strp; + int accum_len; +}; + +struct vlan_group { + unsigned int nr_vlan_devs; + struct hlist_node hlist; + struct net_device **vlan_devices_arrays[16]; +}; + +struct vlan_info { + struct net_device *real_dev; + struct vlan_group grp; + struct list_head vid_list; + unsigned int nr_vids; + struct callback_head rcu; +}; + +enum vlan_flags { + VLAN_FLAG_REORDER_HDR = 1, + VLAN_FLAG_GVRP = 2, + VLAN_FLAG_LOOSE_BINDING = 4, + VLAN_FLAG_MVRP = 8, + VLAN_FLAG_BRIDGE_BINDING = 16, +}; + +struct vlan_priority_tci_mapping { + u32 priority; + u16 vlan_qos; + struct vlan_priority_tci_mapping *next; +}; + +struct vlan_dev_priv { + unsigned int nr_ingress_mappings; + u32 ingress_priority_map[8]; + unsigned int nr_egress_mappings; + struct vlan_priority_tci_mapping *egress_priority_map[16]; + __be16 vlan_proto; + u16 vlan_id; + u16 flags; + struct net_device *real_dev; + unsigned char real_dev_addr[6]; + struct proc_dir_entry *dent; + struct vlan_pcpu_stats *vlan_pcpu_stats; + struct netpoll *netpoll; +}; + +enum vlan_protos { + VLAN_PROTO_8021Q = 0, + VLAN_PROTO_8021AD = 1, + VLAN_PROTO_NUM = 2, +}; + +struct vlan_vid_info { + struct list_head list; + __be16 proto; + u16 vid; + int refcount; +}; + +enum nl80211_iftype { + NL80211_IFTYPE_UNSPECIFIED = 0, + NL80211_IFTYPE_ADHOC = 1, + NL80211_IFTYPE_STATION = 2, + NL80211_IFTYPE_AP = 3, + NL80211_IFTYPE_AP_VLAN = 4, + NL80211_IFTYPE_WDS = 5, + NL80211_IFTYPE_MONITOR = 6, + NL80211_IFTYPE_MESH_POINT = 7, + NL80211_IFTYPE_P2P_CLIENT = 8, + NL80211_IFTYPE_P2P_GO = 9, + NL80211_IFTYPE_P2P_DEVICE = 10, + NL80211_IFTYPE_OCB = 11, + NL80211_IFTYPE_NAN = 12, + NUM_NL80211_IFTYPES = 13, + NL80211_IFTYPE_MAX = 12, +}; + +struct cfg80211_conn; + +struct cfg80211_cached_keys; + +enum ieee80211_bss_type { + IEEE80211_BSS_TYPE_ESS = 0, + IEEE80211_BSS_TYPE_PBSS = 1, + IEEE80211_BSS_TYPE_IBSS = 2, + IEEE80211_BSS_TYPE_MBSS = 3, + IEEE80211_BSS_TYPE_ANY = 4, +}; + +struct cfg80211_internal_bss; + +enum nl80211_chan_width { + NL80211_CHAN_WIDTH_20_NOHT = 0, + NL80211_CHAN_WIDTH_20 = 1, + NL80211_CHAN_WIDTH_40 = 2, + NL80211_CHAN_WIDTH_80 = 3, + NL80211_CHAN_WIDTH_80P80 = 4, + NL80211_CHAN_WIDTH_160 = 5, + NL80211_CHAN_WIDTH_5 = 6, + NL80211_CHAN_WIDTH_10 = 7, + NL80211_CHAN_WIDTH_1 = 8, + NL80211_CHAN_WIDTH_2 = 9, + NL80211_CHAN_WIDTH_4 = 10, + NL80211_CHAN_WIDTH_8 = 11, + NL80211_CHAN_WIDTH_16 = 12, +}; + +enum ieee80211_edmg_bw_config { + IEEE80211_EDMG_BW_CONFIG_4 = 4, + IEEE80211_EDMG_BW_CONFIG_5 = 5, + IEEE80211_EDMG_BW_CONFIG_6 = 6, + IEEE80211_EDMG_BW_CONFIG_7 = 7, + IEEE80211_EDMG_BW_CONFIG_8 = 8, + IEEE80211_EDMG_BW_CONFIG_9 = 9, + IEEE80211_EDMG_BW_CONFIG_10 = 10, + IEEE80211_EDMG_BW_CONFIG_11 = 11, + IEEE80211_EDMG_BW_CONFIG_12 = 12, + IEEE80211_EDMG_BW_CONFIG_13 = 13, + IEEE80211_EDMG_BW_CONFIG_14 = 14, + IEEE80211_EDMG_BW_CONFIG_15 = 15, +}; + +struct ieee80211_edmg { + u8 channels; + enum ieee80211_edmg_bw_config bw_config; +}; + +struct ieee80211_channel; + +struct 
cfg80211_chan_def { + struct ieee80211_channel *chan; + enum nl80211_chan_width width; + u32 center_freq1; + u32 center_freq2; + struct ieee80211_edmg edmg; + u16 freq1_offset; +}; + +struct ieee80211_mcs_info { + u8 rx_mask[10]; + __le16 rx_highest; + u8 tx_params; + u8 reserved[3]; +}; + +struct ieee80211_ht_cap { + __le16 cap_info; + u8 ampdu_params_info; + struct ieee80211_mcs_info mcs; + __le16 extended_ht_cap_info; + __le32 tx_BF_cap_info; + u8 antenna_selection_info; +} __attribute__((packed)); + +struct key_params; + +struct cfg80211_ibss_params { + const u8 *ssid; + const u8 *bssid; + struct cfg80211_chan_def chandef; + const u8 *ie; + u8 ssid_len; + u8 ie_len; + u16 beacon_interval; + u32 basic_rates; + bool channel_fixed; + bool privacy; + bool control_port; + bool control_port_over_nl80211; + bool userspace_handles_dfs; + int: 24; + int mcast_rate[5]; + struct ieee80211_ht_cap ht_capa; + struct ieee80211_ht_cap ht_capa_mask; + struct key_params *wep_keys; + int wep_tx_key; + int: 32; +} __attribute__((packed)); + +enum nl80211_auth_type { + NL80211_AUTHTYPE_OPEN_SYSTEM = 0, + NL80211_AUTHTYPE_SHARED_KEY = 1, + NL80211_AUTHTYPE_FT = 2, + NL80211_AUTHTYPE_NETWORK_EAP = 3, + NL80211_AUTHTYPE_SAE = 4, + NL80211_AUTHTYPE_FILS_SK = 5, + NL80211_AUTHTYPE_FILS_SK_PFS = 6, + NL80211_AUTHTYPE_FILS_PK = 7, + __NL80211_AUTHTYPE_NUM = 8, + NL80211_AUTHTYPE_MAX = 7, + NL80211_AUTHTYPE_AUTOMATIC = 8, +}; + +enum nl80211_mfp { + NL80211_MFP_NO = 0, + NL80211_MFP_REQUIRED = 1, + NL80211_MFP_OPTIONAL = 2, +}; + +enum nl80211_sae_pwe_mechanism { + NL80211_SAE_PWE_UNSPECIFIED = 0, + NL80211_SAE_PWE_HUNT_AND_PECK = 1, + NL80211_SAE_PWE_HASH_TO_ELEMENT = 2, + NL80211_SAE_PWE_BOTH = 3, +}; + +struct cfg80211_crypto_settings { + u32 wpa_versions; + u32 cipher_group; + int n_ciphers_pairwise; + u32 ciphers_pairwise[5]; + int n_akm_suites; + u32 akm_suites[2]; + bool control_port; + __be16 control_port_ethertype; + bool control_port_no_encrypt; + bool control_port_over_nl80211; + bool control_port_no_preauth; + struct key_params *wep_keys; + int wep_tx_key; + const u8 *psk; + const u8 *sae_pwd; + u8 sae_pwd_len; + enum nl80211_sae_pwe_mechanism sae_pwe; +}; + +struct ieee80211_vht_mcs_info { + __le16 rx_mcs_map; + __le16 rx_highest; + __le16 tx_mcs_map; + __le16 tx_highest; +}; + +struct ieee80211_vht_cap { + __le32 vht_cap_info; + struct ieee80211_vht_mcs_info supp_mcs; +}; + +enum nl80211_bss_select_attr { + __NL80211_BSS_SELECT_ATTR_INVALID = 0, + NL80211_BSS_SELECT_ATTR_RSSI = 1, + NL80211_BSS_SELECT_ATTR_BAND_PREF = 2, + NL80211_BSS_SELECT_ATTR_RSSI_ADJUST = 3, + __NL80211_BSS_SELECT_ATTR_AFTER_LAST = 4, + NL80211_BSS_SELECT_ATTR_MAX = 3, +}; + +enum nl80211_band { + NL80211_BAND_2GHZ = 0, + NL80211_BAND_5GHZ = 1, + NL80211_BAND_60GHZ = 2, + NL80211_BAND_6GHZ = 3, + NL80211_BAND_S1GHZ = 4, + NUM_NL80211_BANDS = 5, +}; + +struct cfg80211_bss_select_adjust { + enum nl80211_band band; + s8 delta; +}; + +struct cfg80211_bss_selection { + enum nl80211_bss_select_attr behaviour; + union { + enum nl80211_band band_pref; + struct cfg80211_bss_select_adjust adjust; + } param; +}; + +struct cfg80211_connect_params { + struct ieee80211_channel *channel; + struct ieee80211_channel *channel_hint; + const u8 *bssid; + const u8 *bssid_hint; + const u8 *ssid; + size_t ssid_len; + enum nl80211_auth_type auth_type; + int: 32; + const u8 *ie; + size_t ie_len; + bool privacy; + int: 24; + enum nl80211_mfp mfp; + struct cfg80211_crypto_settings crypto; + const u8 *key; + u8 key_len; + u8 key_idx; + short: 16; + u32 
flags; + int bg_scan_period; + struct ieee80211_ht_cap ht_capa; + struct ieee80211_ht_cap ht_capa_mask; + struct ieee80211_vht_cap vht_capa; + struct ieee80211_vht_cap vht_capa_mask; + bool pbss; + int: 24; + struct cfg80211_bss_selection bss_select; + const u8 *prev_bssid; + const u8 *fils_erp_username; + size_t fils_erp_username_len; + const u8 *fils_erp_realm; + size_t fils_erp_realm_len; + u16 fils_erp_next_seq_num; + long: 48; + const u8 *fils_erp_rrk; + size_t fils_erp_rrk_len; + bool want_1x; + int: 24; + struct ieee80211_edmg edmg; + int: 32; +} __attribute__((packed)); + +struct cfg80211_cqm_config; + +struct wiphy; + +struct wireless_dev { + struct wiphy *wiphy; + enum nl80211_iftype iftype; + struct list_head list; + struct net_device *netdev; + u32 identifier; + struct list_head mgmt_registrations; + spinlock_t mgmt_registrations_lock; + u8 mgmt_registrations_need_update: 1; + struct mutex mtx; + bool use_4addr; + bool is_running; + bool registered; + bool registering; + u8 address[6]; + u8 ssid[32]; + u8 ssid_len; + u8 mesh_id_len; + u8 mesh_id_up_len; + struct cfg80211_conn *conn; + struct cfg80211_cached_keys *connect_keys; + enum ieee80211_bss_type conn_bss_type; + u32 conn_owner_nlportid; + struct work_struct disconnect_wk; + u8 disconnect_bssid[6]; + struct list_head event_list; + spinlock_t event_lock; + struct cfg80211_internal_bss *current_bss; + struct cfg80211_chan_def preset_chandef; + struct cfg80211_chan_def chandef; + bool ibss_fixed; + bool ibss_dfs_possible; + bool ps; + int ps_timeout; + int beacon_interval; + u32 ap_unexpected_nlportid; + u32 owner_nlportid; + bool nl_owner_dead; + bool cac_started; + long unsigned int cac_start_time; + unsigned int cac_time_ms; + struct { + struct cfg80211_ibss_params ibss; + struct cfg80211_connect_params connect; + struct cfg80211_cached_keys *keys; + const u8 *ie; + size_t ie_len; + u8 bssid[6]; + u8 prev_bssid[6]; + u8 ssid[32]; + s8 default_key; + s8 default_mgmt_key; + bool prev_bssid_valid; + } wext; + struct cfg80211_cqm_config *cqm_config; + struct list_head pmsr_list; + spinlock_t pmsr_lock; + struct work_struct pmsr_free_wk; + long unsigned int unprot_beacon_reported; +}; + +struct iw_encode_ext { + __u32 ext_flags; + __u8 tx_seq[8]; + __u8 rx_seq[8]; + struct sockaddr addr; + __u16 alg; + __u16 key_len; + __u8 key[0]; +}; + +struct iwreq { + union { + char ifrn_name[16]; + } ifr_ifrn; + union iwreq_data u; +}; + +struct iw_event { + __u16 len; + __u16 cmd; + union iwreq_data u; +}; + +struct compat_iw_point { + compat_caddr_t pointer; + __u16 length; + __u16 flags; +}; + +struct __compat_iw_event { + __u16 len; + __u16 cmd; + compat_caddr_t pointer; +}; + +enum nl80211_reg_initiator { + NL80211_REGDOM_SET_BY_CORE = 0, + NL80211_REGDOM_SET_BY_USER = 1, + NL80211_REGDOM_SET_BY_DRIVER = 2, + NL80211_REGDOM_SET_BY_COUNTRY_IE = 3, +}; + +enum nl80211_dfs_regions { + NL80211_DFS_UNSET = 0, + NL80211_DFS_FCC = 1, + NL80211_DFS_ETSI = 2, + NL80211_DFS_JP = 3, +}; + +enum nl80211_user_reg_hint_type { + NL80211_USER_REG_HINT_USER = 0, + NL80211_USER_REG_HINT_CELL_BASE = 1, + NL80211_USER_REG_HINT_INDOOR = 2, +}; + +enum nl80211_mntr_flags { + __NL80211_MNTR_FLAG_INVALID = 0, + NL80211_MNTR_FLAG_FCSFAIL = 1, + NL80211_MNTR_FLAG_PLCPFAIL = 2, + NL80211_MNTR_FLAG_CONTROL = 3, + NL80211_MNTR_FLAG_OTHER_BSS = 4, + NL80211_MNTR_FLAG_COOK_FRAMES = 5, + NL80211_MNTR_FLAG_ACTIVE = 6, + __NL80211_MNTR_FLAG_AFTER_LAST = 7, + NL80211_MNTR_FLAG_MAX = 6, +}; + +enum nl80211_key_mode { + NL80211_KEY_RX_TX = 0, + NL80211_KEY_NO_TX = 1, + 
NL80211_KEY_SET_TX = 2, +}; + +enum nl80211_bss_scan_width { + NL80211_BSS_CHAN_WIDTH_20 = 0, + NL80211_BSS_CHAN_WIDTH_10 = 1, + NL80211_BSS_CHAN_WIDTH_5 = 2, + NL80211_BSS_CHAN_WIDTH_1 = 3, + NL80211_BSS_CHAN_WIDTH_2 = 4, +}; + +struct nl80211_wowlan_tcp_data_seq { + __u32 start; + __u32 offset; + __u32 len; +}; + +struct nl80211_wowlan_tcp_data_token { + __u32 offset; + __u32 len; + __u8 token_stream[0]; +}; + +struct nl80211_wowlan_tcp_data_token_feature { + __u32 min_len; + __u32 max_len; + __u32 bufsize; +}; + +enum nl80211_ext_feature_index { + NL80211_EXT_FEATURE_VHT_IBSS = 0, + NL80211_EXT_FEATURE_RRM = 1, + NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER = 2, + NL80211_EXT_FEATURE_SCAN_START_TIME = 3, + NL80211_EXT_FEATURE_BSS_PARENT_TSF = 4, + NL80211_EXT_FEATURE_SET_SCAN_DWELL = 5, + NL80211_EXT_FEATURE_BEACON_RATE_LEGACY = 6, + NL80211_EXT_FEATURE_BEACON_RATE_HT = 7, + NL80211_EXT_FEATURE_BEACON_RATE_VHT = 8, + NL80211_EXT_FEATURE_FILS_STA = 9, + NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA = 10, + NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED = 11, + NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI = 12, + NL80211_EXT_FEATURE_CQM_RSSI_LIST = 13, + NL80211_EXT_FEATURE_FILS_SK_OFFLOAD = 14, + NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK = 15, + NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X = 16, + NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME = 17, + NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP = 18, + NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE = 19, + NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 20, + NL80211_EXT_FEATURE_MFP_OPTIONAL = 21, + NL80211_EXT_FEATURE_LOW_SPAN_SCAN = 22, + NL80211_EXT_FEATURE_LOW_POWER_SCAN = 23, + NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN = 24, + NL80211_EXT_FEATURE_DFS_OFFLOAD = 25, + NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211 = 26, + NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT = 27, + NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT = 27, + NL80211_EXT_FEATURE_TXQS = 28, + NL80211_EXT_FEATURE_SCAN_RANDOM_SN = 29, + NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT = 30, + NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 = 31, + NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER = 32, + NL80211_EXT_FEATURE_AIRTIME_FAIRNESS = 33, + NL80211_EXT_FEATURE_AP_PMKSA_CACHING = 34, + NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD = 35, + NL80211_EXT_FEATURE_EXT_KEY_ID = 36, + NL80211_EXT_FEATURE_STA_TX_PWR = 37, + NL80211_EXT_FEATURE_SAE_OFFLOAD = 38, + NL80211_EXT_FEATURE_VLAN_OFFLOAD = 39, + NL80211_EXT_FEATURE_AQL = 40, + NL80211_EXT_FEATURE_BEACON_PROTECTION = 41, + NL80211_EXT_FEATURE_CONTROL_PORT_NO_PREAUTH = 42, + NL80211_EXT_FEATURE_PROTECTED_TWT = 43, + NL80211_EXT_FEATURE_DEL_IBSS_STA = 44, + NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS = 45, + NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT = 46, + NL80211_EXT_FEATURE_SCAN_FREQ_KHZ = 47, + NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211_TX_STATUS = 48, + NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION = 49, + NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK = 50, + NL80211_EXT_FEATURE_SAE_OFFLOAD_AP = 51, + NL80211_EXT_FEATURE_FILS_DISCOVERY = 52, + NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP = 53, + NL80211_EXT_FEATURE_BEACON_RATE_HE = 54, + NL80211_EXT_FEATURE_SECURE_LTF = 55, + NL80211_EXT_FEATURE_SECURE_RTT = 56, + NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE = 57, + NUM_NL80211_EXT_FEATURES = 58, + MAX_NL80211_EXT_FEATURES = 57, +}; + +enum nl80211_dfs_state { + NL80211_DFS_USABLE = 0, + NL80211_DFS_UNAVAILABLE = 1, + NL80211_DFS_AVAILABLE = 2, +}; + +struct nl80211_vendor_cmd_info { + __u32 vendor_id; + __u32 subcmd; +}; + +enum 
nl80211_sar_type { + NL80211_SAR_TYPE_POWER = 0, + NUM_NL80211_SAR_TYPE = 1, +}; + +struct ieee80211_he_cap_elem { + u8 mac_cap_info[6]; + u8 phy_cap_info[11]; +}; + +struct ieee80211_he_mcs_nss_supp { + __le16 rx_mcs_80; + __le16 tx_mcs_80; + __le16 rx_mcs_160; + __le16 tx_mcs_160; + __le16 rx_mcs_80p80; + __le16 tx_mcs_80p80; +}; + +struct ieee80211_he_6ghz_capa { + __le16 capa; +}; + +enum environment_cap { + ENVIRON_ANY = 0, + ENVIRON_INDOOR = 1, + ENVIRON_OUTDOOR = 2, +}; + +struct regulatory_request { + struct callback_head callback_head; + int wiphy_idx; + enum nl80211_reg_initiator initiator; + enum nl80211_user_reg_hint_type user_reg_hint_type; + char alpha2[3]; + enum nl80211_dfs_regions dfs_region; + bool intersect; + bool processed; + enum environment_cap country_ie_env; + struct list_head list; +}; + +struct ieee80211_freq_range { + u32 start_freq_khz; + u32 end_freq_khz; + u32 max_bandwidth_khz; +}; + +struct ieee80211_power_rule { + u32 max_antenna_gain; + u32 max_eirp; +}; + +struct ieee80211_wmm_ac { + u16 cw_min; + u16 cw_max; + u16 cot; + u8 aifsn; +}; + +struct ieee80211_wmm_rule { + struct ieee80211_wmm_ac client[4]; + struct ieee80211_wmm_ac ap[4]; +}; + +struct ieee80211_reg_rule { + struct ieee80211_freq_range freq_range; + struct ieee80211_power_rule power_rule; + struct ieee80211_wmm_rule wmm_rule; + u32 flags; + u32 dfs_cac_ms; + bool has_wmm; +}; + +struct ieee80211_regdomain { + struct callback_head callback_head; + u32 n_reg_rules; + char alpha2[3]; + enum nl80211_dfs_regions dfs_region; + struct ieee80211_reg_rule reg_rules[0]; +}; + +struct ieee80211_channel { + enum nl80211_band band; + u32 center_freq; + u16 freq_offset; + u16 hw_value; + u32 flags; + int max_antenna_gain; + int max_power; + int max_reg_power; + bool beacon_found; + u32 orig_flags; + int orig_mag; + int orig_mpwr; + enum nl80211_dfs_state dfs_state; + long unsigned int dfs_state_entered; + unsigned int dfs_cac_ms; +}; + +struct ieee80211_rate { + u32 flags; + u16 bitrate; + u16 hw_value; + u16 hw_value_short; +}; + +struct ieee80211_sta_ht_cap { + u16 cap; + bool ht_supported; + u8 ampdu_factor; + u8 ampdu_density; + struct ieee80211_mcs_info mcs; + char: 8; +} __attribute__((packed)); + +struct ieee80211_sta_vht_cap { + bool vht_supported; + u32 cap; + struct ieee80211_vht_mcs_info vht_mcs; +}; + +struct ieee80211_sta_he_cap { + bool has_he; + struct ieee80211_he_cap_elem he_cap_elem; + struct ieee80211_he_mcs_nss_supp he_mcs_nss_supp; + u8 ppe_thres[25]; +} __attribute__((packed)); + +struct ieee80211_sband_iftype_data { + u16 types_mask; + struct ieee80211_sta_he_cap he_cap; + struct ieee80211_he_6ghz_capa he_6ghz_capa; + char: 8; +} __attribute__((packed)); + +struct ieee80211_sta_s1g_cap { + bool s1g; + u8 cap[10]; + u8 nss_mcs[5]; +}; + +struct ieee80211_supported_band { + struct ieee80211_channel *channels; + struct ieee80211_rate *bitrates; + enum nl80211_band band; + int n_channels; + int n_bitrates; + struct ieee80211_sta_ht_cap ht_cap; + struct ieee80211_sta_vht_cap vht_cap; + struct ieee80211_sta_s1g_cap s1g_cap; + struct ieee80211_edmg edmg_cap; + u16 n_iftype_data; + const struct ieee80211_sband_iftype_data *iftype_data; +}; + +struct key_params { + const u8 *key; + const u8 *seq; + int key_len; + int seq_len; + u16 vlan_id; + u32 cipher; + enum nl80211_key_mode mode; +}; + +struct mac_address { + u8 addr[6]; +}; + +struct cfg80211_sar_freq_ranges { + u32 start_freq; + u32 end_freq; +}; + +struct cfg80211_sar_capa { + enum nl80211_sar_type type; + u32 num_freq_ranges; + const 
struct cfg80211_sar_freq_ranges *freq_ranges; +}; + +struct cfg80211_ssid { + u8 ssid[32]; + u8 ssid_len; +}; + +enum cfg80211_signal_type { + CFG80211_SIGNAL_TYPE_NONE = 0, + CFG80211_SIGNAL_TYPE_MBM = 1, + CFG80211_SIGNAL_TYPE_UNSPEC = 2, +}; + +struct ieee80211_txrx_stypes; + +struct ieee80211_iface_combination; + +struct wiphy_iftype_akm_suites; + +struct wiphy_wowlan_support; + +struct cfg80211_wowlan; + +struct wiphy_iftype_ext_capab; + +struct wiphy_coalesce_support; + +struct wiphy_vendor_command; + +struct cfg80211_pmsr_capabilities; + +struct wiphy { + struct mutex mtx; + u8 perm_addr[6]; + u8 addr_mask[6]; + struct mac_address *addresses; + const struct ieee80211_txrx_stypes *mgmt_stypes; + const struct ieee80211_iface_combination *iface_combinations; + int n_iface_combinations; + u16 software_iftypes; + u16 n_addresses; + u16 interface_modes; + u16 max_acl_mac_addrs; + u32 flags; + u32 regulatory_flags; + u32 features; + u8 ext_features[8]; + u32 ap_sme_capa; + enum cfg80211_signal_type signal_type; + int bss_priv_size; + u8 max_scan_ssids; + u8 max_sched_scan_reqs; + u8 max_sched_scan_ssids; + u8 max_match_sets; + u16 max_scan_ie_len; + u16 max_sched_scan_ie_len; + u32 max_sched_scan_plans; + u32 max_sched_scan_plan_interval; + u32 max_sched_scan_plan_iterations; + int n_cipher_suites; + const u32 *cipher_suites; + int n_akm_suites; + const u32 *akm_suites; + const struct wiphy_iftype_akm_suites *iftype_akm_suites; + unsigned int num_iftype_akm_suites; + u8 retry_short; + u8 retry_long; + u32 frag_threshold; + u32 rts_threshold; + u8 coverage_class; + char fw_version[32]; + u32 hw_version; + const struct wiphy_wowlan_support *wowlan; + struct cfg80211_wowlan *wowlan_config; + u16 max_remain_on_channel_duration; + u8 max_num_pmkids; + u32 available_antennas_tx; + u32 available_antennas_rx; + u32 probe_resp_offload; + const u8 *extended_capabilities; + const u8 *extended_capabilities_mask; + u8 extended_capabilities_len; + const struct wiphy_iftype_ext_capab *iftype_ext_capab; + unsigned int num_iftype_ext_capab; + const void *privid; + struct ieee80211_supported_band *bands[5]; + void (*reg_notifier)(struct wiphy *, struct regulatory_request *); + const struct ieee80211_regdomain *regd; + struct device dev; + bool registered; + struct dentry *debugfsdir; + const struct ieee80211_ht_cap *ht_capa_mod_mask; + const struct ieee80211_vht_cap *vht_capa_mod_mask; + struct list_head wdev_list; + possible_net_t _net; + const struct iw_handler_def *wext; + const struct wiphy_coalesce_support *coalesce; + const struct wiphy_vendor_command *vendor_commands; + const struct nl80211_vendor_cmd_info *vendor_events; + int n_vendor_commands; + int n_vendor_events; + u16 max_ap_assoc_sta; + u8 max_num_csa_counters; + u32 bss_select_support; + u8 nan_supported_bands; + u32 txq_limit; + u32 txq_memory_limit; + u32 txq_quantum; + long unsigned int tx_queue_len; + u8 support_mbssid: 1; + u8 support_only_he_mbssid: 1; + const struct cfg80211_pmsr_capabilities *pmsr_capa; + struct { + u64 peer; + u64 vif; + u8 max_retry; + } tid_config_support; + u8 max_data_retry_count; + const struct cfg80211_sar_capa *sar_capa; + long: 64; + long: 64; + long: 64; + char priv[0]; +}; + +struct cfg80211_match_set { + struct cfg80211_ssid ssid; + u8 bssid[6]; + s32 rssi_thold; + s32 per_band_rssi_thold[5]; +}; + +struct cfg80211_sched_scan_plan { + u32 interval; + u32 iterations; +}; + +struct cfg80211_sched_scan_request { + u64 reqid; + struct cfg80211_ssid *ssids; + int n_ssids; + u32 n_channels; + enum 
nl80211_bss_scan_width scan_width; + const u8 *ie; + size_t ie_len; + u32 flags; + struct cfg80211_match_set *match_sets; + int n_match_sets; + s32 min_rssi_thold; + u32 delay; + struct cfg80211_sched_scan_plan *scan_plans; + int n_scan_plans; + u8 mac_addr[6]; + u8 mac_addr_mask[6]; + bool relative_rssi_set; + s8 relative_rssi; + struct cfg80211_bss_select_adjust rssi_adjust; + struct wiphy *wiphy; + struct net_device *dev; + long unsigned int scan_start; + bool report_results; + struct callback_head callback_head; + u32 owner_nlportid; + bool nl_owner_dead; + struct list_head list; + struct ieee80211_channel *channels[0]; +}; + +struct cfg80211_pkt_pattern { + const u8 *mask; + const u8 *pattern; + int pattern_len; + int pkt_offset; +}; + +struct cfg80211_wowlan_tcp { + struct socket *sock; + __be32 src; + __be32 dst; + u16 src_port; + u16 dst_port; + u8 dst_mac[6]; + int payload_len; + const u8 *payload; + struct nl80211_wowlan_tcp_data_seq payload_seq; + u32 data_interval; + u32 wake_len; + const u8 *wake_data; + const u8 *wake_mask; + u32 tokens_size; + struct nl80211_wowlan_tcp_data_token payload_tok; +}; + +struct cfg80211_wowlan { + bool any; + bool disconnect; + bool magic_pkt; + bool gtk_rekey_failure; + bool eap_identity_req; + bool four_way_handshake; + bool rfkill_release; + struct cfg80211_pkt_pattern *patterns; + struct cfg80211_wowlan_tcp *tcp; + int n_patterns; + struct cfg80211_sched_scan_request *nd_config; +}; + +struct ieee80211_iface_limit { + u16 max; + u16 types; +}; + +struct ieee80211_iface_combination { + const struct ieee80211_iface_limit *limits; + u32 num_different_channels; + u16 max_interfaces; + u8 n_limits; + bool beacon_int_infra_match; + u8 radar_detect_widths; + u8 radar_detect_regions; + u32 beacon_int_min_gcd; +}; + +struct ieee80211_txrx_stypes { + u16 tx; + u16 rx; +}; + +struct wiphy_wowlan_tcp_support { + const struct nl80211_wowlan_tcp_data_token_feature *tok; + u32 data_payload_max; + u32 data_interval_max; + u32 wake_payload_max; + bool seq; +}; + +struct wiphy_wowlan_support { + u32 flags; + int n_patterns; + int pattern_max_len; + int pattern_min_len; + int max_pkt_offset; + int max_nd_match_sets; + const struct wiphy_wowlan_tcp_support *tcp; +}; + +struct wiphy_coalesce_support { + int n_rules; + int max_delay; + int n_patterns; + int pattern_max_len; + int pattern_min_len; + int max_pkt_offset; +}; + +struct wiphy_vendor_command { + struct nl80211_vendor_cmd_info info; + u32 flags; + int (*doit)(struct wiphy *, struct wireless_dev *, const void *, int); + int (*dumpit)(struct wiphy *, struct wireless_dev *, struct sk_buff *, const void *, int, long unsigned int *); + const struct nla_policy *policy; + unsigned int maxattr; +}; + +struct wiphy_iftype_ext_capab { + enum nl80211_iftype iftype; + const u8 *extended_capabilities; + const u8 *extended_capabilities_mask; + u8 extended_capabilities_len; +}; + +struct cfg80211_pmsr_capabilities { + unsigned int max_peers; + u8 report_ap_tsf: 1; + u8 randomize_mac_addr: 1; + struct { + u32 preambles; + u32 bandwidths; + s8 max_bursts_exponent; + u8 max_ftms_per_burst; + u8 supported: 1; + u8 asap: 1; + u8 non_asap: 1; + u8 request_lci: 1; + u8 request_civicloc: 1; + u8 trigger_based: 1; + u8 non_trigger_based: 1; + } ftm; +}; + +struct wiphy_iftype_akm_suites { + u16 iftypes_mask; + const u32 *akm_suites; + int n_akm_suites; +}; + +struct iw_ioctl_description { + __u8 header_type; + __u8 token_type; + __u16 token_size; + __u16 min_tokens; + __u16 max_tokens; + __u32 flags; +}; + +typedef int 
(*wext_ioctl_func)(struct net_device *, struct iwreq *, unsigned int, struct iw_request_info *, iw_handler); + +struct iw_thrspy { + struct sockaddr addr; + struct iw_quality qual; + struct iw_quality low; + struct iw_quality high; +}; + +struct netlbl_af4list { + __be32 addr; + __be32 mask; + u32 valid; + struct list_head list; +}; + +struct netlbl_af6list { + struct in6_addr addr; + struct in6_addr mask; + u32 valid; + struct list_head list; +}; + +struct netlbl_domaddr_map { + struct list_head list4; + struct list_head list6; +}; + +struct netlbl_dommap_def { + u32 type; + union { + struct netlbl_domaddr_map *addrsel; + struct cipso_v4_doi *cipso; + struct calipso_doi *calipso; + }; +}; + +struct netlbl_domaddr4_map { + struct netlbl_dommap_def def; + struct netlbl_af4list list; +}; + +struct netlbl_domaddr6_map { + struct netlbl_dommap_def def; + struct netlbl_af6list list; +}; + +struct netlbl_dom_map { + char *domain; + u16 family; + struct netlbl_dommap_def def; + u32 valid; + struct list_head list; + struct callback_head rcu; +}; + +struct netlbl_domhsh_tbl { + struct list_head *tbl; + u32 size; +}; + +enum { + NLBL_MGMT_C_UNSPEC = 0, + NLBL_MGMT_C_ADD = 1, + NLBL_MGMT_C_REMOVE = 2, + NLBL_MGMT_C_LISTALL = 3, + NLBL_MGMT_C_ADDDEF = 4, + NLBL_MGMT_C_REMOVEDEF = 5, + NLBL_MGMT_C_LISTDEF = 6, + NLBL_MGMT_C_PROTOCOLS = 7, + NLBL_MGMT_C_VERSION = 8, + __NLBL_MGMT_C_MAX = 9, +}; + +enum { + NLBL_MGMT_A_UNSPEC = 0, + NLBL_MGMT_A_DOMAIN = 1, + NLBL_MGMT_A_PROTOCOL = 2, + NLBL_MGMT_A_VERSION = 3, + NLBL_MGMT_A_CV4DOI = 4, + NLBL_MGMT_A_IPV6ADDR = 5, + NLBL_MGMT_A_IPV6MASK = 6, + NLBL_MGMT_A_IPV4ADDR = 7, + NLBL_MGMT_A_IPV4MASK = 8, + NLBL_MGMT_A_ADDRSELECTOR = 9, + NLBL_MGMT_A_SELECTORLIST = 10, + NLBL_MGMT_A_FAMILY = 11, + NLBL_MGMT_A_CLPDOI = 12, + __NLBL_MGMT_A_MAX = 13, +}; + +struct netlbl_domhsh_walk_arg { + struct netlink_callback *nl_cb; + struct sk_buff *skb; + u32 seq; +}; + +enum { + NLBL_UNLABEL_C_UNSPEC = 0, + NLBL_UNLABEL_C_ACCEPT = 1, + NLBL_UNLABEL_C_LIST = 2, + NLBL_UNLABEL_C_STATICADD = 3, + NLBL_UNLABEL_C_STATICREMOVE = 4, + NLBL_UNLABEL_C_STATICLIST = 5, + NLBL_UNLABEL_C_STATICADDDEF = 6, + NLBL_UNLABEL_C_STATICREMOVEDEF = 7, + NLBL_UNLABEL_C_STATICLISTDEF = 8, + __NLBL_UNLABEL_C_MAX = 9, +}; + +enum { + NLBL_UNLABEL_A_UNSPEC = 0, + NLBL_UNLABEL_A_ACPTFLG = 1, + NLBL_UNLABEL_A_IPV6ADDR = 2, + NLBL_UNLABEL_A_IPV6MASK = 3, + NLBL_UNLABEL_A_IPV4ADDR = 4, + NLBL_UNLABEL_A_IPV4MASK = 5, + NLBL_UNLABEL_A_IFACE = 6, + NLBL_UNLABEL_A_SECCTX = 7, + __NLBL_UNLABEL_A_MAX = 8, +}; + +struct netlbl_unlhsh_tbl { + struct list_head *tbl; + u32 size; +}; + +struct netlbl_unlhsh_addr4 { + struct lsmblob lsmblob; + struct netlbl_af4list list; + struct callback_head rcu; +}; + +struct netlbl_unlhsh_addr6 { + struct lsmblob lsmblob; + struct netlbl_af6list list; + struct callback_head rcu; +}; + +struct netlbl_unlhsh_iface { + int ifindex; + struct list_head addr4_list; + struct list_head addr6_list; + u32 valid; + struct list_head list; + struct callback_head rcu; +}; + +struct netlbl_unlhsh_walk_arg { + struct netlink_callback *nl_cb; + struct sk_buff *skb; + u32 seq; +}; + +enum { + NLBL_CIPSOV4_C_UNSPEC = 0, + NLBL_CIPSOV4_C_ADD = 1, + NLBL_CIPSOV4_C_REMOVE = 2, + NLBL_CIPSOV4_C_LIST = 3, + NLBL_CIPSOV4_C_LISTALL = 4, + __NLBL_CIPSOV4_C_MAX = 5, +}; + +enum { + NLBL_CIPSOV4_A_UNSPEC = 0, + NLBL_CIPSOV4_A_DOI = 1, + NLBL_CIPSOV4_A_MTYPE = 2, + NLBL_CIPSOV4_A_TAG = 3, + NLBL_CIPSOV4_A_TAGLST = 4, + NLBL_CIPSOV4_A_MLSLVLLOC = 5, + NLBL_CIPSOV4_A_MLSLVLREM = 6, + NLBL_CIPSOV4_A_MLSLVL = 7, 
+ NLBL_CIPSOV4_A_MLSLVLLST = 8, + NLBL_CIPSOV4_A_MLSCATLOC = 9, + NLBL_CIPSOV4_A_MLSCATREM = 10, + NLBL_CIPSOV4_A_MLSCAT = 11, + NLBL_CIPSOV4_A_MLSCATLST = 12, + __NLBL_CIPSOV4_A_MAX = 13, +}; + +struct netlbl_cipsov4_doiwalk_arg { + struct netlink_callback *nl_cb; + struct sk_buff *skb; + u32 seq; +}; + +struct netlbl_domhsh_walk_arg___2 { + struct netlbl_audit *audit_info; + u32 doi; +}; + +enum { + NLBL_CALIPSO_C_UNSPEC = 0, + NLBL_CALIPSO_C_ADD = 1, + NLBL_CALIPSO_C_REMOVE = 2, + NLBL_CALIPSO_C_LIST = 3, + NLBL_CALIPSO_C_LISTALL = 4, + __NLBL_CALIPSO_C_MAX = 5, +}; + +enum { + NLBL_CALIPSO_A_UNSPEC = 0, + NLBL_CALIPSO_A_DOI = 1, + NLBL_CALIPSO_A_MTYPE = 2, + __NLBL_CALIPSO_A_MAX = 3, +}; + +struct netlbl_calipso_doiwalk_arg { + struct netlink_callback *nl_cb; + struct sk_buff *skb; + u32 seq; +}; + +enum rfkill_type { + RFKILL_TYPE_ALL = 0, + RFKILL_TYPE_WLAN = 1, + RFKILL_TYPE_BLUETOOTH = 2, + RFKILL_TYPE_UWB = 3, + RFKILL_TYPE_WIMAX = 4, + RFKILL_TYPE_WWAN = 5, + RFKILL_TYPE_GPS = 6, + RFKILL_TYPE_FM = 7, + RFKILL_TYPE_NFC = 8, + NUM_RFKILL_TYPES = 9, +}; + +enum rfkill_operation { + RFKILL_OP_ADD = 0, + RFKILL_OP_DEL = 1, + RFKILL_OP_CHANGE = 2, + RFKILL_OP_CHANGE_ALL = 3, +}; + +enum rfkill_hard_block_reasons { + RFKILL_HARD_BLOCK_SIGNAL = 1, + RFKILL_HARD_BLOCK_NOT_OWNER = 2, +}; + +struct rfkill_event_ext { + __u32 idx; + __u8 type; + __u8 op; + __u8 soft; + __u8 hard; + __u8 hard_block_reasons; +} __attribute__((packed)); + +enum rfkill_user_states { + RFKILL_USER_STATE_SOFT_BLOCKED = 0, + RFKILL_USER_STATE_UNBLOCKED = 1, + RFKILL_USER_STATE_HARD_BLOCKED = 2, +}; + +struct rfkill; + +struct rfkill_ops { + void (*poll)(struct rfkill *, void *); + void (*query)(struct rfkill *, void *); + int (*set_block)(void *, bool); +}; + +struct rfkill { + spinlock_t lock; + enum rfkill_type type; + long unsigned int state; + long unsigned int hard_block_reasons; + u32 idx; + bool registered; + bool persistent; + bool polling_paused; + bool suspended; + const struct rfkill_ops *ops; + void *data; + struct led_trigger led_trigger; + const char *ledtrigname; + struct device dev; + struct list_head node; + struct delayed_work poll_work; + struct work_struct uevent_work; + struct work_struct sync_work; + char name[0]; +}; + +struct rfkill_int_event { + struct list_head list; + struct rfkill_event_ext ev; +}; + +struct rfkill_data { + struct list_head list; + struct list_head events; + struct mutex mtx; + wait_queue_head_t read_wait; + bool input_handler; +}; + +enum rfkill_input_master_mode { + RFKILL_INPUT_MASTER_UNLOCK = 0, + RFKILL_INPUT_MASTER_RESTORE = 1, + RFKILL_INPUT_MASTER_UNBLOCKALL = 2, + NUM_RFKILL_INPUT_MASTER_MODES = 3, +}; + +enum rfkill_sched_op { + RFKILL_GLOBAL_OP_EPO = 0, + RFKILL_GLOBAL_OP_RESTORE = 1, + RFKILL_GLOBAL_OP_UNLOCK = 2, + RFKILL_GLOBAL_OP_UNBLOCK = 3, +}; + +struct dcbmsg { + __u8 dcb_family; + __u8 cmd; + __u16 dcb_pad; +}; + +enum dcbnl_commands { + DCB_CMD_UNDEFINED = 0, + DCB_CMD_GSTATE = 1, + DCB_CMD_SSTATE = 2, + DCB_CMD_PGTX_GCFG = 3, + DCB_CMD_PGTX_SCFG = 4, + DCB_CMD_PGRX_GCFG = 5, + DCB_CMD_PGRX_SCFG = 6, + DCB_CMD_PFC_GCFG = 7, + DCB_CMD_PFC_SCFG = 8, + DCB_CMD_SET_ALL = 9, + DCB_CMD_GPERM_HWADDR = 10, + DCB_CMD_GCAP = 11, + DCB_CMD_GNUMTCS = 12, + DCB_CMD_SNUMTCS = 13, + DCB_CMD_PFC_GSTATE = 14, + DCB_CMD_PFC_SSTATE = 15, + DCB_CMD_BCN_GCFG = 16, + DCB_CMD_BCN_SCFG = 17, + DCB_CMD_GAPP = 18, + DCB_CMD_SAPP = 19, + DCB_CMD_IEEE_SET = 20, + DCB_CMD_IEEE_GET = 21, + DCB_CMD_GDCBX = 22, + DCB_CMD_SDCBX = 23, + DCB_CMD_GFEATCFG = 24, + DCB_CMD_SFEATCFG = 
25, + DCB_CMD_CEE_GET = 26, + DCB_CMD_IEEE_DEL = 27, + __DCB_CMD_ENUM_MAX = 28, + DCB_CMD_MAX = 27, +}; + +enum dcbnl_attrs { + DCB_ATTR_UNDEFINED = 0, + DCB_ATTR_IFNAME = 1, + DCB_ATTR_STATE = 2, + DCB_ATTR_PFC_STATE = 3, + DCB_ATTR_PFC_CFG = 4, + DCB_ATTR_NUM_TC = 5, + DCB_ATTR_PG_CFG = 6, + DCB_ATTR_SET_ALL = 7, + DCB_ATTR_PERM_HWADDR = 8, + DCB_ATTR_CAP = 9, + DCB_ATTR_NUMTCS = 10, + DCB_ATTR_BCN = 11, + DCB_ATTR_APP = 12, + DCB_ATTR_IEEE = 13, + DCB_ATTR_DCBX = 14, + DCB_ATTR_FEATCFG = 15, + DCB_ATTR_CEE = 16, + __DCB_ATTR_ENUM_MAX = 17, + DCB_ATTR_MAX = 16, +}; + +enum ieee_attrs { + DCB_ATTR_IEEE_UNSPEC = 0, + DCB_ATTR_IEEE_ETS = 1, + DCB_ATTR_IEEE_PFC = 2, + DCB_ATTR_IEEE_APP_TABLE = 3, + DCB_ATTR_IEEE_PEER_ETS = 4, + DCB_ATTR_IEEE_PEER_PFC = 5, + DCB_ATTR_IEEE_PEER_APP = 6, + DCB_ATTR_IEEE_MAXRATE = 7, + DCB_ATTR_IEEE_QCN = 8, + DCB_ATTR_IEEE_QCN_STATS = 9, + DCB_ATTR_DCB_BUFFER = 10, + __DCB_ATTR_IEEE_MAX = 11, +}; + +enum ieee_attrs_app { + DCB_ATTR_IEEE_APP_UNSPEC = 0, + DCB_ATTR_IEEE_APP = 1, + __DCB_ATTR_IEEE_APP_MAX = 2, +}; + +enum cee_attrs { + DCB_ATTR_CEE_UNSPEC = 0, + DCB_ATTR_CEE_PEER_PG = 1, + DCB_ATTR_CEE_PEER_PFC = 2, + DCB_ATTR_CEE_PEER_APP_TABLE = 3, + DCB_ATTR_CEE_TX_PG = 4, + DCB_ATTR_CEE_RX_PG = 5, + DCB_ATTR_CEE_PFC = 6, + DCB_ATTR_CEE_APP_TABLE = 7, + DCB_ATTR_CEE_FEAT = 8, + __DCB_ATTR_CEE_MAX = 9, +}; + +enum peer_app_attr { + DCB_ATTR_CEE_PEER_APP_UNSPEC = 0, + DCB_ATTR_CEE_PEER_APP_INFO = 1, + DCB_ATTR_CEE_PEER_APP = 2, + __DCB_ATTR_CEE_PEER_APP_MAX = 3, +}; + +enum dcbnl_pfc_up_attrs { + DCB_PFC_UP_ATTR_UNDEFINED = 0, + DCB_PFC_UP_ATTR_0 = 1, + DCB_PFC_UP_ATTR_1 = 2, + DCB_PFC_UP_ATTR_2 = 3, + DCB_PFC_UP_ATTR_3 = 4, + DCB_PFC_UP_ATTR_4 = 5, + DCB_PFC_UP_ATTR_5 = 6, + DCB_PFC_UP_ATTR_6 = 7, + DCB_PFC_UP_ATTR_7 = 8, + DCB_PFC_UP_ATTR_ALL = 9, + __DCB_PFC_UP_ATTR_ENUM_MAX = 10, + DCB_PFC_UP_ATTR_MAX = 9, +}; + +enum dcbnl_pg_attrs { + DCB_PG_ATTR_UNDEFINED = 0, + DCB_PG_ATTR_TC_0 = 1, + DCB_PG_ATTR_TC_1 = 2, + DCB_PG_ATTR_TC_2 = 3, + DCB_PG_ATTR_TC_3 = 4, + DCB_PG_ATTR_TC_4 = 5, + DCB_PG_ATTR_TC_5 = 6, + DCB_PG_ATTR_TC_6 = 7, + DCB_PG_ATTR_TC_7 = 8, + DCB_PG_ATTR_TC_MAX = 9, + DCB_PG_ATTR_TC_ALL = 10, + DCB_PG_ATTR_BW_ID_0 = 11, + DCB_PG_ATTR_BW_ID_1 = 12, + DCB_PG_ATTR_BW_ID_2 = 13, + DCB_PG_ATTR_BW_ID_3 = 14, + DCB_PG_ATTR_BW_ID_4 = 15, + DCB_PG_ATTR_BW_ID_5 = 16, + DCB_PG_ATTR_BW_ID_6 = 17, + DCB_PG_ATTR_BW_ID_7 = 18, + DCB_PG_ATTR_BW_ID_MAX = 19, + DCB_PG_ATTR_BW_ID_ALL = 20, + __DCB_PG_ATTR_ENUM_MAX = 21, + DCB_PG_ATTR_MAX = 20, +}; + +enum dcbnl_tc_attrs { + DCB_TC_ATTR_PARAM_UNDEFINED = 0, + DCB_TC_ATTR_PARAM_PGID = 1, + DCB_TC_ATTR_PARAM_UP_MAPPING = 2, + DCB_TC_ATTR_PARAM_STRICT_PRIO = 3, + DCB_TC_ATTR_PARAM_BW_PCT = 4, + DCB_TC_ATTR_PARAM_ALL = 5, + __DCB_TC_ATTR_PARAM_ENUM_MAX = 6, + DCB_TC_ATTR_PARAM_MAX = 5, +}; + +enum dcbnl_cap_attrs { + DCB_CAP_ATTR_UNDEFINED = 0, + DCB_CAP_ATTR_ALL = 1, + DCB_CAP_ATTR_PG = 2, + DCB_CAP_ATTR_PFC = 3, + DCB_CAP_ATTR_UP2TC = 4, + DCB_CAP_ATTR_PG_TCS = 5, + DCB_CAP_ATTR_PFC_TCS = 6, + DCB_CAP_ATTR_GSP = 7, + DCB_CAP_ATTR_BCN = 8, + DCB_CAP_ATTR_DCBX = 9, + __DCB_CAP_ATTR_ENUM_MAX = 10, + DCB_CAP_ATTR_MAX = 9, +}; + +enum dcbnl_numtcs_attrs { + DCB_NUMTCS_ATTR_UNDEFINED = 0, + DCB_NUMTCS_ATTR_ALL = 1, + DCB_NUMTCS_ATTR_PG = 2, + DCB_NUMTCS_ATTR_PFC = 3, + __DCB_NUMTCS_ATTR_ENUM_MAX = 4, + DCB_NUMTCS_ATTR_MAX = 3, +}; + +enum dcbnl_bcn_attrs { + DCB_BCN_ATTR_UNDEFINED = 0, + DCB_BCN_ATTR_RP_0 = 1, + DCB_BCN_ATTR_RP_1 = 2, + DCB_BCN_ATTR_RP_2 = 3, + DCB_BCN_ATTR_RP_3 = 4, + DCB_BCN_ATTR_RP_4 = 5, + 
DCB_BCN_ATTR_RP_5 = 6, + DCB_BCN_ATTR_RP_6 = 7, + DCB_BCN_ATTR_RP_7 = 8, + DCB_BCN_ATTR_RP_ALL = 9, + DCB_BCN_ATTR_BCNA_0 = 10, + DCB_BCN_ATTR_BCNA_1 = 11, + DCB_BCN_ATTR_ALPHA = 12, + DCB_BCN_ATTR_BETA = 13, + DCB_BCN_ATTR_GD = 14, + DCB_BCN_ATTR_GI = 15, + DCB_BCN_ATTR_TMAX = 16, + DCB_BCN_ATTR_TD = 17, + DCB_BCN_ATTR_RMIN = 18, + DCB_BCN_ATTR_W = 19, + DCB_BCN_ATTR_RD = 20, + DCB_BCN_ATTR_RU = 21, + DCB_BCN_ATTR_WRTT = 22, + DCB_BCN_ATTR_RI = 23, + DCB_BCN_ATTR_C = 24, + DCB_BCN_ATTR_ALL = 25, + __DCB_BCN_ATTR_ENUM_MAX = 26, + DCB_BCN_ATTR_MAX = 25, +}; + +enum dcb_general_attr_values { + DCB_ATTR_VALUE_UNDEFINED = 255, +}; + +enum dcbnl_app_attrs { + DCB_APP_ATTR_UNDEFINED = 0, + DCB_APP_ATTR_IDTYPE = 1, + DCB_APP_ATTR_ID = 2, + DCB_APP_ATTR_PRIORITY = 3, + __DCB_APP_ATTR_ENUM_MAX = 4, + DCB_APP_ATTR_MAX = 3, +}; + +enum dcbnl_featcfg_attrs { + DCB_FEATCFG_ATTR_UNDEFINED = 0, + DCB_FEATCFG_ATTR_ALL = 1, + DCB_FEATCFG_ATTR_PG = 2, + DCB_FEATCFG_ATTR_PFC = 3, + DCB_FEATCFG_ATTR_APP = 4, + __DCB_FEATCFG_ATTR_ENUM_MAX = 5, + DCB_FEATCFG_ATTR_MAX = 4, +}; + +struct dcb_app_type { + int ifindex; + struct dcb_app app; + struct list_head list; + u8 dcbx; +}; + +struct dcb_ieee_app_prio_map { + u64 map[8]; +}; + +struct dcb_ieee_app_dscp_map { + u8 map[64]; +}; + +enum dcbevent_notif_type { + DCB_APP_EVENT = 1, +}; + +struct reply_func { + int type; + int (*cb)(struct net_device *, struct nlmsghdr *, u32, struct nlattr **, struct sk_buff *); +}; + +enum dns_payload_content_type { + DNS_PAYLOAD_IS_SERVER_LIST = 0, +}; + +struct dns_payload_header { + __u8 zero; + __u8 content; + __u8 version; +}; + +enum { + dns_key_data = 0, + dns_key_error = 1, +}; + +enum switchdev_attr_id { + SWITCHDEV_ATTR_ID_UNDEFINED = 0, + SWITCHDEV_ATTR_ID_PORT_STP_STATE = 1, + SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS = 2, + SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS = 3, + SWITCHDEV_ATTR_ID_PORT_MROUTER = 4, + SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME = 5, + SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING = 6, + SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL = 7, + SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED = 8, + SWITCHDEV_ATTR_ID_BRIDGE_MROUTER = 9, + SWITCHDEV_ATTR_ID_MRP_PORT_ROLE = 10, +}; + +struct switchdev_attr { + struct net_device *orig_dev; + enum switchdev_attr_id id; + u32 flags; + void *complete_priv; + void (*complete)(struct net_device *, int, void *); + union { + u8 stp_state; + struct switchdev_brport_flags brport_flags; + bool mrouter; + clock_t ageing_time; + bool vlan_filtering; + u16 vlan_protocol; + bool mc_disabled; + u8 mrp_port_role; + } u; +}; + +enum switchdev_notifier_type { + SWITCHDEV_FDB_ADD_TO_BRIDGE = 1, + SWITCHDEV_FDB_DEL_TO_BRIDGE = 2, + SWITCHDEV_FDB_ADD_TO_DEVICE = 3, + SWITCHDEV_FDB_DEL_TO_DEVICE = 4, + SWITCHDEV_FDB_OFFLOADED = 5, + SWITCHDEV_FDB_FLUSH_TO_BRIDGE = 6, + SWITCHDEV_PORT_OBJ_ADD = 7, + SWITCHDEV_PORT_OBJ_DEL = 8, + SWITCHDEV_PORT_ATTR_SET = 9, + SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE = 10, + SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE = 11, + SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE = 12, + SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE = 13, + SWITCHDEV_VXLAN_FDB_OFFLOADED = 14, +}; + +struct switchdev_notifier_info { + struct net_device *dev; + struct netlink_ext_ack *extack; +}; + +struct switchdev_notifier_port_obj_info { + struct switchdev_notifier_info info; + const struct switchdev_obj *obj; + bool handled; +}; + +struct switchdev_notifier_port_attr_info { + struct switchdev_notifier_info info; + const struct switchdev_attr *attr; + bool handled; +}; + +typedef void switchdev_deferred_func_t(struct net_device *, const void *); 
+ +struct switchdev_deferred_item { + struct list_head list; + struct net_device *dev; + switchdev_deferred_func_t *func; + long unsigned int data[0]; +}; + +typedef int (*lookup_by_table_id_t)(struct net *, u32); + +struct l3mdev_handler { + lookup_by_table_id_t dev_lookup; +}; + +struct ncsi_dev { + int state; + int link_up; + struct net_device *dev; + void (*handler)(struct ncsi_dev *); +}; + +enum { + NCSI_CAP_BASE = 0, + NCSI_CAP_GENERIC = 0, + NCSI_CAP_BC = 1, + NCSI_CAP_MC = 2, + NCSI_CAP_BUFFER = 3, + NCSI_CAP_AEN = 4, + NCSI_CAP_VLAN = 5, + NCSI_CAP_MAX = 6, +}; + +enum { + NCSI_MODE_BASE = 0, + NCSI_MODE_ENABLE = 0, + NCSI_MODE_TX_ENABLE = 1, + NCSI_MODE_LINK = 2, + NCSI_MODE_VLAN = 3, + NCSI_MODE_BC = 4, + NCSI_MODE_MC = 5, + NCSI_MODE_AEN = 6, + NCSI_MODE_FC = 7, + NCSI_MODE_MAX = 8, +}; + +struct ncsi_channel_version { + u32 version; + u32 alpha2; + u8 fw_name[12]; + u32 fw_version; + u16 pci_ids[4]; + u32 mf_id; +}; + +struct ncsi_channel_cap { + u32 index; + u32 cap; +}; + +struct ncsi_channel_mode { + u32 index; + u32 enable; + u32 size; + u32 data[8]; +}; + +struct ncsi_channel_mac_filter { + u8 n_uc; + u8 n_mc; + u8 n_mixed; + u64 bitmap; + unsigned char *addrs; +}; + +struct ncsi_channel_vlan_filter { + u8 n_vids; + u64 bitmap; + u16 *vids; +}; + +struct ncsi_channel_stats { + u32 hnc_cnt_hi; + u32 hnc_cnt_lo; + u32 hnc_rx_bytes; + u32 hnc_tx_bytes; + u32 hnc_rx_uc_pkts; + u32 hnc_rx_mc_pkts; + u32 hnc_rx_bc_pkts; + u32 hnc_tx_uc_pkts; + u32 hnc_tx_mc_pkts; + u32 hnc_tx_bc_pkts; + u32 hnc_fcs_err; + u32 hnc_align_err; + u32 hnc_false_carrier; + u32 hnc_runt_pkts; + u32 hnc_jabber_pkts; + u32 hnc_rx_pause_xon; + u32 hnc_rx_pause_xoff; + u32 hnc_tx_pause_xon; + u32 hnc_tx_pause_xoff; + u32 hnc_tx_s_collision; + u32 hnc_tx_m_collision; + u32 hnc_l_collision; + u32 hnc_e_collision; + u32 hnc_rx_ctl_frames; + u32 hnc_rx_64_frames; + u32 hnc_rx_127_frames; + u32 hnc_rx_255_frames; + u32 hnc_rx_511_frames; + u32 hnc_rx_1023_frames; + u32 hnc_rx_1522_frames; + u32 hnc_rx_9022_frames; + u32 hnc_tx_64_frames; + u32 hnc_tx_127_frames; + u32 hnc_tx_255_frames; + u32 hnc_tx_511_frames; + u32 hnc_tx_1023_frames; + u32 hnc_tx_1522_frames; + u32 hnc_tx_9022_frames; + u32 hnc_rx_valid_bytes; + u32 hnc_rx_runt_pkts; + u32 hnc_rx_jabber_pkts; + u32 ncsi_rx_cmds; + u32 ncsi_dropped_cmds; + u32 ncsi_cmd_type_errs; + u32 ncsi_cmd_csum_errs; + u32 ncsi_rx_pkts; + u32 ncsi_tx_pkts; + u32 ncsi_tx_aen_pkts; + u32 pt_tx_pkts; + u32 pt_tx_dropped; + u32 pt_tx_channel_err; + u32 pt_tx_us_err; + u32 pt_rx_pkts; + u32 pt_rx_dropped; + u32 pt_rx_channel_err; + u32 pt_rx_us_err; + u32 pt_rx_os_err; +}; + +struct ncsi_package; + +struct ncsi_channel { + unsigned char id; + int state; + bool reconfigure_needed; + spinlock_t lock; + struct ncsi_package *package; + struct ncsi_channel_version version; + struct ncsi_channel_cap caps[6]; + struct ncsi_channel_mode modes[8]; + struct ncsi_channel_mac_filter mac_filter; + struct ncsi_channel_vlan_filter vlan_filter; + struct ncsi_channel_stats stats; + struct { + struct timer_list timer; + bool enabled; + unsigned int state; + } monitor; + struct list_head node; + struct list_head link; +}; + +struct ncsi_dev_priv; + +struct ncsi_package { + unsigned char id; + unsigned char uuid[16]; + struct ncsi_dev_priv *ndp; + spinlock_t lock; + unsigned int channel_num; + struct list_head channels; + struct list_head node; + bool multi_channel; + u32 channel_whitelist; + struct ncsi_channel *preferred_channel; +}; + +struct ncsi_request { + unsigned char id; + bool used; 
+ unsigned int flags; + struct ncsi_dev_priv *ndp; + struct sk_buff *cmd; + struct sk_buff *rsp; + struct timer_list timer; + bool enabled; + u32 snd_seq; + u32 snd_portid; + struct nlmsghdr nlhdr; +}; + +struct ncsi_dev_priv { + struct ncsi_dev ndev; + unsigned int flags; + unsigned int gma_flag; + spinlock_t lock; + unsigned int package_probe_id; + unsigned int package_num; + struct list_head packages; + struct ncsi_channel *hot_channel; + struct ncsi_request requests[256]; + unsigned int request_id; + unsigned int pending_req_num; + struct ncsi_package *active_package; + struct ncsi_channel *active_channel; + struct list_head channel_queue; + struct work_struct work; + struct packet_type ptype; + struct list_head node; + struct list_head vlan_vids; + bool multi_package; + bool mlx_multi_host; + u32 package_whitelist; +}; + +struct ncsi_cmd_arg { + struct ncsi_dev_priv *ndp; + unsigned char type; + unsigned char id; + unsigned char package; + unsigned char channel; + short unsigned int payload; + unsigned int req_flags; + union { + unsigned char bytes[16]; + short unsigned int words[8]; + unsigned int dwords[4]; + }; + unsigned char *data; + struct genl_info *info; +}; + +struct ncsi_pkt_hdr { + unsigned char mc_id; + unsigned char revision; + unsigned char reserved; + unsigned char id; + unsigned char type; + unsigned char channel; + __be16 length; + __be32 reserved1[2]; +}; + +struct ncsi_cmd_pkt_hdr { + struct ncsi_pkt_hdr common; +}; + +struct ncsi_cmd_pkt { + struct ncsi_cmd_pkt_hdr cmd; + __be32 checksum; + unsigned char pad[26]; +}; + +struct ncsi_cmd_sp_pkt { + struct ncsi_cmd_pkt_hdr cmd; + unsigned char reserved[3]; + unsigned char hw_arbitration; + __be32 checksum; + unsigned char pad[22]; +}; + +struct ncsi_cmd_dc_pkt { + struct ncsi_cmd_pkt_hdr cmd; + unsigned char reserved[3]; + unsigned char ald; + __be32 checksum; + unsigned char pad[22]; +}; + +struct ncsi_cmd_rc_pkt { + struct ncsi_cmd_pkt_hdr cmd; + __be32 reserved; + __be32 checksum; + unsigned char pad[22]; +}; + +struct ncsi_cmd_ae_pkt { + struct ncsi_cmd_pkt_hdr cmd; + unsigned char reserved[3]; + unsigned char mc_id; + __be32 mode; + __be32 checksum; + unsigned char pad[18]; +}; + +struct ncsi_cmd_sl_pkt { + struct ncsi_cmd_pkt_hdr cmd; + __be32 mode; + __be32 oem_mode; + __be32 checksum; + unsigned char pad[18]; +}; + +struct ncsi_cmd_svf_pkt { + struct ncsi_cmd_pkt_hdr cmd; + __be16 reserved; + __be16 vlan; + __be16 reserved1; + unsigned char index; + unsigned char enable; + __be32 checksum; + unsigned char pad[18]; +}; + +struct ncsi_cmd_ev_pkt { + struct ncsi_cmd_pkt_hdr cmd; + unsigned char reserved[3]; + unsigned char mode; + __be32 checksum; + unsigned char pad[22]; +}; + +struct ncsi_cmd_sma_pkt { + struct ncsi_cmd_pkt_hdr cmd; + unsigned char mac[6]; + unsigned char index; + unsigned char at_e; + __be32 checksum; + unsigned char pad[18]; +}; + +struct ncsi_cmd_ebf_pkt { + struct ncsi_cmd_pkt_hdr cmd; + __be32 mode; + __be32 checksum; + unsigned char pad[22]; +}; + +struct ncsi_cmd_egmf_pkt { + struct ncsi_cmd_pkt_hdr cmd; + __be32 mode; + __be32 checksum; + unsigned char pad[22]; +}; + +struct ncsi_cmd_snfc_pkt { + struct ncsi_cmd_pkt_hdr cmd; + unsigned char reserved[3]; + unsigned char mode; + __be32 checksum; + unsigned char pad[22]; +}; + +struct ncsi_cmd_oem_pkt { + struct ncsi_cmd_pkt_hdr cmd; + __be32 mfr_id; + unsigned char data[0]; +}; + +struct ncsi_cmd_handler { + unsigned char type; + int payload; + int (*handler)(struct sk_buff *, struct ncsi_cmd_arg *); +}; + +enum { + NCSI_CAP_GENERIC_HWA = 
1, + NCSI_CAP_GENERIC_HDS = 2, + NCSI_CAP_GENERIC_FC = 4, + NCSI_CAP_GENERIC_FC1 = 8, + NCSI_CAP_GENERIC_MC = 16, + NCSI_CAP_GENERIC_HWA_UNKNOWN = 0, + NCSI_CAP_GENERIC_HWA_SUPPORT = 32, + NCSI_CAP_GENERIC_HWA_NOT_SUPPORT = 64, + NCSI_CAP_GENERIC_HWA_RESERVED = 96, + NCSI_CAP_GENERIC_HWA_MASK = 96, + NCSI_CAP_GENERIC_MASK = 127, + NCSI_CAP_BC_ARP = 1, + NCSI_CAP_BC_DHCPC = 2, + NCSI_CAP_BC_DHCPS = 4, + NCSI_CAP_BC_NETBIOS = 8, + NCSI_CAP_BC_MASK = 15, + NCSI_CAP_MC_IPV6_NEIGHBOR = 1, + NCSI_CAP_MC_IPV6_ROUTER = 2, + NCSI_CAP_MC_DHCPV6_RELAY = 4, + NCSI_CAP_MC_DHCPV6_WELL_KNOWN = 8, + NCSI_CAP_MC_IPV6_MLD = 16, + NCSI_CAP_MC_IPV6_NEIGHBOR_S = 32, + NCSI_CAP_MC_MASK = 63, + NCSI_CAP_AEN_LSC = 1, + NCSI_CAP_AEN_CR = 2, + NCSI_CAP_AEN_HDS = 4, + NCSI_CAP_AEN_MASK = 7, + NCSI_CAP_VLAN_ONLY = 1, + NCSI_CAP_VLAN_NO = 2, + NCSI_CAP_VLAN_ANY = 4, + NCSI_CAP_VLAN_MASK = 7, +}; + +struct ncsi_rsp_pkt_hdr { + struct ncsi_pkt_hdr common; + __be16 code; + __be16 reason; +}; + +struct ncsi_rsp_pkt { + struct ncsi_rsp_pkt_hdr rsp; + __be32 checksum; + unsigned char pad[22]; +}; + +struct ncsi_rsp_oem_pkt { + struct ncsi_rsp_pkt_hdr rsp; + __be32 mfr_id; + unsigned char data[0]; +}; + +struct ncsi_rsp_oem_mlx_pkt { + unsigned char cmd_rev; + unsigned char cmd; + unsigned char param; + unsigned char optional; + unsigned char data[0]; +}; + +struct ncsi_rsp_oem_bcm_pkt { + unsigned char ver; + unsigned char type; + __be16 len; + unsigned char data[0]; +}; + +struct ncsi_rsp_gls_pkt { + struct ncsi_rsp_pkt_hdr rsp; + __be32 status; + __be32 other; + __be32 oem_status; + __be32 checksum; + unsigned char pad[10]; +}; + +struct ncsi_rsp_gvi_pkt { + struct ncsi_rsp_pkt_hdr rsp; + __be32 ncsi_version; + unsigned char reserved[3]; + unsigned char alpha2; + unsigned char fw_name[12]; + __be32 fw_version; + __be16 pci_ids[4]; + __be32 mf_id; + __be32 checksum; +}; + +struct ncsi_rsp_gc_pkt { + struct ncsi_rsp_pkt_hdr rsp; + __be32 cap; + __be32 bc_cap; + __be32 mc_cap; + __be32 buf_cap; + __be32 aen_cap; + unsigned char vlan_cnt; + unsigned char mixed_cnt; + unsigned char mc_cnt; + unsigned char uc_cnt; + unsigned char reserved[2]; + unsigned char vlan_mode; + unsigned char channel_cnt; + __be32 checksum; +}; + +struct ncsi_rsp_gp_pkt { + struct ncsi_rsp_pkt_hdr rsp; + unsigned char mac_cnt; + unsigned char reserved[2]; + unsigned char mac_enable; + unsigned char vlan_cnt; + unsigned char reserved1; + __be16 vlan_enable; + __be32 link_mode; + __be32 bc_mode; + __be32 valid_modes; + unsigned char vlan_mode; + unsigned char fc_mode; + unsigned char reserved2[2]; + __be32 aen_mode; + unsigned char mac[6]; + __be16 vlan; + __be32 checksum; +}; + +struct ncsi_rsp_gcps_pkt { + struct ncsi_rsp_pkt_hdr rsp; + __be32 cnt_hi; + __be32 cnt_lo; + __be32 rx_bytes; + __be32 tx_bytes; + __be32 rx_uc_pkts; + __be32 rx_mc_pkts; + __be32 rx_bc_pkts; + __be32 tx_uc_pkts; + __be32 tx_mc_pkts; + __be32 tx_bc_pkts; + __be32 fcs_err; + __be32 align_err; + __be32 false_carrier; + __be32 runt_pkts; + __be32 jabber_pkts; + __be32 rx_pause_xon; + __be32 rx_pause_xoff; + __be32 tx_pause_xon; + __be32 tx_pause_xoff; + __be32 tx_s_collision; + __be32 tx_m_collision; + __be32 l_collision; + __be32 e_collision; + __be32 rx_ctl_frames; + __be32 rx_64_frames; + __be32 rx_127_frames; + __be32 rx_255_frames; + __be32 rx_511_frames; + __be32 rx_1023_frames; + __be32 rx_1522_frames; + __be32 rx_9022_frames; + __be32 tx_64_frames; + __be32 tx_127_frames; + __be32 tx_255_frames; + __be32 tx_511_frames; + __be32 tx_1023_frames; + __be32 tx_1522_frames; + 
__be32 tx_9022_frames; + __be32 rx_valid_bytes; + __be32 rx_runt_pkts; + __be32 rx_jabber_pkts; + __be32 checksum; +}; + +struct ncsi_rsp_gns_pkt { + struct ncsi_rsp_pkt_hdr rsp; + __be32 rx_cmds; + __be32 dropped_cmds; + __be32 cmd_type_errs; + __be32 cmd_csum_errs; + __be32 rx_pkts; + __be32 tx_pkts; + __be32 tx_aen_pkts; + __be32 checksum; +}; + +struct ncsi_rsp_gnpts_pkt { + struct ncsi_rsp_pkt_hdr rsp; + __be32 tx_pkts; + __be32 tx_dropped; + __be32 tx_channel_err; + __be32 tx_us_err; + __be32 rx_pkts; + __be32 rx_dropped; + __be32 rx_channel_err; + __be32 rx_us_err; + __be32 rx_os_err; + __be32 checksum; +}; + +struct ncsi_rsp_gps_pkt { + struct ncsi_rsp_pkt_hdr rsp; + __be32 status; + __be32 checksum; +}; + +struct ncsi_rsp_gpuuid_pkt { + struct ncsi_rsp_pkt_hdr rsp; + unsigned char uuid[16]; + __be32 checksum; +}; + +struct ncsi_rsp_oem_handler { + unsigned int mfr_id; + int (*handler)(struct ncsi_request *); +}; + +struct ncsi_rsp_handler { + unsigned char type; + int payload; + int (*handler)(struct ncsi_request *); +}; + +struct ncsi_aen_pkt_hdr { + struct ncsi_pkt_hdr common; + unsigned char reserved2[3]; + unsigned char type; +}; + +struct ncsi_aen_lsc_pkt { + struct ncsi_aen_pkt_hdr aen; + __be32 status; + __be32 oem_status; + __be32 checksum; + unsigned char pad[14]; +}; + +struct ncsi_aen_hncdsc_pkt { + struct ncsi_aen_pkt_hdr aen; + __be32 status; + __be32 checksum; + unsigned char pad[18]; +}; + +struct ncsi_aen_handler { + unsigned char type; + int payload; + int (*handler)(struct ncsi_dev_priv *, struct ncsi_aen_pkt_hdr *); +}; + +enum { + ncsi_dev_state_registered = 0, + ncsi_dev_state_functional = 256, + ncsi_dev_state_probe = 512, + ncsi_dev_state_config = 768, + ncsi_dev_state_suspend = 1024, +}; + +enum { + MLX_MC_RBT_SUPPORT = 1, + MLX_MC_RBT_AVL = 8, +}; + +enum { + ncsi_dev_state_major = 65280, + ncsi_dev_state_minor = 255, + ncsi_dev_state_probe_deselect = 513, + ncsi_dev_state_probe_package = 514, + ncsi_dev_state_probe_channel = 515, + ncsi_dev_state_probe_mlx_gma = 516, + ncsi_dev_state_probe_mlx_smaf = 517, + ncsi_dev_state_probe_cis = 518, + ncsi_dev_state_probe_gvi = 519, + ncsi_dev_state_probe_gc = 520, + ncsi_dev_state_probe_gls = 521, + ncsi_dev_state_probe_dp = 522, + ncsi_dev_state_config_sp = 769, + ncsi_dev_state_config_cis = 770, + ncsi_dev_state_config_oem_gma = 771, + ncsi_dev_state_config_clear_vids = 772, + ncsi_dev_state_config_svf = 773, + ncsi_dev_state_config_ev = 774, + ncsi_dev_state_config_sma = 775, + ncsi_dev_state_config_ebf = 776, + ncsi_dev_state_config_dgmf = 777, + ncsi_dev_state_config_ecnt = 778, + ncsi_dev_state_config_ec = 779, + ncsi_dev_state_config_ae = 780, + ncsi_dev_state_config_gls = 781, + ncsi_dev_state_config_done = 782, + ncsi_dev_state_suspend_select = 1025, + ncsi_dev_state_suspend_gls = 1026, + ncsi_dev_state_suspend_dcnt = 1027, + ncsi_dev_state_suspend_dc = 1028, + ncsi_dev_state_suspend_deselect = 1029, + ncsi_dev_state_suspend_done = 1030, +}; + +struct vlan_vid { + struct list_head list; + __be16 proto; + u16 vid; +}; + +struct ncsi_oem_gma_handler { + unsigned int mfr_id; + int (*handler)(struct ncsi_cmd_arg *); +}; + +enum ncsi_nl_commands { + NCSI_CMD_UNSPEC = 0, + NCSI_CMD_PKG_INFO = 1, + NCSI_CMD_SET_INTERFACE = 2, + NCSI_CMD_CLEAR_INTERFACE = 3, + NCSI_CMD_SEND_CMD = 4, + NCSI_CMD_SET_PACKAGE_MASK = 5, + NCSI_CMD_SET_CHANNEL_MASK = 6, + __NCSI_CMD_AFTER_LAST = 7, + NCSI_CMD_MAX = 6, +}; + +enum ncsi_nl_attrs { + NCSI_ATTR_UNSPEC = 0, + NCSI_ATTR_IFINDEX = 1, + NCSI_ATTR_PACKAGE_LIST = 2, + 
NCSI_ATTR_PACKAGE_ID = 3, + NCSI_ATTR_CHANNEL_ID = 4, + NCSI_ATTR_DATA = 5, + NCSI_ATTR_MULTI_FLAG = 6, + NCSI_ATTR_PACKAGE_MASK = 7, + NCSI_ATTR_CHANNEL_MASK = 8, + __NCSI_ATTR_AFTER_LAST = 9, + NCSI_ATTR_MAX = 8, +}; + +enum ncsi_nl_pkg_attrs { + NCSI_PKG_ATTR_UNSPEC = 0, + NCSI_PKG_ATTR = 1, + NCSI_PKG_ATTR_ID = 2, + NCSI_PKG_ATTR_FORCED = 3, + NCSI_PKG_ATTR_CHANNEL_LIST = 4, + __NCSI_PKG_ATTR_AFTER_LAST = 5, + NCSI_PKG_ATTR_MAX = 4, +}; + +enum ncsi_nl_channel_attrs { + NCSI_CHANNEL_ATTR_UNSPEC = 0, + NCSI_CHANNEL_ATTR = 1, + NCSI_CHANNEL_ATTR_ID = 2, + NCSI_CHANNEL_ATTR_VERSION_MAJOR = 3, + NCSI_CHANNEL_ATTR_VERSION_MINOR = 4, + NCSI_CHANNEL_ATTR_VERSION_STR = 5, + NCSI_CHANNEL_ATTR_LINK_STATE = 6, + NCSI_CHANNEL_ATTR_ACTIVE = 7, + NCSI_CHANNEL_ATTR_FORCED = 8, + NCSI_CHANNEL_ATTR_VLAN_LIST = 9, + NCSI_CHANNEL_ATTR_VLAN_ID = 10, + __NCSI_CHANNEL_ATTR_AFTER_LAST = 11, + NCSI_CHANNEL_ATTR_MAX = 10, +}; + +struct sockaddr_xdp { + __u16 sxdp_family; + __u16 sxdp_flags; + __u32 sxdp_ifindex; + __u32 sxdp_queue_id; + __u32 sxdp_shared_umem_fd; +}; + +struct xdp_ring_offset { + __u64 producer; + __u64 consumer; + __u64 desc; + __u64 flags; +}; + +struct xdp_mmap_offsets { + struct xdp_ring_offset rx; + struct xdp_ring_offset tx; + struct xdp_ring_offset fr; + struct xdp_ring_offset cr; +}; + +struct xdp_umem_reg { + __u64 addr; + __u64 len; + __u32 chunk_size; + __u32 headroom; + __u32 flags; +}; + +struct xdp_statistics { + __u64 rx_dropped; + __u64 rx_invalid_descs; + __u64 tx_invalid_descs; + __u64 rx_ring_full; + __u64 rx_fill_ring_empty_descs; + __u64 tx_ring_empty_descs; +}; + +struct xdp_options { + __u32 flags; +}; + +struct xdp_desc { + __u64 addr; + __u32 len; + __u32 options; +}; + +struct xsk_map { + struct bpf_map map; + spinlock_t lock; + struct xdp_sock *xsk_map[0]; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct xdp_ring; + +struct xsk_queue { + u32 ring_mask; + u32 nentries; + u32 cached_prod; + u32 cached_cons; + struct xdp_ring *ring; + u64 invalid_descs; + u64 queue_empty_descs; +}; + +struct xdp_ring_offset_v1 { + __u64 producer; + __u64 consumer; + __u64 desc; +}; + +struct xdp_mmap_offsets_v1 { + struct xdp_ring_offset_v1 rx; + struct xdp_ring_offset_v1 tx; + struct xdp_ring_offset_v1 fr; + struct xdp_ring_offset_v1 cr; +}; + +struct xsk_map_node { + struct list_head node; + struct xsk_map *map; + struct xdp_sock **map_entry; +}; + +struct xdp_ring { + u32 producer; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + u32 pad1; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + u32 consumer; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + u32 pad2; + u32 flags; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + u32 pad3; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct xdp_rxtx_ring { + struct xdp_ring ptrs; + struct xdp_desc desc[0]; +}; + +struct xdp_umem_ring { + struct xdp_ring ptrs; + u64 desc[0]; +}; + +struct xsk_dma_map { + dma_addr_t *dma_pages; + struct device *dev; + struct net_device *netdev; + refcount_t users; + struct list_head list; + u32 dma_pages_cnt; + bool dma_need_sync; +}; + +struct mptcp_mib { + long unsigned int mibs[35]; +}; + +enum mptcp_event_type { + MPTCP_EVENT_UNSPEC = 0, + MPTCP_EVENT_CREATED = 1, + MPTCP_EVENT_ESTABLISHED = 2, + MPTCP_EVENT_CLOSED = 3, + 
MPTCP_EVENT_ANNOUNCED = 6, + MPTCP_EVENT_REMOVED = 7, + MPTCP_EVENT_SUB_ESTABLISHED = 10, + MPTCP_EVENT_SUB_CLOSED = 11, + MPTCP_EVENT_SUB_PRIORITY = 13, +}; + +struct mptcp_options_received { + u64 sndr_key; + u64 rcvr_key; + u64 data_ack; + u64 data_seq; + u32 subflow_seq; + u16 data_len; + u16 mp_capable: 1; + u16 mp_join: 1; + u16 fastclose: 1; + u16 reset: 1; + u16 dss: 1; + u16 add_addr: 1; + u16 rm_addr: 1; + u16 mp_prio: 1; + u16 echo: 1; + u16 backup: 1; + u32 token; + u32 nonce; + u64 thmac; + u8 hmac[20]; + u8 join_id; + u8 use_map: 1; + u8 dsn64: 1; + u8 data_fin: 1; + u8 use_ack: 1; + u8 ack64: 1; + u8 mpc_map: 1; + u8 __unused: 2; + struct mptcp_addr_info addr; + struct mptcp_rm_list rm_list; + u64 ahmac; + u8 reset_reason: 4; + u8 reset_transient: 1; +}; + +struct mptcp_pm_data { + struct mptcp_addr_info local; + struct mptcp_addr_info remote; + struct list_head anno_list; + spinlock_t lock; + u8 addr_signal; + bool server_side; + bool work_pending; + bool accept_addr; + bool accept_subflow; + u8 add_addr_signaled; + u8 add_addr_accepted; + u8 local_addr_used; + u8 subflows; + u8 status; + struct mptcp_rm_list rm_list_tx; + struct mptcp_rm_list rm_list_rx; +}; + +struct mptcp_data_frag { + struct list_head list; + u64 data_seq; + u16 data_len; + u16 offset; + u16 overhead; + u16 already_sent; + struct page *page; +}; + +struct mptcp_sock { + struct inet_connection_sock sk; + u64 local_key; + u64 remote_key; + u64 write_seq; + u64 snd_nxt; + u64 ack_seq; + u64 rcv_wnd_sent; + u64 rcv_data_fin_seq; + int wmem_reserved; + struct sock *last_snd; + int snd_burst; + int old_wspace; + u64 snd_una; + u64 wnd_end; + long unsigned int timer_ival; + u32 token; + int rmem_released; + long unsigned int flags; + bool can_ack; + bool fully_established; + bool rcv_data_fin; + bool snd_data_fin_enable; + bool rcv_fastclose; + bool use_64bit_ack; + spinlock_t join_list_lock; + struct work_struct work; + struct sk_buff *ooo_last_skb; + struct rb_root out_of_order_queue; + struct sk_buff_head receive_queue; + struct sk_buff_head skb_tx_cache; + int tx_pending_data; + int size_goal_cache; + struct list_head conn_list; + struct list_head rtx_queue; + struct mptcp_data_frag *first_pending; + struct list_head join_list; + struct socket *subflow; + struct sock *first; + struct mptcp_pm_data pm; + struct { + u32 space; + u32 copied; + u64 time; + u64 rtt_us; + } rcvq_space; + u32 setsockopt_seq; + char ca_name[16]; +}; + +struct mptcp_subflow_request_sock { + struct tcp_request_sock sk; + u16 mp_capable: 1; + u16 mp_join: 1; + u16 backup: 1; + u8 local_id; + u8 remote_id; + u64 local_key; + u64 idsn; + u32 token; + u32 ssn_offset; + u64 thmac; + u32 local_nonce; + u32 remote_nonce; + struct mptcp_sock *msk; + struct hlist_nulls_node token_node; +}; + +enum mptcp_data_avail { + MPTCP_SUBFLOW_NODATA = 0, + MPTCP_SUBFLOW_DATA_AVAIL = 1, +}; + +struct mptcp_delegated_action { + struct napi_struct napi; + struct list_head head; +}; + +struct mptcp_subflow_context { + struct list_head node; + u64 local_key; + u64 remote_key; + u64 idsn; + u64 map_seq; + u32 snd_isn; + u32 token; + u32 rel_write_seq; + u32 map_subflow_seq; + u32 ssn_offset; + u32 map_data_len; + u32 request_mptcp: 1; + u32 request_join: 1; + u32 request_bkup: 1; + u32 mp_capable: 1; + u32 mp_join: 1; + u32 fully_established: 1; + u32 pm_notified: 1; + u32 conn_finished: 1; + u32 map_valid: 1; + u32 mpc_map: 1; + u32 backup: 1; + u32 send_mp_prio: 1; + u32 rx_eof: 1; + u32 can_ack: 1; + u32 disposable: 1; + enum mptcp_data_avail data_avail; 
+ u32 remote_nonce; + u64 thmac; + u32 local_nonce; + u32 remote_token; + u8 hmac[20]; + u8 local_id; + u8 remote_id; + u8 reset_seen: 1; + u8 reset_transient: 1; + u8 reset_reason: 4; + long int delegated_status; + struct list_head delegated_node; + u32 setsockopt_seq; + struct sock *tcp_sock; + struct sock *conn; + const struct inet_connection_sock_af_ops *icsk_af_ops; + void (*tcp_data_ready)(struct sock *); + void (*tcp_state_change)(struct sock *); + void (*tcp_write_space)(struct sock *); + void (*tcp_error_report)(struct sock *); + struct callback_head rcu; +}; + +enum linux_mptcp_mib_field { + MPTCP_MIB_NUM = 0, + MPTCP_MIB_MPCAPABLEPASSIVE = 1, + MPTCP_MIB_MPCAPABLEACTIVE = 2, + MPTCP_MIB_MPCAPABLEACTIVEACK = 3, + MPTCP_MIB_MPCAPABLEPASSIVEACK = 4, + MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK = 5, + MPTCP_MIB_MPCAPABLEACTIVEFALLBACK = 6, + MPTCP_MIB_TOKENFALLBACKINIT = 7, + MPTCP_MIB_RETRANSSEGS = 8, + MPTCP_MIB_JOINNOTOKEN = 9, + MPTCP_MIB_JOINSYNRX = 10, + MPTCP_MIB_JOINSYNACKRX = 11, + MPTCP_MIB_JOINSYNACKMAC = 12, + MPTCP_MIB_JOINACKRX = 13, + MPTCP_MIB_JOINACKMAC = 14, + MPTCP_MIB_DSSNOMATCH = 15, + MPTCP_MIB_INFINITEMAPRX = 16, + MPTCP_MIB_OFOQUEUETAIL = 17, + MPTCP_MIB_OFOQUEUE = 18, + MPTCP_MIB_OFOMERGE = 19, + MPTCP_MIB_NODSSWINDOW = 20, + MPTCP_MIB_DUPDATA = 21, + MPTCP_MIB_ADDADDR = 22, + MPTCP_MIB_ECHOADD = 23, + MPTCP_MIB_PORTADD = 24, + MPTCP_MIB_JOINPORTSYNRX = 25, + MPTCP_MIB_JOINPORTSYNACKRX = 26, + MPTCP_MIB_JOINPORTACKRX = 27, + MPTCP_MIB_MISMATCHPORTSYNRX = 28, + MPTCP_MIB_MISMATCHPORTACKRX = 29, + MPTCP_MIB_RMADDR = 30, + MPTCP_MIB_RMSUBFLOW = 31, + MPTCP_MIB_MPPRIOTX = 32, + MPTCP_MIB_MPPRIORX = 33, + MPTCP_MIB_RCVPRUNED = 34, + __MPTCP_MIB_MAX = 35, +}; + +struct trace_event_raw_mptcp_subflow_get_send { + struct trace_entry ent; + bool active; + bool free; + u32 snd_wnd; + u32 pace; + u8 backup; + u64 ratio; + char __data[0]; +}; + +struct trace_event_raw_mptcp_dump_mpext { + struct trace_entry ent; + u64 data_ack; + u64 data_seq; + u32 subflow_seq; + u16 data_len; + u8 use_map; + u8 dsn64; + u8 data_fin; + u8 use_ack; + u8 ack64; + u8 mpc_map; + u8 frozen; + u8 reset_transient; + u8 reset_reason; + char __data[0]; +}; + +struct trace_event_raw_ack_update_msk { + struct trace_entry ent; + u64 data_ack; + u64 old_snd_una; + u64 new_snd_una; + u64 new_wnd_end; + u64 msk_wnd_end; + char __data[0]; +}; + +struct trace_event_raw_subflow_check_data_avail { + struct trace_entry ent; + u8 status; + const void *skb; + char __data[0]; +}; + +struct trace_event_data_offsets_mptcp_subflow_get_send {}; + +struct trace_event_data_offsets_mptcp_dump_mpext {}; + +struct trace_event_data_offsets_ack_update_msk {}; + +struct trace_event_data_offsets_subflow_check_data_avail {}; + +typedef void (*btf_trace_mptcp_subflow_get_send)(void *, struct mptcp_subflow_context *); + +typedef void (*btf_trace_get_mapping_status)(void *, struct mptcp_ext *); + +typedef void (*btf_trace_ack_update_msk)(void *, u64, u64, u64, u64, u64); + +typedef void (*btf_trace_subflow_check_data_avail)(void *, __u8, struct sk_buff *); + +struct mptcp_skb_cb { + u64 map_seq; + u64 end_seq; + u32 offset; +}; + +struct mptcp_sendmsg_info { + int mss_now; + int size_goal; + u16 limit; + u16 sent; + unsigned int flags; +}; + +struct subflow_send_info { + struct sock *ssk; + u64 ratio; +}; + +enum mapping_status { + MAPPING_OK = 0, + MAPPING_INVALID = 1, + MAPPING_EMPTY = 2, + MAPPING_DATA_FIN = 3, + MAPPING_DUMMY = 4, +}; + +enum mptcp_addr_signal_status { + MPTCP_ADD_ADDR_SIGNAL = 0, + MPTCP_ADD_ADDR_ECHO = 1, + 
MPTCP_ADD_ADDR_IPV6 = 2, + MPTCP_ADD_ADDR_PORT = 3, + MPTCP_RM_ADDR_SIGNAL = 4, +}; + +struct mptcp_pm_add_entry; + +struct token_bucket { + spinlock_t lock; + int chain_len; + struct hlist_nulls_head req_chain; + struct hlist_nulls_head msk_chain; +}; + +struct mptcp_pernet { + struct ctl_table_header *ctl_table_hdr; + int mptcp_enabled; + unsigned int add_addr_timeout; +}; + +enum mptcp_pm_status { + MPTCP_PM_ADD_ADDR_RECEIVED = 0, + MPTCP_PM_ADD_ADDR_SEND_ACK = 1, + MPTCP_PM_RM_ADDR_RECEIVED = 2, + MPTCP_PM_ESTABLISHED = 3, + MPTCP_PM_ALREADY_ESTABLISHED = 4, + MPTCP_PM_SUBFLOW_ESTABLISHED = 5, +}; + +enum { + INET_ULP_INFO_UNSPEC = 0, + INET_ULP_INFO_NAME = 1, + INET_ULP_INFO_TLS = 2, + INET_ULP_INFO_MPTCP = 3, + __INET_ULP_INFO_MAX = 4, +}; + +enum { + MPTCP_SUBFLOW_ATTR_UNSPEC = 0, + MPTCP_SUBFLOW_ATTR_TOKEN_REM = 1, + MPTCP_SUBFLOW_ATTR_TOKEN_LOC = 2, + MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ = 3, + MPTCP_SUBFLOW_ATTR_MAP_SEQ = 4, + MPTCP_SUBFLOW_ATTR_MAP_SFSEQ = 5, + MPTCP_SUBFLOW_ATTR_SSN_OFFSET = 6, + MPTCP_SUBFLOW_ATTR_MAP_DATALEN = 7, + MPTCP_SUBFLOW_ATTR_FLAGS = 8, + MPTCP_SUBFLOW_ATTR_ID_REM = 9, + MPTCP_SUBFLOW_ATTR_ID_LOC = 10, + MPTCP_SUBFLOW_ATTR_PAD = 11, + __MPTCP_SUBFLOW_ATTR_MAX = 12, +}; + +enum { + MPTCP_PM_ATTR_UNSPEC = 0, + MPTCP_PM_ATTR_ADDR = 1, + MPTCP_PM_ATTR_RCV_ADD_ADDRS = 2, + MPTCP_PM_ATTR_SUBFLOWS = 3, + __MPTCP_PM_ATTR_MAX = 4, +}; + +enum { + MPTCP_PM_ADDR_ATTR_UNSPEC = 0, + MPTCP_PM_ADDR_ATTR_FAMILY = 1, + MPTCP_PM_ADDR_ATTR_ID = 2, + MPTCP_PM_ADDR_ATTR_ADDR4 = 3, + MPTCP_PM_ADDR_ATTR_ADDR6 = 4, + MPTCP_PM_ADDR_ATTR_PORT = 5, + MPTCP_PM_ADDR_ATTR_FLAGS = 6, + MPTCP_PM_ADDR_ATTR_IF_IDX = 7, + __MPTCP_PM_ADDR_ATTR_MAX = 8, +}; + +enum { + MPTCP_PM_CMD_UNSPEC = 0, + MPTCP_PM_CMD_ADD_ADDR = 1, + MPTCP_PM_CMD_DEL_ADDR = 2, + MPTCP_PM_CMD_GET_ADDR = 3, + MPTCP_PM_CMD_FLUSH_ADDRS = 4, + MPTCP_PM_CMD_SET_LIMITS = 5, + MPTCP_PM_CMD_GET_LIMITS = 6, + MPTCP_PM_CMD_SET_FLAGS = 7, + __MPTCP_PM_CMD_AFTER_LAST = 8, +}; + +enum mptcp_event_attr { + MPTCP_ATTR_UNSPEC = 0, + MPTCP_ATTR_TOKEN = 1, + MPTCP_ATTR_FAMILY = 2, + MPTCP_ATTR_LOC_ID = 3, + MPTCP_ATTR_REM_ID = 4, + MPTCP_ATTR_SADDR4 = 5, + MPTCP_ATTR_SADDR6 = 6, + MPTCP_ATTR_DADDR4 = 7, + MPTCP_ATTR_DADDR6 = 8, + MPTCP_ATTR_SPORT = 9, + MPTCP_ATTR_DPORT = 10, + MPTCP_ATTR_BACKUP = 11, + MPTCP_ATTR_ERROR = 12, + MPTCP_ATTR_FLAGS = 13, + MPTCP_ATTR_TIMEOUT = 14, + MPTCP_ATTR_IF_IDX = 15, + MPTCP_ATTR_RESET_REASON = 16, + MPTCP_ATTR_RESET_FLAGS = 17, + __MPTCP_ATTR_AFTER_LAST = 18, +}; + +struct mptcp_pm_addr_entry { + struct list_head list; + struct mptcp_addr_info addr; + u8 flags; + int ifindex; + struct callback_head rcu; + struct socket *lsk; +}; + +struct mptcp_pm_add_entry___2 { + struct list_head list; + struct mptcp_addr_info addr; + struct timer_list add_timer; + struct mptcp_sock *sock; + u8 retrans_times; +}; + +struct pm_nl_pernet { + spinlock_t lock; + struct list_head local_addr_list; + unsigned int addrs; + unsigned int add_addr_signal_max; + unsigned int add_addr_accept_max; + unsigned int local_addr_max; + unsigned int subflows_max; + unsigned int next_id; + long unsigned int id_bitmap[4]; +}; + +struct join_entry { + u32 token; + u32 remote_nonce; + u32 local_nonce; + u8 join_id; + u8 local_id; + u8 backup; + u8 valid; +}; + +struct pcibios_fwaddrmap { + struct list_head list; + struct pci_dev *dev; + resource_size_t fw_addr[17]; +}; + +struct pci_check_idx_range { + int start; + int end; +}; + +struct pci_raw_ops { + int (*read)(unsigned int, unsigned int, unsigned int, int, int, u32 *); + int 
(*write)(unsigned int, unsigned int, unsigned int, int, int, u32); +}; + +struct acpi_table_mcfg { + struct acpi_table_header header; + u8 reserved[8]; +}; + +struct acpi_mcfg_allocation { + u64 address; + u16 pci_segment; + u8 start_bus_number; + u8 end_bus_number; + u32 reserved; +}; + +struct pci_mmcfg_hostbridge_probe { + u32 bus; + u32 devfn; + u32 vendor; + u32 device; + const char * (*probe)(); +}; + +typedef bool (*check_reserved_t)(u64, u64, enum e820_type); + +struct physdev_restore_msi { + uint8_t bus; + uint8_t devfn; +}; + +struct physdev_setup_gsi { + int gsi; + uint8_t triggering; + uint8_t polarity; +}; + +struct xen_pci_frontend_ops { + int (*enable_msi)(struct pci_dev *, int *); + void (*disable_msi)(struct pci_dev *); + int (*enable_msix)(struct pci_dev *, int *, int); + void (*disable_msix)(struct pci_dev *); +}; + +struct xen_msi_ops { + int (*setup_msi_irqs)(struct pci_dev *, int, int); + void (*teardown_msi_irqs)(struct pci_dev *); +}; + +struct xen_device_domain_owner { + domid_t domain; + struct pci_dev *dev; + struct list_head list; +}; + +struct pci_root_info { + struct acpi_pci_root_info common; + struct pci_sysdata sd; + bool mcfg_added; + u8 start_bus; + u8 end_bus; +}; + +struct irq_info___3 { + u8 bus; + u8 devfn; + struct { + u8 link; + u16 bitmap; + } __attribute__((packed)) irq[4]; + u8 slot; + u8 rfu; +}; + +struct irq_routing_table { + u32 signature; + u16 version; + u16 size; + u8 rtr_bus; + u8 rtr_devfn; + u16 exclusive_irqs; + u16 rtr_vendor; + u16 rtr_device; + u32 miniport_data; + u8 rfu[11]; + u8 checksum; + struct irq_info___3 slots[0]; +}; + +struct irq_router { + char *name; + u16 vendor; + u16 device; + int (*get)(struct pci_dev *, struct pci_dev *, int); + int (*set)(struct pci_dev *, struct pci_dev *, int, int); +}; + +struct irq_router_handler { + u16 vendor; + int (*probe)(struct irq_router *, struct pci_dev *, u16); +}; + +struct pci_setup_rom { + struct setup_data data; + uint16_t vendor; + uint16_t devid; + uint64_t pcilen; + long unsigned int segment; + long unsigned int bus; + long unsigned int device; + long unsigned int function; + uint8_t romdata[0]; +}; + +enum pci_bf_sort_state { + pci_bf_sort_default = 0, + pci_force_nobf = 1, + pci_force_bf = 2, + pci_dmi_bf = 3, +}; + +struct pci_root_res { + struct list_head list; + struct resource res; +}; + +struct pci_root_info___2 { + struct list_head list; + char name[12]; + struct list_head resources; + struct resource busn; + int node; + int link; +}; + +struct amd_hostbridge { + u32 bus; + u32 slot; + u32 device; +}; + +struct saved_msr { + bool valid; + struct msr_info info; +}; + +struct saved_msrs { + unsigned int num; + struct saved_msr *array; +}; + +struct saved_context { + struct pt_regs regs; + u16 ds; + u16 es; + u16 fs; + u16 gs; + long unsigned int kernelmode_gs_base; + long unsigned int usermode_gs_base; + long unsigned int fs_base; + long unsigned int cr0; + long unsigned int cr2; + long unsigned int cr3; + long unsigned int cr4; + u64 misc_enable; + bool misc_enable_saved; + struct saved_msrs saved_msrs; + long unsigned int efer; + u16 gdt_pad; + struct desc_ptr gdt_desc; + u16 idt_pad; + struct desc_ptr idt; + u16 ldt; + u16 tss; + long unsigned int tr; + long unsigned int safety; + long unsigned int return_address; +} __attribute__((packed)); + +typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *); + +struct restore_data_record { + long unsigned int jump_address; + long unsigned int jump_address_phys; + long unsigned int cr3; + long unsigned int magic; + long 
unsigned int e820_checksum; +}; + +#ifndef BPF_NO_PRESERVE_ACCESS_INDEX +#pragma clang attribute pop +#endif + +#endif /* __VMLINUX_H__ */ diff --git a/pkg/plugin/lib/_arm64/placeholder.go b/pkg/plugin/lib/_arm64/placeholder.go new file mode 100644 index 000000000..9da6c4186 --- /dev/null +++ b/pkg/plugin/lib/_arm64/placeholder.go @@ -0,0 +1,3 @@ +package arm64 + +// This file is a placeholder to make Go include this directory when vendoring. diff --git a/pkg/plugin/lib/_arm64/vmlinux.h b/pkg/plugin/lib/_arm64/vmlinux.h new file mode 100644 index 000000000..f84b1347b --- /dev/null +++ b/pkg/plugin/lib/_arm64/vmlinux.h @@ -0,0 +1,110543 @@ +#ifndef __VMLINUX_H__ +#define __VMLINUX_H__ + +#ifndef BPF_NO_PRESERVE_ACCESS_INDEX +#pragma clang attribute push (__attribute__((preserve_access_index)), apply_to = record) +#endif + +typedef signed char __s8; + +typedef unsigned char __u8; + +typedef short unsigned int __u16; + +typedef int __s32; + +typedef unsigned int __u32; + +typedef long long int __s64; + +typedef long long unsigned int __u64; + +typedef __s8 s8; + +typedef __u8 u8; + +typedef __u16 u16; + +typedef __s32 s32; + +typedef __u32 u32; + +typedef __s64 s64; + +typedef __u64 u64; + +enum { + false = 0, + true = 1, +}; + +typedef long int __kernel_long_t; + +typedef long unsigned int __kernel_ulong_t; + +typedef int __kernel_pid_t; + +typedef unsigned int __kernel_uid32_t; + +typedef unsigned int __kernel_gid32_t; + +typedef __kernel_ulong_t __kernel_size_t; + +typedef __kernel_long_t __kernel_ssize_t; + +typedef long long int __kernel_loff_t; + +typedef long long int __kernel_time64_t; + +typedef __kernel_long_t __kernel_clock_t; + +typedef int __kernel_timer_t; + +typedef int __kernel_clockid_t; + +typedef __u32 __le32; + +typedef unsigned int __poll_t; + +typedef u32 __kernel_dev_t; + +typedef __kernel_dev_t dev_t; + +typedef short unsigned int umode_t; + +typedef __kernel_pid_t pid_t; + +typedef __kernel_clockid_t clockid_t; + +typedef _Bool bool; + +typedef __kernel_uid32_t uid_t; + +typedef __kernel_gid32_t gid_t; + +typedef __kernel_loff_t loff_t; + +typedef __kernel_size_t size_t; + +typedef __kernel_ssize_t ssize_t; + +typedef s32 int32_t; + +typedef u32 uint32_t; + +typedef u64 sector_t; + +typedef u64 blkcnt_t; + +typedef u64 dma_addr_t; + +typedef unsigned int gfp_t; + +typedef unsigned int fmode_t; + +typedef u64 phys_addr_t; + +typedef long unsigned int irq_hw_number_t; + +typedef struct { + int counter; +} atomic_t; + +typedef struct { + s64 counter; +} atomic64_t; + +struct list_head { + struct list_head *next; + struct list_head *prev; +}; + +struct hlist_node; + +struct hlist_head { + struct hlist_node *first; +}; + +struct hlist_node { + struct hlist_node *next; + struct hlist_node **pprev; +}; + +struct callback_head { + struct callback_head *next; + void (*func)(struct callback_head *); +}; + +struct kernel_symbol { + int value_offset; + int name_offset; + int namespace_offset; +}; + +struct jump_entry { + s32 code; + s32 target; + long int key; +}; + +struct static_key_mod; + +struct static_key { + atomic_t enabled; + union { + long unsigned int type; + struct jump_entry *entries; + struct static_key_mod *next; + }; +}; + +struct static_key_false { + struct static_key key; +}; + +typedef int (*initcall_t)(); + +typedef int initcall_entry_t; + +struct lock_class_key {}; + +struct fs_context; + +struct fs_parameter_spec; + +struct dentry; + +struct super_block; + +struct module; + +struct file_system_type { + const char *name; + int fs_flags; + int 
(*init_fs_context)(struct fs_context *); + const struct fs_parameter_spec *parameters; + struct dentry * (*mount)(struct file_system_type *, int, const char *, void *); + void (*kill_sb)(struct super_block *); + struct module *owner; + struct file_system_type *next; + struct hlist_head fs_supers; + struct lock_class_key s_lock_key; + struct lock_class_key s_umount_key; + struct lock_class_key s_vfs_rename_key; + struct lock_class_key s_writers_key[3]; + struct lock_class_key i_lock_key; + struct lock_class_key i_mutex_key; + struct lock_class_key i_mutex_dir_key; +}; + +struct obs_kernel_param { + const char *str; + int (*setup_func)(char *); + int early; +}; + +typedef atomic64_t atomic_long_t; + +struct qspinlock { + union { + atomic_t val; + struct { + u8 locked; + u8 pending; + }; + struct { + u16 locked_pending; + u16 tail; + }; + }; +}; + +typedef struct qspinlock arch_spinlock_t; + +struct qrwlock { + union { + atomic_t cnts; + struct { + u8 wlocked; + u8 __lstate[3]; + }; + }; + arch_spinlock_t wait_lock; +}; + +typedef struct qrwlock arch_rwlock_t; + +struct lockdep_map {}; + +struct raw_spinlock { + arch_spinlock_t raw_lock; +}; + +typedef struct raw_spinlock raw_spinlock_t; + +struct spinlock { + union { + struct raw_spinlock rlock; + }; +}; + +typedef struct spinlock spinlock_t; + +typedef struct { + arch_rwlock_t raw_lock; +} rwlock_t; + +struct ratelimit_state { + raw_spinlock_t lock; + int interval; + int burst; + int printed; + int missed; + long unsigned int begin; + long unsigned int flags; +}; + +typedef void *fl_owner_t; + +struct file; + +struct kiocb; + +struct iov_iter; + +struct dir_context; + +struct poll_table_struct; + +struct vm_area_struct; + +struct inode; + +struct file_lock; + +struct page; + +struct pipe_inode_info; + +struct seq_file; + +struct file_operations { + struct module *owner; + loff_t (*llseek)(struct file *, loff_t, int); + ssize_t (*read)(struct file *, char *, size_t, loff_t *); + ssize_t (*write)(struct file *, const char *, size_t, loff_t *); + ssize_t (*read_iter)(struct kiocb *, struct iov_iter *); + ssize_t (*write_iter)(struct kiocb *, struct iov_iter *); + int (*iopoll)(struct kiocb *, bool); + int (*iterate)(struct file *, struct dir_context *); + int (*iterate_shared)(struct file *, struct dir_context *); + __poll_t (*poll)(struct file *, struct poll_table_struct *); + long int (*unlocked_ioctl)(struct file *, unsigned int, long unsigned int); + long int (*compat_ioctl)(struct file *, unsigned int, long unsigned int); + int (*mmap)(struct file *, struct vm_area_struct *); + long unsigned int mmap_supported_flags; + int (*open)(struct inode *, struct file *); + int (*flush)(struct file *, fl_owner_t); + int (*release)(struct inode *, struct file *); + int (*fsync)(struct file *, loff_t, loff_t, int); + int (*fasync)(int, struct file *, int); + int (*lock)(struct file *, int, struct file_lock *); + ssize_t (*sendpage)(struct file *, struct page *, int, size_t, loff_t *, int); + long unsigned int (*get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + int (*check_flags)(int); + int (*flock)(struct file *, int, struct file_lock *); + ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); + ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); + int (*setlease)(struct file *, long int, struct file_lock **, void **); + long int (*fallocate)(struct file *, int, loff_t, loff_t); + void 
(*show_fdinfo)(struct seq_file *, struct file *); + ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int); + loff_t (*remap_file_range)(struct file *, loff_t, struct file *, loff_t, loff_t, unsigned int); + int (*fadvise)(struct file *, loff_t, loff_t, int); +}; + +enum system_states { + SYSTEM_BOOTING = 0, + SYSTEM_SCHEDULING = 1, + SYSTEM_RUNNING = 2, + SYSTEM_HALT = 3, + SYSTEM_POWER_OFF = 4, + SYSTEM_RESTART = 5, + SYSTEM_SUSPEND = 6, +}; + +typedef __s64 time64_t; + +struct __kernel_timespec { + __kernel_time64_t tv_sec; + long long int tv_nsec; +}; + +struct timespec64 { + time64_t tv_sec; + long int tv_nsec; +}; + +struct bug_entry { + int bug_addr_disp; + int file_disp; + short unsigned int line; + short unsigned int flags; +}; + +struct cpumask { + long unsigned int bits[2]; +}; + +typedef struct cpumask cpumask_t; + +typedef struct cpumask cpumask_var_t[1]; + +struct llist_node { + struct llist_node *next; +}; + +struct __call_single_node { + struct llist_node llist; + union { + unsigned int u_flags; + atomic_t a_flags; + }; + u16 src; + u16 dst; +}; + +typedef void (*smp_call_func_t)(void *); + +struct __call_single_data { + union { + struct __call_single_node node; + struct { + struct llist_node llist; + unsigned int flags; + u16 src; + u16 dst; + }; + }; + smp_call_func_t func; + void *info; +}; + +enum timespec_type { + TT_NONE = 0, + TT_NATIVE = 1, + TT_COMPAT = 2, +}; + +typedef s32 old_time32_t; + +struct old_timespec32 { + old_time32_t tv_sec; + s32 tv_nsec; +}; + +struct pollfd { + int fd; + short int events; + short int revents; +}; + +struct restart_block { + long int (*fn)(struct restart_block *); + union { + struct { + u32 *uaddr; + u32 val; + u32 flags; + u32 bitset; + u64 time; + u32 *uaddr2; + } futex; + struct { + clockid_t clockid; + enum timespec_type type; + union { + struct __kernel_timespec *rmtp; + struct old_timespec32 *compat_rmtp; + }; + u64 expires; + } nanosleep; + struct { + struct pollfd *ufds; + int nfds; + int has_timeout; + long unsigned int tv_sec; + long unsigned int tv_nsec; + } poll; + }; +}; + +typedef long unsigned int mm_segment_t; + +struct thread_info { + long unsigned int flags; + mm_segment_t addr_limit; + u64 ttbr0; + union { + u64 preempt_count; + struct { + u32 count; + u32 need_resched; + } preempt; + }; +}; + +struct refcount_struct { + atomic_t refs; +}; + +typedef struct refcount_struct refcount_t; + +struct load_weight { + long unsigned int weight; + u32 inv_weight; +}; + +struct rb_node { + long unsigned int __rb_parent_color; + struct rb_node *rb_right; + struct rb_node *rb_left; +}; + +struct sched_statistics { + u64 wait_start; + u64 wait_max; + u64 wait_count; + u64 wait_sum; + u64 iowait_count; + u64 iowait_sum; + u64 sleep_start; + u64 sleep_max; + s64 sum_sleep_runtime; + u64 block_start; + u64 block_max; + u64 exec_max; + u64 slice_max; + u64 nr_migrations_cold; + u64 nr_failed_migrations_affine; + u64 nr_failed_migrations_running; + u64 nr_failed_migrations_hot; + u64 nr_forced_migrations; + u64 nr_wakeups; + u64 nr_wakeups_sync; + u64 nr_wakeups_migrate; + u64 nr_wakeups_local; + u64 nr_wakeups_remote; + u64 nr_wakeups_affine; + u64 nr_wakeups_affine_attempts; + u64 nr_wakeups_passive; + u64 nr_wakeups_idle; +}; + +struct util_est { + unsigned int enqueued; + unsigned int ewma; +}; + +struct sched_avg { + u64 last_update_time; + u64 load_sum; + u64 runnable_sum; + u32 util_sum; + u32 period_contrib; + long unsigned int load_avg; + long unsigned int runnable_avg; + long unsigned 
int util_avg; + struct util_est util_est; +}; + +struct cfs_rq; + +struct sched_entity { + struct load_weight load; + struct rb_node run_node; + struct list_head group_node; + unsigned int on_rq; + u64 exec_start; + u64 sum_exec_runtime; + u64 vruntime; + u64 prev_sum_exec_runtime; + u64 nr_migrations; + struct sched_statistics statistics; + int depth; + struct sched_entity *parent; + struct cfs_rq *cfs_rq; + struct cfs_rq *my_q; + long unsigned int runnable_weight; + long: 64; + long: 64; + long: 64; + struct sched_avg avg; +}; + +struct sched_rt_entity { + struct list_head run_list; + long unsigned int timeout; + long unsigned int watchdog_stamp; + unsigned int time_slice; + short unsigned int on_rq; + short unsigned int on_list; + struct sched_rt_entity *back; +}; + +typedef s64 ktime_t; + +struct timerqueue_node { + struct rb_node node; + ktime_t expires; +}; + +enum hrtimer_restart { + HRTIMER_NORESTART = 0, + HRTIMER_RESTART = 1, +}; + +struct hrtimer_clock_base; + +struct hrtimer { + struct timerqueue_node node; + ktime_t _softexpires; + enum hrtimer_restart (*function)(struct hrtimer *); + struct hrtimer_clock_base *base; + u8 state; + u8 is_rel; + u8 is_soft; + u8 is_hard; +}; + +struct sched_dl_entity { + struct rb_node rb_node; + u64 dl_runtime; + u64 dl_deadline; + u64 dl_period; + u64 dl_bw; + u64 dl_density; + s64 runtime; + u64 deadline; + unsigned int flags; + unsigned int dl_throttled: 1; + unsigned int dl_yielded: 1; + unsigned int dl_non_contending: 1; + unsigned int dl_overrun: 1; + struct hrtimer dl_timer; + struct hrtimer inactive_timer; + struct sched_dl_entity *pi_se; +}; + +struct uclamp_se { + unsigned int value: 11; + unsigned int bucket_id: 3; + unsigned int active: 1; + unsigned int user_defined: 1; +}; + +union rcu_special { + struct { + u8 blocked; + u8 need_qs; + u8 exp_hint; + u8 need_mb; + } b; + u32 s; +}; + +struct sched_info { + long unsigned int pcount; + long long unsigned int run_delay; + long long unsigned int last_arrival; + long long unsigned int last_queued; +}; + +struct plist_node { + int prio; + struct list_head prio_list; + struct list_head node_list; +}; + +struct vmacache { + u64 seqnum; + struct vm_area_struct *vmas[4]; +}; + +struct task_rss_stat { + int events; + int count[4]; +}; + +struct prev_cputime { + u64 utime; + u64 stime; + raw_spinlock_t lock; +}; + +struct rb_root { + struct rb_node *rb_node; +}; + +struct rb_root_cached { + struct rb_root rb_root; + struct rb_node *rb_leftmost; +}; + +struct timerqueue_head { + struct rb_root_cached rb_root; +}; + +struct posix_cputimer_base { + u64 nextevt; + struct timerqueue_head tqhead; +}; + +struct posix_cputimers { + struct posix_cputimer_base bases[3]; + unsigned int timers_active; + unsigned int expiry_active; +}; + +struct sem_undo_list; + +struct sysv_sem { + struct sem_undo_list *undo_list; +}; + +struct sysv_shm { + struct list_head shm_clist; +}; + +typedef struct { + long unsigned int sig[1]; +} sigset_t; + +struct sigpending { + struct list_head list; + sigset_t signal; +}; + +typedef struct { + uid_t val; +} kuid_t; + +struct seccomp_filter; + +struct seccomp { + int mode; + atomic_t filter_count; + struct seccomp_filter *filter; +}; + +struct wake_q_node { + struct wake_q_node *next; +}; + +struct task_io_accounting { + u64 rchar; + u64 wchar; + u64 syscr; + u64 syscw; + u64 read_bytes; + u64 write_bytes; + u64 cancelled_write_bytes; +}; + +typedef struct { + long unsigned int bits[2]; +} nodemask_t; + +struct seqcount { + unsigned int sequence; +}; + +typedef struct 
seqcount seqcount_t; + +struct seqcount_spinlock { + seqcount_t seqcount; +}; + +typedef struct seqcount_spinlock seqcount_spinlock_t; + +struct optimistic_spin_queue { + atomic_t tail; +}; + +struct mutex { + atomic_long_t owner; + spinlock_t wait_lock; + struct optimistic_spin_queue osq; + struct list_head wait_list; +}; + +struct tlbflush_unmap_batch {}; + +struct page_frag { + struct page *page; + __u32 offset; + __u32 size; +}; + +struct latency_record { + long unsigned int backtrace[12]; + unsigned int count; + long unsigned int time; + long unsigned int max; +}; + +struct cpu_context { + long unsigned int x19; + long unsigned int x20; + long unsigned int x21; + long unsigned int x22; + long unsigned int x23; + long unsigned int x24; + long unsigned int x25; + long unsigned int x26; + long unsigned int x27; + long unsigned int x28; + long unsigned int fp; + long unsigned int sp; + long unsigned int pc; +}; + +struct user_fpsimd_state { + __int128 unsigned vregs[32]; + __u32 fpsr; + __u32 fpcr; + __u32 __reserved[2]; +}; + +struct perf_event; + +struct debug_info { + int suspended_step; + int bps_disabled; + int wps_disabled; + struct perf_event *hbp_break[16]; + struct perf_event *hbp_watch[16]; +}; + +struct ptrauth_key { + long unsigned int lo; + long unsigned int hi; +}; + +struct ptrauth_keys_user { + struct ptrauth_key apia; + struct ptrauth_key apib; + struct ptrauth_key apda; + struct ptrauth_key apdb; + struct ptrauth_key apga; +}; + +struct ptrauth_keys_kernel { + struct ptrauth_key apia; +}; + +struct thread_struct { + struct cpu_context cpu_context; + long: 64; + struct { + long unsigned int tp_value; + long unsigned int tp2_value; + struct user_fpsimd_state fpsimd_state; + } uw; + unsigned int fpsimd_cpu; + void *sve_state; + unsigned int sve_vl; + unsigned int sve_vl_onexec; + long unsigned int fault_address; + long unsigned int fault_code; + struct debug_info debug; + struct ptrauth_keys_user keys_user; + struct ptrauth_keys_kernel keys_kernel; + u64 sctlr_tcf0; + u64 gcr_user_incl; + long: 64; +}; + +struct sched_class; + +struct task_group; + +struct mm_struct; + +struct pid; + +struct completion; + +struct cred; + +struct key; + +struct nameidata; + +struct fs_struct; + +struct files_struct; + +struct io_uring_task; + +struct nsproxy; + +struct signal_struct; + +struct sighand_struct; + +struct audit_context; + +struct rt_mutex_waiter; + +struct bio_list; + +struct blk_plug; + +struct reclaim_state; + +struct backing_dev_info; + +struct io_context; + +struct capture_control; + +struct kernel_siginfo; + +typedef struct kernel_siginfo kernel_siginfo_t; + +struct css_set; + +struct robust_list_head; + +struct compat_robust_list_head; + +struct futex_pi_state; + +struct perf_event_context; + +struct mempolicy; + +struct numa_group; + +struct rseq; + +struct task_delay_info; + +struct ftrace_ret_stack; + +struct mem_cgroup; + +struct request_queue; + +struct uprobe_task; + +struct vm_struct; + +struct task_struct { + struct thread_info thread_info; + volatile long int state; + void *stack; + refcount_t usage; + unsigned int flags; + unsigned int ptrace; + int on_cpu; + struct __call_single_node wake_entry; + unsigned int cpu; + unsigned int wakee_flips; + long unsigned int wakee_flip_decay_ts; + struct task_struct *last_wakee; + int recent_used_cpu; + int wake_cpu; + int on_rq; + int prio; + int static_prio; + int normal_prio; + unsigned int rt_priority; + const struct sched_class *sched_class; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + 
struct sched_entity se; + struct sched_rt_entity rt; + struct task_group *sched_task_group; + struct sched_dl_entity dl; + struct uclamp_se uclamp_req[2]; + struct uclamp_se uclamp[2]; + struct hlist_head preempt_notifiers; + unsigned int btrace_seq; + unsigned int policy; + int nr_cpus_allowed; + const cpumask_t *cpus_ptr; + cpumask_t cpus_mask; + long unsigned int rcu_tasks_nvcsw; + u8 rcu_tasks_holdout; + u8 rcu_tasks_idx; + int rcu_tasks_idle_cpu; + struct list_head rcu_tasks_holdout_list; + int trc_reader_nesting; + int trc_ipi_to_cpu; + union rcu_special trc_reader_special; + bool trc_reader_checked; + struct list_head trc_holdout_list; + struct sched_info sched_info; + struct list_head tasks; + struct plist_node pushable_tasks; + struct rb_node pushable_dl_tasks; + struct mm_struct *mm; + struct mm_struct *active_mm; + struct vmacache vmacache; + struct task_rss_stat rss_stat; + int exit_state; + int exit_code; + int exit_signal; + int pdeath_signal; + long unsigned int jobctl; + unsigned int personality; + unsigned int sched_reset_on_fork: 1; + unsigned int sched_contributes_to_load: 1; + unsigned int sched_migrated: 1; + unsigned int sched_psi_wake_requeue: 1; + int: 28; + unsigned int sched_remote_wakeup: 1; + unsigned int in_execve: 1; + unsigned int in_iowait: 1; + unsigned int in_user_fault: 1; + unsigned int no_cgroup_migration: 1; + unsigned int frozen: 1; + unsigned int use_memdelay: 1; + unsigned int in_memstall: 1; + long unsigned int atomic_flags; + struct restart_block restart_block; + pid_t pid; + pid_t tgid; + long unsigned int stack_canary; + struct task_struct *real_parent; + struct task_struct *parent; + struct list_head children; + struct list_head sibling; + struct task_struct *group_leader; + struct list_head ptraced; + struct list_head ptrace_entry; + struct pid *thread_pid; + struct hlist_node pid_links[4]; + struct list_head thread_group; + struct list_head thread_node; + struct completion *vfork_done; + int *set_child_tid; + int *clear_child_tid; + u64 utime; + u64 stime; + u64 gtime; + struct prev_cputime prev_cputime; + long unsigned int nvcsw; + long unsigned int nivcsw; + u64 start_time; + u64 start_boottime; + long unsigned int min_flt; + long unsigned int maj_flt; + struct posix_cputimers posix_cputimers; + const struct cred *ptracer_cred; + const struct cred *real_cred; + const struct cred *cred; + struct key *cached_requested_key; + char comm[16]; + struct nameidata *nameidata; + struct sysv_sem sysvsem; + struct sysv_shm sysvshm; + long unsigned int last_switch_count; + long unsigned int last_switch_time; + struct fs_struct *fs; + struct files_struct *files; + struct io_uring_task *io_uring; + struct nsproxy *nsproxy; + struct signal_struct *signal; + struct sighand_struct *sighand; + sigset_t blocked; + sigset_t real_blocked; + sigset_t saved_sigmask; + struct sigpending pending; + long unsigned int sas_ss_sp; + size_t sas_ss_size; + unsigned int sas_ss_flags; + struct callback_head *task_works; + struct audit_context *audit_context; + kuid_t loginuid; + unsigned int sessionid; + struct seccomp seccomp; + u64 parent_exec_id; + u64 self_exec_id; + spinlock_t alloc_lock; + raw_spinlock_t pi_lock; + struct wake_q_node wake_q; + struct rb_root_cached pi_waiters; + struct task_struct *pi_top_task; + struct rt_mutex_waiter *pi_blocked_on; + void *journal_info; + struct bio_list *bio_list; + struct blk_plug *plug; + struct reclaim_state *reclaim_state; + struct backing_dev_info *backing_dev_info; + struct io_context *io_context; + struct capture_control 
*capture_control; + long unsigned int ptrace_message; + kernel_siginfo_t *last_siginfo; + struct task_io_accounting ioac; + unsigned int psi_flags; + u64 acct_rss_mem1; + u64 acct_vm_mem1; + u64 acct_timexpd; + nodemask_t mems_allowed; + seqcount_spinlock_t mems_allowed_seq; + int cpuset_mem_spread_rotor; + int cpuset_slab_spread_rotor; + struct css_set *cgroups; + struct list_head cg_list; + struct robust_list_head *robust_list; + struct compat_robust_list_head *compat_robust_list; + struct list_head pi_state_list; + struct futex_pi_state *pi_state_cache; + struct mutex futex_exit_mutex; + unsigned int futex_state; + struct perf_event_context *perf_event_ctxp[2]; + struct mutex perf_event_mutex; + struct list_head perf_event_list; + struct mempolicy *mempolicy; + short int il_prev; + short int pref_node_fork; + int numa_scan_seq; + unsigned int numa_scan_period; + unsigned int numa_scan_period_max; + int numa_preferred_nid; + long unsigned int numa_migrate_retry; + u64 node_stamp; + u64 last_task_numa_placement; + u64 last_sum_exec_runtime; + struct callback_head numa_work; + struct numa_group *numa_group; + long unsigned int *numa_faults; + long unsigned int total_numa_faults; + long unsigned int numa_faults_locality[3]; + long unsigned int numa_pages_migrated; + struct rseq *rseq; + u32 rseq_sig; + long unsigned int rseq_event_mask; + struct tlbflush_unmap_batch tlb_ubc; + union { + refcount_t rcu_users; + struct callback_head rcu; + }; + struct pipe_inode_info *splice_pipe; + struct page_frag task_frag; + struct task_delay_info *delays; + int nr_dirtied; + int nr_dirtied_pause; + long unsigned int dirty_paused_when; + int latency_record_count; + struct latency_record latency_record[32]; + u64 timer_slack_ns; + u64 default_timer_slack_ns; + int curr_ret_stack; + int curr_ret_depth; + struct ftrace_ret_stack *ret_stack; + long long unsigned int ftrace_timestamp; + atomic_t trace_overrun; + atomic_t tracing_graph_pause; + long unsigned int trace; + long unsigned int trace_recursion; + struct mem_cgroup *memcg_in_oom; + gfp_t memcg_oom_gfp_mask; + int memcg_oom_order; + unsigned int memcg_nr_pages_over_high; + struct mem_cgroup *active_memcg; + struct request_queue *throttle_queue; + struct uprobe_task *utask; + unsigned int sequential_io; + unsigned int sequential_io_avg; + int pagefault_disabled; + struct task_struct *oom_reaper_list; + struct vm_struct *stack_vm_area; + refcount_t stack_refcount; + void *security; + struct thread_struct thread; +}; + +typedef s32 compat_long_t; + +typedef u32 compat_uptr_t; + +struct user_pt_regs { + __u64 regs[31]; + __u64 sp; + __u64 pc; + __u64 pstate; +}; + +struct pt_regs { + union { + struct user_pt_regs user_regs; + struct { + u64 regs[31]; + u64 sp; + u64 pc; + u64 pstate; + }; + }; + u64 orig_x0; + s32 syscallno; + u32 unused2; + u64 orig_addr_limit; + u64 pmr_save; + u64 stackframe[2]; + u64 lockdep_hardirqs; + u64 exit_rcu; +}; + +struct arch_hw_breakpoint_ctrl { + u32 __reserved: 19; + u32 len: 8; + u32 type: 2; + u32 privilege: 2; + u32 enabled: 1; +}; + +struct arch_hw_breakpoint { + u64 address; + u64 trigger; + struct arch_hw_breakpoint_ctrl ctrl; +}; + +typedef u64 pteval_t; + +typedef u64 pmdval_t; + +typedef u64 pudval_t; + +typedef u64 pgdval_t; + +typedef struct { + pteval_t pte; +} pte_t; + +typedef struct { + pmdval_t pmd; +} pmd_t; + +typedef struct { + pudval_t pud; +} pud_t; + +typedef struct { + pgdval_t pgd; +} pgd_t; + +typedef struct { + pteval_t pgprot; +} pgprot_t; + +enum module_state { + MODULE_STATE_LIVE = 0, + 
MODULE_STATE_COMING = 1, + MODULE_STATE_GOING = 2, + MODULE_STATE_UNFORMED = 3, +}; + +struct kref { + refcount_t refcount; +}; + +struct kset; + +struct kobj_type; + +struct kernfs_node; + +struct kobject { + const char *name; + struct list_head entry; + struct kobject *parent; + struct kset *kset; + struct kobj_type *ktype; + struct kernfs_node *sd; + struct kref kref; + unsigned int state_initialized: 1; + unsigned int state_in_sysfs: 1; + unsigned int state_add_uevent_sent: 1; + unsigned int state_remove_uevent_sent: 1; + unsigned int uevent_suppress: 1; +}; + +struct module_param_attrs; + +struct module_kobject { + struct kobject kobj; + struct module *mod; + struct kobject *drivers_dir; + struct module_param_attrs *mp; + struct completion *kobj_completion; +}; + +struct latch_tree_node { + struct rb_node node[2]; +}; + +struct mod_tree_node { + struct module *mod; + struct latch_tree_node node; +}; + +struct module_layout { + void *base; + unsigned int size; + unsigned int text_size; + unsigned int ro_size; + unsigned int ro_after_init_size; + struct mod_tree_node mtn; +}; + +struct mod_plt_sec { + int plt_shndx; + int plt_num_entries; + int plt_max_entries; +}; + +struct plt_entry; + +struct mod_arch_specific { + struct mod_plt_sec core; + struct mod_plt_sec init; + struct plt_entry *ftrace_trampolines; +}; + +struct elf64_sym; + +typedef struct elf64_sym Elf64_Sym; + +struct mod_kallsyms { + Elf64_Sym *symtab; + unsigned int num_symtab; + char *strtab; + char *typetab; +}; + +typedef const int tracepoint_ptr_t; + +struct module_attribute; + +struct kernel_param; + +struct exception_table_entry; + +struct module_sect_attrs; + +struct module_notes_attrs; + +struct srcu_struct; + +struct bpf_raw_event_map; + +struct trace_event_call; + +struct trace_eval_map; + +struct error_injection_entry; + +struct module { + enum module_state state; + struct list_head list; + char name[56]; + struct module_kobject mkobj; + struct module_attribute *modinfo_attrs; + const char *version; + const char *srcversion; + struct kobject *holders_dir; + const struct kernel_symbol *syms; + const s32 *crcs; + unsigned int num_syms; + struct mutex param_lock; + struct kernel_param *kp; + unsigned int num_kp; + unsigned int num_gpl_syms; + const struct kernel_symbol *gpl_syms; + const s32 *gpl_crcs; + bool using_gplonly_symbols; + const struct kernel_symbol *unused_syms; + const s32 *unused_crcs; + unsigned int num_unused_syms; + unsigned int num_unused_gpl_syms; + const struct kernel_symbol *unused_gpl_syms; + const s32 *unused_gpl_crcs; + bool sig_ok; + bool async_probe_requested; + const struct kernel_symbol *gpl_future_syms; + const s32 *gpl_future_crcs; + unsigned int num_gpl_future_syms; + unsigned int num_exentries; + struct exception_table_entry *extable; + int (*init)(); + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct module_layout core_layout; + struct module_layout init_layout; + struct mod_arch_specific arch; + long unsigned int taints; + unsigned int num_bugs; + struct list_head bug_list; + struct bug_entry *bug_table; + struct mod_kallsyms *kallsyms; + struct mod_kallsyms core_kallsyms; + struct module_sect_attrs *sect_attrs; + struct module_notes_attrs *notes_attrs; + char *args; + void *percpu; + unsigned int percpu_size; + void *noinstr_text_start; + unsigned int noinstr_text_size; + unsigned int num_tracepoints; + tracepoint_ptr_t *tracepoints_ptrs; + unsigned int num_srcu_structs; + struct srcu_struct **srcu_struct_ptrs; + unsigned int 
num_bpf_raw_events; + struct bpf_raw_event_map *bpf_raw_events; + struct jump_entry *jump_entries; + unsigned int num_jump_entries; + unsigned int num_trace_bprintk_fmt; + const char **trace_bprintk_fmt_start; + struct trace_event_call **trace_events; + unsigned int num_trace_events; + struct trace_eval_map **trace_evals; + unsigned int num_trace_evals; + unsigned int num_ftrace_callsites; + long unsigned int *ftrace_callsites; + void *kprobes_text_start; + unsigned int kprobes_text_size; + long unsigned int *kprobe_blacklist; + unsigned int num_kprobe_blacklist; + struct list_head source_list; + struct list_head target_list; + void (*exit)(); + atomic_t refcnt; + struct error_injection_entry *ei_funcs; + unsigned int num_ei_funcs; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +enum perf_event_state { + PERF_EVENT_STATE_DEAD = 4294967292, + PERF_EVENT_STATE_EXIT = 4294967293, + PERF_EVENT_STATE_ERROR = 4294967294, + PERF_EVENT_STATE_OFF = 4294967295, + PERF_EVENT_STATE_INACTIVE = 0, + PERF_EVENT_STATE_ACTIVE = 1, +}; + +typedef struct { + atomic_long_t a; +} local_t; + +typedef struct { + local_t a; +} local64_t; + +struct perf_event_attr { + __u32 type; + __u32 size; + __u64 config; + union { + __u64 sample_period; + __u64 sample_freq; + }; + __u64 sample_type; + __u64 read_format; + __u64 disabled: 1; + __u64 inherit: 1; + __u64 pinned: 1; + __u64 exclusive: 1; + __u64 exclude_user: 1; + __u64 exclude_kernel: 1; + __u64 exclude_hv: 1; + __u64 exclude_idle: 1; + __u64 mmap: 1; + __u64 comm: 1; + __u64 freq: 1; + __u64 inherit_stat: 1; + __u64 enable_on_exec: 1; + __u64 task: 1; + __u64 watermark: 1; + __u64 precise_ip: 2; + __u64 mmap_data: 1; + __u64 sample_id_all: 1; + __u64 exclude_host: 1; + __u64 exclude_guest: 1; + __u64 exclude_callchain_kernel: 1; + __u64 exclude_callchain_user: 1; + __u64 mmap2: 1; + __u64 comm_exec: 1; + __u64 use_clockid: 1; + __u64 context_switch: 1; + __u64 write_backward: 1; + __u64 namespaces: 1; + __u64 ksymbol: 1; + __u64 bpf_event: 1; + __u64 aux_output: 1; + __u64 cgroup: 1; + __u64 text_poke: 1; + __u64 __reserved_1: 30; + union { + __u32 wakeup_events; + __u32 wakeup_watermark; + }; + __u32 bp_type; + union { + __u64 bp_addr; + __u64 kprobe_func; + __u64 uprobe_path; + __u64 config1; + }; + union { + __u64 bp_len; + __u64 kprobe_addr; + __u64 probe_offset; + __u64 config2; + }; + __u64 branch_sample_type; + __u64 sample_regs_user; + __u32 sample_stack_user; + __s32 clockid; + __u64 sample_regs_intr; + __u32 aux_watermark; + __u16 sample_max_stack; + __u16 __reserved_2; + __u32 aux_sample_size; + __u32 __reserved_3; +}; + +struct hw_perf_event_extra { + u64 config; + unsigned int reg; + int alloc; + int idx; +}; + +struct hw_perf_event { + union { + struct { + u64 config; + u64 last_tag; + long unsigned int config_base; + long unsigned int event_base; + int event_base_rdpmc; + int idx; + int last_cpu; + int flags; + struct hw_perf_event_extra extra_reg; + struct hw_perf_event_extra branch_reg; + }; + struct { + struct hrtimer hrtimer; + }; + struct { + struct list_head tp_list; + }; + struct { + u64 pwr_acc; + u64 ptsc; + }; + struct { + struct arch_hw_breakpoint info; + struct list_head bp_list; + }; + struct { + u8 iommu_bank; + u8 iommu_cntr; + u16 padding; + u64 conf; + u64 conf1; + }; + }; + struct task_struct *target; + void *addr_filters; + long unsigned int addr_filters_gen; + int state; + local64_t prev_count; + u64 sample_period; + union { + struct { + u64 last_period; + local64_t period_left; + }; 
+ struct { + u64 saved_metric; + u64 saved_slots; + }; + }; + u64 interrupts_seq; + u64 interrupts; + u64 freq_time_stamp; + u64 freq_count_stamp; +}; + +struct wait_queue_head { + spinlock_t lock; + struct list_head head; +}; + +typedef struct wait_queue_head wait_queue_head_t; + +struct irq_work { + union { + struct __call_single_node node; + struct { + struct llist_node llnode; + atomic_t flags; + }; + }; + void (*func)(struct irq_work *); +}; + +struct perf_addr_filters_head { + struct list_head list; + raw_spinlock_t lock; + unsigned int nr_file_filters; +}; + +struct perf_sample_data; + +typedef void (*perf_overflow_handler_t)(struct perf_event *, struct perf_sample_data *, struct pt_regs *); + +struct ftrace_ops; + +typedef void (*ftrace_func_t)(long unsigned int, long unsigned int, struct ftrace_ops *, struct pt_regs *); + +struct ftrace_hash; + +struct ftrace_ops_hash { + struct ftrace_hash *notrace_hash; + struct ftrace_hash *filter_hash; + struct mutex regex_lock; +}; + +struct ftrace_ops { + ftrace_func_t func; + struct ftrace_ops *next; + long unsigned int flags; + void *private; + ftrace_func_t saved_func; + struct ftrace_ops_hash local_hash; + struct ftrace_ops_hash *func_hash; + struct ftrace_ops_hash old_hash; + long unsigned int trampoline; + long unsigned int trampoline_size; + struct list_head list; +}; + +struct pmu; + +struct perf_buffer; + +struct fasync_struct; + +struct perf_addr_filter_range; + +struct pid_namespace; + +struct bpf_prog; + +struct event_filter; + +struct perf_cgroup; + +struct perf_event { + struct list_head event_entry; + struct list_head sibling_list; + struct list_head active_list; + struct rb_node group_node; + u64 group_index; + struct list_head migrate_entry; + struct hlist_node hlist_entry; + struct list_head active_entry; + int nr_siblings; + int event_caps; + int group_caps; + struct perf_event *group_leader; + struct pmu *pmu; + void *pmu_private; + enum perf_event_state state; + unsigned int attach_state; + local64_t count; + atomic64_t child_count; + u64 total_time_enabled; + u64 total_time_running; + u64 tstamp; + u64 shadow_ctx_time; + struct perf_event_attr attr; + u16 header_size; + u16 id_header_size; + u16 read_size; + struct hw_perf_event hw; + struct perf_event_context *ctx; + atomic_long_t refcount; + atomic64_t child_total_time_enabled; + atomic64_t child_total_time_running; + struct mutex child_mutex; + struct list_head child_list; + struct perf_event *parent; + int oncpu; + int cpu; + struct list_head owner_entry; + struct task_struct *owner; + struct mutex mmap_mutex; + atomic_t mmap_count; + struct perf_buffer *rb; + struct list_head rb_entry; + long unsigned int rcu_batches; + int rcu_pending; + wait_queue_head_t waitq; + struct fasync_struct *fasync; + int pending_wakeup; + int pending_kill; + int pending_disable; + struct irq_work pending; + atomic_t event_limit; + struct perf_addr_filters_head addr_filters; + struct perf_addr_filter_range *addr_filter_ranges; + long unsigned int addr_filters_gen; + struct perf_event *aux_event; + void (*destroy)(struct perf_event *); + struct callback_head callback_head; + struct pid_namespace *ns; + u64 id; + u64 (*clock)(); + perf_overflow_handler_t overflow_handler; + void *overflow_handler_context; + perf_overflow_handler_t orig_overflow_handler; + struct bpf_prog *prog; + struct trace_event_call *tp_event; + struct event_filter *filter; + struct ftrace_ops ftrace_ops; + struct perf_cgroup *cgrp; + void *security; + struct list_head sb_list; +}; + +struct wait_queue_entry; + 
+typedef int (*wait_queue_func_t)(struct wait_queue_entry *, unsigned int, int, void *); + +struct wait_queue_entry { + unsigned int flags; + void *private; + wait_queue_func_t func; + struct list_head entry; +}; + +typedef struct wait_queue_entry wait_queue_entry_t; + +enum pid_type { + PIDTYPE_PID = 0, + PIDTYPE_TGID = 1, + PIDTYPE_PGID = 2, + PIDTYPE_SID = 3, + PIDTYPE_MAX = 4, +}; + +struct upid { + int nr; + struct pid_namespace *ns; +}; + +struct xarray { + spinlock_t xa_lock; + gfp_t xa_flags; + void *xa_head; +}; + +struct idr { + struct xarray idr_rt; + unsigned int idr_base; + unsigned int idr_next; +}; + +struct proc_ns_operations; + +struct ns_common { + atomic_long_t stashed; + const struct proc_ns_operations *ops; + unsigned int inum; +}; + +struct kmem_cache; + +struct fs_pin; + +struct user_namespace; + +struct ucounts; + +struct pid_namespace { + struct kref kref; + struct idr idr; + struct callback_head rcu; + unsigned int pid_allocated; + struct task_struct *child_reaper; + struct kmem_cache *pid_cachep; + unsigned int level; + struct pid_namespace *parent; + struct fs_pin *bacct; + struct user_namespace *user_ns; + struct ucounts *ucounts; + int reboot; + struct ns_common ns; +}; + +struct pid { + refcount_t count; + unsigned int level; + spinlock_t lock; + struct hlist_head tasks[4]; + struct hlist_head inodes; + wait_queue_head_t wait_pidfd; + struct callback_head rcu; + struct upid numbers[1]; +}; + +struct uid_gid_extent { + u32 first; + u32 lower_first; + u32 count; +}; + +struct uid_gid_map { + u32 nr_extents; + union { + struct uid_gid_extent extent[5]; + struct { + struct uid_gid_extent *forward; + struct uid_gid_extent *reverse; + }; + }; +}; + +typedef struct { + gid_t val; +} kgid_t; + +struct rw_semaphore { + atomic_long_t count; + atomic_long_t owner; + struct optimistic_spin_queue osq; + raw_spinlock_t wait_lock; + struct list_head wait_list; +}; + +struct work_struct; + +typedef void (*work_func_t)(struct work_struct *); + +struct work_struct { + atomic_long_t data; + struct list_head entry; + work_func_t func; +}; + +struct ctl_table; + +struct ctl_table_root; + +struct ctl_table_set; + +struct ctl_dir; + +struct ctl_node; + +struct ctl_table_header { + union { + struct { + struct ctl_table *ctl_table; + int used; + int count; + int nreg; + }; + struct callback_head rcu; + }; + struct completion *unregistering; + struct ctl_table *ctl_table_arg; + struct ctl_table_root *root; + struct ctl_table_set *set; + struct ctl_dir *parent; + struct ctl_node *node; + struct hlist_head inodes; +}; + +struct ctl_dir { + struct ctl_table_header header; + struct rb_root root; +}; + +struct ctl_table_set { + int (*is_seen)(struct ctl_table_set *); + struct ctl_dir dir; +}; + +struct user_namespace { + struct uid_gid_map uid_map; + struct uid_gid_map gid_map; + struct uid_gid_map projid_map; + atomic_t count; + struct user_namespace *parent; + int level; + kuid_t owner; + kgid_t group; + struct ns_common ns; + long unsigned int flags; + struct list_head keyring_name_list; + struct key *user_keyring_register; + struct rw_semaphore keyring_sem; + struct key *persistent_keyring_register; + struct work_struct work; + struct ctl_table_set set; + struct ctl_table_header *sysctls; + struct ucounts *ucounts; + int ucount_max[10]; +}; + +struct timer_list { + struct hlist_node entry; + long unsigned int expires; + void (*function)(struct timer_list *); + u32 flags; +}; + +struct workqueue_struct; + +struct delayed_work { + struct work_struct work; + struct timer_list timer; + 
struct workqueue_struct *wq; + int cpu; +}; + +struct rcu_work { + struct work_struct work; + struct callback_head rcu; + struct workqueue_struct *wq; +}; + +typedef struct page *pgtable_t; + +struct address_space; + +struct dev_pagemap; + +struct obj_cgroup; + +struct page { + long unsigned int flags; + union { + struct { + struct list_head lru; + struct address_space *mapping; + long unsigned int index; + long unsigned int private; + }; + struct { + dma_addr_t dma_addr; + }; + struct { + union { + struct list_head slab_list; + struct { + struct page *next; + int pages; + int pobjects; + }; + }; + struct kmem_cache *slab_cache; + void *freelist; + union { + void *s_mem; + long unsigned int counters; + struct { + unsigned int inuse: 16; + unsigned int objects: 15; + unsigned int frozen: 1; + }; + }; + }; + struct { + long unsigned int compound_head; + unsigned char compound_dtor; + unsigned char compound_order; + atomic_t compound_mapcount; + unsigned int compound_nr; + }; + struct { + long unsigned int _compound_pad_1; + atomic_t hpage_pinned_refcount; + struct list_head deferred_list; + }; + struct { + long unsigned int _pt_pad_1; + pgtable_t pmd_huge_pte; + long unsigned int _pt_pad_2; + union { + struct mm_struct *pt_mm; + atomic_t pt_frag_refcount; + }; + spinlock_t ptl; + }; + struct { + struct dev_pagemap *pgmap; + void *zone_device_data; + }; + struct callback_head callback_head; + }; + union { + atomic_t _mapcount; + unsigned int page_type; + unsigned int active; + int units; + }; + atomic_t _refcount; + union { + struct mem_cgroup *mem_cgroup; + struct obj_cgroup **obj_cgroups; + }; +}; + +struct seqcount_raw_spinlock { + seqcount_t seqcount; +}; + +typedef struct seqcount_raw_spinlock seqcount_raw_spinlock_t; + +typedef struct { + seqcount_spinlock_t seqcount; + spinlock_t lock; +} seqlock_t; + +struct hrtimer_cpu_base; + +struct hrtimer_clock_base { + struct hrtimer_cpu_base *cpu_base; + unsigned int index; + clockid_t clockid; + seqcount_raw_spinlock_t seq; + struct hrtimer *running; + struct timerqueue_head active; + ktime_t (*get_time)(); + ktime_t offset; +}; + +struct hrtimer_cpu_base { + raw_spinlock_t lock; + unsigned int cpu; + unsigned int active_bases; + unsigned int clock_was_set_seq; + unsigned int hres_active: 1; + unsigned int in_hrtirq: 1; + unsigned int hang_detected: 1; + unsigned int softirq_activated: 1; + unsigned int nr_events; + short unsigned int nr_retries; + short unsigned int nr_hangs; + unsigned int max_hang_time; + ktime_t expires_next; + struct hrtimer *next_timer; + ktime_t softirq_expires_next; + struct hrtimer *softirq_next_timer; + struct hrtimer_clock_base clock_base[8]; +}; + +enum node_states { + N_POSSIBLE = 0, + N_ONLINE = 1, + N_NORMAL_MEMORY = 2, + N_HIGH_MEMORY = 2, + N_MEMORY = 3, + N_CPU = 4, + N_GENERIC_INITIATOR = 5, + NR_NODE_STATES = 6, +}; + +struct rlimit { + __kernel_ulong_t rlim_cur; + __kernel_ulong_t rlim_max; +}; + +struct task_cputime { + u64 stime; + u64 utime; + long long unsigned int sum_exec_runtime; +}; + +typedef void __signalfn_t(int); + +typedef __signalfn_t *__sighandler_t; + +typedef void __restorefn_t(); + +typedef __restorefn_t *__sigrestore_t; + +union sigval { + int sival_int; + void *sival_ptr; +}; + +typedef union sigval sigval_t; + +union __sifields { + struct { + __kernel_pid_t _pid; + __kernel_uid32_t _uid; + } _kill; + struct { + __kernel_timer_t _tid; + int _overrun; + sigval_t _sigval; + int _sys_private; + } _timer; + struct { + __kernel_pid_t _pid; + __kernel_uid32_t _uid; + sigval_t _sigval; + } 
_rt; + struct { + __kernel_pid_t _pid; + __kernel_uid32_t _uid; + int _status; + __kernel_clock_t _utime; + __kernel_clock_t _stime; + } _sigchld; + struct { + void *_addr; + union { + short int _addr_lsb; + struct { + char _dummy_bnd[8]; + void *_lower; + void *_upper; + } _addr_bnd; + struct { + char _dummy_pkey[8]; + __u32 _pkey; + } _addr_pkey; + }; + } _sigfault; + struct { + long int _band; + int _fd; + } _sigpoll; + struct { + void *_call_addr; + int _syscall; + unsigned int _arch; + } _sigsys; +}; + +struct kernel_siginfo { + struct { + int si_signo; + int si_errno; + int si_code; + union __sifields _sifields; + }; +}; + +struct user_struct { + refcount_t __count; + atomic_t processes; + atomic_t sigpending; + atomic_t fanotify_listeners; + atomic_long_t epoll_watches; + long unsigned int mq_bytes; + long unsigned int locked_shm; + long unsigned int unix_inflight; + atomic_long_t pipe_bufs; + struct hlist_node uidhash_node; + kuid_t uid; + atomic_long_t locked_vm; + atomic_t nr_watches; + struct ratelimit_state ratelimit; +}; + +struct sigaction { + __sighandler_t sa_handler; + long unsigned int sa_flags; + __sigrestore_t sa_restorer; + sigset_t sa_mask; +}; + +struct k_sigaction { + struct sigaction sa; +}; + +struct userfaultfd_ctx; + +struct vm_userfaultfd_ctx { + struct userfaultfd_ctx *ctx; +}; + +struct anon_vma; + +struct vm_operations_struct; + +struct vm_area_struct { + long unsigned int vm_start; + long unsigned int vm_end; + struct vm_area_struct *vm_next; + struct vm_area_struct *vm_prev; + struct rb_node vm_rb; + long unsigned int rb_subtree_gap; + struct mm_struct *vm_mm; + pgprot_t vm_page_prot; + long unsigned int vm_flags; + struct { + struct rb_node rb; + long unsigned int rb_subtree_last; + } shared; + struct list_head anon_vma_chain; + struct anon_vma *anon_vma; + const struct vm_operations_struct *vm_ops; + long unsigned int vm_pgoff; + struct file *vm_file; + void *vm_private_data; + atomic_long_t swap_readahead_info; + struct mempolicy *vm_policy; + struct vm_userfaultfd_ctx vm_userfaultfd_ctx; +}; + +struct mm_rss_stat { + atomic_long_t count[4]; +}; + +struct cpu_itimer { + u64 expires; + u64 incr; +}; + +struct task_cputime_atomic { + atomic64_t utime; + atomic64_t stime; + atomic64_t sum_exec_runtime; +}; + +struct thread_group_cputimer { + struct task_cputime_atomic cputime_atomic; +}; + +struct pacct_struct { + int ac_flag; + long int ac_exitcode; + long unsigned int ac_mem; + u64 ac_utime; + u64 ac_stime; + long unsigned int ac_minflt; + long unsigned int ac_majflt; +}; + +struct tty_struct; + +struct autogroup; + +struct taskstats; + +struct tty_audit_buf; + +struct signal_struct { + refcount_t sigcnt; + atomic_t live; + int nr_threads; + struct list_head thread_head; + wait_queue_head_t wait_chldexit; + struct task_struct *curr_target; + struct sigpending shared_pending; + struct hlist_head multiprocess; + int group_exit_code; + int notify_count; + struct task_struct *group_exit_task; + int group_stop_count; + unsigned int flags; + unsigned int is_child_subreaper: 1; + unsigned int has_child_subreaper: 1; + int posix_timer_id; + struct list_head posix_timers; + struct hrtimer real_timer; + ktime_t it_real_incr; + struct cpu_itimer it[2]; + struct thread_group_cputimer cputimer; + struct posix_cputimers posix_cputimers; + struct pid *pids[4]; + struct pid *tty_old_pgrp; + int leader; + struct tty_struct *tty; + struct autogroup *autogroup; + seqlock_t stats_lock; + u64 utime; + u64 stime; + u64 cutime; + u64 cstime; + u64 gtime; + u64 cgtime; + struct 
prev_cputime prev_cputime; + long unsigned int nvcsw; + long unsigned int nivcsw; + long unsigned int cnvcsw; + long unsigned int cnivcsw; + long unsigned int min_flt; + long unsigned int maj_flt; + long unsigned int cmin_flt; + long unsigned int cmaj_flt; + long unsigned int inblock; + long unsigned int oublock; + long unsigned int cinblock; + long unsigned int coublock; + long unsigned int maxrss; + long unsigned int cmaxrss; + struct task_io_accounting ioac; + long long unsigned int sum_sched_runtime; + struct rlimit rlim[16]; + struct pacct_struct pacct; + struct taskstats *stats; + unsigned int audit_tty; + struct tty_audit_buf *tty_audit_buf; + bool oom_flag_origin; + short int oom_score_adj; + short int oom_score_adj_min; + struct mm_struct *oom_mm; + struct mutex cred_guard_mutex; + struct rw_semaphore exec_update_lock; +}; + +struct rseq { + __u32 cpu_id_start; + __u32 cpu_id; + union { + __u64 ptr64; + __u64 ptr; + } rseq_cs; + __u32 flags; + long: 32; + long: 64; +}; + +struct rq; + +struct rq_flags; + +struct sched_class { + int uclamp_enabled; + void (*enqueue_task)(struct rq *, struct task_struct *, int); + void (*dequeue_task)(struct rq *, struct task_struct *, int); + void (*yield_task)(struct rq *); + bool (*yield_to_task)(struct rq *, struct task_struct *); + void (*check_preempt_curr)(struct rq *, struct task_struct *, int); + struct task_struct * (*pick_next_task)(struct rq *); + void (*put_prev_task)(struct rq *, struct task_struct *); + void (*set_next_task)(struct rq *, struct task_struct *, bool); + int (*balance)(struct rq *, struct task_struct *, struct rq_flags *); + int (*select_task_rq)(struct task_struct *, int, int, int); + void (*migrate_task_rq)(struct task_struct *, int); + void (*task_woken)(struct rq *, struct task_struct *); + void (*set_cpus_allowed)(struct task_struct *, const struct cpumask *); + void (*rq_online)(struct rq *); + void (*rq_offline)(struct rq *); + void (*task_tick)(struct rq *, struct task_struct *, int); + void (*task_fork)(struct task_struct *); + void (*task_dead)(struct task_struct *); + void (*switched_from)(struct rq *, struct task_struct *); + void (*switched_to)(struct rq *, struct task_struct *); + void (*prio_changed)(struct rq *, struct task_struct *, int); + unsigned int (*get_rr_interval)(struct rq *, struct task_struct *); + void (*update_curr)(struct rq *); + void (*task_change_group)(struct task_struct *, int); + long: 64; + long: 64; + long: 64; +}; + +typedef struct { + atomic64_t id; + void *sigpage; + refcount_t pinned; + void *vdso; + long unsigned int flags; +} mm_context_t; + +struct xol_area; + +struct uprobes_state { + struct xol_area *xol_area; +}; + +struct linux_binfmt; + +struct core_state; + +struct kioctx_table; + +struct mmu_notifier_subscriptions; + +struct mm_struct { + struct { + struct vm_area_struct *mmap; + struct rb_root mm_rb; + u64 vmacache_seqnum; + long unsigned int (*get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + long unsigned int mmap_base; + long unsigned int mmap_legacy_base; + long unsigned int task_size; + long unsigned int highest_vm_end; + pgd_t *pgd; + atomic_t membarrier_state; + atomic_t mm_users; + atomic_t mm_count; + atomic_t has_pinned; + seqcount_t write_protect_seq; + atomic_long_t pgtables_bytes; + int map_count; + spinlock_t page_table_lock; + struct rw_semaphore mmap_lock; + struct list_head mmlist; + long unsigned int hiwater_rss; + long unsigned int hiwater_vm; + long unsigned int total_vm; + long 
unsigned int locked_vm; + atomic64_t pinned_vm; + long unsigned int data_vm; + long unsigned int exec_vm; + long unsigned int stack_vm; + long unsigned int def_flags; + spinlock_t arg_lock; + long unsigned int start_code; + long unsigned int end_code; + long unsigned int start_data; + long unsigned int end_data; + long unsigned int start_brk; + long unsigned int brk; + long unsigned int start_stack; + long unsigned int arg_start; + long unsigned int arg_end; + long unsigned int env_start; + long unsigned int env_end; + long unsigned int saved_auxv[46]; + struct mm_rss_stat rss_stat; + struct linux_binfmt *binfmt; + mm_context_t context; + long unsigned int flags; + struct core_state *core_state; + spinlock_t ioctx_lock; + struct kioctx_table *ioctx_table; + struct task_struct *owner; + struct user_namespace *user_ns; + struct file *exe_file; + struct mmu_notifier_subscriptions *notifier_subscriptions; + long unsigned int numa_next_scan; + long unsigned int numa_scan_offset; + int numa_scan_seq; + atomic_t tlb_flush_pending; + struct uprobes_state uprobes_state; + atomic_long_t hugetlb_usage; + struct work_struct async_put_work; + u32 pasid; + }; + long unsigned int cpu_bitmap[0]; +}; + +struct swait_queue_head { + raw_spinlock_t lock; + struct list_head task_list; +}; + +struct completion { + unsigned int done; + struct swait_queue_head wait; +}; + +struct kernel_cap_struct { + __u32 cap[2]; +}; + +typedef struct kernel_cap_struct kernel_cap_t; + +struct group_info; + +struct cred { + atomic_t usage; + kuid_t uid; + kgid_t gid; + kuid_t suid; + kgid_t sgid; + kuid_t euid; + kgid_t egid; + kuid_t fsuid; + kgid_t fsgid; + unsigned int securebits; + kernel_cap_t cap_inheritable; + kernel_cap_t cap_permitted; + kernel_cap_t cap_effective; + kernel_cap_t cap_bset; + kernel_cap_t cap_ambient; + unsigned char jit_keyring; + struct key *session_keyring; + struct key *process_keyring; + struct key *thread_keyring; + struct key *request_key_auth; + void *security; + struct user_struct *user; + struct user_namespace *user_ns; + struct group_info *group_info; + union { + int non_rcu; + struct callback_head rcu; + }; +}; + +typedef int32_t key_serial_t; + +typedef uint32_t key_perm_t; + +struct key_type; + +struct key_tag; + +struct keyring_index_key { + long unsigned int hash; + union { + struct { + u16 desc_len; + char desc[6]; + }; + long unsigned int x; + }; + struct key_type *type; + struct key_tag *domain_tag; + const char *description; +}; + +union key_payload { + void *rcu_data0; + void *data[4]; +}; + +struct assoc_array_ptr; + +struct assoc_array { + struct assoc_array_ptr *root; + long unsigned int nr_leaves_on_tree; +}; + +struct watch_list; + +struct key_user; + +struct key_restriction; + +struct key { + refcount_t usage; + key_serial_t serial; + union { + struct list_head graveyard_link; + struct rb_node serial_node; + }; + struct watch_list *watchers; + struct rw_semaphore sem; + struct key_user *user; + void *security; + union { + time64_t expiry; + time64_t revoked_at; + }; + time64_t last_used_at; + kuid_t uid; + kgid_t gid; + key_perm_t perm; + short unsigned int quotalen; + short unsigned int datalen; + short int state; + long unsigned int flags; + union { + struct keyring_index_key index_key; + struct { + long unsigned int hash; + long unsigned int len_desc; + struct key_type *type; + struct key_tag *domain_tag; + char *description; + }; + }; + union { + union key_payload payload; + struct { + struct list_head name_link; + struct assoc_array keys; + }; + }; + struct key_restriction 
*restrict_link; +}; + +struct uts_namespace; + +struct ipc_namespace; + +struct mnt_namespace; + +struct net; + +struct time_namespace; + +struct cgroup_namespace; + +struct nsproxy { + atomic_t count; + struct uts_namespace *uts_ns; + struct ipc_namespace *ipc_ns; + struct mnt_namespace *mnt_ns; + struct pid_namespace *pid_ns_for_children; + struct net *net_ns; + struct time_namespace *time_ns; + struct time_namespace *time_ns_for_children; + struct cgroup_namespace *cgroup_ns; +}; + +struct sighand_struct { + spinlock_t siglock; + refcount_t count; + wait_queue_head_t signalfd_wqh; + struct k_sigaction action[64]; +}; + +struct bio; + +struct bio_list { + struct bio *head; + struct bio *tail; +}; + +struct blk_plug { + struct list_head mq_list; + struct list_head cb_list; + short unsigned int rq_count; + bool multiple_queues; + bool nowait; +}; + +struct reclaim_state { + long unsigned int reclaimed_slab; +}; + +struct percpu_counter { + raw_spinlock_t lock; + s64 count; + struct list_head list; + s32 *counters; +}; + +struct fprop_local_percpu { + struct percpu_counter events; + unsigned int period; + raw_spinlock_t lock; +}; + +enum wb_reason { + WB_REASON_BACKGROUND = 0, + WB_REASON_VMSCAN = 1, + WB_REASON_SYNC = 2, + WB_REASON_PERIODIC = 3, + WB_REASON_LAPTOP_TIMER = 4, + WB_REASON_FS_FREE_SPACE = 5, + WB_REASON_FORKER_THREAD = 6, + WB_REASON_FOREIGN_FLUSH = 7, + WB_REASON_MAX = 8, +}; + +struct percpu_ref_data; + +struct percpu_ref { + long unsigned int percpu_count_ptr; + struct percpu_ref_data *data; +}; + +struct cgroup_subsys_state; + +struct bdi_writeback { + struct backing_dev_info *bdi; + long unsigned int state; + long unsigned int last_old_flush; + struct list_head b_dirty; + struct list_head b_io; + struct list_head b_more_io; + struct list_head b_dirty_time; + spinlock_t list_lock; + struct percpu_counter stat[4]; + long unsigned int congested; + long unsigned int bw_time_stamp; + long unsigned int dirtied_stamp; + long unsigned int written_stamp; + long unsigned int write_bandwidth; + long unsigned int avg_write_bandwidth; + long unsigned int dirty_ratelimit; + long unsigned int balanced_dirty_ratelimit; + struct fprop_local_percpu completions; + int dirty_exceeded; + enum wb_reason start_all_reason; + spinlock_t work_lock; + struct list_head work_list; + struct delayed_work dwork; + long unsigned int dirty_sleep; + struct list_head bdi_node; + struct percpu_ref refcnt; + struct fprop_local_percpu memcg_completions; + struct cgroup_subsys_state *memcg_css; + struct cgroup_subsys_state *blkcg_css; + struct list_head memcg_node; + struct list_head blkcg_node; + union { + struct work_struct release_work; + struct callback_head rcu; + }; +}; + +struct device; + +struct backing_dev_info { + u64 id; + struct rb_node rb_node; + struct list_head bdi_list; + long unsigned int ra_pages; + long unsigned int io_pages; + struct kref refcnt; + unsigned int capabilities; + unsigned int min_ratio; + unsigned int max_ratio; + unsigned int max_prop_frac; + atomic_long_t tot_write_bandwidth; + struct bdi_writeback wb; + struct list_head wb_list; + struct xarray cgwb_tree; + struct mutex cgwb_release_mutex; + struct rw_semaphore wb_switch_rwsem; + wait_queue_head_t wb_waitq; + struct device *dev; + char dev_name[64]; + struct device *owner; + struct timer_list laptop_mode_wb_timer; + struct dentry *debug_dir; +}; + +struct io_cq; + +struct io_context { + atomic_long_t refcount; + atomic_t active_ref; + atomic_t nr_tasks; + spinlock_t lock; + short unsigned int ioprio; + struct xarray 
icq_tree; + struct io_cq *icq_hint; + struct hlist_head icq_list; + struct work_struct release_work; +}; + +struct cgroup; + +struct css_set { + struct cgroup_subsys_state *subsys[12]; + refcount_t refcount; + struct css_set *dom_cset; + struct cgroup *dfl_cgrp; + int nr_tasks; + struct list_head tasks; + struct list_head mg_tasks; + struct list_head dying_tasks; + struct list_head task_iters; + struct list_head e_cset_node[12]; + struct list_head threaded_csets; + struct list_head threaded_csets_node; + struct hlist_node hlist; + struct list_head cgrp_links; + struct list_head mg_preload_node; + struct list_head mg_node; + struct cgroup *mg_src_cgrp; + struct cgroup *mg_dst_cgrp; + struct css_set *mg_dst_cset; + bool dead; + struct callback_head callback_head; +}; + +struct compat_robust_list { + compat_uptr_t next; +}; + +struct compat_robust_list_head { + struct compat_robust_list list; + compat_long_t futex_offset; + compat_uptr_t list_op_pending; +}; + +struct perf_event_groups { + struct rb_root tree; + u64 index; +}; + +struct perf_event_context { + struct pmu *pmu; + raw_spinlock_t lock; + struct mutex mutex; + struct list_head active_ctx_list; + struct perf_event_groups pinned_groups; + struct perf_event_groups flexible_groups; + struct list_head event_list; + struct list_head pinned_active; + struct list_head flexible_active; + int nr_events; + int nr_active; + int is_active; + int nr_stat; + int nr_freq; + int rotate_disable; + int rotate_necessary; + refcount_t refcount; + struct task_struct *task; + u64 time; + u64 timestamp; + struct perf_event_context *parent_ctx; + u64 parent_gen; + u64 generation; + int pin_count; + int nr_cgroups; + void *task_ctx_data; + struct callback_head callback_head; +}; + +struct mempolicy { + atomic_t refcnt; + short unsigned int mode; + short unsigned int flags; + union { + short int preferred_node; + nodemask_t nodes; + } v; + union { + nodemask_t cpuset_mems_allowed; + nodemask_t user_nodemask; + } w; +}; + +struct task_delay_info { + raw_spinlock_t lock; + unsigned int flags; + u64 blkio_start; + u64 blkio_delay; + u64 swapin_delay; + u32 blkio_count; + u32 swapin_count; + u64 freepages_start; + u64 freepages_delay; + u64 thrashing_start; + u64 thrashing_delay; + u32 freepages_count; + u32 thrashing_count; +}; + +struct ftrace_ret_stack { + long unsigned int ret; + long unsigned int func; + long long unsigned int calltime; + long unsigned int fp; +}; + +struct cgroup_subsys; + +struct cgroup_subsys_state { + struct cgroup *cgroup; + struct cgroup_subsys *ss; + struct percpu_ref refcnt; + struct list_head sibling; + struct list_head children; + struct list_head rstat_css_node; + int id; + unsigned int flags; + u64 serial_nr; + atomic_t online_cnt; + struct work_struct destroy_work; + struct rcu_work destroy_rwork; + struct cgroup_subsys_state *parent; +}; + +struct mem_cgroup_id { + int id; + refcount_t ref; +}; + +struct page_counter { + atomic_long_t usage; + long unsigned int min; + long unsigned int low; + long unsigned int high; + long unsigned int max; + struct page_counter *parent; + long unsigned int emin; + atomic_long_t min_usage; + atomic_long_t children_min_usage; + long unsigned int elow; + atomic_long_t low_usage; + atomic_long_t children_low_usage; + long unsigned int watermark; + long unsigned int failcnt; +}; + +struct vmpressure { + long unsigned int scanned; + long unsigned int reclaimed; + long unsigned int tree_scanned; + long unsigned int tree_reclaimed; + spinlock_t sr_lock; + struct list_head events; + struct mutex 
events_lock; + struct work_struct work; +}; + +struct cgroup_file { + struct kernfs_node *kn; + long unsigned int notified_at; + struct timer_list notify_timer; +}; + +struct mem_cgroup_threshold_ary; + +struct mem_cgroup_thresholds { + struct mem_cgroup_threshold_ary *primary; + struct mem_cgroup_threshold_ary *spare; +}; + +struct memcg_padding { + char x[0]; +}; + +enum memcg_kmem_state { + KMEM_NONE = 0, + KMEM_ALLOCATED = 1, + KMEM_ONLINE = 2, +}; + +struct fprop_global { + struct percpu_counter events; + unsigned int period; + seqcount_t sequence; +}; + +struct wb_domain { + spinlock_t lock; + struct fprop_global completions; + struct timer_list period_timer; + long unsigned int period_time; + long unsigned int dirty_limit_tstamp; + long unsigned int dirty_limit; +}; + +struct wb_completion { + atomic_t cnt; + wait_queue_head_t *waitq; +}; + +struct memcg_cgwb_frn { + u64 bdi_id; + int memcg_id; + u64 at; + struct wb_completion done; +}; + +struct deferred_split { + spinlock_t split_queue_lock; + struct list_head split_queue; + long unsigned int split_queue_len; +}; + +struct memcg_vmstats_percpu; + +struct mem_cgroup_per_node; + +struct mem_cgroup { + struct cgroup_subsys_state css; + struct mem_cgroup_id id; + struct page_counter memory; + union { + struct page_counter swap; + struct page_counter memsw; + }; + struct page_counter kmem; + struct page_counter tcpmem; + struct work_struct high_work; + long unsigned int soft_limit; + struct vmpressure vmpressure; + bool use_hierarchy; + bool oom_group; + bool oom_lock; + int under_oom; + int swappiness; + int oom_kill_disable; + struct cgroup_file events_file; + struct cgroup_file events_local_file; + struct cgroup_file swap_events_file; + struct mutex thresholds_lock; + struct mem_cgroup_thresholds thresholds; + struct mem_cgroup_thresholds memsw_thresholds; + struct list_head oom_notify; + long unsigned int move_charge_at_immigrate; + spinlock_t move_lock; + long unsigned int move_lock_flags; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct memcg_padding _pad1_; + atomic_long_t vmstats[40]; + atomic_long_t vmevents[95]; + atomic_long_t memory_events[8]; + atomic_long_t memory_events_local[8]; + long unsigned int socket_pressure; + bool tcpmem_active; + int tcpmem_pressure; + int kmemcg_id; + enum memcg_kmem_state kmem_state; + struct obj_cgroup *objcg; + struct list_head objcg_list; + long: 64; + long: 64; + long: 64; + struct memcg_padding _pad2_; + atomic_t moving_account; + struct task_struct *move_lock_task; + struct memcg_vmstats_percpu *vmstats_local; + struct memcg_vmstats_percpu *vmstats_percpu; + struct list_head cgwb_list; + struct wb_domain cgwb_domain; + struct memcg_cgwb_frn cgwb_frn[4]; + struct list_head event_list; + spinlock_t event_list_lock; + struct deferred_split deferred_split_queue; + struct mem_cgroup_per_node *nodeinfo[0]; +}; + +struct blk_integrity_profile; + +struct blk_integrity { + const struct blk_integrity_profile *profile; + unsigned char flags; + unsigned char tuple_size; + unsigned char interval_exp; + unsigned char tag_size; +}; + +enum rpm_status { + RPM_ACTIVE = 0, + RPM_RESUMING = 1, + RPM_SUSPENDED = 2, + RPM_SUSPENDING = 3, +}; + +struct blk_rq_stat { + u64 mean; + u64 min; + u64 max; + u32 nr_samples; + u64 batch; +}; + +enum blk_zoned_model { + BLK_ZONED_NONE = 0, + BLK_ZONED_HA = 1, + BLK_ZONED_HM = 2, +}; + +struct queue_limits { + long unsigned int bounce_pfn; + long unsigned int seg_boundary_mask; + long unsigned int virt_boundary_mask; + unsigned int 
max_hw_sectors; + unsigned int max_dev_sectors; + unsigned int chunk_sectors; + unsigned int max_sectors; + unsigned int max_segment_size; + unsigned int physical_block_size; + unsigned int logical_block_size; + unsigned int alignment_offset; + unsigned int io_min; + unsigned int io_opt; + unsigned int max_discard_sectors; + unsigned int max_hw_discard_sectors; + unsigned int max_write_same_sectors; + unsigned int max_write_zeroes_sectors; + unsigned int max_zone_append_sectors; + unsigned int discard_granularity; + unsigned int discard_alignment; + short unsigned int max_segments; + short unsigned int max_integrity_segments; + short unsigned int max_discard_segments; + unsigned char misaligned; + unsigned char discard_misaligned; + unsigned char raid_partial_stripes_expensive; + enum blk_zoned_model zoned; +}; + +struct bsg_ops; + +struct bsg_class_device { + struct device *class_dev; + int minor; + struct request_queue *queue; + const struct bsg_ops *ops; +}; + +typedef void *mempool_alloc_t(gfp_t, void *); + +typedef void mempool_free_t(void *, void *); + +struct mempool_s { + spinlock_t lock; + int min_nr; + int curr_nr; + void **elements; + void *pool_data; + mempool_alloc_t *alloc; + mempool_free_t *free; + wait_queue_head_t wait; +}; + +typedef struct mempool_s mempool_t; + +struct bio_set { + struct kmem_cache *bio_slab; + unsigned int front_pad; + mempool_t bio_pool; + mempool_t bvec_pool; + mempool_t bio_integrity_pool; + mempool_t bvec_integrity_pool; + spinlock_t rescue_lock; + struct bio_list rescue_list; + struct work_struct rescue_work; + struct workqueue_struct *rescue_workqueue; +}; + +struct request; + +struct elevator_queue; + +struct blk_queue_stats; + +struct rq_qos; + +struct blk_mq_ops; + +struct blk_mq_ctx; + +struct blk_mq_hw_ctx; + +struct blk_keyslot_manager; + +struct blk_stat_callback; + +struct blkcg_gq; + +struct blk_trace; + +struct blk_flush_queue; + +struct throtl_data; + +struct blk_mq_tag_set; + +struct request_queue { + struct request *last_merge; + struct elevator_queue *elevator; + struct percpu_ref q_usage_counter; + struct blk_queue_stats *stats; + struct rq_qos *rq_qos; + const struct blk_mq_ops *mq_ops; + struct blk_mq_ctx *queue_ctx; + unsigned int queue_depth; + struct blk_mq_hw_ctx **queue_hw_ctx; + unsigned int nr_hw_queues; + struct backing_dev_info *backing_dev_info; + void *queuedata; + long unsigned int queue_flags; + atomic_t pm_only; + int id; + gfp_t bounce_gfp; + spinlock_t queue_lock; + struct kobject kobj; + struct kobject *mq_kobj; + struct blk_integrity integrity; + struct device *dev; + enum rpm_status rpm_status; + unsigned int nr_pending; + long unsigned int nr_requests; + unsigned int dma_pad_mask; + unsigned int dma_alignment; + struct blk_keyslot_manager *ksm; + unsigned int rq_timeout; + int poll_nsec; + struct blk_stat_callback *poll_cb; + struct blk_rq_stat poll_stat[16]; + struct timer_list timeout; + struct work_struct timeout_work; + atomic_t nr_active_requests_shared_sbitmap; + struct list_head icq_list; + long unsigned int blkcg_pols[1]; + struct blkcg_gq *root_blkg; + struct list_head blkg_list; + struct queue_limits limits; + unsigned int required_elevator_features; + unsigned int nr_zones; + long unsigned int *conv_zones_bitmap; + long unsigned int *seq_zones_wlock; + unsigned int max_open_zones; + unsigned int max_active_zones; + unsigned int sg_timeout; + unsigned int sg_reserved_size; + int node; + struct mutex debugfs_mutex; + struct blk_trace *blk_trace; + struct blk_flush_queue *fq; + struct list_head 
requeue_list; + spinlock_t requeue_lock; + struct delayed_work requeue_work; + struct mutex sysfs_lock; + struct mutex sysfs_dir_lock; + struct list_head unused_hctx_list; + spinlock_t unused_hctx_lock; + int mq_freeze_depth; + struct bsg_class_device bsg_dev; + struct throtl_data *td; + struct callback_head callback_head; + wait_queue_head_t mq_freeze_wq; + struct mutex mq_freeze_lock; + struct blk_mq_tag_set *tag_set; + struct list_head tag_set_list; + struct bio_set bio_split; + struct dentry *debugfs_dir; + bool mq_sysfs_init_done; + size_t cmd_size; + u64 write_hints[5]; +}; + +enum uprobe_task_state { + UTASK_RUNNING = 0, + UTASK_SSTEP = 1, + UTASK_SSTEP_ACK = 2, + UTASK_SSTEP_TRAPPED = 3, +}; + +struct arch_uprobe_task {}; + +struct uprobe; + +struct return_instance; + +struct uprobe_task { + enum uprobe_task_state state; + union { + struct { + struct arch_uprobe_task autask; + long unsigned int vaddr; + }; + struct { + struct callback_head dup_xol_work; + long unsigned int dup_xol_addr; + }; + }; + struct uprobe *active_uprobe; + long unsigned int xol_vaddr; + struct return_instance *return_instances; + unsigned int depth; +}; + +struct vm_struct { + struct vm_struct *next; + void *addr; + long unsigned int size; + long unsigned int flags; + struct page **pages; + unsigned int nr_pages; + phys_addr_t phys_addr; + const void *caller; +}; + +struct kstat { + u32 result_mask; + umode_t mode; + unsigned int nlink; + uint32_t blksize; + u64 attributes; + u64 attributes_mask; + u64 ino; + dev_t dev; + dev_t rdev; + kuid_t uid; + kgid_t gid; + loff_t size; + struct timespec64 atime; + struct timespec64 mtime; + struct timespec64 ctime; + struct timespec64 btime; + u64 blocks; + u64 mnt_id; +}; + +struct return_instance { + struct uprobe *uprobe; + long unsigned int func; + long unsigned int stack; + long unsigned int orig_ret_vaddr; + bool chained; + struct return_instance *next; +}; + +typedef u32 errseq_t; + +struct address_space_operations; + +struct address_space { + struct inode *host; + struct xarray i_pages; + gfp_t gfp_mask; + atomic_t i_mmap_writable; + atomic_t nr_thps; + struct rb_root_cached i_mmap; + struct rw_semaphore i_mmap_rwsem; + long unsigned int nrpages; + long unsigned int nrexceptional; + long unsigned int writeback_index; + const struct address_space_operations *a_ops; + long unsigned int flags; + errseq_t wb_err; + spinlock_t private_lock; + struct list_head private_list; + void *private_data; +}; + +struct vmem_altmap { + const long unsigned int base_pfn; + const long unsigned int end_pfn; + const long unsigned int reserve; + long unsigned int free; + long unsigned int align; + long unsigned int alloc; +}; + +enum memory_type { + MEMORY_DEVICE_PRIVATE = 1, + MEMORY_DEVICE_FS_DAX = 2, + MEMORY_DEVICE_GENERIC = 3, + MEMORY_DEVICE_PCI_P2PDMA = 4, +}; + +struct range { + u64 start; + u64 end; +}; + +struct dev_pagemap_ops; + +struct dev_pagemap { + struct vmem_altmap altmap; + struct percpu_ref *ref; + struct percpu_ref internal_ref; + struct completion done; + enum memory_type type; + unsigned int flags; + const struct dev_pagemap_ops *ops; + void *owner; + int nr_range; + union { + struct range range; + struct range ranges[0]; + }; +}; + +struct obj_cgroup { + struct percpu_ref refcnt; + struct mem_cgroup *memcg; + atomic_t nr_charged_bytes; + union { + struct list_head list; + struct callback_head rcu; + }; +}; + +struct vfsmount; + +struct path { + struct vfsmount *mnt; + struct dentry *dentry; +}; + +enum rw_hint { + WRITE_LIFE_NOT_SET = 0, + WRITE_LIFE_NONE = 
1, + WRITE_LIFE_SHORT = 2, + WRITE_LIFE_MEDIUM = 3, + WRITE_LIFE_LONG = 4, + WRITE_LIFE_EXTREME = 5, +}; + +struct fown_struct { + rwlock_t lock; + struct pid *pid; + enum pid_type pid_type; + kuid_t uid; + kuid_t euid; + int signum; +}; + +struct file_ra_state { + long unsigned int start; + unsigned int size; + unsigned int async_size; + unsigned int ra_pages; + unsigned int mmap_miss; + loff_t prev_pos; +}; + +struct file { + union { + struct llist_node fu_llist; + struct callback_head fu_rcuhead; + } f_u; + struct path f_path; + struct inode *f_inode; + const struct file_operations *f_op; + spinlock_t f_lock; + enum rw_hint f_write_hint; + atomic_long_t f_count; + unsigned int f_flags; + fmode_t f_mode; + struct mutex f_pos_lock; + loff_t f_pos; + struct fown_struct f_owner; + const struct cred *f_cred; + struct file_ra_state f_ra; + u64 f_version; + void *f_security; + void *private_data; + struct list_head f_ep_links; + struct list_head f_tfile_llink; + struct address_space *f_mapping; + errseq_t f_wb_err; + errseq_t f_sb_err; +}; + +struct anon_vma { + struct anon_vma *root; + struct rw_semaphore rwsem; + atomic_t refcount; + unsigned int degree; + struct anon_vma *parent; + struct rb_root_cached rb_root; +}; + +typedef unsigned int vm_fault_t; + +enum page_entry_size { + PE_SIZE_PTE = 0, + PE_SIZE_PMD = 1, + PE_SIZE_PUD = 2, +}; + +struct vm_fault; + +struct vm_operations_struct { + void (*open)(struct vm_area_struct *); + void (*close)(struct vm_area_struct *); + int (*split)(struct vm_area_struct *, long unsigned int); + int (*mremap)(struct vm_area_struct *); + vm_fault_t (*fault)(struct vm_fault *); + vm_fault_t (*huge_fault)(struct vm_fault *, enum page_entry_size); + void (*map_pages)(struct vm_fault *, long unsigned int, long unsigned int); + long unsigned int (*pagesize)(struct vm_area_struct *); + vm_fault_t (*page_mkwrite)(struct vm_fault *); + vm_fault_t (*pfn_mkwrite)(struct vm_fault *); + int (*access)(struct vm_area_struct *, long unsigned int, void *, int, int); + const char * (*name)(struct vm_area_struct *); + int (*set_policy)(struct vm_area_struct *, struct mempolicy *); + struct mempolicy * (*get_policy)(struct vm_area_struct *, long unsigned int); + struct page * (*find_special_page)(struct vm_area_struct *, long unsigned int); +}; + +struct core_thread { + struct task_struct *task; + struct core_thread *next; +}; + +struct core_state { + atomic_t nr_threads; + struct core_thread dumper; + struct completion startup; +}; + +struct linux_binprm; + +struct coredump_params; + +struct linux_binfmt { + struct list_head lh; + struct module *module; + int (*load_binary)(struct linux_binprm *); + int (*load_shlib)(struct file *); + int (*core_dump)(struct coredump_params *); + long unsigned int min_coredump; +}; + +struct vm_fault { + struct vm_area_struct *vma; + unsigned int flags; + gfp_t gfp_mask; + long unsigned int pgoff; + long unsigned int address; + pmd_t *pmd; + pud_t *pud; + pte_t orig_pte; + struct page *cow_page; + struct page *page; + pte_t *pte; + spinlock_t *ptl; + pgtable_t prealloc_pte; +}; + +struct free_area { + struct list_head free_list[6]; + long unsigned int nr_free; +}; + +struct zone_padding { + char x[0]; +}; + +enum node_stat_item { + NR_LRU_BASE = 0, + NR_INACTIVE_ANON = 0, + NR_ACTIVE_ANON = 1, + NR_INACTIVE_FILE = 2, + NR_ACTIVE_FILE = 3, + NR_UNEVICTABLE = 4, + NR_SLAB_RECLAIMABLE_B = 5, + NR_SLAB_UNRECLAIMABLE_B = 6, + NR_ISOLATED_ANON = 7, + NR_ISOLATED_FILE = 8, + WORKINGSET_NODES = 9, + WORKINGSET_REFAULT_BASE = 10, + 
WORKINGSET_REFAULT_ANON = 10, + WORKINGSET_REFAULT_FILE = 11, + WORKINGSET_ACTIVATE_BASE = 12, + WORKINGSET_ACTIVATE_ANON = 12, + WORKINGSET_ACTIVATE_FILE = 13, + WORKINGSET_RESTORE_BASE = 14, + WORKINGSET_RESTORE_ANON = 14, + WORKINGSET_RESTORE_FILE = 15, + WORKINGSET_NODERECLAIM = 16, + NR_ANON_MAPPED = 17, + NR_FILE_MAPPED = 18, + NR_FILE_PAGES = 19, + NR_FILE_DIRTY = 20, + NR_WRITEBACK = 21, + NR_WRITEBACK_TEMP = 22, + NR_SHMEM = 23, + NR_SHMEM_THPS = 24, + NR_SHMEM_PMDMAPPED = 25, + NR_FILE_THPS = 26, + NR_FILE_PMDMAPPED = 27, + NR_ANON_THPS = 28, + NR_VMSCAN_WRITE = 29, + NR_VMSCAN_IMMEDIATE = 30, + NR_DIRTIED = 31, + NR_WRITTEN = 32, + NR_KERNEL_MISC_RECLAIMABLE = 33, + NR_FOLL_PIN_ACQUIRED = 34, + NR_FOLL_PIN_RELEASED = 35, + NR_KERNEL_STACK_KB = 36, + NR_VM_NODE_STAT_ITEMS = 37, +}; + +enum lru_list { + LRU_INACTIVE_ANON = 0, + LRU_ACTIVE_ANON = 1, + LRU_INACTIVE_FILE = 2, + LRU_ACTIVE_FILE = 3, + LRU_UNEVICTABLE = 4, + NR_LRU_LISTS = 5, +}; + +struct pglist_data; + +struct lruvec { + struct list_head lists[5]; + long unsigned int anon_cost; + long unsigned int file_cost; + atomic_long_t nonresident_age; + long unsigned int refaults[2]; + long unsigned int flags; + struct pglist_data *pgdat; +}; + +struct per_cpu_pageset; + +struct zone { + long unsigned int _watermark[3]; + long unsigned int watermark_boost; + long unsigned int nr_reserved_highatomic; + long int lowmem_reserve[4]; + int node; + struct pglist_data *zone_pgdat; + struct per_cpu_pageset *pageset; + long unsigned int zone_start_pfn; + atomic_long_t managed_pages; + long unsigned int spanned_pages; + long unsigned int present_pages; + const char *name; + long unsigned int nr_isolate_pageblock; + seqlock_t span_seqlock; + int initialized; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + struct zone_padding _pad1_; + struct free_area free_area[11]; + long unsigned int flags; + spinlock_t lock; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct zone_padding _pad2_; + long unsigned int percpu_drift_mark; + long unsigned int compact_cached_free_pfn; + long unsigned int compact_cached_migrate_pfn[2]; + long unsigned int compact_init_migrate_pfn; + long unsigned int compact_init_free_pfn; + unsigned int compact_considered; + unsigned int compact_defer_shift; + int compact_order_failed; + bool compact_blockskip_flush; + bool contiguous; + short: 16; + struct zone_padding _pad3_; + atomic_long_t vm_stat[12]; + atomic_long_t vm_numa_stat[6]; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct zoneref { + struct zone *zone; + int zone_idx; +}; + +struct zonelist { + struct zoneref _zonerefs[513]; +}; + +enum zone_type { + ZONE_DMA = 0, + ZONE_DMA32 = 1, + ZONE_NORMAL = 2, + ZONE_MOVABLE = 3, + __MAX_NR_ZONES = 4, +}; + +struct per_cpu_nodestat; + +struct pglist_data { + struct zone node_zones[4]; + struct zonelist node_zonelists[2]; + int nr_zones; + spinlock_t node_size_lock; + long unsigned int node_start_pfn; + long unsigned int node_present_pages; + long unsigned int node_spanned_pages; + int node_id; + wait_queue_head_t kswapd_wait; + wait_queue_head_t pfmemalloc_wait; + struct task_struct *kswapd; + int kswapd_order; + enum zone_type kswapd_highest_zoneidx; + int kswapd_failures; + int kcompactd_max_order; + enum zone_type kcompactd_highest_zoneidx; + wait_queue_head_t kcompactd_wait; + struct task_struct *kcompactd; + long unsigned int totalreserve_pages; + long unsigned int min_unmapped_pages; + long unsigned int 
min_slab_pages; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct zone_padding _pad1_; + spinlock_t lru_lock; + struct deferred_split deferred_split_queue; + struct lruvec __lruvec; + long unsigned int flags; + long: 64; + struct zone_padding _pad2_; + struct per_cpu_nodestat *per_cpu_nodestats; + atomic_long_t vm_stat[37]; + long: 64; + long: 64; +}; + +typedef unsigned int isolate_mode_t; + +struct per_cpu_pages { + int count; + int high; + int batch; + struct list_head lists[3]; +}; + +struct per_cpu_pageset { + struct per_cpu_pages pcp; + s8 expire; + u16 vm_numa_stat_diff[6]; + s8 stat_threshold; + s8 vm_stat_diff[12]; +}; + +struct per_cpu_nodestat { + s8 stat_threshold; + s8 vm_node_stat_diff[37]; +}; + +struct rcu_segcblist { + struct callback_head *head; + struct callback_head **tails[4]; + long unsigned int gp_seq[4]; + long int len; + u8 enabled; + u8 offloaded; +}; + +struct srcu_node; + +struct srcu_data { + long unsigned int srcu_lock_count[2]; + long unsigned int srcu_unlock_count[2]; + long: 64; + long: 64; + long: 64; + long: 64; + spinlock_t lock; + struct rcu_segcblist srcu_cblist; + long unsigned int srcu_gp_seq_needed; + long unsigned int srcu_gp_seq_needed_exp; + bool srcu_cblist_invoking; + struct timer_list delay_work; + struct work_struct work; + struct callback_head srcu_barrier_head; + struct srcu_node *mynode; + long unsigned int grpmask; + int cpu; + struct srcu_struct *ssp; + long: 64; + long: 64; +}; + +struct srcu_node { + spinlock_t lock; + long unsigned int srcu_have_cbs[4]; + long unsigned int srcu_data_have_cbs[4]; + long unsigned int srcu_gp_seq_needed_exp; + struct srcu_node *srcu_parent; + int grplo; + int grphi; +}; + +struct srcu_struct { + struct srcu_node node[9]; + struct srcu_node *level[3]; + struct mutex srcu_cb_mutex; + spinlock_t lock; + struct mutex srcu_gp_mutex; + unsigned int srcu_idx; + long unsigned int srcu_gp_seq; + long unsigned int srcu_gp_seq_needed; + long unsigned int srcu_gp_seq_needed_exp; + long unsigned int srcu_last_gp_end; + struct srcu_data *sda; + long unsigned int srcu_barrier_seq; + struct mutex srcu_barrier_mutex; + struct completion srcu_barrier_completion; + atomic_t srcu_barrier_cpu_cnt; + struct delayed_work work; +}; + +typedef int proc_handler(struct ctl_table *, int, void *, size_t *, loff_t *); + +struct ctl_table_poll; + +struct ctl_table { + const char *procname; + void *data; + int maxlen; + umode_t mode; + struct ctl_table *child; + proc_handler *proc_handler; + struct ctl_table_poll *poll; + void *extra1; + void *extra2; +}; + +struct ctl_table_poll { + atomic_t event; + wait_queue_head_t wait; +}; + +struct ctl_node { + struct rb_node node; + struct ctl_table_header *header; +}; + +struct ctl_table_root { + struct ctl_table_set default_set; + struct ctl_table_set * (*lookup)(struct ctl_table_root *); + void (*set_ownership)(struct ctl_table_header *, struct ctl_table *, kuid_t *, kgid_t *); + int (*permissions)(struct ctl_table_header *, struct ctl_table *); +}; + +enum umh_disable_depth { + UMH_ENABLED = 0, + UMH_FREEZING = 1, + UMH_DISABLED = 2, +}; + +typedef __u64 Elf64_Addr; + +typedef __u16 Elf64_Half; + +typedef __u32 Elf64_Word; + +typedef __u64 Elf64_Xword; + +struct elf64_sym { + Elf64_Word st_name; + unsigned char st_info; + unsigned char st_other; + Elf64_Half st_shndx; + Elf64_Addr st_value; + Elf64_Xword st_size; +}; + +struct hlist_bl_node; + +struct hlist_bl_head { + struct hlist_bl_node *first; +}; + +struct hlist_bl_node { + struct hlist_bl_node *next; + 
struct hlist_bl_node **pprev; +}; + +struct lockref { + union { + __u64 lock_count; + struct { + spinlock_t lock; + int count; + }; + }; +}; + +struct qstr { + union { + struct { + u32 hash; + u32 len; + }; + u64 hash_len; + }; + const unsigned char *name; +}; + +struct dentry_operations; + +struct dentry { + unsigned int d_flags; + seqcount_spinlock_t d_seq; + struct hlist_bl_node d_hash; + struct dentry *d_parent; + struct qstr d_name; + struct inode *d_inode; + unsigned char d_iname[32]; + struct lockref d_lockref; + const struct dentry_operations *d_op; + struct super_block *d_sb; + long unsigned int d_time; + void *d_fsdata; + union { + struct list_head d_lru; + wait_queue_head_t *d_wait; + }; + struct list_head d_child; + struct list_head d_subdirs; + union { + struct hlist_node d_alias; + struct hlist_bl_node d_in_lookup_hash; + struct callback_head d_rcu; + } d_u; +}; + +struct posix_acl; + +struct inode_operations; + +struct file_lock_context; + +struct block_device; + +struct cdev; + +struct fsnotify_mark_connector; + +struct fscrypt_info; + +struct fsverity_info; + +struct inode { + umode_t i_mode; + short unsigned int i_opflags; + kuid_t i_uid; + kgid_t i_gid; + unsigned int i_flags; + struct posix_acl *i_acl; + struct posix_acl *i_default_acl; + const struct inode_operations *i_op; + struct super_block *i_sb; + struct address_space *i_mapping; + void *i_security; + long unsigned int i_ino; + union { + const unsigned int i_nlink; + unsigned int __i_nlink; + }; + dev_t i_rdev; + loff_t i_size; + struct timespec64 i_atime; + struct timespec64 i_mtime; + struct timespec64 i_ctime; + spinlock_t i_lock; + short unsigned int i_bytes; + u8 i_blkbits; + u8 i_write_hint; + blkcnt_t i_blocks; + long unsigned int i_state; + struct rw_semaphore i_rwsem; + long unsigned int dirtied_when; + long unsigned int dirtied_time_when; + struct hlist_node i_hash; + struct list_head i_io_list; + struct bdi_writeback *i_wb; + int i_wb_frn_winner; + u16 i_wb_frn_avg_time; + u16 i_wb_frn_history; + struct list_head i_lru; + struct list_head i_sb_list; + struct list_head i_wb_list; + union { + struct hlist_head i_dentry; + struct callback_head i_rcu; + }; + atomic64_t i_version; + atomic64_t i_sequence; + atomic_t i_count; + atomic_t i_dio_count; + atomic_t i_writecount; + atomic_t i_readcount; + union { + const struct file_operations *i_fop; + void (*free_inode)(struct inode *); + }; + struct file_lock_context *i_flctx; + struct address_space i_data; + struct list_head i_devices; + union { + struct pipe_inode_info *i_pipe; + struct block_device *i_bdev; + struct cdev *i_cdev; + char *i_link; + unsigned int i_dir_seq; + }; + __u32 i_generation; + __u32 i_fsnotify_mask; + struct fsnotify_mark_connector *i_fsnotify_marks; + struct fscrypt_info *i_crypt_info; + struct fsverity_info *i_verity_info; + void *i_private; +}; + +struct dentry_operations { + int (*d_revalidate)(struct dentry *, unsigned int); + int (*d_weak_revalidate)(struct dentry *, unsigned int); + int (*d_hash)(const struct dentry *, struct qstr *); + int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *); + int (*d_delete)(const struct dentry *); + int (*d_init)(struct dentry *); + void (*d_release)(struct dentry *); + void (*d_prune)(struct dentry *); + void (*d_iput)(struct dentry *, struct inode *); + char * (*d_dname)(struct dentry *, char *, int); + struct vfsmount * (*d_automount)(struct path *); + int (*d_manage)(const struct path *, bool); + struct dentry * (*d_real)(struct dentry *, const struct 
inode *); + long: 64; + long: 64; + long: 64; +}; + +struct mtd_info; + +typedef long long int qsize_t; + +struct quota_format_type; + +struct mem_dqinfo { + struct quota_format_type *dqi_format; + int dqi_fmt_id; + struct list_head dqi_dirty_list; + long unsigned int dqi_flags; + unsigned int dqi_bgrace; + unsigned int dqi_igrace; + qsize_t dqi_max_spc_limit; + qsize_t dqi_max_ino_limit; + void *dqi_priv; +}; + +struct quota_format_ops; + +struct quota_info { + unsigned int flags; + struct rw_semaphore dqio_sem; + struct inode *files[3]; + struct mem_dqinfo info[3]; + const struct quota_format_ops *ops[3]; +}; + +struct rcu_sync { + int gp_state; + int gp_count; + wait_queue_head_t gp_wait; + struct callback_head cb_head; +}; + +struct rcuwait { + struct task_struct *task; +}; + +struct percpu_rw_semaphore { + struct rcu_sync rss; + unsigned int *read_count; + struct rcuwait writer; + wait_queue_head_t waiters; + atomic_t block; +}; + +struct sb_writers { + int frozen; + wait_queue_head_t wait_unfrozen; + struct percpu_rw_semaphore rw_sem[3]; +}; + +typedef struct { + __u8 b[16]; +} uuid_t; + +struct shrink_control; + +struct shrinker { + long unsigned int (*count_objects)(struct shrinker *, struct shrink_control *); + long unsigned int (*scan_objects)(struct shrinker *, struct shrink_control *); + long int batch; + int seeks; + unsigned int flags; + struct list_head list; + int id; + atomic_long_t *nr_deferred; +}; + +struct list_lru_node; + +struct list_lru { + struct list_lru_node *node; + struct list_head list; + int shrinker_id; + bool memcg_aware; +}; + +struct super_operations; + +struct dquot_operations; + +struct quotactl_ops; + +struct export_operations; + +struct xattr_handler; + +struct fscrypt_operations; + +struct fsverity_operations; + +struct unicode_map; + +struct super_block { + struct list_head s_list; + dev_t s_dev; + unsigned char s_blocksize_bits; + long unsigned int s_blocksize; + loff_t s_maxbytes; + struct file_system_type *s_type; + const struct super_operations *s_op; + const struct dquot_operations *dq_op; + const struct quotactl_ops *s_qcop; + const struct export_operations *s_export_op; + long unsigned int s_flags; + long unsigned int s_iflags; + long unsigned int s_magic; + struct dentry *s_root; + struct rw_semaphore s_umount; + int s_count; + atomic_t s_active; + void *s_security; + const struct xattr_handler **s_xattr; + const struct fscrypt_operations *s_cop; + struct key *s_master_keys; + const struct fsverity_operations *s_vop; + struct unicode_map *s_encoding; + __u16 s_encoding_flags; + struct hlist_bl_head s_roots; + struct list_head s_mounts; + struct block_device *s_bdev; + struct backing_dev_info *s_bdi; + struct mtd_info *s_mtd; + struct hlist_node s_instances; + unsigned int s_quota_types; + struct quota_info s_dquot; + struct sb_writers s_writers; + void *s_fs_info; + u32 s_time_gran; + time64_t s_time_min; + time64_t s_time_max; + __u32 s_fsnotify_mask; + struct fsnotify_mark_connector *s_fsnotify_marks; + char s_id[32]; + uuid_t s_uuid; + unsigned int s_max_links; + fmode_t s_mode; + struct mutex s_vfs_rename_mutex; + const char *s_subtype; + const struct dentry_operations *s_d_op; + int cleancache_poolid; + struct shrinker s_shrink; + atomic_long_t s_remove_count; + atomic_long_t s_fsnotify_inode_refs; + int s_readonly_remount; + errseq_t s_wb_err; + struct workqueue_struct *s_dio_done_wq; + struct hlist_head s_pins; + struct user_namespace *s_user_ns; + struct list_lru s_dentry_lru; + struct list_lru s_inode_lru; + struct callback_head 
rcu; + struct work_struct destroy_work; + struct mutex s_sync_lock; + int s_stack_depth; + int: 32; + spinlock_t s_inode_list_lock; + struct list_head s_inodes; + spinlock_t s_inode_wblist_lock; + struct list_head s_inodes_wb; + long: 64; + long: 64; +}; + +struct shrink_control { + gfp_t gfp_mask; + int nid; + long unsigned int nr_to_scan; + long unsigned int nr_scanned; + struct mem_cgroup *memcg; +}; + +struct list_lru_one { + struct list_head list; + long int nr_items; +}; + +struct list_lru_memcg { + struct callback_head rcu; + struct list_lru_one *lru[0]; +}; + +struct list_lru_node { + spinlock_t lock; + struct list_lru_one lru; + struct list_lru_memcg *memcg_lrus; + long int nr_items; + long: 64; + long: 64; +}; + +enum migrate_mode { + MIGRATE_ASYNC = 0, + MIGRATE_SYNC_LIGHT = 1, + MIGRATE_SYNC = 2, + MIGRATE_SYNC_NO_COPY = 3, +}; + +struct exception_table_entry { + int insn; + int fixup; +}; + +struct cgroup_base_stat { + struct task_cputime cputime; +}; + +struct psi_group_cpu; + +struct psi_group { + struct mutex avgs_lock; + struct psi_group_cpu *pcpu; + u64 avg_total[5]; + u64 avg_last_update; + u64 avg_next_update; + struct delayed_work avgs_work; + u64 total[10]; + long unsigned int avg[15]; + struct task_struct *poll_task; + struct timer_list poll_timer; + wait_queue_head_t poll_wait; + atomic_t poll_wakeup; + struct mutex trigger_lock; + struct list_head triggers; + u32 nr_triggers[5]; + u32 poll_states; + u64 poll_min_period; + u64 polling_total[5]; + u64 polling_next_update; + u64 polling_until; +}; + +struct bpf_prog_array; + +struct cgroup_bpf { + struct bpf_prog_array *effective[38]; + struct list_head progs[38]; + u32 flags[38]; + struct list_head storages; + struct bpf_prog_array *inactive; + struct percpu_ref refcnt; + struct work_struct release_work; +}; + +struct cgroup_freezer_state { + bool freeze; + int e_freeze; + int nr_frozen_descendants; + int nr_frozen_tasks; +}; + +struct cgroup_root; + +struct cgroup_rstat_cpu; + +struct cgroup { + struct cgroup_subsys_state self; + long unsigned int flags; + int level; + int max_depth; + int nr_descendants; + int nr_dying_descendants; + int max_descendants; + int nr_populated_csets; + int nr_populated_domain_children; + int nr_populated_threaded_children; + int nr_threaded_children; + struct kernfs_node *kn; + struct cgroup_file procs_file; + struct cgroup_file events_file; + u16 subtree_control; + u16 subtree_ss_mask; + u16 old_subtree_control; + u16 old_subtree_ss_mask; + struct cgroup_subsys_state *subsys[12]; + struct cgroup_root *root; + struct list_head cset_links; + struct list_head e_csets[12]; + struct cgroup *dom_cgrp; + struct cgroup *old_dom_cgrp; + struct cgroup_rstat_cpu *rstat_cpu; + struct list_head rstat_css_list; + struct cgroup_base_stat last_bstat; + struct cgroup_base_stat bstat; + struct prev_cputime prev_cputime; + struct list_head pidlists; + struct mutex pidlist_mutex; + wait_queue_head_t offline_waitq; + struct work_struct release_agent_work; + struct psi_group psi; + struct cgroup_bpf bpf; + atomic_t congestion_count; + struct cgroup_freezer_state freezer; + u64 ancestor_ids[0]; +}; + +struct key_tag { + struct callback_head rcu; + refcount_t usage; + bool removed; +}; + +typedef int (*request_key_actor_t)(struct key *, void *); + +struct key_preparsed_payload; + +struct key_match_data; + +struct kernel_pkey_params; + +struct kernel_pkey_query; + +struct key_type { + const char *name; + size_t def_datalen; + unsigned int flags; + int (*vet_description)(const char *); + int (*preparse)(struct 
key_preparsed_payload *); + void (*free_preparse)(struct key_preparsed_payload *); + int (*instantiate)(struct key *, struct key_preparsed_payload *); + int (*update)(struct key *, struct key_preparsed_payload *); + int (*match_preparse)(struct key_match_data *); + void (*match_free)(struct key_match_data *); + void (*revoke)(struct key *); + void (*destroy)(struct key *); + void (*describe)(const struct key *, struct seq_file *); + long int (*read)(const struct key *, char *, size_t); + request_key_actor_t request_key; + struct key_restriction * (*lookup_restriction)(const char *); + int (*asym_query)(const struct kernel_pkey_params *, struct kernel_pkey_query *); + int (*asym_eds_op)(struct kernel_pkey_params *, const void *, void *); + int (*asym_verify_signature)(struct kernel_pkey_params *, const void *, const void *); + struct list_head link; + struct lock_class_key lock_class; +}; + +typedef int (*key_restrict_link_func_t)(struct key *, const struct key_type *, const union key_payload *, struct key *); + +struct key_restriction { + key_restrict_link_func_t check; + struct key *key; + struct key_type *keytype; +}; + +struct group_info { + atomic_t usage; + int ngroups; + kgid_t gid[0]; +}; + +struct taskstats { + __u16 version; + __u32 ac_exitcode; + __u8 ac_flag; + __u8 ac_nice; + __u64 cpu_count; + __u64 cpu_delay_total; + __u64 blkio_count; + __u64 blkio_delay_total; + __u64 swapin_count; + __u64 swapin_delay_total; + __u64 cpu_run_real_total; + __u64 cpu_run_virtual_total; + char ac_comm[32]; + __u8 ac_sched; + __u8 ac_pad[3]; + int: 32; + __u32 ac_uid; + __u32 ac_gid; + __u32 ac_pid; + __u32 ac_ppid; + __u32 ac_btime; + __u64 ac_etime; + __u64 ac_utime; + __u64 ac_stime; + __u64 ac_minflt; + __u64 ac_majflt; + __u64 coremem; + __u64 virtmem; + __u64 hiwater_rss; + __u64 hiwater_vm; + __u64 read_char; + __u64 write_char; + __u64 read_syscalls; + __u64 write_syscalls; + __u64 read_bytes; + __u64 write_bytes; + __u64 cancelled_write_bytes; + __u64 nvcsw; + __u64 nivcsw; + __u64 ac_utimescaled; + __u64 ac_stimescaled; + __u64 cpu_scaled_run_real_total; + __u64 freepages_count; + __u64 freepages_delay_total; + __u64 thrashing_count; + __u64 thrashing_delay_total; + __u64 ac_btime64; +}; + +struct delayed_call { + void (*fn)(void *); + void *arg; +}; + +struct io_cq { + struct request_queue *q; + struct io_context *ioc; + union { + struct list_head q_node; + struct kmem_cache *__rcu_icq_cache; + }; + union { + struct hlist_node ioc_node; + struct callback_head __rcu_head; + }; + unsigned int flags; +}; + +struct wait_page_queue; + +struct kiocb { + struct file *ki_filp; + loff_t ki_pos; + void (*ki_complete)(struct kiocb *, long int, long int); + void *private; + int ki_flags; + u16 ki_hint; + u16 ki_ioprio; + union { + unsigned int ki_cookie; + struct wait_page_queue *ki_waitq; + }; +}; + +struct iattr { + unsigned int ia_valid; + umode_t ia_mode; + kuid_t ia_uid; + kgid_t ia_gid; + loff_t ia_size; + struct timespec64 ia_atime; + struct timespec64 ia_mtime; + struct timespec64 ia_ctime; + struct file *ia_file; +}; + +typedef __kernel_uid32_t projid_t; + +typedef struct { + projid_t val; +} kprojid_t; + +enum quota_type { + USRQUOTA = 0, + GRPQUOTA = 1, + PRJQUOTA = 2, +}; + +struct kqid { + union { + kuid_t uid; + kgid_t gid; + kprojid_t projid; + }; + enum quota_type type; +}; + +struct mem_dqblk { + qsize_t dqb_bhardlimit; + qsize_t dqb_bsoftlimit; + qsize_t dqb_curspace; + qsize_t dqb_rsvspace; + qsize_t dqb_ihardlimit; + qsize_t dqb_isoftlimit; + qsize_t dqb_curinodes; + time64_t 
dqb_btime; + time64_t dqb_itime; +}; + +struct dquot { + struct hlist_node dq_hash; + struct list_head dq_inuse; + struct list_head dq_free; + struct list_head dq_dirty; + struct mutex dq_lock; + spinlock_t dq_dqb_lock; + atomic_t dq_count; + struct super_block *dq_sb; + struct kqid dq_id; + loff_t dq_off; + long unsigned int dq_flags; + struct mem_dqblk dq_dqb; +}; + +struct quota_format_type { + int qf_fmt_id; + const struct quota_format_ops *qf_ops; + struct module *qf_owner; + struct quota_format_type *qf_next; +}; + +struct quota_format_ops { + int (*check_quota_file)(struct super_block *, int); + int (*read_file_info)(struct super_block *, int); + int (*write_file_info)(struct super_block *, int); + int (*free_file_info)(struct super_block *, int); + int (*read_dqblk)(struct dquot *); + int (*commit_dqblk)(struct dquot *); + int (*release_dqblk)(struct dquot *); + int (*get_next_id)(struct super_block *, struct kqid *); +}; + +struct dquot_operations { + int (*write_dquot)(struct dquot *); + struct dquot * (*alloc_dquot)(struct super_block *, int); + void (*destroy_dquot)(struct dquot *); + int (*acquire_dquot)(struct dquot *); + int (*release_dquot)(struct dquot *); + int (*mark_dirty)(struct dquot *); + int (*write_info)(struct super_block *, int); + qsize_t * (*get_reserved_space)(struct inode *); + int (*get_projid)(struct inode *, kprojid_t *); + int (*get_inode_usage)(struct inode *, qsize_t *); + int (*get_next_id)(struct super_block *, struct kqid *); +}; + +struct qc_dqblk { + int d_fieldmask; + u64 d_spc_hardlimit; + u64 d_spc_softlimit; + u64 d_ino_hardlimit; + u64 d_ino_softlimit; + u64 d_space; + u64 d_ino_count; + s64 d_ino_timer; + s64 d_spc_timer; + int d_ino_warns; + int d_spc_warns; + u64 d_rt_spc_hardlimit; + u64 d_rt_spc_softlimit; + u64 d_rt_space; + s64 d_rt_spc_timer; + int d_rt_spc_warns; +}; + +struct qc_type_state { + unsigned int flags; + unsigned int spc_timelimit; + unsigned int ino_timelimit; + unsigned int rt_spc_timelimit; + unsigned int spc_warnlimit; + unsigned int ino_warnlimit; + unsigned int rt_spc_warnlimit; + long long unsigned int ino; + blkcnt_t blocks; + blkcnt_t nextents; +}; + +struct qc_state { + unsigned int s_incoredqs; + struct qc_type_state s_state[3]; +}; + +struct qc_info { + int i_fieldmask; + unsigned int i_flags; + unsigned int i_spc_timelimit; + unsigned int i_ino_timelimit; + unsigned int i_rt_spc_timelimit; + unsigned int i_spc_warnlimit; + unsigned int i_ino_warnlimit; + unsigned int i_rt_spc_warnlimit; +}; + +struct quotactl_ops { + int (*quota_on)(struct super_block *, int, int, const struct path *); + int (*quota_off)(struct super_block *, int); + int (*quota_enable)(struct super_block *, unsigned int); + int (*quota_disable)(struct super_block *, unsigned int); + int (*quota_sync)(struct super_block *, int); + int (*set_info)(struct super_block *, int, struct qc_info *); + int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); + int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *); + int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); + int (*get_state)(struct super_block *, struct qc_state *); + int (*rm_xquota)(struct super_block *, unsigned int); +}; + +struct wait_page_queue { + struct page *page; + int bit_nr; + wait_queue_entry_t wait; +}; + +struct writeback_control; + +struct readahead_control; + +struct swap_info_struct; + +struct address_space_operations { + int (*writepage)(struct page *, struct writeback_control *); + int (*readpage)(struct file *, 
struct page *); + int (*writepages)(struct address_space *, struct writeback_control *); + int (*set_page_dirty)(struct page *); + int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int); + void (*readahead)(struct readahead_control *); + int (*write_begin)(struct file *, struct address_space *, loff_t, unsigned int, unsigned int, struct page **, void **); + int (*write_end)(struct file *, struct address_space *, loff_t, unsigned int, unsigned int, struct page *, void *); + sector_t (*bmap)(struct address_space *, sector_t); + void (*invalidatepage)(struct page *, unsigned int, unsigned int); + int (*releasepage)(struct page *, gfp_t); + void (*freepage)(struct page *); + ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *); + int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode); + bool (*isolate_page)(struct page *, isolate_mode_t); + void (*putback_page)(struct page *); + int (*launder_page)(struct page *); + int (*is_partially_uptodate)(struct page *, long unsigned int, long unsigned int); + void (*is_dirty_writeback)(struct page *, bool *, bool *); + int (*error_remove_page)(struct address_space *, struct page *); + int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *); + void (*swap_deactivate)(struct file *); +}; + +enum writeback_sync_modes { + WB_SYNC_NONE = 0, + WB_SYNC_ALL = 1, +}; + +struct writeback_control { + long int nr_to_write; + long int pages_skipped; + loff_t range_start; + loff_t range_end; + enum writeback_sync_modes sync_mode; + unsigned int for_kupdate: 1; + unsigned int for_background: 1; + unsigned int tagged_writepages: 1; + unsigned int for_reclaim: 1; + unsigned int range_cyclic: 1; + unsigned int for_sync: 1; + unsigned int no_cgroup_owner: 1; + unsigned int punt_to_cgroup: 1; + struct bdi_writeback *wb; + struct inode *inode; + int wb_id; + int wb_lcand_id; + int wb_tcand_id; + size_t wb_bytes; + size_t wb_lcand_bytes; + size_t wb_tcand_bytes; +}; + +struct readahead_control { + struct file *file; + struct address_space *mapping; + long unsigned int _index; + unsigned int _nr_pages; + unsigned int _batch_count; +}; + +struct iovec; + +struct kvec; + +struct bio_vec; + +struct iov_iter { + unsigned int type; + size_t iov_offset; + size_t count; + union { + const struct iovec *iov; + const struct kvec *kvec; + const struct bio_vec *bvec; + struct pipe_inode_info *pipe; + }; + union { + long unsigned int nr_segs; + struct { + unsigned int head; + unsigned int start_head; + }; + }; +}; + +struct swap_cluster_info { + spinlock_t lock; + unsigned int data: 24; + unsigned int flags: 8; +}; + +struct swap_cluster_list { + struct swap_cluster_info head; + struct swap_cluster_info tail; +}; + +struct percpu_cluster; + +struct swap_info_struct { + long unsigned int flags; + short int prio; + struct plist_node list; + signed char type; + unsigned int max; + unsigned char *swap_map; + struct swap_cluster_info *cluster_info; + struct swap_cluster_list free_clusters; + unsigned int lowest_bit; + unsigned int highest_bit; + unsigned int pages; + unsigned int inuse_pages; + unsigned int cluster_next; + unsigned int cluster_nr; + unsigned int *cluster_next_cpu; + struct percpu_cluster *percpu_cluster; + struct rb_root swap_extent_root; + struct block_device *bdev; + struct file *swap_file; + unsigned int old_block_size; + long unsigned int *frontswap_map; + atomic_t frontswap_pages; + spinlock_t lock; + spinlock_t cont_lock; + struct work_struct discard_work; + struct 
swap_cluster_list discard_clusters; + struct plist_node avail_lists[0]; +}; + +struct hd_struct; + +struct gendisk; + +struct block_device { + dev_t bd_dev; + int bd_openers; + struct inode *bd_inode; + struct super_block *bd_super; + struct mutex bd_mutex; + void *bd_claiming; + void *bd_holder; + int bd_holders; + bool bd_write_holder; + struct list_head bd_holder_disks; + struct block_device *bd_contains; + u8 bd_partno; + struct hd_struct *bd_part; + unsigned int bd_part_count; + spinlock_t bd_size_lock; + struct gendisk *bd_disk; + struct backing_dev_info *bd_bdi; + int bd_fsfreeze_count; + struct mutex bd_fsfreeze_mutex; +}; + +struct cdev { + struct kobject kobj; + struct module *owner; + const struct file_operations *ops; + struct list_head list; + dev_t dev; + unsigned int count; +}; + +struct fiemap_extent_info; + +struct inode_operations { + struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int); + const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *); + int (*permission)(struct inode *, int); + struct posix_acl * (*get_acl)(struct inode *, int); + int (*readlink)(struct dentry *, char *, int); + int (*create)(struct inode *, struct dentry *, umode_t, bool); + int (*link)(struct dentry *, struct inode *, struct dentry *); + int (*unlink)(struct inode *, struct dentry *); + int (*symlink)(struct inode *, struct dentry *, const char *); + int (*mkdir)(struct inode *, struct dentry *, umode_t); + int (*rmdir)(struct inode *, struct dentry *); + int (*mknod)(struct inode *, struct dentry *, umode_t, dev_t); + int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); + int (*setattr)(struct dentry *, struct iattr *); + int (*getattr)(const struct path *, struct kstat *, u32, unsigned int); + ssize_t (*listxattr)(struct dentry *, char *, size_t); + int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64, u64); + int (*update_time)(struct inode *, struct timespec64 *, int); + int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t); + int (*tmpfile)(struct inode *, struct dentry *, umode_t); + int (*set_acl)(struct inode *, struct posix_acl *, int); + long: 64; + long: 64; + long: 64; +}; + +struct file_lock_context { + spinlock_t flc_lock; + struct list_head flc_flock; + struct list_head flc_posix; + struct list_head flc_lease; +}; + +struct file_lock_operations { + void (*fl_copy_lock)(struct file_lock *, struct file_lock *); + void (*fl_release_private)(struct file_lock *); +}; + +struct nlm_lockowner; + +struct nfs_lock_info { + u32 state; + struct nlm_lockowner *owner; + struct list_head list; +}; + +struct nfs4_lock_state; + +struct nfs4_lock_info { + struct nfs4_lock_state *owner; +}; + +struct lock_manager_operations; + +struct file_lock { + struct file_lock *fl_blocker; + struct list_head fl_list; + struct hlist_node fl_link; + struct list_head fl_blocked_requests; + struct list_head fl_blocked_member; + fl_owner_t fl_owner; + unsigned int fl_flags; + unsigned char fl_type; + unsigned int fl_pid; + int fl_link_cpu; + wait_queue_head_t fl_wait; + struct file *fl_file; + loff_t fl_start; + loff_t fl_end; + struct fasync_struct *fl_fasync; + long unsigned int fl_break_time; + long unsigned int fl_downgrade_time; + const struct file_lock_operations *fl_ops; + const struct lock_manager_operations *fl_lmops; + union { + struct nfs_lock_info nfs_fl; + struct nfs4_lock_info nfs4_fl; + struct { + struct list_head link; + int state; + unsigned int debug_id; + } afs; 
+ } fl_u; +}; + +struct lock_manager_operations { + fl_owner_t (*lm_get_owner)(fl_owner_t); + void (*lm_put_owner)(fl_owner_t); + void (*lm_notify)(struct file_lock *); + int (*lm_grant)(struct file_lock *, int); + bool (*lm_break)(struct file_lock *); + int (*lm_change)(struct file_lock *, int, struct list_head *); + void (*lm_setup)(struct file_lock *, void **); + bool (*lm_breaker_owns_lease)(struct file_lock *); +}; + +struct fasync_struct { + rwlock_t fa_lock; + int magic; + int fa_fd; + struct fasync_struct *fa_next; + struct file *fa_file; + struct callback_head fa_rcu; +}; + +struct kstatfs; + +struct super_operations { + struct inode * (*alloc_inode)(struct super_block *); + void (*destroy_inode)(struct inode *); + void (*free_inode)(struct inode *); + void (*dirty_inode)(struct inode *, int); + int (*write_inode)(struct inode *, struct writeback_control *); + int (*drop_inode)(struct inode *); + void (*evict_inode)(struct inode *); + void (*put_super)(struct super_block *); + int (*sync_fs)(struct super_block *, int); + int (*freeze_super)(struct super_block *); + int (*freeze_fs)(struct super_block *); + int (*thaw_super)(struct super_block *); + int (*unfreeze_fs)(struct super_block *); + int (*statfs)(struct dentry *, struct kstatfs *); + int (*remount_fs)(struct super_block *, int *, char *); + void (*umount_begin)(struct super_block *); + int (*show_options)(struct seq_file *, struct dentry *); + int (*show_devname)(struct seq_file *, struct dentry *); + int (*show_path)(struct seq_file *, struct dentry *); + int (*show_stats)(struct seq_file *, struct dentry *); + ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); + ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); + struct dquot ** (*get_dquots)(struct inode *); + int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t); + long int (*nr_cached_objects)(struct super_block *, struct shrink_control *); + long int (*free_cached_objects)(struct super_block *, struct shrink_control *); +}; + +struct iomap; + +struct fid; + +struct export_operations { + int (*encode_fh)(struct inode *, __u32 *, int *, struct inode *); + struct dentry * (*fh_to_dentry)(struct super_block *, struct fid *, int, int); + struct dentry * (*fh_to_parent)(struct super_block *, struct fid *, int, int); + int (*get_name)(struct dentry *, char *, struct dentry *); + struct dentry * (*get_parent)(struct dentry *); + int (*commit_metadata)(struct inode *); + int (*get_uuid)(struct super_block *, u8 *, u32 *, u64 *); + int (*map_blocks)(struct inode *, loff_t, u64, struct iomap *, bool, u32 *); + int (*commit_blocks)(struct inode *, struct iomap *, int, struct iattr *); +}; + +struct xattr_handler { + const char *name; + const char *prefix; + int flags; + bool (*list)(struct dentry *); + int (*get)(const struct xattr_handler *, struct dentry *, struct inode *, const char *, void *, size_t); + int (*set)(const struct xattr_handler *, struct dentry *, struct inode *, const char *, const void *, size_t, int); +}; + +union fscrypt_policy; + +struct fscrypt_operations { + unsigned int flags; + const char *key_prefix; + int (*get_context)(struct inode *, void *, size_t); + int (*set_context)(struct inode *, const void *, size_t, void *); + const union fscrypt_policy * (*get_dummy_policy)(struct super_block *); + bool (*empty_dir)(struct inode *); + unsigned int max_namelen; + bool (*has_stable_inodes)(struct super_block *); + void (*get_ino_and_lblk_bits)(struct super_block *, int *, int *); + int 
(*get_num_devices)(struct super_block *); + void (*get_devices)(struct super_block *, struct request_queue **); +}; + +struct fsverity_operations { + int (*begin_enable_verity)(struct file *); + int (*end_enable_verity)(struct file *, const void *, size_t, u64); + int (*get_verity_descriptor)(struct inode *, void *, size_t); + struct page * (*read_merkle_tree_page)(struct inode *, long unsigned int, long unsigned int); + int (*write_merkle_tree_block)(struct inode *, const void *, u64, int); +}; + +typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64, unsigned int); + +struct dir_context { + filldir_t actor; + loff_t pos; +}; + +typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); + +struct poll_table_struct { + poll_queue_proc _qproc; + __poll_t _key; +}; + +struct seq_operations; + +struct seq_file { + char *buf; + size_t size; + size_t from; + size_t count; + size_t pad_until; + loff_t index; + loff_t read_pos; + struct mutex lock; + const struct seq_operations *op; + int poll_event; + const struct file *file; + void *private; +}; + +struct fc_log; + +struct p_log { + const char *prefix; + struct fc_log *log; +}; + +enum fs_context_purpose { + FS_CONTEXT_FOR_MOUNT = 0, + FS_CONTEXT_FOR_SUBMOUNT = 1, + FS_CONTEXT_FOR_RECONFIGURE = 2, +}; + +enum fs_context_phase { + FS_CONTEXT_CREATE_PARAMS = 0, + FS_CONTEXT_CREATING = 1, + FS_CONTEXT_AWAITING_MOUNT = 2, + FS_CONTEXT_AWAITING_RECONF = 3, + FS_CONTEXT_RECONF_PARAMS = 4, + FS_CONTEXT_RECONFIGURING = 5, + FS_CONTEXT_FAILED = 6, +}; + +struct fs_context_operations; + +struct fs_context { + const struct fs_context_operations *ops; + struct mutex uapi_mutex; + struct file_system_type *fs_type; + void *fs_private; + void *sget_key; + struct dentry *root; + struct user_namespace *user_ns; + struct net *net_ns; + const struct cred *cred; + struct p_log log; + const char *source; + void *security; + void *s_fs_info; + unsigned int sb_flags; + unsigned int sb_flags_mask; + unsigned int s_iflags; + unsigned int lsm_flags; + enum fs_context_purpose purpose: 8; + enum fs_context_phase phase: 8; + bool need_free: 1; + bool global: 1; + bool oldapi: 1; +}; + +struct fs_parameter; + +struct fs_parse_result; + +typedef int fs_param_type(struct p_log *, const struct fs_parameter_spec *, struct fs_parameter *, struct fs_parse_result *); + +struct fs_parameter_spec { + const char *name; + fs_param_type *type; + u8 opt; + short unsigned int flags; + const void *data; +}; + +struct audit_names; + +struct filename { + const char *name; + const char *uptr; + int refcnt; + struct audit_names *aname; + const char iname[0]; +}; + +typedef u8 blk_status_t; + +struct bvec_iter { + sector_t bi_sector; + unsigned int bi_size; + unsigned int bi_idx; + unsigned int bi_bvec_done; +}; + +typedef void bio_end_io_t(struct bio *); + +struct bio_issue { + u64 value; +}; + +struct bio_vec { + struct page *bv_page; + unsigned int bv_len; + unsigned int bv_offset; +}; + +struct bio_crypt_ctx; + +struct bio_integrity_payload; + +struct bio { + struct bio *bi_next; + struct gendisk *bi_disk; + unsigned int bi_opf; + short unsigned int bi_flags; + short unsigned int bi_ioprio; + short unsigned int bi_write_hint; + blk_status_t bi_status; + u8 bi_partno; + atomic_t __bi_remaining; + struct bvec_iter bi_iter; + bio_end_io_t *bi_end_io; + void *bi_private; + struct blkcg_gq *bi_blkg; + struct bio_issue bi_issue; + u64 bi_iocost_cost; + struct bio_crypt_ctx *bi_crypt_context; + union { + struct bio_integrity_payload 
*bi_integrity; + }; + short unsigned int bi_vcnt; + short unsigned int bi_max_vecs; + atomic_t __bi_cnt; + struct bio_vec *bi_io_vec; + struct bio_set *bi_pool; + struct bio_vec bi_inline_vecs[0]; +}; + +struct kernfs_root; + +struct kernfs_elem_dir { + long unsigned int subdirs; + struct rb_root children; + struct kernfs_root *root; +}; + +struct kernfs_syscall_ops; + +struct kernfs_root { + struct kernfs_node *kn; + unsigned int flags; + struct idr ino_idr; + u32 last_id_lowbits; + u32 id_highbits; + struct kernfs_syscall_ops *syscall_ops; + struct list_head supers; + wait_queue_head_t deactivate_waitq; +}; + +struct kernfs_elem_symlink { + struct kernfs_node *target_kn; +}; + +struct kernfs_ops; + +struct kernfs_open_node; + +struct kernfs_elem_attr { + const struct kernfs_ops *ops; + struct kernfs_open_node *open; + loff_t size; + struct kernfs_node *notify_next; +}; + +struct kernfs_iattrs; + +struct kernfs_node { + atomic_t count; + atomic_t active; + struct kernfs_node *parent; + const char *name; + struct rb_node rb; + const void *ns; + unsigned int hash; + union { + struct kernfs_elem_dir dir; + struct kernfs_elem_symlink symlink; + struct kernfs_elem_attr attr; + }; + void *priv; + u64 id; + short unsigned int flags; + umode_t mode; + struct kernfs_iattrs *iattr; +}; + +struct kernfs_open_file; + +struct kernfs_ops { + int (*open)(struct kernfs_open_file *); + void (*release)(struct kernfs_open_file *); + int (*seq_show)(struct seq_file *, void *); + void * (*seq_start)(struct seq_file *, loff_t *); + void * (*seq_next)(struct seq_file *, void *, loff_t *); + void (*seq_stop)(struct seq_file *, void *); + ssize_t (*read)(struct kernfs_open_file *, char *, size_t, loff_t); + size_t atomic_write_len; + bool prealloc; + ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t); + __poll_t (*poll)(struct kernfs_open_file *, struct poll_table_struct *); + int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *); +}; + +struct kernfs_syscall_ops { + int (*show_options)(struct seq_file *, struct kernfs_root *); + int (*mkdir)(struct kernfs_node *, const char *, umode_t); + int (*rmdir)(struct kernfs_node *); + int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *); + int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *); +}; + +struct kernfs_open_file { + struct kernfs_node *kn; + struct file *file; + struct seq_file *seq_file; + void *priv; + struct mutex mutex; + struct mutex prealloc_mutex; + int event; + struct list_head list; + char *prealloc_buf; + size_t atomic_write_len; + bool mmapped: 1; + bool released: 1; + const struct vm_operations_struct *vm_ops; +}; + +enum kobj_ns_type { + KOBJ_NS_TYPE_NONE = 0, + KOBJ_NS_TYPE_NET = 1, + KOBJ_NS_TYPES = 2, +}; + +struct sock; + +struct kobj_ns_type_operations { + enum kobj_ns_type type; + bool (*current_may_mount)(); + void * (*grab_current_ns)(); + const void * (*netlink_ns)(struct sock *); + const void * (*initial_ns)(); + void (*drop_ns)(void *); +}; + +struct attribute { + const char *name; + umode_t mode; +}; + +struct bin_attribute; + +struct attribute_group { + const char *name; + umode_t (*is_visible)(struct kobject *, struct attribute *, int); + umode_t (*is_bin_visible)(struct kobject *, struct bin_attribute *, int); + struct attribute **attrs; + struct bin_attribute **bin_attrs; +}; + +struct bin_attribute { + struct attribute attr; + size_t size; + void *private; + ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); + 
ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); + int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *); +}; + +struct sysfs_ops { + ssize_t (*show)(struct kobject *, struct attribute *, char *); + ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t); +}; + +struct kset_uevent_ops; + +struct kset { + struct list_head list; + spinlock_t list_lock; + struct kobject kobj; + const struct kset_uevent_ops *uevent_ops; +}; + +struct kobj_type { + void (*release)(struct kobject *); + const struct sysfs_ops *sysfs_ops; + struct attribute **default_attrs; + const struct attribute_group **default_groups; + const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *); + const void * (*namespace)(struct kobject *); + void (*get_ownership)(struct kobject *, kuid_t *, kgid_t *); +}; + +struct kobj_uevent_env { + char *argv[3]; + char *envp[64]; + int envp_idx; + char buf[2048]; + int buflen; +}; + +struct kset_uevent_ops { + int (* const filter)(struct kset *, struct kobject *); + const char * (* const name)(struct kset *, struct kobject *); + int (* const uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *); +}; + +struct kernel_param_ops { + unsigned int flags; + int (*set)(const char *, const struct kernel_param *); + int (*get)(char *, const struct kernel_param *); + void (*free)(void *); +}; + +struct kparam_string; + +struct kparam_array; + +struct kernel_param { + const char *name; + struct module *mod; + const struct kernel_param_ops *ops; + const u16 perm; + s8 level; + u8 flags; + union { + void *arg; + const struct kparam_string *str; + const struct kparam_array *arr; + }; +}; + +struct kparam_string { + unsigned int maxlen; + char *string; +}; + +struct kparam_array { + unsigned int max; + unsigned int elemsize; + unsigned int *num; + const struct kernel_param_ops *ops; + void *elem; +}; + +struct error_injection_entry { + long unsigned int addr; + int etype; +}; + +struct tracepoint_func { + void *func; + void *data; + int prio; +}; + +struct static_call_key; + +struct tracepoint { + const char *name; + struct static_key key; + struct static_call_key *static_call_key; + void *static_call_tramp; + void *iterator; + int (*regfunc)(); + void (*unregfunc)(); + struct tracepoint_func *funcs; +}; + +struct static_call_key { + void *func; +}; + +struct bpf_raw_event_map { + struct tracepoint *tp; + void *bpf_func; + u32 num_args; + u32 writable_size; + long: 64; +}; + +struct plt_entry { + __le32 adrp; + __le32 add; + __le32 br; +}; + +struct module_attribute { + struct attribute attr; + ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); + ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t); + void (*setup)(struct module *, const char *); + int (*test)(struct module *); + void (*free)(struct module *); +}; + +struct trace_event_functions; + +struct trace_event { + struct hlist_node node; + struct list_head list; + int type; + struct trace_event_functions *funcs; +}; + +struct trace_event_class; + +struct trace_event_call { + struct list_head list; + struct trace_event_class *class; + union { + char *name; + struct tracepoint *tp; + }; + struct trace_event event; + char *print_fmt; + struct event_filter *filter; + void *mod; + void *data; + int flags; + int perf_refcount; + struct hlist_head *perf_events; + struct bpf_prog_array *prog_array; + int (*perf_perm)(struct trace_event_call *, struct 
perf_event *); +}; + +struct trace_eval_map { + const char *system; + const char *eval_string; + long unsigned int eval_value; +}; + +struct linux_binprm { + struct vm_area_struct *vma; + long unsigned int vma_pages; + struct mm_struct *mm; + long unsigned int p; + long unsigned int argmin; + unsigned int have_execfd: 1; + unsigned int execfd_creds: 1; + unsigned int secureexec: 1; + unsigned int point_of_no_return: 1; + struct file *executable; + struct file *interpreter; + struct file *file; + struct cred *cred; + int unsafe; + unsigned int per_clear; + int argc; + int envc; + const char *filename; + const char *interp; + const char *fdpath; + unsigned int interp_flags; + int execfd; + long unsigned int loader; + long unsigned int exec; + struct rlimit rlim_stack; + char buf[256]; +}; + +struct coredump_params { + const kernel_siginfo_t *siginfo; + struct pt_regs *regs; + struct file *file; + long unsigned int limit; + long unsigned int mm_flags; + loff_t written; + loff_t pos; +}; + +struct em_perf_state { + long unsigned int frequency; + long unsigned int power; + long unsigned int cost; +}; + +struct em_perf_domain { + struct em_perf_state *table; + int nr_perf_states; + long unsigned int cpus[0]; +}; + +enum dl_dev_state { + DL_DEV_NO_DRIVER = 0, + DL_DEV_PROBING = 1, + DL_DEV_DRIVER_BOUND = 2, + DL_DEV_UNBINDING = 3, +}; + +struct dev_links_info { + struct list_head suppliers; + struct list_head consumers; + struct list_head needs_suppliers; + struct list_head defer_hook; + bool need_for_probe; + enum dl_dev_state status; +}; + +struct pm_message { + int event; +}; + +typedef struct pm_message pm_message_t; + +enum rpm_request { + RPM_REQ_NONE = 0, + RPM_REQ_IDLE = 1, + RPM_REQ_SUSPEND = 2, + RPM_REQ_AUTOSUSPEND = 3, + RPM_REQ_RESUME = 4, +}; + +struct wakeup_source; + +struct wake_irq; + +struct pm_subsys_data; + +struct dev_pm_qos; + +struct dev_pm_info { + pm_message_t power_state; + unsigned int can_wakeup: 1; + unsigned int async_suspend: 1; + bool in_dpm_list: 1; + bool is_prepared: 1; + bool is_suspended: 1; + bool is_noirq_suspended: 1; + bool is_late_suspended: 1; + bool no_pm: 1; + bool early_init: 1; + bool direct_complete: 1; + u32 driver_flags; + spinlock_t lock; + struct list_head entry; + struct completion completion; + struct wakeup_source *wakeup; + bool wakeup_path: 1; + bool syscore: 1; + bool no_pm_callbacks: 1; + unsigned int must_resume: 1; + unsigned int may_skip_resume: 1; + struct hrtimer suspend_timer; + u64 timer_expires; + struct work_struct work; + wait_queue_head_t wait_queue; + struct wake_irq *wakeirq; + atomic_t usage_count; + atomic_t child_count; + unsigned int disable_depth: 3; + unsigned int idle_notification: 1; + unsigned int request_pending: 1; + unsigned int deferred_resume: 1; + unsigned int runtime_auto: 1; + bool ignore_children: 1; + unsigned int no_callbacks: 1; + unsigned int irq_safe: 1; + unsigned int use_autosuspend: 1; + unsigned int timer_autosuspends: 1; + unsigned int memalloc_noio: 1; + unsigned int links_count; + enum rpm_request request; + enum rpm_status runtime_status; + int runtime_error; + int autosuspend_delay; + u64 last_busy; + u64 active_time; + u64 suspended_time; + u64 accounting_timestamp; + struct pm_subsys_data *subsys_data; + void (*set_latency_tolerance)(struct device *, s32); + struct dev_pm_qos *qos; +}; + +struct dev_archdata {}; + +struct device_private; + +struct device_type; + +struct bus_type; + +struct device_driver; + +struct dev_pm_domain; + +struct irq_domain; + +struct dev_pin_info; + +struct 
dma_map_ops; + +struct bus_dma_region; + +struct device_dma_parameters; + +struct dma_coherent_mem; + +struct cma; + +struct device_node; + +struct fwnode_handle; + +struct class; + +struct iommu_group; + +struct dev_iommu; + +struct device { + struct kobject kobj; + struct device *parent; + struct device_private *p; + const char *init_name; + const struct device_type *type; + struct bus_type *bus; + struct device_driver *driver; + void *platform_data; + void *driver_data; + struct mutex mutex; + struct dev_links_info links; + struct dev_pm_info power; + struct dev_pm_domain *pm_domain; + struct em_perf_domain *em_pd; + struct irq_domain *msi_domain; + struct dev_pin_info *pins; + struct list_head msi_list; + const struct dma_map_ops *dma_ops; + u64 *dma_mask; + u64 coherent_dma_mask; + u64 bus_dma_limit; + const struct bus_dma_region *dma_range_map; + struct device_dma_parameters *dma_parms; + struct list_head dma_pools; + struct dma_coherent_mem *dma_mem; + struct cma *cma_area; + struct dev_archdata archdata; + struct device_node *of_node; + struct fwnode_handle *fwnode; + int numa_node; + dev_t devt; + u32 id; + spinlock_t devres_lock; + struct list_head devres_head; + struct class *class; + const struct attribute_group **groups; + void (*release)(struct device *); + struct iommu_group *iommu_group; + struct dev_iommu *iommu; + bool offline_disabled: 1; + bool offline: 1; + bool of_node_reused: 1; + bool state_synced: 1; + bool dma_coherent: 1; +}; + +struct dev_pm_ops { + int (*prepare)(struct device *); + void (*complete)(struct device *); + int (*suspend)(struct device *); + int (*resume)(struct device *); + int (*freeze)(struct device *); + int (*thaw)(struct device *); + int (*poweroff)(struct device *); + int (*restore)(struct device *); + int (*suspend_late)(struct device *); + int (*resume_early)(struct device *); + int (*freeze_late)(struct device *); + int (*thaw_early)(struct device *); + int (*poweroff_late)(struct device *); + int (*restore_early)(struct device *); + int (*suspend_noirq)(struct device *); + int (*resume_noirq)(struct device *); + int (*freeze_noirq)(struct device *); + int (*thaw_noirq)(struct device *); + int (*poweroff_noirq)(struct device *); + int (*restore_noirq)(struct device *); + int (*runtime_suspend)(struct device *); + int (*runtime_resume)(struct device *); + int (*runtime_idle)(struct device *); +}; + +struct pm_domain_data; + +struct pm_subsys_data { + spinlock_t lock; + unsigned int refcount; + struct list_head clock_list; + struct pm_domain_data *domain_data; +}; + +struct wakeup_source { + const char *name; + int id; + struct list_head entry; + spinlock_t lock; + struct wake_irq *wakeirq; + struct timer_list timer; + long unsigned int timer_expires; + ktime_t total_time; + ktime_t max_time; + ktime_t last_time; + ktime_t start_prevent_time; + ktime_t prevent_sleep_time; + long unsigned int event_count; + long unsigned int active_count; + long unsigned int relax_count; + long unsigned int expire_count; + long unsigned int wakeup_count; + struct device *dev; + bool active: 1; + bool autosleep_enabled: 1; +}; + +struct dev_pm_domain { + struct dev_pm_ops ops; + int (*start)(struct device *); + void (*detach)(struct device *, bool); + int (*activate)(struct device *); + void (*sync)(struct device *); + void (*dismiss)(struct device *); +}; + +struct iommu_ops; + +struct subsys_private; + +struct bus_type { + const char *name; + const char *dev_name; + struct device *dev_root; + const struct attribute_group **bus_groups; + const struct 
attribute_group **dev_groups; + const struct attribute_group **drv_groups; + int (*match)(struct device *, struct device_driver *); + int (*uevent)(struct device *, struct kobj_uevent_env *); + int (*probe)(struct device *); + void (*sync_state)(struct device *); + int (*remove)(struct device *); + void (*shutdown)(struct device *); + int (*online)(struct device *); + int (*offline)(struct device *); + int (*suspend)(struct device *, pm_message_t); + int (*resume)(struct device *); + int (*num_vf)(struct device *); + int (*dma_configure)(struct device *); + const struct dev_pm_ops *pm; + const struct iommu_ops *iommu_ops; + struct subsys_private *p; + struct lock_class_key lock_key; + bool need_parent_lock; +}; + +enum probe_type { + PROBE_DEFAULT_STRATEGY = 0, + PROBE_PREFER_ASYNCHRONOUS = 1, + PROBE_FORCE_SYNCHRONOUS = 2, +}; + +struct of_device_id; + +struct acpi_device_id; + +struct driver_private; + +struct device_driver { + const char *name; + struct bus_type *bus; + struct module *owner; + const char *mod_name; + bool suppress_bind_attrs; + enum probe_type probe_type; + const struct of_device_id *of_match_table; + const struct acpi_device_id *acpi_match_table; + int (*probe)(struct device *); + void (*sync_state)(struct device *); + int (*remove)(struct device *); + void (*shutdown)(struct device *); + int (*suspend)(struct device *, pm_message_t); + int (*resume)(struct device *); + const struct attribute_group **groups; + const struct attribute_group **dev_groups; + const struct dev_pm_ops *pm; + void (*coredump)(struct device *); + struct driver_private *p; +}; + +enum iommu_cap { + IOMMU_CAP_CACHE_COHERENCY = 0, + IOMMU_CAP_INTR_REMAP = 1, + IOMMU_CAP_NOEXEC = 2, +}; + +enum iommu_attr { + DOMAIN_ATTR_GEOMETRY = 0, + DOMAIN_ATTR_PAGING = 1, + DOMAIN_ATTR_WINDOWS = 2, + DOMAIN_ATTR_FSL_PAMU_STASH = 3, + DOMAIN_ATTR_FSL_PAMU_ENABLE = 4, + DOMAIN_ATTR_FSL_PAMUV1 = 5, + DOMAIN_ATTR_NESTING = 6, + DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE = 7, + DOMAIN_ATTR_MAX = 8, +}; + +enum iommu_dev_features { + IOMMU_DEV_FEAT_AUX = 0, + IOMMU_DEV_FEAT_SVA = 1, +}; + +struct iommu_domain; + +struct iommu_iotlb_gather; + +struct iommu_device; + +struct iommu_resv_region; + +struct of_phandle_args; + +struct iommu_sva; + +struct iommu_fault_event; + +struct iommu_page_response; + +struct iommu_cache_invalidate_info; + +struct iommu_gpasid_bind_data; + +struct iommu_ops { + bool (*capable)(enum iommu_cap); + struct iommu_domain * (*domain_alloc)(unsigned int); + void (*domain_free)(struct iommu_domain *); + int (*attach_dev)(struct iommu_domain *, struct device *); + void (*detach_dev)(struct iommu_domain *, struct device *); + int (*map)(struct iommu_domain *, long unsigned int, phys_addr_t, size_t, int, gfp_t); + size_t (*unmap)(struct iommu_domain *, long unsigned int, size_t, struct iommu_iotlb_gather *); + void (*flush_iotlb_all)(struct iommu_domain *); + void (*iotlb_sync_map)(struct iommu_domain *); + void (*iotlb_sync)(struct iommu_domain *, struct iommu_iotlb_gather *); + phys_addr_t (*iova_to_phys)(struct iommu_domain *, dma_addr_t); + struct iommu_device * (*probe_device)(struct device *); + void (*release_device)(struct device *); + void (*probe_finalize)(struct device *); + struct iommu_group * (*device_group)(struct device *); + int (*domain_get_attr)(struct iommu_domain *, enum iommu_attr, void *); + int (*domain_set_attr)(struct iommu_domain *, enum iommu_attr, void *); + void (*get_resv_regions)(struct device *, struct list_head *); + void (*put_resv_regions)(struct device *, struct 
list_head *); + void (*apply_resv_region)(struct device *, struct iommu_domain *, struct iommu_resv_region *); + int (*domain_window_enable)(struct iommu_domain *, u32, phys_addr_t, u64, int); + void (*domain_window_disable)(struct iommu_domain *, u32); + int (*of_xlate)(struct device *, struct of_phandle_args *); + bool (*is_attach_deferred)(struct iommu_domain *, struct device *); + bool (*dev_has_feat)(struct device *, enum iommu_dev_features); + bool (*dev_feat_enabled)(struct device *, enum iommu_dev_features); + int (*dev_enable_feat)(struct device *, enum iommu_dev_features); + int (*dev_disable_feat)(struct device *, enum iommu_dev_features); + int (*aux_attach_dev)(struct iommu_domain *, struct device *); + void (*aux_detach_dev)(struct iommu_domain *, struct device *); + int (*aux_get_pasid)(struct iommu_domain *, struct device *); + struct iommu_sva * (*sva_bind)(struct device *, struct mm_struct *, void *); + void (*sva_unbind)(struct iommu_sva *); + u32 (*sva_get_pasid)(struct iommu_sva *); + int (*page_response)(struct device *, struct iommu_fault_event *, struct iommu_page_response *); + int (*cache_invalidate)(struct iommu_domain *, struct device *, struct iommu_cache_invalidate_info *); + int (*sva_bind_gpasid)(struct iommu_domain *, struct device *, struct iommu_gpasid_bind_data *); + int (*sva_unbind_gpasid)(struct device *, u32); + int (*def_domain_type)(struct device *); + long unsigned int pgsize_bitmap; + struct module *owner; +}; + +struct device_type { + const char *name; + const struct attribute_group **groups; + int (*uevent)(struct device *, struct kobj_uevent_env *); + char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *); + void (*release)(struct device *); + const struct dev_pm_ops *pm; +}; + +struct class { + const char *name; + struct module *owner; + const struct attribute_group **class_groups; + const struct attribute_group **dev_groups; + struct kobject *dev_kobj; + int (*dev_uevent)(struct device *, struct kobj_uevent_env *); + char * (*devnode)(struct device *, umode_t *); + void (*class_release)(struct class *); + void (*dev_release)(struct device *); + int (*shutdown_pre)(struct device *); + const struct kobj_ns_type_operations *ns_type; + const void * (*namespace)(struct device *); + void (*get_ownership)(struct device *, kuid_t *, kgid_t *); + const struct dev_pm_ops *pm; + struct subsys_private *p; +}; + +struct of_device_id { + char name[32]; + char type[32]; + char compatible[128]; + const void *data; +}; + +typedef long unsigned int kernel_ulong_t; + +struct acpi_device_id { + __u8 id[9]; + kernel_ulong_t driver_data; + __u32 cls; + __u32 cls_msk; +}; + +struct device_dma_parameters { + unsigned int max_segment_size; + long unsigned int segment_boundary_mask; +}; + +enum irq_domain_bus_token { + DOMAIN_BUS_ANY = 0, + DOMAIN_BUS_WIRED = 1, + DOMAIN_BUS_GENERIC_MSI = 2, + DOMAIN_BUS_PCI_MSI = 3, + DOMAIN_BUS_PLATFORM_MSI = 4, + DOMAIN_BUS_NEXUS = 5, + DOMAIN_BUS_IPI = 6, + DOMAIN_BUS_FSL_MC_MSI = 7, + DOMAIN_BUS_TI_SCI_INTA_MSI = 8, + DOMAIN_BUS_WAKEUP = 9, + DOMAIN_BUS_VMD_MSI = 10, +}; + +struct irq_domain_ops; + +struct irq_domain_chip_generic; + +struct irq_domain { + struct list_head link; + const char *name; + const struct irq_domain_ops *ops; + void *host_data; + unsigned int flags; + unsigned int mapcount; + struct fwnode_handle *fwnode; + enum irq_domain_bus_token bus_token; + struct irq_domain_chip_generic *gc; + struct irq_domain *parent; + irq_hw_number_t hwirq_max; + unsigned int revmap_direct_max_irq; + unsigned int 
revmap_size; + struct xarray revmap_tree; + struct mutex revmap_tree_mutex; + unsigned int linear_revmap[0]; +}; + +enum dma_data_direction { + DMA_BIDIRECTIONAL = 0, + DMA_TO_DEVICE = 1, + DMA_FROM_DEVICE = 2, + DMA_NONE = 3, +}; + +struct sg_table; + +struct scatterlist; + +struct dma_map_ops { + void * (*alloc)(struct device *, size_t, dma_addr_t *, gfp_t, long unsigned int); + void (*free)(struct device *, size_t, void *, dma_addr_t, long unsigned int); + struct page * (*alloc_pages)(struct device *, size_t, dma_addr_t *, enum dma_data_direction, gfp_t); + void (*free_pages)(struct device *, size_t, struct page *, dma_addr_t, enum dma_data_direction); + void * (*alloc_noncoherent)(struct device *, size_t, dma_addr_t *, enum dma_data_direction, gfp_t); + void (*free_noncoherent)(struct device *, size_t, void *, dma_addr_t, enum dma_data_direction); + int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t, size_t, long unsigned int); + int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t, size_t, long unsigned int); + dma_addr_t (*map_page)(struct device *, struct page *, long unsigned int, size_t, enum dma_data_direction, long unsigned int); + void (*unmap_page)(struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction, long unsigned int); + void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction, long unsigned int); + dma_addr_t (*map_resource)(struct device *, phys_addr_t, size_t, enum dma_data_direction, long unsigned int); + void (*unmap_resource)(struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int); + void (*sync_single_for_cpu)(struct device *, dma_addr_t, size_t, enum dma_data_direction); + void (*sync_single_for_device)(struct device *, dma_addr_t, size_t, enum dma_data_direction); + void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction); + void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction); + void (*cache_sync)(struct device *, void *, size_t, enum dma_data_direction); + int (*dma_supported)(struct device *, u64); + u64 (*get_required_mask)(struct device *); + size_t (*max_mapping_size)(struct device *); + long unsigned int (*get_merge_boundary)(struct device *); +}; + +struct bus_dma_region { + phys_addr_t cpu_start; + dma_addr_t dma_start; + u64 size; + u64 offset; +}; + +typedef u32 phandle; + +struct fwnode_operations; + +struct fwnode_handle { + struct fwnode_handle *secondary; + const struct fwnode_operations *ops; + struct device *dev; +}; + +struct property; + +struct device_node { + const char *name; + phandle phandle; + const char *full_name; + struct fwnode_handle fwnode; + struct property *properties; + struct property *deadprops; + struct device_node *parent; + struct device_node *child; + struct device_node *sibling; + struct kobject kobj; + long unsigned int _flags; + void *data; +}; + +enum cpuhp_state { + CPUHP_INVALID = 4294967295, + CPUHP_OFFLINE = 0, + CPUHP_CREATE_THREADS = 1, + CPUHP_PERF_PREPARE = 2, + CPUHP_PERF_X86_PREPARE = 3, + CPUHP_PERF_X86_AMD_UNCORE_PREP = 4, + CPUHP_PERF_POWER = 5, + CPUHP_PERF_SUPERH = 6, + CPUHP_X86_HPET_DEAD = 7, + CPUHP_X86_APB_DEAD = 8, + CPUHP_X86_MCE_DEAD = 9, + CPUHP_VIRT_NET_DEAD = 10, + CPUHP_SLUB_DEAD = 11, + CPUHP_DEBUG_OBJ_DEAD = 12, + CPUHP_MM_WRITEBACK_DEAD = 13, + CPUHP_MM_VMSTAT_DEAD = 14, + CPUHP_SOFTIRQ_DEAD = 15, + 
CPUHP_NET_MVNETA_DEAD = 16, + CPUHP_CPUIDLE_DEAD = 17, + CPUHP_ARM64_FPSIMD_DEAD = 18, + CPUHP_ARM_OMAP_WAKE_DEAD = 19, + CPUHP_IRQ_POLL_DEAD = 20, + CPUHP_BLOCK_SOFTIRQ_DEAD = 21, + CPUHP_ACPI_CPUDRV_DEAD = 22, + CPUHP_S390_PFAULT_DEAD = 23, + CPUHP_BLK_MQ_DEAD = 24, + CPUHP_FS_BUFF_DEAD = 25, + CPUHP_PRINTK_DEAD = 26, + CPUHP_MM_MEMCQ_DEAD = 27, + CPUHP_PERCPU_CNT_DEAD = 28, + CPUHP_RADIX_DEAD = 29, + CPUHP_PAGE_ALLOC_DEAD = 30, + CPUHP_NET_DEV_DEAD = 31, + CPUHP_PCI_XGENE_DEAD = 32, + CPUHP_IOMMU_INTEL_DEAD = 33, + CPUHP_LUSTRE_CFS_DEAD = 34, + CPUHP_AP_ARM_CACHE_B15_RAC_DEAD = 35, + CPUHP_PADATA_DEAD = 36, + CPUHP_WORKQUEUE_PREP = 37, + CPUHP_POWER_NUMA_PREPARE = 38, + CPUHP_HRTIMERS_PREPARE = 39, + CPUHP_PROFILE_PREPARE = 40, + CPUHP_X2APIC_PREPARE = 41, + CPUHP_SMPCFD_PREPARE = 42, + CPUHP_RELAY_PREPARE = 43, + CPUHP_SLAB_PREPARE = 44, + CPUHP_MD_RAID5_PREPARE = 45, + CPUHP_RCUTREE_PREP = 46, + CPUHP_CPUIDLE_COUPLED_PREPARE = 47, + CPUHP_POWERPC_PMAC_PREPARE = 48, + CPUHP_POWERPC_MMU_CTX_PREPARE = 49, + CPUHP_XEN_PREPARE = 50, + CPUHP_XEN_EVTCHN_PREPARE = 51, + CPUHP_ARM_SHMOBILE_SCU_PREPARE = 52, + CPUHP_SH_SH3X_PREPARE = 53, + CPUHP_NET_FLOW_PREPARE = 54, + CPUHP_TOPOLOGY_PREPARE = 55, + CPUHP_NET_IUCV_PREPARE = 56, + CPUHP_ARM_BL_PREPARE = 57, + CPUHP_TRACE_RB_PREPARE = 58, + CPUHP_MM_ZS_PREPARE = 59, + CPUHP_MM_ZSWP_MEM_PREPARE = 60, + CPUHP_MM_ZSWP_POOL_PREPARE = 61, + CPUHP_KVM_PPC_BOOK3S_PREPARE = 62, + CPUHP_ZCOMP_PREPARE = 63, + CPUHP_TIMERS_PREPARE = 64, + CPUHP_MIPS_SOC_PREPARE = 65, + CPUHP_BP_PREPARE_DYN = 66, + CPUHP_BP_PREPARE_DYN_END = 86, + CPUHP_BRINGUP_CPU = 87, + CPUHP_AP_IDLE_DEAD = 88, + CPUHP_AP_OFFLINE = 89, + CPUHP_AP_SCHED_STARTING = 90, + CPUHP_AP_RCUTREE_DYING = 91, + CPUHP_AP_CPU_PM_STARTING = 92, + CPUHP_AP_IRQ_GIC_STARTING = 93, + CPUHP_AP_IRQ_HIP04_STARTING = 94, + CPUHP_AP_IRQ_ARMADA_XP_STARTING = 95, + CPUHP_AP_IRQ_BCM2836_STARTING = 96, + CPUHP_AP_IRQ_MIPS_GIC_STARTING = 97, + CPUHP_AP_IRQ_RISCV_STARTING = 98, + CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING = 99, + CPUHP_AP_ARM_MVEBU_COHERENCY = 100, + CPUHP_AP_MICROCODE_LOADER = 101, + CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING = 102, + CPUHP_AP_PERF_X86_STARTING = 103, + CPUHP_AP_PERF_X86_AMD_IBS_STARTING = 104, + CPUHP_AP_PERF_X86_CQM_STARTING = 105, + CPUHP_AP_PERF_X86_CSTATE_STARTING = 106, + CPUHP_AP_PERF_XTENSA_STARTING = 107, + CPUHP_AP_MIPS_OP_LOONGSON3_STARTING = 108, + CPUHP_AP_ARM_SDEI_STARTING = 109, + CPUHP_AP_ARM_VFP_STARTING = 110, + CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING = 111, + CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING = 112, + CPUHP_AP_PERF_ARM_ACPI_STARTING = 113, + CPUHP_AP_PERF_ARM_STARTING = 114, + CPUHP_AP_ARM_L2X0_STARTING = 115, + CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING = 116, + CPUHP_AP_ARM_ARCH_TIMER_STARTING = 117, + CPUHP_AP_ARM_GLOBAL_TIMER_STARTING = 118, + CPUHP_AP_JCORE_TIMER_STARTING = 119, + CPUHP_AP_ARM_TWD_STARTING = 120, + CPUHP_AP_QCOM_TIMER_STARTING = 121, + CPUHP_AP_TEGRA_TIMER_STARTING = 122, + CPUHP_AP_ARMADA_TIMER_STARTING = 123, + CPUHP_AP_MARCO_TIMER_STARTING = 124, + CPUHP_AP_MIPS_GIC_TIMER_STARTING = 125, + CPUHP_AP_ARC_TIMER_STARTING = 126, + CPUHP_AP_RISCV_TIMER_STARTING = 127, + CPUHP_AP_CLINT_TIMER_STARTING = 128, + CPUHP_AP_CSKY_TIMER_STARTING = 129, + CPUHP_AP_HYPERV_TIMER_STARTING = 130, + CPUHP_AP_KVM_STARTING = 131, + CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING = 132, + CPUHP_AP_KVM_ARM_VGIC_STARTING = 133, + CPUHP_AP_KVM_ARM_TIMER_STARTING = 134, + CPUHP_AP_DUMMY_TIMER_STARTING = 135, + CPUHP_AP_ARM_XEN_STARTING = 136, + CPUHP_AP_ARM_CORESIGHT_STARTING = 137, + 
CPUHP_AP_ARM_CORESIGHT_CTI_STARTING = 138, + CPUHP_AP_ARM64_ISNDEP_STARTING = 139, + CPUHP_AP_SMPCFD_DYING = 140, + CPUHP_AP_X86_TBOOT_DYING = 141, + CPUHP_AP_ARM_CACHE_B15_RAC_DYING = 142, + CPUHP_AP_ONLINE = 143, + CPUHP_TEARDOWN_CPU = 144, + CPUHP_AP_ONLINE_IDLE = 145, + CPUHP_AP_SMPBOOT_THREADS = 146, + CPUHP_AP_X86_VDSO_VMA_ONLINE = 147, + CPUHP_AP_IRQ_AFFINITY_ONLINE = 148, + CPUHP_AP_BLK_MQ_ONLINE = 149, + CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS = 150, + CPUHP_AP_X86_INTEL_EPB_ONLINE = 151, + CPUHP_AP_PERF_ONLINE = 152, + CPUHP_AP_PERF_X86_ONLINE = 153, + CPUHP_AP_PERF_X86_UNCORE_ONLINE = 154, + CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE = 155, + CPUHP_AP_PERF_X86_AMD_POWER_ONLINE = 156, + CPUHP_AP_PERF_X86_RAPL_ONLINE = 157, + CPUHP_AP_PERF_X86_CQM_ONLINE = 158, + CPUHP_AP_PERF_X86_CSTATE_ONLINE = 159, + CPUHP_AP_PERF_S390_CF_ONLINE = 160, + CPUHP_AP_PERF_S390_SF_ONLINE = 161, + CPUHP_AP_PERF_ARM_CCI_ONLINE = 162, + CPUHP_AP_PERF_ARM_CCN_ONLINE = 163, + CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE = 164, + CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE = 165, + CPUHP_AP_PERF_ARM_HISI_L3_ONLINE = 166, + CPUHP_AP_PERF_ARM_L2X0_ONLINE = 167, + CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE = 168, + CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE = 169, + CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE = 170, + CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE = 171, + CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE = 172, + CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE = 173, + CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE = 174, + CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE = 175, + CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE = 176, + CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE = 177, + CPUHP_AP_WATCHDOG_ONLINE = 178, + CPUHP_AP_WORKQUEUE_ONLINE = 179, + CPUHP_AP_RCUTREE_ONLINE = 180, + CPUHP_AP_BASE_CACHEINFO_ONLINE = 181, + CPUHP_AP_ONLINE_DYN = 182, + CPUHP_AP_ONLINE_DYN_END = 212, + CPUHP_AP_X86_HPET_ONLINE = 213, + CPUHP_AP_X86_KVM_CLK_ONLINE = 214, + CPUHP_AP_ACTIVE = 215, + CPUHP_ONLINE = 216, +}; + +typedef void percpu_ref_func_t(struct percpu_ref *); + +struct percpu_ref_data { + atomic_long_t count; + percpu_ref_func_t *release; + percpu_ref_func_t *confirm_switch; + bool force_atomic: 1; + bool allow_reinit: 1; + struct callback_head rcu; + struct percpu_ref *ref; +}; + +struct dev_pagemap_ops { + void (*page_free)(struct page *); + void (*kill)(struct dev_pagemap *); + void (*cleanup)(struct dev_pagemap *); + vm_fault_t (*migrate_to_ram)(struct vm_fault *); +}; + +enum vm_event_item { + PGPGIN = 0, + PGPGOUT = 1, + PSWPIN = 2, + PSWPOUT = 3, + PGALLOC_DMA = 4, + PGALLOC_DMA32 = 5, + PGALLOC_NORMAL = 6, + PGALLOC_MOVABLE = 7, + ALLOCSTALL_DMA = 8, + ALLOCSTALL_DMA32 = 9, + ALLOCSTALL_NORMAL = 10, + ALLOCSTALL_MOVABLE = 11, + PGSCAN_SKIP_DMA = 12, + PGSCAN_SKIP_DMA32 = 13, + PGSCAN_SKIP_NORMAL = 14, + PGSCAN_SKIP_MOVABLE = 15, + PGFREE = 16, + PGACTIVATE = 17, + PGDEACTIVATE = 18, + PGLAZYFREE = 19, + PGFAULT = 20, + PGMAJFAULT = 21, + PGLAZYFREED = 22, + PGREFILL = 23, + PGREUSE = 24, + PGSTEAL_KSWAPD = 25, + PGSTEAL_DIRECT = 26, + PGSCAN_KSWAPD = 27, + PGSCAN_DIRECT = 28, + PGSCAN_DIRECT_THROTTLE = 29, + PGSCAN_ANON = 30, + PGSCAN_FILE = 31, + PGSTEAL_ANON = 32, + PGSTEAL_FILE = 33, + PGSCAN_ZONE_RECLAIM_FAILED = 34, + PGINODESTEAL = 35, + SLABS_SCANNED = 36, + KSWAPD_INODESTEAL = 37, + KSWAPD_LOW_WMARK_HIT_QUICKLY = 38, + KSWAPD_HIGH_WMARK_HIT_QUICKLY = 39, + PAGEOUTRUN = 40, + PGROTATED = 41, + DROP_PAGECACHE = 42, + DROP_SLAB = 43, + OOM_KILL = 44, + NUMA_PTE_UPDATES = 45, + NUMA_HUGE_PTE_UPDATES = 46, + NUMA_HINT_FAULTS = 47, + NUMA_HINT_FAULTS_LOCAL = 48, + NUMA_PAGE_MIGRATE = 49, + 
PGMIGRATE_SUCCESS = 50, + PGMIGRATE_FAIL = 51, + THP_MIGRATION_SUCCESS = 52, + THP_MIGRATION_FAIL = 53, + THP_MIGRATION_SPLIT = 54, + COMPACTMIGRATE_SCANNED = 55, + COMPACTFREE_SCANNED = 56, + COMPACTISOLATED = 57, + COMPACTSTALL = 58, + COMPACTFAIL = 59, + COMPACTSUCCESS = 60, + KCOMPACTD_WAKE = 61, + KCOMPACTD_MIGRATE_SCANNED = 62, + KCOMPACTD_FREE_SCANNED = 63, + HTLB_BUDDY_PGALLOC = 64, + HTLB_BUDDY_PGALLOC_FAIL = 65, + UNEVICTABLE_PGCULLED = 66, + UNEVICTABLE_PGSCANNED = 67, + UNEVICTABLE_PGRESCUED = 68, + UNEVICTABLE_PGMLOCKED = 69, + UNEVICTABLE_PGMUNLOCKED = 70, + UNEVICTABLE_PGCLEARED = 71, + UNEVICTABLE_PGSTRANDED = 72, + THP_FAULT_ALLOC = 73, + THP_FAULT_FALLBACK = 74, + THP_FAULT_FALLBACK_CHARGE = 75, + THP_COLLAPSE_ALLOC = 76, + THP_COLLAPSE_ALLOC_FAILED = 77, + THP_FILE_ALLOC = 78, + THP_FILE_FALLBACK = 79, + THP_FILE_FALLBACK_CHARGE = 80, + THP_FILE_MAPPED = 81, + THP_SPLIT_PAGE = 82, + THP_SPLIT_PAGE_FAILED = 83, + THP_DEFERRED_SPLIT_PAGE = 84, + THP_SPLIT_PMD = 85, + THP_ZERO_PAGE_ALLOC = 86, + THP_ZERO_PAGE_ALLOC_FAILED = 87, + THP_SWPOUT = 88, + THP_SWPOUT_FALLBACK = 89, + BALLOON_INFLATE = 90, + BALLOON_DEFLATE = 91, + BALLOON_MIGRATE = 92, + SWAP_RA = 93, + SWAP_RA_HIT = 94, + NR_VM_EVENT_ITEMS = 95, +}; + +struct seq_operations { + void * (*start)(struct seq_file *, loff_t *); + void (*stop)(struct seq_file *, void *); + void * (*next)(struct seq_file *, void *, loff_t *); + int (*show)(struct seq_file *, void *); +}; + +struct ring_buffer_event { + u32 type_len: 5; + u32 time_delta: 27; + u32 array[0]; +}; + +struct seq_buf { + char *buffer; + size_t size; + size_t len; + loff_t readpos; +}; + +struct trace_seq { + char buffer[4096]; + struct seq_buf seq; + int full; +}; + +enum perf_sw_ids { + PERF_COUNT_SW_CPU_CLOCK = 0, + PERF_COUNT_SW_TASK_CLOCK = 1, + PERF_COUNT_SW_PAGE_FAULTS = 2, + PERF_COUNT_SW_CONTEXT_SWITCHES = 3, + PERF_COUNT_SW_CPU_MIGRATIONS = 4, + PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, + PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, + PERF_COUNT_SW_EMULATION_FAULTS = 8, + PERF_COUNT_SW_DUMMY = 9, + PERF_COUNT_SW_BPF_OUTPUT = 10, + PERF_COUNT_SW_MAX = 11, +}; + +union perf_mem_data_src { + __u64 val; + struct { + __u64 mem_op: 5; + __u64 mem_lvl: 14; + __u64 mem_snoop: 5; + __u64 mem_lock: 2; + __u64 mem_dtlb: 7; + __u64 mem_lvl_num: 4; + __u64 mem_remote: 1; + __u64 mem_snoopx: 2; + __u64 mem_rsvd: 24; + }; +}; + +struct perf_branch_entry { + __u64 from; + __u64 to; + __u64 mispred: 1; + __u64 predicted: 1; + __u64 in_tx: 1; + __u64 abort: 1; + __u64 cycles: 16; + __u64 type: 4; + __u64 reserved: 40; +}; + +struct new_utsname { + char sysname[65]; + char nodename[65]; + char release[65]; + char version[65]; + char machine[65]; + char domainname[65]; +}; + +struct uts_namespace { + struct kref kref; + struct new_utsname name; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct ns_common ns; +}; + +struct cgroup_namespace { + refcount_t count; + struct ns_common ns; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct css_set *root_cset; +}; + +struct nsset { + unsigned int flags; + struct nsproxy *nsproxy; + struct fs_struct *fs; + const struct cred *cred; +}; + +struct proc_ns_operations { + const char *name; + const char *real_ns_name; + int type; + struct ns_common * (*get)(struct task_struct *); + void (*put)(struct ns_common *); + int (*install)(struct nsset *, struct ns_common *); + struct user_namespace * (*owner)(struct ns_common *); + struct ns_common * (*get_parent)(struct ns_common *); 
+}; + +struct ucounts { + struct hlist_node node; + struct user_namespace *ns; + kuid_t uid; + int count; + atomic_t ucount[10]; +}; + +struct iovec { + void *iov_base; + __kernel_size_t iov_len; +}; + +struct kvec { + void *iov_base; + size_t iov_len; +}; + +struct perf_regs { + __u64 abi; + struct pt_regs *regs; +}; + +struct u64_stats_sync {}; + +struct bpf_cgroup_storage_key { + __u64 cgroup_inode_id; + __u32 attach_type; +}; + +struct bpf_cgroup_storage; + +struct bpf_prog_array_item { + struct bpf_prog *prog; + struct bpf_cgroup_storage *cgroup_storage[2]; +}; + +struct bpf_storage_buffer; + +struct bpf_cgroup_storage_map; + +struct bpf_cgroup_storage { + union { + struct bpf_storage_buffer *buf; + void *percpu_buf; + }; + struct bpf_cgroup_storage_map *map; + struct bpf_cgroup_storage_key key; + struct list_head list_map; + struct list_head list_cg; + struct rb_node node; + struct callback_head rcu; +}; + +struct bpf_prog_array { + struct callback_head rcu; + struct bpf_prog_array_item items[0]; +}; + +struct bpf_storage_buffer { + struct callback_head rcu; + char data[0]; +}; + +struct psi_group_cpu { + seqcount_t seq; + unsigned int tasks[4]; + u32 state_mask; + u32 times[6]; + u64 state_start; + long: 64; + u32 times_prev[12]; + long: 64; + long: 64; +}; + +struct cgroup_taskset; + +struct cftype; + +struct cgroup_subsys { + struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *); + int (*css_online)(struct cgroup_subsys_state *); + void (*css_offline)(struct cgroup_subsys_state *); + void (*css_released)(struct cgroup_subsys_state *); + void (*css_free)(struct cgroup_subsys_state *); + void (*css_reset)(struct cgroup_subsys_state *); + void (*css_rstat_flush)(struct cgroup_subsys_state *, int); + int (*css_extra_stat_show)(struct seq_file *, struct cgroup_subsys_state *); + int (*can_attach)(struct cgroup_taskset *); + void (*cancel_attach)(struct cgroup_taskset *); + void (*attach)(struct cgroup_taskset *); + void (*post_attach)(); + int (*can_fork)(struct task_struct *, struct css_set *); + void (*cancel_fork)(struct task_struct *, struct css_set *); + void (*fork)(struct task_struct *); + void (*exit)(struct task_struct *); + void (*release)(struct task_struct *); + void (*bind)(struct cgroup_subsys_state *); + bool early_init: 1; + bool implicit_on_dfl: 1; + bool threaded: 1; + bool broken_hierarchy: 1; + bool warned_broken_hierarchy: 1; + int id; + const char *name; + const char *legacy_name; + struct cgroup_root *root; + struct idr css_idr; + struct list_head cfts; + struct cftype *dfl_cftypes; + struct cftype *legacy_cftypes; + unsigned int depends_on; +}; + +struct cgroup_rstat_cpu { + struct u64_stats_sync bsync; + struct cgroup_base_stat bstat; + struct cgroup_base_stat last_bstat; + struct cgroup *updated_children; + struct cgroup *updated_next; +}; + +struct cgroup_root { + struct kernfs_root *kf_root; + unsigned int subsys_mask; + int hierarchy_id; + struct cgroup cgrp; + u64 cgrp_ancestor_id_storage; + atomic_t nr_cgrps; + struct list_head root_list; + unsigned int flags; + char release_agent_path[4096]; + char name[64]; +}; + +struct cftype { + char name[64]; + long unsigned int private; + size_t max_write_len; + unsigned int flags; + unsigned int file_offset; + struct cgroup_subsys *ss; + struct list_head node; + struct kernfs_ops *kf_ops; + int (*open)(struct kernfs_open_file *); + void (*release)(struct kernfs_open_file *); + u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *); + s64 (*read_s64)(struct cgroup_subsys_state *, struct 
cftype *); + int (*seq_show)(struct seq_file *, void *); + void * (*seq_start)(struct seq_file *, loff_t *); + void * (*seq_next)(struct seq_file *, void *, loff_t *); + void (*seq_stop)(struct seq_file *, void *); + int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64); + int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64); + ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t); + __poll_t (*poll)(struct kernfs_open_file *, struct poll_table_struct *); +}; + +enum kmalloc_cache_type { + KMALLOC_NORMAL = 0, + KMALLOC_RECLAIM = 1, + KMALLOC_DMA = 2, + NR_KMALLOC_TYPES = 3, +}; + +struct perf_callchain_entry { + __u64 nr; + __u64 ip[0]; +}; + +typedef long unsigned int (*perf_copy_f)(void *, const void *, long unsigned int, long unsigned int); + +struct perf_raw_frag { + union { + struct perf_raw_frag *next; + long unsigned int pad; + }; + perf_copy_f copy; + void *data; + u32 size; +} __attribute__((packed)); + +struct perf_raw_record { + struct perf_raw_frag frag; + u32 size; +}; + +struct perf_branch_stack { + __u64 nr; + __u64 hw_idx; + struct perf_branch_entry entries[0]; +}; + +struct perf_cpu_context; + +struct perf_output_handle; + +struct pmu { + struct list_head entry; + struct module *module; + struct device *dev; + const struct attribute_group **attr_groups; + const struct attribute_group **attr_update; + const char *name; + int type; + int capabilities; + int *pmu_disable_count; + struct perf_cpu_context *pmu_cpu_context; + atomic_t exclusive_cnt; + int task_ctx_nr; + int hrtimer_interval_ms; + unsigned int nr_addr_filters; + void (*pmu_enable)(struct pmu *); + void (*pmu_disable)(struct pmu *); + int (*event_init)(struct perf_event *); + void (*event_mapped)(struct perf_event *, struct mm_struct *); + void (*event_unmapped)(struct perf_event *, struct mm_struct *); + int (*add)(struct perf_event *, int); + void (*del)(struct perf_event *, int); + void (*start)(struct perf_event *, int); + void (*stop)(struct perf_event *, int); + void (*read)(struct perf_event *); + void (*start_txn)(struct pmu *, unsigned int); + int (*commit_txn)(struct pmu *); + void (*cancel_txn)(struct pmu *); + int (*event_idx)(struct perf_event *); + void (*sched_task)(struct perf_event_context *, bool); + struct kmem_cache *task_ctx_cache; + void (*swap_task_ctx)(struct perf_event_context *, struct perf_event_context *); + void * (*setup_aux)(struct perf_event *, void **, int, bool); + void (*free_aux)(void *); + long int (*snapshot_aux)(struct perf_event *, struct perf_output_handle *, long unsigned int); + int (*addr_filters_validate)(struct list_head *); + void (*addr_filters_sync)(struct perf_event *); + int (*aux_output_match)(struct perf_event *); + int (*filter_match)(struct perf_event *); + int (*check_period)(struct perf_event *, u64); +}; + +struct perf_cpu_context { + struct perf_event_context ctx; + struct perf_event_context *task_ctx; + int active_oncpu; + int exclusive; + raw_spinlock_t hrtimer_lock; + struct hrtimer hrtimer; + ktime_t hrtimer_interval; + unsigned int hrtimer_active; + struct perf_cgroup *cgrp; + struct list_head cgrp_cpuctx_entry; + int sched_cb_usage; + int online; + int heap_size; + struct perf_event **heap; + struct perf_event *heap_default[2]; +}; + +struct perf_output_handle { + struct perf_event *event; + struct perf_buffer *rb; + long unsigned int wakeup; + long unsigned int size; + u64 aux_flags; + union { + void *addr; + long unsigned int head; + }; + int page; +}; + +struct perf_addr_filter_range { + long unsigned 
int start; + long unsigned int size; +}; + +struct perf_sample_data { + u64 addr; + struct perf_raw_record *raw; + struct perf_branch_stack *br_stack; + u64 period; + u64 weight; + u64 txn; + union perf_mem_data_src data_src; + u64 type; + u64 ip; + struct { + u32 pid; + u32 tid; + } tid_entry; + u64 time; + u64 id; + u64 stream_id; + struct { + u32 cpu; + u32 reserved; + } cpu_entry; + struct perf_callchain_entry *callchain; + u64 aux_size; + struct perf_regs regs_user; + struct perf_regs regs_intr; + u64 stack_user_size; + u64 phys_addr; + u64 cgroup; + long: 64; +}; + +struct perf_cgroup_info; + +struct perf_cgroup { + struct cgroup_subsys_state css; + struct perf_cgroup_info *info; +}; + +struct perf_cgroup_info { + u64 time; + u64 timestamp; +}; + +struct trace_entry { + short unsigned int type; + unsigned char flags; + unsigned char preempt_count; + int pid; +}; + +struct trace_array; + +struct tracer; + +struct array_buffer; + +struct ring_buffer_iter; + +struct trace_iterator { + struct trace_array *tr; + struct tracer *trace; + struct array_buffer *array_buffer; + void *private; + int cpu_file; + struct mutex mutex; + struct ring_buffer_iter **buffer_iter; + long unsigned int iter_flags; + void *temp; + unsigned int temp_size; + struct trace_seq tmp_seq; + cpumask_var_t started; + bool snapshot; + struct trace_seq seq; + struct trace_entry *ent; + long unsigned int lost_events; + int leftover; + int ent_size; + int cpu; + u64 ts; + loff_t pos; + long int idx; +}; + +enum print_line_t { + TRACE_TYPE_PARTIAL_LINE = 0, + TRACE_TYPE_HANDLED = 1, + TRACE_TYPE_UNHANDLED = 2, + TRACE_TYPE_NO_CONSUME = 3, +}; + +typedef enum print_line_t (*trace_print_func)(struct trace_iterator *, int, struct trace_event *); + +struct trace_event_functions { + trace_print_func trace; + trace_print_func raw; + trace_print_func hex; + trace_print_func binary; +}; + +enum trace_reg { + TRACE_REG_REGISTER = 0, + TRACE_REG_UNREGISTER = 1, + TRACE_REG_PERF_REGISTER = 2, + TRACE_REG_PERF_UNREGISTER = 3, + TRACE_REG_PERF_OPEN = 4, + TRACE_REG_PERF_CLOSE = 5, + TRACE_REG_PERF_ADD = 6, + TRACE_REG_PERF_DEL = 7, +}; + +struct trace_event_fields { + const char *type; + union { + struct { + const char *name; + const int size; + const int align; + const int is_signed; + const int filter_type; + }; + int (*define_fields)(struct trace_event_call *); + }; +}; + +struct trace_event_class { + const char *system; + void *probe; + void *perf_probe; + int (*reg)(struct trace_event_call *, enum trace_reg, void *); + struct trace_event_fields *fields_array; + struct list_head * (*get_fields)(struct trace_event_call *); + struct list_head fields; + int (*raw_init)(struct trace_event_call *); +}; + +struct trace_buffer; + +struct trace_event_file; + +struct trace_event_buffer { + struct trace_buffer *buffer; + struct ring_buffer_event *event; + struct trace_event_file *trace_file; + void *entry; + long unsigned int flags; + int pc; + struct pt_regs *regs; +}; + +struct trace_subsystem_dir; + +struct trace_event_file { + struct list_head list; + struct trace_event_call *event_call; + struct event_filter *filter; + struct dentry *dir; + struct trace_array *tr; + struct trace_subsystem_dir *system; + struct list_head triggers; + long unsigned int flags; + atomic_t sm_ref; + atomic_t tm_ref; +}; + +enum { + TRACE_EVENT_FL_FILTERED_BIT = 0, + TRACE_EVENT_FL_CAP_ANY_BIT = 1, + TRACE_EVENT_FL_NO_SET_FILTER_BIT = 2, + TRACE_EVENT_FL_IGNORE_ENABLE_BIT = 3, + TRACE_EVENT_FL_TRACEPOINT_BIT = 4, + TRACE_EVENT_FL_KPROBE_BIT = 5, + 
TRACE_EVENT_FL_UPROBE_BIT = 6, +}; + +enum { + TRACE_EVENT_FL_FILTERED = 1, + TRACE_EVENT_FL_CAP_ANY = 2, + TRACE_EVENT_FL_NO_SET_FILTER = 4, + TRACE_EVENT_FL_IGNORE_ENABLE = 8, + TRACE_EVENT_FL_TRACEPOINT = 16, + TRACE_EVENT_FL_KPROBE = 32, + TRACE_EVENT_FL_UPROBE = 64, +}; + +enum { + EVENT_FILE_FL_ENABLED_BIT = 0, + EVENT_FILE_FL_RECORDED_CMD_BIT = 1, + EVENT_FILE_FL_RECORDED_TGID_BIT = 2, + EVENT_FILE_FL_FILTERED_BIT = 3, + EVENT_FILE_FL_NO_SET_FILTER_BIT = 4, + EVENT_FILE_FL_SOFT_MODE_BIT = 5, + EVENT_FILE_FL_SOFT_DISABLED_BIT = 6, + EVENT_FILE_FL_TRIGGER_MODE_BIT = 7, + EVENT_FILE_FL_TRIGGER_COND_BIT = 8, + EVENT_FILE_FL_PID_FILTER_BIT = 9, + EVENT_FILE_FL_WAS_ENABLED_BIT = 10, +}; + +enum { + EVENT_FILE_FL_ENABLED = 1, + EVENT_FILE_FL_RECORDED_CMD = 2, + EVENT_FILE_FL_RECORDED_TGID = 4, + EVENT_FILE_FL_FILTERED = 8, + EVENT_FILE_FL_NO_SET_FILTER = 16, + EVENT_FILE_FL_SOFT_MODE = 32, + EVENT_FILE_FL_SOFT_DISABLED = 64, + EVENT_FILE_FL_TRIGGER_MODE = 128, + EVENT_FILE_FL_TRIGGER_COND = 256, + EVENT_FILE_FL_PID_FILTER = 512, + EVENT_FILE_FL_WAS_ENABLED = 1024, +}; + +enum { + FILTER_OTHER = 0, + FILTER_STATIC_STRING = 1, + FILTER_DYN_STRING = 2, + FILTER_PTR_STRING = 3, + FILTER_TRACE_FN = 4, + FILTER_COMM = 5, + FILTER_CPU = 6, +}; + +struct fwnode_reference_args; + +struct fwnode_endpoint; + +struct fwnode_operations { + struct fwnode_handle * (*get)(struct fwnode_handle *); + void (*put)(struct fwnode_handle *); + bool (*device_is_available)(const struct fwnode_handle *); + const void * (*device_get_match_data)(const struct fwnode_handle *, const struct device *); + bool (*property_present)(const struct fwnode_handle *, const char *); + int (*property_read_int_array)(const struct fwnode_handle *, const char *, unsigned int, void *, size_t); + int (*property_read_string_array)(const struct fwnode_handle *, const char *, const char **, size_t); + const char * (*get_name)(const struct fwnode_handle *); + const char * (*get_name_prefix)(const struct fwnode_handle *); + struct fwnode_handle * (*get_parent)(const struct fwnode_handle *); + struct fwnode_handle * (*get_next_child_node)(const struct fwnode_handle *, struct fwnode_handle *); + struct fwnode_handle * (*get_named_child_node)(const struct fwnode_handle *, const char *); + int (*get_reference_args)(const struct fwnode_handle *, const char *, const char *, unsigned int, unsigned int, struct fwnode_reference_args *); + struct fwnode_handle * (*graph_get_next_endpoint)(const struct fwnode_handle *, struct fwnode_handle *); + struct fwnode_handle * (*graph_get_remote_endpoint)(const struct fwnode_handle *); + struct fwnode_handle * (*graph_get_port_parent)(struct fwnode_handle *); + int (*graph_parse_endpoint)(const struct fwnode_handle *, struct fwnode_endpoint *); + int (*add_links)(const struct fwnode_handle *, struct device *); +}; + +struct fwnode_endpoint { + unsigned int port; + unsigned int id; + const struct fwnode_handle *local_fwnode; +}; + +struct fwnode_reference_args { + struct fwnode_handle *fwnode; + unsigned int nargs; + u64 args[8]; +}; + +struct property { + char *name; + int length; + void *value; + struct property *next; + long unsigned int _flags; + struct bin_attribute attr; +}; + +struct irq_fwspec { + struct fwnode_handle *fwnode; + int param_count; + u32 param[16]; +}; + +struct irq_data; + +struct irq_domain_ops { + int (*match)(struct irq_domain *, struct device_node *, enum irq_domain_bus_token); + int (*select)(struct irq_domain *, struct irq_fwspec *, enum irq_domain_bus_token); + int (*map)(struct 
irq_domain *, unsigned int, irq_hw_number_t); + void (*unmap)(struct irq_domain *, unsigned int); + int (*xlate)(struct irq_domain *, struct device_node *, const u32 *, unsigned int, long unsigned int *, unsigned int *); + int (*alloc)(struct irq_domain *, unsigned int, unsigned int, void *); + void (*free)(struct irq_domain *, unsigned int, unsigned int); + int (*activate)(struct irq_domain *, struct irq_data *, bool); + void (*deactivate)(struct irq_domain *, struct irq_data *); + int (*translate)(struct irq_domain *, struct irq_fwspec *, long unsigned int *, unsigned int *); +}; + +struct xbc_node { + u16 next; + u16 child; + u16 parent; + u16 data; +}; + +enum wb_stat_item { + WB_RECLAIMABLE = 0, + WB_WRITEBACK = 1, + WB_DIRTIED = 2, + WB_WRITTEN = 3, + NR_WB_STAT_ITEMS = 4, +}; + +struct disk_stats; + +struct partition_meta_info; + +struct hd_struct { + sector_t start_sect; + sector_t nr_sects; + long unsigned int stamp; + struct disk_stats *dkstats; + struct percpu_ref ref; + struct device __dev; + struct kobject *holder_dir; + int policy; + int partno; + struct partition_meta_info *info; + struct rcu_work rcu_work; +}; + +struct disk_part_tbl; + +struct block_device_operations; + +struct timer_rand_state; + +struct disk_events; + +struct cdrom_device_info; + +struct badblocks; + +struct gendisk { + int major; + int first_minor; + int minors; + char disk_name[32]; + short unsigned int events; + short unsigned int event_flags; + struct disk_part_tbl *part_tbl; + struct hd_struct part0; + const struct block_device_operations *fops; + struct request_queue *queue; + void *private_data; + int flags; + long unsigned int state; + struct rw_semaphore lookup_sem; + struct kobject *slave_dir; + struct timer_rand_state *random; + atomic_t sync_io; + struct disk_events *ev; + struct kobject integrity_kobj; + struct cdrom_device_info *cdi; + int node_id; + struct badblocks *bb; + struct lockdep_map lockdep_map; +}; + +struct bio_integrity_payload { + struct bio *bip_bio; + struct bvec_iter bip_iter; + short unsigned int bip_slab; + short unsigned int bip_vcnt; + short unsigned int bip_max_vcnt; + short unsigned int bip_flags; + struct bvec_iter bio_iter; + struct work_struct bip_work; + struct bio_vec *bip_vec; + struct bio_vec bip_inline_vecs[0]; +}; + +struct blkg_iostat { + u64 bytes[3]; + u64 ios[3]; +}; + +struct blkg_iostat_set { + struct u64_stats_sync sync; + struct blkg_iostat cur; + struct blkg_iostat last; +}; + +struct blkcg; + +struct blkg_policy_data; + +struct blkcg_gq { + struct request_queue *q; + struct list_head q_node; + struct hlist_node blkcg_node; + struct blkcg *blkcg; + struct blkcg_gq *parent; + struct percpu_ref refcnt; + bool online; + struct blkg_iostat_set *iostat_cpu; + struct blkg_iostat_set iostat; + struct blkg_policy_data *pd[5]; + spinlock_t async_bio_lock; + struct bio_list async_bios; + struct work_struct async_bio_work; + atomic_t use_delay; + atomic64_t delay_nsec; + atomic64_t delay_start; + u64 last_delay; + int last_use; + struct callback_head callback_head; +}; + +typedef unsigned int blk_qc_t; + +struct partition_meta_info { + char uuid[37]; + u8 volname[64]; +}; + +struct disk_part_tbl { + struct callback_head callback_head; + int len; + struct hd_struct *last_lookup; + struct hd_struct *part[0]; +}; + +struct blk_integrity_iter; + +typedef blk_status_t integrity_processing_fn(struct blk_integrity_iter *); + +typedef void integrity_prepare_fn(struct request *); + +typedef void integrity_complete_fn(struct request *, unsigned int); + +struct 
blk_integrity_profile { + integrity_processing_fn *generate_fn; + integrity_processing_fn *verify_fn; + integrity_prepare_fn *prepare_fn; + integrity_complete_fn *complete_fn; + const char *name; +}; + +struct blk_zone; + +typedef int (*report_zones_cb)(struct blk_zone *, unsigned int, void *); + +struct hd_geometry; + +struct pr_ops; + +struct block_device_operations { + blk_qc_t (*submit_bio)(struct bio *); + int (*open)(struct block_device *, fmode_t); + void (*release)(struct gendisk *, fmode_t); + int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int); + int (*ioctl)(struct block_device *, fmode_t, unsigned int, long unsigned int); + int (*compat_ioctl)(struct block_device *, fmode_t, unsigned int, long unsigned int); + unsigned int (*check_events)(struct gendisk *, unsigned int); + void (*unlock_native_capacity)(struct gendisk *); + int (*revalidate_disk)(struct gendisk *); + int (*getgeo)(struct block_device *, struct hd_geometry *); + void (*swap_slot_free_notify)(struct block_device *, long unsigned int); + int (*report_zones)(struct gendisk *, sector_t, unsigned int, report_zones_cb, void *); + char * (*devnode)(struct gendisk *, umode_t *); + struct module *owner; + const struct pr_ops *pr_ops; +}; + +struct sg_io_v4 { + __s32 guard; + __u32 protocol; + __u32 subprotocol; + __u32 request_len; + __u64 request; + __u64 request_tag; + __u32 request_attr; + __u32 request_priority; + __u32 request_extra; + __u32 max_response_len; + __u64 response; + __u32 dout_iovec_count; + __u32 dout_xfer_len; + __u32 din_iovec_count; + __u32 din_xfer_len; + __u64 dout_xferp; + __u64 din_xferp; + __u32 timeout; + __u32 flags; + __u64 usr_ptr; + __u32 spare_in; + __u32 driver_status; + __u32 transport_status; + __u32 device_status; + __u32 retry_delay; + __u32 info; + __u32 duration; + __u32 response_len; + __s32 din_resid; + __s32 dout_resid; + __u64 generated_tag; + __u32 spare_out; + __u32 padding; +}; + +struct bsg_ops { + int (*check_proto)(struct sg_io_v4 *); + int (*fill_hdr)(struct request *, struct sg_io_v4 *, fmode_t); + int (*complete_rq)(struct request *, struct sg_io_v4 *); + void (*free_rq)(struct request *); +}; + +typedef __u32 req_flags_t; + +typedef void rq_end_io_fn(struct request *, blk_status_t); + +enum mq_rq_state { + MQ_RQ_IDLE = 0, + MQ_RQ_IN_FLIGHT = 1, + MQ_RQ_COMPLETE = 2, +}; + +struct blk_ksm_keyslot; + +struct request { + struct request_queue *q; + struct blk_mq_ctx *mq_ctx; + struct blk_mq_hw_ctx *mq_hctx; + unsigned int cmd_flags; + req_flags_t rq_flags; + int tag; + int internal_tag; + unsigned int __data_len; + sector_t __sector; + struct bio *bio; + struct bio *biotail; + struct list_head queuelist; + union { + struct hlist_node hash; + struct list_head ipi_list; + }; + union { + struct rb_node rb_node; + struct bio_vec special_vec; + void *completion_data; + int error_count; + }; + union { + struct { + struct io_cq *icq; + void *priv[2]; + } elv; + struct { + unsigned int seq; + struct list_head list; + rq_end_io_fn *saved_end_io; + } flush; + }; + struct gendisk *rq_disk; + struct hd_struct *part; + u64 alloc_time_ns; + u64 start_time_ns; + u64 io_start_time_ns; + short unsigned int wbt_flags; + short unsigned int stats_sectors; + short unsigned int nr_phys_segments; + short unsigned int nr_integrity_segments; + struct bio_crypt_ctx *crypt_ctx; + struct blk_ksm_keyslot *crypt_keyslot; + short unsigned int write_hint; + short unsigned int ioprio; + enum mq_rq_state state; + refcount_t ref; + unsigned int timeout; + long unsigned int 
deadline; + union { + struct __call_single_data csd; + u64 fifo_time; + }; + rq_end_io_fn *end_io; + void *end_io_data; +}; + +struct blk_zone { + __u64 start; + __u64 len; + __u64 wp; + __u8 type; + __u8 cond; + __u8 non_seq; + __u8 reset; + __u8 resv[4]; + __u64 capacity; + __u8 reserved[24]; +}; + +enum elv_merge { + ELEVATOR_NO_MERGE = 0, + ELEVATOR_FRONT_MERGE = 1, + ELEVATOR_BACK_MERGE = 2, + ELEVATOR_DISCARD_MERGE = 3, +}; + +struct elevator_type; + +struct blk_mq_alloc_data; + +struct elevator_mq_ops { + int (*init_sched)(struct request_queue *, struct elevator_type *); + void (*exit_sched)(struct elevator_queue *); + int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int); + void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); + void (*depth_updated)(struct blk_mq_hw_ctx *); + bool (*allow_merge)(struct request_queue *, struct request *, struct bio *); + bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *, unsigned int); + int (*request_merge)(struct request_queue *, struct request **, struct bio *); + void (*request_merged)(struct request_queue *, struct request *, enum elv_merge); + void (*requests_merged)(struct request_queue *, struct request *, struct request *); + void (*limit_depth)(unsigned int, struct blk_mq_alloc_data *); + void (*prepare_request)(struct request *); + void (*finish_request)(struct request *); + void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool); + struct request * (*dispatch_request)(struct blk_mq_hw_ctx *); + bool (*has_work)(struct blk_mq_hw_ctx *); + void (*completed_request)(struct request *, u64); + void (*requeue_request)(struct request *); + struct request * (*former_request)(struct request_queue *, struct request *); + struct request * (*next_request)(struct request_queue *, struct request *); + void (*init_icq)(struct io_cq *); + void (*exit_icq)(struct io_cq *); +}; + +struct elv_fs_entry; + +struct elevator_type { + struct kmem_cache *icq_cache; + struct elevator_mq_ops ops; + size_t icq_size; + size_t icq_align; + struct elv_fs_entry *elevator_attrs; + const char *elevator_name; + const char *elevator_alias; + const unsigned int elevator_features; + struct module *elevator_owner; + char icq_cache_name[22]; + struct list_head list; +}; + +struct elevator_queue { + struct elevator_type *type; + void *elevator_data; + struct kobject kobj; + struct mutex sysfs_lock; + unsigned int registered: 1; + struct hlist_head hash[64]; +}; + +struct elv_fs_entry { + struct attribute attr; + ssize_t (*show)(struct elevator_queue *, char *); + ssize_t (*store)(struct elevator_queue *, const char *, size_t); +}; + +enum blk_eh_timer_return { + BLK_EH_DONE = 0, + BLK_EH_RESET_TIMER = 1, +}; + +struct blk_mq_queue_data; + +struct blk_mq_ops { + blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *); + void (*commit_rqs)(struct blk_mq_hw_ctx *); + bool (*get_budget)(struct request_queue *); + void (*put_budget)(struct request_queue *); + enum blk_eh_timer_return (*timeout)(struct request *, bool); + int (*poll)(struct blk_mq_hw_ctx *); + void (*complete)(struct request *); + int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int); + void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); + int (*init_request)(struct blk_mq_tag_set *, struct request *, unsigned int, unsigned int); + void (*exit_request)(struct blk_mq_tag_set *, struct request *, unsigned int); + void (*initialize_rq_fn)(struct request *); + void (*cleanup_rq)(struct request *); + bool (*busy)(struct request_queue *); + int 
(*map_queues)(struct blk_mq_tag_set *); +}; + +struct blk_integrity_iter { + void *prot_buf; + void *data_buf; + sector_t seed; + unsigned int data_size; + short unsigned int interval; + const char *disk_name; +}; + +enum pr_type { + PR_WRITE_EXCLUSIVE = 1, + PR_EXCLUSIVE_ACCESS = 2, + PR_WRITE_EXCLUSIVE_REG_ONLY = 3, + PR_EXCLUSIVE_ACCESS_REG_ONLY = 4, + PR_WRITE_EXCLUSIVE_ALL_REGS = 5, + PR_EXCLUSIVE_ACCESS_ALL_REGS = 6, +}; + +struct pr_ops { + int (*pr_register)(struct block_device *, u64, u64, u32); + int (*pr_reserve)(struct block_device *, u64, enum pr_type, u32); + int (*pr_release)(struct block_device *, u64, enum pr_type); + int (*pr_preempt)(struct block_device *, u64, u64, enum pr_type, bool); + int (*pr_clear)(struct block_device *, u64); +}; + +enum blkg_iostat_type { + BLKG_IOSTAT_READ = 0, + BLKG_IOSTAT_WRITE = 1, + BLKG_IOSTAT_DISCARD = 2, + BLKG_IOSTAT_NR = 3, +}; + +struct blkcg_policy_data; + +struct blkcg { + struct cgroup_subsys_state css; + spinlock_t lock; + refcount_t online_pin; + struct xarray blkg_tree; + struct blkcg_gq *blkg_hint; + struct hlist_head blkg_list; + struct blkcg_policy_data *cpd[5]; + struct list_head all_blkcgs_node; + struct list_head cgwb_list; +}; + +struct blkcg_policy_data { + struct blkcg *blkcg; + int plid; +}; + +struct blkg_policy_data { + struct blkcg_gq *blkg; + int plid; +}; + +enum memcg_stat_item { + MEMCG_SWAP = 37, + MEMCG_SOCK = 38, + MEMCG_PERCPU_B = 39, + MEMCG_NR_STAT = 40, +}; + +enum memcg_memory_event { + MEMCG_LOW = 0, + MEMCG_HIGH = 1, + MEMCG_MAX = 2, + MEMCG_OOM = 3, + MEMCG_OOM_KILL = 4, + MEMCG_SWAP_HIGH = 5, + MEMCG_SWAP_MAX = 6, + MEMCG_SWAP_FAIL = 7, + MEMCG_NR_MEMORY_EVENTS = 8, +}; + +enum mem_cgroup_events_target { + MEM_CGROUP_TARGET_THRESH = 0, + MEM_CGROUP_TARGET_SOFTLIMIT = 1, + MEM_CGROUP_NTARGETS = 2, +}; + +struct memcg_vmstats_percpu { + long int stat[40]; + long unsigned int events[95]; + long unsigned int nr_page_events; + long unsigned int targets[2]; +}; + +struct mem_cgroup_reclaim_iter { + struct mem_cgroup *position; + unsigned int generation; +}; + +struct lruvec_stat { + long int count[37]; +}; + +struct memcg_shrinker_map { + struct callback_head rcu; + long unsigned int map[0]; +}; + +struct mem_cgroup_per_node { + struct lruvec lruvec; + struct lruvec_stat *lruvec_stat_local; + struct lruvec_stat *lruvec_stat_cpu; + atomic_long_t lruvec_stat[37]; + long unsigned int lru_zone_size[20]; + struct mem_cgroup_reclaim_iter iter; + struct memcg_shrinker_map *shrinker_map; + struct rb_node tree_node; + long unsigned int usage_in_excess; + bool on_tree; + struct mem_cgroup *memcg; +}; + +struct eventfd_ctx; + +struct mem_cgroup_threshold { + struct eventfd_ctx *eventfd; + long unsigned int threshold; +}; + +struct mem_cgroup_threshold_ary { + int current_threshold; + unsigned int size; + struct mem_cgroup_threshold entries[0]; +}; + +struct percpu_cluster { + struct swap_cluster_info index; + unsigned int next; +}; + +enum fs_value_type { + fs_value_is_undefined = 0, + fs_value_is_flag = 1, + fs_value_is_string = 2, + fs_value_is_blob = 3, + fs_value_is_filename = 4, + fs_value_is_file = 5, +}; + +struct fs_parameter { + const char *key; + enum fs_value_type type: 8; + union { + char *string; + void *blob; + struct filename *name; + struct file *file; + }; + size_t size; + int dirfd; +}; + +struct fc_log { + refcount_t usage; + u8 head; + u8 tail; + u8 need_free; + struct module *owner; + char *buffer[8]; +}; + +struct fs_context_operations { + void (*free)(struct fs_context *); + int (*dup)(struct 
fs_context *, struct fs_context *); + int (*parse_param)(struct fs_context *, struct fs_parameter *); + int (*parse_monolithic)(struct fs_context *, void *); + int (*get_tree)(struct fs_context *); + int (*reconfigure)(struct fs_context *); +}; + +struct fs_parse_result { + bool negated; + union { + bool boolean; + int int_32; + unsigned int uint_32; + u64 uint_64; + }; +}; + +struct trace_event_raw_initcall_level { + struct trace_entry ent; + u32 __data_loc_level; + char __data[0]; +}; + +struct trace_event_raw_initcall_start { + struct trace_entry ent; + initcall_t func; + char __data[0]; +}; + +struct trace_event_raw_initcall_finish { + struct trace_entry ent; + initcall_t func; + int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_initcall_level { + u32 level; +}; + +struct trace_event_data_offsets_initcall_start {}; + +struct trace_event_data_offsets_initcall_finish {}; + +typedef void (*btf_trace_initcall_level)(void *, const char *); + +typedef void (*btf_trace_initcall_start)(void *, initcall_t); + +typedef void (*btf_trace_initcall_finish)(void *, initcall_t, int); + +struct blacklist_entry { + struct list_head next; + char *buf; +}; + +typedef __u32 Elf32_Word; + +struct elf32_note { + Elf32_Word n_namesz; + Elf32_Word n_descsz; + Elf32_Word n_type; +}; + +enum pcpu_fc { + PCPU_FC_AUTO = 0, + PCPU_FC_EMBED = 1, + PCPU_FC_PAGE = 2, + PCPU_FC_NR = 3, +}; + +enum { + UNAME26 = 131072, + ADDR_NO_RANDOMIZE = 262144, + FDPIC_FUNCPTRS = 524288, + MMAP_PAGE_ZERO = 1048576, + ADDR_COMPAT_LAYOUT = 2097152, + READ_IMPLIES_EXEC = 4194304, + ADDR_LIMIT_32BIT = 8388608, + SHORT_INODE = 16777216, + WHOLE_SECONDS = 33554432, + STICKY_TIMEOUTS = 67108864, + ADDR_LIMIT_3GB = 134217728, +}; + +enum hrtimer_base_type { + HRTIMER_BASE_MONOTONIC = 0, + HRTIMER_BASE_REALTIME = 1, + HRTIMER_BASE_BOOTTIME = 2, + HRTIMER_BASE_TAI = 3, + HRTIMER_BASE_MONOTONIC_SOFT = 4, + HRTIMER_BASE_REALTIME_SOFT = 5, + HRTIMER_BASE_BOOTTIME_SOFT = 6, + HRTIMER_BASE_TAI_SOFT = 7, + HRTIMER_MAX_CLOCK_BASES = 8, +}; + +enum { + MM_FILEPAGES = 0, + MM_ANONPAGES = 1, + MM_SWAPENTS = 2, + MM_SHMEMPAGES = 3, + NR_MM_COUNTERS = 4, +}; + +enum rseq_cs_flags_bit { + RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT = 0, + RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT = 1, + RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT = 2, +}; + +enum uclamp_id { + UCLAMP_MIN = 0, + UCLAMP_MAX = 1, + UCLAMP_CNT = 2, +}; + +enum perf_event_task_context { + perf_invalid_context = 4294967295, + perf_hw_context = 0, + perf_sw_context = 1, + perf_nr_task_contexts = 2, +}; + +enum rseq_event_mask_bits { + RSEQ_EVENT_PREEMPT_BIT = 0, + RSEQ_EVENT_SIGNAL_BIT = 1, + RSEQ_EVENT_MIGRATE_BIT = 2, +}; + +enum { + PROC_ROOT_INO = 1, + PROC_IPC_INIT_INO = 4026531839, + PROC_UTS_INIT_INO = 4026531838, + PROC_USER_INIT_INO = 4026531837, + PROC_PID_INIT_INO = 4026531836, + PROC_CGROUP_INIT_INO = 4026531835, + PROC_TIME_INIT_INO = 4026531834, +}; + +typedef short int __s16; + +typedef __s16 s16; + +typedef __u16 __le16; + +typedef __u16 __be16; + +typedef __u32 __be32; + +typedef __u64 __be64; + +typedef __u32 __wsum; + +typedef unsigned int slab_flags_t; + +struct llist_head { + struct llist_node *first; +}; + +struct rhash_head { + struct rhash_head *next; +}; + +struct rhashtable; + +struct rhashtable_compare_arg { + struct rhashtable *ht; + const void *key; +}; + +typedef u32 (*rht_hashfn_t)(const void *, u32, u32); + +typedef u32 (*rht_obj_hashfn_t)(const void *, u32, u32); + +typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *, const void *); + +struct 
rhashtable_params { + u16 nelem_hint; + u16 key_len; + u16 key_offset; + u16 head_offset; + unsigned int max_size; + u16 min_size; + bool automatic_shrinking; + rht_hashfn_t hashfn; + rht_obj_hashfn_t obj_hashfn; + rht_obj_cmpfn_t obj_cmpfn; +}; + +struct bucket_table; + +struct rhashtable { + struct bucket_table *tbl; + unsigned int key_len; + unsigned int max_elems; + struct rhashtable_params p; + bool rhlist; + struct work_struct run_work; + struct mutex mutex; + spinlock_t lock; + atomic_t nelems; +}; + +struct fs_struct { + int users; + spinlock_t lock; + seqcount_spinlock_t seq; + int umask; + int in_exec; + struct path root; + struct path pwd; +}; + +struct pipe_buffer; + +struct watch_queue; + +struct pipe_inode_info { + struct mutex mutex; + wait_queue_head_t rd_wait; + wait_queue_head_t wr_wait; + unsigned int head; + unsigned int tail; + unsigned int max_usage; + unsigned int ring_size; + bool note_loss; + unsigned int nr_accounted; + unsigned int readers; + unsigned int writers; + unsigned int files; + unsigned int r_counter; + unsigned int w_counter; + struct page *tmp_page; + struct fasync_struct *fasync_readers; + struct fasync_struct *fasync_writers; + struct pipe_buffer *bufs; + struct user_struct *user; + struct watch_queue *watch_queue; +}; + +struct notifier_block; + +typedef int (*notifier_fn_t)(struct notifier_block *, long unsigned int, void *); + +struct notifier_block { + notifier_fn_t notifier_call; + struct notifier_block *next; + int priority; +}; + +struct blocking_notifier_head { + struct rw_semaphore rwsem; + struct notifier_block *head; +}; + +struct raw_notifier_head { + struct notifier_block *head; +}; + +struct vfsmount { + struct dentry *mnt_root; + struct super_block *mnt_sb; + int mnt_flags; +}; + +struct ld_semaphore { + atomic_long_t count; + raw_spinlock_t wait_lock; + unsigned int wait_readers; + struct list_head read_wait; + struct list_head write_wait; +}; + +typedef unsigned int tcflag_t; + +typedef unsigned char cc_t; + +typedef unsigned int speed_t; + +struct ktermios { + tcflag_t c_iflag; + tcflag_t c_oflag; + tcflag_t c_cflag; + tcflag_t c_lflag; + cc_t c_line; + cc_t c_cc[19]; + speed_t c_ispeed; + speed_t c_ospeed; +}; + +struct winsize { + short unsigned int ws_row; + short unsigned int ws_col; + short unsigned int ws_xpixel; + short unsigned int ws_ypixel; +}; + +struct tty_driver; + +struct tty_operations; + +struct tty_ldisc; + +struct termiox; + +struct tty_port; + +struct tty_struct { + int magic; + struct kref kref; + struct device *dev; + struct tty_driver *driver; + const struct tty_operations *ops; + int index; + struct ld_semaphore ldisc_sem; + struct tty_ldisc *ldisc; + struct mutex atomic_write_lock; + struct mutex legacy_mutex; + struct mutex throttle_mutex; + struct rw_semaphore termios_rwsem; + struct mutex winsize_mutex; + spinlock_t ctrl_lock; + spinlock_t flow_lock; + struct ktermios termios; + struct ktermios termios_locked; + struct termiox *termiox; + char name[64]; + struct pid *pgrp; + struct pid *session; + long unsigned int flags; + int count; + struct winsize winsize; + long unsigned int stopped: 1; + long unsigned int flow_stopped: 1; + int: 30; + long unsigned int unused: 62; + int hw_stopped; + long unsigned int ctrl_status: 8; + long unsigned int packet: 1; + int: 23; + long unsigned int unused_ctrl: 55; + unsigned int receive_room; + int flow_change; + struct tty_struct *link; + struct fasync_struct *fasync; + wait_queue_head_t write_wait; + wait_queue_head_t read_wait; + struct work_struct hangup_work; + 
void *disc_data; + void *driver_data; + spinlock_t files_lock; + struct list_head tty_files; + int closing; + unsigned char *write_buf; + int write_cnt; + struct work_struct SAK_work; + struct tty_port *port; +}; + +typedef struct { + size_t written; + size_t count; + union { + char *buf; + void *data; + } arg; + int error; +} read_descriptor_t; + +struct posix_acl_entry { + short int e_tag; + short unsigned int e_perm; + union { + kuid_t e_uid; + kgid_t e_gid; + }; +}; + +struct posix_acl { + refcount_t a_refcount; + struct callback_head a_rcu; + unsigned int a_count; + struct posix_acl_entry a_entries[0]; +}; + +typedef __u64 __addrpair; + +typedef __u32 __portpair; + +typedef struct { + struct net *net; +} possible_net_t; + +struct in6_addr { + union { + __u8 u6_addr8[16]; + __be16 u6_addr16[8]; + __be32 u6_addr32[4]; + } in6_u; +}; + +struct hlist_nulls_node { + struct hlist_nulls_node *next; + struct hlist_nulls_node **pprev; +}; + +struct proto; + +struct inet_timewait_death_row; + +struct sock_common { + union { + __addrpair skc_addrpair; + struct { + __be32 skc_daddr; + __be32 skc_rcv_saddr; + }; + }; + union { + unsigned int skc_hash; + __u16 skc_u16hashes[2]; + }; + union { + __portpair skc_portpair; + struct { + __be16 skc_dport; + __u16 skc_num; + }; + }; + short unsigned int skc_family; + volatile unsigned char skc_state; + unsigned char skc_reuse: 4; + unsigned char skc_reuseport: 1; + unsigned char skc_ipv6only: 1; + unsigned char skc_net_refcnt: 1; + int skc_bound_dev_if; + union { + struct hlist_node skc_bind_node; + struct hlist_node skc_portaddr_node; + }; + struct proto *skc_prot; + possible_net_t skc_net; + struct in6_addr skc_v6_daddr; + struct in6_addr skc_v6_rcv_saddr; + atomic64_t skc_cookie; + union { + long unsigned int skc_flags; + struct sock *skc_listener; + struct inet_timewait_death_row *skc_tw_dr; + }; + int skc_dontcopy_begin[0]; + union { + struct hlist_node skc_node; + struct hlist_nulls_node skc_nulls_node; + }; + short unsigned int skc_tx_queue_mapping; + short unsigned int skc_rx_queue_mapping; + union { + int skc_incoming_cpu; + u32 skc_rcv_wnd; + u32 skc_tw_rcv_nxt; + }; + refcount_t skc_refcnt; + int skc_dontcopy_end[0]; + union { + u32 skc_rxhash; + u32 skc_window_clamp; + u32 skc_tw_snd_nxt; + }; +}; + +typedef struct { + spinlock_t slock; + int owned; + wait_queue_head_t wq; +} socket_lock_t; + +struct sk_buff; + +struct sk_buff_head { + struct sk_buff *next; + struct sk_buff *prev; + __u32 qlen; + spinlock_t lock; +}; + +typedef u64 netdev_features_t; + +struct sock_cgroup_data { + union { + struct { + u8 is_data: 1; + u8 no_refcnt: 1; + u8 unused: 6; + u8 padding; + u16 prioidx; + u32 classid; + }; + u64 val; + }; +}; + +struct sk_filter; + +struct socket_wq; + +struct xfrm_policy; + +struct dst_entry; + +struct socket; + +struct net_device; + +struct sock_reuseport; + +struct bpf_local_storage; + +struct sock { + struct sock_common __sk_common; + socket_lock_t sk_lock; + atomic_t sk_drops; + int sk_rcvlowat; + struct sk_buff_head sk_error_queue; + struct sk_buff *sk_rx_skb_cache; + struct sk_buff_head sk_receive_queue; + struct { + atomic_t rmem_alloc; + int len; + struct sk_buff *head; + struct sk_buff *tail; + } sk_backlog; + int sk_forward_alloc; + unsigned int sk_ll_usec; + unsigned int sk_napi_id; + int sk_rcvbuf; + struct sk_filter *sk_filter; + union { + struct socket_wq *sk_wq; + struct socket_wq *sk_wq_raw; + }; + struct xfrm_policy *sk_policy[2]; + struct dst_entry *sk_rx_dst; + struct dst_entry *sk_dst_cache; + atomic_t 
sk_omem_alloc; + int sk_sndbuf; + int sk_wmem_queued; + refcount_t sk_wmem_alloc; + long unsigned int sk_tsq_flags; + union { + struct sk_buff *sk_send_head; + struct rb_root tcp_rtx_queue; + }; + struct sk_buff *sk_tx_skb_cache; + struct sk_buff_head sk_write_queue; + __s32 sk_peek_off; + int sk_write_pending; + __u32 sk_dst_pending_confirm; + u32 sk_pacing_status; + long int sk_sndtimeo; + struct timer_list sk_timer; + __u32 sk_priority; + __u32 sk_mark; + long unsigned int sk_pacing_rate; + long unsigned int sk_max_pacing_rate; + struct page_frag sk_frag; + netdev_features_t sk_route_caps; + netdev_features_t sk_route_nocaps; + netdev_features_t sk_route_forced_caps; + int sk_gso_type; + unsigned int sk_gso_max_size; + gfp_t sk_allocation; + __u32 sk_txhash; + u8 sk_padding: 1; + u8 sk_kern_sock: 1; + u8 sk_no_check_tx: 1; + u8 sk_no_check_rx: 1; + u8 sk_userlocks: 4; + u8 sk_pacing_shift; + u16 sk_type; + u16 sk_protocol; + u16 sk_gso_max_segs; + long unsigned int sk_lingertime; + struct proto *sk_prot_creator; + rwlock_t sk_callback_lock; + int sk_err; + int sk_err_soft; + u32 sk_ack_backlog; + u32 sk_max_ack_backlog; + kuid_t sk_uid; + struct pid *sk_peer_pid; + const struct cred *sk_peer_cred; + long int sk_rcvtimeo; + ktime_t sk_stamp; + u16 sk_tsflags; + u8 sk_shutdown; + u32 sk_tskey; + atomic_t sk_zckey; + u8 sk_clockid; + u8 sk_txtime_deadline_mode: 1; + u8 sk_txtime_report_errors: 1; + u8 sk_txtime_unused: 6; + struct socket *sk_socket; + void *sk_user_data; + void *sk_security; + struct sock_cgroup_data sk_cgrp_data; + struct mem_cgroup *sk_memcg; + void (*sk_state_change)(struct sock *); + void (*sk_data_ready)(struct sock *); + void (*sk_write_space)(struct sock *); + void (*sk_error_report)(struct sock *); + int (*sk_backlog_rcv)(struct sock *, struct sk_buff *); + struct sk_buff * (*sk_validate_xmit_skb)(struct sock *, struct net_device *, struct sk_buff *); + void (*sk_destruct)(struct sock *); + struct sock_reuseport *sk_reuseport_cb; + struct bpf_local_storage *sk_bpf_storage; + struct callback_head sk_rcu; +}; + +typedef short unsigned int __kernel_sa_family_t; + +typedef __kernel_sa_family_t sa_family_t; + +struct sockaddr { + sa_family_t sa_family; + char sa_data[14]; +}; + +struct msghdr { + void *msg_name; + int msg_namelen; + struct iov_iter msg_iter; + union { + void *msg_control; + void *msg_control_user; + }; + bool msg_control_is_user: 1; + __kernel_size_t msg_controllen; + unsigned int msg_flags; + struct kiocb *msg_iocb; +}; + +typedef struct { + unsigned int clock_rate; + unsigned int clock_type; + short unsigned int loopback; +} sync_serial_settings; + +typedef struct { + unsigned int clock_rate; + unsigned int clock_type; + short unsigned int loopback; + unsigned int slot_map; +} te1_settings; + +typedef struct { + short unsigned int encoding; + short unsigned int parity; +} raw_hdlc_proto; + +typedef struct { + unsigned int t391; + unsigned int t392; + unsigned int n391; + unsigned int n392; + unsigned int n393; + short unsigned int lmi; + short unsigned int dce; +} fr_proto; + +typedef struct { + unsigned int dlci; +} fr_proto_pvc; + +typedef struct { + unsigned int dlci; + char master[16]; +} fr_proto_pvc_info; + +typedef struct { + unsigned int interval; + unsigned int timeout; +} cisco_proto; + +typedef struct { + short unsigned int dce; + unsigned int modulo; + unsigned int window; + unsigned int t1; + unsigned int t2; + unsigned int n2; +} x25_hdlc_proto; + +struct ifmap { + long unsigned int mem_start; + long unsigned int mem_end; + short 
unsigned int base_addr; + unsigned char irq; + unsigned char dma; + unsigned char port; +}; + +struct if_settings { + unsigned int type; + unsigned int size; + union { + raw_hdlc_proto *raw_hdlc; + cisco_proto *cisco; + fr_proto *fr; + fr_proto_pvc *fr_pvc; + fr_proto_pvc_info *fr_pvc_info; + x25_hdlc_proto *x25; + sync_serial_settings *sync; + te1_settings *te1; + } ifs_ifsu; +}; + +struct ifreq { + union { + char ifrn_name[16]; + } ifr_ifrn; + union { + struct sockaddr ifru_addr; + struct sockaddr ifru_dstaddr; + struct sockaddr ifru_broadaddr; + struct sockaddr ifru_netmask; + struct sockaddr ifru_hwaddr; + short int ifru_flags; + int ifru_ivalue; + int ifru_mtu; + struct ifmap ifru_map; + char ifru_slave[16]; + char ifru_newname[16]; + void *ifru_data; + struct if_settings ifru_settings; + } ifr_ifru; +}; + +struct termiox { + __u16 x_hflag; + __u16 x_cflag; + __u16 x_rflag[5]; + __u16 x_sflag; +}; + +struct serial_icounter_struct; + +struct serial_struct; + +struct tty_operations { + struct tty_struct * (*lookup)(struct tty_driver *, struct file *, int); + int (*install)(struct tty_driver *, struct tty_struct *); + void (*remove)(struct tty_driver *, struct tty_struct *); + int (*open)(struct tty_struct *, struct file *); + void (*close)(struct tty_struct *, struct file *); + void (*shutdown)(struct tty_struct *); + void (*cleanup)(struct tty_struct *); + int (*write)(struct tty_struct *, const unsigned char *, int); + int (*put_char)(struct tty_struct *, unsigned char); + void (*flush_chars)(struct tty_struct *); + int (*write_room)(struct tty_struct *); + int (*chars_in_buffer)(struct tty_struct *); + int (*ioctl)(struct tty_struct *, unsigned int, long unsigned int); + long int (*compat_ioctl)(struct tty_struct *, unsigned int, long unsigned int); + void (*set_termios)(struct tty_struct *, struct ktermios *); + void (*throttle)(struct tty_struct *); + void (*unthrottle)(struct tty_struct *); + void (*stop)(struct tty_struct *); + void (*start)(struct tty_struct *); + void (*hangup)(struct tty_struct *); + int (*break_ctl)(struct tty_struct *, int); + void (*flush_buffer)(struct tty_struct *); + void (*set_ldisc)(struct tty_struct *); + void (*wait_until_sent)(struct tty_struct *, int); + void (*send_xchar)(struct tty_struct *, char); + int (*tiocmget)(struct tty_struct *); + int (*tiocmset)(struct tty_struct *, unsigned int, unsigned int); + int (*resize)(struct tty_struct *, struct winsize *); + int (*set_termiox)(struct tty_struct *, struct termiox *); + int (*get_icount)(struct tty_struct *, struct serial_icounter_struct *); + int (*get_serial)(struct tty_struct *, struct serial_struct *); + int (*set_serial)(struct tty_struct *, struct serial_struct *); + void (*show_fdinfo)(struct tty_struct *, struct seq_file *); + int (*poll_init)(struct tty_driver *, int, char *); + int (*poll_get_char)(struct tty_driver *, int); + void (*poll_put_char)(struct tty_driver *, int, char); + int (*proc_show)(struct seq_file *, void *); +}; + +struct proc_dir_entry; + +struct tty_driver { + int magic; + struct kref kref; + struct cdev **cdevs; + struct module *owner; + const char *driver_name; + const char *name; + int name_base; + int major; + int minor_start; + unsigned int num; + short int type; + short int subtype; + struct ktermios init_termios; + long unsigned int flags; + struct proc_dir_entry *proc_entry; + struct tty_driver *other; + struct tty_struct **ttys; + struct tty_port **ports; + struct ktermios **termios; + void *driver_state; + const struct tty_operations *ops; + struct 
list_head tty_drivers; +}; + +struct tty_buffer { + union { + struct tty_buffer *next; + struct llist_node free; + }; + int used; + int size; + int commit; + int read; + int flags; + long unsigned int data[0]; +}; + +struct tty_bufhead { + struct tty_buffer *head; + struct work_struct work; + struct mutex lock; + atomic_t priority; + struct tty_buffer sentinel; + struct llist_head free; + atomic_t mem_used; + int mem_limit; + struct tty_buffer *tail; +}; + +struct tty_port_operations; + +struct tty_port_client_operations; + +struct tty_port { + struct tty_bufhead buf; + struct tty_struct *tty; + struct tty_struct *itty; + const struct tty_port_operations *ops; + const struct tty_port_client_operations *client_ops; + spinlock_t lock; + int blocked_open; + int count; + wait_queue_head_t open_wait; + wait_queue_head_t delta_msr_wait; + long unsigned int flags; + long unsigned int iflags; + unsigned char console: 1; + unsigned char low_latency: 1; + struct mutex mutex; + struct mutex buf_mutex; + unsigned char *xmit_buf; + unsigned int close_delay; + unsigned int closing_wait; + int drain_delay; + struct kref kref; + void *client_data; +}; + +struct tty_ldisc_ops { + int magic; + char *name; + int num; + int flags; + int (*open)(struct tty_struct *); + void (*close)(struct tty_struct *); + void (*flush_buffer)(struct tty_struct *); + ssize_t (*read)(struct tty_struct *, struct file *, unsigned char *, size_t); + ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t); + int (*ioctl)(struct tty_struct *, struct file *, unsigned int, long unsigned int); + int (*compat_ioctl)(struct tty_struct *, struct file *, unsigned int, long unsigned int); + void (*set_termios)(struct tty_struct *, struct ktermios *); + __poll_t (*poll)(struct tty_struct *, struct file *, struct poll_table_struct *); + int (*hangup)(struct tty_struct *); + void (*receive_buf)(struct tty_struct *, const unsigned char *, char *, int); + void (*write_wakeup)(struct tty_struct *); + void (*dcd_change)(struct tty_struct *, unsigned int); + int (*receive_buf2)(struct tty_struct *, const unsigned char *, char *, int); + struct module *owner; + int refcount; +}; + +struct tty_ldisc { + struct tty_ldisc_ops *ops; + struct tty_struct *tty; +}; + +struct tty_port_operations { + int (*carrier_raised)(struct tty_port *); + void (*dtr_rts)(struct tty_port *, int); + void (*shutdown)(struct tty_port *); + int (*activate)(struct tty_port *, struct tty_struct *); + void (*destruct)(struct tty_port *); +}; + +struct tty_port_client_operations { + int (*receive_buf)(struct tty_port *, const unsigned char *, const unsigned char *, size_t); + void (*write_wakeup)(struct tty_port *); +}; + +struct prot_inuse; + +struct netns_core { + struct ctl_table_header *sysctl_hdr; + int sysctl_somaxconn; + int *sock_inuse; + struct prot_inuse *prot_inuse; +}; + +struct tcp_mib; + +struct ipstats_mib; + +struct linux_mib; + +struct udp_mib; + +struct icmp_mib; + +struct icmpmsg_mib; + +struct icmpv6_mib; + +struct icmpv6msg_mib; + +struct linux_tls_mib; + +struct mptcp_mib; + +struct netns_mib { + struct tcp_mib *tcp_statistics; + struct ipstats_mib *ip_statistics; + struct linux_mib *net_statistics; + struct udp_mib *udp_statistics; + struct udp_mib *udplite_statistics; + struct icmp_mib *icmp_statistics; + struct icmpmsg_mib *icmpmsg_statistics; + struct proc_dir_entry *proc_net_devsnmp6; + struct udp_mib *udp_stats_in6; + struct udp_mib *udplite_stats_in6; + struct ipstats_mib *ipv6_statistics; + struct icmpv6_mib 
*icmpv6_statistics; + struct icmpv6msg_mib *icmpv6msg_statistics; + struct linux_tls_mib *tls_statistics; + struct mptcp_mib *mptcp_statistics; +}; + +struct netns_packet { + struct mutex sklist_lock; + struct hlist_head sklist; +}; + +struct netns_unix { + int sysctl_max_dgram_qlen; + struct ctl_table_header *ctl; +}; + +struct netns_nexthop { + struct rb_root rb_root; + struct hlist_head *devhash; + unsigned int seq; + u32 last_id_allocated; + struct blocking_notifier_head notifier_chain; +}; + +struct local_ports { + seqlock_t lock; + int range[2]; + bool warned; +}; + +struct inet_hashinfo; + +struct inet_timewait_death_row { + atomic_t tw_count; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct inet_hashinfo *hashinfo; + int sysctl_max_tw_buckets; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct ping_group_range { + seqlock_t lock; + kgid_t range[2]; +}; + +typedef struct { + u64 key[2]; +} siphash_key_t; + +struct ipv4_devconf; + +struct ip_ra_chain; + +struct fib_rules_ops; + +struct fib_table; + +struct inet_peer_base; + +struct fqdir; + +struct xt_table; + +struct tcp_congestion_ops; + +struct tcp_fastopen_context; + +struct fib_notifier_ops; + +struct netns_ipv4 { + struct ctl_table_header *forw_hdr; + struct ctl_table_header *frags_hdr; + struct ctl_table_header *ipv4_hdr; + struct ctl_table_header *route_hdr; + struct ctl_table_header *xfrm4_hdr; + struct ipv4_devconf *devconf_all; + struct ipv4_devconf *devconf_dflt; + struct ip_ra_chain *ra_chain; + struct mutex ra_mutex; + struct fib_rules_ops *rules_ops; + bool fib_has_custom_rules; + unsigned int fib_rules_require_fldissect; + struct fib_table *fib_main; + struct fib_table *fib_default; + bool fib_has_custom_local_routes; + int fib_num_tclassid_users; + struct hlist_head *fib_table_hash; + bool fib_offload_disabled; + struct sock *fibnl; + struct sock **icmp_sk; + struct sock *mc_autojoin_sk; + struct inet_peer_base *peers; + struct sock **tcp_sk; + struct fqdir *fqdir; + struct xt_table *iptable_filter; + struct xt_table *iptable_mangle; + struct xt_table *iptable_raw; + struct xt_table *arptable_filter; + struct xt_table *iptable_security; + struct xt_table *nat_table; + int sysctl_icmp_echo_ignore_all; + int sysctl_icmp_echo_ignore_broadcasts; + int sysctl_icmp_ignore_bogus_error_responses; + int sysctl_icmp_ratelimit; + int sysctl_icmp_ratemask; + int sysctl_icmp_errors_use_inbound_ifaddr; + struct local_ports ip_local_ports; + int sysctl_tcp_ecn; + int sysctl_tcp_ecn_fallback; + int sysctl_ip_default_ttl; + int sysctl_ip_no_pmtu_disc; + int sysctl_ip_fwd_use_pmtu; + int sysctl_ip_fwd_update_priority; + int sysctl_ip_nonlocal_bind; + int sysctl_ip_autobind_reuse; + int sysctl_ip_dynaddr; + int sysctl_ip_early_demux; + int sysctl_raw_l3mdev_accept; + int sysctl_tcp_early_demux; + int sysctl_udp_early_demux; + int sysctl_nexthop_compat_mode; + int sysctl_fwmark_reflect; + int sysctl_tcp_fwmark_accept; + int sysctl_tcp_l3mdev_accept; + int sysctl_tcp_mtu_probing; + int sysctl_tcp_mtu_probe_floor; + int sysctl_tcp_base_mss; + int sysctl_tcp_min_snd_mss; + int sysctl_tcp_probe_threshold; + u32 sysctl_tcp_probe_interval; + int sysctl_tcp_keepalive_time; + int sysctl_tcp_keepalive_probes; + int sysctl_tcp_keepalive_intvl; + int sysctl_tcp_syn_retries; + int sysctl_tcp_synack_retries; + int sysctl_tcp_syncookies; + int sysctl_tcp_reordering; + int sysctl_tcp_retries1; + int sysctl_tcp_retries2; + int 
sysctl_tcp_orphan_retries; + int sysctl_tcp_fin_timeout; + unsigned int sysctl_tcp_notsent_lowat; + int sysctl_tcp_tw_reuse; + int sysctl_tcp_sack; + int sysctl_tcp_window_scaling; + int sysctl_tcp_timestamps; + int sysctl_tcp_early_retrans; + int sysctl_tcp_recovery; + int sysctl_tcp_thin_linear_timeouts; + int sysctl_tcp_slow_start_after_idle; + int sysctl_tcp_retrans_collapse; + int sysctl_tcp_stdurg; + int sysctl_tcp_rfc1337; + int sysctl_tcp_abort_on_overflow; + int sysctl_tcp_fack; + int sysctl_tcp_max_reordering; + int sysctl_tcp_dsack; + int sysctl_tcp_app_win; + int sysctl_tcp_adv_win_scale; + int sysctl_tcp_frto; + int sysctl_tcp_nometrics_save; + int sysctl_tcp_no_ssthresh_metrics_save; + int sysctl_tcp_moderate_rcvbuf; + int sysctl_tcp_tso_win_divisor; + int sysctl_tcp_workaround_signed_windows; + int sysctl_tcp_limit_output_bytes; + int sysctl_tcp_challenge_ack_limit; + int sysctl_tcp_min_tso_segs; + int sysctl_tcp_min_rtt_wlen; + int sysctl_tcp_autocorking; + int sysctl_tcp_invalid_ratelimit; + int sysctl_tcp_pacing_ss_ratio; + int sysctl_tcp_pacing_ca_ratio; + int sysctl_tcp_wmem[3]; + int sysctl_tcp_rmem[3]; + int sysctl_tcp_comp_sack_nr; + long unsigned int sysctl_tcp_comp_sack_delay_ns; + long unsigned int sysctl_tcp_comp_sack_slack_ns; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct inet_timewait_death_row tcp_death_row; + int sysctl_max_syn_backlog; + int sysctl_tcp_fastopen; + const struct tcp_congestion_ops *tcp_congestion_control; + struct tcp_fastopen_context *tcp_fastopen_ctx; + spinlock_t tcp_fastopen_ctx_lock; + unsigned int sysctl_tcp_fastopen_blackhole_timeout; + atomic_t tfo_active_disable_times; + long unsigned int tfo_active_disable_stamp; + int sysctl_tcp_reflect_tos; + int sysctl_udp_wmem_min; + int sysctl_udp_rmem_min; + int sysctl_udp_l3mdev_accept; + int sysctl_igmp_max_memberships; + int sysctl_igmp_max_msf; + int sysctl_igmp_llm_reports; + int sysctl_igmp_qrv; + struct ping_group_range ping_group_range; + atomic_t dev_addr_genid; + long unsigned int *sysctl_local_reserved_ports; + int sysctl_ip_prot_sock; + struct list_head mr_tables; + struct fib_rules_ops *mr_rules_ops; + int sysctl_fib_multipath_use_neigh; + int sysctl_fib_multipath_hash_policy; + struct fib_notifier_ops *notifier_ops; + unsigned int fib_seq; + struct fib_notifier_ops *ipmr_notifier_ops; + unsigned int ipmr_seq; + atomic_t rt_genid; + siphash_key_t ip_id_key; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct netns_sysctl_ipv6 { + struct ctl_table_header *hdr; + struct ctl_table_header *route_hdr; + struct ctl_table_header *icmp_hdr; + struct ctl_table_header *frags_hdr; + struct ctl_table_header *xfrm6_hdr; + int bindv6only; + int flush_delay; + int ip6_rt_max_size; + int ip6_rt_gc_min_interval; + int ip6_rt_gc_timeout; + int ip6_rt_gc_interval; + int ip6_rt_gc_elasticity; + int ip6_rt_mtu_expires; + int ip6_rt_min_advmss; + int multipath_hash_policy; + int flowlabel_consistency; + int auto_flowlabels; + int icmpv6_time; + int icmpv6_echo_ignore_all; + int icmpv6_echo_ignore_multicast; + int icmpv6_echo_ignore_anycast; + long unsigned int icmpv6_ratemask[4]; + long unsigned int *icmpv6_ratemask_ptr; + int anycast_src_echo_reply; + int ip_nonlocal_bind; + int fwmark_reflect; + int idgen_retries; + int idgen_delay; + int flowlabel_state_ranges; + int flowlabel_reflect; + int max_dst_opts_cnt; + int max_hbh_opts_cnt; + int max_dst_opts_len; + int max_hbh_opts_len; + int seg6_flowlabel; + bool skip_notify_on_dev_down; 
+}; + +struct neighbour; + +struct dst_ops { + short unsigned int family; + unsigned int gc_thresh; + int (*gc)(struct dst_ops *); + struct dst_entry * (*check)(struct dst_entry *, __u32); + unsigned int (*default_advmss)(const struct dst_entry *); + unsigned int (*mtu)(const struct dst_entry *); + u32 * (*cow_metrics)(struct dst_entry *, long unsigned int); + void (*destroy)(struct dst_entry *); + void (*ifdown)(struct dst_entry *, struct net_device *, int); + struct dst_entry * (*negative_advice)(struct dst_entry *); + void (*link_failure)(struct sk_buff *); + void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32, bool); + void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *); + int (*local_out)(struct net *, struct sock *, struct sk_buff *); + struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *); + void (*confirm_neigh)(const struct dst_entry *, const void *); + struct kmem_cache *kmem_cachep; + struct percpu_counter pcpuc_entries; + long: 64; + long: 64; + long: 64; +}; + +struct ipv6_devconf; + +struct fib6_info; + +struct rt6_info; + +struct rt6_statistics; + +struct fib6_table; + +struct seg6_pernet_data; + +struct netns_ipv6 { + struct netns_sysctl_ipv6 sysctl; + struct ipv6_devconf *devconf_all; + struct ipv6_devconf *devconf_dflt; + struct inet_peer_base *peers; + struct fqdir *fqdir; + struct xt_table *ip6table_filter; + struct xt_table *ip6table_mangle; + struct xt_table *ip6table_raw; + struct xt_table *ip6table_security; + struct xt_table *ip6table_nat; + struct fib6_info *fib6_null_entry; + struct rt6_info *ip6_null_entry; + struct rt6_statistics *rt6_stats; + struct timer_list ip6_fib_timer; + struct hlist_head *fib_table_hash; + struct fib6_table *fib6_main_tbl; + struct list_head fib6_walkers; + long: 64; + long: 64; + struct dst_ops ip6_dst_ops; + rwlock_t fib6_walker_lock; + spinlock_t fib6_gc_lock; + unsigned int ip6_rt_gc_expire; + long unsigned int ip6_rt_last_gc; + unsigned int fib6_rules_require_fldissect; + bool fib6_has_custom_rules; + unsigned int fib6_routes_require_src; + struct rt6_info *ip6_prohibit_entry; + struct rt6_info *ip6_blk_hole_entry; + struct fib6_table *fib6_local_tbl; + struct fib_rules_ops *fib6_rules_ops; + struct sock **icmp_sk; + struct sock *ndisc_sk; + struct sock *tcp_sk; + struct sock *igmp_sk; + struct sock *mc_autojoin_sk; + struct list_head mr6_tables; + struct fib_rules_ops *mr6_rules_ops; + atomic_t dev_addr_genid; + atomic_t fib6_sernum; + struct seg6_pernet_data *seg6_data; + struct fib_notifier_ops *notifier_ops; + struct fib_notifier_ops *ip6mr_notifier_ops; + unsigned int ipmr_seq; + struct { + struct hlist_head head; + spinlock_t lock; + u32 seq; + } ip6addrlbl_table; +}; + +struct netns_sysctl_lowpan { + struct ctl_table_header *frags_hdr; +}; + +struct netns_ieee802154_lowpan { + struct netns_sysctl_lowpan sysctl; + struct fqdir *fqdir; +}; + +struct sctp_mib; + +struct netns_sctp { + struct sctp_mib *sctp_statistics; + struct proc_dir_entry *proc_net_sctp; + struct ctl_table_header *sysctl_header; + struct sock *ctl_sock; + struct list_head local_addr_list; + struct list_head addr_waitq; + struct timer_list addr_wq_timer; + struct list_head auto_asconf_splist; + spinlock_t addr_wq_lock; + spinlock_t local_addr_lock; + unsigned int rto_initial; + unsigned int rto_min; + unsigned int rto_max; + int rto_alpha; + int rto_beta; + int max_burst; + int cookie_preserve_enable; + char *sctp_hmac_alg; + unsigned int valid_cookie_life; + unsigned int 
sack_timeout; + unsigned int hb_interval; + int max_retrans_association; + int max_retrans_path; + int max_retrans_init; + int pf_retrans; + int ps_retrans; + int pf_enable; + int pf_expose; + int sndbuf_policy; + int rcvbuf_policy; + int default_auto_asconf; + int addip_enable; + int addip_noauth; + int prsctp_enable; + int reconf_enable; + int auth_enable; + int intl_enable; + int ecn_enable; + int scope_policy; + int rwnd_upd_shift; + long unsigned int max_autoclose; +}; + +struct netns_dccp { + struct sock *v4_ctl_sk; + struct sock *v6_ctl_sk; +}; + +struct nf_queue_handler; + +struct nf_logger; + +struct nf_hook_entries; + +struct netns_nf { + struct proc_dir_entry *proc_netfilter; + const struct nf_queue_handler *queue_handler; + const struct nf_logger *nf_loggers[13]; + struct ctl_table_header *nf_log_dir_header; + struct nf_hook_entries *hooks_ipv4[5]; + struct nf_hook_entries *hooks_ipv6[5]; + struct nf_hook_entries *hooks_arp[3]; + struct nf_hook_entries *hooks_bridge[5]; + struct nf_hook_entries *hooks_decnet[7]; + bool defrag_ipv4; + bool defrag_ipv6; +}; + +struct ebt_table; + +struct netns_xt { + struct list_head tables[13]; + bool notrack_deprecated_warning; + bool clusterip_deprecated_warning; + struct ebt_table *broute_table; + struct ebt_table *frame_filter; + struct ebt_table *frame_nat; +}; + +struct nf_generic_net { + unsigned int timeout; +}; + +struct nf_tcp_net { + unsigned int timeouts[14]; + int tcp_loose; + int tcp_be_liberal; + int tcp_max_retrans; +}; + +struct nf_udp_net { + unsigned int timeouts[2]; +}; + +struct nf_icmp_net { + unsigned int timeout; +}; + +struct nf_dccp_net { + int dccp_loose; + unsigned int dccp_timeout[10]; +}; + +struct nf_sctp_net { + unsigned int timeouts[10]; +}; + +struct nf_gre_net { + struct list_head keymap_list; + unsigned int timeouts[2]; +}; + +struct nf_ip_net { + struct nf_generic_net generic; + struct nf_tcp_net tcp; + struct nf_udp_net udp; + struct nf_icmp_net icmp; + struct nf_icmp_net icmpv6; + struct nf_dccp_net dccp; + struct nf_sctp_net sctp; + struct nf_gre_net gre; +}; + +struct ct_pcpu; + +struct ip_conntrack_stat; + +struct nf_ct_event_notifier; + +struct nf_exp_event_notifier; + +struct netns_ct { + atomic_t count; + unsigned int expect_count; + struct delayed_work ecache_dwork; + bool ecache_dwork_pending; + bool auto_assign_helper_warned; + struct ctl_table_header *sysctl_header; + unsigned int sysctl_log_invalid; + int sysctl_events; + int sysctl_acct; + int sysctl_auto_assign_helper; + int sysctl_tstamp; + int sysctl_checksum; + struct ct_pcpu *pcpu_lists; + struct ip_conntrack_stat *stat; + struct nf_ct_event_notifier *nf_conntrack_event_cb; + struct nf_exp_event_notifier *nf_expect_event_cb; + struct nf_ip_net nf_ct_proto; + unsigned int labels_used; +}; + +struct netns_nftables { + struct list_head tables; + struct list_head commit_list; + struct list_head module_list; + struct list_head notify_list; + struct mutex commit_mutex; + unsigned int base_seq; + u8 gencursor; + u8 validate_state; +}; + +struct netns_nf_frag { + struct fqdir *fqdir; +}; + +struct netns_bpf { + struct bpf_prog_array *run_array[2]; + struct bpf_prog *progs[2]; + struct list_head links[2]; +}; + +struct xfrm_policy_hash { + struct hlist_head *table; + unsigned int hmask; + u8 dbits4; + u8 sbits4; + u8 dbits6; + u8 sbits6; +}; + +struct xfrm_policy_hthresh { + struct work_struct work; + seqlock_t lock; + u8 lbits4; + u8 rbits4; + u8 lbits6; + u8 rbits6; +}; + +struct netns_xfrm { + struct list_head state_all; + struct hlist_head 
*state_bydst; + struct hlist_head *state_bysrc; + struct hlist_head *state_byspi; + unsigned int state_hmask; + unsigned int state_num; + struct work_struct state_hash_work; + struct list_head policy_all; + struct hlist_head *policy_byidx; + unsigned int policy_idx_hmask; + struct hlist_head policy_inexact[3]; + struct xfrm_policy_hash policy_bydst[3]; + unsigned int policy_count[6]; + struct work_struct policy_hash_work; + struct xfrm_policy_hthresh policy_hthresh; + struct list_head inexact_bins; + struct sock *nlsk; + struct sock *nlsk_stash; + u32 sysctl_aevent_etime; + u32 sysctl_aevent_rseqth; + int sysctl_larval_drop; + u32 sysctl_acq_expires; + struct ctl_table_header *sysctl_hdr; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct dst_ops xfrm4_dst_ops; + struct dst_ops xfrm6_dst_ops; + spinlock_t xfrm_state_lock; + spinlock_t xfrm_policy_lock; + struct mutex xfrm_cfg_mutex; + long: 64; + long: 64; + long: 64; +}; + +struct netns_ipvs; + +struct mpls_route; + +struct netns_mpls { + int ip_ttl_propagate; + int default_ttl; + size_t platform_labels; + struct mpls_route **platform_label; + struct ctl_table_header *ctl; +}; + +struct can_dev_rcv_lists; + +struct can_pkg_stats; + +struct can_rcv_lists_stats; + +struct netns_can { + struct proc_dir_entry *proc_dir; + struct proc_dir_entry *pde_stats; + struct proc_dir_entry *pde_reset_stats; + struct proc_dir_entry *pde_rcvlist_all; + struct proc_dir_entry *pde_rcvlist_fil; + struct proc_dir_entry *pde_rcvlist_inv; + struct proc_dir_entry *pde_rcvlist_sff; + struct proc_dir_entry *pde_rcvlist_eff; + struct proc_dir_entry *pde_rcvlist_err; + struct proc_dir_entry *bcmproc_dir; + struct can_dev_rcv_lists *rx_alldev_list; + spinlock_t rcvlists_lock; + struct timer_list stattimer; + struct can_pkg_stats *pkg_stats; + struct can_rcv_lists_stats *rcv_lists_stats; + struct hlist_head cgw_list; +}; + +struct netns_xdp { + struct mutex lock; + struct hlist_head list; +}; + +struct uevent_sock; + +struct net_generic; + +struct net { + refcount_t passive; + refcount_t count; + spinlock_t rules_mod_lock; + unsigned int dev_unreg_count; + unsigned int dev_base_seq; + int ifindex; + spinlock_t nsid_lock; + atomic_t fnhe_genid; + struct list_head list; + struct list_head exit_list; + struct llist_node cleanup_list; + struct key_tag *key_domain; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct idr netns_ids; + struct ns_common ns; + struct list_head dev_base_head; + struct proc_dir_entry *proc_net; + struct proc_dir_entry *proc_net_stat; + struct ctl_table_set sysctls; + struct sock *rtnl; + struct sock *genl_sock; + struct uevent_sock *uevent_sock; + struct hlist_head *dev_name_head; + struct hlist_head *dev_index_head; + struct raw_notifier_head netdev_chain; + u32 hash_mix; + struct net_device *loopback_dev; + struct list_head rules_ops; + struct netns_core core; + struct netns_mib mib; + struct netns_packet packet; + struct netns_unix unx; + struct netns_nexthop nexthop; + long: 64; + struct netns_ipv4 ipv4; + struct netns_ipv6 ipv6; + struct netns_ieee802154_lowpan ieee802154_lowpan; + struct netns_sctp sctp; + struct netns_dccp dccp; + struct netns_nf nf; + struct netns_xt xt; + struct netns_ct ct; + struct netns_nftables nft; + struct netns_nf_frag nf_frag; + struct ctl_table_header *nf_frag_frags_hdr; + struct sock *nfnl; + struct sock *nfnl_stash; + struct list_head nfnl_acct_list; + struct list_head nfct_timeout_list; + struct sk_buff_head wext_nlevents; + struct net_generic *gen; + struct netns_bpf bpf; 
+ long: 64; + long: 64; + long: 64; + struct netns_xfrm xfrm; + atomic64_t net_cookie; + struct netns_ipvs *ipvs; + struct netns_mpls mpls; + struct netns_can can; + struct netns_xdp xdp; + struct sock *crypto_nlsk; + struct sock *diag_nlsk; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +typedef struct { + local64_t v; +} u64_stats_t; + +struct bpf_insn { + __u8 code; + __u8 dst_reg: 4; + __u8 src_reg: 4; + __s16 off; + __s32 imm; +}; + +enum bpf_map_type { + BPF_MAP_TYPE_UNSPEC = 0, + BPF_MAP_TYPE_HASH = 1, + BPF_MAP_TYPE_ARRAY = 2, + BPF_MAP_TYPE_PROG_ARRAY = 3, + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 4, + BPF_MAP_TYPE_PERCPU_HASH = 5, + BPF_MAP_TYPE_PERCPU_ARRAY = 6, + BPF_MAP_TYPE_STACK_TRACE = 7, + BPF_MAP_TYPE_CGROUP_ARRAY = 8, + BPF_MAP_TYPE_LRU_HASH = 9, + BPF_MAP_TYPE_LRU_PERCPU_HASH = 10, + BPF_MAP_TYPE_LPM_TRIE = 11, + BPF_MAP_TYPE_ARRAY_OF_MAPS = 12, + BPF_MAP_TYPE_HASH_OF_MAPS = 13, + BPF_MAP_TYPE_DEVMAP = 14, + BPF_MAP_TYPE_SOCKMAP = 15, + BPF_MAP_TYPE_CPUMAP = 16, + BPF_MAP_TYPE_XSKMAP = 17, + BPF_MAP_TYPE_SOCKHASH = 18, + BPF_MAP_TYPE_CGROUP_STORAGE = 19, + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 20, + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 21, + BPF_MAP_TYPE_QUEUE = 22, + BPF_MAP_TYPE_STACK = 23, + BPF_MAP_TYPE_SK_STORAGE = 24, + BPF_MAP_TYPE_DEVMAP_HASH = 25, + BPF_MAP_TYPE_STRUCT_OPS = 26, + BPF_MAP_TYPE_RINGBUF = 27, + BPF_MAP_TYPE_INODE_STORAGE = 28, +}; + +enum bpf_prog_type { + BPF_PROG_TYPE_UNSPEC = 0, + BPF_PROG_TYPE_SOCKET_FILTER = 1, + BPF_PROG_TYPE_KPROBE = 2, + BPF_PROG_TYPE_SCHED_CLS = 3, + BPF_PROG_TYPE_SCHED_ACT = 4, + BPF_PROG_TYPE_TRACEPOINT = 5, + BPF_PROG_TYPE_XDP = 6, + BPF_PROG_TYPE_PERF_EVENT = 7, + BPF_PROG_TYPE_CGROUP_SKB = 8, + BPF_PROG_TYPE_CGROUP_SOCK = 9, + BPF_PROG_TYPE_LWT_IN = 10, + BPF_PROG_TYPE_LWT_OUT = 11, + BPF_PROG_TYPE_LWT_XMIT = 12, + BPF_PROG_TYPE_SOCK_OPS = 13, + BPF_PROG_TYPE_SK_SKB = 14, + BPF_PROG_TYPE_CGROUP_DEVICE = 15, + BPF_PROG_TYPE_SK_MSG = 16, + BPF_PROG_TYPE_RAW_TRACEPOINT = 17, + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 18, + BPF_PROG_TYPE_LWT_SEG6LOCAL = 19, + BPF_PROG_TYPE_LIRC_MODE2 = 20, + BPF_PROG_TYPE_SK_REUSEPORT = 21, + BPF_PROG_TYPE_FLOW_DISSECTOR = 22, + BPF_PROG_TYPE_CGROUP_SYSCTL = 23, + BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 24, + BPF_PROG_TYPE_CGROUP_SOCKOPT = 25, + BPF_PROG_TYPE_TRACING = 26, + BPF_PROG_TYPE_STRUCT_OPS = 27, + BPF_PROG_TYPE_EXT = 28, + BPF_PROG_TYPE_LSM = 29, + BPF_PROG_TYPE_SK_LOOKUP = 30, +}; + +enum bpf_attach_type { + BPF_CGROUP_INET_INGRESS = 0, + BPF_CGROUP_INET_EGRESS = 1, + BPF_CGROUP_INET_SOCK_CREATE = 2, + BPF_CGROUP_SOCK_OPS = 3, + BPF_SK_SKB_STREAM_PARSER = 4, + BPF_SK_SKB_STREAM_VERDICT = 5, + BPF_CGROUP_DEVICE = 6, + BPF_SK_MSG_VERDICT = 7, + BPF_CGROUP_INET4_BIND = 8, + BPF_CGROUP_INET6_BIND = 9, + BPF_CGROUP_INET4_CONNECT = 10, + BPF_CGROUP_INET6_CONNECT = 11, + BPF_CGROUP_INET4_POST_BIND = 12, + BPF_CGROUP_INET6_POST_BIND = 13, + BPF_CGROUP_UDP4_SENDMSG = 14, + BPF_CGROUP_UDP6_SENDMSG = 15, + BPF_LIRC_MODE2 = 16, + BPF_FLOW_DISSECTOR = 17, + BPF_CGROUP_SYSCTL = 18, + BPF_CGROUP_UDP4_RECVMSG = 19, + BPF_CGROUP_UDP6_RECVMSG = 20, + BPF_CGROUP_GETSOCKOPT = 21, + BPF_CGROUP_SETSOCKOPT = 22, + BPF_TRACE_RAW_TP = 23, + BPF_TRACE_FENTRY = 24, + BPF_TRACE_FEXIT = 25, + BPF_MODIFY_RETURN = 26, + BPF_LSM_MAC = 27, + BPF_TRACE_ITER = 28, + BPF_CGROUP_INET4_GETPEERNAME = 29, + BPF_CGROUP_INET6_GETPEERNAME = 30, + BPF_CGROUP_INET4_GETSOCKNAME = 31, + BPF_CGROUP_INET6_GETSOCKNAME = 32, + BPF_XDP_DEVMAP = 33, + BPF_CGROUP_INET_SOCK_RELEASE = 34, + BPF_XDP_CPUMAP = 
35, + BPF_SK_LOOKUP = 36, + BPF_XDP = 37, + __MAX_BPF_ATTACH_TYPE = 38, +}; + +union bpf_attr { + struct { + __u32 map_type; + __u32 key_size; + __u32 value_size; + __u32 max_entries; + __u32 map_flags; + __u32 inner_map_fd; + __u32 numa_node; + char map_name[16]; + __u32 map_ifindex; + __u32 btf_fd; + __u32 btf_key_type_id; + __u32 btf_value_type_id; + __u32 btf_vmlinux_value_type_id; + }; + struct { + __u32 map_fd; + __u64 key; + union { + __u64 value; + __u64 next_key; + }; + __u64 flags; + }; + struct { + __u64 in_batch; + __u64 out_batch; + __u64 keys; + __u64 values; + __u32 count; + __u32 map_fd; + __u64 elem_flags; + __u64 flags; + } batch; + struct { + __u32 prog_type; + __u32 insn_cnt; + __u64 insns; + __u64 license; + __u32 log_level; + __u32 log_size; + __u64 log_buf; + __u32 kern_version; + __u32 prog_flags; + char prog_name[16]; + __u32 prog_ifindex; + __u32 expected_attach_type; + __u32 prog_btf_fd; + __u32 func_info_rec_size; + __u64 func_info; + __u32 func_info_cnt; + __u32 line_info_rec_size; + __u64 line_info; + __u32 line_info_cnt; + __u32 attach_btf_id; + __u32 attach_prog_fd; + }; + struct { + __u64 pathname; + __u32 bpf_fd; + __u32 file_flags; + }; + struct { + __u32 target_fd; + __u32 attach_bpf_fd; + __u32 attach_type; + __u32 attach_flags; + __u32 replace_bpf_fd; + }; + struct { + __u32 prog_fd; + __u32 retval; + __u32 data_size_in; + __u32 data_size_out; + __u64 data_in; + __u64 data_out; + __u32 repeat; + __u32 duration; + __u32 ctx_size_in; + __u32 ctx_size_out; + __u64 ctx_in; + __u64 ctx_out; + __u32 flags; + __u32 cpu; + } test; + struct { + union { + __u32 start_id; + __u32 prog_id; + __u32 map_id; + __u32 btf_id; + __u32 link_id; + }; + __u32 next_id; + __u32 open_flags; + }; + struct { + __u32 bpf_fd; + __u32 info_len; + __u64 info; + } info; + struct { + __u32 target_fd; + __u32 attach_type; + __u32 query_flags; + __u32 attach_flags; + __u64 prog_ids; + __u32 prog_cnt; + } query; + struct { + __u64 name; + __u32 prog_fd; + } raw_tracepoint; + struct { + __u64 btf; + __u64 btf_log_buf; + __u32 btf_size; + __u32 btf_log_size; + __u32 btf_log_level; + }; + struct { + __u32 pid; + __u32 fd; + __u32 flags; + __u32 buf_len; + __u64 buf; + __u32 prog_id; + __u32 fd_type; + __u64 probe_offset; + __u64 probe_addr; + } task_fd_query; + struct { + __u32 prog_fd; + union { + __u32 target_fd; + __u32 target_ifindex; + }; + __u32 attach_type; + __u32 flags; + union { + __u32 target_btf_id; + struct { + __u64 iter_info; + __u32 iter_info_len; + }; + }; + } link_create; + struct { + __u32 link_fd; + __u32 new_prog_fd; + __u32 flags; + __u32 old_prog_fd; + } link_update; + struct { + __u32 link_fd; + } link_detach; + struct { + __u32 type; + } enable_stats; + struct { + __u32 link_fd; + __u32 flags; + } iter_create; + struct { + __u32 prog_fd; + __u32 map_fd; + __u32 flags; + } prog_bind_map; +}; + +struct bpf_func_info { + __u32 insn_off; + __u32 type_id; +}; + +struct bpf_line_info { + __u32 insn_off; + __u32 file_name_off; + __u32 line_off; + __u32 line_col; +}; + +struct bpf_iter_aux_info; + +typedef int (*bpf_iter_init_seq_priv_t)(void *, struct bpf_iter_aux_info *); + +struct bpf_map; + +struct bpf_iter_aux_info { + struct bpf_map *map; +}; + +typedef void (*bpf_iter_fini_seq_priv_t)(void *); + +struct bpf_iter_seq_info { + const struct seq_operations *seq_ops; + bpf_iter_init_seq_priv_t init_seq_private; + bpf_iter_fini_seq_priv_t fini_seq_private; + u32 seq_priv_size; +}; + +struct btf; + +struct btf_type; + +struct bpf_prog_aux; + +struct bpf_local_storage_map; 
+ +struct bpf_map_ops { + int (*map_alloc_check)(union bpf_attr *); + struct bpf_map * (*map_alloc)(union bpf_attr *); + void (*map_release)(struct bpf_map *, struct file *); + void (*map_free)(struct bpf_map *); + int (*map_get_next_key)(struct bpf_map *, void *, void *); + void (*map_release_uref)(struct bpf_map *); + void * (*map_lookup_elem_sys_only)(struct bpf_map *, void *); + int (*map_lookup_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *); + int (*map_lookup_and_delete_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *); + int (*map_update_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *); + int (*map_delete_batch)(struct bpf_map *, const union bpf_attr *, union bpf_attr *); + void * (*map_lookup_elem)(struct bpf_map *, void *); + int (*map_update_elem)(struct bpf_map *, void *, void *, u64); + int (*map_delete_elem)(struct bpf_map *, void *); + int (*map_push_elem)(struct bpf_map *, void *, u64); + int (*map_pop_elem)(struct bpf_map *, void *); + int (*map_peek_elem)(struct bpf_map *, void *); + void * (*map_fd_get_ptr)(struct bpf_map *, struct file *, int); + void (*map_fd_put_ptr)(void *); + int (*map_gen_lookup)(struct bpf_map *, struct bpf_insn *); + u32 (*map_fd_sys_lookup_elem)(void *); + void (*map_seq_show_elem)(struct bpf_map *, void *, struct seq_file *); + int (*map_check_btf)(const struct bpf_map *, const struct btf *, const struct btf_type *, const struct btf_type *); + int (*map_poke_track)(struct bpf_map *, struct bpf_prog_aux *); + void (*map_poke_untrack)(struct bpf_map *, struct bpf_prog_aux *); + void (*map_poke_run)(struct bpf_map *, u32, struct bpf_prog *, struct bpf_prog *); + int (*map_direct_value_addr)(const struct bpf_map *, u64 *, u32); + int (*map_direct_value_meta)(const struct bpf_map *, u64, u32 *); + int (*map_mmap)(struct bpf_map *, struct vm_area_struct *); + __poll_t (*map_poll)(struct bpf_map *, struct file *, struct poll_table_struct *); + int (*map_local_storage_charge)(struct bpf_local_storage_map *, void *, u32); + void (*map_local_storage_uncharge)(struct bpf_local_storage_map *, void *, u32); + struct bpf_local_storage ** (*map_owner_storage_ptr)(void *); + bool (*map_meta_equal)(const struct bpf_map *, const struct bpf_map *); + const char * const map_btf_name; + int *map_btf_id; + const struct bpf_iter_seq_info *iter_seq_info; +}; + +struct bpf_map_memory { + u32 pages; + struct user_struct *user; +}; + +struct bpf_map { + const struct bpf_map_ops *ops; + struct bpf_map *inner_map_meta; + void *security; + enum bpf_map_type map_type; + u32 key_size; + u32 value_size; + u32 max_entries; + u32 map_flags; + int spin_lock_off; + u32 id; + int numa_node; + u32 btf_key_type_id; + u32 btf_value_type_id; + struct btf *btf; + struct bpf_map_memory memory; + char name[16]; + u32 btf_vmlinux_value_type_id; + bool bypass_spec_v1; + bool frozen; + long: 16; + long: 64; + long: 64; + atomic64_t refcnt; + atomic64_t usercnt; + struct work_struct work; + struct mutex freeze_mutex; + u64 writecnt; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct btf_header { + __u16 magic; + __u8 version; + __u8 flags; + __u32 hdr_len; + __u32 type_off; + __u32 type_len; + __u32 str_off; + __u32 str_len; +}; + +struct btf { + void *data; + struct btf_type **types; + u32 *resolved_ids; + u32 *resolved_sizes; + const char *strings; + void *nohdr_data; + struct btf_header hdr; + u32 nr_types; + u32 types_size; + u32 data_size; + refcount_t refcnt; + u32 id; + struct callback_head rcu; +}; + +struct 
btf_type { + __u32 name_off; + __u32 info; + union { + __u32 size; + __u32 type; + }; +}; + +enum bpf_tramp_prog_type { + BPF_TRAMP_FENTRY = 0, + BPF_TRAMP_FEXIT = 1, + BPF_TRAMP_MODIFY_RETURN = 2, + BPF_TRAMP_MAX = 3, + BPF_TRAMP_REPLACE = 4, +}; + +struct bpf_ksym { + long unsigned int start; + long unsigned int end; + char name[128]; + struct list_head lnode; + struct latch_tree_node tnode; + bool prog; +}; + +struct bpf_ctx_arg_aux; + +struct bpf_trampoline; + +struct bpf_jit_poke_descriptor; + +struct bpf_prog_ops; + +struct bpf_prog_offload; + +struct bpf_func_info_aux; + +struct bpf_prog_stats; + +struct bpf_prog_aux { + atomic64_t refcnt; + u32 used_map_cnt; + u32 max_ctx_offset; + u32 max_pkt_offset; + u32 max_tp_access; + u32 stack_depth; + u32 id; + u32 func_cnt; + u32 func_idx; + u32 attach_btf_id; + u32 ctx_arg_info_size; + u32 max_rdonly_access; + u32 max_rdwr_access; + const struct bpf_ctx_arg_aux *ctx_arg_info; + struct mutex dst_mutex; + struct bpf_prog *dst_prog; + struct bpf_trampoline *dst_trampoline; + enum bpf_prog_type saved_dst_prog_type; + enum bpf_attach_type saved_dst_attach_type; + bool verifier_zext; + bool offload_requested; + bool attach_btf_trace; + bool func_proto_unreliable; + bool sleepable; + bool tail_call_reachable; + enum bpf_tramp_prog_type trampoline_prog_type; + struct hlist_node tramp_hlist; + const struct btf_type *attach_func_proto; + const char *attach_func_name; + struct bpf_prog **func; + void *jit_data; + struct bpf_jit_poke_descriptor *poke_tab; + u32 size_poke_tab; + struct bpf_ksym ksym; + const struct bpf_prog_ops *ops; + struct bpf_map **used_maps; + struct mutex used_maps_mutex; + struct bpf_prog *prog; + struct user_struct *user; + u64 load_time; + struct bpf_map *cgroup_storage[2]; + char name[16]; + void *security; + struct bpf_prog_offload *offload; + struct btf *btf; + struct bpf_func_info *func_info; + struct bpf_func_info_aux *func_info_aux; + struct bpf_line_info *linfo; + void **jited_linfo; + u32 func_info_cnt; + u32 nr_linfo; + u32 linfo_idx; + u32 num_exentries; + struct exception_table_entry *extable; + struct bpf_prog_stats *stats; + union { + struct work_struct work; + struct callback_head rcu; + }; +}; + +struct sock_filter { + __u16 code; + __u8 jt; + __u8 jf; + __u32 k; +}; + +struct sock_fprog_kern; + +struct bpf_prog { + u16 pages; + u16 jited: 1; + u16 jit_requested: 1; + u16 gpl_compatible: 1; + u16 cb_access: 1; + u16 dst_needed: 1; + u16 blinded: 1; + u16 is_func: 1; + u16 kprobe_override: 1; + u16 has_callchain_buf: 1; + u16 enforce_expected_attach_type: 1; + u16 call_get_stack: 1; + enum bpf_prog_type type; + enum bpf_attach_type expected_attach_type; + u32 len; + u32 jited_len; + u8 tag[8]; + struct bpf_prog_aux *aux; + struct sock_fprog_kern *orig_prog; + unsigned int (*bpf_func)(const void *, const struct bpf_insn *); + struct sock_filter insns[0]; + struct bpf_insn insnsi[0]; +}; + +struct bpf_offloaded_map; + +struct bpf_map_dev_ops { + int (*map_get_next_key)(struct bpf_offloaded_map *, void *, void *); + int (*map_lookup_elem)(struct bpf_offloaded_map *, void *, void *); + int (*map_update_elem)(struct bpf_offloaded_map *, void *, void *, u64); + int (*map_delete_elem)(struct bpf_offloaded_map *, void *); +}; + +struct bpf_offloaded_map { + struct bpf_map map; + struct net_device *netdev; + const struct bpf_map_dev_ops *dev_ops; + void *dev_priv; + struct list_head offloads; + long: 64; + long: 64; + long: 64; +}; + +struct net_device_stats { + long unsigned int rx_packets; + long unsigned int 
tx_packets; + long unsigned int rx_bytes; + long unsigned int tx_bytes; + long unsigned int rx_errors; + long unsigned int tx_errors; + long unsigned int rx_dropped; + long unsigned int tx_dropped; + long unsigned int multicast; + long unsigned int collisions; + long unsigned int rx_length_errors; + long unsigned int rx_over_errors; + long unsigned int rx_crc_errors; + long unsigned int rx_frame_errors; + long unsigned int rx_fifo_errors; + long unsigned int rx_missed_errors; + long unsigned int tx_aborted_errors; + long unsigned int tx_carrier_errors; + long unsigned int tx_fifo_errors; + long unsigned int tx_heartbeat_errors; + long unsigned int tx_window_errors; + long unsigned int rx_compressed; + long unsigned int tx_compressed; +}; + +struct netdev_hw_addr_list { + struct list_head list; + int count; +}; + +struct tipc_bearer; + +struct dn_dev; + +struct mpls_dev; + +enum rx_handler_result { + RX_HANDLER_CONSUMED = 0, + RX_HANDLER_ANOTHER = 1, + RX_HANDLER_EXACT = 2, + RX_HANDLER_PASS = 3, +}; + +typedef enum rx_handler_result rx_handler_result_t; + +typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **); + +struct pcpu_dstats; + +struct garp_port; + +struct mrp_port; + +struct netdev_tc_txq { + u16 count; + u16 offset; +}; + +struct macsec_ops; + +struct udp_tunnel_nic; + +struct bpf_xdp_link; + +struct bpf_xdp_entity { + struct bpf_prog *prog; + struct bpf_xdp_link *link; +}; + +struct netdev_name_node; + +struct dev_ifalias; + +struct iw_handler_def; + +struct iw_public_data; + +struct net_device_ops; + +struct ethtool_ops; + +struct l3mdev_ops; + +struct ndisc_ops; + +struct xfrmdev_ops; + +struct tlsdev_ops; + +struct header_ops; + +struct vlan_info; + +struct dsa_port; + +struct in_device; + +struct inet6_dev; + +struct wireless_dev; + +struct wpan_dev; + +struct netdev_rx_queue; + +struct mini_Qdisc; + +struct netdev_queue; + +struct cpu_rmap; + +struct Qdisc; + +struct xdp_dev_bulk_queue; + +struct xps_dev_maps; + +struct netpoll_info; + +struct pcpu_lstats; + +struct pcpu_sw_netstats; + +struct rtnl_link_ops; + +struct dcbnl_rtnl_ops; + +struct netprio_map; + +struct phy_device; + +struct sfp_bus; + +struct udp_tunnel_nic_info; + +struct net_device { + char name[16]; + struct netdev_name_node *name_node; + struct dev_ifalias *ifalias; + long unsigned int mem_end; + long unsigned int mem_start; + long unsigned int base_addr; + int irq; + long unsigned int state; + struct list_head dev_list; + struct list_head napi_list; + struct list_head unreg_list; + struct list_head close_list; + struct list_head ptype_all; + struct list_head ptype_specific; + struct { + struct list_head upper; + struct list_head lower; + } adj_list; + netdev_features_t features; + netdev_features_t hw_features; + netdev_features_t wanted_features; + netdev_features_t vlan_features; + netdev_features_t hw_enc_features; + netdev_features_t mpls_features; + netdev_features_t gso_partial_features; + int ifindex; + int group; + struct net_device_stats stats; + atomic_long_t rx_dropped; + atomic_long_t tx_dropped; + atomic_long_t rx_nohandler; + atomic_t carrier_up_count; + atomic_t carrier_down_count; + const struct iw_handler_def *wireless_handlers; + struct iw_public_data *wireless_data; + const struct net_device_ops *netdev_ops; + const struct ethtool_ops *ethtool_ops; + const struct l3mdev_ops *l3mdev_ops; + const struct ndisc_ops *ndisc_ops; + const struct xfrmdev_ops *xfrmdev_ops; + const struct tlsdev_ops *tlsdev_ops; + const struct header_ops *header_ops; + unsigned int flags; + unsigned int 
priv_flags; + short unsigned int gflags; + short unsigned int padded; + unsigned char operstate; + unsigned char link_mode; + unsigned char if_port; + unsigned char dma; + unsigned int mtu; + unsigned int min_mtu; + unsigned int max_mtu; + short unsigned int type; + short unsigned int hard_header_len; + unsigned char min_header_len; + unsigned char name_assign_type; + short unsigned int needed_headroom; + short unsigned int needed_tailroom; + unsigned char perm_addr[32]; + unsigned char addr_assign_type; + unsigned char addr_len; + unsigned char upper_level; + unsigned char lower_level; + short unsigned int neigh_priv_len; + short unsigned int dev_id; + short unsigned int dev_port; + spinlock_t addr_list_lock; + struct netdev_hw_addr_list uc; + struct netdev_hw_addr_list mc; + struct netdev_hw_addr_list dev_addrs; + struct kset *queues_kset; + unsigned int promiscuity; + unsigned int allmulti; + bool uc_promisc; + struct vlan_info *vlan_info; + struct dsa_port *dsa_ptr; + struct tipc_bearer *tipc_ptr; + void *atalk_ptr; + struct in_device *ip_ptr; + struct dn_dev *dn_ptr; + struct inet6_dev *ip6_ptr; + void *ax25_ptr; + struct wireless_dev *ieee80211_ptr; + struct wpan_dev *ieee802154_ptr; + struct mpls_dev *mpls_ptr; + unsigned char *dev_addr; + struct netdev_rx_queue *_rx; + unsigned int num_rx_queues; + unsigned int real_num_rx_queues; + struct bpf_prog *xdp_prog; + long unsigned int gro_flush_timeout; + int napi_defer_hard_irqs; + rx_handler_func_t *rx_handler; + void *rx_handler_data; + struct mini_Qdisc *miniq_ingress; + struct netdev_queue *ingress_queue; + struct nf_hook_entries *nf_hooks_ingress; + unsigned char broadcast[32]; + struct cpu_rmap *rx_cpu_rmap; + struct hlist_node index_hlist; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct netdev_queue *_tx; + unsigned int num_tx_queues; + unsigned int real_num_tx_queues; + struct Qdisc *qdisc; + unsigned int tx_queue_len; + spinlock_t tx_global_lock; + struct xdp_dev_bulk_queue *xdp_bulkq; + struct xps_dev_maps *xps_cpus_map; + struct xps_dev_maps *xps_rxqs_map; + struct mini_Qdisc *miniq_egress; + struct hlist_head qdisc_hash[16]; + struct timer_list watchdog_timer; + int watchdog_timeo; + u32 proto_down_reason; + struct list_head todo_list; + int *pcpu_refcnt; + struct list_head link_watch_list; + enum { + NETREG_UNINITIALIZED = 0, + NETREG_REGISTERED = 1, + NETREG_UNREGISTERING = 2, + NETREG_UNREGISTERED = 3, + NETREG_RELEASED = 4, + NETREG_DUMMY = 5, + } reg_state: 8; + bool dismantle; + enum { + RTNL_LINK_INITIALIZED = 0, + RTNL_LINK_INITIALIZING = 1, + } rtnl_link_state: 16; + bool needs_free_netdev; + void (*priv_destructor)(struct net_device *); + struct netpoll_info *npinfo; + possible_net_t nd_net; + union { + void *ml_priv; + struct pcpu_lstats *lstats; + struct pcpu_sw_netstats *tstats; + struct pcpu_dstats *dstats; + }; + struct garp_port *garp_port; + struct mrp_port *mrp_port; + struct device dev; + const struct attribute_group *sysfs_groups[4]; + const struct attribute_group *sysfs_rx_queue_group; + const struct rtnl_link_ops *rtnl_link_ops; + unsigned int gso_max_size; + u16 gso_max_segs; + const struct dcbnl_rtnl_ops *dcbnl_ops; + s16 num_tc; + struct netdev_tc_txq tc_to_txq[16]; + u8 prio_tc_map[16]; + unsigned int fcoe_ddp_xid; + struct netprio_map *priomap; + struct phy_device *phydev; + struct sfp_bus *sfp_bus; + struct lock_class_key *qdisc_tx_busylock; + struct lock_class_key *qdisc_running_key; + bool proto_down; + unsigned int wol_enabled: 1; + struct list_head 
net_notifier_list; + const struct macsec_ops *macsec_ops; + const struct udp_tunnel_nic_info *udp_tunnel_nic_info; + struct udp_tunnel_nic *udp_tunnel_nic; + struct bpf_xdp_entity xdp_state[3]; + long: 64; + long: 64; + long: 64; +}; + +enum bpf_reg_type { + NOT_INIT = 0, + SCALAR_VALUE = 1, + PTR_TO_CTX = 2, + CONST_PTR_TO_MAP = 3, + PTR_TO_MAP_VALUE = 4, + PTR_TO_MAP_VALUE_OR_NULL = 5, + PTR_TO_STACK = 6, + PTR_TO_PACKET_META = 7, + PTR_TO_PACKET = 8, + PTR_TO_PACKET_END = 9, + PTR_TO_FLOW_KEYS = 10, + PTR_TO_SOCKET = 11, + PTR_TO_SOCKET_OR_NULL = 12, + PTR_TO_SOCK_COMMON = 13, + PTR_TO_SOCK_COMMON_OR_NULL = 14, + PTR_TO_TCP_SOCK = 15, + PTR_TO_TCP_SOCK_OR_NULL = 16, + PTR_TO_TP_BUFFER = 17, + PTR_TO_XDP_SOCK = 18, + PTR_TO_BTF_ID = 19, + PTR_TO_BTF_ID_OR_NULL = 20, + PTR_TO_MEM = 21, + PTR_TO_MEM_OR_NULL = 22, + PTR_TO_RDONLY_BUF = 23, + PTR_TO_RDONLY_BUF_OR_NULL = 24, + PTR_TO_RDWR_BUF = 25, + PTR_TO_RDWR_BUF_OR_NULL = 26, + PTR_TO_PERCPU_BTF_ID = 27, +}; + +struct bpf_prog_ops { + int (*test_run)(struct bpf_prog *, const union bpf_attr *, union bpf_attr *); +}; + +struct bpf_offload_dev; + +struct bpf_prog_offload { + struct bpf_prog *prog; + struct net_device *netdev; + struct bpf_offload_dev *offdev; + void *dev_priv; + struct list_head offloads; + bool dev_state; + bool opt_failed; + void *jited_image; + u32 jited_len; +}; + +struct bpf_prog_stats { + u64 cnt; + u64 nsecs; + struct u64_stats_sync syncp; +}; + +struct btf_func_model { + u8 ret_size; + u8 nr_args; + u8 arg_size[12]; +}; + +struct bpf_trampoline { + struct hlist_node hlist; + struct mutex mutex; + refcount_t refcnt; + u64 key; + struct { + struct btf_func_model model; + void *addr; + bool ftrace_managed; + } func; + struct bpf_prog *extension_prog; + struct hlist_head progs_hlist[3]; + int progs_cnt[3]; + void *image; + u64 selector; + struct bpf_ksym ksym; +}; + +struct bpf_func_info_aux { + u16 linkage; + bool unreliable; +}; + +struct bpf_jit_poke_descriptor { + void *tailcall_target; + void *tailcall_bypass; + void *bypass_addr; + union { + struct { + struct bpf_map *map; + u32 key; + } tail_call; + }; + bool tailcall_target_stable; + u8 adj_off; + u16 reason; + u32 insn_idx; +}; + +struct bpf_ctx_arg_aux { + u32 offset; + enum bpf_reg_type reg_type; + u32 btf_id; +}; + +typedef unsigned int sk_buff_data_t; + +struct skb_ext; + +struct sk_buff { + union { + struct { + struct sk_buff *next; + struct sk_buff *prev; + union { + struct net_device *dev; + long unsigned int dev_scratch; + }; + }; + struct rb_node rbnode; + struct list_head list; + }; + union { + struct sock *sk; + int ip_defrag_offset; + }; + union { + ktime_t tstamp; + u64 skb_mstamp_ns; + }; + char cb[48]; + union { + struct { + long unsigned int _skb_refdst; + void (*destructor)(struct sk_buff *); + }; + struct list_head tcp_tsorted_anchor; + }; + long unsigned int _nfct; + unsigned int len; + unsigned int data_len; + __u16 mac_len; + __u16 hdr_len; + __u16 queue_mapping; + __u8 __cloned_offset[0]; + __u8 cloned: 1; + __u8 nohdr: 1; + __u8 fclone: 2; + __u8 peeked: 1; + __u8 head_frag: 1; + __u8 pfmemalloc: 1; + __u8 active_extensions; + __u32 headers_start[0]; + __u8 __pkt_type_offset[0]; + __u8 pkt_type: 3; + __u8 ignore_df: 1; + __u8 nf_trace: 1; + __u8 ip_summed: 2; + __u8 ooo_okay: 1; + __u8 l4_hash: 1; + __u8 sw_hash: 1; + __u8 wifi_acked_valid: 1; + __u8 wifi_acked: 1; + __u8 no_fcs: 1; + __u8 encapsulation: 1; + __u8 encap_hdr_csum: 1; + __u8 csum_valid: 1; + __u8 __pkt_vlan_present_offset[0]; + __u8 vlan_present: 1; + __u8 csum_complete_sw: 
1; + __u8 csum_level: 2; + __u8 csum_not_inet: 1; + __u8 dst_pending_confirm: 1; + __u8 ndisc_nodetype: 2; + __u8 ipvs_property: 1; + __u8 inner_protocol_type: 1; + __u8 remcsum_offload: 1; + __u8 offload_fwd_mark: 1; + __u8 offload_l3_fwd_mark: 1; + __u8 tc_skip_classify: 1; + __u8 tc_at_ingress: 1; + __u8 redirected: 1; + __u8 from_ingress: 1; + __u8 decrypted: 1; + __u16 tc_index; + union { + __wsum csum; + struct { + __u16 csum_start; + __u16 csum_offset; + }; + }; + __u32 priority; + int skb_iif; + __u32 hash; + __be16 vlan_proto; + __u16 vlan_tci; + union { + unsigned int napi_id; + unsigned int sender_cpu; + }; + __u32 secmark; + union { + __u32 mark; + __u32 reserved_tailroom; + }; + union { + __be16 inner_protocol; + __u8 inner_ipproto; + }; + __u16 inner_transport_header; + __u16 inner_network_header; + __u16 inner_mac_header; + __be16 protocol; + __u16 transport_header; + __u16 network_header; + __u16 mac_header; + __u32 headers_end[0]; + sk_buff_data_t tail; + sk_buff_data_t end; + unsigned char *head; + unsigned char *data; + unsigned int truesize; + refcount_t users; + struct skb_ext *extensions; +}; + +struct scatterlist { + long unsigned int page_link; + unsigned int offset; + unsigned int length; + dma_addr_t dma_address; + unsigned int dma_length; +}; + +enum { + Root_NFS = 255, + Root_CIFS = 254, + Root_RAM0 = 1048576, + Root_RAM1 = 1048577, + Root_FD0 = 2097152, + Root_HDA1 = 3145729, + Root_HDA2 = 3145730, + Root_SDA1 = 8388609, + Root_SDA2 = 8388610, + Root_HDC1 = 23068673, + Root_SR0 = 11534336, +}; + +struct ethhdr { + unsigned char h_dest[6]; + unsigned char h_source[6]; + __be16 h_proto; +}; + +struct flowi_tunnel { + __be64 tun_id; +}; + +struct flowi_common { + int flowic_oif; + int flowic_iif; + __u32 flowic_mark; + __u8 flowic_tos; + __u8 flowic_scope; + __u8 flowic_proto; + __u8 flowic_flags; + __u32 flowic_secid; + kuid_t flowic_uid; + struct flowi_tunnel flowic_tun_key; + __u32 flowic_multipath_hash; +}; + +union flowi_uli { + struct { + __be16 dport; + __be16 sport; + } ports; + struct { + __u8 type; + __u8 code; + } icmpt; + struct { + __le16 dport; + __le16 sport; + } dnports; + __be32 spi; + __be32 gre_key; + struct { + __u8 type; + } mht; +}; + +struct flowi4 { + struct flowi_common __fl_common; + __be32 saddr; + __be32 daddr; + union flowi_uli uli; +}; + +struct flowi6 { + struct flowi_common __fl_common; + struct in6_addr daddr; + struct in6_addr saddr; + __be32 flowlabel; + union flowi_uli uli; + __u32 mp_hash; +}; + +struct flowidn { + struct flowi_common __fl_common; + __le16 daddr; + __le16 saddr; + union flowi_uli uli; +}; + +struct flowi { + union { + struct flowi_common __fl_common; + struct flowi4 ip4; + struct flowi6 ip6; + struct flowidn dn; + } u; +}; + +struct ipstats_mib { + u64 mibs[37]; + struct u64_stats_sync syncp; +}; + +struct icmp_mib { + long unsigned int mibs[28]; +}; + +struct icmpmsg_mib { + atomic_long_t mibs[512]; +}; + +struct icmpv6_mib { + long unsigned int mibs[6]; +}; + +struct icmpv6_mib_device { + atomic_long_t mibs[6]; +}; + +struct icmpv6msg_mib { + atomic_long_t mibs[512]; +}; + +struct icmpv6msg_mib_device { + atomic_long_t mibs[512]; +}; + +struct tcp_mib { + long unsigned int mibs[16]; +}; + +struct udp_mib { + long unsigned int mibs[9]; +}; + +struct linux_mib { + long unsigned int mibs[124]; +}; + +struct linux_tls_mib { + long unsigned int mibs[11]; +}; + +struct inet_frags; + +struct fqdir { + long int high_thresh; + long int low_thresh; + int timeout; + int max_dist; + struct inet_frags *f; + struct net 
*net; + bool dead; + long: 56; + long: 64; + long: 64; + struct rhashtable rhashtable; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + atomic_long_t mem; + struct work_struct destroy_work; + long: 64; + long: 64; + long: 64; +}; + +struct inet_frag_queue; + +struct inet_frags { + unsigned int qsize; + void (*constructor)(struct inet_frag_queue *, const void *); + void (*destructor)(struct inet_frag_queue *); + void (*frag_expire)(struct timer_list *); + struct kmem_cache *frags_cachep; + const char *frags_cache_name; + struct rhashtable_params rhash_params; + refcount_t refcnt; + struct completion completion; +}; + +struct frag_v4_compare_key { + __be32 saddr; + __be32 daddr; + u32 user; + u32 vif; + __be16 id; + u16 protocol; +}; + +struct frag_v6_compare_key { + struct in6_addr saddr; + struct in6_addr daddr; + u32 user; + __be32 id; + u32 iif; +}; + +struct inet_frag_queue { + struct rhash_head node; + union { + struct frag_v4_compare_key v4; + struct frag_v6_compare_key v6; + } key; + struct timer_list timer; + spinlock_t lock; + refcount_t refcnt; + struct rb_root rb_fragments; + struct sk_buff *fragments_tail; + struct sk_buff *last_run_head; + ktime_t stamp; + int len; + int meat; + __u8 flags; + u16 max_size; + struct fqdir *fqdir; + struct callback_head rcu; +}; + +struct fib_rule; + +struct fib_lookup_arg; + +struct fib_rule_hdr; + +struct nlattr; + +struct netlink_ext_ack; + +struct nla_policy; + +struct fib_rules_ops { + int family; + struct list_head list; + int rule_size; + int addr_size; + int unresolved_rules; + int nr_goto_rules; + unsigned int fib_rules_seq; + int (*action)(struct fib_rule *, struct flowi *, int, struct fib_lookup_arg *); + bool (*suppress)(struct fib_rule *, struct fib_lookup_arg *); + int (*match)(struct fib_rule *, struct flowi *, int); + int (*configure)(struct fib_rule *, struct sk_buff *, struct fib_rule_hdr *, struct nlattr **, struct netlink_ext_ack *); + int (*delete)(struct fib_rule *); + int (*compare)(struct fib_rule *, struct fib_rule_hdr *, struct nlattr **); + int (*fill)(struct fib_rule *, struct sk_buff *, struct fib_rule_hdr *); + size_t (*nlmsg_payload)(struct fib_rule *); + void (*flush_cache)(struct fib_rules_ops *); + int nlgroup; + const struct nla_policy *policy; + struct list_head rules_list; + struct module *owner; + struct net *fro_net; + struct callback_head rcu; +}; + +enum tcp_ca_event { + CA_EVENT_TX_START = 0, + CA_EVENT_CWND_RESTART = 1, + CA_EVENT_COMPLETE_CWR = 2, + CA_EVENT_LOSS = 3, + CA_EVENT_ECN_NO_CE = 4, + CA_EVENT_ECN_IS_CE = 5, +}; + +struct ack_sample; + +struct rate_sample; + +union tcp_cc_info; + +struct tcp_congestion_ops { + struct list_head list; + u32 key; + u32 flags; + void (*init)(struct sock *); + void (*release)(struct sock *); + u32 (*ssthresh)(struct sock *); + void (*cong_avoid)(struct sock *, u32, u32); + void (*set_state)(struct sock *, u8); + void (*cwnd_event)(struct sock *, enum tcp_ca_event); + void (*in_ack_event)(struct sock *, u32); + u32 (*undo_cwnd)(struct sock *); + void (*pkts_acked)(struct sock *, const struct ack_sample *); + u32 (*min_tso_segs)(struct sock *); + u32 (*sndbuf_expand)(struct sock *); + void (*cong_control)(struct sock *, const struct rate_sample *); + size_t (*get_info)(struct sock *, u32, int *, union tcp_cc_info *); + char name[16]; + struct module *owner; +}; + +struct fib_notifier_ops { + int family; + struct list_head list; + unsigned int (*fib_seq_read)(struct net *); + int (*fib_dump)(struct net *, struct notifier_block *, 
struct netlink_ext_ack *); + struct module *owner; + struct callback_head rcu; +}; + +struct xfrm_state; + +struct lwtunnel_state; + +struct dst_entry { + struct net_device *dev; + struct dst_ops *ops; + long unsigned int _metrics; + long unsigned int expires; + struct xfrm_state *xfrm; + int (*input)(struct sk_buff *); + int (*output)(struct net *, struct sock *, struct sk_buff *); + short unsigned int flags; + short int obsolete; + short unsigned int header_len; + short unsigned int trailer_len; + atomic_t __refcnt; + int __use; + long unsigned int lastuse; + struct lwtunnel_state *lwtstate; + struct callback_head callback_head; + short int error; + short int __pad; + __u32 tclassid; +}; + +struct hh_cache { + unsigned int hh_len; + seqlock_t hh_lock; + long unsigned int hh_data[16]; +}; + +struct neigh_table; + +struct neigh_parms; + +struct neigh_ops; + +struct neighbour { + struct neighbour *next; + struct neigh_table *tbl; + struct neigh_parms *parms; + long unsigned int confirmed; + long unsigned int updated; + rwlock_t lock; + refcount_t refcnt; + unsigned int arp_queue_len_bytes; + struct sk_buff_head arp_queue; + struct timer_list timer; + long unsigned int used; + atomic_t probes; + __u8 flags; + __u8 nud_state; + __u8 type; + __u8 dead; + u8 protocol; + seqlock_t ha_lock; + int: 32; + unsigned char ha[32]; + struct hh_cache hh; + int (*output)(struct neighbour *, struct sk_buff *); + const struct neigh_ops *ops; + struct list_head gc_list; + struct callback_head rcu; + struct net_device *dev; + u8 primary_key[0]; +}; + +struct ipv6_stable_secret { + bool initialized; + struct in6_addr secret; +}; + +struct ipv6_devconf { + __s32 forwarding; + __s32 hop_limit; + __s32 mtu6; + __s32 accept_ra; + __s32 accept_redirects; + __s32 autoconf; + __s32 dad_transmits; + __s32 rtr_solicits; + __s32 rtr_solicit_interval; + __s32 rtr_solicit_max_interval; + __s32 rtr_solicit_delay; + __s32 force_mld_version; + __s32 mldv1_unsolicited_report_interval; + __s32 mldv2_unsolicited_report_interval; + __s32 use_tempaddr; + __s32 temp_valid_lft; + __s32 temp_prefered_lft; + __s32 regen_max_retry; + __s32 max_desync_factor; + __s32 max_addresses; + __s32 accept_ra_defrtr; + __s32 accept_ra_min_hop_limit; + __s32 accept_ra_pinfo; + __s32 ignore_routes_with_linkdown; + __s32 accept_ra_rtr_pref; + __s32 rtr_probe_interval; + __s32 accept_ra_rt_info_min_plen; + __s32 accept_ra_rt_info_max_plen; + __s32 proxy_ndp; + __s32 accept_source_route; + __s32 accept_ra_from_local; + __s32 mc_forwarding; + __s32 disable_ipv6; + __s32 drop_unicast_in_l2_multicast; + __s32 accept_dad; + __s32 force_tllao; + __s32 ndisc_notify; + __s32 suppress_frag_ndisc; + __s32 accept_ra_mtu; + __s32 drop_unsolicited_na; + struct ipv6_stable_secret stable_secret; + __s32 use_oif_addrs_only; + __s32 keep_addr_on_down; + __s32 seg6_enabled; + __u32 enhanced_dad; + __u32 addr_gen_mode; + __s32 disable_policy; + __s32 ndisc_tclass; + __s32 rpl_seg_enabled; + struct ctl_table_header *sysctl_header; +}; + +struct nf_queue_entry; + +struct nf_queue_handler { + int (*outfn)(struct nf_queue_entry *, unsigned int); + void (*nf_hook_drop)(struct net *); +}; + +enum nf_log_type { + NF_LOG_TYPE_LOG = 0, + NF_LOG_TYPE_ULOG = 1, + NF_LOG_TYPE_MAX = 2, +}; + +typedef u8 u_int8_t; + +struct nf_loginfo; + +typedef void nf_logfn(struct net *, u_int8_t, unsigned int, const struct sk_buff *, const struct net_device *, const struct net_device *, const struct nf_loginfo *, const char *); + +struct nf_logger { + char *name; + enum nf_log_type type; + 
nf_logfn *logfn; + struct module *me; +}; + +struct hlist_nulls_head { + struct hlist_nulls_node *first; +}; + +struct ip_conntrack_stat { + unsigned int found; + unsigned int invalid; + unsigned int insert; + unsigned int insert_failed; + unsigned int clash_resolve; + unsigned int drop; + unsigned int early_drop; + unsigned int error; + unsigned int expect_new; + unsigned int expect_create; + unsigned int expect_delete; + unsigned int search_restart; +}; + +struct ct_pcpu { + spinlock_t lock; + struct hlist_nulls_head unconfirmed; + struct hlist_nulls_head dying; +}; + +typedef struct { + union { + void *kernel; + void *user; + }; + bool is_kernel: 1; +} sockptr_t; + +typedef enum { + SS_FREE = 0, + SS_UNCONNECTED = 1, + SS_CONNECTING = 2, + SS_CONNECTED = 3, + SS_DISCONNECTING = 4, +} socket_state; + +struct socket_wq { + wait_queue_head_t wait; + struct fasync_struct *fasync_list; + long unsigned int flags; + struct callback_head rcu; + long: 64; +}; + +struct proto_ops; + +struct socket { + socket_state state; + short int type; + long unsigned int flags; + struct file *file; + struct sock *sk; + const struct proto_ops *ops; + long: 64; + long: 64; + long: 64; + struct socket_wq wq; +}; + +typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *, unsigned int, size_t); + +struct proto_ops { + int family; + unsigned int flags; + struct module *owner; + int (*release)(struct socket *); + int (*bind)(struct socket *, struct sockaddr *, int); + int (*connect)(struct socket *, struct sockaddr *, int, int); + int (*socketpair)(struct socket *, struct socket *); + int (*accept)(struct socket *, struct socket *, int, bool); + int (*getname)(struct socket *, struct sockaddr *, int); + __poll_t (*poll)(struct file *, struct socket *, struct poll_table_struct *); + int (*ioctl)(struct socket *, unsigned int, long unsigned int); + int (*compat_ioctl)(struct socket *, unsigned int, long unsigned int); + int (*gettstamp)(struct socket *, void *, bool, bool); + int (*listen)(struct socket *, int); + int (*shutdown)(struct socket *, int); + int (*setsockopt)(struct socket *, int, int, sockptr_t, unsigned int); + int (*getsockopt)(struct socket *, int, int, char *, int *); + void (*show_fdinfo)(struct seq_file *, struct socket *); + int (*sendmsg)(struct socket *, struct msghdr *, size_t); + int (*recvmsg)(struct socket *, struct msghdr *, size_t, int); + int (*mmap)(struct file *, struct socket *, struct vm_area_struct *); + ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int); + ssize_t (*splice_read)(struct socket *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); + int (*set_peek_off)(struct sock *, int); + int (*peek_len)(struct socket *); + int (*read_sock)(struct sock *, read_descriptor_t *, sk_read_actor_t); + int (*sendpage_locked)(struct sock *, struct page *, int, size_t, int); + int (*sendmsg_locked)(struct sock *, struct msghdr *, size_t); + int (*set_rcvlowat)(struct sock *, int); +}; + +struct pipe_buf_operations; + +struct pipe_buffer { + struct page *page; + unsigned int offset; + unsigned int len; + const struct pipe_buf_operations *ops; + unsigned int flags; + long unsigned int private; +}; + +struct pipe_buf_operations { + int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); + void (*release)(struct pipe_inode_info *, struct pipe_buffer *); + bool (*try_steal)(struct pipe_inode_info *, struct pipe_buffer *); + bool (*get)(struct pipe_inode_info *, struct pipe_buffer *); +}; + +struct skb_ext { + refcount_t refcnt; + u8 offset[4]; + 
u8 chunks; + long: 56; + char data[0]; +}; + +struct dql { + unsigned int num_queued; + unsigned int adj_limit; + unsigned int last_obj_cnt; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + unsigned int limit; + unsigned int num_completed; + unsigned int prev_ovlimit; + unsigned int prev_num_queued; + unsigned int prev_last_obj_cnt; + unsigned int lowest_slack; + long unsigned int slack_start_time; + unsigned int max_limit; + unsigned int min_limit; + unsigned int slack_hold_time; + long: 32; + long: 64; + long: 64; +}; + +struct ethtool_drvinfo { + __u32 cmd; + char driver[32]; + char version[32]; + char fw_version[32]; + char bus_info[32]; + char erom_version[32]; + char reserved2[12]; + __u32 n_priv_flags; + __u32 n_stats; + __u32 testinfo_len; + __u32 eedump_len; + __u32 regdump_len; +}; + +struct ethtool_wolinfo { + __u32 cmd; + __u32 supported; + __u32 wolopts; + __u8 sopass[6]; +}; + +struct ethtool_tunable { + __u32 cmd; + __u32 id; + __u32 type_id; + __u32 len; + void *data[0]; +}; + +struct ethtool_regs { + __u32 cmd; + __u32 version; + __u32 len; + __u8 data[0]; +}; + +struct ethtool_eeprom { + __u32 cmd; + __u32 magic; + __u32 offset; + __u32 len; + __u8 data[0]; +}; + +struct ethtool_eee { + __u32 cmd; + __u32 supported; + __u32 advertised; + __u32 lp_advertised; + __u32 eee_active; + __u32 eee_enabled; + __u32 tx_lpi_enabled; + __u32 tx_lpi_timer; + __u32 reserved[2]; +}; + +struct ethtool_modinfo { + __u32 cmd; + __u32 type; + __u32 eeprom_len; + __u32 reserved[8]; +}; + +struct ethtool_coalesce { + __u32 cmd; + __u32 rx_coalesce_usecs; + __u32 rx_max_coalesced_frames; + __u32 rx_coalesce_usecs_irq; + __u32 rx_max_coalesced_frames_irq; + __u32 tx_coalesce_usecs; + __u32 tx_max_coalesced_frames; + __u32 tx_coalesce_usecs_irq; + __u32 tx_max_coalesced_frames_irq; + __u32 stats_block_coalesce_usecs; + __u32 use_adaptive_rx_coalesce; + __u32 use_adaptive_tx_coalesce; + __u32 pkt_rate_low; + __u32 rx_coalesce_usecs_low; + __u32 rx_max_coalesced_frames_low; + __u32 tx_coalesce_usecs_low; + __u32 tx_max_coalesced_frames_low; + __u32 pkt_rate_high; + __u32 rx_coalesce_usecs_high; + __u32 rx_max_coalesced_frames_high; + __u32 tx_coalesce_usecs_high; + __u32 tx_max_coalesced_frames_high; + __u32 rate_sample_interval; +}; + +struct ethtool_ringparam { + __u32 cmd; + __u32 rx_max_pending; + __u32 rx_mini_max_pending; + __u32 rx_jumbo_max_pending; + __u32 tx_max_pending; + __u32 rx_pending; + __u32 rx_mini_pending; + __u32 rx_jumbo_pending; + __u32 tx_pending; +}; + +struct ethtool_channels { + __u32 cmd; + __u32 max_rx; + __u32 max_tx; + __u32 max_other; + __u32 max_combined; + __u32 rx_count; + __u32 tx_count; + __u32 other_count; + __u32 combined_count; +}; + +struct ethtool_pauseparam { + __u32 cmd; + __u32 autoneg; + __u32 rx_pause; + __u32 tx_pause; +}; + +enum ethtool_link_ext_state { + ETHTOOL_LINK_EXT_STATE_AUTONEG = 0, + ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE = 1, + ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH = 2, + ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY = 3, + ETHTOOL_LINK_EXT_STATE_NO_CABLE = 4, + ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE = 5, + ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE = 6, + ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE = 7, + ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED = 8, + ETHTOOL_LINK_EXT_STATE_OVERHEAT = 9, +}; + +enum ethtool_link_ext_substate_autoneg { + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED = 1, + ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED = 2, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NEXT_PAGE_EXCHANGE_FAILED 
= 3, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED_FORCE_MODE = 4, + ETHTOOL_LINK_EXT_SUBSTATE_AN_FEC_MISMATCH_DURING_OVERRIDE = 5, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD = 6, +}; + +enum ethtool_link_ext_substate_link_training { + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED = 1, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT = 2, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY = 3, + ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT = 4, +}; + +enum ethtool_link_ext_substate_link_logical_mismatch { + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK = 1, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK = 2, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_GET_ALIGN_STATUS = 3, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED = 4, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED = 5, +}; + +enum ethtool_link_ext_substate_bad_signal_integrity { + ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS = 1, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE = 2, +}; + +enum ethtool_link_ext_substate_cable_issue { + ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE = 1, + ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE = 2, +}; + +struct ethtool_test { + __u32 cmd; + __u32 flags; + __u32 reserved; + __u32 len; + __u64 data[0]; +}; + +struct ethtool_stats { + __u32 cmd; + __u32 n_stats; + __u64 data[0]; +}; + +struct ethtool_tcpip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be16 psrc; + __be16 pdst; + __u8 tos; +}; + +struct ethtool_ah_espip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be32 spi; + __u8 tos; +}; + +struct ethtool_usrip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be32 l4_4_bytes; + __u8 tos; + __u8 ip_ver; + __u8 proto; +}; + +struct ethtool_tcpip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be16 psrc; + __be16 pdst; + __u8 tclass; +}; + +struct ethtool_ah_espip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be32 spi; + __u8 tclass; +}; + +struct ethtool_usrip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be32 l4_4_bytes; + __u8 tclass; + __u8 l4_proto; +}; + +union ethtool_flow_union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_tcpip4_spec udp_ip4_spec; + struct ethtool_tcpip4_spec sctp_ip4_spec; + struct ethtool_ah_espip4_spec ah_ip4_spec; + struct ethtool_ah_espip4_spec esp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + struct ethtool_tcpip6_spec tcp_ip6_spec; + struct ethtool_tcpip6_spec udp_ip6_spec; + struct ethtool_tcpip6_spec sctp_ip6_spec; + struct ethtool_ah_espip6_spec ah_ip6_spec; + struct ethtool_ah_espip6_spec esp_ip6_spec; + struct ethtool_usrip6_spec usr_ip6_spec; + struct ethhdr ether_spec; + __u8 hdata[52]; +}; + +struct ethtool_flow_ext { + __u8 padding[2]; + unsigned char h_dest[6]; + __be16 vlan_etype; + __be16 vlan_tci; + __be32 data[2]; +}; + +struct ethtool_rx_flow_spec { + __u32 flow_type; + union ethtool_flow_union h_u; + struct ethtool_flow_ext h_ext; + union ethtool_flow_union m_u; + struct ethtool_flow_ext m_ext; + __u64 ring_cookie; + __u32 location; +}; + +struct ethtool_rxnfc { + __u32 cmd; + __u32 flow_type; + __u64 data; + struct ethtool_rx_flow_spec fs; + union { + __u32 rule_cnt; + __u32 rss_context; + }; + __u32 rule_locs[0]; +}; + +struct ethtool_flash { + __u32 cmd; + __u32 region; + char data[128]; +}; + +struct ethtool_dump { + __u32 cmd; + __u32 version; + __u32 flag; + __u32 len; + __u8 data[0]; +}; + +struct ethtool_ts_info { + __u32 cmd; + __u32 so_timestamping; + __s32 phc_index; + __u32 tx_types; + __u32 
tx_reserved[3]; + __u32 rx_filters; + __u32 rx_reserved[3]; +}; + +struct ethtool_fecparam { + __u32 cmd; + __u32 active_fec; + __u32 fec; + __u32 reserved; +}; + +struct ethtool_link_settings { + __u32 cmd; + __u32 speed; + __u8 duplex; + __u8 port; + __u8 phy_address; + __u8 autoneg; + __u8 mdio_support; + __u8 eth_tp_mdix; + __u8 eth_tp_mdix_ctrl; + __s8 link_mode_masks_nwords; + __u8 transceiver; + __u8 master_slave_cfg; + __u8 master_slave_state; + __u8 reserved1[1]; + __u32 reserved[7]; + __u32 link_mode_masks[0]; +}; + +enum ethtool_phys_id_state { + ETHTOOL_ID_INACTIVE = 0, + ETHTOOL_ID_ACTIVE = 1, + ETHTOOL_ID_ON = 2, + ETHTOOL_ID_OFF = 3, +}; + +struct ethtool_link_ext_state_info { + enum ethtool_link_ext_state link_ext_state; + union { + enum ethtool_link_ext_substate_autoneg autoneg; + enum ethtool_link_ext_substate_link_training link_training; + enum ethtool_link_ext_substate_link_logical_mismatch link_logical_mismatch; + enum ethtool_link_ext_substate_bad_signal_integrity bad_signal_integrity; + enum ethtool_link_ext_substate_cable_issue cable_issue; + u8 __link_ext_substate; + }; +}; + +struct ethtool_link_ksettings { + struct ethtool_link_settings base; + struct { + long unsigned int supported[2]; + long unsigned int advertising[2]; + long unsigned int lp_advertising[2]; + } link_modes; +}; + +struct ethtool_pause_stats { + u64 tx_pause_frames; + u64 rx_pause_frames; +}; + +struct ethtool_ops { + u32 supported_coalesce_params; + void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); + int (*get_regs_len)(struct net_device *); + void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); + void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); + int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); + u32 (*get_msglevel)(struct net_device *); + void (*set_msglevel)(struct net_device *, u32); + int (*nway_reset)(struct net_device *); + u32 (*get_link)(struct net_device *); + int (*get_link_ext_state)(struct net_device *, struct ethtool_link_ext_state_info *); + int (*get_eeprom_len)(struct net_device *); + int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); + int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); + void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); + int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); + void (*get_pause_stats)(struct net_device *, struct ethtool_pause_stats *); + void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *); + int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *); + void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); + void (*get_strings)(struct net_device *, u32, u8 *); + int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state); + void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *); + int (*begin)(struct net_device *); + void (*complete)(struct net_device *); + u32 (*get_priv_flags)(struct net_device *); + int (*set_priv_flags)(struct net_device *, u32); + int (*get_sset_count)(struct net_device *, int); + int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *); + int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); + int (*flash_device)(struct net_device *, struct ethtool_flash *); + int (*reset)(struct net_device *, u32 *); + u32 (*get_rxfh_key_size)(struct 
net_device *); + u32 (*get_rxfh_indir_size)(struct net_device *); + int (*get_rxfh)(struct net_device *, u32 *, u8 *, u8 *); + int (*set_rxfh)(struct net_device *, const u32 *, const u8 *, const u8); + int (*get_rxfh_context)(struct net_device *, u32 *, u8 *, u8 *, u32); + int (*set_rxfh_context)(struct net_device *, const u32 *, const u8 *, const u8, u32 *, bool); + void (*get_channels)(struct net_device *, struct ethtool_channels *); + int (*set_channels)(struct net_device *, struct ethtool_channels *); + int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); + int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *); + int (*set_dump)(struct net_device *, struct ethtool_dump *); + int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *); + int (*get_module_info)(struct net_device *, struct ethtool_modinfo *); + int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*get_eee)(struct net_device *, struct ethtool_eee *); + int (*set_eee)(struct net_device *, struct ethtool_eee *); + int (*get_tunable)(struct net_device *, const struct ethtool_tunable *, void *); + int (*set_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); + int (*get_per_queue_coalesce)(struct net_device *, u32, struct ethtool_coalesce *); + int (*set_per_queue_coalesce)(struct net_device *, u32, struct ethtool_coalesce *); + int (*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *); + int (*set_link_ksettings)(struct net_device *, const struct ethtool_link_ksettings *); + int (*get_fecparam)(struct net_device *, struct ethtool_fecparam *); + int (*set_fecparam)(struct net_device *, struct ethtool_fecparam *); + void (*get_ethtool_phy_stats)(struct net_device *, struct ethtool_stats *, u64 *); + int (*get_phy_tunable)(struct net_device *, const struct ethtool_tunable *, void *); + int (*set_phy_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); +}; + +struct netlink_ext_ack { + const char *_msg; + const struct nlattr *bad_attr; + const struct nla_policy *policy; + u8 cookie[20]; + u8 cookie_len; +}; + +struct ieee_ets { + __u8 willing; + __u8 ets_cap; + __u8 cbs; + __u8 tc_tx_bw[8]; + __u8 tc_rx_bw[8]; + __u8 tc_tsa[8]; + __u8 prio_tc[8]; + __u8 tc_reco_bw[8]; + __u8 tc_reco_tsa[8]; + __u8 reco_prio_tc[8]; +}; + +struct ieee_maxrate { + __u64 tc_maxrate[8]; +}; + +struct ieee_qcn { + __u8 rpg_enable[8]; + __u32 rppp_max_rps[8]; + __u32 rpg_time_reset[8]; + __u32 rpg_byte_reset[8]; + __u32 rpg_threshold[8]; + __u32 rpg_max_rate[8]; + __u32 rpg_ai_rate[8]; + __u32 rpg_hai_rate[8]; + __u32 rpg_gd[8]; + __u32 rpg_min_dec_fac[8]; + __u32 rpg_min_rate[8]; + __u32 cndd_state_machine[8]; +}; + +struct ieee_qcn_stats { + __u64 rppp_rp_centiseconds[8]; + __u32 rppp_created_rps[8]; +}; + +struct ieee_pfc { + __u8 pfc_cap; + __u8 pfc_en; + __u8 mbc; + __u16 delay; + __u64 requests[8]; + __u64 indications[8]; +}; + +struct dcbnl_buffer { + __u8 prio2buffer[8]; + __u32 buffer_size[8]; + __u32 total_size; +}; + +struct cee_pg { + __u8 willing; + __u8 error; + __u8 pg_en; + __u8 tcs_supported; + __u8 pg_bw[8]; + __u8 prio_pg[8]; +}; + +struct cee_pfc { + __u8 willing; + __u8 error; + __u8 pfc_en; + __u8 tcs_supported; +}; + +struct dcb_app { + __u8 selector; + __u8 priority; + __u16 protocol; +}; + +struct dcb_peer_app_info { + __u8 willing; + __u8 error; +}; + +struct dcbnl_rtnl_ops { + int (*ieee_getets)(struct net_device *, struct ieee_ets *); + int (*ieee_setets)(struct net_device *, struct 
ieee_ets *); + int (*ieee_getmaxrate)(struct net_device *, struct ieee_maxrate *); + int (*ieee_setmaxrate)(struct net_device *, struct ieee_maxrate *); + int (*ieee_getqcn)(struct net_device *, struct ieee_qcn *); + int (*ieee_setqcn)(struct net_device *, struct ieee_qcn *); + int (*ieee_getqcnstats)(struct net_device *, struct ieee_qcn_stats *); + int (*ieee_getpfc)(struct net_device *, struct ieee_pfc *); + int (*ieee_setpfc)(struct net_device *, struct ieee_pfc *); + int (*ieee_getapp)(struct net_device *, struct dcb_app *); + int (*ieee_setapp)(struct net_device *, struct dcb_app *); + int (*ieee_delapp)(struct net_device *, struct dcb_app *); + int (*ieee_peer_getets)(struct net_device *, struct ieee_ets *); + int (*ieee_peer_getpfc)(struct net_device *, struct ieee_pfc *); + u8 (*getstate)(struct net_device *); + u8 (*setstate)(struct net_device *, u8); + void (*getpermhwaddr)(struct net_device *, u8 *); + void (*setpgtccfgtx)(struct net_device *, int, u8, u8, u8, u8); + void (*setpgbwgcfgtx)(struct net_device *, int, u8); + void (*setpgtccfgrx)(struct net_device *, int, u8, u8, u8, u8); + void (*setpgbwgcfgrx)(struct net_device *, int, u8); + void (*getpgtccfgtx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); + void (*getpgbwgcfgtx)(struct net_device *, int, u8 *); + void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); + void (*getpgbwgcfgrx)(struct net_device *, int, u8 *); + void (*setpfccfg)(struct net_device *, int, u8); + void (*getpfccfg)(struct net_device *, int, u8 *); + u8 (*setall)(struct net_device *); + u8 (*getcap)(struct net_device *, int, u8 *); + int (*getnumtcs)(struct net_device *, int, u8 *); + int (*setnumtcs)(struct net_device *, int, u8); + u8 (*getpfcstate)(struct net_device *); + void (*setpfcstate)(struct net_device *, u8); + void (*getbcncfg)(struct net_device *, int, u32 *); + void (*setbcncfg)(struct net_device *, int, u32); + void (*getbcnrp)(struct net_device *, int, u8 *); + void (*setbcnrp)(struct net_device *, int, u8); + int (*setapp)(struct net_device *, u8, u16, u8); + int (*getapp)(struct net_device *, u8, u16); + u8 (*getfeatcfg)(struct net_device *, int, u8 *); + u8 (*setfeatcfg)(struct net_device *, int, u8); + u8 (*getdcbx)(struct net_device *); + u8 (*setdcbx)(struct net_device *, u8); + int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *, u16 *); + int (*peer_getapptable)(struct net_device *, struct dcb_app *); + int (*cee_peer_getpg)(struct net_device *, struct cee_pg *); + int (*cee_peer_getpfc)(struct net_device *, struct cee_pfc *); + int (*dcbnl_getbuffer)(struct net_device *, struct dcbnl_buffer *); + int (*dcbnl_setbuffer)(struct net_device *, struct dcbnl_buffer *); +}; + +struct netprio_map { + struct callback_head rcu; + u32 priomap_len; + u32 priomap[0]; +}; + +struct xdp_mem_info { + u32 type; + u32 id; +}; + +struct xdp_rxq_info { + struct net_device *dev; + u32 queue_index; + u32 reg_state; + struct xdp_mem_info mem; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct xdp_frame { + void *data; + u16 len; + u16 headroom; + u32 metasize: 8; + u32 frame_sz: 24; + struct xdp_mem_info mem; + struct net_device *dev_rx; +}; + +struct nlmsghdr { + __u32 nlmsg_len; + __u16 nlmsg_type; + __u16 nlmsg_flags; + __u32 nlmsg_seq; + __u32 nlmsg_pid; +}; + +struct nlattr { + __u16 nla_len; + __u16 nla_type; +}; + +struct netlink_range_validation; + +struct netlink_range_validation_signed; + +struct nla_policy { + u8 type; + u8 validation_type; + u16 len; + union { + const u32 
bitfield32_valid; + const u32 mask; + const char *reject_message; + const struct nla_policy *nested_policy; + struct netlink_range_validation *range; + struct netlink_range_validation_signed *range_signed; + struct { + s16 min; + s16 max; + }; + int (*validate)(const struct nlattr *, struct netlink_ext_ack *); + u16 strict_start_type; + }; +}; + +struct netlink_callback { + struct sk_buff *skb; + const struct nlmsghdr *nlh; + int (*dump)(struct sk_buff *, struct netlink_callback *); + int (*done)(struct netlink_callback *); + void *data; + struct module *module; + struct netlink_ext_ack *extack; + u16 family; + u16 answer_flags; + u32 min_dump_alloc; + unsigned int prev_seq; + unsigned int seq; + bool strict_check; + union { + u8 ctx[48]; + long int args[6]; + }; +}; + +struct ndmsg { + __u8 ndm_family; + __u8 ndm_pad1; + __u16 ndm_pad2; + __s32 ndm_ifindex; + __u16 ndm_state; + __u8 ndm_flags; + __u8 ndm_type; +}; + +struct rtnl_link_stats64 { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 rx_errors; + __u64 tx_errors; + __u64 rx_dropped; + __u64 tx_dropped; + __u64 multicast; + __u64 collisions; + __u64 rx_length_errors; + __u64 rx_over_errors; + __u64 rx_crc_errors; + __u64 rx_frame_errors; + __u64 rx_fifo_errors; + __u64 rx_missed_errors; + __u64 tx_aborted_errors; + __u64 tx_carrier_errors; + __u64 tx_fifo_errors; + __u64 tx_heartbeat_errors; + __u64 tx_window_errors; + __u64 rx_compressed; + __u64 tx_compressed; + __u64 rx_nohandler; +}; + +struct ifla_vf_guid { + __u32 vf; + __u64 guid; +}; + +struct ifla_vf_stats { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 broadcast; + __u64 multicast; + __u64 rx_dropped; + __u64 tx_dropped; +}; + +struct ifla_vf_info { + __u32 vf; + __u8 mac[32]; + __u32 vlan; + __u32 qos; + __u32 spoofchk; + __u32 linkstate; + __u32 min_tx_rate; + __u32 max_tx_rate; + __u32 rss_query_en; + __u32 trusted; + __be16 vlan_proto; +}; + +struct tc_stats { + __u64 bytes; + __u32 packets; + __u32 drops; + __u32 overlimits; + __u32 bps; + __u32 pps; + __u32 qlen; + __u32 backlog; +}; + +struct tc_sizespec { + unsigned char cell_log; + unsigned char size_log; + short int cell_align; + int overhead; + unsigned int linklayer; + unsigned int mpu; + unsigned int mtu; + unsigned int tsize; +}; + +enum netdev_tx { + __NETDEV_TX_MIN = 2147483648, + NETDEV_TX_OK = 0, + NETDEV_TX_BUSY = 16, +}; + +typedef enum netdev_tx netdev_tx_t; + +struct header_ops { + int (*create)(struct sk_buff *, struct net_device *, short unsigned int, const void *, const void *, unsigned int); + int (*parse)(const struct sk_buff *, unsigned char *); + int (*cache)(const struct neighbour *, struct hh_cache *, __be16); + void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *); + bool (*validate)(const char *, unsigned int); + __be16 (*parse_protocol)(const struct sk_buff *); +}; + +struct xsk_buff_pool; + +struct netdev_queue { + struct net_device *dev; + struct Qdisc *qdisc; + struct Qdisc *qdisc_sleeping; + struct kobject kobj; + int numa_node; + long unsigned int tx_maxrate; + long unsigned int trans_timeout; + struct net_device *sb_dev; + struct xsk_buff_pool *pool; + spinlock_t _xmit_lock; + int xmit_lock_owner; + long unsigned int trans_start; + long unsigned int state; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct dql dql; +}; + +struct qdisc_skb_head { + struct sk_buff *head; + struct sk_buff *tail; + __u32 qlen; + spinlock_t lock; +}; + +struct 
gnet_stats_basic_packed { + __u64 bytes; + __u64 packets; +}; + +struct gnet_stats_queue { + __u32 qlen; + __u32 backlog; + __u32 drops; + __u32 requeues; + __u32 overlimits; +}; + +struct Qdisc_ops; + +struct qdisc_size_table; + +struct net_rate_estimator; + +struct gnet_stats_basic_cpu; + +struct Qdisc { + int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); + struct sk_buff * (*dequeue)(struct Qdisc *); + unsigned int flags; + u32 limit; + const struct Qdisc_ops *ops; + struct qdisc_size_table *stab; + struct hlist_node hash; + u32 handle; + u32 parent; + struct netdev_queue *dev_queue; + struct net_rate_estimator *rate_est; + struct gnet_stats_basic_cpu *cpu_bstats; + struct gnet_stats_queue *cpu_qstats; + int pad; + refcount_t refcnt; + long: 64; + long: 64; + long: 64; + struct sk_buff_head gso_skb; + struct qdisc_skb_head q; + struct gnet_stats_basic_packed bstats; + seqcount_t running; + struct gnet_stats_queue qstats; + long unsigned int state; + struct Qdisc *next_sched; + struct sk_buff_head skb_bad_txq; + spinlock_t busylock; + spinlock_t seqlock; + bool empty; + struct callback_head rcu; + long: 64; + long: 64; + long: 64; + long: 64; + long int privdata[0]; +}; + +struct rps_map { + unsigned int len; + struct callback_head rcu; + u16 cpus[0]; +}; + +struct rps_dev_flow { + u16 cpu; + u16 filter; + unsigned int last_qtail; +}; + +struct rps_dev_flow_table { + unsigned int mask; + struct callback_head rcu; + struct rps_dev_flow flows[0]; +}; + +struct netdev_rx_queue { + struct rps_map *rps_map; + struct rps_dev_flow_table *rps_flow_table; + struct kobject kobj; + struct net_device *dev; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct xdp_rxq_info xdp_rxq; + struct xsk_buff_pool *pool; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct xps_map { + unsigned int len; + unsigned int alloc_len; + struct callback_head rcu; + u16 queues[0]; +}; + +struct xps_dev_maps { + struct callback_head rcu; + struct xps_map *attr_map[0]; +}; + +struct netdev_fcoe_hbainfo { + char manufacturer[64]; + char serial_number[64]; + char hardware_version[64]; + char driver_version[64]; + char optionrom_version[64]; + char firmware_version[64]; + char model[256]; + char model_description[256]; +}; + +struct netdev_phys_item_id { + unsigned char id[32]; + unsigned char id_len; +}; + +enum tc_setup_type { + TC_SETUP_QDISC_MQPRIO = 0, + TC_SETUP_CLSU32 = 1, + TC_SETUP_CLSFLOWER = 2, + TC_SETUP_CLSMATCHALL = 3, + TC_SETUP_CLSBPF = 4, + TC_SETUP_BLOCK = 5, + TC_SETUP_QDISC_CBS = 6, + TC_SETUP_QDISC_RED = 7, + TC_SETUP_QDISC_PRIO = 8, + TC_SETUP_QDISC_MQ = 9, + TC_SETUP_QDISC_ETF = 10, + TC_SETUP_ROOT_QDISC = 11, + TC_SETUP_QDISC_GRED = 12, + TC_SETUP_QDISC_TAPRIO = 13, + TC_SETUP_FT = 14, + TC_SETUP_QDISC_ETS = 15, + TC_SETUP_QDISC_TBF = 16, + TC_SETUP_QDISC_FIFO = 17, +}; + +enum bpf_netdev_command { + XDP_SETUP_PROG = 0, + XDP_SETUP_PROG_HW = 1, + BPF_OFFLOAD_MAP_ALLOC = 2, + BPF_OFFLOAD_MAP_FREE = 3, + XDP_SETUP_XSK_POOL = 4, +}; + +struct netdev_bpf { + enum bpf_netdev_command command; + union { + struct { + u32 flags; + struct bpf_prog *prog; + struct netlink_ext_ack *extack; + }; + struct { + struct bpf_offloaded_map *offmap; + }; + struct { + struct xsk_buff_pool *pool; + u16 queue_id; + } xsk; + }; +}; + +struct xfrmdev_ops { + int (*xdo_dev_state_add)(struct xfrm_state *); + void (*xdo_dev_state_delete)(struct xfrm_state *); + void (*xdo_dev_state_free)(struct xfrm_state *); + bool (*xdo_dev_offload_ok)(struct 
sk_buff *, struct xfrm_state *); + void (*xdo_dev_state_advance_esn)(struct xfrm_state *); +}; + +struct dev_ifalias { + struct callback_head rcuhead; + char ifalias[0]; +}; + +struct netdev_name_node { + struct hlist_node hlist; + struct list_head list; + struct net_device *dev; + const char *name; +}; + +struct udp_tunnel_info; + +struct devlink_port; + +struct ip_tunnel_parm; + +struct net_device_ops { + int (*ndo_init)(struct net_device *); + void (*ndo_uninit)(struct net_device *); + int (*ndo_open)(struct net_device *); + int (*ndo_stop)(struct net_device *); + netdev_tx_t (*ndo_start_xmit)(struct sk_buff *, struct net_device *); + netdev_features_t (*ndo_features_check)(struct sk_buff *, struct net_device *, netdev_features_t); + u16 (*ndo_select_queue)(struct net_device *, struct sk_buff *, struct net_device *); + void (*ndo_change_rx_flags)(struct net_device *, int); + void (*ndo_set_rx_mode)(struct net_device *); + int (*ndo_set_mac_address)(struct net_device *, void *); + int (*ndo_validate_addr)(struct net_device *); + int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int); + int (*ndo_set_config)(struct net_device *, struct ifmap *); + int (*ndo_change_mtu)(struct net_device *, int); + int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *); + void (*ndo_tx_timeout)(struct net_device *, unsigned int); + void (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *); + bool (*ndo_has_offload_stats)(const struct net_device *, int); + int (*ndo_get_offload_stats)(int, const struct net_device *, void *); + struct net_device_stats * (*ndo_get_stats)(struct net_device *); + int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16, u16); + int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16, u16); + void (*ndo_poll_controller)(struct net_device *); + int (*ndo_netpoll_setup)(struct net_device *, struct netpoll_info *); + void (*ndo_netpoll_cleanup)(struct net_device *); + int (*ndo_set_vf_mac)(struct net_device *, int, u8 *); + int (*ndo_set_vf_vlan)(struct net_device *, int, u16, u8, __be16); + int (*ndo_set_vf_rate)(struct net_device *, int, int, int); + int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool); + int (*ndo_set_vf_trust)(struct net_device *, int, bool); + int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *); + int (*ndo_set_vf_link_state)(struct net_device *, int, int); + int (*ndo_get_vf_stats)(struct net_device *, int, struct ifla_vf_stats *); + int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **); + int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *); + int (*ndo_get_vf_guid)(struct net_device *, int, struct ifla_vf_guid *, struct ifla_vf_guid *); + int (*ndo_set_vf_guid)(struct net_device *, int, u64, int); + int (*ndo_set_vf_rss_query_en)(struct net_device *, int, bool); + int (*ndo_setup_tc)(struct net_device *, enum tc_setup_type, void *); + int (*ndo_fcoe_enable)(struct net_device *); + int (*ndo_fcoe_disable)(struct net_device *); + int (*ndo_fcoe_ddp_setup)(struct net_device *, u16, struct scatterlist *, unsigned int); + int (*ndo_fcoe_ddp_done)(struct net_device *, u16); + int (*ndo_fcoe_ddp_target)(struct net_device *, u16, struct scatterlist *, unsigned int); + int (*ndo_fcoe_get_hbainfo)(struct net_device *, struct netdev_fcoe_hbainfo *); + int (*ndo_fcoe_get_wwn)(struct net_device *, u64 *, int); + int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16, u32); + int (*ndo_add_slave)(struct net_device *, struct net_device *, struct netlink_ext_ack *); + 
int (*ndo_del_slave)(struct net_device *, struct net_device *); + struct net_device * (*ndo_get_xmit_slave)(struct net_device *, struct sk_buff *, bool); + netdev_features_t (*ndo_fix_features)(struct net_device *, netdev_features_t); + int (*ndo_set_features)(struct net_device *, netdev_features_t); + int (*ndo_neigh_construct)(struct net_device *, struct neighbour *); + void (*ndo_neigh_destroy)(struct net_device *, struct neighbour *); + int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16, u16, struct netlink_ext_ack *); + int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16); + int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, struct net_device *, int *); + int (*ndo_fdb_get)(struct sk_buff *, struct nlattr **, struct net_device *, const unsigned char *, u16, u32, u32, struct netlink_ext_ack *); + int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *, u16, struct netlink_ext_ack *); + int (*ndo_bridge_getlink)(struct sk_buff *, u32, u32, struct net_device *, u32, int); + int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *, u16); + int (*ndo_change_carrier)(struct net_device *, bool); + int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_item_id *); + int (*ndo_get_port_parent_id)(struct net_device *, struct netdev_phys_item_id *); + int (*ndo_get_phys_port_name)(struct net_device *, char *, size_t); + void (*ndo_udp_tunnel_add)(struct net_device *, struct udp_tunnel_info *); + void (*ndo_udp_tunnel_del)(struct net_device *, struct udp_tunnel_info *); + void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *); + void (*ndo_dfwd_del_station)(struct net_device *, void *); + int (*ndo_set_tx_maxrate)(struct net_device *, int, u32); + int (*ndo_get_iflink)(const struct net_device *); + int (*ndo_change_proto_down)(struct net_device *, bool); + int (*ndo_fill_metadata_dst)(struct net_device *, struct sk_buff *); + void (*ndo_set_rx_headroom)(struct net_device *, int); + int (*ndo_bpf)(struct net_device *, struct netdev_bpf *); + int (*ndo_xdp_xmit)(struct net_device *, int, struct xdp_frame **, u32); + int (*ndo_xsk_wakeup)(struct net_device *, u32, u32); + struct devlink_port * (*ndo_get_devlink_port)(struct net_device *); + int (*ndo_tunnel_ctl)(struct net_device *, struct ip_tunnel_parm *, int); + struct net_device * (*ndo_get_peer_dev)(struct net_device *); +}; + +struct neigh_parms { + possible_net_t net; + struct net_device *dev; + struct list_head list; + int (*neigh_setup)(struct neighbour *); + struct neigh_table *tbl; + void *sysctl_table; + int dead; + refcount_t refcnt; + struct callback_head callback_head; + int reachable_time; + int data[13]; + long unsigned int data_state[1]; +}; + +struct pcpu_lstats { + u64_stats_t packets; + u64_stats_t bytes; + struct u64_stats_sync syncp; +}; + +struct pcpu_sw_netstats { + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + struct u64_stats_sync syncp; +}; + +struct iw_request_info; + +union iwreq_data; + +typedef int (*iw_handler)(struct net_device *, struct iw_request_info *, union iwreq_data *, char *); + +struct iw_priv_args; + +struct iw_statistics; + +struct iw_handler_def { + const iw_handler *standard; + __u16 num_standard; + __u16 num_private; + __u16 num_private_args; + const iw_handler *private; + const struct iw_priv_args *private_args; + struct iw_statistics * (*get_wireless_stats)(struct net_device *); +}; + +struct 
l3mdev_ops { + u32 (*l3mdev_fib_table)(const struct net_device *); + struct sk_buff * (*l3mdev_l3_rcv)(struct net_device *, struct sk_buff *, u16); + struct sk_buff * (*l3mdev_l3_out)(struct net_device *, struct sock *, struct sk_buff *, u16); + struct dst_entry * (*l3mdev_link_scope_lookup)(const struct net_device *, struct flowi6 *); +}; + +struct nd_opt_hdr; + +struct ndisc_options; + +struct prefix_info; + +struct ndisc_ops { + int (*is_useropt)(u8); + int (*parse_options)(const struct net_device *, struct nd_opt_hdr *, struct ndisc_options *); + void (*update)(const struct net_device *, struct neighbour *, u32, u8, const struct ndisc_options *); + int (*opt_addr_space)(const struct net_device *, u8, struct neighbour *, u8 *, u8 **); + void (*fill_addr_option)(const struct net_device *, struct sk_buff *, u8, const u8 *); + void (*prefix_rcv_add_addr)(struct net *, struct net_device *, const struct prefix_info *, struct inet6_dev *, struct in6_addr *, int, u32, bool, bool, __u32, u32, bool); +}; + +enum tls_offload_ctx_dir { + TLS_OFFLOAD_CTX_DIR_RX = 0, + TLS_OFFLOAD_CTX_DIR_TX = 1, +}; + +struct tls_crypto_info; + +struct tls_context; + +struct tlsdev_ops { + int (*tls_dev_add)(struct net_device *, struct sock *, enum tls_offload_ctx_dir, struct tls_crypto_info *, u32); + void (*tls_dev_del)(struct net_device *, struct tls_context *, enum tls_offload_ctx_dir); + int (*tls_dev_resync)(struct net_device *, struct sock *, u32, u8 *, enum tls_offload_ctx_dir); +}; + +struct ipv6_devstat { + struct proc_dir_entry *proc_dir_entry; + struct ipstats_mib *ipv6; + struct icmpv6_mib_device *icmpv6dev; + struct icmpv6msg_mib_device *icmpv6msgdev; +}; + +struct ifmcaddr6; + +struct ifacaddr6; + +struct inet6_dev { + struct net_device *dev; + struct list_head addr_list; + struct ifmcaddr6 *mc_list; + struct ifmcaddr6 *mc_tomb; + spinlock_t mc_lock; + unsigned char mc_qrv; + unsigned char mc_gq_running; + unsigned char mc_ifc_count; + unsigned char mc_dad_count; + long unsigned int mc_v1_seen; + long unsigned int mc_qi; + long unsigned int mc_qri; + long unsigned int mc_maxdelay; + struct timer_list mc_gq_timer; + struct timer_list mc_ifc_timer; + struct timer_list mc_dad_timer; + struct ifacaddr6 *ac_list; + rwlock_t lock; + refcount_t refcnt; + __u32 if_flags; + int dead; + u32 desync_factor; + struct list_head tempaddr_list; + struct in6_addr token; + struct neigh_parms *nd_parms; + struct ipv6_devconf cnf; + struct ipv6_devstat stats; + struct timer_list rs_timer; + __s32 rs_interval; + __u8 rs_probes; + long unsigned int tstamp; + struct callback_head rcu; +}; + +struct tcf_proto; + +struct tcf_block; + +struct mini_Qdisc { + struct tcf_proto *filter_list; + struct tcf_block *block; + struct gnet_stats_basic_cpu *cpu_bstats; + struct gnet_stats_queue *cpu_qstats; + struct callback_head rcu; +}; + +struct rtnl_link_ops { + struct list_head list; + const char *kind; + size_t priv_size; + void (*setup)(struct net_device *); + unsigned int maxtype; + const struct nla_policy *policy; + int (*validate)(struct nlattr **, struct nlattr **, struct netlink_ext_ack *); + int (*newlink)(struct net *, struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *); + int (*changelink)(struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *); + void (*dellink)(struct net_device *, struct list_head *); + size_t (*get_size)(const struct net_device *); + int (*fill_info)(struct sk_buff *, const struct net_device *); + size_t (*get_xstats_size)(const struct net_device 
*); + int (*fill_xstats)(struct sk_buff *, const struct net_device *); + unsigned int (*get_num_tx_queues)(); + unsigned int (*get_num_rx_queues)(); + unsigned int slave_maxtype; + const struct nla_policy *slave_policy; + int (*slave_changelink)(struct net_device *, struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *); + size_t (*get_slave_size)(const struct net_device *, const struct net_device *); + int (*fill_slave_info)(struct sk_buff *, const struct net_device *, const struct net_device *); + struct net * (*get_link_net)(const struct net_device *); + size_t (*get_linkxstats_size)(const struct net_device *, int); + int (*fill_linkxstats)(struct sk_buff *, const struct net_device *, int *, int); +}; + +struct udp_tunnel_nic_table_info { + unsigned int n_entries; + unsigned int tunnel_types; +}; + +struct udp_tunnel_nic_shared; + +struct udp_tunnel_nic_info { + int (*set_port)(struct net_device *, unsigned int, unsigned int, struct udp_tunnel_info *); + int (*unset_port)(struct net_device *, unsigned int, unsigned int, struct udp_tunnel_info *); + int (*sync_table)(struct net_device *, unsigned int); + struct udp_tunnel_nic_shared *shared; + unsigned int flags; + struct udp_tunnel_nic_table_info tables[4]; +}; + +enum { + RTAX_UNSPEC = 0, + RTAX_LOCK = 1, + RTAX_MTU = 2, + RTAX_WINDOW = 3, + RTAX_RTT = 4, + RTAX_RTTVAR = 5, + RTAX_SSTHRESH = 6, + RTAX_CWND = 7, + RTAX_ADVMSS = 8, + RTAX_REORDERING = 9, + RTAX_HOPLIMIT = 10, + RTAX_INITCWND = 11, + RTAX_FEATURES = 12, + RTAX_RTO_MIN = 13, + RTAX_INITRWND = 14, + RTAX_QUICKACK = 15, + RTAX_CC_ALGO = 16, + RTAX_FASTOPEN_NO_COOKIE = 17, + __RTAX_MAX = 18, +}; + +struct tcmsg { + unsigned char tcm_family; + unsigned char tcm__pad1; + short unsigned int tcm__pad2; + int tcm_ifindex; + __u32 tcm_handle; + __u32 tcm_parent; + __u32 tcm_info; +}; + +struct gnet_stats_basic_cpu { + struct gnet_stats_basic_packed bstats; + struct u64_stats_sync syncp; +}; + +struct gnet_dump { + spinlock_t *lock; + struct sk_buff *skb; + struct nlattr *tail; + int compat_tc_stats; + int compat_xstats; + int padattr; + void *xstats; + int xstats_len; + struct tc_stats tc_stats; +}; + +struct netlink_range_validation { + u64 min; + u64 max; +}; + +struct netlink_range_validation_signed { + s64 min; + s64 max; +}; + +enum flow_action_hw_stats_bit { + FLOW_ACTION_HW_STATS_IMMEDIATE_BIT = 0, + FLOW_ACTION_HW_STATS_DELAYED_BIT = 1, + FLOW_ACTION_HW_STATS_DISABLED_BIT = 2, + FLOW_ACTION_HW_STATS_NUM_BITS = 3, +}; + +struct flow_block { + struct list_head cb_list; +}; + +typedef int flow_setup_cb_t(enum tc_setup_type, void *, void *); + +struct qdisc_size_table { + struct callback_head rcu; + struct list_head list; + struct tc_sizespec szopts; + int refcnt; + u16 data[0]; +}; + +struct Qdisc_class_ops; + +struct Qdisc_ops { + struct Qdisc_ops *next; + const struct Qdisc_class_ops *cl_ops; + char id[16]; + int priv_size; + unsigned int static_flags; + int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **); + struct sk_buff * (*dequeue)(struct Qdisc *); + struct sk_buff * (*peek)(struct Qdisc *); + int (*init)(struct Qdisc *, struct nlattr *, struct netlink_ext_ack *); + void (*reset)(struct Qdisc *); + void (*destroy)(struct Qdisc *); + int (*change)(struct Qdisc *, struct nlattr *, struct netlink_ext_ack *); + void (*attach)(struct Qdisc *); + int (*change_tx_queue_len)(struct Qdisc *, unsigned int); + int (*dump)(struct Qdisc *, struct sk_buff *); + int (*dump_stats)(struct Qdisc *, struct gnet_dump *); + void 
(*ingress_block_set)(struct Qdisc *, u32); + void (*egress_block_set)(struct Qdisc *, u32); + u32 (*ingress_block_get)(struct Qdisc *); + u32 (*egress_block_get)(struct Qdisc *); + struct module *owner; +}; + +struct qdisc_walker; + +struct Qdisc_class_ops { + unsigned int flags; + struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *); + int (*graft)(struct Qdisc *, long unsigned int, struct Qdisc *, struct Qdisc **, struct netlink_ext_ack *); + struct Qdisc * (*leaf)(struct Qdisc *, long unsigned int); + void (*qlen_notify)(struct Qdisc *, long unsigned int); + long unsigned int (*find)(struct Qdisc *, u32); + int (*change)(struct Qdisc *, u32, u32, struct nlattr **, long unsigned int *, struct netlink_ext_ack *); + int (*delete)(struct Qdisc *, long unsigned int); + void (*walk)(struct Qdisc *, struct qdisc_walker *); + struct tcf_block * (*tcf_block)(struct Qdisc *, long unsigned int, struct netlink_ext_ack *); + long unsigned int (*bind_tcf)(struct Qdisc *, long unsigned int, u32); + void (*unbind_tcf)(struct Qdisc *, long unsigned int); + int (*dump)(struct Qdisc *, long unsigned int, struct sk_buff *, struct tcmsg *); + int (*dump_stats)(struct Qdisc *, long unsigned int, struct gnet_dump *); +}; + +struct tcf_chain; + +struct tcf_block { + struct mutex lock; + struct list_head chain_list; + u32 index; + u32 classid; + refcount_t refcnt; + struct net *net; + struct Qdisc *q; + struct rw_semaphore cb_lock; + struct flow_block flow_block; + struct list_head owner_list; + bool keep_dst; + atomic_t offloadcnt; + unsigned int nooffloaddevcnt; + unsigned int lockeddevcnt; + struct { + struct tcf_chain *chain; + struct list_head filter_chain_list; + } chain0; + struct callback_head rcu; + struct hlist_head proto_destroy_ht[128]; + struct mutex proto_destroy_lock; +}; + +struct tcf_result; + +struct tcf_proto_ops; + +struct tcf_proto { + struct tcf_proto *next; + void *root; + int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); + __be16 protocol; + u32 prio; + void *data; + const struct tcf_proto_ops *ops; + struct tcf_chain *chain; + spinlock_t lock; + bool deleting; + refcount_t refcnt; + struct callback_head rcu; + struct hlist_node destroy_ht_node; +}; + +struct tcf_result { + union { + struct { + long unsigned int class; + u32 classid; + }; + const struct tcf_proto *goto_tp; + struct { + bool ingress; + struct gnet_stats_queue *qstats; + }; + }; +}; + +struct tcf_walker; + +struct tcf_proto_ops { + struct list_head head; + char kind[16]; + int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); + int (*init)(struct tcf_proto *); + void (*destroy)(struct tcf_proto *, bool, struct netlink_ext_ack *); + void * (*get)(struct tcf_proto *, u32); + void (*put)(struct tcf_proto *, void *); + int (*change)(struct net *, struct sk_buff *, struct tcf_proto *, long unsigned int, u32, struct nlattr **, void **, bool, bool, struct netlink_ext_ack *); + int (*delete)(struct tcf_proto *, void *, bool *, bool, struct netlink_ext_ack *); + bool (*delete_empty)(struct tcf_proto *); + void (*walk)(struct tcf_proto *, struct tcf_walker *, bool); + int (*reoffload)(struct tcf_proto *, bool, flow_setup_cb_t *, void *, struct netlink_ext_ack *); + void (*hw_add)(struct tcf_proto *, void *); + void (*hw_del)(struct tcf_proto *, void *); + void (*bind_class)(void *, u32, long unsigned int, void *, long unsigned int); + void * (*tmplt_create)(struct net *, struct tcf_chain *, struct nlattr **, struct netlink_ext_ack *); + void 
(*tmplt_destroy)(void *); + int (*dump)(struct net *, struct tcf_proto *, void *, struct sk_buff *, struct tcmsg *, bool); + int (*terse_dump)(struct net *, struct tcf_proto *, void *, struct sk_buff *, struct tcmsg *, bool); + int (*tmplt_dump)(struct sk_buff *, struct net *, void *); + struct module *owner; + int flags; +}; + +struct tcf_chain { + struct mutex filter_chain_lock; + struct tcf_proto *filter_chain; + struct list_head list; + struct tcf_block *block; + u32 index; + unsigned int refcnt; + unsigned int action_refcnt; + bool explicitly_created; + bool flushing; + const struct tcf_proto_ops *tmplt_ops; + void *tmplt_priv; + struct callback_head rcu; +}; + +struct sock_fprog_kern { + u16 len; + struct sock_filter *filter; +}; + +struct sk_filter { + refcount_t refcnt; + struct callback_head rcu; + struct bpf_prog *prog; +}; + +enum { + NEIGH_VAR_MCAST_PROBES = 0, + NEIGH_VAR_UCAST_PROBES = 1, + NEIGH_VAR_APP_PROBES = 2, + NEIGH_VAR_MCAST_REPROBES = 3, + NEIGH_VAR_RETRANS_TIME = 4, + NEIGH_VAR_BASE_REACHABLE_TIME = 5, + NEIGH_VAR_DELAY_PROBE_TIME = 6, + NEIGH_VAR_GC_STALETIME = 7, + NEIGH_VAR_QUEUE_LEN_BYTES = 8, + NEIGH_VAR_PROXY_QLEN = 9, + NEIGH_VAR_ANYCAST_DELAY = 10, + NEIGH_VAR_PROXY_DELAY = 11, + NEIGH_VAR_LOCKTIME = 12, + NEIGH_VAR_QUEUE_LEN = 13, + NEIGH_VAR_RETRANS_TIME_MS = 14, + NEIGH_VAR_BASE_REACHABLE_TIME_MS = 15, + NEIGH_VAR_GC_INTERVAL = 16, + NEIGH_VAR_GC_THRESH1 = 17, + NEIGH_VAR_GC_THRESH2 = 18, + NEIGH_VAR_GC_THRESH3 = 19, + NEIGH_VAR_MAX = 20, +}; + +struct pneigh_entry; + +struct neigh_statistics; + +struct neigh_hash_table; + +struct neigh_table { + int family; + unsigned int entry_size; + unsigned int key_len; + __be16 protocol; + __u32 (*hash)(const void *, const struct net_device *, __u32 *); + bool (*key_eq)(const struct neighbour *, const void *); + int (*constructor)(struct neighbour *); + int (*pconstructor)(struct pneigh_entry *); + void (*pdestructor)(struct pneigh_entry *); + void (*proxy_redo)(struct sk_buff *); + int (*is_multicast)(const void *); + bool (*allow_add)(const struct net_device *, struct netlink_ext_ack *); + char *id; + struct neigh_parms parms; + struct list_head parms_list; + int gc_interval; + int gc_thresh1; + int gc_thresh2; + int gc_thresh3; + long unsigned int last_flush; + struct delayed_work gc_work; + struct timer_list proxy_timer; + struct sk_buff_head proxy_queue; + atomic_t entries; + atomic_t gc_entries; + struct list_head gc_list; + rwlock_t lock; + long unsigned int last_rand; + struct neigh_statistics *stats; + struct neigh_hash_table *nht; + struct pneigh_entry **phash_buckets; +}; + +struct neigh_statistics { + long unsigned int allocs; + long unsigned int destroys; + long unsigned int hash_grows; + long unsigned int res_failed; + long unsigned int lookups; + long unsigned int hits; + long unsigned int rcv_probes_mcast; + long unsigned int rcv_probes_ucast; + long unsigned int periodic_gc_runs; + long unsigned int forced_gc_runs; + long unsigned int unres_discards; + long unsigned int table_fulls; +}; + +struct neigh_ops { + int family; + void (*solicit)(struct neighbour *, struct sk_buff *); + void (*error_report)(struct neighbour *, struct sk_buff *); + int (*output)(struct neighbour *, struct sk_buff *); + int (*connected_output)(struct neighbour *, struct sk_buff *); +}; + +struct pneigh_entry { + struct pneigh_entry *next; + possible_net_t net; + struct net_device *dev; + u8 flags; + u8 protocol; + u8 key[0]; +}; + +struct neigh_hash_table { + struct neighbour **hash_buckets; + unsigned int hash_shift; + 
__u32 hash_rnd[4]; + struct callback_head rcu; +}; + +enum { + TCP_ESTABLISHED = 1, + TCP_SYN_SENT = 2, + TCP_SYN_RECV = 3, + TCP_FIN_WAIT1 = 4, + TCP_FIN_WAIT2 = 5, + TCP_TIME_WAIT = 6, + TCP_CLOSE = 7, + TCP_CLOSE_WAIT = 8, + TCP_LAST_ACK = 9, + TCP_LISTEN = 10, + TCP_CLOSING = 11, + TCP_NEW_SYN_RECV = 12, + TCP_MAX_STATES = 13, +}; + +struct fib_rule_hdr { + __u8 family; + __u8 dst_len; + __u8 src_len; + __u8 tos; + __u8 table; + __u8 res1; + __u8 res2; + __u8 action; + __u32 flags; +}; + +struct fib_rule_port_range { + __u16 start; + __u16 end; +}; + +struct fib_kuid_range { + kuid_t start; + kuid_t end; +}; + +struct fib_rule { + struct list_head list; + int iifindex; + int oifindex; + u32 mark; + u32 mark_mask; + u32 flags; + u32 table; + u8 action; + u8 l3mdev; + u8 proto; + u8 ip_proto; + u32 target; + __be64 tun_id; + struct fib_rule *ctarget; + struct net *fr_net; + refcount_t refcnt; + u32 pref; + int suppress_ifgroup; + int suppress_prefixlen; + char iifname[16]; + char oifname[16]; + struct fib_kuid_range uid_range; + struct fib_rule_port_range sport_range; + struct fib_rule_port_range dport_range; + struct callback_head rcu; +}; + +struct fib_lookup_arg { + void *lookup_ptr; + const void *lookup_data; + void *result; + struct fib_rule *rule; + u32 table; + int flags; +}; + +struct smc_hashinfo; + +struct request_sock_ops; + +struct timewait_sock_ops; + +struct udp_table; + +struct raw_hashinfo; + +struct proto { + void (*close)(struct sock *, long int); + int (*pre_connect)(struct sock *, struct sockaddr *, int); + int (*connect)(struct sock *, struct sockaddr *, int); + int (*disconnect)(struct sock *, int); + struct sock * (*accept)(struct sock *, int, int *, bool); + int (*ioctl)(struct sock *, int, long unsigned int); + int (*init)(struct sock *); + void (*destroy)(struct sock *); + void (*shutdown)(struct sock *, int); + int (*setsockopt)(struct sock *, int, int, sockptr_t, unsigned int); + int (*getsockopt)(struct sock *, int, int, char *, int *); + void (*keepalive)(struct sock *, int); + int (*compat_ioctl)(struct sock *, unsigned int, long unsigned int); + int (*sendmsg)(struct sock *, struct msghdr *, size_t); + int (*recvmsg)(struct sock *, struct msghdr *, size_t, int, int, int *); + int (*sendpage)(struct sock *, struct page *, int, size_t, int); + int (*bind)(struct sock *, struct sockaddr *, int); + int (*bind_add)(struct sock *, struct sockaddr *, int); + int (*backlog_rcv)(struct sock *, struct sk_buff *); + void (*release_cb)(struct sock *); + int (*hash)(struct sock *); + void (*unhash)(struct sock *); + void (*rehash)(struct sock *); + int (*get_port)(struct sock *, short unsigned int); + unsigned int inuse_idx; + bool (*stream_memory_free)(const struct sock *, int); + bool (*stream_memory_read)(const struct sock *); + void (*enter_memory_pressure)(struct sock *); + void (*leave_memory_pressure)(struct sock *); + atomic_long_t *memory_allocated; + struct percpu_counter *sockets_allocated; + long unsigned int *memory_pressure; + long int *sysctl_mem; + int *sysctl_wmem; + int *sysctl_rmem; + u32 sysctl_wmem_offset; + u32 sysctl_rmem_offset; + int max_header; + bool no_autobind; + struct kmem_cache *slab; + unsigned int obj_size; + slab_flags_t slab_flags; + unsigned int useroffset; + unsigned int usersize; + struct percpu_counter *orphan_count; + struct request_sock_ops *rsk_prot; + struct timewait_sock_ops *twsk_prot; + union { + struct inet_hashinfo *hashinfo; + struct udp_table *udp_table; + struct raw_hashinfo *raw_hash; + struct smc_hashinfo *smc_hash; 
+ } h; + struct module *owner; + char name[32]; + struct list_head node; + int (*diag_destroy)(struct sock *, int); +}; + +struct request_sock; + +struct request_sock_ops { + int family; + unsigned int obj_size; + struct kmem_cache *slab; + char *slab_name; + int (*rtx_syn_ack)(const struct sock *, struct request_sock *); + void (*send_ack)(const struct sock *, struct sk_buff *, struct request_sock *); + void (*send_reset)(const struct sock *, struct sk_buff *); + void (*destructor)(struct request_sock *); + void (*syn_ack_timeout)(const struct request_sock *); +}; + +struct timewait_sock_ops { + struct kmem_cache *twsk_slab; + char *twsk_slab_name; + unsigned int twsk_obj_size; + int (*twsk_unique)(struct sock *, struct sock *, void *); + void (*twsk_destructor)(struct sock *); +}; + +struct saved_syn; + +struct request_sock { + struct sock_common __req_common; + struct request_sock *dl_next; + u16 mss; + u8 num_retrans; + u8 syncookie: 1; + u8 num_timeout: 7; + u32 ts_recent; + struct timer_list rsk_timer; + const struct request_sock_ops *rsk_ops; + struct sock *sk; + struct saved_syn *saved_syn; + u32 secid; + u32 peer_secid; +}; + +struct saved_syn { + u32 mac_hdrlen; + u32 network_hdrlen; + u32 tcp_hdrlen; + u8 data[0]; +}; + +enum tsq_enum { + TSQ_THROTTLED = 0, + TSQ_QUEUED = 1, + TCP_TSQ_DEFERRED = 2, + TCP_WRITE_TIMER_DEFERRED = 3, + TCP_DELACK_TIMER_DEFERRED = 4, + TCP_MTU_REDUCED_DEFERRED = 5, +}; + +struct ip6_sf_list { + struct ip6_sf_list *sf_next; + struct in6_addr sf_addr; + long unsigned int sf_count[2]; + unsigned char sf_gsresp; + unsigned char sf_oldin; + unsigned char sf_crcount; +}; + +struct ifmcaddr6 { + struct in6_addr mca_addr; + struct inet6_dev *idev; + struct ifmcaddr6 *next; + struct ip6_sf_list *mca_sources; + struct ip6_sf_list *mca_tomb; + unsigned int mca_sfmode; + unsigned char mca_crcount; + long unsigned int mca_sfcount[2]; + struct timer_list mca_timer; + unsigned int mca_flags; + int mca_users; + refcount_t mca_refcnt; + spinlock_t mca_lock; + long unsigned int mca_cstamp; + long unsigned int mca_tstamp; +}; + +struct ifacaddr6 { + struct in6_addr aca_addr; + struct fib6_info *aca_rt; + struct ifacaddr6 *aca_next; + struct hlist_node aca_addr_lst; + int aca_users; + refcount_t aca_refcnt; + long unsigned int aca_cstamp; + long unsigned int aca_tstamp; + struct callback_head rcu; +}; + +enum { + __ND_OPT_PREFIX_INFO_END = 0, + ND_OPT_SOURCE_LL_ADDR = 1, + ND_OPT_TARGET_LL_ADDR = 2, + ND_OPT_PREFIX_INFO = 3, + ND_OPT_REDIRECT_HDR = 4, + ND_OPT_MTU = 5, + ND_OPT_NONCE = 14, + __ND_OPT_ARRAY_MAX = 15, + ND_OPT_ROUTE_INFO = 24, + ND_OPT_RDNSS = 25, + ND_OPT_DNSSL = 31, + ND_OPT_6CO = 34, + ND_OPT_CAPTIVE_PORTAL = 37, + ND_OPT_PREF64 = 38, + __ND_OPT_MAX = 39, +}; + +struct nd_opt_hdr { + __u8 nd_opt_type; + __u8 nd_opt_len; +}; + +struct ndisc_options { + struct nd_opt_hdr *nd_opt_array[15]; + struct nd_opt_hdr *nd_opts_ri; + struct nd_opt_hdr *nd_opts_ri_end; + struct nd_opt_hdr *nd_useropts; + struct nd_opt_hdr *nd_useropts_end; + struct nd_opt_hdr *nd_802154_opt_array[3]; +}; + +struct prefix_info { + __u8 type; + __u8 length; + __u8 prefix_len; + __u8 reserved: 6; + __u8 autoconf: 1; + __u8 onlink: 1; + __be32 valid; + __be32 prefered; + __be32 reserved2; + struct in6_addr prefix; +}; + +enum nfs_opnum4 { + OP_ACCESS = 3, + OP_CLOSE = 4, + OP_COMMIT = 5, + OP_CREATE = 6, + OP_DELEGPURGE = 7, + OP_DELEGRETURN = 8, + OP_GETATTR = 9, + OP_GETFH = 10, + OP_LINK = 11, + OP_LOCK = 12, + OP_LOCKT = 13, + OP_LOCKU = 14, + OP_LOOKUP = 15, + OP_LOOKUPP = 16, + 
OP_NVERIFY = 17, + OP_OPEN = 18, + OP_OPENATTR = 19, + OP_OPEN_CONFIRM = 20, + OP_OPEN_DOWNGRADE = 21, + OP_PUTFH = 22, + OP_PUTPUBFH = 23, + OP_PUTROOTFH = 24, + OP_READ = 25, + OP_READDIR = 26, + OP_READLINK = 27, + OP_REMOVE = 28, + OP_RENAME = 29, + OP_RENEW = 30, + OP_RESTOREFH = 31, + OP_SAVEFH = 32, + OP_SECINFO = 33, + OP_SETATTR = 34, + OP_SETCLIENTID = 35, + OP_SETCLIENTID_CONFIRM = 36, + OP_VERIFY = 37, + OP_WRITE = 38, + OP_RELEASE_LOCKOWNER = 39, + OP_BACKCHANNEL_CTL = 40, + OP_BIND_CONN_TO_SESSION = 41, + OP_EXCHANGE_ID = 42, + OP_CREATE_SESSION = 43, + OP_DESTROY_SESSION = 44, + OP_FREE_STATEID = 45, + OP_GET_DIR_DELEGATION = 46, + OP_GETDEVICEINFO = 47, + OP_GETDEVICELIST = 48, + OP_LAYOUTCOMMIT = 49, + OP_LAYOUTGET = 50, + OP_LAYOUTRETURN = 51, + OP_SECINFO_NO_NAME = 52, + OP_SEQUENCE = 53, + OP_SET_SSV = 54, + OP_TEST_STATEID = 55, + OP_WANT_DELEGATION = 56, + OP_DESTROY_CLIENTID = 57, + OP_RECLAIM_COMPLETE = 58, + OP_ALLOCATE = 59, + OP_COPY = 60, + OP_COPY_NOTIFY = 61, + OP_DEALLOCATE = 62, + OP_IO_ADVISE = 63, + OP_LAYOUTERROR = 64, + OP_LAYOUTSTATS = 65, + OP_OFFLOAD_CANCEL = 66, + OP_OFFLOAD_STATUS = 67, + OP_READ_PLUS = 68, + OP_SEEK = 69, + OP_WRITE_SAME = 70, + OP_CLONE = 71, + OP_GETXATTR = 72, + OP_SETXATTR = 73, + OP_LISTXATTRS = 74, + OP_REMOVEXATTR = 75, + OP_ILLEGAL = 10044, +}; + +enum perf_branch_sample_type_shift { + PERF_SAMPLE_BRANCH_USER_SHIFT = 0, + PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, + PERF_SAMPLE_BRANCH_HV_SHIFT = 2, + PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, + PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, + PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, + PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, + PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, + PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, + PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, + PERF_SAMPLE_BRANCH_COND_SHIFT = 10, + PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, + PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, + PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, + PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14, + PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15, + PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16, + PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 17, + PERF_SAMPLE_BRANCH_MAX_SHIFT = 18, +}; + +struct uuidcmp { + const char *uuid; + int len; +}; + +typedef __u64 __le64; + +struct minix_super_block { + __u16 s_ninodes; + __u16 s_nzones; + __u16 s_imap_blocks; + __u16 s_zmap_blocks; + __u16 s_firstdatazone; + __u16 s_log_zone_size; + __u32 s_max_size; + __u16 s_magic; + __u16 s_state; + __u32 s_zones; +}; + +struct romfs_super_block { + __be32 word0; + __be32 word1; + __be32 size; + __be32 checksum; + char name[0]; +}; + +struct cramfs_inode { + __u32 mode: 16; + __u32 uid: 16; + __u32 size: 24; + __u32 gid: 8; + __u32 namelen: 6; + __u32 offset: 26; +}; + +struct cramfs_info { + __u32 crc; + __u32 edition; + __u32 blocks; + __u32 files; +}; + +struct cramfs_super { + __u32 magic; + __u32 size; + __u32 flags; + __u32 future; + __u8 signature[16]; + struct cramfs_info fsid; + __u8 name[16]; + struct cramfs_inode root; +}; + +struct squashfs_super_block { + __le32 s_magic; + __le32 inodes; + __le32 mkfs_time; + __le32 block_size; + __le32 fragments; + __le16 compression; + __le16 block_log; + __le16 flags; + __le16 no_ids; + __le16 s_major; + __le16 s_minor; + __le64 root_inode; + __le64 bytes_used; + __le64 id_table_start; + __le64 xattr_id_table_start; + __le64 inode_table_start; + __le64 directory_table_start; + __le64 fragment_table_start; + __le64 lookup_table_start; +}; + +typedef int (*decompress_fn)(unsigned char *, long int, long int (*)(void *, long unsigned int), long int 
(*)(void *, long unsigned int), unsigned char *, long int *, void (*)(char *)); + +struct subprocess_info { + struct work_struct work; + struct completion *complete; + const char *path; + char **argv; + char **envp; + int wait; + int retval; + int (*init)(struct subprocess_info *, struct cred *); + void (*cleanup)(struct subprocess_info *); + void *data; +}; + +typedef phys_addr_t resource_size_t; + +struct resource { + resource_size_t start; + resource_size_t end; + const char *name; + long unsigned int flags; + long unsigned int desc; + struct resource *parent; + struct resource *sibling; + struct resource *child; +}; + +struct hash { + int ino; + int minor; + int major; + umode_t mode; + struct hash *next; + char name[4098]; +}; + +struct dir_entry { + struct list_head list; + char *name; + time64_t mtime; +}; + +enum state { + Start = 0, + Collect = 1, + GotHeader = 2, + SkipIt = 3, + GotName = 4, + CopyFile = 5, + GotSymlink = 6, + Reset = 7, +}; + +enum migratetype { + MIGRATE_UNMOVABLE = 0, + MIGRATE_MOVABLE = 1, + MIGRATE_RECLAIMABLE = 2, + MIGRATE_PCPTYPES = 3, + MIGRATE_HIGHATOMIC = 3, + MIGRATE_CMA = 4, + MIGRATE_ISOLATE = 5, + MIGRATE_TYPES = 6, +}; + +enum numa_stat_item { + NUMA_HIT = 0, + NUMA_MISS = 1, + NUMA_FOREIGN = 2, + NUMA_INTERLEAVE_HIT = 3, + NUMA_LOCAL = 4, + NUMA_OTHER = 5, + NR_VM_NUMA_STAT_ITEMS = 6, +}; + +enum zone_stat_item { + NR_FREE_PAGES = 0, + NR_ZONE_LRU_BASE = 1, + NR_ZONE_INACTIVE_ANON = 1, + NR_ZONE_ACTIVE_ANON = 2, + NR_ZONE_INACTIVE_FILE = 3, + NR_ZONE_ACTIVE_FILE = 4, + NR_ZONE_UNEVICTABLE = 5, + NR_ZONE_WRITE_PENDING = 6, + NR_MLOCK = 7, + NR_PAGETABLE = 8, + NR_BOUNCE = 9, + NR_ZSPAGES = 10, + NR_FREE_CMA_PAGES = 11, + NR_VM_ZONE_STAT_ITEMS = 12, +}; + +enum zone_watermarks { + WMARK_MIN = 0, + WMARK_LOW = 1, + WMARK_HIGH = 2, + NR_WMARK = 3, +}; + +enum { + ZONELIST_FALLBACK = 0, + ZONELIST_NOFALLBACK = 1, + MAX_ZONELISTS = 2, +}; + +enum { + DQF_ROOT_SQUASH_B = 0, + DQF_SYS_FILE_B = 16, + DQF_PRIVATE = 17, +}; + +enum { + DQST_LOOKUPS = 0, + DQST_DROPS = 1, + DQST_READS = 2, + DQST_WRITES = 3, + DQST_CACHE_HITS = 4, + DQST_ALLOC_DQUOTS = 5, + DQST_FREE_DQUOTS = 6, + DQST_SYNCS = 7, + _DQST_DQSTAT_LAST = 8, +}; + +enum { + SB_UNFROZEN = 0, + SB_FREEZE_WRITE = 1, + SB_FREEZE_PAGEFAULT = 2, + SB_FREEZE_FS = 3, + SB_FREEZE_COMPLETE = 4, +}; + +enum compound_dtor_id { + NULL_COMPOUND_DTOR = 0, + COMPOUND_PAGE_DTOR = 1, + HUGETLB_PAGE_DTOR = 2, + TRANSHUGE_PAGE_DTOR = 3, + NR_COMPOUND_DTORS = 4, +}; + +enum { + TSK_TRACE_FL_TRACE_BIT = 0, + TSK_TRACE_FL_GRAPH_BIT = 1, +}; + +enum ucount_type { + UCOUNT_USER_NAMESPACES = 0, + UCOUNT_PID_NAMESPACES = 1, + UCOUNT_UTS_NAMESPACES = 2, + UCOUNT_IPC_NAMESPACES = 3, + UCOUNT_NET_NAMESPACES = 4, + UCOUNT_MNT_NAMESPACES = 5, + UCOUNT_CGROUP_NAMESPACES = 6, + UCOUNT_TIME_NAMESPACES = 7, + UCOUNT_INOTIFY_INSTANCES = 8, + UCOUNT_INOTIFY_WATCHES = 9, + UCOUNT_COUNTS = 10, +}; + +enum flow_dissector_key_id { + FLOW_DISSECTOR_KEY_CONTROL = 0, + FLOW_DISSECTOR_KEY_BASIC = 1, + FLOW_DISSECTOR_KEY_IPV4_ADDRS = 2, + FLOW_DISSECTOR_KEY_IPV6_ADDRS = 3, + FLOW_DISSECTOR_KEY_PORTS = 4, + FLOW_DISSECTOR_KEY_PORTS_RANGE = 5, + FLOW_DISSECTOR_KEY_ICMP = 6, + FLOW_DISSECTOR_KEY_ETH_ADDRS = 7, + FLOW_DISSECTOR_KEY_TIPC = 8, + FLOW_DISSECTOR_KEY_ARP = 9, + FLOW_DISSECTOR_KEY_VLAN = 10, + FLOW_DISSECTOR_KEY_FLOW_LABEL = 11, + FLOW_DISSECTOR_KEY_GRE_KEYID = 12, + FLOW_DISSECTOR_KEY_MPLS_ENTROPY = 13, + FLOW_DISSECTOR_KEY_ENC_KEYID = 14, + FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS = 15, + FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS = 16, + 
FLOW_DISSECTOR_KEY_ENC_CONTROL = 17, + FLOW_DISSECTOR_KEY_ENC_PORTS = 18, + FLOW_DISSECTOR_KEY_MPLS = 19, + FLOW_DISSECTOR_KEY_TCP = 20, + FLOW_DISSECTOR_KEY_IP = 21, + FLOW_DISSECTOR_KEY_CVLAN = 22, + FLOW_DISSECTOR_KEY_ENC_IP = 23, + FLOW_DISSECTOR_KEY_ENC_OPTS = 24, + FLOW_DISSECTOR_KEY_META = 25, + FLOW_DISSECTOR_KEY_CT = 26, + FLOW_DISSECTOR_KEY_HASH = 27, + FLOW_DISSECTOR_KEY_MAX = 28, +}; + +enum { + IPSTATS_MIB_NUM = 0, + IPSTATS_MIB_INPKTS = 1, + IPSTATS_MIB_INOCTETS = 2, + IPSTATS_MIB_INDELIVERS = 3, + IPSTATS_MIB_OUTFORWDATAGRAMS = 4, + IPSTATS_MIB_OUTPKTS = 5, + IPSTATS_MIB_OUTOCTETS = 6, + IPSTATS_MIB_INHDRERRORS = 7, + IPSTATS_MIB_INTOOBIGERRORS = 8, + IPSTATS_MIB_INNOROUTES = 9, + IPSTATS_MIB_INADDRERRORS = 10, + IPSTATS_MIB_INUNKNOWNPROTOS = 11, + IPSTATS_MIB_INTRUNCATEDPKTS = 12, + IPSTATS_MIB_INDISCARDS = 13, + IPSTATS_MIB_OUTDISCARDS = 14, + IPSTATS_MIB_OUTNOROUTES = 15, + IPSTATS_MIB_REASMTIMEOUT = 16, + IPSTATS_MIB_REASMREQDS = 17, + IPSTATS_MIB_REASMOKS = 18, + IPSTATS_MIB_REASMFAILS = 19, + IPSTATS_MIB_FRAGOKS = 20, + IPSTATS_MIB_FRAGFAILS = 21, + IPSTATS_MIB_FRAGCREATES = 22, + IPSTATS_MIB_INMCASTPKTS = 23, + IPSTATS_MIB_OUTMCASTPKTS = 24, + IPSTATS_MIB_INBCASTPKTS = 25, + IPSTATS_MIB_OUTBCASTPKTS = 26, + IPSTATS_MIB_INMCASTOCTETS = 27, + IPSTATS_MIB_OUTMCASTOCTETS = 28, + IPSTATS_MIB_INBCASTOCTETS = 29, + IPSTATS_MIB_OUTBCASTOCTETS = 30, + IPSTATS_MIB_CSUMERRORS = 31, + IPSTATS_MIB_NOECTPKTS = 32, + IPSTATS_MIB_ECT1PKTS = 33, + IPSTATS_MIB_ECT0PKTS = 34, + IPSTATS_MIB_CEPKTS = 35, + IPSTATS_MIB_REASM_OVERLAPS = 36, + __IPSTATS_MIB_MAX = 37, +}; + +enum { + ICMP_MIB_NUM = 0, + ICMP_MIB_INMSGS = 1, + ICMP_MIB_INERRORS = 2, + ICMP_MIB_INDESTUNREACHS = 3, + ICMP_MIB_INTIMEEXCDS = 4, + ICMP_MIB_INPARMPROBS = 5, + ICMP_MIB_INSRCQUENCHS = 6, + ICMP_MIB_INREDIRECTS = 7, + ICMP_MIB_INECHOS = 8, + ICMP_MIB_INECHOREPS = 9, + ICMP_MIB_INTIMESTAMPS = 10, + ICMP_MIB_INTIMESTAMPREPS = 11, + ICMP_MIB_INADDRMASKS = 12, + ICMP_MIB_INADDRMASKREPS = 13, + ICMP_MIB_OUTMSGS = 14, + ICMP_MIB_OUTERRORS = 15, + ICMP_MIB_OUTDESTUNREACHS = 16, + ICMP_MIB_OUTTIMEEXCDS = 17, + ICMP_MIB_OUTPARMPROBS = 18, + ICMP_MIB_OUTSRCQUENCHS = 19, + ICMP_MIB_OUTREDIRECTS = 20, + ICMP_MIB_OUTECHOS = 21, + ICMP_MIB_OUTECHOREPS = 22, + ICMP_MIB_OUTTIMESTAMPS = 23, + ICMP_MIB_OUTTIMESTAMPREPS = 24, + ICMP_MIB_OUTADDRMASKS = 25, + ICMP_MIB_OUTADDRMASKREPS = 26, + ICMP_MIB_CSUMERRORS = 27, + __ICMP_MIB_MAX = 28, +}; + +enum { + ICMP6_MIB_NUM = 0, + ICMP6_MIB_INMSGS = 1, + ICMP6_MIB_INERRORS = 2, + ICMP6_MIB_OUTMSGS = 3, + ICMP6_MIB_OUTERRORS = 4, + ICMP6_MIB_CSUMERRORS = 5, + __ICMP6_MIB_MAX = 6, +}; + +enum { + TCP_MIB_NUM = 0, + TCP_MIB_RTOALGORITHM = 1, + TCP_MIB_RTOMIN = 2, + TCP_MIB_RTOMAX = 3, + TCP_MIB_MAXCONN = 4, + TCP_MIB_ACTIVEOPENS = 5, + TCP_MIB_PASSIVEOPENS = 6, + TCP_MIB_ATTEMPTFAILS = 7, + TCP_MIB_ESTABRESETS = 8, + TCP_MIB_CURRESTAB = 9, + TCP_MIB_INSEGS = 10, + TCP_MIB_OUTSEGS = 11, + TCP_MIB_RETRANSSEGS = 12, + TCP_MIB_INERRS = 13, + TCP_MIB_OUTRSTS = 14, + TCP_MIB_CSUMERRORS = 15, + __TCP_MIB_MAX = 16, +}; + +enum { + UDP_MIB_NUM = 0, + UDP_MIB_INDATAGRAMS = 1, + UDP_MIB_NOPORTS = 2, + UDP_MIB_INERRORS = 3, + UDP_MIB_OUTDATAGRAMS = 4, + UDP_MIB_RCVBUFERRORS = 5, + UDP_MIB_SNDBUFERRORS = 6, + UDP_MIB_CSUMERRORS = 7, + UDP_MIB_IGNOREDMULTI = 8, + __UDP_MIB_MAX = 9, +}; + +enum { + LINUX_MIB_NUM = 0, + LINUX_MIB_SYNCOOKIESSENT = 1, + LINUX_MIB_SYNCOOKIESRECV = 2, + LINUX_MIB_SYNCOOKIESFAILED = 3, + LINUX_MIB_EMBRYONICRSTS = 4, + LINUX_MIB_PRUNECALLED = 5, + LINUX_MIB_RCVPRUNED = 6, + 
LINUX_MIB_OFOPRUNED = 7, + LINUX_MIB_OUTOFWINDOWICMPS = 8, + LINUX_MIB_LOCKDROPPEDICMPS = 9, + LINUX_MIB_ARPFILTER = 10, + LINUX_MIB_TIMEWAITED = 11, + LINUX_MIB_TIMEWAITRECYCLED = 12, + LINUX_MIB_TIMEWAITKILLED = 13, + LINUX_MIB_PAWSACTIVEREJECTED = 14, + LINUX_MIB_PAWSESTABREJECTED = 15, + LINUX_MIB_DELAYEDACKS = 16, + LINUX_MIB_DELAYEDACKLOCKED = 17, + LINUX_MIB_DELAYEDACKLOST = 18, + LINUX_MIB_LISTENOVERFLOWS = 19, + LINUX_MIB_LISTENDROPS = 20, + LINUX_MIB_TCPHPHITS = 21, + LINUX_MIB_TCPPUREACKS = 22, + LINUX_MIB_TCPHPACKS = 23, + LINUX_MIB_TCPRENORECOVERY = 24, + LINUX_MIB_TCPSACKRECOVERY = 25, + LINUX_MIB_TCPSACKRENEGING = 26, + LINUX_MIB_TCPSACKREORDER = 27, + LINUX_MIB_TCPRENOREORDER = 28, + LINUX_MIB_TCPTSREORDER = 29, + LINUX_MIB_TCPFULLUNDO = 30, + LINUX_MIB_TCPPARTIALUNDO = 31, + LINUX_MIB_TCPDSACKUNDO = 32, + LINUX_MIB_TCPLOSSUNDO = 33, + LINUX_MIB_TCPLOSTRETRANSMIT = 34, + LINUX_MIB_TCPRENOFAILURES = 35, + LINUX_MIB_TCPSACKFAILURES = 36, + LINUX_MIB_TCPLOSSFAILURES = 37, + LINUX_MIB_TCPFASTRETRANS = 38, + LINUX_MIB_TCPSLOWSTARTRETRANS = 39, + LINUX_MIB_TCPTIMEOUTS = 40, + LINUX_MIB_TCPLOSSPROBES = 41, + LINUX_MIB_TCPLOSSPROBERECOVERY = 42, + LINUX_MIB_TCPRENORECOVERYFAIL = 43, + LINUX_MIB_TCPSACKRECOVERYFAIL = 44, + LINUX_MIB_TCPRCVCOLLAPSED = 45, + LINUX_MIB_TCPDSACKOLDSENT = 46, + LINUX_MIB_TCPDSACKOFOSENT = 47, + LINUX_MIB_TCPDSACKRECV = 48, + LINUX_MIB_TCPDSACKOFORECV = 49, + LINUX_MIB_TCPABORTONDATA = 50, + LINUX_MIB_TCPABORTONCLOSE = 51, + LINUX_MIB_TCPABORTONMEMORY = 52, + LINUX_MIB_TCPABORTONTIMEOUT = 53, + LINUX_MIB_TCPABORTONLINGER = 54, + LINUX_MIB_TCPABORTFAILED = 55, + LINUX_MIB_TCPMEMORYPRESSURES = 56, + LINUX_MIB_TCPMEMORYPRESSURESCHRONO = 57, + LINUX_MIB_TCPSACKDISCARD = 58, + LINUX_MIB_TCPDSACKIGNOREDOLD = 59, + LINUX_MIB_TCPDSACKIGNOREDNOUNDO = 60, + LINUX_MIB_TCPSPURIOUSRTOS = 61, + LINUX_MIB_TCPMD5NOTFOUND = 62, + LINUX_MIB_TCPMD5UNEXPECTED = 63, + LINUX_MIB_TCPMD5FAILURE = 64, + LINUX_MIB_SACKSHIFTED = 65, + LINUX_MIB_SACKMERGED = 66, + LINUX_MIB_SACKSHIFTFALLBACK = 67, + LINUX_MIB_TCPBACKLOGDROP = 68, + LINUX_MIB_PFMEMALLOCDROP = 69, + LINUX_MIB_TCPMINTTLDROP = 70, + LINUX_MIB_TCPDEFERACCEPTDROP = 71, + LINUX_MIB_IPRPFILTER = 72, + LINUX_MIB_TCPTIMEWAITOVERFLOW = 73, + LINUX_MIB_TCPREQQFULLDOCOOKIES = 74, + LINUX_MIB_TCPREQQFULLDROP = 75, + LINUX_MIB_TCPRETRANSFAIL = 76, + LINUX_MIB_TCPRCVCOALESCE = 77, + LINUX_MIB_TCPBACKLOGCOALESCE = 78, + LINUX_MIB_TCPOFOQUEUE = 79, + LINUX_MIB_TCPOFODROP = 80, + LINUX_MIB_TCPOFOMERGE = 81, + LINUX_MIB_TCPCHALLENGEACK = 82, + LINUX_MIB_TCPSYNCHALLENGE = 83, + LINUX_MIB_TCPFASTOPENACTIVE = 84, + LINUX_MIB_TCPFASTOPENACTIVEFAIL = 85, + LINUX_MIB_TCPFASTOPENPASSIVE = 86, + LINUX_MIB_TCPFASTOPENPASSIVEFAIL = 87, + LINUX_MIB_TCPFASTOPENLISTENOVERFLOW = 88, + LINUX_MIB_TCPFASTOPENCOOKIEREQD = 89, + LINUX_MIB_TCPFASTOPENBLACKHOLE = 90, + LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES = 91, + LINUX_MIB_BUSYPOLLRXPACKETS = 92, + LINUX_MIB_TCPAUTOCORKING = 93, + LINUX_MIB_TCPFROMZEROWINDOWADV = 94, + LINUX_MIB_TCPTOZEROWINDOWADV = 95, + LINUX_MIB_TCPWANTZEROWINDOWADV = 96, + LINUX_MIB_TCPSYNRETRANS = 97, + LINUX_MIB_TCPORIGDATASENT = 98, + LINUX_MIB_TCPHYSTARTTRAINDETECT = 99, + LINUX_MIB_TCPHYSTARTTRAINCWND = 100, + LINUX_MIB_TCPHYSTARTDELAYDETECT = 101, + LINUX_MIB_TCPHYSTARTDELAYCWND = 102, + LINUX_MIB_TCPACKSKIPPEDSYNRECV = 103, + LINUX_MIB_TCPACKSKIPPEDPAWS = 104, + LINUX_MIB_TCPACKSKIPPEDSEQ = 105, + LINUX_MIB_TCPACKSKIPPEDFINWAIT2 = 106, + LINUX_MIB_TCPACKSKIPPEDTIMEWAIT = 107, + LINUX_MIB_TCPACKSKIPPEDCHALLENGE = 108, + 
LINUX_MIB_TCPWINPROBE = 109, + LINUX_MIB_TCPKEEPALIVE = 110, + LINUX_MIB_TCPMTUPFAIL = 111, + LINUX_MIB_TCPMTUPSUCCESS = 112, + LINUX_MIB_TCPDELIVERED = 113, + LINUX_MIB_TCPDELIVEREDCE = 114, + LINUX_MIB_TCPACKCOMPRESSED = 115, + LINUX_MIB_TCPZEROWINDOWDROP = 116, + LINUX_MIB_TCPRCVQDROP = 117, + LINUX_MIB_TCPWQUEUETOOBIG = 118, + LINUX_MIB_TCPFASTOPENPASSIVEALTKEY = 119, + LINUX_MIB_TCPTIMEOUTREHASH = 120, + LINUX_MIB_TCPDUPLICATEDATAREHASH = 121, + LINUX_MIB_TCPDSACKRECVSEGS = 122, + LINUX_MIB_TCPDSACKIGNOREDDUBIOUS = 123, + __LINUX_MIB_MAX = 124, +}; + +enum { + LINUX_MIB_XFRMNUM = 0, + LINUX_MIB_XFRMINERROR = 1, + LINUX_MIB_XFRMINBUFFERERROR = 2, + LINUX_MIB_XFRMINHDRERROR = 3, + LINUX_MIB_XFRMINNOSTATES = 4, + LINUX_MIB_XFRMINSTATEPROTOERROR = 5, + LINUX_MIB_XFRMINSTATEMODEERROR = 6, + LINUX_MIB_XFRMINSTATESEQERROR = 7, + LINUX_MIB_XFRMINSTATEEXPIRED = 8, + LINUX_MIB_XFRMINSTATEMISMATCH = 9, + LINUX_MIB_XFRMINSTATEINVALID = 10, + LINUX_MIB_XFRMINTMPLMISMATCH = 11, + LINUX_MIB_XFRMINNOPOLS = 12, + LINUX_MIB_XFRMINPOLBLOCK = 13, + LINUX_MIB_XFRMINPOLERROR = 14, + LINUX_MIB_XFRMOUTERROR = 15, + LINUX_MIB_XFRMOUTBUNDLEGENERROR = 16, + LINUX_MIB_XFRMOUTBUNDLECHECKERROR = 17, + LINUX_MIB_XFRMOUTNOSTATES = 18, + LINUX_MIB_XFRMOUTSTATEPROTOERROR = 19, + LINUX_MIB_XFRMOUTSTATEMODEERROR = 20, + LINUX_MIB_XFRMOUTSTATESEQERROR = 21, + LINUX_MIB_XFRMOUTSTATEEXPIRED = 22, + LINUX_MIB_XFRMOUTPOLBLOCK = 23, + LINUX_MIB_XFRMOUTPOLDEAD = 24, + LINUX_MIB_XFRMOUTPOLERROR = 25, + LINUX_MIB_XFRMFWDHDRERROR = 26, + LINUX_MIB_XFRMOUTSTATEINVALID = 27, + LINUX_MIB_XFRMACQUIREERROR = 28, + __LINUX_MIB_XFRMMAX = 29, +}; + +enum { + LINUX_MIB_TLSNUM = 0, + LINUX_MIB_TLSCURRTXSW = 1, + LINUX_MIB_TLSCURRRXSW = 2, + LINUX_MIB_TLSCURRTXDEVICE = 3, + LINUX_MIB_TLSCURRRXDEVICE = 4, + LINUX_MIB_TLSTXSW = 5, + LINUX_MIB_TLSRXSW = 6, + LINUX_MIB_TLSTXDEVICE = 7, + LINUX_MIB_TLSRXDEVICE = 8, + LINUX_MIB_TLSDECRYPTERROR = 9, + LINUX_MIB_TLSRXDEVICERESYNC = 10, + __LINUX_MIB_TLSMAX = 11, +}; + +enum nf_inet_hooks { + NF_INET_PRE_ROUTING = 0, + NF_INET_LOCAL_IN = 1, + NF_INET_FORWARD = 2, + NF_INET_LOCAL_OUT = 3, + NF_INET_POST_ROUTING = 4, + NF_INET_NUMHOOKS = 5, + NF_INET_INGRESS = 5, +}; + +enum { + NFPROTO_UNSPEC = 0, + NFPROTO_INET = 1, + NFPROTO_IPV4 = 2, + NFPROTO_ARP = 3, + NFPROTO_NETDEV = 5, + NFPROTO_BRIDGE = 7, + NFPROTO_IPV6 = 10, + NFPROTO_DECNET = 12, + NFPROTO_NUMPROTO = 13, +}; + +enum tcp_conntrack { + TCP_CONNTRACK_NONE = 0, + TCP_CONNTRACK_SYN_SENT = 1, + TCP_CONNTRACK_SYN_RECV = 2, + TCP_CONNTRACK_ESTABLISHED = 3, + TCP_CONNTRACK_FIN_WAIT = 4, + TCP_CONNTRACK_CLOSE_WAIT = 5, + TCP_CONNTRACK_LAST_ACK = 6, + TCP_CONNTRACK_TIME_WAIT = 7, + TCP_CONNTRACK_CLOSE = 8, + TCP_CONNTRACK_LISTEN = 9, + TCP_CONNTRACK_MAX = 10, + TCP_CONNTRACK_IGNORE = 11, + TCP_CONNTRACK_RETRANS = 12, + TCP_CONNTRACK_UNACK = 13, + TCP_CONNTRACK_TIMEOUT_MAX = 14, +}; + +enum ct_dccp_states { + CT_DCCP_NONE = 0, + CT_DCCP_REQUEST = 1, + CT_DCCP_RESPOND = 2, + CT_DCCP_PARTOPEN = 3, + CT_DCCP_OPEN = 4, + CT_DCCP_CLOSEREQ = 5, + CT_DCCP_CLOSING = 6, + CT_DCCP_TIMEWAIT = 7, + CT_DCCP_IGNORE = 8, + CT_DCCP_INVALID = 9, + __CT_DCCP_MAX = 10, +}; + +enum ip_conntrack_dir { + IP_CT_DIR_ORIGINAL = 0, + IP_CT_DIR_REPLY = 1, + IP_CT_DIR_MAX = 2, +}; + +enum sctp_conntrack { + SCTP_CONNTRACK_NONE = 0, + SCTP_CONNTRACK_CLOSED = 1, + SCTP_CONNTRACK_COOKIE_WAIT = 2, + SCTP_CONNTRACK_COOKIE_ECHOED = 3, + SCTP_CONNTRACK_ESTABLISHED = 4, + SCTP_CONNTRACK_SHUTDOWN_SENT = 5, + SCTP_CONNTRACK_SHUTDOWN_RECD = 6, + SCTP_CONNTRACK_SHUTDOWN_ACK_SENT = 7, + 
SCTP_CONNTRACK_HEARTBEAT_SENT = 8, + SCTP_CONNTRACK_HEARTBEAT_ACKED = 9, + SCTP_CONNTRACK_MAX = 10, +}; + +enum udp_conntrack { + UDP_CT_UNREPLIED = 0, + UDP_CT_REPLIED = 1, + UDP_CT_MAX = 2, +}; + +enum gre_conntrack { + GRE_CT_UNREPLIED = 0, + GRE_CT_REPLIED = 1, + GRE_CT_MAX = 2, +}; + +enum { + XFRM_POLICY_IN = 0, + XFRM_POLICY_OUT = 1, + XFRM_POLICY_FWD = 2, + XFRM_POLICY_MASK = 3, + XFRM_POLICY_MAX = 3, +}; + +enum netns_bpf_attach_type { + NETNS_BPF_INVALID = 4294967295, + NETNS_BPF_FLOW_DISSECTOR = 0, + NETNS_BPF_SK_LOOKUP = 1, + MAX_NETNS_BPF_ATTACH_TYPE = 2, +}; + +enum cpu_idle_type { + CPU_IDLE = 0, + CPU_NOT_IDLE = 1, + CPU_NEWLY_IDLE = 2, + CPU_MAX_IDLE_TYPES = 3, +}; + +enum { + __SD_BALANCE_NEWIDLE = 0, + __SD_BALANCE_EXEC = 1, + __SD_BALANCE_FORK = 2, + __SD_BALANCE_WAKE = 3, + __SD_WAKE_AFFINE = 4, + __SD_ASYM_CPUCAPACITY = 5, + __SD_SHARE_CPUCAPACITY = 6, + __SD_SHARE_PKG_RESOURCES = 7, + __SD_SERIALIZE = 8, + __SD_ASYM_PACKING = 9, + __SD_PREFER_SIBLING = 10, + __SD_OVERLAP = 11, + __SD_NUMA = 12, + __SD_FLAG_CNT = 13, +}; + +enum skb_ext_id { + SKB_EXT_BRIDGE_NF = 0, + SKB_EXT_SEC_PATH = 1, + TC_SKB_EXT = 2, + SKB_EXT_MPTCP = 3, + SKB_EXT_NUM = 4, +}; + +enum audit_ntp_type { + AUDIT_NTP_OFFSET = 0, + AUDIT_NTP_FREQ = 1, + AUDIT_NTP_STATUS = 2, + AUDIT_NTP_TAI = 3, + AUDIT_NTP_TICK = 4, + AUDIT_NTP_ADJUST = 5, + AUDIT_NTP_NVALS = 6, +}; + +typedef long unsigned int uintptr_t; + +struct step_hook { + struct list_head node; + int (*fn)(struct pt_regs *, unsigned int); +}; + +struct break_hook { + struct list_head node; + int (*fn)(struct pt_regs *, unsigned int); + u16 imm; + u16 mask; +}; + +enum dbg_active_el { + DBG_ACTIVE_EL0 = 0, + DBG_ACTIVE_EL1 = 1, +}; + +struct nmi_ctx { + u64 hcr; + unsigned int cnt; +}; + +enum { + HI_SOFTIRQ = 0, + TIMER_SOFTIRQ = 1, + NET_TX_SOFTIRQ = 2, + NET_RX_SOFTIRQ = 3, + BLOCK_SOFTIRQ = 4, + IRQ_POLL_SOFTIRQ = 5, + TASKLET_SOFTIRQ = 6, + SCHED_SOFTIRQ = 7, + HRTIMER_SOFTIRQ = 8, + RCU_SOFTIRQ = 9, + NR_SOFTIRQS = 10, +}; + +struct midr_range { + u32 model; + u32 rv_min; + u32 rv_max; +}; + +struct arm64_midr_revidr { + u32 midr_rv; + u32 revidr_mask; +}; + +struct arm64_cpu_capabilities { + const char *desc; + u16 capability; + u16 type; + bool (*matches)(const struct arm64_cpu_capabilities *, int); + void (*cpu_enable)(const struct arm64_cpu_capabilities *); + union { + struct { + struct midr_range midr_range; + const struct arm64_midr_revidr * const fixed_revs; + }; + const struct midr_range *midr_range_list; + struct { + u32 sys_reg; + u8 field_pos; + u8 min_field_value; + u8 hwcap_type; + bool sign; + long unsigned int hwcap; + }; + }; + const struct arm64_cpu_capabilities *match_list; +}; + +enum cpu_pm_event { + CPU_PM_ENTER = 0, + CPU_PM_ENTER_FAILED = 1, + CPU_PM_EXIT = 2, + CPU_CLUSTER_PM_ENTER = 3, + CPU_CLUSTER_PM_ENTER_FAILED = 4, + CPU_CLUSTER_PM_EXIT = 5, +}; + +struct fpsimd_last_state_struct { + struct user_fpsimd_state *st; + void *sve_state; + unsigned int sve_vl; +}; + +enum ctx_state { + CONTEXT_DISABLED = 4294967295, + CONTEXT_KERNEL = 0, + CONTEXT_USER = 1, + CONTEXT_GUEST = 2, +}; + +typedef void (*bp_hardening_cb_t)(); + +struct bp_hardening_data { + int hyp_vectors_slot; + bp_hardening_cb_t fn; +}; + +enum refcount_saturation_type { + REFCOUNT_ADD_NOT_ZERO_OVF = 0, + REFCOUNT_ADD_OVF = 1, + REFCOUNT_ADD_UAF = 2, + REFCOUNT_SUB_UAF = 3, + REFCOUNT_DEC_LEAK = 4, +}; + +struct plist_head { + struct list_head node_list; +}; + +typedef struct { + __u8 b[16]; +} guid_t; + +enum pm_qos_type { + PM_QOS_UNITIALIZED = 
0, + PM_QOS_MAX = 1, + PM_QOS_MIN = 2, +}; + +struct pm_qos_constraints { + struct plist_head list; + s32 target_value; + s32 default_value; + s32 no_constraint_value; + enum pm_qos_type type; + struct blocking_notifier_head *notifiers; +}; + +struct freq_constraints { + struct pm_qos_constraints min_freq; + struct blocking_notifier_head min_freq_notifiers; + struct pm_qos_constraints max_freq; + struct blocking_notifier_head max_freq_notifiers; +}; + +struct pm_qos_flags { + struct list_head list; + s32 effective_flags; +}; + +struct dev_pm_qos_request; + +struct dev_pm_qos { + struct pm_qos_constraints resume_latency; + struct pm_qos_constraints latency_tolerance; + struct freq_constraints freq; + struct pm_qos_flags flags; + struct dev_pm_qos_request *resume_latency_req; + struct dev_pm_qos_request *latency_tolerance_req; + struct dev_pm_qos_request *flags_req; +}; + +enum reboot_mode { + REBOOT_UNDEFINED = 4294967295, + REBOOT_COLD = 0, + REBOOT_WARM = 1, + REBOOT_HARD = 2, + REBOOT_SOFT = 3, + REBOOT_GPIO = 4, +}; + +typedef long unsigned int efi_status_t; + +typedef u8 efi_bool_t; + +typedef u16 efi_char16_t; + +typedef guid_t efi_guid_t; + +typedef struct { + u64 signature; + u32 revision; + u32 headersize; + u32 crc32; + u32 reserved; +} efi_table_hdr_t; + +typedef struct { + u32 type; + u32 pad; + u64 phys_addr; + u64 virt_addr; + u64 num_pages; + u64 attribute; +} efi_memory_desc_t; + +typedef struct { + efi_guid_t guid; + u32 headersize; + u32 flags; + u32 imagesize; +} efi_capsule_header_t; + +typedef struct { + u16 year; + u8 month; + u8 day; + u8 hour; + u8 minute; + u8 second; + u8 pad1; + u32 nanosecond; + s16 timezone; + u8 daylight; + u8 pad2; +} efi_time_t; + +typedef struct { + u32 resolution; + u32 accuracy; + u8 sets_to_zero; +} efi_time_cap_t; + +typedef struct { + efi_table_hdr_t hdr; + u32 get_time; + u32 set_time; + u32 get_wakeup_time; + u32 set_wakeup_time; + u32 set_virtual_address_map; + u32 convert_pointer; + u32 get_variable; + u32 get_next_variable; + u32 set_variable; + u32 get_next_high_mono_count; + u32 reset_system; + u32 update_capsule; + u32 query_capsule_caps; + u32 query_variable_info; +} efi_runtime_services_32_t; + +typedef efi_status_t efi_get_time_t(efi_time_t *, efi_time_cap_t *); + +typedef efi_status_t efi_set_time_t(efi_time_t *); + +typedef efi_status_t efi_get_wakeup_time_t(efi_bool_t *, efi_bool_t *, efi_time_t *); + +typedef efi_status_t efi_set_wakeup_time_t(efi_bool_t, efi_time_t *); + +typedef efi_status_t efi_get_variable_t(efi_char16_t *, efi_guid_t *, u32 *, long unsigned int *, void *); + +typedef efi_status_t efi_get_next_variable_t(long unsigned int *, efi_char16_t *, efi_guid_t *); + +typedef efi_status_t efi_set_variable_t(efi_char16_t *, efi_guid_t *, u32, long unsigned int, void *); + +typedef efi_status_t efi_get_next_high_mono_count_t(u32 *); + +typedef void efi_reset_system_t(int, efi_status_t, long unsigned int, efi_char16_t *); + +typedef efi_status_t efi_set_virtual_address_map_t(long unsigned int, long unsigned int, u32, efi_memory_desc_t *); + +typedef efi_status_t efi_query_variable_info_t(u32, u64 *, u64 *, u64 *); + +typedef efi_status_t efi_update_capsule_t(efi_capsule_header_t **, long unsigned int, long unsigned int); + +typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **, long unsigned int, u64 *, int *); + +typedef union { + struct { + efi_table_hdr_t hdr; + efi_get_time_t *get_time; + efi_set_time_t *set_time; + efi_get_wakeup_time_t *get_wakeup_time; + efi_set_wakeup_time_t 
*set_wakeup_time; + efi_set_virtual_address_map_t *set_virtual_address_map; + void *convert_pointer; + efi_get_variable_t *get_variable; + efi_get_next_variable_t *get_next_variable; + efi_set_variable_t *set_variable; + efi_get_next_high_mono_count_t *get_next_high_mono_count; + efi_reset_system_t *reset_system; + efi_update_capsule_t *update_capsule; + efi_query_capsule_caps_t *query_capsule_caps; + efi_query_variable_info_t *query_variable_info; + }; + efi_runtime_services_32_t mixed_mode; +} efi_runtime_services_t; + +struct efi_memory_map { + phys_addr_t phys_map; + void *map; + void *map_end; + int nr_map; + long unsigned int desc_version; + long unsigned int desc_size; + long unsigned int flags; +}; + +struct efi { + const efi_runtime_services_t *runtime; + unsigned int runtime_version; + unsigned int runtime_supported_mask; + long unsigned int acpi; + long unsigned int acpi20; + long unsigned int smbios; + long unsigned int smbios3; + long unsigned int esrt; + long unsigned int tpm_log; + long unsigned int tpm_final_log; + long unsigned int mokvar_table; + efi_get_time_t *get_time; + efi_set_time_t *set_time; + efi_get_wakeup_time_t *get_wakeup_time; + efi_set_wakeup_time_t *set_wakeup_time; + efi_get_variable_t *get_variable; + efi_get_next_variable_t *get_next_variable; + efi_set_variable_t *set_variable; + efi_set_variable_t *set_variable_nonblocking; + efi_query_variable_info_t *query_variable_info; + efi_query_variable_info_t *query_variable_info_nonblocking; + efi_update_capsule_t *update_capsule; + efi_query_capsule_caps_t *query_capsule_caps; + efi_get_next_high_mono_count_t *get_next_high_mono_count; + efi_reset_system_t *reset_system; + struct efi_memory_map memmap; + long unsigned int flags; +}; + +struct arch_elf_state { + int flags; +}; + +struct pm_qos_flags_request { + struct list_head node; + s32 flags; +}; + +enum freq_qos_req_type { + FREQ_QOS_MIN = 1, + FREQ_QOS_MAX = 2, +}; + +struct freq_qos_request { + enum freq_qos_req_type type; + struct plist_node pnode; + struct freq_constraints *qos; +}; + +enum dev_pm_qos_req_type { + DEV_PM_QOS_RESUME_LATENCY = 1, + DEV_PM_QOS_LATENCY_TOLERANCE = 2, + DEV_PM_QOS_MIN_FREQUENCY = 3, + DEV_PM_QOS_MAX_FREQUENCY = 4, + DEV_PM_QOS_FLAGS = 5, +}; + +struct dev_pm_qos_request { + enum dev_pm_qos_req_type type; + union { + struct plist_node pnode; + struct pm_qos_flags_request flr; + struct freq_qos_request freq; + } data; + struct device *dev; +}; + +enum stack_type { + STACK_TYPE_UNKNOWN = 0, + STACK_TYPE_TASK = 1, + STACK_TYPE_IRQ = 2, + STACK_TYPE_OVERFLOW = 3, + STACK_TYPE_SDEI_NORMAL = 4, + STACK_TYPE_SDEI_CRITICAL = 5, + __NR_STACK_TYPES = 6, +}; + +struct stackframe { + long unsigned int fp; + long unsigned int pc; + long unsigned int stacks_done[1]; + long unsigned int prev_fp; + enum stack_type prev_type; + int graph; +}; + +struct user_sve_header { + __u32 size; + __u32 max_size; + __u16 vl; + __u16 max_vl; + __u16 flags; + __u16 __reserved; +}; + +struct user_pac_mask { + __u64 data_mask; + __u64 insn_mask; +}; + +struct user_pac_address_keys { + __int128 unsigned apiakey; + __int128 unsigned apibkey; + __int128 unsigned apdakey; + __int128 unsigned apdbkey; +}; + +struct user_pac_generic_keys { + __int128 unsigned apgakey; +}; + +typedef u32 compat_ulong_t; + +enum perf_type_id { + PERF_TYPE_HARDWARE = 0, + PERF_TYPE_SOFTWARE = 1, + PERF_TYPE_TRACEPOINT = 2, + PERF_TYPE_HW_CACHE = 3, + PERF_TYPE_RAW = 4, + PERF_TYPE_BREAKPOINT = 5, + PERF_TYPE_MAX = 6, +}; + +enum { + TASKSTATS_CMD_UNSPEC = 0, + TASKSTATS_CMD_GET = 
1, + TASKSTATS_CMD_NEW = 2, + __TASKSTATS_CMD_MAX = 3, +}; + +enum cpu_usage_stat { + CPUTIME_USER = 0, + CPUTIME_NICE = 1, + CPUTIME_SYSTEM = 2, + CPUTIME_SOFTIRQ = 3, + CPUTIME_IRQ = 4, + CPUTIME_IDLE = 5, + CPUTIME_IOWAIT = 6, + CPUTIME_STEAL = 7, + CPUTIME_GUEST = 8, + CPUTIME_GUEST_NICE = 9, + NR_STATS = 10, +}; + +enum bpf_cgroup_storage_type { + BPF_CGROUP_STORAGE_SHARED = 0, + BPF_CGROUP_STORAGE_PERCPU = 1, + __BPF_CGROUP_STORAGE_MAX = 2, +}; + +enum psi_task_count { + NR_IOWAIT = 0, + NR_MEMSTALL = 1, + NR_RUNNING = 2, + NR_ONCPU = 3, + NR_PSI_TASK_COUNTS = 4, +}; + +enum psi_states { + PSI_IO_SOME = 0, + PSI_IO_FULL = 1, + PSI_MEM_SOME = 2, + PSI_MEM_FULL = 3, + PSI_CPU_SOME = 4, + PSI_NONIDLE = 5, + NR_PSI_STATES = 6, +}; + +enum psi_aggregators { + PSI_AVGS = 0, + PSI_POLL = 1, + NR_PSI_AGGREGATORS = 2, +}; + +enum cgroup_subsys_id { + cpuset_cgrp_id = 0, + cpu_cgrp_id = 1, + cpuacct_cgrp_id = 2, + io_cgrp_id = 3, + memory_cgrp_id = 4, + devices_cgrp_id = 5, + freezer_cgrp_id = 6, + net_cls_cgrp_id = 7, + perf_event_cgrp_id = 8, + net_prio_cgrp_id = 9, + hugetlb_cgrp_id = 10, + pids_cgrp_id = 11, + CGROUP_SUBSYS_COUNT = 12, +}; + +enum { + HW_BREAKPOINT_LEN_1 = 1, + HW_BREAKPOINT_LEN_2 = 2, + HW_BREAKPOINT_LEN_3 = 3, + HW_BREAKPOINT_LEN_4 = 4, + HW_BREAKPOINT_LEN_5 = 5, + HW_BREAKPOINT_LEN_6 = 6, + HW_BREAKPOINT_LEN_7 = 7, + HW_BREAKPOINT_LEN_8 = 8, +}; + +enum { + HW_BREAKPOINT_EMPTY = 0, + HW_BREAKPOINT_R = 1, + HW_BREAKPOINT_W = 2, + HW_BREAKPOINT_RW = 3, + HW_BREAKPOINT_X = 4, + HW_BREAKPOINT_INVALID = 7, +}; + +enum bp_type_idx { + TYPE_INST = 0, + TYPE_DATA = 1, + TYPE_MAX = 2, +}; + +struct membuf { + void *p; + size_t left; +}; + +struct user_regset; + +typedef int user_regset_active_fn(struct task_struct *, const struct user_regset *); + +typedef int user_regset_get2_fn(struct task_struct *, const struct user_regset *, struct membuf); + +typedef int user_regset_set_fn(struct task_struct *, const struct user_regset *, unsigned int, unsigned int, const void *, const void *); + +typedef int user_regset_writeback_fn(struct task_struct *, const struct user_regset *, int); + +struct user_regset { + user_regset_get2_fn *regset_get; + user_regset_set_fn *set; + user_regset_active_fn *active; + user_regset_writeback_fn *writeback; + unsigned int n; + unsigned int size; + unsigned int align; + unsigned int bias; + unsigned int core_note_type; +}; + +struct user_regset_view { + const char *name; + const struct user_regset *regsets; + unsigned int n; + u32 e_flags; + u16 e_machine; + u8 ei_osabi; +}; + +struct stack_info { + long unsigned int low; + long unsigned int high; + enum stack_type type; +}; + +struct trace_event_raw_sys_enter { + struct trace_entry ent; + long int id; + long unsigned int args[6]; + char __data[0]; +}; + +struct trace_event_raw_sys_exit { + struct trace_entry ent; + long int id; + long int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_sys_enter {}; + +struct trace_event_data_offsets_sys_exit {}; + +typedef void (*btf_trace_sys_enter)(void *, struct pt_regs *, long int); + +typedef void (*btf_trace_sys_exit)(void *, struct pt_regs *, long int); + +struct pt_regs_offset { + const char *name; + int offset; +}; + +enum aarch64_regset { + REGSET_GPR = 0, + REGSET_FPR = 1, + REGSET_TLS = 2, + REGSET_HW_BREAK = 3, + REGSET_HW_WATCH = 4, + REGSET_SYSTEM_CALL = 5, + REGSET_SVE = 6, + REGSET_PAC_MASK = 7, + REGSET_PACA_KEYS = 8, + REGSET_PACG_KEYS = 9, + REGSET_TAGGED_ADDR_CTRL = 10, +}; + +enum compat_regset { + REGSET_COMPAT_GPR = 0, + 
REGSET_COMPAT_VFP = 1, +}; + +enum ptrace_syscall_dir { + PTRACE_SYSCALL_ENTER = 0, + PTRACE_SYSCALL_EXIT = 1, +}; + +struct atomic_notifier_head { + spinlock_t lock; + struct notifier_block *head; +}; + +typedef struct pglist_data pg_data_t; + +enum meminit_context { + MEMINIT_EARLY = 0, + MEMINIT_HOTPLUG = 1, +}; + +enum memblock_flags { + MEMBLOCK_NONE = 0, + MEMBLOCK_HOTPLUG = 1, + MEMBLOCK_MIRROR = 2, + MEMBLOCK_NOMAP = 4, +}; + +struct memblock_region { + phys_addr_t base; + phys_addr_t size; + enum memblock_flags flags; + int nid; +}; + +struct memblock_type { + long unsigned int cnt; + long unsigned int max; + phys_addr_t total_size; + struct memblock_region *regions; + char *name; +}; + +struct memblock { + bool bottom_up; + phys_addr_t current_limit; + struct memblock_type memory; + struct memblock_type reserved; +}; + +struct mpidr_hash { + u64 mask; + u32 shift_aff[4]; + u32 bits; +}; + +struct cpu { + int node_id; + int hotpluggable; + struct device dev; +}; + +struct cpuinfo_arm64 { + struct cpu cpu; + struct kobject kobj; + u32 reg_ctr; + u32 reg_cntfrq; + u32 reg_dczid; + u32 reg_midr; + u32 reg_revidr; + u64 reg_id_aa64dfr0; + u64 reg_id_aa64dfr1; + u64 reg_id_aa64isar0; + u64 reg_id_aa64isar1; + u64 reg_id_aa64mmfr0; + u64 reg_id_aa64mmfr1; + u64 reg_id_aa64mmfr2; + u64 reg_id_aa64pfr0; + u64 reg_id_aa64pfr1; + u64 reg_id_aa64zfr0; + u32 reg_id_dfr0; + u32 reg_id_dfr1; + u32 reg_id_isar0; + u32 reg_id_isar1; + u32 reg_id_isar2; + u32 reg_id_isar3; + u32 reg_id_isar4; + u32 reg_id_isar5; + u32 reg_id_isar6; + u32 reg_id_mmfr0; + u32 reg_id_mmfr1; + u32 reg_id_mmfr2; + u32 reg_id_mmfr3; + u32 reg_id_mmfr4; + u32 reg_id_mmfr5; + u32 reg_id_pfr0; + u32 reg_id_pfr1; + u32 reg_id_pfr2; + u32 reg_mvfr0; + u32 reg_mvfr1; + u32 reg_mvfr2; + u64 reg_zcr; +}; + +struct cpu_operations { + const char *name; + int (*cpu_init)(unsigned int); + int (*cpu_prepare)(unsigned int); + int (*cpu_boot)(unsigned int); + void (*cpu_postboot)(); + bool (*cpu_can_disable)(unsigned int); + int (*cpu_disable)(unsigned int); + void (*cpu_die)(unsigned int); + int (*cpu_kill)(unsigned int); + int (*cpu_init_idle)(unsigned int); + int (*cpu_suspend)(long unsigned int); +}; + +struct sigcontext { + __u64 fault_address; + __u64 regs[31]; + __u64 sp; + __u64 pc; + __u64 pstate; + long: 64; + __u8 __reserved[4096]; +}; + +struct _aarch64_ctx { + __u32 magic; + __u32 size; +}; + +struct fpsimd_context { + struct _aarch64_ctx head; + __u32 fpsr; + __u32 fpcr; + __int128 unsigned vregs[32]; +}; + +struct esr_context { + struct _aarch64_ctx head; + __u64 esr; +}; + +struct extra_context { + struct _aarch64_ctx head; + __u64 datap; + __u32 size; + __u32 __reserved[3]; +}; + +struct sve_context { + struct _aarch64_ctx head; + __u16 vl; + __u16 __reserved[3]; +}; + +struct sigaltstack { + void *ss_sp; + int ss_flags; + size_t ss_size; +}; + +typedef struct sigaltstack stack_t; + +struct siginfo { + union { + struct { + int si_signo; + int si_errno; + int si_code; + union __sifields _sifields; + }; + int _si_pad[32]; + }; +}; + +struct ksignal { + struct k_sigaction ka; + kernel_siginfo_t info; + int sig; +}; + +enum { + EI_ETYPE_NONE = 0, + EI_ETYPE_NULL = 1, + EI_ETYPE_ERRNO = 2, + EI_ETYPE_ERRNO_NULL = 3, + EI_ETYPE_TRUE = 4, +}; + +struct syscall_metadata { + const char *name; + int syscall_nr; + int nb_args; + const char **types; + const char **args; + struct list_head enter_fields; + struct trace_event_call *enter_event; + struct trace_event_call *exit_event; +}; + +struct ucontext { + long unsigned int 
uc_flags; + struct ucontext *uc_link; + stack_t uc_stack; + sigset_t uc_sigmask; + __u8 __unused[120]; + long: 64; + struct sigcontext uc_mcontext; +}; + +struct rt_sigframe { + struct siginfo info; + struct ucontext uc; +}; + +struct frame_record { + u64 fp; + u64 lr; +}; + +struct rt_sigframe_user_layout { + struct rt_sigframe *sigframe; + struct frame_record *next_frame; + long unsigned int size; + long unsigned int limit; + long unsigned int fpsimd_offset; + long unsigned int esr_offset; + long unsigned int sve_offset; + long unsigned int extra_offset; + long unsigned int end_offset; +}; + +struct user_ctxs { + struct fpsimd_context *fpsimd; + struct sve_context *sve; +}; + +enum { + PER_LINUX = 0, + PER_LINUX_32BIT = 8388608, + PER_LINUX_FDPIC = 524288, + PER_SVR4 = 68157441, + PER_SVR3 = 83886082, + PER_SCOSVR3 = 117440515, + PER_OSR5 = 100663299, + PER_WYSEV386 = 83886084, + PER_ISCR4 = 67108869, + PER_BSD = 6, + PER_SUNOS = 67108870, + PER_XENIX = 83886087, + PER_LINUX32 = 8, + PER_LINUX32_3GB = 134217736, + PER_IRIX32 = 67108873, + PER_IRIXN32 = 67108874, + PER_IRIX64 = 67108875, + PER_RISCOS = 12, + PER_SOLARIS = 67108877, + PER_UW7 = 68157454, + PER_OSF4 = 15, + PER_HPUX = 16, + PER_MASK = 255, +}; + +typedef long int (*syscall_fn_t)(const struct pt_regs *); + +typedef bool (*stack_trace_consume_fn)(void *, long unsigned int); + +typedef bool pstate_check_t(long unsigned int); + +enum lockdep_ok { + LOCKDEP_STILL_OK = 0, + LOCKDEP_NOW_UNRELIABLE = 1, +}; + +enum bug_trap_type { + BUG_TRAP_TYPE_NONE = 0, + BUG_TRAP_TYPE_WARN = 1, + BUG_TRAP_TYPE_BUG = 2, +}; + +enum ftr_type { + FTR_EXACT = 0, + FTR_LOWER_SAFE = 1, + FTR_HIGHER_SAFE = 2, + FTR_HIGHER_OR_ZERO_SAFE = 3, +}; + +struct arm64_ftr_bits { + bool sign; + bool visible; + bool strict; + enum ftr_type type; + u8 shift; + u8 width; + s64 safe_val; +}; + +struct arm64_ftr_reg { + const char *name; + u64 strict_mask; + u64 user_mask; + u64 sys_val; + u64 user_val; + const struct arm64_ftr_bits *ftr_bits; +}; + +enum siginfo_layout { + SIL_KILL = 0, + SIL_TIMER = 1, + SIL_POLL = 2, + SIL_FAULT = 3, + SIL_FAULT_MCEERR = 4, + SIL_FAULT_BNDERR = 5, + SIL_FAULT_PKUERR = 6, + SIL_CHLD = 7, + SIL_RT = 8, + SIL_SYS = 9, +}; + +enum die_val { + DIE_UNUSED = 0, + DIE_OOPS = 1, +}; + +struct undef_hook { + struct list_head node; + u32 instr_mask; + u32 instr_val; + u64 pstate_mask; + u64 pstate_val; + int (*fn)(struct pt_regs *, u32); +}; + +struct sys64_hook { + unsigned int esr_mask; + unsigned int esr_val; + void (*handler)(unsigned int, struct pt_regs *); +}; + +struct timens_offset { + s64 sec; + u64 nsec; +}; + +enum vm_fault_reason { + VM_FAULT_OOM = 1, + VM_FAULT_SIGBUS = 2, + VM_FAULT_MAJOR = 4, + VM_FAULT_WRITE = 8, + VM_FAULT_HWPOISON = 16, + VM_FAULT_HWPOISON_LARGE = 32, + VM_FAULT_SIGSEGV = 64, + VM_FAULT_NOPAGE = 256, + VM_FAULT_LOCKED = 512, + VM_FAULT_RETRY = 1024, + VM_FAULT_FALLBACK = 2048, + VM_FAULT_DONE_COW = 4096, + VM_FAULT_NEEDDSYNC = 8192, + VM_FAULT_HINDEX_MASK = 983040, +}; + +struct vm_special_mapping { + const char *name; + struct page **pages; + vm_fault_t (*fault)(const struct vm_special_mapping *, struct vm_area_struct *, struct vm_fault *); + int (*mremap)(const struct vm_special_mapping *, struct vm_area_struct *); +}; + +struct timens_offsets { + struct timespec64 monotonic; + struct timespec64 boottime; +}; + +struct time_namespace { + struct kref kref; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct ns_common ns; + struct timens_offsets offsets; + struct page *vvar_page; + bool 
frozen_offsets; +}; + +struct arch_vdso_data {}; + +struct vdso_timestamp { + u64 sec; + u64 nsec; +}; + +struct vdso_data { + u32 seq; + s32 clock_mode; + u64 cycle_last; + u64 mask; + u32 mult; + u32 shift; + union { + struct vdso_timestamp basetime[12]; + struct timens_offset offset[12]; + }; + s32 tz_minuteswest; + s32 tz_dsttime; + u32 hrtimer_res; + u32 __unused; + struct arch_vdso_data arch_data; +}; + +enum vdso_abi { + VDSO_ABI_AA64 = 0, + VDSO_ABI_AA32 = 1, +}; + +enum vvar_pages { + VVAR_DATA_PAGE_OFFSET = 0, + VVAR_TIMENS_PAGE_OFFSET = 1, + VVAR_NR_PAGES = 2, +}; + +struct vdso_abi_info { + const char *name; + const char *vdso_code_start; + const char *vdso_code_end; + long unsigned int vdso_pages; + struct vm_special_mapping *dm; + struct vm_special_mapping *cm; +}; + +enum aarch32_map { + AA32_MAP_VECTORS = 0, + AA32_MAP_SIGPAGE = 1, + AA32_MAP_VVAR = 2, + AA32_MAP_VDSO = 3, +}; + +enum aarch64_map { + AA64_MAP_VVAR = 0, + AA64_MAP_VDSO = 1, +}; + +struct psci_operations { + u32 (*get_version)(); + int (*cpu_suspend)(u32, long unsigned int); + int (*cpu_off)(u32); + int (*cpu_on)(long unsigned int, long unsigned int); + int (*migrate)(long unsigned int); + int (*affinity_info)(long unsigned int, long unsigned int); + int (*migrate_info_type)(); +}; + +enum aarch64_insn_encoding_class { + AARCH64_INSN_CLS_UNKNOWN = 0, + AARCH64_INSN_CLS_DP_IMM = 1, + AARCH64_INSN_CLS_DP_REG = 2, + AARCH64_INSN_CLS_DP_FPSIMD = 3, + AARCH64_INSN_CLS_LDST = 4, + AARCH64_INSN_CLS_BR_SYS = 5, +}; + +enum aarch64_insn_hint_cr_op { + AARCH64_INSN_HINT_NOP = 0, + AARCH64_INSN_HINT_YIELD = 32, + AARCH64_INSN_HINT_WFE = 64, + AARCH64_INSN_HINT_WFI = 96, + AARCH64_INSN_HINT_SEV = 128, + AARCH64_INSN_HINT_SEVL = 160, + AARCH64_INSN_HINT_XPACLRI = 224, + AARCH64_INSN_HINT_PACIA_1716 = 256, + AARCH64_INSN_HINT_PACIB_1716 = 320, + AARCH64_INSN_HINT_AUTIA_1716 = 384, + AARCH64_INSN_HINT_AUTIB_1716 = 448, + AARCH64_INSN_HINT_PACIAZ = 768, + AARCH64_INSN_HINT_PACIASP = 800, + AARCH64_INSN_HINT_PACIBZ = 832, + AARCH64_INSN_HINT_PACIBSP = 864, + AARCH64_INSN_HINT_AUTIAZ = 896, + AARCH64_INSN_HINT_AUTIASP = 928, + AARCH64_INSN_HINT_AUTIBZ = 960, + AARCH64_INSN_HINT_AUTIBSP = 992, + AARCH64_INSN_HINT_ESB = 512, + AARCH64_INSN_HINT_PSB = 544, + AARCH64_INSN_HINT_TSB = 576, + AARCH64_INSN_HINT_CSDB = 640, + AARCH64_INSN_HINT_BTI = 1024, + AARCH64_INSN_HINT_BTIC = 1088, + AARCH64_INSN_HINT_BTIJ = 1152, + AARCH64_INSN_HINT_BTIJC = 1216, +}; + +enum aarch64_insn_imm_type { + AARCH64_INSN_IMM_ADR = 0, + AARCH64_INSN_IMM_26 = 1, + AARCH64_INSN_IMM_19 = 2, + AARCH64_INSN_IMM_16 = 3, + AARCH64_INSN_IMM_14 = 4, + AARCH64_INSN_IMM_12 = 5, + AARCH64_INSN_IMM_9 = 6, + AARCH64_INSN_IMM_7 = 7, + AARCH64_INSN_IMM_6 = 8, + AARCH64_INSN_IMM_S = 9, + AARCH64_INSN_IMM_R = 10, + AARCH64_INSN_IMM_N = 11, + AARCH64_INSN_IMM_MAX = 12, +}; + +enum aarch64_insn_register_type { + AARCH64_INSN_REGTYPE_RT = 0, + AARCH64_INSN_REGTYPE_RN = 1, + AARCH64_INSN_REGTYPE_RT2 = 2, + AARCH64_INSN_REGTYPE_RM = 3, + AARCH64_INSN_REGTYPE_RD = 4, + AARCH64_INSN_REGTYPE_RA = 5, + AARCH64_INSN_REGTYPE_RS = 6, +}; + +enum aarch64_insn_register { + AARCH64_INSN_REG_0 = 0, + AARCH64_INSN_REG_1 = 1, + AARCH64_INSN_REG_2 = 2, + AARCH64_INSN_REG_3 = 3, + AARCH64_INSN_REG_4 = 4, + AARCH64_INSN_REG_5 = 5, + AARCH64_INSN_REG_6 = 6, + AARCH64_INSN_REG_7 = 7, + AARCH64_INSN_REG_8 = 8, + AARCH64_INSN_REG_9 = 9, + AARCH64_INSN_REG_10 = 10, + AARCH64_INSN_REG_11 = 11, + AARCH64_INSN_REG_12 = 12, + AARCH64_INSN_REG_13 = 13, + AARCH64_INSN_REG_14 = 14, + AARCH64_INSN_REG_15 
= 15, + AARCH64_INSN_REG_16 = 16, + AARCH64_INSN_REG_17 = 17, + AARCH64_INSN_REG_18 = 18, + AARCH64_INSN_REG_19 = 19, + AARCH64_INSN_REG_20 = 20, + AARCH64_INSN_REG_21 = 21, + AARCH64_INSN_REG_22 = 22, + AARCH64_INSN_REG_23 = 23, + AARCH64_INSN_REG_24 = 24, + AARCH64_INSN_REG_25 = 25, + AARCH64_INSN_REG_26 = 26, + AARCH64_INSN_REG_27 = 27, + AARCH64_INSN_REG_28 = 28, + AARCH64_INSN_REG_29 = 29, + AARCH64_INSN_REG_FP = 29, + AARCH64_INSN_REG_30 = 30, + AARCH64_INSN_REG_LR = 30, + AARCH64_INSN_REG_ZR = 31, + AARCH64_INSN_REG_SP = 31, +}; + +enum aarch64_insn_variant { + AARCH64_INSN_VARIANT_32BIT = 0, + AARCH64_INSN_VARIANT_64BIT = 1, +}; + +enum aarch64_insn_condition { + AARCH64_INSN_COND_EQ = 0, + AARCH64_INSN_COND_NE = 1, + AARCH64_INSN_COND_CS = 2, + AARCH64_INSN_COND_CC = 3, + AARCH64_INSN_COND_MI = 4, + AARCH64_INSN_COND_PL = 5, + AARCH64_INSN_COND_VS = 6, + AARCH64_INSN_COND_VC = 7, + AARCH64_INSN_COND_HI = 8, + AARCH64_INSN_COND_LS = 9, + AARCH64_INSN_COND_GE = 10, + AARCH64_INSN_COND_LT = 11, + AARCH64_INSN_COND_GT = 12, + AARCH64_INSN_COND_LE = 13, + AARCH64_INSN_COND_AL = 14, +}; + +enum aarch64_insn_branch_type { + AARCH64_INSN_BRANCH_NOLINK = 0, + AARCH64_INSN_BRANCH_LINK = 1, + AARCH64_INSN_BRANCH_RETURN = 2, + AARCH64_INSN_BRANCH_COMP_ZERO = 3, + AARCH64_INSN_BRANCH_COMP_NONZERO = 4, +}; + +enum aarch64_insn_size_type { + AARCH64_INSN_SIZE_8 = 0, + AARCH64_INSN_SIZE_16 = 1, + AARCH64_INSN_SIZE_32 = 2, + AARCH64_INSN_SIZE_64 = 3, +}; + +enum aarch64_insn_ldst_type { + AARCH64_INSN_LDST_LOAD_REG_OFFSET = 0, + AARCH64_INSN_LDST_STORE_REG_OFFSET = 1, + AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX = 2, + AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX = 3, + AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX = 4, + AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX = 5, + AARCH64_INSN_LDST_LOAD_EX = 6, + AARCH64_INSN_LDST_STORE_EX = 7, +}; + +enum aarch64_insn_adsb_type { + AARCH64_INSN_ADSB_ADD = 0, + AARCH64_INSN_ADSB_SUB = 1, + AARCH64_INSN_ADSB_ADD_SETFLAGS = 2, + AARCH64_INSN_ADSB_SUB_SETFLAGS = 3, +}; + +enum aarch64_insn_movewide_type { + AARCH64_INSN_MOVEWIDE_ZERO = 0, + AARCH64_INSN_MOVEWIDE_KEEP = 1, + AARCH64_INSN_MOVEWIDE_INVERSE = 2, +}; + +enum aarch64_insn_bitfield_type { + AARCH64_INSN_BITFIELD_MOVE = 0, + AARCH64_INSN_BITFIELD_MOVE_UNSIGNED = 1, + AARCH64_INSN_BITFIELD_MOVE_SIGNED = 2, +}; + +enum aarch64_insn_data1_type { + AARCH64_INSN_DATA1_REVERSE_16 = 0, + AARCH64_INSN_DATA1_REVERSE_32 = 1, + AARCH64_INSN_DATA1_REVERSE_64 = 2, +}; + +enum aarch64_insn_data2_type { + AARCH64_INSN_DATA2_UDIV = 0, + AARCH64_INSN_DATA2_SDIV = 1, + AARCH64_INSN_DATA2_LSLV = 2, + AARCH64_INSN_DATA2_LSRV = 3, + AARCH64_INSN_DATA2_ASRV = 4, + AARCH64_INSN_DATA2_RORV = 5, +}; + +enum aarch64_insn_data3_type { + AARCH64_INSN_DATA3_MADD = 0, + AARCH64_INSN_DATA3_MSUB = 1, +}; + +enum aarch64_insn_logic_type { + AARCH64_INSN_LOGIC_AND = 0, + AARCH64_INSN_LOGIC_BIC = 1, + AARCH64_INSN_LOGIC_ORR = 2, + AARCH64_INSN_LOGIC_ORN = 3, + AARCH64_INSN_LOGIC_EOR = 4, + AARCH64_INSN_LOGIC_EON = 5, + AARCH64_INSN_LOGIC_AND_SETFLAGS = 6, + AARCH64_INSN_LOGIC_BIC_SETFLAGS = 7, +}; + +enum aarch64_insn_prfm_type { + AARCH64_INSN_PRFM_TYPE_PLD = 0, + AARCH64_INSN_PRFM_TYPE_PLI = 1, + AARCH64_INSN_PRFM_TYPE_PST = 2, +}; + +enum aarch64_insn_prfm_target { + AARCH64_INSN_PRFM_TARGET_L1 = 0, + AARCH64_INSN_PRFM_TARGET_L2 = 1, + AARCH64_INSN_PRFM_TARGET_L3 = 2, +}; + +enum aarch64_insn_prfm_policy { + AARCH64_INSN_PRFM_POLICY_KEEP = 0, + AARCH64_INSN_PRFM_POLICY_STRM = 1, +}; + +enum aarch64_insn_adr_type { + AARCH64_INSN_ADR_TYPE_ADRP = 0, + 
AARCH64_INSN_ADR_TYPE_ADR = 1, +}; + +enum fixed_addresses { + FIX_HOLE = 0, + FIX_FDT_END = 1, + FIX_FDT = 1024, + FIX_EARLYCON_MEM_BASE = 1025, + FIX_TEXT_POKE0 = 1026, + FIX_ENTRY_TRAMP_DATA = 1027, + FIX_ENTRY_TRAMP_TEXT = 1028, + __end_of_permanent_fixed_addresses = 1029, + FIX_BTMAP_END = 1029, + FIX_BTMAP_BEGIN = 1476, + FIX_PTE = 1477, + FIX_PMD = 1478, + FIX_PUD = 1479, + FIX_PGD = 1480, + __end_of_fixed_addresses = 1481, +}; + +struct aarch64_insn_patch { + void **text_addrs; + u32 *new_insns; + int insn_cnt; + atomic_t cpu_count; +}; + +struct return_address_data { + unsigned int level; + void *addr; +}; + +struct kobj_attribute { + struct attribute attr; + ssize_t (*show)(struct kobject *, struct kobj_attribute *, char *); + ssize_t (*store)(struct kobject *, struct kobj_attribute *, const char *, size_t); +}; + +enum { + CAP_HWCAP = 1, + CAP_COMPAT_HWCAP = 2, + CAP_COMPAT_HWCAP2 = 3, +}; + +struct secondary_data { + void *stack; + struct task_struct *task; + long int status; +}; + +struct device_attribute { + struct attribute attr; + ssize_t (*show)(struct device *, struct device_attribute *, char *); + ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t); +}; + +struct __ftr_reg_entry { + u32 sys_id; + struct arm64_ftr_reg *reg; +}; + +typedef void kpti_remap_fn(int, int, phys_addr_t); + +typedef void ttbr_replace_func(phys_addr_t); + +struct alt_instr { + s32 orig_offset; + s32 alt_offset; + u16 cpufeature; + u8 orig_len; + u8 alt_len; +}; + +typedef void (*alternative_cb_t)(struct alt_instr *, __le32 *, __le32 *, int); + +struct alt_region { + struct alt_instr *begin; + struct alt_instr *end; +}; + +enum cache_type { + CACHE_TYPE_NOCACHE = 0, + CACHE_TYPE_INST = 1, + CACHE_TYPE_DATA = 2, + CACHE_TYPE_SEPARATE = 3, + CACHE_TYPE_UNIFIED = 4, +}; + +struct cacheinfo { + unsigned int id; + enum cache_type type; + unsigned int level; + unsigned int coherency_line_size; + unsigned int number_of_sets; + unsigned int ways_of_associativity; + unsigned int physical_line_partition; + unsigned int size; + cpumask_t shared_cpu_map; + unsigned int attributes; + void *fw_token; + bool disable_sysfs; + void *priv; +}; + +struct cpu_cacheinfo { + struct cacheinfo *info_list; + unsigned int num_levels; + unsigned int num_leaves; + bool cpu_map_populated; +}; + +struct irq_desc; + +typedef void (*irq_flow_handler_t)(struct irq_desc *); + +struct msi_desc; + +struct irq_common_data { + unsigned int state_use_accessors; + unsigned int node; + void *handler_data; + struct msi_desc *msi_desc; + cpumask_var_t affinity; + cpumask_var_t effective_affinity; + unsigned int ipi_offset; +}; + +struct irq_chip; + +struct irq_data { + u32 mask; + unsigned int irq; + long unsigned int hwirq; + struct irq_common_data *common; + struct irq_chip *chip; + struct irq_domain *domain; + struct irq_data *parent_data; + void *chip_data; +}; + +struct irqaction; + +struct irq_affinity_notify; + +struct irq_desc { + struct irq_common_data irq_common_data; + struct irq_data irq_data; + unsigned int *kstat_irqs; + irq_flow_handler_t handle_irq; + struct irqaction *action; + unsigned int status_use_accessors; + unsigned int core_internal_state__do_not_mess_with_it; + unsigned int depth; + unsigned int wake_depth; + unsigned int tot_count; + unsigned int irq_count; + long unsigned int last_unhandled; + unsigned int irqs_unhandled; + atomic_t threads_handled; + int threads_handled_last; + raw_spinlock_t lock; + struct cpumask *percpu_enabled; + const struct cpumask *percpu_affinity; + const 
struct cpumask *affinity_hint; + struct irq_affinity_notify *affinity_notify; + long unsigned int threads_oneshot; + atomic_t threads_active; + wait_queue_head_t wait_for_threads; + unsigned int nr_actions; + unsigned int no_suspend_depth; + unsigned int cond_suspend_depth; + unsigned int force_resume_depth; + struct proc_dir_entry *dir; + struct callback_head rcu; + struct kobject kobj; + struct mutex request_mutex; + int parent_irq; + struct module *owner; + const char *name; + long: 64; + long: 64; + long: 64; +}; + +enum irq_gc_flags { + IRQ_GC_INIT_MASK_CACHE = 1, + IRQ_GC_INIT_NESTED_LOCK = 2, + IRQ_GC_MASK_CACHE_PER_TYPE = 4, + IRQ_GC_NO_MASK = 8, + IRQ_GC_BE_IO = 16, +}; + +struct irq_chip_generic; + +struct irq_domain_chip_generic { + unsigned int irqs_per_chip; + unsigned int num_chips; + unsigned int irq_flags_to_clear; + unsigned int irq_flags_to_set; + enum irq_gc_flags gc_flags; + struct irq_chip_generic *gc[0]; +}; + +struct acpi_subtable_header { + u8 type; + u8 length; +}; + +struct acpi_hmat_structure { + u16 type; + u16 reserved; + u32 length; +}; + +enum acpi_madt_type { + ACPI_MADT_TYPE_LOCAL_APIC = 0, + ACPI_MADT_TYPE_IO_APIC = 1, + ACPI_MADT_TYPE_INTERRUPT_OVERRIDE = 2, + ACPI_MADT_TYPE_NMI_SOURCE = 3, + ACPI_MADT_TYPE_LOCAL_APIC_NMI = 4, + ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE = 5, + ACPI_MADT_TYPE_IO_SAPIC = 6, + ACPI_MADT_TYPE_LOCAL_SAPIC = 7, + ACPI_MADT_TYPE_INTERRUPT_SOURCE = 8, + ACPI_MADT_TYPE_LOCAL_X2APIC = 9, + ACPI_MADT_TYPE_LOCAL_X2APIC_NMI = 10, + ACPI_MADT_TYPE_GENERIC_INTERRUPT = 11, + ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR = 12, + ACPI_MADT_TYPE_GENERIC_MSI_FRAME = 13, + ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR = 14, + ACPI_MADT_TYPE_GENERIC_TRANSLATOR = 15, + ACPI_MADT_TYPE_RESERVED = 16, +}; + +struct acpi_madt_generic_interrupt { + struct acpi_subtable_header header; + u16 reserved; + u32 cpu_interface_number; + u32 uid; + u32 flags; + u32 parking_version; + u32 performance_interrupt; + u64 parked_address; + u64 base_address; + u64 gicv_base_address; + u64 gich_base_address; + u32 vgic_interrupt; + u64 gicr_base_address; + u64 arm_mpidr; + u8 efficiency_class; + u8 reserved2[1]; + u16 spe_interrupt; +} __attribute__((packed)); + +enum irqreturn { + IRQ_NONE = 0, + IRQ_HANDLED = 1, + IRQ_WAKE_THREAD = 2, +}; + +typedef enum irqreturn irqreturn_t; + +typedef irqreturn_t (*irq_handler_t)(int, void *); + +struct irqaction { + irq_handler_t handler; + void *dev_id; + void *percpu_dev_id; + struct irqaction *next; + irq_handler_t thread_fn; + struct task_struct *thread; + struct irqaction *secondary; + unsigned int irq; + unsigned int flags; + long unsigned int thread_flags; + long unsigned int thread_mask; + const char *name; + struct proc_dir_entry *dir; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct irq_affinity_notify { + unsigned int irq; + struct kref kref; + struct work_struct work; + void (*notify)(struct irq_affinity_notify *, const cpumask_t *); + void (*release)(struct kref *); +}; + +struct irq_affinity_desc { + struct cpumask mask; + unsigned int is_managed: 1; +}; + +enum irqchip_irq_state { + IRQCHIP_STATE_PENDING = 0, + IRQCHIP_STATE_ACTIVE = 1, + IRQCHIP_STATE_MASKED = 2, + IRQCHIP_STATE_LINE_LEVEL = 3, +}; + +union acpi_subtable_headers { + struct acpi_subtable_header common; + struct acpi_hmat_structure hmat; +}; + +enum { + IRQ_TYPE_NONE = 0, + IRQ_TYPE_EDGE_RISING = 1, + IRQ_TYPE_EDGE_FALLING = 2, + IRQ_TYPE_EDGE_BOTH = 3, + IRQ_TYPE_LEVEL_HIGH = 4, + IRQ_TYPE_LEVEL_LOW = 8, + IRQ_TYPE_LEVEL_MASK = 12, + IRQ_TYPE_SENSE_MASK = 15, 
+ IRQ_TYPE_DEFAULT = 15, + IRQ_TYPE_PROBE = 16, + IRQ_LEVEL = 256, + IRQ_PER_CPU = 512, + IRQ_NOPROBE = 1024, + IRQ_NOREQUEST = 2048, + IRQ_NOAUTOEN = 4096, + IRQ_NO_BALANCING = 8192, + IRQ_MOVE_PCNTXT = 16384, + IRQ_NESTED_THREAD = 32768, + IRQ_NOTHREAD = 65536, + IRQ_PER_CPU_DEVID = 131072, + IRQ_IS_POLLED = 262144, + IRQ_DISABLE_UNLAZY = 524288, + IRQ_HIDDEN = 1048576, +}; + +struct msi_msg { + u32 address_lo; + u32 address_hi; + u32 data; +}; + +struct platform_msi_priv_data; + +struct platform_msi_desc { + struct platform_msi_priv_data *msi_priv_data; + u16 msi_index; +}; + +struct fsl_mc_msi_desc { + u16 msi_index; +}; + +struct ti_sci_inta_msi_desc { + u16 dev_index; +}; + +struct msi_desc { + struct list_head list; + unsigned int irq; + unsigned int nvec_used; + struct device *dev; + struct msi_msg msg; + struct irq_affinity_desc *affinity; + const void *iommu_cookie; + void (*write_msi_msg)(struct msi_desc *, void *); + void *write_msi_msg_data; + union { + struct { + u32 masked; + struct { + u8 is_msix: 1; + u8 multiple: 3; + u8 multi_cap: 3; + u8 maskbit: 1; + u8 is_64: 1; + u8 is_virtual: 1; + u16 entry_nr; + unsigned int default_irq; + } msi_attrib; + union { + u8 mask_pos; + void *mask_base; + }; + }; + struct platform_msi_desc platform; + struct fsl_mc_msi_desc fsl_mc; + struct ti_sci_inta_msi_desc inta; + }; +}; + +struct irq_chip { + struct device *parent_device; + const char *name; + unsigned int (*irq_startup)(struct irq_data *); + void (*irq_shutdown)(struct irq_data *); + void (*irq_enable)(struct irq_data *); + void (*irq_disable)(struct irq_data *); + void (*irq_ack)(struct irq_data *); + void (*irq_mask)(struct irq_data *); + void (*irq_mask_ack)(struct irq_data *); + void (*irq_unmask)(struct irq_data *); + void (*irq_eoi)(struct irq_data *); + int (*irq_set_affinity)(struct irq_data *, const struct cpumask *, bool); + int (*irq_retrigger)(struct irq_data *); + int (*irq_set_type)(struct irq_data *, unsigned int); + int (*irq_set_wake)(struct irq_data *, unsigned int); + void (*irq_bus_lock)(struct irq_data *); + void (*irq_bus_sync_unlock)(struct irq_data *); + void (*irq_cpu_online)(struct irq_data *); + void (*irq_cpu_offline)(struct irq_data *); + void (*irq_suspend)(struct irq_data *); + void (*irq_resume)(struct irq_data *); + void (*irq_pm_shutdown)(struct irq_data *); + void (*irq_calc_mask)(struct irq_data *); + void (*irq_print_chip)(struct irq_data *, struct seq_file *); + int (*irq_request_resources)(struct irq_data *); + void (*irq_release_resources)(struct irq_data *); + void (*irq_compose_msi_msg)(struct irq_data *, struct msi_msg *); + void (*irq_write_msi_msg)(struct irq_data *, struct msi_msg *); + int (*irq_get_irqchip_state)(struct irq_data *, enum irqchip_irq_state, bool *); + int (*irq_set_irqchip_state)(struct irq_data *, enum irqchip_irq_state, bool); + int (*irq_set_vcpu_affinity)(struct irq_data *, void *); + void (*ipi_send_single)(struct irq_data *, unsigned int); + void (*ipi_send_mask)(struct irq_data *, const struct cpumask *); + int (*irq_nmi_setup)(struct irq_data *); + void (*irq_nmi_teardown)(struct irq_data *); + long unsigned int flags; +}; + +struct irq_chip_regs { + long unsigned int enable; + long unsigned int disable; + long unsigned int mask; + long unsigned int ack; + long unsigned int eoi; + long unsigned int type; + long unsigned int polarity; +}; + +struct irq_chip_type { + struct irq_chip chip; + struct irq_chip_regs regs; + irq_flow_handler_t handler; + u32 type; + u32 mask_cache_priv; + u32 *mask_cache; +}; + +struct 
irq_chip_generic { + raw_spinlock_t lock; + void *reg_base; + u32 (*reg_readl)(void *); + void (*reg_writel)(u32, void *); + void (*suspend)(struct irq_chip_generic *); + void (*resume)(struct irq_chip_generic *); + unsigned int irq_base; + unsigned int irq_cnt; + u32 mask_cache; + u32 type_cache; + u32 polarity_cache; + u32 wake_enabled; + u32 wake_active; + unsigned int num_ct; + void *private; + long unsigned int installed; + long unsigned int unused; + struct irq_domain *domain; + struct list_head list; + struct irq_chip_type chip_types[0]; +}; + +enum vcpu_sysreg { + __INVALID_SYSREG__ = 0, + MPIDR_EL1 = 1, + CSSELR_EL1 = 2, + SCTLR_EL1 = 3, + ACTLR_EL1 = 4, + CPACR_EL1 = 5, + ZCR_EL1 = 6, + TTBR0_EL1 = 7, + TTBR1_EL1 = 8, + TCR_EL1 = 9, + ESR_EL1 = 10, + AFSR0_EL1 = 11, + AFSR1_EL1 = 12, + FAR_EL1 = 13, + MAIR_EL1 = 14, + VBAR_EL1 = 15, + CONTEXTIDR_EL1 = 16, + TPIDR_EL0 = 17, + TPIDRRO_EL0 = 18, + TPIDR_EL1 = 19, + AMAIR_EL1 = 20, + CNTKCTL_EL1 = 21, + PAR_EL1 = 22, + MDSCR_EL1 = 23, + MDCCINT_EL1 = 24, + DISR_EL1 = 25, + PMCR_EL0 = 26, + PMSELR_EL0 = 27, + PMEVCNTR0_EL0 = 28, + PMEVCNTR30_EL0 = 58, + PMCCNTR_EL0 = 59, + PMEVTYPER0_EL0 = 60, + PMEVTYPER30_EL0 = 90, + PMCCFILTR_EL0 = 91, + PMCNTENSET_EL0 = 92, + PMINTENSET_EL1 = 93, + PMOVSSET_EL0 = 94, + PMSWINC_EL0 = 95, + PMUSERENR_EL0 = 96, + APIAKEYLO_EL1 = 97, + APIAKEYHI_EL1 = 98, + APIBKEYLO_EL1 = 99, + APIBKEYHI_EL1 = 100, + APDAKEYLO_EL1 = 101, + APDAKEYHI_EL1 = 102, + APDBKEYLO_EL1 = 103, + APDBKEYHI_EL1 = 104, + APGAKEYLO_EL1 = 105, + APGAKEYHI_EL1 = 106, + ELR_EL1 = 107, + SP_EL1 = 108, + SPSR_EL1 = 109, + CNTVOFF_EL2 = 110, + CNTV_CVAL_EL0 = 111, + CNTV_CTL_EL0 = 112, + CNTP_CVAL_EL0 = 113, + CNTP_CTL_EL0 = 114, + DACR32_EL2 = 115, + IFSR32_EL2 = 116, + FPEXC32_EL2 = 117, + DBGVCR32_EL2 = 118, + NR_SYS_REGS = 119, +}; + +enum kvm_bus { + KVM_MMIO_BUS = 0, + KVM_PIO_BUS = 1, + KVM_VIRTIO_CCW_NOTIFY_BUS = 2, + KVM_FAST_MMIO_BUS = 3, + KVM_NR_BUSES = 4, +}; + +struct trace_event_raw_ipi_raise { + struct trace_entry ent; + u32 __data_loc_target_cpus; + const char *reason; + char __data[0]; +}; + +struct trace_event_raw_ipi_handler { + struct trace_entry ent; + const char *reason; + char __data[0]; +}; + +struct trace_event_data_offsets_ipi_raise { + u32 target_cpus; +}; + +struct trace_event_data_offsets_ipi_handler {}; + +typedef void (*btf_trace_ipi_raise)(void *, const struct cpumask *, const char *); + +typedef void (*btf_trace_ipi_entry)(void *, const char *); + +typedef void (*btf_trace_ipi_exit)(void *, const char *); + +enum ipi_msg_type { + IPI_RESCHEDULE = 0, + IPI_CALL_FUNC = 1, + IPI_CPU_STOP = 2, + IPI_CPU_CRASH_STOP = 3, + IPI_TIMER = 4, + IPI_IRQ_WORK = 5, + IPI_WAKEUP = 6, + NR_IPI = 7, +}; + +struct cpu_topology { + int thread_id; + int core_id; + int package_id; + int llc_id; + cpumask_t thread_sibling; + cpumask_t core_sibling; + cpumask_t llc_sibling; +}; + +enum cpufreq_table_sorting { + CPUFREQ_TABLE_UNSORTED = 0, + CPUFREQ_TABLE_SORTED_ASCENDING = 1, + CPUFREQ_TABLE_SORTED_DESCENDING = 2, +}; + +struct cpufreq_cpuinfo { + unsigned int max_freq; + unsigned int min_freq; + unsigned int transition_latency; +}; + +struct clk; + +struct cpufreq_governor; + +struct cpufreq_frequency_table; + +struct cpufreq_stats; + +struct thermal_cooling_device; + +struct cpufreq_policy { + cpumask_var_t cpus; + cpumask_var_t related_cpus; + cpumask_var_t real_cpus; + unsigned int shared_type; + unsigned int cpu; + struct clk *clk; + struct cpufreq_cpuinfo cpuinfo; + unsigned int min; + unsigned int max; + unsigned int 
cur; + unsigned int restore_freq; + unsigned int suspend_freq; + unsigned int policy; + unsigned int last_policy; + struct cpufreq_governor *governor; + void *governor_data; + char last_governor[16]; + struct work_struct update; + struct freq_constraints constraints; + struct freq_qos_request *min_freq_req; + struct freq_qos_request *max_freq_req; + struct cpufreq_frequency_table *freq_table; + enum cpufreq_table_sorting freq_table_sorted; + struct list_head policy_list; + struct kobject kobj; + struct completion kobj_unregister; + struct rw_semaphore rwsem; + bool fast_switch_possible; + bool fast_switch_enabled; + bool strict_target; + unsigned int transition_delay_us; + bool dvfs_possible_from_any_cpu; + unsigned int cached_target_freq; + unsigned int cached_resolved_idx; + bool transition_ongoing; + spinlock_t transition_lock; + wait_queue_head_t transition_wait; + struct task_struct *transition_task; + struct cpufreq_stats *stats; + void *driver_data; + struct thermal_cooling_device *cdev; + struct notifier_block nb_min; + struct notifier_block nb_max; +}; + +struct cpufreq_governor { + char name[16]; + int (*init)(struct cpufreq_policy *); + void (*exit)(struct cpufreq_policy *); + int (*start)(struct cpufreq_policy *); + void (*stop)(struct cpufreq_policy *); + void (*limits)(struct cpufreq_policy *); + ssize_t (*show_setspeed)(struct cpufreq_policy *, char *); + int (*store_setspeed)(struct cpufreq_policy *, unsigned int); + struct list_head governor_list; + struct module *owner; + u8 flags; +}; + +struct cpufreq_frequency_table { + unsigned int flags; + unsigned int driver_data; + unsigned int frequency; +}; + +enum arm_smccc_conduit { + SMCCC_CONDUIT_NONE = 0, + SMCCC_CONDUIT_SMC = 1, + SMCCC_CONDUIT_HVC = 2, +}; + +struct arm_smccc_res { + long unsigned int a0; + long unsigned int a1; + long unsigned int a2; + long unsigned int a3; +}; + +enum mitigation_state { + SPECTRE_UNAFFECTED = 0, + SPECTRE_MITIGATED = 1, + SPECTRE_VULNERABLE = 2, +}; + +enum spectre_v4_policy { + SPECTRE_V4_POLICY_MITIGATION_DYNAMIC = 0, + SPECTRE_V4_POLICY_MITIGATION_ENABLED = 1, + SPECTRE_V4_POLICY_MITIGATION_DISABLED = 2, +}; + +struct spectre_v4_param { + const char *str; + enum spectre_v4_policy policy; +}; + +typedef u32 compat_size_t; + +struct compat_statfs64; + +typedef s32 compat_clock_t; + +typedef s32 compat_pid_t; + +typedef s32 compat_timer_t; + +typedef s32 compat_int_t; + +typedef u64 compat_u64; + +typedef u32 __compat_uid32_t; + +typedef u32 compat_sigset_word; + +struct compat_sigaltstack { + compat_uptr_t ss_sp; + int ss_flags; + compat_size_t ss_size; +}; + +typedef struct compat_sigaltstack compat_stack_t; + +typedef struct { + compat_sigset_word sig[2]; +} compat_sigset_t; + +union compat_sigval { + compat_int_t sival_int; + compat_uptr_t sival_ptr; +}; + +typedef union compat_sigval compat_sigval_t; + +struct compat_siginfo { + int si_signo; + int si_errno; + int si_code; + union { + int _pad[29]; + struct { + compat_pid_t _pid; + __compat_uid32_t _uid; + } _kill; + struct { + compat_timer_t _tid; + int _overrun; + compat_sigval_t _sigval; + } _timer; + struct { + compat_pid_t _pid; + __compat_uid32_t _uid; + compat_sigval_t _sigval; + } _rt; + struct { + compat_pid_t _pid; + __compat_uid32_t _uid; + int _status; + compat_clock_t _utime; + compat_clock_t _stime; + } _sigchld; + struct { + compat_uptr_t _addr; + union { + short int _addr_lsb; + struct { + char _dummy_bnd[4]; + compat_uptr_t _lower; + compat_uptr_t _upper; + } _addr_bnd; + struct { + char _dummy_pkey[4]; + u32 
_pkey; + } _addr_pkey; + }; + } _sigfault; + struct { + compat_long_t _band; + int _fd; + } _sigpoll; + struct { + compat_uptr_t _call_addr; + int _syscall; + unsigned int _arch; + } _sigsys; + } _sifields; +}; + +struct compat_sigcontext { + compat_ulong_t trap_no; + compat_ulong_t error_code; + compat_ulong_t oldmask; + compat_ulong_t arm_r0; + compat_ulong_t arm_r1; + compat_ulong_t arm_r2; + compat_ulong_t arm_r3; + compat_ulong_t arm_r4; + compat_ulong_t arm_r5; + compat_ulong_t arm_r6; + compat_ulong_t arm_r7; + compat_ulong_t arm_r8; + compat_ulong_t arm_r9; + compat_ulong_t arm_r10; + compat_ulong_t arm_fp; + compat_ulong_t arm_ip; + compat_ulong_t arm_sp; + compat_ulong_t arm_lr; + compat_ulong_t arm_pc; + compat_ulong_t arm_cpsr; + compat_ulong_t fault_address; +}; + +struct compat_ucontext { + compat_ulong_t uc_flags; + compat_uptr_t uc_link; + compat_stack_t uc_stack; + struct compat_sigcontext uc_mcontext; + compat_sigset_t uc_sigmask; + int __unused[30]; + compat_ulong_t uc_regspace[128]; +}; + +struct compat_sigframe { + struct compat_ucontext uc; + compat_ulong_t retcode[2]; +}; + +struct compat_rt_sigframe { + struct compat_siginfo info; + struct compat_sigframe sig; +}; + +struct compat_user_vfp { + compat_u64 fpregs[32]; + compat_ulong_t fpscr; +}; + +struct compat_user_vfp_exc { + compat_ulong_t fpexc; + compat_ulong_t fpinst; + compat_ulong_t fpinst2; +}; + +struct compat_vfp_sigframe { + compat_ulong_t magic; + compat_ulong_t size; + struct compat_user_vfp ufp; + struct compat_user_vfp_exc ufp_exc; +}; + +struct compat_aux_sigframe { + struct compat_vfp_sigframe vfp; + long unsigned int end_magic; +}; + +union __fpsimd_vreg { + __int128 unsigned raw; + struct { + u64 lo; + u64 hi; + }; +}; + +struct dyn_arch_ftrace {}; + +struct dyn_ftrace { + long unsigned int ip; + long unsigned int flags; + struct dyn_arch_ftrace arch; +}; + +enum { + FTRACE_UPDATE_CALLS = 1, + FTRACE_DISABLE_CALLS = 2, + FTRACE_UPDATE_TRACE_FUNC = 4, + FTRACE_START_FUNC_RET = 8, + FTRACE_STOP_FUNC_RET = 16, + FTRACE_MAY_SLEEP = 32, +}; + +typedef __u64 Elf64_Off; + +typedef __s64 Elf64_Sxword; + +struct elf64_rela { + Elf64_Addr r_offset; + Elf64_Xword r_info; + Elf64_Sxword r_addend; +}; + +typedef struct elf64_rela Elf64_Rela; + +struct elf64_hdr { + unsigned char e_ident[16]; + Elf64_Half e_type; + Elf64_Half e_machine; + Elf64_Word e_version; + Elf64_Addr e_entry; + Elf64_Off e_phoff; + Elf64_Off e_shoff; + Elf64_Word e_flags; + Elf64_Half e_ehsize; + Elf64_Half e_phentsize; + Elf64_Half e_phnum; + Elf64_Half e_shentsize; + Elf64_Half e_shnum; + Elf64_Half e_shstrndx; +}; + +typedef struct elf64_hdr Elf64_Ehdr; + +struct elf64_shdr { + Elf64_Word sh_name; + Elf64_Word sh_type; + Elf64_Xword sh_flags; + Elf64_Addr sh_addr; + Elf64_Off sh_offset; + Elf64_Xword sh_size; + Elf64_Word sh_link; + Elf64_Word sh_info; + Elf64_Xword sh_addralign; + Elf64_Xword sh_entsize; +}; + +typedef struct elf64_shdr Elf64_Shdr; + +enum aarch64_reloc_op { + RELOC_OP_NONE = 0, + RELOC_OP_ABS = 1, + RELOC_OP_PREL = 2, + RELOC_OP_PAGE = 3, +}; + +enum aarch64_insn_movw_imm_type { + AARCH64_INSN_IMM_MOVNZ = 0, + AARCH64_INSN_IMM_MOVKZ = 1, +}; + +enum perf_sample_regs_abi { + PERF_SAMPLE_REGS_ABI_NONE = 0, + PERF_SAMPLE_REGS_ABI_32 = 1, + PERF_SAMPLE_REGS_ABI_64 = 2, +}; + +enum perf_event_arm_regs { + PERF_REG_ARM64_X0 = 0, + PERF_REG_ARM64_X1 = 1, + PERF_REG_ARM64_X2 = 2, + PERF_REG_ARM64_X3 = 3, + PERF_REG_ARM64_X4 = 4, + PERF_REG_ARM64_X5 = 5, + PERF_REG_ARM64_X6 = 6, + PERF_REG_ARM64_X7 = 7, + PERF_REG_ARM64_X8 = 
8, + PERF_REG_ARM64_X9 = 9, + PERF_REG_ARM64_X10 = 10, + PERF_REG_ARM64_X11 = 11, + PERF_REG_ARM64_X12 = 12, + PERF_REG_ARM64_X13 = 13, + PERF_REG_ARM64_X14 = 14, + PERF_REG_ARM64_X15 = 15, + PERF_REG_ARM64_X16 = 16, + PERF_REG_ARM64_X17 = 17, + PERF_REG_ARM64_X18 = 18, + PERF_REG_ARM64_X19 = 19, + PERF_REG_ARM64_X20 = 20, + PERF_REG_ARM64_X21 = 21, + PERF_REG_ARM64_X22 = 22, + PERF_REG_ARM64_X23 = 23, + PERF_REG_ARM64_X24 = 24, + PERF_REG_ARM64_X25 = 25, + PERF_REG_ARM64_X26 = 26, + PERF_REG_ARM64_X27 = 27, + PERF_REG_ARM64_X28 = 28, + PERF_REG_ARM64_X29 = 29, + PERF_REG_ARM64_LR = 30, + PERF_REG_ARM64_SP = 31, + PERF_REG_ARM64_PC = 32, + PERF_REG_ARM64_MAX = 33, +}; + +struct perf_guest_info_callbacks { + int (*is_in_guest)(); + int (*is_user_mode)(); + long unsigned int (*get_guest_ip)(); + void (*handle_intel_pt_intr)(); +}; + +struct perf_callchain_entry_ctx { + struct perf_callchain_entry *entry; + u32 max_stack; + u32 nr; + short int contexts; + bool contexts_maxed; +}; + +struct frame_tail { + struct frame_tail *fp; + long unsigned int lr; +}; + +struct compat_frame_tail { + compat_uptr_t fp; + u32 sp; + u32 lr; +}; + +struct platform_device_id { + char name[20]; + kernel_ulong_t driver_data; +}; + +struct pdev_archdata {}; + +enum perf_hw_id { + PERF_COUNT_HW_CPU_CYCLES = 0, + PERF_COUNT_HW_INSTRUCTIONS = 1, + PERF_COUNT_HW_CACHE_REFERENCES = 2, + PERF_COUNT_HW_CACHE_MISSES = 3, + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, + PERF_COUNT_HW_BRANCH_MISSES = 5, + PERF_COUNT_HW_BUS_CYCLES = 6, + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, + PERF_COUNT_HW_REF_CPU_CYCLES = 9, + PERF_COUNT_HW_MAX = 10, +}; + +enum perf_hw_cache_id { + PERF_COUNT_HW_CACHE_L1D = 0, + PERF_COUNT_HW_CACHE_L1I = 1, + PERF_COUNT_HW_CACHE_LL = 2, + PERF_COUNT_HW_CACHE_DTLB = 3, + PERF_COUNT_HW_CACHE_ITLB = 4, + PERF_COUNT_HW_CACHE_BPU = 5, + PERF_COUNT_HW_CACHE_NODE = 6, + PERF_COUNT_HW_CACHE_MAX = 7, +}; + +enum perf_hw_cache_op_id { + PERF_COUNT_HW_CACHE_OP_READ = 0, + PERF_COUNT_HW_CACHE_OP_WRITE = 1, + PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, + PERF_COUNT_HW_CACHE_OP_MAX = 3, +}; + +enum perf_hw_cache_op_result_id { + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, + PERF_COUNT_HW_CACHE_RESULT_MISS = 1, + PERF_COUNT_HW_CACHE_RESULT_MAX = 2, +}; + +struct perf_event_mmap_page { + __u32 version; + __u32 compat_version; + __u32 lock; + __u32 index; + __s64 offset; + __u64 time_enabled; + __u64 time_running; + union { + __u64 capabilities; + struct { + __u64 cap_bit0: 1; + __u64 cap_bit0_is_deprecated: 1; + __u64 cap_user_rdpmc: 1; + __u64 cap_user_time: 1; + __u64 cap_user_time_zero: 1; + __u64 cap_user_time_short: 1; + __u64 cap_____res: 58; + }; + }; + __u16 pmc_width; + __u16 time_shift; + __u32 time_mult; + __u64 time_offset; + __u64 time_zero; + __u32 size; + __u32 __reserved_1; + __u64 time_cycles; + __u64 time_mask; + __u8 __reserved[928]; + __u64 data_head; + __u64 data_tail; + __u64 data_offset; + __u64 data_size; + __u64 aux_head; + __u64 aux_tail; + __u64 aux_offset; + __u64 aux_size; +}; + +struct perf_pmu_events_attr { + struct device_attribute attr; + u64 id; + const char *event_str; +}; + +struct mfd_cell; + +struct platform_device { + const char *name; + int id; + bool id_auto; + struct device dev; + u64 platform_dma_mask; + struct device_dma_parameters dma_parms; + u32 num_resources; + struct resource *resource; + const struct platform_device_id *id_entry; + char *driver_override; + struct mfd_cell *mfd_cell; + struct pdev_archdata archdata; +}; + +struct 
platform_driver { + int (*probe)(struct platform_device *); + int (*remove)(struct platform_device *); + void (*shutdown)(struct platform_device *); + int (*suspend)(struct platform_device *, pm_message_t); + int (*resume)(struct platform_device *); + struct device_driver driver; + const struct platform_device_id *id_table; + bool prevent_deferred_probe; +}; + +struct arm_pmu; + +struct pmu_hw_events { + struct perf_event *events[32]; + long unsigned int used_mask[1]; + raw_spinlock_t pmu_lock; + struct arm_pmu *percpu_pmu; + int irq; +}; + +struct arm_pmu { + struct pmu pmu; + cpumask_t supported_cpus; + char *name; + int pmuver; + irqreturn_t (*handle_irq)(struct arm_pmu *); + void (*enable)(struct perf_event *); + void (*disable)(struct perf_event *); + int (*get_event_idx)(struct pmu_hw_events *, struct perf_event *); + void (*clear_event_idx)(struct pmu_hw_events *, struct perf_event *); + int (*set_event_filter)(struct hw_perf_event *, struct perf_event_attr *); + u64 (*read_counter)(struct perf_event *); + void (*write_counter)(struct perf_event *, u64); + void (*start)(struct arm_pmu *); + void (*stop)(struct arm_pmu *); + void (*reset)(void *); + int (*map_event)(struct perf_event *); + int (*filter_match)(struct perf_event *); + int num_events; + bool secure_access; + long unsigned int pmceid_bitmap[1]; + long unsigned int pmceid_ext_bitmap[1]; + struct platform_device *plat_device; + struct pmu_hw_events *hw_events; + struct hlist_node node; + struct notifier_block cpu_pm_nb; + const struct attribute_group *attr_groups[5]; + u64 reg_pmmir; + long unsigned int acpi_cpuid; +}; + +enum armpmu_attr_groups { + ARMPMU_ATTR_GROUP_COMMON = 0, + ARMPMU_ATTR_GROUP_EVENTS = 1, + ARMPMU_ATTR_GROUP_FORMATS = 2, + ARMPMU_ATTR_GROUP_CAPS = 3, + ARMPMU_NR_ATTR_GROUPS = 4, +}; + +struct clock_read_data { + u64 epoch_ns; + u64 epoch_cyc; + u64 sched_clock_mask; + u64 (*read_sched_clock)(); + u32 mult; + u32 shift; +}; + +struct armv8pmu_probe_info { + struct arm_pmu *pmu; + bool present; +}; + +enum hw_breakpoint_ops { + HW_BREAKPOINT_INSTALL = 0, + HW_BREAKPOINT_UNINSTALL = 1, + HW_BREAKPOINT_RESTORE = 2, +}; + +struct cpu_suspend_ctx { + u64 ctx_regs[13]; + u64 sp; +}; + +struct sleep_stack_data { + struct cpu_suspend_ctx system_regs; + long unsigned int callee_saved_regs[12]; +}; + +typedef void *acpi_handle; + +typedef u64 phys_cpuid_t; + +struct thermal_cooling_device_ops; + +struct thermal_cooling_device { + int id; + char type[20]; + struct device device; + struct device_node *np; + void *devdata; + void *stats; + const struct thermal_cooling_device_ops *ops; + bool updated; + struct mutex lock; + struct list_head thermal_instances; + struct list_head node; +}; + +struct thermal_cooling_device_ops { + int (*get_max_state)(struct thermal_cooling_device *, long unsigned int *); + int (*get_cur_state)(struct thermal_cooling_device *, long unsigned int *); + int (*set_cur_state)(struct thermal_cooling_device *, long unsigned int); + int (*get_requested_power)(struct thermal_cooling_device *, u32 *); + int (*state2power)(struct thermal_cooling_device *, long unsigned int, u32 *); + int (*power2state)(struct thermal_cooling_device *, u32, long unsigned int *); +}; + +struct acpi_processor_cx { + u8 valid; + u8 type; + u32 address; + u8 entry_method; + u8 index; + u32 latency; + u8 bm_sts_skip; + char desc[32]; +}; + +struct acpi_lpi_state { + u32 min_residency; + u32 wake_latency; + u32 flags; + u32 arch_flags; + u32 res_cnt_freq; + u32 enable_parent_state; + u64 address; + u8 index; + u8 
entry_method; + char desc[32]; +}; + +struct acpi_processor_power { + int count; + union { + struct acpi_processor_cx states[8]; + struct acpi_lpi_state lpi_states[8]; + }; + int timer_broadcast_on_state; +}; + +struct acpi_psd_package { + u64 num_entries; + u64 revision; + u64 domain; + u64 coord_type; + u64 num_processors; +}; + +struct acpi_pct_register { + u8 descriptor; + u16 length; + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 reserved; + u64 address; +} __attribute__((packed)); + +struct acpi_processor_px { + u64 core_frequency; + u64 power; + u64 transition_latency; + u64 bus_master_latency; + u64 control; + u64 status; +}; + +struct acpi_processor_performance { + unsigned int state; + unsigned int platform_limit; + struct acpi_pct_register control_register; + struct acpi_pct_register status_register; + short: 16; + unsigned int state_count; + int: 32; + struct acpi_processor_px *states; + struct acpi_psd_package domain_info; + cpumask_var_t shared_cpu_map; + unsigned int shared_type; + int: 32; +} __attribute__((packed)); + +struct acpi_tsd_package { + u64 num_entries; + u64 revision; + u64 domain; + u64 coord_type; + u64 num_processors; +}; + +struct acpi_processor_tx_tss { + u64 freqpercentage; + u64 power; + u64 transition_latency; + u64 control; + u64 status; +}; + +struct acpi_processor_tx { + u16 power; + u16 performance; +}; + +struct acpi_processor; + +struct acpi_processor_throttling { + unsigned int state; + unsigned int platform_limit; + struct acpi_pct_register control_register; + struct acpi_pct_register status_register; + short: 16; + unsigned int state_count; + int: 32; + struct acpi_processor_tx_tss *states_tss; + struct acpi_tsd_package domain_info; + cpumask_var_t shared_cpu_map; + int (*acpi_processor_get_throttling)(struct acpi_processor *); + int (*acpi_processor_set_throttling)(struct acpi_processor *, int, bool); + u32 address; + u8 duty_offset; + u8 duty_width; + u8 tsd_valid_flag; + char: 8; + unsigned int shared_type; + struct acpi_processor_tx states[16]; + int: 32; +} __attribute__((packed)); + +struct acpi_processor_flags { + u8 power: 1; + u8 performance: 1; + u8 throttling: 1; + u8 limit: 1; + u8 bm_control: 1; + u8 bm_check: 1; + u8 has_cst: 1; + u8 has_lpi: 1; + u8 power_setup_done: 1; + u8 bm_rld_set: 1; + u8 need_hotplug_init: 1; +}; + +struct acpi_processor_lx { + int px; + int tx; +}; + +struct acpi_processor_limit { + struct acpi_processor_lx state; + struct acpi_processor_lx thermal; + struct acpi_processor_lx user; +}; + +struct acpi_processor { + acpi_handle handle; + u32 acpi_id; + phys_cpuid_t phys_id; + u32 id; + u32 pblk; + int performance_platform_limit; + int throttling_platform_limit; + struct acpi_processor_flags flags; + struct acpi_processor_power power; + struct acpi_processor_performance *performance; + struct acpi_processor_throttling throttling; + struct acpi_processor_limit limit; + struct thermal_cooling_device *cdev; + struct device *dev; + struct freq_qos_request perflib_req; + struct freq_qos_request thermal_req; +}; + +enum jump_label_type { + JUMP_LABEL_NOP = 0, + JUMP_LABEL_JMP = 1, +}; + +struct die_args { + struct pt_regs *regs; + const char *str; + long int err; + int trapnr; + int signr; +}; + +enum kgdb_bptype { + BP_BREAKPOINT = 0, + BP_HARDWARE_BREAKPOINT = 1, + BP_WRITE_WATCHPOINT = 2, + BP_READ_WATCHPOINT = 3, + BP_ACCESS_WATCHPOINT = 4, + BP_POKE_BREAKPOINT = 5, +}; + +enum kgdb_bpstate { + BP_UNDEFINED = 0, + BP_REMOVED = 1, + BP_SET = 2, + BP_ACTIVE = 3, +}; + +struct kgdb_bkpt { + long unsigned int 
bpt_addr; + unsigned char saved_instr[4]; + enum kgdb_bptype type; + enum kgdb_bpstate state; +}; + +struct dbg_reg_def_t { + char *name; + int size; + int offset; +}; + +struct kgdb_arch { + unsigned char gdb_bpt_instr[4]; + long unsigned int flags; + int (*set_breakpoint)(long unsigned int, char *); + int (*remove_breakpoint)(long unsigned int, char *); + int (*set_hw_breakpoint)(long unsigned int, int, enum kgdb_bptype); + int (*remove_hw_breakpoint)(long unsigned int, int, enum kgdb_bptype); + void (*disable_hw_break)(struct pt_regs *); + void (*remove_all_hw_break)(); + void (*correct_hw_break)(); + void (*enable_nmi)(bool); +}; + +struct screen_info { + __u8 orig_x; + __u8 orig_y; + __u16 ext_mem_k; + __u16 orig_video_page; + __u8 orig_video_mode; + __u8 orig_video_cols; + __u8 flags; + __u8 unused2; + __u16 orig_video_ega_bx; + __u16 unused3; + __u8 orig_video_lines; + __u8 orig_video_isVGA; + __u16 orig_video_points; + __u16 lfb_width; + __u16 lfb_height; + __u16 lfb_depth; + __u32 lfb_base; + __u32 lfb_size; + __u16 cl_magic; + __u16 cl_offset; + __u16 lfb_linelength; + __u8 red_size; + __u8 red_pos; + __u8 green_size; + __u8 green_pos; + __u8 blue_size; + __u8 blue_pos; + __u8 rsvd_size; + __u8 rsvd_pos; + __u16 vesapm_seg; + __u16 vesapm_off; + __u16 pages; + __u16 vesa_attributes; + __u32 capabilities; + __u32 ext_lfb_base; + __u8 _reserved[2]; +} __attribute__((packed)); + +struct pci_device_id { + __u32 vendor; + __u32 device; + __u32 subvendor; + __u32 subdevice; + __u32 class; + __u32 class_mask; + kernel_ulong_t driver_data; +}; + +struct resource_entry { + struct list_head node; + struct resource *res; + resource_size_t offset; + struct resource __res; +}; + +typedef u64 acpi_io_address; + +typedef u32 acpi_object_type; + +union acpi_object { + acpi_object_type type; + struct { + acpi_object_type type; + u64 value; + } integer; + struct { + acpi_object_type type; + u32 length; + char *pointer; + } string; + struct { + acpi_object_type type; + u32 length; + u8 *pointer; + } buffer; + struct { + acpi_object_type type; + u32 count; + union acpi_object *elements; + } package; + struct { + acpi_object_type type; + acpi_object_type actual_type; + acpi_handle handle; + } reference; + struct { + acpi_object_type type; + u32 proc_id; + acpi_io_address pblk_address; + u32 pblk_length; + } processor; + struct { + acpi_object_type type; + u32 system_level; + u32 resource_order; + } power_resource; +}; + +struct acpi_device; + +struct acpi_hotplug_profile { + struct kobject kobj; + int (*scan_dependent)(struct acpi_device *); + void (*notify_online)(struct acpi_device *); + bool enabled: 1; + bool demand_offline: 1; +}; + +struct acpi_device_status { + u32 present: 1; + u32 enabled: 1; + u32 show_in_ui: 1; + u32 functional: 1; + u32 battery_present: 1; + u32 reserved: 27; +}; + +struct acpi_device_flags { + u32 dynamic_status: 1; + u32 removable: 1; + u32 ejectable: 1; + u32 power_manageable: 1; + u32 match_driver: 1; + u32 initialized: 1; + u32 visited: 1; + u32 hotplug_notify: 1; + u32 is_dock_station: 1; + u32 of_compatible_ok: 1; + u32 coherent_dma: 1; + u32 cca_seen: 1; + u32 enumeration_by_parent: 1; + u32 reserved: 19; +}; + +typedef char acpi_bus_id[8]; + +struct acpi_pnp_type { + u32 hardware_id: 1; + u32 bus_address: 1; + u32 platform_id: 1; + u32 reserved: 29; +}; + +typedef u64 acpi_bus_address; + +typedef char acpi_device_name[40]; + +typedef char acpi_device_class[20]; + +struct acpi_device_pnp { + acpi_bus_id bus_id; + struct acpi_pnp_type type; + acpi_bus_address 
bus_address; + char *unique_id; + struct list_head ids; + acpi_device_name device_name; + acpi_device_class device_class; + union acpi_object *str_obj; +}; + +struct acpi_device_power_flags { + u32 explicit_get: 1; + u32 power_resources: 1; + u32 inrush_current: 1; + u32 power_removed: 1; + u32 ignore_parent: 1; + u32 dsw_present: 1; + u32 reserved: 26; +}; + +struct acpi_device_power_state { + struct { + u8 valid: 1; + u8 explicit_set: 1; + u8 reserved: 6; + } flags; + int power; + int latency; + struct list_head resources; +}; + +struct acpi_device_power { + int state; + struct acpi_device_power_flags flags; + struct acpi_device_power_state states[5]; +}; + +struct acpi_device_wakeup_flags { + u8 valid: 1; + u8 notifier_present: 1; +}; + +struct acpi_device_wakeup_context { + void (*func)(struct acpi_device_wakeup_context *); + struct device *dev; +}; + +struct acpi_device_wakeup { + acpi_handle gpe_device; + u64 gpe_number; + u64 sleep_state; + struct list_head resources; + struct acpi_device_wakeup_flags flags; + struct acpi_device_wakeup_context context; + struct wakeup_source *ws; + int prepare_count; + int enable_count; +}; + +struct acpi_device_perf_flags { + u8 reserved: 8; +}; + +struct acpi_device_perf_state; + +struct acpi_device_perf { + int state; + struct acpi_device_perf_flags flags; + int state_count; + struct acpi_device_perf_state *states; +}; + +struct acpi_device_dir { + struct proc_dir_entry *entry; +}; + +struct acpi_device_data { + const union acpi_object *pointer; + struct list_head properties; + const union acpi_object *of_compatible; + struct list_head subnodes; +}; + +struct acpi_scan_handler; + +struct acpi_hotplug_context; + +struct acpi_driver; + +struct acpi_gpio_mapping; + +struct acpi_device { + int device_type; + acpi_handle handle; + struct fwnode_handle fwnode; + struct acpi_device *parent; + struct list_head children; + struct list_head node; + struct list_head wakeup_list; + struct list_head del_list; + struct acpi_device_status status; + struct acpi_device_flags flags; + struct acpi_device_pnp pnp; + struct acpi_device_power power; + struct acpi_device_wakeup wakeup; + struct acpi_device_perf performance; + struct acpi_device_dir dir; + struct acpi_device_data data; + struct acpi_scan_handler *handler; + struct acpi_hotplug_context *hp; + struct acpi_driver *driver; + const struct acpi_gpio_mapping *driver_gpios; + void *driver_data; + struct device dev; + unsigned int physical_node_count; + unsigned int dep_unmet; + struct list_head physical_node_list; + struct mutex physical_node_lock; + void (*remove)(struct acpi_device *); +}; + +struct acpi_scan_handler { + const struct acpi_device_id *ids; + struct list_head list_node; + bool (*match)(const char *, const struct acpi_device_id **); + int (*attach)(struct acpi_device *, const struct acpi_device_id *); + void (*detach)(struct acpi_device *); + void (*bind)(struct device *); + void (*unbind)(struct device *); + struct acpi_hotplug_profile hotplug; +}; + +struct acpi_hotplug_context { + struct acpi_device *self; + int (*notify)(struct acpi_device *, u32); + void (*uevent)(struct acpi_device *, u32); + void (*fixup)(struct acpi_device *); +}; + +typedef int (*acpi_op_add)(struct acpi_device *); + +typedef int (*acpi_op_remove)(struct acpi_device *); + +typedef void (*acpi_op_notify)(struct acpi_device *, u32); + +struct acpi_device_ops { + acpi_op_add add; + acpi_op_remove remove; + acpi_op_notify notify; +}; + +struct acpi_driver { + char name[80]; + char class[80]; + const struct acpi_device_id 
*ids; + unsigned int flags; + struct acpi_device_ops ops; + struct device_driver drv; + struct module *owner; +}; + +struct acpi_device_perf_state { + struct { + u8 valid: 1; + u8 reserved: 7; + } flags; + u8 power; + u8 performance; + int latency; +}; + +struct acpi_gpio_params; + +struct acpi_gpio_mapping { + const char *name; + const struct acpi_gpio_params *data; + unsigned int size; + unsigned int quirks; +}; + +struct pci_bus; + +struct acpi_pci_root { + struct acpi_device *device; + struct pci_bus *bus; + u16 segment; + struct resource secondary; + u32 osc_support_set; + u32 osc_control_set; + phys_addr_t mcfg_addr; +}; + +typedef short unsigned int pci_bus_flags_t; + +struct pci_dev; + +struct pci_ops; + +struct msi_controller; + +struct pci_bus { + struct list_head node; + struct pci_bus *parent; + struct list_head children; + struct list_head devices; + struct pci_dev *self; + struct list_head slots; + struct resource *resource[4]; + struct list_head resources; + struct resource busn_res; + struct pci_ops *ops; + struct msi_controller *msi; + void *sysdata; + struct proc_dir_entry *procdir; + unsigned char number; + unsigned char primary; + unsigned char max_bus_speed; + unsigned char cur_bus_speed; + int domain_nr; + char name[48]; + short unsigned int bridge_ctl; + pci_bus_flags_t bus_flags; + struct device *bridge; + struct device dev; + struct bin_attribute *legacy_io; + struct bin_attribute *legacy_mem; + unsigned int is_added: 1; +}; + +struct acpi_gpio_params { + unsigned int crs_entry_index; + unsigned int line_index; + bool active_low; +}; + +struct hotplug_slot; + +struct pci_slot { + struct pci_bus *bus; + struct list_head list; + struct hotplug_slot *hotplug; + unsigned char number; + struct kobject kobj; +}; + +enum { + PCI_STD_RESOURCES = 0, + PCI_STD_RESOURCE_END = 5, + PCI_ROM_RESOURCE = 6, + PCI_IOV_RESOURCES = 7, + PCI_IOV_RESOURCE_END = 12, + PCI_BRIDGE_RESOURCES = 13, + PCI_BRIDGE_RESOURCE_END = 16, + PCI_NUM_RESOURCES = 17, + DEVICE_COUNT_RESOURCE = 17, +}; + +typedef int pci_power_t; + +typedef unsigned int pci_channel_state_t; + +typedef unsigned int pcie_reset_state_t; + +typedef short unsigned int pci_dev_flags_t; + +struct aer_stats; + +struct pci_driver; + +struct pcie_link_state; + +struct pci_vpd; + +struct pci_sriov; + +struct pci_dev { + struct list_head bus_list; + struct pci_bus *bus; + struct pci_bus *subordinate; + void *sysdata; + struct proc_dir_entry *procent; + struct pci_slot *slot; + unsigned int devfn; + short unsigned int vendor; + short unsigned int device; + short unsigned int subsystem_vendor; + short unsigned int subsystem_device; + unsigned int class; + u8 revision; + u8 hdr_type; + u16 aer_cap; + struct aer_stats *aer_stats; + u8 pcie_cap; + u8 msi_cap; + u8 msix_cap; + u8 pcie_mpss: 3; + u8 rom_base_reg; + u8 pin; + u16 pcie_flags_reg; + long unsigned int *dma_alias_mask; + struct pci_driver *driver; + u64 dma_mask; + struct device_dma_parameters dma_parms; + pci_power_t current_state; + unsigned int imm_ready: 1; + u8 pm_cap; + unsigned int pme_support: 5; + unsigned int pme_poll: 1; + unsigned int d1_support: 1; + unsigned int d2_support: 1; + unsigned int no_d1d2: 1; + unsigned int no_d3cold: 1; + unsigned int bridge_d3: 1; + unsigned int d3cold_allowed: 1; + unsigned int mmio_always_on: 1; + unsigned int wakeup_prepared: 1; + unsigned int runtime_d3cold: 1; + unsigned int skip_bus_pm: 1; + unsigned int ignore_hotplug: 1; + unsigned int hotplug_user_indicators: 1; + unsigned int clear_retrain_link: 1; + unsigned int 
d3hot_delay; + unsigned int d3cold_delay; + struct pcie_link_state *link_state; + unsigned int ltr_path: 1; + int l1ss; + unsigned int eetlp_prefix_path: 1; + pci_channel_state_t error_state; + struct device dev; + int cfg_size; + unsigned int irq; + struct resource resource[17]; + bool match_driver; + unsigned int transparent: 1; + unsigned int io_window: 1; + unsigned int pref_window: 1; + unsigned int pref_64_window: 1; + unsigned int multifunction: 1; + unsigned int is_busmaster: 1; + unsigned int no_msi: 1; + unsigned int no_64bit_msi: 1; + unsigned int block_cfg_access: 1; + unsigned int broken_parity_status: 1; + unsigned int irq_reroute_variant: 2; + unsigned int msi_enabled: 1; + unsigned int msix_enabled: 1; + unsigned int ari_enabled: 1; + unsigned int ats_enabled: 1; + unsigned int pasid_enabled: 1; + unsigned int pri_enabled: 1; + unsigned int is_managed: 1; + unsigned int needs_freset: 1; + unsigned int state_saved: 1; + unsigned int is_physfn: 1; + unsigned int is_virtfn: 1; + unsigned int reset_fn: 1; + unsigned int is_hotplug_bridge: 1; + unsigned int shpc_managed: 1; + unsigned int is_thunderbolt: 1; + unsigned int untrusted: 1; + unsigned int external_facing: 1; + unsigned int broken_intx_masking: 1; + unsigned int io_window_1k: 1; + unsigned int irq_managed: 1; + unsigned int non_compliant_bars: 1; + unsigned int is_probed: 1; + unsigned int link_active_reporting: 1; + unsigned int no_vf_scan: 1; + unsigned int no_command_memory: 1; + pci_dev_flags_t dev_flags; + atomic_t enable_cnt; + u32 saved_config_space[16]; + struct hlist_head saved_cap_space; + struct bin_attribute *rom_attr; + int rom_attr_enabled; + struct bin_attribute *res_attr[17]; + struct bin_attribute *res_attr_wc[17]; + unsigned int broken_cmd_compl: 1; + unsigned int ptm_root: 1; + unsigned int ptm_enabled: 1; + u8 ptm_granularity; + const struct attribute_group **msi_irq_groups; + struct pci_vpd *vpd; + u16 dpc_cap; + unsigned int dpc_rp_extensions: 1; + u8 dpc_rp_log_size; + union { + struct pci_sriov *sriov; + struct pci_dev *physfn; + }; + u16 ats_cap; + u8 ats_stu; + u16 pri_cap; + u32 pri_reqs_alloc; + unsigned int pasid_required: 1; + u16 pasid_cap; + u16 pasid_features; + u16 acs_cap; + phys_addr_t rom; + size_t romlen; + char *driver_override; + long unsigned int priv_flags; +}; + +struct pci_dynids { + spinlock_t lock; + struct list_head list; +}; + +struct pci_error_handlers; + +struct pci_driver { + struct list_head node; + const char *name; + const struct pci_device_id *id_table; + int (*probe)(struct pci_dev *, const struct pci_device_id *); + void (*remove)(struct pci_dev *); + int (*suspend)(struct pci_dev *, pm_message_t); + int (*resume)(struct pci_dev *); + void (*shutdown)(struct pci_dev *); + int (*sriov_configure)(struct pci_dev *, int); + const struct pci_error_handlers *err_handler; + const struct attribute_group **groups; + struct device_driver driver; + struct pci_dynids dynids; +}; + +struct pci_host_bridge { + struct device dev; + struct pci_bus *bus; + struct pci_ops *ops; + struct pci_ops *child_ops; + void *sysdata; + int busnr; + struct list_head windows; + struct list_head dma_ranges; + u8 (*swizzle_irq)(struct pci_dev *, u8 *); + int (*map_irq)(const struct pci_dev *, u8, u8); + void (*release_fn)(struct pci_host_bridge *); + void *release_data; + struct msi_controller *msi; + unsigned int ignore_reset_delay: 1; + unsigned int no_ext_tags: 1; + unsigned int native_aer: 1; + unsigned int native_pcie_hotplug: 1; + unsigned int native_shpc_hotplug: 1; + unsigned int 
native_pme: 1; + unsigned int native_ltr: 1; + unsigned int native_dpc: 1; + unsigned int preserve_config: 1; + unsigned int size_windows: 1; + resource_size_t (*align_resource)(struct pci_dev *, const struct resource *, resource_size_t, resource_size_t, resource_size_t); + long: 64; + long unsigned int private[0]; +}; + +struct pci_ops { + int (*add_bus)(struct pci_bus *); + void (*remove_bus)(struct pci_bus *); + void * (*map_bus)(struct pci_bus *, unsigned int, int); + int (*read)(struct pci_bus *, unsigned int, int, int, u32 *); + int (*write)(struct pci_bus *, unsigned int, int, int, u32); +}; + +typedef unsigned int pci_ers_result_t; + +struct pci_error_handlers { + pci_ers_result_t (*error_detected)(struct pci_dev *, pci_channel_state_t); + pci_ers_result_t (*mmio_enabled)(struct pci_dev *); + pci_ers_result_t (*slot_reset)(struct pci_dev *); + void (*reset_prepare)(struct pci_dev *); + void (*reset_done)(struct pci_dev *); + void (*resume)(struct pci_dev *); +}; + +struct acpi_pci_root_ops; + +struct acpi_pci_root_info { + struct acpi_pci_root *root; + struct acpi_device *bridge; + struct acpi_pci_root_ops *ops; + struct list_head resources; + char name[16]; +}; + +struct acpi_pci_root_ops { + struct pci_ops *pci_ops; + int (*init_info)(struct acpi_pci_root_info *); + void (*release_info)(struct acpi_pci_root_info *); + int (*prepare_resources)(struct acpi_pci_root_info *); +}; + +struct pci_config_window; + +struct pci_ecam_ops { + unsigned int bus_shift; + struct pci_ops pci_ops; + int (*init)(struct pci_config_window *); +}; + +struct pci_config_window { + struct resource res; + struct resource busr; + void *priv; + const struct pci_ecam_ops *ops; + union { + void *win; + void **winp; + }; + struct device *parent; +}; + +struct acpi_pci_generic_root_info { + struct acpi_pci_root_info common; + struct pci_config_window *cfg; +}; + +struct trace_event_raw_instruction_emulation { + struct trace_entry ent; + u32 __data_loc_instr; + u64 addr; + char __data[0]; +}; + +struct trace_event_data_offsets_instruction_emulation { + u32 instr; +}; + +typedef void (*btf_trace_instruction_emulation)(void *, const char *, u64); + +enum insn_emulation_mode { + INSN_UNDEF = 0, + INSN_EMULATE = 1, + INSN_HW = 2, +}; + +enum legacy_insn_status { + INSN_DEPRECATED = 0, + INSN_OBSOLETE = 1, +}; + +struct insn_emulation_ops { + const char *name; + enum legacy_insn_status status; + struct undef_hook *hooks; + int (*set_hw_mode)(bool); +}; + +struct insn_emulation { + struct list_head node; + struct insn_emulation_ops *ops; + int current_mode; + int min; + int max; +}; + +typedef u64 acpi_size; + +typedef u64 acpi_physical_address; + +typedef u32 acpi_status; + +struct acpi_table_header { + char signature[4]; + u32 length; + u8 revision; + u8 checksum; + char oem_id[6]; + char oem_table_id[8]; + u32 oem_revision; + char asl_compiler_id[4]; + u32 asl_compiler_revision; +}; + +struct acpi_generic_address { + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 access_width; + u64 address; +} __attribute__((packed)); + +struct acpi_table_fadt { + struct acpi_table_header header; + u32 facs; + u32 dsdt; + u8 model; + u8 preferred_profile; + u16 sci_interrupt; + u32 smi_command; + u8 acpi_enable; + u8 acpi_disable; + u8 s4_bios_request; + u8 pstate_control; + u32 pm1a_event_block; + u32 pm1b_event_block; + u32 pm1a_control_block; + u32 pm1b_control_block; + u32 pm2_control_block; + u32 pm_timer_block; + u32 gpe0_block; + u32 gpe1_block; + u8 pm1_event_length; + u8 pm1_control_length; + u8 pm2_control_length; + 
u8 pm_timer_length; + u8 gpe0_block_length; + u8 gpe1_block_length; + u8 gpe1_base; + u8 cst_control; + u16 c2_latency; + u16 c3_latency; + u16 flush_size; + u16 flush_stride; + u8 duty_offset; + u8 duty_width; + u8 day_alarm; + u8 month_alarm; + u8 century; + u16 boot_flags; + u8 reserved; + u32 flags; + struct acpi_generic_address reset_register; + u8 reset_value; + u16 arm_boot_flags; + u8 minor_revision; + u64 Xfacs; + u64 Xdsdt; + struct acpi_generic_address xpm1a_event_block; + struct acpi_generic_address xpm1b_event_block; + struct acpi_generic_address xpm1a_control_block; + struct acpi_generic_address xpm1b_control_block; + struct acpi_generic_address xpm2_control_block; + struct acpi_generic_address xpm_timer_block; + struct acpi_generic_address xgpe0_block; + struct acpi_generic_address xgpe1_block; + struct acpi_generic_address sleep_control; + struct acpi_generic_address sleep_status; + u64 hypervisor_id; +} __attribute__((packed)); + +enum acpi_srat_type { + ACPI_SRAT_TYPE_CPU_AFFINITY = 0, + ACPI_SRAT_TYPE_MEMORY_AFFINITY = 1, + ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY = 2, + ACPI_SRAT_TYPE_GICC_AFFINITY = 3, + ACPI_SRAT_TYPE_GIC_ITS_AFFINITY = 4, + ACPI_SRAT_TYPE_GENERIC_AFFINITY = 5, + ACPI_SRAT_TYPE_RESERVED = 6, +}; + +struct acpi_srat_gicc_affinity { + struct acpi_subtable_header header; + u32 proximity_domain; + u32 acpi_processor_uid; + u32 flags; + u32 clock_domain; +} __attribute__((packed)); + +struct parking_protocol_mailbox { + __le32 cpu_id; + __le32 reserved; + __le64 entry_point; +}; + +struct cpu_mailbox_entry { + struct parking_protocol_mailbox *mailbox; + phys_addr_t mailbox_addr; + u8 version; + u8 gic_cpu_id; +}; + +enum { + MEMREMAP_WB = 1, + MEMREMAP_WT = 2, + MEMREMAP_WC = 4, + MEMREMAP_ENC = 8, + MEMREMAP_DEC = 16, +}; + +struct pv_time_ops { + long long unsigned int (*steal_clock)(int); +}; + +struct paravirt_patch_template { + struct pv_time_ops time; +}; + +struct pvclock_vcpu_stolen_time { + __le32 revision; + __le32 attributes; + __le64 stolen_time; + u8 padding[48]; +}; + +struct pv_time_stolen_time_region { + struct pvclock_vcpu_stolen_time *kaddr; +}; + +typedef u64 p4dval_t; + +typedef struct { + pgd_t pgd; +} p4d_t; + +enum pageflags { + PG_locked = 0, + PG_referenced = 1, + PG_uptodate = 2, + PG_dirty = 3, + PG_lru = 4, + PG_active = 5, + PG_workingset = 6, + PG_waiters = 7, + PG_error = 8, + PG_slab = 9, + PG_owner_priv_1 = 10, + PG_arch_1 = 11, + PG_reserved = 12, + PG_private = 13, + PG_private_2 = 14, + PG_writeback = 15, + PG_head = 16, + PG_mappedtodisk = 17, + PG_reclaim = 18, + PG_swapbacked = 19, + PG_unevictable = 20, + PG_mlocked = 21, + PG_hwpoison = 22, + PG_young = 23, + PG_idle = 24, + PG_arch_2 = 25, + __NR_PAGEFLAGS = 26, + PG_checked = 10, + PG_swapcache = 10, + PG_fscache = 14, + PG_pinned = 10, + PG_savepinned = 3, + PG_foreign = 10, + PG_xen_remapped = 10, + PG_slob_free = 13, + PG_double_map = 6, + PG_isolated = 18, + PG_reported = 2, +}; + +struct mem_section_usage { + long unsigned int subsection_map[8]; + long unsigned int pageblock_flags[0]; +}; + +struct page_ext; + +struct mem_section { + long unsigned int section_mem_map; + struct mem_section_usage *usage; + struct page_ext *page_ext; + long unsigned int pad; +}; + +struct page_ext { + long unsigned int flags; +}; + +struct xa_node { + unsigned char shift; + unsigned char offset; + unsigned char count; + unsigned char nr_values; + struct xa_node *parent; + struct xarray *array; + union { + struct list_head private_list; + struct callback_head callback_head; + }; + void 
*slots[64]; + union { + long unsigned int tags[3]; + long unsigned int marks[3]; + }; +}; + +typedef void (*xa_update_node_t)(struct xa_node *); + +struct xa_state { + struct xarray *xa; + long unsigned int xa_index; + unsigned char xa_shift; + unsigned char xa_sibs; + unsigned char xa_offset; + unsigned char xa_pad; + struct xa_node *xa_node; + struct xa_node *xa_alloc; + xa_update_node_t xa_update; +}; + +struct pbe { + void *address; + void *orig_address; + struct pbe *next; +}; + +struct arch_hibernate_hdr_invariants { + char uts_version[65]; +}; + +struct arch_hibernate_hdr { + struct arch_hibernate_hdr_invariants invariants; + phys_addr_t ttbr1_el1; + void (*reenter_kernel)(); + phys_addr_t __hyp_stub_vectors; + u64 sleep_cpu_mpidr; +}; + +enum { + IRQD_TRIGGER_MASK = 15, + IRQD_SETAFFINITY_PENDING = 256, + IRQD_ACTIVATED = 512, + IRQD_NO_BALANCING = 1024, + IRQD_PER_CPU = 2048, + IRQD_AFFINITY_SET = 4096, + IRQD_LEVEL = 8192, + IRQD_WAKEUP_STATE = 16384, + IRQD_MOVE_PCNTXT = 32768, + IRQD_IRQ_DISABLED = 65536, + IRQD_IRQ_MASKED = 131072, + IRQD_IRQ_INPROGRESS = 262144, + IRQD_WAKEUP_ARMED = 524288, + IRQD_FORWARDED_TO_VCPU = 1048576, + IRQD_AFFINITY_MANAGED = 2097152, + IRQD_IRQ_STARTED = 4194304, + IRQD_MANAGED_SHUTDOWN = 8388608, + IRQD_SINGLE_TARGET = 16777216, + IRQD_DEFAULT_TRIGGER_SET = 33554432, + IRQD_CAN_RESERVE = 67108864, + IRQD_MSI_NOMASK_QUIRK = 134217728, + IRQD_HANDLE_ENFORCE_IRQCTX = 268435456, + IRQD_AFFINITY_ON_ACTIVATE = 536870912, + IRQD_IRQ_ENABLED_ON_SUSPEND = 1073741824, +}; + +struct kimage_arch { + void *dtb; + long unsigned int dtb_mem; + void *elf_headers; + long unsigned int elf_headers_mem; + long unsigned int elf_headers_sz; +}; + +typedef int kexec_probe_t(const char *, long unsigned int); + +struct kimage; + +typedef void *kexec_load_t(struct kimage *, char *, long unsigned int, char *, long unsigned int, char *, long unsigned int); + +typedef int kexec_cleanup_t(void *); + +struct kexec_file_ops { + kexec_probe_t *probe; + kexec_load_t *load; + kexec_cleanup_t *cleanup; +}; + +typedef long unsigned int kimage_entry_t; + +struct kexec_segment { + union { + void *buf; + void *kbuf; + }; + size_t bufsz; + long unsigned int mem; + size_t memsz; +}; + +struct purgatory_info { + const Elf64_Ehdr *ehdr; + Elf64_Shdr *sechdrs; + void *purgatory_buf; +}; + +struct kimage { + kimage_entry_t head; + kimage_entry_t *entry; + kimage_entry_t *last_entry; + long unsigned int start; + struct page *control_code_page; + struct page *swap_page; + void *vmcoreinfo_data_copy; + long unsigned int nr_segments; + struct kexec_segment segment[16]; + struct list_head control_pages; + struct list_head dest_pages; + struct list_head unusable_pages; + long unsigned int control_page; + unsigned int type: 1; + unsigned int preserve_context: 1; + unsigned int file_mode: 1; + struct kimage_arch arch; + void *kernel_buf; + long unsigned int kernel_buf_len; + void *initrd_buf; + long unsigned int initrd_buf_len; + char *cmdline_buf; + long unsigned int cmdline_buf_len; + const struct kexec_file_ops *fops; + void *image_loader_data; + struct purgatory_info purgatory_info; +}; + +typedef u8 uint8_t; + +typedef u64 uint64_t; + +struct kexec_buf { + struct kimage *image; + void *buffer; + long unsigned int bufsz; + long unsigned int mem; + long unsigned int memsz; + long unsigned int buf_align; + long unsigned int buf_min; + long unsigned int buf_max; + bool top_down; +}; + +struct crash_mem_range { + u64 start; + u64 end; +}; + +struct crash_mem { + unsigned int max_nr_ranges; + unsigned 
int nr_ranges; + struct crash_mem_range ranges[0]; +}; + +typedef __be32 fdt32_t; + +typedef __be64 fdt64_t; + +struct fdt_header { + fdt32_t magic; + fdt32_t totalsize; + fdt32_t off_dt_struct; + fdt32_t off_dt_strings; + fdt32_t off_mem_rsvmap; + fdt32_t version; + fdt32_t last_comp_version; + fdt32_t boot_cpuid_phys; + fdt32_t size_dt_strings; + fdt32_t size_dt_struct; +}; + +enum key_being_used_for { + VERIFYING_MODULE_SIGNATURE = 0, + VERIFYING_FIRMWARE_SIGNATURE = 1, + VERIFYING_KEXEC_PE_SIGNATURE = 2, + VERIFYING_KEY_SIGNATURE = 3, + VERIFYING_KEY_SELF_SIGNATURE = 4, + VERIFYING_UNSPECIFIED_SIGNATURE = 5, + NR__KEY_BEING_USED_FOR = 6, +}; + +struct arm64_image_header { + __le32 code0; + __le32 code1; + __le64 text_offset; + __le64 image_size; + __le64 flags; + __le64 res2; + __le64 res3; + __le64 res4; + __le32 magic; + __le32 res5; +}; + +typedef struct { + long unsigned int val; +} swp_entry_t; + +typedef u32 probe_opcode_t; + +typedef void probes_handler_t(u32, long int, struct pt_regs *); + +struct arch_probe_insn { + probe_opcode_t *insn; + pstate_check_t *pstate_cc; + probes_handler_t *handler; + long unsigned int restore; +}; + +typedef u32 kprobe_opcode_t; + +struct arch_specific_insn { + struct arch_probe_insn api; +}; + +struct kprobe; + +struct prev_kprobe { + struct kprobe *kp; + unsigned int status; +}; + +typedef int (*kprobe_pre_handler_t)(struct kprobe *, struct pt_regs *); + +typedef void (*kprobe_post_handler_t)(struct kprobe *, struct pt_regs *, long unsigned int); + +typedef int (*kprobe_fault_handler_t)(struct kprobe *, struct pt_regs *, int); + +struct kprobe { + struct hlist_node hlist; + struct list_head list; + long unsigned int nmissed; + kprobe_opcode_t *addr; + const char *symbol_name; + unsigned int offset; + kprobe_pre_handler_t pre_handler; + kprobe_post_handler_t post_handler; + kprobe_fault_handler_t fault_handler; + kprobe_opcode_t opcode; + struct arch_specific_insn ainsn; + u32 flags; +}; + +struct kprobe_step_ctx { + long unsigned int ss_pending; + long unsigned int match_addr; +}; + +struct kprobe_ctlblk { + unsigned int kprobe_status; + long unsigned int saved_irqflag; + struct prev_kprobe prev_kprobe; + struct kprobe_step_ctx ss_ctx; +}; + +struct kretprobe_instance; + +typedef int (*kretprobe_handler_t)(struct kretprobe_instance *, struct pt_regs *); + +struct kretprobe; + +struct kretprobe_instance { + union { + struct hlist_node hlist; + struct callback_head rcu; + }; + struct kretprobe *rp; + kprobe_opcode_t *ret_addr; + struct task_struct *task; + void *fp; + char data[0]; +}; + +struct kretprobe { + struct kprobe kp; + kretprobe_handler_t handler; + kretprobe_handler_t entry_handler; + int maxactive; + int nmissed; + size_t data_size; + struct hlist_head free_instances; + raw_spinlock_t lock; +}; + +struct kprobe_insn_cache { + struct mutex mutex; + void * (*alloc)(); + void (*free)(void *); + const char *sym; + struct list_head pages; + size_t insn_size; + int nr_garbage; +}; + +enum probe_insn { + INSN_REJECTED = 0, + INSN_GOOD_NO_SLOT = 1, + INSN_GOOD = 2, +}; + +enum aarch64_insn_special_register { + AARCH64_INSN_SPCLREG_SPSR_EL1 = 49664, + AARCH64_INSN_SPCLREG_ELR_EL1 = 49665, + AARCH64_INSN_SPCLREG_SP_EL0 = 49672, + AARCH64_INSN_SPCLREG_SPSEL = 49680, + AARCH64_INSN_SPCLREG_CURRENTEL = 49682, + AARCH64_INSN_SPCLREG_DAIF = 55825, + AARCH64_INSN_SPCLREG_NZCV = 55824, + AARCH64_INSN_SPCLREG_FPCR = 55840, + AARCH64_INSN_SPCLREG_DSPSR_EL0 = 55848, + AARCH64_INSN_SPCLREG_DLR_EL0 = 55849, + AARCH64_INSN_SPCLREG_SPSR_EL2 = 57856, + 
AARCH64_INSN_SPCLREG_ELR_EL2 = 57857, + AARCH64_INSN_SPCLREG_SP_EL1 = 57864, + AARCH64_INSN_SPCLREG_SPSR_INQ = 57880, + AARCH64_INSN_SPCLREG_SPSR_ABT = 57881, + AARCH64_INSN_SPCLREG_SPSR_UND = 57882, + AARCH64_INSN_SPCLREG_SPSR_FIQ = 57883, + AARCH64_INSN_SPCLREG_SPSR_EL3 = 61952, + AARCH64_INSN_SPCLREG_ELR_EL3 = 61953, + AARCH64_INSN_SPCLREG_SP_EL2 = 61968, +}; + +struct arch_uprobe { + union { + u8 insn[4]; + u8 ixol[4]; + }; + struct arch_probe_insn api; + bool simulate; +}; + +enum rp_check { + RP_CHECK_CALL = 0, + RP_CHECK_CHAIN_CALL = 1, + RP_CHECK_RET = 2, +}; + +struct iommu_fault_param; + +struct iommu_fwspec; + +struct dev_iommu { + struct mutex lock; + struct iommu_fault_param *fault_param; + struct iommu_fwspec *fwspec; + struct iommu_device *iommu_dev; + void *priv; +}; + +struct sg_table { + struct scatterlist *sgl; + unsigned int nents; + unsigned int orig_nents; +}; + +struct of_phandle_args { + struct device_node *np; + int args_count; + uint32_t args[16]; +}; + +struct iommu_fault_unrecoverable { + __u32 reason; + __u32 flags; + __u32 pasid; + __u32 perm; + __u64 addr; + __u64 fetch_addr; +}; + +struct iommu_fault_page_request { + __u32 flags; + __u32 pasid; + __u32 grpid; + __u32 perm; + __u64 addr; + __u64 private_data[2]; +}; + +struct iommu_fault { + __u32 type; + __u32 padding; + union { + struct iommu_fault_unrecoverable event; + struct iommu_fault_page_request prm; + __u8 padding2[56]; + }; +}; + +struct iommu_page_response { + __u32 argsz; + __u32 version; + __u32 flags; + __u32 pasid; + __u32 grpid; + __u32 code; +}; + +struct iommu_inv_addr_info { + __u32 flags; + __u32 archid; + __u64 pasid; + __u64 addr; + __u64 granule_size; + __u64 nb_granules; +}; + +struct iommu_inv_pasid_info { + __u32 flags; + __u32 archid; + __u64 pasid; +}; + +struct iommu_cache_invalidate_info { + __u32 argsz; + __u32 version; + __u8 cache; + __u8 granularity; + __u8 padding[6]; + union { + struct iommu_inv_pasid_info pasid_info; + struct iommu_inv_addr_info addr_info; + } granu; +}; + +struct iommu_gpasid_bind_data_vtd { + __u64 flags; + __u32 pat; + __u32 emt; +}; + +struct iommu_gpasid_bind_data { + __u32 argsz; + __u32 version; + __u32 format; + __u32 addr_width; + __u64 flags; + __u64 gpgd; + __u64 hpasid; + __u64 gpasid; + __u8 padding[8]; + union { + struct iommu_gpasid_bind_data_vtd vtd; + } vendor; +}; + +typedef int (*iommu_fault_handler_t)(struct iommu_domain *, struct device *, long unsigned int, int, void *); + +struct iommu_domain_geometry { + dma_addr_t aperture_start; + dma_addr_t aperture_end; + bool force_aperture; +}; + +struct iommu_domain { + unsigned int type; + const struct iommu_ops *ops; + long unsigned int pgsize_bitmap; + iommu_fault_handler_t handler; + void *handler_token; + struct iommu_domain_geometry geometry; + void *iova_cookie; +}; + +typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *); + +enum iommu_resv_type { + IOMMU_RESV_DIRECT = 0, + IOMMU_RESV_DIRECT_RELAXABLE = 1, + IOMMU_RESV_RESERVED = 2, + IOMMU_RESV_MSI = 3, + IOMMU_RESV_SW_MSI = 4, +}; + +struct iommu_resv_region { + struct list_head list; + phys_addr_t start; + size_t length; + int prot; + enum iommu_resv_type type; +}; + +struct iommu_iotlb_gather { + long unsigned int start; + long unsigned int end; + size_t pgsize; +}; + +struct iommu_device { + struct list_head list; + const struct iommu_ops *ops; + struct fwnode_handle *fwnode; + struct device *dev; +}; + +struct iommu_sva { + struct device *dev; +}; + +struct iommu_fault_event { + struct iommu_fault fault; + 
struct list_head list; +}; + +struct iommu_fault_param { + iommu_dev_fault_handler_t handler; + void *data; + struct list_head faults; + struct mutex lock; +}; + +struct iommu_fwspec { + const struct iommu_ops *ops; + struct fwnode_handle *iommu_fwnode; + u32 flags; + u32 num_pasid_bits; + unsigned int num_ids; + u32 ids[0]; +}; + +enum xen_domain_type { + XEN_NATIVE = 0, + XEN_PV_DOMAIN = 1, + XEN_HVM_DOMAIN = 2, +}; + +struct hstate { + int next_nid_to_alloc; + int next_nid_to_free; + unsigned int order; + long unsigned int mask; + long unsigned int max_huge_pages; + long unsigned int nr_huge_pages; + long unsigned int free_huge_pages; + long unsigned int resv_huge_pages; + long unsigned int surplus_huge_pages; + long unsigned int nr_overcommit_huge_pages; + struct list_head hugepage_activelist; + struct list_head hugepage_freelists[128]; + unsigned int nr_huge_pages_node[128]; + unsigned int free_huge_pages_node[128]; + unsigned int surplus_huge_pages_node[128]; + struct cftype cgroup_files_dfl[7]; + struct cftype cgroup_files_legacy[9]; + char name[32]; +}; + +struct fault_info { + int (*fn)(long unsigned int, unsigned int, struct pt_regs *); + int sig; + int code; + const char *name; +}; + +enum swiotlb_force { + SWIOTLB_NORMAL = 0, + SWIOTLB_FORCE = 1, + SWIOTLB_NO_FORCE = 2, +}; + +struct mhp_params { + struct vmem_altmap *altmap; + pgprot_t pgprot; +}; + +struct memory_notify { + long unsigned int start_pfn; + long unsigned int nr_pages; + int status_change_nid_normal; + int status_change_nid_high; + int status_change_nid; +}; + +struct page_change_data { + pgprot_t set_mask; + pgprot_t clear_mask; +}; + +struct hugepage_subpool { + spinlock_t lock; + long int count; + long int max_hpages; + long int used_hpages; + struct hstate *hstate; + long int min_hpages; + long int rsv_hpages; +}; + +struct hugetlbfs_sb_info { + long int max_inodes; + long int free_inodes; + spinlock_t stat_lock; + struct hstate *hstate; + struct hugepage_subpool *spool; + kuid_t uid; + kgid_t gid; + umode_t mode; +}; + +typedef __kernel_long_t __kernel_off_t; + +typedef __kernel_off_t off_t; + +enum { + BPF_REG_0 = 0, + BPF_REG_1 = 1, + BPF_REG_2 = 2, + BPF_REG_3 = 3, + BPF_REG_4 = 4, + BPF_REG_5 = 5, + BPF_REG_6 = 6, + BPF_REG_7 = 7, + BPF_REG_8 = 8, + BPF_REG_9 = 9, + BPF_REG_10 = 10, + __MAX_BPF_REG = 11, +}; + +enum { + DUMP_PREFIX_NONE = 0, + DUMP_PREFIX_ADDRESS = 1, + DUMP_PREFIX_OFFSET = 2, +}; + +struct bpf_binary_header { + u32 pages; + int: 32; + u8 image[0]; +}; + +struct jit_ctx { + const struct bpf_prog *prog; + int idx; + int epilogue_offset; + int *offset; + int exentry_idx; + __le32 *image; + u32 stack_size; +}; + +struct arm64_jit_data { + struct bpf_binary_header *header; + u8 *image; + struct jit_ctx ctx; +}; + +typedef long unsigned int ulong; + +typedef u64 gpa_t; + +typedef u64 gfn_t; + +typedef u64 hpa_t; + +typedef u64 hfn_t; + +typedef hfn_t kvm_pfn_t; + +struct kvm_memory_slot; + +struct gfn_to_hva_cache { + u64 generation; + gpa_t gpa; + long unsigned int hva; + long unsigned int len; + struct kvm_memory_slot *memslot; +}; + +struct kvm_arch_memory_slot {}; + +struct kvm_memory_slot { + gfn_t base_gfn; + long unsigned int npages; + long unsigned int *dirty_bitmap; + struct kvm_arch_memory_slot arch; + long unsigned int userspace_addr; + u32 flags; + short int id; + u16 as_id; +}; + +struct gfn_to_pfn_cache { + u64 generation; + gfn_t gfn; + kvm_pfn_t pfn; + bool dirty; +}; + +struct kvm_mmu_memory_cache { + int nobjs; + gfp_t gfp_zero; + struct kmem_cache *kmem_cache; + void 
*objects[40]; +}; + +struct kvm_vcpu; + +struct kvm_io_device; + +struct kvm_io_device_ops { + int (*read)(struct kvm_vcpu *, struct kvm_io_device *, gpa_t, int, void *); + int (*write)(struct kvm_vcpu *, struct kvm_io_device *, gpa_t, int, const void *); + void (*destructor)(struct kvm_io_device *); +}; + +struct preempt_ops; + +struct preempt_notifier { + struct hlist_node link; + struct preempt_ops *ops; +}; + +struct kvm_vcpu_stat { + u64 halt_successful_poll; + u64 halt_attempted_poll; + u64 halt_poll_success_ns; + u64 halt_poll_fail_ns; + u64 halt_poll_invalid; + u64 halt_wakeup; + u64 hvc_exit_stat; + u64 wfe_exit_stat; + u64 wfi_exit_stat; + u64 mmio_exit_user; + u64 mmio_exit_kernel; + u64 exits; +}; + +struct kvm_mmio_fragment { + gpa_t gpa; + void *data; + unsigned int len; +}; + +struct kvm_cpu_context { + struct user_pt_regs regs; + u64 spsr_abt; + u64 spsr_und; + u64 spsr_irq; + u64 spsr_fiq; + struct user_fpsimd_state fp_regs; + union { + u64 sys_regs[119]; + u32 copro[238]; + }; + struct kvm_vcpu *__hyp_running_vcpu; +}; + +struct kvm_vcpu_fault_info { + u32 esr_el2; + u64 far_el2; + u64 hpfar_el2; + u64 disr_el1; +}; + +struct kvm_guest_debug_arch { + __u64 dbg_bcr[16]; + __u64 dbg_bvr[16]; + __u64 dbg_wcr[16]; + __u64 dbg_wvr[16]; +}; + +struct vgic_v2_cpu_if { + u32 vgic_hcr; + u32 vgic_vmcr; + u32 vgic_apr; + u32 vgic_lr[64]; + unsigned int used_lrs; +}; + +struct its_vm; + +struct its_vpe { + struct page *vpt_page; + struct its_vm *its_vm; + atomic_t vlpi_count; + int irq; + irq_hw_number_t vpe_db_lpi; + bool resident; + union { + struct { + int vpe_proxy_event; + bool idai; + }; + struct { + struct fwnode_handle *fwnode; + struct irq_domain *sgi_domain; + struct { + u8 priority; + bool enabled; + bool group; + } sgi_config[16]; + atomic_t vmapp_count; + }; + }; + raw_spinlock_t vpe_lock; + u16 col_idx; + u16 vpe_id; + bool pending_last; +}; + +struct vgic_v3_cpu_if { + u32 vgic_hcr; + u32 vgic_vmcr; + u32 vgic_sre; + u32 vgic_ap0r[4]; + u32 vgic_ap1r[4]; + u64 vgic_lr[16]; + struct its_vpe its_vpe; + unsigned int used_lrs; +}; + +enum vgic_irq_config { + VGIC_CONFIG_EDGE = 0, + VGIC_CONFIG_LEVEL = 1, +}; + +struct vgic_irq { + raw_spinlock_t irq_lock; + struct list_head lpi_list; + struct list_head ap_list; + struct kvm_vcpu *vcpu; + struct kvm_vcpu *target_vcpu; + u32 intid; + bool line_level; + bool pending_latch; + bool active; + bool enabled; + bool hw; + struct kref refcount; + u32 hwintid; + unsigned int host_irq; + union { + u8 targets; + u32 mpidr; + }; + u8 source; + u8 active_source; + u8 priority; + u8 group; + enum vgic_irq_config config; + bool (*get_input_level)(int); + void *owner; +}; + +enum iodev_type { + IODEV_CPUIF = 0, + IODEV_DIST = 1, + IODEV_REDIST = 2, + IODEV_ITS = 3, +}; + +struct kvm_io_device { + const struct kvm_io_device_ops *ops; +}; + +struct vgic_its; + +struct vgic_register_region; + +struct vgic_io_device { + gpa_t base_addr; + union { + struct kvm_vcpu *redist_vcpu; + struct vgic_its *its; + }; + const struct vgic_register_region *regions; + enum iodev_type iodev_type; + int nr_regions; + struct kvm_io_device dev; +}; + +struct vgic_redist_region; + +struct vgic_cpu { + union { + struct vgic_v2_cpu_if vgic_v2; + struct vgic_v3_cpu_if vgic_v3; + }; + struct vgic_irq private_irqs[32]; + raw_spinlock_t ap_list_lock; + struct list_head ap_list_head; + struct vgic_io_device rd_iodev; + struct vgic_redist_region *rdreg; + u64 pendbaser; + bool lpis_enabled; + u32 num_pri_bits; + u32 num_id_bits; +}; + +struct kvm_irq_level { + union { + 
__u32 irq; + __s32 status; + }; + __u32 level; +}; + +struct arch_timer_context { + struct kvm_vcpu *vcpu; + struct kvm_irq_level irq; + struct hrtimer hrtimer; + bool loaded; + u32 host_timer_irq; + u32 host_timer_irq_flags; +}; + +struct arch_timer_cpu { + struct arch_timer_context timers[2]; + struct hrtimer bg_timer; + bool enabled; +}; + +struct kvm_pmc { + u8 idx; + struct perf_event *perf_event; +}; + +struct kvm_pmu { + int irq_num; + struct kvm_pmc pmc[32]; + long unsigned int chained[1]; + bool ready; + bool created; + bool irq_level; + struct irq_work overflow_work; +}; + +struct vcpu_reset_state { + long unsigned int pc; + long unsigned int r0; + bool be; + bool reset; +}; + +struct kvm_s2_mmu; + +struct kvm_vcpu_arch { + struct kvm_cpu_context ctxt; + void *sve_state; + unsigned int sve_max_vl; + struct kvm_s2_mmu *hw_mmu; + u64 hcr_el2; + u32 mdcr_el2; + struct kvm_vcpu_fault_info fault; + u64 workaround_flags; + u64 flags; + struct kvm_guest_debug_arch *debug_ptr; + struct kvm_guest_debug_arch vcpu_debug_state; + struct kvm_guest_debug_arch external_debug_state; + struct thread_info *host_thread_info; + struct user_fpsimd_state *host_fpsimd_state; + struct { + struct kvm_guest_debug_arch regs; + u64 pmscr_el1; + } host_debug_state; + struct vgic_cpu vgic_cpu; + struct arch_timer_cpu timer_cpu; + struct kvm_pmu pmu; + struct { + u32 mdscr_el1; + } guest_debug_preserved; + bool power_off; + bool pause; + struct kvm_mmu_memory_cache mmu_page_cache; + int target; + long unsigned int features[1]; + bool has_run_once; + u64 vsesr_el2; + struct vcpu_reset_state reset_state; + bool sysregs_loaded_on_cpu; + struct { + u64 last_steal; + gpa_t base; + } steal; +}; + +struct kvm; + +struct kvm_run; + +struct kvm_vcpu { + struct kvm *kvm; + struct preempt_notifier preempt_notifier; + int cpu; + int vcpu_id; + int vcpu_idx; + int srcu_idx; + int mode; + u64 requests; + long unsigned int guest_debug; + int pre_pcpu; + struct list_head blocked_vcpu_list; + struct mutex mutex; + struct kvm_run *run; + struct rcuwait wait; + struct pid *pid; + int sigset_active; + sigset_t sigset; + struct kvm_vcpu_stat stat; + unsigned int halt_poll_ns; + bool valid_wakeup; + int mmio_needed; + int mmio_read_completed; + int mmio_is_write; + int mmio_cur_fragment; + int mmio_nr_fragments; + struct kvm_mmio_fragment mmio_fragments[2]; + struct { + bool in_spin_loop; + bool dy_eligible; + } spin_loop; + bool preempted; + bool ready; + struct kvm_vcpu_arch arch; +}; + +struct preempt_ops { + void (*sched_in)(struct preempt_notifier *, int); + void (*sched_out)(struct preempt_notifier *, struct task_struct *); +}; + +struct trace_print_flags { + long unsigned int mask; + const char *name; +}; + +enum mmu_notifier_event { + MMU_NOTIFY_UNMAP = 0, + MMU_NOTIFY_CLEAR = 1, + MMU_NOTIFY_PROTECTION_VMA = 2, + MMU_NOTIFY_PROTECTION_PAGE = 3, + MMU_NOTIFY_SOFT_DIRTY = 4, + MMU_NOTIFY_RELEASE = 5, + MMU_NOTIFY_MIGRATE = 6, +}; + +struct mmu_notifier; + +struct mmu_notifier_range; + +struct mmu_notifier_ops { + void (*release)(struct mmu_notifier *, struct mm_struct *); + int (*clear_flush_young)(struct mmu_notifier *, struct mm_struct *, long unsigned int, long unsigned int); + int (*clear_young)(struct mmu_notifier *, struct mm_struct *, long unsigned int, long unsigned int); + int (*test_young)(struct mmu_notifier *, struct mm_struct *, long unsigned int); + void (*change_pte)(struct mmu_notifier *, struct mm_struct *, long unsigned int, pte_t); + int (*invalidate_range_start)(struct mmu_notifier *, const struct 
mmu_notifier_range *); + void (*invalidate_range_end)(struct mmu_notifier *, const struct mmu_notifier_range *); + void (*invalidate_range)(struct mmu_notifier *, struct mm_struct *, long unsigned int, long unsigned int); + struct mmu_notifier * (*alloc_notifier)(struct mm_struct *); + void (*free_notifier)(struct mmu_notifier *); +}; + +struct mmu_notifier { + struct hlist_node hlist; + const struct mmu_notifier_ops *ops; + struct mm_struct *mm; + struct callback_head rcu; + unsigned int users; +}; + +struct mmu_notifier_range { + struct vm_area_struct *vma; + struct mm_struct *mm; + long unsigned int start; + long unsigned int end; + unsigned int flags; + enum mmu_notifier_event event; + void *migrate_pgmap_owner; +}; + +enum kobject_action { + KOBJ_ADD = 0, + KOBJ_REMOVE = 1, + KOBJ_CHANGE = 2, + KOBJ_MOVE = 3, + KOBJ_ONLINE = 4, + KOBJ_OFFLINE = 5, + KOBJ_BIND = 6, + KOBJ_UNBIND = 7, +}; + +struct kvm_regs { + struct user_pt_regs regs; + __u64 sp_el1; + __u64 elr_el1; + __u64 spsr[5]; + long: 64; + struct user_fpsimd_state fp_regs; +}; + +struct kvm_sregs {}; + +struct kvm_fpu {}; + +struct kvm_debug_exit_arch { + __u32 hsr; + __u64 far; +}; + +struct kvm_sync_regs { + __u64 device_irq_level; +}; + +struct kvm_userspace_memory_region { + __u32 slot; + __u32 flags; + __u64 guest_phys_addr; + __u64 memory_size; + __u64 userspace_addr; +}; + +struct kvm_hyperv_exit { + __u32 type; + __u32 pad1; + union { + struct { + __u32 msr; + __u32 pad2; + __u64 control; + __u64 evt_page; + __u64 msg_page; + } synic; + struct { + __u64 input; + __u64 result; + __u64 params[2]; + } hcall; + struct { + __u32 msr; + __u32 pad2; + __u64 control; + __u64 status; + __u64 send_page; + __u64 recv_page; + __u64 pending_page; + } syndbg; + } u; +}; + +struct kvm_run { + __u8 request_interrupt_window; + __u8 immediate_exit; + __u8 padding1[6]; + __u32 exit_reason; + __u8 ready_for_interrupt_injection; + __u8 if_flag; + __u16 flags; + __u64 cr8; + __u64 apic_base; + union { + struct { + __u64 hardware_exit_reason; + } hw; + struct { + __u64 hardware_entry_failure_reason; + __u32 cpu; + } fail_entry; + struct { + __u32 exception; + __u32 error_code; + } ex; + struct { + __u8 direction; + __u8 size; + __u16 port; + __u32 count; + __u64 data_offset; + } io; + struct { + struct kvm_debug_exit_arch arch; + } debug; + struct { + __u64 phys_addr; + __u8 data[8]; + __u32 len; + __u8 is_write; + } mmio; + struct { + __u64 nr; + __u64 args[6]; + __u64 ret; + __u32 longmode; + __u32 pad; + } hypercall; + struct { + __u64 rip; + __u32 is_write; + __u32 pad; + } tpr_access; + struct { + __u8 icptcode; + __u16 ipa; + __u32 ipb; + } s390_sieic; + __u64 s390_reset_flags; + struct { + __u64 trans_exc_code; + __u32 pgm_code; + } s390_ucontrol; + struct { + __u32 dcrn; + __u32 data; + __u8 is_write; + } dcr; + struct { + __u32 suberror; + __u32 ndata; + __u64 data[16]; + } internal; + struct { + __u64 gprs[32]; + } osi; + struct { + __u64 nr; + __u64 ret; + __u64 args[9]; + } papr_hcall; + struct { + __u16 subchannel_id; + __u16 subchannel_nr; + __u32 io_int_parm; + __u32 io_int_word; + __u32 ipb; + __u8 dequeued; + } s390_tsch; + struct { + __u32 epr; + } epr; + struct { + __u32 type; + __u64 flags; + } system_event; + struct { + __u64 addr; + __u8 ar; + __u8 reserved; + __u8 fc; + __u8 sel1; + __u16 sel2; + } s390_stsi; + struct { + __u8 vector; + } eoi; + struct kvm_hyperv_exit hyperv; + struct { + __u64 esr_iss; + __u64 fault_ipa; + } arm_nisv; + struct { + __u8 error; + __u8 pad[7]; + __u32 reason; + __u32 index; + __u64 data; 
+ } msr; + char padding[256]; + }; + __u64 kvm_valid_regs; + __u64 kvm_dirty_regs; + union { + struct kvm_sync_regs regs; + char padding[2048]; + } s; +}; + +struct kvm_coalesced_mmio_zone { + __u64 addr; + __u32 size; + union { + __u32 pad; + __u32 pio; + }; +}; + +struct kvm_coalesced_mmio { + __u64 phys_addr; + __u32 len; + union { + __u32 pad; + __u32 pio; + }; + __u8 data[8]; +}; + +struct kvm_coalesced_mmio_ring { + __u32 first; + __u32 last; + struct kvm_coalesced_mmio coalesced_mmio[0]; +}; + +struct kvm_translation { + __u64 linear_address; + __u64 physical_address; + __u8 valid; + __u8 writeable; + __u8 usermode; + __u8 pad[5]; +}; + +struct kvm_dirty_log { + __u32 slot; + __u32 padding1; + union { + void *dirty_bitmap; + __u64 padding2; + }; +}; + +struct kvm_clear_dirty_log { + __u32 slot; + __u32 num_pages; + __u64 first_page; + union { + void *dirty_bitmap; + __u64 padding2; + }; +}; + +struct kvm_signal_mask { + __u32 len; + __u8 sigset[0]; +}; + +struct kvm_mp_state { + __u32 mp_state; +}; + +struct kvm_guest_debug { + __u32 control; + __u32 pad; + struct kvm_guest_debug_arch arch; +}; + +struct kvm_ioeventfd { + __u64 datamatch; + __u64 addr; + __u32 len; + __s32 fd; + __u32 flags; + __u8 pad[36]; +}; + +struct kvm_enable_cap { + __u32 cap; + __u32 flags; + __u64 args[4]; + __u8 pad[64]; +}; + +struct kvm_irq_routing_irqchip { + __u32 irqchip; + __u32 pin; +}; + +struct kvm_irq_routing_msi { + __u32 address_lo; + __u32 address_hi; + __u32 data; + union { + __u32 pad; + __u32 devid; + }; +}; + +struct kvm_irq_routing_s390_adapter { + __u64 ind_addr; + __u64 summary_addr; + __u64 ind_offset; + __u32 summary_offset; + __u32 adapter_id; +}; + +struct kvm_irq_routing_hv_sint { + __u32 vcpu; + __u32 sint; +}; + +struct kvm_irq_routing_entry { + __u32 gsi; + __u32 type; + __u32 flags; + __u32 pad; + union { + struct kvm_irq_routing_irqchip irqchip; + struct kvm_irq_routing_msi msi; + struct kvm_irq_routing_s390_adapter adapter; + struct kvm_irq_routing_hv_sint hv_sint; + __u32 pad[8]; + } u; +}; + +struct kvm_irq_routing { + __u32 nr; + __u32 flags; + struct kvm_irq_routing_entry entries[0]; +}; + +struct kvm_irqfd { + __u32 fd; + __u32 gsi; + __u32 flags; + __u32 resamplefd; + __u8 pad[16]; +}; + +struct kvm_msi { + __u32 address_lo; + __u32 address_hi; + __u32 data; + __u32 flags; + __u32 devid; + __u8 pad[12]; +}; + +struct kvm_create_device { + __u32 type; + __u32 fd; + __u32 flags; +}; + +struct kvm_device_attr { + __u32 flags; + __u32 group; + __u64 attr; + __u64 addr; +}; + +enum kvm_device_type { + KVM_DEV_TYPE_FSL_MPIC_20 = 1, + KVM_DEV_TYPE_FSL_MPIC_42 = 2, + KVM_DEV_TYPE_XICS = 3, + KVM_DEV_TYPE_VFIO = 4, + KVM_DEV_TYPE_ARM_VGIC_V2 = 5, + KVM_DEV_TYPE_FLIC = 6, + KVM_DEV_TYPE_ARM_VGIC_V3 = 7, + KVM_DEV_TYPE_ARM_VGIC_ITS = 8, + KVM_DEV_TYPE_XIVE = 9, + KVM_DEV_TYPE_ARM_PV_TIME = 10, + KVM_DEV_TYPE_MAX = 11, +}; + +struct its_vm { + struct fwnode_handle *fwnode; + struct irq_domain *domain; + struct page *vprop_page; + struct its_vpe **vpes; + int nr_vpes; + irq_hw_number_t db_lpi_base; + long unsigned int *db_bitmap; + int nr_db_lpis; + u32 vlpi_count[16]; +}; + +struct kvm_device; + +struct vgic_its { + gpa_t vgic_its_base; + bool enabled; + struct vgic_io_device iodev; + struct kvm_device *dev; + u64 baser_device_table; + u64 baser_coll_table; + struct mutex cmd_lock; + u64 cbaser; + u32 creadr; + u32 cwriter; + u32 abi_rev; + struct mutex its_lock; + struct list_head device_list; + struct list_head collection_list; +}; + +struct vgic_register_region { + unsigned int 
reg_offset; + unsigned int len; + unsigned int bits_per_irq; + unsigned int access_flags; + union { + long unsigned int (*read)(struct kvm_vcpu *, gpa_t, unsigned int); + long unsigned int (*its_read)(struct kvm *, struct vgic_its *, gpa_t, unsigned int); + }; + union { + void (*write)(struct kvm_vcpu *, gpa_t, unsigned int, long unsigned int); + void (*its_write)(struct kvm *, struct vgic_its *, gpa_t, unsigned int, long unsigned int); + }; + long unsigned int (*uaccess_read)(struct kvm_vcpu *, gpa_t, unsigned int); + union { + int (*uaccess_write)(struct kvm_vcpu *, gpa_t, unsigned int, long unsigned int); + int (*uaccess_its_write)(struct kvm *, struct vgic_its *, gpa_t, unsigned int, long unsigned int); + }; +}; + +struct kvm_device_ops; + +struct kvm_device { + const struct kvm_device_ops *ops; + struct kvm *kvm; + void *private; + struct list_head vm_node; +}; + +struct vgic_redist_region { + u32 index; + gpa_t base; + u32 count; + u32 free_index; + struct list_head list; +}; + +struct vgic_state_iter; + +struct vgic_dist { + bool in_kernel; + bool ready; + bool initialized; + u32 vgic_model; + u32 implementation_rev; + bool v2_groups_user_writable; + bool msis_require_devid; + int nr_spis; + gpa_t vgic_dist_base; + union { + gpa_t vgic_cpu_base; + struct list_head rd_regions; + }; + bool enabled; + bool nassgireq; + struct vgic_irq *spis; + struct vgic_io_device dist_iodev; + bool has_its; + u64 propbaser; + raw_spinlock_t lpi_list_lock; + struct list_head lpi_list_head; + int lpi_list_count; + struct list_head lpi_translation_cache; + struct vgic_state_iter *iter; + struct its_vm its_vm; +}; + +struct kvm_vmid { + u64 vmid_gen; + u32 vmid; +}; + +struct kvm_pgtable; + +struct kvm_s2_mmu { + struct kvm_vmid vmid; + phys_addr_t pgd_phys; + struct kvm_pgtable *pgt; + int *last_vcpu_ran; + struct kvm *kvm; +}; + +struct kvm_vm_stat { + ulong remote_tlb_flush; +}; + +struct kvm_arch { + struct kvm_s2_mmu mmu; + u64 vtcr; + int max_vcpus; + struct vgic_dist vgic; + u32 psci_version; + bool return_nisv_io_abort_to_user; + long unsigned int *pmu_filter; + unsigned int pmuver; + u8 pfr0_csv2; +}; + +struct kvm_memslots; + +struct kvm_io_bus; + +struct kvm_irq_routing_table; + +struct kvm_stat_data; + +struct kvm { + spinlock_t mmu_lock; + struct mutex slots_lock; + struct mm_struct *mm; + struct kvm_memslots *memslots[1]; + struct kvm_vcpu *vcpus[512]; + atomic_t online_vcpus; + int created_vcpus; + int last_boosted_vcpu; + struct list_head vm_list; + struct mutex lock; + struct kvm_io_bus *buses[4]; + struct { + spinlock_t lock; + struct list_head items; + struct list_head resampler_list; + struct mutex resampler_lock; + } irqfds; + struct list_head ioeventfds; + struct kvm_vm_stat stat; + struct kvm_arch arch; + refcount_t users_count; + struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; + spinlock_t ring_lock; + struct list_head coalesced_zones; + struct mutex irq_lock; + struct kvm_irq_routing_table *irq_routing; + struct hlist_head irq_ack_notifier_list; + struct mmu_notifier mmu_notifier; + long unsigned int mmu_notifier_seq; + long int mmu_notifier_count; + long int tlbs_dirty; + struct list_head devices; + u64 manual_dirty_log_protect; + struct dentry *debugfs_dentry; + struct kvm_stat_data **debugfs_stat_data; + struct srcu_struct srcu; + struct srcu_struct irq_srcu; + pid_t userspace_pid; + unsigned int max_halt_poll_ns; +}; + +struct kvm_io_range { + gpa_t addr; + int len; + struct kvm_io_device *dev; +}; + +struct kvm_io_bus { + int dev_count; + int ioeventfd_count; + struct 
kvm_io_range range[0]; +}; + +enum { + OUTSIDE_GUEST_MODE = 0, + IN_GUEST_MODE = 1, + EXITING_GUEST_MODE = 2, + READING_SHADOW_PAGE_TABLES = 3, +}; + +struct kvm_host_map { + struct page *page; + void *hva; + kvm_pfn_t pfn; + kvm_pfn_t gfn; +}; + +struct kvm_irq_routing_table { + int chip[988]; + u32 nr_rt_entries; + struct hlist_head map[0]; +}; + +struct kvm_memslots { + u64 generation; + short int id_to_index[512]; + atomic_t lru_slot; + int used_slots; + struct kvm_memory_slot memslots[0]; +}; + +struct kvm_stats_debugfs_item; + +struct kvm_stat_data { + struct kvm *kvm; + struct kvm_stats_debugfs_item *dbgfs_item; +}; + +enum kvm_mr_change { + KVM_MR_CREATE = 0, + KVM_MR_DELETE = 1, + KVM_MR_MOVE = 2, + KVM_MR_FLAGS_ONLY = 3, +}; + +enum kvm_stat_kind { + KVM_STAT_VM = 0, + KVM_STAT_VCPU = 1, +}; + +struct kvm_stats_debugfs_item { + const char *name; + int offset; + enum kvm_stat_kind kind; + int mode; +}; + +struct kvm_device_ops { + const char *name; + int (*create)(struct kvm_device *, u32); + void (*init)(struct kvm_device *); + void (*destroy)(struct kvm_device *); + void (*release)(struct kvm_device *); + int (*set_attr)(struct kvm_device *, struct kvm_device_attr *); + int (*get_attr)(struct kvm_device *, struct kvm_device_attr *); + int (*has_attr)(struct kvm_device *, struct kvm_device_attr *); + long int (*ioctl)(struct kvm_device *, unsigned int, long unsigned int); + int (*mmap)(struct kvm_device *, struct vm_area_struct *); +}; + +typedef int (*kvm_vm_thread_fn_t)(struct kvm *, uintptr_t); + +struct miscdevice { + int minor; + const char *name; + const struct file_operations *fops; + struct list_head list; + struct device *parent; + struct device *this_device; + const struct attribute_group **groups; + const char *nodename; + umode_t mode; +}; + +struct syscore_ops { + struct list_head node; + int (*suspend)(); + void (*resume)(); + void (*shutdown)(); +}; + +struct trace_event_raw_kvm_userspace_exit { + struct trace_entry ent; + __u32 reason; + int errno; + char __data[0]; +}; + +struct trace_event_raw_kvm_vcpu_wakeup { + struct trace_entry ent; + __u64 ns; + bool waited; + bool valid; + char __data[0]; +}; + +struct trace_event_raw_kvm_set_irq { + struct trace_entry ent; + unsigned int gsi; + int level; + int irq_source_id; + char __data[0]; +}; + +struct trace_event_raw_kvm_ack_irq { + struct trace_entry ent; + unsigned int irqchip; + unsigned int pin; + char __data[0]; +}; + +struct trace_event_raw_kvm_mmio { + struct trace_entry ent; + u32 type; + u32 len; + u64 gpa; + u64 val; + char __data[0]; +}; + +struct trace_event_raw_kvm_fpu { + struct trace_entry ent; + u32 load; + char __data[0]; +}; + +struct trace_event_raw_kvm_age_page { + struct trace_entry ent; + u64 hva; + u64 gfn; + u8 level; + u8 referenced; + char __data[0]; +}; + +struct trace_event_raw_kvm_halt_poll_ns { + struct trace_entry ent; + bool grow; + unsigned int vcpu_id; + unsigned int new; + unsigned int old; + char __data[0]; +}; + +struct trace_event_data_offsets_kvm_userspace_exit {}; + +struct trace_event_data_offsets_kvm_vcpu_wakeup {}; + +struct trace_event_data_offsets_kvm_set_irq {}; + +struct trace_event_data_offsets_kvm_ack_irq {}; + +struct trace_event_data_offsets_kvm_mmio {}; + +struct trace_event_data_offsets_kvm_fpu {}; + +struct trace_event_data_offsets_kvm_age_page {}; + +struct trace_event_data_offsets_kvm_halt_poll_ns {}; + +typedef void (*btf_trace_kvm_userspace_exit)(void *, __u32, int); + +typedef void (*btf_trace_kvm_vcpu_wakeup)(void *, __u64, bool, bool); + +typedef void 
(*btf_trace_kvm_set_irq)(void *, unsigned int, int, int); + +typedef void (*btf_trace_kvm_ack_irq)(void *, unsigned int, unsigned int); + +typedef void (*btf_trace_kvm_mmio)(void *, int, int, u64, void *); + +typedef void (*btf_trace_kvm_fpu)(void *, int); + +typedef void (*btf_trace_kvm_age_page)(void *, ulong, int, struct kvm_memory_slot *, int); + +typedef void (*btf_trace_kvm_halt_poll_ns)(void *, bool, unsigned int, unsigned int, unsigned int); + +struct kvm_cpu_compat_check { + void *opaque; + int *ret; +}; + +struct kvm_vm_worker_thread_context { + struct kvm *kvm; + struct task_struct *parent; + struct completion init_done; + kvm_vm_thread_fn_t thread_fn; + uintptr_t data; + int err; +}; + +struct kvm_coalesced_mmio_dev { + struct list_head list; + struct kvm_io_device dev; + struct kvm *kvm; + struct kvm_coalesced_mmio_zone zone; +}; + +enum { + WORK_STRUCT_PENDING_BIT = 0, + WORK_STRUCT_DELAYED_BIT = 1, + WORK_STRUCT_PWQ_BIT = 2, + WORK_STRUCT_LINKED_BIT = 3, + WORK_STRUCT_COLOR_SHIFT = 4, + WORK_STRUCT_COLOR_BITS = 4, + WORK_STRUCT_PENDING = 1, + WORK_STRUCT_DELAYED = 2, + WORK_STRUCT_PWQ = 4, + WORK_STRUCT_LINKED = 8, + WORK_STRUCT_STATIC = 0, + WORK_NR_COLORS = 15, + WORK_NO_COLOR = 15, + WORK_CPU_UNBOUND = 128, + WORK_STRUCT_FLAG_BITS = 8, + WORK_OFFQ_FLAG_BASE = 4, + __WORK_OFFQ_CANCELING = 4, + WORK_OFFQ_CANCELING = 16, + WORK_OFFQ_FLAG_BITS = 1, + WORK_OFFQ_POOL_SHIFT = 5, + WORK_OFFQ_LEFT = 59, + WORK_OFFQ_POOL_BITS = 31, + WORK_OFFQ_POOL_NONE = 2147483647, + WORK_STRUCT_FLAG_MASK = 255, + WORK_STRUCT_WQ_DATA_MASK = 4294967040, + WORK_STRUCT_NO_POOL = 4294967264, + WORK_BUSY_PENDING = 1, + WORK_BUSY_RUNNING = 2, + WORKER_DESC_LEN = 24, +}; + +struct irq_bypass_consumer; + +struct irq_bypass_producer { + struct list_head node; + void *token; + int irq; + int (*add_consumer)(struct irq_bypass_producer *, struct irq_bypass_consumer *); + void (*del_consumer)(struct irq_bypass_producer *, struct irq_bypass_consumer *); + void (*stop)(struct irq_bypass_producer *); + void (*start)(struct irq_bypass_producer *); +}; + +struct irq_bypass_consumer { + struct list_head node; + void *token; + int (*add_producer)(struct irq_bypass_consumer *, struct irq_bypass_producer *); + void (*del_producer)(struct irq_bypass_consumer *, struct irq_bypass_producer *); + void (*stop)(struct irq_bypass_consumer *); + void (*start)(struct irq_bypass_consumer *); +}; + +enum { + kvm_ioeventfd_flag_nr_datamatch = 0, + kvm_ioeventfd_flag_nr_pio = 1, + kvm_ioeventfd_flag_nr_deassign = 2, + kvm_ioeventfd_flag_nr_virtio_ccw_notify = 3, + kvm_ioeventfd_flag_nr_fast_mmio = 4, + kvm_ioeventfd_flag_nr_max = 5, +}; + +struct fd { + struct file *file; + unsigned int flags; +}; + +struct kvm_s390_adapter_int { + u64 ind_addr; + u64 summary_addr; + u64 ind_offset; + u32 summary_offset; + u32 adapter_id; +}; + +struct kvm_hv_sint { + u32 vcpu; + u32 sint; +}; + +struct kvm_kernel_irq_routing_entry { + u32 gsi; + u32 type; + int (*set)(struct kvm_kernel_irq_routing_entry *, struct kvm *, int, int, bool); + union { + struct { + unsigned int irqchip; + unsigned int pin; + } irqchip; + struct { + u32 address_lo; + u32 address_hi; + u32 data; + u32 flags; + u32 devid; + } msi; + struct kvm_s390_adapter_int adapter; + struct kvm_hv_sint hv_sint; + }; + struct hlist_node link; +}; + +struct kvm_irq_ack_notifier { + struct hlist_node link; + unsigned int gsi; + void (*irq_acked)(struct kvm_irq_ack_notifier *); +}; + +typedef struct poll_table_struct poll_table; + +struct kvm_kernel_irqfd_resampler { + struct kvm *kvm; + 
struct list_head list; + struct kvm_irq_ack_notifier notifier; + struct list_head link; +}; + +struct kvm_kernel_irqfd { + struct kvm *kvm; + wait_queue_entry_t wait; + struct kvm_kernel_irq_routing_entry irq_entry; + seqcount_spinlock_t irq_entry_sc; + int gsi; + struct work_struct inject; + struct kvm_kernel_irqfd_resampler *resampler; + struct eventfd_ctx *resamplefd; + struct list_head resampler_link; + struct eventfd_ctx *eventfd; + struct list_head list; + poll_table pt; + struct work_struct shutdown; + struct irq_bypass_consumer consumer; + struct irq_bypass_producer *producer; +}; + +struct _ioeventfd { + struct list_head list; + u64 addr; + int length; + struct eventfd_ctx *eventfd; + u64 datamatch; + struct kvm_io_device dev; + u8 bus_idx; + bool wildcard; +}; + +struct vfio_group; + +struct kvm_vfio_group { + struct list_head node; + struct vfio_group *vfio_group; +}; + +struct kvm_vfio { + struct list_head group_list; + struct mutex lock; + bool noncoherent; +}; + +struct kvm_vcpu_init { + __u32 target; + __u32 features[7]; +}; + +struct kvm_vcpu_events { + struct { + __u8 serror_pending; + __u8 serror_has_esr; + __u8 ext_dabt_pending; + __u8 pad[5]; + __u64 serror_esr; + } exception; + __u32 reserved[12]; +}; + +struct kvm_reg_list { + __u64 n; + __u64 reg[0]; +}; + +struct kvm_one_reg { + __u64 id; + __u64 addr; +}; + +struct kvm_arm_device_addr { + __u64 id; + __u64 addr; +}; + +enum vgic_type { + VGIC_V2 = 0, + VGIC_V3 = 1, +}; + +struct vgic_global { + enum vgic_type type; + phys_addr_t vcpu_base; + void *vcpu_base_va; + void *vcpu_hyp_va; + void *vctrl_base; + void *vctrl_hyp; + int nr_lr; + unsigned int maint_irq; + int max_gic_vcpus; + bool can_emulate_gicv2; + bool has_gicv4; + bool has_gicv4_1; + struct static_key_false gicv3_cpuif; + u32 ich_vtr_el2; +}; + +struct timer_map { + struct arch_timer_context *direct_vtimer; + struct arch_timer_context *direct_ptimer; + struct arch_timer_context *emul_ptimer; +}; + +typedef u64 kvm_pte_t; + +struct kvm_pgtable { + u32 ia_bits; + u32 start_level; + kvm_pte_t *pgd; + struct kvm_s2_mmu *mmu; +}; + +struct kvm_pmu_events { + u32 events_host; + u32 events_guest; +}; + +struct kvm_host_data { + struct kvm_cpu_context host_ctxt; + struct kvm_pmu_events pmu_events; + long: 64; +}; + +struct trace_event_raw_kvm_entry { + struct trace_entry ent; + long unsigned int vcpu_pc; + char __data[0]; +}; + +struct trace_event_raw_kvm_exit { + struct trace_entry ent; + int ret; + unsigned int esr_ec; + long unsigned int vcpu_pc; + char __data[0]; +}; + +struct trace_event_raw_kvm_guest_fault { + struct trace_entry ent; + long unsigned int vcpu_pc; + long unsigned int hsr; + long unsigned int hxfar; + long long unsigned int ipa; + char __data[0]; +}; + +struct trace_event_raw_kvm_access_fault { + struct trace_entry ent; + long unsigned int ipa; + char __data[0]; +}; + +struct trace_event_raw_kvm_irq_line { + struct trace_entry ent; + unsigned int type; + int vcpu_idx; + int irq_num; + int level; + char __data[0]; +}; + +struct trace_event_raw_kvm_mmio_emulate { + struct trace_entry ent; + long unsigned int vcpu_pc; + long unsigned int instr; + long unsigned int cpsr; + char __data[0]; +}; + +struct trace_event_raw_kvm_unmap_hva_range { + struct trace_entry ent; + long unsigned int start; + long unsigned int end; + char __data[0]; +}; + +struct trace_event_raw_kvm_set_spte_hva { + struct trace_entry ent; + long unsigned int hva; + char __data[0]; +}; + +struct trace_event_raw_kvm_age_hva { + struct trace_entry ent; + long unsigned int start; + 
long unsigned int end; + char __data[0]; +}; + +struct trace_event_raw_kvm_test_age_hva { + struct trace_entry ent; + long unsigned int hva; + char __data[0]; +}; + +struct trace_event_raw_kvm_set_way_flush { + struct trace_entry ent; + long unsigned int vcpu_pc; + bool cache; + char __data[0]; +}; + +struct trace_event_raw_kvm_toggle_cache { + struct trace_entry ent; + long unsigned int vcpu_pc; + bool was; + bool now; + char __data[0]; +}; + +struct trace_event_raw_kvm_timer_update_irq { + struct trace_entry ent; + long unsigned int vcpu_id; + __u32 irq; + int level; + char __data[0]; +}; + +struct trace_event_raw_kvm_get_timer_map { + struct trace_entry ent; + long unsigned int vcpu_id; + int direct_vtimer; + int direct_ptimer; + int emul_ptimer; + char __data[0]; +}; + +struct trace_event_raw_kvm_timer_save_state { + struct trace_entry ent; + long unsigned int ctl; + long long unsigned int cval; + int timer_idx; + char __data[0]; +}; + +struct trace_event_raw_kvm_timer_restore_state { + struct trace_entry ent; + long unsigned int ctl; + long long unsigned int cval; + int timer_idx; + char __data[0]; +}; + +struct trace_event_raw_kvm_timer_hrtimer_expire { + struct trace_entry ent; + int timer_idx; + char __data[0]; +}; + +struct trace_event_raw_kvm_timer_emulate { + struct trace_entry ent; + int timer_idx; + bool should_fire; + char __data[0]; +}; + +struct trace_event_data_offsets_kvm_entry {}; + +struct trace_event_data_offsets_kvm_exit {}; + +struct trace_event_data_offsets_kvm_guest_fault {}; + +struct trace_event_data_offsets_kvm_access_fault {}; + +struct trace_event_data_offsets_kvm_irq_line {}; + +struct trace_event_data_offsets_kvm_mmio_emulate {}; + +struct trace_event_data_offsets_kvm_unmap_hva_range {}; + +struct trace_event_data_offsets_kvm_set_spte_hva {}; + +struct trace_event_data_offsets_kvm_age_hva {}; + +struct trace_event_data_offsets_kvm_test_age_hva {}; + +struct trace_event_data_offsets_kvm_set_way_flush {}; + +struct trace_event_data_offsets_kvm_toggle_cache {}; + +struct trace_event_data_offsets_kvm_timer_update_irq {}; + +struct trace_event_data_offsets_kvm_get_timer_map {}; + +struct trace_event_data_offsets_kvm_timer_save_state {}; + +struct trace_event_data_offsets_kvm_timer_restore_state {}; + +struct trace_event_data_offsets_kvm_timer_hrtimer_expire {}; + +struct trace_event_data_offsets_kvm_timer_emulate {}; + +typedef void (*btf_trace_kvm_entry)(void *, long unsigned int); + +typedef void (*btf_trace_kvm_exit)(void *, int, unsigned int, long unsigned int); + +typedef void (*btf_trace_kvm_guest_fault)(void *, long unsigned int, long unsigned int, long unsigned int, long long unsigned int); + +typedef void (*btf_trace_kvm_access_fault)(void *, long unsigned int); + +typedef void (*btf_trace_kvm_irq_line)(void *, unsigned int, int, int, int); + +typedef void (*btf_trace_kvm_mmio_emulate)(void *, long unsigned int, long unsigned int, long unsigned int); + +typedef void (*btf_trace_kvm_unmap_hva_range)(void *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_kvm_set_spte_hva)(void *, long unsigned int); + +typedef void (*btf_trace_kvm_age_hva)(void *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_kvm_test_age_hva)(void *, long unsigned int); + +typedef void (*btf_trace_kvm_set_way_flush)(void *, long unsigned int, bool); + +typedef void (*btf_trace_kvm_toggle_cache)(void *, long unsigned int, bool, bool); + +typedef void (*btf_trace_kvm_timer_update_irq)(void *, long unsigned int, __u32, int); + +typedef void 
(*btf_trace_kvm_get_timer_map)(void *, long unsigned int, struct timer_map *); + +typedef void (*btf_trace_kvm_timer_save_state)(void *, struct arch_timer_context *); + +typedef void (*btf_trace_kvm_timer_restore_state)(void *, struct arch_timer_context *); + +typedef void (*btf_trace_kvm_timer_hrtimer_expire)(void *, struct arch_timer_context *); + +typedef void (*btf_trace_kvm_timer_emulate)(void *, struct arch_timer_context *, bool); + +enum kvm_pgtable_prot { + KVM_PGTABLE_PROT_X = 1, + KVM_PGTABLE_PROT_W = 2, + KVM_PGTABLE_PROT_R = 4, + KVM_PGTABLE_PROT_DEVICE = 8, +}; + +typedef long unsigned int hva_t; + +enum kvm_arch_timers { + TIMER_PTIMER = 0, + TIMER_VTIMER = 1, + NR_KVM_TIMERS = 2, +}; + +enum exception_type { + except_type_sync = 0, + except_type_irq = 128, + except_type_fiq = 256, + except_type_serror = 384, +}; + +struct sys_reg_params; + +struct sys_reg_desc { + const char *name; + u8 Op0; + u8 Op1; + u8 CRn; + u8 CRm; + u8 Op2; + bool (*access)(struct kvm_vcpu *, struct sys_reg_params *, const struct sys_reg_desc *); + void (*reset)(struct kvm_vcpu *, const struct sys_reg_desc *); + int reg; + u64 val; + int (*__get_user)(struct kvm_vcpu *, const struct sys_reg_desc *, const struct kvm_one_reg *, void *); + int (*set_user)(struct kvm_vcpu *, const struct sys_reg_desc *, const struct kvm_one_reg *, void *); + unsigned int (*visibility)(const struct kvm_vcpu *, const struct sys_reg_desc *); +}; + +struct sys_reg_params { + u8 Op0; + u8 Op1; + u8 CRn; + u8 CRm; + u8 Op2; + u64 regval; + bool is_write; + bool is_aarch32; + bool is_32bit; +}; + +struct trace_event_raw_kvm_wfx_arm64 { + struct trace_entry ent; + long unsigned int vcpu_pc; + bool is_wfe; + char __data[0]; +}; + +struct trace_event_raw_kvm_hvc_arm64 { + struct trace_entry ent; + long unsigned int vcpu_pc; + long unsigned int r0; + long unsigned int imm; + char __data[0]; +}; + +struct trace_event_raw_kvm_arm_setup_debug { + struct trace_entry ent; + struct kvm_vcpu *vcpu; + __u32 guest_debug; + char __data[0]; +}; + +struct trace_event_raw_kvm_arm_clear_debug { + struct trace_entry ent; + __u32 guest_debug; + char __data[0]; +}; + +struct trace_event_raw_kvm_arm_set_dreg32 { + struct trace_entry ent; + const char *name; + __u32 value; + char __data[0]; +}; + +struct trace_event_raw_kvm_arm_set_regset { + struct trace_entry ent; + const char *name; + int len; + u64 ctrls[16]; + u64 values[16]; + char __data[0]; +}; + +struct trace_event_raw_trap_reg { + struct trace_entry ent; + const char *fn; + int reg; + bool is_write; + u64 write_value; + char __data[0]; +}; + +struct trace_event_raw_kvm_handle_sys_reg { + struct trace_entry ent; + long unsigned int hsr; + char __data[0]; +}; + +struct trace_event_raw_kvm_sys_access { + struct trace_entry ent; + long unsigned int vcpu_pc; + bool is_write; + const char *name; + u8 Op0; + u8 Op1; + u8 CRn; + u8 CRm; + u8 Op2; + char __data[0]; +}; + +struct trace_event_raw_kvm_set_guest_debug { + struct trace_entry ent; + struct kvm_vcpu *vcpu; + __u32 guest_debug; + char __data[0]; +}; + +struct trace_event_data_offsets_kvm_wfx_arm64 {}; + +struct trace_event_data_offsets_kvm_hvc_arm64 {}; + +struct trace_event_data_offsets_kvm_arm_setup_debug {}; + +struct trace_event_data_offsets_kvm_arm_clear_debug {}; + +struct trace_event_data_offsets_kvm_arm_set_dreg32 {}; + +struct trace_event_data_offsets_kvm_arm_set_regset {}; + +struct trace_event_data_offsets_trap_reg {}; + +struct trace_event_data_offsets_kvm_handle_sys_reg {}; + +struct trace_event_data_offsets_kvm_sys_access {}; + 
+struct trace_event_data_offsets_kvm_set_guest_debug {}; + +typedef void (*btf_trace_kvm_wfx_arm64)(void *, long unsigned int, bool); + +typedef void (*btf_trace_kvm_hvc_arm64)(void *, long unsigned int, long unsigned int, long unsigned int); + +typedef void (*btf_trace_kvm_arm_setup_debug)(void *, struct kvm_vcpu *, __u32); + +typedef void (*btf_trace_kvm_arm_clear_debug)(void *, __u32); + +typedef void (*btf_trace_kvm_arm_set_dreg32)(void *, const char *, __u32); + +typedef void (*btf_trace_kvm_arm_set_regset)(void *, const char *, int, __u64 *, __u64 *); + +typedef void (*btf_trace_trap_reg)(void *, const char *, int, bool, u64); + +typedef void (*btf_trace_kvm_handle_sys_reg)(void *, long unsigned int); + +typedef void (*btf_trace_kvm_sys_access)(void *, long unsigned int, struct sys_reg_params *, const struct sys_reg_desc *); + +typedef void (*btf_trace_kvm_set_guest_debug)(void *, struct kvm_vcpu *, __u32); + +typedef int (*exit_handle_fn)(struct kvm_vcpu *); + +struct sve_state_reg_region { + unsigned int koffset; + unsigned int klen; + unsigned int upad; +}; + +struct __va_list { + void *__stack; + void *__gr_top; + void *__vr_top; + int __gr_offs; + int __vr_offs; +}; + +typedef struct __va_list __gnuc_va_list; + +typedef __gnuc_va_list va_list; + +struct va_format { + const char *fmt; + va_list *va; +}; + +enum kvm_arch_timer_regs { + TIMER_REG_CNT = 0, + TIMER_REG_CVAL = 1, + TIMER_REG_TVAL = 2, + TIMER_REG_CTL = 3, +}; + +struct vgic_vmcr { + u32 grpen0; + u32 grpen1; + u32 ackctl; + u32 fiqen; + u32 cbpr; + u32 eoim; + u32 abpr; + u32 bpr; + u32 pmr; +}; + +struct cyclecounter { + u64 (*read)(const struct cyclecounter *); + u64 mask; + u32 mult; + u32 shift; +}; + +struct timecounter { + const struct cyclecounter *cc; + u64 cycle_last; + u64 nsec; + u64 mask; + u64 frac; +}; + +struct arch_timer_kvm_info { + struct timecounter timecounter; + int virtual_irq; + int physical_irq; +}; + +enum hrtimer_mode { + HRTIMER_MODE_ABS = 0, + HRTIMER_MODE_REL = 1, + HRTIMER_MODE_PINNED = 2, + HRTIMER_MODE_SOFT = 4, + HRTIMER_MODE_HARD = 8, + HRTIMER_MODE_ABS_PINNED = 2, + HRTIMER_MODE_REL_PINNED = 3, + HRTIMER_MODE_ABS_SOFT = 4, + HRTIMER_MODE_REL_SOFT = 5, + HRTIMER_MODE_ABS_PINNED_SOFT = 6, + HRTIMER_MODE_REL_PINNED_SOFT = 7, + HRTIMER_MODE_ABS_HARD = 8, + HRTIMER_MODE_REL_HARD = 9, + HRTIMER_MODE_ABS_PINNED_HARD = 10, + HRTIMER_MODE_REL_PINNED_HARD = 11, +}; + +struct trace_event_raw_vgic_update_irq_pending { + struct trace_entry ent; + long unsigned int vcpu_id; + __u32 irq; + bool level; + char __data[0]; +}; + +struct trace_event_data_offsets_vgic_update_irq_pending {}; + +typedef void (*btf_trace_vgic_update_irq_pending)(void *, long unsigned int, __u32, bool); + +enum gic_type { + GIC_V2 = 0, + GIC_V3 = 1, +}; + +struct gic_kvm_info { + enum gic_type type; + struct resource vcpu; + unsigned int maint_irq; + struct resource vctrl; + bool has_v4; + bool has_v4_1; +}; + +struct its_vlpi_map { + struct its_vm *vm; + struct its_vpe *vpe; + u32 vintid; + u8 properties; + bool db_enabled; +}; + +struct vgic_reg_attr { + struct kvm_vcpu *vcpu; + gpa_t addr; +}; + +struct its_device { + struct list_head dev_list; + struct list_head itt_head; + u32 num_eventid_bits; + gpa_t itt_addr; + u32 device_id; +}; + +struct its_collection { + struct list_head coll_list; + u32 collection_id; + u32 target_addr; +}; + +struct its_ite { + struct list_head ite_list; + struct vgic_irq *irq; + struct its_collection *collection; + u32 event_id; +}; + +struct vgic_translation_cache_entry { + struct list_head 
entry; + phys_addr_t db; + u32 devid; + u32 eventid; + struct vgic_irq *irq; +}; + +struct vgic_its_abi { + int cte_esz; + int dte_esz; + int ite_esz; + int (*save_tables)(struct vgic_its *); + int (*restore_tables)(struct vgic_its *); + int (*commit)(struct vgic_its *); +}; + +typedef int (*entry_fn_t)(struct vgic_its *, u32, void *, void *); + +struct vgic_state_iter { + int nr_cpus; + int nr_spis; + int nr_lpis; + int dist_id; + int vcpu_id; + int intid; + int lpi_idx; + u32 *lpi_array; +}; + +struct kvm_pmu_event_filter { + __u16 base_event; + __u16 nevents; + __u8 action; + __u8 pad[3]; +}; + +struct tlb_inv_context { + long unsigned int flags; + u64 tcr; + u64 sctlr; +}; + +struct tlb_inv_context___2 { + u64 tcr; +}; + +enum kvm_pgtable_walk_flags { + KVM_PGTABLE_WALK_LEAF = 1, + KVM_PGTABLE_WALK_TABLE_PRE = 2, + KVM_PGTABLE_WALK_TABLE_POST = 4, +}; + +typedef int (*kvm_pgtable_visitor_fn_t)(u64, u64, u32, kvm_pte_t *, enum kvm_pgtable_walk_flags, void * const); + +struct kvm_pgtable_walker { + const kvm_pgtable_visitor_fn_t cb; + void * const arg; + const enum kvm_pgtable_walk_flags flags; +}; + +struct kvm_pgtable_walk_data { + struct kvm_pgtable *pgt; + struct kvm_pgtable_walker *walker; + u64 addr; + u64 end; +}; + +struct hyp_map_data { + u64 phys; + kvm_pte_t attr; +}; + +struct stage2_map_data { + u64 phys; + kvm_pte_t attr; + kvm_pte_t *anchor; + struct kvm_s2_mmu *mmu; + struct kvm_mmu_memory_cache *memcache; +}; + +struct stage2_attr_data { + kvm_pte_t attr_set; + kvm_pte_t attr_clr; + kvm_pte_t pte; + u32 level; +}; + +typedef s8 int8_t; + +typedef s16 int16_t; + +typedef u16 uint16_t; + +typedef uint64_t xen_pfn_t; + +typedef uint64_t xen_ulong_t; + +typedef struct { + union { + unsigned char *p; + uint64_t q; + }; +} __guest_handle_uchar; + +typedef struct { + union { + char *p; + uint64_t q; + }; +} __guest_handle_char; + +typedef struct { + union { + void *p; + uint64_t q; + }; +} __guest_handle_void; + +typedef struct { + union { + uint64_t *p; + uint64_t q; + }; +} __guest_handle_uint64_t; + +typedef struct { + union { + uint32_t *p; + uint64_t q; + }; +} __guest_handle_uint32_t; + +struct arch_vcpu_info {}; + +struct arch_shared_info {}; + +struct pvclock_vcpu_time_info { + u32 version; + u32 pad0; + u64 tsc_timestamp; + u64 system_time; + u32 tsc_to_system_mul; + s8 tsc_shift; + u8 flags; + u8 pad[2]; +}; + +struct pvclock_wall_clock { + u32 version; + u32 sec; + u32 nsec; + u32 sec_hi; +}; + +typedef uint16_t domid_t; + +struct vcpu_info { + uint8_t evtchn_upcall_pending; + uint8_t evtchn_upcall_mask; + xen_ulong_t evtchn_pending_sel; + struct arch_vcpu_info arch; + struct pvclock_vcpu_time_info time; +}; + +struct shared_info { + struct vcpu_info vcpu_info[1]; + xen_ulong_t evtchn_pending[64]; + xen_ulong_t evtchn_mask[64]; + struct pvclock_wall_clock wc; + struct arch_shared_info arch; +}; + +struct start_info { + char magic[32]; + long unsigned int nr_pages; + long unsigned int shared_info; + uint32_t flags; + xen_pfn_t store_mfn; + uint32_t store_evtchn; + union { + struct { + xen_pfn_t mfn; + uint32_t evtchn; + } domU; + struct { + uint32_t info_off; + uint32_t info_size; + } dom0; + } console; + long unsigned int pt_base; + long unsigned int nr_pt_frames; + long unsigned int mfn_list; + long unsigned int mod_start; + long unsigned int mod_len; + int8_t cmd_line[1024]; + long unsigned int first_p2m_pfn; + long unsigned int nr_p2m_frames; +}; + +enum vdso_clock_mode { + VDSO_CLOCKMODE_NONE = 0, + VDSO_CLOCKMODE_ARCHTIMER = 1, + 
VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT = 2, + VDSO_CLOCKMODE_MAX = 3, + VDSO_CLOCKMODE_TIMENS = 2147483647, +}; + +struct clocksource { + u64 (*read)(struct clocksource *); + u64 mask; + u32 mult; + u32 shift; + u64 max_idle_ns; + u32 maxadj; + u64 max_cycles; + const char *name; + struct list_head list; + int rating; + enum vdso_clock_mode vdso_clock_mode; + long unsigned int flags; + int (*enable)(struct clocksource *); + void (*disable)(struct clocksource *); + void (*suspend)(struct clocksource *); + void (*resume)(struct clocksource *); + void (*mark_unstable)(struct clocksource *); + void (*tick_stable)(struct clocksource *); + struct module *owner; +}; + +struct sched_shutdown { + unsigned int reason; +}; + +struct xenpf_settime32 { + uint32_t secs; + uint32_t nsecs; + uint64_t system_time; +}; + +struct xenpf_settime64 { + uint64_t secs; + uint32_t nsecs; + uint32_t mbz; + uint64_t system_time; +}; + +struct xenpf_add_memtype { + xen_pfn_t mfn; + uint64_t nr_mfns; + uint32_t type; + uint32_t handle; + uint32_t reg; +}; + +struct xenpf_del_memtype { + uint32_t handle; + uint32_t reg; +}; + +struct xenpf_read_memtype { + uint32_t reg; + xen_pfn_t mfn; + uint64_t nr_mfns; + uint32_t type; +}; + +struct xenpf_microcode_update { + __guest_handle_void data; + uint32_t length; +}; + +struct xenpf_platform_quirk { + uint32_t quirk_id; +}; + +struct xenpf_efi_time { + uint16_t year; + uint8_t month; + uint8_t day; + uint8_t hour; + uint8_t min; + uint8_t sec; + uint32_t ns; + int16_t tz; + uint8_t daylight; +}; + +struct xenpf_efi_guid { + uint32_t data1; + uint16_t data2; + uint16_t data3; + uint8_t data4[8]; +}; + +struct xenpf_efi_runtime_call { + uint32_t function; + uint32_t misc; + xen_ulong_t status; + union { + struct { + struct xenpf_efi_time time; + uint32_t resolution; + uint32_t accuracy; + } get_time; + struct xenpf_efi_time set_time; + struct xenpf_efi_time get_wakeup_time; + struct xenpf_efi_time set_wakeup_time; + struct { + __guest_handle_void name; + xen_ulong_t size; + __guest_handle_void data; + struct xenpf_efi_guid vendor_guid; + } get_variable; + struct { + __guest_handle_void name; + xen_ulong_t size; + __guest_handle_void data; + struct xenpf_efi_guid vendor_guid; + } set_variable; + struct { + xen_ulong_t size; + __guest_handle_void name; + struct xenpf_efi_guid vendor_guid; + } get_next_variable_name; + struct { + uint32_t attr; + uint64_t max_store_size; + uint64_t remain_store_size; + uint64_t max_size; + } query_variable_info; + struct { + __guest_handle_void capsule_header_array; + xen_ulong_t capsule_count; + uint64_t max_capsule_size; + uint32_t reset_type; + } query_capsule_capabilities; + struct { + __guest_handle_void capsule_header_array; + xen_ulong_t capsule_count; + uint64_t sg_list; + } update_capsule; + } u; +}; + +union xenpf_efi_info { + uint32_t version; + struct { + uint64_t addr; + uint32_t nent; + } cfg; + struct { + uint32_t revision; + uint32_t bufsz; + __guest_handle_void name; + } vendor; + struct { + uint64_t addr; + uint64_t size; + uint64_t attr; + uint32_t type; + } mem; +}; + +struct xenpf_firmware_info { + uint32_t type; + uint32_t index; + union { + struct { + uint8_t device; + uint8_t version; + uint16_t interface_support; + uint16_t legacy_max_cylinder; + uint8_t legacy_max_head; + uint8_t legacy_sectors_per_track; + __guest_handle_void edd_params; + } disk_info; + struct { + uint8_t device; + uint32_t mbr_signature; + } disk_mbr_signature; + struct { + uint8_t capabilities; + uint8_t edid_transfer_time; + __guest_handle_uchar edid; + 
} vbeddc_info; + union xenpf_efi_info efi_info; + uint8_t kbd_shift_flags; + } u; +}; + +struct xenpf_enter_acpi_sleep { + uint16_t val_a; + uint16_t val_b; + uint32_t sleep_state; + uint32_t flags; +}; + +struct xenpf_change_freq { + uint32_t flags; + uint32_t cpu; + uint64_t freq; +}; + +struct xenpf_getidletime { + __guest_handle_uchar cpumap_bitmap; + uint32_t cpumap_nr_cpus; + __guest_handle_uint64_t idletime; + uint64_t now; +}; + +struct xen_power_register { + uint32_t space_id; + uint32_t bit_width; + uint32_t bit_offset; + uint32_t access_size; + uint64_t address; +}; + +struct xen_processor_csd { + uint32_t domain; + uint32_t coord_type; + uint32_t num; +}; + +typedef struct { + union { + struct xen_processor_csd *p; + uint64_t q; + }; +} __guest_handle_xen_processor_csd; + +struct xen_processor_cx { + struct xen_power_register reg; + uint8_t type; + uint32_t latency; + uint32_t power; + uint32_t dpcnt; + __guest_handle_xen_processor_csd dp; +}; + +typedef struct { + union { + struct xen_processor_cx *p; + uint64_t q; + }; +} __guest_handle_xen_processor_cx; + +struct xen_processor_flags { + uint32_t bm_control: 1; + uint32_t bm_check: 1; + uint32_t has_cst: 1; + uint32_t power_setup_done: 1; + uint32_t bm_rld_set: 1; +}; + +struct xen_processor_power { + uint32_t count; + struct xen_processor_flags flags; + __guest_handle_xen_processor_cx states; +}; + +struct xen_pct_register { + uint8_t descriptor; + uint16_t length; + uint8_t space_id; + uint8_t bit_width; + uint8_t bit_offset; + uint8_t reserved; + uint64_t address; +}; + +struct xen_processor_px { + uint64_t core_frequency; + uint64_t power; + uint64_t transition_latency; + uint64_t bus_master_latency; + uint64_t control; + uint64_t status; +}; + +typedef struct { + union { + struct xen_processor_px *p; + uint64_t q; + }; +} __guest_handle_xen_processor_px; + +struct xen_psd_package { + uint64_t num_entries; + uint64_t revision; + uint64_t domain; + uint64_t coord_type; + uint64_t num_processors; +}; + +struct xen_processor_performance { + uint32_t flags; + uint32_t platform_limit; + struct xen_pct_register control_register; + struct xen_pct_register status_register; + uint32_t state_count; + __guest_handle_xen_processor_px states; + struct xen_psd_package domain_info; + uint32_t shared_type; +}; + +struct xenpf_set_processor_pminfo { + uint32_t id; + uint32_t type; + union { + struct xen_processor_power power; + struct xen_processor_performance perf; + __guest_handle_uint32_t pdc; + }; +}; + +struct xenpf_pcpuinfo { + uint32_t xen_cpuid; + uint32_t max_present; + uint32_t flags; + uint32_t apic_id; + uint32_t acpi_id; +}; + +struct xenpf_cpu_ol { + uint32_t cpuid; +}; + +struct xenpf_cpu_hotadd { + uint32_t apic_id; + uint32_t acpi_id; + uint32_t pxm; +}; + +struct xenpf_mem_hotadd { + uint64_t spfn; + uint64_t epfn; + uint32_t pxm; + uint32_t flags; +}; + +struct xenpf_core_parking { + uint32_t type; + uint32_t idle_nums; +}; + +struct xenpf_symdata { + uint32_t namelen; + uint32_t symnum; + __guest_handle_char name; + uint64_t address; + char type; +}; + +struct xen_platform_op { + uint32_t cmd; + uint32_t interface_version; + union { + struct xenpf_settime32 settime32; + struct xenpf_settime64 settime64; + struct xenpf_add_memtype add_memtype; + struct xenpf_del_memtype del_memtype; + struct xenpf_read_memtype read_memtype; + struct xenpf_microcode_update microcode; + struct xenpf_platform_quirk platform_quirk; + struct xenpf_efi_runtime_call efi_runtime_call; + struct xenpf_firmware_info firmware_info; + struct 
xenpf_enter_acpi_sleep enter_acpi_sleep; + struct xenpf_change_freq change_freq; + struct xenpf_getidletime getidletime; + struct xenpf_set_processor_pminfo set_pminfo; + struct xenpf_pcpuinfo pcpu_info; + struct xenpf_cpu_ol cpu_ol; + struct xenpf_cpu_hotadd cpu_add; + struct xenpf_mem_hotadd mem_add; + struct xenpf_core_parking core_parking; + struct xenpf_symdata symdata; + uint8_t pad[128]; + } u; +}; + +struct xen_memory_region { + long unsigned int start_pfn; + long unsigned int n_pfns; +}; + +struct grant_frames { + xen_pfn_t *pfn; + unsigned int count; + void *vaddr; +}; + +struct xen_hvm_param { + domid_t domid; + uint32_t index; + uint64_t value; +}; + +struct vcpu_register_vcpu_info { + uint64_t mfn; + uint32_t offset; + uint32_t rsvd; +}; + +struct xen_add_to_physmap { + domid_t domid; + uint16_t size; + unsigned int space; + xen_ulong_t idx; + xen_pfn_t gpfn; +}; + +struct xsd_errors { + int errnum; + const char *errstring; +}; + +struct tk_read_base { + struct clocksource *clock; + u64 mask; + u64 cycle_last; + u32 mult; + u32 shift; + u64 xtime_nsec; + ktime_t base; + u64 base_real; +}; + +struct timekeeper { + struct tk_read_base tkr_mono; + struct tk_read_base tkr_raw; + u64 xtime_sec; + long unsigned int ktime_sec; + struct timespec64 wall_to_monotonic; + ktime_t offs_real; + ktime_t offs_boot; + ktime_t offs_tai; + s32 tai_offset; + unsigned int clock_was_set_seq; + u8 cs_was_changed_seq; + ktime_t next_leap_ktime; + u64 raw_sec; + struct timespec64 monotonic_to_boot; + u64 cycle_interval; + u64 xtime_interval; + s64 xtime_remainder; + u64 raw_interval; + u64 ntp_tick; + s64 ntp_error; + u32 ntp_error_shift; + u32 ntp_err_mult; + u32 skip_second_overflow; +}; + +typedef uint16_t grant_status_t; + +typedef uint32_t grant_ref_t; + +typedef uint32_t grant_handle_t; + +struct gnttab_map_grant_ref { + uint64_t host_addr; + uint32_t flags; + grant_ref_t ref; + domid_t dom; + int16_t status; + grant_handle_t handle; + uint64_t dev_bus_addr; +}; + +struct gnttab_unmap_grant_ref { + uint64_t host_addr; + uint64_t dev_bus_addr; + grant_handle_t handle; + int16_t status; +}; + +struct xen_p2m_entry { + long unsigned int pfn; + long unsigned int mfn; + long unsigned int nr_pages; + struct rb_node rbnode_phys; +}; + +struct gnttab_cache_flush { + union { + uint64_t dev_bus_addr; + grant_ref_t ref; + } a; + uint16_t offset; + uint16_t length; + uint32_t op; +}; + +struct static_key_true { + struct static_key key; +}; + +enum tk_offsets { + TK_OFFS_REAL = 0, + TK_OFFS_BOOT = 1, + TK_OFFS_TAI = 2, + TK_OFFS_MAX = 3, +}; + +struct clone_args { + __u64 flags; + __u64 pidfd; + __u64 child_tid; + __u64 parent_tid; + __u64 exit_signal; + __u64 stack; + __u64 stack_size; + __u64 tls; + __u64 set_tid; + __u64 set_tid_size; + __u64 cgroup; +}; + +struct fdtable { + unsigned int max_fds; + struct file **fd; + long unsigned int *close_on_exec; + long unsigned int *open_fds; + long unsigned int *full_fds_bits; + struct callback_head rcu; +}; + +struct files_struct { + atomic_t count; + bool resize_in_progress; + wait_queue_head_t resize_wait; + struct fdtable *fdt; + struct fdtable fdtab; + long: 64; + long: 64; + long: 64; + long: 64; + spinlock_t file_lock; + unsigned int next_fd; + long unsigned int close_on_exec_init[1]; + long unsigned int open_fds_init[1]; + long unsigned int full_fds_bits_init[1]; + struct file *fd_array[64]; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct io_identity { + struct files_struct *files; + struct mm_struct *mm; + struct cgroup_subsys_state 
*blkcg_css; + const struct cred *creds; + struct nsproxy *nsproxy; + struct fs_struct *fs; + long unsigned int fsize; + kuid_t loginuid; + unsigned int sessionid; + refcount_t count; +}; + +struct io_uring_task { + struct xarray xa; + struct wait_queue_head wait; + struct file *last; + struct percpu_counter inflight; + struct io_identity __identity; + struct io_identity *identity; + atomic_t in_idle; + bool sqpoll; +}; + +struct robust_list { + struct robust_list *next; +}; + +struct robust_list_head { + struct robust_list list; + long int futex_offset; + struct robust_list *list_op_pending; +}; + +struct kernel_clone_args { + u64 flags; + int *pidfd; + int *child_tid; + int *parent_tid; + int exit_signal; + long unsigned int stack; + long unsigned int stack_size; + long unsigned int tls; + pid_t *set_tid; + size_t set_tid_size; + int cgroup; + struct cgroup *cgrp; + struct css_set *cset; +}; + +struct multiprocess_signals { + sigset_t signal; + struct hlist_node node; +}; + +typedef int (*proc_visitor)(struct task_struct *, void *); + +enum { + IOPRIO_CLASS_NONE = 0, + IOPRIO_CLASS_RT = 1, + IOPRIO_CLASS_BE = 2, + IOPRIO_CLASS_IDLE = 3, +}; + +enum { + FUTEX_STATE_OK = 0, + FUTEX_STATE_EXITING = 1, + FUTEX_STATE_DEAD = 2, +}; + +enum proc_hidepid { + HIDEPID_OFF = 0, + HIDEPID_NO_ACCESS = 1, + HIDEPID_INVISIBLE = 2, + HIDEPID_NOT_PTRACEABLE = 4, +}; + +enum proc_pidonly { + PROC_PIDONLY_OFF = 0, + PROC_PIDONLY_ON = 1, +}; + +struct proc_fs_info { + struct pid_namespace *pid_ns; + struct dentry *proc_self; + struct dentry *proc_thread_self; + kgid_t pid_gid; + enum proc_hidepid hide_pid; + enum proc_pidonly pidonly; +}; + +struct trace_event_raw_task_newtask { + struct trace_entry ent; + pid_t pid; + char comm[16]; + long unsigned int clone_flags; + short int oom_score_adj; + char __data[0]; +}; + +struct trace_event_raw_task_rename { + struct trace_entry ent; + pid_t pid; + char oldcomm[16]; + char newcomm[16]; + short int oom_score_adj; + char __data[0]; +}; + +struct trace_event_data_offsets_task_newtask {}; + +struct trace_event_data_offsets_task_rename {}; + +typedef void (*btf_trace_task_newtask)(void *, struct task_struct *, long unsigned int); + +typedef void (*btf_trace_task_rename)(void *, struct task_struct *, const char *); + +struct taint_flag { + char c_true; + char c_false; + bool module; +}; + +enum ftrace_dump_mode { + DUMP_NONE = 0, + DUMP_ALL = 1, + DUMP_ORIG = 2, +}; + +enum kmsg_dump_reason { + KMSG_DUMP_UNDEF = 0, + KMSG_DUMP_PANIC = 1, + KMSG_DUMP_OOPS = 2, + KMSG_DUMP_EMERG = 3, + KMSG_DUMP_SHUTDOWN = 4, + KMSG_DUMP_MAX = 5, +}; + +enum con_flush_mode { + CONSOLE_FLUSH_PENDING = 0, + CONSOLE_REPLAY_ALL = 1, +}; + +struct warn_args { + const char *fmt; + va_list args; +}; + +enum hk_flags { + HK_FLAG_TIMER = 1, + HK_FLAG_RCU = 2, + HK_FLAG_MISC = 4, + HK_FLAG_SCHED = 8, + HK_FLAG_TICK = 16, + HK_FLAG_DOMAIN = 32, + HK_FLAG_WQ = 64, + HK_FLAG_MANAGED_IRQ = 128, + HK_FLAG_KTHREAD = 256, +}; + +enum cpuhp_smt_control { + CPU_SMT_ENABLED = 0, + CPU_SMT_DISABLED = 1, + CPU_SMT_FORCE_DISABLED = 2, + CPU_SMT_NOT_SUPPORTED = 3, + CPU_SMT_NOT_IMPLEMENTED = 4, +}; + +struct smp_hotplug_thread { + struct task_struct **store; + struct list_head list; + int (*thread_should_run)(unsigned int); + void (*thread_fn)(unsigned int); + void (*create)(unsigned int); + void (*setup)(unsigned int); + void (*cleanup)(unsigned int, bool); + void (*park)(unsigned int); + void (*unpark)(unsigned int); + bool selfparking; + const char *thread_comm; +}; + +struct trace_event_raw_cpuhp_enter { + 
struct trace_entry ent; + unsigned int cpu; + int target; + int idx; + void *fun; + char __data[0]; +}; + +struct trace_event_raw_cpuhp_multi_enter { + struct trace_entry ent; + unsigned int cpu; + int target; + int idx; + void *fun; + char __data[0]; +}; + +struct trace_event_raw_cpuhp_exit { + struct trace_entry ent; + unsigned int cpu; + int state; + int idx; + int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_cpuhp_enter {}; + +struct trace_event_data_offsets_cpuhp_multi_enter {}; + +struct trace_event_data_offsets_cpuhp_exit {}; + +typedef void (*btf_trace_cpuhp_enter)(void *, unsigned int, int, int, int (*)(unsigned int)); + +typedef void (*btf_trace_cpuhp_multi_enter)(void *, unsigned int, int, int, int (*)(unsigned int, struct hlist_node *), struct hlist_node *); + +typedef void (*btf_trace_cpuhp_exit)(void *, unsigned int, int, int, int); + +struct cpuhp_cpu_state { + enum cpuhp_state state; + enum cpuhp_state target; + enum cpuhp_state fail; + struct task_struct *thread; + bool should_run; + bool rollback; + bool single; + bool bringup; + struct hlist_node *node; + struct hlist_node *last; + enum cpuhp_state cb_state; + int result; + struct completion done_up; + struct completion done_down; +}; + +struct cpuhp_step { + const char *name; + union { + int (*single)(unsigned int); + int (*multi)(unsigned int, struct hlist_node *); + } startup; + union { + int (*single)(unsigned int); + int (*multi)(unsigned int, struct hlist_node *); + } teardown; + struct hlist_head list; + bool cant_stop; + bool multi_instance; +}; + +enum cpu_mitigations { + CPU_MITIGATIONS_OFF = 0, + CPU_MITIGATIONS_AUTO = 1, + CPU_MITIGATIONS_AUTO_NOSMT = 2, +}; + +struct __kernel_old_timeval { + __kernel_long_t tv_sec; + __kernel_long_t tv_usec; +}; + +struct old_timeval32 { + old_time32_t tv_sec; + s32 tv_usec; +}; + +struct rusage { + struct __kernel_old_timeval ru_utime; + struct __kernel_old_timeval ru_stime; + __kernel_long_t ru_maxrss; + __kernel_long_t ru_ixrss; + __kernel_long_t ru_idrss; + __kernel_long_t ru_isrss; + __kernel_long_t ru_minflt; + __kernel_long_t ru_majflt; + __kernel_long_t ru_nswap; + __kernel_long_t ru_inblock; + __kernel_long_t ru_oublock; + __kernel_long_t ru_msgsnd; + __kernel_long_t ru_msgrcv; + __kernel_long_t ru_nsignals; + __kernel_long_t ru_nvcsw; + __kernel_long_t ru_nivcsw; +}; + +typedef u32 compat_uint_t; + +struct compat_rusage { + struct old_timeval32 ru_utime; + struct old_timeval32 ru_stime; + compat_long_t ru_maxrss; + compat_long_t ru_ixrss; + compat_long_t ru_idrss; + compat_long_t ru_isrss; + compat_long_t ru_minflt; + compat_long_t ru_majflt; + compat_long_t ru_nswap; + compat_long_t ru_inblock; + compat_long_t ru_oublock; + compat_long_t ru_msgsnd; + compat_long_t ru_msgrcv; + compat_long_t ru_nsignals; + compat_long_t ru_nvcsw; + compat_long_t ru_nivcsw; +}; + +struct waitid_info { + pid_t pid; + uid_t uid; + int status; + int cause; +}; + +struct wait_opts { + enum pid_type wo_type; + int wo_flags; + struct pid *wo_pid; + struct waitid_info *wo_info; + int wo_stat; + struct rusage *wo_rusage; + wait_queue_entry_t child_wait; + int notask_error; +}; + +typedef struct { + unsigned int __softirq_pending; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +} irq_cpustat_t; + +struct softirq_action { + void (*action)(struct softirq_action *); +}; + +struct tasklet_struct { + struct tasklet_struct *next; + long unsigned int state; + atomic_t count; + bool use_callback; + union { + void (*func)(long unsigned 
int); + void (*callback)(struct tasklet_struct *); + }; + long unsigned int data; +}; + +enum { + TASKLET_STATE_SCHED = 0, + TASKLET_STATE_RUN = 1, +}; + +struct kernel_stat { + long unsigned int irqs_sum; + unsigned int softirqs[10]; +}; + +struct trace_event_raw_irq_handler_entry { + struct trace_entry ent; + int irq; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_irq_handler_exit { + struct trace_entry ent; + int irq; + int ret; + char __data[0]; +}; + +struct trace_event_raw_softirq { + struct trace_entry ent; + unsigned int vec; + char __data[0]; +}; + +struct trace_event_data_offsets_irq_handler_entry { + u32 name; +}; + +struct trace_event_data_offsets_irq_handler_exit {}; + +struct trace_event_data_offsets_softirq {}; + +typedef void (*btf_trace_irq_handler_entry)(void *, int, struct irqaction *); + +typedef void (*btf_trace_irq_handler_exit)(void *, int, struct irqaction *, int); + +typedef void (*btf_trace_softirq_entry)(void *, unsigned int); + +typedef void (*btf_trace_softirq_exit)(void *, unsigned int); + +typedef void (*btf_trace_softirq_raise)(void *, unsigned int); + +struct tasklet_head { + struct tasklet_struct *head; + struct tasklet_struct **tail; +}; + +enum { + IORES_DESC_NONE = 0, + IORES_DESC_CRASH_KERNEL = 1, + IORES_DESC_ACPI_TABLES = 2, + IORES_DESC_ACPI_NV_STORAGE = 3, + IORES_DESC_PERSISTENT_MEMORY = 4, + IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5, + IORES_DESC_DEVICE_PRIVATE_MEMORY = 6, + IORES_DESC_RESERVED = 7, + IORES_DESC_SOFT_RESERVED = 8, +}; + +typedef void (*dr_release_t)(struct device *, void *); + +enum { + REGION_INTERSECTS = 0, + REGION_DISJOINT = 1, + REGION_MIXED = 2, +}; + +struct resource_constraint { + resource_size_t min; + resource_size_t max; + resource_size_t align; + resource_size_t (*alignf)(void *, const struct resource *, resource_size_t, resource_size_t); + void *alignf_data; +}; + +enum { + MAX_IORES_LEVEL = 5, +}; + +struct region_devres { + struct resource *parent; + resource_size_t start; + resource_size_t n; +}; + +struct dentry_stat_t { + long int nr_dentry; + long int nr_unused; + long int age_limit; + long int want_pages; + long int nr_negative; + long int dummy; +}; + +struct files_stat_struct { + long unsigned int nr_files; + long unsigned int nr_free_files; + long unsigned int max_files; +}; + +struct inodes_stat_t { + long int nr_inodes; + long int nr_unused; + long int dummy[5]; +}; + +enum sched_tunable_scaling { + SCHED_TUNABLESCALING_NONE = 0, + SCHED_TUNABLESCALING_LOG = 1, + SCHED_TUNABLESCALING_LINEAR = 2, + SCHED_TUNABLESCALING_END = 3, +}; + +enum sysctl_writes_mode { + SYSCTL_WRITES_LEGACY = 4294967295, + SYSCTL_WRITES_WARN = 0, + SYSCTL_WRITES_STRICT = 1, +}; + +struct do_proc_dointvec_minmax_conv_param { + int *min; + int *max; +}; + +struct do_proc_douintvec_minmax_conv_param { + unsigned int *min; + unsigned int *max; +}; + +struct __user_cap_header_struct { + __u32 version; + int pid; +}; + +typedef struct __user_cap_header_struct *cap_user_header_t; + +struct __user_cap_data_struct { + __u32 effective; + __u32 permitted; + __u32 inheritable; +}; + +typedef struct __user_cap_data_struct *cap_user_data_t; + +typedef struct siginfo siginfo_t; + +struct sigqueue { + struct list_head list; + int flags; + kernel_siginfo_t info; + struct user_struct *user; +}; + +struct ptrace_peeksiginfo_args { + __u64 off; + __u32 flags; + __s32 nr; +}; + +struct ptrace_syscall_info { + __u8 op; + __u8 pad[3]; + __u32 arch; + __u64 instruction_pointer; + __u64 stack_pointer; + union { + struct { + __u64 nr; 
+ __u64 args[6]; + } entry; + struct { + __s64 rval; + __u8 is_error; + } exit; + struct { + __u64 nr; + __u64 args[6]; + __u32 ret_data; + } seccomp; + }; +}; + +struct compat_iovec { + compat_uptr_t iov_base; + compat_size_t iov_len; +}; + +typedef struct compat_siginfo compat_siginfo_t; + +typedef long unsigned int old_sigset_t; + +typedef u32 compat_old_sigset_t; + +struct compat_sigaction { + compat_uptr_t sa_handler; + compat_ulong_t sa_flags; + compat_uptr_t sa_restorer; + compat_sigset_t sa_mask; +}; + +struct compat_old_sigaction { + compat_uptr_t sa_handler; + compat_old_sigset_t sa_mask; + compat_ulong_t sa_flags; + compat_uptr_t sa_restorer; +}; + +enum { + TRACE_SIGNAL_DELIVERED = 0, + TRACE_SIGNAL_IGNORED = 1, + TRACE_SIGNAL_ALREADY_PENDING = 2, + TRACE_SIGNAL_OVERFLOW_FAIL = 3, + TRACE_SIGNAL_LOSE_INFO = 4, +}; + +struct trace_event_raw_signal_generate { + struct trace_entry ent; + int sig; + int errno; + int code; + char comm[16]; + pid_t pid; + int group; + int result; + char __data[0]; +}; + +struct trace_event_raw_signal_deliver { + struct trace_entry ent; + int sig; + int errno; + int code; + long unsigned int sa_handler; + long unsigned int sa_flags; + char __data[0]; +}; + +struct trace_event_data_offsets_signal_generate {}; + +struct trace_event_data_offsets_signal_deliver {}; + +typedef void (*btf_trace_signal_generate)(void *, int, struct kernel_siginfo *, struct task_struct *, int, int); + +typedef void (*btf_trace_signal_deliver)(void *, int, struct kernel_siginfo *, struct k_sigaction *); + +typedef __kernel_clock_t clock_t; + +struct sysinfo { + __kernel_long_t uptime; + __kernel_ulong_t loads[3]; + __kernel_ulong_t totalram; + __kernel_ulong_t freeram; + __kernel_ulong_t sharedram; + __kernel_ulong_t bufferram; + __kernel_ulong_t totalswap; + __kernel_ulong_t freeswap; + __u16 procs; + __u16 pad; + __kernel_ulong_t totalhigh; + __kernel_ulong_t freehigh; + __u32 mem_unit; + char _f[0]; +}; + +struct rlimit64 { + __u64 rlim_cur; + __u64 rlim_max; +}; + +enum uts_proc { + UTS_PROC_OSTYPE = 0, + UTS_PROC_OSRELEASE = 1, + UTS_PROC_VERSION = 2, + UTS_PROC_HOSTNAME = 3, + UTS_PROC_DOMAINNAME = 4, +}; + +struct prctl_mm_map { + __u64 start_code; + __u64 end_code; + __u64 start_data; + __u64 end_data; + __u64 start_brk; + __u64 brk; + __u64 start_stack; + __u64 arg_start; + __u64 arg_end; + __u64 env_start; + __u64 env_end; + __u64 *auxv; + __u32 auxv_size; + __u32 exe_fd; +}; + +struct compat_tms { + compat_clock_t tms_utime; + compat_clock_t tms_stime; + compat_clock_t tms_cutime; + compat_clock_t tms_cstime; +}; + +struct compat_rlimit { + compat_ulong_t rlim_cur; + compat_ulong_t rlim_max; +}; + +struct tms { + __kernel_clock_t tms_utime; + __kernel_clock_t tms_stime; + __kernel_clock_t tms_cutime; + __kernel_clock_t tms_cstime; +}; + +struct getcpu_cache { + long unsigned int blob[16]; +}; + +struct compat_sysinfo { + s32 uptime; + u32 loads[3]; + u32 totalram; + u32 freeram; + u32 sharedram; + u32 bufferram; + u32 totalswap; + u32 freeswap; + u16 procs; + u16 pad; + u32 totalhigh; + u32 freehigh; + u32 mem_unit; + char _f[8]; +}; + +struct wq_flusher; + +struct worker; + +struct workqueue_attrs; + +struct pool_workqueue; + +struct wq_device; + +struct workqueue_struct { + struct list_head pwqs; + struct list_head list; + struct mutex mutex; + int work_color; + int flush_color; + atomic_t nr_pwqs_to_flush; + struct wq_flusher *first_flusher; + struct list_head flusher_queue; + struct list_head flusher_overflow; + struct list_head maydays; + struct worker 
*rescuer; + int nr_drainers; + int saved_max_active; + struct workqueue_attrs *unbound_attrs; + struct pool_workqueue *dfl_pwq; + struct wq_device *wq_dev; + char name[24]; + struct callback_head rcu; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + unsigned int flags; + struct pool_workqueue *cpu_pwqs; + struct pool_workqueue *numa_pwq_tbl[0]; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct workqueue_attrs { + int nice; + cpumask_var_t cpumask; + bool no_numa; +}; + +struct execute_work { + struct work_struct work; +}; + +enum { + WQ_UNBOUND = 2, + WQ_FREEZABLE = 4, + WQ_MEM_RECLAIM = 8, + WQ_HIGHPRI = 16, + WQ_CPU_INTENSIVE = 32, + WQ_SYSFS = 64, + WQ_POWER_EFFICIENT = 128, + __WQ_DRAINING = 65536, + __WQ_ORDERED = 131072, + __WQ_LEGACY = 262144, + __WQ_ORDERED_EXPLICIT = 524288, + WQ_MAX_ACTIVE = 512, + WQ_MAX_UNBOUND_PER_CPU = 4, + WQ_DFL_ACTIVE = 256, +}; + +typedef unsigned int xa_mark_t; + +enum xa_lock_type { + XA_LOCK_IRQ = 1, + XA_LOCK_BH = 2, +}; + +struct ida { + struct xarray xa; +}; + +struct __una_u32 { + u32 x; +}; + +struct worker_pool; + +struct worker { + union { + struct list_head entry; + struct hlist_node hentry; + }; + struct work_struct *current_work; + work_func_t current_func; + struct pool_workqueue *current_pwq; + struct list_head scheduled; + struct task_struct *task; + struct worker_pool *pool; + struct list_head node; + long unsigned int last_active; + unsigned int flags; + int id; + int sleeping; + char desc[24]; + struct workqueue_struct *rescue_wq; + work_func_t last_func; +}; + +struct pool_workqueue { + struct worker_pool *pool; + struct workqueue_struct *wq; + int work_color; + int flush_color; + int refcnt; + int nr_in_flight[15]; + int nr_active; + int max_active; + struct list_head delayed_works; + struct list_head pwqs_node; + struct list_head mayday_node; + struct work_struct unbound_release_work; + struct callback_head rcu; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct worker_pool { + raw_spinlock_t lock; + int cpu; + int node; + int id; + unsigned int flags; + long unsigned int watchdog_ts; + struct list_head worklist; + int nr_workers; + int nr_idle; + struct list_head idle_list; + struct timer_list idle_timer; + struct timer_list mayday_timer; + struct hlist_head busy_hash[64]; + struct worker *manager; + struct list_head workers; + struct completion *detach_completion; + struct ida worker_ida; + struct workqueue_attrs *attrs; + struct hlist_node hash_node; + int refcnt; + long: 32; + long: 64; + long: 64; + long: 64; + atomic_t nr_running; + struct callback_head rcu; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +enum { + POOL_MANAGER_ACTIVE = 1, + POOL_DISASSOCIATED = 4, + WORKER_DIE = 2, + WORKER_IDLE = 4, + WORKER_PREP = 8, + WORKER_CPU_INTENSIVE = 64, + WORKER_UNBOUND = 128, + WORKER_REBOUND = 256, + WORKER_NOT_RUNNING = 456, + NR_STD_WORKER_POOLS = 2, + UNBOUND_POOL_HASH_ORDER = 6, + BUSY_WORKER_HASH_ORDER = 6, + MAX_IDLE_WORKERS_RATIO = 4, + IDLE_WORKER_TIMEOUT = 300000, + MAYDAY_INITIAL_TIMEOUT = 10, + MAYDAY_INTERVAL = 100, + CREATE_COOLDOWN = 1000, + RESCUER_NICE_LEVEL = 4294967276, + HIGHPRI_NICE_LEVEL = 4294967276, + WQ_NAME_LEN = 24, +}; + +struct wq_flusher { + struct list_head list; + int flush_color; + struct completion done; +}; + +struct wq_device { + struct workqueue_struct *wq; + struct device dev; +}; + +struct trace_event_raw_workqueue_queue_work { + struct trace_entry ent; + void *work; + 
void *function; + void *workqueue; + unsigned int req_cpu; + unsigned int cpu; + char __data[0]; +}; + +struct trace_event_raw_workqueue_activate_work { + struct trace_entry ent; + void *work; + char __data[0]; +}; + +struct trace_event_raw_workqueue_execute_start { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct trace_event_raw_workqueue_execute_end { + struct trace_entry ent; + void *work; + void *function; + char __data[0]; +}; + +struct trace_event_data_offsets_workqueue_queue_work {}; + +struct trace_event_data_offsets_workqueue_activate_work {}; + +struct trace_event_data_offsets_workqueue_execute_start {}; + +struct trace_event_data_offsets_workqueue_execute_end {}; + +typedef void (*btf_trace_workqueue_queue_work)(void *, unsigned int, struct pool_workqueue *, struct work_struct *); + +typedef void (*btf_trace_workqueue_activate_work)(void *, struct work_struct *); + +typedef void (*btf_trace_workqueue_execute_start)(void *, struct work_struct *); + +typedef void (*btf_trace_workqueue_execute_end)(void *, struct work_struct *, work_func_t); + +struct wq_barrier { + struct work_struct work; + struct completion done; + struct task_struct *task; +}; + +struct cwt_wait { + wait_queue_entry_t wait; + struct work_struct *work; +}; + +struct apply_wqattrs_ctx { + struct workqueue_struct *wq; + struct workqueue_attrs *attrs; + struct list_head list; + struct pool_workqueue *dfl_pwq; + struct pool_workqueue *pwq_tbl[0]; +}; + +struct work_for_cpu { + struct work_struct work; + long int (*fn)(void *); + void *arg; + long int ret; +}; + +typedef void (*task_work_func_t)(struct callback_head *); + +enum task_work_notify_mode { + TWA_NONE = 0, + TWA_RESUME = 1, + TWA_SIGNAL = 2, +}; + +enum { + KERNEL_PARAM_OPS_FL_NOARG = 1, +}; + +enum { + KERNEL_PARAM_FL_UNSAFE = 1, + KERNEL_PARAM_FL_HWPARAM = 2, +}; + +struct param_attribute { + struct module_attribute mattr; + const struct kernel_param *param; +}; + +struct module_param_attrs { + unsigned int num; + struct attribute_group grp; + struct param_attribute attrs[0]; +}; + +struct module_version_attribute { + struct module_attribute mattr; + const char *module_name; + const char *version; +}; + +enum lockdown_reason { + LOCKDOWN_NONE = 0, + LOCKDOWN_MODULE_SIGNATURE = 1, + LOCKDOWN_DEV_MEM = 2, + LOCKDOWN_EFI_TEST = 3, + LOCKDOWN_KEXEC = 4, + LOCKDOWN_HIBERNATION = 5, + LOCKDOWN_PCI_ACCESS = 6, + LOCKDOWN_IOPORT = 7, + LOCKDOWN_MSR = 8, + LOCKDOWN_ACPI_TABLES = 9, + LOCKDOWN_PCMCIA_CIS = 10, + LOCKDOWN_TIOCSSERIAL = 11, + LOCKDOWN_MODULE_PARAMETERS = 12, + LOCKDOWN_MMIOTRACE = 13, + LOCKDOWN_DEBUGFS = 14, + LOCKDOWN_XMON_WR = 15, + LOCKDOWN_INTEGRITY_MAX = 16, + LOCKDOWN_KCORE = 17, + LOCKDOWN_KPROBES = 18, + LOCKDOWN_BPF_READ = 19, + LOCKDOWN_PERF = 20, + LOCKDOWN_TRACEFS = 21, + LOCKDOWN_XMON_RW = 22, + LOCKDOWN_CONFIDENTIALITY_MAX = 23, +}; + +struct kmalloced_param { + struct list_head list; + char val[0]; +}; + +struct sched_param { + int sched_priority; +}; + +enum { + __PERCPU_REF_ATOMIC = 1, + __PERCPU_REF_DEAD = 2, + __PERCPU_REF_ATOMIC_DEAD = 3, + __PERCPU_REF_FLAG_BITS = 2, +}; + +struct kthread_work; + +typedef void (*kthread_work_func_t)(struct kthread_work *); + +struct kthread_worker; + +struct kthread_work { + struct list_head node; + kthread_work_func_t func; + struct kthread_worker *worker; + int canceling; +}; + +enum { + KTW_FREEZABLE = 1, +}; + +struct kthread_worker { + unsigned int flags; + raw_spinlock_t lock; + struct list_head work_list; + struct list_head delayed_work_list; + 
struct task_struct *task; + struct kthread_work *current_work; +}; + +struct kthread_delayed_work { + struct kthread_work work; + struct timer_list timer; +}; + +enum { + CSS_NO_REF = 1, + CSS_ONLINE = 2, + CSS_RELEASED = 4, + CSS_VISIBLE = 8, + CSS_DYING = 16, +}; + +struct kthread_create_info { + int (*threadfn)(void *); + void *data; + int node; + struct task_struct *result; + struct completion *done; + struct list_head list; +}; + +struct kthread { + long unsigned int flags; + unsigned int cpu; + int (*threadfn)(void *); + void *data; + mm_segment_t oldfs; + struct completion parked; + struct completion exited; + struct cgroup_subsys_state *blkcg_css; +}; + +enum KTHREAD_BITS { + KTHREAD_IS_PER_CPU = 0, + KTHREAD_SHOULD_STOP = 1, + KTHREAD_SHOULD_PARK = 2, +}; + +struct kthread_flush_work { + struct kthread_work work; + struct completion done; +}; + +struct pt_regs___2; + +struct ipc_ids { + int in_use; + short unsigned int seq; + struct rw_semaphore rwsem; + struct idr ipcs_idr; + int max_idx; + int last_idx; + int next_id; + struct rhashtable key_ht; +}; + +struct ipc_namespace { + refcount_t count; + struct ipc_ids ids[3]; + int sem_ctls[4]; + int used_sems; + unsigned int msg_ctlmax; + unsigned int msg_ctlmnb; + unsigned int msg_ctlmni; + atomic_t msg_bytes; + atomic_t msg_hdrs; + size_t shm_ctlmax; + size_t shm_ctlall; + long unsigned int shm_tot; + int shm_ctlmni; + int shm_rmid_forced; + struct notifier_block ipcns_nb; + struct vfsmount *mq_mnt; + unsigned int mq_queues_count; + unsigned int mq_queues_max; + unsigned int mq_msg_max; + unsigned int mq_msgsize_max; + unsigned int mq_msg_default; + unsigned int mq_msgsize_default; + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct llist_node mnt_llist; + struct ns_common ns; +}; + +struct srcu_notifier_head { + struct mutex mutex; + struct srcu_struct srcu; + struct notifier_block *head; +}; + +enum what { + PROC_EVENT_NONE = 0, + PROC_EVENT_FORK = 1, + PROC_EVENT_EXEC = 2, + PROC_EVENT_UID = 4, + PROC_EVENT_GID = 64, + PROC_EVENT_SID = 128, + PROC_EVENT_PTRACE = 256, + PROC_EVENT_COMM = 512, + PROC_EVENT_COREDUMP = 1073741824, + PROC_EVENT_EXIT = 2147483648, +}; + +enum reboot_type { + BOOT_TRIPLE = 116, + BOOT_KBD = 107, + BOOT_BIOS = 98, + BOOT_ACPI = 97, + BOOT_EFI = 101, + BOOT_CF9_FORCE = 112, + BOOT_CF9_SAFE = 113, +}; + +typedef u64 async_cookie_t; + +typedef void (*async_func_t)(void *, async_cookie_t); + +struct async_domain { + struct list_head pending; + unsigned int registered: 1; +}; + +struct async_entry { + struct list_head domain_list; + struct list_head global_list; + struct work_struct work; + async_cookie_t cookie; + async_func_t func; + void *data; + struct async_domain *domain; +}; + +struct smpboot_thread_data { + unsigned int cpu; + unsigned int status; + struct smp_hotplug_thread *ht; +}; + +enum { + HP_THREAD_NONE = 0, + HP_THREAD_ACTIVE = 1, + HP_THREAD_PARKED = 2, +}; + +struct umd_info { + const char *driver_name; + struct file *pipe_to_umh; + struct file *pipe_from_umh; + struct path wd; + struct pid *tgid; +}; + +struct pin_cookie {}; + +enum { + CSD_FLAG_LOCK = 1, + IRQ_WORK_PENDING = 1, + IRQ_WORK_BUSY = 2, + IRQ_WORK_LAZY = 4, + IRQ_WORK_HARD_IRQ = 8, + IRQ_WORK_CLAIMED = 3, + CSD_TYPE_ASYNC = 0, + CSD_TYPE_SYNC = 16, + CSD_TYPE_IRQ_WORK = 32, + CSD_TYPE_TTWU = 48, + CSD_FLAG_TYPE_MASK = 240, +}; + +typedef struct __call_single_data call_single_data_t; + +struct dl_bw { + raw_spinlock_t lock; + u64 bw; + u64 total_bw; +}; + +struct cpudl_item; + +struct cpudl { + raw_spinlock_t 
lock; + int size; + cpumask_var_t free_cpus; + struct cpudl_item *elements; +}; + +struct cpupri_vec { + atomic_t count; + cpumask_var_t mask; +}; + +struct cpupri { + struct cpupri_vec pri_to_cpu[102]; + int *cpu_to_pri; +}; + +struct perf_domain; + +struct root_domain { + atomic_t refcount; + atomic_t rto_count; + struct callback_head rcu; + cpumask_var_t span; + cpumask_var_t online; + int overload; + int overutilized; + cpumask_var_t dlo_mask; + atomic_t dlo_count; + struct dl_bw dl_bw; + struct cpudl cpudl; + struct irq_work rto_push_work; + raw_spinlock_t rto_lock; + int rto_loop; + int rto_cpu; + atomic_t rto_loop_next; + atomic_t rto_loop_start; + cpumask_var_t rto_mask; + struct cpupri cpupri; + long unsigned int max_cpu_capacity; + struct perf_domain *pd; +}; + +struct cfs_rq { + struct load_weight load; + unsigned int nr_running; + unsigned int h_nr_running; + unsigned int idle_h_nr_running; + u64 exec_clock; + u64 min_vruntime; + struct rb_root_cached tasks_timeline; + struct sched_entity *curr; + struct sched_entity *next; + struct sched_entity *last; + struct sched_entity *skip; + unsigned int nr_spread_over; + long: 32; + long: 64; + long: 64; + long: 64; + struct sched_avg avg; + struct { + raw_spinlock_t lock; + int nr; + long unsigned int load_avg; + long unsigned int util_avg; + long unsigned int runnable_avg; + long: 64; + long: 64; + long: 64; + long: 64; + } removed; + long unsigned int tg_load_avg_contrib; + long int propagate; + long int prop_runnable_sum; + long unsigned int h_load; + u64 last_h_load_update; + struct sched_entity *h_load_next; + struct rq *rq; + int on_list; + struct list_head leaf_cfs_rq_list; + struct task_group *tg; + int runtime_enabled; + s64 runtime_remaining; + u64 throttled_clock; + u64 throttled_clock_task; + u64 throttled_clock_task_time; + int throttled; + int throttle_count; + struct list_head throttled_list; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct cfs_bandwidth { + raw_spinlock_t lock; + ktime_t period; + u64 quota; + u64 runtime; + s64 hierarchical_quota; + u8 idle; + u8 period_active; + u8 slack_started; + struct hrtimer period_timer; + struct hrtimer slack_timer; + struct list_head throttled_cfs_rq; + int nr_periods; + int nr_throttled; + u64 throttled_time; +}; + +struct task_group { + struct cgroup_subsys_state css; + struct sched_entity **se; + struct cfs_rq **cfs_rq; + long unsigned int shares; + long: 64; + long: 64; + long: 64; + long: 64; + atomic_long_t load_avg; + struct callback_head rcu; + struct list_head list; + struct task_group *parent; + struct list_head siblings; + struct list_head children; + struct autogroup *autogroup; + struct cfs_bandwidth cfs_bandwidth; + unsigned int uclamp_pct[2]; + struct uclamp_se uclamp_req[2]; + struct uclamp_se uclamp[2]; +}; + +enum { + SD_BALANCE_NEWIDLE = 1, + SD_BALANCE_EXEC = 2, + SD_BALANCE_FORK = 4, + SD_BALANCE_WAKE = 8, + SD_WAKE_AFFINE = 16, + SD_ASYM_CPUCAPACITY = 32, + SD_SHARE_CPUCAPACITY = 64, + SD_SHARE_PKG_RESOURCES = 128, + SD_SERIALIZE = 256, + SD_ASYM_PACKING = 512, + SD_PREFER_SIBLING = 1024, + SD_OVERLAP = 2048, + SD_NUMA = 4096, +}; + +struct sched_domain_shared { + atomic_t ref; + atomic_t nr_busy_cpus; + int has_idle_cores; +}; + +struct sched_group; + +struct sched_domain { + struct sched_domain *parent; + struct sched_domain *child; + struct sched_group *groups; + long unsigned int min_interval; + long unsigned int max_interval; + unsigned int busy_factor; + unsigned int imbalance_pct; + unsigned int cache_nice_tries; + int 
nohz_idle; + int flags; + int level; + long unsigned int last_balance; + unsigned int balance_interval; + unsigned int nr_balance_failed; + u64 max_newidle_lb_cost; + long unsigned int next_decay_max_lb_cost; + u64 avg_scan_cost; + unsigned int lb_count[3]; + unsigned int lb_failed[3]; + unsigned int lb_balanced[3]; + unsigned int lb_imbalance[3]; + unsigned int lb_gained[3]; + unsigned int lb_hot_gained[3]; + unsigned int lb_nobusyg[3]; + unsigned int lb_nobusyq[3]; + unsigned int alb_count; + unsigned int alb_failed; + unsigned int alb_pushed; + unsigned int sbe_count; + unsigned int sbe_balanced; + unsigned int sbe_pushed; + unsigned int sbf_count; + unsigned int sbf_balanced; + unsigned int sbf_pushed; + unsigned int ttwu_wake_remote; + unsigned int ttwu_move_affine; + unsigned int ttwu_move_balance; + char *name; + union { + void *private; + struct callback_head rcu; + }; + struct sched_domain_shared *shared; + unsigned int span_weight; + long unsigned int span[0]; +}; + +struct sched_group_capacity; + +struct sched_group { + struct sched_group *next; + atomic_t ref; + unsigned int group_weight; + struct sched_group_capacity *sgc; + int asym_prefer_cpu; + long unsigned int cpumask[0]; +}; + +struct sched_group_capacity { + atomic_t ref; + long unsigned int capacity; + long unsigned int min_capacity; + long unsigned int max_capacity; + long unsigned int next_update; + int imbalance; + int id; + long unsigned int cpumask[0]; +}; + +struct autogroup { + struct kref kref; + struct task_group *tg; + struct rw_semaphore lock; + long unsigned int id; + int nice; +}; + +struct kernel_cpustat { + u64 cpustat[10]; +}; + +enum { + CFTYPE_ONLY_ON_ROOT = 1, + CFTYPE_NOT_ON_ROOT = 2, + CFTYPE_NS_DELEGATABLE = 4, + CFTYPE_NO_PREFIX = 8, + CFTYPE_WORLD_WRITABLE = 16, + CFTYPE_DEBUG = 32, + __CFTYPE_ONLY_ON_DFL = 65536, + __CFTYPE_NOT_ON_DFL = 131072, +}; + +struct css_task_iter { + struct cgroup_subsys *ss; + unsigned int flags; + struct list_head *cset_pos; + struct list_head *cset_head; + struct list_head *tcset_pos; + struct list_head *tcset_head; + struct list_head *task_pos; + struct list_head *cur_tasks_head; + struct css_set *cur_cset; + struct css_set *cur_dcset; + struct task_struct *cur_task; + struct list_head iters_node; +}; + +struct trace_event_raw_sched_kthread_stop { + struct trace_entry ent; + char comm[16]; + pid_t pid; + char __data[0]; +}; + +struct trace_event_raw_sched_kthread_stop_ret { + struct trace_entry ent; + int ret; + char __data[0]; +}; + +struct trace_event_raw_sched_wakeup_template { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int prio; + int success; + int target_cpu; + char __data[0]; +}; + +struct trace_event_raw_sched_switch { + struct trace_entry ent; + char prev_comm[16]; + pid_t prev_pid; + int prev_prio; + long int prev_state; + char next_comm[16]; + pid_t next_pid; + int next_prio; + char __data[0]; +}; + +struct trace_event_raw_sched_migrate_task { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int prio; + int orig_cpu; + int dest_cpu; + char __data[0]; +}; + +struct trace_event_raw_sched_process_template { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int prio; + char __data[0]; +}; + +struct trace_event_raw_sched_process_wait { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int prio; + char __data[0]; +}; + +struct trace_event_raw_sched_process_fork { + struct trace_entry ent; + char parent_comm[16]; + pid_t parent_pid; + char child_comm[16]; + pid_t child_pid; + char __data[0]; +}; + +struct 
trace_event_raw_sched_process_exec { + struct trace_entry ent; + u32 __data_loc_filename; + pid_t pid; + pid_t old_pid; + char __data[0]; +}; + +struct trace_event_raw_sched_stat_template { + struct trace_entry ent; + char comm[16]; + pid_t pid; + u64 delay; + char __data[0]; +}; + +struct trace_event_raw_sched_stat_runtime { + struct trace_entry ent; + char comm[16]; + pid_t pid; + u64 runtime; + u64 vruntime; + char __data[0]; +}; + +struct trace_event_raw_sched_pi_setprio { + struct trace_entry ent; + char comm[16]; + pid_t pid; + int oldprio; + int newprio; + char __data[0]; +}; + +struct trace_event_raw_sched_process_hang { + struct trace_entry ent; + char comm[16]; + pid_t pid; + char __data[0]; +}; + +struct trace_event_raw_sched_move_numa { + struct trace_entry ent; + pid_t pid; + pid_t tgid; + pid_t ngid; + int src_cpu; + int src_nid; + int dst_cpu; + int dst_nid; + char __data[0]; +}; + +struct trace_event_raw_sched_numa_pair_template { + struct trace_entry ent; + pid_t src_pid; + pid_t src_tgid; + pid_t src_ngid; + int src_cpu; + int src_nid; + pid_t dst_pid; + pid_t dst_tgid; + pid_t dst_ngid; + int dst_cpu; + int dst_nid; + char __data[0]; +}; + +struct trace_event_raw_sched_wake_idle_without_ipi { + struct trace_entry ent; + int cpu; + char __data[0]; +}; + +struct trace_event_data_offsets_sched_kthread_stop {}; + +struct trace_event_data_offsets_sched_kthread_stop_ret {}; + +struct trace_event_data_offsets_sched_wakeup_template {}; + +struct trace_event_data_offsets_sched_switch {}; + +struct trace_event_data_offsets_sched_migrate_task {}; + +struct trace_event_data_offsets_sched_process_template {}; + +struct trace_event_data_offsets_sched_process_wait {}; + +struct trace_event_data_offsets_sched_process_fork {}; + +struct trace_event_data_offsets_sched_process_exec { + u32 filename; +}; + +struct trace_event_data_offsets_sched_stat_template {}; + +struct trace_event_data_offsets_sched_stat_runtime {}; + +struct trace_event_data_offsets_sched_pi_setprio {}; + +struct trace_event_data_offsets_sched_process_hang {}; + +struct trace_event_data_offsets_sched_move_numa {}; + +struct trace_event_data_offsets_sched_numa_pair_template {}; + +struct trace_event_data_offsets_sched_wake_idle_without_ipi {}; + +typedef void (*btf_trace_sched_kthread_stop)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_kthread_stop_ret)(void *, int); + +typedef void (*btf_trace_sched_waking)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_wakeup)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_wakeup_new)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_switch)(void *, bool, struct task_struct *, struct task_struct *); + +typedef void (*btf_trace_sched_migrate_task)(void *, struct task_struct *, int); + +typedef void (*btf_trace_sched_process_free)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_process_exit)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_wait_task)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_process_wait)(void *, struct pid *); + +typedef void (*btf_trace_sched_process_fork)(void *, struct task_struct *, struct task_struct *); + +typedef void (*btf_trace_sched_process_exec)(void *, struct task_struct *, pid_t, struct linux_binprm *); + +typedef void (*btf_trace_sched_stat_wait)(void *, struct task_struct *, u64); + +typedef void (*btf_trace_sched_stat_sleep)(void *, struct task_struct *, u64); + +typedef void (*btf_trace_sched_stat_iowait)(void *, struct 
task_struct *, u64); + +typedef void (*btf_trace_sched_stat_blocked)(void *, struct task_struct *, u64); + +typedef void (*btf_trace_sched_stat_runtime)(void *, struct task_struct *, u64, u64); + +typedef void (*btf_trace_sched_pi_setprio)(void *, struct task_struct *, struct task_struct *); + +typedef void (*btf_trace_sched_process_hang)(void *, struct task_struct *); + +typedef void (*btf_trace_sched_move_numa)(void *, struct task_struct *, int, int); + +typedef void (*btf_trace_sched_stick_numa)(void *, struct task_struct *, int, struct task_struct *, int); + +typedef void (*btf_trace_sched_swap_numa)(void *, struct task_struct *, int, struct task_struct *, int); + +typedef void (*btf_trace_sched_wake_idle_without_ipi)(void *, int); + +enum { + MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = 1, + MEMBARRIER_STATE_PRIVATE_EXPEDITED = 2, + MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = 4, + MEMBARRIER_STATE_GLOBAL_EXPEDITED = 8, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = 16, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = 32, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY = 64, + MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ = 128, +}; + +struct wake_q_head { + struct wake_q_node *first; + struct wake_q_node **lastp; +}; + +struct sched_attr { + __u32 size; + __u32 sched_policy; + __u64 sched_flags; + __s32 sched_nice; + __u32 sched_priority; + __u64 sched_runtime; + __u64 sched_deadline; + __u64 sched_period; + __u32 sched_util_min; + __u32 sched_util_max; +}; + +struct cpuidle_state_usage { + long long unsigned int disable; + long long unsigned int usage; + u64 time_ns; + long long unsigned int above; + long long unsigned int below; + long long unsigned int rejected; + long long unsigned int s2idle_usage; + long long unsigned int s2idle_time; +}; + +struct cpuidle_device; + +struct cpuidle_driver; + +struct cpuidle_state { + char name[16]; + char desc[32]; + u64 exit_latency_ns; + u64 target_residency_ns; + unsigned int flags; + unsigned int exit_latency; + int power_usage; + unsigned int target_residency; + int (*enter)(struct cpuidle_device *, struct cpuidle_driver *, int); + int (*enter_dead)(struct cpuidle_device *, int); + int (*enter_s2idle)(struct cpuidle_device *, struct cpuidle_driver *, int); +}; + +struct cpuidle_state_kobj; + +struct cpuidle_driver_kobj; + +struct cpuidle_device_kobj; + +struct cpuidle_device { + unsigned int registered: 1; + unsigned int enabled: 1; + unsigned int poll_time_limit: 1; + unsigned int cpu; + ktime_t next_hrtimer; + int last_state_idx; + u64 last_residency_ns; + u64 poll_limit_ns; + u64 forced_idle_latency_limit_ns; + struct cpuidle_state_usage states_usage[10]; + struct cpuidle_state_kobj *kobjs[10]; + struct cpuidle_driver_kobj *kobj_driver; + struct cpuidle_device_kobj *kobj_dev; + struct list_head device_list; +}; + +struct cpuidle_driver { + const char *name; + struct module *owner; + unsigned int bctimer: 1; + struct cpuidle_state states[10]; + int state_count; + int safe_state_index; + struct cpumask *cpumask; + const char *governor; +}; + +typedef int (*cpu_stop_fn_t)(void *); + +struct cpu_stop_done; + +struct cpu_stop_work { + struct list_head list; + cpu_stop_fn_t fn; + void *arg; + struct cpu_stop_done *done; +}; + +struct cpudl_item { + u64 dl; + int cpu; + int idx; +}; + +struct rt_prio_array { + long unsigned int bitmap[2]; + struct list_head queue[100]; +}; + +struct rt_bandwidth { + raw_spinlock_t rt_runtime_lock; + ktime_t rt_period; + u64 rt_runtime; + struct hrtimer rt_period_timer; + unsigned int rt_period_active; +}; + 
+struct dl_bandwidth { + raw_spinlock_t dl_runtime_lock; + u64 dl_runtime; + u64 dl_period; +}; + +typedef int (*tg_visitor)(struct task_group *, void *); + +struct uclamp_bucket { + long unsigned int value: 11; + long unsigned int tasks: 53; +}; + +struct uclamp_rq { + unsigned int value; + struct uclamp_bucket bucket[5]; +}; + +struct rt_rq { + struct rt_prio_array active; + unsigned int rt_nr_running; + unsigned int rr_nr_running; + struct { + int curr; + int next; + } highest_prio; + long unsigned int rt_nr_migratory; + long unsigned int rt_nr_total; + int overloaded; + struct plist_head pushable_tasks; + int rt_queued; + int rt_throttled; + u64 rt_time; + u64 rt_runtime; + raw_spinlock_t rt_runtime_lock; +}; + +struct dl_rq { + struct rb_root_cached root; + long unsigned int dl_nr_running; + struct { + u64 curr; + u64 next; + } earliest_dl; + long unsigned int dl_nr_migratory; + int overloaded; + struct rb_root_cached pushable_dl_tasks_root; + u64 running_bw; + u64 this_bw; + u64 extra_bw; + u64 bw_ratio; +}; + +struct rq { + raw_spinlock_t lock; + unsigned int nr_running; + unsigned int nr_numa_running; + unsigned int nr_preferred_running; + unsigned int numa_migrate_on; + long unsigned int last_blocked_load_update_tick; + unsigned int has_blocked_load; + long: 32; + long: 64; + long: 64; + long: 64; + call_single_data_t nohz_csd; + unsigned int nohz_tick_stopped; + atomic_t nohz_flags; + unsigned int ttwu_pending; + u64 nr_switches; + long: 64; + struct uclamp_rq uclamp[2]; + unsigned int uclamp_flags; + long: 32; + long: 64; + long: 64; + long: 64; + struct cfs_rq cfs; + struct rt_rq rt; + struct dl_rq dl; + struct list_head leaf_cfs_rq_list; + struct list_head *tmp_alone_branch; + long unsigned int nr_uninterruptible; + struct task_struct *curr; + struct task_struct *idle; + struct task_struct *stop; + long unsigned int next_balance; + struct mm_struct *prev_mm; + unsigned int clock_update_flags; + u64 clock; + long: 64; + long: 64; + long: 64; + u64 clock_task; + u64 clock_pelt; + long unsigned int lost_idle_time; + atomic_t nr_iowait; + int membarrier_state; + struct root_domain *rd; + struct sched_domain *sd; + long unsigned int cpu_capacity; + long unsigned int cpu_capacity_orig; + struct callback_head *balance_callback; + unsigned char nohz_idle_balance; + unsigned char idle_balance; + long unsigned int misfit_task_load; + int active_balance; + int push_cpu; + struct cpu_stop_work active_balance_work; + int cpu; + int online; + struct list_head cfs_tasks; + long: 64; + long: 64; + long: 64; + long: 64; + struct sched_avg avg_rt; + struct sched_avg avg_dl; + struct sched_avg avg_irq; + struct sched_avg avg_thermal; + u64 idle_stamp; + u64 avg_idle; + u64 max_idle_balance_cost; + u64 prev_steal_time; + u64 prev_steal_time_rq; + long unsigned int calc_load_update; + long int calc_load_active; + long: 64; + call_single_data_t hrtick_csd; + struct hrtimer hrtick_timer; + struct sched_info rq_sched_info; + long long unsigned int rq_cpu_time; + unsigned int yld_count; + unsigned int sched_count; + unsigned int sched_goidle; + unsigned int ttwu_count; + unsigned int ttwu_local; + struct cpuidle_state *idle_state; + long: 64; + long: 64; + long: 64; +}; + +struct perf_domain { + struct em_perf_domain *em_pd; + struct perf_domain *next; + struct callback_head rcu; +}; + +struct rq_flags { + long unsigned int flags; + struct pin_cookie cookie; + unsigned int clock_update_flags; +}; + +enum { + __SCHED_FEAT_GENTLE_FAIR_SLEEPERS = 0, + __SCHED_FEAT_START_DEBIT = 1, + 
__SCHED_FEAT_NEXT_BUDDY = 2, + __SCHED_FEAT_LAST_BUDDY = 3, + __SCHED_FEAT_CACHE_HOT_BUDDY = 4, + __SCHED_FEAT_WAKEUP_PREEMPTION = 5, + __SCHED_FEAT_HRTICK = 6, + __SCHED_FEAT_DOUBLE_TICK = 7, + __SCHED_FEAT_NONTASK_CAPACITY = 8, + __SCHED_FEAT_TTWU_QUEUE = 9, + __SCHED_FEAT_SIS_AVG_CPU = 10, + __SCHED_FEAT_SIS_PROP = 11, + __SCHED_FEAT_WARN_DOUBLE_CLOCK = 12, + __SCHED_FEAT_RT_PUSH_IPI = 13, + __SCHED_FEAT_RT_RUNTIME_SHARE = 14, + __SCHED_FEAT_LB_MIN = 15, + __SCHED_FEAT_ATTACH_AGE_LOAD = 16, + __SCHED_FEAT_WA_IDLE = 17, + __SCHED_FEAT_WA_WEIGHT = 18, + __SCHED_FEAT_WA_BIAS = 19, + __SCHED_FEAT_UTIL_EST = 20, + __SCHED_FEAT_UTIL_EST_FASTUP = 21, + __SCHED_FEAT_NR = 22, +}; + +struct migration_arg { + struct task_struct *task; + int dest_cpu; +}; + +struct migration_swap_arg { + struct task_struct *src_task; + struct task_struct *dst_task; + int src_cpu; + int dst_cpu; +}; + +struct uclamp_request { + s64 percent; + u64 util; + int ret; +}; + +struct cfs_schedulable_data { + struct task_group *tg; + u64 period; + u64 quota; +}; + +enum { + cpuset = 0, + possible = 1, + fail = 2, +}; + +enum s2idle_states { + S2IDLE_STATE_NONE = 0, + S2IDLE_STATE_ENTER = 1, + S2IDLE_STATE_WAKE = 2, +}; + +struct idle_timer { + struct hrtimer timer; + int done; +}; + +typedef void (*rcu_callback_t)(struct callback_head *); + +struct numa_group { + refcount_t refcount; + spinlock_t lock; + int nr_tasks; + pid_t gid; + int active_nodes; + struct callback_head rcu; + long unsigned int total_faults; + long unsigned int max_faults_cpu; + long unsigned int *faults_cpu; + long unsigned int faults[0]; +}; + +struct update_util_data { + void (*func)(struct update_util_data *, u64, unsigned int); +}; + +enum numa_topology_type { + NUMA_DIRECT = 0, + NUMA_GLUELESS_MESH = 1, + NUMA_BACKPLANE = 2, +}; + +enum numa_faults_stats { + NUMA_MEM = 0, + NUMA_CPU = 1, + NUMA_MEMBUF = 2, + NUMA_CPUBUF = 3, +}; + +enum schedutil_type { + FREQUENCY_UTIL = 0, + ENERGY_UTIL = 1, +}; + +enum numa_type { + node_has_spare = 0, + node_fully_busy = 1, + node_overloaded = 2, +}; + +struct numa_stats { + long unsigned int load; + long unsigned int runnable; + long unsigned int util; + long unsigned int compute_capacity; + unsigned int nr_running; + unsigned int weight; + enum numa_type node_type; + int idle_cpu; +}; + +struct task_numa_env { + struct task_struct *p; + int src_cpu; + int src_nid; + int dst_cpu; + int dst_nid; + struct numa_stats src_stats; + struct numa_stats dst_stats; + int imbalance_pct; + int dist; + struct task_struct *best_task; + long int best_imp; + int best_cpu; +}; + +enum fbq_type { + regular = 0, + remote = 1, + all = 2, +}; + +enum group_type { + group_has_spare = 0, + group_fully_busy = 1, + group_misfit_task = 2, + group_asym_packing = 3, + group_imbalanced = 4, + group_overloaded = 5, +}; + +enum migration_type { + migrate_load = 0, + migrate_util = 1, + migrate_task = 2, + migrate_misfit = 3, +}; + +struct lb_env { + struct sched_domain *sd; + struct rq *src_rq; + int src_cpu; + int dst_cpu; + struct rq *dst_rq; + struct cpumask *dst_grpmask; + int new_dst_cpu; + enum cpu_idle_type idle; + long int imbalance; + struct cpumask *cpus; + unsigned int flags; + unsigned int loop; + unsigned int loop_break; + unsigned int loop_max; + enum fbq_type fbq_type; + enum migration_type migration_type; + struct list_head tasks; +}; + +struct sg_lb_stats { + long unsigned int avg_load; + long unsigned int group_load; + long unsigned int group_capacity; + long unsigned int group_util; + long unsigned int group_runnable; + 
unsigned int sum_nr_running; + unsigned int sum_h_nr_running; + unsigned int idle_cpus; + unsigned int group_weight; + enum group_type group_type; + unsigned int group_asym_packing; + long unsigned int group_misfit_task_load; + unsigned int nr_numa_running; + unsigned int nr_preferred_running; +}; + +struct sd_lb_stats { + struct sched_group *busiest; + struct sched_group *local; + long unsigned int total_load; + long unsigned int total_capacity; + long unsigned int avg_load; + unsigned int prefer_sibling; + struct sg_lb_stats busiest_stat; + struct sg_lb_stats local_stat; +}; + +typedef struct rt_rq *rt_rq_iter_t; + +struct wait_bit_key { + void *flags; + int bit_nr; + long unsigned int timeout; +}; + +struct wait_bit_queue_entry { + struct wait_bit_key key; + struct wait_queue_entry wq_entry; +}; + +typedef int wait_bit_action_f(struct wait_bit_key *, int); + +struct swait_queue { + struct task_struct *task; + struct list_head task_list; +}; + +struct sd_flag_debug { + unsigned int meta_flags; + char *name; +}; + +struct sched_domain_attr { + int relax_domain_level; +}; + +typedef const struct cpumask * (*sched_domain_mask_f)(int); + +typedef int (*sched_domain_flags_f)(); + +struct sd_data { + struct sched_domain **sd; + struct sched_domain_shared **sds; + struct sched_group **sg; + struct sched_group_capacity **sgc; +}; + +struct sched_domain_topology_level { + sched_domain_mask_f mask; + sched_domain_flags_f sd_flags; + int flags; + int numa_level; + struct sd_data data; + char *name; +}; + +struct s_data { + struct sched_domain **sd; + struct root_domain *rd; +}; + +enum s_alloc { + sa_rootdomain = 0, + sa_sd = 1, + sa_sd_storage = 2, + sa_none = 3, +}; + +enum cpuacct_stat_index { + CPUACCT_STAT_USER = 0, + CPUACCT_STAT_SYSTEM = 1, + CPUACCT_STAT_NSTATS = 2, +}; + +struct cpuacct_usage { + u64 usages[2]; +}; + +struct cpuacct { + struct cgroup_subsys_state css; + struct cpuacct_usage *cpuusage; + struct kernel_cpustat *cpustat; +}; + +struct gov_attr_set { + struct kobject kobj; + struct list_head policy_list; + struct mutex update_lock; + int usage_count; +}; + +struct governor_attr { + struct attribute attr; + ssize_t (*show)(struct gov_attr_set *, char *); + ssize_t (*store)(struct gov_attr_set *, const char *, size_t); +}; + +struct sugov_tunables { + struct gov_attr_set attr_set; + unsigned int rate_limit_us; +}; + +struct sugov_policy { + struct cpufreq_policy *policy; + struct sugov_tunables *tunables; + struct list_head tunables_hook; + raw_spinlock_t update_lock; + u64 last_freq_update_time; + s64 freq_update_delay_ns; + unsigned int next_freq; + unsigned int cached_raw_freq; + struct irq_work irq_work; + struct kthread_work work; + struct mutex work_lock; + struct kthread_worker worker; + struct task_struct *thread; + bool work_in_progress; + bool limits_changed; + bool need_freq_update; +}; + +struct sugov_cpu { + struct update_util_data update_util; + struct sugov_policy *sg_policy; + unsigned int cpu; + bool iowait_boost_pending; + unsigned int iowait_boost; + u64 last_update; + long unsigned int bw_dl; + long unsigned int max; + long unsigned int saved_idle_calls; +}; + +enum { + MEMBARRIER_FLAG_SYNC_CORE = 1, + MEMBARRIER_FLAG_RSEQ = 2, +}; + +enum membarrier_cmd { + MEMBARRIER_CMD_QUERY = 0, + MEMBARRIER_CMD_GLOBAL = 1, + MEMBARRIER_CMD_GLOBAL_EXPEDITED = 2, + MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED = 4, + MEMBARRIER_CMD_PRIVATE_EXPEDITED = 8, + MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = 16, + MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE = 32, + 
MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE = 64, + MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ = 128, + MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ = 256, + MEMBARRIER_CMD_SHARED = 1, +}; + +enum membarrier_cmd_flag { + MEMBARRIER_CMD_FLAG_CPU = 1, +}; + +struct proc_ops { + unsigned int proc_flags; + int (*proc_open)(struct inode *, struct file *); + ssize_t (*proc_read)(struct file *, char *, size_t, loff_t *); + ssize_t (*proc_read_iter)(struct kiocb *, struct iov_iter *); + ssize_t (*proc_write)(struct file *, const char *, size_t, loff_t *); + loff_t (*proc_lseek)(struct file *, loff_t, int); + int (*proc_release)(struct inode *, struct file *); + __poll_t (*proc_poll)(struct file *, struct poll_table_struct *); + long int (*proc_ioctl)(struct file *, unsigned int, long unsigned int); + long int (*proc_compat_ioctl)(struct file *, unsigned int, long unsigned int); + int (*proc_mmap)(struct file *, struct vm_area_struct *); + long unsigned int (*proc_get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); +}; + +enum psi_res { + PSI_IO = 0, + PSI_MEM = 1, + PSI_CPU = 2, + NR_PSI_RESOURCES = 3, +}; + +struct psi_window { + u64 size; + u64 start_time; + u64 start_value; + u64 prev_growth; +}; + +struct psi_trigger { + enum psi_states state; + u64 threshold; + struct list_head node; + struct psi_group *group; + wait_queue_head_t event_wait; + int event; + struct psi_window win; + u64 last_event_time; + struct kref refcount; +}; + +struct ww_acquire_ctx; + +struct ww_mutex { + struct mutex base; + struct ww_acquire_ctx *ctx; +}; + +struct ww_acquire_ctx { + struct task_struct *task; + long unsigned int stamp; + unsigned int acquired; + short unsigned int wounded; + short unsigned int is_wait_die; +}; + +struct mutex_waiter { + struct list_head list; + struct task_struct *task; + struct ww_acquire_ctx *ww_ctx; +}; + +enum mutex_trylock_recursive_enum { + MUTEX_TRYLOCK_FAILED = 0, + MUTEX_TRYLOCK_SUCCESS = 1, + MUTEX_TRYLOCK_RECURSIVE = 2, +}; + +struct semaphore { + raw_spinlock_t lock; + unsigned int count; + struct list_head wait_list; +}; + +struct semaphore_waiter { + struct list_head list; + struct task_struct *task; + bool up; +}; + +enum rwsem_waiter_type { + RWSEM_WAITING_FOR_WRITE = 0, + RWSEM_WAITING_FOR_READ = 1, +}; + +struct rwsem_waiter { + struct list_head list; + struct task_struct *task; + enum rwsem_waiter_type type; + long unsigned int timeout; + long unsigned int last_rowner; +}; + +enum rwsem_wake_type { + RWSEM_WAKE_ANY = 0, + RWSEM_WAKE_READERS = 1, + RWSEM_WAKE_READ_OWNED = 2, +}; + +enum writer_wait_state { + WRITER_NOT_FIRST = 0, + WRITER_FIRST = 1, + WRITER_HANDOFF = 2, +}; + +enum owner_state { + OWNER_NULL = 1, + OWNER_WRITER = 2, + OWNER_READER = 4, + OWNER_NONSPINNABLE = 8, +}; + +struct optimistic_spin_node { + struct optimistic_spin_node *next; + struct optimistic_spin_node *prev; + int locked; + int cpu; +}; + +struct mcs_spinlock { + struct mcs_spinlock *next; + int locked; + int count; +}; + +struct qnode { + struct mcs_spinlock mcs; +}; + +struct hrtimer_sleeper { + struct hrtimer timer; + struct task_struct *task; +}; + +struct rt_mutex; + +struct rt_mutex_waiter { + struct rb_node tree_entry; + struct rb_node pi_tree_entry; + struct task_struct *task; + struct rt_mutex *lock; + int prio; + u64 deadline; +}; + +struct rt_mutex { + raw_spinlock_t wait_lock; + struct rb_root_cached waiters; + struct task_struct *owner; +}; + +enum rtmutex_chainwalk { + RT_MUTEX_MIN_CHAINWALK = 0, + 
RT_MUTEX_FULL_CHAINWALK = 1, +}; + +struct task_struct___2; + +struct pm_qos_request { + struct plist_node node; + struct pm_qos_constraints *qos; +}; + +enum pm_qos_req_action { + PM_QOS_ADD_REQ = 0, + PM_QOS_UPDATE_REQ = 1, + PM_QOS_REMOVE_REQ = 2, +}; + +typedef int suspend_state_t; + +enum suspend_stat_step { + SUSPEND_FREEZE = 1, + SUSPEND_PREPARE = 2, + SUSPEND_SUSPEND = 3, + SUSPEND_SUSPEND_LATE = 4, + SUSPEND_SUSPEND_NOIRQ = 5, + SUSPEND_RESUME_NOIRQ = 6, + SUSPEND_RESUME_EARLY = 7, + SUSPEND_RESUME = 8, +}; + +struct suspend_stats { + int success; + int fail; + int failed_freeze; + int failed_prepare; + int failed_suspend; + int failed_suspend_late; + int failed_suspend_noirq; + int failed_resume; + int failed_resume_early; + int failed_resume_noirq; + int last_failed_dev; + char failed_devs[80]; + int last_failed_errno; + int errno[2]; + int last_failed_step; + enum suspend_stat_step failed_steps[2]; +}; + +enum { + TEST_NONE = 0, + TEST_CORE = 1, + TEST_CPUS = 2, + TEST_PLATFORM = 3, + TEST_DEVICES = 4, + TEST_FREEZER = 5, + __TEST_AFTER_LAST = 6, +}; + +struct pm_vt_switch { + struct list_head head; + struct device *dev; + bool required; +}; + +struct platform_suspend_ops { + int (*valid)(suspend_state_t); + int (*begin)(suspend_state_t); + int (*prepare)(); + int (*prepare_late)(); + int (*enter)(suspend_state_t); + void (*wake)(); + void (*finish)(); + bool (*suspend_again)(); + void (*end)(); + void (*recover)(); +}; + +struct platform_s2idle_ops { + int (*begin)(); + int (*prepare)(); + int (*prepare_late)(); + bool (*wake)(); + void (*restore_early)(); + void (*restore)(); + void (*end)(); +}; + +struct platform_hibernation_ops { + int (*begin)(pm_message_t); + void (*end)(); + int (*pre_snapshot)(); + void (*finish)(); + int (*prepare)(); + int (*enter)(); + void (*leave)(); + int (*pre_restore)(); + void (*restore_cleanup)(); + void (*recover)(); +}; + +enum { + HIBERNATION_INVALID = 0, + HIBERNATION_PLATFORM = 1, + HIBERNATION_SHUTDOWN = 2, + HIBERNATION_REBOOT = 3, + HIBERNATION_SUSPEND = 4, + HIBERNATION_TEST_RESUME = 5, + __HIBERNATION_AFTER_LAST = 6, +}; + +struct swsusp_info { + struct new_utsname uts; + u32 version_code; + long unsigned int num_physpages; + int cpus; + long unsigned int image_pages; + long unsigned int pages; + long unsigned int size; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; 
+ long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + 
long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct snapshot_handle { + unsigned int cur; + void *buffer; + int sync_read; +}; + +struct linked_page { + struct linked_page *next; + char data[4088]; +}; + +struct chain_allocator { + struct linked_page *chain; + unsigned int used_space; + gfp_t gfp_mask; + int safe_needed; +}; + +struct rtree_node { + struct list_head list; + long unsigned int *data; +}; + +struct mem_zone_bm_rtree { + struct list_head list; + struct list_head nodes; + struct list_head leaves; + long unsigned int start_pfn; + long unsigned int end_pfn; + struct rtree_node *rtree; + int levels; + unsigned int blocks; +}; + +struct bm_position { + struct mem_zone_bm_rtree *zone; + struct rtree_node *node; + long unsigned int node_pfn; + int node_bit; +}; + +struct memory_bitmap { + struct list_head zones; + struct linked_page *p_list; + struct bm_position cur; +}; + +struct mem_extent { + struct list_head hook; + long unsigned int start; + long unsigned int end; +}; + +struct nosave_region { + struct list_head list; + long unsigned int start_pfn; + long unsigned int end_pfn; +}; + +enum { + BIO_NO_PAGE_REF = 0, + BIO_CLONED = 1, + BIO_BOUNCED = 2, + BIO_WORKINGSET = 3, + BIO_QUIET = 4, + BIO_CHAIN = 5, + BIO_REFFED = 6, + BIO_THROTTLED = 7, + BIO_TRACE_COMPLETION = 8, + BIO_CGROUP_ACCT = 9, + BIO_TRACKED = 10, + BIO_FLAG_LAST = 11, +}; + +enum req_opf { + REQ_OP_READ = 0, + REQ_OP_WRITE = 1, + REQ_OP_FLUSH = 2, + REQ_OP_DISCARD = 3, + REQ_OP_SECURE_ERASE = 5, + REQ_OP_WRITE_SAME = 7, + REQ_OP_WRITE_ZEROES = 9, + REQ_OP_ZONE_OPEN = 10, + REQ_OP_ZONE_CLOSE = 11, + REQ_OP_ZONE_FINISH = 12, + REQ_OP_ZONE_APPEND = 13, + REQ_OP_ZONE_RESET = 15, + REQ_OP_ZONE_RESET_ALL = 17, + REQ_OP_SCSI_IN = 32, + REQ_OP_SCSI_OUT = 33, + REQ_OP_DRV_IN = 34, + REQ_OP_DRV_OUT = 35, + REQ_OP_LAST = 36, +}; + +enum req_flag_bits { + __REQ_FAILFAST_DEV = 8, + __REQ_FAILFAST_TRANSPORT = 9, + __REQ_FAILFAST_DRIVER = 10, + __REQ_SYNC = 11, + __REQ_META = 12, + __REQ_PRIO = 13, + __REQ_NOMERGE = 14, + __REQ_IDLE = 15, + __REQ_INTEGRITY = 16, + __REQ_FUA = 17, + __REQ_PREFLUSH = 18, + __REQ_RAHEAD = 19, + __REQ_BACKGROUND = 20, + __REQ_NOWAIT = 21, + __REQ_CGROUP_PUNT = 22, + __REQ_NOUNMAP = 23, + __REQ_HIPRI = 24, + __REQ_DRV = 25, + __REQ_SWAP = 26, + __REQ_NR_BITS = 27, +}; + +struct swap_map_page { + sector_t entries[511]; + sector_t next_swap; +}; + +struct swap_map_page_list { + struct swap_map_page *map; + struct swap_map_page_list *next; +}; + +struct swap_map_handle { + struct swap_map_page *cur; + struct swap_map_page_list *maps; + sector_t cur_swap; + sector_t first_sector; + unsigned int k; + long unsigned int reqd_free_pages; + u32 crc32; +}; + +struct swsusp_header { + char reserved[4060]; + u32 crc32; + sector_t image; + unsigned int flags; + char orig_sig[10]; + char sig[10]; +}; + +struct swsusp_extent { + struct rb_node node; + long unsigned int 
start; + long unsigned int end; +}; + +struct hib_bio_batch { + atomic_t count; + wait_queue_head_t wait; + blk_status_t error; + struct blk_plug plug; +}; + +struct crc_data { + struct task_struct *thr; + atomic_t ready; + atomic_t stop; + unsigned int run_threads; + wait_queue_head_t go; + wait_queue_head_t done; + u32 *crc32; + size_t *unc_len[3]; + unsigned char *unc[3]; +}; + +struct cmp_data { + struct task_struct *thr; + atomic_t ready; + atomic_t stop; + int ret; + wait_queue_head_t go; + wait_queue_head_t done; + size_t unc_len; + size_t cmp_len; + unsigned char unc[131072]; + unsigned char cmp[143360]; + unsigned char wrk[16384]; +}; + +struct dec_data { + struct task_struct *thr; + atomic_t ready; + atomic_t stop; + int ret; + wait_queue_head_t go; + wait_queue_head_t done; + size_t unc_len; + size_t cmp_len; + unsigned char unc[131072]; + unsigned char cmp[143360]; +}; + +typedef s64 compat_loff_t; + +struct resume_swap_area { + __kernel_loff_t offset; + __u32 dev; +} __attribute__((packed)); + +struct snapshot_data { + struct snapshot_handle handle; + int swap; + int mode; + bool frozen; + bool ready; + bool platform_support; + bool free_bitmaps; + dev_t dev; +}; + +struct compat_resume_swap_area { + compat_loff_t offset; + u32 dev; +} __attribute__((packed)); + +struct sysrq_key_op { + void (* const handler)(int); + const char * const help_msg; + const char * const action_msg; + const int enable_mask; +}; + +struct em_data_callback { + int (*active_power)(long unsigned int *, long unsigned int *, struct device *); +}; + +struct dev_printk_info { + char subsystem[16]; + char device[48]; +}; + +struct console { + char name[16]; + void (*write)(struct console *, const char *, unsigned int); + int (*read)(struct console *, char *, unsigned int); + struct tty_driver * (*device)(struct console *, int *); + void (*unblank)(); + int (*setup)(struct console *, char *); + int (*exit)(struct console *); + int (*match)(struct console *, char *, int, char *); + short int flags; + short int index; + int cflag; + void *data; + struct console *next; +}; + +struct kmsg_dumper { + struct list_head list; + void (*dump)(struct kmsg_dumper *, enum kmsg_dump_reason); + enum kmsg_dump_reason max_reason; + bool active; + bool registered; + u32 cur_idx; + u32 next_idx; + u64 cur_seq; + u64 next_seq; +}; + +struct trace_event_raw_console { + struct trace_entry ent; + u32 __data_loc_msg; + char __data[0]; +}; + +struct trace_event_data_offsets_console { + u32 msg; +}; + +typedef void (*btf_trace_console)(void *, const char *, size_t); + +struct printk_info { + u64 seq; + u64 ts_nsec; + u16 text_len; + u8 facility; + u8 flags: 5; + u8 level: 3; + u32 caller_id; + struct dev_printk_info dev_info; +}; + +struct printk_record { + struct printk_info *info; + char *text_buf; + unsigned int text_buf_size; +}; + +struct prb_data_blk_lpos { + long unsigned int begin; + long unsigned int next; +}; + +struct prb_desc { + atomic_long_t state_var; + struct prb_data_blk_lpos text_blk_lpos; +}; + +struct prb_data_ring { + unsigned int size_bits; + char *data; + atomic_long_t head_lpos; + atomic_long_t tail_lpos; +}; + +struct prb_desc_ring { + unsigned int count_bits; + struct prb_desc *descs; + struct printk_info *infos; + atomic_long_t head_id; + atomic_long_t tail_id; +}; + +struct printk_ringbuffer { + struct prb_desc_ring desc_ring; + struct prb_data_ring text_data_ring; + atomic_long_t fail; +}; + +struct prb_reserved_entry { + struct printk_ringbuffer *rb; + long unsigned int irqflags; + long unsigned int id; 
+ unsigned int text_space; +}; + +enum desc_state { + desc_miss = 4294967295, + desc_reserved = 0, + desc_committed = 1, + desc_finalized = 2, + desc_reusable = 3, +}; + +struct console_cmdline { + char name[16]; + int index; + bool user_specified; + char *options; + char *brl_options; +}; + +enum devkmsg_log_bits { + __DEVKMSG_LOG_BIT_ON = 0, + __DEVKMSG_LOG_BIT_OFF = 1, + __DEVKMSG_LOG_BIT_LOCK = 2, +}; + +enum devkmsg_log_masks { + DEVKMSG_LOG_MASK_ON = 1, + DEVKMSG_LOG_MASK_OFF = 2, + DEVKMSG_LOG_MASK_LOCK = 4, +}; + +enum con_msg_format_flags { + MSG_FORMAT_DEFAULT = 0, + MSG_FORMAT_SYSLOG = 1, +}; + +enum log_flags { + LOG_NEWLINE = 2, + LOG_CONT = 8, +}; + +struct devkmsg_user { + u64 seq; + struct ratelimit_state rs; + struct mutex lock; + char buf[8192]; + struct printk_info info; + char text_buf[8192]; + struct printk_record record; +}; + +struct printk_safe_seq_buf { + atomic_t len; + atomic_t message_lost; + struct irq_work work; + unsigned char buffer[8160]; +}; + +struct prb_data_block { + long unsigned int id; + char data[0]; +}; + +enum { + IRQS_AUTODETECT = 1, + IRQS_SPURIOUS_DISABLED = 2, + IRQS_POLL_INPROGRESS = 8, + IRQS_ONESHOT = 32, + IRQS_REPLAY = 64, + IRQS_WAITING = 128, + IRQS_PENDING = 512, + IRQS_SUSPENDED = 2048, + IRQS_TIMINGS = 4096, + IRQS_NMI = 8192, +}; + +enum { + _IRQ_DEFAULT_INIT_FLAGS = 0, + _IRQ_PER_CPU = 512, + _IRQ_LEVEL = 256, + _IRQ_NOPROBE = 1024, + _IRQ_NOREQUEST = 2048, + _IRQ_NOTHREAD = 65536, + _IRQ_NOAUTOEN = 4096, + _IRQ_MOVE_PCNTXT = 16384, + _IRQ_NO_BALANCING = 8192, + _IRQ_NESTED_THREAD = 32768, + _IRQ_PER_CPU_DEVID = 131072, + _IRQ_IS_POLLED = 262144, + _IRQ_DISABLE_UNLAZY = 524288, + _IRQ_HIDDEN = 1048576, + _IRQF_MODIFY_MASK = 2096911, +}; + +enum { + IRQTF_RUNTHREAD = 0, + IRQTF_WARNED = 1, + IRQTF_AFFINITY = 2, + IRQTF_FORCED_THREAD = 3, +}; + +enum { + IRQ_SET_MASK_OK = 0, + IRQ_SET_MASK_OK_NOCOPY = 1, + IRQ_SET_MASK_OK_DONE = 2, +}; + +enum { + IRQCHIP_SET_TYPE_MASKED = 1, + IRQCHIP_EOI_IF_HANDLED = 2, + IRQCHIP_MASK_ON_SUSPEND = 4, + IRQCHIP_ONOFFLINE_ENABLED = 8, + IRQCHIP_SKIP_SET_WAKE = 16, + IRQCHIP_ONESHOT_SAFE = 32, + IRQCHIP_EOI_THREADED = 64, + IRQCHIP_SUPPORTS_LEVEL_MSI = 128, + IRQCHIP_SUPPORTS_NMI = 256, + IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND = 512, +}; + +enum { + IRQC_IS_HARDIRQ = 0, + IRQC_IS_NESTED = 1, +}; + +enum { + IRQ_STARTUP_NORMAL = 0, + IRQ_STARTUP_MANAGED = 1, + IRQ_STARTUP_ABORT = 2, +}; + +struct irq_devres { + unsigned int irq; + void *dev_id; +}; + +struct irq_desc_devres { + unsigned int from; + unsigned int cnt; +}; + +struct irq_generic_chip_devres { + struct irq_chip_generic *gc; + u32 msk; + unsigned int clr; + unsigned int set; +}; + +enum { + IRQ_DOMAIN_FLAG_HIERARCHY = 1, + IRQ_DOMAIN_NAME_ALLOCATED = 2, + IRQ_DOMAIN_FLAG_IPI_PER_CPU = 4, + IRQ_DOMAIN_FLAG_IPI_SINGLE = 8, + IRQ_DOMAIN_FLAG_MSI = 16, + IRQ_DOMAIN_FLAG_MSI_REMAP = 32, + IRQ_DOMAIN_MSI_NOMASK_QUIRK = 64, + IRQ_DOMAIN_FLAG_NONCORE = 65536, +}; + +enum { + IRQCHIP_FWNODE_REAL = 0, + IRQCHIP_FWNODE_NAMED = 1, + IRQCHIP_FWNODE_NAMED_ID = 2, +}; + +struct irqchip_fwid { + struct fwnode_handle fwnode; + unsigned int type; + char *name; + phys_addr_t *pa; +}; + +enum { + AFFINITY = 0, + AFFINITY_LIST = 1, + EFFECTIVE = 2, + EFFECTIVE_LIST = 3, +}; + +struct msi_alloc_info { + struct msi_desc *desc; + irq_hw_number_t hwirq; + union { + long unsigned int ul; + void *ptr; + } scratchpad[2]; +}; + +typedef struct msi_alloc_info msi_alloc_info_t; + +struct msi_domain_info; + +struct msi_domain_ops { + irq_hw_number_t (*get_hwirq)(struct 
msi_domain_info *, msi_alloc_info_t *); + int (*msi_init)(struct irq_domain *, struct msi_domain_info *, unsigned int, irq_hw_number_t, msi_alloc_info_t *); + void (*msi_free)(struct irq_domain *, struct msi_domain_info *, unsigned int); + int (*msi_check)(struct irq_domain *, struct msi_domain_info *, struct device *); + int (*msi_prepare)(struct irq_domain *, struct device *, int, msi_alloc_info_t *); + void (*msi_finish)(msi_alloc_info_t *, int); + void (*set_desc)(msi_alloc_info_t *, struct msi_desc *); + int (*handle_error)(struct irq_domain *, struct msi_desc *, int); + int (*domain_alloc_irqs)(struct irq_domain *, struct device *, int); + void (*domain_free_irqs)(struct irq_domain *, struct device *); +}; + +struct msi_domain_info { + u32 flags; + struct msi_domain_ops *ops; + struct irq_chip *chip; + void *chip_data; + irq_flow_handler_t handler; + void *handler_data; + const char *handler_name; + void *data; +}; + +enum { + MSI_FLAG_USE_DEF_DOM_OPS = 1, + MSI_FLAG_USE_DEF_CHIP_OPS = 2, + MSI_FLAG_MULTI_PCI_MSI = 4, + MSI_FLAG_PCI_MSIX = 8, + MSI_FLAG_ACTIVATE_EARLY = 16, + MSI_FLAG_MUST_REACTIVATE = 32, + MSI_FLAG_LEVEL_CAPABLE = 64, +}; + +struct irq_affinity { + unsigned int pre_vectors; + unsigned int post_vectors; + unsigned int nr_sets; + unsigned int set_size[4]; + void (*calc_sets)(struct irq_affinity *, unsigned int); + void *priv; +}; + +struct node_vectors { + unsigned int id; + union { + unsigned int nvectors; + unsigned int ncpus; + }; +}; + +typedef void (*call_rcu_func_t)(struct callback_head *, rcu_callback_t); + +struct rcu_synchronize { + struct callback_head head; + struct completion completion; +}; + +struct trace_event_raw_rcu_utilization { + struct trace_entry ent; + const char *s; + char __data[0]; +}; + +struct trace_event_data_offsets_rcu_utilization {}; + +typedef void (*btf_trace_rcu_utilization)(void *, const char *); + +struct rcu_tasks; + +typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *); + +typedef void (*pregp_func_t)(); + +typedef void (*pertask_func_t)(struct task_struct *, struct list_head *); + +typedef void (*postscan_func_t)(struct list_head *); + +typedef void (*holdouts_func_t)(struct list_head *, bool, bool *); + +typedef void (*postgp_func_t)(struct rcu_tasks *); + +struct rcu_tasks { + struct callback_head *cbs_head; + struct callback_head **cbs_tail; + struct wait_queue_head cbs_wq; + raw_spinlock_t cbs_lock; + int gp_state; + int gp_sleep; + int init_fract; + long unsigned int gp_jiffies; + long unsigned int gp_start; + long unsigned int n_gps; + long unsigned int n_ipis; + long unsigned int n_ipis_fails; + struct task_struct *kthread_ptr; + rcu_tasks_gp_func_t gp_func; + pregp_func_t pregp_func; + pertask_func_t pertask_func; + postscan_func_t postscan_func; + holdouts_func_t holdouts_func; + postgp_func_t postgp_func; + call_rcu_func_t call_func; + char *name; + char *kname; +}; + +enum { + GP_IDLE = 0, + GP_ENTER = 1, + GP_PASSED = 2, + GP_EXIT = 3, + GP_REPLAY = 4, +}; + +struct rcu_cblist { + struct callback_head *head; + struct callback_head **tail; + long int len; +}; + +enum rcutorture_type { + RCU_FLAVOR = 0, + RCU_TASKS_FLAVOR = 1, + RCU_TASKS_RUDE_FLAVOR = 2, + RCU_TASKS_TRACING_FLAVOR = 3, + RCU_TRIVIAL_FLAVOR = 4, + SRCU_FLAVOR = 5, + INVALID_RCU_FLAVOR = 6, +}; + +enum tick_dep_bits { + TICK_DEP_BIT_POSIX_TIMER = 0, + TICK_DEP_BIT_PERF_EVENTS = 1, + TICK_DEP_BIT_SCHED = 2, + TICK_DEP_BIT_CLOCK_UNSTABLE = 3, + TICK_DEP_BIT_RCU = 4, + TICK_DEP_BIT_RCU_EXP = 5, +}; + +struct rcu_exp_work { + long unsigned int rew_s; + 
struct work_struct rew_work; +}; + +struct rcu_node { + raw_spinlock_t lock; + long unsigned int gp_seq; + long unsigned int gp_seq_needed; + long unsigned int completedqs; + long unsigned int qsmask; + long unsigned int rcu_gp_init_mask; + long unsigned int qsmaskinit; + long unsigned int qsmaskinitnext; + long unsigned int expmask; + long unsigned int expmaskinit; + long unsigned int expmaskinitnext; + long unsigned int cbovldmask; + long unsigned int ffmask; + long unsigned int grpmask; + int grplo; + int grphi; + u8 grpnum; + u8 level; + bool wait_blkd_tasks; + struct rcu_node *parent; + struct list_head blkd_tasks; + struct list_head *gp_tasks; + struct list_head *exp_tasks; + struct list_head *boost_tasks; + struct rt_mutex boost_mtx; + long unsigned int boost_time; + struct task_struct *boost_kthread_task; + unsigned int boost_kthread_status; + long: 32; + long: 64; + long: 64; + long: 64; + raw_spinlock_t fqslock; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + spinlock_t exp_lock; + long unsigned int exp_seq_rq; + wait_queue_head_t exp_wq[4]; + struct rcu_exp_work rew; + bool exp_need_flush; + long: 56; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +union rcu_noqs { + struct { + u8 norm; + u8 exp; + } b; + u16 s; +}; + +struct rcu_data { + long unsigned int gp_seq; + long unsigned int gp_seq_needed; + union rcu_noqs cpu_no_qs; + bool core_needs_qs; + bool beenonline; + bool gpwrap; + bool exp_deferred_qs; + bool cpu_started; + struct rcu_node *mynode; + long unsigned int grpmask; + long unsigned int ticks_this_gp; + struct irq_work defer_qs_iw; + bool defer_qs_iw_pending; + struct work_struct strict_work; + struct rcu_segcblist cblist; + long int qlen_last_fqs_check; + long unsigned int n_cbs_invoked; + long unsigned int n_force_qs_snap; + long int blimit; + int dynticks_snap; + long int dynticks_nesting; + long int dynticks_nmi_nesting; + atomic_t dynticks; + bool rcu_need_heavy_qs; + bool rcu_urgent_qs; + bool rcu_forced_tick; + bool rcu_forced_tick_exp; + struct callback_head barrier_head; + int exp_dynticks_snap; + struct task_struct *rcu_cpu_kthread_task; + unsigned int rcu_cpu_kthread_status; + char rcu_cpu_has_work; + unsigned int softirq_snap; + struct irq_work rcu_iw; + bool rcu_iw_pending; + long unsigned int rcu_iw_gp_seq; + long unsigned int rcu_ofl_gp_seq; + short int rcu_ofl_gp_flags; + long unsigned int rcu_onl_gp_seq; + short int rcu_onl_gp_flags; + long unsigned int last_fqs_resched; + int cpu; +}; + +struct rcu_state { + struct rcu_node node[9]; + struct rcu_node *level[3]; + int ncpus; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + u8 boost; + long unsigned int gp_seq; + long unsigned int gp_max; + struct task_struct *gp_kthread; + struct swait_queue_head gp_wq; + short int gp_flags; + short int gp_state; + long unsigned int gp_wake_time; + long unsigned int gp_wake_seq; + struct mutex barrier_mutex; + atomic_t barrier_cpu_count; + struct completion barrier_completion; + long unsigned int barrier_sequence; + struct mutex exp_mutex; + struct mutex exp_wake_mutex; + long unsigned int expedited_sequence; + atomic_t expedited_need_qs; + struct swait_queue_head expedited_wq; + int ncpus_snap; + u8 cbovld; + u8 cbovldnext; + long unsigned int jiffies_force_qs; + long unsigned int jiffies_kick_kthreads; + long unsigned int n_force_qs; + long unsigned int gp_start; + long unsigned int gp_end; + long unsigned int gp_activity; + long unsigned int gp_req_activity; + long unsigned int jiffies_stall; + 
long unsigned int jiffies_resched; + long unsigned int n_force_qs_gpstart; + const char *name; + char abbr; + long: 56; + long: 64; + long: 64; + raw_spinlock_t ofl_lock; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct kvfree_rcu_bulk_data { + long unsigned int nr_records; + struct kvfree_rcu_bulk_data *next; + void *records[0]; +}; + +struct kfree_rcu_cpu; + +struct kfree_rcu_cpu_work { + struct rcu_work rcu_work; + struct callback_head *head_free; + struct kvfree_rcu_bulk_data *bkvhead_free[2]; + struct kfree_rcu_cpu *krcp; +}; + +struct kfree_rcu_cpu { + struct callback_head *head; + struct kvfree_rcu_bulk_data *bkvhead[2]; + struct kfree_rcu_cpu_work krw_arr[2]; + raw_spinlock_t lock; + struct delayed_work monitor_work; + bool monitor_todo; + bool initialized; + int count; + struct work_struct page_cache_work; + atomic_t work_in_progress; + struct hrtimer hrtimer; + struct llist_head bkvcache; + int nr_bkv_objs; +}; + +enum dma_sync_target { + SYNC_FOR_CPU = 0, + SYNC_FOR_DEVICE = 1, +}; + +struct dma_devres { + size_t size; + void *vaddr; + dma_addr_t dma_handle; + long unsigned int attrs; +}; + +struct reserved_mem_ops; + +struct reserved_mem { + const char *name; + long unsigned int fdt_node; + long unsigned int phandle; + const struct reserved_mem_ops *ops; + phys_addr_t base; + phys_addr_t size; + void *priv; +}; + +struct reserved_mem_ops { + int (*device_init)(struct reserved_mem *, struct device *); + void (*device_release)(struct reserved_mem *, struct device *); +}; + +typedef int (*reservedmem_of_init_fn)(struct reserved_mem *); + +struct dma_coherent_mem { + void *virt_base; + dma_addr_t device_base; + long unsigned int pfn_base; + int size; + long unsigned int *bitmap; + spinlock_t spinlock; + bool use_dev_dma_pfn_offset; +}; + +struct trace_event_raw_swiotlb_bounced { + struct trace_entry ent; + u32 __data_loc_dev_name; + u64 dma_mask; + dma_addr_t dev_addr; + size_t size; + enum swiotlb_force swiotlb_force; + char __data[0]; +}; + +struct trace_event_data_offsets_swiotlb_bounced { + u32 dev_name; +}; + +typedef void (*btf_trace_swiotlb_bounced)(void *, struct device *, dma_addr_t, size_t, enum swiotlb_force); + +struct gen_pool; + +typedef long unsigned int (*genpool_algo_t)(long unsigned int *, long unsigned int, long unsigned int, unsigned int, void *, struct gen_pool *, long unsigned int); + +struct gen_pool { + spinlock_t lock; + struct list_head chunks; + int min_alloc_order; + genpool_algo_t algo; + void *data; + const char *name; +}; + +enum kcmp_type { + KCMP_FILE = 0, + KCMP_VM = 1, + KCMP_FILES = 2, + KCMP_FS = 3, + KCMP_SIGHAND = 4, + KCMP_IO = 5, + KCMP_SYSVSEM = 6, + KCMP_EPOLL_TFD = 7, + KCMP_TYPES = 8, +}; + +struct kcmp_epoll_slot { + __u32 efd; + __u32 tfd; + __u32 toff; +}; + +enum profile_type { + PROFILE_TASK_EXIT = 0, + PROFILE_MUNMAP = 1, +}; + +struct profile_hit { + u32 pc; + u32 hits; +}; + +struct stacktrace_cookie { + long unsigned int *store; + unsigned int size; + unsigned int skip; + unsigned int len; +}; + +typedef __kernel_long_t __kernel_suseconds_t; + +typedef __kernel_suseconds_t suseconds_t; + +typedef __u64 timeu64_t; + +struct __kernel_itimerspec { + struct __kernel_timespec it_interval; + struct __kernel_timespec it_value; +}; + +struct timezone { + int tz_minuteswest; + int tz_dsttime; +}; + +struct itimerspec64 { + struct timespec64 it_interval; + struct timespec64 it_value; +}; + +struct old_itimerspec32 { + struct old_timespec32 it_interval; + struct old_timespec32 
it_value; +}; + +struct old_timex32 { + u32 modes; + s32 offset; + s32 freq; + s32 maxerror; + s32 esterror; + s32 status; + s32 constant; + s32 precision; + s32 tolerance; + struct old_timeval32 time; + s32 tick; + s32 ppsfreq; + s32 jitter; + s32 shift; + s32 stabil; + s32 jitcnt; + s32 calcnt; + s32 errcnt; + s32 stbcnt; + s32 tai; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct __kernel_timex_timeval { + __kernel_time64_t tv_sec; + long long int tv_usec; +}; + +struct __kernel_timex { + unsigned int modes; + long long int offset; + long long int freq; + long long int maxerror; + long long int esterror; + int status; + long long int constant; + long long int precision; + long long int tolerance; + struct __kernel_timex_timeval time; + long long int tick; + long long int ppsfreq; + long long int jitter; + int shift; + long long int stabil; + long long int jitcnt; + long long int calcnt; + long long int errcnt; + long long int stbcnt; + int tai; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct trace_event_raw_timer_class { + struct trace_entry ent; + void *timer; + char __data[0]; +}; + +struct trace_event_raw_timer_start { + struct trace_entry ent; + void *timer; + void *function; + long unsigned int expires; + long unsigned int now; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_timer_expire_entry { + struct trace_entry ent; + void *timer; + long unsigned int now; + void *function; + long unsigned int baseclk; + char __data[0]; +}; + +struct trace_event_raw_hrtimer_init { + struct trace_entry ent; + void *hrtimer; + clockid_t clockid; + enum hrtimer_mode mode; + char __data[0]; +}; + +struct trace_event_raw_hrtimer_start { + struct trace_entry ent; + void *hrtimer; + void *function; + s64 expires; + s64 softexpires; + enum hrtimer_mode mode; + char __data[0]; +}; + +struct trace_event_raw_hrtimer_expire_entry { + struct trace_entry ent; + void *hrtimer; + s64 now; + void *function; + char __data[0]; +}; + +struct trace_event_raw_hrtimer_class { + struct trace_entry ent; + void *hrtimer; + char __data[0]; +}; + +struct trace_event_raw_itimer_state { + struct trace_entry ent; + int which; + long long unsigned int expires; + long int value_sec; + long int value_nsec; + long int interval_sec; + long int interval_nsec; + char __data[0]; +}; + +struct trace_event_raw_itimer_expire { + struct trace_entry ent; + int which; + pid_t pid; + long long unsigned int now; + char __data[0]; +}; + +struct trace_event_raw_tick_stop { + struct trace_entry ent; + int success; + int dependency; + char __data[0]; +}; + +struct trace_event_data_offsets_timer_class {}; + +struct trace_event_data_offsets_timer_start {}; + +struct trace_event_data_offsets_timer_expire_entry {}; + +struct trace_event_data_offsets_hrtimer_init {}; + +struct trace_event_data_offsets_hrtimer_start {}; + +struct trace_event_data_offsets_hrtimer_expire_entry {}; + +struct trace_event_data_offsets_hrtimer_class {}; + +struct trace_event_data_offsets_itimer_state {}; + +struct trace_event_data_offsets_itimer_expire {}; + +struct trace_event_data_offsets_tick_stop {}; + +typedef void (*btf_trace_timer_init)(void *, struct timer_list *); + +typedef void (*btf_trace_timer_start)(void *, struct timer_list *, long unsigned int, unsigned int); + +typedef void (*btf_trace_timer_expire_entry)(void *, struct timer_list *, long unsigned int); + +typedef void (*btf_trace_timer_expire_exit)(void *, struct timer_list *); + +typedef void 
(*btf_trace_timer_cancel)(void *, struct timer_list *); + +typedef void (*btf_trace_hrtimer_init)(void *, struct hrtimer *, clockid_t, enum hrtimer_mode); + +typedef void (*btf_trace_hrtimer_start)(void *, struct hrtimer *, enum hrtimer_mode); + +typedef void (*btf_trace_hrtimer_expire_entry)(void *, struct hrtimer *, ktime_t *); + +typedef void (*btf_trace_hrtimer_expire_exit)(void *, struct hrtimer *); + +typedef void (*btf_trace_hrtimer_cancel)(void *, struct hrtimer *); + +typedef void (*btf_trace_itimer_state)(void *, int, const struct itimerspec64 * const, long long unsigned int); + +typedef void (*btf_trace_itimer_expire)(void *, int, struct pid *, long long unsigned int); + +typedef void (*btf_trace_tick_stop)(void *, int, int); + +struct timer_base { + raw_spinlock_t lock; + struct timer_list *running_timer; + long unsigned int clk; + long unsigned int next_expiry; + unsigned int cpu; + bool next_expiry_recalc; + bool is_idle; + long unsigned int pending_map[9]; + struct hlist_head vectors[576]; + long: 64; + long: 64; +}; + +struct process_timer { + struct timer_list timer; + struct task_struct *task; +}; + +enum clock_event_state { + CLOCK_EVT_STATE_DETACHED = 0, + CLOCK_EVT_STATE_SHUTDOWN = 1, + CLOCK_EVT_STATE_PERIODIC = 2, + CLOCK_EVT_STATE_ONESHOT = 3, + CLOCK_EVT_STATE_ONESHOT_STOPPED = 4, +}; + +struct clock_event_device { + void (*event_handler)(struct clock_event_device *); + int (*set_next_event)(long unsigned int, struct clock_event_device *); + int (*set_next_ktime)(ktime_t, struct clock_event_device *); + ktime_t next_event; + u64 max_delta_ns; + u64 min_delta_ns; + u32 mult; + u32 shift; + enum clock_event_state state_use_accessors; + unsigned int features; + long unsigned int retries; + int (*set_state_periodic)(struct clock_event_device *); + int (*set_state_oneshot)(struct clock_event_device *); + int (*set_state_oneshot_stopped)(struct clock_event_device *); + int (*set_state_shutdown)(struct clock_event_device *); + int (*tick_resume)(struct clock_event_device *); + void (*broadcast)(const struct cpumask *); + void (*suspend)(struct clock_event_device *); + void (*resume)(struct clock_event_device *); + long unsigned int min_delta_ticks; + long unsigned int max_delta_ticks; + const char *name; + int rating; + int irq; + int bound_on; + const struct cpumask *cpumask; + struct list_head list; + struct module *owner; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +enum tick_device_mode { + TICKDEV_MODE_PERIODIC = 0, + TICKDEV_MODE_ONESHOT = 1, +}; + +struct tick_device { + struct clock_event_device *evtdev; + enum tick_device_mode mode; +}; + +struct ktime_timestamps { + u64 mono; + u64 boot; + u64 real; +}; + +struct system_time_snapshot { + u64 cycles; + ktime_t real; + ktime_t raw; + unsigned int clock_was_set_seq; + u8 cs_was_changed_seq; +}; + +struct system_device_crosststamp { + ktime_t device; + ktime_t sys_realtime; + ktime_t sys_monoraw; +}; + +struct system_counterval_t { + u64 cycles; + struct clocksource *cs; +}; + +typedef struct { + seqcount_t seqcount; +} seqcount_latch_t; + +struct audit_ntp_val { + long long int oldval; + long long int newval; +}; + +struct audit_ntp_data { + struct audit_ntp_val vals[6]; +}; + +enum timekeeping_adv_mode { + TK_ADV_TICK = 0, + TK_ADV_FREQ = 1, +}; + +struct tk_fast { + seqcount_latch_t seq; + struct tk_read_base base[2]; +}; + +enum tick_nohz_mode { + NOHZ_MODE_INACTIVE = 0, + NOHZ_MODE_LOWRES = 1, + NOHZ_MODE_HIGHRES = 2, +}; + +struct tick_sched { + struct hrtimer sched_timer; + 
long unsigned int check_clocks; + enum tick_nohz_mode nohz_mode; + unsigned int inidle: 1; + unsigned int tick_stopped: 1; + unsigned int idle_active: 1; + unsigned int do_timer_last: 1; + unsigned int got_idle_tick: 1; + ktime_t last_tick; + ktime_t next_tick; + long unsigned int idle_jiffies; + long unsigned int idle_calls; + long unsigned int idle_sleeps; + ktime_t idle_entrytime; + ktime_t idle_waketime; + ktime_t idle_exittime; + ktime_t idle_sleeptime; + ktime_t iowait_sleeptime; + long unsigned int last_jiffies; + u64 timer_expires; + u64 timer_expires_base; + u64 next_timer; + ktime_t idle_expires; + atomic_t tick_dep_mask; +}; + +struct timer_list_iter { + int cpu; + bool second_pass; + u64 now; +}; + +struct tm { + int tm_sec; + int tm_min; + int tm_hour; + int tm_mday; + int tm_mon; + long int tm_year; + int tm_wday; + int tm_yday; +}; + +typedef __kernel_timer_t timer_t; + +struct rtc_time { + int tm_sec; + int tm_min; + int tm_hour; + int tm_mday; + int tm_mon; + int tm_year; + int tm_wday; + int tm_yday; + int tm_isdst; +}; + +struct rtc_wkalrm { + unsigned char enabled; + unsigned char pending; + struct rtc_time time; +}; + +enum alarmtimer_type { + ALARM_REALTIME = 0, + ALARM_BOOTTIME = 1, + ALARM_NUMTYPE = 2, + ALARM_REALTIME_FREEZER = 3, + ALARM_BOOTTIME_FREEZER = 4, +}; + +enum alarmtimer_restart { + ALARMTIMER_NORESTART = 0, + ALARMTIMER_RESTART = 1, +}; + +struct alarm { + struct timerqueue_node node; + struct hrtimer timer; + enum alarmtimer_restart (*function)(struct alarm *, ktime_t); + enum alarmtimer_type type; + int state; + void *data; +}; + +struct cpu_timer { + struct timerqueue_node node; + struct timerqueue_head *head; + struct pid *pid; + struct list_head elist; + int firing; +}; + +struct k_clock; + +struct k_itimer { + struct list_head list; + struct hlist_node t_hash; + spinlock_t it_lock; + const struct k_clock *kclock; + clockid_t it_clock; + timer_t it_id; + int it_active; + s64 it_overrun; + s64 it_overrun_last; + int it_requeue_pending; + int it_sigev_notify; + ktime_t it_interval; + struct signal_struct *it_signal; + union { + struct pid *it_pid; + struct task_struct *it_process; + }; + struct sigqueue *sigq; + union { + struct { + struct hrtimer timer; + } real; + struct cpu_timer cpu; + struct { + struct alarm alarmtimer; + } alarm; + } it; + struct callback_head rcu; +}; + +struct k_clock { + int (*clock_getres)(const clockid_t, struct timespec64 *); + int (*clock_set)(const clockid_t, const struct timespec64 *); + int (*clock_get_timespec)(const clockid_t, struct timespec64 *); + ktime_t (*clock_get_ktime)(const clockid_t); + int (*clock_adj)(const clockid_t, struct __kernel_timex *); + int (*timer_create)(struct k_itimer *); + int (*nsleep)(const clockid_t, int, const struct timespec64 *); + int (*timer_set)(struct k_itimer *, int, struct itimerspec64 *, struct itimerspec64 *); + int (*timer_del)(struct k_itimer *); + void (*timer_get)(struct k_itimer *, struct itimerspec64 *); + void (*timer_rearm)(struct k_itimer *); + s64 (*timer_forward)(struct k_itimer *, ktime_t); + ktime_t (*timer_remaining)(struct k_itimer *, ktime_t); + int (*timer_try_to_cancel)(struct k_itimer *); + void (*timer_arm)(struct k_itimer *, ktime_t, bool, bool); + void (*timer_wait_running)(struct k_itimer *); +}; + +struct class_interface { + struct list_head node; + struct class *class; + int (*add_dev)(struct device *, struct class_interface *); + void (*remove_dev)(struct device *, struct class_interface *); +}; + +struct rtc_class_ops { + int (*ioctl)(struct device 
*, unsigned int, long unsigned int); + int (*read_time)(struct device *, struct rtc_time *); + int (*set_time)(struct device *, struct rtc_time *); + int (*read_alarm)(struct device *, struct rtc_wkalrm *); + int (*set_alarm)(struct device *, struct rtc_wkalrm *); + int (*proc)(struct device *, struct seq_file *); + int (*alarm_irq_enable)(struct device *, unsigned int); + int (*read_offset)(struct device *, long int *); + int (*set_offset)(struct device *, long int); +}; + +struct rtc_device; + +struct rtc_timer { + struct timerqueue_node node; + ktime_t period; + void (*func)(struct rtc_device *); + struct rtc_device *rtc; + int enabled; +}; + +struct rtc_device { + struct device dev; + struct module *owner; + int id; + const struct rtc_class_ops *ops; + struct mutex ops_lock; + struct cdev char_dev; + long unsigned int flags; + long unsigned int irq_data; + spinlock_t irq_lock; + wait_queue_head_t irq_queue; + struct fasync_struct *async_queue; + int irq_freq; + int max_user_freq; + struct timerqueue_head timerqueue; + struct rtc_timer aie_timer; + struct rtc_timer uie_rtctimer; + struct hrtimer pie_timer; + int pie_enabled; + struct work_struct irqwork; + int uie_unsupported; + long int set_offset_nsec; + bool registered; + bool nvram_old_abi; + struct bin_attribute *nvram; + time64_t range_min; + timeu64_t range_max; + time64_t start_secs; + time64_t offset_secs; + bool set_start_time; + struct work_struct uie_task; + struct timer_list uie_timer; + unsigned int oldsecs; + unsigned int uie_irq_active: 1; + unsigned int stop_uie_polling: 1; + unsigned int uie_task_active: 1; + unsigned int uie_timer_active: 1; +}; + +struct property_entry; + +struct platform_device_info { + struct device *parent; + struct fwnode_handle *fwnode; + bool of_node_reused; + const char *name; + int id; + const struct resource *res; + unsigned int num_res; + const void *data; + size_t size_data; + u64 dma_mask; + const struct property_entry *properties; +}; + +enum dev_prop_type { + DEV_PROP_U8 = 0, + DEV_PROP_U16 = 1, + DEV_PROP_U32 = 2, + DEV_PROP_U64 = 3, + DEV_PROP_STRING = 4, + DEV_PROP_REF = 5, +}; + +struct property_entry { + const char *name; + size_t length; + bool is_inline; + enum dev_prop_type type; + union { + const void *pointer; + union { + u8 u8_data[8]; + u16 u16_data[4]; + u32 u32_data[2]; + u64 u64_data[1]; + const char *str[1]; + } value; + }; +}; + +struct trace_event_raw_alarmtimer_suspend { + struct trace_entry ent; + s64 expires; + unsigned char alarm_type; + char __data[0]; +}; + +struct trace_event_raw_alarm_class { + struct trace_entry ent; + void *alarm; + unsigned char alarm_type; + s64 expires; + s64 now; + char __data[0]; +}; + +struct trace_event_data_offsets_alarmtimer_suspend {}; + +struct trace_event_data_offsets_alarm_class {}; + +typedef void (*btf_trace_alarmtimer_suspend)(void *, ktime_t, int); + +typedef void (*btf_trace_alarmtimer_fired)(void *, struct alarm *, ktime_t); + +typedef void (*btf_trace_alarmtimer_start)(void *, struct alarm *, ktime_t); + +typedef void (*btf_trace_alarmtimer_cancel)(void *, struct alarm *, ktime_t); + +struct alarm_base { + spinlock_t lock; + struct timerqueue_head timerqueue; + ktime_t (*get_ktime)(); + void (*get_timespec)(struct timespec64 *); + clockid_t base_clockid; +}; + +struct sigevent { + sigval_t sigev_value; + int sigev_signo; + int sigev_notify; + union { + int _pad[12]; + int _tid; + struct { + void (*_function)(sigval_t); + void *_attribute; + } _sigev_thread; + } _sigev_un; +}; + +typedef struct sigevent sigevent_t; + 
+struct compat_sigevent { + compat_sigval_t sigev_value; + compat_int_t sigev_signo; + compat_int_t sigev_notify; + union { + compat_int_t _pad[13]; + compat_int_t _tid; + struct { + compat_uptr_t _function; + compat_uptr_t _attribute; + } _sigev_thread; + } _sigev_un; +}; + +typedef unsigned int uint; + +struct posix_clock; + +struct posix_clock_operations { + struct module *owner; + int (*clock_adjtime)(struct posix_clock *, struct __kernel_timex *); + int (*clock_gettime)(struct posix_clock *, struct timespec64 *); + int (*clock_getres)(struct posix_clock *, struct timespec64 *); + int (*clock_settime)(struct posix_clock *, const struct timespec64 *); + long int (*ioctl)(struct posix_clock *, unsigned int, long unsigned int); + int (*open)(struct posix_clock *, fmode_t); + __poll_t (*poll)(struct posix_clock *, struct file *, poll_table *); + int (*release)(struct posix_clock *); + ssize_t (*read)(struct posix_clock *, uint, char *, size_t); +}; + +struct posix_clock { + struct posix_clock_operations ops; + struct cdev cdev; + struct device *dev; + struct rw_semaphore rwsem; + bool zombie; +}; + +struct posix_clock_desc { + struct file *fp; + struct posix_clock *clk; +}; + +struct __kernel_old_itimerval { + struct __kernel_old_timeval it_interval; + struct __kernel_old_timeval it_value; +}; + +struct old_itimerval32 { + struct old_timeval32 it_interval; + struct old_timeval32 it_value; +}; + +typedef s64 int64_t; + +struct ce_unbind { + struct clock_event_device *ce; + int res; +}; + +enum tick_broadcast_state { + TICK_BROADCAST_EXIT = 0, + TICK_BROADCAST_ENTER = 1, +}; + +enum tick_broadcast_mode { + TICK_BROADCAST_OFF = 0, + TICK_BROADCAST_ON = 1, + TICK_BROADCAST_FORCE = 2, +}; + +struct clock_data { + seqcount_latch_t seq; + struct clock_read_data read_data[2]; + ktime_t wrap_kt; + long unsigned int rate; + u64 (*actual_read_sched_clock)(); +}; + +struct proc_timens_offset { + int clockid; + struct timespec64 val; +}; + +union futex_key { + struct { + u64 i_seq; + long unsigned int pgoff; + unsigned int offset; + } shared; + struct { + union { + struct mm_struct *mm; + u64 __tmp; + }; + long unsigned int address; + unsigned int offset; + } private; + struct { + u64 ptr; + long unsigned int word; + unsigned int offset; + } both; +}; + +struct futex_pi_state { + struct list_head list; + struct rt_mutex pi_mutex; + struct task_struct *owner; + refcount_t refcount; + union futex_key key; +}; + +struct futex_q { + struct plist_node list; + struct task_struct *task; + spinlock_t *lock_ptr; + union futex_key key; + struct futex_pi_state *pi_state; + struct rt_mutex_waiter *rt_waiter; + union futex_key *requeue_pi_key; + u32 bitset; +}; + +struct futex_hash_bucket { + atomic_t waiters; + spinlock_t lock; + struct plist_head chain; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +enum futex_access { + FUTEX_READ = 0, + FUTEX_WRITE = 1, +}; + +typedef bool (*smp_cond_func_t)(int, void *); + +struct call_function_data { + call_single_data_t *csd; + cpumask_var_t cpumask; + cpumask_var_t cpumask_ipi; +}; + +struct smp_call_on_cpu_struct { + struct work_struct work; + struct completion done; + int (*func)(void *); + void *data; + int ret; + int cpu; +}; + +typedef short unsigned int __kernel_old_uid_t; + +typedef short unsigned int __kernel_old_gid_t; + +typedef __kernel_old_uid_t old_uid_t; + +typedef __kernel_old_gid_t old_gid_t; + +struct latch_tree_root { + seqcount_latch_t seq; + struct rb_root tree[2]; +}; + +struct latch_tree_ops { + bool (*less)(struct latch_tree_node 
*, struct latch_tree_node *); + int (*comp)(void *, struct latch_tree_node *); +}; + +struct modversion_info { + long unsigned int crc; + char name[56]; +}; + +struct module_use { + struct list_head source_list; + struct list_head target_list; + struct module *source; + struct module *target; +}; + +struct module_sect_attr { + struct bin_attribute battr; + long unsigned int address; +}; + +struct module_sect_attrs { + struct attribute_group grp; + unsigned int nsections; + struct module_sect_attr attrs[0]; +}; + +struct module_notes_attrs { + struct kobject *dir; + unsigned int notes; + struct bin_attribute attrs[0]; +}; + +enum mod_license { + NOT_GPL_ONLY = 0, + GPL_ONLY = 1, + WILL_BE_GPL_ONLY = 2, +}; + +struct symsearch { + const struct kernel_symbol *start; + const struct kernel_symbol *stop; + const s32 *crcs; + enum mod_license license; + bool unused; +}; + +enum kernel_read_file_id { + READING_UNKNOWN = 0, + READING_FIRMWARE = 1, + READING_MODULE = 2, + READING_KEXEC_IMAGE = 3, + READING_KEXEC_INITRAMFS = 4, + READING_POLICY = 5, + READING_X509_CERTIFICATE = 6, + READING_MAX_ID = 7, +}; + +enum kernel_load_data_id { + LOADING_UNKNOWN = 0, + LOADING_FIRMWARE = 1, + LOADING_MODULE = 2, + LOADING_KEXEC_IMAGE = 3, + LOADING_KEXEC_INITRAMFS = 4, + LOADING_POLICY = 5, + LOADING_X509_CERTIFICATE = 6, + LOADING_MAX_ID = 7, +}; + +enum { + PROC_ENTRY_PERMANENT = 1, +}; + +struct _ddebug { + const char *modname; + const char *function; + const char *filename; + const char *format; + unsigned int lineno: 18; + unsigned int flags: 8; + union { + struct static_key_true dd_key_true; + struct static_key_false dd_key_false; + } key; +}; + +struct load_info { + const char *name; + struct module *mod; + Elf64_Ehdr *hdr; + long unsigned int len; + Elf64_Shdr *sechdrs; + char *secstrings; + char *strtab; + long unsigned int symoffs; + long unsigned int stroffs; + long unsigned int init_typeoffs; + long unsigned int core_typeoffs; + struct _ddebug *debug; + unsigned int num_debug; + bool sig_ok; + long unsigned int mod_kallsyms_init_off; + struct { + unsigned int sym; + unsigned int str; + unsigned int mod; + unsigned int vers; + unsigned int info; + unsigned int pcpu; + } index; +}; + +struct trace_event_raw_module_load { + struct trace_entry ent; + unsigned int taints; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_module_free { + struct trace_entry ent; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_module_refcnt { + struct trace_entry ent; + long unsigned int ip; + int refcnt; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_module_request { + struct trace_entry ent; + long unsigned int ip; + bool wait; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_data_offsets_module_load { + u32 name; +}; + +struct trace_event_data_offsets_module_free { + u32 name; +}; + +struct trace_event_data_offsets_module_refcnt { + u32 name; +}; + +struct trace_event_data_offsets_module_request { + u32 name; +}; + +typedef void (*btf_trace_module_load)(void *, struct module *); + +typedef void (*btf_trace_module_free)(void *, struct module *); + +typedef void (*btf_trace_module_get)(void *, struct module *, long unsigned int); + +typedef void (*btf_trace_module_put)(void *, struct module *, long unsigned int); + +typedef void (*btf_trace_module_request)(void *, char *, bool, long unsigned int); + +struct mod_tree_root { + struct latch_tree_root root; + long unsigned int addr_min; + long unsigned int addr_max; +}; + +struct 
find_symbol_arg { + const char *name; + bool gplok; + bool warn; + struct module *owner; + const s32 *crc; + const struct kernel_symbol *sym; + enum mod_license license; +}; + +struct mod_initfree { + struct llist_node node; + void *module_init; +}; + +struct module_signature { + u8 algo; + u8 hash; + u8 id_type; + u8 signer_len; + u8 key_id_len; + u8 __pad[3]; + __be32 sig_len; +}; + +enum pkey_id_type { + PKEY_ID_PGP = 0, + PKEY_ID_X509 = 1, + PKEY_ID_PKCS7 = 2, +}; + +struct kallsym_iter { + loff_t pos; + loff_t pos_arch_end; + loff_t pos_mod_end; + loff_t pos_ftrace_mod_end; + loff_t pos_bpf_end; + long unsigned int value; + unsigned int nameoff; + char type; + char name[128]; + char module_name[56]; + int exported; + int show_value; +}; + +typedef struct { + int val[2]; +} __kernel_fsid_t; + +struct kstatfs { + long int f_type; + long int f_bsize; + u64 f_blocks; + u64 f_bfree; + u64 f_bavail; + u64 f_files; + u64 f_ffree; + __kernel_fsid_t f_fsid; + long int f_namelen; + long int f_frsize; + long int f_flags; + long int f_spare[4]; +}; + +typedef __u16 comp_t; + +struct acct_v3 { + char ac_flag; + char ac_version; + __u16 ac_tty; + __u32 ac_exitcode; + __u32 ac_uid; + __u32 ac_gid; + __u32 ac_pid; + __u32 ac_ppid; + __u32 ac_btime; + __u32 ac_etime; + comp_t ac_utime; + comp_t ac_stime; + comp_t ac_mem; + comp_t ac_io; + comp_t ac_rw; + comp_t ac_minflt; + comp_t ac_majflt; + comp_t ac_swaps; + char ac_comm[16]; +}; + +typedef struct acct_v3 acct_t; + +struct fs_pin { + wait_queue_head_t wait; + int done; + struct hlist_node s_list; + struct hlist_node m_list; + void (*kill)(struct fs_pin *); +}; + +struct bsd_acct_struct { + struct fs_pin pin; + atomic_long_t count; + struct callback_head rcu; + struct mutex lock; + int active; + long unsigned int needcheck; + struct file *file; + struct pid_namespace *ns; + struct work_struct work; + struct completion done; +}; + +struct elf64_note { + Elf64_Word n_namesz; + Elf64_Word n_descsz; + Elf64_Word n_type; +}; + +struct elf_note_section { + struct elf64_note n_hdr; + u8 n_data[0]; +}; + +typedef long unsigned int elf_greg_t; + +typedef elf_greg_t elf_gregset_t[34]; + +struct elf_siginfo { + int si_signo; + int si_code; + int si_errno; +}; + +struct elf_prstatus { + struct elf_siginfo pr_info; + short int pr_cursig; + long unsigned int pr_sigpend; + long unsigned int pr_sighold; + pid_t pr_pid; + pid_t pr_ppid; + pid_t pr_pgrp; + pid_t pr_sid; + struct __kernel_old_timeval pr_utime; + struct __kernel_old_timeval pr_stime; + struct __kernel_old_timeval pr_cutime; + struct __kernel_old_timeval pr_cstime; + elf_gregset_t pr_reg; + int pr_fpvalid; +}; + +typedef u32 note_buf_t[106]; + +struct compat_kexec_segment { + compat_uptr_t buf; + compat_size_t bufsz; + compat_ulong_t mem; + compat_size_t memsz; +}; + +struct elf64_phdr { + Elf64_Word p_type; + Elf64_Word p_flags; + Elf64_Off p_offset; + Elf64_Addr p_vaddr; + Elf64_Addr p_paddr; + Elf64_Xword p_filesz; + Elf64_Xword p_memsz; + Elf64_Xword p_align; +}; + +typedef struct elf64_phdr Elf64_Phdr; + +struct crypto_alg; + +struct crypto_tfm { + u32 crt_flags; + int node; + void (*exit)(struct crypto_tfm *); + struct crypto_alg *__crt_alg; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + void *__crt_ctx[0]; +}; + +struct cipher_alg { + unsigned int cia_min_keysize; + unsigned int cia_max_keysize; + int (*cia_setkey)(struct crypto_tfm *, const u8 *, unsigned int); + void 
(*cia_encrypt)(struct crypto_tfm *, u8 *, const u8 *); + void (*cia_decrypt)(struct crypto_tfm *, u8 *, const u8 *); +}; + +struct compress_alg { + int (*coa_compress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, unsigned int *); + int (*coa_decompress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, unsigned int *); +}; + +struct crypto_type; + +struct crypto_alg { + struct list_head cra_list; + struct list_head cra_users; + u32 cra_flags; + unsigned int cra_blocksize; + unsigned int cra_ctxsize; + unsigned int cra_alignmask; + int cra_priority; + refcount_t cra_refcnt; + char cra_name[128]; + char cra_driver_name[128]; + const struct crypto_type *cra_type; + union { + struct cipher_alg cipher; + struct compress_alg compress; + } cra_u; + int (*cra_init)(struct crypto_tfm *); + void (*cra_exit)(struct crypto_tfm *); + void (*cra_destroy)(struct crypto_alg *); + struct module *cra_module; +}; + +struct crypto_instance; + +struct crypto_type { + unsigned int (*ctxsize)(struct crypto_alg *, u32, u32); + unsigned int (*extsize)(struct crypto_alg *); + int (*init)(struct crypto_tfm *, u32, u32); + int (*init_tfm)(struct crypto_tfm *); + void (*show)(struct seq_file *, struct crypto_alg *); + int (*report)(struct sk_buff *, struct crypto_alg *); + void (*free)(struct crypto_instance *); + unsigned int type; + unsigned int maskclear; + unsigned int maskset; + unsigned int tfmsize; +}; + +struct crypto_shash; + +struct shash_desc { + struct crypto_shash *tfm; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + void *__ctx[0]; +}; + +struct crypto_shash { + unsigned int descsize; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct crypto_tfm base; +}; + +struct kexec_sha_region { + long unsigned int start; + long unsigned int len; +}; + +enum migrate_reason { + MR_COMPACTION = 0, + MR_MEMORY_FAILURE = 1, + MR_MEMORY_HOTPLUG = 2, + MR_SYSCALL = 3, + MR_MEMPOLICY_MBIND = 4, + MR_NUMA_MISPLACED = 5, + MR_CONTIG_RANGE = 6, + MR_TYPES = 7, +}; + +typedef __kernel_ulong_t __kernel_ino_t; + +typedef __kernel_ino_t ino_t; + +enum kernfs_node_type { + KERNFS_DIR = 1, + KERNFS_FILE = 2, + KERNFS_LINK = 4, +}; + +enum kernfs_root_flag { + KERNFS_ROOT_CREATE_DEACTIVATED = 1, + KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK = 2, + KERNFS_ROOT_SUPPORT_EXPORTOP = 4, + KERNFS_ROOT_SUPPORT_USER_XATTR = 8, +}; + +struct kernfs_fs_context { + struct kernfs_root *root; + void *ns_tag; + long unsigned int magic; + bool new_sb_created; +}; + +enum bpf_link_type { + BPF_LINK_TYPE_UNSPEC = 0, + BPF_LINK_TYPE_RAW_TRACEPOINT = 1, + BPF_LINK_TYPE_TRACING = 2, + BPF_LINK_TYPE_CGROUP = 3, + BPF_LINK_TYPE_ITER = 4, + BPF_LINK_TYPE_NETNS = 5, + BPF_LINK_TYPE_XDP = 6, + MAX_BPF_LINK_TYPE = 7, +}; + +struct bpf_link_info { + __u32 type; + __u32 id; + __u32 prog_id; + union { + struct { + __u64 tp_name; + __u32 tp_name_len; + } raw_tracepoint; + struct { + __u32 attach_type; + } tracing; + struct { + __u64 cgroup_id; + __u32 attach_type; + } cgroup; + struct { + __u64 target_name; + __u32 target_name_len; + union { + struct { + __u32 map_id; + } map; + }; + } iter; + struct { + __u32 netns_ino; + __u32 attach_type; + } netns; + struct { + __u32 ifindex; + } xdp; + }; +}; + +struct bpf_link_ops; + +struct bpf_link { + atomic64_t refcnt; + u32 id; + enum 
bpf_link_type type; + const struct bpf_link_ops *ops; + struct bpf_prog *prog; + struct work_struct work; +}; + +struct bpf_link_ops { + void (*release)(struct bpf_link *); + void (*dealloc)(struct bpf_link *); + int (*detach)(struct bpf_link *); + int (*update_prog)(struct bpf_link *, struct bpf_prog *, struct bpf_prog *); + void (*show_fdinfo)(const struct bpf_link *, struct seq_file *); + int (*fill_link_info)(const struct bpf_link *, struct bpf_link_info *); +}; + +struct bpf_cgroup_link { + struct bpf_link link; + struct cgroup *cgroup; + enum bpf_attach_type type; +}; + +enum { + CGRP_NOTIFY_ON_RELEASE = 0, + CGRP_CPUSET_CLONE_CHILDREN = 1, + CGRP_FREEZE = 2, + CGRP_FROZEN = 3, +}; + +enum { + CGRP_ROOT_NOPREFIX = 2, + CGRP_ROOT_XATTR = 4, + CGRP_ROOT_NS_DELEGATE = 8, + CGRP_ROOT_CPUSET_V2_MODE = 16, + CGRP_ROOT_MEMORY_LOCAL_EVENTS = 32, + CGRP_ROOT_MEMORY_RECURSIVE_PROT = 64, +}; + +struct cgroup_taskset { + struct list_head src_csets; + struct list_head dst_csets; + int nr_tasks; + int ssid; + struct list_head *csets; + struct css_set *cur_cset; + struct task_struct *cur_task; +}; + +struct cgroup_fs_context { + struct kernfs_fs_context kfc; + struct cgroup_root *root; + struct cgroup_namespace *ns; + unsigned int flags; + bool cpuset_clone_children; + bool none; + bool all_ss; + u16 subsys_mask; + char *name; + char *release_agent; +}; + +struct cgrp_cset_link { + struct cgroup *cgrp; + struct css_set *cset; + struct list_head cset_link; + struct list_head cgrp_link; +}; + +struct cgroup_mgctx { + struct list_head preloaded_src_csets; + struct list_head preloaded_dst_csets; + struct cgroup_taskset tset; + u16 ss_mask; +}; + +struct trace_event_raw_cgroup_root { + struct trace_entry ent; + int root; + u16 ss_mask; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_cgroup { + struct trace_entry ent; + int root; + int id; + int level; + u32 __data_loc_path; + char __data[0]; +}; + +struct trace_event_raw_cgroup_migrate { + struct trace_entry ent; + int dst_root; + int dst_id; + int dst_level; + int pid; + u32 __data_loc_dst_path; + u32 __data_loc_comm; + char __data[0]; +}; + +struct trace_event_raw_cgroup_event { + struct trace_entry ent; + int root; + int id; + int level; + u32 __data_loc_path; + int val; + char __data[0]; +}; + +struct trace_event_data_offsets_cgroup_root { + u32 name; +}; + +struct trace_event_data_offsets_cgroup { + u32 path; +}; + +struct trace_event_data_offsets_cgroup_migrate { + u32 dst_path; + u32 comm; +}; + +struct trace_event_data_offsets_cgroup_event { + u32 path; +}; + +typedef void (*btf_trace_cgroup_setup_root)(void *, struct cgroup_root *); + +typedef void (*btf_trace_cgroup_destroy_root)(void *, struct cgroup_root *); + +typedef void (*btf_trace_cgroup_remount)(void *, struct cgroup_root *); + +typedef void (*btf_trace_cgroup_mkdir)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_rmdir)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_release)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_rename)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_freeze)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_unfreeze)(void *, struct cgroup *, const char *); + +typedef void (*btf_trace_cgroup_attach_task)(void *, struct cgroup *, const char *, struct task_struct *, bool); + +typedef void (*btf_trace_cgroup_transfer_tasks)(void *, struct cgroup *, const char *, struct task_struct *, bool); + +typedef void 
(*btf_trace_cgroup_notify_populated)(void *, struct cgroup *, const char *, int); + +typedef void (*btf_trace_cgroup_notify_frozen)(void *, struct cgroup *, const char *, int); + +enum cgroup2_param { + Opt_nsdelegate = 0, + Opt_memory_localevents = 1, + Opt_memory_recursiveprot = 2, + nr__cgroup2_params = 3, +}; + +struct cgroupstats { + __u64 nr_sleeping; + __u64 nr_running; + __u64 nr_stopped; + __u64 nr_uninterruptible; + __u64 nr_io_wait; +}; + +enum cgroup_filetype { + CGROUP_FILE_PROCS = 0, + CGROUP_FILE_TASKS = 1, +}; + +struct cgroup_pidlist { + struct { + enum cgroup_filetype type; + struct pid_namespace *ns; + } key; + pid_t *list; + int length; + struct list_head links; + struct cgroup *owner; + struct delayed_work destroy_dwork; +}; + +enum cgroup1_param { + Opt_all = 0, + Opt_clone_children = 1, + Opt_cpuset_v2_mode = 2, + Opt_name = 3, + Opt_none = 4, + Opt_noprefix = 5, + Opt_release_agent = 6, + Opt_xattr = 7, +}; + +enum freezer_state_flags { + CGROUP_FREEZER_ONLINE = 1, + CGROUP_FREEZING_SELF = 2, + CGROUP_FREEZING_PARENT = 4, + CGROUP_FROZEN = 8, + CGROUP_FREEZING = 6, +}; + +struct freezer { + struct cgroup_subsys_state css; + unsigned int state; +}; + +struct pids_cgroup { + struct cgroup_subsys_state css; + atomic64_t counter; + atomic64_t limit; + struct cgroup_file events_file; + atomic64_t events_limit; +}; + +struct root_domain___2; + +struct fmeter { + int cnt; + int val; + time64_t time; + spinlock_t lock; +}; + +struct cpuset { + struct cgroup_subsys_state css; + long unsigned int flags; + cpumask_var_t cpus_allowed; + nodemask_t mems_allowed; + cpumask_var_t effective_cpus; + nodemask_t effective_mems; + cpumask_var_t subparts_cpus; + nodemask_t old_mems_allowed; + struct fmeter fmeter; + int attach_in_progress; + int pn; + int relax_domain_level; + int nr_subparts_cpus; + int partition_root_state; + int use_parent_ecpus; + int child_ecpus_count; +}; + +struct tmpmasks { + cpumask_var_t addmask; + cpumask_var_t delmask; + cpumask_var_t new_cpus; +}; + +typedef enum { + CS_ONLINE = 0, + CS_CPU_EXCLUSIVE = 1, + CS_MEM_EXCLUSIVE = 2, + CS_MEM_HARDWALL = 3, + CS_MEMORY_MIGRATE = 4, + CS_SCHED_LOAD_BALANCE = 5, + CS_SPREAD_PAGE = 6, + CS_SPREAD_SLAB = 7, +} cpuset_flagbits_t; + +enum subparts_cmd { + partcmd_enable = 0, + partcmd_disable = 1, + partcmd_update = 2, +}; + +struct cpuset_migrate_mm_work { + struct work_struct work; + struct mm_struct *mm; + nodemask_t from; + nodemask_t to; +}; + +typedef enum { + FILE_MEMORY_MIGRATE = 0, + FILE_CPULIST = 1, + FILE_MEMLIST = 2, + FILE_EFFECTIVE_CPULIST = 3, + FILE_EFFECTIVE_MEMLIST = 4, + FILE_SUBPARTS_CPULIST = 5, + FILE_CPU_EXCLUSIVE = 6, + FILE_MEM_EXCLUSIVE = 7, + FILE_MEM_HARDWALL = 8, + FILE_SCHED_LOAD_BALANCE = 9, + FILE_PARTITION_ROOT = 10, + FILE_SCHED_RELAX_DOMAIN_LEVEL = 11, + FILE_MEMORY_PRESSURE_ENABLED = 12, + FILE_MEMORY_PRESSURE = 13, + FILE_SPREAD_PAGE = 14, + FILE_SPREAD_SLAB = 15, +} cpuset_filetype_t; + +struct kernel_pkey_query { + __u32 supported_ops; + __u32 key_size; + __u16 max_data_size; + __u16 max_sig_size; + __u16 max_enc_size; + __u16 max_dec_size; +}; + +enum kernel_pkey_operation { + kernel_pkey_encrypt = 0, + kernel_pkey_decrypt = 1, + kernel_pkey_sign = 2, + kernel_pkey_verify = 3, +}; + +struct kernel_pkey_params { + struct key *key; + const char *encoding; + const char *hash_algo; + char *info; + __u32 in_len; + union { + __u32 out_len; + __u32 in2_len; + }; + enum kernel_pkey_operation op: 8; +}; + +struct key_preparsed_payload { + char *description; + union key_payload payload; + 
const void *data; + size_t datalen; + size_t quotalen; + time64_t expiry; +}; + +struct key_match_data { + bool (*cmp)(const struct key *, const struct key_match_data *); + const void *raw_data; + void *preparsed; + unsigned int lookup_type; +}; + +struct idmap_key { + bool map_up; + u32 id; + u32 count; +}; + +struct ctl_path { + const char *procname; +}; + +typedef void (*exitcall_t)(); + +struct cpu_stop_done { + atomic_t nr_todo; + int ret; + struct completion completion; +}; + +struct cpu_stopper { + struct task_struct *thread; + raw_spinlock_t lock; + bool enabled; + struct list_head works; + struct cpu_stop_work stop_work; +}; + +enum multi_stop_state { + MULTI_STOP_NONE = 0, + MULTI_STOP_PREPARE = 1, + MULTI_STOP_DISABLE_IRQ = 2, + MULTI_STOP_RUN = 3, + MULTI_STOP_EXIT = 4, +}; + +struct multi_stop_data { + cpu_stop_fn_t fn; + void *data; + unsigned int num_threads; + const struct cpumask *active_cpus; + enum multi_stop_state state; + atomic_t thread_ack; +}; + +typedef int __kernel_mqd_t; + +typedef __kernel_mqd_t mqd_t; + +enum audit_state { + AUDIT_DISABLED = 0, + AUDIT_BUILD_CONTEXT = 1, + AUDIT_RECORD_CONTEXT = 2, +}; + +struct audit_cap_data { + kernel_cap_t permitted; + kernel_cap_t inheritable; + union { + unsigned int fE; + kernel_cap_t effective; + }; + kernel_cap_t ambient; + kuid_t rootid; +}; + +struct audit_names { + struct list_head list; + struct filename *name; + int name_len; + bool hidden; + long unsigned int ino; + dev_t dev; + umode_t mode; + kuid_t uid; + kgid_t gid; + dev_t rdev; + u32 osid; + struct audit_cap_data fcap; + unsigned int fcap_ver; + unsigned char type; + bool should_free; +}; + +struct mq_attr { + __kernel_long_t mq_flags; + __kernel_long_t mq_maxmsg; + __kernel_long_t mq_msgsize; + __kernel_long_t mq_curmsgs; + __kernel_long_t __reserved[4]; +}; + +struct audit_proctitle { + int len; + char *value; +}; + +struct audit_aux_data; + +struct __kernel_sockaddr_storage; + +struct audit_tree_refs; + +struct audit_context { + int dummy; + int in_syscall; + enum audit_state state; + enum audit_state current_state; + unsigned int serial; + int major; + struct timespec64 ctime; + long unsigned int argv[4]; + long int return_code; + u64 prio; + int return_valid; + struct audit_names preallocated_names[5]; + int name_count; + struct list_head names_list; + char *filterkey; + struct path pwd; + struct audit_aux_data *aux; + struct audit_aux_data *aux_pids; + struct __kernel_sockaddr_storage *sockaddr; + size_t sockaddr_len; + pid_t pid; + pid_t ppid; + kuid_t uid; + kuid_t euid; + kuid_t suid; + kuid_t fsuid; + kgid_t gid; + kgid_t egid; + kgid_t sgid; + kgid_t fsgid; + long unsigned int personality; + int arch; + pid_t target_pid; + kuid_t target_auid; + kuid_t target_uid; + unsigned int target_sessionid; + u32 target_sid; + char target_comm[16]; + struct audit_tree_refs *trees; + struct audit_tree_refs *first_trees; + struct list_head killed_trees; + int tree_count; + int type; + union { + struct { + int nargs; + long int args[6]; + } socketcall; + struct { + kuid_t uid; + kgid_t gid; + umode_t mode; + u32 osid; + int has_perm; + uid_t perm_uid; + gid_t perm_gid; + umode_t perm_mode; + long unsigned int qbytes; + } ipc; + struct { + mqd_t mqdes; + struct mq_attr mqstat; + } mq_getsetattr; + struct { + mqd_t mqdes; + int sigev_signo; + } mq_notify; + struct { + mqd_t mqdes; + size_t msg_len; + unsigned int msg_prio; + struct timespec64 abs_timeout; + } mq_sendrecv; + struct { + int oflag; + umode_t mode; + struct mq_attr attr; + } mq_open; + struct { + 
pid_t pid; + struct audit_cap_data cap; + } capset; + struct { + int fd; + int flags; + } mmap; + struct { + int argc; + } execve; + struct { + char *name; + } module; + }; + int fds[2]; + struct audit_proctitle proctitle; +}; + +struct __kernel_sockaddr_storage { + union { + struct { + __kernel_sa_family_t ss_family; + char __data[126]; + }; + void *__align; + }; +}; + +enum audit_nlgrps { + AUDIT_NLGRP_NONE = 0, + AUDIT_NLGRP_READLOG = 1, + __AUDIT_NLGRP_MAX = 2, +}; + +struct audit_status { + __u32 mask; + __u32 enabled; + __u32 failure; + __u32 pid; + __u32 rate_limit; + __u32 backlog_limit; + __u32 lost; + __u32 backlog; + union { + __u32 version; + __u32 feature_bitmap; + }; + __u32 backlog_wait_time; + __u32 backlog_wait_time_actual; +}; + +struct audit_features { + __u32 vers; + __u32 mask; + __u32 features; + __u32 lock; +}; + +struct audit_tty_status { + __u32 enabled; + __u32 log_passwd; +}; + +struct audit_sig_info { + uid_t uid; + pid_t pid; + char ctx[0]; +}; + +struct net_generic { + union { + struct { + unsigned int len; + struct callback_head rcu; + } s; + void *ptr[0]; + }; +}; + +struct pernet_operations { + struct list_head list; + int (*init)(struct net *); + void (*pre_exit)(struct net *); + void (*exit)(struct net *); + void (*exit_batch)(struct list_head *); + unsigned int *id; + size_t size; +}; + +struct scm_creds { + u32 pid; + kuid_t uid; + kgid_t gid; +}; + +struct netlink_skb_parms { + struct scm_creds creds; + __u32 portid; + __u32 dst_group; + __u32 flags; + struct sock *sk; + bool nsid_is_set; + int nsid; +}; + +struct netlink_kernel_cfg { + unsigned int groups; + unsigned int flags; + void (*input)(struct sk_buff *); + struct mutex *cb_mutex; + int (*bind)(struct net *, int); + void (*unbind)(struct net *, int); + bool (*compare)(struct net *, struct sock *); +}; + +struct audit_netlink_list { + __u32 portid; + struct net *net; + struct sk_buff_head q; +}; + +struct audit_net { + struct sock *sk; +}; + +struct auditd_connection { + struct pid *pid; + u32 portid; + struct net *net; + struct callback_head rcu; +}; + +struct audit_ctl_mutex { + struct mutex lock; + void *owner; +}; + +struct audit_buffer { + struct sk_buff *skb; + struct audit_context *ctx; + gfp_t gfp_mask; +}; + +struct audit_reply { + __u32 portid; + struct net *net; + struct sk_buff *skb; +}; + +enum { + Audit_equal = 0, + Audit_not_equal = 1, + Audit_bitmask = 2, + Audit_bittest = 3, + Audit_lt = 4, + Audit_gt = 5, + Audit_le = 6, + Audit_ge = 7, + Audit_bad = 8, +}; + +struct audit_rule_data { + __u32 flags; + __u32 action; + __u32 field_count; + __u32 mask[64]; + __u32 fields[64]; + __u32 values[64]; + __u32 fieldflags[64]; + __u32 buflen; + char buf[0]; +}; + +struct audit_field; + +struct audit_watch; + +struct audit_tree; + +struct audit_fsnotify_mark; + +struct audit_krule { + u32 pflags; + u32 flags; + u32 listnr; + u32 action; + u32 mask[64]; + u32 buflen; + u32 field_count; + char *filterkey; + struct audit_field *fields; + struct audit_field *arch_f; + struct audit_field *inode_f; + struct audit_watch *watch; + struct audit_tree *tree; + struct audit_fsnotify_mark *exe; + struct list_head rlist; + struct list_head list; + u64 prio; +}; + +struct audit_field { + u32 type; + union { + u32 val; + kuid_t uid; + kgid_t gid; + struct { + char *lsm_str; + void *lsm_rule; + }; + }; + u32 op; +}; + +struct audit_entry { + struct list_head list; + struct callback_head rcu; + struct audit_krule rule; +}; + +struct audit_buffer___2; + +typedef int __kernel_key_t; + +typedef __kernel_key_t 
key_t; + +struct kern_ipc_perm { + spinlock_t lock; + bool deleted; + int id; + key_t key; + kuid_t uid; + kgid_t gid; + kuid_t cuid; + kgid_t cgid; + umode_t mode; + long unsigned int seq; + void *security; + struct rhash_head khtnode; + struct callback_head rcu; + refcount_t refcount; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct cpu_vfs_cap_data { + __u32 magic_etc; + kernel_cap_t permitted; + kernel_cap_t inheritable; + kuid_t rootid; +}; + +typedef struct fsnotify_mark_connector *fsnotify_connp_t; + +struct fsnotify_mark_connector { + spinlock_t lock; + short unsigned int type; + short unsigned int flags; + __kernel_fsid_t fsid; + union { + fsnotify_connp_t *obj; + struct fsnotify_mark_connector *destroy_next; + }; + struct hlist_head list; +}; + +enum audit_nfcfgop { + AUDIT_XT_OP_REGISTER = 0, + AUDIT_XT_OP_REPLACE = 1, + AUDIT_XT_OP_UNREGISTER = 2, + AUDIT_NFT_OP_TABLE_REGISTER = 3, + AUDIT_NFT_OP_TABLE_UNREGISTER = 4, + AUDIT_NFT_OP_CHAIN_REGISTER = 5, + AUDIT_NFT_OP_CHAIN_UNREGISTER = 6, + AUDIT_NFT_OP_RULE_REGISTER = 7, + AUDIT_NFT_OP_RULE_UNREGISTER = 8, + AUDIT_NFT_OP_SET_REGISTER = 9, + AUDIT_NFT_OP_SET_UNREGISTER = 10, + AUDIT_NFT_OP_SETELEM_REGISTER = 11, + AUDIT_NFT_OP_SETELEM_UNREGISTER = 12, + AUDIT_NFT_OP_GEN_REGISTER = 13, + AUDIT_NFT_OP_OBJ_REGISTER = 14, + AUDIT_NFT_OP_OBJ_UNREGISTER = 15, + AUDIT_NFT_OP_OBJ_RESET = 16, + AUDIT_NFT_OP_FLOWTABLE_REGISTER = 17, + AUDIT_NFT_OP_FLOWTABLE_UNREGISTER = 18, + AUDIT_NFT_OP_INVALID = 19, +}; + +enum fsnotify_obj_type { + FSNOTIFY_OBJ_TYPE_INODE = 0, + FSNOTIFY_OBJ_TYPE_PARENT = 1, + FSNOTIFY_OBJ_TYPE_VFSMOUNT = 2, + FSNOTIFY_OBJ_TYPE_SB = 3, + FSNOTIFY_OBJ_TYPE_COUNT = 4, + FSNOTIFY_OBJ_TYPE_DETACHED = 4, +}; + +struct audit_aux_data { + struct audit_aux_data *next; + int type; +}; + +struct audit_chunk; + +struct audit_tree_refs { + struct audit_tree_refs *next; + struct audit_chunk *c[31]; +}; + +struct audit_aux_data_pids { + struct audit_aux_data d; + pid_t target_pid[16]; + kuid_t target_auid[16]; + kuid_t target_uid[16]; + unsigned int target_sessionid[16]; + u32 target_sid[16]; + char target_comm[256]; + int pid_count; +}; + +struct audit_aux_data_bprm_fcaps { + struct audit_aux_data d; + struct audit_cap_data fcap; + unsigned int fcap_ver; + struct audit_cap_data old_pcap; + struct audit_cap_data new_pcap; +}; + +struct audit_nfcfgop_tab { + enum audit_nfcfgop op; + const char *s; +}; + +struct audit_parent; + +struct audit_watch { + refcount_t count; + dev_t dev; + char *path; + long unsigned int ino; + struct audit_parent *parent; + struct list_head wlist; + struct list_head rules; +}; + +struct fsnotify_group; + +struct fsnotify_iter_info; + +struct fsnotify_mark; + +struct fsnotify_event; + +struct fsnotify_ops { + int (*handle_event)(struct fsnotify_group *, u32, const void *, int, struct inode *, const struct qstr *, u32, struct fsnotify_iter_info *); + int (*handle_inode_event)(struct fsnotify_mark *, u32, struct inode *, struct inode *, const struct qstr *, u32); + void (*free_group_priv)(struct fsnotify_group *); + void (*freeing_mark)(struct fsnotify_mark *, struct fsnotify_group *); + void (*free_event)(struct fsnotify_event *); + void (*free_mark)(struct fsnotify_mark *); +}; + +struct inotify_group_private_data { + spinlock_t idr_lock; + struct idr idr; + struct ucounts *ucounts; +}; + +struct fanotify_group_private_data { + struct list_head access_list; + wait_queue_head_t access_waitq; + int flags; + int f_flags; + unsigned int max_marks; + struct user_struct 
*user; +}; + +struct fsnotify_group { + const struct fsnotify_ops *ops; + refcount_t refcnt; + spinlock_t notification_lock; + struct list_head notification_list; + wait_queue_head_t notification_waitq; + unsigned int q_len; + unsigned int max_events; + unsigned int priority; + bool shutdown; + struct mutex mark_mutex; + atomic_t num_marks; + atomic_t user_waits; + struct list_head marks_list; + struct fasync_struct *fsn_fa; + struct fsnotify_event *overflow_event; + struct mem_cgroup *memcg; + union { + void *private; + struct inotify_group_private_data inotify_data; + struct fanotify_group_private_data fanotify_data; + }; +}; + +struct fsnotify_iter_info { + struct fsnotify_mark *marks[4]; + unsigned int report_mask; + int srcu_idx; +}; + +struct fsnotify_mark { + __u32 mask; + refcount_t refcnt; + struct fsnotify_group *group; + struct list_head g_list; + spinlock_t lock; + struct hlist_node obj_list; + struct fsnotify_mark_connector *connector; + __u32 ignored_mask; + unsigned int flags; +}; + +struct fsnotify_event { + struct list_head list; + long unsigned int objectid; +}; + +struct audit_parent { + struct list_head watches; + struct fsnotify_mark mark; +}; + +struct audit_fsnotify_mark { + dev_t dev; + long unsigned int ino; + char *path; + struct fsnotify_mark mark; + struct audit_krule *rule; +}; + +struct audit_chunk___2; + +struct audit_tree { + refcount_t count; + int goner; + struct audit_chunk___2 *root; + struct list_head chunks; + struct list_head rules; + struct list_head list; + struct list_head same_root; + struct callback_head head; + char pathname[0]; +}; + +struct node { + struct list_head list; + struct audit_tree *owner; + unsigned int index; +}; + +struct audit_chunk___2 { + struct list_head hash; + long unsigned int key; + struct fsnotify_mark *mark; + struct list_head trees; + int count; + atomic_long_t refs; + struct callback_head head; + struct node owners[0]; +}; + +struct audit_tree_mark { + struct fsnotify_mark mark; + struct audit_chunk___2 *chunk; +}; + +enum { + HASH_SIZE = 128, +}; + +struct kprobe_blacklist_entry { + struct list_head list; + long unsigned int start_addr; + long unsigned int end_addr; +}; + +enum perf_record_ksymbol_type { + PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0, + PERF_RECORD_KSYMBOL_TYPE_BPF = 1, + PERF_RECORD_KSYMBOL_TYPE_OOL = 2, + PERF_RECORD_KSYMBOL_TYPE_MAX = 3, +}; + +struct kprobe_insn_page { + struct list_head list; + kprobe_opcode_t *insns; + struct kprobe_insn_cache *cache; + int nused; + int ngarbage; + char slot_used[0]; +}; + +enum kprobe_slot_state { + SLOT_CLEAN = 0, + SLOT_DIRTY = 1, + SLOT_USED = 2, +}; + +struct serial_icounter_struct { + int cts; + int dsr; + int rng; + int dcd; + int rx; + int tx; + int frame; + int overrun; + int parity; + int brk; + int buf_overrun; + int reserved[9]; +}; + +struct serial_struct { + int type; + int line; + unsigned int port; + int irq; + int flags; + int xmit_fifo_size; + int custom_divisor; + int baud_base; + short unsigned int close_delay; + char io_type; + char reserved_char[1]; + int hub6; + short unsigned int closing_wait; + short unsigned int closing_wait2; + unsigned char *iomem_base; + short unsigned int iomem_reg_shift; + unsigned int port_high; + long unsigned int iomap_base; +}; + +struct kgdb_io { + const char *name; + int (*read_char)(); + void (*write_char)(u8); + void (*flush)(); + int (*init)(); + void (*deinit)(); + void (*pre_exception)(); + void (*post_exception)(); + struct console *cons; +}; + +enum { + KDB_NOT_INITIALIZED = 0, + KDB_INIT_EARLY = 1, + 
KDB_INIT_FULL = 2, +}; + +struct kgdb_state { + int ex_vector; + int signo; + int err_code; + int cpu; + int pass_exception; + long unsigned int thr_query; + long unsigned int threadid; + long int kgdb_usethreadid; + struct pt_regs *linux_regs; + atomic_t *send_ready; +}; + +struct debuggerinfo_struct { + void *debuggerinfo; + struct task_struct *task; + int exception_state; + int ret_state; + int irq_depth; + int enter_kgdb; + bool rounding_up; +}; + +struct seccomp_data { + int nr; + __u32 arch; + __u64 instruction_pointer; + __u64 args[6]; +}; + +struct seccomp_notif_sizes { + __u16 seccomp_notif; + __u16 seccomp_notif_resp; + __u16 seccomp_data; +}; + +struct seccomp_notif { + __u64 id; + __u32 pid; + __u32 flags; + struct seccomp_data data; +}; + +struct seccomp_notif_resp { + __u64 id; + __s64 val; + __s32 error; + __u32 flags; +}; + +struct seccomp_notif_addfd { + __u64 id; + __u32 flags; + __u32 srcfd; + __u32 newfd; + __u32 newfd_flags; +}; + +struct notification; + +struct seccomp_filter { + refcount_t refs; + refcount_t users; + bool log; + struct seccomp_filter *prev; + struct bpf_prog *prog; + struct notification *notif; + struct mutex notify_lock; + wait_queue_head_t wqh; +}; + +struct seccomp_metadata { + __u64 filter_off; + __u64 flags; +}; + +struct sock_fprog { + short unsigned int len; + struct sock_filter *filter; +}; + +struct compat_sock_fprog { + u16 len; + compat_uptr_t filter; +}; + +enum notify_state { + SECCOMP_NOTIFY_INIT = 0, + SECCOMP_NOTIFY_SENT = 1, + SECCOMP_NOTIFY_REPLIED = 2, +}; + +struct seccomp_knotif { + struct task_struct *task; + u64 id; + const struct seccomp_data *data; + enum notify_state state; + int error; + long int val; + u32 flags; + struct completion ready; + struct list_head list; + struct list_head addfd; +}; + +struct seccomp_kaddfd { + struct file *file; + int fd; + unsigned int flags; + int ret; + struct completion completion; + struct list_head list; +}; + +struct notification { + struct semaphore request; + u64 next_id; + struct list_head notifications; +}; + +struct seccomp_log_name { + u32 log; + const char *name; +}; + +struct rchan; + +struct rchan_buf { + void *start; + void *data; + size_t offset; + size_t subbufs_produced; + size_t subbufs_consumed; + struct rchan *chan; + wait_queue_head_t read_wait; + struct irq_work wakeup_work; + struct dentry *dentry; + struct kref kref; + struct page **page_array; + unsigned int page_count; + unsigned int finalized; + size_t *padding; + size_t prev_padding; + size_t bytes_consumed; + size_t early_bytes; + unsigned int cpu; + long: 32; + long: 64; + long: 64; + long: 64; +}; + +struct rchan_callbacks; + +struct rchan { + u32 version; + size_t subbuf_size; + size_t n_subbufs; + size_t alloc_size; + struct rchan_callbacks *cb; + struct kref kref; + void *private_data; + size_t last_toobig; + struct rchan_buf **buf; + int is_global; + struct list_head list; + struct dentry *parent; + int has_base_filename; + char base_filename[255]; +}; + +struct rchan_callbacks { + int (*subbuf_start)(struct rchan_buf *, void *, void *, size_t); + void (*buf_mapped)(struct rchan_buf *, struct file *); + void (*buf_unmapped)(struct rchan_buf *, struct file *); + struct dentry * (*create_buf_file)(const char *, struct dentry *, umode_t, struct rchan_buf *, int *); + int (*remove_buf_file)(struct dentry *); +}; + +struct partial_page { + unsigned int offset; + unsigned int len; + long unsigned int private; +}; + +struct splice_pipe_desc { + struct page **pages; + struct partial_page *partial; + int nr_pages; + 
unsigned int nr_pages_max; + const struct pipe_buf_operations *ops; + void (*spd_release)(struct splice_pipe_desc *, unsigned int); +}; + +struct rchan_percpu_buf_dispatcher { + struct rchan_buf *buf; + struct dentry *dentry; +}; + +enum { + TASKSTATS_TYPE_UNSPEC = 0, + TASKSTATS_TYPE_PID = 1, + TASKSTATS_TYPE_TGID = 2, + TASKSTATS_TYPE_STATS = 3, + TASKSTATS_TYPE_AGGR_PID = 4, + TASKSTATS_TYPE_AGGR_TGID = 5, + TASKSTATS_TYPE_NULL = 6, + __TASKSTATS_TYPE_MAX = 7, +}; + +enum { + TASKSTATS_CMD_ATTR_UNSPEC = 0, + TASKSTATS_CMD_ATTR_PID = 1, + TASKSTATS_CMD_ATTR_TGID = 2, + TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 3, + TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 4, + __TASKSTATS_CMD_ATTR_MAX = 5, +}; + +enum { + CGROUPSTATS_CMD_UNSPEC = 3, + CGROUPSTATS_CMD_GET = 4, + CGROUPSTATS_CMD_NEW = 5, + __CGROUPSTATS_CMD_MAX = 6, +}; + +enum { + CGROUPSTATS_TYPE_UNSPEC = 0, + CGROUPSTATS_TYPE_CGROUP_STATS = 1, + __CGROUPSTATS_TYPE_MAX = 2, +}; + +enum { + CGROUPSTATS_CMD_ATTR_UNSPEC = 0, + CGROUPSTATS_CMD_ATTR_FD = 1, + __CGROUPSTATS_CMD_ATTR_MAX = 2, +}; + +struct genlmsghdr { + __u8 cmd; + __u8 version; + __u16 reserved; +}; + +enum { + NLA_UNSPEC = 0, + NLA_U8 = 1, + NLA_U16 = 2, + NLA_U32 = 3, + NLA_U64 = 4, + NLA_STRING = 5, + NLA_FLAG = 6, + NLA_MSECS = 7, + NLA_NESTED = 8, + NLA_NESTED_ARRAY = 9, + NLA_NUL_STRING = 10, + NLA_BINARY = 11, + NLA_S8 = 12, + NLA_S16 = 13, + NLA_S32 = 14, + NLA_S64 = 15, + NLA_BITFIELD32 = 16, + NLA_REJECT = 17, + __NLA_TYPE_MAX = 18, +}; + +struct genl_multicast_group { + char name[16]; +}; + +struct genl_ops; + +struct genl_info; + +struct genl_small_ops; + +struct genl_family { + int id; + unsigned int hdrsize; + char name[16]; + unsigned int version; + unsigned int maxattr; + unsigned int mcgrp_offset; + u8 netnsok: 1; + u8 parallel_ops: 1; + u8 n_ops; + u8 n_small_ops; + u8 n_mcgrps; + const struct nla_policy *policy; + int (*pre_doit)(const struct genl_ops *, struct sk_buff *, struct genl_info *); + void (*post_doit)(const struct genl_ops *, struct sk_buff *, struct genl_info *); + const struct genl_ops *ops; + const struct genl_small_ops *small_ops; + const struct genl_multicast_group *mcgrps; + struct module *module; +}; + +struct genl_ops { + int (*doit)(struct sk_buff *, struct genl_info *); + int (*start)(struct netlink_callback *); + int (*dumpit)(struct sk_buff *, struct netlink_callback *); + int (*done)(struct netlink_callback *); + const struct nla_policy *policy; + unsigned int maxattr; + u8 cmd; + u8 internal_flags; + u8 flags; + u8 validate; +}; + +struct genl_info { + u32 snd_seq; + u32 snd_portid; + struct nlmsghdr *nlhdr; + struct genlmsghdr *genlhdr; + void *userhdr; + struct nlattr **attrs; + possible_net_t _net; + void *user_ptr[2]; + struct netlink_ext_ack *extack; +}; + +struct genl_small_ops { + int (*doit)(struct sk_buff *, struct genl_info *); + int (*dumpit)(struct sk_buff *, struct netlink_callback *); + u8 cmd; + u8 internal_flags; + u8 flags; + u8 validate; +}; + +enum genl_validate_flags { + GENL_DONT_VALIDATE_STRICT = 1, + GENL_DONT_VALIDATE_DUMP = 2, + GENL_DONT_VALIDATE_DUMP_STRICT = 4, +}; + +struct listener { + struct list_head list; + pid_t pid; + char valid; +}; + +struct listener_list { + struct rw_semaphore sem; + struct list_head list; +}; + +enum actions { + REGISTER = 0, + DEREGISTER = 1, + CPU_DONT_CARE = 2, +}; + +struct tp_module { + struct list_head list; + struct module *mod; +}; + +struct tp_probes { + struct callback_head rcu; + struct tracepoint_func probes[0]; +}; + +enum { + FTRACE_OPS_FL_ENABLED = 1, + 
FTRACE_OPS_FL_DYNAMIC = 2, + FTRACE_OPS_FL_SAVE_REGS = 4, + FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 8, + FTRACE_OPS_FL_RECURSION_SAFE = 16, + FTRACE_OPS_FL_STUB = 32, + FTRACE_OPS_FL_INITIALIZED = 64, + FTRACE_OPS_FL_DELETED = 128, + FTRACE_OPS_FL_ADDING = 256, + FTRACE_OPS_FL_REMOVING = 512, + FTRACE_OPS_FL_MODIFYING = 1024, + FTRACE_OPS_FL_ALLOC_TRAMP = 2048, + FTRACE_OPS_FL_IPMODIFY = 4096, + FTRACE_OPS_FL_PID = 8192, + FTRACE_OPS_FL_RCU = 16384, + FTRACE_OPS_FL_TRACE_ARRAY = 32768, + FTRACE_OPS_FL_PERMANENT = 65536, + FTRACE_OPS_FL_DIRECT = 131072, +}; + +struct ftrace_hash { + long unsigned int size_bits; + struct hlist_head *buckets; + long unsigned int count; + long unsigned int flags; + struct callback_head rcu; +}; + +struct ftrace_func_entry { + struct hlist_node hlist; + long unsigned int ip; + long unsigned int direct; +}; + +enum ftrace_bug_type { + FTRACE_BUG_UNKNOWN = 0, + FTRACE_BUG_INIT = 1, + FTRACE_BUG_NOP = 2, + FTRACE_BUG_CALL = 3, + FTRACE_BUG_UPDATE = 4, +}; + +enum { + FTRACE_FL_ENABLED = 2147483648, + FTRACE_FL_REGS = 1073741824, + FTRACE_FL_REGS_EN = 536870912, + FTRACE_FL_TRAMP = 268435456, + FTRACE_FL_TRAMP_EN = 134217728, + FTRACE_FL_IPMODIFY = 67108864, + FTRACE_FL_DISABLED = 33554432, + FTRACE_FL_DIRECT = 16777216, + FTRACE_FL_DIRECT_EN = 8388608, +}; + +enum { + FTRACE_UPDATE_IGNORE = 0, + FTRACE_UPDATE_MAKE_CALL = 1, + FTRACE_UPDATE_MODIFY_CALL = 2, + FTRACE_UPDATE_MAKE_NOP = 3, +}; + +enum { + FTRACE_ITER_FILTER = 1, + FTRACE_ITER_NOTRACE = 2, + FTRACE_ITER_PRINTALL = 4, + FTRACE_ITER_DO_PROBES = 8, + FTRACE_ITER_PROBE = 16, + FTRACE_ITER_MOD = 32, + FTRACE_ITER_ENABLED = 64, +}; + +struct prog_entry; + +struct event_filter { + struct prog_entry *prog; + char *filter_string; +}; + +struct trace_array_cpu; + +struct array_buffer { + struct trace_array *tr; + struct trace_buffer *buffer; + struct trace_array_cpu *data; + u64 time_start; + int cpu; +}; + +struct trace_pid_list; + +struct trace_options; + +struct trace_array { + struct list_head list; + char *name; + struct array_buffer array_buffer; + struct trace_pid_list *filtered_pids; + struct trace_pid_list *filtered_no_pids; + arch_spinlock_t max_lock; + int buffer_disabled; + int sys_refcount_enter; + int sys_refcount_exit; + struct trace_event_file *enter_syscall_files[441]; + struct trace_event_file *exit_syscall_files[441]; + int stop_count; + int clock_id; + int nr_topts; + bool clear_trace; + int buffer_percent; + unsigned int n_err_log_entries; + struct tracer *current_trace; + unsigned int trace_flags; + unsigned char trace_flags_index[32]; + unsigned int flags; + raw_spinlock_t start_lock; + struct list_head err_log; + struct dentry *dir; + struct dentry *options; + struct dentry *percpu_dir; + struct dentry *event_dir; + struct trace_options *topts; + struct list_head systems; + struct list_head events; + struct trace_event_file *trace_marker_file; + cpumask_var_t tracing_cpumask; + int ref; + int trace_ref; + struct ftrace_ops *ops; + struct trace_pid_list *function_pids; + struct trace_pid_list *function_no_pids; + struct list_head func_probes; + struct list_head mod_trace; + struct list_head mod_notrace; + int function_enabled; + int time_stamp_abs_ref; + struct list_head hist_vars; +}; + +struct tracer_flags; + +struct tracer { + const char *name; + int (*init)(struct trace_array *); + void (*reset)(struct trace_array *); + void (*start)(struct trace_array *); + void (*stop)(struct trace_array *); + int (*update_thresh)(struct trace_array *); + void (*open)(struct trace_iterator *); + void 
(*pipe_open)(struct trace_iterator *); + void (*close)(struct trace_iterator *); + void (*pipe_close)(struct trace_iterator *); + ssize_t (*read)(struct trace_iterator *, struct file *, char *, size_t, loff_t *); + ssize_t (*splice_read)(struct trace_iterator *, struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); + void (*print_header)(struct seq_file *); + enum print_line_t (*print_line)(struct trace_iterator *); + int (*set_flag)(struct trace_array *, u32, u32, int); + int (*flag_changed)(struct trace_array *, u32, int); + struct tracer *next; + struct tracer_flags *flags; + int enabled; + bool print_max; + bool allow_instances; + bool noboot; +}; + +struct event_subsystem; + +struct trace_subsystem_dir { + struct list_head list; + struct event_subsystem *subsystem; + struct trace_array *tr; + struct dentry *entry; + int ref_count; + int nr_events; +}; + +struct trace_array_cpu { + atomic_t disabled; + void *buffer_page; + long unsigned int entries; + long unsigned int saved_latency; + long unsigned int critical_start; + long unsigned int critical_end; + long unsigned int critical_sequence; + long unsigned int nice; + long unsigned int policy; + long unsigned int rt_priority; + long unsigned int skipped_entries; + u64 preempt_timestamp; + pid_t pid; + kuid_t uid; + char comm[16]; + int ftrace_ignore_pid; + bool ignore_pid; +}; + +struct trace_option_dentry; + +struct trace_options { + struct tracer *tracer; + struct trace_option_dentry *topts; +}; + +struct tracer_opt; + +struct trace_option_dentry { + struct tracer_opt *opt; + struct tracer_flags *flags; + struct trace_array *tr; + struct dentry *entry; +}; + +struct trace_pid_list { + int pid_max; + long unsigned int *pids; +}; + +enum { + TRACE_PIDS = 1, + TRACE_NO_PIDS = 2, +}; + +enum { + TRACE_ARRAY_FL_GLOBAL = 1, +}; + +struct tracer_opt { + const char *name; + u32 bit; +}; + +struct tracer_flags { + u32 val; + struct tracer_opt *opts; + struct tracer *trace; +}; + +enum { + TRACE_FTRACE_BIT = 0, + TRACE_FTRACE_NMI_BIT = 1, + TRACE_FTRACE_IRQ_BIT = 2, + TRACE_FTRACE_SIRQ_BIT = 3, + TRACE_INTERNAL_BIT = 4, + TRACE_INTERNAL_NMI_BIT = 5, + TRACE_INTERNAL_IRQ_BIT = 6, + TRACE_INTERNAL_SIRQ_BIT = 7, + TRACE_BRANCH_BIT = 8, + TRACE_IRQ_BIT = 9, + TRACE_GRAPH_BIT = 10, + TRACE_GRAPH_DEPTH_START_BIT = 11, + TRACE_GRAPH_DEPTH_END_BIT = 12, + TRACE_GRAPH_NOTRACE_BIT = 13, + TRACE_TRANSITION_BIT = 14, +}; + +struct ftrace_mod_load { + struct list_head list; + char *func; + char *module; + int enable; +}; + +enum { + FTRACE_HASH_FL_MOD = 1, +}; + +struct ftrace_func_command { + struct list_head list; + char *name; + int (*func)(struct trace_array *, struct ftrace_hash *, char *, char *, char *, int); +}; + +struct ftrace_probe_ops { + void (*func)(long unsigned int, long unsigned int, struct trace_array *, struct ftrace_probe_ops *, void *); + int (*init)(struct ftrace_probe_ops *, struct trace_array *, long unsigned int, void *, void **); + void (*free)(struct ftrace_probe_ops *, struct trace_array *, long unsigned int, void *); + int (*print)(struct seq_file *, long unsigned int, struct ftrace_probe_ops *, void *); +}; + +typedef int (*ftrace_mapper_func)(void *); + +struct trace_parser { + bool cont; + char *buffer; + unsigned int idx; + unsigned int size; +}; + +enum trace_iterator_bits { + TRACE_ITER_PRINT_PARENT_BIT = 0, + TRACE_ITER_SYM_OFFSET_BIT = 1, + TRACE_ITER_SYM_ADDR_BIT = 2, + TRACE_ITER_VERBOSE_BIT = 3, + TRACE_ITER_RAW_BIT = 4, + TRACE_ITER_HEX_BIT = 5, + TRACE_ITER_BIN_BIT = 6, + TRACE_ITER_BLOCK_BIT = 
7, + TRACE_ITER_PRINTK_BIT = 8, + TRACE_ITER_ANNOTATE_BIT = 9, + TRACE_ITER_USERSTACKTRACE_BIT = 10, + TRACE_ITER_SYM_USEROBJ_BIT = 11, + TRACE_ITER_PRINTK_MSGONLY_BIT = 12, + TRACE_ITER_CONTEXT_INFO_BIT = 13, + TRACE_ITER_LATENCY_FMT_BIT = 14, + TRACE_ITER_RECORD_CMD_BIT = 15, + TRACE_ITER_RECORD_TGID_BIT = 16, + TRACE_ITER_OVERWRITE_BIT = 17, + TRACE_ITER_STOP_ON_FREE_BIT = 18, + TRACE_ITER_IRQ_INFO_BIT = 19, + TRACE_ITER_MARKERS_BIT = 20, + TRACE_ITER_EVENT_FORK_BIT = 21, + TRACE_ITER_PAUSE_ON_TRACE_BIT = 22, + TRACE_ITER_FUNCTION_BIT = 23, + TRACE_ITER_FUNC_FORK_BIT = 24, + TRACE_ITER_DISPLAY_GRAPH_BIT = 25, + TRACE_ITER_STACKTRACE_BIT = 26, + TRACE_ITER_LAST_BIT = 27, +}; + +struct event_subsystem { + struct list_head list; + const char *name; + struct event_filter *filter; + int ref_count; +}; + +enum regex_type { + MATCH_FULL = 0, + MATCH_FRONT_ONLY = 1, + MATCH_MIDDLE_ONLY = 2, + MATCH_END_ONLY = 3, + MATCH_GLOB = 4, + MATCH_INDEX = 5, +}; + +enum { + FTRACE_MODIFY_ENABLE_FL = 1, + FTRACE_MODIFY_MAY_SLEEP_FL = 2, +}; + +struct ftrace_func_probe { + struct ftrace_probe_ops *probe_ops; + struct ftrace_ops ops; + struct trace_array *tr; + struct list_head list; + void *data; + int ref; +}; + +struct ftrace_page { + struct ftrace_page *next; + struct dyn_ftrace *records; + int index; + int size; +}; + +struct ftrace_rec_iter { + struct ftrace_page *pg; + int index; +}; + +struct ftrace_iterator { + loff_t pos; + loff_t func_pos; + loff_t mod_pos; + struct ftrace_page *pg; + struct dyn_ftrace *func; + struct ftrace_func_probe *probe; + struct ftrace_func_entry *probe_entry; + struct trace_parser parser; + struct ftrace_hash *hash; + struct ftrace_ops *ops; + struct trace_array *tr; + struct list_head *mod_list; + int pidx; + int idx; + unsigned int flags; +}; + +struct ftrace_glob { + char *search; + unsigned int len; + int type; +}; + +struct ftrace_func_map { + struct ftrace_func_entry entry; + void *data; +}; + +struct ftrace_func_mapper { + struct ftrace_hash hash; +}; + +enum graph_filter_type { + GRAPH_FILTER_NOTRACE = 0, + GRAPH_FILTER_FUNCTION = 1, +}; + +struct ftrace_graph_data { + struct ftrace_hash *hash; + struct ftrace_func_entry *entry; + int idx; + enum graph_filter_type type; + struct ftrace_hash *new_hash; + const struct seq_operations *seq_ops; + struct trace_parser parser; +}; + +struct ftrace_mod_func { + struct list_head list; + char *name; + long unsigned int ip; + unsigned int size; +}; + +struct ftrace_mod_map { + struct callback_head rcu; + struct list_head list; + struct module *mod; + long unsigned int start_addr; + long unsigned int end_addr; + struct list_head funcs; + unsigned int num_funcs; +}; + +struct ftrace_init_func { + struct list_head list; + long unsigned int ip; +}; + +enum ring_buffer_type { + RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28, + RINGBUF_TYPE_PADDING = 29, + RINGBUF_TYPE_TIME_EXTEND = 30, + RINGBUF_TYPE_TIME_STAMP = 31, +}; + +enum ring_buffer_flags { + RB_FL_OVERWRITE = 1, +}; + +struct ring_buffer_per_cpu; + +struct buffer_page; + +struct ring_buffer_iter { + struct ring_buffer_per_cpu *cpu_buffer; + long unsigned int head; + long unsigned int next_event; + struct buffer_page *head_page; + struct buffer_page *cache_reader_page; + long unsigned int cache_read; + u64 read_stamp; + u64 page_stamp; + struct ring_buffer_event *event; + int missed_events; +}; + +struct rb_irq_work { + struct irq_work work; + wait_queue_head_t waiters; + wait_queue_head_t full_waiters; + bool waiters_pending; + bool full_waiters_pending; + bool wakeup_full; +}; + 
+struct trace_buffer___2 { + unsigned int flags; + int cpus; + atomic_t record_disabled; + cpumask_var_t cpumask; + struct lock_class_key *reader_lock_key; + struct mutex mutex; + struct ring_buffer_per_cpu **buffers; + struct hlist_node node; + u64 (*clock)(); + struct rb_irq_work irq_work; + bool time_stamp_abs; +}; + +enum { + RB_LEN_TIME_EXTEND = 8, + RB_LEN_TIME_STAMP = 8, +}; + +struct buffer_data_page { + u64 time_stamp; + local_t commit; + unsigned char data[0]; +}; + +struct buffer_page { + struct list_head list; + local_t write; + unsigned int read; + local_t entries; + long unsigned int real_end; + struct buffer_data_page *page; +}; + +struct rb_event_info { + u64 ts; + u64 delta; + u64 before; + u64 after; + long unsigned int length; + struct buffer_page *tail_page; + int add_timestamp; +}; + +enum { + RB_ADD_STAMP_NONE = 0, + RB_ADD_STAMP_EXTEND = 2, + RB_ADD_STAMP_ABSOLUTE = 4, + RB_ADD_STAMP_FORCE = 8, +}; + +enum { + RB_CTX_TRANSITION = 0, + RB_CTX_NMI = 1, + RB_CTX_IRQ = 2, + RB_CTX_SOFTIRQ = 3, + RB_CTX_NORMAL = 4, + RB_CTX_MAX = 5, +}; + +struct rb_time_struct { + local64_t time; +}; + +typedef struct rb_time_struct rb_time_t; + +struct ring_buffer_per_cpu { + int cpu; + atomic_t record_disabled; + atomic_t resize_disabled; + struct trace_buffer___2 *buffer; + raw_spinlock_t reader_lock; + arch_spinlock_t lock; + struct lock_class_key lock_key; + struct buffer_data_page *free_page; + long unsigned int nr_pages; + unsigned int current_context; + struct list_head *pages; + struct buffer_page *head_page; + struct buffer_page *tail_page; + struct buffer_page *commit_page; + struct buffer_page *reader_page; + long unsigned int lost_events; + long unsigned int last_overrun; + long unsigned int nest; + local_t entries_bytes; + local_t entries; + local_t overrun; + local_t commit_overrun; + local_t dropped_events; + local_t committing; + local_t commits; + local_t pages_touched; + local_t pages_read; + long int last_pages_touch; + size_t shortest_full; + long unsigned int read; + long unsigned int read_bytes; + rb_time_t write_stamp; + rb_time_t before_stamp; + u64 read_stamp; + long int nr_pages_to_update; + struct list_head new_pages; + struct work_struct update_pages_work; + struct completion update_done; + struct rb_irq_work irq_work; +}; + +struct trace_export { + struct trace_export *next; + void (*write)(struct trace_export *, const void *, unsigned int); + int flags; +}; + +enum trace_iter_flags { + TRACE_FILE_LAT_FMT = 1, + TRACE_FILE_ANNOTATE = 2, + TRACE_FILE_TIME_IN_NS = 4, +}; + +enum event_trigger_type { + ETT_NONE = 0, + ETT_TRACE_ONOFF = 1, + ETT_SNAPSHOT = 2, + ETT_STACKTRACE = 4, + ETT_EVENT_ENABLE = 8, + ETT_EVENT_HIST = 16, + ETT_HIST_ENABLE = 32, +}; + +enum trace_type { + __TRACE_FIRST_TYPE = 0, + TRACE_FN = 1, + TRACE_CTX = 2, + TRACE_WAKE = 3, + TRACE_STACK = 4, + TRACE_PRINT = 5, + TRACE_BPRINT = 6, + TRACE_MMIO_RW = 7, + TRACE_MMIO_MAP = 8, + TRACE_BRANCH = 9, + TRACE_GRAPH_RET = 10, + TRACE_GRAPH_ENT = 11, + TRACE_USER_STACK = 12, + TRACE_BLK = 13, + TRACE_BPUTS = 14, + TRACE_HWLAT = 15, + TRACE_RAW_DATA = 16, + __TRACE_LAST_TYPE = 17, +}; + +struct ftrace_entry { + struct trace_entry ent; + long unsigned int ip; + long unsigned int parent_ip; +}; + +struct stack_entry { + struct trace_entry ent; + int size; + long unsigned int caller[8]; +}; + +struct bprint_entry { + struct trace_entry ent; + long unsigned int ip; + const char *fmt; + u32 buf[0]; +}; + +struct print_entry { + struct trace_entry ent; + long unsigned int ip; + char buf[0]; +}; + 
+struct raw_data_entry { + struct trace_entry ent; + unsigned int id; + char buf[0]; +}; + +struct bputs_entry { + struct trace_entry ent; + long unsigned int ip; + const char *str; +}; + +enum trace_flag_type { + TRACE_FLAG_IRQS_OFF = 1, + TRACE_FLAG_IRQS_NOSUPPORT = 2, + TRACE_FLAG_NEED_RESCHED = 4, + TRACE_FLAG_HARDIRQ = 8, + TRACE_FLAG_SOFTIRQ = 16, + TRACE_FLAG_PREEMPT_RESCHED = 32, + TRACE_FLAG_NMI = 64, +}; + +typedef bool (*cond_update_fn_t)(struct trace_array *, void *); + +enum trace_iterator_flags { + TRACE_ITER_PRINT_PARENT = 1, + TRACE_ITER_SYM_OFFSET = 2, + TRACE_ITER_SYM_ADDR = 4, + TRACE_ITER_VERBOSE = 8, + TRACE_ITER_RAW = 16, + TRACE_ITER_HEX = 32, + TRACE_ITER_BIN = 64, + TRACE_ITER_BLOCK = 128, + TRACE_ITER_PRINTK = 256, + TRACE_ITER_ANNOTATE = 512, + TRACE_ITER_USERSTACKTRACE = 1024, + TRACE_ITER_SYM_USEROBJ = 2048, + TRACE_ITER_PRINTK_MSGONLY = 4096, + TRACE_ITER_CONTEXT_INFO = 8192, + TRACE_ITER_LATENCY_FMT = 16384, + TRACE_ITER_RECORD_CMD = 32768, + TRACE_ITER_RECORD_TGID = 65536, + TRACE_ITER_OVERWRITE = 131072, + TRACE_ITER_STOP_ON_FREE = 262144, + TRACE_ITER_IRQ_INFO = 524288, + TRACE_ITER_MARKERS = 1048576, + TRACE_ITER_EVENT_FORK = 2097152, + TRACE_ITER_PAUSE_ON_TRACE = 4194304, + TRACE_ITER_FUNCTION = 8388608, + TRACE_ITER_FUNC_FORK = 16777216, + TRACE_ITER_DISPLAY_GRAPH = 33554432, + TRACE_ITER_STACKTRACE = 67108864, +}; + +struct saved_cmdlines_buffer { + unsigned int map_pid_to_cmdline[32769]; + unsigned int *map_cmdline_to_pid; + unsigned int cmdline_num; + int cmdline_idx; + char *saved_cmdlines; +}; + +struct ftrace_stack { + long unsigned int calls[1024]; +}; + +struct ftrace_stacks { + struct ftrace_stack stacks[4]; +}; + +struct trace_buffer_struct { + int nesting; + char buffer[4096]; +}; + +struct ftrace_buffer_info { + struct trace_iterator iter; + void *spare; + unsigned int spare_cpu; + unsigned int read; +}; + +struct err_info { + const char **errs; + u8 type; + u8 pos; + u64 ts; +}; + +struct tracing_log_err { + struct list_head list; + struct err_info info; + char loc[128]; + char cmd[256]; +}; + +struct buffer_ref { + struct trace_buffer *buffer; + void *page; + int cpu; + refcount_t refcount; +}; + +struct ctx_switch_entry { + struct trace_entry ent; + unsigned int prev_pid; + unsigned int next_pid; + unsigned int next_cpu; + unsigned char prev_prio; + unsigned char prev_state; + unsigned char next_prio; + unsigned char next_state; +}; + +struct userstack_entry { + struct trace_entry ent; + unsigned int tgid; + long unsigned int caller[8]; +}; + +struct hwlat_entry { + struct trace_entry ent; + u64 duration; + u64 outer_duration; + u64 nmi_total_ts; + struct timespec64 timestamp; + unsigned int nmi_count; + unsigned int seqnum; + unsigned int count; +}; + +struct trace_mark { + long long unsigned int val; + char sym; +}; + +typedef int (*cmp_func_t)(const void *, const void *); + +struct tracer_stat { + const char *name; + void * (*stat_start)(struct tracer_stat *); + void * (*stat_next)(void *, int); + cmp_func_t stat_cmp; + int (*stat_show)(struct seq_file *, void *); + void (*stat_release)(void *); + int (*stat_headers)(struct seq_file *); +}; + +struct stat_node { + struct rb_node node; + void *stat; +}; + +struct stat_session { + struct list_head session_list; + struct tracer_stat *ts; + struct rb_root stat_root; + struct mutex stat_mutex; + struct dentry *file; +}; + +struct trace_bprintk_fmt { + struct list_head list; + const char *fmt; +}; + +enum { + TRACE_FUNC_OPT_STACK = 1, +}; + +struct ftrace_func_mapper___2; + +enum { + 
TRACE_NOP_OPT_ACCEPT = 1, + TRACE_NOP_OPT_REFUSE = 2, +}; + +struct ftrace_graph_ent { + long unsigned int func; + int depth; +} __attribute__((packed)); + +struct ftrace_graph_ret { + long unsigned int func; + long unsigned int overrun; + long long unsigned int calltime; + long long unsigned int rettime; + int depth; +} __attribute__((packed)); + +typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); + +typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); + +struct fgraph_ops { + trace_func_graph_ent_t entryfunc; + trace_func_graph_ret_t retfunc; +}; + +struct ftrace_graph_ent_entry { + struct trace_entry ent; + struct ftrace_graph_ent graph_ent; +} __attribute__((packed)); + +struct ftrace_graph_ret_entry { + struct trace_entry ent; + struct ftrace_graph_ret ret; +} __attribute__((packed)); + +struct fgraph_cpu_data { + pid_t last_pid; + int depth; + int depth_irq; + int ignore; + long unsigned int enter_funcs[50]; +}; + +struct fgraph_data { + struct fgraph_cpu_data *cpu_data; + struct ftrace_graph_ent_entry ent; + struct ftrace_graph_ret_entry ret; + int failed; + int cpu; +} __attribute__((packed)); + +enum { + FLAGS_FILL_FULL = 268435456, + FLAGS_FILL_START = 536870912, + FLAGS_FILL_END = 805306368, +}; + +struct blk_crypto_key; + +struct bio_crypt_ctx { + const struct blk_crypto_key *bc_key; + u64 bc_dun[4]; +}; + +typedef __u32 blk_mq_req_flags_t; + +struct disk_stats { + u64 nsecs[4]; + long unsigned int sectors[4]; + long unsigned int ios[4]; + long unsigned int merges[4]; + long unsigned int io_ticks; + local_t in_flight[2]; +}; + +struct blk_mq_ctxs; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_lists[3]; + long: 64; + }; + unsigned int cpu; + short unsigned int index_hw[3]; + struct blk_mq_hw_ctx *hctxs[3]; + long unsigned int rq_dispatched[2]; + long unsigned int rq_merged; + long unsigned int rq_completed[2]; + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct sbitmap_word; + +struct sbitmap { + unsigned int depth; + unsigned int shift; + unsigned int map_nr; + struct sbitmap_word *map; +}; + +struct blk_mq_tags; + +struct blk_mq_hw_ctx { + struct { + spinlock_t lock; + struct list_head dispatch; + long unsigned int state; + long: 64; + long: 64; + long: 64; + long: 64; + }; + struct delayed_work run_work; + cpumask_var_t cpumask; + int next_cpu; + int next_cpu_batch; + long unsigned int flags; + void *sched_data; + struct request_queue *queue; + struct blk_flush_queue *fq; + void *driver_data; + struct sbitmap ctx_map; + struct blk_mq_ctx *dispatch_from; + unsigned int dispatch_busy; + short unsigned int type; + short unsigned int nr_ctx; + struct blk_mq_ctx **ctxs; + spinlock_t dispatch_wait_lock; + wait_queue_entry_t dispatch_wait; + atomic_t wait_index; + struct blk_mq_tags *tags; + struct blk_mq_tags *sched_tags; + long unsigned int queued; + long unsigned int run; + long unsigned int dispatched[7]; + unsigned int numa_node; + unsigned int queue_num; + atomic_t nr_active; + atomic_t elevator_queued; + struct hlist_node cpuhp_online; + struct hlist_node cpuhp_dead; + struct kobject kobj; + long unsigned int poll_considered; + long unsigned int poll_invoked; + long unsigned int poll_success; + struct list_head hctx_list; + struct srcu_struct srcu[0]; + long: 64; + long: 64; +}; + +struct blk_mq_alloc_data { + struct request_queue *q; + blk_mq_req_flags_t flags; + unsigned int shallow_depth; + unsigned int cmd_flags; + struct 
blk_mq_ctx *ctx; + struct blk_mq_hw_ctx *hctx; +}; + +struct blk_stat_callback { + struct list_head list; + struct timer_list timer; + struct blk_rq_stat *cpu_stat; + int (*bucket_fn)(const struct request *); + unsigned int buckets; + struct blk_rq_stat *stat; + void (*timer_fn)(struct blk_stat_callback *); + void *data; + struct callback_head rcu; +}; + +struct blk_trace { + int trace_state; + struct rchan *rchan; + long unsigned int *sequence; + unsigned char *msg_data; + u16 act_mask; + u64 start_lba; + u64 end_lba; + u32 pid; + u32 dev; + struct dentry *dir; + struct dentry *dropped_file; + struct dentry *msg_file; + struct list_head running_list; + atomic_t dropped; +}; + +struct blk_flush_queue { + unsigned int flush_pending_idx: 1; + unsigned int flush_running_idx: 1; + blk_status_t rq_status; + long unsigned int flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + struct lock_class_key key; + spinlock_t mq_flush_lock; +}; + +struct blk_mq_queue_map { + unsigned int *mq_map; + unsigned int nr_queues; + unsigned int queue_offset; +}; + +struct sbq_wait_state; + +struct sbitmap_queue { + struct sbitmap sb; + unsigned int *alloc_hint; + unsigned int wake_batch; + atomic_t wake_index; + struct sbq_wait_state *ws; + atomic_t ws_active; + bool round_robin; + unsigned int min_shallow_depth; +}; + +struct blk_mq_tag_set { + struct blk_mq_queue_map map[3]; + unsigned int nr_maps; + const struct blk_mq_ops *ops; + unsigned int nr_hw_queues; + unsigned int queue_depth; + unsigned int reserved_tags; + unsigned int cmd_size; + int numa_node; + unsigned int timeout; + unsigned int flags; + void *driver_data; + atomic_t active_queues_shared_sbitmap; + struct sbitmap_queue __bitmap_tags; + struct sbitmap_queue __breserved_tags; + struct blk_mq_tags **tags; + struct mutex tag_list_lock; + struct list_head tag_list; +}; + +enum blktrace_cat { + BLK_TC_READ = 1, + BLK_TC_WRITE = 2, + BLK_TC_FLUSH = 4, + BLK_TC_SYNC = 8, + BLK_TC_SYNCIO = 8, + BLK_TC_QUEUE = 16, + BLK_TC_REQUEUE = 32, + BLK_TC_ISSUE = 64, + BLK_TC_COMPLETE = 128, + BLK_TC_FS = 256, + BLK_TC_PC = 512, + BLK_TC_NOTIFY = 1024, + BLK_TC_AHEAD = 2048, + BLK_TC_META = 4096, + BLK_TC_DISCARD = 8192, + BLK_TC_DRV_DATA = 16384, + BLK_TC_FUA = 32768, + BLK_TC_END = 32768, +}; + +enum blktrace_act { + __BLK_TA_QUEUE = 1, + __BLK_TA_BACKMERGE = 2, + __BLK_TA_FRONTMERGE = 3, + __BLK_TA_GETRQ = 4, + __BLK_TA_SLEEPRQ = 5, + __BLK_TA_REQUEUE = 6, + __BLK_TA_ISSUE = 7, + __BLK_TA_COMPLETE = 8, + __BLK_TA_PLUG = 9, + __BLK_TA_UNPLUG_IO = 10, + __BLK_TA_UNPLUG_TIMER = 11, + __BLK_TA_INSERT = 12, + __BLK_TA_SPLIT = 13, + __BLK_TA_BOUNCE = 14, + __BLK_TA_REMAP = 15, + __BLK_TA_ABORT = 16, + __BLK_TA_DRV_DATA = 17, + __BLK_TA_CGROUP = 256, +}; + +enum blktrace_notify { + __BLK_TN_PROCESS = 0, + __BLK_TN_TIMESTAMP = 1, + __BLK_TN_MESSAGE = 2, + __BLK_TN_CGROUP = 256, +}; + +struct blk_io_trace { + __u32 magic; + __u32 sequence; + __u64 time; + __u64 sector; + __u32 bytes; + __u32 action; + __u32 pid; + __u32 device; + __u32 cpu; + __u16 error; + __u16 pdu_len; +}; + +struct blk_io_trace_remap { + __be32 device_from; + __be32 device_to; + __be64 sector_from; +}; + +enum { + Blktrace_setup = 1, + Blktrace_running = 2, + Blktrace_stopped = 3, +}; + +struct blk_user_trace_setup { + char name[32]; + __u16 act_mask; + __u32 buf_size; + __u32 buf_nr; + __u64 start_lba; + __u64 end_lba; + __u32 pid; +}; + +struct sbitmap_word { + long unsigned int depth; + long: 64; + long: 64; + long: 64; + 
long: 64; + long: 64; + long: 64; + long: 64; + long unsigned int word; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long unsigned int cleared; + spinlock_t swap_lock; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct sbq_wait_state { + atomic_t wait_cnt; + wait_queue_head_t wait; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct blk_mq_tags { + unsigned int nr_tags; + unsigned int nr_reserved_tags; + atomic_t active_queues; + struct sbitmap_queue *bitmap_tags; + struct sbitmap_queue *breserved_tags; + struct sbitmap_queue __bitmap_tags; + struct sbitmap_queue __breserved_tags; + struct request **rqs; + struct request **static_rqs; + struct list_head page_list; +}; + +struct blk_mq_queue_data { + struct request *rq; + bool last; +}; + +enum blk_crypto_mode_num { + BLK_ENCRYPTION_MODE_INVALID = 0, + BLK_ENCRYPTION_MODE_AES_256_XTS = 1, + BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV = 2, + BLK_ENCRYPTION_MODE_ADIANTUM = 3, + BLK_ENCRYPTION_MODE_MAX = 4, +}; + +struct blk_crypto_config { + enum blk_crypto_mode_num crypto_mode; + unsigned int data_unit_size; + unsigned int dun_bytes; +}; + +struct blk_crypto_key { + struct blk_crypto_config crypto_cfg; + unsigned int data_unit_size_bits; + unsigned int size; + u8 raw[64]; +}; + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx *queue_ctx; +}; + +typedef void blk_log_action_t(struct trace_iterator *, const char *, bool); + +struct ftrace_event_field { + struct list_head link; + const char *name; + const char *type; + int filter_type; + int offset; + int size; + int is_signed; +}; + +enum { + FORMAT_HEADER = 1, + FORMAT_FIELD_SEPERATOR = 2, + FORMAT_PRINTFMT = 3, +}; + +struct event_probe_data { + struct trace_event_file *file; + long unsigned int count; + int ref; + bool enable; +}; + +struct syscall_trace_enter { + struct trace_entry ent; + int nr; + long unsigned int args[0]; +}; + +struct syscall_trace_exit { + struct trace_entry ent; + int nr; + long int ret; +}; + +struct syscall_tp_t { + long long unsigned int regs; + long unsigned int syscall_nr; + long unsigned int ret; +}; + +struct syscall_tp_t___2 { + long long unsigned int regs; + long unsigned int syscall_nr; + long unsigned int args[6]; +}; + +enum perf_event_sample_format { + PERF_SAMPLE_IP = 1, + PERF_SAMPLE_TID = 2, + PERF_SAMPLE_TIME = 4, + PERF_SAMPLE_ADDR = 8, + PERF_SAMPLE_READ = 16, + PERF_SAMPLE_CALLCHAIN = 32, + PERF_SAMPLE_ID = 64, + PERF_SAMPLE_CPU = 128, + PERF_SAMPLE_PERIOD = 256, + PERF_SAMPLE_STREAM_ID = 512, + PERF_SAMPLE_RAW = 1024, + PERF_SAMPLE_BRANCH_STACK = 2048, + PERF_SAMPLE_REGS_USER = 4096, + PERF_SAMPLE_STACK_USER = 8192, + PERF_SAMPLE_WEIGHT = 16384, + PERF_SAMPLE_DATA_SRC = 32768, + PERF_SAMPLE_IDENTIFIER = 65536, + PERF_SAMPLE_TRANSACTION = 131072, + PERF_SAMPLE_REGS_INTR = 262144, + PERF_SAMPLE_PHYS_ADDR = 524288, + PERF_SAMPLE_AUX = 1048576, + PERF_SAMPLE_CGROUP = 2097152, + PERF_SAMPLE_MAX = 4194304, + __PERF_SAMPLE_CALLCHAIN_EARLY = 0, +}; + +typedef long unsigned int perf_trace_t[256]; + +struct filter_pred; + +struct prog_entry { + int target; + int when_to_branch; + struct filter_pred *pred; +}; + +typedef int (*filter_pred_fn_t)(struct filter_pred *, void *); + +struct regex; + +typedef int (*regex_match_func)(char *, struct regex *, int); + +struct regex { + char pattern[256]; + int len; + int field_len; + regex_match_func match; +}; + +struct filter_pred { + filter_pred_fn_t fn; + u64 val; + struct regex regex; + short unsigned int 
*ops; + struct ftrace_event_field *field; + int offset; + int not; + int op; +}; + +enum filter_op_ids { + OP_GLOB = 0, + OP_NE = 1, + OP_EQ = 2, + OP_LE = 3, + OP_LT = 4, + OP_GE = 5, + OP_GT = 6, + OP_BAND = 7, + OP_MAX = 8, +}; + +enum { + FILT_ERR_NONE = 0, + FILT_ERR_INVALID_OP = 1, + FILT_ERR_TOO_MANY_OPEN = 2, + FILT_ERR_TOO_MANY_CLOSE = 3, + FILT_ERR_MISSING_QUOTE = 4, + FILT_ERR_OPERAND_TOO_LONG = 5, + FILT_ERR_EXPECT_STRING = 6, + FILT_ERR_EXPECT_DIGIT = 7, + FILT_ERR_ILLEGAL_FIELD_OP = 8, + FILT_ERR_FIELD_NOT_FOUND = 9, + FILT_ERR_ILLEGAL_INTVAL = 10, + FILT_ERR_BAD_SUBSYS_FILTER = 11, + FILT_ERR_TOO_MANY_PREDS = 12, + FILT_ERR_INVALID_FILTER = 13, + FILT_ERR_IP_FIELD_ONLY = 14, + FILT_ERR_INVALID_VALUE = 15, + FILT_ERR_ERRNO = 16, + FILT_ERR_NO_FILTER = 17, +}; + +struct filter_parse_error { + int lasterr; + int lasterr_pos; +}; + +typedef int (*parse_pred_fn)(const char *, void *, int, struct filter_parse_error *, struct filter_pred **); + +enum { + INVERT = 1, + PROCESS_AND = 2, + PROCESS_OR = 4, +}; + +enum { + TOO_MANY_CLOSE = 4294967295, + TOO_MANY_OPEN = 4294967294, + MISSING_QUOTE = 4294967293, +}; + +struct filter_list { + struct list_head list; + struct event_filter *filter; +}; + +struct function_filter_data { + struct ftrace_ops *ops; + int first_filter; + int first_notrace; +}; + +struct event_trigger_ops; + +struct event_command; + +struct event_trigger_data { + long unsigned int count; + int ref; + struct event_trigger_ops *ops; + struct event_command *cmd_ops; + struct event_filter *filter; + char *filter_str; + void *private_data; + bool paused; + bool paused_tmp; + struct list_head list; + char *name; + struct list_head named_list; + struct event_trigger_data *named_data; +}; + +struct event_trigger_ops { + void (*func)(struct event_trigger_data *, void *, struct ring_buffer_event *); + int (*init)(struct event_trigger_ops *, struct event_trigger_data *); + void (*free)(struct event_trigger_ops *, struct event_trigger_data *); + int (*print)(struct seq_file *, struct event_trigger_ops *, struct event_trigger_data *); +}; + +struct event_command { + struct list_head list; + char *name; + enum event_trigger_type trigger_type; + int flags; + int (*func)(struct event_command *, struct trace_event_file *, char *, char *, char *); + int (*reg)(char *, struct event_trigger_ops *, struct event_trigger_data *, struct trace_event_file *); + void (*unreg)(char *, struct event_trigger_ops *, struct event_trigger_data *, struct trace_event_file *); + void (*unreg_all)(struct trace_event_file *); + int (*set_filter)(char *, struct event_trigger_data *, struct trace_event_file *); + struct event_trigger_ops * (*get_trigger_ops)(char *, char *); +}; + +struct enable_trigger_data { + struct trace_event_file *file; + bool enable; + bool hist; +}; + +enum event_command_flags { + EVENT_CMD_FL_POST_TRIGGER = 1, + EVENT_CMD_FL_NEEDS_REC = 2, +}; + +enum bpf_func_id { + BPF_FUNC_unspec = 0, + BPF_FUNC_map_lookup_elem = 1, + BPF_FUNC_map_update_elem = 2, + BPF_FUNC_map_delete_elem = 3, + BPF_FUNC_probe_read = 4, + BPF_FUNC_ktime_get_ns = 5, + BPF_FUNC_trace_printk = 6, + BPF_FUNC_get_prandom_u32 = 7, + BPF_FUNC_get_smp_processor_id = 8, + BPF_FUNC_skb_store_bytes = 9, + BPF_FUNC_l3_csum_replace = 10, + BPF_FUNC_l4_csum_replace = 11, + BPF_FUNC_tail_call = 12, + BPF_FUNC_clone_redirect = 13, + BPF_FUNC_get_current_pid_tgid = 14, + BPF_FUNC_get_current_uid_gid = 15, + BPF_FUNC_get_current_comm = 16, + BPF_FUNC_get_cgroup_classid = 17, + BPF_FUNC_skb_vlan_push = 18, + 
BPF_FUNC_skb_vlan_pop = 19, + BPF_FUNC_skb_get_tunnel_key = 20, + BPF_FUNC_skb_set_tunnel_key = 21, + BPF_FUNC_perf_event_read = 22, + BPF_FUNC_redirect = 23, + BPF_FUNC_get_route_realm = 24, + BPF_FUNC_perf_event_output = 25, + BPF_FUNC_skb_load_bytes = 26, + BPF_FUNC_get_stackid = 27, + BPF_FUNC_csum_diff = 28, + BPF_FUNC_skb_get_tunnel_opt = 29, + BPF_FUNC_skb_set_tunnel_opt = 30, + BPF_FUNC_skb_change_proto = 31, + BPF_FUNC_skb_change_type = 32, + BPF_FUNC_skb_under_cgroup = 33, + BPF_FUNC_get_hash_recalc = 34, + BPF_FUNC_get_current_task = 35, + BPF_FUNC_probe_write_user = 36, + BPF_FUNC_current_task_under_cgroup = 37, + BPF_FUNC_skb_change_tail = 38, + BPF_FUNC_skb_pull_data = 39, + BPF_FUNC_csum_update = 40, + BPF_FUNC_set_hash_invalid = 41, + BPF_FUNC_get_numa_node_id = 42, + BPF_FUNC_skb_change_head = 43, + BPF_FUNC_xdp_adjust_head = 44, + BPF_FUNC_probe_read_str = 45, + BPF_FUNC_get_socket_cookie = 46, + BPF_FUNC_get_socket_uid = 47, + BPF_FUNC_set_hash = 48, + BPF_FUNC_setsockopt = 49, + BPF_FUNC_skb_adjust_room = 50, + BPF_FUNC_redirect_map = 51, + BPF_FUNC_sk_redirect_map = 52, + BPF_FUNC_sock_map_update = 53, + BPF_FUNC_xdp_adjust_meta = 54, + BPF_FUNC_perf_event_read_value = 55, + BPF_FUNC_perf_prog_read_value = 56, + BPF_FUNC_getsockopt = 57, + BPF_FUNC_override_return = 58, + BPF_FUNC_sock_ops_cb_flags_set = 59, + BPF_FUNC_msg_redirect_map = 60, + BPF_FUNC_msg_apply_bytes = 61, + BPF_FUNC_msg_cork_bytes = 62, + BPF_FUNC_msg_pull_data = 63, + BPF_FUNC_bind = 64, + BPF_FUNC_xdp_adjust_tail = 65, + BPF_FUNC_skb_get_xfrm_state = 66, + BPF_FUNC_get_stack = 67, + BPF_FUNC_skb_load_bytes_relative = 68, + BPF_FUNC_fib_lookup = 69, + BPF_FUNC_sock_hash_update = 70, + BPF_FUNC_msg_redirect_hash = 71, + BPF_FUNC_sk_redirect_hash = 72, + BPF_FUNC_lwt_push_encap = 73, + BPF_FUNC_lwt_seg6_store_bytes = 74, + BPF_FUNC_lwt_seg6_adjust_srh = 75, + BPF_FUNC_lwt_seg6_action = 76, + BPF_FUNC_rc_repeat = 77, + BPF_FUNC_rc_keydown = 78, + BPF_FUNC_skb_cgroup_id = 79, + BPF_FUNC_get_current_cgroup_id = 80, + BPF_FUNC_get_local_storage = 81, + BPF_FUNC_sk_select_reuseport = 82, + BPF_FUNC_skb_ancestor_cgroup_id = 83, + BPF_FUNC_sk_lookup_tcp = 84, + BPF_FUNC_sk_lookup_udp = 85, + BPF_FUNC_sk_release = 86, + BPF_FUNC_map_push_elem = 87, + BPF_FUNC_map_pop_elem = 88, + BPF_FUNC_map_peek_elem = 89, + BPF_FUNC_msg_push_data = 90, + BPF_FUNC_msg_pop_data = 91, + BPF_FUNC_rc_pointer_rel = 92, + BPF_FUNC_spin_lock = 93, + BPF_FUNC_spin_unlock = 94, + BPF_FUNC_sk_fullsock = 95, + BPF_FUNC_tcp_sock = 96, + BPF_FUNC_skb_ecn_set_ce = 97, + BPF_FUNC_get_listener_sock = 98, + BPF_FUNC_skc_lookup_tcp = 99, + BPF_FUNC_tcp_check_syncookie = 100, + BPF_FUNC_sysctl_get_name = 101, + BPF_FUNC_sysctl_get_current_value = 102, + BPF_FUNC_sysctl_get_new_value = 103, + BPF_FUNC_sysctl_set_new_value = 104, + BPF_FUNC_strtol = 105, + BPF_FUNC_strtoul = 106, + BPF_FUNC_sk_storage_get = 107, + BPF_FUNC_sk_storage_delete = 108, + BPF_FUNC_send_signal = 109, + BPF_FUNC_tcp_gen_syncookie = 110, + BPF_FUNC_skb_output = 111, + BPF_FUNC_probe_read_user = 112, + BPF_FUNC_probe_read_kernel = 113, + BPF_FUNC_probe_read_user_str = 114, + BPF_FUNC_probe_read_kernel_str = 115, + BPF_FUNC_tcp_send_ack = 116, + BPF_FUNC_send_signal_thread = 117, + BPF_FUNC_jiffies64 = 118, + BPF_FUNC_read_branch_records = 119, + BPF_FUNC_get_ns_current_pid_tgid = 120, + BPF_FUNC_xdp_output = 121, + BPF_FUNC_get_netns_cookie = 122, + BPF_FUNC_get_current_ancestor_cgroup_id = 123, + BPF_FUNC_sk_assign = 124, + BPF_FUNC_ktime_get_boot_ns = 125, + 
BPF_FUNC_seq_printf = 126, + BPF_FUNC_seq_write = 127, + BPF_FUNC_sk_cgroup_id = 128, + BPF_FUNC_sk_ancestor_cgroup_id = 129, + BPF_FUNC_ringbuf_output = 130, + BPF_FUNC_ringbuf_reserve = 131, + BPF_FUNC_ringbuf_submit = 132, + BPF_FUNC_ringbuf_discard = 133, + BPF_FUNC_ringbuf_query = 134, + BPF_FUNC_csum_level = 135, + BPF_FUNC_skc_to_tcp6_sock = 136, + BPF_FUNC_skc_to_tcp_sock = 137, + BPF_FUNC_skc_to_tcp_timewait_sock = 138, + BPF_FUNC_skc_to_tcp_request_sock = 139, + BPF_FUNC_skc_to_udp6_sock = 140, + BPF_FUNC_get_task_stack = 141, + BPF_FUNC_load_hdr_opt = 142, + BPF_FUNC_store_hdr_opt = 143, + BPF_FUNC_reserve_hdr_opt = 144, + BPF_FUNC_inode_storage_get = 145, + BPF_FUNC_inode_storage_delete = 146, + BPF_FUNC_d_path = 147, + BPF_FUNC_copy_from_user = 148, + BPF_FUNC_snprintf_btf = 149, + BPF_FUNC_seq_printf_btf = 150, + BPF_FUNC_skb_cgroup_classid = 151, + BPF_FUNC_redirect_neigh = 152, + BPF_FUNC_per_cpu_ptr = 153, + BPF_FUNC_this_cpu_ptr = 154, + BPF_FUNC_redirect_peer = 155, + __BPF_FUNC_MAX_ID = 156, +}; + +enum { + BPF_F_INDEX_MASK = 4294967295, + BPF_F_CURRENT_CPU = 4294967295, + BPF_F_CTXLEN_MASK = 0, +}; + +struct bpf_perf_event_value { + __u64 counter; + __u64 enabled; + __u64 running; +}; + +struct bpf_raw_tracepoint_args { + __u64 args[0]; +}; + +enum bpf_task_fd_type { + BPF_FD_TYPE_RAW_TRACEPOINT = 0, + BPF_FD_TYPE_TRACEPOINT = 1, + BPF_FD_TYPE_KPROBE = 2, + BPF_FD_TYPE_KRETPROBE = 3, + BPF_FD_TYPE_UPROBE = 4, + BPF_FD_TYPE_URETPROBE = 5, +}; + +struct btf_ptr { + void *ptr; + __u32 type_id; + __u32 flags; +}; + +enum { + BTF_F_COMPACT = 1, + BTF_F_NONAME = 2, + BTF_F_PTR_RAW = 4, + BTF_F_ZERO = 8, +}; + +enum bpf_arg_type { + ARG_DONTCARE = 0, + ARG_CONST_MAP_PTR = 1, + ARG_PTR_TO_MAP_KEY = 2, + ARG_PTR_TO_MAP_VALUE = 3, + ARG_PTR_TO_UNINIT_MAP_VALUE = 4, + ARG_PTR_TO_MAP_VALUE_OR_NULL = 5, + ARG_PTR_TO_MEM = 6, + ARG_PTR_TO_MEM_OR_NULL = 7, + ARG_PTR_TO_UNINIT_MEM = 8, + ARG_CONST_SIZE = 9, + ARG_CONST_SIZE_OR_ZERO = 10, + ARG_PTR_TO_CTX = 11, + ARG_PTR_TO_CTX_OR_NULL = 12, + ARG_ANYTHING = 13, + ARG_PTR_TO_SPIN_LOCK = 14, + ARG_PTR_TO_SOCK_COMMON = 15, + ARG_PTR_TO_INT = 16, + ARG_PTR_TO_LONG = 17, + ARG_PTR_TO_SOCKET = 18, + ARG_PTR_TO_SOCKET_OR_NULL = 19, + ARG_PTR_TO_BTF_ID = 20, + ARG_PTR_TO_ALLOC_MEM = 21, + ARG_PTR_TO_ALLOC_MEM_OR_NULL = 22, + ARG_CONST_ALLOC_SIZE_OR_ZERO = 23, + ARG_PTR_TO_BTF_ID_SOCK_COMMON = 24, + ARG_PTR_TO_PERCPU_BTF_ID = 25, + __BPF_ARG_TYPE_MAX = 26, +}; + +enum bpf_return_type { + RET_INTEGER = 0, + RET_VOID = 1, + RET_PTR_TO_MAP_VALUE = 2, + RET_PTR_TO_MAP_VALUE_OR_NULL = 3, + RET_PTR_TO_SOCKET_OR_NULL = 4, + RET_PTR_TO_TCP_SOCK_OR_NULL = 5, + RET_PTR_TO_SOCK_COMMON_OR_NULL = 6, + RET_PTR_TO_ALLOC_MEM_OR_NULL = 7, + RET_PTR_TO_BTF_ID_OR_NULL = 8, + RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL = 9, + RET_PTR_TO_MEM_OR_BTF_ID = 10, +}; + +struct bpf_func_proto { + u64 (*func)(u64, u64, u64, u64, u64); + bool gpl_only; + bool pkt_access; + enum bpf_return_type ret_type; + union { + struct { + enum bpf_arg_type arg1_type; + enum bpf_arg_type arg2_type; + enum bpf_arg_type arg3_type; + enum bpf_arg_type arg4_type; + enum bpf_arg_type arg5_type; + }; + enum bpf_arg_type arg_type[5]; + }; + union { + struct { + u32 *arg1_btf_id; + u32 *arg2_btf_id; + u32 *arg3_btf_id; + u32 *arg4_btf_id; + u32 *arg5_btf_id; + }; + u32 *arg_btf_id[5]; + }; + int *ret_btf_id; + bool (*allowed)(const struct bpf_prog *); +}; + +enum bpf_access_type { + BPF_READ = 1, + BPF_WRITE = 2, +}; + +struct bpf_verifier_log; + +struct bpf_insn_access_aux { + enum bpf_reg_type reg_type; 
+ union { + int ctx_field_size; + u32 btf_id; + }; + struct bpf_verifier_log *log; +}; + +struct bpf_verifier_ops { + const struct bpf_func_proto * (*get_func_proto)(enum bpf_func_id, const struct bpf_prog *); + bool (*is_valid_access)(int, int, enum bpf_access_type, const struct bpf_prog *, struct bpf_insn_access_aux *); + int (*gen_prologue)(struct bpf_insn *, bool, const struct bpf_prog *); + int (*gen_ld_abs)(const struct bpf_insn *, struct bpf_insn *); + u32 (*convert_ctx_access)(enum bpf_access_type, const struct bpf_insn *, struct bpf_insn *, struct bpf_prog *, u32 *); + int (*btf_struct_access)(struct bpf_verifier_log *, const struct btf_type *, int, int, enum bpf_access_type, u32 *); +}; + +struct bpf_array_aux { + enum bpf_prog_type type; + bool jited; + struct list_head poke_progs; + struct bpf_map *map; + struct mutex poke_mutex; + struct work_struct work; +}; + +struct bpf_array { + struct bpf_map map; + u32 elem_size; + u32 index_mask; + struct bpf_array_aux *aux; + union { + char value[0]; + void *ptrs[0]; + void *pptrs[0]; + }; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct bpf_event_entry { + struct perf_event *event; + struct file *perf_file; + struct file *map_file; + struct callback_head rcu; +}; + +typedef long unsigned int (*bpf_ctx_copy_t)(void *, const void *, long unsigned int, long unsigned int); + +typedef struct user_pt_regs bpf_user_pt_regs_t; + +struct bpf_perf_event_data { + bpf_user_pt_regs_t regs; + __u64 sample_period; + __u64 addr; +}; + +struct perf_event_query_bpf { + __u32 ids_len; + __u32 prog_cnt; + __u32 ids[0]; +}; + +struct bpf_perf_event_data_kern { + bpf_user_pt_regs_t *regs; + struct perf_sample_data *data; + struct perf_event *event; +}; + +struct btf_id_set { + u32 cnt; + u32 ids[0]; +}; + +struct trace_event_raw_bpf_trace_printk { + struct trace_entry ent; + u32 __data_loc_bpf_string; + char __data[0]; +}; + +struct trace_event_data_offsets_bpf_trace_printk { + u32 bpf_string; +}; + +typedef void (*btf_trace_bpf_trace_printk)(void *, const char *); + +struct bpf_trace_module { + struct module *module; + struct list_head list; +}; + +typedef u64 (*btf_bpf_override_return)(struct pt_regs *, long unsigned int); + +typedef u64 (*btf_bpf_probe_read_user)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_user_str)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_kernel)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_kernel_str)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_compat)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_read_compat_str)(void *, u32, const void *); + +typedef u64 (*btf_bpf_probe_write_user)(void *, const void *, u32); + +typedef u64 (*btf_bpf_trace_printk)(char *, u32, u64, u64, u64); + +struct bpf_seq_printf_buf { + char buf[768]; +}; + +typedef u64 (*btf_bpf_seq_printf)(struct seq_file *, char *, u32, const void *, u32); + +typedef u64 (*btf_bpf_seq_write)(struct seq_file *, const void *, u32); + +typedef u64 (*btf_bpf_seq_printf_btf)(struct seq_file *, struct btf_ptr *, u32, u64); + +typedef u64 (*btf_bpf_perf_event_read)(struct bpf_map *, u64); + +typedef u64 (*btf_bpf_perf_event_read_value)(struct bpf_map *, u64, struct bpf_perf_event_value *, u32); + +struct bpf_trace_sample_data { + struct perf_sample_data sds[3]; +}; + +typedef u64 (*btf_bpf_perf_event_output)(struct pt_regs *, struct bpf_map *, u64, void *, u64); + +struct bpf_nested_pt_regs { + struct pt_regs regs[3]; +}; + +typedef u64 
(*btf_bpf_get_current_task)(); + +typedef u64 (*btf_bpf_current_task_under_cgroup)(struct bpf_map *, u32); + +struct send_signal_irq_work { + struct irq_work irq_work; + struct task_struct *task; + u32 sig; + enum pid_type type; +}; + +typedef u64 (*btf_bpf_send_signal)(u32); + +typedef u64 (*btf_bpf_send_signal_thread)(u32); + +typedef u64 (*btf_bpf_d_path)(struct path *, char *, u32); + +typedef u64 (*btf_bpf_snprintf_btf)(char *, u32, struct btf_ptr *, u32, u64); + +typedef u64 (*btf_bpf_perf_event_output_tp)(void *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_get_stackid_tp)(void *, struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_stack_tp)(void *, void *, u32, u64); + +typedef u64 (*btf_bpf_perf_prog_read_value)(struct bpf_perf_event_data_kern *, struct bpf_perf_event_value *, u32); + +typedef u64 (*btf_bpf_read_branch_records)(struct bpf_perf_event_data_kern *, void *, u32, u64); + +struct bpf_raw_tp_regs { + struct pt_regs regs[3]; +}; + +typedef u64 (*btf_bpf_perf_event_output_raw_tp)(struct bpf_raw_tracepoint_args *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_get_stackid_raw_tp)(struct bpf_raw_tracepoint_args *, struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_stack_raw_tp)(struct bpf_raw_tracepoint_args *, void *, u32, u64); + +enum dynevent_type { + DYNEVENT_TYPE_SYNTH = 1, + DYNEVENT_TYPE_KPROBE = 2, + DYNEVENT_TYPE_NONE = 3, +}; + +struct dynevent_cmd; + +typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *); + +struct dynevent_cmd { + struct seq_buf seq; + const char *event_name; + unsigned int n_fields; + enum dynevent_type type; + dynevent_create_fn_t run_command; + void *private_data; +}; + +struct kprobe_trace_entry_head { + struct trace_entry ent; + long unsigned int ip; +}; + +struct kretprobe_trace_entry_head { + struct trace_entry ent; + long unsigned int func; + long unsigned int ret_ip; +}; + +struct dyn_event; + +struct dyn_event_operations { + struct list_head list; + int (*create)(int, const char **); + int (*show)(struct seq_file *, struct dyn_event *); + bool (*is_busy)(struct dyn_event *); + int (*free)(struct dyn_event *); + bool (*match)(const char *, const char *, int, const char **, struct dyn_event *); +}; + +struct dyn_event { + struct list_head list; + struct dyn_event_operations *ops; +}; + +struct dynevent_arg { + const char *str; + char separator; +}; + +typedef int (*print_type_func_t)(struct trace_seq *, void *, void *); + +enum fetch_op { + FETCH_OP_NOP = 0, + FETCH_OP_REG = 1, + FETCH_OP_STACK = 2, + FETCH_OP_STACKP = 3, + FETCH_OP_RETVAL = 4, + FETCH_OP_IMM = 5, + FETCH_OP_COMM = 6, + FETCH_OP_ARG = 7, + FETCH_OP_FOFFS = 8, + FETCH_OP_DATA = 9, + FETCH_OP_DEREF = 10, + FETCH_OP_UDEREF = 11, + FETCH_OP_ST_RAW = 12, + FETCH_OP_ST_MEM = 13, + FETCH_OP_ST_UMEM = 14, + FETCH_OP_ST_STRING = 15, + FETCH_OP_ST_USTRING = 16, + FETCH_OP_MOD_BF = 17, + FETCH_OP_LP_ARRAY = 18, + FETCH_OP_END = 19, + FETCH_NOP_SYMBOL = 20, +}; + +struct fetch_insn { + enum fetch_op op; + union { + unsigned int param; + struct { + unsigned int size; + int offset; + }; + struct { + unsigned char basesize; + unsigned char lshift; + unsigned char rshift; + }; + long unsigned int immediate; + void *data; + }; +}; + +struct fetch_type { + const char *name; + size_t size; + int is_signed; + print_type_func_t print; + const char *fmt; + const char *fmttype; +}; + +struct probe_arg { + struct fetch_insn *code; + bool dynamic; + unsigned int offset; + unsigned int count; + const char *name; + const char *comm; + char *fmt; + const 
struct fetch_type *type; +}; + +struct trace_uprobe_filter { + rwlock_t rwlock; + int nr_systemwide; + struct list_head perf_events; +}; + +struct trace_probe_event { + unsigned int flags; + struct trace_event_class class; + struct trace_event_call call; + struct list_head files; + struct list_head probes; + struct trace_uprobe_filter filter[0]; +}; + +struct trace_probe { + struct list_head list; + struct trace_probe_event *event; + ssize_t size; + unsigned int nr_args; + struct probe_arg args[0]; +}; + +struct event_file_link { + struct trace_event_file *file; + struct list_head list; +}; + +enum { + TP_ERR_FILE_NOT_FOUND = 0, + TP_ERR_NO_REGULAR_FILE = 1, + TP_ERR_BAD_REFCNT = 2, + TP_ERR_REFCNT_OPEN_BRACE = 3, + TP_ERR_BAD_REFCNT_SUFFIX = 4, + TP_ERR_BAD_UPROBE_OFFS = 5, + TP_ERR_MAXACT_NO_KPROBE = 6, + TP_ERR_BAD_MAXACT = 7, + TP_ERR_MAXACT_TOO_BIG = 8, + TP_ERR_BAD_PROBE_ADDR = 9, + TP_ERR_BAD_RETPROBE = 10, + TP_ERR_BAD_ADDR_SUFFIX = 11, + TP_ERR_NO_GROUP_NAME = 12, + TP_ERR_GROUP_TOO_LONG = 13, + TP_ERR_BAD_GROUP_NAME = 14, + TP_ERR_NO_EVENT_NAME = 15, + TP_ERR_EVENT_TOO_LONG = 16, + TP_ERR_BAD_EVENT_NAME = 17, + TP_ERR_RETVAL_ON_PROBE = 18, + TP_ERR_BAD_STACK_NUM = 19, + TP_ERR_BAD_ARG_NUM = 20, + TP_ERR_BAD_VAR = 21, + TP_ERR_BAD_REG_NAME = 22, + TP_ERR_BAD_MEM_ADDR = 23, + TP_ERR_BAD_IMM = 24, + TP_ERR_IMMSTR_NO_CLOSE = 25, + TP_ERR_FILE_ON_KPROBE = 26, + TP_ERR_BAD_FILE_OFFS = 27, + TP_ERR_SYM_ON_UPROBE = 28, + TP_ERR_TOO_MANY_OPS = 29, + TP_ERR_DEREF_NEED_BRACE = 30, + TP_ERR_BAD_DEREF_OFFS = 31, + TP_ERR_DEREF_OPEN_BRACE = 32, + TP_ERR_COMM_CANT_DEREF = 33, + TP_ERR_BAD_FETCH_ARG = 34, + TP_ERR_ARRAY_NO_CLOSE = 35, + TP_ERR_BAD_ARRAY_SUFFIX = 36, + TP_ERR_BAD_ARRAY_NUM = 37, + TP_ERR_ARRAY_TOO_BIG = 38, + TP_ERR_BAD_TYPE = 39, + TP_ERR_BAD_STRING = 40, + TP_ERR_BAD_BITFIELD = 41, + TP_ERR_ARG_NAME_TOO_LONG = 42, + TP_ERR_NO_ARG_NAME = 43, + TP_ERR_BAD_ARG_NAME = 44, + TP_ERR_USED_ARG_NAME = 45, + TP_ERR_ARG_TOO_LONG = 46, + TP_ERR_NO_ARG_BODY = 47, + TP_ERR_BAD_INSN_BNDRY = 48, + TP_ERR_FAIL_REG_PROBE = 49, + TP_ERR_DIFF_PROBE_TYPE = 50, + TP_ERR_DIFF_ARG_TYPE = 51, + TP_ERR_SAME_PROBE = 52, +}; + +struct trace_kprobe { + struct dyn_event devent; + struct kretprobe rp; + long unsigned int *nhit; + const char *symbol; + struct trace_probe tp; +}; + +struct trace_event_raw_cpu { + struct trace_entry ent; + u32 state; + u32 cpu_id; + char __data[0]; +}; + +struct trace_event_raw_powernv_throttle { + struct trace_entry ent; + int chip_id; + u32 __data_loc_reason; + int pmax; + char __data[0]; +}; + +struct trace_event_raw_pstate_sample { + struct trace_entry ent; + u32 core_busy; + u32 scaled_busy; + u32 from; + u32 to; + u64 mperf; + u64 aperf; + u64 tsc; + u32 freq; + u32 io_boost; + char __data[0]; +}; + +struct trace_event_raw_cpu_frequency_limits { + struct trace_entry ent; + u32 min_freq; + u32 max_freq; + u32 cpu_id; + char __data[0]; +}; + +struct trace_event_raw_device_pm_callback_start { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_driver; + u32 __data_loc_parent; + u32 __data_loc_pm_ops; + int event; + char __data[0]; +}; + +struct trace_event_raw_device_pm_callback_end { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_driver; + int error; + char __data[0]; +}; + +struct trace_event_raw_suspend_resume { + struct trace_entry ent; + const char *action; + int val; + bool start; + char __data[0]; +}; + +struct trace_event_raw_wakeup_source { + struct trace_entry ent; + u32 __data_loc_name; + u64 state; + char __data[0]; +}; + 
+struct trace_event_raw_clock { + struct trace_entry ent; + u32 __data_loc_name; + u64 state; + u64 cpu_id; + char __data[0]; +}; + +struct trace_event_raw_power_domain { + struct trace_entry ent; + u32 __data_loc_name; + u64 state; + u64 cpu_id; + char __data[0]; +}; + +struct trace_event_raw_cpu_latency_qos_request { + struct trace_entry ent; + s32 value; + char __data[0]; +}; + +struct trace_event_raw_pm_qos_update { + struct trace_entry ent; + enum pm_qos_req_action action; + int prev_value; + int curr_value; + char __data[0]; +}; + +struct trace_event_raw_dev_pm_qos_request { + struct trace_entry ent; + u32 __data_loc_name; + enum dev_pm_qos_req_type type; + s32 new_value; + char __data[0]; +}; + +struct trace_event_data_offsets_cpu {}; + +struct trace_event_data_offsets_powernv_throttle { + u32 reason; +}; + +struct trace_event_data_offsets_pstate_sample {}; + +struct trace_event_data_offsets_cpu_frequency_limits {}; + +struct trace_event_data_offsets_device_pm_callback_start { + u32 device; + u32 driver; + u32 parent; + u32 pm_ops; +}; + +struct trace_event_data_offsets_device_pm_callback_end { + u32 device; + u32 driver; +}; + +struct trace_event_data_offsets_suspend_resume {}; + +struct trace_event_data_offsets_wakeup_source { + u32 name; +}; + +struct trace_event_data_offsets_clock { + u32 name; +}; + +struct trace_event_data_offsets_power_domain { + u32 name; +}; + +struct trace_event_data_offsets_cpu_latency_qos_request {}; + +struct trace_event_data_offsets_pm_qos_update {}; + +struct trace_event_data_offsets_dev_pm_qos_request { + u32 name; +}; + +typedef void (*btf_trace_cpu_idle)(void *, unsigned int, unsigned int); + +typedef void (*btf_trace_powernv_throttle)(void *, int, const char *, int); + +typedef void (*btf_trace_pstate_sample)(void *, u32, u32, u32, u32, u64, u64, u64, u32, u32); + +typedef void (*btf_trace_cpu_frequency)(void *, unsigned int, unsigned int); + +typedef void (*btf_trace_cpu_frequency_limits)(void *, struct cpufreq_policy *); + +typedef void (*btf_trace_device_pm_callback_start)(void *, struct device *, const char *, int); + +typedef void (*btf_trace_device_pm_callback_end)(void *, struct device *, int); + +typedef void (*btf_trace_suspend_resume)(void *, const char *, int, bool); + +typedef void (*btf_trace_wakeup_source_activate)(void *, const char *, unsigned int); + +typedef void (*btf_trace_wakeup_source_deactivate)(void *, const char *, unsigned int); + +typedef void (*btf_trace_clock_enable)(void *, const char *, unsigned int, unsigned int); + +typedef void (*btf_trace_clock_disable)(void *, const char *, unsigned int, unsigned int); + +typedef void (*btf_trace_clock_set_rate)(void *, const char *, unsigned int, unsigned int); + +typedef void (*btf_trace_power_domain_target)(void *, const char *, unsigned int, unsigned int); + +typedef void (*btf_trace_pm_qos_add_request)(void *, s32); + +typedef void (*btf_trace_pm_qos_update_request)(void *, s32); + +typedef void (*btf_trace_pm_qos_remove_request)(void *, s32); + +typedef void (*btf_trace_pm_qos_update_target)(void *, enum pm_qos_req_action, int, int); + +typedef void (*btf_trace_pm_qos_update_flags)(void *, enum pm_qos_req_action, int, int); + +typedef void (*btf_trace_dev_pm_qos_add_request)(void *, const char *, enum dev_pm_qos_req_type, s32); + +typedef void (*btf_trace_dev_pm_qos_update_request)(void *, const char *, enum dev_pm_qos_req_type, s32); + +typedef void (*btf_trace_dev_pm_qos_remove_request)(void *, const char *, enum dev_pm_qos_req_type, s32); + +struct 
trace_event_raw_rpm_internal { + struct trace_entry ent; + u32 __data_loc_name; + int flags; + int usage_count; + int disable_depth; + int runtime_auto; + int request_pending; + int irq_safe; + int child_count; + char __data[0]; +}; + +struct trace_event_raw_rpm_return_int { + struct trace_entry ent; + u32 __data_loc_name; + long unsigned int ip; + int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_rpm_internal { + u32 name; +}; + +struct trace_event_data_offsets_rpm_return_int { + u32 name; +}; + +typedef void (*btf_trace_rpm_suspend)(void *, struct device *, int); + +typedef void (*btf_trace_rpm_resume)(void *, struct device *, int); + +typedef void (*btf_trace_rpm_idle)(void *, struct device *, int); + +typedef void (*btf_trace_rpm_usage)(void *, struct device *, int); + +typedef void (*btf_trace_rpm_return_int)(void *, struct device *, long unsigned int, int); + +typedef int (*dynevent_check_arg_fn_t)(void *); + +struct dynevent_arg_pair { + const char *lhs; + const char *rhs; + char operator; + char separator; +}; + +struct trace_probe_log { + const char *subsystem; + const char **argv; + int argc; + int index; +}; + +enum uprobe_filter_ctx { + UPROBE_FILTER_REGISTER = 0, + UPROBE_FILTER_UNREGISTER = 1, + UPROBE_FILTER_MMAP = 2, +}; + +struct uprobe_consumer { + int (*handler)(struct uprobe_consumer *, struct pt_regs *); + int (*ret_handler)(struct uprobe_consumer *, long unsigned int, struct pt_regs *); + bool (*filter)(struct uprobe_consumer *, enum uprobe_filter_ctx, struct mm_struct *); + struct uprobe_consumer *next; +}; + +struct uprobe_trace_entry_head { + struct trace_entry ent; + long unsigned int vaddr[0]; +}; + +struct trace_uprobe { + struct dyn_event devent; + struct uprobe_consumer consumer; + struct path path; + struct inode *inode; + char *filename; + long unsigned int offset; + long unsigned int ref_ctr_offset; + long unsigned int nhit; + struct trace_probe tp; +}; + +struct uprobe_dispatch_data { + struct trace_uprobe *tu; + long unsigned int bp_addr; +}; + +struct uprobe_cpu_buffer { + struct mutex mutex; + void *buf; +}; + +typedef bool (*filter_func_t)(struct uprobe_consumer *, enum uprobe_filter_ctx, struct mm_struct *); + +struct rnd_state { + __u32 s1; + __u32 s2; + __u32 s3; + __u32 s4; +}; + +struct rhash_lock_head; + +struct bucket_table { + unsigned int size; + unsigned int nest; + u32 hash_rnd; + struct list_head walkers; + struct callback_head rcu; + struct bucket_table *future_tbl; + struct lockdep_map dep_map; + long: 64; + struct rhash_lock_head *buckets[0]; +}; + +enum xdp_action { + XDP_ABORTED = 0, + XDP_DROP = 1, + XDP_PASS = 2, + XDP_TX = 3, + XDP_REDIRECT = 4, +}; + +enum bpf_jit_poke_reason { + BPF_POKE_REASON_TAIL_CALL = 0, +}; + +enum bpf_text_poke_type { + BPF_MOD_CALL = 0, + BPF_MOD_JUMP = 1, +}; + +enum xdp_mem_type { + MEM_TYPE_PAGE_SHARED = 0, + MEM_TYPE_PAGE_ORDER0 = 1, + MEM_TYPE_PAGE_POOL = 2, + MEM_TYPE_XSK_BUFF_POOL = 3, + MEM_TYPE_MAX = 4, +}; + +struct xdp_cpumap_stats { + unsigned int redirect; + unsigned int pass; + unsigned int drop; +}; + +typedef void (*bpf_jit_fill_hole_t)(void *, unsigned int); + +struct bpf_prog_dummy { + struct bpf_prog prog; +}; + +typedef u64 (*btf_bpf_user_rnd_u32)(); + +typedef u64 (*btf_bpf_get_raw_cpu_id)(); + +struct _bpf_dtab_netdev { + struct net_device *dev; +}; + +struct rhash_lock_head {}; + +struct zero_copy_allocator; + +struct page_pool; + +struct xdp_mem_allocator { + struct xdp_mem_info mem; + union { + void *allocator; + struct page_pool *page_pool; + struct 
zero_copy_allocator *zc_alloc; + }; + struct rhash_head node; + struct callback_head rcu; +}; + +struct trace_event_raw_xdp_exception { + struct trace_entry ent; + int prog_id; + u32 act; + int ifindex; + char __data[0]; +}; + +struct trace_event_raw_xdp_bulk_tx { + struct trace_entry ent; + int ifindex; + u32 act; + int drops; + int sent; + int err; + char __data[0]; +}; + +struct trace_event_raw_xdp_redirect_template { + struct trace_entry ent; + int prog_id; + u32 act; + int ifindex; + int err; + int to_ifindex; + u32 map_id; + int map_index; + char __data[0]; +}; + +struct trace_event_raw_xdp_cpumap_kthread { + struct trace_entry ent; + int map_id; + u32 act; + int cpu; + unsigned int drops; + unsigned int processed; + int sched; + unsigned int xdp_pass; + unsigned int xdp_drop; + unsigned int xdp_redirect; + char __data[0]; +}; + +struct trace_event_raw_xdp_cpumap_enqueue { + struct trace_entry ent; + int map_id; + u32 act; + int cpu; + unsigned int drops; + unsigned int processed; + int to_cpu; + char __data[0]; +}; + +struct trace_event_raw_xdp_devmap_xmit { + struct trace_entry ent; + int from_ifindex; + u32 act; + int to_ifindex; + int drops; + int sent; + int err; + char __data[0]; +}; + +struct trace_event_raw_mem_disconnect { + struct trace_entry ent; + const struct xdp_mem_allocator *xa; + u32 mem_id; + u32 mem_type; + const void *allocator; + char __data[0]; +}; + +struct trace_event_raw_mem_connect { + struct trace_entry ent; + const struct xdp_mem_allocator *xa; + u32 mem_id; + u32 mem_type; + const void *allocator; + const struct xdp_rxq_info *rxq; + int ifindex; + char __data[0]; +}; + +struct trace_event_raw_mem_return_failed { + struct trace_entry ent; + const struct page *page; + u32 mem_id; + u32 mem_type; + char __data[0]; +}; + +struct trace_event_data_offsets_xdp_exception {}; + +struct trace_event_data_offsets_xdp_bulk_tx {}; + +struct trace_event_data_offsets_xdp_redirect_template {}; + +struct trace_event_data_offsets_xdp_cpumap_kthread {}; + +struct trace_event_data_offsets_xdp_cpumap_enqueue {}; + +struct trace_event_data_offsets_xdp_devmap_xmit {}; + +struct trace_event_data_offsets_mem_disconnect {}; + +struct trace_event_data_offsets_mem_connect {}; + +struct trace_event_data_offsets_mem_return_failed {}; + +typedef void (*btf_trace_xdp_exception)(void *, const struct net_device *, const struct bpf_prog *, u32); + +typedef void (*btf_trace_xdp_bulk_tx)(void *, const struct net_device *, int, int, int); + +typedef void (*btf_trace_xdp_redirect)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, const struct bpf_map *, u32); + +typedef void (*btf_trace_xdp_redirect_err)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, const struct bpf_map *, u32); + +typedef void (*btf_trace_xdp_redirect_map)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, const struct bpf_map *, u32); + +typedef void (*btf_trace_xdp_redirect_map_err)(void *, const struct net_device *, const struct bpf_prog *, const void *, int, const struct bpf_map *, u32); + +typedef void (*btf_trace_xdp_cpumap_kthread)(void *, int, unsigned int, unsigned int, int, struct xdp_cpumap_stats *); + +typedef void (*btf_trace_xdp_cpumap_enqueue)(void *, int, unsigned int, unsigned int, int); + +typedef void (*btf_trace_xdp_devmap_xmit)(void *, const struct net_device *, const struct net_device *, int, int, int); + +typedef void (*btf_trace_mem_disconnect)(void *, const struct xdp_mem_allocator *); + +typedef void 
(*btf_trace_mem_connect)(void *, const struct xdp_mem_allocator *, const struct xdp_rxq_info *); + +typedef void (*btf_trace_mem_return_failed)(void *, const struct xdp_mem_info *, const struct page *); + +enum bpf_cmd { + BPF_MAP_CREATE = 0, + BPF_MAP_LOOKUP_ELEM = 1, + BPF_MAP_UPDATE_ELEM = 2, + BPF_MAP_DELETE_ELEM = 3, + BPF_MAP_GET_NEXT_KEY = 4, + BPF_PROG_LOAD = 5, + BPF_OBJ_PIN = 6, + BPF_OBJ_GET = 7, + BPF_PROG_ATTACH = 8, + BPF_PROG_DETACH = 9, + BPF_PROG_TEST_RUN = 10, + BPF_PROG_GET_NEXT_ID = 11, + BPF_MAP_GET_NEXT_ID = 12, + BPF_PROG_GET_FD_BY_ID = 13, + BPF_MAP_GET_FD_BY_ID = 14, + BPF_OBJ_GET_INFO_BY_FD = 15, + BPF_PROG_QUERY = 16, + BPF_RAW_TRACEPOINT_OPEN = 17, + BPF_BTF_LOAD = 18, + BPF_BTF_GET_FD_BY_ID = 19, + BPF_TASK_FD_QUERY = 20, + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 21, + BPF_MAP_FREEZE = 22, + BPF_BTF_GET_NEXT_ID = 23, + BPF_MAP_LOOKUP_BATCH = 24, + BPF_MAP_LOOKUP_AND_DELETE_BATCH = 25, + BPF_MAP_UPDATE_BATCH = 26, + BPF_MAP_DELETE_BATCH = 27, + BPF_LINK_CREATE = 28, + BPF_LINK_UPDATE = 29, + BPF_LINK_GET_FD_BY_ID = 30, + BPF_LINK_GET_NEXT_ID = 31, + BPF_ENABLE_STATS = 32, + BPF_ITER_CREATE = 33, + BPF_LINK_DETACH = 34, + BPF_PROG_BIND_MAP = 35, +}; + +enum { + BPF_ANY = 0, + BPF_NOEXIST = 1, + BPF_EXIST = 2, + BPF_F_LOCK = 4, +}; + +enum { + BPF_F_NO_PREALLOC = 1, + BPF_F_NO_COMMON_LRU = 2, + BPF_F_NUMA_NODE = 4, + BPF_F_RDONLY = 8, + BPF_F_WRONLY = 16, + BPF_F_STACK_BUILD_ID = 32, + BPF_F_ZERO_SEED = 64, + BPF_F_RDONLY_PROG = 128, + BPF_F_WRONLY_PROG = 256, + BPF_F_CLONE = 512, + BPF_F_MMAPABLE = 1024, + BPF_F_PRESERVE_ELEMS = 2048, + BPF_F_INNER_MAP = 4096, +}; + +enum bpf_stats_type { + BPF_STATS_RUN_TIME = 0, +}; + +struct bpf_prog_info { + __u32 type; + __u32 id; + __u8 tag[8]; + __u32 jited_prog_len; + __u32 xlated_prog_len; + __u64 jited_prog_insns; + __u64 xlated_prog_insns; + __u64 load_time; + __u32 created_by_uid; + __u32 nr_map_ids; + __u64 map_ids; + char name[16]; + __u32 ifindex; + __u32 gpl_compatible: 1; + __u64 netns_dev; + __u64 netns_ino; + __u32 nr_jited_ksyms; + __u32 nr_jited_func_lens; + __u64 jited_ksyms; + __u64 jited_func_lens; + __u32 btf_id; + __u32 func_info_rec_size; + __u64 func_info; + __u32 nr_func_info; + __u32 nr_line_info; + __u64 line_info; + __u64 jited_line_info; + __u32 nr_jited_line_info; + __u32 line_info_rec_size; + __u32 jited_line_info_rec_size; + __u32 nr_prog_tags; + __u64 prog_tags; + __u64 run_time_ns; + __u64 run_cnt; +}; + +struct bpf_map_info { + __u32 type; + __u32 id; + __u32 key_size; + __u32 value_size; + __u32 max_entries; + __u32 map_flags; + char name[16]; + __u32 ifindex; + __u32 btf_vmlinux_value_type_id; + __u64 netns_dev; + __u64 netns_ino; + __u32 btf_id; + __u32 btf_key_type_id; + __u32 btf_value_type_id; +}; + +struct bpf_btf_info { + __u64 btf; + __u32 btf_size; + __u32 id; +}; + +struct bpf_spin_lock { + __u32 val; +}; + +struct bpf_attach_target_info { + struct btf_func_model fmodel; + long int tgt_addr; + const char *tgt_name; + const struct btf_type *tgt_type; +}; + +struct bpf_link_primer { + struct bpf_link *link; + struct file *file; + int fd; + u32 id; +}; + +enum perf_bpf_event_type { + PERF_BPF_EVENT_UNKNOWN = 0, + PERF_BPF_EVENT_PROG_LOAD = 1, + PERF_BPF_EVENT_PROG_UNLOAD = 2, + PERF_BPF_EVENT_MAX = 3, +}; + +enum bpf_audit { + BPF_AUDIT_LOAD = 0, + BPF_AUDIT_UNLOAD = 1, + BPF_AUDIT_MAX = 2, +}; + +struct bpf_tracing_link { + struct bpf_link link; + enum bpf_attach_type attach_type; + struct bpf_trampoline *trampoline; + struct bpf_prog *tgt_prog; +}; + +struct bpf_raw_tp_link { + struct 
bpf_link link; + struct bpf_raw_event_map *btp; +}; + +struct btf_member { + __u32 name_off; + __u32 type; + __u32 offset; +}; + +enum btf_func_linkage { + BTF_FUNC_STATIC = 0, + BTF_FUNC_GLOBAL = 1, + BTF_FUNC_EXTERN = 2, +}; + +struct btf_var_secinfo { + __u32 type; + __u32 offset; + __u32 size; +}; + +enum sk_action { + SK_DROP = 0, + SK_PASS = 1, +}; + +struct bpf_verifier_log { + u32 level; + char kbuf[1024]; + char *ubuf; + u32 len_used; + u32 len_total; +}; + +struct bpf_subprog_info { + u32 start; + u32 linfo_idx; + u16 stack_depth; + bool has_tail_call; + bool tail_call_reachable; + bool has_ld_abs; +}; + +struct bpf_verifier_stack_elem; + +struct bpf_verifier_state; + +struct bpf_verifier_state_list; + +struct bpf_insn_aux_data; + +struct bpf_verifier_env { + u32 insn_idx; + u32 prev_insn_idx; + struct bpf_prog *prog; + const struct bpf_verifier_ops *ops; + struct bpf_verifier_stack_elem *head; + int stack_size; + bool strict_alignment; + bool test_state_freq; + struct bpf_verifier_state *cur_state; + struct bpf_verifier_state_list **explored_states; + struct bpf_verifier_state_list *free_list; + struct bpf_map *used_maps[64]; + u32 used_map_cnt; + u32 id_gen; + bool allow_ptr_leaks; + bool allow_ptr_to_map_access; + bool bpf_capable; + bool bypass_spec_v1; + bool bypass_spec_v4; + bool seen_direct_write; + struct bpf_insn_aux_data *insn_aux_data; + const struct bpf_line_info *prev_linfo; + struct bpf_verifier_log log; + struct bpf_subprog_info subprog_info[257]; + struct { + int *insn_state; + int *insn_stack; + int cur_stack; + } cfg; + u32 pass_cnt; + u32 subprog_cnt; + u32 prev_insn_processed; + u32 insn_processed; + u32 prev_jmps_processed; + u32 jmps_processed; + u64 verification_time; + u32 max_states_per_insn; + u32 total_states; + u32 peak_states; + u32 longest_mark_read_walk; +}; + +struct bpf_struct_ops { + const struct bpf_verifier_ops *verifier_ops; + int (*init)(struct btf *); + int (*check_member)(const struct btf_type *, const struct btf_member *); + int (*init_member)(const struct btf_type *, const struct btf_member *, void *, const void *); + int (*reg)(void *); + void (*unreg)(void *); + const struct btf_type *type; + const struct btf_type *value_type; + const char *name; + struct btf_func_model func_models[64]; + u32 type_id; + u32 value_id; +}; + +typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type, const struct bpf_insn *, struct bpf_insn *, struct bpf_prog *, u32 *); + +struct tnum { + u64 value; + u64 mask; +}; + +enum bpf_reg_liveness { + REG_LIVE_NONE = 0, + REG_LIVE_READ32 = 1, + REG_LIVE_READ64 = 2, + REG_LIVE_READ = 3, + REG_LIVE_WRITTEN = 4, + REG_LIVE_DONE = 8, +}; + +struct bpf_reg_state { + enum bpf_reg_type type; + union { + u16 range; + struct bpf_map *map_ptr; + u32 btf_id; + u32 mem_size; + long unsigned int raw; + }; + s32 off; + u32 id; + u32 ref_obj_id; + struct tnum var_off; + s64 smin_value; + s64 smax_value; + u64 umin_value; + u64 umax_value; + s32 s32_min_value; + s32 s32_max_value; + u32 u32_min_value; + u32 u32_max_value; + struct bpf_reg_state *parent; + u32 frameno; + s32 subreg_def; + enum bpf_reg_liveness live; + bool precise; +}; + +enum bpf_stack_slot_type { + STACK_INVALID = 0, + STACK_SPILL = 1, + STACK_MISC = 2, + STACK_ZERO = 3, +}; + +struct bpf_stack_state { + struct bpf_reg_state spilled_ptr; + u8 slot_type[8]; +}; + +struct bpf_reference_state { + int id; + int insn_idx; +}; + +struct bpf_func_state { + struct bpf_reg_state regs[11]; + int callsite; + u32 frameno; + u32 subprogno; + int acquired_refs; + struct 
bpf_reference_state *refs; + int allocated_stack; + struct bpf_stack_state *stack; +}; + +struct bpf_idx_pair { + u32 prev_idx; + u32 idx; +}; + +struct bpf_verifier_state { + struct bpf_func_state *frame[8]; + struct bpf_verifier_state *parent; + u32 branches; + u32 insn_idx; + u32 curframe; + u32 active_spin_lock; + bool speculative; + u32 first_insn_idx; + u32 last_insn_idx; + struct bpf_idx_pair *jmp_history; + u32 jmp_history_cnt; +}; + +struct bpf_verifier_state_list { + struct bpf_verifier_state state; + struct bpf_verifier_state_list *next; + int miss_cnt; + int hit_cnt; +}; + +struct bpf_insn_aux_data { + union { + enum bpf_reg_type ptr_type; + long unsigned int map_ptr_state; + s32 call_imm; + u32 alu_limit; + struct { + u32 map_index; + u32 map_off; + }; + struct { + enum bpf_reg_type reg_type; + union { + u32 btf_id; + u32 mem_size; + }; + } btf_var; + }; + u64 map_key_state; + int ctx_field_size; + int sanitize_stack_off; + u32 seen; + bool zext_dst; + u8 alu_state; + unsigned int orig_idx; + bool prune_point; +}; + +struct bpf_verifier_stack_elem { + struct bpf_verifier_state st; + int insn_idx; + int prev_insn_idx; + struct bpf_verifier_stack_elem *next; + u32 log_pos; +}; + +enum { + BTF_SOCK_TYPE_INET = 0, + BTF_SOCK_TYPE_INET_CONN = 1, + BTF_SOCK_TYPE_INET_REQ = 2, + BTF_SOCK_TYPE_INET_TW = 3, + BTF_SOCK_TYPE_REQ = 4, + BTF_SOCK_TYPE_SOCK = 5, + BTF_SOCK_TYPE_SOCK_COMMON = 6, + BTF_SOCK_TYPE_TCP = 7, + BTF_SOCK_TYPE_TCP_REQ = 8, + BTF_SOCK_TYPE_TCP_TW = 9, + BTF_SOCK_TYPE_TCP6 = 10, + BTF_SOCK_TYPE_UDP = 11, + BTF_SOCK_TYPE_UDP6 = 12, + MAX_BTF_SOCK_TYPE = 13, +}; + +typedef void (*bpf_insn_print_t)(void *, const char *, ...); + +typedef const char * (*bpf_insn_revmap_call_t)(void *, const struct bpf_insn *); + +typedef const char * (*bpf_insn_print_imm_t)(void *, const struct bpf_insn *, __u64); + +struct bpf_insn_cbs { + bpf_insn_print_t cb_print; + bpf_insn_revmap_call_t cb_call; + bpf_insn_print_imm_t cb_imm; + void *private_data; +}; + +struct bpf_call_arg_meta { + struct bpf_map *map_ptr; + bool raw_mode; + bool pkt_access; + int regno; + int access_size; + int mem_size; + u64 msize_max_value; + int ref_obj_id; + int func_id; + u32 btf_id; + u32 ret_btf_id; +}; + +enum reg_arg_type { + SRC_OP = 0, + DST_OP = 1, + DST_OP_NO_MARK = 2, +}; + +struct bpf_reg_types { + const enum bpf_reg_type types[10]; + u32 *btf_id; +}; + +enum { + DISCOVERED = 16, + EXPLORED = 32, + FALLTHROUGH = 1, + BRANCH = 2, +}; + +struct idpair { + u32 old; + u32 cur; +}; + +struct tree_descr { + const char *name; + const struct file_operations *ops; + int mode; +}; + +struct bpf_preload_info { + char link_name[16]; + int link_id; +}; + +struct bpf_preload_ops { + struct umd_info info; + int (*preload)(struct bpf_preload_info *); + int (*finish)(); + struct module *owner; +}; + +enum bpf_type { + BPF_TYPE_UNSPEC = 0, + BPF_TYPE_PROG = 1, + BPF_TYPE_MAP = 2, + BPF_TYPE_LINK = 3, +}; + +struct map_iter { + void *key; + bool done; +}; + +enum { + OPT_MODE = 0, +}; + +struct bpf_mount_opts { + umode_t mode; +}; + +struct bpf_pidns_info { + __u32 pid; + __u32 tgid; +}; + +typedef u64 (*btf_bpf_map_lookup_elem)(struct bpf_map *, void *); + +typedef u64 (*btf_bpf_map_update_elem)(struct bpf_map *, void *, void *, u64); + +typedef u64 (*btf_bpf_map_delete_elem)(struct bpf_map *, void *); + +typedef u64 (*btf_bpf_map_push_elem)(struct bpf_map *, void *, u64); + +typedef u64 (*btf_bpf_map_pop_elem)(struct bpf_map *, void *); + +typedef u64 (*btf_bpf_map_peek_elem)(struct bpf_map *, void *); + +typedef u64 
(*btf_bpf_get_smp_processor_id)(); + +typedef u64 (*btf_bpf_get_numa_node_id)(); + +typedef u64 (*btf_bpf_ktime_get_ns)(); + +typedef u64 (*btf_bpf_ktime_get_boot_ns)(); + +typedef u64 (*btf_bpf_get_current_pid_tgid)(); + +typedef u64 (*btf_bpf_get_current_uid_gid)(); + +typedef u64 (*btf_bpf_get_current_comm)(char *, u32); + +typedef u64 (*btf_bpf_spin_lock)(struct bpf_spin_lock *); + +typedef u64 (*btf_bpf_spin_unlock)(struct bpf_spin_lock *); + +typedef u64 (*btf_bpf_jiffies64)(); + +typedef u64 (*btf_bpf_get_current_cgroup_id)(); + +typedef u64 (*btf_bpf_get_current_ancestor_cgroup_id)(int); + +typedef u64 (*btf_bpf_get_local_storage)(struct bpf_map *, u64); + +typedef u64 (*btf_bpf_strtol)(const char *, size_t, u64, long int *); + +typedef u64 (*btf_bpf_strtoul)(const char *, size_t, u64, long unsigned int *); + +typedef u64 (*btf_bpf_get_ns_current_pid_tgid)(u64, u64, struct bpf_pidns_info *, u32); + +typedef u64 (*btf_bpf_event_output_data)(void *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_copy_from_user)(void *, u32, const void *); + +typedef u64 (*btf_bpf_per_cpu_ptr)(const void *, u32); + +typedef u64 (*btf_bpf_this_cpu_ptr)(const void *); + +union bpf_iter_link_info { + struct { + __u32 map_fd; + } map; +}; + +typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *, union bpf_iter_link_info *, struct bpf_iter_aux_info *); + +typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *); + +typedef void (*bpf_iter_show_fdinfo_t)(const struct bpf_iter_aux_info *, struct seq_file *); + +typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *, struct bpf_link_info *); + +struct bpf_iter_reg { + const char *target; + bpf_iter_attach_target_t attach_target; + bpf_iter_detach_target_t detach_target; + bpf_iter_show_fdinfo_t show_fdinfo; + bpf_iter_fill_link_info_t fill_link_info; + u32 ctx_arg_info_size; + struct bpf_ctx_arg_aux ctx_arg_info[2]; + const struct bpf_iter_seq_info *seq_info; +}; + +struct bpf_iter_meta { + union { + struct seq_file *seq; + }; + u64 session_id; + u64 seq_num; +}; + +struct bpf_iter_target_info { + struct list_head list; + const struct bpf_iter_reg *reg_info; + u32 btf_id; +}; + +struct bpf_iter_link { + struct bpf_link link; + struct bpf_iter_aux_info aux; + struct bpf_iter_target_info *tinfo; +}; + +struct bpf_iter_priv_data { + struct bpf_iter_target_info *tinfo; + const struct bpf_iter_seq_info *seq_info; + struct bpf_prog *prog; + u64 session_id; + u64 seq_num; + bool done_stop; + long: 56; + u8 target_private[0]; +}; + +struct bpf_iter_seq_map_info { + u32 map_id; +}; + +struct bpf_iter__bpf_map { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_map *map; + }; +}; + +struct bpf_iter_seq_task_common { + struct pid_namespace *ns; +}; + +struct bpf_iter_seq_task_info { + struct bpf_iter_seq_task_common common; + u32 tid; +}; + +struct bpf_iter__task { + union { + struct bpf_iter_meta *meta; + }; + union { + struct task_struct *task; + }; +}; + +struct bpf_iter_seq_task_file_info { + struct bpf_iter_seq_task_common common; + struct task_struct *task; + struct files_struct *files; + u32 tid; + u32 fd; +}; + +struct bpf_iter__task_file { + union { + struct bpf_iter_meta *meta; + }; + union { + struct task_struct *task; + }; + u32 fd; + union { + struct file *file; + }; +}; + +struct bpf_iter_seq_prog_info { + u32 prog_id; +}; + +struct bpf_iter__bpf_prog { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_prog *prog; + }; +}; + +struct bpf_iter__bpf_map_elem { + union { + 
struct bpf_iter_meta *meta; + }; + union { + struct bpf_map *map; + }; + union { + void *key; + }; + union { + void *value; + }; +}; + +struct pcpu_freelist_node; + +struct pcpu_freelist_head { + struct pcpu_freelist_node *first; + raw_spinlock_t lock; +}; + +struct pcpu_freelist_node { + struct pcpu_freelist_node *next; +}; + +struct pcpu_freelist { + struct pcpu_freelist_head *freelist; + struct pcpu_freelist_head extralist; +}; + +struct bpf_lru_node { + struct list_head list; + u16 cpu; + u8 type; + u8 ref; +}; + +struct bpf_lru_list { + struct list_head lists[3]; + unsigned int counts[2]; + struct list_head *next_inactive_rotation; + raw_spinlock_t lock; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct bpf_lru_locallist { + struct list_head lists[2]; + u16 next_steal; + raw_spinlock_t lock; +}; + +struct bpf_common_lru { + struct bpf_lru_list lru_list; + struct bpf_lru_locallist *local_list; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +typedef bool (*del_from_htab_func)(void *, struct bpf_lru_node *); + +struct bpf_lru { + union { + struct bpf_common_lru common_lru; + struct bpf_lru_list *percpu_lru; + }; + del_from_htab_func del_from_htab; + void *del_arg; + unsigned int hash_offset; + unsigned int nr_scans; + bool percpu; + long: 56; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct bucket { + struct hlist_nulls_head head; + union { + raw_spinlock_t raw_lock; + spinlock_t lock; + }; +}; + +struct htab_elem; + +struct bpf_htab { + struct bpf_map map; + struct bucket *buckets; + void *elems; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + union { + struct pcpu_freelist freelist; + struct bpf_lru lru; + }; + struct htab_elem **extra_elems; + atomic_t count; + u32 n_buckets; + u32 elem_size; + u32 hashrnd; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct htab_elem { + union { + struct hlist_nulls_node hash_node; + struct { + void *padding; + union { + struct bpf_htab *htab; + struct pcpu_freelist_node fnode; + struct htab_elem *batch_flink; + }; + }; + }; + union { + struct callback_head rcu; + struct bpf_lru_node lru_node; + }; + u32 hash; + int: 32; + char key[0]; +}; + +struct bpf_iter_seq_hash_map_info { + struct bpf_map *map; + struct bpf_htab *htab; + void *percpu_value_buf; + u32 bucket_id; + u32 skip_elems; +}; + +struct bpf_iter_seq_array_map_info { + struct bpf_map *map; + void *percpu_value_buf; + u32 index; +}; + +struct prog_poke_elem { + struct list_head list; + struct bpf_prog_aux *aux; +}; + +enum bpf_lru_list_type { + BPF_LRU_LIST_T_ACTIVE = 0, + BPF_LRU_LIST_T_INACTIVE = 1, + BPF_LRU_LIST_T_FREE = 2, + BPF_LRU_LOCAL_LIST_T_FREE = 3, + BPF_LRU_LOCAL_LIST_T_PENDING = 4, +}; + +struct bpf_lpm_trie_key { + __u32 prefixlen; + __u8 data[0]; +}; + +struct lpm_trie_node { + struct callback_head rcu; + struct lpm_trie_node *child[2]; + u32 prefixlen; + u32 flags; + u8 data[0]; +}; + +struct lpm_trie { + struct bpf_map map; + struct lpm_trie_node *root; + size_t n_entries; + size_t max_prefixlen; + size_t data_size; + spinlock_t lock; + long: 32; + long: 64; + long: 64; + long: 64; +}; + +struct bpf_cgroup_storage_map { + struct bpf_map map; + spinlock_t lock; + struct rb_root root; + struct list_head list; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct bpf_queue_stack { + struct bpf_map map; + raw_spinlock_t lock; + u32 head; + u32 tail; + u32 size; + char elements[0]; + long: 64; + long: 64; + 
long: 64; + long: 64; + long: 64; + long: 64; +}; + +enum { + BPF_RB_NO_WAKEUP = 1, + BPF_RB_FORCE_WAKEUP = 2, +}; + +enum { + BPF_RB_AVAIL_DATA = 0, + BPF_RB_RING_SIZE = 1, + BPF_RB_CONS_POS = 2, + BPF_RB_PROD_POS = 3, +}; + +enum { + BPF_RINGBUF_BUSY_BIT = 2147483648, + BPF_RINGBUF_DISCARD_BIT = 1073741824, + BPF_RINGBUF_HDR_SZ = 8, +}; + +struct bpf_ringbuf { + wait_queue_head_t waitq; + struct irq_work work; + u64 mask; + struct page **pages; + int nr_pages; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + spinlock_t spinlock; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + 
long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long unsigned int consumer_pos; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + 
long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + 
long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long unsigned int producer_pos; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + 
long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + 
long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + char data[0]; +}; + +struct bpf_ringbuf_map { + struct bpf_map map; + struct bpf_map_memory memory; + struct bpf_ringbuf *rb; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct bpf_ringbuf_hdr { + u32 len; + u32 pg_off; +}; + +typedef u64 (*btf_bpf_ringbuf_reserve)(struct bpf_map *, u64, u64); + +typedef u64 (*btf_bpf_ringbuf_submit)(void *, u64); + +typedef u64 (*btf_bpf_ringbuf_discard)(void *, u64); + +typedef u64 (*btf_bpf_ringbuf_output)(struct bpf_map *, void *, u64, u64); + +typedef u64 (*btf_bpf_ringbuf_query)(struct bpf_map *, u64); + +enum { + BPF_LOCAL_STORAGE_GET_F_CREATE = 1, + BPF_SK_STORAGE_GET_F_CREATE = 1, +}; + +struct bpf_local_storage_map_bucket; + +struct bpf_local_storage_map { + struct bpf_map map; + struct bpf_local_storage_map_bucket *buckets; + u32 bucket_log; + u16 elem_size; + u16 cache_idx; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct bpf_local_storage_data; + +struct bpf_local_storage { + struct bpf_local_storage_data *cache[16]; + struct hlist_head list; + void *owner; + struct callback_head rcu; + raw_spinlock_t lock; +}; + +struct bpf_local_storage_map_bucket { + struct hlist_head list; + raw_spinlock_t lock; +}; + +struct bpf_local_storage_data { + struct bpf_local_storage_map *smap; + u8 data[0]; +}; + +struct bpf_local_storage_elem { + struct hlist_node map_node; + struct hlist_node snode; + struct bpf_local_storage *local_storage; + struct callback_head rcu; + long: 64; + struct bpf_local_storage_data sdata; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct bpf_local_storage_cache { + spinlock_t idx_lock; + u64 idx_usage_counts[16]; +}; + +struct lsm_blob_sizes { + int lbs_cred; + int lbs_file; + int lbs_inode; + int lbs_ipc; + int lbs_msg_msg; + int lbs_task; +}; + +struct bpf_storage_blob { + struct bpf_local_storage *storage; +}; + +typedef u64 (*btf_bpf_inode_storage_get)(struct bpf_map *, struct inode *, void *, u64); + +typedef u64 (*btf_bpf_inode_storage_delete)(struct bpf_map *, struct inode *); + +struct bpf_tramp_progs { + struct bpf_prog *progs[40]; + int nr_progs; +}; + +struct btf_enum { + __u32 name_off; + __s32 val; +}; + +struct btf_array { + __u32 type; + __u32 index_type; + __u32 nelems; +}; + +struct btf_param { + __u32 name_off; + __u32 type; +}; + +enum { + BTF_VAR_STATIC = 0, + BTF_VAR_GLOBAL_ALLOCATED = 1, + 
BTF_VAR_GLOBAL_EXTERN = 2, +}; + +struct btf_var { + __u32 linkage; +}; + +struct bpf_flow_keys { + __u16 nhoff; + __u16 thoff; + __u16 addr_proto; + __u8 is_frag; + __u8 is_first_frag; + __u8 is_encap; + __u8 ip_proto; + __be16 n_proto; + __be16 sport; + __be16 dport; + union { + struct { + __be32 ipv4_src; + __be32 ipv4_dst; + }; + struct { + __u32 ipv6_src[4]; + __u32 ipv6_dst[4]; + }; + }; + __u32 flags; + __be32 flow_label; +}; + +struct bpf_sock { + __u32 bound_dev_if; + __u32 family; + __u32 type; + __u32 protocol; + __u32 mark; + __u32 priority; + __u32 src_ip4; + __u32 src_ip6[4]; + __u32 src_port; + __u32 dst_port; + __u32 dst_ip4; + __u32 dst_ip6[4]; + __u32 state; + __s32 rx_queue_mapping; +}; + +struct __sk_buff { + __u32 len; + __u32 pkt_type; + __u32 mark; + __u32 queue_mapping; + __u32 protocol; + __u32 vlan_present; + __u32 vlan_tci; + __u32 vlan_proto; + __u32 priority; + __u32 ingress_ifindex; + __u32 ifindex; + __u32 tc_index; + __u32 cb[5]; + __u32 hash; + __u32 tc_classid; + __u32 data; + __u32 data_end; + __u32 napi_id; + __u32 family; + __u32 remote_ip4; + __u32 local_ip4; + __u32 remote_ip6[4]; + __u32 local_ip6[4]; + __u32 remote_port; + __u32 local_port; + __u32 data_meta; + union { + struct bpf_flow_keys *flow_keys; + }; + __u64 tstamp; + __u32 wire_len; + __u32 gso_segs; + union { + struct bpf_sock *sk; + }; + __u32 gso_size; +}; + +struct xdp_md { + __u32 data; + __u32 data_end; + __u32 data_meta; + __u32 ingress_ifindex; + __u32 rx_queue_index; + __u32 egress_ifindex; +}; + +struct sk_msg_md { + union { + void *data; + }; + union { + void *data_end; + }; + __u32 family; + __u32 remote_ip4; + __u32 local_ip4; + __u32 remote_ip6[4]; + __u32 local_ip6[4]; + __u32 remote_port; + __u32 local_port; + __u32 size; + union { + struct bpf_sock *sk; + }; +}; + +struct sk_reuseport_md { + union { + void *data; + }; + union { + void *data_end; + }; + __u32 len; + __u32 eth_protocol; + __u32 ip_protocol; + __u32 bind_inany; + __u32 hash; +}; + +struct bpf_sock_addr { + __u32 user_family; + __u32 user_ip4; + __u32 user_ip6[4]; + __u32 user_port; + __u32 family; + __u32 type; + __u32 protocol; + __u32 msg_src_ip4; + __u32 msg_src_ip6[4]; + union { + struct bpf_sock *sk; + }; +}; + +struct bpf_sock_ops { + __u32 op; + union { + __u32 args[4]; + __u32 reply; + __u32 replylong[4]; + }; + __u32 family; + __u32 remote_ip4; + __u32 local_ip4; + __u32 remote_ip6[4]; + __u32 local_ip6[4]; + __u32 remote_port; + __u32 local_port; + __u32 is_fullsock; + __u32 snd_cwnd; + __u32 srtt_us; + __u32 bpf_sock_ops_cb_flags; + __u32 state; + __u32 rtt_min; + __u32 snd_ssthresh; + __u32 rcv_nxt; + __u32 snd_nxt; + __u32 snd_una; + __u32 mss_cache; + __u32 ecn_flags; + __u32 rate_delivered; + __u32 rate_interval_us; + __u32 packets_out; + __u32 retrans_out; + __u32 total_retrans; + __u32 segs_in; + __u32 data_segs_in; + __u32 segs_out; + __u32 data_segs_out; + __u32 lost_out; + __u32 sacked_out; + __u32 sk_txhash; + __u64 bytes_received; + __u64 bytes_acked; + union { + struct bpf_sock *sk; + }; + union { + void *skb_data; + }; + union { + void *skb_data_end; + }; + __u32 skb_len; + __u32 skb_tcp_flags; +}; + +struct bpf_cgroup_dev_ctx { + __u32 access_type; + __u32 major; + __u32 minor; +}; + +struct bpf_sysctl { + __u32 write; + __u32 file_pos; +}; + +struct bpf_sockopt { + union { + struct bpf_sock *sk; + }; + union { + void *optval; + }; + union { + void *optval_end; + }; + __s32 level; + __s32 optname; + __s32 optlen; + __s32 retval; +}; + +struct bpf_sk_lookup { + union { + struct bpf_sock 
*sk; + }; + __u32 family; + __u32 protocol; + __u32 remote_ip4; + __u32 remote_ip6[4]; + __u32 remote_port; + __u32 local_ip4; + __u32 local_ip6[4]; + __u32 local_port; +}; + +struct sk_reuseport_kern { + struct sk_buff *skb; + struct sock *sk; + struct sock *selected_sk; + void *data_end; + u32 hash; + u32 reuseport_id; + bool bind_inany; +}; + +struct bpf_flow_dissector { + struct bpf_flow_keys *flow_keys; + const struct sk_buff *skb; + void *data; + void *data_end; +}; + +struct inet_listen_hashbucket { + spinlock_t lock; + unsigned int count; + union { + struct hlist_head head; + struct hlist_nulls_head nulls_head; + }; +}; + +struct inet_ehash_bucket; + +struct inet_bind_hashbucket; + +struct inet_hashinfo { + struct inet_ehash_bucket *ehash; + spinlock_t *ehash_locks; + unsigned int ehash_mask; + unsigned int ehash_locks_mask; + struct kmem_cache *bind_bucket_cachep; + struct inet_bind_hashbucket *bhash; + unsigned int bhash_size; + unsigned int lhash2_mask; + struct inet_listen_hashbucket *lhash2; + long: 64; + struct inet_listen_hashbucket listening_hash[32]; +}; + +struct ip_ra_chain { + struct ip_ra_chain *next; + struct sock *sk; + union { + void (*destructor)(struct sock *); + struct sock *saved_sk; + }; + struct callback_head rcu; +}; + +struct fib_table { + struct hlist_node tb_hlist; + u32 tb_id; + int tb_num_default; + struct callback_head rcu; + long unsigned int *tb_data; + long unsigned int __data[0]; +}; + +struct inet_peer_base { + struct rb_root rb_root; + seqlock_t lock; + int total; +}; + +struct tcp_fastopen_context { + siphash_key_t key[2]; + int num; + struct callback_head rcu; +}; + +struct xdp_txq_info { + struct net_device *dev; +}; + +struct xdp_buff { + void *data; + void *data_end; + void *data_meta; + void *data_hard_start; + struct xdp_rxq_info *rxq; + struct xdp_txq_info *txq; + u32 frame_sz; +}; + +struct bpf_sock_addr_kern { + struct sock *sk; + struct sockaddr *uaddr; + u64 tmp_reg; + void *t_ctx; +}; + +struct bpf_sock_ops_kern { + struct sock *sk; + union { + u32 args[4]; + u32 reply; + u32 replylong[4]; + }; + struct sk_buff *syn_skb; + struct sk_buff *skb; + void *skb_data_end; + u8 op; + u8 is_fullsock; + u8 remaining_opt_len; + u64 temp; +}; + +struct bpf_sysctl_kern { + struct ctl_table_header *head; + struct ctl_table *table; + void *cur_val; + size_t cur_len; + void *new_val; + size_t new_len; + int new_updated; + int write; + loff_t *ppos; + u64 tmp_reg; +}; + +struct bpf_sockopt_kern { + struct sock *sk; + u8 *optval; + u8 *optval_end; + s32 level; + s32 optname; + s32 optlen; + s32 retval; +}; + +struct bpf_sk_lookup_kern { + u16 family; + u16 protocol; + __be16 sport; + u16 dport; + struct { + __be32 saddr; + __be32 daddr; + } v4; + struct { + const struct in6_addr *saddr; + const struct in6_addr *daddr; + } v6; + struct sock *selected_sk; + bool no_reuseport; +}; + +struct sock_reuseport { + struct callback_head rcu; + u16 max_socks; + u16 num_socks; + unsigned int synq_overflow_ts; + unsigned int reuseport_id; + unsigned int bind_inany: 1; + unsigned int has_conns: 1; + struct bpf_prog *prog; + struct sock *socks[0]; +}; + +struct inet_ehash_bucket { + struct hlist_nulls_head chain; +}; + +struct inet_bind_hashbucket { + spinlock_t lock; + struct hlist_head chain; +}; + +struct ack_sample { + u32 pkts_acked; + s32 rtt_us; + u32 in_flight; +}; + +struct rate_sample { + u64 prior_mstamp; + u32 prior_delivered; + s32 delivered; + long int interval_us; + u32 snd_interval_us; + u32 rcv_interval_us; + long int rtt_us; + int losses; + u32 
acked_sacked; + u32 prior_in_flight; + bool is_app_limited; + bool is_retrans; + bool is_ack_delayed; +}; + +struct sk_msg_sg { + u32 start; + u32 curr; + u32 end; + u32 size; + u32 copybreak; + long unsigned int copy; + struct scatterlist data[19]; +}; + +struct sk_msg { + struct sk_msg_sg sg; + void *data; + void *data_end; + u32 apply_bytes; + u32 cork_bytes; + u32 flags; + struct sk_buff *skb; + struct sock *sk_redir; + struct sock *sk; + struct list_head list; +}; + +enum verifier_phase { + CHECK_META = 0, + CHECK_TYPE = 1, +}; + +struct resolve_vertex { + const struct btf_type *t; + u32 type_id; + u16 next_member; +}; + +enum visit_state { + NOT_VISITED = 0, + VISITED = 1, + RESOLVED = 2, +}; + +enum resolve_mode { + RESOLVE_TBD = 0, + RESOLVE_PTR = 1, + RESOLVE_STRUCT_OR_ARRAY = 2, +}; + +struct btf_sec_info { + u32 off; + u32 len; +}; + +struct btf_verifier_env { + struct btf *btf; + u8 *visit_states; + struct resolve_vertex stack[32]; + struct bpf_verifier_log log; + u32 log_type_id; + u32 top_stack; + enum verifier_phase phase; + enum resolve_mode resolve_mode; +}; + +struct btf_show { + u64 flags; + void *target; + void (*showfn)(struct btf_show *, const char *, va_list); + const struct btf *btf; + struct { + u8 depth; + u8 depth_to_show; + u8 depth_check; + u8 array_member: 1; + u8 array_terminated: 1; + u16 array_encoding; + u32 type_id; + int status; + const struct btf_type *type; + const struct btf_member *member; + char name[80]; + } state; + struct { + u32 size; + void *head; + void *data; + u8 safe[32]; + } obj; +}; + +struct btf_kind_operations { + s32 (*check_meta)(struct btf_verifier_env *, const struct btf_type *, u32); + int (*resolve)(struct btf_verifier_env *, const struct resolve_vertex *); + int (*check_member)(struct btf_verifier_env *, const struct btf_type *, const struct btf_member *, const struct btf_type *); + int (*check_kflag_member)(struct btf_verifier_env *, const struct btf_type *, const struct btf_member *, const struct btf_type *); + void (*log_details)(struct btf_verifier_env *, const struct btf_type *); + void (*show)(const struct btf *, const struct btf_type *, u32, void *, u8, struct btf_show *); +}; + +struct bpf_ctx_convert { + struct __sk_buff BPF_PROG_TYPE_SOCKET_FILTER_prog; + struct sk_buff BPF_PROG_TYPE_SOCKET_FILTER_kern; + struct __sk_buff BPF_PROG_TYPE_SCHED_CLS_prog; + struct sk_buff BPF_PROG_TYPE_SCHED_CLS_kern; + struct __sk_buff BPF_PROG_TYPE_SCHED_ACT_prog; + struct sk_buff BPF_PROG_TYPE_SCHED_ACT_kern; + struct xdp_md BPF_PROG_TYPE_XDP_prog; + struct xdp_buff BPF_PROG_TYPE_XDP_kern; + struct __sk_buff BPF_PROG_TYPE_CGROUP_SKB_prog; + struct sk_buff BPF_PROG_TYPE_CGROUP_SKB_kern; + struct bpf_sock BPF_PROG_TYPE_CGROUP_SOCK_prog; + struct sock BPF_PROG_TYPE_CGROUP_SOCK_kern; + struct bpf_sock_addr BPF_PROG_TYPE_CGROUP_SOCK_ADDR_prog; + struct bpf_sock_addr_kern BPF_PROG_TYPE_CGROUP_SOCK_ADDR_kern; + struct __sk_buff BPF_PROG_TYPE_LWT_IN_prog; + struct sk_buff BPF_PROG_TYPE_LWT_IN_kern; + struct __sk_buff BPF_PROG_TYPE_LWT_OUT_prog; + struct sk_buff BPF_PROG_TYPE_LWT_OUT_kern; + struct __sk_buff BPF_PROG_TYPE_LWT_XMIT_prog; + struct sk_buff BPF_PROG_TYPE_LWT_XMIT_kern; + struct __sk_buff BPF_PROG_TYPE_LWT_SEG6LOCAL_prog; + struct sk_buff BPF_PROG_TYPE_LWT_SEG6LOCAL_kern; + struct bpf_sock_ops BPF_PROG_TYPE_SOCK_OPS_prog; + struct bpf_sock_ops_kern BPF_PROG_TYPE_SOCK_OPS_kern; + struct __sk_buff BPF_PROG_TYPE_SK_SKB_prog; + struct sk_buff BPF_PROG_TYPE_SK_SKB_kern; + struct sk_msg_md BPF_PROG_TYPE_SK_MSG_prog; + struct sk_msg 
BPF_PROG_TYPE_SK_MSG_kern; + struct __sk_buff BPF_PROG_TYPE_FLOW_DISSECTOR_prog; + struct bpf_flow_dissector BPF_PROG_TYPE_FLOW_DISSECTOR_kern; + bpf_user_pt_regs_t BPF_PROG_TYPE_KPROBE_prog; + struct pt_regs BPF_PROG_TYPE_KPROBE_kern; + __u64 BPF_PROG_TYPE_TRACEPOINT_prog; + u64 BPF_PROG_TYPE_TRACEPOINT_kern; + struct bpf_perf_event_data BPF_PROG_TYPE_PERF_EVENT_prog; + struct bpf_perf_event_data_kern BPF_PROG_TYPE_PERF_EVENT_kern; + struct bpf_raw_tracepoint_args BPF_PROG_TYPE_RAW_TRACEPOINT_prog; + u64 BPF_PROG_TYPE_RAW_TRACEPOINT_kern; + struct bpf_raw_tracepoint_args BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE_prog; + u64 BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE_kern; + void *BPF_PROG_TYPE_TRACING_prog; + void *BPF_PROG_TYPE_TRACING_kern; + struct bpf_cgroup_dev_ctx BPF_PROG_TYPE_CGROUP_DEVICE_prog; + struct bpf_cgroup_dev_ctx BPF_PROG_TYPE_CGROUP_DEVICE_kern; + struct bpf_sysctl BPF_PROG_TYPE_CGROUP_SYSCTL_prog; + struct bpf_sysctl_kern BPF_PROG_TYPE_CGROUP_SYSCTL_kern; + struct bpf_sockopt BPF_PROG_TYPE_CGROUP_SOCKOPT_prog; + struct bpf_sockopt_kern BPF_PROG_TYPE_CGROUP_SOCKOPT_kern; + struct sk_reuseport_md BPF_PROG_TYPE_SK_REUSEPORT_prog; + struct sk_reuseport_kern BPF_PROG_TYPE_SK_REUSEPORT_kern; + struct bpf_sk_lookup BPF_PROG_TYPE_SK_LOOKUP_prog; + struct bpf_sk_lookup_kern BPF_PROG_TYPE_SK_LOOKUP_kern; + void *BPF_PROG_TYPE_STRUCT_OPS_prog; + void *BPF_PROG_TYPE_STRUCT_OPS_kern; + void *BPF_PROG_TYPE_EXT_prog; + void *BPF_PROG_TYPE_EXT_kern; + void *BPF_PROG_TYPE_LSM_prog; + void *BPF_PROG_TYPE_LSM_kern; +}; + +enum { + __ctx_convertBPF_PROG_TYPE_SOCKET_FILTER = 0, + __ctx_convertBPF_PROG_TYPE_SCHED_CLS = 1, + __ctx_convertBPF_PROG_TYPE_SCHED_ACT = 2, + __ctx_convertBPF_PROG_TYPE_XDP = 3, + __ctx_convertBPF_PROG_TYPE_CGROUP_SKB = 4, + __ctx_convertBPF_PROG_TYPE_CGROUP_SOCK = 5, + __ctx_convertBPF_PROG_TYPE_CGROUP_SOCK_ADDR = 6, + __ctx_convertBPF_PROG_TYPE_LWT_IN = 7, + __ctx_convertBPF_PROG_TYPE_LWT_OUT = 8, + __ctx_convertBPF_PROG_TYPE_LWT_XMIT = 9, + __ctx_convertBPF_PROG_TYPE_LWT_SEG6LOCAL = 10, + __ctx_convertBPF_PROG_TYPE_SOCK_OPS = 11, + __ctx_convertBPF_PROG_TYPE_SK_SKB = 12, + __ctx_convertBPF_PROG_TYPE_SK_MSG = 13, + __ctx_convertBPF_PROG_TYPE_FLOW_DISSECTOR = 14, + __ctx_convertBPF_PROG_TYPE_KPROBE = 15, + __ctx_convertBPF_PROG_TYPE_TRACEPOINT = 16, + __ctx_convertBPF_PROG_TYPE_PERF_EVENT = 17, + __ctx_convertBPF_PROG_TYPE_RAW_TRACEPOINT = 18, + __ctx_convertBPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 19, + __ctx_convertBPF_PROG_TYPE_TRACING = 20, + __ctx_convertBPF_PROG_TYPE_CGROUP_DEVICE = 21, + __ctx_convertBPF_PROG_TYPE_CGROUP_SYSCTL = 22, + __ctx_convertBPF_PROG_TYPE_CGROUP_SOCKOPT = 23, + __ctx_convertBPF_PROG_TYPE_SK_REUSEPORT = 24, + __ctx_convertBPF_PROG_TYPE_SK_LOOKUP = 25, + __ctx_convertBPF_PROG_TYPE_STRUCT_OPS = 26, + __ctx_convertBPF_PROG_TYPE_EXT = 27, + __ctx_convertBPF_PROG_TYPE_LSM = 28, + __ctx_convert_unused = 29, +}; + +enum bpf_struct_walk_result { + WALK_SCALAR = 0, + WALK_PTR = 1, + WALK_STRUCT = 2, +}; + +struct btf_show_snprintf { + struct btf_show show; + int len_left; + int len; +}; + +struct bpf_dispatcher_prog { + struct bpf_prog *prog; + refcount_t users; +}; + +struct bpf_dispatcher { + struct mutex mutex; + void *func; + struct bpf_dispatcher_prog progs[48]; + int num_progs; + void *image; + u32 image_off; + struct bpf_ksym ksym; +}; + +struct bpf_devmap_val { + __u32 ifindex; + union { + int fd; + __u32 id; + } bpf_prog; +}; + +enum net_device_flags { + IFF_UP = 1, + IFF_BROADCAST = 2, + IFF_DEBUG = 4, + IFF_LOOPBACK = 8, + IFF_POINTOPOINT = 16, 
+ IFF_NOTRAILERS = 32, + IFF_RUNNING = 64, + IFF_NOARP = 128, + IFF_PROMISC = 256, + IFF_ALLMULTI = 512, + IFF_MASTER = 1024, + IFF_SLAVE = 2048, + IFF_MULTICAST = 4096, + IFF_PORTSEL = 8192, + IFF_AUTOMEDIA = 16384, + IFF_DYNAMIC = 32768, + IFF_LOWER_UP = 65536, + IFF_DORMANT = 131072, + IFF_ECHO = 262144, +}; + +struct xdp_dev_bulk_queue { + struct xdp_frame *q[16]; + struct list_head flush_node; + struct net_device *dev; + struct net_device *dev_rx; + unsigned int count; +}; + +enum netdev_cmd { + NETDEV_UP = 1, + NETDEV_DOWN = 2, + NETDEV_REBOOT = 3, + NETDEV_CHANGE = 4, + NETDEV_REGISTER = 5, + NETDEV_UNREGISTER = 6, + NETDEV_CHANGEMTU = 7, + NETDEV_CHANGEADDR = 8, + NETDEV_PRE_CHANGEADDR = 9, + NETDEV_GOING_DOWN = 10, + NETDEV_CHANGENAME = 11, + NETDEV_FEAT_CHANGE = 12, + NETDEV_BONDING_FAILOVER = 13, + NETDEV_PRE_UP = 14, + NETDEV_PRE_TYPE_CHANGE = 15, + NETDEV_POST_TYPE_CHANGE = 16, + NETDEV_POST_INIT = 17, + NETDEV_RELEASE = 18, + NETDEV_NOTIFY_PEERS = 19, + NETDEV_JOIN = 20, + NETDEV_CHANGEUPPER = 21, + NETDEV_RESEND_IGMP = 22, + NETDEV_PRECHANGEMTU = 23, + NETDEV_CHANGEINFODATA = 24, + NETDEV_BONDING_INFO = 25, + NETDEV_PRECHANGEUPPER = 26, + NETDEV_CHANGELOWERSTATE = 27, + NETDEV_UDP_TUNNEL_PUSH_INFO = 28, + NETDEV_UDP_TUNNEL_DROP_INFO = 29, + NETDEV_CHANGE_TX_QUEUE_LEN = 30, + NETDEV_CVLAN_FILTER_PUSH_INFO = 31, + NETDEV_CVLAN_FILTER_DROP_INFO = 32, + NETDEV_SVLAN_FILTER_PUSH_INFO = 33, + NETDEV_SVLAN_FILTER_DROP_INFO = 34, +}; + +struct netdev_notifier_info { + struct net_device *dev; + struct netlink_ext_ack *extack; +}; + +struct bpf_dtab; + +struct bpf_dtab_netdev { + struct net_device *dev; + struct hlist_node index_hlist; + struct bpf_dtab *dtab; + struct bpf_prog *xdp_prog; + struct callback_head rcu; + unsigned int idx; + struct bpf_devmap_val val; +}; + +struct bpf_dtab { + struct bpf_map map; + struct bpf_dtab_netdev **netdev_map; + struct list_head list; + struct hlist_head *dev_index_head; + spinlock_t index_lock; + unsigned int items; + u32 n_buckets; + long: 32; + long: 64; + long: 64; +}; + +struct bpf_cpumap_val { + __u32 qsize; + union { + int fd; + __u32 id; + } bpf_prog; +}; + +typedef struct bio_vec skb_frag_t; + +struct skb_shared_hwtstamps { + ktime_t hwtstamp; +}; + +struct skb_shared_info { + __u8 __unused; + __u8 meta_len; + __u8 nr_frags; + __u8 tx_flags; + short unsigned int gso_size; + short unsigned int gso_segs; + struct sk_buff *frag_list; + struct skb_shared_hwtstamps hwtstamps; + unsigned int gso_type; + u32 tskey; + atomic_t dataref; + void *destructor_arg; + skb_frag_t frags[17]; +}; + +struct bpf_nh_params { + u32 nh_family; + union { + u32 ipv4_nh; + struct in6_addr ipv6_nh; + }; +}; + +struct bpf_redirect_info { + u32 flags; + u32 tgt_index; + void *tgt_value; + struct bpf_map *map; + u32 kern_flags; + struct bpf_nh_params nh; +}; + +struct ptr_ring { + int producer; + spinlock_t producer_lock; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + int consumer_head; + int consumer_tail; + spinlock_t consumer_lock; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + int size; + int batch; + void **queue; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct bpf_cpu_map_entry; + +struct xdp_bulk_queue { + void *q[8]; + struct list_head flush_node; + struct bpf_cpu_map_entry *obj; + unsigned int count; +}; + +struct bpf_cpu_map; + +struct bpf_cpu_map_entry { + u32 cpu; + int map_id; + struct xdp_bulk_queue *bulkq; + struct bpf_cpu_map *cmap; + 
struct ptr_ring *queue; + struct task_struct *kthread; + struct bpf_cpumap_val value; + struct bpf_prog *prog; + atomic_t refcnt; + struct callback_head rcu; + struct work_struct kthread_stop_wq; +}; + +struct bpf_cpu_map { + struct bpf_map map; + struct bpf_cpu_map_entry **cpu_map; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct rhlist_head { + struct rhash_head rhead; + struct rhlist_head *next; +}; + +struct bpf_prog_offload_ops { + int (*insn_hook)(struct bpf_verifier_env *, int, int); + int (*finalize)(struct bpf_verifier_env *); + int (*replace_insn)(struct bpf_verifier_env *, u32, struct bpf_insn *); + int (*remove_insns)(struct bpf_verifier_env *, u32, u32); + int (*prepare)(struct bpf_prog *); + int (*translate)(struct bpf_prog *); + void (*destroy)(struct bpf_prog *); +}; + +struct bpf_offload_dev { + const struct bpf_prog_offload_ops *ops; + struct list_head netdevs; + void *priv; +}; + +struct bpf_offload_netdev { + struct rhash_head l; + struct net_device *netdev; + struct bpf_offload_dev *offdev; + struct list_head progs; + struct list_head maps; + struct list_head offdev_netdevs; +}; + +struct ns_get_path_bpf_prog_args { + struct bpf_prog *prog; + struct bpf_prog_info *info; +}; + +struct ns_get_path_bpf_map_args { + struct bpf_offloaded_map *offmap; + struct bpf_map_info *info; +}; + +struct bpf_netns_link { + struct bpf_link link; + enum bpf_attach_type type; + enum netns_bpf_attach_type netns_type; + struct net *net; + struct list_head node; +}; + +enum bpf_stack_build_id_status { + BPF_STACK_BUILD_ID_EMPTY = 0, + BPF_STACK_BUILD_ID_VALID = 1, + BPF_STACK_BUILD_ID_IP = 2, +}; + +struct bpf_stack_build_id { + __s32 status; + unsigned char build_id[20]; + union { + __u64 offset; + __u64 ip; + }; +}; + +enum { + BPF_F_SKIP_FIELD_MASK = 255, + BPF_F_USER_STACK = 256, + BPF_F_FAST_STACK_CMP = 512, + BPF_F_REUSE_STACKID = 1024, + BPF_F_USER_BUILD_ID = 2048, +}; + +typedef __u32 Elf32_Addr; + +typedef __u16 Elf32_Half; + +typedef __u32 Elf32_Off; + +struct elf32_hdr { + unsigned char e_ident[16]; + Elf32_Half e_type; + Elf32_Half e_machine; + Elf32_Word e_version; + Elf32_Addr e_entry; + Elf32_Off e_phoff; + Elf32_Off e_shoff; + Elf32_Word e_flags; + Elf32_Half e_ehsize; + Elf32_Half e_phentsize; + Elf32_Half e_phnum; + Elf32_Half e_shentsize; + Elf32_Half e_shnum; + Elf32_Half e_shstrndx; +}; + +typedef struct elf32_hdr Elf32_Ehdr; + +struct elf32_phdr { + Elf32_Word p_type; + Elf32_Off p_offset; + Elf32_Addr p_vaddr; + Elf32_Addr p_paddr; + Elf32_Word p_filesz; + Elf32_Word p_memsz; + Elf32_Word p_flags; + Elf32_Word p_align; +}; + +typedef struct elf32_phdr Elf32_Phdr; + +typedef struct elf32_note Elf32_Nhdr; + +enum perf_callchain_context { + PERF_CONTEXT_HV = 4294967264, + PERF_CONTEXT_KERNEL = 4294967168, + PERF_CONTEXT_USER = 4294966784, + PERF_CONTEXT_GUEST = 4294965248, + PERF_CONTEXT_GUEST_KERNEL = 4294965120, + PERF_CONTEXT_GUEST_USER = 4294964736, + PERF_CONTEXT_MAX = 4294963201, +}; + +struct stack_map_bucket { + struct pcpu_freelist_node fnode; + u32 hash; + u32 nr; + u64 data[0]; +}; + +struct bpf_stack_map { + struct bpf_map map; + void *elems; + struct pcpu_freelist freelist; + u32 n_buckets; + struct stack_map_bucket *buckets[0]; + long: 64; + long: 64; + long: 64; +}; + +struct stack_map_irq_work { + struct irq_work irq_work; + struct mm_struct *mm; +}; + +typedef u64 (*btf_bpf_get_stackid)(struct pt_regs *, struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_stackid_pe)(struct bpf_perf_event_data_kern *, 
struct bpf_map *, u64); + +typedef u64 (*btf_bpf_get_stack)(struct pt_regs *, void *, u32, u64); + +typedef u64 (*btf_bpf_get_task_stack)(struct task_struct *, void *, u32, u64); + +typedef u64 (*btf_bpf_get_stack_pe)(struct bpf_perf_event_data_kern *, void *, u32, u64); + +enum { + BPF_F_SYSCTL_BASE_NAME = 1, +}; + +struct bpf_prog_list { + struct list_head node; + struct bpf_prog *prog; + struct bpf_cgroup_link *link; + struct bpf_cgroup_storage *storage[2]; +}; + +struct qdisc_skb_cb { + struct { + unsigned int pkt_len; + u16 slave_dev_queue_mapping; + u16 tc_classid; + }; + unsigned char data[20]; + u16 mru; +}; + +struct bpf_skb_data_end { + struct qdisc_skb_cb qdisc_cb; + void *data_meta; + void *data_end; +}; + +enum { + TCPF_ESTABLISHED = 2, + TCPF_SYN_SENT = 4, + TCPF_SYN_RECV = 8, + TCPF_FIN_WAIT1 = 16, + TCPF_FIN_WAIT2 = 32, + TCPF_TIME_WAIT = 64, + TCPF_CLOSE = 128, + TCPF_CLOSE_WAIT = 256, + TCPF_LAST_ACK = 512, + TCPF_LISTEN = 1024, + TCPF_CLOSING = 2048, + TCPF_NEW_SYN_RECV = 4096, +}; + +typedef u64 (*btf_bpf_sysctl_get_name)(struct bpf_sysctl_kern *, char *, size_t, u64); + +typedef u64 (*btf_bpf_sysctl_get_current_value)(struct bpf_sysctl_kern *, char *, size_t); + +typedef u64 (*btf_bpf_sysctl_get_new_value)(struct bpf_sysctl_kern *, char *, size_t); + +typedef u64 (*btf_bpf_sysctl_set_new_value)(struct bpf_sysctl_kern *, const char *, size_t); + +enum sock_type { + SOCK_STREAM = 1, + SOCK_DGRAM = 2, + SOCK_RAW = 3, + SOCK_RDM = 4, + SOCK_SEQPACKET = 5, + SOCK_DCCP = 6, + SOCK_PACKET = 10, +}; + +enum { + IPPROTO_IP = 0, + IPPROTO_ICMP = 1, + IPPROTO_IGMP = 2, + IPPROTO_IPIP = 4, + IPPROTO_TCP = 6, + IPPROTO_EGP = 8, + IPPROTO_PUP = 12, + IPPROTO_UDP = 17, + IPPROTO_IDP = 22, + IPPROTO_TP = 29, + IPPROTO_DCCP = 33, + IPPROTO_IPV6 = 41, + IPPROTO_RSVP = 46, + IPPROTO_GRE = 47, + IPPROTO_ESP = 50, + IPPROTO_AH = 51, + IPPROTO_MTP = 92, + IPPROTO_BEETPH = 94, + IPPROTO_ENCAP = 98, + IPPROTO_PIM = 103, + IPPROTO_COMP = 108, + IPPROTO_SCTP = 132, + IPPROTO_UDPLITE = 136, + IPPROTO_MPLS = 137, + IPPROTO_ETHERNET = 143, + IPPROTO_RAW = 255, + IPPROTO_MPTCP = 262, + IPPROTO_MAX = 263, +}; + +enum sock_flags { + SOCK_DEAD = 0, + SOCK_DONE = 1, + SOCK_URGINLINE = 2, + SOCK_KEEPOPEN = 3, + SOCK_LINGER = 4, + SOCK_DESTROY = 5, + SOCK_BROADCAST = 6, + SOCK_TIMESTAMP = 7, + SOCK_ZAPPED = 8, + SOCK_USE_WRITE_QUEUE = 9, + SOCK_DBG = 10, + SOCK_RCVTSTAMP = 11, + SOCK_RCVTSTAMPNS = 12, + SOCK_LOCALROUTE = 13, + SOCK_MEMALLOC = 14, + SOCK_TIMESTAMPING_RX_SOFTWARE = 15, + SOCK_FASYNC = 16, + SOCK_RXQ_OVFL = 17, + SOCK_ZEROCOPY = 18, + SOCK_WIFI_STATUS = 19, + SOCK_NOFCS = 20, + SOCK_FILTER_LOCKED = 21, + SOCK_SELECT_ERR_QUEUE = 22, + SOCK_RCU_FREE = 23, + SOCK_TXTIME = 24, + SOCK_XDP = 25, + SOCK_TSTAMP_NEW = 26, +}; + +struct reuseport_array { + struct bpf_map map; + struct sock *ptrs[0]; +}; + +enum bpf_struct_ops_state { + BPF_STRUCT_OPS_STATE_INIT = 0, + BPF_STRUCT_OPS_STATE_INUSE = 1, + BPF_STRUCT_OPS_STATE_TOBEFREE = 2, +}; + +struct bpf_struct_ops_value { + refcount_t refcnt; + enum bpf_struct_ops_state state; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + char data[0]; +}; + +struct bpf_struct_ops_map { + struct bpf_map map; + const struct bpf_struct_ops *st_ops; + struct mutex lock; + struct bpf_prog **progs; + void *image; + struct bpf_struct_ops_value *uvalue; + struct bpf_struct_ops_value kvalue; +}; + +struct bpf_struct_ops_tcp_congestion_ops { + refcount_t refcnt; + enum bpf_struct_ops_state state; + long: 64; + long: 64; + long: 64; + 
long: 64; + long: 64; + long: 64; + long: 64; + struct tcp_congestion_ops data; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct sembuf { + short unsigned int sem_num; + short int sem_op; + short int sem_flg; +}; + +enum key_need_perm { + KEY_NEED_UNSPECIFIED = 0, + KEY_NEED_VIEW = 1, + KEY_NEED_READ = 2, + KEY_NEED_WRITE = 3, + KEY_NEED_SEARCH = 4, + KEY_NEED_LINK = 5, + KEY_NEED_SETATTR = 6, + KEY_NEED_UNLINK = 7, + KEY_SYSADMIN_OVERRIDE = 8, + KEY_AUTHTOKEN_OVERRIDE = 9, + KEY_DEFER_PERM_CHECK = 10, +}; + +struct __key_reference_with_attributes; + +typedef struct __key_reference_with_attributes *key_ref_t; + +struct xfrm_sec_ctx { + __u8 ctx_doi; + __u8 ctx_alg; + __u16 ctx_len; + __u32 ctx_sid; + char ctx_str[0]; +}; + +struct xfrm_user_sec_ctx { + __u16 len; + __u16 exttype; + __u8 ctx_alg; + __u8 ctx_doi; + __u16 ctx_len; +}; + +enum perf_branch_sample_type { + PERF_SAMPLE_BRANCH_USER = 1, + PERF_SAMPLE_BRANCH_KERNEL = 2, + PERF_SAMPLE_BRANCH_HV = 4, + PERF_SAMPLE_BRANCH_ANY = 8, + PERF_SAMPLE_BRANCH_ANY_CALL = 16, + PERF_SAMPLE_BRANCH_ANY_RETURN = 32, + PERF_SAMPLE_BRANCH_IND_CALL = 64, + PERF_SAMPLE_BRANCH_ABORT_TX = 128, + PERF_SAMPLE_BRANCH_IN_TX = 256, + PERF_SAMPLE_BRANCH_NO_TX = 512, + PERF_SAMPLE_BRANCH_COND = 1024, + PERF_SAMPLE_BRANCH_CALL_STACK = 2048, + PERF_SAMPLE_BRANCH_IND_JUMP = 4096, + PERF_SAMPLE_BRANCH_CALL = 8192, + PERF_SAMPLE_BRANCH_NO_FLAGS = 16384, + PERF_SAMPLE_BRANCH_NO_CYCLES = 32768, + PERF_SAMPLE_BRANCH_TYPE_SAVE = 65536, + PERF_SAMPLE_BRANCH_HW_INDEX = 131072, + PERF_SAMPLE_BRANCH_MAX = 262144, +}; + +enum perf_event_read_format { + PERF_FORMAT_TOTAL_TIME_ENABLED = 1, + PERF_FORMAT_TOTAL_TIME_RUNNING = 2, + PERF_FORMAT_ID = 4, + PERF_FORMAT_GROUP = 8, + PERF_FORMAT_MAX = 16, +}; + +enum perf_event_ioc_flags { + PERF_IOC_FLAG_GROUP = 1, +}; + +struct perf_event_header { + __u32 type; + __u16 misc; + __u16 size; +}; + +struct perf_ns_link_info { + __u64 dev; + __u64 ino; +}; + +enum { + NET_NS_INDEX = 0, + UTS_NS_INDEX = 1, + IPC_NS_INDEX = 2, + PID_NS_INDEX = 3, + USER_NS_INDEX = 4, + MNT_NS_INDEX = 5, + CGROUP_NS_INDEX = 6, + NR_NAMESPACES = 7, +}; + +enum perf_event_type { + PERF_RECORD_MMAP = 1, + PERF_RECORD_LOST = 2, + PERF_RECORD_COMM = 3, + PERF_RECORD_EXIT = 4, + PERF_RECORD_THROTTLE = 5, + PERF_RECORD_UNTHROTTLE = 6, + PERF_RECORD_FORK = 7, + PERF_RECORD_READ = 8, + PERF_RECORD_SAMPLE = 9, + PERF_RECORD_MMAP2 = 10, + PERF_RECORD_AUX = 11, + PERF_RECORD_ITRACE_START = 12, + PERF_RECORD_LOST_SAMPLES = 13, + PERF_RECORD_SWITCH = 14, + PERF_RECORD_SWITCH_CPU_WIDE = 15, + PERF_RECORD_NAMESPACES = 16, + PERF_RECORD_KSYMBOL = 17, + PERF_RECORD_BPF_EVENT = 18, + PERF_RECORD_CGROUP = 19, + PERF_RECORD_TEXT_POKE = 20, + PERF_RECORD_MAX = 21, +}; + +enum perf_addr_filter_action_t { + PERF_ADDR_FILTER_ACTION_STOP = 0, + PERF_ADDR_FILTER_ACTION_START = 1, + PERF_ADDR_FILTER_ACTION_FILTER = 2, +}; + +struct perf_addr_filter { + struct list_head entry; + struct path path; + long unsigned int offset; + long unsigned int size; + enum perf_addr_filter_action_t action; +}; + +struct swevent_hlist { + struct hlist_head heads[256]; + struct callback_head callback_head; +}; + +struct pmu_event_list { + raw_spinlock_t lock; + struct list_head list; +}; + +struct perf_buffer { + refcount_t refcount; + struct callback_head callback_head; + int nr_pages; + int overwrite; + int paused; + atomic_t poll; + local_t head; + unsigned int nest; + local_t events; + local_t wakeup; + local_t lost; + long int watermark; + long int aux_watermark; + 
spinlock_t event_lock; + struct list_head event_list; + atomic_t mmap_count; + long unsigned int mmap_locked; + struct user_struct *mmap_user; + long int aux_head; + unsigned int aux_nest; + long int aux_wakeup; + long unsigned int aux_pgoff; + int aux_nr_pages; + int aux_overwrite; + atomic_t aux_mmap_count; + long unsigned int aux_mmap_locked; + void (*free_aux)(void *); + refcount_t aux_refcount; + int aux_in_sampling; + void **aux_pages; + void *aux_priv; + struct perf_event_mmap_page *user_page; + void *data_pages[0]; +}; + +struct match_token { + int token; + const char *pattern; +}; + +enum { + MAX_OPT_ARGS = 3, +}; + +typedef struct { + char *from; + char *to; +} substring_t; + +struct min_heap { + void *data; + int nr; + int size; +}; + +struct min_heap_callbacks { + int elem_size; + bool (*less)(const void *, const void *); + void (*swp)(void *, void *); +}; + +typedef int (*remote_function_f)(void *); + +struct remote_function_call { + struct task_struct *p; + remote_function_f func; + void *info; + int ret; +}; + +typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *, struct perf_event_context *, void *); + +struct event_function_struct { + struct perf_event *event; + event_f func; + void *data; +}; + +enum event_type_t { + EVENT_FLEXIBLE = 1, + EVENT_PINNED = 2, + EVENT_TIME = 4, + EVENT_CPU = 8, + EVENT_ALL = 3, +}; + +struct stop_event_data { + struct perf_event *event; + unsigned int restart; +}; + +struct perf_read_data { + struct perf_event *event; + bool group; + int ret; +}; + +struct perf_read_event { + struct perf_event_header header; + u32 pid; + u32 tid; +}; + +typedef void perf_iterate_f(struct perf_event *, void *); + +struct remote_output { + struct perf_buffer *rb; + int err; +}; + +struct perf_task_event { + struct task_struct *task; + struct perf_event_context *task_ctx; + struct { + struct perf_event_header header; + u32 pid; + u32 ppid; + u32 tid; + u32 ptid; + u64 time; + } event_id; +}; + +struct perf_comm_event { + struct task_struct *task; + char *comm; + int comm_size; + struct { + struct perf_event_header header; + u32 pid; + u32 tid; + } event_id; +}; + +struct perf_namespaces_event { + struct task_struct *task; + struct { + struct perf_event_header header; + u32 pid; + u32 tid; + u64 nr_namespaces; + struct perf_ns_link_info link_info[7]; + } event_id; +}; + +struct perf_cgroup_event { + char *path; + int path_size; + struct { + struct perf_event_header header; + u64 id; + char path[0]; + } event_id; +}; + +struct perf_mmap_event { + struct vm_area_struct *vma; + const char *file_name; + int file_size; + int maj; + int min; + u64 ino; + u64 ino_generation; + u32 prot; + u32 flags; + struct { + struct perf_event_header header; + u32 pid; + u32 tid; + u64 start; + u64 len; + u64 pgoff; + } event_id; +}; + +struct perf_switch_event { + struct task_struct *task; + struct task_struct *next_prev; + struct { + struct perf_event_header header; + u32 next_prev_pid; + u32 next_prev_tid; + } event_id; +}; + +struct perf_ksymbol_event { + const char *name; + int name_len; + struct { + struct perf_event_header header; + u64 addr; + u32 len; + u16 ksym_type; + u16 flags; + } event_id; +}; + +struct perf_bpf_event { + struct bpf_prog *prog; + struct { + struct perf_event_header header; + u16 type; + u16 flags; + u32 id; + u8 tag[8]; + } event_id; +}; + +struct perf_text_poke_event { + const void *old_bytes; + const void *new_bytes; + size_t pad; + u16 old_len; + u16 new_len; + struct { + struct perf_event_header header; + u64 addr; + } event_id; 
+}; + +struct swevent_htable { + struct swevent_hlist *swevent_hlist; + struct mutex hlist_mutex; + int hlist_refcount; + int recursion[4]; +}; + +enum perf_probe_config { + PERF_PROBE_CONFIG_IS_RETPROBE = 1, + PERF_UPROBE_REF_CTR_OFFSET_BITS = 32, + PERF_UPROBE_REF_CTR_OFFSET_SHIFT = 32, +}; + +enum { + IF_ACT_NONE = 4294967295, + IF_ACT_FILTER = 0, + IF_ACT_START = 1, + IF_ACT_STOP = 2, + IF_SRC_FILE = 3, + IF_SRC_KERNEL = 4, + IF_SRC_FILEADDR = 5, + IF_SRC_KERNELADDR = 6, +}; + +enum { + IF_STATE_ACTION = 0, + IF_STATE_SOURCE = 1, + IF_STATE_END = 2, +}; + +struct perf_aux_event { + struct perf_event_header header; + u32 pid; + u32 tid; +}; + +struct perf_aux_event___2 { + struct perf_event_header header; + u64 offset; + u64 size; + u64 flags; +}; + +struct callchain_cpus_entries { + struct callback_head callback_head; + struct perf_callchain_entry *cpu_entries[0]; +}; + +struct bp_cpuinfo { + unsigned int cpu_pinned; + unsigned int *tsk_pinned; + unsigned int flexible; +}; + +struct bp_busy_slots { + unsigned int pinned; + unsigned int flexible; +}; + +struct compact_control; + +struct capture_control { + struct compact_control *cc; + struct page *page; +}; + +typedef u32 uprobe_opcode_t; + +struct uprobe { + struct rb_node rb_node; + refcount_t ref; + struct rw_semaphore register_rwsem; + struct rw_semaphore consumer_rwsem; + struct list_head pending_list; + struct uprobe_consumer *consumers; + struct inode *inode; + loff_t offset; + loff_t ref_ctr_offset; + long unsigned int flags; + struct arch_uprobe arch; +}; + +struct xol_area { + wait_queue_head_t wq; + atomic_t slot_count; + long unsigned int *bitmap; + struct vm_special_mapping xol_mapping; + struct page *pages[2]; + long unsigned int vaddr; +}; + +typedef long unsigned int vm_flags_t; + +struct page_vma_mapped_walk { + struct page *page; + struct vm_area_struct *vma; + long unsigned int address; + pmd_t *pmd; + pte_t *pte; + spinlock_t *ptl; + unsigned int flags; +}; + +struct compact_control { + struct list_head freepages; + struct list_head migratepages; + unsigned int nr_freepages; + unsigned int nr_migratepages; + long unsigned int free_pfn; + long unsigned int migrate_pfn; + long unsigned int fast_start_pfn; + struct zone *zone; + long unsigned int total_migrate_scanned; + long unsigned int total_free_scanned; + short unsigned int fast_search_fail; + short int search_order; + const gfp_t gfp_mask; + int order; + int migratetype; + const unsigned int alloc_flags; + const int highest_zoneidx; + enum migrate_mode mode; + bool ignore_skip_hint; + bool no_set_skip_hint; + bool ignore_block_suitable; + bool direct_compaction; + bool proactive_compaction; + bool whole_zone; + bool contended; + bool rescan; + bool alloc_contig; +}; + +struct delayed_uprobe { + struct list_head list; + struct uprobe *uprobe; + struct mm_struct *mm; +}; + +struct map_info { + struct map_info *next; + struct mm_struct *mm; + long unsigned int vaddr; +}; + +struct parallel_data; + +struct padata_priv { + struct list_head list; + struct parallel_data *pd; + int cb_cpu; + unsigned int seq_nr; + int info; + void (*parallel)(struct padata_priv *); + void (*serial)(struct padata_priv *); +}; + +struct padata_cpumask { + cpumask_var_t pcpu; + cpumask_var_t cbcpu; +}; + +struct padata_shell; + +struct padata_list; + +struct padata_serial_queue; + +struct parallel_data { + struct padata_shell *ps; + struct padata_list *reorder_list; + struct padata_serial_queue *squeue; + atomic_t refcnt; + unsigned int seq_nr; + unsigned int processed; + int cpu; + struct 
padata_cpumask cpumask; + struct work_struct reorder_work; + long: 64; + long: 64; + long: 64; + spinlock_t lock; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct padata_list { + struct list_head list; + spinlock_t lock; +}; + +struct padata_serial_queue { + struct padata_list serial; + struct work_struct work; + struct parallel_data *pd; +}; + +struct padata_instance; + +struct padata_shell { + struct padata_instance *pinst; + struct parallel_data *pd; + struct parallel_data *opd; + struct list_head list; +}; + +struct padata_instance { + struct hlist_node cpu_online_node; + struct hlist_node cpu_dead_node; + struct workqueue_struct *parallel_wq; + struct workqueue_struct *serial_wq; + struct list_head pslist; + struct padata_cpumask cpumask; + struct kobject kobj; + struct mutex lock; + u8 flags; +}; + +struct padata_mt_job { + void (*thread_fn)(long unsigned int, long unsigned int, void *); + void *fn_arg; + long unsigned int start; + long unsigned int size; + long unsigned int align; + long unsigned int min_chunk; + int max_threads; +}; + +struct padata_work { + struct work_struct pw_work; + struct list_head pw_list; + void *pw_data; +}; + +struct padata_mt_job_state { + spinlock_t lock; + struct completion completion; + struct padata_mt_job *job; + int nworks; + int nworks_fini; + long unsigned int chunk_size; +}; + +struct padata_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct padata_instance *, struct attribute *, char *); + ssize_t (*store)(struct padata_instance *, struct attribute *, const char *, size_t); +}; + +struct static_key_mod { + struct static_key_mod *next; + struct jump_entry *entries; + struct module *mod; +}; + +struct static_key_deferred { + struct static_key key; + long unsigned int timeout; + struct delayed_work work; +}; + +enum rseq_cpu_id_state { + RSEQ_CPU_ID_UNINITIALIZED = 4294967295, + RSEQ_CPU_ID_REGISTRATION_FAILED = 4294967294, +}; + +enum rseq_flags { + RSEQ_FLAG_UNREGISTER = 1, +}; + +enum rseq_cs_flags { + RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT = 1, + RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL = 2, + RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE = 4, +}; + +struct rseq_cs { + __u32 version; + __u32 flags; + __u64 start_ip; + __u64 post_commit_offset; + __u64 abort_ip; +}; + +struct trace_event_raw_rseq_update { + struct trace_entry ent; + s32 cpu_id; + char __data[0]; +}; + +struct trace_event_raw_rseq_ip_fixup { + struct trace_entry ent; + long unsigned int regs_ip; + long unsigned int start_ip; + long unsigned int post_commit_offset; + long unsigned int abort_ip; + char __data[0]; +}; + +struct trace_event_data_offsets_rseq_update {}; + +struct trace_event_data_offsets_rseq_ip_fixup {}; + +typedef void (*btf_trace_rseq_update)(void *, struct task_struct *); + +typedef void (*btf_trace_rseq_ip_fixup)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + +struct watch; + +struct watch_list { + struct callback_head rcu; + struct hlist_head watchers; + void (*release_watch)(struct watch *); + spinlock_t lock; +}; + +enum watch_notification_type { + WATCH_TYPE_META = 0, + WATCH_TYPE_KEY_NOTIFY = 1, + WATCH_TYPE__NR = 2, +}; + +enum watch_meta_notification_subtype { + WATCH_META_REMOVAL_NOTIFICATION = 0, + WATCH_META_LOSS_NOTIFICATION = 1, +}; + +struct watch_notification { + __u32 type: 24; + __u32 subtype: 8; + __u32 info; +}; + +struct watch_notification_type_filter { + __u32 type; + __u32 info_filter; + __u32 info_mask; + __u32 subtype_filter[8]; +}; + +struct 
watch_notification_filter { + __u32 nr_filters; + __u32 __reserved; + struct watch_notification_type_filter filters[0]; +}; + +struct watch_notification_removal { + struct watch_notification watch; + __u64 id; +}; + +struct watch_type_filter { + enum watch_notification_type type; + __u32 subtype_filter[1]; + __u32 info_filter; + __u32 info_mask; +}; + +struct watch_filter { + union { + struct callback_head rcu; + long unsigned int type_filter[2]; + }; + u32 nr_filters; + struct watch_type_filter filters[0]; +}; + +struct watch_queue { + struct callback_head rcu; + struct watch_filter *filter; + struct pipe_inode_info *pipe; + struct hlist_head watches; + struct page **notes; + long unsigned int *notes_bitmap; + struct kref usage; + spinlock_t lock; + unsigned int nr_notes; + unsigned int nr_pages; + bool defunct; +}; + +struct watch { + union { + struct callback_head rcu; + u32 info_id; + }; + struct watch_queue *queue; + struct hlist_node queue_node; + struct watch_list *watch_list; + struct hlist_node list_node; + const struct cred *cred; + void *private; + u64 id; + struct kref usage; +}; + +struct pkcs7_message; + +typedef int __kernel_rwf_t; + +enum positive_aop_returns { + AOP_WRITEPAGE_ACTIVATE = 524288, + AOP_TRUNCATED_PAGE = 524289, +}; + +struct vm_event_state { + long unsigned int event[95]; +}; + +enum iter_type { + ITER_IOVEC = 4, + ITER_KVEC = 8, + ITER_BVEC = 16, + ITER_PIPE = 32, + ITER_DISCARD = 64, +}; + +enum mapping_flags { + AS_EIO = 0, + AS_ENOSPC = 1, + AS_MM_ALL_LOCKS = 2, + AS_UNEVICTABLE = 3, + AS_EXITING = 4, + AS_NO_WRITEBACK_TAGS = 5, + AS_THP_SUPPORT = 6, +}; + +struct wait_page_key { + struct page *page; + int bit_nr; + int page_match; +}; + +struct pagevec { + unsigned char nr; + bool percpu_pvec_drained; + struct page *pages[15]; +}; + +struct fid { + union { + struct { + u32 ino; + u32 gen; + u32 parent_ino; + u32 parent_gen; + } i32; + struct { + u32 block; + u16 partref; + u16 parent_partref; + u32 generation; + u32 parent_block; + u32 parent_generation; + } udf; + __u32 raw[0]; + }; +}; + +struct trace_event_raw_mm_filemap_op_page_cache { + struct trace_entry ent; + long unsigned int pfn; + long unsigned int i_ino; + long unsigned int index; + dev_t s_dev; + char __data[0]; +}; + +struct trace_event_raw_filemap_set_wb_err { + struct trace_entry ent; + long unsigned int i_ino; + dev_t s_dev; + errseq_t errseq; + char __data[0]; +}; + +struct trace_event_raw_file_check_and_advance_wb_err { + struct trace_entry ent; + struct file *file; + long unsigned int i_ino; + dev_t s_dev; + errseq_t old; + errseq_t new; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_filemap_op_page_cache {}; + +struct trace_event_data_offsets_filemap_set_wb_err {}; + +struct trace_event_data_offsets_file_check_and_advance_wb_err {}; + +typedef void (*btf_trace_mm_filemap_delete_from_page_cache)(void *, struct page *); + +typedef void (*btf_trace_mm_filemap_add_to_page_cache)(void *, struct page *); + +typedef void (*btf_trace_filemap_set_wb_err)(void *, struct address_space *, errseq_t); + +typedef void (*btf_trace_file_check_and_advance_wb_err)(void *, struct file *, errseq_t); + +enum behavior { + EXCLUSIVE = 0, + SHARED = 1, + DROP = 2, +}; + +struct reciprocal_value { + u32 m; + u8 sh1; + u8 sh2; +}; + +struct array_cache; + +struct kmem_cache_node; + +struct kmem_cache { + struct array_cache *cpu_cache; + unsigned int batchcount; + unsigned int limit; + unsigned int shared; + unsigned int size; + struct reciprocal_value reciprocal_buffer_size; + slab_flags_t flags; 
+ unsigned int num; + unsigned int gfporder; + gfp_t allocflags; + size_t colour; + unsigned int colour_off; + struct kmem_cache *freelist_cache; + unsigned int freelist_size; + void (*ctor)(void *); + const char *name; + struct list_head list; + int refcount; + int object_size; + int align; + unsigned int useroffset; + unsigned int usersize; + struct kmem_cache_node *node[128]; +}; + +struct alien_cache; + +struct kmem_cache_node { + spinlock_t list_lock; + struct list_head slabs_partial; + struct list_head slabs_full; + struct list_head slabs_free; + long unsigned int total_slabs; + long unsigned int free_slabs; + long unsigned int free_objects; + unsigned int free_limit; + unsigned int colour_next; + struct array_cache *shared; + struct alien_cache **alien; + long unsigned int next_reap; + int free_touched; +}; + +enum oom_constraint { + CONSTRAINT_NONE = 0, + CONSTRAINT_CPUSET = 1, + CONSTRAINT_MEMORY_POLICY = 2, + CONSTRAINT_MEMCG = 3, +}; + +struct oom_control { + struct zonelist *zonelist; + nodemask_t *nodemask; + struct mem_cgroup *memcg; + const gfp_t gfp_mask; + const int order; + long unsigned int totalpages; + struct task_struct *chosen; + long int chosen_points; + enum oom_constraint constraint; +}; + +struct mmu_table_batch { + struct callback_head rcu; + unsigned int nr; + void *tables[0]; +}; + +struct mmu_gather_batch { + struct mmu_gather_batch *next; + unsigned int nr; + unsigned int max; + struct page *pages[0]; +}; + +struct mmu_gather { + struct mm_struct *mm; + struct mmu_table_batch *batch; + long unsigned int start; + long unsigned int end; + unsigned int fullmm: 1; + unsigned int need_flush_all: 1; + unsigned int freed_tables: 1; + unsigned int cleared_ptes: 1; + unsigned int cleared_pmds: 1; + unsigned int cleared_puds: 1; + unsigned int cleared_p4ds: 1; + unsigned int vma_exec: 1; + unsigned int vma_huge: 1; + unsigned int batch_count; + struct mmu_gather_batch *active; + struct mmu_gather_batch local; + struct page *__pages[8]; +}; + +enum compact_priority { + COMPACT_PRIO_SYNC_FULL = 0, + MIN_COMPACT_PRIORITY = 0, + COMPACT_PRIO_SYNC_LIGHT = 1, + MIN_COMPACT_COSTLY_PRIORITY = 1, + DEF_COMPACT_PRIORITY = 1, + COMPACT_PRIO_ASYNC = 2, + INIT_COMPACT_PRIORITY = 2, +}; + +enum compact_result { + COMPACT_NOT_SUITABLE_ZONE = 0, + COMPACT_SKIPPED = 1, + COMPACT_DEFERRED = 2, + COMPACT_NO_SUITABLE_PAGE = 3, + COMPACT_CONTINUE = 4, + COMPACT_COMPLETE = 5, + COMPACT_PARTIAL_SKIPPED = 6, + COMPACT_CONTENDED = 7, + COMPACT_SUCCESS = 8, +}; + +struct trace_event_raw_oom_score_adj_update { + struct trace_entry ent; + pid_t pid; + char comm[16]; + short int oom_score_adj; + char __data[0]; +}; + +struct trace_event_raw_reclaim_retry_zone { + struct trace_entry ent; + int node; + int zone_idx; + int order; + long unsigned int reclaimable; + long unsigned int available; + long unsigned int min_wmark; + int no_progress_loops; + bool wmark_check; + char __data[0]; +}; + +struct trace_event_raw_mark_victim { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_wake_reaper { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_start_task_reaping { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_finish_task_reaping { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_skip_task_reaping { + struct trace_entry ent; + int pid; + char __data[0]; +}; + +struct trace_event_raw_compact_retry { + struct trace_entry ent; + int order; + int priority; + int 
result; + int retries; + int max_retries; + bool ret; + char __data[0]; +}; + +struct trace_event_data_offsets_oom_score_adj_update {}; + +struct trace_event_data_offsets_reclaim_retry_zone {}; + +struct trace_event_data_offsets_mark_victim {}; + +struct trace_event_data_offsets_wake_reaper {}; + +struct trace_event_data_offsets_start_task_reaping {}; + +struct trace_event_data_offsets_finish_task_reaping {}; + +struct trace_event_data_offsets_skip_task_reaping {}; + +struct trace_event_data_offsets_compact_retry {}; + +typedef void (*btf_trace_oom_score_adj_update)(void *, struct task_struct *); + +typedef void (*btf_trace_reclaim_retry_zone)(void *, struct zoneref *, int, long unsigned int, long unsigned int, long unsigned int, int, bool); + +typedef void (*btf_trace_mark_victim)(void *, int); + +typedef void (*btf_trace_wake_reaper)(void *, int); + +typedef void (*btf_trace_start_task_reaping)(void *, int); + +typedef void (*btf_trace_finish_task_reaping)(void *, int); + +typedef void (*btf_trace_skip_task_reaping)(void *, int); + +typedef void (*btf_trace_compact_retry)(void *, int, enum compact_priority, enum compact_result, int, int, bool); + +enum wb_congested_state { + WB_async_congested = 0, + WB_sync_congested = 1, +}; + +enum { + XA_CHECK_SCHED = 4096, +}; + +enum wb_state { + WB_registered = 0, + WB_writeback_running = 1, + WB_has_dirty_io = 2, + WB_start_all = 3, +}; + +enum { + BLK_RW_ASYNC = 0, + BLK_RW_SYNC = 1, +}; + +struct wb_lock_cookie { + bool locked; + long unsigned int flags; +}; + +typedef int (*writepage_t)(struct page *, struct writeback_control *, void *); + +struct dirty_throttle_control { + struct wb_domain *dom; + struct dirty_throttle_control *gdtc; + struct bdi_writeback *wb; + struct fprop_local_percpu *wb_completions; + long unsigned int avail; + long unsigned int dirty; + long unsigned int thresh; + long unsigned int bg_thresh; + long unsigned int wb_dirty; + long unsigned int wb_thresh; + long unsigned int wb_bg_thresh; + long unsigned int pos_ratio; +}; + +typedef void compound_page_dtor(struct page *); + +typedef struct {} local_lock_t; + +struct trace_event_raw_mm_lru_insertion { + struct trace_entry ent; + struct page *page; + long unsigned int pfn; + int lru; + long unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_mm_lru_activate { + struct trace_entry ent; + struct page *page; + long unsigned int pfn; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_lru_insertion {}; + +struct trace_event_data_offsets_mm_lru_activate {}; + +typedef void (*btf_trace_mm_lru_insertion)(void *, struct page *, int); + +typedef void (*btf_trace_mm_lru_activate)(void *, struct page *); + +struct lru_rotate { + local_lock_t lock; + struct pagevec pvec; +}; + +struct lru_pvecs { + local_lock_t lock; + struct pagevec lru_add; + struct pagevec lru_deactivate_file; + struct pagevec lru_deactivate; + struct pagevec lru_lazyfree; + struct pagevec activate_page; +}; + +enum lruvec_flags { + LRUVEC_CONGESTED = 0, +}; + +enum pgdat_flags { + PGDAT_DIRTY = 0, + PGDAT_WRITEBACK = 1, + PGDAT_RECLAIM_LOCKED = 2, +}; + +struct reclaim_stat { + unsigned int nr_dirty; + unsigned int nr_unqueued_dirty; + unsigned int nr_congested; + unsigned int nr_writeback; + unsigned int nr_immediate; + unsigned int nr_pageout; + unsigned int nr_activate[2]; + unsigned int nr_ref_keep; + unsigned int nr_unmap_fail; + unsigned int nr_lazyfree_fail; +}; + +enum ttu_flags { + TTU_MIGRATION = 1, + TTU_MUNLOCK = 2, + TTU_SPLIT_HUGE_PMD = 4, + TTU_IGNORE_MLOCK = 8, + 
TTU_IGNORE_HWPOISON = 32, + TTU_BATCH_FLUSH = 64, + TTU_RMAP_LOCKED = 128, + TTU_SPLIT_FREEZE = 256, +}; + +struct trace_event_raw_mm_vmscan_kswapd_sleep { + struct trace_entry ent; + int nid; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_kswapd_wake { + struct trace_entry ent; + int nid; + int zid; + int order; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_wakeup_kswapd { + struct trace_entry ent; + int nid; + int zid; + int order; + gfp_t gfp_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_direct_reclaim_begin_template { + struct trace_entry ent; + int order; + gfp_t gfp_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_direct_reclaim_end_template { + struct trace_entry ent; + long unsigned int nr_reclaimed; + char __data[0]; +}; + +struct trace_event_raw_mm_shrink_slab_start { + struct trace_entry ent; + struct shrinker *shr; + void *shrink; + int nid; + long int nr_objects_to_shrink; + gfp_t gfp_flags; + long unsigned int cache_items; + long long unsigned int delta; + long unsigned int total_scan; + int priority; + char __data[0]; +}; + +struct trace_event_raw_mm_shrink_slab_end { + struct trace_entry ent; + struct shrinker *shr; + int nid; + void *shrink; + long int unused_scan; + long int new_scan; + int retval; + long int total_scan; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_lru_isolate { + struct trace_entry ent; + int highest_zoneidx; + int order; + long unsigned int nr_requested; + long unsigned int nr_scanned; + long unsigned int nr_skipped; + long unsigned int nr_taken; + isolate_mode_t isolate_mode; + int lru; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_writepage { + struct trace_entry ent; + long unsigned int pfn; + int reclaim_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_lru_shrink_inactive { + struct trace_entry ent; + int nid; + long unsigned int nr_scanned; + long unsigned int nr_reclaimed; + long unsigned int nr_dirty; + long unsigned int nr_writeback; + long unsigned int nr_congested; + long unsigned int nr_immediate; + unsigned int nr_activate0; + unsigned int nr_activate1; + long unsigned int nr_ref_keep; + long unsigned int nr_unmap_fail; + int priority; + int reclaim_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_lru_shrink_active { + struct trace_entry ent; + int nid; + long unsigned int nr_taken; + long unsigned int nr_active; + long unsigned int nr_deactivated; + long unsigned int nr_referenced; + int priority; + int reclaim_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_inactive_list_is_low { + struct trace_entry ent; + int nid; + int reclaim_idx; + long unsigned int total_inactive; + long unsigned int inactive; + long unsigned int total_active; + long unsigned int active; + long unsigned int ratio; + int reclaim_flags; + char __data[0]; +}; + +struct trace_event_raw_mm_vmscan_node_reclaim_begin { + struct trace_entry ent; + int nid; + int order; + gfp_t gfp_flags; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_vmscan_kswapd_sleep {}; + +struct trace_event_data_offsets_mm_vmscan_kswapd_wake {}; + +struct trace_event_data_offsets_mm_vmscan_wakeup_kswapd {}; + +struct trace_event_data_offsets_mm_vmscan_direct_reclaim_begin_template {}; + +struct trace_event_data_offsets_mm_vmscan_direct_reclaim_end_template {}; + +struct trace_event_data_offsets_mm_shrink_slab_start {}; + +struct trace_event_data_offsets_mm_shrink_slab_end {}; + +struct trace_event_data_offsets_mm_vmscan_lru_isolate {}; + +struct 
trace_event_data_offsets_mm_vmscan_writepage {}; + +struct trace_event_data_offsets_mm_vmscan_lru_shrink_inactive {}; + +struct trace_event_data_offsets_mm_vmscan_lru_shrink_active {}; + +struct trace_event_data_offsets_mm_vmscan_inactive_list_is_low {}; + +struct trace_event_data_offsets_mm_vmscan_node_reclaim_begin {}; + +typedef void (*btf_trace_mm_vmscan_kswapd_sleep)(void *, int); + +typedef void (*btf_trace_mm_vmscan_kswapd_wake)(void *, int, int, int); + +typedef void (*btf_trace_mm_vmscan_wakeup_kswapd)(void *, int, int, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_direct_reclaim_begin)(void *, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_memcg_reclaim_begin)(void *, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_memcg_softlimit_reclaim_begin)(void *, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_direct_reclaim_end)(void *, long unsigned int); + +typedef void (*btf_trace_mm_vmscan_memcg_reclaim_end)(void *, long unsigned int); + +typedef void (*btf_trace_mm_vmscan_memcg_softlimit_reclaim_end)(void *, long unsigned int); + +typedef void (*btf_trace_mm_shrink_slab_start)(void *, struct shrinker *, struct shrink_control *, long int, long unsigned int, long long unsigned int, long unsigned int, int); + +typedef void (*btf_trace_mm_shrink_slab_end)(void *, struct shrinker *, int, int, long int, long int, long int); + +typedef void (*btf_trace_mm_vmscan_lru_isolate)(void *, int, int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, isolate_mode_t, int); + +typedef void (*btf_trace_mm_vmscan_writepage)(void *, struct page *); + +typedef void (*btf_trace_mm_vmscan_lru_shrink_inactive)(void *, int, long unsigned int, long unsigned int, struct reclaim_stat *, int, int); + +typedef void (*btf_trace_mm_vmscan_lru_shrink_active)(void *, int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, int, int); + +typedef void (*btf_trace_mm_vmscan_inactive_list_is_low)(void *, int, int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, int); + +typedef void (*btf_trace_mm_vmscan_node_reclaim_begin)(void *, int, int, gfp_t); + +typedef void (*btf_trace_mm_vmscan_node_reclaim_end)(void *, long unsigned int); + +struct scan_control { + long unsigned int nr_to_reclaim; + nodemask_t *nodemask; + struct mem_cgroup *target_mem_cgroup; + long unsigned int anon_cost; + long unsigned int file_cost; + unsigned int may_deactivate: 2; + unsigned int force_deactivate: 1; + unsigned int skipped_deactivate: 1; + unsigned int may_writepage: 1; + unsigned int may_unmap: 1; + unsigned int may_swap: 1; + unsigned int memcg_low_reclaim: 1; + unsigned int memcg_low_skipped: 1; + unsigned int hibernation_mode: 1; + unsigned int compaction_ready: 1; + unsigned int cache_trim_mode: 1; + unsigned int file_is_tiny: 1; + s8 order; + s8 priority; + s8 reclaim_idx; + gfp_t gfp_mask; + long unsigned int nr_scanned; + long unsigned int nr_reclaimed; + struct { + unsigned int dirty; + unsigned int unqueued_dirty; + unsigned int congested; + unsigned int writeback; + unsigned int immediate; + unsigned int file_taken; + unsigned int taken; + } nr; + struct reclaim_state reclaim_state; +}; + +typedef enum { + PAGE_KEEP = 0, + PAGE_ACTIVATE = 1, + PAGE_SUCCESS = 2, + PAGE_CLEAN = 3, +} pageout_t; + +enum page_references { + PAGEREF_RECLAIM = 0, + PAGEREF_RECLAIM_CLEAN = 1, + PAGEREF_KEEP = 2, + PAGEREF_ACTIVATE = 3, +}; + +enum scan_balance { + SCAN_EQUAL = 0, + SCAN_FRACT = 1, + SCAN_ANON = 2, + SCAN_FILE = 3, +}; + +enum 
transparent_hugepage_flag { + TRANSPARENT_HUGEPAGE_FLAG = 0, + TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG = 1, + TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG = 2, + TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG = 3, + TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG = 4, + TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG = 5, + TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG = 6, + TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG = 7, +}; + +struct xattr { + const char *name; + void *value; + size_t value_len; +}; + +struct constant_table { + const char *name; + int value; +}; + +enum { + MPOL_DEFAULT = 0, + MPOL_PREFERRED = 1, + MPOL_BIND = 2, + MPOL_INTERLEAVE = 3, + MPOL_LOCAL = 4, + MPOL_MAX = 5, +}; + +struct shared_policy { + struct rb_root root; + rwlock_t lock; +}; + +struct simple_xattrs { + struct list_head head; + spinlock_t lock; +}; + +struct simple_xattr { + struct list_head list; + char *name; + size_t size; + char value[0]; +}; + +enum fid_type { + FILEID_ROOT = 0, + FILEID_INO32_GEN = 1, + FILEID_INO32_GEN_PARENT = 2, + FILEID_BTRFS_WITHOUT_PARENT = 77, + FILEID_BTRFS_WITH_PARENT = 78, + FILEID_BTRFS_WITH_PARENT_ROOT = 79, + FILEID_UDF_WITHOUT_PARENT = 81, + FILEID_UDF_WITH_PARENT = 82, + FILEID_NILFS_WITHOUT_PARENT = 97, + FILEID_NILFS_WITH_PARENT = 98, + FILEID_FAT_WITHOUT_PARENT = 113, + FILEID_FAT_WITH_PARENT = 114, + FILEID_LUSTRE = 151, + FILEID_KERNFS = 254, + FILEID_INVALID = 255, +}; + +struct shmem_inode_info { + spinlock_t lock; + unsigned int seals; + long unsigned int flags; + long unsigned int alloced; + long unsigned int swapped; + struct list_head shrinklist; + struct list_head swaplist; + struct shared_policy policy; + struct simple_xattrs xattrs; + atomic_t stop_eviction; + struct inode vfs_inode; +}; + +struct shmem_sb_info { + long unsigned int max_blocks; + struct percpu_counter used_blocks; + long unsigned int max_inodes; + long unsigned int free_inodes; + spinlock_t stat_lock; + umode_t mode; + unsigned char huge; + kuid_t uid; + kgid_t gid; + bool full_inums; + ino_t next_ino; + ino_t *ino_batch; + struct mempolicy *mpol; + spinlock_t shrinklist_lock; + struct list_head shrinklist; + long unsigned int shrinklist_len; +}; + +enum sgp_type { + SGP_READ = 0, + SGP_CACHE = 1, + SGP_NOHUGE = 2, + SGP_HUGE = 3, + SGP_WRITE = 4, + SGP_FALLOC = 5, +}; + +struct shmem_falloc { + wait_queue_head_t *waitq; + long unsigned int start; + long unsigned int next; + long unsigned int nr_falloced; + long unsigned int nr_unswapped; +}; + +struct shmem_options { + long long unsigned int blocks; + long long unsigned int inodes; + struct mempolicy *mpol; + kuid_t uid; + kgid_t gid; + umode_t mode; + bool full_inums; + int huge; + int seen; +}; + +enum shmem_param { + Opt_gid = 0, + Opt_huge = 1, + Opt_mode = 2, + Opt_mpol = 3, + Opt_nr_blocks = 4, + Opt_nr_inodes = 5, + Opt_size = 6, + Opt_uid = 7, + Opt_inode32 = 8, + Opt_inode64 = 9, +}; + +enum writeback_stat_item { + NR_DIRTY_THRESHOLD = 0, + NR_DIRTY_BG_THRESHOLD = 1, + NR_VM_WRITEBACK_STAT_ITEMS = 2, +}; + +struct contig_page_info { + long unsigned int free_pages; + long unsigned int free_blocks_total; + long unsigned int free_blocks_suitable; +}; + +struct radix_tree_iter { + long unsigned int index; + long unsigned int next_index; + long unsigned int tags; + struct xa_node *node; +}; + +enum { + RADIX_TREE_ITER_TAG_MASK = 15, + RADIX_TREE_ITER_TAGGED = 16, + RADIX_TREE_ITER_CONTIG = 32, +}; + +enum mminit_level { + MMINIT_WARNING = 0, + MMINIT_VERIFY = 1, + MMINIT_TRACE = 2, +}; + +struct pcpu_group_info { + int nr_units; + long unsigned int 
base_offset; + unsigned int *cpu_map; +}; + +struct pcpu_alloc_info { + size_t static_size; + size_t reserved_size; + size_t dyn_size; + size_t unit_size; + size_t atom_size; + size_t alloc_size; + size_t __ai_size; + int nr_groups; + struct pcpu_group_info groups[0]; +}; + +typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int, size_t, size_t); + +typedef void (*pcpu_fc_free_fn_t)(void *, size_t); + +typedef int pcpu_fc_cpu_distance_fn_t(unsigned int, unsigned int); + +struct trace_event_raw_percpu_alloc_percpu { + struct trace_entry ent; + bool reserved; + bool is_atomic; + size_t size; + size_t align; + void *base_addr; + int off; + void *ptr; + char __data[0]; +}; + +struct trace_event_raw_percpu_free_percpu { + struct trace_entry ent; + void *base_addr; + int off; + void *ptr; + char __data[0]; +}; + +struct trace_event_raw_percpu_alloc_percpu_fail { + struct trace_entry ent; + bool reserved; + bool is_atomic; + size_t size; + size_t align; + char __data[0]; +}; + +struct trace_event_raw_percpu_create_chunk { + struct trace_entry ent; + void *base_addr; + char __data[0]; +}; + +struct trace_event_raw_percpu_destroy_chunk { + struct trace_entry ent; + void *base_addr; + char __data[0]; +}; + +struct trace_event_data_offsets_percpu_alloc_percpu {}; + +struct trace_event_data_offsets_percpu_free_percpu {}; + +struct trace_event_data_offsets_percpu_alloc_percpu_fail {}; + +struct trace_event_data_offsets_percpu_create_chunk {}; + +struct trace_event_data_offsets_percpu_destroy_chunk {}; + +typedef void (*btf_trace_percpu_alloc_percpu)(void *, bool, bool, size_t, size_t, void *, int, void *); + +typedef void (*btf_trace_percpu_free_percpu)(void *, void *, int, void *); + +typedef void (*btf_trace_percpu_alloc_percpu_fail)(void *, bool, bool, size_t, size_t); + +typedef void (*btf_trace_percpu_create_chunk)(void *, void *); + +typedef void (*btf_trace_percpu_destroy_chunk)(void *, void *); + +enum pcpu_chunk_type { + PCPU_CHUNK_ROOT = 0, + PCPU_CHUNK_MEMCG = 1, + PCPU_NR_CHUNK_TYPES = 2, + PCPU_FAIL_ALLOC = 2, +}; + +struct pcpu_block_md { + int scan_hint; + int scan_hint_start; + int contig_hint; + int contig_hint_start; + int left_free; + int right_free; + int first_free; + int nr_bits; +}; + +struct pcpu_chunk { + struct list_head list; + int free_bytes; + struct pcpu_block_md chunk_md; + void *base_addr; + long unsigned int *alloc_map; + long unsigned int *bound_map; + struct pcpu_block_md *md_blocks; + void *data; + bool immutable; + int start_offset; + int end_offset; + struct obj_cgroup **obj_cgroups; + int nr_pages; + int nr_populated; + int nr_empty_pop_pages; + long unsigned int populated[0]; +}; + +struct trace_event_raw_kmem_alloc { + struct trace_entry ent; + long unsigned int call_site; + const void *ptr; + size_t bytes_req; + size_t bytes_alloc; + gfp_t gfp_flags; + char __data[0]; +}; + +struct trace_event_raw_kmem_alloc_node { + struct trace_entry ent; + long unsigned int call_site; + const void *ptr; + size_t bytes_req; + size_t bytes_alloc; + gfp_t gfp_flags; + int node; + char __data[0]; +}; + +struct trace_event_raw_kmem_free { + struct trace_entry ent; + long unsigned int call_site; + const void *ptr; + char __data[0]; +}; + +struct trace_event_raw_mm_page_free { + struct trace_entry ent; + long unsigned int pfn; + unsigned int order; + char __data[0]; +}; + +struct trace_event_raw_mm_page_free_batched { + struct trace_entry ent; + long unsigned int pfn; + char __data[0]; +}; + +struct trace_event_raw_mm_page_alloc { + struct trace_entry ent; + long unsigned int pfn; + 
unsigned int order; + gfp_t gfp_flags; + int migratetype; + char __data[0]; +}; + +struct trace_event_raw_mm_page { + struct trace_entry ent; + long unsigned int pfn; + unsigned int order; + int migratetype; + char __data[0]; +}; + +struct trace_event_raw_mm_page_pcpu_drain { + struct trace_entry ent; + long unsigned int pfn; + unsigned int order; + int migratetype; + char __data[0]; +}; + +struct trace_event_raw_mm_page_alloc_extfrag { + struct trace_entry ent; + long unsigned int pfn; + int alloc_order; + int fallback_order; + int alloc_migratetype; + int fallback_migratetype; + int change_ownership; + char __data[0]; +}; + +struct trace_event_raw_rss_stat { + struct trace_entry ent; + unsigned int mm_id; + unsigned int curr; + int member; + long int size; + char __data[0]; +}; + +struct trace_event_data_offsets_kmem_alloc {}; + +struct trace_event_data_offsets_kmem_alloc_node {}; + +struct trace_event_data_offsets_kmem_free {}; + +struct trace_event_data_offsets_mm_page_free {}; + +struct trace_event_data_offsets_mm_page_free_batched {}; + +struct trace_event_data_offsets_mm_page_alloc {}; + +struct trace_event_data_offsets_mm_page {}; + +struct trace_event_data_offsets_mm_page_pcpu_drain {}; + +struct trace_event_data_offsets_mm_page_alloc_extfrag {}; + +struct trace_event_data_offsets_rss_stat {}; + +typedef void (*btf_trace_kmalloc)(void *, long unsigned int, const void *, size_t, size_t, gfp_t); + +typedef void (*btf_trace_kmem_cache_alloc)(void *, long unsigned int, const void *, size_t, size_t, gfp_t); + +typedef void (*btf_trace_kmalloc_node)(void *, long unsigned int, const void *, size_t, size_t, gfp_t, int); + +typedef void (*btf_trace_kmem_cache_alloc_node)(void *, long unsigned int, const void *, size_t, size_t, gfp_t, int); + +typedef void (*btf_trace_kfree)(void *, long unsigned int, const void *); + +typedef void (*btf_trace_kmem_cache_free)(void *, long unsigned int, const void *); + +typedef void (*btf_trace_mm_page_free)(void *, struct page *, unsigned int); + +typedef void (*btf_trace_mm_page_free_batched)(void *, struct page *); + +typedef void (*btf_trace_mm_page_alloc)(void *, struct page *, unsigned int, gfp_t, int); + +typedef void (*btf_trace_mm_page_alloc_zone_locked)(void *, struct page *, unsigned int, int); + +typedef void (*btf_trace_mm_page_pcpu_drain)(void *, struct page *, unsigned int, int); + +typedef void (*btf_trace_mm_page_alloc_extfrag)(void *, struct page *, int, int, int, int); + +typedef void (*btf_trace_rss_stat)(void *, struct mm_struct *, int, long int); + +enum slab_state { + DOWN = 0, + PARTIAL = 1, + PARTIAL_NODE = 2, + UP = 3, + FULL = 4, +}; + +struct kmalloc_info_struct { + const char *name[3]; + unsigned int size; +}; + +struct slabinfo { + long unsigned int active_objs; + long unsigned int num_objs; + long unsigned int active_slabs; + long unsigned int num_slabs; + long unsigned int shared_avail; + unsigned int limit; + unsigned int batchcount; + unsigned int shared; + unsigned int objects_per_slab; + unsigned int cache_order; +}; + +enum pageblock_bits { + PB_migrate = 0, + PB_migrate_end = 2, + PB_migrate_skip = 3, + NR_PAGEBLOCK_BITS = 4, +}; + +struct node___2 { + struct device dev; + struct list_head access_list; + struct work_struct node_work; + struct list_head cache_attrs; + struct device *cache_dev; +}; + +struct alloc_context { + struct zonelist *zonelist; + nodemask_t *nodemask; + struct zoneref *preferred_zoneref; + int migratetype; + enum zone_type highest_zoneidx; + bool spread_dirty_pages; +}; + +struct 
trace_event_raw_mm_compaction_isolate_template { + struct trace_entry ent; + long unsigned int start_pfn; + long unsigned int end_pfn; + long unsigned int nr_scanned; + long unsigned int nr_taken; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_migratepages { + struct trace_entry ent; + long unsigned int nr_migrated; + long unsigned int nr_failed; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_begin { + struct trace_entry ent; + long unsigned int zone_start; + long unsigned int migrate_pfn; + long unsigned int free_pfn; + long unsigned int zone_end; + bool sync; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_end { + struct trace_entry ent; + long unsigned int zone_start; + long unsigned int migrate_pfn; + long unsigned int free_pfn; + long unsigned int zone_end; + bool sync; + int status; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_try_to_compact_pages { + struct trace_entry ent; + int order; + gfp_t gfp_mask; + int prio; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_suitable_template { + struct trace_entry ent; + int nid; + enum zone_type idx; + int order; + int ret; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_defer_template { + struct trace_entry ent; + int nid; + enum zone_type idx; + int order; + unsigned int considered; + unsigned int defer_shift; + int order_failed; + char __data[0]; +}; + +struct trace_event_raw_mm_compaction_kcompactd_sleep { + struct trace_entry ent; + int nid; + char __data[0]; +}; + +struct trace_event_raw_kcompactd_wake_template { + struct trace_entry ent; + int nid; + int order; + enum zone_type highest_zoneidx; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_compaction_isolate_template {}; + +struct trace_event_data_offsets_mm_compaction_migratepages {}; + +struct trace_event_data_offsets_mm_compaction_begin {}; + +struct trace_event_data_offsets_mm_compaction_end {}; + +struct trace_event_data_offsets_mm_compaction_try_to_compact_pages {}; + +struct trace_event_data_offsets_mm_compaction_suitable_template {}; + +struct trace_event_data_offsets_mm_compaction_defer_template {}; + +struct trace_event_data_offsets_mm_compaction_kcompactd_sleep {}; + +struct trace_event_data_offsets_kcompactd_wake_template {}; + +typedef void (*btf_trace_mm_compaction_isolate_migratepages)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + +typedef void (*btf_trace_mm_compaction_isolate_freepages)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + +typedef void (*btf_trace_mm_compaction_migratepages)(void *, long unsigned int, int, struct list_head *); + +typedef void (*btf_trace_mm_compaction_begin)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, bool); + +typedef void (*btf_trace_mm_compaction_end)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, bool, int); + +typedef void (*btf_trace_mm_compaction_try_to_compact_pages)(void *, int, gfp_t, int); + +typedef void (*btf_trace_mm_compaction_finished)(void *, struct zone *, int, int); + +typedef void (*btf_trace_mm_compaction_suitable)(void *, struct zone *, int, int); + +typedef void (*btf_trace_mm_compaction_deferred)(void *, struct zone *, int); + +typedef void (*btf_trace_mm_compaction_defer_compaction)(void *, struct zone *, int); + +typedef void (*btf_trace_mm_compaction_defer_reset)(void *, struct zone *, int); + +typedef void (*btf_trace_mm_compaction_kcompactd_sleep)(void *, 
int); + +typedef void (*btf_trace_mm_compaction_wakeup_kcompactd)(void *, int, int, enum zone_type); + +typedef void (*btf_trace_mm_compaction_kcompactd_wake)(void *, int, int, enum zone_type); + +typedef enum { + ISOLATE_ABORT = 0, + ISOLATE_NONE = 1, + ISOLATE_SUCCESS = 2, +} isolate_migrate_t; + +struct anon_vma_chain { + struct vm_area_struct *vma; + struct anon_vma *anon_vma; + struct list_head same_vma; + struct rb_node rb; + long unsigned int rb_subtree_last; +}; + +struct rb_augment_callbacks { + void (*propagate)(struct rb_node *, struct rb_node *); + void (*copy)(struct rb_node *, struct rb_node *); + void (*rotate)(struct rb_node *, struct rb_node *); +}; + +enum lru_status { + LRU_REMOVED = 0, + LRU_REMOVED_RETRY = 1, + LRU_ROTATE = 2, + LRU_SKIP = 3, + LRU_RETRY = 4, +}; + +typedef enum lru_status (*list_lru_walk_cb)(struct list_head *, struct list_lru_one *, spinlock_t *, void *); + +typedef struct { + long unsigned int pd; +} hugepd_t; + +struct migration_target_control { + int nid; + nodemask_t *nmask; + gfp_t gfp_mask; +}; + +struct follow_page_context { + struct dev_pagemap *pgmap; + unsigned int page_mask; +}; + +typedef struct { + u64 val; +} pfn_t; + +typedef unsigned int pgtbl_mod_mask; + +struct zap_details { + struct address_space *check_mapping; + long unsigned int first_index; + long unsigned int last_index; +}; + +typedef int (*pte_fn_t)(pte_t *, long unsigned int, void *); + +enum { + SWP_USED = 1, + SWP_WRITEOK = 2, + SWP_DISCARDABLE = 4, + SWP_DISCARDING = 8, + SWP_SOLIDSTATE = 16, + SWP_CONTINUED = 32, + SWP_BLKDEV = 64, + SWP_ACTIVATED = 128, + SWP_FS_OPS = 256, + SWP_AREA_DISCARD = 512, + SWP_PAGE_DISCARD = 1024, + SWP_STABLE_WRITES = 2048, + SWP_SYNCHRONOUS_IO = 4096, + SWP_VALID = 8192, + SWP_SCANNING = 16384, +}; + +struct copy_subpage_arg { + struct page *dst; + struct page *src; + struct vm_area_struct *vma; +}; + +struct mm_walk; + +struct mm_walk_ops { + int (*pgd_entry)(pgd_t *, long unsigned int, long unsigned int, struct mm_walk *); + int (*p4d_entry)(p4d_t *, long unsigned int, long unsigned int, struct mm_walk *); + int (*pud_entry)(pud_t *, long unsigned int, long unsigned int, struct mm_walk *); + int (*pmd_entry)(pmd_t *, long unsigned int, long unsigned int, struct mm_walk *); + int (*pte_entry)(pte_t *, long unsigned int, long unsigned int, struct mm_walk *); + int (*pte_hole)(long unsigned int, long unsigned int, int, struct mm_walk *); + int (*hugetlb_entry)(pte_t *, long unsigned int, long unsigned int, long unsigned int, struct mm_walk *); + int (*test_walk)(long unsigned int, long unsigned int, struct mm_walk *); + int (*pre_vma)(long unsigned int, long unsigned int, struct mm_walk *); + void (*post_vma)(struct mm_walk *); +}; + +enum page_walk_action { + ACTION_SUBTREE = 0, + ACTION_CONTINUE = 1, + ACTION_AGAIN = 2, +}; + +struct mm_walk { + const struct mm_walk_ops *ops; + struct mm_struct *mm; + pgd_t *pgd; + struct vm_area_struct *vma; + enum page_walk_action action; + bool no_vma; + void *private; +}; + +struct vm_unmapped_area_info { + long unsigned int flags; + long unsigned int length; + long unsigned int low_limit; + long unsigned int high_limit; + long unsigned int align_mask; + long unsigned int align_offset; +}; + +enum { + HUGETLB_SHMFS_INODE = 1, + HUGETLB_ANONHUGE_INODE = 2, +}; + +struct trace_event_raw_vm_unmapped_area { + struct trace_entry ent; + long unsigned int addr; + long unsigned int total_vm; + long unsigned int flags; + long unsigned int length; + long unsigned int low_limit; + long unsigned int high_limit; + 
long unsigned int align_mask; + long unsigned int align_offset; + char __data[0]; +}; + +struct trace_event_data_offsets_vm_unmapped_area {}; + +typedef void (*btf_trace_vm_unmapped_area)(void *, long unsigned int, struct vm_unmapped_area_info *); + +struct rmap_walk_control { + void *arg; + bool (*rmap_one)(struct page *, struct vm_area_struct *, long unsigned int, void *); + int (*done)(struct page *); + struct anon_vma * (*anon_lock)(struct page *); + bool (*invalid_vma)(struct vm_area_struct *, void *); +}; + +struct page_referenced_arg { + int mapcount; + int referenced; + long unsigned int vm_flags; + struct mem_cgroup *memcg; +}; + +struct vmap_area { + long unsigned int va_start; + long unsigned int va_end; + struct rb_node rb_node; + struct list_head list; + union { + long unsigned int subtree_max_size; + struct vm_struct *vm; + struct llist_node purge_list; + }; +}; + +struct vfree_deferred { + struct llist_head list; + struct work_struct wq; +}; + +enum fit_type { + NOTHING_FIT = 0, + FL_FIT_TYPE = 1, + LE_FIT_TYPE = 2, + RE_FIT_TYPE = 3, + NE_FIT_TYPE = 4, +}; + +struct vmap_block_queue { + spinlock_t lock; + struct list_head free; +}; + +struct vmap_block { + spinlock_t lock; + struct vmap_area *va; + long unsigned int free; + long unsigned int dirty; + long unsigned int dirty_min; + long unsigned int dirty_max; + struct list_head free_list; + struct callback_head callback_head; + struct list_head purge; +}; + +struct page_frag_cache { + void *va; + __u16 offset; + __u16 size; + unsigned int pagecnt_bias; + bool pfmemalloc; +}; + +enum zone_flags { + ZONE_BOOSTED_WATERMARK = 0, +}; + +struct mminit_pfnnid_cache { + long unsigned int last_start; + long unsigned int last_end; + int last_nid; +}; + +typedef int fpi_t; + +struct pcpu_drain { + struct zone *zone; + struct work_struct work; +}; + +enum mf_flags { + MF_COUNT_INCREASED = 1, + MF_ACTION_REQUIRED = 2, + MF_MUST_KILL = 4, + MF_SOFT_OFFLINE = 8, +}; + +struct madvise_walk_private { + struct mmu_gather *tlb; + bool pageout; +}; + +struct vma_swap_readahead { + short unsigned int win; + short unsigned int offset; + short unsigned int nr_pte; + pte_t *ptes; +}; + +union swap_header { + struct { + char reserved[4086]; + char magic[10]; + } magic; + struct { + char bootbits[1024]; + __u32 version; + __u32 last_page; + __u32 nr_badpages; + unsigned char sws_uuid[16]; + unsigned char sws_volume[16]; + __u32 padding[117]; + __u32 badpages[1]; + } info; +}; + +struct swap_extent { + struct rb_node rb_node; + long unsigned int start_page; + long unsigned int nr_pages; + sector_t start_block; +}; + +struct swap_slots_cache { + bool lock_initialized; + struct mutex alloc_lock; + swp_entry_t *slots; + int nr; + int cur; + spinlock_t free_lock; + swp_entry_t *slots_ret; + int n_ret; +}; + +struct frontswap_ops { + void (*init)(unsigned int); + int (*store)(unsigned int, long unsigned int, struct page *); + int (*load)(unsigned int, long unsigned int, struct page *); + void (*invalidate_page)(unsigned int, long unsigned int); + void (*invalidate_area)(unsigned int); + struct frontswap_ops *next; +}; + +struct crypto_comp { + struct crypto_tfm base; +}; + +struct zpool; + +struct zpool_ops { + int (*evict)(struct zpool *, long unsigned int); +}; + +enum zpool_mapmode { + ZPOOL_MM_RW = 0, + ZPOOL_MM_RO = 1, + ZPOOL_MM_WO = 2, + ZPOOL_MM_DEFAULT = 0, +}; + +struct zswap_pool { + struct zpool *zpool; + struct crypto_comp **tfm; + struct kref kref; + struct list_head list; + struct work_struct release_work; + struct work_struct shrink_work; 
+ struct hlist_node node; + char tfm_name[128]; +}; + +struct zswap_entry { + struct rb_node rbnode; + long unsigned int offset; + int refcount; + unsigned int length; + struct zswap_pool *pool; + union { + long unsigned int handle; + long unsigned int value; + }; +}; + +struct zswap_header { + swp_entry_t swpentry; +}; + +struct zswap_tree { + struct rb_root rbroot; + spinlock_t lock; +}; + +enum zswap_get_swap_ret { + ZSWAP_SWAPCACHE_NEW = 0, + ZSWAP_SWAPCACHE_EXIST = 1, + ZSWAP_SWAPCACHE_FAIL = 2, +}; + +struct dma_pool { + struct list_head page_list; + spinlock_t lock; + size_t size; + struct device *dev; + size_t allocation; + size_t boundary; + char name[32]; + struct list_head pools; +}; + +struct dma_page { + struct list_head page_list; + void *vaddr; + dma_addr_t dma; + unsigned int in_use; + unsigned int offset; +}; + +enum string_size_units { + STRING_UNITS_10 = 0, + STRING_UNITS_2 = 1, +}; + +struct resv_map { + struct kref refs; + spinlock_t lock; + struct list_head regions; + long int adds_in_progress; + struct list_head region_cache; + long int region_cache_count; + struct page_counter *reservation_counter; + long unsigned int pages_per_hpage; + struct cgroup_subsys_state *css; +}; + +struct file_region { + struct list_head link; + long int from; + long int to; + struct page_counter *reservation_counter; + struct cgroup_subsys_state *css; +}; + +struct huge_bootmem_page { + struct list_head list; + struct hstate *hstate; +}; + +enum hugetlb_memory_event { + HUGETLB_MAX = 0, + HUGETLB_NR_MEMORY_EVENTS = 1, +}; + +struct hugetlb_cgroup { + struct cgroup_subsys_state css; + struct page_counter hugepage[4]; + struct page_counter rsvd_hugepage[4]; + atomic_long_t events[4]; + atomic_long_t events_local[4]; + struct cgroup_file events_file[4]; + struct cgroup_file events_local_file[4]; +}; + +enum vma_resv_mode { + VMA_NEEDS_RESV = 0, + VMA_COMMIT_RESV = 1, + VMA_END_RESV = 2, + VMA_ADD_RESV = 3, +}; + +struct node_hstate { + struct kobject *hugepages_kobj; + struct kobject *hstate_kobjs[4]; +}; + +struct nodemask_scratch { + nodemask_t mask1; + nodemask_t mask2; +}; + +struct sp_node { + struct rb_node nd; + long unsigned int start; + long unsigned int end; + struct mempolicy *policy; +}; + +struct mempolicy_operations { + int (*create)(struct mempolicy *, const nodemask_t *); + void (*rebind)(struct mempolicy *, const nodemask_t *); +}; + +struct queue_pages { + struct list_head *pagelist; + long unsigned int flags; + nodemask_t *nmask; + long unsigned int start; + long unsigned int end; + struct vm_area_struct *first; +}; + +struct mmu_notifier_subscriptions { + struct hlist_head list; + bool has_itree; + spinlock_t lock; + long unsigned int invalidate_seq; + long unsigned int active_invalidate_ranges; + struct rb_root_cached itree; + wait_queue_head_t wq; + struct hlist_head deferred_list; +}; + +struct interval_tree_node { + struct rb_node rb; + long unsigned int start; + long unsigned int last; + long unsigned int __subtree_last; +}; + +struct mmu_interval_notifier; + +struct mmu_interval_notifier_ops { + bool (*invalidate)(struct mmu_interval_notifier *, const struct mmu_notifier_range *, long unsigned int); +}; + +struct mmu_interval_notifier { + struct interval_tree_node interval_tree; + const struct mmu_interval_notifier_ops *ops; + struct mm_struct *mm; + struct hlist_node deferred_item; + long unsigned int invalidate_seq; +}; + +struct rmap_item; + +struct mm_slot { + struct hlist_node link; + struct list_head mm_list; + struct rmap_item *rmap_list; + struct mm_struct 
*mm; +}; + +struct stable_node; + +struct rmap_item { + struct rmap_item *rmap_list; + union { + struct anon_vma *anon_vma; + int nid; + }; + struct mm_struct *mm; + long unsigned int address; + unsigned int oldchecksum; + union { + struct rb_node node; + struct { + struct stable_node *head; + struct hlist_node hlist; + }; + }; +}; + +struct ksm_scan { + struct mm_slot *mm_slot; + long unsigned int address; + struct rmap_item **rmap_list; + long unsigned int seqnr; +}; + +struct stable_node { + union { + struct rb_node node; + struct { + struct list_head *head; + struct { + struct hlist_node hlist_dup; + struct list_head list; + }; + }; + }; + struct hlist_head hlist; + union { + long unsigned int kpfn; + long unsigned int chain_prune_time; + }; + int rmap_hlist_len; + int nid; +}; + +enum get_ksm_page_flags { + GET_KSM_PAGE_NOLOCK = 0, + GET_KSM_PAGE_LOCK = 1, + GET_KSM_PAGE_TRYLOCK = 2, +}; + +struct array_cache { + unsigned int avail; + unsigned int limit; + unsigned int batchcount; + unsigned int touched; + void *entry[0]; +}; + +struct alien_cache { + spinlock_t lock; + struct array_cache ac; +}; + +typedef unsigned char freelist_idx_t; + +enum { + MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12, + SECTION_INFO = 12, + MIX_SECTION_INFO = 13, + NODE_INFO = 14, + MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = 14, +}; + +enum { + MMOP_OFFLINE = 0, + MMOP_ONLINE = 1, + MMOP_ONLINE_KERNEL = 2, + MMOP_ONLINE_MOVABLE = 3, +}; + +typedef int mhp_t; + +typedef void (*online_page_callback_t)(struct page *, unsigned int); + +struct memory_block { + long unsigned int start_section_nr; + long unsigned int state; + int online_type; + int phys_device; + struct device dev; + int nid; +}; + +struct buffer_head; + +typedef void bh_end_io_t(struct buffer_head *, int); + +struct buffer_head { + long unsigned int b_state; + struct buffer_head *b_this_page; + struct page *b_page; + sector_t b_blocknr; + size_t b_size; + char *b_data; + struct block_device *b_bdev; + bh_end_io_t *b_end_io; + void *b_private; + struct list_head b_assoc_buffers; + struct address_space *b_assoc_map; + atomic_t b_count; + spinlock_t b_uptodate_lock; +}; + +typedef struct page *new_page_t(struct page *, long unsigned int); + +typedef void free_page_t(struct page *, long unsigned int); + +enum bh_state_bits { + BH_Uptodate = 0, + BH_Dirty = 1, + BH_Lock = 2, + BH_Req = 3, + BH_Mapped = 4, + BH_New = 5, + BH_Async_Read = 6, + BH_Async_Write = 7, + BH_Delay = 8, + BH_Boundary = 9, + BH_Write_EIO = 10, + BH_Unwritten = 11, + BH_Quiet = 12, + BH_Meta = 13, + BH_Prio = 14, + BH_Defer_Completion = 15, + BH_PrivateStart = 16, +}; + +struct trace_event_raw_mm_migrate_pages { + struct trace_entry ent; + long unsigned int succeeded; + long unsigned int failed; + long unsigned int thp_succeeded; + long unsigned int thp_failed; + long unsigned int thp_split; + enum migrate_mode mode; + int reason; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_migrate_pages {}; + +typedef void (*btf_trace_mm_migrate_pages)(void *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, enum migrate_mode, int); + +enum scan_result { + SCAN_FAIL = 0, + SCAN_SUCCEED = 1, + SCAN_PMD_NULL = 2, + SCAN_EXCEED_NONE_PTE = 3, + SCAN_EXCEED_SWAP_PTE = 4, + SCAN_EXCEED_SHARED_PTE = 5, + SCAN_PTE_NON_PRESENT = 6, + SCAN_PTE_UFFD_WP = 7, + SCAN_PAGE_RO = 8, + SCAN_LACK_REFERENCED_PAGE = 9, + SCAN_PAGE_NULL = 10, + SCAN_SCAN_ABORT = 11, + SCAN_PAGE_COUNT = 12, + SCAN_PAGE_LRU = 13, + SCAN_PAGE_LOCK = 14, + SCAN_PAGE_ANON = 15, + 
SCAN_PAGE_COMPOUND = 16, + SCAN_ANY_PROCESS = 17, + SCAN_VMA_NULL = 18, + SCAN_VMA_CHECK = 19, + SCAN_ADDRESS_RANGE = 20, + SCAN_SWAP_CACHE_PAGE = 21, + SCAN_DEL_PAGE_LRU = 22, + SCAN_ALLOC_HUGE_PAGE_FAIL = 23, + SCAN_CGROUP_CHARGE_FAIL = 24, + SCAN_TRUNCATED = 25, + SCAN_PAGE_HAS_PRIVATE = 26, +}; + +struct trace_event_raw_mm_khugepaged_scan_pmd { + struct trace_entry ent; + struct mm_struct *mm; + long unsigned int pfn; + bool writable; + int referenced; + int none_or_zero; + int status; + int unmapped; + char __data[0]; +}; + +struct trace_event_raw_mm_collapse_huge_page { + struct trace_entry ent; + struct mm_struct *mm; + int isolated; + int status; + char __data[0]; +}; + +struct trace_event_raw_mm_collapse_huge_page_isolate { + struct trace_entry ent; + long unsigned int pfn; + int none_or_zero; + int referenced; + bool writable; + int status; + char __data[0]; +}; + +struct trace_event_raw_mm_collapse_huge_page_swapin { + struct trace_entry ent; + struct mm_struct *mm; + int swapped_in; + int referenced; + int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_mm_khugepaged_scan_pmd {}; + +struct trace_event_data_offsets_mm_collapse_huge_page {}; + +struct trace_event_data_offsets_mm_collapse_huge_page_isolate {}; + +struct trace_event_data_offsets_mm_collapse_huge_page_swapin {}; + +typedef void (*btf_trace_mm_khugepaged_scan_pmd)(void *, struct mm_struct *, struct page *, bool, int, int, int, int); + +typedef void (*btf_trace_mm_collapse_huge_page)(void *, struct mm_struct *, int, int); + +typedef void (*btf_trace_mm_collapse_huge_page_isolate)(void *, struct page *, int, int, bool, int); + +typedef void (*btf_trace_mm_collapse_huge_page_swapin)(void *, struct mm_struct *, int, int, int); + +struct mm_slot___2 { + struct hlist_node hash; + struct list_head mm_node; + struct mm_struct *mm; + int nr_pte_mapped_thp; + long unsigned int pte_mapped_thp[8]; +}; + +struct khugepaged_scan { + struct list_head mm_head; + struct mm_slot___2 *mm_slot; + long unsigned int address; +}; + +struct mem_cgroup_reclaim_cookie { + pg_data_t *pgdat; + unsigned int generation; +}; + +struct mem_cgroup_tree_per_node { + struct rb_root rb_root; + struct rb_node *rb_rightmost; + spinlock_t lock; +}; + +struct mem_cgroup_tree { + struct mem_cgroup_tree_per_node *rb_tree_per_node[128]; +}; + +struct mem_cgroup_eventfd_list { + struct list_head list; + struct eventfd_ctx *eventfd; +}; + +struct mem_cgroup_event { + struct mem_cgroup *memcg; + struct eventfd_ctx *eventfd; + struct list_head list; + int (*register_event)(struct mem_cgroup *, struct eventfd_ctx *, const char *); + void (*unregister_event)(struct mem_cgroup *, struct eventfd_ctx *); + poll_table pt; + wait_queue_head_t *wqh; + wait_queue_entry_t wait; + struct work_struct remove; +}; + +struct move_charge_struct { + spinlock_t lock; + struct mm_struct *mm; + struct mem_cgroup *from; + struct mem_cgroup *to; + long unsigned int flags; + long unsigned int precharge; + long unsigned int moved_charge; + long unsigned int moved_swap; + struct task_struct *moving_task; + wait_queue_head_t waitq; +}; + +enum res_type { + _MEM = 0, + _MEMSWAP = 1, + _OOM_TYPE = 2, + _KMEM = 3, + _TCP = 4, +}; + +struct memory_stat { + const char *name; + unsigned int ratio; + unsigned int idx; +}; + +struct oom_wait_info { + struct mem_cgroup *memcg; + wait_queue_entry_t wait; +}; + +enum oom_status { + OOM_SUCCESS = 0, + OOM_FAILED = 1, + OOM_ASYNC = 2, + OOM_SKIPPED = 3, +}; + +struct memcg_stock_pcp { + struct mem_cgroup *cached; + unsigned int nr_pages; 
+ struct obj_cgroup *cached_objcg; + unsigned int nr_bytes; + struct work_struct work; + long unsigned int flags; +}; + +enum { + RES_USAGE = 0, + RES_LIMIT = 1, + RES_MAX_USAGE = 2, + RES_FAILCNT = 3, + RES_SOFT_LIMIT = 4, +}; + +union mc_target { + struct page *page; + swp_entry_t ent; +}; + +enum mc_target_type { + MC_TARGET_NONE = 0, + MC_TARGET_PAGE = 1, + MC_TARGET_SWAP = 2, + MC_TARGET_DEVICE = 3, +}; + +struct uncharge_gather { + struct mem_cgroup *memcg; + long unsigned int nr_pages; + long unsigned int pgpgout; + long unsigned int nr_kmem; + struct page *dummy_page; +}; + +struct numa_stat { + const char *name; + unsigned int lru_mask; +}; + +enum vmpressure_levels { + VMPRESSURE_LOW = 0, + VMPRESSURE_MEDIUM = 1, + VMPRESSURE_CRITICAL = 2, + VMPRESSURE_NUM_LEVELS = 3, +}; + +enum vmpressure_modes { + VMPRESSURE_NO_PASSTHROUGH = 0, + VMPRESSURE_HIERARCHY = 1, + VMPRESSURE_LOCAL = 2, + VMPRESSURE_NUM_MODES = 3, +}; + +struct vmpressure_event { + struct eventfd_ctx *efd; + enum vmpressure_levels level; + enum vmpressure_modes mode; + struct list_head node; +}; + +struct swap_cgroup_ctrl { + struct page **map; + long unsigned int length; + spinlock_t lock; +}; + +struct swap_cgroup { + short unsigned int id; +}; + +enum { + RES_USAGE___2 = 0, + RES_RSVD_USAGE = 1, + RES_LIMIT___2 = 2, + RES_RSVD_LIMIT = 3, + RES_MAX_USAGE___2 = 4, + RES_RSVD_MAX_USAGE = 5, + RES_FAILCNT___2 = 6, + RES_RSVD_FAILCNT = 7, +}; + +enum mf_result { + MF_IGNORED = 0, + MF_FAILED = 1, + MF_DELAYED = 2, + MF_RECOVERED = 3, +}; + +enum mf_action_page_type { + MF_MSG_KERNEL = 0, + MF_MSG_KERNEL_HIGH_ORDER = 1, + MF_MSG_SLAB = 2, + MF_MSG_DIFFERENT_COMPOUND = 3, + MF_MSG_POISONED_HUGE = 4, + MF_MSG_HUGE = 5, + MF_MSG_FREE_HUGE = 6, + MF_MSG_NON_PMD_HUGE = 7, + MF_MSG_UNMAP_FAILED = 8, + MF_MSG_DIRTY_SWAPCACHE = 9, + MF_MSG_CLEAN_SWAPCACHE = 10, + MF_MSG_DIRTY_MLOCKED_LRU = 11, + MF_MSG_CLEAN_MLOCKED_LRU = 12, + MF_MSG_DIRTY_UNEVICTABLE_LRU = 13, + MF_MSG_CLEAN_UNEVICTABLE_LRU = 14, + MF_MSG_DIRTY_LRU = 15, + MF_MSG_CLEAN_LRU = 16, + MF_MSG_TRUNCATED_LRU = 17, + MF_MSG_BUDDY = 18, + MF_MSG_BUDDY_2ND = 19, + MF_MSG_DAX = 20, + MF_MSG_UNSPLIT_THP = 21, + MF_MSG_UNKNOWN = 22, +}; + +typedef long unsigned int dax_entry_t; + +struct __kfifo { + unsigned int in; + unsigned int out; + unsigned int mask; + unsigned int esize; + void *data; +}; + +struct to_kill { + struct list_head nd; + struct task_struct *tsk; + long unsigned int addr; + short int size_shift; +}; + +struct page_state { + long unsigned int mask; + long unsigned int res; + enum mf_action_page_type type; + int (*action)(struct page *, long unsigned int); +}; + +struct memory_failure_entry { + long unsigned int pfn; + int flags; +}; + +struct memory_failure_cpu { + struct { + union { + struct __kfifo kfifo; + struct memory_failure_entry *type; + const struct memory_failure_entry *const_type; + char (*rectype)[0]; + struct memory_failure_entry *ptr; + const struct memory_failure_entry *ptr_const; + }; + struct memory_failure_entry buf[16]; + } fifo; + spinlock_t lock; + struct work_struct work; +}; + +struct cleancache_filekey { + union { + ino_t ino; + __u32 fh[6]; + u32 key[6]; + } u; +}; + +struct cleancache_ops { + int (*init_fs)(size_t); + int (*init_shared_fs)(uuid_t *, size_t); + int (*get_page)(int, struct cleancache_filekey, long unsigned int, struct page *); + void (*put_page)(int, struct cleancache_filekey, long unsigned int, struct page *); + void (*invalidate_page)(int, struct cleancache_filekey, long unsigned int); + void 
(*invalidate_inode)(int, struct cleancache_filekey); + void (*invalidate_fs)(int); +}; + +struct trace_event_raw_test_pages_isolated { + struct trace_entry ent; + long unsigned int start_pfn; + long unsigned int end_pfn; + long unsigned int fin_pfn; + char __data[0]; +}; + +struct trace_event_data_offsets_test_pages_isolated {}; + +typedef void (*btf_trace_test_pages_isolated)(void *, long unsigned int, long unsigned int, long unsigned int); + +struct zpool_driver; + +struct zpool { + struct zpool_driver *driver; + void *pool; + const struct zpool_ops *ops; + bool evictable; + struct list_head list; +}; + +struct zpool_driver { + char *type; + struct module *owner; + atomic_t refcount; + struct list_head list; + void * (*create)(const char *, gfp_t, const struct zpool_ops *, struct zpool *); + void (*destroy)(void *); + bool malloc_support_movable; + int (*malloc)(void *, size_t, gfp_t, long unsigned int *); + void (*free)(void *, long unsigned int); + int (*shrink)(void *, unsigned int, unsigned int *); + void * (*map)(void *, long unsigned int, enum zpool_mapmode); + void (*unmap)(void *, long unsigned int); + u64 (*total_size)(void *); +}; + +struct zbud_pool; + +struct zbud_ops { + int (*evict)(struct zbud_pool *, long unsigned int); +}; + +struct zbud_pool { + spinlock_t lock; + struct list_head unbuddied[63]; + struct list_head buddied; + struct list_head lru; + u64 pages_nr; + const struct zbud_ops *ops; + struct zpool *zpool; + const struct zpool_ops *zpool_ops; +}; + +struct zbud_header { + struct list_head buddy; + struct list_head lru; + unsigned int first_chunks; + unsigned int last_chunks; + bool under_reclaim; +}; + +enum buddy { + FIRST = 0, + LAST = 1, +}; + +enum zs_mapmode { + ZS_MM_RW = 0, + ZS_MM_RO = 1, + ZS_MM_WO = 2, +}; + +struct zs_pool_stats { + long unsigned int pages_compacted; +}; + +enum fullness_group { + ZS_EMPTY = 0, + ZS_ALMOST_EMPTY = 1, + ZS_ALMOST_FULL = 2, + ZS_FULL = 3, + NR_ZS_FULLNESS = 4, +}; + +enum zs_stat_type { + CLASS_EMPTY = 0, + CLASS_ALMOST_EMPTY = 1, + CLASS_ALMOST_FULL = 2, + CLASS_FULL = 3, + OBJ_ALLOCATED = 4, + OBJ_USED = 5, + NR_ZS_STAT_TYPE = 6, +}; + +struct zs_size_stat { + long unsigned int objs[6]; +}; + +struct size_class { + spinlock_t lock; + struct list_head fullness_list[4]; + int size; + int objs_per_zspage; + int pages_per_zspage; + unsigned int index; + struct zs_size_stat stats; +}; + +struct link_free { + union { + long unsigned int next; + long unsigned int handle; + }; +}; + +struct zs_pool { + const char *name; + struct size_class *size_class[255]; + struct kmem_cache *handle_cachep; + struct kmem_cache *zspage_cachep; + atomic_long_t pages_allocated; + struct zs_pool_stats stats; + struct shrinker shrinker; + struct inode *inode; + struct work_struct free_work; + struct wait_queue_head migration_wait; + atomic_long_t isolated_pages; + bool destroying; +}; + +struct zspage { + struct { + unsigned int fullness: 2; + unsigned int class: 9; + unsigned int isolated: 3; + unsigned int magic: 8; + }; + unsigned int inuse; + unsigned int freeobj; + struct page *first_page; + struct list_head list; + rwlock_t lock; +}; + +struct mapping_area { + char *vm_buf; + char *vm_addr; + enum zs_mapmode vm_mm; +}; + +struct zs_compact_control { + struct page *s_page; + struct page *d_page; + int obj_idx; +}; + +struct cma { + long unsigned int base_pfn; + long unsigned int count; + long unsigned int *bitmap; + unsigned int order_per_bit; + struct mutex lock; + char name[64]; +}; + +struct trace_event_raw_cma_alloc { + struct 
trace_entry ent; + long unsigned int pfn; + const struct page *page; + unsigned int count; + unsigned int align; + char __data[0]; +}; + +struct trace_event_raw_cma_release { + struct trace_entry ent; + long unsigned int pfn; + const struct page *page; + unsigned int count; + char __data[0]; +}; + +struct trace_event_data_offsets_cma_alloc {}; + +struct trace_event_data_offsets_cma_release {}; + +typedef void (*btf_trace_cma_alloc)(void *, long unsigned int, const struct page *, unsigned int, unsigned int); + +typedef void (*btf_trace_cma_release)(void *, long unsigned int, const struct page *, unsigned int); + +struct balloon_dev_info { + long unsigned int isolated_pages; + spinlock_t pages_lock; + struct list_head pages; + int (*migratepage)(struct balloon_dev_info *, struct page *, struct page *, enum migrate_mode); + struct inode *inode; +}; + +struct page_ext_operations { + size_t offset; + size_t size; + bool (*need)(); + void (*init)(); +}; + +struct frame_vector { + unsigned int nr_allocated; + unsigned int nr_frames; + bool got_ref; + bool is_pfns; + void *ptrs[0]; +}; + +enum { + BAD_STACK = 4294967295, + NOT_STACK = 0, + GOOD_FRAME = 1, + GOOD_STACK = 2, +}; + +enum hmm_pfn_flags { + HMM_PFN_VALID = 0, + HMM_PFN_WRITE = 0, + HMM_PFN_ERROR = 0, + HMM_PFN_ORDER_SHIFT = 56, + HMM_PFN_REQ_FAULT = 0, + HMM_PFN_REQ_WRITE = 0, + HMM_PFN_FLAGS = 0, +}; + +struct hmm_range { + struct mmu_interval_notifier *notifier; + long unsigned int notifier_seq; + long unsigned int start; + long unsigned int end; + long unsigned int *hmm_pfns; + long unsigned int default_flags; + long unsigned int pfn_flags_mask; + void *dev_private_owner; +}; + +struct hmm_vma_walk { + struct hmm_range *range; + long unsigned int last; +}; + +enum { + HMM_NEED_FAULT = 1, + HMM_NEED_WRITE_FAULT = 2, + HMM_NEED_ALL_BITS = 3, +}; + +struct hugetlbfs_inode_info { + struct shared_policy policy; + struct inode vfs_inode; + unsigned int seals; +}; + +struct page_reporting_dev_info { + int (*report)(struct page_reporting_dev_info *, struct scatterlist *, unsigned int); + struct delayed_work work; + atomic_t state; +}; + +enum { + PAGE_REPORTING_IDLE = 0, + PAGE_REPORTING_REQUESTED = 1, + PAGE_REPORTING_ACTIVE = 2, +}; + +typedef s32 compat_off_t; + +struct open_how { + __u64 flags; + __u64 mode; + __u64 resolve; +}; + +enum fsnotify_data_type { + FSNOTIFY_EVENT_NONE = 0, + FSNOTIFY_EVENT_PATH = 1, + FSNOTIFY_EVENT_INODE = 2, +}; + +struct open_flags { + int open_flag; + umode_t mode; + int acc_mode; + int intent; + int lookup_flags; +}; + +typedef __kernel_rwf_t rwf_t; + +struct fscrypt_policy_v1 { + __u8 version; + __u8 contents_encryption_mode; + __u8 filenames_encryption_mode; + __u8 flags; + __u8 master_key_descriptor[8]; +}; + +struct fscrypt_policy_v2 { + __u8 version; + __u8 contents_encryption_mode; + __u8 filenames_encryption_mode; + __u8 flags; + __u8 __reserved[4]; + __u8 master_key_identifier[16]; +}; + +union fscrypt_policy { + u8 version; + struct fscrypt_policy_v1 v1; + struct fscrypt_policy_v2 v2; +}; + +enum vfs_get_super_keying { + vfs_get_single_super = 0, + vfs_get_single_reconf_super = 1, + vfs_get_keyed_super = 2, + vfs_get_independent_super = 3, +}; + +struct kobj_map; + +struct char_device_struct { + struct char_device_struct *next; + unsigned int major; + unsigned int baseminor; + int minorct; + char name[64]; + struct cdev *cdev; +}; + +struct stat { + long unsigned int st_dev; + long unsigned int st_ino; + unsigned int st_mode; + unsigned int st_nlink; + unsigned int st_uid; + unsigned int st_gid; 
+ long unsigned int st_rdev; + long unsigned int __pad1; + long int st_size; + int st_blksize; + int __pad2; + long int st_blocks; + long int st_atime; + long unsigned int st_atime_nsec; + long int st_mtime; + long unsigned int st_mtime_nsec; + long int st_ctime; + long unsigned int st_ctime_nsec; + unsigned int __unused4; + unsigned int __unused5; +}; + +typedef u32 compat_ino_t; + +typedef u16 compat_ushort_t; + +typedef s64 compat_s64; + +typedef u16 __compat_uid16_t; + +typedef u16 __compat_gid16_t; + +typedef u16 compat_mode_t; + +typedef u32 compat_dev_t; + +struct compat_stat { + compat_dev_t st_dev; + compat_ino_t st_ino; + compat_mode_t st_mode; + compat_ushort_t st_nlink; + __compat_uid16_t st_uid; + __compat_gid16_t st_gid; + compat_dev_t st_rdev; + compat_off_t st_size; + compat_off_t st_blksize; + compat_off_t st_blocks; + old_time32_t st_atime; + compat_ulong_t st_atime_nsec; + old_time32_t st_mtime; + compat_ulong_t st_mtime_nsec; + old_time32_t st_ctime; + compat_ulong_t st_ctime_nsec; + compat_ulong_t __unused4[2]; +}; + +struct stat64 { + compat_u64 st_dev; + unsigned char __pad0[4]; + compat_ulong_t __st_ino; + compat_uint_t st_mode; + compat_uint_t st_nlink; + compat_ulong_t st_uid; + compat_ulong_t st_gid; + compat_u64 st_rdev; + unsigned char __pad3[4]; + compat_s64 st_size; + compat_ulong_t st_blksize; + compat_u64 st_blocks; + compat_ulong_t st_atime; + compat_ulong_t st_atime_nsec; + compat_ulong_t st_mtime; + compat_ulong_t st_mtime_nsec; + compat_ulong_t st_ctime; + compat_ulong_t st_ctime_nsec; + compat_u64 st_ino; +}; + +struct statx_timestamp { + __s64 tv_sec; + __u32 tv_nsec; + __s32 __reserved; +}; + +struct statx { + __u32 stx_mask; + __u32 stx_blksize; + __u64 stx_attributes; + __u32 stx_nlink; + __u32 stx_uid; + __u32 stx_gid; + __u16 stx_mode; + __u16 __spare0[1]; + __u64 stx_ino; + __u64 stx_size; + __u64 stx_blocks; + __u64 stx_attributes_mask; + struct statx_timestamp stx_atime; + struct statx_timestamp stx_btime; + struct statx_timestamp stx_ctime; + struct statx_timestamp stx_mtime; + __u32 stx_rdev_major; + __u32 stx_rdev_minor; + __u32 stx_dev_major; + __u32 stx_dev_minor; + __u64 stx_mnt_id; + __u64 __spare2; + __u64 __spare3[12]; +}; + +struct mount; + +struct mnt_namespace { + atomic_t count; + struct ns_common ns; + struct mount *root; + struct list_head list; + spinlock_t ns_lock; + struct user_namespace *user_ns; + struct ucounts *ucounts; + u64 seq; + wait_queue_head_t poll; + u64 event; + unsigned int mounts; + unsigned int pending_mounts; +}; + +struct mnt_pcp; + +struct mountpoint; + +struct mount { + struct hlist_node mnt_hash; + struct mount *mnt_parent; + struct dentry *mnt_mountpoint; + struct vfsmount mnt; + union { + struct callback_head mnt_rcu; + struct llist_node mnt_llist; + }; + struct mnt_pcp *mnt_pcp; + struct list_head mnt_mounts; + struct list_head mnt_child; + struct list_head mnt_instance; + const char *mnt_devname; + struct list_head mnt_list; + struct list_head mnt_expire; + struct list_head mnt_share; + struct list_head mnt_slave_list; + struct list_head mnt_slave; + struct mount *mnt_master; + struct mnt_namespace *mnt_ns; + struct mountpoint *mnt_mp; + union { + struct hlist_node mnt_mp_list; + struct hlist_node mnt_umount; + }; + struct list_head mnt_umounting; + struct fsnotify_mark_connector *mnt_fsnotify_marks; + __u32 mnt_fsnotify_mask; + int mnt_id; + int mnt_group_id; + int mnt_expiry_mark; + struct hlist_head mnt_pins; + struct hlist_head mnt_stuck_children; +}; + +struct mnt_pcp { + int mnt_count; + int 
mnt_writers; +}; + +struct mountpoint { + struct hlist_node m_hash; + struct dentry *m_dentry; + struct hlist_head m_list; + int m_count; +}; + +typedef short unsigned int ushort; + +struct user_arg_ptr { + bool is_compat; + union { + const char * const *native; + const compat_uptr_t *compat; + } ptr; +}; + +enum inode_i_mutex_lock_class { + I_MUTEX_NORMAL = 0, + I_MUTEX_PARENT = 1, + I_MUTEX_CHILD = 2, + I_MUTEX_XATTR = 3, + I_MUTEX_NONDIR2 = 4, + I_MUTEX_PARENT2 = 5, +}; + +struct pseudo_fs_context { + const struct super_operations *ops; + const struct xattr_handler **xattr; + const struct dentry_operations *dops; + long unsigned int magic; +}; + +struct name_snapshot { + struct qstr name; + unsigned char inline_name[32]; +}; + +struct saved { + struct path link; + struct delayed_call done; + const char *name; + unsigned int seq; +}; + +struct nameidata { + struct path path; + struct qstr last; + struct path root; + struct inode *inode; + unsigned int flags; + unsigned int seq; + unsigned int m_seq; + unsigned int r_seq; + int last_type; + unsigned int depth; + int total_link_count; + struct saved *stack; + struct saved internal[2]; + struct filename *name; + struct nameidata *saved; + unsigned int root_seq; + int dfd; + kuid_t dir_uid; + umode_t dir_mode; +}; + +enum { + LAST_NORM = 0, + LAST_ROOT = 1, + LAST_DOT = 2, + LAST_DOTDOT = 3, +}; + +enum { + WALK_TRAILING = 1, + WALK_MORE = 2, + WALK_NOFOLLOW = 4, +}; + +struct word_at_a_time { + const long unsigned int one_bits; + const long unsigned int high_bits; +}; + +struct compat_flock { + short int l_type; + short int l_whence; + compat_off_t l_start; + compat_off_t l_len; + compat_pid_t l_pid; +}; + +struct compat_flock64 { + short int l_type; + short int l_whence; + compat_loff_t l_start; + compat_loff_t l_len; + compat_pid_t l_pid; +}; + +struct f_owner_ex { + int type; + __kernel_pid_t pid; +}; + +struct flock { + short int l_type; + short int l_whence; + __kernel_off_t l_start; + __kernel_off_t l_len; + __kernel_pid_t l_pid; +}; + +struct file_clone_range { + __s64 src_fd; + __u64 src_offset; + __u64 src_length; + __u64 dest_offset; +}; + +struct file_dedupe_range_info { + __s64 dest_fd; + __u64 dest_offset; + __u64 bytes_deduped; + __s32 status; + __u32 reserved; +}; + +struct file_dedupe_range { + __u64 src_offset; + __u64 src_length; + __u16 dest_count; + __u16 reserved1; + __u32 reserved2; + struct file_dedupe_range_info info[0]; +}; + +typedef int get_block_t(struct inode *, sector_t, struct buffer_head *, int); + +struct fiemap_extent; + +struct fiemap_extent_info { + unsigned int fi_flags; + unsigned int fi_extents_mapped; + unsigned int fi_extents_max; + struct fiemap_extent *fi_extents_start; +}; + +struct space_resv { + __s16 l_type; + __s16 l_whence; + __s64 l_start; + __s64 l_len; + __s32 l_sysid; + __u32 l_pid; + __s32 l_pad[4]; +}; + +struct fiemap_extent { + __u64 fe_logical; + __u64 fe_physical; + __u64 fe_length; + __u64 fe_reserved64[2]; + __u32 fe_flags; + __u32 fe_reserved[3]; +}; + +struct fiemap { + __u64 fm_start; + __u64 fm_length; + __u32 fm_flags; + __u32 fm_mapped_extents; + __u32 fm_extent_count; + __u32 fm_reserved; + struct fiemap_extent fm_extents[0]; +}; + +struct linux_dirent64 { + u64 d_ino; + s64 d_off; + short unsigned int d_reclen; + unsigned char d_type; + char d_name[0]; +}; + +struct linux_dirent { + long unsigned int d_ino; + long unsigned int d_off; + short unsigned int d_reclen; + char d_name[1]; +}; + +struct getdents_callback { + struct dir_context ctx; + struct linux_dirent 
*current_dir; + int prev_reclen; + int count; + int error; +}; + +struct getdents_callback64 { + struct dir_context ctx; + struct linux_dirent64 *current_dir; + int prev_reclen; + int count; + int error; +}; + +struct compat_old_linux_dirent { + compat_ulong_t d_ino; + compat_ulong_t d_offset; + short unsigned int d_namlen; + char d_name[1]; +}; + +struct compat_readdir_callback { + struct dir_context ctx; + struct compat_old_linux_dirent *dirent; + int result; +}; + +struct compat_linux_dirent { + compat_ulong_t d_ino; + compat_ulong_t d_off; + short unsigned int d_reclen; + char d_name[1]; +}; + +struct compat_getdents_callback { + struct dir_context ctx; + struct compat_linux_dirent *current_dir; + int prev_reclen; + int count; + int error; +}; + +typedef struct { + long unsigned int fds_bits[16]; +} __kernel_fd_set; + +typedef __kernel_fd_set fd_set; + +struct poll_table_entry { + struct file *filp; + __poll_t key; + wait_queue_entry_t wait; + wait_queue_head_t *wait_address; +}; + +struct poll_table_page; + +struct poll_wqueues { + poll_table pt; + struct poll_table_page *table; + struct task_struct *polling_task; + int triggered; + int error; + int inline_index; + struct poll_table_entry inline_entries[9]; +}; + +struct poll_table_page { + struct poll_table_page *next; + struct poll_table_entry *entry; + struct poll_table_entry entries[0]; +}; + +enum poll_time_type { + PT_TIMEVAL = 0, + PT_OLD_TIMEVAL = 1, + PT_TIMESPEC = 2, + PT_OLD_TIMESPEC = 3, +}; + +typedef struct { + long unsigned int *in; + long unsigned int *out; + long unsigned int *ex; + long unsigned int *res_in; + long unsigned int *res_out; + long unsigned int *res_ex; +} fd_set_bits; + +struct sigset_argpack { + sigset_t *p; + size_t size; +}; + +struct poll_list { + struct poll_list *next; + int len; + struct pollfd entries[0]; +}; + +struct compat_sel_arg_struct { + compat_ulong_t n; + compat_uptr_t inp; + compat_uptr_t outp; + compat_uptr_t exp; + compat_uptr_t tvp; +}; + +struct compat_sigset_argpack { + compat_uptr_t p; + compat_size_t size; +}; + +enum dentry_d_lock_class { + DENTRY_D_LOCK_NORMAL = 0, + DENTRY_D_LOCK_NESTED = 1, +}; + +struct external_name { + union { + atomic_t count; + struct callback_head head; + } u; + unsigned char name[0]; +}; + +enum d_walk_ret { + D_WALK_CONTINUE = 0, + D_WALK_QUIT = 1, + D_WALK_NORETRY = 2, + D_WALK_SKIP = 3, +}; + +struct check_mount { + struct vfsmount *mnt; + unsigned int mounted; +}; + +struct select_data { + struct dentry *start; + union { + long int found; + struct dentry *victim; + }; + struct list_head dispose; +}; + +struct fsxattr { + __u32 fsx_xflags; + __u32 fsx_extsize; + __u32 fsx_nextents; + __u32 fsx_projid; + __u32 fsx_cowextsize; + unsigned char fsx_pad[8]; +}; + +enum file_time_flags { + S_ATIME = 1, + S_MTIME = 2, + S_CTIME = 4, + S_VERSION = 8, +}; + +struct proc_mounts { + struct mnt_namespace *ns; + struct path root; + int (*show)(struct seq_file *, struct vfsmount *); + struct mount cursor; +}; + +enum umount_tree_flags { + UMOUNT_SYNC = 1, + UMOUNT_PROPAGATE = 2, + UMOUNT_CONNECTED = 4, +}; + +struct unicode_map { + const char *charset; + int version; +}; + +struct simple_transaction_argresp { + ssize_t size; + char data[0]; +}; + +struct simple_attr { + int (*get)(void *, u64 *); + int (*set)(void *, u64); + char get_buf[24]; + char set_buf[24]; + void *data; + const char *fmt; + struct mutex mutex; +}; + +struct wb_writeback_work { + long int nr_pages; + struct super_block *sb; + enum writeback_sync_modes sync_mode; + unsigned int 
tagged_writepages: 1; + unsigned int for_kupdate: 1; + unsigned int range_cyclic: 1; + unsigned int for_background: 1; + unsigned int for_sync: 1; + unsigned int auto_free: 1; + enum wb_reason reason; + struct list_head list; + struct wb_completion *done; +}; + +struct trace_event_raw_writeback_page_template { + struct trace_entry ent; + char name[32]; + ino_t ino; + long unsigned int index; + char __data[0]; +}; + +struct trace_event_raw_writeback_dirty_inode_template { + struct trace_entry ent; + char name[32]; + ino_t ino; + long unsigned int state; + long unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_inode_foreign_history { + struct trace_entry ent; + char name[32]; + ino_t ino; + ino_t cgroup_ino; + unsigned int history; + char __data[0]; +}; + +struct trace_event_raw_inode_switch_wbs { + struct trace_entry ent; + char name[32]; + ino_t ino; + ino_t old_cgroup_ino; + ino_t new_cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_track_foreign_dirty { + struct trace_entry ent; + char name[32]; + u64 bdi_id; + ino_t ino; + unsigned int memcg_id; + ino_t cgroup_ino; + ino_t page_cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_flush_foreign { + struct trace_entry ent; + char name[32]; + ino_t cgroup_ino; + unsigned int frn_bdi_id; + unsigned int frn_memcg_id; + char __data[0]; +}; + +struct trace_event_raw_writeback_write_inode_template { + struct trace_entry ent; + char name[32]; + ino_t ino; + int sync_mode; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_work_class { + struct trace_entry ent; + char name[32]; + long int nr_pages; + dev_t sb_dev; + int sync_mode; + int for_kupdate; + int range_cyclic; + int for_background; + int reason; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_pages_written { + struct trace_entry ent; + long int pages; + char __data[0]; +}; + +struct trace_event_raw_writeback_class { + struct trace_entry ent; + char name[32]; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_bdi_register { + struct trace_entry ent; + char name[32]; + char __data[0]; +}; + +struct trace_event_raw_wbc_class { + struct trace_entry ent; + char name[32]; + long int nr_to_write; + long int pages_skipped; + int sync_mode; + int for_kupdate; + int for_background; + int for_reclaim; + int range_cyclic; + long int range_start; + long int range_end; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_queue_io { + struct trace_entry ent; + char name[32]; + long unsigned int older; + long int age; + int moved; + int reason; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_global_dirty_state { + struct trace_entry ent; + long unsigned int nr_dirty; + long unsigned int nr_writeback; + long unsigned int background_thresh; + long unsigned int dirty_thresh; + long unsigned int dirty_limit; + long unsigned int nr_dirtied; + long unsigned int nr_written; + char __data[0]; +}; + +struct trace_event_raw_bdi_dirty_ratelimit { + struct trace_entry ent; + char bdi[32]; + long unsigned int write_bw; + long unsigned int avg_write_bw; + long unsigned int dirty_rate; + long unsigned int dirty_ratelimit; + long unsigned int task_ratelimit; + long unsigned int balanced_dirty_ratelimit; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_balance_dirty_pages { + struct trace_entry ent; + char bdi[32]; + long unsigned int limit; + long unsigned int setpoint; + long unsigned int dirty; + long unsigned int bdi_setpoint; + long 
unsigned int bdi_dirty; + long unsigned int dirty_ratelimit; + long unsigned int task_ratelimit; + unsigned int dirtied; + unsigned int dirtied_pause; + long unsigned int paused; + long int pause; + long unsigned int period; + long int think; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_sb_inodes_requeue { + struct trace_entry ent; + char name[32]; + ino_t ino; + long unsigned int state; + long unsigned int dirtied_when; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_congest_waited_template { + struct trace_entry ent; + unsigned int usec_timeout; + unsigned int usec_delayed; + char __data[0]; +}; + +struct trace_event_raw_writeback_single_inode_template { + struct trace_entry ent; + char name[32]; + ino_t ino; + long unsigned int state; + long unsigned int dirtied_when; + long unsigned int writeback_index; + long int nr_to_write; + long unsigned int wrote; + ino_t cgroup_ino; + char __data[0]; +}; + +struct trace_event_raw_writeback_inode_template { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int state; + __u16 mode; + long unsigned int dirtied_when; + char __data[0]; +}; + +struct trace_event_data_offsets_writeback_page_template {}; + +struct trace_event_data_offsets_writeback_dirty_inode_template {}; + +struct trace_event_data_offsets_inode_foreign_history {}; + +struct trace_event_data_offsets_inode_switch_wbs {}; + +struct trace_event_data_offsets_track_foreign_dirty {}; + +struct trace_event_data_offsets_flush_foreign {}; + +struct trace_event_data_offsets_writeback_write_inode_template {}; + +struct trace_event_data_offsets_writeback_work_class {}; + +struct trace_event_data_offsets_writeback_pages_written {}; + +struct trace_event_data_offsets_writeback_class {}; + +struct trace_event_data_offsets_writeback_bdi_register {}; + +struct trace_event_data_offsets_wbc_class {}; + +struct trace_event_data_offsets_writeback_queue_io {}; + +struct trace_event_data_offsets_global_dirty_state {}; + +struct trace_event_data_offsets_bdi_dirty_ratelimit {}; + +struct trace_event_data_offsets_balance_dirty_pages {}; + +struct trace_event_data_offsets_writeback_sb_inodes_requeue {}; + +struct trace_event_data_offsets_writeback_congest_waited_template {}; + +struct trace_event_data_offsets_writeback_single_inode_template {}; + +struct trace_event_data_offsets_writeback_inode_template {}; + +typedef void (*btf_trace_writeback_dirty_page)(void *, struct page *, struct address_space *); + +typedef void (*btf_trace_wait_on_page_writeback)(void *, struct page *, struct address_space *); + +typedef void (*btf_trace_writeback_mark_inode_dirty)(void *, struct inode *, int); + +typedef void (*btf_trace_writeback_dirty_inode_start)(void *, struct inode *, int); + +typedef void (*btf_trace_writeback_dirty_inode)(void *, struct inode *, int); + +typedef void (*btf_trace_inode_foreign_history)(void *, struct inode *, struct writeback_control *, unsigned int); + +typedef void (*btf_trace_inode_switch_wbs)(void *, struct inode *, struct bdi_writeback *, struct bdi_writeback *); + +typedef void (*btf_trace_track_foreign_dirty)(void *, struct page *, struct bdi_writeback *); + +typedef void (*btf_trace_flush_foreign)(void *, struct bdi_writeback *, unsigned int, unsigned int); + +typedef void (*btf_trace_writeback_write_inode_start)(void *, struct inode *, struct writeback_control *); + +typedef void (*btf_trace_writeback_write_inode)(void *, struct inode *, struct writeback_control *); + +typedef void 
(*btf_trace_writeback_queue)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_exec)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_start)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_written)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_wait)(void *, struct bdi_writeback *, struct wb_writeback_work *); + +typedef void (*btf_trace_writeback_pages_written)(void *, long int); + +typedef void (*btf_trace_writeback_wake_background)(void *, struct bdi_writeback *); + +typedef void (*btf_trace_writeback_bdi_register)(void *, struct backing_dev_info *); + +typedef void (*btf_trace_wbc_writepage)(void *, struct writeback_control *, struct backing_dev_info *); + +typedef void (*btf_trace_writeback_queue_io)(void *, struct bdi_writeback *, struct wb_writeback_work *, long unsigned int, int); + +typedef void (*btf_trace_global_dirty_state)(void *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_bdi_dirty_ratelimit)(void *, struct bdi_writeback *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_balance_dirty_pages)(void *, struct bdi_writeback *, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int, long int, long unsigned int); + +typedef void (*btf_trace_writeback_sb_inodes_requeue)(void *, struct inode *); + +typedef void (*btf_trace_writeback_congestion_wait)(void *, unsigned int, unsigned int); + +typedef void (*btf_trace_writeback_wait_iff_congested)(void *, unsigned int, unsigned int); + +typedef void (*btf_trace_writeback_single_inode_start)(void *, struct inode *, struct writeback_control *, long unsigned int); + +typedef void (*btf_trace_writeback_single_inode)(void *, struct inode *, struct writeback_control *, long unsigned int); + +typedef void (*btf_trace_writeback_lazytime)(void *, struct inode *); + +typedef void (*btf_trace_writeback_lazytime_iput)(void *, struct inode *); + +typedef void (*btf_trace_writeback_dirty_inode_enqueue)(void *, struct inode *); + +typedef void (*btf_trace_sb_mark_inode_writeback)(void *, struct inode *); + +typedef void (*btf_trace_sb_clear_inode_writeback)(void *, struct inode *); + +struct inode_switch_wbs_context { + struct inode *inode; + struct bdi_writeback *new_wb; + struct callback_head callback_head; + struct work_struct work; +}; + +struct splice_desc { + size_t total_len; + unsigned int len; + unsigned int flags; + union { + void *userptr; + struct file *file; + void *data; + } u; + loff_t pos; + loff_t *opos; + size_t num_spliced; + bool need_wakeup; +}; + +typedef int splice_actor(struct pipe_inode_info *, struct pipe_buffer *, struct splice_desc *); + +typedef int splice_direct_actor(struct pipe_inode_info *, struct splice_desc *); + +struct old_utimbuf32 { + old_time32_t actime; + old_time32_t modtime; +}; + +typedef int __kernel_daddr_t; + +struct ustat { + __kernel_daddr_t f_tfree; + __kernel_ino_t f_tinode; + char f_fname[6]; + char f_fpack[6]; +}; + +typedef s32 compat_daddr_t; + +typedef __kernel_fsid_t compat_fsid_t; + +struct compat_statfs { + int f_type; + int f_bsize; + int f_blocks; + int f_bfree; + int f_bavail; + int f_files; + int f_ffree; + compat_fsid_t f_fsid; + int f_namelen; + int f_frsize; + int f_flags; + int f_spare[4]; +}; + +struct compat_ustat { + compat_daddr_t 
f_tfree; + compat_ino_t f_tinode; + char f_fname[6]; + char f_fpack[6]; +}; + +struct statfs { + __kernel_long_t f_type; + __kernel_long_t f_bsize; + __kernel_long_t f_blocks; + __kernel_long_t f_bfree; + __kernel_long_t f_bavail; + __kernel_long_t f_files; + __kernel_long_t f_ffree; + __kernel_fsid_t f_fsid; + __kernel_long_t f_namelen; + __kernel_long_t f_frsize; + __kernel_long_t f_flags; + __kernel_long_t f_spare[4]; +}; + +struct statfs64 { + __kernel_long_t f_type; + __kernel_long_t f_bsize; + __u64 f_blocks; + __u64 f_bfree; + __u64 f_bavail; + __u64 f_files; + __u64 f_ffree; + __kernel_fsid_t f_fsid; + __kernel_long_t f_namelen; + __kernel_long_t f_frsize; + __kernel_long_t f_flags; + __kernel_long_t f_spare[4]; +}; + +struct compat_statfs64___2 { + __u32 f_type; + __u32 f_bsize; + __u64 f_blocks; + __u64 f_bfree; + __u64 f_bavail; + __u64 f_files; + __u64 f_ffree; + __kernel_fsid_t f_fsid; + __u32 f_namelen; + __u32 f_frsize; + __u32 f_flags; + __u32 f_spare[4]; +} __attribute__((packed)); + +typedef struct ns_common *ns_get_path_helper_t(void *); + +struct ns_get_path_task_args { + const struct proc_ns_operations *ns_ops; + struct task_struct *task; +}; + +enum legacy_fs_param { + LEGACY_FS_UNSET_PARAMS = 0, + LEGACY_FS_MONOLITHIC_PARAMS = 1, + LEGACY_FS_INDIVIDUAL_PARAMS = 2, +}; + +struct legacy_fs_context { + char *legacy_data; + size_t data_size; + enum legacy_fs_param param_type; +}; + +enum fsconfig_command { + FSCONFIG_SET_FLAG = 0, + FSCONFIG_SET_STRING = 1, + FSCONFIG_SET_BINARY = 2, + FSCONFIG_SET_PATH = 3, + FSCONFIG_SET_PATH_EMPTY = 4, + FSCONFIG_SET_FD = 5, + FSCONFIG_CMD_CREATE = 6, + FSCONFIG_CMD_RECONFIGURE = 7, +}; + +struct dax_device; + +struct iomap_page_ops; + +struct iomap___2 { + u64 addr; + loff_t offset; + u64 length; + u16 type; + u16 flags; + struct block_device *bdev; + struct dax_device *dax_dev; + void *inline_data; + void *private; + const struct iomap_page_ops *page_ops; +}; + +struct iomap_page_ops { + int (*page_prepare)(struct inode *, loff_t, unsigned int, struct iomap___2 *); + void (*page_done)(struct inode *, loff_t, unsigned int, struct page *, struct iomap___2 *); +}; + +struct decrypt_bh_ctx { + struct work_struct work; + struct buffer_head *bh; +}; + +struct bh_lru { + struct buffer_head *bhs[16]; +}; + +struct bh_accounting { + int nr; + int ratelimit; +}; + +enum { + DISK_EVENT_MEDIA_CHANGE = 1, + DISK_EVENT_EJECT_REQUEST = 2, +}; + +enum { + BIOSET_NEED_BVECS = 1, + BIOSET_NEED_RESCUER = 2, +}; + +struct bdev_inode { + struct block_device bdev; + struct inode vfs_inode; +}; + +struct blkdev_dio { + union { + struct kiocb *iocb; + struct task_struct *waiter; + }; + size_t size; + atomic_t ref; + bool multi_bio: 1; + bool should_dirty: 1; + bool is_sync: 1; + struct bio bio; +}; + +struct bd_holder_disk { + struct list_head list; + struct gendisk *disk; + int refcnt; +}; + +typedef int dio_iodone_t(struct kiocb *, loff_t, ssize_t, void *); + +typedef void dio_submit_t(struct bio *, struct inode *, loff_t); + +enum { + DIO_LOCKING = 1, + DIO_SKIP_HOLES = 2, +}; + +struct dio_submit { + struct bio *bio; + unsigned int blkbits; + unsigned int blkfactor; + unsigned int start_zero_done; + int pages_in_io; + sector_t block_in_file; + unsigned int blocks_available; + int reap_counter; + sector_t final_block_in_request; + int boundary; + get_block_t *get_block; + dio_submit_t *submit_io; + loff_t logical_offset_in_bio; + sector_t final_block_in_bio; + sector_t next_block_for_io; + struct page *cur_page; + unsigned int cur_page_offset; + 
unsigned int cur_page_len; + sector_t cur_page_block; + loff_t cur_page_fs_offset; + struct iov_iter *iter; + unsigned int head; + unsigned int tail; + size_t from; + size_t to; +}; + +struct dio { + int flags; + int op; + int op_flags; + blk_qc_t bio_cookie; + struct gendisk *bio_disk; + struct inode *inode; + loff_t i_size; + dio_iodone_t *end_io; + void *private; + spinlock_t bio_lock; + int page_errors; + int is_async; + bool defer_completion; + bool should_dirty; + int io_error; + long unsigned int refcount; + struct bio *bio_list; + struct task_struct *waiter; + struct kiocb *iocb; + ssize_t result; + union { + struct page *pages[64]; + struct work_struct complete_work; + }; + long: 64; +}; + +struct bvec_iter_all { + struct bio_vec bv; + int idx; + unsigned int done; +}; + +struct mpage_readpage_args { + struct bio *bio; + struct page *page; + unsigned int nr_pages; + bool is_readahead; + sector_t last_block_in_bio; + struct buffer_head map_bh; + long unsigned int first_logical_block; + get_block_t *get_block; +}; + +struct mpage_data { + struct bio *bio; + sector_t last_block_in_bio; + get_block_t *get_block; + unsigned int use_writepage; +}; + +typedef u32 nlink_t; + +typedef int (*proc_write_t)(struct file *, char *, size_t); + +struct proc_dir_entry { + atomic_t in_use; + refcount_t refcnt; + struct list_head pde_openers; + spinlock_t pde_unload_lock; + struct completion *pde_unload_completion; + const struct inode_operations *proc_iops; + union { + const struct proc_ops *proc_ops; + const struct file_operations *proc_dir_ops; + }; + const struct dentry_operations *proc_dops; + union { + const struct seq_operations *seq_ops; + int (*single_show)(struct seq_file *, void *); + }; + proc_write_t write; + void *data; + unsigned int state_size; + unsigned int low_ino; + nlink_t nlink; + kuid_t uid; + kgid_t gid; + loff_t size; + struct proc_dir_entry *parent; + struct rb_root subdir; + struct rb_node subdir_node; + char *name; + umode_t mode; + u8 flags; + u8 namelen; + char inline_name[0]; +}; + +union proc_op { + int (*proc_get_link)(struct dentry *, struct path *); + int (*proc_show)(struct seq_file *, struct pid_namespace *, struct pid *, struct task_struct *); + const char *lsm; +}; + +struct proc_inode { + struct pid *pid; + unsigned int fd; + union proc_op op; + struct proc_dir_entry *pde; + struct ctl_table_header *sysctl; + struct ctl_table *sysctl_entry; + struct hlist_node sibling_inodes; + const struct proc_ns_operations *ns_ops; + struct inode vfs_inode; +}; + +struct proc_fs_opts { + int flag; + const char *str; +}; + +struct file_handle { + __u32 handle_bytes; + int handle_type; + unsigned char f_handle[0]; +}; + +struct inotify_inode_mark { + struct fsnotify_mark fsn_mark; + int wd; +}; + +struct dnotify_struct { + struct dnotify_struct *dn_next; + __u32 dn_mask; + int dn_fd; + struct file *dn_filp; + fl_owner_t dn_owner; +}; + +struct dnotify_mark { + struct fsnotify_mark fsn_mark; + struct dnotify_struct *dn; +}; + +struct inotify_event_info { + struct fsnotify_event fse; + u32 mask; + int wd; + u32 sync_cookie; + int name_len; + char name[0]; +}; + +struct inotify_event { + __s32 wd; + __u32 mask; + __u32 cookie; + __u32 len; + char name[0]; +}; + +enum { + FAN_EVENT_INIT = 0, + FAN_EVENT_REPORTED = 1, + FAN_EVENT_ANSWERED = 2, + FAN_EVENT_CANCELED = 3, +}; + +struct fanotify_fh { + u8 type; + u8 len; + u8 flags; + u8 pad; + unsigned char buf[0]; +}; + +struct fanotify_info { + u8 dir_fh_totlen; + u8 file_fh_totlen; + u8 name_len; + u8 pad; + unsigned char buf[0]; 
+}; + +enum fanotify_event_type { + FANOTIFY_EVENT_TYPE_FID = 0, + FANOTIFY_EVENT_TYPE_FID_NAME = 1, + FANOTIFY_EVENT_TYPE_PATH = 2, + FANOTIFY_EVENT_TYPE_PATH_PERM = 3, + FANOTIFY_EVENT_TYPE_OVERFLOW = 4, +}; + +struct fanotify_event { + struct fsnotify_event fse; + u32 mask; + enum fanotify_event_type type; + struct pid *pid; +}; + +struct fanotify_fid_event { + struct fanotify_event fae; + __kernel_fsid_t fsid; + struct fanotify_fh object_fh; + unsigned char _inline_fh_buf[12]; +}; + +struct fanotify_name_event { + struct fanotify_event fae; + __kernel_fsid_t fsid; + struct fanotify_info info; +}; + +struct fanotify_path_event { + struct fanotify_event fae; + struct path path; +}; + +struct fanotify_perm_event { + struct fanotify_event fae; + struct path path; + short unsigned int response; + short unsigned int state; + int fd; +}; + +struct fanotify_event_metadata { + __u32 event_len; + __u8 vers; + __u8 reserved; + __u16 metadata_len; + __u64 mask; + __s32 fd; + __s32 pid; +}; + +struct fanotify_event_info_header { + __u8 info_type; + __u8 pad; + __u16 len; +}; + +struct fanotify_event_info_fid { + struct fanotify_event_info_header hdr; + __kernel_fsid_t fsid; + unsigned char handle[0]; +}; + +struct fanotify_response { + __s32 fd; + __u32 response; +}; + +struct epoll_event { + __poll_t events; + __u64 data; +}; + +struct epoll_filefd { + struct file *file; + int fd; +} __attribute__((packed)); + +struct nested_call_node { + struct list_head llink; + void *cookie; + void *ctx; +}; + +struct nested_calls { + struct list_head tasks_call_list; + spinlock_t lock; +}; + +struct eventpoll; + +struct epitem { + union { + struct rb_node rbn; + struct callback_head rcu; + }; + struct list_head rdllink; + struct epitem *next; + struct epoll_filefd ffd; + int nwait; + struct list_head pwqlist; + struct eventpoll *ep; + struct list_head fllink; + struct wakeup_source *ws; + struct epoll_event event; +}; + +struct eventpoll { + struct mutex mtx; + wait_queue_head_t wq; + wait_queue_head_t poll_wait; + struct list_head rdllist; + rwlock_t lock; + struct rb_root_cached rbr; + struct epitem *ovflist; + struct wakeup_source *ws; + struct user_struct *user; + struct file *file; + u64 gen; + unsigned int napi_id; +}; + +struct eppoll_entry { + struct list_head llink; + struct epitem *base; + wait_queue_entry_t wait; + wait_queue_head_t *whead; +}; + +struct ep_pqueue { + poll_table pt; + struct epitem *epi; +}; + +struct ep_send_events_data { + int maxevents; + struct epoll_event *events; + int res; +}; + +struct signalfd_siginfo { + __u32 ssi_signo; + __s32 ssi_errno; + __s32 ssi_code; + __u32 ssi_pid; + __u32 ssi_uid; + __s32 ssi_fd; + __u32 ssi_tid; + __u32 ssi_band; + __u32 ssi_overrun; + __u32 ssi_trapno; + __s32 ssi_status; + __s32 ssi_int; + __u64 ssi_ptr; + __u64 ssi_utime; + __u64 ssi_stime; + __u64 ssi_addr; + __u16 ssi_addr_lsb; + __u16 __pad2; + __s32 ssi_syscall; + __u64 ssi_call_addr; + __u32 ssi_arch; + __u8 __pad[28]; +}; + +struct signalfd_ctx { + sigset_t sigmask; +}; + +struct timerfd_ctx { + union { + struct hrtimer tmr; + struct alarm alarm; + } t; + ktime_t tintv; + ktime_t moffs; + wait_queue_head_t wqh; + u64 ticks; + int clockid; + short unsigned int expired; + short unsigned int settime_flags; + struct callback_head rcu; + struct list_head clist; + spinlock_t cancel_lock; + bool might_cancel; +}; + +struct eventfd_ctx___2 { + struct kref kref; + wait_queue_head_t wqh; + __u64 count; + unsigned int flags; + int id; +}; + +enum userfaultfd_state { + UFFD_STATE_WAIT_API = 0, + 
UFFD_STATE_RUNNING = 1, +}; + +struct userfaultfd_ctx { + wait_queue_head_t fault_pending_wqh; + wait_queue_head_t fault_wqh; + wait_queue_head_t fd_wqh; + wait_queue_head_t event_wqh; + seqcount_spinlock_t refile_seq; + refcount_t refcount; + unsigned int flags; + unsigned int features; + enum userfaultfd_state state; + bool released; + bool mmap_changing; + struct mm_struct *mm; +}; + +struct uffd_msg { + __u8 event; + __u8 reserved1; + __u16 reserved2; + __u32 reserved3; + union { + struct { + __u64 flags; + __u64 address; + union { + __u32 ptid; + } feat; + } pagefault; + struct { + __u32 ufd; + } fork; + struct { + __u64 from; + __u64 to; + __u64 len; + } remap; + struct { + __u64 start; + __u64 end; + } remove; + struct { + __u64 reserved1; + __u64 reserved2; + __u64 reserved3; + } reserved; + } arg; +}; + +struct uffdio_api { + __u64 api; + __u64 features; + __u64 ioctls; +}; + +struct uffdio_range { + __u64 start; + __u64 len; +}; + +struct uffdio_register { + struct uffdio_range range; + __u64 mode; + __u64 ioctls; +}; + +struct uffdio_copy { + __u64 dst; + __u64 src; + __u64 len; + __u64 mode; + __s64 copy; +}; + +struct uffdio_zeropage { + struct uffdio_range range; + __u64 mode; + __s64 zeropage; +}; + +struct uffdio_writeprotect { + struct uffdio_range range; + __u64 mode; +}; + +struct userfaultfd_fork_ctx { + struct userfaultfd_ctx *orig; + struct userfaultfd_ctx *new; + struct list_head list; +}; + +struct userfaultfd_unmap_ctx { + struct userfaultfd_ctx *ctx; + long unsigned int start; + long unsigned int end; + struct list_head list; +}; + +struct userfaultfd_wait_queue { + struct uffd_msg msg; + wait_queue_entry_t wq; + struct userfaultfd_ctx *ctx; + bool waken; +}; + +struct userfaultfd_wake_range { + long unsigned int start; + long unsigned int len; +}; + +typedef u32 compat_aio_context_t; + +struct kioctx; + +struct kioctx_table { + struct callback_head rcu; + unsigned int nr; + struct kioctx *table[0]; +}; + +typedef __kernel_ulong_t aio_context_t; + +enum { + IOCB_CMD_PREAD = 0, + IOCB_CMD_PWRITE = 1, + IOCB_CMD_FSYNC = 2, + IOCB_CMD_FDSYNC = 3, + IOCB_CMD_POLL = 5, + IOCB_CMD_NOOP = 6, + IOCB_CMD_PREADV = 7, + IOCB_CMD_PWRITEV = 8, +}; + +struct io_event { + __u64 data; + __u64 obj; + __s64 res; + __s64 res2; +}; + +struct iocb { + __u64 aio_data; + __u32 aio_key; + __kernel_rwf_t aio_rw_flags; + __u16 aio_lio_opcode; + __s16 aio_reqprio; + __u32 aio_fildes; + __u64 aio_buf; + __u64 aio_nbytes; + __s64 aio_offset; + __u64 aio_reserved2; + __u32 aio_flags; + __u32 aio_resfd; +}; + +typedef int kiocb_cancel_fn(struct kiocb *); + +struct aio_ring { + unsigned int id; + unsigned int nr; + unsigned int head; + unsigned int tail; + unsigned int magic; + unsigned int compat_features; + unsigned int incompat_features; + unsigned int header_length; + struct io_event io_events[0]; +}; + +struct kioctx_cpu; + +struct ctx_rq_wait; + +struct kioctx { + struct percpu_ref users; + atomic_t dead; + struct percpu_ref reqs; + long unsigned int user_id; + struct kioctx_cpu *cpu; + unsigned int req_batch; + unsigned int max_reqs; + unsigned int nr_events; + long unsigned int mmap_base; + long unsigned int mmap_size; + struct page **ring_pages; + long int nr_pages; + struct rcu_work free_rwork; + struct ctx_rq_wait *rq_wait; + long: 64; + long: 64; + long: 64; + struct { + atomic_t reqs_available; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + }; + struct { + spinlock_t ctx_lock; + struct list_head active_reqs; + long: 64; + long: 64; 
+ long: 64; + long: 64; + long: 64; + }; + struct { + struct mutex ring_lock; + wait_queue_head_t wait; + long: 64; + }; + struct { + unsigned int tail; + unsigned int completed_events; + spinlock_t completion_lock; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + }; + struct page *internal_pages[8]; + struct file *aio_ring_file; + unsigned int id; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct kioctx_cpu { + unsigned int reqs_available; +}; + +struct ctx_rq_wait { + struct completion comp; + atomic_t count; +}; + +struct fsync_iocb { + struct file *file; + struct work_struct work; + bool datasync; + struct cred *creds; +}; + +struct poll_iocb { + struct file *file; + struct wait_queue_head *head; + __poll_t events; + bool done; + bool cancelled; + struct wait_queue_entry wait; + struct work_struct work; +}; + +struct aio_kiocb { + union { + struct file *ki_filp; + struct kiocb rw; + struct fsync_iocb fsync; + struct poll_iocb poll; + }; + struct kioctx *ki_ctx; + kiocb_cancel_fn *ki_cancel; + struct io_event ki_res; + struct list_head ki_list; + refcount_t ki_refcnt; + struct eventfd_ctx *ki_eventfd; +}; + +struct aio_poll_table { + struct poll_table_struct pt; + struct aio_kiocb *iocb; + int error; +}; + +struct __aio_sigset { + const sigset_t *sigmask; + size_t sigsetsize; +}; + +struct __compat_aio_sigset { + compat_uptr_t sigmask; + compat_size_t sigsetsize; +}; + +typedef s32 compat_ssize_t; + +enum { + PERCPU_REF_INIT_ATOMIC = 1, + PERCPU_REF_INIT_DEAD = 2, + PERCPU_REF_ALLOW_REINIT = 4, +}; + +struct user_msghdr { + void *msg_name; + int msg_namelen; + struct iovec *msg_iov; + __kernel_size_t msg_iovlen; + void *msg_control; + __kernel_size_t msg_controllen; + unsigned int msg_flags; +}; + +struct compat_msghdr { + compat_uptr_t msg_name; + compat_int_t msg_namelen; + compat_uptr_t msg_iov; + compat_size_t msg_iovlen; + compat_uptr_t msg_control; + compat_size_t msg_controllen; + compat_uint_t msg_flags; +}; + +struct scm_fp_list { + short int count; + short int max; + struct user_struct *user; + struct file *fp[253]; +}; + +struct unix_skb_parms { + struct pid *pid; + kuid_t uid; + kgid_t gid; + struct scm_fp_list *fp; + u32 secid; + u32 consumed; +}; + +struct trace_event_raw_io_uring_create { + struct trace_entry ent; + int fd; + void *ctx; + u32 sq_entries; + u32 cq_entries; + u32 flags; + char __data[0]; +}; + +struct trace_event_raw_io_uring_register { + struct trace_entry ent; + void *ctx; + unsigned int opcode; + unsigned int nr_files; + unsigned int nr_bufs; + bool eventfd; + long int ret; + char __data[0]; +}; + +struct trace_event_raw_io_uring_file_get { + struct trace_entry ent; + void *ctx; + int fd; + char __data[0]; +}; + +struct io_wq_work; + +struct trace_event_raw_io_uring_queue_async_work { + struct trace_entry ent; + void *ctx; + int rw; + void *req; + struct io_wq_work *work; + unsigned int flags; + char __data[0]; +}; + +struct io_wq_work_node { + struct io_wq_work_node *next; +}; + +struct io_wq_work { + struct io_wq_work_node list; + struct io_identity *identity; + unsigned int flags; +}; + +struct trace_event_raw_io_uring_defer { + struct trace_entry ent; + void *ctx; + void *req; + long long unsigned int data; + char __data[0]; +}; + +struct trace_event_raw_io_uring_link { + struct trace_entry ent; + void *ctx; + void *req; + void *target_req; + char __data[0]; +}; + +struct trace_event_raw_io_uring_cqring_wait { + struct trace_entry ent; + void *ctx; + int min_events; 
+ char __data[0]; +}; + +struct trace_event_raw_io_uring_fail_link { + struct trace_entry ent; + void *req; + void *link; + char __data[0]; +}; + +struct trace_event_raw_io_uring_complete { + struct trace_entry ent; + void *ctx; + u64 user_data; + long int res; + char __data[0]; +}; + +struct trace_event_raw_io_uring_submit_sqe { + struct trace_entry ent; + void *ctx; + u8 opcode; + u64 user_data; + bool force_nonblock; + bool sq_thread; + char __data[0]; +}; + +struct trace_event_raw_io_uring_poll_arm { + struct trace_entry ent; + void *ctx; + u8 opcode; + u64 user_data; + int mask; + int events; + char __data[0]; +}; + +struct trace_event_raw_io_uring_poll_wake { + struct trace_entry ent; + void *ctx; + u8 opcode; + u64 user_data; + int mask; + char __data[0]; +}; + +struct trace_event_raw_io_uring_task_add { + struct trace_entry ent; + void *ctx; + u8 opcode; + u64 user_data; + int mask; + char __data[0]; +}; + +struct trace_event_raw_io_uring_task_run { + struct trace_entry ent; + void *ctx; + u8 opcode; + u64 user_data; + char __data[0]; +}; + +struct trace_event_data_offsets_io_uring_create {}; + +struct trace_event_data_offsets_io_uring_register {}; + +struct trace_event_data_offsets_io_uring_file_get {}; + +struct trace_event_data_offsets_io_uring_queue_async_work {}; + +struct trace_event_data_offsets_io_uring_defer {}; + +struct trace_event_data_offsets_io_uring_link {}; + +struct trace_event_data_offsets_io_uring_cqring_wait {}; + +struct trace_event_data_offsets_io_uring_fail_link {}; + +struct trace_event_data_offsets_io_uring_complete {}; + +struct trace_event_data_offsets_io_uring_submit_sqe {}; + +struct trace_event_data_offsets_io_uring_poll_arm {}; + +struct trace_event_data_offsets_io_uring_poll_wake {}; + +struct trace_event_data_offsets_io_uring_task_add {}; + +struct trace_event_data_offsets_io_uring_task_run {}; + +typedef void (*btf_trace_io_uring_create)(void *, int, void *, u32, u32, u32); + +typedef void (*btf_trace_io_uring_register)(void *, void *, unsigned int, unsigned int, unsigned int, bool, long int); + +typedef void (*btf_trace_io_uring_file_get)(void *, void *, int); + +typedef void (*btf_trace_io_uring_queue_async_work)(void *, void *, int, void *, struct io_wq_work *, unsigned int); + +typedef void (*btf_trace_io_uring_defer)(void *, void *, void *, long long unsigned int); + +typedef void (*btf_trace_io_uring_link)(void *, void *, void *, void *); + +typedef void (*btf_trace_io_uring_cqring_wait)(void *, void *, int); + +typedef void (*btf_trace_io_uring_fail_link)(void *, void *, void *); + +typedef void (*btf_trace_io_uring_complete)(void *, void *, u64, long int); + +typedef void (*btf_trace_io_uring_submit_sqe)(void *, void *, u8, u64, bool, bool); + +typedef void (*btf_trace_io_uring_poll_arm)(void *, void *, u8, u64, int, int); + +typedef void (*btf_trace_io_uring_poll_wake)(void *, void *, u8, u64, int); + +typedef void (*btf_trace_io_uring_task_add)(void *, void *, u8, u64, int); + +typedef void (*btf_trace_io_uring_task_run)(void *, void *, u8, u64); + +struct io_uring_sqe { + __u8 opcode; + __u8 flags; + __u16 ioprio; + __s32 fd; + union { + __u64 off; + __u64 addr2; + }; + union { + __u64 addr; + __u64 splice_off_in; + }; + __u32 len; + union { + __kernel_rwf_t rw_flags; + __u32 fsync_flags; + __u16 poll_events; + __u32 poll32_events; + __u32 sync_range_flags; + __u32 msg_flags; + __u32 timeout_flags; + __u32 accept_flags; + __u32 cancel_flags; + __u32 open_flags; + __u32 statx_flags; + __u32 fadvise_advice; + __u32 splice_flags; + }; + 
__u64 user_data; + union { + struct { + union { + __u16 buf_index; + __u16 buf_group; + }; + __u16 personality; + __s32 splice_fd_in; + }; + __u64 __pad2[3]; + }; +}; + +enum { + IOSQE_FIXED_FILE_BIT = 0, + IOSQE_IO_DRAIN_BIT = 1, + IOSQE_IO_LINK_BIT = 2, + IOSQE_IO_HARDLINK_BIT = 3, + IOSQE_ASYNC_BIT = 4, + IOSQE_BUFFER_SELECT_BIT = 5, +}; + +enum { + IORING_OP_NOP = 0, + IORING_OP_READV = 1, + IORING_OP_WRITEV = 2, + IORING_OP_FSYNC = 3, + IORING_OP_READ_FIXED = 4, + IORING_OP_WRITE_FIXED = 5, + IORING_OP_POLL_ADD = 6, + IORING_OP_POLL_REMOVE = 7, + IORING_OP_SYNC_FILE_RANGE = 8, + IORING_OP_SENDMSG = 9, + IORING_OP_RECVMSG = 10, + IORING_OP_TIMEOUT = 11, + IORING_OP_TIMEOUT_REMOVE = 12, + IORING_OP_ACCEPT = 13, + IORING_OP_ASYNC_CANCEL = 14, + IORING_OP_LINK_TIMEOUT = 15, + IORING_OP_CONNECT = 16, + IORING_OP_FALLOCATE = 17, + IORING_OP_OPENAT = 18, + IORING_OP_CLOSE = 19, + IORING_OP_FILES_UPDATE = 20, + IORING_OP_STATX = 21, + IORING_OP_READ = 22, + IORING_OP_WRITE = 23, + IORING_OP_FADVISE = 24, + IORING_OP_MADVISE = 25, + IORING_OP_SEND = 26, + IORING_OP_RECV = 27, + IORING_OP_OPENAT2 = 28, + IORING_OP_EPOLL_CTL = 29, + IORING_OP_SPLICE = 30, + IORING_OP_PROVIDE_BUFFERS = 31, + IORING_OP_REMOVE_BUFFERS = 32, + IORING_OP_TEE = 33, + IORING_OP_LAST = 34, +}; + +struct io_uring_cqe { + __u64 user_data; + __s32 res; + __u32 flags; +}; + +enum { + IORING_CQE_BUFFER_SHIFT = 16, +}; + +struct io_sqring_offsets { + __u32 head; + __u32 tail; + __u32 ring_mask; + __u32 ring_entries; + __u32 flags; + __u32 dropped; + __u32 array; + __u32 resv1; + __u64 resv2; +}; + +struct io_cqring_offsets { + __u32 head; + __u32 tail; + __u32 ring_mask; + __u32 ring_entries; + __u32 overflow; + __u32 cqes; + __u32 flags; + __u32 resv1; + __u64 resv2; +}; + +struct io_uring_params { + __u32 sq_entries; + __u32 cq_entries; + __u32 flags; + __u32 sq_thread_cpu; + __u32 sq_thread_idle; + __u32 features; + __u32 wq_fd; + __u32 resv[3]; + struct io_sqring_offsets sq_off; + struct io_cqring_offsets cq_off; +}; + +enum { + IORING_REGISTER_BUFFERS = 0, + IORING_UNREGISTER_BUFFERS = 1, + IORING_REGISTER_FILES = 2, + IORING_UNREGISTER_FILES = 3, + IORING_REGISTER_EVENTFD = 4, + IORING_UNREGISTER_EVENTFD = 5, + IORING_REGISTER_FILES_UPDATE = 6, + IORING_REGISTER_EVENTFD_ASYNC = 7, + IORING_REGISTER_PROBE = 8, + IORING_REGISTER_PERSONALITY = 9, + IORING_UNREGISTER_PERSONALITY = 10, + IORING_REGISTER_RESTRICTIONS = 11, + IORING_REGISTER_ENABLE_RINGS = 12, + IORING_REGISTER_LAST = 13, +}; + +struct io_uring_files_update { + __u32 offset; + __u32 resv; + __u64 fds; +}; + +struct io_uring_probe_op { + __u8 op; + __u8 resv; + __u16 flags; + __u32 resv2; +}; + +struct io_uring_probe { + __u8 last_op; + __u8 ops_len; + __u16 resv; + __u32 resv2[3]; + struct io_uring_probe_op ops[0]; +}; + +struct io_uring_restriction { + __u16 opcode; + union { + __u8 register_op; + __u8 sqe_op; + __u8 sqe_flags; + }; + __u8 resv; + __u32 resv2[3]; +}; + +enum { + IORING_RESTRICTION_REGISTER_OP = 0, + IORING_RESTRICTION_SQE_OP = 1, + IORING_RESTRICTION_SQE_FLAGS_ALLOWED = 2, + IORING_RESTRICTION_SQE_FLAGS_REQUIRED = 3, + IORING_RESTRICTION_LAST = 4, +}; + +enum { + IO_WQ_WORK_CANCEL = 1, + IO_WQ_WORK_HASHED = 2, + IO_WQ_WORK_UNBOUND = 4, + IO_WQ_WORK_NO_CANCEL = 8, + IO_WQ_WORK_CONCURRENT = 16, + IO_WQ_WORK_FILES = 32, + IO_WQ_WORK_FS = 64, + IO_WQ_WORK_MM = 128, + IO_WQ_WORK_CREDS = 256, + IO_WQ_WORK_BLKCG = 512, + IO_WQ_WORK_FSIZE = 1024, + IO_WQ_HASH_SHIFT = 24, +}; + +enum io_wq_cancel { + IO_WQ_CANCEL_OK = 0, + IO_WQ_CANCEL_RUNNING = 1, + 
IO_WQ_CANCEL_NOTFOUND = 2, +}; + +typedef void free_work_fn(struct io_wq_work *); + +typedef struct io_wq_work *io_wq_work_fn(struct io_wq_work *); + +struct io_wq_data { + struct user_struct *user; + io_wq_work_fn *do_work; + free_work_fn *free_work; +}; + +struct io_uring { + u32 head; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + u32 tail; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct io_rings { + struct io_uring sq; + struct io_uring cq; + u32 sq_ring_mask; + u32 cq_ring_mask; + u32 sq_ring_entries; + u32 cq_ring_entries; + u32 sq_dropped; + u32 sq_flags; + u32 cq_flags; + u32 cq_overflow; + long: 64; + long: 64; + long: 64; + long: 64; + struct io_uring_cqe cqes[0]; +}; + +struct io_mapped_ubuf { + u64 ubuf; + size_t len; + struct bio_vec *bvec; + unsigned int nr_bvecs; + long unsigned int acct_pages; +}; + +struct fixed_file_table { + struct file **files; +}; + +struct fixed_file_data; + +struct fixed_file_ref_node { + struct percpu_ref refs; + struct list_head node; + struct list_head file_list; + struct fixed_file_data *file_data; + struct llist_node llist; + bool done; +}; + +struct io_ring_ctx; + +struct fixed_file_data { + struct fixed_file_table *table; + struct io_ring_ctx *ctx; + struct fixed_file_ref_node *node; + struct percpu_ref refs; + struct completion done; + struct list_head ref_list; + spinlock_t lock; +}; + +struct io_wq; + +struct io_restriction { + long unsigned int register_op[1]; + long unsigned int sqe_op[1]; + u8 sqe_flags_allowed; + u8 sqe_flags_required; + bool registered; +}; + +struct io_sq_data; + +struct io_kiocb; + +struct io_ring_ctx { + struct { + struct percpu_ref refs; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + }; + struct { + unsigned int flags; + unsigned int compat: 1; + unsigned int limit_mem: 1; + unsigned int cq_overflow_flushed: 1; + unsigned int drain_next: 1; + unsigned int eventfd_async: 1; + unsigned int restricted: 1; + unsigned int sqo_dead: 1; + u32 *sq_array; + unsigned int cached_sq_head; + unsigned int sq_entries; + unsigned int sq_mask; + unsigned int sq_thread_idle; + unsigned int cached_sq_dropped; + unsigned int cached_cq_overflow; + long unsigned int sq_check_overflow; + struct list_head defer_list; + struct list_head timeout_list; + struct list_head cq_overflow_list; + wait_queue_head_t inflight_wait; + struct io_uring_sqe *sq_sqes; + }; + struct io_rings *rings; + struct io_wq *io_wq; + struct task_struct *sqo_task; + struct mm_struct *mm_account; + struct cgroup_subsys_state *sqo_blkcg_css; + struct io_sq_data *sq_data; + struct wait_queue_head sqo_sq_wait; + struct wait_queue_entry sqo_wait_entry; + struct list_head sqd_list; + struct fixed_file_data *file_data; + unsigned int nr_user_files; + unsigned int nr_user_bufs; + struct io_mapped_ubuf *user_bufs; + struct user_struct *user; + const struct cred *creds; + kuid_t loginuid; + unsigned int sessionid; + struct completion ref_comp; + struct completion sq_thread_comp; + struct io_kiocb *fallback_req; + struct socket *ring_sock; + struct idr io_buffer_idr; + struct idr personality_idr; + long: 64; + long: 64; + struct { + unsigned int cached_cq_tail; + unsigned int cq_entries; + unsigned int cq_mask; + atomic_t cq_timeouts; + unsigned int cq_last_tm_flush; + long unsigned int cq_check_overflow; + struct wait_queue_head cq_wait; + struct fasync_struct *cq_fasync; + struct eventfd_ctx *cq_ev_fd; + long: 64; + long: 64; + long: 64; 
+ long: 64; + long: 64; + long: 64; + long: 64; + }; + struct { + struct mutex uring_lock; + wait_queue_head_t wait; + long: 64; + }; + struct { + spinlock_t completion_lock; + struct list_head iopoll_list; + struct hlist_head *cancel_hash; + unsigned int cancel_hash_bits; + bool poll_multi_file; + spinlock_t inflight_lock; + struct list_head inflight_list; + }; + struct delayed_work file_put_work; + struct llist_head file_put_llist; + struct work_struct exit_work; + struct io_restriction restrictions; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct io_buffer { + struct list_head list; + __u64 addr; + __s32 len; + __u16 bid; +}; + +struct io_sq_data { + refcount_t refs; + struct mutex lock; + struct list_head ctx_list; + struct list_head ctx_new_list; + struct mutex ctx_lock; + struct task_struct *thread; + struct wait_queue_head wait; +}; + +struct io_rw { + struct kiocb kiocb; + u64 addr; + u64 len; +}; + +struct io_poll_iocb { + struct file *file; + union { + struct wait_queue_head *head; + u64 addr; + }; + __poll_t events; + bool done; + bool canceled; + struct wait_queue_entry wait; +}; + +struct io_accept { + struct file *file; + struct sockaddr *addr; + int *addr_len; + int flags; + long unsigned int nofile; +}; + +struct io_sync { + struct file *file; + loff_t len; + loff_t off; + int flags; + int mode; +}; + +struct io_cancel { + struct file *file; + u64 addr; +}; + +struct io_timeout { + struct file *file; + u32 off; + u32 target_seq; + struct list_head list; +}; + +struct io_timeout_rem { + struct file *file; + u64 addr; +}; + +struct io_connect { + struct file *file; + struct sockaddr *addr; + int addr_len; +}; + +struct io_sr_msg { + struct file *file; + union { + struct user_msghdr *umsg; + void *buf; + }; + int msg_flags; + int bgid; + size_t len; + struct io_buffer *kbuf; +}; + +struct io_open { + struct file *file; + int dfd; + bool ignore_nonblock; + struct filename *filename; + struct open_how how; + long unsigned int nofile; +}; + +struct io_close { + struct file *file; + struct file *put_file; + int fd; +}; + +struct io_files_update { + struct file *file; + u64 arg; + u32 nr_args; + u32 offset; +}; + +struct io_fadvise { + struct file *file; + u64 offset; + u32 len; + u32 advice; +}; + +struct io_madvise { + struct file *file; + u64 addr; + u32 len; + u32 advice; +}; + +struct io_epoll { + struct file *file; + int epfd; + int op; + int fd; + struct epoll_event event; +}; + +struct io_splice { + struct file *file_out; + struct file *file_in; + loff_t off_out; + loff_t off_in; + u64 len; + unsigned int flags; +}; + +struct io_provide_buf { + struct file *file; + __u64 addr; + __s32 len; + __u32 bgid; + __u16 nbufs; + __u16 bid; +}; + +struct io_statx { + struct file *file; + int dfd; + unsigned int mask; + unsigned int flags; + const char *filename; + struct statx *buffer; +}; + +struct io_completion { + struct file *file; + struct list_head list; + int cflags; +}; + +struct async_poll; + +struct io_kiocb { + union { + struct file *file; + struct io_rw rw; + struct io_poll_iocb poll; + struct io_accept accept; + struct io_sync sync; + struct io_cancel cancel; + struct io_timeout timeout; + struct io_timeout_rem timeout_rem; + struct io_connect connect; + struct io_sr_msg sr_msg; + struct io_open open; + struct io_close close; + struct io_files_update files_update; + struct io_fadvise fadvise; + struct io_madvise madvise; + struct io_epoll epoll; + struct io_splice splice; + struct io_provide_buf pbuf; + struct io_statx statx; + struct 
io_completion compl; + }; + void *async_data; + u8 opcode; + u8 iopoll_completed; + u16 buf_index; + u32 result; + struct io_ring_ctx *ctx; + unsigned int flags; + refcount_t refs; + struct task_struct *task; + u64 user_data; + struct list_head link_list; + struct list_head inflight_entry; + struct percpu_ref *fixed_file_refs; + struct callback_head task_work; + struct hlist_node hash_node; + struct async_poll *apoll; + struct io_wq_work work; +}; + +struct io_timeout_data { + struct io_kiocb *req; + struct hrtimer timer; + struct timespec64 ts; + enum hrtimer_mode mode; +}; + +struct io_async_connect { + struct __kernel_sockaddr_storage address; +}; + +struct io_async_msghdr { + struct iovec fast_iov[8]; + struct iovec *iov; + struct sockaddr *uaddr; + struct msghdr msg; + struct __kernel_sockaddr_storage addr; +}; + +struct io_async_rw { + struct iovec fast_iov[8]; + const struct iovec *free_iovec; + struct iov_iter iter; + size_t bytes_done; + struct wait_page_queue wpq; +}; + +enum { + REQ_F_FIXED_FILE_BIT = 0, + REQ_F_IO_DRAIN_BIT = 1, + REQ_F_LINK_BIT = 2, + REQ_F_HARDLINK_BIT = 3, + REQ_F_FORCE_ASYNC_BIT = 4, + REQ_F_BUFFER_SELECT_BIT = 5, + REQ_F_LINK_HEAD_BIT = 6, + REQ_F_FAIL_LINK_BIT = 7, + REQ_F_INFLIGHT_BIT = 8, + REQ_F_CUR_POS_BIT = 9, + REQ_F_NOWAIT_BIT = 10, + REQ_F_LINK_TIMEOUT_BIT = 11, + REQ_F_ISREG_BIT = 12, + REQ_F_NEED_CLEANUP_BIT = 13, + REQ_F_POLLED_BIT = 14, + REQ_F_BUFFER_SELECTED_BIT = 15, + REQ_F_NO_FILE_TABLE_BIT = 16, + REQ_F_WORK_INITIALIZED_BIT = 17, + REQ_F_LTIMEOUT_ACTIVE_BIT = 18, + __REQ_F_LAST_BIT = 19, +}; + +enum { + REQ_F_FIXED_FILE = 1, + REQ_F_IO_DRAIN = 2, + REQ_F_LINK = 4, + REQ_F_HARDLINK = 8, + REQ_F_FORCE_ASYNC = 16, + REQ_F_BUFFER_SELECT = 32, + REQ_F_LINK_HEAD = 64, + REQ_F_FAIL_LINK = 128, + REQ_F_INFLIGHT = 256, + REQ_F_CUR_POS = 512, + REQ_F_NOWAIT = 1024, + REQ_F_LINK_TIMEOUT = 2048, + REQ_F_ISREG = 4096, + REQ_F_NEED_CLEANUP = 8192, + REQ_F_POLLED = 16384, + REQ_F_BUFFER_SELECTED = 32768, + REQ_F_NO_FILE_TABLE = 65536, + REQ_F_WORK_INITIALIZED = 131072, + REQ_F_LTIMEOUT_ACTIVE = 262144, +}; + +struct async_poll { + struct io_poll_iocb poll; + struct io_poll_iocb *double_poll; +}; + +struct io_defer_entry { + struct list_head list; + struct io_kiocb *req; + u32 seq; +}; + +struct io_comp_state { + unsigned int nr; + struct list_head list; + struct io_ring_ctx *ctx; +}; + +struct io_submit_state { + struct blk_plug plug; + void *reqs[8]; + unsigned int free_reqs; + struct io_comp_state comp; + struct file *file; + unsigned int fd; + unsigned int has_refs; + unsigned int ios_left; +}; + +struct io_op_def { + unsigned int needs_file: 1; + unsigned int needs_file_no_error: 1; + unsigned int hash_reg_file: 1; + unsigned int unbound_nonreg_file: 1; + unsigned int not_supported: 1; + unsigned int pollin: 1; + unsigned int pollout: 1; + unsigned int buffer_select: 1; + unsigned int needs_async_data: 1; + short unsigned int async_size; + unsigned int work_flags; +}; + +enum io_mem_account { + ACCT_LOCKED = 0, + ACCT_PINNED = 1, +}; + +struct req_batch { + void *reqs[8]; + int to_free; + struct task_struct *task; + int task_refs; +}; + +struct io_poll_table { + struct poll_table_struct pt; + struct io_kiocb *req; + int error; +}; + +enum sq_ret { + SQT_IDLE = 1, + SQT_SPIN = 2, + SQT_DID_WORK = 4, +}; + +struct io_wait_queue { + struct wait_queue_entry wq; + struct io_ring_ctx *ctx; + unsigned int to_wait; + unsigned int nr_timeouts; +}; + +struct io_file_put { + struct list_head list; + struct file *file; +}; + +struct io_wq_work_list { + struct 
io_wq_work_node *first; + struct io_wq_work_node *last; +}; + +typedef bool work_cancel_fn(struct io_wq_work *, void *); + +enum { + IO_WORKER_F_UP = 1, + IO_WORKER_F_RUNNING = 2, + IO_WORKER_F_FREE = 4, + IO_WORKER_F_FIXED = 8, + IO_WORKER_F_BOUND = 16, +}; + +enum { + IO_WQ_BIT_EXIT = 0, + IO_WQ_BIT_CANCEL = 1, + IO_WQ_BIT_ERROR = 2, +}; + +enum { + IO_WQE_FLAG_STALLED = 1, +}; + +struct io_wqe; + +struct io_worker { + refcount_t ref; + unsigned int flags; + struct hlist_nulls_node nulls_node; + struct list_head all_list; + struct task_struct *task; + struct io_wqe *wqe; + struct io_wq_work *cur_work; + spinlock_t lock; + struct callback_head rcu; + struct mm_struct *mm; + struct cgroup_subsys_state *blkcg_css; + const struct cred *cur_creds; + const struct cred *saved_creds; + struct files_struct *restore_files; + struct nsproxy *restore_nsproxy; + struct fs_struct *restore_fs; +}; + +struct io_wqe_acct { + unsigned int nr_workers; + unsigned int max_workers; + atomic_t nr_running; +}; + +struct io_wq___2; + +struct io_wqe { + struct { + raw_spinlock_t lock; + struct io_wq_work_list work_list; + long unsigned int hash_map; + unsigned int flags; + long: 32; + long: 64; + long: 64; + long: 64; + }; + int node; + struct io_wqe_acct acct[2]; + struct hlist_nulls_head free_list; + struct list_head all_list; + struct io_wq___2 *wq; + struct io_wq_work *hash_tail[64]; +}; + +enum { + IO_WQ_ACCT_BOUND = 0, + IO_WQ_ACCT_UNBOUND = 1, +}; + +struct io_wq___2 { + struct io_wqe **wqes; + long unsigned int state; + free_work_fn *free_work; + io_wq_work_fn *do_work; + struct task_struct *manager; + struct user_struct *user; + refcount_t refs; + struct completion done; + struct hlist_node cpuhp_node; + refcount_t use_refs; +}; + +struct io_cb_cancel_data { + work_cancel_fn *fn; + void *data; + int nr_running; + int nr_pending; + bool cancel_all; +}; + +struct iomap_ops { + int (*iomap_begin)(struct inode *, loff_t, loff_t, unsigned int, struct iomap___2 *, struct iomap___2 *); + int (*iomap_end)(struct inode *, loff_t, loff_t, ssize_t, unsigned int, struct iomap___2 *); +}; + +struct trace_event_raw_dax_pmd_fault_class { + struct trace_entry ent; + long unsigned int ino; + long unsigned int vm_start; + long unsigned int vm_end; + long unsigned int vm_flags; + long unsigned int address; + long unsigned int pgoff; + long unsigned int max_pgoff; + dev_t dev; + unsigned int flags; + int result; + char __data[0]; +}; + +struct trace_event_raw_dax_pmd_load_hole_class { + struct trace_entry ent; + long unsigned int ino; + long unsigned int vm_flags; + long unsigned int address; + struct page *zero_page; + void *radix_entry; + dev_t dev; + char __data[0]; +}; + +struct trace_event_raw_dax_pmd_insert_mapping_class { + struct trace_entry ent; + long unsigned int ino; + long unsigned int vm_flags; + long unsigned int address; + long int length; + u64 pfn_val; + void *radix_entry; + dev_t dev; + int write; + char __data[0]; +}; + +struct trace_event_raw_dax_pte_fault_class { + struct trace_entry ent; + long unsigned int ino; + long unsigned int vm_flags; + long unsigned int address; + long unsigned int pgoff; + dev_t dev; + unsigned int flags; + int result; + char __data[0]; +}; + +struct trace_event_raw_dax_insert_mapping { + struct trace_entry ent; + long unsigned int ino; + long unsigned int vm_flags; + long unsigned int address; + void *radix_entry; + dev_t dev; + int write; + char __data[0]; +}; + +struct trace_event_raw_dax_writeback_range_class { + struct trace_entry ent; + long unsigned int ino; + long 
unsigned int start_index; + long unsigned int end_index; + dev_t dev; + char __data[0]; +}; + +struct trace_event_raw_dax_writeback_one { + struct trace_entry ent; + long unsigned int ino; + long unsigned int pgoff; + long unsigned int pglen; + dev_t dev; + char __data[0]; +}; + +struct trace_event_data_offsets_dax_pmd_fault_class {}; + +struct trace_event_data_offsets_dax_pmd_load_hole_class {}; + +struct trace_event_data_offsets_dax_pmd_insert_mapping_class {}; + +struct trace_event_data_offsets_dax_pte_fault_class {}; + +struct trace_event_data_offsets_dax_insert_mapping {}; + +struct trace_event_data_offsets_dax_writeback_range_class {}; + +struct trace_event_data_offsets_dax_writeback_one {}; + +typedef void (*btf_trace_dax_pmd_fault)(void *, struct inode *, struct vm_fault *, long unsigned int, int); + +typedef void (*btf_trace_dax_pmd_fault_done)(void *, struct inode *, struct vm_fault *, long unsigned int, int); + +typedef void (*btf_trace_dax_pmd_load_hole)(void *, struct inode *, struct vm_fault *, struct page *, void *); + +typedef void (*btf_trace_dax_pmd_load_hole_fallback)(void *, struct inode *, struct vm_fault *, struct page *, void *); + +typedef void (*btf_trace_dax_pmd_insert_mapping)(void *, struct inode *, struct vm_fault *, long int, pfn_t, void *); + +typedef void (*btf_trace_dax_pte_fault)(void *, struct inode *, struct vm_fault *, int); + +typedef void (*btf_trace_dax_pte_fault_done)(void *, struct inode *, struct vm_fault *, int); + +typedef void (*btf_trace_dax_load_hole)(void *, struct inode *, struct vm_fault *, int); + +typedef void (*btf_trace_dax_insert_pfn_mkwrite_no_entry)(void *, struct inode *, struct vm_fault *, int); + +typedef void (*btf_trace_dax_insert_pfn_mkwrite)(void *, struct inode *, struct vm_fault *, int); + +typedef void (*btf_trace_dax_insert_mapping)(void *, struct inode *, struct vm_fault *, void *); + +typedef void (*btf_trace_dax_writeback_range)(void *, struct inode *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_dax_writeback_range_done)(void *, struct inode *, long unsigned int, long unsigned int); + +typedef void (*btf_trace_dax_writeback_one)(void *, struct inode *, long unsigned int, long unsigned int); + +struct exceptional_entry_key { + struct xarray *xa; + long unsigned int entry_start; +}; + +struct wait_exceptional_entry_queue { + wait_queue_entry_t wait; + struct exceptional_entry_key key; +}; + +struct crypto_skcipher; + +struct fscrypt_blk_crypto_key; + +struct fscrypt_prepared_key { + struct crypto_skcipher *tfm; + struct fscrypt_blk_crypto_key *blk_key; +}; + +struct fscrypt_mode; + +struct fscrypt_direct_key; + +struct fscrypt_info { + struct fscrypt_prepared_key ci_enc_key; + bool ci_owns_key; + bool ci_inlinecrypt; + struct fscrypt_mode *ci_mode; + struct inode *ci_inode; + struct key *ci_master_key; + struct list_head ci_master_key_link; + struct fscrypt_direct_key *ci_direct_key; + siphash_key_t ci_dirhash_key; + bool ci_dirhash_key_initialized; + union fscrypt_policy ci_policy; + u8 ci_nonce[16]; + u32 ci_hashed_ino; +}; + +struct crypto_async_request; + +typedef void (*crypto_completion_t)(struct crypto_async_request *, int); + +struct crypto_async_request { + struct list_head list; + crypto_completion_t complete; + void *data; + struct crypto_tfm *tfm; + u32 flags; +}; + +struct crypto_wait { + struct completion completion; + int err; +}; + +struct skcipher_request { + unsigned int cryptlen; + u8 *iv; + struct scatterlist *src; + struct scatterlist *dst; + struct crypto_async_request base; + 
long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + void *__ctx[0]; +}; + +struct crypto_skcipher { + unsigned int reqsize; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct crypto_tfm base; +}; + +struct fscrypt_mode { + const char *friendly_name; + const char *cipher_str; + int keysize; + int ivsize; + int logged_impl_name; + enum blk_crypto_mode_num blk_crypto_mode; +}; + +typedef enum { + FS_DECRYPT = 0, + FS_ENCRYPT = 1, +} fscrypt_direction_t; + +union fscrypt_iv { + struct { + __le64 lblk_num; + u8 nonce[16]; + }; + u8 raw[32]; + __le64 dun[4]; +}; + +struct fscrypt_str { + unsigned char *name; + u32 len; +}; + +struct fscrypt_name { + const struct qstr *usr_fname; + struct fscrypt_str disk_name; + u32 hash; + u32 minor_hash; + struct fscrypt_str crypto_buf; + bool is_nokey_name; +}; + +struct fscrypt_nokey_name { + u32 dirhash[2]; + u8 bytes[149]; + u8 sha256[32]; +}; + +struct shash_alg { + int (*init)(struct shash_desc *); + int (*update)(struct shash_desc *, const u8 *, unsigned int); + int (*final)(struct shash_desc *, u8 *); + int (*finup)(struct shash_desc *, const u8 *, unsigned int, u8 *); + int (*digest)(struct shash_desc *, const u8 *, unsigned int, u8 *); + int (*export)(struct shash_desc *, void *); + int (*import)(struct shash_desc *, const void *); + int (*setkey)(struct crypto_shash *, const u8 *, unsigned int); + int (*init_tfm)(struct crypto_shash *); + void (*exit_tfm)(struct crypto_shash *); + unsigned int descsize; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + unsigned int digestsize; + unsigned int statesize; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct crypto_alg base; +}; + +struct fscrypt_hkdf { + struct crypto_shash *hmac_tfm; +}; + +struct fscrypt_key_specifier { + __u32 type; + __u32 __reserved; + union { + __u8 __reserved[32]; + __u8 descriptor[8]; + __u8 identifier[16]; + } u; +}; + +struct fscrypt_symlink_data { + __le16 len; + char encrypted_path[1]; +} __attribute__((packed)); + +struct fscrypt_master_key_secret { + struct fscrypt_hkdf hkdf; + u32 size; + u8 raw[64]; +}; + +struct fscrypt_master_key { + struct fscrypt_master_key_secret mk_secret; + struct rw_semaphore mk_secret_sem; + struct fscrypt_key_specifier mk_spec; + struct key *mk_users; + refcount_t mk_refcount; + struct list_head mk_decrypted_inodes; + spinlock_t mk_decrypted_inodes_lock; + struct fscrypt_prepared_key mk_direct_keys[10]; + struct fscrypt_prepared_key mk_iv_ino_lblk_64_keys[10]; + struct fscrypt_prepared_key mk_iv_ino_lblk_32_keys[10]; + siphash_key_t mk_ino_hash_key; + bool mk_ino_hash_key_initialized; +}; + +enum key_state { + KEY_IS_UNINSTANTIATED = 0, + KEY_IS_POSITIVE = 1, +}; + +struct fscrypt_provisioning_key_payload { + __u32 type; + __u32 __reserved; + __u8 raw[0]; +}; + +struct fscrypt_add_key_arg { + struct fscrypt_key_specifier key_spec; + __u32 raw_size; + __u32 key_id; + __u32 __reserved[8]; + __u8 raw[0]; +}; + +struct fscrypt_remove_key_arg { + struct fscrypt_key_specifier key_spec; + __u32 removal_status_flags; + __u32 __reserved[5]; +}; + +struct fscrypt_get_key_status_arg { + struct fscrypt_key_specifier key_spec; + __u32 __reserved[6]; + __u32 status; + __u32 status_flags; + __u32 user_count; + __u32 
__out_reserved[13]; +}; + +struct skcipher_alg { + int (*setkey)(struct crypto_skcipher *, const u8 *, unsigned int); + int (*encrypt)(struct skcipher_request *); + int (*decrypt)(struct skcipher_request *); + int (*init)(struct crypto_skcipher *); + void (*exit)(struct crypto_skcipher *); + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; + unsigned int chunksize; + unsigned int walksize; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct crypto_alg base; +}; + +struct fscrypt_context_v1 { + u8 version; + u8 contents_encryption_mode; + u8 filenames_encryption_mode; + u8 flags; + u8 master_key_descriptor[8]; + u8 nonce[16]; +}; + +struct fscrypt_context_v2 { + u8 version; + u8 contents_encryption_mode; + u8 filenames_encryption_mode; + u8 flags; + u8 __reserved[4]; + u8 master_key_identifier[16]; + u8 nonce[16]; +}; + +union fscrypt_context { + u8 version; + struct fscrypt_context_v1 v1; + struct fscrypt_context_v2 v2; +}; + +struct crypto_template; + +struct crypto_spawn; + +struct crypto_instance { + struct crypto_alg alg; + struct crypto_template *tmpl; + union { + struct hlist_node list; + struct crypto_spawn *spawns; + }; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + void *__ctx[0]; +}; + +struct crypto_spawn { + struct list_head list; + struct crypto_alg *alg; + union { + struct crypto_instance *inst; + struct crypto_spawn *next; + }; + const struct crypto_type *frontend; + u32 mask; + bool dead; + bool registered; +}; + +struct rtattr; + +struct crypto_template { + struct list_head list; + struct hlist_head instances; + struct module *module; + int (*create)(struct crypto_template *, struct rtattr **); + char name[128]; +}; + +struct user_key_payload { + struct callback_head rcu; + short unsigned int datalen; + long: 48; + char data[0]; +}; + +struct fscrypt_key { + __u32 mode; + __u8 raw[64]; + __u32 size; +}; + +struct fscrypt_direct_key { + struct hlist_node dk_node; + refcount_t dk_refcount; + const struct fscrypt_mode *dk_mode; + struct fscrypt_prepared_key dk_key; + u8 dk_descriptor[8]; + u8 dk_raw[64]; +}; + +struct fscrypt_get_policy_ex_arg { + __u64 policy_size; + union { + __u8 version; + struct fscrypt_policy_v1 v1; + struct fscrypt_policy_v2 v2; + } policy; +}; + +struct fscrypt_dummy_policy { + const union fscrypt_policy *policy; +}; + +struct fscrypt_blk_crypto_key { + struct blk_crypto_key base; + int num_devs; + struct request_queue *devs[0]; +}; + +struct fsverity_hash_alg; + +struct merkle_tree_params { + struct fsverity_hash_alg *hash_alg; + const u8 *hashstate; + unsigned int digest_size; + unsigned int block_size; + unsigned int hashes_per_block; + unsigned int log_blocksize; + unsigned int log_arity; + unsigned int num_levels; + u64 tree_size; + long unsigned int level0_blocks; + u64 level_start[8]; +}; + +struct fsverity_info { + struct merkle_tree_params tree_params; + u8 root_hash[64]; + u8 measurement[64]; + const struct inode *inode; +}; + +struct fsverity_enable_arg { + __u32 version; + __u32 hash_algorithm; + __u32 block_size; + __u32 salt_size; + __u64 salt_ptr; + __u32 sig_size; + __u32 __reserved1; + __u64 sig_ptr; + __u64 __reserved2[11]; +}; + +struct crypto_ahash; + +struct fsverity_hash_alg { + struct crypto_ahash *tfm; + const char *name; + unsigned int digest_size; + unsigned int block_size; + mempool_t req_pool; +}; + +struct ahash_request; + 
+struct crypto_ahash { + int (*init)(struct ahash_request *); + int (*update)(struct ahash_request *); + int (*final)(struct ahash_request *); + int (*finup)(struct ahash_request *); + int (*digest)(struct ahash_request *); + int (*export)(struct ahash_request *, void *); + int (*import)(struct ahash_request *, const void *); + int (*setkey)(struct crypto_ahash *, const u8 *, unsigned int); + unsigned int reqsize; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct crypto_tfm base; +}; + +struct fsverity_descriptor { + __u8 version; + __u8 hash_algorithm; + __u8 log_blocksize; + __u8 salt_size; + __le32 sig_size; + __le64 data_size; + __u8 root_hash[64]; + __u8 salt[32]; + __u8 __reserved[144]; + __u8 signature[0]; +}; + +struct ahash_request { + struct crypto_async_request base; + unsigned int nbytes; + struct scatterlist *src; + u8 *result; + void *priv; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + void *__ctx[0]; +}; + +struct hash_alg_common { + unsigned int digestsize; + unsigned int statesize; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct crypto_alg base; +}; + +struct fsverity_digest { + __u16 digest_algorithm; + __u16 digest_size; + __u8 digest[0]; +}; + +struct flock64 { + short int l_type; + short int l_whence; + __kernel_loff_t l_start; + __kernel_loff_t l_len; + __kernel_pid_t l_pid; +}; + +struct trace_event_raw_locks_get_lock_context { + struct trace_entry ent; + long unsigned int i_ino; + dev_t s_dev; + unsigned char type; + struct file_lock_context *ctx; + char __data[0]; +}; + +struct trace_event_raw_filelock_lock { + struct trace_entry ent; + struct file_lock *fl; + long unsigned int i_ino; + dev_t s_dev; + struct file_lock *fl_blocker; + fl_owner_t fl_owner; + unsigned int fl_pid; + unsigned int fl_flags; + unsigned char fl_type; + loff_t fl_start; + loff_t fl_end; + int ret; + char __data[0]; +}; + +struct trace_event_raw_filelock_lease { + struct trace_entry ent; + struct file_lock *fl; + long unsigned int i_ino; + dev_t s_dev; + struct file_lock *fl_blocker; + fl_owner_t fl_owner; + unsigned int fl_flags; + unsigned char fl_type; + long unsigned int fl_break_time; + long unsigned int fl_downgrade_time; + char __data[0]; +}; + +struct trace_event_raw_generic_add_lease { + struct trace_entry ent; + long unsigned int i_ino; + int wcount; + int rcount; + int icount; + dev_t s_dev; + fl_owner_t fl_owner; + unsigned int fl_flags; + unsigned char fl_type; + char __data[0]; +}; + +struct trace_event_raw_leases_conflict { + struct trace_entry ent; + void *lease; + void *breaker; + unsigned int l_fl_flags; + unsigned int b_fl_flags; + unsigned char l_fl_type; + unsigned char b_fl_type; + bool conflict; + char __data[0]; +}; + +struct trace_event_data_offsets_locks_get_lock_context {}; + +struct trace_event_data_offsets_filelock_lock {}; + +struct trace_event_data_offsets_filelock_lease {}; + +struct trace_event_data_offsets_generic_add_lease {}; + +struct trace_event_data_offsets_leases_conflict {}; + +typedef void (*btf_trace_locks_get_lock_context)(void *, struct inode *, int, struct file_lock_context *); + +typedef void (*btf_trace_posix_lock_inode)(void *, struct inode *, struct file_lock *, int); + +typedef void (*btf_trace_fcntl_setlk)(void *, struct inode *, struct file_lock *, int); + +typedef void (*btf_trace_locks_remove_posix)(void *, struct 
inode *, struct file_lock *, int); + +typedef void (*btf_trace_flock_lock_inode)(void *, struct inode *, struct file_lock *, int); + +typedef void (*btf_trace_break_lease_noblock)(void *, struct inode *, struct file_lock *); + +typedef void (*btf_trace_break_lease_block)(void *, struct inode *, struct file_lock *); + +typedef void (*btf_trace_break_lease_unblock)(void *, struct inode *, struct file_lock *); + +typedef void (*btf_trace_generic_delete_lease)(void *, struct inode *, struct file_lock *); + +typedef void (*btf_trace_time_out_leases)(void *, struct inode *, struct file_lock *); + +typedef void (*btf_trace_generic_add_lease)(void *, struct inode *, struct file_lock *); + +typedef void (*btf_trace_leases_conflict)(void *, bool, struct file_lock *, struct file_lock *); + +struct file_lock_list_struct { + spinlock_t lock; + struct hlist_head hlist; +}; + +struct locks_iterator { + int li_cpu; + loff_t li_pos; +}; + +typedef unsigned int __kernel_uid_t; + +typedef unsigned int __kernel_gid_t; + +struct gnu_property { + u32 pr_type; + u32 pr_datasz; +}; + +struct elf_prpsinfo { + char pr_state; + char pr_sname; + char pr_zomb; + char pr_nice; + long unsigned int pr_flag; + __kernel_uid_t pr_uid; + __kernel_gid_t pr_gid; + pid_t pr_pid; + pid_t pr_ppid; + pid_t pr_pgrp; + pid_t pr_sid; + char pr_fname[16]; + char pr_psargs[80]; +}; + +struct core_vma_metadata { + long unsigned int start; + long unsigned int end; + long unsigned int flags; + long unsigned int dump_size; +}; + +struct memelfnote { + const char *name; + int type; + unsigned int datasz; + void *data; +}; + +struct elf_thread_core_info { + struct elf_thread_core_info *next; + struct task_struct *task; + struct elf_prstatus prstatus; + struct memelfnote notes[0]; +}; + +struct elf_note_info { + struct elf_thread_core_info *thread; + struct memelfnote psinfo; + struct memelfnote signote; + struct memelfnote auxv; + struct memelfnote files; + siginfo_t csigdata; + size_t size; + int thread_notes; +}; + +struct elf32_shdr { + Elf32_Word sh_name; + Elf32_Word sh_type; + Elf32_Word sh_flags; + Elf32_Addr sh_addr; + Elf32_Off sh_offset; + Elf32_Word sh_size; + Elf32_Word sh_link; + Elf32_Word sh_info; + Elf32_Word sh_addralign; + Elf32_Word sh_entsize; +}; + +typedef u16 __compat_uid_t; + +typedef u16 __compat_gid_t; + +typedef unsigned int compat_elf_greg_t; + +typedef compat_elf_greg_t compat_elf_gregset_t[18]; + +struct compat_elf_siginfo { + compat_int_t si_signo; + compat_int_t si_code; + compat_int_t si_errno; +}; + +struct compat_elf_prstatus { + struct compat_elf_siginfo pr_info; + short int pr_cursig; + compat_ulong_t pr_sigpend; + compat_ulong_t pr_sighold; + compat_pid_t pr_pid; + compat_pid_t pr_ppid; + compat_pid_t pr_pgrp; + compat_pid_t pr_sid; + struct old_timeval32 pr_utime; + struct old_timeval32 pr_stime; + struct old_timeval32 pr_cutime; + struct old_timeval32 pr_cstime; + compat_elf_gregset_t pr_reg; + compat_int_t pr_fpvalid; +}; + +struct compat_elf_prpsinfo { + char pr_state; + char pr_sname; + char pr_zomb; + char pr_nice; + compat_ulong_t pr_flag; + __compat_uid_t pr_uid; + __compat_gid_t pr_gid; + compat_pid_t pr_pid; + compat_pid_t pr_ppid; + compat_pid_t pr_pgrp; + compat_pid_t pr_sid; + char pr_fname[16]; + char pr_psargs[80]; +}; + +struct elf_thread_core_info___2 { + struct elf_thread_core_info___2 *next; + struct task_struct *task; + struct compat_elf_prstatus prstatus; + struct memelfnote notes[0]; +}; + +struct elf_note_info___2 { + struct elf_thread_core_info___2 *thread; + struct memelfnote 
psinfo; + struct memelfnote signote; + struct memelfnote auxv; + struct memelfnote files; + compat_siginfo_t csigdata; + size_t size; + int thread_notes; +}; + +struct mb_cache_entry { + struct list_head e_list; + struct hlist_bl_node e_hash_list; + atomic_t e_refcnt; + u32 e_key; + u32 e_referenced: 1; + u32 e_reusable: 1; + u64 e_value; +}; + +struct mb_cache { + struct hlist_bl_head *c_hash; + int c_bucket_bits; + long unsigned int c_max_entries; + spinlock_t c_list_lock; + struct list_head c_list; + long unsigned int c_entry_count; + struct shrinker c_shrink; + struct work_struct c_shrink_work; +}; + +struct posix_acl_xattr_entry { + __le16 e_tag; + __le16 e_perm; + __le32 e_id; +}; + +struct posix_acl_xattr_header { + __le32 a_version; +}; + +struct core_name { + char *corename; + int used; + int size; +}; + +struct trace_event_raw_iomap_readpage_class { + struct trace_entry ent; + dev_t dev; + u64 ino; + int nr_pages; + char __data[0]; +}; + +struct trace_event_raw_iomap_range_class { + struct trace_entry ent; + dev_t dev; + u64 ino; + loff_t size; + long unsigned int offset; + unsigned int length; + char __data[0]; +}; + +struct trace_event_raw_iomap_class { + struct trace_entry ent; + dev_t dev; + u64 ino; + u64 addr; + loff_t offset; + u64 length; + u16 type; + u16 flags; + dev_t bdev; + char __data[0]; +}; + +struct trace_event_raw_iomap_apply { + struct trace_entry ent; + dev_t dev; + u64 ino; + loff_t pos; + loff_t length; + unsigned int flags; + const void *ops; + void *actor; + long unsigned int caller; + char __data[0]; +}; + +struct trace_event_data_offsets_iomap_readpage_class {}; + +struct trace_event_data_offsets_iomap_range_class {}; + +struct trace_event_data_offsets_iomap_class {}; + +struct trace_event_data_offsets_iomap_apply {}; + +typedef void (*btf_trace_iomap_readpage)(void *, struct inode *, int); + +typedef void (*btf_trace_iomap_readahead)(void *, struct inode *, int); + +typedef void (*btf_trace_iomap_writepage)(void *, struct inode *, long unsigned int, unsigned int); + +typedef void (*btf_trace_iomap_releasepage)(void *, struct inode *, long unsigned int, unsigned int); + +typedef void (*btf_trace_iomap_invalidatepage)(void *, struct inode *, long unsigned int, unsigned int); + +typedef void (*btf_trace_iomap_dio_invalidate_fail)(void *, struct inode *, long unsigned int, unsigned int); + +typedef void (*btf_trace_iomap_apply_dstmap)(void *, struct inode *, struct iomap___2 *); + +typedef void (*btf_trace_iomap_apply_srcmap)(void *, struct inode *, struct iomap___2 *); + +typedef void (*btf_trace_iomap_apply)(void *, struct inode *, loff_t, loff_t, unsigned int, const void *, void *, long unsigned int); + +typedef loff_t (*iomap_actor_t)(struct inode *, loff_t, loff_t, void *, struct iomap___2 *, struct iomap___2 *); + +struct iomap_ioend { + struct list_head io_list; + u16 io_type; + u16 io_flags; + struct inode *io_inode; + size_t io_size; + loff_t io_offset; + void *io_private; + struct bio *io_bio; + struct bio io_inline_bio; +}; + +struct iomap_writepage_ctx; + +struct iomap_writeback_ops { + int (*map_blocks)(struct iomap_writepage_ctx *, struct inode *, loff_t); + int (*prepare_ioend)(struct iomap_ioend *, int); + void (*discard_page)(struct page *, loff_t); +}; + +struct iomap_writepage_ctx { + struct iomap___2 iomap; + struct iomap_ioend *ioend; + const struct iomap_writeback_ops *ops; +}; + +struct iomap_page { + atomic_t read_bytes_pending; + atomic_t write_bytes_pending; + spinlock_t uptodate_lock; + long unsigned int uptodate[0]; +}; + +struct 
iomap_readpage_ctx { + struct page *cur_page; + bool cur_page_in_bio; + struct bio *bio; + struct readahead_control *rac; +}; + +enum { + IOMAP_WRITE_F_UNSHARE = 1, +}; + +struct iomap_dio_ops { + int (*end_io)(struct kiocb *, ssize_t, int, unsigned int); + blk_qc_t (*submit_io)(struct inode *, struct iomap___2 *, struct bio *, loff_t); +}; + +struct iomap_dio { + struct kiocb *iocb; + const struct iomap_dio_ops *dops; + loff_t i_size; + loff_t size; + atomic_t ref; + unsigned int flags; + int error; + bool wait_for_completion; + union { + struct { + struct iov_iter *iter; + struct task_struct *waiter; + struct request_queue *last_queue; + blk_qc_t cookie; + } submit; + struct { + struct work_struct work; + } aio; + }; +}; + +struct fiemap_ctx { + struct fiemap_extent_info *fi; + struct iomap___2 prev; +}; + +struct iomap_swapfile_info { + struct iomap___2 iomap; + struct swap_info_struct *sis; + uint64_t lowest_ppage; + uint64_t highest_ppage; + long unsigned int nr_pages; + int nr_extents; +}; + +enum { + QIF_BLIMITS_B = 0, + QIF_SPACE_B = 1, + QIF_ILIMITS_B = 2, + QIF_INODES_B = 3, + QIF_BTIME_B = 4, + QIF_ITIME_B = 5, +}; + +typedef __kernel_uid32_t qid_t; + +enum { + DQF_INFO_DIRTY_B = 17, +}; + +struct dqstats { + long unsigned int stat[8]; + struct percpu_counter counter[8]; +}; + +enum { + _DQUOT_USAGE_ENABLED = 0, + _DQUOT_LIMITS_ENABLED = 1, + _DQUOT_SUSPENDED = 2, + _DQUOT_STATE_FLAGS = 3, +}; + +struct quota_module_name { + int qm_fmt_id; + char *qm_mod_name; +}; + +struct dquot_warn { + struct super_block *w_sb; + struct kqid w_dq_id; + short int w_type; +}; + +struct fs_disk_quota { + __s8 d_version; + __s8 d_flags; + __u16 d_fieldmask; + __u32 d_id; + __u64 d_blk_hardlimit; + __u64 d_blk_softlimit; + __u64 d_ino_hardlimit; + __u64 d_ino_softlimit; + __u64 d_bcount; + __u64 d_icount; + __s32 d_itimer; + __s32 d_btimer; + __u16 d_iwarns; + __u16 d_bwarns; + __s8 d_itimer_hi; + __s8 d_btimer_hi; + __s8 d_rtbtimer_hi; + __s8 d_padding2; + __u64 d_rtb_hardlimit; + __u64 d_rtb_softlimit; + __u64 d_rtbcount; + __s32 d_rtbtimer; + __u16 d_rtbwarns; + __s16 d_padding3; + char d_padding4[8]; +}; + +struct fs_qfilestat { + __u64 qfs_ino; + __u64 qfs_nblks; + __u32 qfs_nextents; +}; + +typedef struct fs_qfilestat fs_qfilestat_t; + +struct fs_quota_stat { + __s8 qs_version; + __u16 qs_flags; + __s8 qs_pad; + fs_qfilestat_t qs_uquota; + fs_qfilestat_t qs_gquota; + __u32 qs_incoredqs; + __s32 qs_btimelimit; + __s32 qs_itimelimit; + __s32 qs_rtbtimelimit; + __u16 qs_bwarnlimit; + __u16 qs_iwarnlimit; +}; + +struct fs_qfilestatv { + __u64 qfs_ino; + __u64 qfs_nblks; + __u32 qfs_nextents; + __u32 qfs_pad; +}; + +struct fs_quota_statv { + __s8 qs_version; + __u8 qs_pad1; + __u16 qs_flags; + __u32 qs_incoredqs; + struct fs_qfilestatv qs_uquota; + struct fs_qfilestatv qs_gquota; + struct fs_qfilestatv qs_pquota; + __s32 qs_btimelimit; + __s32 qs_itimelimit; + __s32 qs_rtbtimelimit; + __u16 qs_bwarnlimit; + __u16 qs_iwarnlimit; + __u64 qs_pad2[8]; +}; + +struct if_dqblk { + __u64 dqb_bhardlimit; + __u64 dqb_bsoftlimit; + __u64 dqb_curspace; + __u64 dqb_ihardlimit; + __u64 dqb_isoftlimit; + __u64 dqb_curinodes; + __u64 dqb_btime; + __u64 dqb_itime; + __u32 dqb_valid; +}; + +struct if_nextdqblk { + __u64 dqb_bhardlimit; + __u64 dqb_bsoftlimit; + __u64 dqb_curspace; + __u64 dqb_ihardlimit; + __u64 dqb_isoftlimit; + __u64 dqb_curinodes; + __u64 dqb_btime; + __u64 dqb_itime; + __u32 dqb_valid; + __u32 dqb_id; +}; + +struct if_dqinfo { + __u64 dqi_bgrace; + __u64 dqi_igrace; + __u32 dqi_flags; + __u32 
dqi_valid; +}; + +struct compat_if_dqblk { + compat_u64 dqb_bhardlimit; + compat_u64 dqb_bsoftlimit; + compat_u64 dqb_curspace; + compat_u64 dqb_ihardlimit; + compat_u64 dqb_isoftlimit; + compat_u64 dqb_curinodes; + compat_u64 dqb_btime; + compat_u64 dqb_itime; + compat_uint_t dqb_valid; +}; + +enum { + QUOTA_NL_C_UNSPEC = 0, + QUOTA_NL_C_WARNING = 1, + __QUOTA_NL_C_MAX = 2, +}; + +enum { + QUOTA_NL_A_UNSPEC = 0, + QUOTA_NL_A_QTYPE = 1, + QUOTA_NL_A_EXCESS_ID = 2, + QUOTA_NL_A_WARNING = 3, + QUOTA_NL_A_DEV_MAJOR = 4, + QUOTA_NL_A_DEV_MINOR = 5, + QUOTA_NL_A_CAUSED_ID = 6, + QUOTA_NL_A_PAD = 7, + __QUOTA_NL_A_MAX = 8, +}; + +struct proc_maps_private { + struct inode *inode; + struct task_struct *task; + struct mm_struct *mm; + struct vm_area_struct *tail_vma; + struct mempolicy *task_mempolicy; +}; + +struct mem_size_stats { + long unsigned int resident; + long unsigned int shared_clean; + long unsigned int shared_dirty; + long unsigned int private_clean; + long unsigned int private_dirty; + long unsigned int referenced; + long unsigned int anonymous; + long unsigned int lazyfree; + long unsigned int anonymous_thp; + long unsigned int shmem_thp; + long unsigned int file_thp; + long unsigned int swap; + long unsigned int shared_hugetlb; + long unsigned int private_hugetlb; + u64 pss; + u64 pss_anon; + u64 pss_file; + u64 pss_shmem; + u64 pss_locked; + u64 swap_pss; + bool check_shmem_swap; +}; + +enum clear_refs_types { + CLEAR_REFS_ALL = 1, + CLEAR_REFS_ANON = 2, + CLEAR_REFS_MAPPED = 3, + CLEAR_REFS_SOFT_DIRTY = 4, + CLEAR_REFS_MM_HIWATER_RSS = 5, + CLEAR_REFS_LAST = 6, +}; + +struct clear_refs_private { + enum clear_refs_types type; +}; + +typedef struct { + u64 pme; +} pagemap_entry_t; + +struct pagemapread { + int pos; + int len; + pagemap_entry_t *buffer; + bool show_pfn; +}; + +struct numa_maps { + long unsigned int pages; + long unsigned int anon; + long unsigned int active; + long unsigned int writeback; + long unsigned int mapcount_max; + long unsigned int dirty; + long unsigned int swapcache; + long unsigned int node[128]; +}; + +struct numa_maps_private { + struct proc_maps_private proc_maps; + struct numa_maps md; +}; + +struct pde_opener { + struct list_head lh; + struct file *file; + bool closing; + struct completion *c; +}; + +enum { + BIAS = 2147483648, +}; + +struct proc_fs_context { + struct pid_namespace *pid_ns; + unsigned int mask; + enum proc_hidepid hidepid; + int gid; + enum proc_pidonly pidonly; +}; + +enum proc_param { + Opt_gid___2 = 0, + Opt_hidepid = 1, + Opt_subset = 2, +}; + +struct genradix_root; + +struct __genradix { + struct genradix_root *root; +}; + +struct syscall_info { + __u64 sp; + struct seccomp_data data; +}; + +typedef struct dentry *instantiate_t(struct dentry *, struct task_struct *, const void *); + +struct pid_entry { + const char *name; + unsigned int len; + umode_t mode; + const struct inode_operations *iop; + const struct file_operations *fop; + union proc_op op; +}; + +struct limit_names { + const char *name; + const char *unit; +}; + +struct map_files_info { + long unsigned int start; + long unsigned int end; + fmode_t mode; +}; + +struct timers_private { + struct pid *pid; + struct task_struct *task; + struct sighand_struct *sighand; + struct pid_namespace *ns; + long unsigned int flags; +}; + +struct tgid_iter { + unsigned int tgid; + struct task_struct *task; +}; + +struct fd_data { + fmode_t mode; + unsigned int fd; +}; + +struct sysctl_alias { + const char *kernel_param; + const char *sysctl_param; +}; + +struct seq_net_private { + 
struct net *net; +}; + +struct bpf_iter_aux_info___2; + +enum kcore_type { + KCORE_TEXT = 0, + KCORE_VMALLOC = 1, + KCORE_RAM = 2, + KCORE_VMEMMAP = 3, + KCORE_USER = 4, + KCORE_OTHER = 5, + KCORE_REMAP = 6, +}; + +struct kcore_list { + struct list_head list; + long unsigned int addr; + long unsigned int vaddr; + size_t size; + int type; +}; + +struct kernfs_iattrs { + kuid_t ia_uid; + kgid_t ia_gid; + struct timespec64 ia_atime; + struct timespec64 ia_mtime; + struct timespec64 ia_ctime; + struct simple_xattrs xattrs; + atomic_t nr_user_xattrs; + atomic_t user_xattr_size; +}; + +struct kernfs_super_info { + struct super_block *sb; + struct kernfs_root *root; + const void *ns; + struct list_head node; +}; + +enum kernfs_node_flag { + KERNFS_ACTIVATED = 16, + KERNFS_NS = 32, + KERNFS_HAS_SEQ_SHOW = 64, + KERNFS_HAS_MMAP = 128, + KERNFS_LOCKDEP = 256, + KERNFS_SUICIDAL = 1024, + KERNFS_SUICIDED = 2048, + KERNFS_EMPTY_DIR = 4096, + KERNFS_HAS_RELEASE = 8192, +}; + +struct kernfs_open_node { + atomic_t refcnt; + atomic_t event; + wait_queue_head_t poll; + struct list_head files; +}; + +struct config_group; + +struct config_item_type; + +struct config_item { + char *ci_name; + char ci_namebuf[20]; + struct kref ci_kref; + struct list_head ci_entry; + struct config_item *ci_parent; + struct config_group *ci_group; + const struct config_item_type *ci_type; + struct dentry *ci_dentry; +}; + +struct configfs_subsystem; + +struct config_group { + struct config_item cg_item; + struct list_head cg_children; + struct configfs_subsystem *cg_subsys; + struct list_head default_groups; + struct list_head group_entry; +}; + +struct configfs_item_operations; + +struct configfs_group_operations; + +struct configfs_attribute; + +struct configfs_bin_attribute; + +struct config_item_type { + struct module *ct_owner; + struct configfs_item_operations *ct_item_ops; + struct configfs_group_operations *ct_group_ops; + struct configfs_attribute **ct_attrs; + struct configfs_bin_attribute **ct_bin_attrs; +}; + +struct configfs_item_operations { + void (*release)(struct config_item *); + int (*allow_link)(struct config_item *, struct config_item *); + void (*drop_link)(struct config_item *, struct config_item *); +}; + +struct configfs_group_operations { + struct config_item * (*make_item)(struct config_group *, const char *); + struct config_group * (*make_group)(struct config_group *, const char *); + int (*commit_item)(struct config_item *); + void (*disconnect_notify)(struct config_group *, struct config_item *); + void (*drop_item)(struct config_group *, struct config_item *); +}; + +struct configfs_attribute { + const char *ca_name; + struct module *ca_owner; + umode_t ca_mode; + ssize_t (*show)(struct config_item *, char *); + ssize_t (*store)(struct config_item *, const char *, size_t); +}; + +struct configfs_bin_attribute { + struct configfs_attribute cb_attr; + void *cb_private; + size_t cb_max_size; + ssize_t (*read)(struct config_item *, void *, size_t); + ssize_t (*write)(struct config_item *, const void *, size_t); +}; + +struct configfs_subsystem { + struct config_group su_group; + struct mutex su_mutex; +}; + +struct configfs_fragment { + atomic_t frag_count; + struct rw_semaphore frag_sem; + bool frag_dead; +}; + +struct configfs_dirent { + atomic_t s_count; + int s_dependent_count; + struct list_head s_sibling; + struct list_head s_children; + int s_links; + void *s_element; + int s_type; + umode_t s_mode; + struct dentry *s_dentry; + struct iattr *s_iattr; + struct configfs_fragment *s_frag; +}; + 
+struct configfs_buffer { + size_t count; + loff_t pos; + char *page; + struct configfs_item_operations *ops; + struct mutex mutex; + int needs_read_fill; + bool read_in_progress; + bool write_in_progress; + char *bin_buffer; + int bin_buffer_size; + int cb_max_size; + struct config_item *item; + struct module *owner; + union { + struct configfs_attribute *attr; + struct configfs_bin_attribute *bin_attr; + }; +}; + +struct pts_mount_opts { + int setuid; + int setgid; + kuid_t uid; + kgid_t gid; + umode_t mode; + umode_t ptmxmode; + int reserve; + int max; +}; + +enum { + Opt_uid___2 = 0, + Opt_gid___3 = 1, + Opt_mode___2 = 2, + Opt_ptmxmode = 3, + Opt_newinstance = 4, + Opt_max = 5, + Opt_err = 6, +}; + +struct pts_fs_info { + struct ida allocated_ptys; + struct pts_mount_opts mount_opts; + struct super_block *sb; + struct dentry *ptmx_dentry; +}; + +struct dcookie_struct { + struct path path; + struct list_head hash_list; +}; + +struct dcookie_user { + struct list_head next; +}; + +typedef unsigned int tid_t; + +struct transaction_chp_stats_s { + long unsigned int cs_chp_time; + __u32 cs_forced_to_close; + __u32 cs_written; + __u32 cs_dropped; +}; + +struct journal_s; + +typedef struct journal_s journal_t; + +struct journal_head; + +struct transaction_s; + +typedef struct transaction_s transaction_t; + +struct transaction_s { + journal_t *t_journal; + tid_t t_tid; + enum { + T_RUNNING = 0, + T_LOCKED = 1, + T_SWITCH = 2, + T_FLUSH = 3, + T_COMMIT = 4, + T_COMMIT_DFLUSH = 5, + T_COMMIT_JFLUSH = 6, + T_COMMIT_CALLBACK = 7, + T_FINISHED = 8, + } t_state; + long unsigned int t_log_start; + int t_nr_buffers; + struct journal_head *t_reserved_list; + struct journal_head *t_buffers; + struct journal_head *t_forget; + struct journal_head *t_checkpoint_list; + struct journal_head *t_checkpoint_io_list; + struct journal_head *t_shadow_list; + struct list_head t_inode_list; + spinlock_t t_handle_lock; + long unsigned int t_max_wait; + long unsigned int t_start; + long unsigned int t_requested; + struct transaction_chp_stats_s t_chp_stats; + atomic_t t_updates; + atomic_t t_outstanding_credits; + atomic_t t_outstanding_revokes; + atomic_t t_handle_count; + transaction_t *t_cpnext; + transaction_t *t_cpprev; + long unsigned int t_expires; + ktime_t t_start_time; + unsigned int t_synchronous_commit: 1; + int t_need_data_flush; + struct list_head t_private_list; +}; + +struct jbd2_buffer_trigger_type; + +struct journal_head { + struct buffer_head *b_bh; + spinlock_t b_state_lock; + int b_jcount; + unsigned int b_jlist; + unsigned int b_modified; + char *b_frozen_data; + char *b_committed_data; + transaction_t *b_transaction; + transaction_t *b_next_transaction; + struct journal_head *b_tnext; + struct journal_head *b_tprev; + transaction_t *b_cp_transaction; + struct journal_head *b_cpnext; + struct journal_head *b_cpprev; + struct jbd2_buffer_trigger_type *b_triggers; + struct jbd2_buffer_trigger_type *b_frozen_triggers; +}; + +struct jbd2_buffer_trigger_type { + void (*t_frozen)(struct jbd2_buffer_trigger_type *, struct buffer_head *, void *, size_t); + void (*t_abort)(struct jbd2_buffer_trigger_type *, struct buffer_head *); +}; + +struct jbd2_journal_handle; + +typedef struct jbd2_journal_handle handle_t; + +struct jbd2_journal_handle { + union { + transaction_t *h_transaction; + journal_t *h_journal; + }; + handle_t *h_rsv_handle; + int h_total_credits; + int h_revoke_credits; + int h_revoke_credits_requested; + int h_ref; + int h_err; + unsigned int h_sync: 1; + unsigned int h_jdata: 1; + unsigned 
int h_reserved: 1; + unsigned int h_aborted: 1; + unsigned int h_type: 8; + unsigned int h_line_no: 16; + long unsigned int h_start_jiffies; + unsigned int h_requested_credits; + unsigned int saved_alloc_context; +}; + +struct transaction_run_stats_s { + long unsigned int rs_wait; + long unsigned int rs_request_delay; + long unsigned int rs_running; + long unsigned int rs_locked; + long unsigned int rs_flushing; + long unsigned int rs_logging; + __u32 rs_handle_count; + __u32 rs_blocks; + __u32 rs_blocks_logged; +}; + +struct transaction_stats_s { + long unsigned int ts_tid; + long unsigned int ts_requested; + struct transaction_run_stats_s run; +}; + +enum passtype { + PASS_SCAN = 0, + PASS_REVOKE = 1, + PASS_REPLAY = 2, +}; + +struct journal_superblock_s; + +typedef struct journal_superblock_s journal_superblock_t; + +struct jbd2_revoke_table_s; + +struct jbd2_inode; + +struct journal_s { + long unsigned int j_flags; + int j_errno; + struct mutex j_abort_mutex; + struct buffer_head *j_sb_buffer; + journal_superblock_t *j_superblock; + int j_format_version; + rwlock_t j_state_lock; + int j_barrier_count; + struct mutex j_barrier; + transaction_t *j_running_transaction; + transaction_t *j_committing_transaction; + transaction_t *j_checkpoint_transactions; + wait_queue_head_t j_wait_transaction_locked; + wait_queue_head_t j_wait_done_commit; + wait_queue_head_t j_wait_commit; + wait_queue_head_t j_wait_updates; + wait_queue_head_t j_wait_reserved; + wait_queue_head_t j_fc_wait; + struct mutex j_checkpoint_mutex; + struct buffer_head *j_chkpt_bhs[64]; + long unsigned int j_head; + long unsigned int j_tail; + long unsigned int j_free; + long unsigned int j_first; + long unsigned int j_last; + long unsigned int j_fc_first; + long unsigned int j_fc_off; + long unsigned int j_fc_last; + struct block_device *j_dev; + int j_blocksize; + long long unsigned int j_blk_offset; + char j_devname[56]; + struct block_device *j_fs_dev; + unsigned int j_total_len; + atomic_t j_reserved_credits; + spinlock_t j_list_lock; + struct inode *j_inode; + tid_t j_tail_sequence; + tid_t j_transaction_sequence; + tid_t j_commit_sequence; + tid_t j_commit_request; + __u8 j_uuid[16]; + struct task_struct *j_task; + int j_max_transaction_buffers; + int j_revoke_records_per_block; + long unsigned int j_commit_interval; + struct timer_list j_commit_timer; + spinlock_t j_revoke_lock; + struct jbd2_revoke_table_s *j_revoke; + struct jbd2_revoke_table_s *j_revoke_table[2]; + struct buffer_head **j_wbuf; + struct buffer_head **j_fc_wbuf; + int j_wbufsize; + int j_fc_wbufsize; + pid_t j_last_sync_writer; + u64 j_average_commit_time; + u32 j_min_batch_time; + u32 j_max_batch_time; + void (*j_commit_callback)(journal_t *, transaction_t *); + int (*j_submit_inode_data_buffers)(struct jbd2_inode *); + int (*j_finish_inode_data_buffers)(struct jbd2_inode *); + spinlock_t j_history_lock; + struct proc_dir_entry *j_proc_entry; + struct transaction_stats_s j_stats; + unsigned int j_failed_commit; + void *j_private; + struct crypto_shash *j_chksum_driver; + __u32 j_csum_seed; + void (*j_fc_cleanup_callback)(struct journal_s *, int); + int (*j_fc_replay_callback)(struct journal_s *, struct buffer_head *, enum passtype, int, tid_t); +}; + +struct journal_header_s { + __be32 h_magic; + __be32 h_blocktype; + __be32 h_sequence; +}; + +typedef struct journal_header_s journal_header_t; + +struct journal_superblock_s { + journal_header_t s_header; + __be32 s_blocksize; + __be32 s_maxlen; + __be32 s_first; + __be32 s_sequence; + __be32 s_start; 
+ __be32 s_errno; + __be32 s_feature_compat; + __be32 s_feature_incompat; + __be32 s_feature_ro_compat; + __u8 s_uuid[16]; + __be32 s_nr_users; + __be32 s_dynsuper; + __be32 s_max_transaction; + __be32 s_max_trans_data; + __u8 s_checksum_type; + __u8 s_padding2[3]; + __be32 s_num_fc_blks; + __u32 s_padding[41]; + __be32 s_checksum; + __u8 s_users[768]; +}; + +enum jbd_state_bits { + BH_JBD = 16, + BH_JWrite = 17, + BH_Freed = 18, + BH_Revoked = 19, + BH_RevokeValid = 20, + BH_JBDDirty = 21, + BH_JournalHead = 22, + BH_Shadow = 23, + BH_Verified = 24, + BH_JBDPrivateStart = 25, +}; + +struct jbd2_inode { + transaction_t *i_transaction; + transaction_t *i_next_transaction; + struct list_head i_list; + struct inode *i_vfs_inode; + long unsigned int i_flags; + loff_t i_dirty_start; + loff_t i_dirty_end; +}; + +struct bgl_lock { + spinlock_t lock; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct blockgroup_lock { + struct bgl_lock locks[128]; +}; + +typedef int ext4_grpblk_t; + +typedef long long unsigned int ext4_fsblk_t; + +typedef __u32 ext4_lblk_t; + +typedef unsigned int ext4_group_t; + +struct ext4_allocation_request { + struct inode *inode; + unsigned int len; + ext4_lblk_t logical; + ext4_lblk_t lleft; + ext4_lblk_t lright; + ext4_fsblk_t goal; + ext4_fsblk_t pleft; + ext4_fsblk_t pright; + unsigned int flags; +}; + +struct ext4_system_blocks { + struct rb_root root; + struct callback_head rcu; +}; + +struct ext4_group_desc { + __le32 bg_block_bitmap_lo; + __le32 bg_inode_bitmap_lo; + __le32 bg_inode_table_lo; + __le16 bg_free_blocks_count_lo; + __le16 bg_free_inodes_count_lo; + __le16 bg_used_dirs_count_lo; + __le16 bg_flags; + __le32 bg_exclude_bitmap_lo; + __le16 bg_block_bitmap_csum_lo; + __le16 bg_inode_bitmap_csum_lo; + __le16 bg_itable_unused_lo; + __le16 bg_checksum; + __le32 bg_block_bitmap_hi; + __le32 bg_inode_bitmap_hi; + __le32 bg_inode_table_hi; + __le16 bg_free_blocks_count_hi; + __le16 bg_free_inodes_count_hi; + __le16 bg_used_dirs_count_hi; + __le16 bg_itable_unused_hi; + __le32 bg_exclude_bitmap_hi; + __le16 bg_block_bitmap_csum_hi; + __le16 bg_inode_bitmap_csum_hi; + __u32 bg_reserved; +}; + +struct flex_groups { + atomic64_t free_clusters; + atomic_t free_inodes; + atomic_t used_dirs; +}; + +struct extent_status { + struct rb_node rb_node; + ext4_lblk_t es_lblk; + ext4_lblk_t es_len; + ext4_fsblk_t es_pblk; +}; + +struct ext4_es_tree { + struct rb_root root; + struct extent_status *cache_es; +}; + +struct ext4_es_stats { + long unsigned int es_stats_shrunk; + struct percpu_counter es_stats_cache_hits; + struct percpu_counter es_stats_cache_misses; + u64 es_stats_scan_time; + u64 es_stats_max_scan_time; + struct percpu_counter es_stats_all_cnt; + struct percpu_counter es_stats_shk_cnt; +}; + +struct ext4_pending_tree { + struct rb_root root; +}; + +struct ext4_fc_stats { + unsigned int fc_ineligible_reason_count[10]; + long unsigned int fc_num_commits; + long unsigned int fc_ineligible_commits; + long unsigned int fc_numblks; +}; + +struct ext4_fc_alloc_region { + ext4_lblk_t lblk; + ext4_fsblk_t pblk; + int ino; + int len; +}; + +struct ext4_fc_replay_state { + int fc_replay_num_tags; + int fc_replay_expected_off; + int fc_current_pass; + int fc_cur_tag; + int fc_crc; + struct ext4_fc_alloc_region *fc_regions; + int fc_regions_size; + int fc_regions_used; + int fc_regions_valid; + int *fc_modified_inodes; + int fc_modified_inodes_used; + int fc_modified_inodes_size; +}; + +struct ext4_inode_info { + __le32 
i_data[15]; + __u32 i_dtime; + ext4_fsblk_t i_file_acl; + ext4_group_t i_block_group; + ext4_lblk_t i_dir_start_lookup; + long unsigned int i_flags; + struct rw_semaphore xattr_sem; + struct list_head i_orphan; + struct list_head i_fc_list; + ext4_lblk_t i_fc_lblk_start; + ext4_lblk_t i_fc_lblk_len; + atomic_t i_fc_updates; + wait_queue_head_t i_fc_wait; + struct mutex i_fc_lock; + loff_t i_disksize; + struct rw_semaphore i_data_sem; + struct rw_semaphore i_mmap_sem; + struct inode vfs_inode; + struct jbd2_inode *jinode; + spinlock_t i_raw_lock; + struct timespec64 i_crtime; + atomic_t i_prealloc_active; + struct list_head i_prealloc_list; + spinlock_t i_prealloc_lock; + struct ext4_es_tree i_es_tree; + rwlock_t i_es_lock; + struct list_head i_es_list; + unsigned int i_es_all_nr; + unsigned int i_es_shk_nr; + ext4_lblk_t i_es_shrink_lblk; + ext4_group_t i_last_alloc_group; + unsigned int i_reserved_data_blocks; + struct ext4_pending_tree i_pending_tree; + __u16 i_extra_isize; + u16 i_inline_off; + u16 i_inline_size; + qsize_t i_reserved_quota; + spinlock_t i_completed_io_lock; + struct list_head i_rsv_conversion_list; + struct work_struct i_rsv_conversion_work; + atomic_t i_unwritten; + spinlock_t i_block_reservation_lock; + tid_t i_sync_tid; + tid_t i_datasync_tid; + struct dquot *i_dquot[3]; + __u32 i_csum_seed; + kprojid_t i_projid; +}; + +struct ext4_super_block { + __le32 s_inodes_count; + __le32 s_blocks_count_lo; + __le32 s_r_blocks_count_lo; + __le32 s_free_blocks_count_lo; + __le32 s_free_inodes_count; + __le32 s_first_data_block; + __le32 s_log_block_size; + __le32 s_log_cluster_size; + __le32 s_blocks_per_group; + __le32 s_clusters_per_group; + __le32 s_inodes_per_group; + __le32 s_mtime; + __le32 s_wtime; + __le16 s_mnt_count; + __le16 s_max_mnt_count; + __le16 s_magic; + __le16 s_state; + __le16 s_errors; + __le16 s_minor_rev_level; + __le32 s_lastcheck; + __le32 s_checkinterval; + __le32 s_creator_os; + __le32 s_rev_level; + __le16 s_def_resuid; + __le16 s_def_resgid; + __le32 s_first_ino; + __le16 s_inode_size; + __le16 s_block_group_nr; + __le32 s_feature_compat; + __le32 s_feature_incompat; + __le32 s_feature_ro_compat; + __u8 s_uuid[16]; + char s_volume_name[16]; + char s_last_mounted[64]; + __le32 s_algorithm_usage_bitmap; + __u8 s_prealloc_blocks; + __u8 s_prealloc_dir_blocks; + __le16 s_reserved_gdt_blocks; + __u8 s_journal_uuid[16]; + __le32 s_journal_inum; + __le32 s_journal_dev; + __le32 s_last_orphan; + __le32 s_hash_seed[4]; + __u8 s_def_hash_version; + __u8 s_jnl_backup_type; + __le16 s_desc_size; + __le32 s_default_mount_opts; + __le32 s_first_meta_bg; + __le32 s_mkfs_time; + __le32 s_jnl_blocks[17]; + __le32 s_blocks_count_hi; + __le32 s_r_blocks_count_hi; + __le32 s_free_blocks_count_hi; + __le16 s_min_extra_isize; + __le16 s_want_extra_isize; + __le32 s_flags; + __le16 s_raid_stride; + __le16 s_mmp_update_interval; + __le64 s_mmp_block; + __le32 s_raid_stripe_width; + __u8 s_log_groups_per_flex; + __u8 s_checksum_type; + __u8 s_encryption_level; + __u8 s_reserved_pad; + __le64 s_kbytes_written; + __le32 s_snapshot_inum; + __le32 s_snapshot_id; + __le64 s_snapshot_r_blocks_count; + __le32 s_snapshot_list; + __le32 s_error_count; + __le32 s_first_error_time; + __le32 s_first_error_ino; + __le64 s_first_error_block; + __u8 s_first_error_func[32]; + __le32 s_first_error_line; + __le32 s_last_error_time; + __le32 s_last_error_ino; + __le32 s_last_error_line; + __le64 s_last_error_block; + __u8 s_last_error_func[32]; + __u8 s_mount_opts[64]; + __le32 
s_usr_quota_inum; + __le32 s_grp_quota_inum; + __le32 s_overhead_clusters; + __le32 s_backup_bgs[2]; + __u8 s_encrypt_algos[4]; + __u8 s_encrypt_pw_salt[16]; + __le32 s_lpf_ino; + __le32 s_prj_quota_inum; + __le32 s_checksum_seed; + __u8 s_wtime_hi; + __u8 s_mtime_hi; + __u8 s_mkfs_time_hi; + __u8 s_lastcheck_hi; + __u8 s_first_error_time_hi; + __u8 s_last_error_time_hi; + __u8 s_first_error_errcode; + __u8 s_last_error_errcode; + __le16 s_encoding; + __le16 s_encoding_flags; + __le32 s_reserved[95]; + __le32 s_checksum; +}; + +struct mb_cache___2; + +struct ext4_group_info; + +struct ext4_locality_group; + +struct ext4_li_request; + +struct ext4_sb_info { + long unsigned int s_desc_size; + long unsigned int s_inodes_per_block; + long unsigned int s_blocks_per_group; + long unsigned int s_clusters_per_group; + long unsigned int s_inodes_per_group; + long unsigned int s_itb_per_group; + long unsigned int s_gdb_count; + long unsigned int s_desc_per_block; + ext4_group_t s_groups_count; + ext4_group_t s_blockfile_groups; + long unsigned int s_overhead; + unsigned int s_cluster_ratio; + unsigned int s_cluster_bits; + loff_t s_bitmap_maxbytes; + struct buffer_head *s_sbh; + struct ext4_super_block *s_es; + struct buffer_head **s_group_desc; + unsigned int s_mount_opt; + unsigned int s_mount_opt2; + long unsigned int s_mount_flags; + unsigned int s_def_mount_opt; + ext4_fsblk_t s_sb_block; + atomic64_t s_resv_clusters; + kuid_t s_resuid; + kgid_t s_resgid; + short unsigned int s_mount_state; + short unsigned int s_pad; + int s_addr_per_block_bits; + int s_desc_per_block_bits; + int s_inode_size; + int s_first_ino; + unsigned int s_inode_readahead_blks; + unsigned int s_inode_goal; + u32 s_hash_seed[4]; + int s_def_hash_version; + int s_hash_unsigned; + struct percpu_counter s_freeclusters_counter; + struct percpu_counter s_freeinodes_counter; + struct percpu_counter s_dirs_counter; + struct percpu_counter s_dirtyclusters_counter; + struct blockgroup_lock *s_blockgroup_lock; + struct proc_dir_entry *s_proc; + struct kobject s_kobj; + struct completion s_kobj_unregister; + struct super_block *s_sb; + struct journal_s *s_journal; + struct list_head s_orphan; + struct mutex s_orphan_lock; + long unsigned int s_ext4_flags; + long unsigned int s_commit_interval; + u32 s_max_batch_time; + u32 s_min_batch_time; + struct block_device *s_journal_bdev; + char *s_qf_names[3]; + int s_jquota_fmt; + unsigned int s_want_extra_isize; + struct ext4_system_blocks *s_system_blks; + struct ext4_group_info ***s_group_info; + struct inode *s_buddy_cache; + spinlock_t s_md_lock; + short unsigned int *s_mb_offsets; + unsigned int *s_mb_maxs; + unsigned int s_group_info_size; + unsigned int s_mb_free_pending; + struct list_head s_freed_data_list; + long unsigned int s_stripe; + unsigned int s_mb_stream_request; + unsigned int s_mb_max_to_scan; + unsigned int s_mb_min_to_scan; + unsigned int s_mb_stats; + unsigned int s_mb_order2_reqs; + unsigned int s_mb_group_prealloc; + unsigned int s_mb_max_inode_prealloc; + unsigned int s_max_dir_size_kb; + long unsigned int s_mb_last_group; + long unsigned int s_mb_last_start; + unsigned int s_mb_prefetch; + unsigned int s_mb_prefetch_limit; + atomic_t s_bal_reqs; + atomic_t s_bal_success; + atomic_t s_bal_allocated; + atomic_t s_bal_ex_scanned; + atomic_t s_bal_goals; + atomic_t s_bal_breaks; + atomic_t s_bal_2orders; + spinlock_t s_bal_lock; + long unsigned int s_mb_buddies_generated; + long long unsigned int s_mb_generation_time; + atomic_t s_mb_lost_chunks; + atomic_t 
s_mb_preallocated; + atomic_t s_mb_discarded; + atomic_t s_lock_busy; + struct ext4_locality_group *s_locality_groups; + long unsigned int s_sectors_written_start; + u64 s_kbytes_written; + unsigned int s_extent_max_zeroout_kb; + unsigned int s_log_groups_per_flex; + struct flex_groups **s_flex_groups; + ext4_group_t s_flex_groups_allocated; + struct workqueue_struct *rsv_conversion_wq; + struct timer_list s_err_report; + struct ext4_li_request *s_li_request; + unsigned int s_li_wait_mult; + struct task_struct *s_mmp_tsk; + atomic_t s_last_trim_minblks; + struct crypto_shash *s_chksum_driver; + __u32 s_csum_seed; + struct shrinker s_es_shrinker; + struct list_head s_es_list; + long int s_es_nr_inode; + struct ext4_es_stats s_es_stats; + struct mb_cache___2 *s_ea_block_cache; + struct mb_cache___2 *s_ea_inode_cache; + long: 64; + long: 64; + long: 64; + spinlock_t s_es_lock; + struct ratelimit_state s_err_ratelimit_state; + struct ratelimit_state s_warning_ratelimit_state; + struct ratelimit_state s_msg_ratelimit_state; + atomic_t s_warning_count; + atomic_t s_msg_count; + struct fscrypt_dummy_policy s_dummy_enc_policy; + struct percpu_rw_semaphore s_writepages_rwsem; + struct dax_device *s_daxdev; + errseq_t s_bdev_wb_err; + spinlock_t s_bdev_wb_lock; + atomic_t s_fc_subtid; + atomic_t s_fc_ineligible_updates; + struct list_head s_fc_q[2]; + struct list_head s_fc_dentry_q[2]; + unsigned int s_fc_bytes; + spinlock_t s_fc_lock; + struct buffer_head *s_fc_bh; + struct ext4_fc_stats s_fc_stats; + u64 s_fc_avg_commit_time; + struct ext4_fc_replay_state s_fc_replay_state; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct ext4_group_info { + long unsigned int bb_state; + struct rb_root bb_free_root; + ext4_grpblk_t bb_first_free; + ext4_grpblk_t bb_free; + ext4_grpblk_t bb_fragments; + ext4_grpblk_t bb_largest_free_order; + struct list_head bb_prealloc_list; + struct rw_semaphore alloc_sem; + ext4_grpblk_t bb_counters[0]; +}; + +struct ext4_locality_group { + struct mutex lg_mutex; + struct list_head lg_prealloc_list[10]; + spinlock_t lg_prealloc_lock; +}; + +enum ext4_li_mode { + EXT4_LI_MODE_PREFETCH_BBITMAP = 0, + EXT4_LI_MODE_ITABLE = 1, +}; + +struct ext4_li_request { + struct super_block *lr_super; + enum ext4_li_mode lr_mode; + ext4_group_t lr_first_not_zeroed; + ext4_group_t lr_next_group; + struct list_head lr_request; + long unsigned int lr_next_sched; + long unsigned int lr_timeout; +}; + +struct ext4_map_blocks { + ext4_fsblk_t m_pblk; + ext4_lblk_t m_lblk; + unsigned int m_len; + unsigned int m_flags; +}; + +struct ext4_system_zone { + struct rb_node node; + ext4_fsblk_t start_blk; + unsigned int count; + u32 ino; +}; + +enum { + EXT4_INODE_SECRM = 0, + EXT4_INODE_UNRM = 1, + EXT4_INODE_COMPR = 2, + EXT4_INODE_SYNC = 3, + EXT4_INODE_IMMUTABLE = 4, + EXT4_INODE_APPEND = 5, + EXT4_INODE_NODUMP = 6, + EXT4_INODE_NOATIME = 7, + EXT4_INODE_DIRTY = 8, + EXT4_INODE_COMPRBLK = 9, + EXT4_INODE_NOCOMPR = 10, + EXT4_INODE_ENCRYPT = 11, + EXT4_INODE_INDEX = 12, + EXT4_INODE_IMAGIC = 13, + EXT4_INODE_JOURNAL_DATA = 14, + EXT4_INODE_NOTAIL = 15, + EXT4_INODE_DIRSYNC = 16, + EXT4_INODE_TOPDIR = 17, + EXT4_INODE_HUGE_FILE = 18, + EXT4_INODE_EXTENTS = 19, + EXT4_INODE_VERITY = 20, + EXT4_INODE_EA_INODE = 21, + EXT4_INODE_DAX = 25, + EXT4_INODE_INLINE_DATA = 28, + EXT4_INODE_PROJINHERIT = 29, + EXT4_INODE_CASEFOLD = 30, + EXT4_INODE_RESERVED = 31, +}; + +enum { + EXT4_FC_REASON_OK = 0, + EXT4_FC_REASON_INELIGIBLE = 1, + EXT4_FC_REASON_ALREADY_COMMITTED = 2, + EXT4_FC_REASON_FC_START_FAILED 
= 3, + EXT4_FC_REASON_FC_FAILED = 4, + EXT4_FC_REASON_XATTR = 0, + EXT4_FC_REASON_CROSS_RENAME = 1, + EXT4_FC_REASON_JOURNAL_FLAG_CHANGE = 2, + EXT4_FC_REASON_NOMEM = 3, + EXT4_FC_REASON_SWAP_BOOT = 4, + EXT4_FC_REASON_RESIZE = 5, + EXT4_FC_REASON_RENAME_DIR = 6, + EXT4_FC_REASON_FALLOC_RANGE = 7, + EXT4_FC_REASON_INODE_JOURNAL_DATA = 8, + EXT4_FC_COMMIT_FAILED = 9, + EXT4_FC_REASON_MAX = 10, +}; + +struct ext4_dir_entry_2 { + __le32 inode; + __le16 rec_len; + __u8 name_len; + __u8 file_type; + char name[255]; +}; + +struct fname; + +struct dir_private_info { + struct rb_root root; + struct rb_node *curr_node; + struct fname *extra_fname; + loff_t last_pos; + __u32 curr_hash; + __u32 curr_minor_hash; + __u32 next_hash; +}; + +struct fname { + __u32 hash; + __u32 minor_hash; + struct rb_node rb_hash; + struct fname *next; + __u32 inode; + __u8 name_len; + __u8 file_type; + char name[0]; +}; + +enum SHIFT_DIRECTION { + SHIFT_LEFT = 0, + SHIFT_RIGHT = 1, +}; + +struct ext4_io_end_vec { + struct list_head list; + loff_t offset; + ssize_t size; +}; + +struct ext4_io_end { + struct list_head list; + handle_t *handle; + struct inode *inode; + struct bio *bio; + unsigned int flag; + atomic_t count; + struct list_head list_vec; +}; + +typedef struct ext4_io_end ext4_io_end_t; + +enum { + ES_WRITTEN_B = 0, + ES_UNWRITTEN_B = 1, + ES_DELAYED_B = 2, + ES_HOLE_B = 3, + ES_REFERENCED_B = 4, + ES_FLAGS = 5, +}; + +enum { + EXT4_STATE_JDATA = 0, + EXT4_STATE_NEW = 1, + EXT4_STATE_XATTR = 2, + EXT4_STATE_NO_EXPAND = 3, + EXT4_STATE_DA_ALLOC_CLOSE = 4, + EXT4_STATE_EXT_MIGRATE = 5, + EXT4_STATE_NEWENTRY = 6, + EXT4_STATE_MAY_INLINE_DATA = 7, + EXT4_STATE_EXT_PRECACHED = 8, + EXT4_STATE_LUSTRE_EA_INODE = 9, + EXT4_STATE_VERITY_IN_PROGRESS = 10, + EXT4_STATE_FC_COMMITTING = 11, +}; + +struct ext4_iloc { + struct buffer_head *bh; + long unsigned int offset; + ext4_group_t block_group; +}; + +struct ext4_extent_tail { + __le32 et_checksum; +}; + +struct ext4_extent { + __le32 ee_block; + __le16 ee_len; + __le16 ee_start_hi; + __le32 ee_start_lo; +}; + +struct ext4_extent_idx { + __le32 ei_block; + __le32 ei_leaf_lo; + __le16 ei_leaf_hi; + __u16 ei_unused; +}; + +struct ext4_extent_header { + __le16 eh_magic; + __le16 eh_entries; + __le16 eh_max; + __le16 eh_depth; + __le32 eh_generation; +}; + +struct ext4_ext_path { + ext4_fsblk_t p_block; + __u16 p_depth; + __u16 p_maxdepth; + struct ext4_extent *p_ext; + struct ext4_extent_idx *p_idx; + struct ext4_extent_header *p_hdr; + struct buffer_head *p_bh; +}; + +struct partial_cluster { + ext4_fsblk_t pclu; + ext4_lblk_t lblk; + enum { + initial = 0, + tofree = 1, + nofree = 2, + } state; +}; + +struct pending_reservation { + struct rb_node rb_node; + ext4_lblk_t lclu; +}; + +struct rsvd_count { + int ndelonly; + bool first_do_lblk_found; + ext4_lblk_t first_do_lblk; + ext4_lblk_t last_do_lblk; + struct extent_status *left_es; + bool partial; + ext4_lblk_t lclu; +}; + +enum { + EXT4_MF_MNTDIR_SAMPLED = 0, + EXT4_MF_FS_ABORTED = 1, + EXT4_MF_FC_INELIGIBLE = 2, + EXT4_MF_FC_COMMITTING = 3, +}; + +struct fsmap { + __u32 fmr_device; + __u32 fmr_flags; + __u64 fmr_physical; + __u64 fmr_owner; + __u64 fmr_offset; + __u64 fmr_length; + __u64 fmr_reserved[3]; +}; + +struct ext4_fsmap { + struct list_head fmr_list; + dev_t fmr_device; + uint32_t fmr_flags; + uint64_t fmr_physical; + uint64_t fmr_owner; + uint64_t fmr_length; +}; + +struct ext4_fsmap_head { + uint32_t fmh_iflags; + uint32_t fmh_oflags; + unsigned int fmh_count; + unsigned int fmh_entries; + struct ext4_fsmap 
fmh_keys[2]; +}; + +typedef int (*ext4_fsmap_format_t)(struct ext4_fsmap *, void *); + +struct ext4_getfsmap_info { + struct ext4_fsmap_head *gfi_head; + ext4_fsmap_format_t gfi_formatter; + void *gfi_format_arg; + ext4_fsblk_t gfi_next_fsblk; + u32 gfi_dev; + ext4_group_t gfi_agno; + struct ext4_fsmap gfi_low; + struct ext4_fsmap gfi_high; + struct ext4_fsmap gfi_lastfree; + struct list_head gfi_meta_list; + bool gfi_last; +}; + +struct ext4_getfsmap_dev { + int (*gfd_fn)(struct super_block *, struct ext4_fsmap *, struct ext4_getfsmap_info *); + u32 gfd_dev; +}; + +struct dx_hash_info { + u32 hash; + u32 minor_hash; + int hash_version; + u32 *seed; +}; + +typedef unsigned int __kernel_mode_t; + +typedef __kernel_mode_t mode_t; + +struct ext4_inode { + __le16 i_mode; + __le16 i_uid; + __le32 i_size_lo; + __le32 i_atime; + __le32 i_ctime; + __le32 i_mtime; + __le32 i_dtime; + __le16 i_gid; + __le16 i_links_count; + __le32 i_blocks_lo; + __le32 i_flags; + union { + struct { + __le32 l_i_version; + } linux1; + struct { + __u32 h_i_translator; + } hurd1; + struct { + __u32 m_i_reserved1; + } masix1; + } osd1; + __le32 i_block[15]; + __le32 i_generation; + __le32 i_file_acl_lo; + __le32 i_size_high; + __le32 i_obso_faddr; + union { + struct { + __le16 l_i_blocks_high; + __le16 l_i_file_acl_high; + __le16 l_i_uid_high; + __le16 l_i_gid_high; + __le16 l_i_checksum_lo; + __le16 l_i_reserved; + } linux2; + struct { + __le16 h_i_reserved1; + __u16 h_i_mode_high; + __u16 h_i_uid_high; + __u16 h_i_gid_high; + __u32 h_i_author; + } hurd2; + struct { + __le16 h_i_reserved1; + __le16 m_i_file_acl_high; + __u32 m_i_reserved2[2]; + } masix2; + } osd2; + __le16 i_extra_isize; + __le16 i_checksum_hi; + __le32 i_ctime_extra; + __le32 i_mtime_extra; + __le32 i_atime_extra; + __le32 i_crtime; + __le32 i_crtime_extra; + __le32 i_version_hi; + __le32 i_projid; +}; + +struct orlov_stats { + __u64 free_clusters; + __u32 free_inodes; + __u32 used_dirs; +}; + +typedef struct { + __le32 *p; + __le32 key; + struct buffer_head *bh; +} Indirect; + +struct ext4_filename { + const struct qstr *usr_fname; + struct fscrypt_str disk_name; + struct dx_hash_info hinfo; + struct fscrypt_str crypto_buf; + struct fscrypt_str cf_name; +}; + +struct ext4_xattr_ibody_header { + __le32 h_magic; +}; + +struct ext4_xattr_entry { + __u8 e_name_len; + __u8 e_name_index; + __le16 e_value_offs; + __le32 e_value_inum; + __le32 e_value_size; + __le32 e_hash; + char e_name[0]; +}; + +struct ext4_xattr_info { + const char *name; + const void *value; + size_t value_len; + int name_index; + int in_inode; +}; + +struct ext4_xattr_search { + struct ext4_xattr_entry *first; + void *base; + void *end; + struct ext4_xattr_entry *here; + int not_found; +}; + +struct ext4_xattr_ibody_find { + struct ext4_xattr_search s; + struct ext4_iloc iloc; +}; + +typedef short unsigned int __kernel_uid16_t; + +typedef short unsigned int __kernel_gid16_t; + +typedef __kernel_uid16_t uid16_t; + +typedef __kernel_gid16_t gid16_t; + +struct ext4_io_submit { + struct writeback_control *io_wbc; + struct bio *io_bio; + ext4_io_end_t *io_end; + sector_t io_next_block; +}; + +typedef enum { + EXT4_IGET_NORMAL = 0, + EXT4_IGET_SPECIAL = 1, + EXT4_IGET_HANDLE = 2, +} ext4_iget_flags; + +struct ext4_xattr_inode_array { + unsigned int count; + struct inode *inodes[0]; +}; + +struct mpage_da_data { + struct inode *inode; + struct writeback_control *wbc; + long unsigned int first_page; + long unsigned int next_page; + long unsigned int last_page; + struct ext4_map_blocks map; + 
struct ext4_io_submit io_submit; + unsigned int do_map: 1; + unsigned int scanned_until_end: 1; +}; + +struct fstrim_range { + __u64 start; + __u64 len; + __u64 minlen; +}; + +struct ext4_new_group_input { + __u32 group; + __u64 block_bitmap; + __u64 inode_bitmap; + __u64 inode_table; + __u32 blocks_count; + __u16 reserved_blocks; + __u16 unused; +}; + +struct compat_ext4_new_group_input { + u32 group; + compat_u64 block_bitmap; + compat_u64 inode_bitmap; + compat_u64 inode_table; + u32 blocks_count; + u16 reserved_blocks; + u16 unused; +}; + +struct ext4_new_group_data { + __u32 group; + __u64 block_bitmap; + __u64 inode_bitmap; + __u64 inode_table; + __u32 blocks_count; + __u16 reserved_blocks; + __u16 mdata_blocks; + __u32 free_clusters_count; +}; + +struct move_extent { + __u32 reserved; + __u32 donor_fd; + __u64 orig_start; + __u64 donor_start; + __u64 len; + __u64 moved_len; +}; + +struct fsmap_head { + __u32 fmh_iflags; + __u32 fmh_oflags; + __u32 fmh_count; + __u32 fmh_entries; + __u64 fmh_reserved[6]; + struct fsmap fmh_keys[2]; + struct fsmap fmh_recs[0]; +}; + +struct getfsmap_info { + struct super_block *gi_sb; + struct fsmap_head *gi_data; + unsigned int gi_idx; + __u32 gi_last_flags; +}; + +enum blk_default_limits { + BLK_MAX_SEGMENTS = 128, + BLK_SAFE_MAX_SECTORS = 255, + BLK_DEF_MAX_SECTORS = 2560, + BLK_MAX_SEGMENT_SIZE = 65536, + BLK_SEG_BOUNDARY_MASK = 4294967295, +}; + +struct ext4_free_data { + struct list_head efd_list; + struct rb_node efd_node; + ext4_group_t efd_group; + ext4_grpblk_t efd_start_cluster; + ext4_grpblk_t efd_count; + tid_t efd_tid; +}; + +struct ext4_prealloc_space { + struct list_head pa_inode_list; + struct list_head pa_group_list; + union { + struct list_head pa_tmp_list; + struct callback_head pa_rcu; + } u; + spinlock_t pa_lock; + atomic_t pa_count; + unsigned int pa_deleted; + ext4_fsblk_t pa_pstart; + ext4_lblk_t pa_lstart; + ext4_grpblk_t pa_len; + ext4_grpblk_t pa_free; + short unsigned int pa_type; + spinlock_t *pa_obj_lock; + struct inode *pa_inode; +}; + +enum { + MB_INODE_PA = 0, + MB_GROUP_PA = 1, +}; + +struct ext4_free_extent { + ext4_lblk_t fe_logical; + ext4_grpblk_t fe_start; + ext4_group_t fe_group; + ext4_grpblk_t fe_len; +}; + +struct ext4_allocation_context { + struct inode *ac_inode; + struct super_block *ac_sb; + struct ext4_free_extent ac_o_ex; + struct ext4_free_extent ac_g_ex; + struct ext4_free_extent ac_b_ex; + struct ext4_free_extent ac_f_ex; + __u16 ac_groups_scanned; + __u16 ac_found; + __u16 ac_tail; + __u16 ac_buddy; + __u16 ac_flags; + __u8 ac_status; + __u8 ac_criteria; + __u8 ac_2order; + __u8 ac_op; + struct page *ac_bitmap_page; + struct page *ac_buddy_page; + struct ext4_prealloc_space *ac_pa; + struct ext4_locality_group *ac_lg; +}; + +struct ext4_buddy { + struct page *bd_buddy_page; + void *bd_buddy; + struct page *bd_bitmap_page; + void *bd_bitmap; + struct ext4_group_info *bd_info; + struct super_block *bd_sb; + __u16 bd_blkbits; + ext4_group_t bd_group; +}; + +typedef int (*ext4_mballoc_query_range_fn)(struct super_block *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t, void *); + +struct sg { + struct ext4_group_info info; + ext4_grpblk_t counters[18]; +}; + +struct migrate_struct { + ext4_lblk_t first_block; + ext4_lblk_t last_block; + ext4_lblk_t curr_block; + ext4_fsblk_t first_pblock; + ext4_fsblk_t last_pblock; +}; + +struct mmp_struct { + __le32 mmp_magic; + __le32 mmp_seq; + __le64 mmp_time; + char mmp_nodename[64]; + char mmp_bdevname[32]; + __le16 mmp_check_interval; + __le16 mmp_pad1; + __le32 
mmp_pad2[226]; + __le32 mmp_checksum; +}; + +struct mmpd_data { + struct buffer_head *bh; + struct super_block *sb; +}; + +struct ext4_dir_entry { + __le32 inode; + __le16 rec_len; + __le16 name_len; + char name[255]; +}; + +struct ext4_dir_entry_tail { + __le32 det_reserved_zero1; + __le16 det_rec_len; + __u8 det_reserved_zero2; + __u8 det_reserved_ft; + __le32 det_checksum; +}; + +typedef enum { + EITHER = 0, + INDEX = 1, + DIRENT = 2, + DIRENT_HTREE = 3, +} dirblock_type_t; + +struct fake_dirent { + __le32 inode; + __le16 rec_len; + u8 name_len; + u8 file_type; +}; + +struct dx_countlimit { + __le16 limit; + __le16 count; +}; + +struct dx_entry { + __le32 hash; + __le32 block; +}; + +struct dx_root_info { + __le32 reserved_zero; + u8 hash_version; + u8 info_length; + u8 indirect_levels; + u8 unused_flags; +}; + +struct dx_root { + struct fake_dirent dot; + char dot_name[4]; + struct fake_dirent dotdot; + char dotdot_name[4]; + struct dx_root_info info; + struct dx_entry entries[0]; +}; + +struct dx_node { + struct fake_dirent fake; + struct dx_entry entries[0]; +}; + +struct dx_frame { + struct buffer_head *bh; + struct dx_entry *entries; + struct dx_entry *at; +}; + +struct dx_map_entry { + u32 hash; + u16 offs; + u16 size; +}; + +struct dx_tail { + u32 dt_reserved; + __le32 dt_checksum; +}; + +struct ext4_renament { + struct inode *dir; + struct dentry *dentry; + struct inode *inode; + bool is_dir; + int dir_nlink_delta; + struct buffer_head *bh; + struct ext4_dir_entry_2 *de; + int inlined; + struct buffer_head *dir_bh; + struct ext4_dir_entry_2 *parent_de; + int dir_inlined; +}; + +enum bio_post_read_step { + STEP_INITIAL = 0, + STEP_DECRYPT = 1, + STEP_VERITY = 2, + STEP_MAX = 3, +}; + +struct bio_post_read_ctx { + struct bio *bio; + struct work_struct work; + unsigned int cur_step; + unsigned int enabled_steps; +}; + +enum { + BLOCK_BITMAP = 0, + INODE_BITMAP = 1, + INODE_TABLE = 2, + GROUP_TABLE_COUNT = 3, +}; + +struct ext4_rcu_ptr { + struct callback_head rcu; + void *ptr; +}; + +struct ext4_new_flex_group_data { + struct ext4_new_group_data *groups; + __u16 *bg_flags; + ext4_group_t count; +}; + +enum stat_group { + STAT_READ = 0, + STAT_WRITE = 1, + STAT_DISCARD = 2, + STAT_FLUSH = 3, + NR_STAT_GROUPS = 4, +}; + +enum { + I_DATA_SEM_NORMAL = 0, + I_DATA_SEM_OTHER = 1, + I_DATA_SEM_QUOTA = 2, +}; + +struct ext4_lazy_init { + long unsigned int li_state; + struct list_head li_request_list; + struct mutex li_list_mtx; +}; + +struct ext4_journal_cb_entry { + struct list_head jce_list; + void (*jce_func)(struct super_block *, struct ext4_journal_cb_entry *, int); +}; + +struct trace_event_raw_ext4_other_inode_update_time { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ino_t orig_ino; + uid_t uid; + gid_t gid; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_free_inode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + uid_t uid; + gid_t gid; + __u64 blocks; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_request_inode { + struct trace_entry ent; + dev_t dev; + ino_t dir; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_allocate_inode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ino_t dir; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_evict_inode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int nlink; + char __data[0]; +}; + +struct trace_event_raw_ext4_drop_inode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int drop; + char __data[0]; +}; + +struct 
trace_event_raw_ext4_nfs_commit_metadata { + struct trace_entry ent; + dev_t dev; + ino_t ino; + char __data[0]; +}; + +struct trace_event_raw_ext4_mark_inode_dirty { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int ip; + char __data[0]; +}; + +struct trace_event_raw_ext4_begin_ordered_truncate { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t new_size; + char __data[0]; +}; + +struct trace_event_raw_ext4__write_begin { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t pos; + unsigned int len; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_ext4__write_end { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t pos; + unsigned int len; + unsigned int copied; + char __data[0]; +}; + +struct trace_event_raw_ext4_writepages { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long int nr_to_write; + long int pages_skipped; + loff_t range_start; + loff_t range_end; + long unsigned int writeback_index; + int sync_mode; + char for_kupdate; + char range_cyclic; + char __data[0]; +}; + +struct trace_event_raw_ext4_da_write_pages { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int first_page; + long int nr_to_write; + int sync_mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_da_write_pages_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 lblk; + __u32 len; + __u32 flags; + char __data[0]; +}; + +struct trace_event_raw_ext4_writepages_result { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int ret; + int pages_written; + long int pages_skipped; + long unsigned int writeback_index; + int sync_mode; + char __data[0]; +}; + +struct trace_event_raw_ext4__page_op { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int index; + char __data[0]; +}; + +struct trace_event_raw_ext4_invalidatepage_op { + struct trace_entry ent; + dev_t dev; + ino_t ino; + long unsigned int index; + unsigned int offset; + unsigned int length; + char __data[0]; +}; + +struct trace_event_raw_ext4_discard_blocks { + struct trace_entry ent; + dev_t dev; + __u64 blk; + __u64 count; + char __data[0]; +}; + +struct trace_event_raw_ext4__mb_new_pa { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 pa_pstart; + __u64 pa_lstart; + __u32 pa_len; + char __data[0]; +}; + +struct trace_event_raw_ext4_mb_release_inode_pa { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 block; + __u32 count; + char __data[0]; +}; + +struct trace_event_raw_ext4_mb_release_group_pa { + struct trace_entry ent; + dev_t dev; + __u64 pa_pstart; + __u32 pa_len; + char __data[0]; +}; + +struct trace_event_raw_ext4_discard_preallocations { + struct trace_entry ent; + dev_t dev; + ino_t ino; + unsigned int len; + unsigned int needed; + char __data[0]; +}; + +struct trace_event_raw_ext4_mb_discard_preallocations { + struct trace_entry ent; + dev_t dev; + int needed; + char __data[0]; +}; + +struct trace_event_raw_ext4_request_blocks { + struct trace_entry ent; + dev_t dev; + ino_t ino; + unsigned int len; + __u32 logical; + __u32 lleft; + __u32 lright; + __u64 goal; + __u64 pleft; + __u64 pright; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_ext4_allocate_blocks { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 block; + unsigned int len; + __u32 logical; + __u32 lleft; + __u32 lright; + __u64 goal; + __u64 pleft; + __u64 pright; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_ext4_free_blocks { + struct trace_entry ent; + dev_t dev; + 
ino_t ino; + __u64 block; + long unsigned int count; + int flags; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_sync_file_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ino_t parent; + int datasync; + char __data[0]; +}; + +struct trace_event_raw_ext4_sync_file_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4_sync_fs { + struct trace_entry ent; + dev_t dev; + int wait; + char __data[0]; +}; + +struct trace_event_raw_ext4_alloc_da_blocks { + struct trace_entry ent; + dev_t dev; + ino_t ino; + unsigned int data_blocks; + char __data[0]; +}; + +struct trace_event_raw_ext4_mballoc_alloc { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u32 orig_logical; + int orig_start; + __u32 orig_group; + int orig_len; + __u32 goal_logical; + int goal_start; + __u32 goal_group; + int goal_len; + __u32 result_logical; + int result_start; + __u32 result_group; + int result_len; + __u16 found; + __u16 groups; + __u16 buddy; + __u16 flags; + __u16 tail; + __u8 cr; + char __data[0]; +}; + +struct trace_event_raw_ext4_mballoc_prealloc { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u32 orig_logical; + int orig_start; + __u32 orig_group; + int orig_len; + __u32 result_logical; + int result_start; + __u32 result_group; + int result_len; + char __data[0]; +}; + +struct trace_event_raw_ext4__mballoc { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int result_start; + __u32 result_group; + int result_len; + char __data[0]; +}; + +struct trace_event_raw_ext4_forget { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 block; + int is_metadata; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_da_update_reserve_space { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 i_blocks; + int used_blocks; + int reserved_data_blocks; + int quota_claim; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_da_reserve_space { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 i_blocks; + int reserved_data_blocks; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_da_release_space { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 i_blocks; + int freed_blocks; + int reserved_data_blocks; + __u16 mode; + char __data[0]; +}; + +struct trace_event_raw_ext4__bitmap_load { + struct trace_entry ent; + dev_t dev; + __u32 group; + char __data[0]; +}; + +struct trace_event_raw_ext4_read_block_bitmap_load { + struct trace_entry ent; + dev_t dev; + __u32 group; + bool prefetch; + char __data[0]; +}; + +struct trace_event_raw_ext4_direct_IO_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t pos; + long unsigned int len; + int rw; + char __data[0]; +}; + +struct trace_event_raw_ext4_direct_IO_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t pos; + long unsigned int len; + int rw; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4__fallocate_mode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t offset; + loff_t len; + int mode; + char __data[0]; +}; + +struct trace_event_raw_ext4_fallocate_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t pos; + unsigned int blocks; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4_unlink_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ino_t parent; + loff_t size; + char __data[0]; +}; + +struct trace_event_raw_ext4_unlink_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int 
ret; + char __data[0]; +}; + +struct trace_event_raw_ext4__truncate { + struct trace_entry ent; + dev_t dev; + ino_t ino; + __u64 blocks; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_convert_to_initialized_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t m_lblk; + unsigned int m_len; + ext4_lblk_t u_lblk; + unsigned int u_len; + ext4_fsblk_t u_pblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_convert_to_initialized_fastpath { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t m_lblk; + unsigned int m_len; + ext4_lblk_t u_lblk; + unsigned int u_len; + ext4_fsblk_t u_pblk; + ext4_lblk_t i_lblk; + unsigned int i_len; + ext4_fsblk_t i_pblk; + char __data[0]; +}; + +struct trace_event_raw_ext4__map_blocks_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + unsigned int len; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_ext4__map_blocks_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + unsigned int flags; + ext4_fsblk_t pblk; + ext4_lblk_t lblk; + unsigned int len; + unsigned int mflags; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_load_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_fsblk_t pblk; + ext4_lblk_t lblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_load_inode { + struct trace_entry ent; + dev_t dev; + ino_t ino; + char __data[0]; +}; + +struct trace_event_raw_ext4_journal_start { + struct trace_entry ent; + dev_t dev; + long unsigned int ip; + int blocks; + int rsv_blocks; + int revoke_creds; + char __data[0]; +}; + +struct trace_event_raw_ext4_journal_start_reserved { + struct trace_entry ent; + dev_t dev; + long unsigned int ip; + int blocks; + char __data[0]; +}; + +struct trace_event_raw_ext4__trim { + struct trace_entry ent; + int dev_major; + int dev_minor; + __u32 group; + int start; + int len; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_handle_unwritten_extents { + struct trace_entry ent; + dev_t dev; + ino_t ino; + int flags; + ext4_lblk_t lblk; + ext4_fsblk_t pblk; + unsigned int len; + unsigned int allocated; + ext4_fsblk_t newblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_get_implied_cluster_alloc_exit { + struct trace_entry ent; + dev_t dev; + unsigned int flags; + ext4_lblk_t lblk; + ext4_fsblk_t pblk; + unsigned int len; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_put_in_cache { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + unsigned int len; + ext4_fsblk_t start; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_in_cache { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + int ret; + char __data[0]; +}; + +struct trace_event_raw_ext4_find_delalloc_range { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t from; + ext4_lblk_t to; + int reverse; + int found; + ext4_lblk_t found_blk; + char __data[0]; +}; + +struct trace_event_raw_ext4_get_reserved_cluster_alloc { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + unsigned int len; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_show_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_fsblk_t pblk; + ext4_lblk_t lblk; + short unsigned int len; + char __data[0]; +}; + +struct trace_event_raw_ext4_remove_blocks { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t from; + ext4_lblk_t to; + ext4_fsblk_t ee_pblk; + ext4_lblk_t ee_lblk; + short unsigned int ee_len; + 
ext4_fsblk_t pc_pclu; + ext4_lblk_t pc_lblk; + int pc_state; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_rm_leaf { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t start; + ext4_lblk_t ee_lblk; + ext4_fsblk_t ee_pblk; + short int ee_len; + ext4_fsblk_t pc_pclu; + ext4_lblk_t pc_lblk; + int pc_state; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_rm_idx { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_fsblk_t pblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_remove_space { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t start; + ext4_lblk_t end; + int depth; + char __data[0]; +}; + +struct trace_event_raw_ext4_ext_remove_space_done { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t start; + ext4_lblk_t end; + int depth; + ext4_fsblk_t pc_pclu; + ext4_lblk_t pc_lblk; + int pc_state; + short unsigned int eh_entries; + char __data[0]; +}; + +struct trace_event_raw_ext4__es_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + ext4_lblk_t len; + ext4_fsblk_t pblk; + char status; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_remove_extent { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t lblk; + loff_t len; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_find_extent_range_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_find_extent_range_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + ext4_lblk_t len; + ext4_fsblk_t pblk; + char status; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_lookup_extent_enter { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_lookup_extent_exit { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + ext4_lblk_t len; + ext4_fsblk_t pblk; + char status; + int found; + char __data[0]; +}; + +struct trace_event_raw_ext4__es_shrink_enter { + struct trace_entry ent; + dev_t dev; + int nr_to_scan; + int cache_cnt; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_shrink_scan_exit { + struct trace_entry ent; + dev_t dev; + int nr_shrunk; + int cache_cnt; + char __data[0]; +}; + +struct trace_event_raw_ext4_collapse_range { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t offset; + loff_t len; + char __data[0]; +}; + +struct trace_event_raw_ext4_insert_range { + struct trace_entry ent; + dev_t dev; + ino_t ino; + loff_t offset; + loff_t len; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_shrink { + struct trace_entry ent; + dev_t dev; + int nr_shrunk; + long long unsigned int scan_time; + int nr_skipped; + int retried; + char __data[0]; +}; + +struct trace_event_raw_ext4_es_insert_delayed_block { + struct trace_entry ent; + dev_t dev; + ino_t ino; + ext4_lblk_t lblk; + ext4_lblk_t len; + ext4_fsblk_t pblk; + char status; + bool allocated; + char __data[0]; +}; + +struct trace_event_raw_ext4_fsmap_class { + struct trace_entry ent; + dev_t dev; + dev_t keydev; + u32 agno; + u64 bno; + u64 len; + u64 owner; + char __data[0]; +}; + +struct trace_event_raw_ext4_getfsmap_class { + struct trace_entry ent; + dev_t dev; + dev_t keydev; + u64 block; + u64 len; + u64 owner; + u64 flags; + char __data[0]; +}; + +struct trace_event_raw_ext4_shutdown { + struct trace_entry ent; + dev_t dev; + unsigned int flags; + char __data[0]; +}; + +struct trace_event_raw_ext4_error { + struct 
trace_entry ent; + dev_t dev; + const char *function; + unsigned int line; + char __data[0]; +}; + +struct trace_event_raw_ext4_prefetch_bitmaps { + struct trace_entry ent; + dev_t dev; + __u32 group; + __u32 next; + __u32 ios; + char __data[0]; +}; + +struct trace_event_raw_ext4_lazy_itable_init { + struct trace_entry ent; + dev_t dev; + __u32 group; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_replay_scan { + struct trace_entry ent; + dev_t dev; + int error; + int off; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_replay { + struct trace_entry ent; + dev_t dev; + int tag; + int ino; + int priv1; + int priv2; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_commit_start { + struct trace_entry ent; + dev_t dev; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_commit_stop { + struct trace_entry ent; + dev_t dev; + int nblks; + int reason; + int num_fc; + int num_fc_ineligible; + int nblks_agg; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_stats { + struct trace_entry ent; + dev_t dev; + struct ext4_sb_info *sbi; + int count; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_track_create { + struct trace_entry ent; + dev_t dev; + int ino; + int error; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_track_link { + struct trace_entry ent; + dev_t dev; + int ino; + int error; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_track_unlink { + struct trace_entry ent; + dev_t dev; + int ino; + int error; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_track_inode { + struct trace_entry ent; + dev_t dev; + int ino; + int error; + char __data[0]; +}; + +struct trace_event_raw_ext4_fc_track_range { + struct trace_entry ent; + dev_t dev; + int ino; + long int start; + long int end; + int error; + char __data[0]; +}; + +struct trace_event_data_offsets_ext4_other_inode_update_time {}; + +struct trace_event_data_offsets_ext4_free_inode {}; + +struct trace_event_data_offsets_ext4_request_inode {}; + +struct trace_event_data_offsets_ext4_allocate_inode {}; + +struct trace_event_data_offsets_ext4_evict_inode {}; + +struct trace_event_data_offsets_ext4_drop_inode {}; + +struct trace_event_data_offsets_ext4_nfs_commit_metadata {}; + +struct trace_event_data_offsets_ext4_mark_inode_dirty {}; + +struct trace_event_data_offsets_ext4_begin_ordered_truncate {}; + +struct trace_event_data_offsets_ext4__write_begin {}; + +struct trace_event_data_offsets_ext4__write_end {}; + +struct trace_event_data_offsets_ext4_writepages {}; + +struct trace_event_data_offsets_ext4_da_write_pages {}; + +struct trace_event_data_offsets_ext4_da_write_pages_extent {}; + +struct trace_event_data_offsets_ext4_writepages_result {}; + +struct trace_event_data_offsets_ext4__page_op {}; + +struct trace_event_data_offsets_ext4_invalidatepage_op {}; + +struct trace_event_data_offsets_ext4_discard_blocks {}; + +struct trace_event_data_offsets_ext4__mb_new_pa {}; + +struct trace_event_data_offsets_ext4_mb_release_inode_pa {}; + +struct trace_event_data_offsets_ext4_mb_release_group_pa {}; + +struct trace_event_data_offsets_ext4_discard_preallocations {}; + +struct trace_event_data_offsets_ext4_mb_discard_preallocations {}; + +struct trace_event_data_offsets_ext4_request_blocks {}; + +struct trace_event_data_offsets_ext4_allocate_blocks {}; + +struct trace_event_data_offsets_ext4_free_blocks {}; + +struct trace_event_data_offsets_ext4_sync_file_enter {}; + +struct trace_event_data_offsets_ext4_sync_file_exit {}; + +struct trace_event_data_offsets_ext4_sync_fs {}; + +struct 
trace_event_data_offsets_ext4_alloc_da_blocks {}; + +struct trace_event_data_offsets_ext4_mballoc_alloc {}; + +struct trace_event_data_offsets_ext4_mballoc_prealloc {}; + +struct trace_event_data_offsets_ext4__mballoc {}; + +struct trace_event_data_offsets_ext4_forget {}; + +struct trace_event_data_offsets_ext4_da_update_reserve_space {}; + +struct trace_event_data_offsets_ext4_da_reserve_space {}; + +struct trace_event_data_offsets_ext4_da_release_space {}; + +struct trace_event_data_offsets_ext4__bitmap_load {}; + +struct trace_event_data_offsets_ext4_read_block_bitmap_load {}; + +struct trace_event_data_offsets_ext4_direct_IO_enter {}; + +struct trace_event_data_offsets_ext4_direct_IO_exit {}; + +struct trace_event_data_offsets_ext4__fallocate_mode {}; + +struct trace_event_data_offsets_ext4_fallocate_exit {}; + +struct trace_event_data_offsets_ext4_unlink_enter {}; + +struct trace_event_data_offsets_ext4_unlink_exit {}; + +struct trace_event_data_offsets_ext4__truncate {}; + +struct trace_event_data_offsets_ext4_ext_convert_to_initialized_enter {}; + +struct trace_event_data_offsets_ext4_ext_convert_to_initialized_fastpath {}; + +struct trace_event_data_offsets_ext4__map_blocks_enter {}; + +struct trace_event_data_offsets_ext4__map_blocks_exit {}; + +struct trace_event_data_offsets_ext4_ext_load_extent {}; + +struct trace_event_data_offsets_ext4_load_inode {}; + +struct trace_event_data_offsets_ext4_journal_start {}; + +struct trace_event_data_offsets_ext4_journal_start_reserved {}; + +struct trace_event_data_offsets_ext4__trim {}; + +struct trace_event_data_offsets_ext4_ext_handle_unwritten_extents {}; + +struct trace_event_data_offsets_ext4_get_implied_cluster_alloc_exit {}; + +struct trace_event_data_offsets_ext4_ext_put_in_cache {}; + +struct trace_event_data_offsets_ext4_ext_in_cache {}; + +struct trace_event_data_offsets_ext4_find_delalloc_range {}; + +struct trace_event_data_offsets_ext4_get_reserved_cluster_alloc {}; + +struct trace_event_data_offsets_ext4_ext_show_extent {}; + +struct trace_event_data_offsets_ext4_remove_blocks {}; + +struct trace_event_data_offsets_ext4_ext_rm_leaf {}; + +struct trace_event_data_offsets_ext4_ext_rm_idx {}; + +struct trace_event_data_offsets_ext4_ext_remove_space {}; + +struct trace_event_data_offsets_ext4_ext_remove_space_done {}; + +struct trace_event_data_offsets_ext4__es_extent {}; + +struct trace_event_data_offsets_ext4_es_remove_extent {}; + +struct trace_event_data_offsets_ext4_es_find_extent_range_enter {}; + +struct trace_event_data_offsets_ext4_es_find_extent_range_exit {}; + +struct trace_event_data_offsets_ext4_es_lookup_extent_enter {}; + +struct trace_event_data_offsets_ext4_es_lookup_extent_exit {}; + +struct trace_event_data_offsets_ext4__es_shrink_enter {}; + +struct trace_event_data_offsets_ext4_es_shrink_scan_exit {}; + +struct trace_event_data_offsets_ext4_collapse_range {}; + +struct trace_event_data_offsets_ext4_insert_range {}; + +struct trace_event_data_offsets_ext4_es_shrink {}; + +struct trace_event_data_offsets_ext4_es_insert_delayed_block {}; + +struct trace_event_data_offsets_ext4_fsmap_class {}; + +struct trace_event_data_offsets_ext4_getfsmap_class {}; + +struct trace_event_data_offsets_ext4_shutdown {}; + +struct trace_event_data_offsets_ext4_error {}; + +struct trace_event_data_offsets_ext4_prefetch_bitmaps {}; + +struct trace_event_data_offsets_ext4_lazy_itable_init {}; + +struct trace_event_data_offsets_ext4_fc_replay_scan {}; + +struct trace_event_data_offsets_ext4_fc_replay {}; + +struct 
trace_event_data_offsets_ext4_fc_commit_start {}; + +struct trace_event_data_offsets_ext4_fc_commit_stop {}; + +struct trace_event_data_offsets_ext4_fc_stats {}; + +struct trace_event_data_offsets_ext4_fc_track_create {}; + +struct trace_event_data_offsets_ext4_fc_track_link {}; + +struct trace_event_data_offsets_ext4_fc_track_unlink {}; + +struct trace_event_data_offsets_ext4_fc_track_inode {}; + +struct trace_event_data_offsets_ext4_fc_track_range {}; + +typedef void (*btf_trace_ext4_other_inode_update_time)(void *, struct inode *, ino_t); + +typedef void (*btf_trace_ext4_free_inode)(void *, struct inode *); + +typedef void (*btf_trace_ext4_request_inode)(void *, struct inode *, int); + +typedef void (*btf_trace_ext4_allocate_inode)(void *, struct inode *, struct inode *, int); + +typedef void (*btf_trace_ext4_evict_inode)(void *, struct inode *); + +typedef void (*btf_trace_ext4_drop_inode)(void *, struct inode *, int); + +typedef void (*btf_trace_ext4_nfs_commit_metadata)(void *, struct inode *); + +typedef void (*btf_trace_ext4_mark_inode_dirty)(void *, struct inode *, long unsigned int); + +typedef void (*btf_trace_ext4_begin_ordered_truncate)(void *, struct inode *, loff_t); + +typedef void (*btf_trace_ext4_write_begin)(void *, struct inode *, loff_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_da_write_begin)(void *, struct inode *, loff_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_write_end)(void *, struct inode *, loff_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_journalled_write_end)(void *, struct inode *, loff_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_da_write_end)(void *, struct inode *, loff_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_writepages)(void *, struct inode *, struct writeback_control *); + +typedef void (*btf_trace_ext4_da_write_pages)(void *, struct inode *, long unsigned int, struct writeback_control *); + +typedef void (*btf_trace_ext4_da_write_pages_extent)(void *, struct inode *, struct ext4_map_blocks *); + +typedef void (*btf_trace_ext4_writepages_result)(void *, struct inode *, struct writeback_control *, int, int); + +typedef void (*btf_trace_ext4_writepage)(void *, struct page *); + +typedef void (*btf_trace_ext4_readpage)(void *, struct page *); + +typedef void (*btf_trace_ext4_releasepage)(void *, struct page *); + +typedef void (*btf_trace_ext4_invalidatepage)(void *, struct page *, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_journalled_invalidatepage)(void *, struct page *, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_discard_blocks)(void *, struct super_block *, long long unsigned int, long long unsigned int); + +typedef void (*btf_trace_ext4_mb_new_inode_pa)(void *, struct ext4_allocation_context *, struct ext4_prealloc_space *); + +typedef void (*btf_trace_ext4_mb_new_group_pa)(void *, struct ext4_allocation_context *, struct ext4_prealloc_space *); + +typedef void (*btf_trace_ext4_mb_release_inode_pa)(void *, struct ext4_prealloc_space *, long long unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_mb_release_group_pa)(void *, struct super_block *, struct ext4_prealloc_space *); + +typedef void (*btf_trace_ext4_discard_preallocations)(void *, struct inode *, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_mb_discard_preallocations)(void *, struct super_block *, int); + +typedef void (*btf_trace_ext4_request_blocks)(void *, struct ext4_allocation_request *); + +typedef void 
(*btf_trace_ext4_allocate_blocks)(void *, struct ext4_allocation_request *, long long unsigned int); + +typedef void (*btf_trace_ext4_free_blocks)(void *, struct inode *, __u64, long unsigned int, int); + +typedef void (*btf_trace_ext4_sync_file_enter)(void *, struct file *, int); + +typedef void (*btf_trace_ext4_sync_file_exit)(void *, struct inode *, int); + +typedef void (*btf_trace_ext4_sync_fs)(void *, struct super_block *, int); + +typedef void (*btf_trace_ext4_alloc_da_blocks)(void *, struct inode *); + +typedef void (*btf_trace_ext4_mballoc_alloc)(void *, struct ext4_allocation_context *); + +typedef void (*btf_trace_ext4_mballoc_prealloc)(void *, struct ext4_allocation_context *); + +typedef void (*btf_trace_ext4_mballoc_discard)(void *, struct super_block *, struct inode *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); + +typedef void (*btf_trace_ext4_mballoc_free)(void *, struct super_block *, struct inode *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); + +typedef void (*btf_trace_ext4_forget)(void *, struct inode *, int, __u64); + +typedef void (*btf_trace_ext4_da_update_reserve_space)(void *, struct inode *, int, int); + +typedef void (*btf_trace_ext4_da_reserve_space)(void *, struct inode *); + +typedef void (*btf_trace_ext4_da_release_space)(void *, struct inode *, int); + +typedef void (*btf_trace_ext4_mb_bitmap_load)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_mb_buddy_bitmap_load)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_load_inode_bitmap)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_read_block_bitmap_load)(void *, struct super_block *, long unsigned int, bool); + +typedef void (*btf_trace_ext4_direct_IO_enter)(void *, struct inode *, loff_t, long unsigned int, int); + +typedef void (*btf_trace_ext4_direct_IO_exit)(void *, struct inode *, loff_t, long unsigned int, int, int); + +typedef void (*btf_trace_ext4_fallocate_enter)(void *, struct inode *, loff_t, loff_t, int); + +typedef void (*btf_trace_ext4_punch_hole)(void *, struct inode *, loff_t, loff_t, int); + +typedef void (*btf_trace_ext4_zero_range)(void *, struct inode *, loff_t, loff_t, int); + +typedef void (*btf_trace_ext4_fallocate_exit)(void *, struct inode *, loff_t, unsigned int, int); + +typedef void (*btf_trace_ext4_unlink_enter)(void *, struct inode *, struct dentry *); + +typedef void (*btf_trace_ext4_unlink_exit)(void *, struct dentry *, int); + +typedef void (*btf_trace_ext4_truncate_enter)(void *, struct inode *); + +typedef void (*btf_trace_ext4_truncate_exit)(void *, struct inode *); + +typedef void (*btf_trace_ext4_ext_convert_to_initialized_enter)(void *, struct inode *, struct ext4_map_blocks *, struct ext4_extent *); + +typedef void (*btf_trace_ext4_ext_convert_to_initialized_fastpath)(void *, struct inode *, struct ext4_map_blocks *, struct ext4_extent *, struct ext4_extent *); + +typedef void (*btf_trace_ext4_ext_map_blocks_enter)(void *, struct inode *, ext4_lblk_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_ind_map_blocks_enter)(void *, struct inode *, ext4_lblk_t, unsigned int, unsigned int); + +typedef void (*btf_trace_ext4_ext_map_blocks_exit)(void *, struct inode *, unsigned int, struct ext4_map_blocks *, int); + +typedef void (*btf_trace_ext4_ind_map_blocks_exit)(void *, struct inode *, unsigned int, struct ext4_map_blocks *, int); + +typedef void (*btf_trace_ext4_ext_load_extent)(void *, struct inode *, ext4_lblk_t, ext4_fsblk_t); + +typedef void 
(*btf_trace_ext4_load_inode)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_journal_start)(void *, struct super_block *, int, int, int, long unsigned int); + +typedef void (*btf_trace_ext4_journal_start_reserved)(void *, struct super_block *, int, long unsigned int); + +typedef void (*btf_trace_ext4_trim_extent)(void *, struct super_block *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); + +typedef void (*btf_trace_ext4_trim_all_free)(void *, struct super_block *, ext4_group_t, ext4_grpblk_t, ext4_grpblk_t); + +typedef void (*btf_trace_ext4_ext_handle_unwritten_extents)(void *, struct inode *, struct ext4_map_blocks *, int, unsigned int, ext4_fsblk_t); + +typedef void (*btf_trace_ext4_get_implied_cluster_alloc_exit)(void *, struct super_block *, struct ext4_map_blocks *, int); + +typedef void (*btf_trace_ext4_ext_put_in_cache)(void *, struct inode *, ext4_lblk_t, unsigned int, ext4_fsblk_t); + +typedef void (*btf_trace_ext4_ext_in_cache)(void *, struct inode *, ext4_lblk_t, int); + +typedef void (*btf_trace_ext4_find_delalloc_range)(void *, struct inode *, ext4_lblk_t, ext4_lblk_t, int, int, ext4_lblk_t); + +typedef void (*btf_trace_ext4_get_reserved_cluster_alloc)(void *, struct inode *, ext4_lblk_t, unsigned int); + +typedef void (*btf_trace_ext4_ext_show_extent)(void *, struct inode *, ext4_lblk_t, ext4_fsblk_t, short unsigned int); + +typedef void (*btf_trace_ext4_remove_blocks)(void *, struct inode *, struct ext4_extent *, ext4_lblk_t, ext4_fsblk_t, struct partial_cluster *); + +typedef void (*btf_trace_ext4_ext_rm_leaf)(void *, struct inode *, ext4_lblk_t, struct ext4_extent *, struct partial_cluster *); + +typedef void (*btf_trace_ext4_ext_rm_idx)(void *, struct inode *, ext4_fsblk_t); + +typedef void (*btf_trace_ext4_ext_remove_space)(void *, struct inode *, ext4_lblk_t, ext4_lblk_t, int); + +typedef void (*btf_trace_ext4_ext_remove_space_done)(void *, struct inode *, ext4_lblk_t, ext4_lblk_t, int, struct partial_cluster *, __le16); + +typedef void (*btf_trace_ext4_es_insert_extent)(void *, struct inode *, struct extent_status *); + +typedef void (*btf_trace_ext4_es_cache_extent)(void *, struct inode *, struct extent_status *); + +typedef void (*btf_trace_ext4_es_remove_extent)(void *, struct inode *, ext4_lblk_t, ext4_lblk_t); + +typedef void (*btf_trace_ext4_es_find_extent_range_enter)(void *, struct inode *, ext4_lblk_t); + +typedef void (*btf_trace_ext4_es_find_extent_range_exit)(void *, struct inode *, struct extent_status *); + +typedef void (*btf_trace_ext4_es_lookup_extent_enter)(void *, struct inode *, ext4_lblk_t); + +typedef void (*btf_trace_ext4_es_lookup_extent_exit)(void *, struct inode *, struct extent_status *, int); + +typedef void (*btf_trace_ext4_es_shrink_count)(void *, struct super_block *, int, int); + +typedef void (*btf_trace_ext4_es_shrink_scan_enter)(void *, struct super_block *, int, int); + +typedef void (*btf_trace_ext4_es_shrink_scan_exit)(void *, struct super_block *, int, int); + +typedef void (*btf_trace_ext4_collapse_range)(void *, struct inode *, loff_t, loff_t); + +typedef void (*btf_trace_ext4_insert_range)(void *, struct inode *, loff_t, loff_t); + +typedef void (*btf_trace_ext4_es_shrink)(void *, struct super_block *, int, u64, int, int); + +typedef void (*btf_trace_ext4_es_insert_delayed_block)(void *, struct inode *, struct extent_status *, bool); + +typedef void (*btf_trace_ext4_fsmap_low_key)(void *, struct super_block *, u32, u32, u64, u64, u64); + +typedef void (*btf_trace_ext4_fsmap_high_key)(void *, 
struct super_block *, u32, u32, u64, u64, u64); + +typedef void (*btf_trace_ext4_fsmap_mapping)(void *, struct super_block *, u32, u32, u64, u64, u64); + +typedef void (*btf_trace_ext4_getfsmap_low_key)(void *, struct super_block *, struct ext4_fsmap *); + +typedef void (*btf_trace_ext4_getfsmap_high_key)(void *, struct super_block *, struct ext4_fsmap *); + +typedef void (*btf_trace_ext4_getfsmap_mapping)(void *, struct super_block *, struct ext4_fsmap *); + +typedef void (*btf_trace_ext4_shutdown)(void *, struct super_block *, long unsigned int); + +typedef void (*btf_trace_ext4_error)(void *, struct super_block *, const char *, unsigned int); + +typedef void (*btf_trace_ext4_prefetch_bitmaps)(void *, struct super_block *, ext4_group_t, ext4_group_t, unsigned int); + +typedef void (*btf_trace_ext4_lazy_itable_init)(void *, struct super_block *, ext4_group_t); + +typedef void (*btf_trace_ext4_fc_replay_scan)(void *, struct super_block *, int, int); + +typedef void (*btf_trace_ext4_fc_replay)(void *, struct super_block *, int, int, int, int); + +typedef void (*btf_trace_ext4_fc_commit_start)(void *, struct super_block *); + +typedef void (*btf_trace_ext4_fc_commit_stop)(void *, struct super_block *, int, int); + +typedef void (*btf_trace_ext4_fc_stats)(void *, struct super_block *); + +typedef void (*btf_trace_ext4_fc_track_create)(void *, struct inode *, struct dentry *, int); + +typedef void (*btf_trace_ext4_fc_track_link)(void *, struct inode *, struct dentry *, int); + +typedef void (*btf_trace_ext4_fc_track_unlink)(void *, struct inode *, struct dentry *, int); + +typedef void (*btf_trace_ext4_fc_track_inode)(void *, struct inode *, int); + +typedef void (*btf_trace_ext4_fc_track_range)(void *, struct inode *, long int, long int, int); + +enum { + Opt_bsd_df = 0, + Opt_minix_df = 1, + Opt_grpid = 2, + Opt_nogrpid = 3, + Opt_resgid = 4, + Opt_resuid = 5, + Opt_sb = 6, + Opt_err_cont = 7, + Opt_err_panic = 8, + Opt_err_ro = 9, + Opt_nouid32 = 10, + Opt_debug = 11, + Opt_removed = 12, + Opt_user_xattr = 13, + Opt_nouser_xattr = 14, + Opt_acl = 15, + Opt_noacl = 16, + Opt_auto_da_alloc = 17, + Opt_noauto_da_alloc = 18, + Opt_noload = 19, + Opt_commit = 20, + Opt_min_batch_time = 21, + Opt_max_batch_time = 22, + Opt_journal_dev = 23, + Opt_journal_path = 24, + Opt_journal_checksum = 25, + Opt_journal_async_commit = 26, + Opt_abort = 27, + Opt_data_journal = 28, + Opt_data_ordered = 29, + Opt_data_writeback = 30, + Opt_data_err_abort = 31, + Opt_data_err_ignore = 32, + Opt_test_dummy_encryption = 33, + Opt_inlinecrypt = 34, + Opt_usrjquota = 35, + Opt_grpjquota = 36, + Opt_offusrjquota = 37, + Opt_offgrpjquota = 38, + Opt_jqfmt_vfsold = 39, + Opt_jqfmt_vfsv0 = 40, + Opt_jqfmt_vfsv1 = 41, + Opt_quota = 42, + Opt_noquota = 43, + Opt_barrier = 44, + Opt_nobarrier = 45, + Opt_err___2 = 46, + Opt_usrquota = 47, + Opt_grpquota = 48, + Opt_prjquota = 49, + Opt_i_version = 50, + Opt_dax = 51, + Opt_dax_always = 52, + Opt_dax_inode = 53, + Opt_dax_never = 54, + Opt_stripe = 55, + Opt_delalloc = 56, + Opt_nodelalloc = 57, + Opt_warn_on_error = 58, + Opt_nowarn_on_error = 59, + Opt_mblk_io_submit = 60, + Opt_lazytime = 61, + Opt_nolazytime = 62, + Opt_debug_want_extra_isize = 63, + Opt_nomblk_io_submit = 64, + Opt_block_validity = 65, + Opt_noblock_validity = 66, + Opt_inode_readahead_blks = 67, + Opt_journal_ioprio = 68, + Opt_dioread_nolock = 69, + Opt_dioread_lock = 70, + Opt_discard = 71, + Opt_nodiscard = 72, + Opt_init_itable = 73, + Opt_noinit_itable = 74, + Opt_max_dir_size_kb = 75, + 
Opt_nojournal_checksum = 76, + Opt_nombcache = 77, + Opt_prefetch_block_bitmaps = 78, +}; + +struct mount_opts { + int token; + int mount_opt; + int flags; +}; + +struct ext4_sb_encodings { + __u16 magic; + char *name; + char *version; +}; + +struct ext4_mount_options { + long unsigned int s_mount_opt; + long unsigned int s_mount_opt2; + kuid_t s_resuid; + kgid_t s_resgid; + long unsigned int s_commit_interval; + u32 s_min_batch_time; + u32 s_max_batch_time; + int s_jquota_fmt; + char *s_qf_names[3]; +}; + +enum { + attr_noop = 0, + attr_delayed_allocation_blocks = 1, + attr_session_write_kbytes = 2, + attr_lifetime_write_kbytes = 3, + attr_reserved_clusters = 4, + attr_inode_readahead = 5, + attr_trigger_test_error = 6, + attr_first_error_time = 7, + attr_last_error_time = 8, + attr_feature = 9, + attr_pointer_ui = 10, + attr_pointer_ul = 11, + attr_pointer_u64 = 12, + attr_pointer_u8 = 13, + attr_pointer_string = 14, + attr_pointer_atomic = 15, + attr_journal_task = 16, +}; + +enum { + ptr_explicit = 0, + ptr_ext4_sb_info_offset = 1, + ptr_ext4_super_block_offset = 2, +}; + +struct ext4_attr { + struct attribute attr; + short int attr_id; + short int attr_ptr; + short unsigned int attr_size; + union { + int offset; + void *explicit_ptr; + } u; +}; + +struct ext4_xattr_header { + __le32 h_magic; + __le32 h_refcount; + __le32 h_blocks; + __le32 h_hash; + __le32 h_checksum; + __u32 h_reserved[3]; +}; + +struct ext4_xattr_block_find { + struct ext4_xattr_search s; + struct buffer_head *bh; +}; + +struct ext4_fc_tl { + __le16 fc_tag; + __le16 fc_len; +}; + +struct ext4_fc_head { + __le32 fc_features; + __le32 fc_tid; +}; + +struct ext4_fc_add_range { + __le32 fc_ino; + __u8 fc_ex[12]; +}; + +struct ext4_fc_del_range { + __le32 fc_ino; + __le32 fc_lblk; + __le32 fc_len; +}; + +struct ext4_fc_dentry_info { + __le32 fc_parent_ino; + __le32 fc_ino; + u8 fc_dname[0]; +}; + +struct ext4_fc_inode { + __le32 fc_ino; + __u8 fc_raw_inode[0]; +}; + +struct ext4_fc_tail { + __le32 fc_tid; + __le32 fc_crc; +}; + +struct ext4_fc_dentry_update { + int fcd_op; + int fcd_parent; + int fcd_ino; + struct qstr fcd_name; + unsigned char fcd_iname[32]; + struct list_head fcd_list; +}; + +struct __track_dentry_update_args { + struct dentry *dentry; + int op; +}; + +struct __track_range_args { + ext4_lblk_t start; + ext4_lblk_t end; +}; + +struct dentry_info_args { + int parent_ino; + int dname_len; + int ino; + int inode_len; + char *dname; +}; + +typedef struct { + __le16 e_tag; + __le16 e_perm; + __le32 e_id; +} ext4_acl_entry; + +typedef struct { + __le32 a_version; +} ext4_acl_header; + +struct commit_header { + __be32 h_magic; + __be32 h_blocktype; + __be32 h_sequence; + unsigned char h_chksum_type; + unsigned char h_chksum_size; + unsigned char h_padding[2]; + __be32 h_chksum[8]; + __be64 h_commit_sec; + __be32 h_commit_nsec; +}; + +struct journal_block_tag3_s { + __be32 t_blocknr; + __be32 t_flags; + __be32 t_blocknr_high; + __be32 t_checksum; +}; + +typedef struct journal_block_tag3_s journal_block_tag3_t; + +struct journal_block_tag_s { + __be32 t_blocknr; + __be16 t_checksum; + __be16 t_flags; + __be32 t_blocknr_high; +}; + +typedef struct journal_block_tag_s journal_block_tag_t; + +struct jbd2_journal_block_tail { + __be32 t_checksum; +}; + +struct jbd2_journal_revoke_header_s { + journal_header_t r_header; + __be32 r_count; +}; + +typedef struct jbd2_journal_revoke_header_s jbd2_journal_revoke_header_t; + +struct recovery_info { + tid_t start_transaction; + tid_t end_transaction; + int nr_replays; + int 
nr_revokes; + int nr_revoke_hits; +}; + +struct jbd2_revoke_table_s { + int hash_size; + int hash_shift; + struct list_head *hash_table; +}; + +struct jbd2_revoke_record_s { + struct list_head hash; + tid_t sequence; + long long unsigned int blocknr; +}; + +struct trace_event_raw_jbd2_checkpoint { + struct trace_entry ent; + dev_t dev; + int result; + char __data[0]; +}; + +struct trace_event_raw_jbd2_commit { + struct trace_entry ent; + dev_t dev; + char sync_commit; + int transaction; + char __data[0]; +}; + +struct trace_event_raw_jbd2_end_commit { + struct trace_entry ent; + dev_t dev; + char sync_commit; + int transaction; + int head; + char __data[0]; +}; + +struct trace_event_raw_jbd2_submit_inode_data { + struct trace_entry ent; + dev_t dev; + ino_t ino; + char __data[0]; +}; + +struct trace_event_raw_jbd2_handle_start_class { + struct trace_entry ent; + dev_t dev; + long unsigned int tid; + unsigned int type; + unsigned int line_no; + int requested_blocks; + char __data[0]; +}; + +struct trace_event_raw_jbd2_handle_extend { + struct trace_entry ent; + dev_t dev; + long unsigned int tid; + unsigned int type; + unsigned int line_no; + int buffer_credits; + int requested_blocks; + char __data[0]; +}; + +struct trace_event_raw_jbd2_handle_stats { + struct trace_entry ent; + dev_t dev; + long unsigned int tid; + unsigned int type; + unsigned int line_no; + int interval; + int sync; + int requested_blocks; + int dirtied_blocks; + char __data[0]; +}; + +struct trace_event_raw_jbd2_run_stats { + struct trace_entry ent; + dev_t dev; + long unsigned int tid; + long unsigned int wait; + long unsigned int request_delay; + long unsigned int running; + long unsigned int locked; + long unsigned int flushing; + long unsigned int logging; + __u32 handle_count; + __u32 blocks; + __u32 blocks_logged; + char __data[0]; +}; + +struct trace_event_raw_jbd2_checkpoint_stats { + struct trace_entry ent; + dev_t dev; + long unsigned int tid; + long unsigned int chp_time; + __u32 forced_to_close; + __u32 written; + __u32 dropped; + char __data[0]; +}; + +struct trace_event_raw_jbd2_update_log_tail { + struct trace_entry ent; + dev_t dev; + tid_t tail_sequence; + tid_t first_tid; + long unsigned int block_nr; + long unsigned int freed; + char __data[0]; +}; + +struct trace_event_raw_jbd2_write_superblock { + struct trace_entry ent; + dev_t dev; + int write_op; + char __data[0]; +}; + +struct trace_event_raw_jbd2_lock_buffer_stall { + struct trace_entry ent; + dev_t dev; + long unsigned int stall_ms; + char __data[0]; +}; + +struct trace_event_data_offsets_jbd2_checkpoint {}; + +struct trace_event_data_offsets_jbd2_commit {}; + +struct trace_event_data_offsets_jbd2_end_commit {}; + +struct trace_event_data_offsets_jbd2_submit_inode_data {}; + +struct trace_event_data_offsets_jbd2_handle_start_class {}; + +struct trace_event_data_offsets_jbd2_handle_extend {}; + +struct trace_event_data_offsets_jbd2_handle_stats {}; + +struct trace_event_data_offsets_jbd2_run_stats {}; + +struct trace_event_data_offsets_jbd2_checkpoint_stats {}; + +struct trace_event_data_offsets_jbd2_update_log_tail {}; + +struct trace_event_data_offsets_jbd2_write_superblock {}; + +struct trace_event_data_offsets_jbd2_lock_buffer_stall {}; + +typedef void (*btf_trace_jbd2_checkpoint)(void *, journal_t *, int); + +typedef void (*btf_trace_jbd2_start_commit)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_commit_locking)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_commit_flushing)(void *, 
journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_commit_logging)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_drop_transaction)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_end_commit)(void *, journal_t *, transaction_t *); + +typedef void (*btf_trace_jbd2_submit_inode_data)(void *, struct inode *); + +typedef void (*btf_trace_jbd2_handle_start)(void *, dev_t, long unsigned int, unsigned int, unsigned int, int); + +typedef void (*btf_trace_jbd2_handle_restart)(void *, dev_t, long unsigned int, unsigned int, unsigned int, int); + +typedef void (*btf_trace_jbd2_handle_extend)(void *, dev_t, long unsigned int, unsigned int, unsigned int, int, int); + +typedef void (*btf_trace_jbd2_handle_stats)(void *, dev_t, long unsigned int, unsigned int, unsigned int, int, int, int, int); + +typedef void (*btf_trace_jbd2_run_stats)(void *, dev_t, long unsigned int, struct transaction_run_stats_s *); + +typedef void (*btf_trace_jbd2_checkpoint_stats)(void *, dev_t, long unsigned int, struct transaction_chp_stats_s *); + +typedef void (*btf_trace_jbd2_update_log_tail)(void *, journal_t *, tid_t, long unsigned int, long unsigned int); + +typedef void (*btf_trace_jbd2_write_superblock)(void *, journal_t *, int); + +typedef void (*btf_trace_jbd2_lock_buffer_stall)(void *, dev_t, long unsigned int); + +struct jbd2_stats_proc_session { + journal_t *journal; + struct transaction_stats_s *stats; + int start; + int max; +}; + +struct ramfs_mount_opts { + umode_t mode; +}; + +struct ramfs_fs_info { + struct ramfs_mount_opts mount_opts; +}; + +enum ramfs_param { + Opt_mode___3 = 0, +}; + +enum hugetlbfs_size_type { + NO_SIZE = 0, + SIZE_STD = 1, + SIZE_PERCENT = 2, +}; + +struct hugetlbfs_fs_context { + struct hstate *hstate; + long long unsigned int max_size_opt; + long long unsigned int min_size_opt; + long int max_hpages; + long int nr_inodes; + long int min_hpages; + enum hugetlbfs_size_type max_val_type; + enum hugetlbfs_size_type min_val_type; + kuid_t uid; + kgid_t gid; + umode_t mode; +}; + +enum hugetlb_param { + Opt_gid___4 = 0, + Opt_min_size = 1, + Opt_mode___4 = 2, + Opt_nr_inodes___2 = 3, + Opt_pagesize = 4, + Opt_size___2 = 5, + Opt_uid___3 = 6, +}; + +struct getdents_callback___2 { + struct dir_context ctx; + char *name; + u64 ino; + int found; + int sequence; +}; + +typedef u16 wchar_t; + +typedef u32 unicode_t; + +struct nls_table { + const char *charset; + const char *alias; + int (*uni2char)(wchar_t, unsigned char *, int); + int (*char2uni)(const unsigned char *, int, wchar_t *); + const unsigned char *charset2lower; + const unsigned char *charset2upper; + struct module *owner; + struct nls_table *next; +}; + +enum utf16_endian { + UTF16_HOST_ENDIAN = 0, + UTF16_LITTLE_ENDIAN = 1, + UTF16_BIG_ENDIAN = 2, +}; + +struct utf8_table { + int cmask; + int cval; + int shift; + long int lmask; + long int lval; +}; + +struct utf8data; + +struct utf8cursor { + const struct utf8data *data; + const char *s; + const char *p; + const char *ss; + const char *sp; + unsigned int len; + unsigned int slen; + short int ccc; + short int nccc; + unsigned char hangul[12]; +}; + +struct utf8data { + unsigned int maxage; + unsigned int offset; +}; + +typedef const unsigned char utf8trie_t; + +typedef const unsigned char utf8leaf_t; + +typedef struct vfsmount * (*debugfs_automount_t)(struct dentry *, void *); + +struct debugfs_fsdata { + const struct file_operations *real_fops; + refcount_t active_users; + struct completion active_users_drained; +}; + 
+struct debugfs_mount_opts { + kuid_t uid; + kgid_t gid; + umode_t mode; +}; + +enum { + Opt_uid___4 = 0, + Opt_gid___5 = 1, + Opt_mode___5 = 2, + Opt_err___3 = 3, +}; + +struct debugfs_fs_info { + struct debugfs_mount_opts mount_opts; +}; + +struct debugfs_blob_wrapper { + void *data; + long unsigned int size; +}; + +struct debugfs_reg32 { + char *name; + long unsigned int offset; +}; + +struct debugfs_regset32 { + const struct debugfs_reg32 *regs; + int nregs; + void *base; + struct device *dev; +}; + +struct debugfs_u32_array { + u32 *array; + u32 n_elements; +}; + +struct debugfs_devm_entry { + int (*read)(struct seq_file *, void *); + struct device *dev; +}; + +struct tracefs_dir_ops { + int (*mkdir)(const char *); + int (*rmdir)(const char *); +}; + +struct tracefs_mount_opts { + kuid_t uid; + kgid_t gid; + umode_t mode; +}; + +struct tracefs_fs_info { + struct tracefs_mount_opts mount_opts; +}; + +enum pstore_type_id { + PSTORE_TYPE_DMESG = 0, + PSTORE_TYPE_MCE = 1, + PSTORE_TYPE_CONSOLE = 2, + PSTORE_TYPE_FTRACE = 3, + PSTORE_TYPE_PPC_RTAS = 4, + PSTORE_TYPE_PPC_OF = 5, + PSTORE_TYPE_PPC_COMMON = 6, + PSTORE_TYPE_PMSG = 7, + PSTORE_TYPE_PPC_OPAL = 8, + PSTORE_TYPE_MAX = 9, +}; + +struct pstore_info; + +struct pstore_record { + struct pstore_info *psi; + enum pstore_type_id type; + u64 id; + struct timespec64 time; + char *buf; + ssize_t size; + ssize_t ecc_notice_size; + int count; + enum kmsg_dump_reason reason; + unsigned int part; + bool compressed; +}; + +struct pstore_info { + struct module *owner; + const char *name; + struct semaphore buf_lock; + char *buf; + size_t bufsize; + struct mutex read_mutex; + int flags; + int max_reason; + void *data; + int (*open)(struct pstore_info *); + int (*close)(struct pstore_info *); + ssize_t (*read)(struct pstore_record *); + int (*write)(struct pstore_record *); + int (*write_user)(struct pstore_record *, const char *); + int (*erase)(struct pstore_record *); +}; + +struct pstore_ftrace_record { + long unsigned int ip; + long unsigned int parent_ip; + u64 ts; +}; + +struct pstore_private { + struct list_head list; + struct dentry *dentry; + struct pstore_record *record; + size_t total_size; +}; + +struct pstore_ftrace_seq_data { + const void *ptr; + size_t off; + size_t size; +}; + +enum { + Opt_kmsg_bytes = 0, + Opt_err___4 = 1, +}; + +struct pstore_zbackend { + int (*zbufsize)(size_t); + const char *name; +}; + +typedef s32 compat_key_t; + +struct ipc64_perm { + __kernel_key_t key; + __kernel_uid32_t uid; + __kernel_gid32_t gid; + __kernel_uid32_t cuid; + __kernel_gid32_t cgid; + __kernel_mode_t mode; + unsigned char __pad1[0]; + short unsigned int seq; + short unsigned int __pad2; + __kernel_ulong_t __unused1; + __kernel_ulong_t __unused2; +}; + +typedef u32 __compat_gid32_t; + +struct compat_ipc64_perm { + compat_key_t key; + __compat_uid32_t uid; + __compat_gid32_t gid; + __compat_uid32_t cuid; + __compat_gid32_t cgid; + short unsigned int mode; + short unsigned int __pad1; + short unsigned int seq; + short unsigned int __pad2; + compat_ulong_t unused1; + compat_ulong_t unused2; +}; + +struct compat_ipc_perm { + key_t key; + __compat_uid_t uid; + __compat_gid_t gid; + __compat_uid_t cuid; + __compat_gid_t cgid; + compat_mode_t mode; + short unsigned int seq; +}; + +struct ipc_perm { + __kernel_key_t key; + __kernel_uid_t uid; + __kernel_gid_t gid; + __kernel_uid_t cuid; + __kernel_gid_t cgid; + __kernel_mode_t mode; + short unsigned int seq; +}; + +struct ipc_params { + key_t key; + int flg; + union { + size_t size; + int nsems; + 
} u; +}; + +struct ipc_ops { + int (*getnew)(struct ipc_namespace *, struct ipc_params *); + int (*associate)(struct kern_ipc_perm *, int); + int (*more_checks)(struct kern_ipc_perm *, struct ipc_params *); +}; + +struct ipc_proc_iface { + const char *path; + const char *header; + int ids; + int (*show)(struct seq_file *, void *); +}; + +struct ipc_proc_iter { + struct ipc_namespace *ns; + struct pid_namespace *pid_ns; + struct ipc_proc_iface *iface; +}; + +struct msg_msgseg; + +struct msg_msg { + struct list_head m_list; + long int m_type; + size_t m_ts; + struct msg_msgseg *next; + void *security; +}; + +struct msg_msgseg { + struct msg_msgseg *next; +}; + +typedef int __kernel_ipc_pid_t; + +typedef __kernel_long_t __kernel_old_time_t; + +struct msgbuf { + __kernel_long_t mtype; + char mtext[1]; +}; + +struct msg; + +struct msqid_ds { + struct ipc_perm msg_perm; + struct msg *msg_first; + struct msg *msg_last; + __kernel_old_time_t msg_stime; + __kernel_old_time_t msg_rtime; + __kernel_old_time_t msg_ctime; + long unsigned int msg_lcbytes; + long unsigned int msg_lqbytes; + short unsigned int msg_cbytes; + short unsigned int msg_qnum; + short unsigned int msg_qbytes; + __kernel_ipc_pid_t msg_lspid; + __kernel_ipc_pid_t msg_lrpid; +}; + +struct msqid64_ds { + struct ipc64_perm msg_perm; + long int msg_stime; + long int msg_rtime; + long int msg_ctime; + long unsigned int msg_cbytes; + long unsigned int msg_qnum; + long unsigned int msg_qbytes; + __kernel_pid_t msg_lspid; + __kernel_pid_t msg_lrpid; + long unsigned int __unused4; + long unsigned int __unused5; +}; + +struct msginfo { + int msgpool; + int msgmap; + int msgmax; + int msgmnb; + int msgmni; + int msgssz; + int msgtql; + short unsigned int msgseg; +}; + +typedef u16 compat_ipc_pid_t; + +struct compat_msqid64_ds { + struct compat_ipc64_perm msg_perm; + compat_ulong_t msg_stime; + compat_ulong_t msg_stime_high; + compat_ulong_t msg_rtime; + compat_ulong_t msg_rtime_high; + compat_ulong_t msg_ctime; + compat_ulong_t msg_ctime_high; + compat_ulong_t msg_cbytes; + compat_ulong_t msg_qnum; + compat_ulong_t msg_qbytes; + compat_pid_t msg_lspid; + compat_pid_t msg_lrpid; + compat_ulong_t __unused4; + compat_ulong_t __unused5; +}; + +struct msg_queue { + struct kern_ipc_perm q_perm; + time64_t q_stime; + time64_t q_rtime; + time64_t q_ctime; + long unsigned int q_cbytes; + long unsigned int q_qnum; + long unsigned int q_qbytes; + struct pid *q_lspid; + struct pid *q_lrpid; + struct list_head q_messages; + struct list_head q_receivers; + struct list_head q_senders; + long: 64; + long: 64; +}; + +struct msg_receiver { + struct list_head r_list; + struct task_struct *r_tsk; + int r_mode; + long int r_msgtype; + long int r_maxsize; + struct msg_msg *r_msg; +}; + +struct msg_sender { + struct list_head list; + struct task_struct *tsk; + size_t msgsz; +}; + +struct compat_msqid_ds { + struct compat_ipc_perm msg_perm; + compat_uptr_t msg_first; + compat_uptr_t msg_last; + old_time32_t msg_stime; + old_time32_t msg_rtime; + old_time32_t msg_ctime; + compat_ulong_t msg_lcbytes; + compat_ulong_t msg_lqbytes; + short unsigned int msg_cbytes; + short unsigned int msg_qnum; + short unsigned int msg_qbytes; + compat_ipc_pid_t msg_lspid; + compat_ipc_pid_t msg_lrpid; +}; + +struct compat_msgbuf { + compat_long_t mtype; + char mtext[1]; +}; + +struct sem; + +struct sem_queue; + +struct sem_undo; + +struct semid_ds { + struct ipc_perm sem_perm; + __kernel_old_time_t sem_otime; + __kernel_old_time_t sem_ctime; + struct sem *sem_base; + struct sem_queue 
*sem_pending; + struct sem_queue **sem_pending_last; + struct sem_undo *undo; + short unsigned int sem_nsems; +}; + +struct sem { + int semval; + struct pid *sempid; + spinlock_t lock; + struct list_head pending_alter; + struct list_head pending_const; + time64_t sem_otime; +}; + +struct sem_queue { + struct list_head list; + struct task_struct *sleeper; + struct sem_undo *undo; + struct pid *pid; + int status; + struct sembuf *sops; + struct sembuf *blocking; + int nsops; + bool alter; + bool dupsop; +}; + +struct sem_undo { + struct list_head list_proc; + struct callback_head rcu; + struct sem_undo_list *ulp; + struct list_head list_id; + int semid; + short int *semadj; +}; + +struct semid64_ds { + struct ipc64_perm sem_perm; + long int sem_otime; + long int sem_ctime; + long unsigned int sem_nsems; + long unsigned int __unused3; + long unsigned int __unused4; +}; + +struct seminfo { + int semmap; + int semmni; + int semmns; + int semmnu; + int semmsl; + int semopm; + int semume; + int semusz; + int semvmx; + int semaem; +}; + +struct sem_undo_list { + refcount_t refcnt; + spinlock_t lock; + struct list_head list_proc; +}; + +struct compat_semid64_ds { + struct compat_ipc64_perm sem_perm; + compat_ulong_t sem_otime; + compat_ulong_t sem_otime_high; + compat_ulong_t sem_ctime; + compat_ulong_t sem_ctime_high; + compat_ulong_t sem_nsems; + compat_ulong_t __unused3; + compat_ulong_t __unused4; +}; + +struct sem_array { + struct kern_ipc_perm sem_perm; + time64_t sem_ctime; + struct list_head pending_alter; + struct list_head pending_const; + struct list_head list_id; + int sem_nsems; + int complex_count; + unsigned int use_global_lock; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct sem sems[0]; +}; + +struct compat_semid_ds { + struct compat_ipc_perm sem_perm; + old_time32_t sem_otime; + old_time32_t sem_ctime; + compat_uptr_t sem_base; + compat_uptr_t sem_pending; + compat_uptr_t sem_pending_last; + compat_uptr_t undo; + short unsigned int sem_nsems; +}; + +struct shmid_ds { + struct ipc_perm shm_perm; + int shm_segsz; + __kernel_old_time_t shm_atime; + __kernel_old_time_t shm_dtime; + __kernel_old_time_t shm_ctime; + __kernel_ipc_pid_t shm_cpid; + __kernel_ipc_pid_t shm_lpid; + short unsigned int shm_nattch; + short unsigned int shm_unused; + void *shm_unused2; + void *shm_unused3; +}; + +struct shmid64_ds { + struct ipc64_perm shm_perm; + size_t shm_segsz; + long int shm_atime; + long int shm_dtime; + long int shm_ctime; + __kernel_pid_t shm_cpid; + __kernel_pid_t shm_lpid; + long unsigned int shm_nattch; + long unsigned int __unused4; + long unsigned int __unused5; +}; + +struct shminfo64 { + long unsigned int shmmax; + long unsigned int shmmin; + long unsigned int shmmni; + long unsigned int shmseg; + long unsigned int shmall; + long unsigned int __unused1; + long unsigned int __unused2; + long unsigned int __unused3; + long unsigned int __unused4; +}; + +struct shminfo { + int shmmax; + int shmmin; + int shmmni; + int shmseg; + int shmall; +}; + +struct shm_info { + int used_ids; + __kernel_ulong_t shm_tot; + __kernel_ulong_t shm_rss; + __kernel_ulong_t shm_swp; + __kernel_ulong_t swap_attempts; + __kernel_ulong_t swap_successes; +}; + +struct compat_shmid64_ds { + struct compat_ipc64_perm shm_perm; + compat_size_t shm_segsz; + compat_ulong_t shm_atime; + compat_ulong_t shm_atime_high; + compat_ulong_t shm_dtime; + compat_ulong_t shm_dtime_high; + compat_ulong_t shm_ctime; + compat_ulong_t shm_ctime_high; + compat_pid_t 
shm_cpid; + compat_pid_t shm_lpid; + compat_ulong_t shm_nattch; + compat_ulong_t __unused4; + compat_ulong_t __unused5; +}; + +struct shmid_kernel { + struct kern_ipc_perm shm_perm; + struct file *shm_file; + long unsigned int shm_nattch; + long unsigned int shm_segsz; + time64_t shm_atim; + time64_t shm_dtim; + time64_t shm_ctim; + struct pid *shm_cprid; + struct pid *shm_lprid; + struct user_struct *mlock_user; + struct task_struct *shm_creator; + struct list_head shm_clist; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct shm_file_data { + int id; + struct ipc_namespace *ns; + struct file *file; + const struct vm_operations_struct *vm_ops; +}; + +struct compat_shmid_ds { + struct compat_ipc_perm shm_perm; + int shm_segsz; + old_time32_t shm_atime; + old_time32_t shm_dtime; + old_time32_t shm_ctime; + compat_ipc_pid_t shm_cpid; + compat_ipc_pid_t shm_lpid; + short unsigned int shm_nattch; + short unsigned int shm_unused; + compat_uptr_t shm_unused2; + compat_uptr_t shm_unused3; +}; + +struct compat_shminfo64 { + compat_ulong_t shmmax; + compat_ulong_t shmmin; + compat_ulong_t shmmni; + compat_ulong_t shmseg; + compat_ulong_t shmall; + compat_ulong_t __unused1; + compat_ulong_t __unused2; + compat_ulong_t __unused3; + compat_ulong_t __unused4; +}; + +struct compat_shm_info { + compat_int_t used_ids; + compat_ulong_t shm_tot; + compat_ulong_t shm_rss; + compat_ulong_t shm_swp; + compat_ulong_t swap_attempts; + compat_ulong_t swap_successes; +}; + +struct mqueue_fs_context { + struct ipc_namespace *ipc_ns; +}; + +struct posix_msg_tree_node { + struct rb_node rb_node; + struct list_head msg_list; + int priority; +}; + +struct ext_wait_queue { + struct task_struct *task; + struct list_head list; + struct msg_msg *msg; + int state; +}; + +struct mqueue_inode_info { + spinlock_t lock; + struct inode vfs_inode; + wait_queue_head_t wait_q; + struct rb_root msg_tree; + struct rb_node *msg_tree_rightmost; + struct posix_msg_tree_node *node_cache; + struct mq_attr attr; + struct sigevent notify; + struct pid *notify_owner; + u32 notify_self_exec_id; + struct user_namespace *notify_user_ns; + struct user_struct *user; + struct sock *notify_sock; + struct sk_buff *notify_cookie; + struct ext_wait_queue e_wait_q[2]; + long unsigned int qsize; +}; + +struct compat_mq_attr { + compat_long_t mq_flags; + compat_long_t mq_maxmsg; + compat_long_t mq_msgsize; + compat_long_t mq_curmsgs; + compat_long_t __reserved[4]; +}; + +struct key_user { + struct rb_node node; + struct mutex cons_lock; + spinlock_t lock; + refcount_t usage; + atomic_t nkeys; + atomic_t nikeys; + kuid_t uid; + int qnkeys; + int qnbytes; +}; + +enum key_notification_subtype { + NOTIFY_KEY_INSTANTIATED = 0, + NOTIFY_KEY_UPDATED = 1, + NOTIFY_KEY_LINKED = 2, + NOTIFY_KEY_UNLINKED = 3, + NOTIFY_KEY_CLEARED = 4, + NOTIFY_KEY_REVOKED = 5, + NOTIFY_KEY_INVALIDATED = 6, + NOTIFY_KEY_SETATTR = 7, +}; + +struct key_notification { + struct watch_notification watch; + __u32 key_id; + __u32 aux; +}; + +struct assoc_array_edit; + +struct assoc_array_ops { + long unsigned int (*get_key_chunk)(const void *, int); + long unsigned int (*get_object_key_chunk)(const void *, int); + bool (*compare_object)(const void *, const void *); + int (*diff_objects)(const void *, const void *); + void (*free_object)(void *); +}; + +struct assoc_array_node { + struct assoc_array_ptr *back_pointer; + u8 parent_slot; + struct assoc_array_ptr *slots[16]; + long unsigned int nr_leaves_on_branch; +}; + +struct assoc_array_shortcut { + struct assoc_array_ptr 
*back_pointer; + int parent_slot; + int skip_to_level; + struct assoc_array_ptr *next_node; + long unsigned int index_key[0]; +}; + +struct assoc_array_edit___2 { + struct callback_head rcu; + struct assoc_array *array; + const struct assoc_array_ops *ops; + const struct assoc_array_ops *ops_for_excised_subtree; + struct assoc_array_ptr *leaf; + struct assoc_array_ptr **leaf_p; + struct assoc_array_ptr *dead_leaf; + struct assoc_array_ptr *new_meta[3]; + struct assoc_array_ptr *excised_meta[1]; + struct assoc_array_ptr *excised_subtree; + struct assoc_array_ptr **set_backpointers[16]; + struct assoc_array_ptr *set_backpointers_to; + struct assoc_array_node *adjust_count_on; + long int adjust_count_by; + struct { + struct assoc_array_ptr **ptr; + struct assoc_array_ptr *to; + } set[2]; + struct { + u8 *p; + u8 to; + } set_parent_slot[1]; + u8 segment_cache[17]; +}; + +struct keyring_search_context { + struct keyring_index_key index_key; + const struct cred *cred; + struct key_match_data match_data; + unsigned int flags; + int (*iterator)(const void *, void *); + int skipped_ret; + bool possessed; + key_ref_t result; + time64_t now; +}; + +struct keyring_read_iterator_context { + size_t buflen; + size_t count; + key_serial_t *buffer; +}; + +struct keyctl_dh_params { + union { + __s32 private; + __s32 priv; + }; + __s32 prime; + __s32 base; +}; + +struct keyctl_kdf_params { + char *hashname; + char *otherinfo; + __u32 otherinfolen; + __u32 __spare[8]; +}; + +struct keyctl_pkey_query { + __u32 supported_ops; + __u32 key_size; + __u16 max_data_size; + __u16 max_sig_size; + __u16 max_enc_size; + __u16 max_dec_size; + __u32 __spare[10]; +}; + +struct keyctl_pkey_params { + __s32 key_id; + __u32 in_len; + union { + __u32 out_len; + __u32 in2_len; + }; + __u32 __spare[7]; +}; + +struct request_key_auth { + struct callback_head rcu; + struct key *target_key; + struct key *dest_keyring; + const struct cred *cred; + void *callout_info; + size_t callout_len; + pid_t pid; + char op[8]; +}; + +enum { + Opt_err___5 = 0, + Opt_enc = 1, + Opt_hash = 2, +}; + +enum hash_algo { + HASH_ALGO_MD4 = 0, + HASH_ALGO_MD5 = 1, + HASH_ALGO_SHA1 = 2, + HASH_ALGO_RIPE_MD_160 = 3, + HASH_ALGO_SHA256 = 4, + HASH_ALGO_SHA384 = 5, + HASH_ALGO_SHA512 = 6, + HASH_ALGO_SHA224 = 7, + HASH_ALGO_RIPE_MD_128 = 8, + HASH_ALGO_RIPE_MD_256 = 9, + HASH_ALGO_RIPE_MD_320 = 10, + HASH_ALGO_WP_256 = 11, + HASH_ALGO_WP_384 = 12, + HASH_ALGO_WP_512 = 13, + HASH_ALGO_TGR_128 = 14, + HASH_ALGO_TGR_160 = 15, + HASH_ALGO_TGR_192 = 16, + HASH_ALGO_SM3_256 = 17, + HASH_ALGO_STREEBOG_256 = 18, + HASH_ALGO_STREEBOG_512 = 19, + HASH_ALGO__LAST = 20, +}; + +enum tpm_duration { + TPM_SHORT = 0, + TPM_MEDIUM = 1, + TPM_LONG = 2, + TPM_LONG_LONG = 3, + TPM_UNDEFINED = 4, + TPM_NUM_DURATIONS = 4, +}; + +struct encrypted_key_payload { + struct callback_head rcu; + char *format; + char *master_desc; + char *datalen; + u8 *iv; + u8 *encrypted_data; + short unsigned int datablob_len; + short unsigned int decrypted_datalen; + short unsigned int payload_datalen; + short unsigned int encrypted_key_format; + u8 *decrypted_data; + u8 payload_data[0]; +}; + +struct ecryptfs_session_key { + u32 flags; + u32 encrypted_key_size; + u32 decrypted_key_size; + u8 encrypted_key[512]; + u8 decrypted_key[64]; +}; + +struct ecryptfs_password { + u32 password_bytes; + s32 hash_algo; + u32 hash_iterations; + u32 session_key_encryption_key_bytes; + u32 flags; + u8 session_key_encryption_key[64]; + u8 signature[17]; + u8 salt[8]; +}; + +struct ecryptfs_private_key { + u32 
key_size; + u32 data_len; + u8 signature[17]; + char pki_type[17]; + u8 data[0]; +}; + +struct ecryptfs_auth_tok { + u16 version; + u16 token_type; + u32 flags; + struct ecryptfs_session_key session_key; + u8 reserved[32]; + union { + struct ecryptfs_password password; + struct ecryptfs_private_key private_key; + } token; +}; + +enum { + Opt_new = 0, + Opt_load = 1, + Opt_update = 2, + Opt_err___6 = 3, +}; + +enum { + Opt_default = 0, + Opt_ecryptfs = 1, + Opt_enc32 = 2, + Opt_error = 3, +}; + +enum derived_key_type { + ENC_KEY = 0, + AUTH_KEY = 1, +}; + +enum ecryptfs_token_types { + ECRYPTFS_PASSWORD = 0, + ECRYPTFS_PRIVATE_KEY = 1, +}; + +struct vfs_cap_data { + __le32 magic_etc; + struct { + __le32 permitted; + __le32 inheritable; + } data[2]; +}; + +struct vfs_ns_cap_data { + __le32 magic_etc; + struct { + __le32 permitted; + __le32 inheritable; + } data[2]; + __le32 rootid; +}; + +struct sctp_endpoint; + +union security_list_options { + int (*binder_set_context_mgr)(struct task_struct *); + int (*binder_transaction)(struct task_struct *, struct task_struct *); + int (*binder_transfer_binder)(struct task_struct *, struct task_struct *); + int (*binder_transfer_file)(struct task_struct *, struct task_struct *, struct file *); + int (*ptrace_access_check)(struct task_struct *, unsigned int); + int (*ptrace_traceme)(struct task_struct *); + int (*capget)(struct task_struct *, kernel_cap_t *, kernel_cap_t *, kernel_cap_t *); + int (*capset)(struct cred *, const struct cred *, const kernel_cap_t *, const kernel_cap_t *, const kernel_cap_t *); + int (*capable)(const struct cred *, struct user_namespace *, int, unsigned int); + int (*quotactl)(int, int, int, struct super_block *); + int (*quota_on)(struct dentry *); + int (*syslog)(int); + int (*settime)(const struct timespec64 *, const struct timezone *); + int (*vm_enough_memory)(struct mm_struct *, long int); + int (*bprm_creds_for_exec)(struct linux_binprm *); + int (*bprm_creds_from_file)(struct linux_binprm *, struct file *); + int (*bprm_check_security)(struct linux_binprm *); + void (*bprm_committing_creds)(struct linux_binprm *); + void (*bprm_committed_creds)(struct linux_binprm *); + int (*fs_context_dup)(struct fs_context *, struct fs_context *); + int (*fs_context_parse_param)(struct fs_context *, struct fs_parameter *); + int (*sb_alloc_security)(struct super_block *); + void (*sb_free_security)(struct super_block *); + void (*sb_free_mnt_opts)(void *); + int (*sb_eat_lsm_opts)(char *, void **); + int (*sb_remount)(struct super_block *, void *); + int (*sb_kern_mount)(struct super_block *); + int (*sb_show_options)(struct seq_file *, struct super_block *); + int (*sb_statfs)(struct dentry *); + int (*sb_mount)(const char *, const struct path *, const char *, long unsigned int, void *); + int (*sb_umount)(struct vfsmount *, int); + int (*sb_pivotroot)(const struct path *, const struct path *); + int (*sb_set_mnt_opts)(struct super_block *, void *, long unsigned int, long unsigned int *); + int (*sb_clone_mnt_opts)(const struct super_block *, struct super_block *, long unsigned int, long unsigned int *); + int (*sb_add_mnt_opt)(const char *, const char *, int, void **); + int (*move_mount)(const struct path *, const struct path *); + int (*dentry_init_security)(struct dentry *, int, const struct qstr *, void **, u32 *); + int (*dentry_create_files_as)(struct dentry *, int, struct qstr *, const struct cred *, struct cred *); + int (*path_unlink)(const struct path *, struct dentry *); + int (*path_mkdir)(const struct path *, struct 
dentry *, umode_t); + int (*path_rmdir)(const struct path *, struct dentry *); + int (*path_mknod)(const struct path *, struct dentry *, umode_t, unsigned int); + int (*path_truncate)(const struct path *); + int (*path_symlink)(const struct path *, struct dentry *, const char *); + int (*path_link)(struct dentry *, const struct path *, struct dentry *); + int (*path_rename)(const struct path *, struct dentry *, const struct path *, struct dentry *); + int (*path_chmod)(const struct path *, umode_t); + int (*path_chown)(const struct path *, kuid_t, kgid_t); + int (*path_chroot)(const struct path *); + int (*path_notify)(const struct path *, u64, unsigned int); + int (*inode_alloc_security)(struct inode *); + void (*inode_free_security)(struct inode *); + int (*inode_init_security)(struct inode *, struct inode *, const struct qstr *, const char **, void **, size_t *); + int (*inode_create)(struct inode *, struct dentry *, umode_t); + int (*inode_link)(struct dentry *, struct inode *, struct dentry *); + int (*inode_unlink)(struct inode *, struct dentry *); + int (*inode_symlink)(struct inode *, struct dentry *, const char *); + int (*inode_mkdir)(struct inode *, struct dentry *, umode_t); + int (*inode_rmdir)(struct inode *, struct dentry *); + int (*inode_mknod)(struct inode *, struct dentry *, umode_t, dev_t); + int (*inode_rename)(struct inode *, struct dentry *, struct inode *, struct dentry *); + int (*inode_readlink)(struct dentry *); + int (*inode_follow_link)(struct dentry *, struct inode *, bool); + int (*inode_permission)(struct inode *, int); + int (*inode_setattr)(struct dentry *, struct iattr *); + int (*inode_getattr)(const struct path *); + int (*inode_setxattr)(struct dentry *, const char *, const void *, size_t, int); + void (*inode_post_setxattr)(struct dentry *, const char *, const void *, size_t, int); + int (*inode_getxattr)(struct dentry *, const char *); + int (*inode_listxattr)(struct dentry *); + int (*inode_removexattr)(struct dentry *, const char *); + int (*inode_need_killpriv)(struct dentry *); + int (*inode_killpriv)(struct dentry *); + int (*inode_getsecurity)(struct inode *, const char *, void **, bool); + int (*inode_setsecurity)(struct inode *, const char *, const void *, size_t, int); + int (*inode_listsecurity)(struct inode *, char *, size_t); + void (*inode_getsecid)(struct inode *, u32 *); + int (*inode_copy_up)(struct dentry *, struct cred **); + int (*inode_copy_up_xattr)(const char *); + int (*kernfs_init_security)(struct kernfs_node *, struct kernfs_node *); + int (*file_permission)(struct file *, int); + int (*file_alloc_security)(struct file *); + void (*file_free_security)(struct file *); + int (*file_ioctl)(struct file *, unsigned int, long unsigned int); + int (*mmap_addr)(long unsigned int); + int (*mmap_file)(struct file *, long unsigned int, long unsigned int, long unsigned int); + int (*file_mprotect)(struct vm_area_struct *, long unsigned int, long unsigned int); + int (*file_lock)(struct file *, unsigned int); + int (*file_fcntl)(struct file *, unsigned int, long unsigned int); + void (*file_set_fowner)(struct file *); + int (*file_send_sigiotask)(struct task_struct *, struct fown_struct *, int); + int (*file_receive)(struct file *); + int (*file_open)(struct file *); + int (*task_alloc)(struct task_struct *, long unsigned int); + void (*task_free)(struct task_struct *); + int (*cred_alloc_blank)(struct cred *, gfp_t); + void (*cred_free)(struct cred *); + int (*cred_prepare)(struct cred *, const struct cred *, gfp_t); + void 
(*cred_transfer)(struct cred *, const struct cred *); + void (*cred_getsecid)(const struct cred *, u32 *); + int (*kernel_act_as)(struct cred *, u32); + int (*kernel_create_files_as)(struct cred *, struct inode *); + int (*kernel_module_request)(char *); + int (*kernel_load_data)(enum kernel_load_data_id, bool); + int (*kernel_post_load_data)(char *, loff_t, enum kernel_load_data_id, char *); + int (*kernel_read_file)(struct file *, enum kernel_read_file_id, bool); + int (*kernel_post_read_file)(struct file *, char *, loff_t, enum kernel_read_file_id); + int (*task_fix_setuid)(struct cred *, const struct cred *, int); + int (*task_fix_setgid)(struct cred *, const struct cred *, int); + int (*task_setpgid)(struct task_struct *, pid_t); + int (*task_getpgid)(struct task_struct *); + int (*task_getsid)(struct task_struct *); + void (*task_getsecid)(struct task_struct *, u32 *); + int (*task_setnice)(struct task_struct *, int); + int (*task_setioprio)(struct task_struct *, int); + int (*task_getioprio)(struct task_struct *); + int (*task_prlimit)(const struct cred *, const struct cred *, unsigned int); + int (*task_setrlimit)(struct task_struct *, unsigned int, struct rlimit *); + int (*task_setscheduler)(struct task_struct *); + int (*task_getscheduler)(struct task_struct *); + int (*task_movememory)(struct task_struct *); + int (*task_kill)(struct task_struct *, struct kernel_siginfo *, int, const struct cred *); + int (*task_prctl)(int, long unsigned int, long unsigned int, long unsigned int, long unsigned int); + void (*task_to_inode)(struct task_struct *, struct inode *); + int (*ipc_permission)(struct kern_ipc_perm *, short int); + void (*ipc_getsecid)(struct kern_ipc_perm *, u32 *); + int (*msg_msg_alloc_security)(struct msg_msg *); + void (*msg_msg_free_security)(struct msg_msg *); + int (*msg_queue_alloc_security)(struct kern_ipc_perm *); + void (*msg_queue_free_security)(struct kern_ipc_perm *); + int (*msg_queue_associate)(struct kern_ipc_perm *, int); + int (*msg_queue_msgctl)(struct kern_ipc_perm *, int); + int (*msg_queue_msgsnd)(struct kern_ipc_perm *, struct msg_msg *, int); + int (*msg_queue_msgrcv)(struct kern_ipc_perm *, struct msg_msg *, struct task_struct *, long int, int); + int (*shm_alloc_security)(struct kern_ipc_perm *); + void (*shm_free_security)(struct kern_ipc_perm *); + int (*shm_associate)(struct kern_ipc_perm *, int); + int (*shm_shmctl)(struct kern_ipc_perm *, int); + int (*shm_shmat)(struct kern_ipc_perm *, char *, int); + int (*sem_alloc_security)(struct kern_ipc_perm *); + void (*sem_free_security)(struct kern_ipc_perm *); + int (*sem_associate)(struct kern_ipc_perm *, int); + int (*sem_semctl)(struct kern_ipc_perm *, int); + int (*sem_semop)(struct kern_ipc_perm *, struct sembuf *, unsigned int, int); + int (*netlink_send)(struct sock *, struct sk_buff *); + void (*d_instantiate)(struct dentry *, struct inode *); + int (*getprocattr)(struct task_struct *, char *, char **); + int (*setprocattr)(const char *, void *, size_t); + int (*ismaclabel)(const char *); + int (*secid_to_secctx)(u32, char **, u32 *); + int (*secctx_to_secid)(const char *, u32, u32 *); + void (*release_secctx)(char *, u32); + void (*inode_invalidate_secctx)(struct inode *); + int (*inode_notifysecctx)(struct inode *, void *, u32); + int (*inode_setsecctx)(struct dentry *, void *, u32); + int (*inode_getsecctx)(struct inode *, void **, u32 *); + int (*post_notification)(const struct cred *, const struct cred *, struct watch_notification *); + int (*watch_key)(struct key *); + int 
(*unix_stream_connect)(struct sock *, struct sock *, struct sock *); + int (*unix_may_send)(struct socket *, struct socket *); + int (*socket_create)(int, int, int, int); + int (*socket_post_create)(struct socket *, int, int, int, int); + int (*socket_socketpair)(struct socket *, struct socket *); + int (*socket_bind)(struct socket *, struct sockaddr *, int); + int (*socket_connect)(struct socket *, struct sockaddr *, int); + int (*socket_listen)(struct socket *, int); + int (*socket_accept)(struct socket *, struct socket *); + int (*socket_sendmsg)(struct socket *, struct msghdr *, int); + int (*socket_recvmsg)(struct socket *, struct msghdr *, int, int); + int (*socket_getsockname)(struct socket *); + int (*socket_getpeername)(struct socket *); + int (*socket_getsockopt)(struct socket *, int, int); + int (*socket_setsockopt)(struct socket *, int, int); + int (*socket_shutdown)(struct socket *, int); + int (*socket_sock_rcv_skb)(struct sock *, struct sk_buff *); + int (*socket_getpeersec_stream)(struct socket *, char *, int *, unsigned int); + int (*socket_getpeersec_dgram)(struct socket *, struct sk_buff *, u32 *); + int (*sk_alloc_security)(struct sock *, int, gfp_t); + void (*sk_free_security)(struct sock *); + void (*sk_clone_security)(const struct sock *, struct sock *); + void (*sk_getsecid)(struct sock *, u32 *); + void (*sock_graft)(struct sock *, struct socket *); + int (*inet_conn_request)(struct sock *, struct sk_buff *, struct request_sock *); + void (*inet_csk_clone)(struct sock *, const struct request_sock *); + void (*inet_conn_established)(struct sock *, struct sk_buff *); + int (*secmark_relabel_packet)(u32); + void (*secmark_refcount_inc)(); + void (*secmark_refcount_dec)(); + void (*req_classify_flow)(const struct request_sock *, struct flowi *); + int (*tun_dev_alloc_security)(void **); + void (*tun_dev_free_security)(void *); + int (*tun_dev_create)(); + int (*tun_dev_attach_queue)(void *); + int (*tun_dev_attach)(struct sock *, void *); + int (*tun_dev_open)(void *); + int (*sctp_assoc_request)(struct sctp_endpoint *, struct sk_buff *); + int (*sctp_bind_connect)(struct sock *, int, struct sockaddr *, int); + void (*sctp_sk_clone)(struct sctp_endpoint *, struct sock *, struct sock *); + int (*xfrm_policy_alloc_security)(struct xfrm_sec_ctx **, struct xfrm_user_sec_ctx *, gfp_t); + int (*xfrm_policy_clone_security)(struct xfrm_sec_ctx *, struct xfrm_sec_ctx **); + void (*xfrm_policy_free_security)(struct xfrm_sec_ctx *); + int (*xfrm_policy_delete_security)(struct xfrm_sec_ctx *); + int (*xfrm_state_alloc)(struct xfrm_state *, struct xfrm_user_sec_ctx *); + int (*xfrm_state_alloc_acquire)(struct xfrm_state *, struct xfrm_sec_ctx *, u32); + void (*xfrm_state_free_security)(struct xfrm_state *); + int (*xfrm_state_delete_security)(struct xfrm_state *); + int (*xfrm_policy_lookup)(struct xfrm_sec_ctx *, u32, u8); + int (*xfrm_state_pol_flow_match)(struct xfrm_state *, struct xfrm_policy *, const struct flowi *); + int (*xfrm_decode_session)(struct sk_buff *, u32 *, int); + int (*key_alloc)(struct key *, const struct cred *, long unsigned int); + void (*key_free)(struct key *); + int (*key_permission)(key_ref_t, const struct cred *, enum key_need_perm); + int (*key_getsecurity)(struct key *, char **); + int (*audit_rule_init)(u32, u32, char *, void **); + int (*audit_rule_known)(struct audit_krule *); + int (*audit_rule_match)(u32, u32, u32, void *); + void (*audit_rule_free)(void *); + int (*bpf)(int, union bpf_attr *, unsigned int); + int (*bpf_map)(struct bpf_map *, 
fmode_t); + int (*bpf_prog)(struct bpf_prog *); + int (*bpf_map_alloc_security)(struct bpf_map *); + void (*bpf_map_free_security)(struct bpf_map *); + int (*bpf_prog_alloc_security)(struct bpf_prog_aux *); + void (*bpf_prog_free_security)(struct bpf_prog_aux *); + int (*locked_down)(enum lockdown_reason); + int (*perf_event_open)(struct perf_event_attr *, int); + int (*perf_event_alloc)(struct perf_event *); + void (*perf_event_free)(struct perf_event *); + int (*perf_event_read)(struct perf_event *); + int (*perf_event_write)(struct perf_event *); +}; + +struct security_hook_heads { + struct hlist_head binder_set_context_mgr; + struct hlist_head binder_transaction; + struct hlist_head binder_transfer_binder; + struct hlist_head binder_transfer_file; + struct hlist_head ptrace_access_check; + struct hlist_head ptrace_traceme; + struct hlist_head capget; + struct hlist_head capset; + struct hlist_head capable; + struct hlist_head quotactl; + struct hlist_head quota_on; + struct hlist_head syslog; + struct hlist_head settime; + struct hlist_head vm_enough_memory; + struct hlist_head bprm_creds_for_exec; + struct hlist_head bprm_creds_from_file; + struct hlist_head bprm_check_security; + struct hlist_head bprm_committing_creds; + struct hlist_head bprm_committed_creds; + struct hlist_head fs_context_dup; + struct hlist_head fs_context_parse_param; + struct hlist_head sb_alloc_security; + struct hlist_head sb_free_security; + struct hlist_head sb_free_mnt_opts; + struct hlist_head sb_eat_lsm_opts; + struct hlist_head sb_remount; + struct hlist_head sb_kern_mount; + struct hlist_head sb_show_options; + struct hlist_head sb_statfs; + struct hlist_head sb_mount; + struct hlist_head sb_umount; + struct hlist_head sb_pivotroot; + struct hlist_head sb_set_mnt_opts; + struct hlist_head sb_clone_mnt_opts; + struct hlist_head sb_add_mnt_opt; + struct hlist_head move_mount; + struct hlist_head dentry_init_security; + struct hlist_head dentry_create_files_as; + struct hlist_head path_unlink; + struct hlist_head path_mkdir; + struct hlist_head path_rmdir; + struct hlist_head path_mknod; + struct hlist_head path_truncate; + struct hlist_head path_symlink; + struct hlist_head path_link; + struct hlist_head path_rename; + struct hlist_head path_chmod; + struct hlist_head path_chown; + struct hlist_head path_chroot; + struct hlist_head path_notify; + struct hlist_head inode_alloc_security; + struct hlist_head inode_free_security; + struct hlist_head inode_init_security; + struct hlist_head inode_create; + struct hlist_head inode_link; + struct hlist_head inode_unlink; + struct hlist_head inode_symlink; + struct hlist_head inode_mkdir; + struct hlist_head inode_rmdir; + struct hlist_head inode_mknod; + struct hlist_head inode_rename; + struct hlist_head inode_readlink; + struct hlist_head inode_follow_link; + struct hlist_head inode_permission; + struct hlist_head inode_setattr; + struct hlist_head inode_getattr; + struct hlist_head inode_setxattr; + struct hlist_head inode_post_setxattr; + struct hlist_head inode_getxattr; + struct hlist_head inode_listxattr; + struct hlist_head inode_removexattr; + struct hlist_head inode_need_killpriv; + struct hlist_head inode_killpriv; + struct hlist_head inode_getsecurity; + struct hlist_head inode_setsecurity; + struct hlist_head inode_listsecurity; + struct hlist_head inode_getsecid; + struct hlist_head inode_copy_up; + struct hlist_head inode_copy_up_xattr; + struct hlist_head kernfs_init_security; + struct hlist_head file_permission; + struct hlist_head 
file_alloc_security; + struct hlist_head file_free_security; + struct hlist_head file_ioctl; + struct hlist_head mmap_addr; + struct hlist_head mmap_file; + struct hlist_head file_mprotect; + struct hlist_head file_lock; + struct hlist_head file_fcntl; + struct hlist_head file_set_fowner; + struct hlist_head file_send_sigiotask; + struct hlist_head file_receive; + struct hlist_head file_open; + struct hlist_head task_alloc; + struct hlist_head task_free; + struct hlist_head cred_alloc_blank; + struct hlist_head cred_free; + struct hlist_head cred_prepare; + struct hlist_head cred_transfer; + struct hlist_head cred_getsecid; + struct hlist_head kernel_act_as; + struct hlist_head kernel_create_files_as; + struct hlist_head kernel_module_request; + struct hlist_head kernel_load_data; + struct hlist_head kernel_post_load_data; + struct hlist_head kernel_read_file; + struct hlist_head kernel_post_read_file; + struct hlist_head task_fix_setuid; + struct hlist_head task_fix_setgid; + struct hlist_head task_setpgid; + struct hlist_head task_getpgid; + struct hlist_head task_getsid; + struct hlist_head task_getsecid; + struct hlist_head task_setnice; + struct hlist_head task_setioprio; + struct hlist_head task_getioprio; + struct hlist_head task_prlimit; + struct hlist_head task_setrlimit; + struct hlist_head task_setscheduler; + struct hlist_head task_getscheduler; + struct hlist_head task_movememory; + struct hlist_head task_kill; + struct hlist_head task_prctl; + struct hlist_head task_to_inode; + struct hlist_head ipc_permission; + struct hlist_head ipc_getsecid; + struct hlist_head msg_msg_alloc_security; + struct hlist_head msg_msg_free_security; + struct hlist_head msg_queue_alloc_security; + struct hlist_head msg_queue_free_security; + struct hlist_head msg_queue_associate; + struct hlist_head msg_queue_msgctl; + struct hlist_head msg_queue_msgsnd; + struct hlist_head msg_queue_msgrcv; + struct hlist_head shm_alloc_security; + struct hlist_head shm_free_security; + struct hlist_head shm_associate; + struct hlist_head shm_shmctl; + struct hlist_head shm_shmat; + struct hlist_head sem_alloc_security; + struct hlist_head sem_free_security; + struct hlist_head sem_associate; + struct hlist_head sem_semctl; + struct hlist_head sem_semop; + struct hlist_head netlink_send; + struct hlist_head d_instantiate; + struct hlist_head getprocattr; + struct hlist_head setprocattr; + struct hlist_head ismaclabel; + struct hlist_head secid_to_secctx; + struct hlist_head secctx_to_secid; + struct hlist_head release_secctx; + struct hlist_head inode_invalidate_secctx; + struct hlist_head inode_notifysecctx; + struct hlist_head inode_setsecctx; + struct hlist_head inode_getsecctx; + struct hlist_head post_notification; + struct hlist_head watch_key; + struct hlist_head unix_stream_connect; + struct hlist_head unix_may_send; + struct hlist_head socket_create; + struct hlist_head socket_post_create; + struct hlist_head socket_socketpair; + struct hlist_head socket_bind; + struct hlist_head socket_connect; + struct hlist_head socket_listen; + struct hlist_head socket_accept; + struct hlist_head socket_sendmsg; + struct hlist_head socket_recvmsg; + struct hlist_head socket_getsockname; + struct hlist_head socket_getpeername; + struct hlist_head socket_getsockopt; + struct hlist_head socket_setsockopt; + struct hlist_head socket_shutdown; + struct hlist_head socket_sock_rcv_skb; + struct hlist_head socket_getpeersec_stream; + struct hlist_head socket_getpeersec_dgram; + struct hlist_head sk_alloc_security; + struct 
hlist_head sk_free_security; + struct hlist_head sk_clone_security; + struct hlist_head sk_getsecid; + struct hlist_head sock_graft; + struct hlist_head inet_conn_request; + struct hlist_head inet_csk_clone; + struct hlist_head inet_conn_established; + struct hlist_head secmark_relabel_packet; + struct hlist_head secmark_refcount_inc; + struct hlist_head secmark_refcount_dec; + struct hlist_head req_classify_flow; + struct hlist_head tun_dev_alloc_security; + struct hlist_head tun_dev_free_security; + struct hlist_head tun_dev_create; + struct hlist_head tun_dev_attach_queue; + struct hlist_head tun_dev_attach; + struct hlist_head tun_dev_open; + struct hlist_head sctp_assoc_request; + struct hlist_head sctp_bind_connect; + struct hlist_head sctp_sk_clone; + struct hlist_head xfrm_policy_alloc_security; + struct hlist_head xfrm_policy_clone_security; + struct hlist_head xfrm_policy_free_security; + struct hlist_head xfrm_policy_delete_security; + struct hlist_head xfrm_state_alloc; + struct hlist_head xfrm_state_alloc_acquire; + struct hlist_head xfrm_state_free_security; + struct hlist_head xfrm_state_delete_security; + struct hlist_head xfrm_policy_lookup; + struct hlist_head xfrm_state_pol_flow_match; + struct hlist_head xfrm_decode_session; + struct hlist_head key_alloc; + struct hlist_head key_free; + struct hlist_head key_permission; + struct hlist_head key_getsecurity; + struct hlist_head audit_rule_init; + struct hlist_head audit_rule_known; + struct hlist_head audit_rule_match; + struct hlist_head audit_rule_free; + struct hlist_head bpf; + struct hlist_head bpf_map; + struct hlist_head bpf_prog; + struct hlist_head bpf_map_alloc_security; + struct hlist_head bpf_map_free_security; + struct hlist_head bpf_prog_alloc_security; + struct hlist_head bpf_prog_free_security; + struct hlist_head locked_down; + struct hlist_head perf_event_open; + struct hlist_head perf_event_alloc; + struct hlist_head perf_event_free; + struct hlist_head perf_event_read; + struct hlist_head perf_event_write; +}; + +struct security_hook_list { + struct hlist_node list; + struct hlist_head *head; + union security_list_options hook; + char *lsm; +}; + +enum lsm_order { + LSM_ORDER_FIRST = 4294967295, + LSM_ORDER_MUTABLE = 0, +}; + +struct lsm_info { + const char *name; + enum lsm_order order; + long unsigned int flags; + int *enabled; + int (*init)(); + struct lsm_blob_sizes *blobs; +}; + +enum lsm_event { + LSM_POLICY_CHANGE = 0, +}; + +typedef int (*initxattrs)(struct inode *, const struct xattr *, void *); + +enum ib_uverbs_write_cmds { + IB_USER_VERBS_CMD_GET_CONTEXT = 0, + IB_USER_VERBS_CMD_QUERY_DEVICE = 1, + IB_USER_VERBS_CMD_QUERY_PORT = 2, + IB_USER_VERBS_CMD_ALLOC_PD = 3, + IB_USER_VERBS_CMD_DEALLOC_PD = 4, + IB_USER_VERBS_CMD_CREATE_AH = 5, + IB_USER_VERBS_CMD_MODIFY_AH = 6, + IB_USER_VERBS_CMD_QUERY_AH = 7, + IB_USER_VERBS_CMD_DESTROY_AH = 8, + IB_USER_VERBS_CMD_REG_MR = 9, + IB_USER_VERBS_CMD_REG_SMR = 10, + IB_USER_VERBS_CMD_REREG_MR = 11, + IB_USER_VERBS_CMD_QUERY_MR = 12, + IB_USER_VERBS_CMD_DEREG_MR = 13, + IB_USER_VERBS_CMD_ALLOC_MW = 14, + IB_USER_VERBS_CMD_BIND_MW = 15, + IB_USER_VERBS_CMD_DEALLOC_MW = 16, + IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL = 17, + IB_USER_VERBS_CMD_CREATE_CQ = 18, + IB_USER_VERBS_CMD_RESIZE_CQ = 19, + IB_USER_VERBS_CMD_DESTROY_CQ = 20, + IB_USER_VERBS_CMD_POLL_CQ = 21, + IB_USER_VERBS_CMD_PEEK_CQ = 22, + IB_USER_VERBS_CMD_REQ_NOTIFY_CQ = 23, + IB_USER_VERBS_CMD_CREATE_QP = 24, + IB_USER_VERBS_CMD_QUERY_QP = 25, + IB_USER_VERBS_CMD_MODIFY_QP = 26, + 
IB_USER_VERBS_CMD_DESTROY_QP = 27, + IB_USER_VERBS_CMD_POST_SEND = 28, + IB_USER_VERBS_CMD_POST_RECV = 29, + IB_USER_VERBS_CMD_ATTACH_MCAST = 30, + IB_USER_VERBS_CMD_DETACH_MCAST = 31, + IB_USER_VERBS_CMD_CREATE_SRQ = 32, + IB_USER_VERBS_CMD_MODIFY_SRQ = 33, + IB_USER_VERBS_CMD_QUERY_SRQ = 34, + IB_USER_VERBS_CMD_DESTROY_SRQ = 35, + IB_USER_VERBS_CMD_POST_SRQ_RECV = 36, + IB_USER_VERBS_CMD_OPEN_XRCD = 37, + IB_USER_VERBS_CMD_CLOSE_XRCD = 38, + IB_USER_VERBS_CMD_CREATE_XSRQ = 39, + IB_USER_VERBS_CMD_OPEN_QP = 40, +}; + +enum ib_uverbs_wc_opcode { + IB_UVERBS_WC_SEND = 0, + IB_UVERBS_WC_RDMA_WRITE = 1, + IB_UVERBS_WC_RDMA_READ = 2, + IB_UVERBS_WC_COMP_SWAP = 3, + IB_UVERBS_WC_FETCH_ADD = 4, + IB_UVERBS_WC_BIND_MW = 5, + IB_UVERBS_WC_LOCAL_INV = 6, + IB_UVERBS_WC_TSO = 7, +}; + +enum ib_uverbs_create_qp_mask { + IB_UVERBS_CREATE_QP_MASK_IND_TABLE = 1, +}; + +enum ib_uverbs_wr_opcode { + IB_UVERBS_WR_RDMA_WRITE = 0, + IB_UVERBS_WR_RDMA_WRITE_WITH_IMM = 1, + IB_UVERBS_WR_SEND = 2, + IB_UVERBS_WR_SEND_WITH_IMM = 3, + IB_UVERBS_WR_RDMA_READ = 4, + IB_UVERBS_WR_ATOMIC_CMP_AND_SWP = 5, + IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD = 6, + IB_UVERBS_WR_LOCAL_INV = 7, + IB_UVERBS_WR_BIND_MW = 8, + IB_UVERBS_WR_SEND_WITH_INV = 9, + IB_UVERBS_WR_TSO = 10, + IB_UVERBS_WR_RDMA_READ_WITH_INV = 11, + IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP = 12, + IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD = 13, +}; + +enum ib_uverbs_access_flags { + IB_UVERBS_ACCESS_LOCAL_WRITE = 1, + IB_UVERBS_ACCESS_REMOTE_WRITE = 2, + IB_UVERBS_ACCESS_REMOTE_READ = 4, + IB_UVERBS_ACCESS_REMOTE_ATOMIC = 8, + IB_UVERBS_ACCESS_MW_BIND = 16, + IB_UVERBS_ACCESS_ZERO_BASED = 32, + IB_UVERBS_ACCESS_ON_DEMAND = 64, + IB_UVERBS_ACCESS_HUGETLB = 128, + IB_UVERBS_ACCESS_RELAXED_ORDERING = 1048576, + IB_UVERBS_ACCESS_OPTIONAL_RANGE = 1072693248, +}; + +enum ib_uverbs_srq_type { + IB_UVERBS_SRQT_BASIC = 0, + IB_UVERBS_SRQT_XRC = 1, + IB_UVERBS_SRQT_TM = 2, +}; + +enum ib_uverbs_wq_type { + IB_UVERBS_WQT_RQ = 0, +}; + +enum ib_uverbs_wq_flags { + IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING = 1, + IB_UVERBS_WQ_FLAGS_SCATTER_FCS = 2, + IB_UVERBS_WQ_FLAGS_DELAY_DROP = 4, + IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING = 8, +}; + +enum ib_uverbs_qp_type { + IB_UVERBS_QPT_RC = 2, + IB_UVERBS_QPT_UC = 3, + IB_UVERBS_QPT_UD = 4, + IB_UVERBS_QPT_RAW_PACKET = 8, + IB_UVERBS_QPT_XRC_INI = 9, + IB_UVERBS_QPT_XRC_TGT = 10, + IB_UVERBS_QPT_DRIVER = 255, +}; + +enum ib_uverbs_qp_create_flags { + IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 2, + IB_UVERBS_QP_CREATE_SCATTER_FCS = 256, + IB_UVERBS_QP_CREATE_CVLAN_STRIPPING = 512, + IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING = 2048, + IB_UVERBS_QP_CREATE_SQ_SIG_ALL = 4096, +}; + +enum ib_uverbs_gid_type { + IB_UVERBS_GID_TYPE_IB = 0, + IB_UVERBS_GID_TYPE_ROCE_V1 = 1, + IB_UVERBS_GID_TYPE_ROCE_V2 = 2, +}; + +enum ib_poll_context { + IB_POLL_SOFTIRQ = 0, + IB_POLL_WORKQUEUE = 1, + IB_POLL_UNBOUND_WORKQUEUE = 2, + IB_POLL_LAST_POOL_TYPE = 2, + IB_POLL_DIRECT = 3, +}; + +struct lsm_network_audit { + int netif; + struct sock *sk; + u16 family; + __be16 dport; + __be16 sport; + union { + struct { + __be32 daddr; + __be32 saddr; + } v4; + struct { + struct in6_addr daddr; + struct in6_addr saddr; + } v6; + } fam; +}; + +struct lsm_ioctlop_audit { + struct path path; + u16 cmd; +}; + +struct lsm_ibpkey_audit { + u64 subnet_prefix; + u16 pkey; +}; + +struct lsm_ibendport_audit { + char dev_name[64]; + u8 port; +}; + +struct selinux_state; + +struct selinux_audit_data { + u32 ssid; + u32 tsid; + u16 tclass; + u32 requested; + u32 audited; + u32 denied; 
+ int result; + struct selinux_state *state; +}; + +struct smack_audit_data; + +struct apparmor_audit_data; + +struct common_audit_data { + char type; + union { + struct path path; + struct dentry *dentry; + struct inode *inode; + struct lsm_network_audit *net; + int cap; + int ipc_id; + struct task_struct *tsk; + struct { + key_serial_t key; + char *key_desc; + } key_struct; + char *kmod_name; + struct lsm_ioctlop_audit *op; + struct file *file; + struct lsm_ibpkey_audit *ibpkey; + struct lsm_ibendport_audit *ibendport; + int reason; + } u; + union { + struct smack_audit_data *smack_audit_data; + struct selinux_audit_data *selinux_audit_data; + struct apparmor_audit_data *apparmor_audit_data; + }; +}; + +enum { + POLICYDB_CAPABILITY_NETPEER = 0, + POLICYDB_CAPABILITY_OPENPERM = 1, + POLICYDB_CAPABILITY_EXTSOCKCLASS = 2, + POLICYDB_CAPABILITY_ALWAYSNETWORK = 3, + POLICYDB_CAPABILITY_CGROUPSECLABEL = 4, + POLICYDB_CAPABILITY_NNP_NOSUID_TRANSITION = 5, + POLICYDB_CAPABILITY_GENFS_SECLABEL_SYMLINKS = 6, + __POLICYDB_CAPABILITY_MAX = 7, +}; + +struct selinux_avc; + +struct selinux_policy; + +struct selinux_state { + bool disabled; + bool enforcing; + bool checkreqprot; + bool initialized; + bool policycap[7]; + struct page *status_page; + struct mutex status_lock; + struct selinux_avc *avc; + struct selinux_policy *policy; + struct mutex policy_mutex; +}; + +struct avc_cache { + struct hlist_head slots[512]; + spinlock_t slots_lock[512]; + atomic_t lru_hint; + atomic_t active_nodes; + u32 latest_notif; +}; + +struct selinux_avc { + unsigned int avc_cache_threshold; + struct avc_cache avc_cache; +}; + +struct av_decision { + u32 allowed; + u32 auditallow; + u32 auditdeny; + u32 seqno; + u32 flags; +}; + +struct extended_perms_data { + u32 p[8]; +}; + +struct extended_perms_decision { + u8 used; + u8 driver; + struct extended_perms_data *allowed; + struct extended_perms_data *auditallow; + struct extended_perms_data *dontaudit; +}; + +struct extended_perms { + u16 len; + struct extended_perms_data drivers; +}; + +struct avc_cache_stats { + unsigned int lookups; + unsigned int misses; + unsigned int allocations; + unsigned int reclaims; + unsigned int frees; +}; + +struct security_class_mapping { + const char *name; + const char *perms[33]; +}; + +struct trace_event_raw_selinux_audited { + struct trace_entry ent; + u32 requested; + u32 denied; + u32 audited; + int result; + u32 __data_loc_scontext; + u32 __data_loc_tcontext; + u32 __data_loc_tclass; + char __data[0]; +}; + +struct trace_event_data_offsets_selinux_audited { + u32 scontext; + u32 tcontext; + u32 tclass; +}; + +typedef void (*btf_trace_selinux_audited)(void *, struct selinux_audit_data *, char *, char *, const char *); + +struct avc_xperms_node; + +struct avc_entry { + u32 ssid; + u32 tsid; + u16 tclass; + struct av_decision avd; + struct avc_xperms_node *xp_node; +}; + +struct avc_xperms_node { + struct extended_perms xp; + struct list_head xpd_head; +}; + +struct avc_node { + struct avc_entry ae; + struct hlist_node list; + struct callback_head rhead; +}; + +struct avc_xperms_decision_node { + struct extended_perms_decision xpd; + struct list_head xpd_list; +}; + +struct avc_callback_node { + int (*callback)(u32); + u32 events; + struct avc_callback_node *next; +}; + +typedef __u16 __sum16; + +enum sctp_endpoint_type { + SCTP_EP_TYPE_SOCKET = 0, + SCTP_EP_TYPE_ASSOCIATION = 1, +}; + +struct sctp_chunk; + +struct sctp_inq { + struct list_head in_chunk_list; + struct sctp_chunk *in_progress; + struct work_struct immediate; +}; + 
+struct sctp_bind_addr { + __u16 port; + struct list_head address_list; +}; + +struct sctp_ep_common { + struct hlist_node node; + int hashent; + enum sctp_endpoint_type type; + refcount_t refcnt; + bool dead; + struct sock *sk; + struct net *net; + struct sctp_inq inqueue; + struct sctp_bind_addr bind_addr; +}; + +struct crypto_shash___2; + +struct sctp_hmac_algo_param; + +struct sctp_chunks_param; + +struct sctp_endpoint { + struct sctp_ep_common base; + struct list_head asocs; + __u8 secret_key[32]; + __u8 *digest; + __u32 sndbuf_policy; + __u32 rcvbuf_policy; + struct crypto_shash___2 **auth_hmacs; + struct sctp_hmac_algo_param *auth_hmacs_list; + struct sctp_chunks_param *auth_chunk_list; + struct list_head endpoint_shared_keys; + __u16 active_key_id; + __u8 ecn_enable: 1; + __u8 auth_enable: 1; + __u8 intl_enable: 1; + __u8 prsctp_enable: 1; + __u8 asconf_enable: 1; + __u8 reconf_enable: 1; + __u8 strreset_enable; + u32 secid; + u32 peer_secid; +}; + +struct sockaddr_in6 { + short unsigned int sin6_family; + __be16 sin6_port; + __be32 sin6_flowinfo; + struct in6_addr sin6_addr; + __u32 sin6_scope_id; +}; + +struct in_addr { + __be32 s_addr; +}; + +struct sockaddr_in { + __kernel_sa_family_t sin_family; + __be16 sin_port; + struct in_addr sin_addr; + unsigned char __pad[8]; +}; + +struct nf_hook_state; + +typedef unsigned int nf_hookfn(void *, struct sk_buff *, const struct nf_hook_state *); + +struct nf_hook_entry { + nf_hookfn *hook; + void *priv; +}; + +struct nf_hook_entries { + u16 num_hook_entries; + struct nf_hook_entry hooks[0]; +}; + +struct nf_hook_state { + unsigned int hook; + u_int8_t pf; + struct net_device *in; + struct net_device *out; + struct sock *sk; + struct net *net; + int (*okfn)(struct net *, struct sock *, struct sk_buff *); +}; + +struct nf_hook_ops { + nf_hookfn *hook; + struct net_device *dev; + void *priv; + u_int8_t pf; + unsigned int hooknum; + int priority; +}; + +enum nf_ip_hook_priorities { + NF_IP_PRI_FIRST = 2147483648, + NF_IP_PRI_RAW_BEFORE_DEFRAG = 4294966846, + NF_IP_PRI_CONNTRACK_DEFRAG = 4294966896, + NF_IP_PRI_RAW = 4294966996, + NF_IP_PRI_SELINUX_FIRST = 4294967071, + NF_IP_PRI_CONNTRACK = 4294967096, + NF_IP_PRI_MANGLE = 4294967146, + NF_IP_PRI_NAT_DST = 4294967196, + NF_IP_PRI_FILTER = 0, + NF_IP_PRI_SECURITY = 50, + NF_IP_PRI_NAT_SRC = 100, + NF_IP_PRI_SELINUX_LAST = 225, + NF_IP_PRI_CONNTRACK_HELPER = 300, + NF_IP_PRI_CONNTRACK_CONFIRM = 2147483647, + NF_IP_PRI_LAST = 2147483647, +}; + +enum nf_ip6_hook_priorities { + NF_IP6_PRI_FIRST = 2147483648, + NF_IP6_PRI_RAW_BEFORE_DEFRAG = 4294966846, + NF_IP6_PRI_CONNTRACK_DEFRAG = 4294966896, + NF_IP6_PRI_RAW = 4294966996, + NF_IP6_PRI_SELINUX_FIRST = 4294967071, + NF_IP6_PRI_CONNTRACK = 4294967096, + NF_IP6_PRI_MANGLE = 4294967146, + NF_IP6_PRI_NAT_DST = 4294967196, + NF_IP6_PRI_FILTER = 0, + NF_IP6_PRI_SECURITY = 50, + NF_IP6_PRI_NAT_SRC = 100, + NF_IP6_PRI_SELINUX_LAST = 225, + NF_IP6_PRI_CONNTRACK_HELPER = 300, + NF_IP6_PRI_LAST = 2147483647, +}; + +struct socket_alloc { + struct socket socket; + struct inode vfs_inode; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct ip_options { + __be32 faddr; + __be32 nexthop; + unsigned char optlen; + unsigned char srr; + unsigned char rr; + unsigned char ts; + unsigned char is_strictroute: 1; + unsigned char srr_is_hit: 1; + unsigned char is_changed: 1; + unsigned char rr_needaddr: 1; + unsigned char ts_needtime: 1; + unsigned char ts_needaddr: 1; + unsigned char router_alert; + unsigned char cipso; + unsigned char __pad2; + unsigned char 
__data[0]; +}; + +struct ip_options_rcu { + struct callback_head rcu; + struct ip_options opt; +}; + +struct ipv6_opt_hdr; + +struct ipv6_rt_hdr; + +struct ipv6_txoptions { + refcount_t refcnt; + int tot_len; + __u16 opt_flen; + __u16 opt_nflen; + struct ipv6_opt_hdr *hopopt; + struct ipv6_opt_hdr *dst0opt; + struct ipv6_rt_hdr *srcrt; + struct ipv6_opt_hdr *dst1opt; + struct callback_head rcu; +}; + +struct inet_cork { + unsigned int flags; + __be32 addr; + struct ip_options *opt; + unsigned int fragsize; + int length; + struct dst_entry *dst; + u8 tx_flags; + __u8 ttl; + __s16 tos; + char priority; + __u16 gso_size; + u64 transmit_time; + u32 mark; +}; + +struct inet_cork_full { + struct inet_cork base; + struct flowi fl; +}; + +struct ipv6_pinfo; + +struct ip_mc_socklist; + +struct inet_sock { + struct sock sk; + struct ipv6_pinfo *pinet6; + __be32 inet_saddr; + __s16 uc_ttl; + __u16 cmsg_flags; + __be16 inet_sport; + __u16 inet_id; + struct ip_options_rcu *inet_opt; + int rx_dst_ifindex; + __u8 tos; + __u8 min_ttl; + __u8 mc_ttl; + __u8 pmtudisc; + __u8 recverr: 1; + __u8 is_icsk: 1; + __u8 freebind: 1; + __u8 hdrincl: 1; + __u8 mc_loop: 1; + __u8 transparent: 1; + __u8 mc_all: 1; + __u8 nodefrag: 1; + __u8 bind_address_no_port: 1; + __u8 recverr_rfc4884: 1; + __u8 defer_connect: 1; + __u8 rcv_tos; + __u8 convert_csum; + int uc_index; + int mc_index; + __be32 mc_addr; + struct ip_mc_socklist *mc_list; + struct inet_cork_full cork; +}; + +struct in6_pktinfo { + struct in6_addr ipi6_addr; + int ipi6_ifindex; +}; + +struct inet6_cork { + struct ipv6_txoptions *opt; + u8 hop_limit; + u8 tclass; +}; + +struct ipv6_mc_socklist; + +struct ipv6_ac_socklist; + +struct ipv6_fl_socklist; + +struct ipv6_pinfo { + struct in6_addr saddr; + struct in6_pktinfo sticky_pktinfo; + const struct in6_addr *daddr_cache; + const struct in6_addr *saddr_cache; + __be32 flow_label; + __u32 frag_size; + __u16 __unused_1: 7; + __s16 hop_limit: 9; + __u16 mc_loop: 1; + __u16 __unused_2: 6; + __s16 mcast_hops: 9; + int ucast_oif; + int mcast_oif; + union { + struct { + __u16 srcrt: 1; + __u16 osrcrt: 1; + __u16 rxinfo: 1; + __u16 rxoinfo: 1; + __u16 rxhlim: 1; + __u16 rxohlim: 1; + __u16 hopopts: 1; + __u16 ohopopts: 1; + __u16 dstopts: 1; + __u16 odstopts: 1; + __u16 rxflow: 1; + __u16 rxtclass: 1; + __u16 rxpmtu: 1; + __u16 rxorigdstaddr: 1; + __u16 recvfragsize: 1; + } bits; + __u16 all; + } rxopt; + __u16 recverr: 1; + __u16 sndflow: 1; + __u16 repflow: 1; + __u16 pmtudisc: 3; + __u16 padding: 1; + __u16 srcprefs: 3; + __u16 dontfrag: 1; + __u16 autoflowlabel: 1; + __u16 autoflowlabel_set: 1; + __u16 mc_all: 1; + __u16 recverr_rfc4884: 1; + __u16 rtalert_isolate: 1; + __u8 min_hopcount; + __u8 tclass; + __be32 rcv_flowinfo; + __u32 dst_cookie; + __u32 rx_dst_cookie; + struct ipv6_mc_socklist *ipv6_mc_list; + struct ipv6_ac_socklist *ipv6_ac_list; + struct ipv6_fl_socklist *ipv6_fl_list; + struct ipv6_txoptions *opt; + struct sk_buff *pktoptions; + struct sk_buff *rxpmtu; + struct inet6_cork cork; +}; + +struct tcphdr { + __be16 source; + __be16 dest; + __be32 seq; + __be32 ack_seq; + __u16 res1: 4; + __u16 doff: 4; + __u16 fin: 1; + __u16 syn: 1; + __u16 rst: 1; + __u16 psh: 1; + __u16 ack: 1; + __u16 urg: 1; + __u16 ece: 1; + __u16 cwr: 1; + __be16 window; + __sum16 check; + __be16 urg_ptr; +}; + +struct iphdr { + __u8 ihl: 4; + __u8 version: 4; + __u8 tos; + __be16 tot_len; + __be16 id; + __be16 frag_off; + __u8 ttl; + __u8 protocol; + __sum16 check; + __be32 saddr; + __be32 daddr; +}; + +struct ipv6_rt_hdr { 
+ __u8 nexthdr; + __u8 hdrlen; + __u8 type; + __u8 segments_left; +}; + +struct ipv6_opt_hdr { + __u8 nexthdr; + __u8 hdrlen; +}; + +struct ipv6hdr { + __u8 priority: 4; + __u8 version: 4; + __u8 flow_lbl[3]; + __be16 payload_len; + __u8 nexthdr; + __u8 hop_limit; + struct in6_addr saddr; + struct in6_addr daddr; +}; + +struct udphdr { + __be16 source; + __be16 dest; + __be16 len; + __sum16 check; +}; + +struct inet6_skb_parm { + int iif; + __be16 ra; + __u16 dst0; + __u16 srcrt; + __u16 dst1; + __u16 lastopt; + __u16 nhoff; + __u16 flags; + __u16 dsthao; + __u16 frag_max_size; +}; + +struct ip6_sf_socklist; + +struct ipv6_mc_socklist { + struct in6_addr addr; + int ifindex; + unsigned int sfmode; + struct ipv6_mc_socklist *next; + rwlock_t sflock; + struct ip6_sf_socklist *sflist; + struct callback_head rcu; +}; + +struct ipv6_ac_socklist { + struct in6_addr acl_addr; + int acl_ifindex; + struct ipv6_ac_socklist *acl_next; +}; + +struct ip6_flowlabel; + +struct ipv6_fl_socklist { + struct ipv6_fl_socklist *next; + struct ip6_flowlabel *fl; + struct callback_head rcu; +}; + +struct ip6_sf_socklist { + unsigned int sl_max; + unsigned int sl_count; + struct in6_addr sl_addr[0]; +}; + +struct ip6_flowlabel { + struct ip6_flowlabel *next; + __be32 label; + atomic_t users; + struct in6_addr dst; + struct ipv6_txoptions *opt; + long unsigned int linger; + struct callback_head rcu; + u8 share; + union { + struct pid *pid; + kuid_t uid; + } owner; + long unsigned int lastuse; + long unsigned int expires; + struct net *fl_net; +}; + +struct inet_skb_parm { + int iif; + struct ip_options opt; + u16 flags; + u16 frag_max_size; +}; + +struct tty_file_private { + struct tty_struct *tty; + struct file *file; + struct list_head list; +}; + +struct netlbl_lsm_cache { + refcount_t refcount; + void (*free)(const void *); + void *data; +}; + +struct netlbl_lsm_catmap { + u32 startbit; + u64 bitmap[4]; + struct netlbl_lsm_catmap *next; +}; + +struct netlbl_lsm_secattr { + u32 flags; + u32 type; + char *domain; + struct netlbl_lsm_cache *cache; + struct { + struct { + struct netlbl_lsm_catmap *cat; + u32 lvl; + } mls; + u32 secid; + } attr; +}; + +struct dccp_hdr { + __be16 dccph_sport; + __be16 dccph_dport; + __u8 dccph_doff; + __u8 dccph_cscov: 4; + __u8 dccph_ccval: 4; + __sum16 dccph_checksum; + __u8 dccph_x: 1; + __u8 dccph_type: 4; + __u8 dccph_reserved: 3; + __u8 dccph_seq2; + __be16 dccph_seq; +}; + +enum dccp_state { + DCCP_OPEN = 1, + DCCP_REQUESTING = 2, + DCCP_LISTEN = 10, + DCCP_RESPOND = 3, + DCCP_ACTIVE_CLOSEREQ = 4, + DCCP_PASSIVE_CLOSE = 8, + DCCP_CLOSING = 11, + DCCP_TIME_WAIT = 6, + DCCP_CLOSED = 7, + DCCP_NEW_SYN_RECV = 12, + DCCP_PARTOPEN = 13, + DCCP_PASSIVE_CLOSEREQ = 14, + DCCP_MAX_STATES = 15, +}; + +typedef __s32 sctp_assoc_t; + +enum sctp_msg_flags { + MSG_NOTIFICATION = 32768, +}; + +struct sctp_initmsg { + __u16 sinit_num_ostreams; + __u16 sinit_max_instreams; + __u16 sinit_max_attempts; + __u16 sinit_max_init_timeo; +}; + +struct sctp_sndrcvinfo { + __u16 sinfo_stream; + __u16 sinfo_ssn; + __u16 sinfo_flags; + __u32 sinfo_ppid; + __u32 sinfo_context; + __u32 sinfo_timetolive; + __u32 sinfo_tsn; + __u32 sinfo_cumtsn; + sctp_assoc_t sinfo_assoc_id; +}; + +struct sctp_rtoinfo { + sctp_assoc_t srto_assoc_id; + __u32 srto_initial; + __u32 srto_max; + __u32 srto_min; +}; + +struct sctp_assocparams { + sctp_assoc_t sasoc_assoc_id; + __u16 sasoc_asocmaxrxt; + __u16 sasoc_number_peer_destinations; + __u32 sasoc_peer_rwnd; + __u32 sasoc_local_rwnd; + __u32 sasoc_cookie_life; +}; + +struct 
sctp_paddrparams { + sctp_assoc_t spp_assoc_id; + struct __kernel_sockaddr_storage spp_address; + __u32 spp_hbinterval; + __u16 spp_pathmaxrxt; + __u32 spp_pathmtu; + __u32 spp_sackdelay; + __u32 spp_flags; + __u32 spp_ipv6_flowlabel; + __u8 spp_dscp; + char: 8; +} __attribute__((packed)); + +struct sctphdr { + __be16 source; + __be16 dest; + __be32 vtag; + __le32 checksum; +}; + +struct sctp_chunkhdr { + __u8 type; + __u8 flags; + __be16 length; +}; + +enum sctp_cid { + SCTP_CID_DATA = 0, + SCTP_CID_INIT = 1, + SCTP_CID_INIT_ACK = 2, + SCTP_CID_SACK = 3, + SCTP_CID_HEARTBEAT = 4, + SCTP_CID_HEARTBEAT_ACK = 5, + SCTP_CID_ABORT = 6, + SCTP_CID_SHUTDOWN = 7, + SCTP_CID_SHUTDOWN_ACK = 8, + SCTP_CID_ERROR = 9, + SCTP_CID_COOKIE_ECHO = 10, + SCTP_CID_COOKIE_ACK = 11, + SCTP_CID_ECN_ECNE = 12, + SCTP_CID_ECN_CWR = 13, + SCTP_CID_SHUTDOWN_COMPLETE = 14, + SCTP_CID_AUTH = 15, + SCTP_CID_I_DATA = 64, + SCTP_CID_FWD_TSN = 192, + SCTP_CID_ASCONF = 193, + SCTP_CID_I_FWD_TSN = 194, + SCTP_CID_ASCONF_ACK = 128, + SCTP_CID_RECONF = 130, +}; + +struct sctp_paramhdr { + __be16 type; + __be16 length; +}; + +enum sctp_param { + SCTP_PARAM_HEARTBEAT_INFO = 256, + SCTP_PARAM_IPV4_ADDRESS = 1280, + SCTP_PARAM_IPV6_ADDRESS = 1536, + SCTP_PARAM_STATE_COOKIE = 1792, + SCTP_PARAM_UNRECOGNIZED_PARAMETERS = 2048, + SCTP_PARAM_COOKIE_PRESERVATIVE = 2304, + SCTP_PARAM_HOST_NAME_ADDRESS = 2816, + SCTP_PARAM_SUPPORTED_ADDRESS_TYPES = 3072, + SCTP_PARAM_ECN_CAPABLE = 128, + SCTP_PARAM_RANDOM = 640, + SCTP_PARAM_CHUNKS = 896, + SCTP_PARAM_HMAC_ALGO = 1152, + SCTP_PARAM_SUPPORTED_EXT = 2176, + SCTP_PARAM_FWD_TSN_SUPPORT = 192, + SCTP_PARAM_ADD_IP = 448, + SCTP_PARAM_DEL_IP = 704, + SCTP_PARAM_ERR_CAUSE = 960, + SCTP_PARAM_SET_PRIMARY = 1216, + SCTP_PARAM_SUCCESS_REPORT = 1472, + SCTP_PARAM_ADAPTATION_LAYER_IND = 1728, + SCTP_PARAM_RESET_OUT_REQUEST = 3328, + SCTP_PARAM_RESET_IN_REQUEST = 3584, + SCTP_PARAM_RESET_TSN_REQUEST = 3840, + SCTP_PARAM_RESET_RESPONSE = 4096, + SCTP_PARAM_RESET_ADD_OUT_STREAMS = 4352, + SCTP_PARAM_RESET_ADD_IN_STREAMS = 4608, +}; + +struct sctp_datahdr { + __be32 tsn; + __be16 stream; + __be16 ssn; + __u32 ppid; + __u8 payload[0]; +}; + +struct sctp_idatahdr { + __be32 tsn; + __be16 stream; + __be16 reserved; + __be32 mid; + union { + __u32 ppid; + __be32 fsn; + }; + __u8 payload[0]; +}; + +struct sctp_inithdr { + __be32 init_tag; + __be32 a_rwnd; + __be16 num_outbound_streams; + __be16 num_inbound_streams; + __be32 initial_tsn; + __u8 params[0]; +}; + +struct sctp_init_chunk { + struct sctp_chunkhdr chunk_hdr; + struct sctp_inithdr init_hdr; +}; + +struct sctp_ipv4addr_param { + struct sctp_paramhdr param_hdr; + struct in_addr addr; +}; + +struct sctp_ipv6addr_param { + struct sctp_paramhdr param_hdr; + struct in6_addr addr; +}; + +struct sctp_cookie_preserve_param { + struct sctp_paramhdr param_hdr; + __be32 lifespan_increment; +}; + +struct sctp_hostname_param { + struct sctp_paramhdr param_hdr; + uint8_t hostname[0]; +}; + +struct sctp_supported_addrs_param { + struct sctp_paramhdr param_hdr; + __be16 types[0]; +}; + +struct sctp_adaptation_ind_param { + struct sctp_paramhdr param_hdr; + __be32 adaptation_ind; +}; + +struct sctp_supported_ext_param { + struct sctp_paramhdr param_hdr; + __u8 chunks[0]; +}; + +struct sctp_random_param { + struct sctp_paramhdr param_hdr; + __u8 random_val[0]; +}; + +struct sctp_chunks_param { + struct sctp_paramhdr param_hdr; + __u8 chunks[0]; +}; + +struct sctp_hmac_algo_param { + struct sctp_paramhdr param_hdr; + __be16 hmac_ids[0]; +}; + +struct 
sctp_cookie_param { + struct sctp_paramhdr p; + __u8 body[0]; +}; + +struct sctp_gap_ack_block { + __be16 start; + __be16 end; +}; + +union sctp_sack_variable { + struct sctp_gap_ack_block gab; + __be32 dup; +}; + +struct sctp_sackhdr { + __be32 cum_tsn_ack; + __be32 a_rwnd; + __be16 num_gap_ack_blocks; + __be16 num_dup_tsns; + union sctp_sack_variable variable[0]; +}; + +struct sctp_heartbeathdr { + struct sctp_paramhdr info; +}; + +struct sctp_shutdownhdr { + __be32 cum_tsn_ack; +}; + +struct sctp_errhdr { + __be16 cause; + __be16 length; + __u8 variable[0]; +}; + +struct sctp_ecnehdr { + __be32 lowest_tsn; +}; + +struct sctp_cwrhdr { + __be32 lowest_tsn; +}; + +struct sctp_fwdtsn_skip { + __be16 stream; + __be16 ssn; +}; + +struct sctp_fwdtsn_hdr { + __be32 new_cum_tsn; + struct sctp_fwdtsn_skip skip[0]; +}; + +struct sctp_ifwdtsn_skip { + __be16 stream; + __u8 reserved; + __u8 flags; + __be32 mid; +}; + +struct sctp_ifwdtsn_hdr { + __be32 new_cum_tsn; + struct sctp_ifwdtsn_skip skip[0]; +}; + +struct sctp_addip_param { + struct sctp_paramhdr param_hdr; + __be32 crr_id; +}; + +struct sctp_addiphdr { + __be32 serial; + __u8 params[0]; +}; + +struct sctp_authhdr { + __be16 shkey_id; + __be16 hmac_id; + __u8 hmac[0]; +}; + +union sctp_addr { + struct sockaddr_in v4; + struct sockaddr_in6 v6; + struct sockaddr sa; +}; + +struct sctp_cookie { + __u32 my_vtag; + __u32 peer_vtag; + __u32 my_ttag; + __u32 peer_ttag; + ktime_t expiration; + __u16 sinit_num_ostreams; + __u16 sinit_max_instreams; + __u32 initial_tsn; + union sctp_addr peer_addr; + __u16 my_port; + __u8 prsctp_capable; + __u8 padding; + __u32 adaptation_ind; + __u8 auth_random[36]; + __u8 auth_hmacs[10]; + __u8 auth_chunks[20]; + __u32 raw_addr_list_len; + struct sctp_init_chunk peer_init[0]; +}; + +struct sctp_tsnmap { + long unsigned int *tsn_map; + __u32 base_tsn; + __u32 cumulative_tsn_ack_point; + __u32 max_tsn_seen; + __u16 len; + __u16 pending_data; + __u16 num_dup_tsns; + __be32 dup_tsns[16]; +}; + +struct sctp_inithdr_host { + __u32 init_tag; + __u32 a_rwnd; + __u16 num_outbound_streams; + __u16 num_inbound_streams; + __u32 initial_tsn; +}; + +enum sctp_state { + SCTP_STATE_CLOSED = 0, + SCTP_STATE_COOKIE_WAIT = 1, + SCTP_STATE_COOKIE_ECHOED = 2, + SCTP_STATE_ESTABLISHED = 3, + SCTP_STATE_SHUTDOWN_PENDING = 4, + SCTP_STATE_SHUTDOWN_SENT = 5, + SCTP_STATE_SHUTDOWN_RECEIVED = 6, + SCTP_STATE_SHUTDOWN_ACK_SENT = 7, +}; + +struct sctp_stream_out_ext; + +struct sctp_stream_out { + union { + __u32 mid; + __u16 ssn; + }; + __u32 mid_uo; + struct sctp_stream_out_ext *ext; + __u8 state; +}; + +struct sctp_stream_in { + union { + __u32 mid; + __u16 ssn; + }; + __u32 mid_uo; + __u32 fsn; + __u32 fsn_uo; + char pd_mode; + char pd_mode_uo; +}; + +struct sctp_stream_interleave; + +struct sctp_stream { + struct { + struct __genradix tree; + struct sctp_stream_out type[0]; + } out; + struct { + struct __genradix tree; + struct sctp_stream_in type[0]; + } in; + __u16 outcnt; + __u16 incnt; + struct sctp_stream_out *out_curr; + union { + struct { + struct list_head prio_list; + }; + struct { + struct list_head rr_list; + struct sctp_stream_out_ext *rr_next; + }; + }; + struct sctp_stream_interleave *si; +}; + +struct sctp_sched_ops; + +struct sctp_association; + +struct sctp_outq { + struct sctp_association *asoc; + struct list_head out_chunk_list; + struct sctp_sched_ops *sched; + unsigned int out_qlen; + unsigned int error; + struct list_head control_chunk_list; + struct list_head sacked; + struct list_head retransmit; + struct list_head 
abandoned; + __u32 outstanding_bytes; + char fast_rtx; + char cork; +}; + +struct sctp_ulpq { + char pd_mode; + struct sctp_association *asoc; + struct sk_buff_head reasm; + struct sk_buff_head reasm_uo; + struct sk_buff_head lobby; +}; + +struct sctp_priv_assoc_stats { + struct __kernel_sockaddr_storage obs_rto_ipaddr; + __u64 max_obs_rto; + __u64 isacks; + __u64 osacks; + __u64 opackets; + __u64 ipackets; + __u64 rtxchunks; + __u64 outofseqtsns; + __u64 idupchunks; + __u64 gapcnt; + __u64 ouodchunks; + __u64 iuodchunks; + __u64 oodchunks; + __u64 iodchunks; + __u64 octrlchunks; + __u64 ictrlchunks; +}; + +struct sctp_transport; + +struct sctp_auth_bytes; + +struct sctp_shared_key; + +struct sctp_association { + struct sctp_ep_common base; + struct list_head asocs; + sctp_assoc_t assoc_id; + struct sctp_endpoint *ep; + struct sctp_cookie c; + struct { + struct list_head transport_addr_list; + __u32 rwnd; + __u16 transport_count; + __u16 port; + struct sctp_transport *primary_path; + union sctp_addr primary_addr; + struct sctp_transport *active_path; + struct sctp_transport *retran_path; + struct sctp_transport *last_sent_to; + struct sctp_transport *last_data_from; + struct sctp_tsnmap tsn_map; + __be16 addip_disabled_mask; + __u16 ecn_capable: 1; + __u16 ipv4_address: 1; + __u16 ipv6_address: 1; + __u16 hostname_address: 1; + __u16 asconf_capable: 1; + __u16 prsctp_capable: 1; + __u16 reconf_capable: 1; + __u16 intl_capable: 1; + __u16 auth_capable: 1; + __u16 sack_needed: 1; + __u16 sack_generation: 1; + __u16 zero_window_announced: 1; + __u32 sack_cnt; + __u32 adaptation_ind; + struct sctp_inithdr_host i; + void *cookie; + int cookie_len; + __u32 addip_serial; + struct sctp_random_param *peer_random; + struct sctp_chunks_param *peer_chunks; + struct sctp_hmac_algo_param *peer_hmacs; + } peer; + enum sctp_state state; + int overall_error_count; + ktime_t cookie_life; + long unsigned int rto_initial; + long unsigned int rto_max; + long unsigned int rto_min; + int max_burst; + int max_retrans; + __u16 pf_retrans; + __u16 ps_retrans; + __u16 max_init_attempts; + __u16 init_retries; + long unsigned int max_init_timeo; + long unsigned int hbinterval; + __u16 pathmaxrxt; + __u32 flowlabel; + __u8 dscp; + __u8 pmtu_pending; + __u32 pathmtu; + __u32 param_flags; + __u32 sackfreq; + long unsigned int sackdelay; + long unsigned int timeouts[11]; + struct timer_list timers[11]; + struct sctp_transport *shutdown_last_sent_to; + struct sctp_transport *init_last_sent_to; + int shutdown_retries; + __u32 next_tsn; + __u32 ctsn_ack_point; + __u32 adv_peer_ack_point; + __u32 highest_sacked; + __u32 fast_recovery_exit; + __u8 fast_recovery; + __u16 unack_data; + __u32 rtx_data_chunks; + __u32 rwnd; + __u32 a_rwnd; + __u32 rwnd_over; + __u32 rwnd_press; + int sndbuf_used; + atomic_t rmem_alloc; + wait_queue_head_t wait; + __u32 frag_point; + __u32 user_frag; + int init_err_counter; + int init_cycle; + __u16 default_stream; + __u16 default_flags; + __u32 default_ppid; + __u32 default_context; + __u32 default_timetolive; + __u32 default_rcv_context; + struct sctp_stream stream; + struct sctp_outq outqueue; + struct sctp_ulpq ulpq; + __u32 last_ecne_tsn; + __u32 last_cwr_tsn; + int numduptsns; + struct sctp_chunk *addip_last_asconf; + struct list_head asconf_ack_list; + struct list_head addip_chunk_list; + __u32 addip_serial; + int src_out_of_asoc_ok; + union sctp_addr *asconf_addr_del_pending; + struct sctp_transport *new_transport; + struct list_head endpoint_shared_keys; + struct sctp_auth_bytes 
*asoc_shared_key; + struct sctp_shared_key *shkey; + __u16 default_hmac_id; + __u16 active_key_id; + __u8 need_ecne: 1; + __u8 temp: 1; + __u8 pf_expose: 2; + __u8 force_delay: 1; + __u8 strreset_enable; + __u8 strreset_outstanding; + __u32 strreset_outseq; + __u32 strreset_inseq; + __u32 strreset_result[2]; + struct sctp_chunk *strreset_chunk; + struct sctp_priv_assoc_stats stats; + int sent_cnt_removable; + __u16 subscribe; + __u64 abandoned_unsent[3]; + __u64 abandoned_sent[3]; + struct callback_head rcu; +}; + +struct sctp_auth_bytes { + refcount_t refcnt; + __u32 len; + __u8 data[0]; +}; + +struct sctp_shared_key { + struct list_head key_list; + struct sctp_auth_bytes *key; + refcount_t refcnt; + __u16 key_id; + __u8 deactivated; +}; + +enum { + SCTP_MAX_STREAM = 65535, +}; + +enum sctp_event_timeout { + SCTP_EVENT_TIMEOUT_NONE = 0, + SCTP_EVENT_TIMEOUT_T1_COOKIE = 1, + SCTP_EVENT_TIMEOUT_T1_INIT = 2, + SCTP_EVENT_TIMEOUT_T2_SHUTDOWN = 3, + SCTP_EVENT_TIMEOUT_T3_RTX = 4, + SCTP_EVENT_TIMEOUT_T4_RTO = 5, + SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD = 6, + SCTP_EVENT_TIMEOUT_HEARTBEAT = 7, + SCTP_EVENT_TIMEOUT_RECONF = 8, + SCTP_EVENT_TIMEOUT_SACK = 9, + SCTP_EVENT_TIMEOUT_AUTOCLOSE = 10, +}; + +enum { + SCTP_MAX_DUP_TSNS = 16, +}; + +enum sctp_scope { + SCTP_SCOPE_GLOBAL = 0, + SCTP_SCOPE_PRIVATE = 1, + SCTP_SCOPE_LINK = 2, + SCTP_SCOPE_LOOPBACK = 3, + SCTP_SCOPE_UNUSABLE = 4, +}; + +enum { + SCTP_AUTH_HMAC_ID_RESERVED_0 = 0, + SCTP_AUTH_HMAC_ID_SHA1 = 1, + SCTP_AUTH_HMAC_ID_RESERVED_2 = 2, + SCTP_AUTH_HMAC_ID_SHA256 = 3, + __SCTP_AUTH_HMAC_MAX = 4, +}; + +struct sctp_ulpevent { + struct sctp_association *asoc; + struct sctp_chunk *chunk; + unsigned int rmem_len; + union { + __u32 mid; + __u16 ssn; + }; + union { + __u32 ppid; + __u32 fsn; + }; + __u32 tsn; + __u32 cumtsn; + __u16 stream; + __u16 flags; + __u16 msg_flags; +} __attribute__((packed)); + +union sctp_addr_param; + +union sctp_params { + void *v; + struct sctp_paramhdr *p; + struct sctp_cookie_preserve_param *life; + struct sctp_hostname_param *dns; + struct sctp_cookie_param *cookie; + struct sctp_supported_addrs_param *sat; + struct sctp_ipv4addr_param *v4; + struct sctp_ipv6addr_param *v6; + union sctp_addr_param *addr; + struct sctp_adaptation_ind_param *aind; + struct sctp_supported_ext_param *ext; + struct sctp_random_param *random; + struct sctp_chunks_param *chunks; + struct sctp_hmac_algo_param *hmac_algo; + struct sctp_addip_param *addip; +}; + +struct sctp_sender_hb_info; + +struct sctp_signed_cookie; + +struct sctp_datamsg; + +struct sctp_chunk { + struct list_head list; + refcount_t refcnt; + int sent_count; + union { + struct list_head transmitted_list; + struct list_head stream_list; + }; + struct list_head frag_list; + struct sk_buff *skb; + union { + struct sk_buff *head_skb; + struct sctp_shared_key *shkey; + }; + union sctp_params param_hdr; + union { + __u8 *v; + struct sctp_datahdr *data_hdr; + struct sctp_inithdr *init_hdr; + struct sctp_sackhdr *sack_hdr; + struct sctp_heartbeathdr *hb_hdr; + struct sctp_sender_hb_info *hbs_hdr; + struct sctp_shutdownhdr *shutdown_hdr; + struct sctp_signed_cookie *cookie_hdr; + struct sctp_ecnehdr *ecne_hdr; + struct sctp_cwrhdr *ecn_cwr_hdr; + struct sctp_errhdr *err_hdr; + struct sctp_addiphdr *addip_hdr; + struct sctp_fwdtsn_hdr *fwdtsn_hdr; + struct sctp_authhdr *auth_hdr; + struct sctp_idatahdr *idata_hdr; + struct sctp_ifwdtsn_hdr *ifwdtsn_hdr; + } subh; + __u8 *chunk_end; + struct sctp_chunkhdr *chunk_hdr; + struct sctphdr *sctp_hdr; + struct sctp_sndrcvinfo sinfo; + 
struct sctp_association *asoc; + struct sctp_ep_common *rcvr; + long unsigned int sent_at; + union sctp_addr source; + union sctp_addr dest; + struct sctp_datamsg *msg; + struct sctp_transport *transport; + struct sk_buff *auth_chunk; + __u16 rtt_in_progress: 1; + __u16 has_tsn: 1; + __u16 has_ssn: 1; + __u16 singleton: 1; + __u16 end_of_packet: 1; + __u16 ecn_ce_done: 1; + __u16 pdiscard: 1; + __u16 tsn_gap_acked: 1; + __u16 data_accepted: 1; + __u16 auth: 1; + __u16 has_asconf: 1; + __u16 tsn_missing_report: 2; + __u16 fast_retransmit: 2; +}; + +struct sctp_stream_interleave { + __u16 data_chunk_len; + __u16 ftsn_chunk_len; + struct sctp_chunk * (*make_datafrag)(const struct sctp_association *, const struct sctp_sndrcvinfo *, int, __u8, gfp_t); + void (*assign_number)(struct sctp_chunk *); + bool (*validate_data)(struct sctp_chunk *); + int (*ulpevent_data)(struct sctp_ulpq *, struct sctp_chunk *, gfp_t); + int (*enqueue_event)(struct sctp_ulpq *, struct sctp_ulpevent *); + void (*renege_events)(struct sctp_ulpq *, struct sctp_chunk *, gfp_t); + void (*start_pd)(struct sctp_ulpq *, gfp_t); + void (*abort_pd)(struct sctp_ulpq *, gfp_t); + void (*generate_ftsn)(struct sctp_outq *, __u32); + bool (*validate_ftsn)(struct sctp_chunk *); + void (*report_ftsn)(struct sctp_ulpq *, __u32); + void (*handle_ftsn)(struct sctp_ulpq *, struct sctp_chunk *); +}; + +struct sctp_bind_bucket { + short unsigned int port; + signed char fastreuse; + signed char fastreuseport; + kuid_t fastuid; + struct hlist_node node; + struct hlist_head owner; + struct net *net; +}; + +enum sctp_socket_type { + SCTP_SOCKET_UDP = 0, + SCTP_SOCKET_UDP_HIGH_BANDWIDTH = 1, + SCTP_SOCKET_TCP = 2, +}; + +struct sctp_pf; + +struct sctp_sock { + struct inet_sock inet; + enum sctp_socket_type type; + int: 32; + struct sctp_pf *pf; + struct crypto_shash___2 *hmac; + char *sctp_hmac_alg; + struct sctp_endpoint *ep; + struct sctp_bind_bucket *bind_hash; + __u16 default_stream; + short: 16; + __u32 default_ppid; + __u16 default_flags; + short: 16; + __u32 default_context; + __u32 default_timetolive; + __u32 default_rcv_context; + int max_burst; + __u32 hbinterval; + __u16 pathmaxrxt; + short: 16; + __u32 flowlabel; + __u8 dscp; + char: 8; + __u16 pf_retrans; + __u16 ps_retrans; + short: 16; + __u32 pathmtu; + __u32 sackdelay; + __u32 sackfreq; + __u32 param_flags; + __u32 default_ss; + struct sctp_rtoinfo rtoinfo; + struct sctp_paddrparams paddrparam; + struct sctp_assocparams assocparams; + __u16 subscribe; + struct sctp_initmsg initmsg; + short: 16; + int user_frag; + __u32 autoclose; + __u32 adaptation_ind; + __u32 pd_point; + __u16 nodelay: 1; + __u16 pf_expose: 2; + __u16 reuse: 1; + __u16 disable_fragments: 1; + __u16 v4mapped: 1; + __u16 frag_interleave: 1; + __u16 recvrcvinfo: 1; + __u16 recvnxtinfo: 1; + __u16 data_ready_signalled: 1; + int: 22; + atomic_t pd_mode; + struct sk_buff_head pd_lobby; + struct list_head auto_asconf_list; + int do_auto_asconf; + int: 32; +} __attribute__((packed)); + +struct sctp_af; + +struct sctp_pf { + void (*event_msgname)(struct sctp_ulpevent *, char *, int *); + void (*skb_msgname)(struct sk_buff *, char *, int *); + int (*af_supported)(sa_family_t, struct sctp_sock *); + int (*cmp_addr)(const union sctp_addr *, const union sctp_addr *, struct sctp_sock *); + int (*bind_verify)(struct sctp_sock *, union sctp_addr *); + int (*send_verify)(struct sctp_sock *, union sctp_addr *); + int (*supported_addrs)(const struct sctp_sock *, __be16 *); + struct sock * (*create_accept_sk)(struct sock *, 
struct sctp_association *, bool); + int (*addr_to_user)(struct sctp_sock *, union sctp_addr *); + void (*to_sk_saddr)(union sctp_addr *, struct sock *); + void (*to_sk_daddr)(union sctp_addr *, struct sock *); + void (*copy_ip_options)(struct sock *, struct sock *); + struct sctp_af *af; +}; + +struct sctp_signed_cookie { + __u8 signature[32]; + __u32 __pad; + struct sctp_cookie c; +} __attribute__((packed)); + +union sctp_addr_param { + struct sctp_paramhdr p; + struct sctp_ipv4addr_param v4; + struct sctp_ipv6addr_param v6; +}; + +struct sctp_sender_hb_info { + struct sctp_paramhdr param_hdr; + union sctp_addr daddr; + long unsigned int sent_at; + __u64 hb_nonce; +}; + +struct sctp_af { + int (*sctp_xmit)(struct sk_buff *, struct sctp_transport *); + int (*setsockopt)(struct sock *, int, int, sockptr_t, unsigned int); + int (*getsockopt)(struct sock *, int, int, char *, int *); + void (*get_dst)(struct sctp_transport *, union sctp_addr *, struct flowi *, struct sock *); + void (*get_saddr)(struct sctp_sock *, struct sctp_transport *, struct flowi *); + void (*copy_addrlist)(struct list_head *, struct net_device *); + int (*cmp_addr)(const union sctp_addr *, const union sctp_addr *); + void (*addr_copy)(union sctp_addr *, union sctp_addr *); + void (*from_skb)(union sctp_addr *, struct sk_buff *, int); + void (*from_sk)(union sctp_addr *, struct sock *); + void (*from_addr_param)(union sctp_addr *, union sctp_addr_param *, __be16, int); + int (*to_addr_param)(const union sctp_addr *, union sctp_addr_param *); + int (*addr_valid)(union sctp_addr *, struct sctp_sock *, const struct sk_buff *); + enum sctp_scope (*scope)(union sctp_addr *); + void (*inaddr_any)(union sctp_addr *, __be16); + int (*is_any)(const union sctp_addr *); + int (*available)(union sctp_addr *, struct sctp_sock *); + int (*skb_iif)(const struct sk_buff *); + int (*is_ce)(const struct sk_buff *); + void (*seq_dump_addr)(struct seq_file *, union sctp_addr *); + void (*ecn_capable)(struct sock *); + __u16 net_header_len; + int sockaddr_len; + int (*ip_options_len)(struct sock *); + sa_family_t sa_family; + struct list_head list; +}; + +struct sctp_packet { + __u16 source_port; + __u16 destination_port; + __u32 vtag; + struct list_head chunk_list; + size_t overhead; + size_t size; + size_t max_size; + struct sctp_transport *transport; + struct sctp_chunk *auth; + u8 has_cookie_echo: 1; + u8 has_sack: 1; + u8 has_auth: 1; + u8 has_data: 1; + u8 ipfragok: 1; +}; + +struct sctp_transport { + struct list_head transports; + struct rhlist_head node; + refcount_t refcnt; + __u32 rto_pending: 1; + __u32 hb_sent: 1; + __u32 pmtu_pending: 1; + __u32 dst_pending_confirm: 1; + __u32 sack_generation: 1; + u32 dst_cookie; + struct flowi fl; + union sctp_addr ipaddr; + struct sctp_af *af_specific; + struct sctp_association *asoc; + long unsigned int rto; + __u32 rtt; + __u32 rttvar; + __u32 srtt; + __u32 cwnd; + __u32 ssthresh; + __u32 partial_bytes_acked; + __u32 flight_size; + __u32 burst_limited; + struct dst_entry *dst; + union sctp_addr saddr; + long unsigned int hbinterval; + long unsigned int sackdelay; + __u32 sackfreq; + atomic_t mtu_info; + ktime_t last_time_heard; + long unsigned int last_time_sent; + long unsigned int last_time_ecne_reduced; + __u16 pathmaxrxt; + __u32 flowlabel; + __u8 dscp; + __u16 pf_retrans; + __u16 ps_retrans; + __u32 pathmtu; + __u32 param_flags; + int init_sent_count; + int state; + short unsigned int error_count; + struct timer_list T3_rtx_timer; + struct timer_list hb_timer; + struct timer_list 
proto_unreach_timer; + struct timer_list reconf_timer; + struct list_head transmitted; + struct sctp_packet packet; + struct list_head send_ready; + struct { + __u32 next_tsn_at_change; + char changeover_active; + char cycling_changeover; + char cacc_saw_newack; + } cacc; + __u64 hb_nonce; + struct callback_head rcu; +}; + +struct sctp_datamsg { + struct list_head chunks; + refcount_t refcnt; + long unsigned int expires_at; + int send_error; + u8 send_failed: 1; + u8 can_delay: 1; + u8 abandoned: 1; +}; + +struct sctp_stream_priorities { + struct list_head prio_sched; + struct list_head active; + struct sctp_stream_out_ext *next; + __u16 prio; +}; + +struct sctp_stream_out_ext { + __u64 abandoned_unsent[3]; + __u64 abandoned_sent[3]; + struct list_head outq; + union { + struct { + struct list_head prio_list; + struct sctp_stream_priorities *prio_head; + }; + struct { + struct list_head rr_list; + }; + }; +}; + +struct task_security_struct { + u32 osid; + u32 sid; + u32 exec_sid; + u32 create_sid; + u32 keycreate_sid; + u32 sockcreate_sid; +}; + +enum label_initialized { + LABEL_INVALID = 0, + LABEL_INITIALIZED = 1, + LABEL_PENDING = 2, +}; + +struct inode_security_struct { + struct inode *inode; + struct list_head list; + u32 task_sid; + u32 sid; + u16 sclass; + unsigned char initialized; + spinlock_t lock; +}; + +struct file_security_struct { + u32 sid; + u32 fown_sid; + u32 isid; + u32 pseqno; +}; + +struct superblock_security_struct { + struct super_block *sb; + u32 sid; + u32 def_sid; + u32 mntpoint_sid; + short unsigned int behavior; + short unsigned int flags; + struct mutex lock; + struct list_head isec_head; + spinlock_t isec_lock; +}; + +struct msg_security_struct { + u32 sid; +}; + +struct ipc_security_struct { + u16 sclass; + u32 sid; +}; + +struct sk_security_struct { + enum { + NLBL_UNSET = 0, + NLBL_REQUIRE = 1, + NLBL_LABELED = 2, + NLBL_REQSKB = 3, + NLBL_CONNLABELED = 4, + } nlbl_state; + struct netlbl_lsm_secattr *nlbl_secattr; + u32 sid; + u32 peer_sid; + u16 sclass; + enum { + SCTP_ASSOC_UNSET = 0, + SCTP_ASSOC_SET = 1, + } sctp_assoc_state; +}; + +struct tun_security_struct { + u32 sid; +}; + +struct key_security_struct { + u32 sid; +}; + +struct bpf_security_struct { + u32 sid; +}; + +struct perf_event_security_struct { + u32 sid; +}; + +struct selinux_mnt_opts { + const char *fscontext; + const char *context; + const char *rootcontext; + const char *defcontext; +}; + +enum { + Opt_error___2 = 4294967295, + Opt_context = 0, + Opt_defcontext = 1, + Opt_fscontext = 2, + Opt_rootcontext = 3, + Opt_seclabel = 4, +}; + +enum sel_inos { + SEL_ROOT_INO = 2, + SEL_LOAD = 3, + SEL_ENFORCE = 4, + SEL_CONTEXT = 5, + SEL_ACCESS = 6, + SEL_CREATE = 7, + SEL_RELABEL = 8, + SEL_USER = 9, + SEL_POLICYVERS = 10, + SEL_COMMIT_BOOLS = 11, + SEL_MLS = 12, + SEL_DISABLE = 13, + SEL_MEMBER = 14, + SEL_CHECKREQPROT = 15, + SEL_COMPAT_NET = 16, + SEL_REJECT_UNKNOWN = 17, + SEL_DENY_UNKNOWN = 18, + SEL_STATUS = 19, + SEL_POLICY = 20, + SEL_VALIDATE_TRANS = 21, + SEL_INO_NEXT = 22, +}; + +struct selinux_fs_info { + struct dentry *bool_dir; + unsigned int bool_num; + char **bool_pending_names; + unsigned int *bool_pending_values; + struct dentry *class_dir; + long unsigned int last_class_ino; + bool policy_opened; + struct dentry *policycap_dir; + long unsigned int last_ino; + struct selinux_state *state; + struct super_block *sb; +}; + +struct policy_load_memory { + size_t len; + void *data; +}; + +enum { + SELNL_MSG_SETENFORCE = 16, + SELNL_MSG_POLICYLOAD = 17, + SELNL_MSG_MAX = 18, +}; + 
+enum selinux_nlgroups { + SELNLGRP_NONE = 0, + SELNLGRP_AVC = 1, + __SELNLGRP_MAX = 2, +}; + +struct selnl_msg_setenforce { + __s32 val; +}; + +struct selnl_msg_policyload { + __u32 seqno; +}; + +enum { + XFRM_MSG_BASE = 16, + XFRM_MSG_NEWSA = 16, + XFRM_MSG_DELSA = 17, + XFRM_MSG_GETSA = 18, + XFRM_MSG_NEWPOLICY = 19, + XFRM_MSG_DELPOLICY = 20, + XFRM_MSG_GETPOLICY = 21, + XFRM_MSG_ALLOCSPI = 22, + XFRM_MSG_ACQUIRE = 23, + XFRM_MSG_EXPIRE = 24, + XFRM_MSG_UPDPOLICY = 25, + XFRM_MSG_UPDSA = 26, + XFRM_MSG_POLEXPIRE = 27, + XFRM_MSG_FLUSHSA = 28, + XFRM_MSG_FLUSHPOLICY = 29, + XFRM_MSG_NEWAE = 30, + XFRM_MSG_GETAE = 31, + XFRM_MSG_REPORT = 32, + XFRM_MSG_MIGRATE = 33, + XFRM_MSG_NEWSADINFO = 34, + XFRM_MSG_GETSADINFO = 35, + XFRM_MSG_NEWSPDINFO = 36, + XFRM_MSG_GETSPDINFO = 37, + XFRM_MSG_MAPPING = 38, + __XFRM_MSG_MAX = 39, +}; + +enum { + RTM_BASE = 16, + RTM_NEWLINK = 16, + RTM_DELLINK = 17, + RTM_GETLINK = 18, + RTM_SETLINK = 19, + RTM_NEWADDR = 20, + RTM_DELADDR = 21, + RTM_GETADDR = 22, + RTM_NEWROUTE = 24, + RTM_DELROUTE = 25, + RTM_GETROUTE = 26, + RTM_NEWNEIGH = 28, + RTM_DELNEIGH = 29, + RTM_GETNEIGH = 30, + RTM_NEWRULE = 32, + RTM_DELRULE = 33, + RTM_GETRULE = 34, + RTM_NEWQDISC = 36, + RTM_DELQDISC = 37, + RTM_GETQDISC = 38, + RTM_NEWTCLASS = 40, + RTM_DELTCLASS = 41, + RTM_GETTCLASS = 42, + RTM_NEWTFILTER = 44, + RTM_DELTFILTER = 45, + RTM_GETTFILTER = 46, + RTM_NEWACTION = 48, + RTM_DELACTION = 49, + RTM_GETACTION = 50, + RTM_NEWPREFIX = 52, + RTM_GETMULTICAST = 58, + RTM_GETANYCAST = 62, + RTM_NEWNEIGHTBL = 64, + RTM_GETNEIGHTBL = 66, + RTM_SETNEIGHTBL = 67, + RTM_NEWNDUSEROPT = 68, + RTM_NEWADDRLABEL = 72, + RTM_DELADDRLABEL = 73, + RTM_GETADDRLABEL = 74, + RTM_GETDCB = 78, + RTM_SETDCB = 79, + RTM_NEWNETCONF = 80, + RTM_DELNETCONF = 81, + RTM_GETNETCONF = 82, + RTM_NEWMDB = 84, + RTM_DELMDB = 85, + RTM_GETMDB = 86, + RTM_NEWNSID = 88, + RTM_DELNSID = 89, + RTM_GETNSID = 90, + RTM_NEWSTATS = 92, + RTM_GETSTATS = 94, + RTM_NEWCACHEREPORT = 96, + RTM_NEWCHAIN = 100, + RTM_DELCHAIN = 101, + RTM_GETCHAIN = 102, + RTM_NEWNEXTHOP = 104, + RTM_DELNEXTHOP = 105, + RTM_GETNEXTHOP = 106, + RTM_NEWLINKPROP = 108, + RTM_DELLINKPROP = 109, + RTM_GETLINKPROP = 110, + RTM_NEWVLAN = 112, + RTM_DELVLAN = 113, + RTM_GETVLAN = 114, + __RTM_MAX = 115, +}; + +struct nlmsg_perm { + u16 nlmsg_type; + u32 perm; +}; + +struct netif_security_struct { + struct net *ns; + int ifindex; + u32 sid; +}; + +struct sel_netif { + struct list_head list; + struct netif_security_struct nsec; + struct callback_head callback_head; +}; + +struct netnode_security_struct { + union { + __be32 ipv4; + struct in6_addr ipv6; + } addr; + u32 sid; + u16 family; +}; + +struct sel_netnode_bkt { + unsigned int size; + struct list_head list; +}; + +struct sel_netnode { + struct netnode_security_struct nsec; + struct list_head list; + struct callback_head rcu; +}; + +struct netport_security_struct { + u32 sid; + u16 port; + u8 protocol; +}; + +struct sel_netport_bkt { + int size; + struct list_head list; +}; + +struct sel_netport { + struct netport_security_struct psec; + struct list_head list; + struct callback_head rcu; +}; + +struct selinux_kernel_status { + u32 version; + u32 sequence; + u32 enforcing; + u32 policyload; + u32 deny_unknown; +}; + +struct ebitmap_node { + struct ebitmap_node *next; + long unsigned int maps[6]; + u32 startbit; +}; + +struct ebitmap { + struct ebitmap_node *node; + u32 highbit; +}; + +struct policy_file { + char *data; + size_t len; +}; + +struct hashtab_node { + void *key; + void *datum; + 
struct hashtab_node *next; +}; + +struct hashtab { + struct hashtab_node **htable; + u32 size; + u32 nel; +}; + +struct hashtab_info { + u32 slots_used; + u32 max_chain_len; +}; + +struct hashtab_key_params { + u32 (*hash)(const void *); + int (*cmp)(const void *, const void *); +}; + +struct symtab { + struct hashtab table; + u32 nprim; +}; + +struct mls_level { + u32 sens; + struct ebitmap cat; +}; + +struct mls_range { + struct mls_level level[2]; +}; + +struct context { + u32 user; + u32 role; + u32 type; + u32 len; + struct mls_range range; + char *str; +}; + +struct sidtab_str_cache; + +struct sidtab_entry { + u32 sid; + u32 hash; + struct context context; + struct sidtab_str_cache *cache; + struct hlist_node list; +}; + +struct sidtab_str_cache { + struct callback_head rcu_member; + struct list_head lru_member; + struct sidtab_entry *parent; + u32 len; + char str[0]; +}; + +struct sidtab_node_inner; + +struct sidtab_node_leaf; + +union sidtab_entry_inner { + struct sidtab_node_inner *ptr_inner; + struct sidtab_node_leaf *ptr_leaf; +}; + +struct sidtab_node_inner { + union sidtab_entry_inner entries[512]; +}; + +struct sidtab_node_leaf { + struct sidtab_entry entries[39]; +}; + +struct sidtab_isid_entry { + int set; + struct sidtab_entry entry; +}; + +struct sidtab; + +struct sidtab_convert_params { + int (*func)(struct context *, struct context *, void *); + void *args; + struct sidtab *target; +}; + +struct sidtab { + union sidtab_entry_inner roots[4]; + u32 count; + struct sidtab_convert_params *convert; + spinlock_t lock; + u32 cache_free_slots; + struct list_head cache_lru_list; + spinlock_t cache_lock; + struct sidtab_isid_entry isids[27]; + struct hlist_head context_to_sid[512]; +}; + +struct avtab_key { + u16 source_type; + u16 target_type; + u16 target_class; + u16 specified; +}; + +struct avtab_extended_perms { + u8 specified; + u8 driver; + struct extended_perms_data perms; +}; + +struct avtab_datum { + union { + u32 data; + struct avtab_extended_perms *xperms; + } u; +}; + +struct avtab_node { + struct avtab_key key; + struct avtab_datum datum; + struct avtab_node *next; +}; + +struct avtab { + struct avtab_node **htable; + u32 nel; + u32 nslot; + u32 mask; +}; + +struct type_set; + +struct constraint_expr { + u32 expr_type; + u32 attr; + u32 op; + struct ebitmap names; + struct type_set *type_names; + struct constraint_expr *next; +}; + +struct type_set { + struct ebitmap types; + struct ebitmap negset; + u32 flags; +}; + +struct constraint_node { + u32 permissions; + struct constraint_expr *expr; + struct constraint_node *next; +}; + +struct common_datum { + u32 value; + struct symtab permissions; +}; + +struct class_datum { + u32 value; + char *comkey; + struct common_datum *comdatum; + struct symtab permissions; + struct constraint_node *constraints; + struct constraint_node *validatetrans; + char default_user; + char default_role; + char default_type; + char default_range; +}; + +struct role_datum { + u32 value; + u32 bounds; + struct ebitmap dominates; + struct ebitmap types; +}; + +struct role_allow { + u32 role; + u32 new_role; + struct role_allow *next; +}; + +struct type_datum { + u32 value; + u32 bounds; + unsigned char primary; + unsigned char attribute; +}; + +struct user_datum { + u32 value; + u32 bounds; + struct ebitmap roles; + struct mls_range range; + struct mls_level dfltlevel; +}; + +struct cond_bool_datum { + __u32 value; + int state; +}; + +struct ocontext { + union { + char *name; + struct { + u8 protocol; + u16 low_port; + u16 high_port; + } port; 
+ struct { + u32 addr; + u32 mask; + } node; + struct { + u32 addr[4]; + u32 mask[4]; + } node6; + struct { + u64 subnet_prefix; + u16 low_pkey; + u16 high_pkey; + } ibpkey; + struct { + char *dev_name; + u8 port; + } ibendport; + } u; + union { + u32 sclass; + u32 behavior; + } v; + struct context context[2]; + u32 sid[2]; + struct ocontext *next; +}; + +struct genfs { + char *fstype; + struct ocontext *head; + struct genfs *next; +}; + +struct cond_node; + +struct policydb { + int mls_enabled; + struct symtab symtab[8]; + char **sym_val_to_name[8]; + struct class_datum **class_val_to_struct; + struct role_datum **role_val_to_struct; + struct user_datum **user_val_to_struct; + struct type_datum **type_val_to_struct; + struct avtab te_avtab; + struct hashtab role_tr; + struct ebitmap filename_trans_ttypes; + struct hashtab filename_trans; + u32 compat_filename_trans_count; + struct cond_bool_datum **bool_val_to_struct; + struct avtab te_cond_avtab; + struct cond_node *cond_list; + u32 cond_list_len; + struct role_allow *role_allow; + struct ocontext *ocontexts[9]; + struct genfs *genfs; + struct hashtab range_tr; + struct ebitmap *type_attr_map_array; + struct ebitmap policycaps; + struct ebitmap permissive_map; + size_t len; + unsigned int policyvers; + unsigned int reject_unknown: 1; + unsigned int allow_unknown: 1; + u16 process_class; + u32 process_trans_perms; +}; + +struct perm_datum { + u32 value; +}; + +struct role_trans_key { + u32 role; + u32 type; + u32 tclass; +}; + +struct role_trans_datum { + u32 new_role; +}; + +struct filename_trans_key { + u32 ttype; + u16 tclass; + const char *name; +}; + +struct filename_trans_datum { + struct ebitmap stypes; + u32 otype; + struct filename_trans_datum *next; +}; + +struct level_datum { + struct mls_level *level; + unsigned char isalias; +}; + +struct cat_datum { + u32 value; + unsigned char isalias; +}; + +struct range_trans { + u32 source_type; + u32 target_type; + u32 target_class; +}; + +struct cond_expr_node; + +struct cond_expr { + struct cond_expr_node *nodes; + u32 len; +}; + +struct cond_av_list { + struct avtab_node **nodes; + u32 len; +}; + +struct cond_node { + int cur_state; + struct cond_expr expr; + struct cond_av_list true_list; + struct cond_av_list false_list; +}; + +struct policy_data { + struct policydb *p; + void *fp; +}; + +struct cond_expr_node { + u32 expr_type; + u32 bool; +}; + +struct policydb_compat_info { + int version; + int sym_num; + int ocon_num; +}; + +struct selinux_mapping; + +struct selinux_map { + struct selinux_mapping *mapping; + u16 size; +}; + +struct selinux_policy { + struct sidtab *sidtab; + struct policydb policydb; + struct selinux_map map; + u32 latest_granting; +}; + +struct selinux_mapping { + u16 value; + unsigned int num_perms; + u32 perms[32]; +}; + +struct convert_context_args { + struct selinux_state *state; + struct policydb *oldp; + struct policydb *newp; +}; + +struct selinux_audit_rule { + u32 au_seqno; + struct context au_ctxt; +}; + +struct cond_insertf_data { + struct policydb *p; + struct avtab_node **dst; + struct cond_av_list *other; +}; + +struct rt6key { + struct in6_addr addr; + int plen; +}; + +struct rtable; + +struct fnhe_hash_bucket; + +struct fib_nh_common { + struct net_device *nhc_dev; + int nhc_oif; + unsigned char nhc_scope; + u8 nhc_family; + u8 nhc_gw_family; + unsigned char nhc_flags; + struct lwtunnel_state *nhc_lwtstate; + union { + __be32 ipv4; + struct in6_addr ipv6; + } nhc_gw; + int nhc_weight; + atomic_t nhc_upper_bound; + struct rtable 
**nhc_pcpu_rth_output; + struct rtable *nhc_rth_input; + struct fnhe_hash_bucket *nhc_exceptions; +}; + +struct rt6_exception_bucket; + +struct fib6_nh { + struct fib_nh_common nh_common; + long unsigned int last_probe; + struct rt6_info **rt6i_pcpu; + struct rt6_exception_bucket *rt6i_exception_bucket; +}; + +struct fib6_node; + +struct dst_metrics; + +struct nexthop; + +struct fib6_info { + struct fib6_table *fib6_table; + struct fib6_info *fib6_next; + struct fib6_node *fib6_node; + union { + struct list_head fib6_siblings; + struct list_head nh_list; + }; + unsigned int fib6_nsiblings; + refcount_t fib6_ref; + long unsigned int expires; + struct dst_metrics *fib6_metrics; + struct rt6key fib6_dst; + u32 fib6_flags; + struct rt6key fib6_src; + struct rt6key fib6_prefsrc; + u32 fib6_metric; + u8 fib6_protocol; + u8 fib6_type; + u8 should_flush: 1; + u8 dst_nocount: 1; + u8 dst_nopolicy: 1; + u8 fib6_destroying: 1; + u8 offload: 1; + u8 trap: 1; + u8 unused: 2; + struct callback_head rcu; + struct nexthop *nh; + struct fib6_nh fib6_nh[0]; +}; + +struct uncached_list; + +struct rt6_info { + struct dst_entry dst; + struct fib6_info *from; + int sernum; + struct rt6key rt6i_dst; + struct rt6key rt6i_src; + struct in6_addr rt6i_gateway; + struct inet6_dev *rt6i_idev; + u32 rt6i_flags; + struct list_head rt6i_uncached; + struct uncached_list *rt6i_uncached_list; + short unsigned int rt6i_nfheader_len; +}; + +struct rt6_statistics { + __u32 fib_nodes; + __u32 fib_route_nodes; + __u32 fib_rt_entries; + __u32 fib_rt_cache; + __u32 fib_discarded_routes; + atomic_t fib_rt_alloc; + atomic_t fib_rt_uncache; +}; + +struct fib6_node { + struct fib6_node *parent; + struct fib6_node *left; + struct fib6_node *right; + struct fib6_node *subtree; + struct fib6_info *leaf; + __u16 fn_bit; + __u16 fn_flags; + int fn_sernum; + struct fib6_info *rr_ptr; + struct callback_head rcu; +}; + +struct fib6_table { + struct hlist_node tb6_hlist; + u32 tb6_id; + spinlock_t tb6_lock; + struct fib6_node tb6_root; + struct inet_peer_base tb6_peers; + unsigned int flags; + unsigned int fib_seq; +}; + +typedef union { + __be32 a4; + __be32 a6[4]; + struct in6_addr in6; +} xfrm_address_t; + +struct xfrm_id { + xfrm_address_t daddr; + __be32 spi; + __u8 proto; +}; + +struct xfrm_selector { + xfrm_address_t daddr; + xfrm_address_t saddr; + __be16 dport; + __be16 dport_mask; + __be16 sport; + __be16 sport_mask; + __u16 family; + __u8 prefixlen_d; + __u8 prefixlen_s; + __u8 proto; + int ifindex; + __kernel_uid32_t user; +}; + +struct xfrm_lifetime_cfg { + __u64 soft_byte_limit; + __u64 hard_byte_limit; + __u64 soft_packet_limit; + __u64 hard_packet_limit; + __u64 soft_add_expires_seconds; + __u64 hard_add_expires_seconds; + __u64 soft_use_expires_seconds; + __u64 hard_use_expires_seconds; +}; + +struct xfrm_lifetime_cur { + __u64 bytes; + __u64 packets; + __u64 add_time; + __u64 use_time; +}; + +struct xfrm_replay_state { + __u32 oseq; + __u32 seq; + __u32 bitmap; +}; + +struct xfrm_replay_state_esn { + unsigned int bmp_len; + __u32 oseq; + __u32 seq; + __u32 oseq_hi; + __u32 seq_hi; + __u32 replay_window; + __u32 bmp[0]; +}; + +struct xfrm_algo { + char alg_name[64]; + unsigned int alg_key_len; + char alg_key[0]; +}; + +struct xfrm_algo_auth { + char alg_name[64]; + unsigned int alg_key_len; + unsigned int alg_trunc_len; + char alg_key[0]; +}; + +struct xfrm_algo_aead { + char alg_name[64]; + unsigned int alg_key_len; + unsigned int alg_icv_len; + char alg_key[0]; +}; + +struct xfrm_stats { + __u32 replay_window; + __u32 
replay; + __u32 integrity_failed; +}; + +enum { + XFRM_POLICY_TYPE_MAIN = 0, + XFRM_POLICY_TYPE_SUB = 1, + XFRM_POLICY_TYPE_MAX = 2, + XFRM_POLICY_TYPE_ANY = 255, +}; + +struct xfrm_encap_tmpl { + __u16 encap_type; + __be16 encap_sport; + __be16 encap_dport; + xfrm_address_t encap_oa; +}; + +enum xfrm_attr_type_t { + XFRMA_UNSPEC = 0, + XFRMA_ALG_AUTH = 1, + XFRMA_ALG_CRYPT = 2, + XFRMA_ALG_COMP = 3, + XFRMA_ENCAP = 4, + XFRMA_TMPL = 5, + XFRMA_SA = 6, + XFRMA_POLICY = 7, + XFRMA_SEC_CTX = 8, + XFRMA_LTIME_VAL = 9, + XFRMA_REPLAY_VAL = 10, + XFRMA_REPLAY_THRESH = 11, + XFRMA_ETIMER_THRESH = 12, + XFRMA_SRCADDR = 13, + XFRMA_COADDR = 14, + XFRMA_LASTUSED = 15, + XFRMA_POLICY_TYPE = 16, + XFRMA_MIGRATE = 17, + XFRMA_ALG_AEAD = 18, + XFRMA_KMADDRESS = 19, + XFRMA_ALG_AUTH_TRUNC = 20, + XFRMA_MARK = 21, + XFRMA_TFCPAD = 22, + XFRMA_REPLAY_ESN_VAL = 23, + XFRMA_SA_EXTRA_FLAGS = 24, + XFRMA_PROTO = 25, + XFRMA_ADDRESS_FILTER = 26, + XFRMA_PAD = 27, + XFRMA_OFFLOAD_DEV = 28, + XFRMA_SET_MARK = 29, + XFRMA_SET_MARK_MASK = 30, + XFRMA_IF_ID = 31, + __XFRMA_MAX = 32, +}; + +struct xfrm_mark { + __u32 v; + __u32 m; +}; + +struct xfrm_address_filter { + xfrm_address_t saddr; + xfrm_address_t daddr; + __u16 family; + __u8 splen; + __u8 dplen; +}; + +struct xfrm_state_walk { + struct list_head all; + u8 state; + u8 dying; + u8 proto; + u32 seq; + struct xfrm_address_filter *filter; +}; + +struct xfrm_state_offload { + struct net_device *dev; + struct net_device *real_dev; + long unsigned int offload_handle; + unsigned int num_exthdrs; + u8 flags; +}; + +struct xfrm_mode { + u8 encap; + u8 family; + u8 flags; +}; + +struct xfrm_replay; + +struct xfrm_type; + +struct xfrm_type_offload; + +struct xfrm_state { + possible_net_t xs_net; + union { + struct hlist_node gclist; + struct hlist_node bydst; + }; + struct hlist_node bysrc; + struct hlist_node byspi; + refcount_t refcnt; + spinlock_t lock; + struct xfrm_id id; + struct xfrm_selector sel; + struct xfrm_mark mark; + u32 if_id; + u32 tfcpad; + u32 genid; + struct xfrm_state_walk km; + struct { + u32 reqid; + u8 mode; + u8 replay_window; + u8 aalgo; + u8 ealgo; + u8 calgo; + u8 flags; + u16 family; + xfrm_address_t saddr; + int header_len; + int trailer_len; + u32 extra_flags; + struct xfrm_mark smark; + } props; + struct xfrm_lifetime_cfg lft; + struct xfrm_algo_auth *aalg; + struct xfrm_algo *ealg; + struct xfrm_algo *calg; + struct xfrm_algo_aead *aead; + const char *geniv; + struct xfrm_encap_tmpl *encap; + struct sock *encap_sk; + xfrm_address_t *coaddr; + struct xfrm_state *tunnel; + atomic_t tunnel_users; + struct xfrm_replay_state replay; + struct xfrm_replay_state_esn *replay_esn; + struct xfrm_replay_state preplay; + struct xfrm_replay_state_esn *preplay_esn; + const struct xfrm_replay *repl; + u32 xflags; + u32 replay_maxage; + u32 replay_maxdiff; + struct timer_list rtimer; + struct xfrm_stats stats; + struct xfrm_lifetime_cur curlft; + struct hrtimer mtimer; + struct xfrm_state_offload xso; + long int saved_tmo; + time64_t lastused; + struct page_frag xfrag; + const struct xfrm_type *type; + struct xfrm_mode inner_mode; + struct xfrm_mode inner_mode_iaf; + struct xfrm_mode outer_mode; + const struct xfrm_type_offload *type_offload; + struct xfrm_sec_ctx *security; + void *data; +}; + +struct dst_metrics { + u32 metrics[17]; + refcount_t refcnt; +}; + +struct xfrm_policy_walk_entry { + struct list_head all; + u8 dead; +}; + +struct xfrm_policy_queue { + struct sk_buff_head hold_queue; + struct timer_list hold_timer; + long unsigned int 
timeout; +}; + +struct xfrm_tmpl { + struct xfrm_id id; + xfrm_address_t saddr; + short unsigned int encap_family; + u32 reqid; + u8 mode; + u8 share; + u8 optional; + u8 allalgs; + u32 aalgos; + u32 ealgos; + u32 calgos; +}; + +struct xfrm_policy { + possible_net_t xp_net; + struct hlist_node bydst; + struct hlist_node byidx; + rwlock_t lock; + refcount_t refcnt; + u32 pos; + struct timer_list timer; + atomic_t genid; + u32 priority; + u32 index; + u32 if_id; + struct xfrm_mark mark; + struct xfrm_selector selector; + struct xfrm_lifetime_cfg lft; + struct xfrm_lifetime_cur curlft; + struct xfrm_policy_walk_entry walk; + struct xfrm_policy_queue polq; + bool bydst_reinsert; + u8 type; + u8 action; + u8 flags; + u8 xfrm_nr; + u16 family; + struct xfrm_sec_ctx *security; + struct xfrm_tmpl xfrm_vec[6]; + struct hlist_node bydst_inexact_list; + struct callback_head rcu; +}; + +struct udp_hslot; + +struct udp_table { + struct udp_hslot *hash; + struct udp_hslot *hash2; + unsigned int mask; + unsigned int log; +}; + +struct fib_nh_exception { + struct fib_nh_exception *fnhe_next; + int fnhe_genid; + __be32 fnhe_daddr; + u32 fnhe_pmtu; + bool fnhe_mtu_locked; + __be32 fnhe_gw; + long unsigned int fnhe_expires; + struct rtable *fnhe_rth_input; + struct rtable *fnhe_rth_output; + long unsigned int fnhe_stamp; + struct callback_head rcu; +}; + +struct rtable { + struct dst_entry dst; + int rt_genid; + unsigned int rt_flags; + __u16 rt_type; + __u8 rt_is_input; + __u8 rt_uses_gateway; + int rt_iif; + u8 rt_gw_family; + union { + __be32 rt_gw4; + struct in6_addr rt_gw6; + }; + u32 rt_mtu_locked: 1; + u32 rt_pmtu: 31; + struct list_head rt_uncached; + struct uncached_list *rt_uncached_list; +}; + +struct fnhe_hash_bucket { + struct fib_nh_exception *chain; +}; + +struct rt6_exception_bucket { + struct hlist_head chain; + int depth; +}; + +struct xfrm_replay { + void (*advance)(struct xfrm_state *, __be32); + int (*check)(struct xfrm_state *, struct sk_buff *, __be32); + int (*recheck)(struct xfrm_state *, struct sk_buff *, __be32); + void (*notify)(struct xfrm_state *, int); + int (*overflow)(struct xfrm_state *, struct sk_buff *); +}; + +struct xfrm_type { + char *description; + struct module *owner; + u8 proto; + u8 flags; + int (*init_state)(struct xfrm_state *); + void (*destructor)(struct xfrm_state *); + int (*input)(struct xfrm_state *, struct sk_buff *); + int (*output)(struct xfrm_state *, struct sk_buff *); + int (*reject)(struct xfrm_state *, struct sk_buff *, const struct flowi *); + int (*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **); +}; + +struct xfrm_type_offload { + char *description; + struct module *owner; + u8 proto; + void (*encap)(struct xfrm_state *, struct sk_buff *); + int (*input_tail)(struct xfrm_state *, struct sk_buff *); + int (*xmit)(struct xfrm_state *, struct sk_buff *, netdev_features_t); +}; + +struct xfrm_dst { + union { + struct dst_entry dst; + struct rtable rt; + struct rt6_info rt6; + } u; + struct dst_entry *route; + struct dst_entry *child; + struct dst_entry *path; + struct xfrm_policy *pols[2]; + int num_pols; + int num_xfrms; + u32 xfrm_genid; + u32 policy_genid; + u32 route_mtu_cached; + u32 child_mtu_cached; + u32 route_cookie; + u32 path_cookie; +}; + +struct xfrm_offload { + struct { + __u32 low; + __u32 hi; + } seq; + __u32 flags; + __u32 status; + __u8 proto; +}; + +struct sec_path { + int len; + int olen; + struct xfrm_state *xvec[6]; + struct xfrm_offload ovec[1]; +}; + +struct udp_hslot { + struct hlist_head head; + int count; + 
spinlock_t lock; +}; + +struct smack_audit_data { + const char *function; + char *subject; + char *object; + char *request; + int result; +}; + +struct smack_known { + struct list_head list; + struct hlist_node smk_hashed; + char *smk_known; + u32 smk_secid; + struct netlbl_lsm_secattr smk_netlabel; + struct list_head smk_rules; + struct mutex smk_rules_lock; +}; + +struct superblock_smack { + struct smack_known *smk_root; + struct smack_known *smk_floor; + struct smack_known *smk_hat; + struct smack_known *smk_default; + int smk_flags; +}; + +struct socket_smack { + struct smack_known *smk_out; + struct smack_known *smk_in; + struct smack_known *smk_packet; + int smk_state; +}; + +struct inode_smack { + struct smack_known *smk_inode; + struct smack_known *smk_task; + struct smack_known *smk_mmap; + int smk_flags; +}; + +struct task_smack { + struct smack_known *smk_task; + struct smack_known *smk_forked; + struct list_head smk_rules; + struct mutex smk_rules_lock; + struct list_head smk_relabel; +}; + +struct smack_rule { + struct list_head list; + struct smack_known *smk_subject; + struct smack_known *smk_object; + int smk_access; +}; + +struct smk_net4addr { + struct list_head list; + struct in_addr smk_host; + struct in_addr smk_mask; + int smk_masks; + struct smack_known *smk_label; +}; + +struct smk_net6addr { + struct list_head list; + struct in6_addr smk_host; + struct in6_addr smk_mask; + int smk_masks; + struct smack_known *smk_label; +}; + +struct smack_known_list_elem { + struct list_head list; + struct smack_known *smk_label; +}; + +enum { + Opt_error___3 = 4294967295, + Opt_fsdefault = 0, + Opt_fsfloor = 1, + Opt_fshat = 2, + Opt_fsroot = 3, + Opt_fstransmute = 4, +}; + +struct smk_audit_info { + struct common_audit_data a; + struct smack_audit_data sad; +}; + +struct smack_mnt_opts { + const char *fsdefault; + const char *fsfloor; + const char *fshat; + const char *fsroot; + const char *fstransmute; +}; + +struct netlbl_audit { + u32 secid; + kuid_t loginuid; + unsigned int sessionid; +}; + +struct cipso_v4_std_map_tbl { + struct { + u32 *cipso; + u32 *local; + u32 cipso_size; + u32 local_size; + } lvl; + struct { + u32 *cipso; + u32 *local; + u32 cipso_size; + u32 local_size; + } cat; +}; + +struct cipso_v4_doi { + u32 doi; + u32 type; + union { + struct cipso_v4_std_map_tbl *std; + } map; + u8 tags[5]; + refcount_t refcount; + struct list_head list; + struct callback_head rcu; +}; + +enum smk_inos { + SMK_ROOT_INO = 2, + SMK_LOAD = 3, + SMK_CIPSO = 4, + SMK_DOI = 5, + SMK_DIRECT = 6, + SMK_AMBIENT = 7, + SMK_NET4ADDR = 8, + SMK_ONLYCAP = 9, + SMK_LOGGING = 10, + SMK_LOAD_SELF = 11, + SMK_ACCESSES = 12, + SMK_MAPPED = 13, + SMK_LOAD2 = 14, + SMK_LOAD_SELF2 = 15, + SMK_ACCESS2 = 16, + SMK_CIPSO2 = 17, + SMK_REVOKE_SUBJ = 18, + SMK_CHANGE_RULE = 19, + SMK_SYSLOG = 20, + SMK_PTRACE = 21, + SMK_NET6ADDR = 23, + SMK_RELABEL_SELF = 24, +}; + +struct smack_parsed_rule { + struct smack_known *smk_subject; + struct smack_known *smk_object; + int smk_access1; + int smk_access2; +}; + +struct sockaddr_un { + __kernel_sa_family_t sun_family; + char sun_path[108]; +}; + +struct unix_address { + refcount_t refcnt; + int len; + unsigned int hash; + struct sockaddr_un name[0]; +}; + +struct scm_stat { + atomic_t nr_fds; +}; + +struct unix_sock { + struct sock sk; + struct unix_address *addr; + struct path path; + struct mutex iolock; + struct mutex bindlock; + struct sock *peer; + struct list_head link; + atomic_long_t inflight; + spinlock_t lock; + long unsigned int gc_flags; + struct 
socket_wq peer_wq; + wait_queue_entry_t peer_wake; + struct scm_stat scm_stat; + long: 32; + long: 64; + long: 64; +}; + +typedef unsigned char Byte; + +typedef long unsigned int uLong; + +struct internal_state; + +struct z_stream_s { + const Byte *next_in; + uLong avail_in; + uLong total_in; + Byte *next_out; + uLong avail_out; + uLong total_out; + char *msg; + struct internal_state *state; + void *workspace; + int data_type; + uLong adler; + uLong reserved; +}; + +struct internal_state { + int dummy; +}; + +enum audit_mode { + AUDIT_NORMAL = 0, + AUDIT_QUIET_DENIED = 1, + AUDIT_QUIET = 2, + AUDIT_NOQUIET = 3, + AUDIT_ALL = 4, +}; + +enum aa_sfs_type { + AA_SFS_TYPE_BOOLEAN = 0, + AA_SFS_TYPE_STRING = 1, + AA_SFS_TYPE_U64 = 2, + AA_SFS_TYPE_FOPS = 3, + AA_SFS_TYPE_DIR = 4, +}; + +struct aa_sfs_entry { + const char *name; + struct dentry *dentry; + umode_t mode; + enum aa_sfs_type v_type; + union { + bool boolean; + char *string; + long unsigned int u64; + struct aa_sfs_entry *files; + } v; + const struct file_operations *file_ops; +}; + +enum aafs_ns_type { + AAFS_NS_DIR = 0, + AAFS_NS_PROFS = 1, + AAFS_NS_NS = 2, + AAFS_NS_RAW_DATA = 3, + AAFS_NS_LOAD = 4, + AAFS_NS_REPLACE = 5, + AAFS_NS_REMOVE = 6, + AAFS_NS_REVISION = 7, + AAFS_NS_COUNT = 8, + AAFS_NS_MAX_COUNT = 9, + AAFS_NS_SIZE = 10, + AAFS_NS_MAX_SIZE = 11, + AAFS_NS_OWNER = 12, + AAFS_NS_SIZEOF = 13, +}; + +enum aafs_prof_type { + AAFS_PROF_DIR = 0, + AAFS_PROF_PROFS = 1, + AAFS_PROF_NAME = 2, + AAFS_PROF_MODE = 3, + AAFS_PROF_ATTACH = 4, + AAFS_PROF_HASH = 5, + AAFS_PROF_RAW_DATA = 6, + AAFS_PROF_RAW_HASH = 7, + AAFS_PROF_RAW_ABI = 8, + AAFS_PROF_SIZEOF = 9, +}; + +struct table_header { + u16 td_id; + u16 td_flags; + u32 td_hilen; + u32 td_lolen; + char td_data[0]; +}; + +struct aa_dfa { + struct kref count; + u16 flags; + u32 max_oob; + struct table_header *tables[8]; +}; + +struct aa_policy { + const char *name; + char *hname; + struct list_head list; + struct list_head profiles; +}; + +struct aa_labelset { + rwlock_t lock; + struct rb_root root; +}; + +enum label_flags { + FLAG_HAT = 1, + FLAG_UNCONFINED = 2, + FLAG_NULL = 4, + FLAG_IX_ON_NAME_ERROR = 8, + FLAG_IMMUTIBLE = 16, + FLAG_USER_DEFINED = 32, + FLAG_NO_LIST_REF = 64, + FLAG_NS_COUNT = 128, + FLAG_IN_TREE = 256, + FLAG_PROFILE = 512, + FLAG_EXPLICIT = 1024, + FLAG_STALE = 2048, + FLAG_RENAMED = 4096, + FLAG_REVOKED = 8192, +}; + +struct aa_label; + +struct aa_proxy { + struct kref count; + struct aa_label *label; +}; + +struct aa_profile; + +struct aa_label { + struct kref count; + struct rb_node node; + struct callback_head rcu; + struct aa_proxy *proxy; + char *hname; + long int flags; + u32 secid; + int size; + struct aa_profile *vec[0]; +}; + +struct label_it { + int i; + int j; +}; + +struct aa_policydb { + struct aa_dfa *dfa; + unsigned int start[17]; +}; + +struct aa_domain { + int size; + char **table; +}; + +struct aa_file_rules { + unsigned int start; + struct aa_dfa *dfa; + struct aa_domain trans; +}; + +struct aa_caps { + kernel_cap_t allow; + kernel_cap_t audit; + kernel_cap_t denied; + kernel_cap_t quiet; + kernel_cap_t kill; + kernel_cap_t extended; +}; + +struct aa_rlimit { + unsigned int mask; + struct rlimit limits[16]; +}; + +struct aa_ns; + +struct aa_secmark; + +struct aa_loaddata; + +struct aa_profile { + struct aa_policy base; + struct aa_profile *parent; + struct aa_ns *ns; + const char *rename; + const char *attach; + struct aa_dfa *xmatch; + int xmatch_len; + enum audit_mode audit; + long int mode; + u32 path_flags; + const char 
*disconnected; + int size; + struct aa_policydb policy; + struct aa_file_rules file; + struct aa_caps caps; + int xattr_count; + char **xattrs; + struct aa_rlimit rlimits; + int secmark_count; + struct aa_secmark *secmark; + struct aa_loaddata *rawdata; + unsigned char *hash; + char *dirname; + struct dentry *dents[9]; + struct rhashtable *data; + struct aa_label label; +}; + +struct aa_perms { + u32 allow; + u32 audit; + u32 deny; + u32 quiet; + u32 kill; + u32 stop; + u32 complain; + u32 cond; + u32 hide; + u32 prompt; + u16 xindex; +}; + +struct path_cond { + kuid_t uid; + umode_t mode; +}; + +struct aa_secmark { + u8 audit; + u8 deny; + u32 secid; + char *label; +}; + +enum profile_mode { + APPARMOR_ENFORCE = 0, + APPARMOR_COMPLAIN = 1, + APPARMOR_KILL = 2, + APPARMOR_UNCONFINED = 3, +}; + +struct aa_data { + char *key; + u32 size; + char *data; + struct rhash_head head; +}; + +struct aa_ns_acct { + int max_size; + int max_count; + int size; + int count; +}; + +struct aa_ns { + struct aa_policy base; + struct aa_ns *parent; + struct mutex lock; + struct aa_ns_acct acct; + struct aa_profile *unconfined; + struct list_head sub_ns; + atomic_t uniq_null; + long int uniq_id; + int level; + long int revision; + wait_queue_head_t wait; + struct aa_labelset labels; + struct list_head rawdata_list; + struct dentry *dents[13]; +}; + +struct aa_loaddata { + struct kref count; + struct list_head list; + struct work_struct work; + struct dentry *dents[6]; + struct aa_ns *ns; + char *name; + size_t size; + size_t compressed_size; + long int revision; + int abi; + unsigned char *hash; + char *data; +}; + +enum { + AAFS_LOADDATA_ABI = 0, + AAFS_LOADDATA_REVISION = 1, + AAFS_LOADDATA_HASH = 2, + AAFS_LOADDATA_DATA = 3, + AAFS_LOADDATA_COMPRESSED_SIZE = 4, + AAFS_LOADDATA_DIR = 5, + AAFS_LOADDATA_NDENTS = 6, +}; + +struct rawdata_f_data { + struct aa_loaddata *loaddata; +}; + +struct aa_revision { + struct aa_ns *ns; + long int last_read; +}; + +struct multi_transaction { + struct kref count; + ssize_t size; + char data[0]; +}; + +struct apparmor_audit_data { + int error; + int type; + const char *op; + struct aa_label *label; + const char *name; + const char *info; + u32 request; + u32 denied; + union { + struct { + struct aa_label *peer; + union { + struct { + const char *target; + kuid_t ouid; + } fs; + struct { + int rlim; + long unsigned int max; + } rlim; + struct { + int signal; + int unmappedsig; + }; + struct { + int type; + int protocol; + struct sock *peer_sk; + void *addr; + int addrlen; + } net; + }; + }; + struct { + struct aa_profile *profile; + const char *ns; + long int pos; + } iface; + struct { + const char *src_name; + const char *type; + const char *trans; + const char *data; + long unsigned int flags; + } mnt; + }; +}; + +enum audit_type { + AUDIT_APPARMOR_AUDIT = 0, + AUDIT_APPARMOR_ALLOWED = 1, + AUDIT_APPARMOR_DENIED = 2, + AUDIT_APPARMOR_HINT = 3, + AUDIT_APPARMOR_STATUS = 4, + AUDIT_APPARMOR_ERROR = 5, + AUDIT_APPARMOR_KILL = 6, + AUDIT_APPARMOR_AUTO = 7, +}; + +struct aa_audit_rule { + struct aa_label *label; +}; + +struct audit_cache { + struct aa_profile *profile; + kernel_cap_t caps; +}; + +struct aa_task_ctx { + struct aa_label *nnp; + struct aa_label *onexec; + struct aa_label *previous; + u64 token; +}; + +struct counted_str { + struct kref count; + char name[0]; +}; + +struct match_workbuf { + unsigned int count; + unsigned int pos; + unsigned int len; + unsigned int size; + unsigned int history[24]; +}; + +enum path_flags { + PATH_IS_DIR = 1, + PATH_CONNECT_PATH = 4, 
+ PATH_CHROOT_REL = 8, + PATH_CHROOT_NSCONNECT = 16, + PATH_DELEGATE_DELETED = 32768, + PATH_MEDIATE_DELETED = 65536, +}; + +struct aa_load_ent { + struct list_head list; + struct aa_profile *new; + struct aa_profile *old; + struct aa_profile *rename; + const char *ns_name; +}; + +enum aa_code { + AA_U8 = 0, + AA_U16 = 1, + AA_U32 = 2, + AA_U64 = 3, + AA_NAME = 4, + AA_STRING = 5, + AA_BLOB = 6, + AA_STRUCT = 7, + AA_STRUCTEND = 8, + AA_LIST = 9, + AA_LISTEND = 10, + AA_ARRAY = 11, + AA_ARRAYEND = 12, +}; + +struct aa_ext { + void *start; + void *end; + void *pos; + u32 version; +}; + +struct aa_file_ctx { + spinlock_t lock; + struct aa_label *label; + u32 allow; +}; + +struct aa_sk_ctx { + struct aa_label *label; + struct aa_label *peer; +}; + +union aa_buffer { + struct list_head list; + char buffer[1]; +}; + +struct ptrace_relation { + struct task_struct *tracer; + struct task_struct *tracee; + bool invalid; + struct list_head node; + struct callback_head rcu; +}; + +struct access_report_info { + struct callback_head work; + const char *access; + struct task_struct *target; + struct task_struct *agent; +}; + +enum sid_policy_type { + SIDPOL_DEFAULT = 0, + SIDPOL_CONSTRAINED = 1, + SIDPOL_ALLOWED = 2, +}; + +typedef union { + kuid_t uid; + kgid_t gid; +} kid_t; + +enum setid_type { + UID = 0, + GID = 1, +}; + +struct setid_rule { + struct hlist_node next; + kid_t src_id; + kid_t dst_id; + enum setid_type type; +}; + +struct setid_ruleset { + struct hlist_head rules[256]; + char *policy_str; + struct callback_head rcu; + enum setid_type type; +}; + +enum devcg_behavior { + DEVCG_DEFAULT_NONE = 0, + DEVCG_DEFAULT_ALLOW = 1, + DEVCG_DEFAULT_DENY = 2, +}; + +struct dev_exception_item { + u32 major; + u32 minor; + short int type; + short int access; + struct list_head list; + struct callback_head rcu; +}; + +struct dev_cgroup { + struct cgroup_subsys_state css; + struct list_head exceptions; + enum devcg_behavior behavior; +}; + +struct altha_list_struct { + struct path path; + char *spath; + char *spath_p; + struct list_head list; +}; + +struct kiosk_list_struct { + struct path path; + struct list_head list; +}; + +enum kiosk_cmd { + KIOSK_UNSPEC = 0, + KIOSK_REQUEST = 1, + KIOSK_REPLY = 2, + KIOSK_CMD_LAST = 3, +}; + +enum kiosk_mode { + KIOSK_PERMISSIVE = 0, + KIOSK_NONSYSTEM = 1, + KIOSK_MODE_LAST = 2, +}; + +enum kiosk_action { + KIOSK_SET_MODE = 0, + KIOSK_USERLIST_ADD = 1, + KIOSK_USERLIST_DEL = 2, + KIOSK_USER_LIST = 3, +}; + +enum kiosk_attrs { + KIOSK_NOATTR = 0, + KIOSK_ACTION = 1, + KIOSK_DATA = 2, + KIOSK_MAX_ATTR = 3, +}; + +enum integrity_status { + INTEGRITY_PASS = 0, + INTEGRITY_PASS_IMMUTABLE = 1, + INTEGRITY_FAIL = 2, + INTEGRITY_NOLABEL = 3, + INTEGRITY_NOXATTRS = 4, + INTEGRITY_UNKNOWN = 5, +}; + +struct ima_digest_data { + u8 algo; + u8 length; + union { + struct { + u8 unused; + u8 type; + } sha1; + struct { + u8 type; + u8 algo; + } ng; + u8 data[2]; + } xattr; + u8 digest[0]; +}; + +struct integrity_iint_cache { + struct rb_node rb_node; + struct mutex mutex; + struct inode *inode; + u64 version; + long unsigned int flags; + long unsigned int measured_pcrs; + long unsigned int atomic_flags; + enum integrity_status ima_file_status: 4; + enum integrity_status ima_mmap_status: 4; + enum integrity_status ima_bprm_status: 4; + enum integrity_status ima_read_status: 4; + enum integrity_status ima_creds_status: 4; + enum integrity_status evm_status: 4; + struct ima_digest_data *ima_hash; +}; + +struct modsig; + +struct asymmetric_key_id; + +struct public_key_signature { + 
struct asymmetric_key_id *auth_ids[2]; + u8 *s; + u32 s_size; + u8 *digest; + u8 digest_size; + const char *pkey_algo; + const char *hash_algo; + const char *encoding; + const void *data; + unsigned int data_size; +}; + +struct asymmetric_key_id { + short unsigned int len; + unsigned char data[0]; +}; + +struct signature_v2_hdr { + uint8_t type; + uint8_t version; + uint8_t hash_algo; + __be32 keyid; + __be16 sig_size; + uint8_t sig[0]; +} __attribute__((packed)); + +struct tpm_digest { + u16 alg_id; + u8 digest[64]; +}; + +struct evm_ima_xattr_data { + u8 type; + u8 data[0]; +}; + +enum ima_show_type { + IMA_SHOW_BINARY = 0, + IMA_SHOW_BINARY_NO_FIELD_LEN = 1, + IMA_SHOW_BINARY_OLD_STRING_FMT = 2, + IMA_SHOW_ASCII = 3, +}; + +struct ima_event_data { + struct integrity_iint_cache *iint; + struct file *file; + const unsigned char *filename; + struct evm_ima_xattr_data *xattr_value; + int xattr_len; + const struct modsig *modsig; + const char *violation; + const void *buf; + int buf_len; +}; + +struct ima_field_data { + u8 *data; + u32 len; +}; + +struct ima_template_field { + const char field_id[16]; + int (*field_init)(struct ima_event_data *, struct ima_field_data *); + void (*field_show)(struct seq_file *, enum ima_show_type, struct ima_field_data *); +}; + +struct ima_template_desc { + struct list_head list; + char *name; + char *fmt; + int num_fields; + const struct ima_template_field **fields; +}; + +struct ima_template_entry { + int pcr; + struct tpm_digest *digests; + struct ima_template_desc *template_desc; + u32 template_data_len; + struct ima_field_data template_data[0]; +}; + +struct ima_queue_entry { + struct hlist_node hnext; + struct list_head later; + struct ima_template_entry *entry; +}; + +struct ima_h_table { + atomic_long_t len; + atomic_long_t violations; + struct hlist_head queue[1024]; +}; + +enum ima_fs_flags { + IMA_FS_BUSY = 0, +}; + +struct hwrng { + const char *name; + int (*init)(struct hwrng *); + void (*cleanup)(struct hwrng *); + int (*data_present)(struct hwrng *, int); + int (*data_read)(struct hwrng *, u32 *); + int (*read)(struct hwrng *, void *, size_t, bool); + long unsigned int priv; + short unsigned int quality; + struct list_head list; + struct kref ref; + struct completion cleanup_done; +}; + +struct tpm_bank_info { + u16 alg_id; + u16 digest_size; + u16 crypto_id; +}; + +struct tpm_chip; + +struct tpm_class_ops { + unsigned int flags; + const u8 req_complete_mask; + const u8 req_complete_val; + bool (*req_canceled)(struct tpm_chip *, u8); + int (*recv)(struct tpm_chip *, u8 *, size_t); + int (*send)(struct tpm_chip *, u8 *, size_t); + void (*cancel)(struct tpm_chip *); + u8 (*status)(struct tpm_chip *); + void (*update_timeouts)(struct tpm_chip *, long unsigned int *); + void (*update_durations)(struct tpm_chip *, long unsigned int *); + int (*go_idle)(struct tpm_chip *); + int (*cmd_ready)(struct tpm_chip *); + int (*request_locality)(struct tpm_chip *, int); + int (*relinquish_locality)(struct tpm_chip *, int); + void (*clk_enable)(struct tpm_chip *, bool); +}; + +struct tpm_bios_log { + void *bios_event_log; + void *bios_event_log_end; +}; + +struct tpm_chip_seqops { + struct tpm_chip *chip; + const struct seq_operations *seqops; +}; + +struct tpm_space { + u32 context_tbl[3]; + u8 *context_buf; + u32 session_tbl[3]; + u8 *session_buf; + u32 buf_size; +}; + +struct tpm_chip { + struct device dev; + struct device devs; + struct cdev cdev; + struct cdev cdevs; + struct rw_semaphore ops_sem; + const struct tpm_class_ops *ops; + struct tpm_bios_log 
log; + struct tpm_chip_seqops bin_log_seqops; + struct tpm_chip_seqops ascii_log_seqops; + unsigned int flags; + int dev_num; + long unsigned int is_open; + char hwrng_name[64]; + struct hwrng hwrng; + struct mutex tpm_mutex; + long unsigned int timeout_a; + long unsigned int timeout_b; + long unsigned int timeout_c; + long unsigned int timeout_d; + bool timeout_adjusted; + long unsigned int duration[4]; + bool duration_adjusted; + struct dentry *bios_dir[3]; + const struct attribute_group *groups[3]; + unsigned int groups_cnt; + u32 nr_allocated_banks; + struct tpm_bank_info *allocated_banks; + acpi_handle acpi_dev_handle; + char ppi_version[4]; + struct tpm_space work_space; + u32 last_cc; + u32 nr_commands; + u32 *cc_attrs_tbl; + int locality; +}; + +enum evm_ima_xattr_type { + IMA_XATTR_DIGEST = 1, + EVM_XATTR_HMAC = 2, + EVM_IMA_XATTR_DIGSIG = 3, + IMA_XATTR_DIGEST_NG = 4, + EVM_XATTR_PORTABLE_DIGSIG = 5, + IMA_XATTR_LAST = 6, +}; + +enum ima_hooks { + NONE = 0, + FILE_CHECK = 1, + MMAP_CHECK = 2, + BPRM_CHECK = 3, + CREDS_CHECK = 4, + POST_SETATTR = 5, + MODULE_CHECK = 6, + FIRMWARE_CHECK = 7, + KEXEC_KERNEL_CHECK = 8, + KEXEC_INITRAMFS_CHECK = 9, + POLICY_CHECK = 10, + KEXEC_CMDLINE = 11, + KEY_CHECK = 12, + MAX_CHECK = 13, +}; + +enum tpm_algorithms { + TPM_ALG_ERROR = 0, + TPM_ALG_SHA1 = 4, + TPM_ALG_KEYEDHASH = 8, + TPM_ALG_SHA256 = 11, + TPM_ALG_SHA384 = 12, + TPM_ALG_SHA512 = 13, + TPM_ALG_NULL = 16, + TPM_ALG_SM3_256 = 18, +}; + +enum tpm_pcrs { + TPM_PCR0 = 0, + TPM_PCR8 = 8, + TPM_PCR10 = 10, +}; + +struct ima_algo_desc { + struct crypto_shash *tfm; + enum hash_algo algo; +}; + +enum lsm_rule_types { + LSM_OBJ_USER = 0, + LSM_OBJ_ROLE = 1, + LSM_OBJ_TYPE = 2, + LSM_SUBJ_USER = 3, + LSM_SUBJ_ROLE = 4, + LSM_SUBJ_TYPE = 5, +}; + +enum policy_types { + ORIGINAL_TCB = 1, + DEFAULT_TCB = 2, +}; + +enum policy_rule_list { + IMA_DEFAULT_POLICY = 1, + IMA_CUSTOM_POLICY = 2, +}; + +struct ima_rule_opt_list { + size_t count; + char *items[0]; +}; + +struct ima_rule_entry { + struct list_head list; + int action; + unsigned int flags; + enum ima_hooks func; + int mask; + long unsigned int fsmagic; + uuid_t fsuuid; + kuid_t uid; + kuid_t fowner; + bool (*uid_op)(kuid_t, kuid_t); + bool (*fowner_op)(kuid_t, kuid_t); + int pcr; + struct { + void *rule; + char *args_p; + int type; + } lsm[6]; + char *fsname; + struct ima_rule_opt_list *keyrings; + struct ima_template_desc *template; +}; + +enum { + Opt_measure = 0, + Opt_dont_measure = 1, + Opt_appraise = 2, + Opt_dont_appraise = 3, + Opt_audit = 4, + Opt_hash___2 = 5, + Opt_dont_hash = 6, + Opt_obj_user = 7, + Opt_obj_role = 8, + Opt_obj_type = 9, + Opt_subj_user = 10, + Opt_subj_role = 11, + Opt_subj_type = 12, + Opt_func = 13, + Opt_mask = 14, + Opt_fsmagic = 15, + Opt_fsname = 16, + Opt_fsuuid = 17, + Opt_uid_eq = 18, + Opt_euid_eq = 19, + Opt_fowner_eq = 20, + Opt_uid_gt = 21, + Opt_euid_gt = 22, + Opt_fowner_gt = 23, + Opt_uid_lt = 24, + Opt_euid_lt = 25, + Opt_fowner_lt = 26, + Opt_appraise_type = 27, + Opt_appraise_flag = 28, + Opt_permit_directio = 29, + Opt_pcr = 30, + Opt_template = 31, + Opt_keyrings = 32, + Opt_err___7 = 33, +}; + +enum { + mask_exec = 0, + mask_write = 1, + mask_read = 2, + mask_append = 3, +}; + +struct ima_kexec_hdr { + u16 version; + u16 _reserved0; + u32 _reserved1; + u64 buffer_size; + u64 count; +}; + +enum header_fields { + HDR_PCR = 0, + HDR_DIGEST = 1, + HDR_TEMPLATE_NAME = 2, + HDR_TEMPLATE_DATA = 3, + HDR__LAST = 4, +}; + +enum data_formats { + DATA_FMT_DIGEST = 0, + DATA_FMT_DIGEST_WITH_ALGO = 1, + 
DATA_FMT_STRING = 2, + DATA_FMT_HEX = 3, +}; + +struct ima_key_entry { + struct list_head list; + void *payload; + size_t payload_len; + char *keyring_name; +}; + +struct evm_xattr { + struct evm_ima_xattr_data data; + u8 digest[20]; +}; + +struct xattr_list { + struct list_head list; + char *name; +}; + +struct evm_digest { + struct ima_digest_data hdr; + char digest[64]; +}; + +struct h_misc { + long unsigned int ino; + __u32 generation; + uid_t uid; + gid_t gid; + umode_t mode; +}; + +enum { + CRYPTO_MSG_ALG_REQUEST = 0, + CRYPTO_MSG_ALG_REGISTER = 1, + CRYPTO_MSG_ALG_LOADED = 2, +}; + +struct crypto_larval { + struct crypto_alg alg; + struct crypto_alg *adult; + struct completion completion; + u32 mask; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct crypto_cipher { + struct crypto_tfm base; +}; + +enum { + CRYPTOA_UNSPEC = 0, + CRYPTOA_ALG = 1, + CRYPTOA_TYPE = 2, + CRYPTOA_U32 = 3, + __CRYPTOA_MAX = 4, +}; + +struct crypto_attr_alg { + char name[128]; +}; + +struct crypto_attr_type { + u32 type; + u32 mask; +}; + +struct crypto_attr_u32 { + u32 num; +}; + +struct rtattr { + short unsigned int rta_len; + short unsigned int rta_type; +}; + +struct crypto_queue { + struct list_head list; + struct list_head *backlog; + unsigned int qlen; + unsigned int max_qlen; +}; + +enum { + NAPI_STATE_SCHED = 0, + NAPI_STATE_MISSED = 1, + NAPI_STATE_DISABLE = 2, + NAPI_STATE_NPSVC = 3, + NAPI_STATE_LISTED = 4, + NAPI_STATE_NO_BUSY_POLL = 5, + NAPI_STATE_IN_BUSY_POLL = 6, +}; + +enum bpf_xdp_mode { + XDP_MODE_SKB = 0, + XDP_MODE_DRV = 1, + XDP_MODE_HW = 2, + __MAX_XDP_MODE = 3, +}; + +enum { + NETIF_MSG_DRV_BIT = 0, + NETIF_MSG_PROBE_BIT = 1, + NETIF_MSG_LINK_BIT = 2, + NETIF_MSG_TIMER_BIT = 3, + NETIF_MSG_IFDOWN_BIT = 4, + NETIF_MSG_IFUP_BIT = 5, + NETIF_MSG_RX_ERR_BIT = 6, + NETIF_MSG_TX_ERR_BIT = 7, + NETIF_MSG_TX_QUEUED_BIT = 8, + NETIF_MSG_INTR_BIT = 9, + NETIF_MSG_TX_DONE_BIT = 10, + NETIF_MSG_RX_STATUS_BIT = 11, + NETIF_MSG_PKTDATA_BIT = 12, + NETIF_MSG_HW_BIT = 13, + NETIF_MSG_WOL_BIT = 14, + NETIF_MSG_CLASS_COUNT = 15, +}; + +struct scatter_walk { + struct scatterlist *sg; + unsigned int offset; +}; + +struct aead_request { + struct crypto_async_request base; + unsigned int assoclen; + unsigned int cryptlen; + u8 *iv; + struct scatterlist *src; + struct scatterlist *dst; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + void *__ctx[0]; +}; + +struct crypto_aead; + +struct aead_alg { + int (*setkey)(struct crypto_aead *, const u8 *, unsigned int); + int (*setauthsize)(struct crypto_aead *, unsigned int); + int (*encrypt)(struct aead_request *); + int (*decrypt)(struct aead_request *); + int (*init)(struct crypto_aead *); + void (*exit)(struct crypto_aead *); + unsigned int ivsize; + unsigned int maxauthsize; + unsigned int chunksize; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct crypto_alg base; +}; + +struct crypto_aead { + unsigned int authsize; + unsigned int reqsize; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct crypto_tfm base; +}; + +struct aead_instance { + void (*free)(struct aead_instance *); + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 
64; + long: 64; + union { + struct { + char head[128]; + struct crypto_instance base; + } s; + struct aead_alg alg; + }; +}; + +struct crypto_aead_spawn { + struct crypto_spawn base; +}; + +enum crypto_attr_type_t { + CRYPTOCFGA_UNSPEC = 0, + CRYPTOCFGA_PRIORITY_VAL = 1, + CRYPTOCFGA_REPORT_LARVAL = 2, + CRYPTOCFGA_REPORT_HASH = 3, + CRYPTOCFGA_REPORT_BLKCIPHER = 4, + CRYPTOCFGA_REPORT_AEAD = 5, + CRYPTOCFGA_REPORT_COMPRESS = 6, + CRYPTOCFGA_REPORT_RNG = 7, + CRYPTOCFGA_REPORT_CIPHER = 8, + CRYPTOCFGA_REPORT_AKCIPHER = 9, + CRYPTOCFGA_REPORT_KPP = 10, + CRYPTOCFGA_REPORT_ACOMP = 11, + CRYPTOCFGA_STAT_LARVAL = 12, + CRYPTOCFGA_STAT_HASH = 13, + CRYPTOCFGA_STAT_BLKCIPHER = 14, + CRYPTOCFGA_STAT_AEAD = 15, + CRYPTOCFGA_STAT_COMPRESS = 16, + CRYPTOCFGA_STAT_RNG = 17, + CRYPTOCFGA_STAT_CIPHER = 18, + CRYPTOCFGA_STAT_AKCIPHER = 19, + CRYPTOCFGA_STAT_KPP = 20, + CRYPTOCFGA_STAT_ACOMP = 21, + __CRYPTOCFGA_MAX = 22, +}; + +struct crypto_report_aead { + char type[64]; + char geniv[64]; + unsigned int blocksize; + unsigned int maxauthsize; + unsigned int ivsize; +}; + +struct crypto_sync_skcipher; + +struct aead_geniv_ctx { + spinlock_t lock; + struct crypto_aead *child; + struct crypto_sync_skcipher *sknull; + u8 salt[0]; +}; + +struct crypto_rng; + +struct rng_alg { + int (*generate)(struct crypto_rng *, const u8 *, unsigned int, u8 *, unsigned int); + int (*seed)(struct crypto_rng *, const u8 *, unsigned int); + void (*set_ent)(struct crypto_rng *, const u8 *, unsigned int); + unsigned int seedsize; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct crypto_alg base; +}; + +struct crypto_rng { + struct crypto_tfm base; +}; + +struct crypto_cipher_spawn { + struct crypto_spawn base; +}; + +struct crypto_sync_skcipher { + struct crypto_skcipher base; +}; + +struct skcipher_instance { + void (*free)(struct skcipher_instance *); + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + union { + struct { + char head[128]; + struct crypto_instance base; + } s; + struct skcipher_alg alg; + }; +}; + +struct crypto_skcipher_spawn { + struct crypto_spawn base; +}; + +struct skcipher_walk { + union { + struct { + struct page *page; + long unsigned int offset; + } phys; + struct { + u8 *page; + void *addr; + } virt; + } src; + union { + struct { + struct page *page; + long unsigned int offset; + } phys; + struct { + u8 *page; + void *addr; + } virt; + } dst; + struct scatter_walk in; + unsigned int nbytes; + struct scatter_walk out; + unsigned int total; + struct list_head buffers; + u8 *page; + u8 *buffer; + u8 *oiv; + void *iv; + unsigned int ivsize; + int flags; + unsigned int blocksize; + unsigned int stride; + unsigned int alignmask; +}; + +struct skcipher_ctx_simple { + struct crypto_cipher *cipher; +}; + +struct crypto_report_blkcipher { + char type[64]; + char geniv[64]; + unsigned int blocksize; + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; +}; + +enum { + SKCIPHER_WALK_PHYS = 1, + SKCIPHER_WALK_SLOW = 2, + SKCIPHER_WALK_COPY = 4, + SKCIPHER_WALK_DIFF = 8, + SKCIPHER_WALK_SLEEP = 16, +}; + +struct skcipher_walk_buffer { + struct list_head entry; + struct scatter_walk dst; + unsigned int len; + u8 *data; + u8 buffer[0]; +}; + +struct ahash_alg { + int (*init)(struct ahash_request *); + int (*update)(struct ahash_request *); + int 
(*final)(struct ahash_request *); + int (*finup)(struct ahash_request *); + int (*digest)(struct ahash_request *); + int (*export)(struct ahash_request *, void *); + int (*import)(struct ahash_request *, const void *); + int (*setkey)(struct crypto_ahash *, const u8 *, unsigned int); + int (*init_tfm)(struct crypto_ahash *); + void (*exit_tfm)(struct crypto_ahash *); + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct hash_alg_common halg; +}; + +struct crypto_hash_walk { + char *data; + unsigned int offset; + unsigned int alignmask; + struct page *pg; + unsigned int entrylen; + unsigned int total; + struct scatterlist *sg; + unsigned int flags; +}; + +struct ahash_instance { + void (*free)(struct ahash_instance *); + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + union { + struct { + char head[256]; + struct crypto_instance base; + } s; + struct ahash_alg alg; + }; +}; + +struct crypto_ahash_spawn { + struct crypto_spawn base; +}; + +struct crypto_report_hash { + char type[64]; + unsigned int blocksize; + unsigned int digestsize; +}; + +struct ahash_request_priv { + crypto_completion_t complete; + void *data; + u8 *result; + u32 flags; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + void *ubuf[0]; +}; + +struct shash_instance { + void (*free)(struct shash_instance *); + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + union { + struct { + char head[256]; + struct crypto_instance base; + } s; + struct shash_alg alg; + }; +}; + +struct crypto_shash_spawn { + struct crypto_spawn base; +}; + +struct crypto_report_akcipher { + char type[64]; +}; + +struct akcipher_request { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int src_len; + unsigned int dst_len; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + void *__ctx[0]; +}; + +struct crypto_akcipher { + struct crypto_tfm base; +}; + +struct akcipher_alg { + int (*sign)(struct akcipher_request *); + int (*verify)(struct akcipher_request *); + int (*encrypt)(struct akcipher_request *); + int (*decrypt)(struct akcipher_request *); + int (*set_pub_key)(struct crypto_akcipher *, const void *, unsigned int); + int (*set_priv_key)(struct crypto_akcipher *, const void *, unsigned int); + unsigned int (*max_size)(struct crypto_akcipher *); + int (*init)(struct crypto_akcipher *); + void (*exit)(struct crypto_akcipher *); + unsigned int reqsize; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct crypto_alg base; +}; + +struct akcipher_instance { + void (*free)(struct akcipher_instance *); + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + union { + struct { + char head[128]; + struct crypto_instance base; + } s; + struct akcipher_alg alg; + }; +}; + +struct crypto_akcipher_spawn { + struct crypto_spawn base; +}; + +struct crypto_report_kpp { + char type[64]; +}; + +struct kpp_request { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int src_len; + 
unsigned int dst_len; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + void *__ctx[0]; +}; + +struct crypto_kpp { + struct crypto_tfm base; +}; + +struct kpp_alg { + int (*set_secret)(struct crypto_kpp *, const void *, unsigned int); + int (*generate_public_key)(struct kpp_request *); + int (*compute_shared_secret)(struct kpp_request *); + unsigned int (*max_size)(struct crypto_kpp *); + int (*init)(struct crypto_kpp *); + void (*exit)(struct crypto_kpp *); + unsigned int reqsize; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct crypto_alg base; +}; + +enum asn1_class { + ASN1_UNIV = 0, + ASN1_APPL = 1, + ASN1_CONT = 2, + ASN1_PRIV = 3, +}; + +enum asn1_method { + ASN1_PRIM = 0, + ASN1_CONS = 1, +}; + +enum asn1_tag { + ASN1_EOC = 0, + ASN1_BOOL = 1, + ASN1_INT = 2, + ASN1_BTS = 3, + ASN1_OTS = 4, + ASN1_NULL = 5, + ASN1_OID = 6, + ASN1_ODE = 7, + ASN1_EXT = 8, + ASN1_REAL = 9, + ASN1_ENUM = 10, + ASN1_EPDV = 11, + ASN1_UTF8STR = 12, + ASN1_RELOID = 13, + ASN1_SEQ = 16, + ASN1_SET = 17, + ASN1_NUMSTR = 18, + ASN1_PRNSTR = 19, + ASN1_TEXSTR = 20, + ASN1_VIDSTR = 21, + ASN1_IA5STR = 22, + ASN1_UNITIM = 23, + ASN1_GENTIM = 24, + ASN1_GRASTR = 25, + ASN1_VISSTR = 26, + ASN1_GENSTR = 27, + ASN1_UNISTR = 28, + ASN1_CHRSTR = 29, + ASN1_BMPSTR = 30, + ASN1_LONG_TAG = 31, +}; + +typedef int (*asn1_action_t)(void *, size_t, unsigned char, const void *, size_t); + +struct asn1_decoder { + const unsigned char *machine; + size_t machlen; + const asn1_action_t *actions; +}; + +enum asn1_opcode { + ASN1_OP_MATCH = 0, + ASN1_OP_MATCH_OR_SKIP = 1, + ASN1_OP_MATCH_ACT = 2, + ASN1_OP_MATCH_ACT_OR_SKIP = 3, + ASN1_OP_MATCH_JUMP = 4, + ASN1_OP_MATCH_JUMP_OR_SKIP = 5, + ASN1_OP_MATCH_ANY = 8, + ASN1_OP_MATCH_ANY_OR_SKIP = 9, + ASN1_OP_MATCH_ANY_ACT = 10, + ASN1_OP_MATCH_ANY_ACT_OR_SKIP = 11, + ASN1_OP_COND_MATCH_OR_SKIP = 17, + ASN1_OP_COND_MATCH_ACT_OR_SKIP = 19, + ASN1_OP_COND_MATCH_JUMP_OR_SKIP = 21, + ASN1_OP_COND_MATCH_ANY = 24, + ASN1_OP_COND_MATCH_ANY_OR_SKIP = 25, + ASN1_OP_COND_MATCH_ANY_ACT = 26, + ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP = 27, + ASN1_OP_COND_FAIL = 28, + ASN1_OP_COMPLETE = 29, + ASN1_OP_ACT = 30, + ASN1_OP_MAYBE_ACT = 31, + ASN1_OP_END_SEQ = 32, + ASN1_OP_END_SET = 33, + ASN1_OP_END_SEQ_OF = 34, + ASN1_OP_END_SET_OF = 35, + ASN1_OP_END_SEQ_ACT = 36, + ASN1_OP_END_SET_ACT = 37, + ASN1_OP_END_SEQ_OF_ACT = 38, + ASN1_OP_END_SET_OF_ACT = 39, + ASN1_OP_RETURN = 40, + ASN1_OP__NR = 41, +}; + +enum rsapubkey_actions { + ACT_rsa_get_e = 0, + ACT_rsa_get_n = 1, + NR__rsapubkey_actions = 2, +}; + +enum rsaprivkey_actions { + ACT_rsa_get_d = 0, + ACT_rsa_get_dp = 1, + ACT_rsa_get_dq = 2, + ACT_rsa_get_e___2 = 3, + ACT_rsa_get_n___2 = 4, + ACT_rsa_get_p = 5, + ACT_rsa_get_q = 6, + ACT_rsa_get_qinv = 7, + NR__rsaprivkey_actions = 8, +}; + +typedef long unsigned int mpi_limb_t; + +struct gcry_mpi { + int alloced; + int nlimbs; + int nbits; + int sign; + unsigned int flags; + mpi_limb_t *d; +}; + +typedef struct gcry_mpi *MPI; + +struct rsa_key { + const u8 *n; + const u8 *e; + const u8 *d; + const u8 *p; + const u8 *q; + const u8 *dp; + const u8 *dq; + const u8 *qinv; + size_t n_sz; + size_t e_sz; + size_t d_sz; + size_t p_sz; + size_t q_sz; + size_t dp_sz; + size_t dq_sz; + size_t qinv_sz; +}; + +struct rsa_mpi_key { + MPI n; + MPI e; + MPI d; +}; + +struct asn1_decoder___2; + +struct rsa_asn1_template { + const char *name; + const u8 *data; + size_t size; +}; + +struct pkcs1pad_ctx { + 
struct crypto_akcipher *child; + unsigned int key_size; +}; + +struct pkcs1pad_inst_ctx { + struct crypto_akcipher_spawn spawn; + const struct rsa_asn1_template *digest_info; +}; + +struct pkcs1pad_request { + struct scatterlist in_sg[2]; + struct scatterlist out_sg[1]; + uint8_t *in_buf; + uint8_t *out_buf; + long: 64; + long: 64; + struct akcipher_request child_req; +}; + +struct crypto_report_acomp { + char type[64]; +}; + +struct acomp_req { + struct crypto_async_request base; + struct scatterlist *src; + struct scatterlist *dst; + unsigned int slen; + unsigned int dlen; + u32 flags; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + void *__ctx[0]; +}; + +struct crypto_acomp { + int (*compress)(struct acomp_req *); + int (*decompress)(struct acomp_req *); + void (*dst_free)(struct scatterlist *); + unsigned int reqsize; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct crypto_tfm base; +}; + +struct acomp_alg { + int (*compress)(struct acomp_req *); + int (*decompress)(struct acomp_req *); + void (*dst_free)(struct scatterlist *); + int (*init)(struct crypto_acomp *); + void (*exit)(struct crypto_acomp *); + unsigned int reqsize; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct crypto_alg base; +}; + +struct crypto_report_comp { + char type[64]; +}; + +struct crypto_scomp { + struct crypto_tfm base; +}; + +struct scomp_alg { + void * (*alloc_ctx)(struct crypto_scomp *); + void (*free_ctx)(struct crypto_scomp *, void *); + int (*compress)(struct crypto_scomp *, const u8 *, unsigned int, u8 *, unsigned int *, void *); + int (*decompress)(struct crypto_scomp *, const u8 *, unsigned int, u8 *, unsigned int *, void *); + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct crypto_alg base; +}; + +struct scomp_scratch { + spinlock_t lock; + void *src; + void *dst; +}; + +struct cryptomgr_param { + struct rtattr *tb[34]; + struct { + struct rtattr attr; + struct crypto_attr_type data; + } type; + union { + struct rtattr attr; + struct { + struct rtattr attr; + struct crypto_attr_alg data; + } alg; + struct { + struct rtattr attr; + struct crypto_attr_u32 data; + } nu32; + } attrs[32]; + char template[128]; + struct crypto_larval *larval; + u32 otype; + u32 omask; +}; + +struct crypto_test_param { + char driver[128]; + char alg[128]; + u32 type; +}; + +struct hmac_ctx { + struct crypto_shash *hash; +}; + +struct md5_state { + u32 hash[4]; + u32 block[16]; + u64 byte_count; +}; + +struct sha1_state { + u32 state[5]; + u64 count; + u8 buffer[64]; +}; + +typedef void sha1_block_fn(struct sha1_state *, const u8 *, int); + +struct sha256_state { + u32 state[8]; + u64 count; + u8 buf[64]; +}; + +struct sha512_state { + u64 state[8]; + u64 count[2]; + u8 buf[128]; +}; + +typedef void sha512_block_fn(struct sha512_state *, const u8 *, int); + +typedef struct { + u64 a; + u64 b; +} u128; + +typedef struct { + __be64 a; + __be64 b; +} be128; + +typedef struct { + __le64 b; + __le64 a; +} le128; + +struct gf128mul_4k { + be128 t[256]; +}; + +struct gf128mul_64k { + struct gf128mul_4k *t[16]; +}; + +struct crypto_cts_ctx { + struct crypto_skcipher *child; +}; + +struct crypto_cts_reqctx { + struct scatterlist sg[2]; + unsigned int offset; + long: 32; + long: 64; + long: 
64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct skcipher_request subreq; +}; + +struct xts_tfm_ctx { + struct crypto_skcipher *child; + struct crypto_cipher *tweak; +}; + +struct xts_instance_ctx { + struct crypto_skcipher_spawn spawn; + char name[128]; +}; + +struct xts_request_ctx { + le128 t; + struct scatterlist *tail; + struct scatterlist sg[2]; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct skcipher_request subreq; +}; + +struct crypto_rfc3686_ctx { + struct crypto_skcipher *child; + u8 nonce[4]; +}; + +struct crypto_rfc3686_req_ctx { + u8 iv[16]; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct skcipher_request subreq; +}; + +struct gcm_instance_ctx { + struct crypto_skcipher_spawn ctr; + struct crypto_ahash_spawn ghash; +}; + +struct crypto_gcm_ctx { + struct crypto_skcipher *ctr; + struct crypto_ahash *ghash; +}; + +struct crypto_rfc4106_ctx { + struct crypto_aead *child; + u8 nonce[4]; +}; + +struct crypto_rfc4106_req_ctx { + struct scatterlist src[3]; + struct scatterlist dst[3]; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct aead_request subreq; +}; + +struct crypto_rfc4543_instance_ctx { + struct crypto_aead_spawn aead; +}; + +struct crypto_rfc4543_ctx { + struct crypto_aead *child; + struct crypto_sync_skcipher *null; + u8 nonce[4]; +}; + +struct crypto_rfc4543_req_ctx { + struct aead_request subreq; +}; + +struct crypto_gcm_ghash_ctx { + unsigned int cryptlen; + struct scatterlist *src; + int (*complete)(struct aead_request *, u32); +}; + +struct crypto_gcm_req_priv_ctx { + u8 iv[16]; + u8 auth_tag[16]; + u8 iauth_tag[16]; + struct scatterlist src[3]; + struct scatterlist dst[3]; + struct scatterlist sg; + struct crypto_gcm_ghash_ctx ghash_ctx; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + union { + struct ahash_request ahreq; + struct skcipher_request skreq; + } u; +}; + +struct crypto_aes_ctx { + u32 key_enc[60]; + u32 key_dec[60]; + u32 key_length; +}; + +struct chksum_ctx { + u32 key; +}; + +struct chksum_desc_ctx { + u32 crc; +}; + +struct chksum_desc_ctx___2 { + __u16 crc; +}; + +struct lzo_ctx { + void *lzo_comp_mem; +}; + +struct lzorle_ctx { + void *lzorle_comp_mem; +}; + +struct crypto_report_rng { + char type[64]; + unsigned int seedsize; +}; + +struct random_ready_callback { + struct list_head list; + void (*func)(struct random_ready_callback *); + struct module *owner; +}; + +struct drbg_string { + const unsigned char *buf; + size_t len; + struct list_head list; +}; + +typedef uint32_t drbg_flag_t; + +struct drbg_core { + drbg_flag_t flags; + __u8 statelen; + __u8 blocklen_bytes; + char cra_name[128]; + char backend_cra_name[128]; +}; + +struct drbg_state; + +struct drbg_state_ops { + int (*update)(struct drbg_state *, struct list_head *, int); + int (*generate)(struct drbg_state *, unsigned char *, unsigned int, struct list_head *); + int (*crypto_init)(struct drbg_state *); + int (*crypto_fini)(struct drbg_state *); +}; + +struct drbg_state { + struct mutex drbg_mutex; + unsigned char *V; + unsigned char *Vbuf; + unsigned char *C; + unsigned char *Cbuf; + size_t reseed_ctr; + size_t reseed_threshold; + unsigned char *scratchpad; + unsigned char *scratchpadbuf; + void *priv_data; + struct crypto_skcipher *ctr_handle; + struct skcipher_request 
*ctr_req; + __u8 *outscratchpadbuf; + __u8 *outscratchpad; + struct crypto_wait ctr_wait; + struct scatterlist sg_in; + struct scatterlist sg_out; + bool seeded; + bool pr; + bool fips_primed; + unsigned char *prev; + struct work_struct seed_work; + struct crypto_rng *jent; + const struct drbg_state_ops *d_ops; + const struct drbg_core *core; + struct drbg_string test_data; + struct random_ready_callback random_ready; +}; + +enum drbg_prefixes { + DRBG_PREFIX0 = 0, + DRBG_PREFIX1 = 1, + DRBG_PREFIX2 = 2, + DRBG_PREFIX3 = 3, +}; + +struct sdesc { + struct shash_desc shash; + char ctx[0]; +}; + +struct s { + __be32 conv; +}; + +struct rand_data { + __u64 data; + __u64 old_data; + __u64 prev_time; + __u64 last_delta; + __s64 last_delta2; + unsigned int osr; + unsigned char *mem; + unsigned int memlocation; + unsigned int memblocks; + unsigned int memblocksize; + unsigned int memaccessloops; + int rct_count; + unsigned int apt_observations; + unsigned int apt_count; + unsigned int apt_base; + unsigned int apt_base_set: 1; + unsigned int health_failure: 1; +}; + +struct rand_data___2; + +struct jitterentropy { + spinlock_t jent_lock; + struct rand_data___2 *entropy_collector; + unsigned int reset_cnt; +}; + +struct ghash_ctx { + struct gf128mul_4k *gf128; +}; + +struct ghash_desc_ctx { + u8 buffer[16]; + u32 bytes; +}; + +typedef enum { + ZSTD_fast = 0, + ZSTD_dfast = 1, + ZSTD_greedy = 2, + ZSTD_lazy = 3, + ZSTD_lazy2 = 4, + ZSTD_btlazy2 = 5, + ZSTD_btopt = 6, + ZSTD_btopt2 = 7, +} ZSTD_strategy; + +typedef struct { + unsigned int windowLog; + unsigned int chainLog; + unsigned int hashLog; + unsigned int searchLog; + unsigned int searchLength; + unsigned int targetLength; + ZSTD_strategy strategy; +} ZSTD_compressionParameters; + +typedef struct { + unsigned int contentSizeFlag; + unsigned int checksumFlag; + unsigned int noDictIDFlag; +} ZSTD_frameParameters; + +typedef struct { + ZSTD_compressionParameters cParams; + ZSTD_frameParameters fParams; +} ZSTD_parameters; + +struct ZSTD_CCtx_s; + +typedef struct ZSTD_CCtx_s ZSTD_CCtx; + +struct ZSTD_DCtx_s; + +typedef struct ZSTD_DCtx_s ZSTD_DCtx; + +struct zstd_ctx { + ZSTD_CCtx *cctx; + ZSTD_DCtx *dctx; + void *cwksp; + void *dwksp; +}; + +enum asymmetric_payload_bits { + asym_crypto = 0, + asym_subtype = 1, + asym_key_ids = 2, + asym_auth = 3, +}; + +struct asymmetric_key_ids { + void *id[2]; +}; + +struct asymmetric_key_subtype { + struct module *owner; + const char *name; + short unsigned int name_len; + void (*describe)(const struct key *, struct seq_file *); + void (*destroy)(void *, void *); + int (*query)(const struct kernel_pkey_params *, struct kernel_pkey_query *); + int (*eds_op)(struct kernel_pkey_params *, const void *, void *); + int (*verify_signature)(const struct key *, const struct public_key_signature *); +}; + +struct asymmetric_key_parser { + struct list_head link; + struct module *owner; + const char *name; + int (*parse)(struct key_preparsed_payload *); +}; + +enum OID { + OID_id_dsa_with_sha1 = 0, + OID_id_dsa = 1, + OID_id_ecdsa_with_sha1 = 2, + OID_id_ecPublicKey = 3, + OID_rsaEncryption = 4, + OID_md2WithRSAEncryption = 5, + OID_md3WithRSAEncryption = 6, + OID_md4WithRSAEncryption = 7, + OID_sha1WithRSAEncryption = 8, + OID_sha256WithRSAEncryption = 9, + OID_sha384WithRSAEncryption = 10, + OID_sha512WithRSAEncryption = 11, + OID_sha224WithRSAEncryption = 12, + OID_data = 13, + OID_signed_data = 14, + OID_email_address = 15, + OID_contentType = 16, + OID_messageDigest = 17, + OID_signingTime = 18, + OID_smimeCapabilites 
= 19, + OID_smimeAuthenticatedAttrs = 20, + OID_md2 = 21, + OID_md4 = 22, + OID_md5 = 23, + OID_msIndirectData = 24, + OID_msStatementType = 25, + OID_msSpOpusInfo = 26, + OID_msPeImageDataObjId = 27, + OID_msIndividualSPKeyPurpose = 28, + OID_msOutlookExpress = 29, + OID_certAuthInfoAccess = 30, + OID_sha1 = 31, + OID_sha256 = 32, + OID_sha384 = 33, + OID_sha512 = 34, + OID_sha224 = 35, + OID_commonName = 36, + OID_surname = 37, + OID_countryName = 38, + OID_locality = 39, + OID_stateOrProvinceName = 40, + OID_organizationName = 41, + OID_organizationUnitName = 42, + OID_title = 43, + OID_description = 44, + OID_name = 45, + OID_givenName = 46, + OID_initials = 47, + OID_generationalQualifier = 48, + OID_subjectKeyIdentifier = 49, + OID_keyUsage = 50, + OID_subjectAltName = 51, + OID_issuerAltName = 52, + OID_basicConstraints = 53, + OID_crlDistributionPoints = 54, + OID_certPolicies = 55, + OID_authorityKeyIdentifier = 56, + OID_extKeyUsage = 57, + OID_gostCPSignA = 58, + OID_gostCPSignB = 59, + OID_gostCPSignC = 60, + OID_gost2012PKey256 = 61, + OID_gost2012PKey512 = 62, + OID_gost2012Digest256 = 63, + OID_gost2012Digest512 = 64, + OID_gost2012Signature256 = 65, + OID_gost2012Signature512 = 66, + OID_gostTC26Sign256A = 67, + OID_gostTC26Sign256B = 68, + OID_gostTC26Sign256C = 69, + OID_gostTC26Sign256D = 70, + OID_gostTC26Sign512A = 71, + OID_gostTC26Sign512B = 72, + OID_gostTC26Sign512C = 73, + OID_sm2 = 74, + OID_sm3 = 75, + OID_SM2_with_SM3 = 76, + OID_sm3WithRSAEncryption = 77, + OID__NR = 78, +}; + +struct public_key { + void *key; + u32 keylen; + enum OID algo; + void *params; + u32 paramlen; + bool key_is_private; + const char *id_type; + const char *pkey_algo; +}; + +enum x509_actions { + ACT_x509_extract_key_data = 0, + ACT_x509_extract_name_segment = 1, + ACT_x509_note_OID = 2, + ACT_x509_note_issuer = 3, + ACT_x509_note_not_after = 4, + ACT_x509_note_not_before = 5, + ACT_x509_note_params = 6, + ACT_x509_note_pkey_algo = 7, + ACT_x509_note_serial = 8, + ACT_x509_note_signature = 9, + ACT_x509_note_subject = 10, + ACT_x509_note_tbs_certificate = 11, + ACT_x509_process_extension = 12, + NR__x509_actions = 13, +}; + +enum x509_akid_actions { + ACT_x509_akid_note_kid = 0, + ACT_x509_akid_note_name = 1, + ACT_x509_akid_note_serial = 2, + ACT_x509_extract_name_segment___2 = 3, + ACT_x509_note_OID___2 = 4, + NR__x509_akid_actions = 5, +}; + +struct x509_certificate { + struct x509_certificate *next; + struct x509_certificate *signer; + struct public_key *pub; + struct public_key_signature *sig; + char *issuer; + char *subject; + struct asymmetric_key_id *id; + struct asymmetric_key_id *skid; + time64_t valid_from; + time64_t valid_to; + const void *tbs; + unsigned int tbs_size; + unsigned int raw_sig_size; + const void *raw_sig; + const void *raw_serial; + unsigned int raw_serial_size; + unsigned int raw_issuer_size; + const void *raw_issuer; + const void *raw_subject; + unsigned int raw_subject_size; + unsigned int raw_skid_size; + const void *raw_skid; + unsigned int index; + bool seen; + bool verified; + bool self_signed; + bool unsupported_key; + bool unsupported_sig; + bool blacklisted; +}; + +struct x509_parse_context { + struct x509_certificate *cert; + long unsigned int data; + const void *cert_start; + const void *key; + size_t key_size; + const void *params; + size_t params_size; + enum OID key_algo; + enum OID last_oid; + enum OID algo_oid; + unsigned char nr_mpi; + u8 o_size; + u8 cn_size; + u8 email_size; + u16 o_offset; + u16 cn_offset; + u16 email_offset; + unsigned 
int raw_akid_size; + const void *raw_akid; + const void *akid_raw_issuer; + unsigned int akid_raw_issuer_size; +}; + +enum pkcs7_actions { + ACT_pkcs7_check_content_type = 0, + ACT_pkcs7_extract_cert = 1, + ACT_pkcs7_note_OID = 2, + ACT_pkcs7_note_certificate_list = 3, + ACT_pkcs7_note_content = 4, + ACT_pkcs7_note_data = 5, + ACT_pkcs7_note_signed_info = 6, + ACT_pkcs7_note_signeddata_version = 7, + ACT_pkcs7_note_signerinfo_version = 8, + ACT_pkcs7_sig_note_authenticated_attr = 9, + ACT_pkcs7_sig_note_digest_algo = 10, + ACT_pkcs7_sig_note_issuer = 11, + ACT_pkcs7_sig_note_pkey_algo = 12, + ACT_pkcs7_sig_note_serial = 13, + ACT_pkcs7_sig_note_set_of_authattrs = 14, + ACT_pkcs7_sig_note_signature = 15, + ACT_pkcs7_sig_note_skid = 16, + NR__pkcs7_actions = 17, +}; + +struct pkcs7_signed_info { + struct pkcs7_signed_info *next; + struct x509_certificate *signer; + unsigned int index; + bool unsupported_crypto; + bool blacklisted; + const void *msgdigest; + unsigned int msgdigest_len; + unsigned int authattrs_len; + const void *authattrs; + long unsigned int aa_set; + time64_t signing_time; + struct public_key_signature *sig; +}; + +struct pkcs7_message___2 { + struct x509_certificate *certs; + struct x509_certificate *crl; + struct pkcs7_signed_info *signed_infos; + u8 version; + bool have_authattrs; + enum OID data_type; + size_t data_len; + size_t data_hdrlen; + const void *data; +}; + +struct pkcs7_parse_context { + struct pkcs7_message___2 *msg; + struct pkcs7_signed_info *sinfo; + struct pkcs7_signed_info **ppsinfo; + struct x509_certificate *certs; + struct x509_certificate **ppcerts; + long unsigned int data; + enum OID last_oid; + unsigned int x509_index; + unsigned int sinfo_index; + const void *raw_serial; + unsigned int raw_serial_size; + unsigned int raw_issuer_size; + const void *raw_issuer; + const void *raw_skid; + unsigned int raw_skid_size; + bool expect_skid; +}; + +struct biovec_slab { + int nr_vecs; + char *name; + struct kmem_cache *slab; +}; + +enum rq_qos_id { + RQ_QOS_WBT = 0, + RQ_QOS_LATENCY = 1, + RQ_QOS_COST = 2, +}; + +struct rq_qos_ops; + +struct rq_qos { + struct rq_qos_ops *ops; + struct request_queue *q; + enum rq_qos_id id; + struct rq_qos *next; +}; + +enum hctx_type { + HCTX_TYPE_DEFAULT = 0, + HCTX_TYPE_READ = 1, + HCTX_TYPE_POLL = 2, + HCTX_MAX_TYPES = 3, +}; + +struct blk_mq_debugfs_attr; + +struct rq_qos_ops { + void (*throttle)(struct rq_qos *, struct bio *); + void (*track)(struct rq_qos *, struct request *, struct bio *); + void (*merge)(struct rq_qos *, struct request *, struct bio *); + void (*issue)(struct rq_qos *, struct request *); + void (*requeue)(struct rq_qos *, struct request *); + void (*done)(struct rq_qos *, struct request *); + void (*done_bio)(struct rq_qos *, struct bio *); + void (*cleanup)(struct rq_qos *, struct bio *); + void (*queue_depth_changed)(struct rq_qos *); + void (*exit)(struct rq_qos *); + const struct blk_mq_debugfs_attr *debugfs_attrs; +}; + +struct bio_slab { + struct kmem_cache *slab; + unsigned int slab_ref; + unsigned int slab_size; + char name[8]; +}; + +enum { + BLK_MQ_F_SHOULD_MERGE = 1, + BLK_MQ_F_TAG_QUEUE_SHARED = 2, + BLK_MQ_F_STACKING = 4, + BLK_MQ_F_TAG_HCTX_SHARED = 8, + BLK_MQ_F_BLOCKING = 32, + BLK_MQ_F_NO_SCHED = 64, + BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, + BLK_MQ_F_ALLOC_POLICY_BITS = 1, + BLK_MQ_S_STOPPED = 0, + BLK_MQ_S_TAG_ACTIVE = 1, + BLK_MQ_S_SCHED_RESTART = 2, + BLK_MQ_S_INACTIVE = 3, + BLK_MQ_MAX_DEPTH = 10240, + BLK_MQ_CPU_WORK_BATCH = 8, +}; + +enum { + WBT_RWQ_BG = 0, + WBT_RWQ_KSWAPD = 
1, + WBT_RWQ_DISCARD = 2, + WBT_NUM_RWQ = 3, +}; + +struct req_iterator { + struct bvec_iter iter; + struct bio *bio; +}; + +struct blk_plug_cb; + +typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); + +struct blk_plug_cb { + struct list_head list; + blk_plug_cb_fn callback; + void *data; +}; + +enum { + BLK_MQ_REQ_NOWAIT = 1, + BLK_MQ_REQ_RESERVED = 2, + BLK_MQ_REQ_PM = 4, +}; + +struct trace_event_raw_block_buffer { + struct trace_entry ent; + dev_t dev; + sector_t sector; + size_t size; + char __data[0]; +}; + +struct trace_event_raw_block_rq_requeue { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + char rwbs[8]; + u32 __data_loc_cmd; + char __data[0]; +}; + +struct trace_event_raw_block_rq_complete { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + int error; + char rwbs[8]; + u32 __data_loc_cmd; + char __data[0]; +}; + +struct trace_event_raw_block_rq { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + unsigned int bytes; + char rwbs[8]; + char comm[16]; + u32 __data_loc_cmd; + char __data[0]; +}; + +struct trace_event_raw_block_bio_bounce { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + char rwbs[8]; + char comm[16]; + char __data[0]; +}; + +struct trace_event_raw_block_bio_complete { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + int error; + char rwbs[8]; + char __data[0]; +}; + +struct trace_event_raw_block_bio_merge { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + char rwbs[8]; + char comm[16]; + char __data[0]; +}; + +struct trace_event_raw_block_bio_queue { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + char rwbs[8]; + char comm[16]; + char __data[0]; +}; + +struct trace_event_raw_block_get_rq { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + char rwbs[8]; + char comm[16]; + char __data[0]; +}; + +struct trace_event_raw_block_plug { + struct trace_entry ent; + char comm[16]; + char __data[0]; +}; + +struct trace_event_raw_block_unplug { + struct trace_entry ent; + int nr_rq; + char comm[16]; + char __data[0]; +}; + +struct trace_event_raw_block_split { + struct trace_entry ent; + dev_t dev; + sector_t sector; + sector_t new_sector; + char rwbs[8]; + char comm[16]; + char __data[0]; +}; + +struct trace_event_raw_block_bio_remap { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + dev_t old_dev; + sector_t old_sector; + char rwbs[8]; + char __data[0]; +}; + +struct trace_event_raw_block_rq_remap { + struct trace_entry ent; + dev_t dev; + sector_t sector; + unsigned int nr_sector; + dev_t old_dev; + sector_t old_sector; + unsigned int nr_bios; + char rwbs[8]; + char __data[0]; +}; + +struct trace_event_data_offsets_block_buffer {}; + +struct trace_event_data_offsets_block_rq_requeue { + u32 cmd; +}; + +struct trace_event_data_offsets_block_rq_complete { + u32 cmd; +}; + +struct trace_event_data_offsets_block_rq { + u32 cmd; +}; + +struct trace_event_data_offsets_block_bio_bounce {}; + +struct trace_event_data_offsets_block_bio_complete {}; + +struct trace_event_data_offsets_block_bio_merge {}; + +struct trace_event_data_offsets_block_bio_queue {}; + +struct trace_event_data_offsets_block_get_rq {}; + +struct trace_event_data_offsets_block_plug {}; + +struct trace_event_data_offsets_block_unplug {}; + +struct 
trace_event_data_offsets_block_split {}; + +struct trace_event_data_offsets_block_bio_remap {}; + +struct trace_event_data_offsets_block_rq_remap {}; + +typedef void (*btf_trace_block_touch_buffer)(void *, struct buffer_head *); + +typedef void (*btf_trace_block_dirty_buffer)(void *, struct buffer_head *); + +typedef void (*btf_trace_block_rq_requeue)(void *, struct request_queue *, struct request *); + +typedef void (*btf_trace_block_rq_complete)(void *, struct request *, int, unsigned int); + +typedef void (*btf_trace_block_rq_insert)(void *, struct request_queue *, struct request *); + +typedef void (*btf_trace_block_rq_issue)(void *, struct request_queue *, struct request *); + +typedef void (*btf_trace_block_rq_merge)(void *, struct request_queue *, struct request *); + +typedef void (*btf_trace_block_bio_bounce)(void *, struct request_queue *, struct bio *); + +typedef void (*btf_trace_block_bio_complete)(void *, struct request_queue *, struct bio *); + +typedef void (*btf_trace_block_bio_backmerge)(void *, struct request_queue *, struct request *, struct bio *); + +typedef void (*btf_trace_block_bio_frontmerge)(void *, struct request_queue *, struct request *, struct bio *); + +typedef void (*btf_trace_block_bio_queue)(void *, struct request_queue *, struct bio *); + +typedef void (*btf_trace_block_getrq)(void *, struct request_queue *, struct bio *, int); + +typedef void (*btf_trace_block_sleeprq)(void *, struct request_queue *, struct bio *, int); + +typedef void (*btf_trace_block_plug)(void *, struct request_queue *); + +typedef void (*btf_trace_block_unplug)(void *, struct request_queue *, unsigned int, bool); + +typedef void (*btf_trace_block_split)(void *, struct request_queue *, struct bio *, unsigned int); + +typedef void (*btf_trace_block_bio_remap)(void *, struct request_queue *, struct bio *, dev_t, sector_t); + +typedef void (*btf_trace_block_rq_remap)(void *, struct request_queue *, struct request *, dev_t, sector_t); + +enum { + BLK_MQ_NO_TAG = 4294967295, + BLK_MQ_TAG_MIN = 1, + BLK_MQ_TAG_MAX = 4294967294, +}; + +struct queue_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct request_queue *, char *); + ssize_t (*store)(struct request_queue *, const char *, size_t); +}; + +enum { + REQ_FSEQ_PREFLUSH = 1, + REQ_FSEQ_DATA = 2, + REQ_FSEQ_POSTFLUSH = 4, + REQ_FSEQ_DONE = 8, + REQ_FSEQ_ACTIONS = 7, + FLUSH_PENDING_TIMEOUT = 5000, +}; + +enum { + ICQ_EXITED = 4, + ICQ_DESTROYED = 8, +}; + +struct rq_map_data { + struct page **pages; + int page_order; + int nr_entries; + long unsigned int offset; + int null_mapped; + int from_user; +}; + +struct bio_map_data { + bool is_our_pages: 1; + bool is_null_mapped: 1; + struct iov_iter iter; + struct iovec iov[0]; +}; + +enum bio_merge_status { + BIO_MERGE_OK = 0, + BIO_MERGE_NONE = 1, + BIO_MERGE_FAILED = 2, +}; + +typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *); + +enum { + BLK_MQ_UNIQUE_TAG_BITS = 16, + BLK_MQ_UNIQUE_TAG_MASK = 65535, +}; + +struct mq_inflight { + struct hd_struct *part; + unsigned int inflight[2]; +}; + +struct flush_busy_ctx_data { + struct blk_mq_hw_ctx *hctx; + struct list_head *list; +}; + +struct dispatch_rq_data { + struct blk_mq_hw_ctx *hctx; + struct request *rq; +}; + +enum prep_dispatch { + PREP_DISPATCH_OK = 0, + PREP_DISPATCH_NO_TAG = 1, + PREP_DISPATCH_NO_BUDGET = 2, +}; + +struct rq_iter_data { + struct blk_mq_hw_ctx *hctx; + bool has_rq; +}; + +struct blk_mq_qe_pair { + struct list_head node; + struct request_queue *q; + struct elevator_type *type; +}; + 
+struct sbq_wait { + struct sbitmap_queue *sbq; + struct wait_queue_entry wait; +}; + +typedef bool busy_iter_fn(struct blk_mq_hw_ctx *, struct request *, void *, bool); + +typedef bool busy_tag_iter_fn(struct request *, void *, bool); + +struct bt_iter_data { + struct blk_mq_hw_ctx *hctx; + busy_iter_fn *fn; + void *data; + bool reserved; +}; + +struct bt_tags_iter_data { + struct blk_mq_tags *tags; + busy_tag_iter_fn *fn; + void *data; + unsigned int flags; +}; + +struct blk_queue_stats { + struct list_head callbacks; + spinlock_t lock; + bool enable_accounting; +}; + +struct blk_mq_ctx_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct blk_mq_ctx *, char *); + ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t); +}; + +struct blk_mq_hw_ctx_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct blk_mq_hw_ctx *, char *); + ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t); +}; + +typedef u32 compat_caddr_t; + +struct hd_geometry { + unsigned char heads; + unsigned char sectors; + short unsigned int cylinders; + long unsigned int start; +}; + +struct blkpg_ioctl_arg { + int op; + int flags; + int datalen; + void *data; +}; + +struct blkpg_partition { + long long int start; + long long int length; + int pno; + char devname[64]; + char volname[64]; +}; + +struct pr_reservation { + __u64 key; + __u32 type; + __u32 flags; +}; + +struct pr_registration { + __u64 old_key; + __u64 new_key; + __u32 flags; + __u32 __pad; +}; + +struct pr_preempt { + __u64 old_key; + __u64 new_key; + __u32 type; + __u32 flags; +}; + +struct pr_clear { + __u64 key; + __u32 flags; + __u32 __pad; +}; + +struct compat_blkpg_ioctl_arg { + compat_int_t op; + compat_int_t flags; + compat_int_t datalen; + compat_caddr_t data; +}; + +struct compat_hd_geometry { + unsigned char heads; + unsigned char sectors; + short unsigned int cylinders; + u32 start; +}; + +struct klist_node; + +struct klist { + spinlock_t k_lock; + struct list_head k_list; + void (*get)(struct klist_node *); + void (*put)(struct klist_node *); +}; + +struct klist_node { + void *n_klist; + struct list_head n_node; + struct kref n_ref; +}; + +struct klist_iter { + struct klist *i_klist; + struct klist_node *i_cur; +}; + +struct class_dev_iter { + struct klist_iter ki; + const struct device_type *type; +}; + +enum { + DISK_EVENT_FLAG_POLL = 1, + DISK_EVENT_FLAG_UEVENT = 2, +}; + +struct disk_events { + struct list_head node; + struct gendisk *disk; + spinlock_t lock; + struct mutex block_mutex; + int block; + unsigned int pending; + unsigned int clearing; + long int poll_msecs; + struct delayed_work dwork; +}; + +struct badblocks { + struct device *dev; + int count; + int unacked_exist; + int shift; + u64 *page; + int changed; + seqlock_t lock; + sector_t sector; + sector_t size; +}; + +struct disk_part_iter { + struct gendisk *disk; + struct hd_struct *part; + int idx; + unsigned int flags; +}; + +struct blk_major_name { + struct blk_major_name *next; + int major; + char name[16]; +}; + +enum { + IOPRIO_WHO_PROCESS = 1, + IOPRIO_WHO_PGRP = 2, + IOPRIO_WHO_USER = 3, +}; + +struct parsed_partitions { + struct block_device *bdev; + char name[32]; + struct { + sector_t from; + sector_t size; + int flags; + bool has_info; + struct partition_meta_info info; + } *parts; + int next; + int limit; + bool access_beyond_eod; + char *pp_buf; +}; + +typedef struct { + struct page *v; +} Sector; + +struct RigidDiskBlock { + __u32 rdb_ID; + __be32 rdb_SummedLongs; + __s32 rdb_ChkSum; + __u32 rdb_HostID; + __be32 rdb_BlockBytes; + 
__u32 rdb_Flags; + __u32 rdb_BadBlockList; + __be32 rdb_PartitionList; + __u32 rdb_FileSysHeaderList; + __u32 rdb_DriveInit; + __u32 rdb_Reserved1[6]; + __u32 rdb_Cylinders; + __u32 rdb_Sectors; + __u32 rdb_Heads; + __u32 rdb_Interleave; + __u32 rdb_Park; + __u32 rdb_Reserved2[3]; + __u32 rdb_WritePreComp; + __u32 rdb_ReducedWrite; + __u32 rdb_StepRate; + __u32 rdb_Reserved3[5]; + __u32 rdb_RDBBlocksLo; + __u32 rdb_RDBBlocksHi; + __u32 rdb_LoCylinder; + __u32 rdb_HiCylinder; + __u32 rdb_CylBlocks; + __u32 rdb_AutoParkSeconds; + __u32 rdb_HighRDSKBlock; + __u32 rdb_Reserved4; + char rdb_DiskVendor[8]; + char rdb_DiskProduct[16]; + char rdb_DiskRevision[4]; + char rdb_ControllerVendor[8]; + char rdb_ControllerProduct[16]; + char rdb_ControllerRevision[4]; + __u32 rdb_Reserved5[10]; +}; + +struct PartitionBlock { + __be32 pb_ID; + __be32 pb_SummedLongs; + __s32 pb_ChkSum; + __u32 pb_HostID; + __be32 pb_Next; + __u32 pb_Flags; + __u32 pb_Reserved1[2]; + __u32 pb_DevFlags; + __u8 pb_DriveName[32]; + __u32 pb_Reserved2[15]; + __be32 pb_Environment[17]; + __u32 pb_EReserved[15]; +}; + +struct partition_info { + u8 flg; + char id[3]; + __be32 st; + __be32 siz; +}; + +struct rootsector { + char unused[342]; + struct partition_info icdpart[8]; + char unused2[12]; + u32 hd_siz; + struct partition_info part[4]; + u32 bsl_st; + u32 bsl_cnt; + u16 checksum; +} __attribute__((packed)); + +struct mac_partition { + __be16 signature; + __be16 res1; + __be32 map_count; + __be32 start_block; + __be32 block_count; + char name[32]; + char type[32]; + __be32 data_start; + __be32 data_count; + __be32 status; + __be32 boot_start; + __be32 boot_size; + __be32 boot_load; + __be32 boot_load2; + __be32 boot_entry; + __be32 boot_entry2; + __be32 boot_cksum; + char processor[16]; +}; + +struct mac_driver_desc { + __be16 signature; + __be16 block_size; + __be32 block_count; +}; + +struct msdos_partition { + u8 boot_ind; + u8 head; + u8 sector; + u8 cyl; + u8 sys_ind; + u8 end_head; + u8 end_sector; + u8 end_cyl; + __le32 start_sect; + __le32 nr_sects; +}; + +struct frag { + struct list_head list; + u32 group; + u8 num; + u8 rec; + u8 map; + u8 data[0]; +}; + +struct privhead { + u16 ver_major; + u16 ver_minor; + u64 logical_disk_start; + u64 logical_disk_size; + u64 config_start; + u64 config_size; + uuid_t disk_id; +}; + +struct tocblock { + u8 bitmap1_name[16]; + u64 bitmap1_start; + u64 bitmap1_size; + u8 bitmap2_name[16]; + u64 bitmap2_start; + u64 bitmap2_size; +}; + +struct vmdb { + u16 ver_major; + u16 ver_minor; + u32 vblk_size; + u32 vblk_offset; + u32 last_vblk_seq; +}; + +struct vblk_comp { + u8 state[16]; + u64 parent_id; + u8 type; + u8 children; + u16 chunksize; +}; + +struct vblk_dgrp { + u8 disk_id[64]; +}; + +struct vblk_disk { + uuid_t disk_id; + u8 alt_name[128]; +}; + +struct vblk_part { + u64 start; + u64 size; + u64 volume_offset; + u64 parent_id; + u64 disk_id; + u8 partnum; +}; + +struct vblk_volu { + u8 volume_type[16]; + u8 volume_state[16]; + u8 guid[16]; + u8 drive_hint[4]; + u64 size; + u8 partition_type; +}; + +struct vblk { + u8 name[64]; + u64 obj_id; + u32 sequence; + u8 flags; + u8 type; + union { + struct vblk_comp comp; + struct vblk_dgrp dgrp; + struct vblk_disk disk; + struct vblk_part part; + struct vblk_volu volu; + } vblk; + struct list_head list; +}; + +struct ldmdb { + struct privhead ph; + struct tocblock toc; + struct vmdb vm; + struct list_head v_dgrp; + struct list_head v_disk; + struct list_head v_volu; + struct list_head v_comp; + struct list_head v_part; +}; + +struct 
fat_boot_sector { + __u8 ignored[3]; + __u8 system_id[8]; + __u8 sector_size[2]; + __u8 sec_per_clus; + __le16 reserved; + __u8 fats; + __u8 dir_entries[2]; + __u8 sectors[2]; + __u8 media; + __le16 fat_length; + __le16 secs_track; + __le16 heads; + __le32 hidden; + __le32 total_sect; + union { + struct { + __u8 drive_number; + __u8 state; + __u8 signature; + __u8 vol_id[4]; + __u8 vol_label[11]; + __u8 fs_type[8]; + } fat16; + struct { + __le32 length; + __le16 flags; + __u8 version[2]; + __le32 root_cluster; + __le16 info_sector; + __le16 backup_boot; + __le16 reserved2[6]; + __u8 drive_number; + __u8 state; + __u8 signature; + __u8 vol_id[4]; + __u8 vol_label[11]; + __u8 fs_type[8]; + } fat32; + }; +}; + +enum msdos_sys_ind { + DOS_EXTENDED_PARTITION = 5, + LINUX_EXTENDED_PARTITION = 133, + WIN98_EXTENDED_PARTITION = 15, + LINUX_DATA_PARTITION = 131, + LINUX_LVM_PARTITION = 142, + LINUX_RAID_PARTITION = 253, + SOLARIS_X86_PARTITION = 130, + NEW_SOLARIS_X86_PARTITION = 191, + DM6_AUX1PARTITION = 81, + DM6_AUX3PARTITION = 83, + DM6_PARTITION = 84, + EZD_PARTITION = 85, + FREEBSD_PARTITION = 165, + OPENBSD_PARTITION = 166, + NETBSD_PARTITION = 169, + BSDI_PARTITION = 183, + MINIX_PARTITION = 129, + UNIXWARE_PARTITION = 99, +}; + +struct solaris_x86_slice { + __le16 s_tag; + __le16 s_flag; + __le32 s_start; + __le32 s_size; +}; + +struct solaris_x86_vtoc { + unsigned int v_bootinfo[3]; + __le32 v_sanity; + __le32 v_version; + char v_volume[8]; + __le16 v_sectorsz; + __le16 v_nparts; + unsigned int v_reserved[10]; + struct solaris_x86_slice v_slice[16]; + unsigned int timestamp[16]; + char v_asciilabel[128]; +}; + +struct bsd_partition { + __le32 p_size; + __le32 p_offset; + __le32 p_fsize; + __u8 p_fstype; + __u8 p_frag; + __le16 p_cpg; +}; + +struct bsd_disklabel { + __le32 d_magic; + __s16 d_type; + __s16 d_subtype; + char d_typename[16]; + char d_packname[16]; + __u32 d_secsize; + __u32 d_nsectors; + __u32 d_ntracks; + __u32 d_ncylinders; + __u32 d_secpercyl; + __u32 d_secperunit; + __u16 d_sparespertrack; + __u16 d_sparespercyl; + __u32 d_acylinders; + __u16 d_rpm; + __u16 d_interleave; + __u16 d_trackskew; + __u16 d_cylskew; + __u32 d_headswitch; + __u32 d_trkseek; + __u32 d_flags; + __u32 d_drivedata[5]; + __u32 d_spare[5]; + __le32 d_magic2; + __le16 d_checksum; + __le16 d_npartitions; + __le32 d_bbsize; + __le32 d_sbsize; + struct bsd_partition d_partitions[16]; +}; + +struct unixware_slice { + __le16 s_label; + __le16 s_flags; + __le32 start_sect; + __le32 nr_sects; +}; + +struct unixware_vtoc { + __le32 v_magic; + __le32 v_version; + char v_name[8]; + __le16 v_nslices; + __le16 v_unknown1; + __le32 v_reserved[10]; + struct unixware_slice v_slice[16]; +}; + +struct unixware_disklabel { + __le32 d_type; + __le32 d_magic; + __le32 d_version; + char d_serial[12]; + __le32 d_ncylinders; + __le32 d_ntracks; + __le32 d_nsectors; + __le32 d_secsize; + __le32 d_part_start; + __le32 d_unknown1[12]; + __le32 d_alt_tbl; + __le32 d_alt_len; + __le32 d_phys_cyl; + __le32 d_phys_trk; + __le32 d_phys_sec; + __le32 d_phys_bytes; + __le32 d_unknown2; + __le32 d_unknown3; + __le32 d_pad[8]; + struct unixware_vtoc vtoc; +}; + +struct d_partition { + __le32 p_size; + __le32 p_offset; + __le32 p_fsize; + u8 p_fstype; + u8 p_frag; + __le16 p_cpg; +}; + +struct disklabel { + __le32 d_magic; + __le16 d_type; + __le16 d_subtype; + u8 d_typename[16]; + u8 d_packname[16]; + __le32 d_secsize; + __le32 d_nsectors; + __le32 d_ntracks; + __le32 d_ncylinders; + __le32 d_secpercyl; + __le32 d_secprtunit; + __le16 
d_sparespertrack; + __le16 d_sparespercyl; + __le32 d_acylinders; + __le16 d_rpm; + __le16 d_interleave; + __le16 d_trackskew; + __le16 d_cylskew; + __le32 d_headswitch; + __le32 d_trkseek; + __le32 d_flags; + __le32 d_drivedata[5]; + __le32 d_spare[5]; + __le32 d_magic2; + __le16 d_checksum; + __le16 d_npartitions; + __le32 d_bbsize; + __le32 d_sbsize; + struct d_partition d_partitions[18]; +}; + +enum { + LINUX_RAID_PARTITION___2 = 253, +}; + +struct sgi_volume { + s8 name[8]; + __be32 block_num; + __be32 num_bytes; +}; + +struct sgi_partition { + __be32 num_blocks; + __be32 first_block; + __be32 type; +}; + +struct sgi_disklabel { + __be32 magic_mushroom; + __be16 root_part_num; + __be16 swap_part_num; + s8 boot_file[16]; + u8 _unused0[48]; + struct sgi_volume volume[15]; + struct sgi_partition partitions[16]; + __be32 csum; + __be32 _unused1; +}; + +enum { + SUN_WHOLE_DISK = 5, + LINUX_RAID_PARTITION___3 = 253, +}; + +struct sun_info { + __be16 id; + __be16 flags; +}; + +struct sun_vtoc { + __be32 version; + char volume[8]; + __be16 nparts; + struct sun_info infos[8]; + __be16 padding; + __be32 bootinfo[3]; + __be32 sanity; + __be32 reserved[10]; + __be32 timestamp[8]; +}; + +struct sun_partition { + __be32 start_cylinder; + __be32 num_sectors; +}; + +struct sun_disklabel { + unsigned char info[128]; + struct sun_vtoc vtoc; + __be32 write_reinstruct; + __be32 read_reinstruct; + unsigned char spare[148]; + __be16 rspeed; + __be16 pcylcount; + __be16 sparecyl; + __be16 obs1; + __be16 obs2; + __be16 ilfact; + __be16 ncyl; + __be16 nacyl; + __be16 ntrks; + __be16 nsect; + __be16 obs3; + __be16 obs4; + struct sun_partition partitions[8]; + __be16 magic; + __be16 csum; +}; + +struct pt_info { + s32 pi_nblocks; + u32 pi_blkoff; +}; + +struct ultrix_disklabel { + s32 pt_magic; + s32 pt_valid; + struct pt_info pt_part[8]; +}; + +struct _gpt_header { + __le64 signature; + __le32 revision; + __le32 header_size; + __le32 header_crc32; + __le32 reserved1; + __le64 my_lba; + __le64 alternate_lba; + __le64 first_usable_lba; + __le64 last_usable_lba; + efi_guid_t disk_guid; + __le64 partition_entry_lba; + __le32 num_partition_entries; + __le32 sizeof_partition_entry; + __le32 partition_entry_array_crc32; +} __attribute__((packed)); + +typedef struct _gpt_header gpt_header; + +struct _gpt_entry_attributes { + u64 required_to_function: 1; + u64 reserved: 47; + u64 type_guid_specific: 16; +}; + +typedef struct _gpt_entry_attributes gpt_entry_attributes; + +struct _gpt_entry { + efi_guid_t partition_type_guid; + efi_guid_t unique_partition_guid; + __le64 starting_lba; + __le64 ending_lba; + gpt_entry_attributes attributes; + __le16 partition_name[36]; +}; + +typedef struct _gpt_entry gpt_entry; + +struct _gpt_mbr_record { + u8 boot_indicator; + u8 start_head; + u8 start_sector; + u8 start_track; + u8 os_type; + u8 end_head; + u8 end_sector; + u8 end_track; + __le32 starting_lba; + __le32 size_in_lba; +}; + +typedef struct _gpt_mbr_record gpt_mbr_record; + +struct _legacy_mbr { + u8 boot_code[440]; + __le32 unique_mbr_signature; + __le16 unknown; + gpt_mbr_record partition_record[4]; + __le16 signature; +} __attribute__((packed)); + +typedef struct _legacy_mbr legacy_mbr; + +struct d_partition___2 { + __le32 p_res; + u8 p_fstype; + u8 p_res2[3]; + __le32 p_offset; + __le32 p_size; +}; + +struct disklabel___2 { + u8 d_reserved[270]; + struct d_partition___2 d_partitions[2]; + u8 d_blank[208]; + __le16 d_magic; +} __attribute__((packed)); + +struct volumeid { + u8 vid_unused[248]; + u8 vid_mac[8]; +}; + 
+struct dkconfig { + u8 ios_unused0[128]; + __be32 ios_slcblk; + __be16 ios_slccnt; + u8 ios_unused1[122]; +}; + +struct dkblk0 { + struct volumeid dk_vid; + struct dkconfig dk_ios; +}; + +struct slice { + __be32 nblocks; + __be32 blkoff; +}; + +struct rq_wait { + wait_queue_head_t wait; + atomic_t inflight; +}; + +struct rq_depth { + unsigned int max_depth; + int scale_step; + bool scaled_max; + unsigned int queue_depth; + unsigned int default_depth; +}; + +typedef bool acquire_inflight_cb_t(struct rq_wait *, void *); + +typedef void cleanup_cb_t(struct rq_wait *, void *); + +struct rq_qos_wait_data { + struct wait_queue_entry wq; + struct task_struct *task; + struct rq_wait *rqw; + acquire_inflight_cb_t *cb; + void *private_data; + bool got_token; +}; + +struct cdrom_device_ops; + +struct cdrom_device_info { + const struct cdrom_device_ops *ops; + struct list_head list; + struct gendisk *disk; + void *handle; + int mask; + int speed; + int capacity; + unsigned int options: 30; + unsigned int mc_flags: 2; + unsigned int vfs_events; + unsigned int ioctl_events; + int use_count; + char name[20]; + __u8 sanyo_slot: 2; + __u8 keeplocked: 1; + __u8 reserved: 5; + int cdda_method; + __u8 last_sense; + __u8 media_written; + short unsigned int mmc3_profile; + int for_data; + int (*exit)(struct cdrom_device_info *); + int mrw_mode_page; +}; + +struct scsi_sense_hdr { + u8 response_code; + u8 sense_key; + u8 asc; + u8 ascq; + u8 byte4; + u8 byte5; + u8 byte6; + u8 additional_length; +}; + +struct cdrom_msf0 { + __u8 minute; + __u8 second; + __u8 frame; +}; + +union cdrom_addr { + struct cdrom_msf0 msf; + int lba; +}; + +struct cdrom_multisession { + union cdrom_addr addr; + __u8 xa_flag; + __u8 addr_format; +}; + +struct cdrom_mcn { + __u8 medium_catalog_number[14]; +}; + +struct request_sense; + +struct cdrom_generic_command { + unsigned char cmd[12]; + unsigned char *buffer; + unsigned int buflen; + int stat; + struct request_sense *sense; + unsigned char data_direction; + int quiet; + int timeout; + union { + void *reserved[1]; + void *unused; + }; +}; + +struct request_sense { + __u8 error_code: 7; + __u8 valid: 1; + __u8 segment_number; + __u8 sense_key: 4; + __u8 reserved2: 1; + __u8 ili: 1; + __u8 reserved1: 2; + __u8 information[4]; + __u8 add_sense_len; + __u8 command_info[4]; + __u8 asc; + __u8 ascq; + __u8 fruc; + __u8 sks[3]; + __u8 asb[46]; +}; + +struct packet_command { + unsigned char cmd[12]; + unsigned char *buffer; + unsigned int buflen; + int stat; + struct scsi_sense_hdr *sshdr; + unsigned char data_direction; + int quiet; + int timeout; + void *reserved[1]; +}; + +struct cdrom_device_ops { + int (*open)(struct cdrom_device_info *, int); + void (*release)(struct cdrom_device_info *); + int (*drive_status)(struct cdrom_device_info *, int); + unsigned int (*check_events)(struct cdrom_device_info *, unsigned int, int); + int (*tray_move)(struct cdrom_device_info *, int); + int (*lock_door)(struct cdrom_device_info *, int); + int (*select_speed)(struct cdrom_device_info *, int); + int (*select_disc)(struct cdrom_device_info *, int); + int (*get_last_session)(struct cdrom_device_info *, struct cdrom_multisession *); + int (*get_mcn)(struct cdrom_device_info *, struct cdrom_mcn *); + int (*reset)(struct cdrom_device_info *); + int (*audio_ioctl)(struct cdrom_device_info *, unsigned int, void *); + const int capability; + int (*generic_packet)(struct cdrom_device_info *, struct packet_command *); +}; + +struct scsi_ioctl_command { + unsigned int inlen; + unsigned int outlen; + unsigned 
char data[0]; +}; + +enum scsi_device_event { + SDEV_EVT_MEDIA_CHANGE = 1, + SDEV_EVT_INQUIRY_CHANGE_REPORTED = 2, + SDEV_EVT_CAPACITY_CHANGE_REPORTED = 3, + SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED = 4, + SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED = 5, + SDEV_EVT_LUN_CHANGE_REPORTED = 6, + SDEV_EVT_ALUA_STATE_CHANGE_REPORTED = 7, + SDEV_EVT_POWER_ON_RESET_OCCURRED = 8, + SDEV_EVT_FIRST = 1, + SDEV_EVT_LAST = 8, + SDEV_EVT_MAXBITS = 9, +}; + +struct scsi_request { + unsigned char __cmd[16]; + unsigned char *cmd; + short unsigned int cmd_len; + int result; + unsigned int sense_len; + unsigned int resid_len; + int retries; + void *sense; +}; + +struct sg_io_hdr { + int interface_id; + int dxfer_direction; + unsigned char cmd_len; + unsigned char mx_sb_len; + short unsigned int iovec_count; + unsigned int dxfer_len; + void *dxferp; + unsigned char *cmdp; + void *sbp; + unsigned int timeout; + unsigned int flags; + int pack_id; + void *usr_ptr; + unsigned char status; + unsigned char masked_status; + unsigned char msg_status; + unsigned char sb_len_wr; + short unsigned int host_status; + short unsigned int driver_status; + int resid; + unsigned int duration; + unsigned int info; +}; + +struct compat_sg_io_hdr { + compat_int_t interface_id; + compat_int_t dxfer_direction; + unsigned char cmd_len; + unsigned char mx_sb_len; + short unsigned int iovec_count; + compat_uint_t dxfer_len; + compat_uint_t dxferp; + compat_uptr_t cmdp; + compat_uptr_t sbp; + compat_uint_t timeout; + compat_uint_t flags; + compat_int_t pack_id; + compat_uptr_t usr_ptr; + unsigned char status; + unsigned char masked_status; + unsigned char msg_status; + unsigned char sb_len_wr; + short unsigned int host_status; + short unsigned int driver_status; + compat_int_t resid; + compat_uint_t duration; + compat_uint_t info; +}; + +struct blk_cmd_filter { + long unsigned int read_ok[4]; + long unsigned int write_ok[4]; +}; + +struct compat_cdrom_generic_command { + unsigned char cmd[12]; + compat_caddr_t buffer; + compat_uint_t buflen; + compat_int_t stat; + compat_caddr_t sense; + unsigned char data_direction; + unsigned char pad[3]; + compat_int_t quiet; + compat_int_t timeout; + compat_caddr_t unused; +}; + +enum { + OMAX_SB_LEN = 16, +}; + +struct bsg_device { + struct request_queue *queue; + spinlock_t lock; + struct hlist_node dev_list; + refcount_t ref_count; + char name[20]; + int max_queue; +}; + +struct bsg_job; + +typedef int bsg_job_fn(struct bsg_job *); + +struct bsg_buffer { + unsigned int payload_len; + int sg_cnt; + struct scatterlist *sg_list; +}; + +struct bsg_job { + struct device *dev; + struct kref kref; + unsigned int timeout; + void *request; + void *reply; + unsigned int request_len; + unsigned int reply_len; + struct bsg_buffer request_payload; + struct bsg_buffer reply_payload; + int result; + unsigned int reply_payload_rcv_len; + struct request *bidi_rq; + struct bio *bidi_bio; + void *dd_data; +}; + +typedef enum blk_eh_timer_return bsg_timeout_fn(struct request *); + +struct bsg_set { + struct blk_mq_tag_set tag_set; + bsg_job_fn *job_fn; + bsg_timeout_fn *timeout_fn; +}; + +typedef struct blkcg_policy_data *blkcg_pol_alloc_cpd_fn(gfp_t); + +typedef void blkcg_pol_init_cpd_fn(struct blkcg_policy_data *); + +typedef void blkcg_pol_free_cpd_fn(struct blkcg_policy_data *); + +typedef void blkcg_pol_bind_cpd_fn(struct blkcg_policy_data *); + +typedef struct blkg_policy_data *blkcg_pol_alloc_pd_fn(gfp_t, struct request_queue *, struct blkcg *); + +typedef void blkcg_pol_init_pd_fn(struct blkg_policy_data *); + 
+typedef void blkcg_pol_online_pd_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_offline_pd_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_free_pd_fn(struct blkg_policy_data *); + +typedef void blkcg_pol_reset_pd_stats_fn(struct blkg_policy_data *); + +typedef size_t blkcg_pol_stat_pd_fn(struct blkg_policy_data *, char *, size_t); + +struct blkcg_policy { + int plid; + struct cftype *dfl_cftypes; + struct cftype *legacy_cftypes; + blkcg_pol_alloc_cpd_fn *cpd_alloc_fn; + blkcg_pol_init_cpd_fn *cpd_init_fn; + blkcg_pol_free_cpd_fn *cpd_free_fn; + blkcg_pol_bind_cpd_fn *cpd_bind_fn; + blkcg_pol_alloc_pd_fn *pd_alloc_fn; + blkcg_pol_init_pd_fn *pd_init_fn; + blkcg_pol_online_pd_fn *pd_online_fn; + blkcg_pol_offline_pd_fn *pd_offline_fn; + blkcg_pol_free_pd_fn *pd_free_fn; + blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn; + blkcg_pol_stat_pd_fn *pd_stat_fn; +}; + +struct blkg_conf_ctx { + struct gendisk *disk; + struct blkcg_gq *blkg; + char *body; +}; + +enum blkg_rwstat_type { + BLKG_RWSTAT_READ = 0, + BLKG_RWSTAT_WRITE = 1, + BLKG_RWSTAT_SYNC = 2, + BLKG_RWSTAT_ASYNC = 3, + BLKG_RWSTAT_DISCARD = 4, + BLKG_RWSTAT_NR = 5, + BLKG_RWSTAT_TOTAL = 5, +}; + +struct blkg_rwstat { + struct percpu_counter cpu_cnt[5]; + atomic64_t aux_cnt[5]; +}; + +struct blkg_rwstat_sample { + u64 cnt[5]; +}; + +struct throtl_service_queue { + struct throtl_service_queue *parent_sq; + struct list_head queued[2]; + unsigned int nr_queued[2]; + struct rb_root_cached pending_tree; + unsigned int nr_pending; + long unsigned int first_pending_disptime; + struct timer_list pending_timer; +}; + +struct latency_bucket { + long unsigned int total_latency; + int samples; +}; + +struct avg_latency_bucket { + long unsigned int latency; + bool valid; +}; + +struct throtl_data { + struct throtl_service_queue service_queue; + struct request_queue *queue; + unsigned int nr_queued[2]; + unsigned int throtl_slice; + struct work_struct dispatch_work; + unsigned int limit_index; + bool limit_valid[2]; + long unsigned int low_upgrade_time; + long unsigned int low_downgrade_time; + unsigned int scale; + struct latency_bucket tmp_buckets[18]; + struct avg_latency_bucket avg_buckets[18]; + struct latency_bucket *latency_buckets[2]; + long unsigned int last_calculate_time; + long unsigned int filtered_latency; + bool track_bio_latency; +}; + +struct throtl_grp; + +struct throtl_qnode { + struct list_head node; + struct bio_list bios; + struct throtl_grp *tg; +}; + +struct throtl_grp { + struct blkg_policy_data pd; + struct rb_node rb_node; + struct throtl_data *td; + struct throtl_service_queue service_queue; + struct throtl_qnode qnode_on_self[2]; + struct throtl_qnode qnode_on_parent[2]; + long unsigned int disptime; + unsigned int flags; + bool has_rules[2]; + uint64_t bps[4]; + uint64_t bps_conf[4]; + unsigned int iops[4]; + unsigned int iops_conf[4]; + uint64_t bytes_disp[2]; + unsigned int io_disp[2]; + long unsigned int last_low_overflow_time[2]; + uint64_t last_bytes_disp[2]; + unsigned int last_io_disp[2]; + long unsigned int last_check_time; + long unsigned int latency_target; + long unsigned int latency_target_conf; + long unsigned int slice_start[2]; + long unsigned int slice_end[2]; + long unsigned int last_finish_time; + long unsigned int checked_last_finish_time; + long unsigned int avg_idletime; + long unsigned int idletime_threshold; + long unsigned int idletime_threshold_conf; + unsigned int bio_cnt; + unsigned int bad_bio_cnt; + long unsigned int bio_cnt_reset_time; + struct blkg_rwstat stat_bytes; + 
struct blkg_rwstat stat_ios; +}; + +enum tg_state_flags { + THROTL_TG_PENDING = 1, + THROTL_TG_WAS_EMPTY = 2, +}; + +enum { + LIMIT_LOW = 0, + LIMIT_MAX = 1, + LIMIT_CNT = 2, +}; + +struct blk_iolatency { + struct rq_qos rqos; + struct timer_list timer; + atomic_t enabled; +}; + +struct iolatency_grp; + +struct child_latency_info { + spinlock_t lock; + u64 last_scale_event; + u64 scale_lat; + u64 nr_samples; + struct iolatency_grp *scale_grp; + atomic_t scale_cookie; +}; + +struct percentile_stats { + u64 total; + u64 missed; +}; + +struct latency_stat { + union { + struct percentile_stats ps; + struct blk_rq_stat rqs; + }; +}; + +struct iolatency_grp { + struct blkg_policy_data pd; + struct latency_stat *stats; + struct latency_stat cur_stat; + struct blk_iolatency *blkiolat; + struct rq_depth rq_depth; + struct rq_wait rq_wait; + atomic64_t window_start; + atomic_t scale_cookie; + u64 min_lat_nsec; + u64 cur_win_nsec; + u64 lat_avg; + u64 nr_samples; + bool ssd; + struct child_latency_info child_lat; +}; + +enum { + MILLION = 1000000, + MIN_PERIOD = 1000, + MAX_PERIOD = 1000000, + MARGIN_MIN_PCT = 10, + MARGIN_LOW_PCT = 20, + MARGIN_TARGET_PCT = 50, + INUSE_ADJ_STEP_PCT = 25, + TIMER_SLACK_PCT = 1, + WEIGHT_ONE = 65536, + VTIME_PER_SEC_SHIFT = 37, + VTIME_PER_SEC = 0, + VTIME_PER_USEC = 137438, + VTIME_PER_NSEC = 137, + VRATE_MIN_PPM = 10000, + VRATE_MAX_PPM = 100000000, + VRATE_MIN = 1374, + VRATE_CLAMP_ADJ_PCT = 4, + RQ_WAIT_BUSY_PCT = 5, + UNBUSY_THR_PCT = 75, + MIN_DELAY_THR_PCT = 500, + MAX_DELAY_THR_PCT = 25000, + MIN_DELAY = 250, + MAX_DELAY = 250000, + DFGV_USAGE_PCT = 50, + DFGV_PERIOD = 100000, + MAX_LAGGING_PERIODS = 10, + AUTOP_CYCLE_NSEC = 1410065408, + IOC_PAGE_SHIFT = 12, + IOC_PAGE_SIZE = 4096, + IOC_SECT_TO_PAGE_SHIFT = 3, + LCOEF_RANDIO_PAGES = 4096, +}; + +enum ioc_running { + IOC_IDLE = 0, + IOC_RUNNING = 1, + IOC_STOP = 2, +}; + +enum { + QOS_ENABLE = 0, + QOS_CTRL = 1, + NR_QOS_CTRL_PARAMS = 2, +}; + +enum { + QOS_RPPM = 0, + QOS_RLAT = 1, + QOS_WPPM = 2, + QOS_WLAT = 3, + QOS_MIN = 4, + QOS_MAX = 5, + NR_QOS_PARAMS = 6, +}; + +enum { + COST_CTRL = 0, + COST_MODEL = 1, + NR_COST_CTRL_PARAMS = 2, +}; + +enum { + I_LCOEF_RBPS = 0, + I_LCOEF_RSEQIOPS = 1, + I_LCOEF_RRANDIOPS = 2, + I_LCOEF_WBPS = 3, + I_LCOEF_WSEQIOPS = 4, + I_LCOEF_WRANDIOPS = 5, + NR_I_LCOEFS = 6, +}; + +enum { + LCOEF_RPAGE = 0, + LCOEF_RSEQIO = 1, + LCOEF_RRANDIO = 2, + LCOEF_WPAGE = 3, + LCOEF_WSEQIO = 4, + LCOEF_WRANDIO = 5, + NR_LCOEFS = 6, +}; + +enum { + AUTOP_INVALID = 0, + AUTOP_HDD = 1, + AUTOP_SSD_QD1 = 2, + AUTOP_SSD_DFL = 3, + AUTOP_SSD_FAST = 4, +}; + +struct ioc_params { + u32 qos[6]; + u64 i_lcoefs[6]; + u64 lcoefs[6]; + u32 too_fast_vrate_pct; + u32 too_slow_vrate_pct; +}; + +struct ioc_margins { + s64 min; + s64 low; + s64 target; +}; + +struct ioc_missed { + local_t nr_met; + local_t nr_missed; + u32 last_met; + u32 last_missed; +}; + +struct ioc_pcpu_stat { + struct ioc_missed missed[2]; + local64_t rq_wait_ns; + u64 last_rq_wait_ns; +}; + +struct ioc { + struct rq_qos rqos; + bool enabled; + struct ioc_params params; + struct ioc_margins margins; + u32 period_us; + u32 timer_slack_ns; + u64 vrate_min; + u64 vrate_max; + spinlock_t lock; + struct timer_list timer; + struct list_head active_iocgs; + struct ioc_pcpu_stat *pcpu_stat; + enum ioc_running running; + atomic64_t vtime_rate; + u64 vtime_base_rate; + s64 vtime_err; + seqcount_spinlock_t period_seqcount; + u64 period_at; + u64 period_at_vtime; + atomic64_t cur_period; + int busy_level; + bool weights_updated; + atomic_t 
hweight_gen; + u64 dfgv_period_at; + u64 dfgv_period_rem; + u64 dfgv_usage_us_sum; + u64 autop_too_fast_at; + u64 autop_too_slow_at; + int autop_idx; + bool user_qos_params: 1; + bool user_cost_model: 1; +}; + +struct iocg_pcpu_stat { + local64_t abs_vusage; +}; + +struct iocg_stat { + u64 usage_us; + u64 wait_us; + u64 indebt_us; + u64 indelay_us; +}; + +struct ioc_gq { + struct blkg_policy_data pd; + struct ioc *ioc; + u32 cfg_weight; + u32 weight; + u32 active; + u32 inuse; + u32 last_inuse; + s64 saved_margin; + sector_t cursor; + atomic64_t vtime; + atomic64_t done_vtime; + u64 abs_vdebt; + u64 delay; + u64 delay_at; + atomic64_t active_period; + struct list_head active_list; + u64 child_active_sum; + u64 child_inuse_sum; + u64 child_adjusted_sum; + int hweight_gen; + u32 hweight_active; + u32 hweight_inuse; + u32 hweight_donating; + u32 hweight_after_donation; + struct list_head walk_list; + struct list_head surplus_list; + struct wait_queue_head waitq; + struct hrtimer waitq_timer; + u64 activated_at; + struct iocg_pcpu_stat *pcpu_stat; + struct iocg_stat local_stat; + struct iocg_stat desc_stat; + struct iocg_stat last_stat; + u64 last_stat_abs_vusage; + u64 usage_delta_us; + u64 wait_since; + u64 indebt_since; + u64 indelay_since; + int level; + struct ioc_gq *ancestors[0]; +}; + +struct ioc_cgrp { + struct blkcg_policy_data cpd; + unsigned int dfl_weight; +}; + +struct ioc_now { + u64 now_ns; + u64 now; + u64 vnow; + u64 vrate; +}; + +struct iocg_wait { + struct wait_queue_entry wait; + struct bio *bio; + u64 abs_cost; + bool committed; +}; + +struct iocg_wake_ctx { + struct ioc_gq *iocg; + u32 hw_inuse; + s64 vbudget; +}; + +struct trace_event_raw_iocost_iocg_activate { + struct trace_entry ent; + u32 __data_loc_devname; + u32 __data_loc_cgroup; + u64 now; + u64 vnow; + u64 vrate; + u64 last_period; + u64 cur_period; + u64 vtime; + u32 weight; + u32 inuse; + u64 hweight_active; + u64 hweight_inuse; + char __data[0]; +}; + +struct trace_event_raw_iocg_inuse_update { + struct trace_entry ent; + u32 __data_loc_devname; + u32 __data_loc_cgroup; + u64 now; + u32 old_inuse; + u32 new_inuse; + u64 old_hweight_inuse; + u64 new_hweight_inuse; + char __data[0]; +}; + +struct trace_event_raw_iocost_ioc_vrate_adj { + struct trace_entry ent; + u32 __data_loc_devname; + u64 old_vrate; + u64 new_vrate; + int busy_level; + u32 read_missed_ppm; + u32 write_missed_ppm; + u32 rq_wait_pct; + int nr_lagging; + int nr_shortages; + char __data[0]; +}; + +struct trace_event_raw_iocost_iocg_forgive_debt { + struct trace_entry ent; + u32 __data_loc_devname; + u32 __data_loc_cgroup; + u64 now; + u64 vnow; + u32 usage_pct; + u64 old_debt; + u64 new_debt; + u64 old_delay; + u64 new_delay; + char __data[0]; +}; + +struct trace_event_data_offsets_iocost_iocg_activate { + u32 devname; + u32 cgroup; +}; + +struct trace_event_data_offsets_iocg_inuse_update { + u32 devname; + u32 cgroup; +}; + +struct trace_event_data_offsets_iocost_ioc_vrate_adj { + u32 devname; +}; + +struct trace_event_data_offsets_iocost_iocg_forgive_debt { + u32 devname; + u32 cgroup; +}; + +typedef void (*btf_trace_iocost_iocg_activate)(void *, struct ioc_gq *, const char *, struct ioc_now *, u64, u64, u64); + +typedef void (*btf_trace_iocost_inuse_shortage)(void *, struct ioc_gq *, const char *, struct ioc_now *, u32, u32, u64, u64); + +typedef void (*btf_trace_iocost_inuse_transfer)(void *, struct ioc_gq *, const char *, struct ioc_now *, u32, u32, u64, u64); + +typedef void (*btf_trace_iocost_inuse_adjust)(void *, struct ioc_gq *, const 
char *, struct ioc_now *, u32, u32, u64, u64); + +typedef void (*btf_trace_iocost_ioc_vrate_adj)(void *, struct ioc *, u64, u32 *, u32, int, int); + +typedef void (*btf_trace_iocost_iocg_forgive_debt)(void *, struct ioc_gq *, const char *, struct ioc_now *, u32, u64, u64, u64, u64); + +struct deadline_data { + struct rb_root sort_list[2]; + struct list_head fifo_list[2]; + struct request *next_rq[2]; + unsigned int batching; + unsigned int starved; + int fifo_expire[2]; + int fifo_batch; + int writes_starved; + int front_merges; + spinlock_t lock; + spinlock_t zone_lock; + struct list_head dispatch; +}; + +struct bfq_entity; + +struct bfq_service_tree { + struct rb_root active; + struct rb_root idle; + struct bfq_entity *first_idle; + struct bfq_entity *last_idle; + u64 vtime; + long unsigned int wsum; +}; + +struct bfq_sched_data; + +struct bfq_entity { + struct rb_node rb_node; + bool on_st_or_in_serv; + u64 start; + u64 finish; + struct rb_root *tree; + u64 min_start; + int service; + int budget; + int dev_weight; + int weight; + int new_weight; + int orig_weight; + struct bfq_entity *parent; + struct bfq_sched_data *my_sched_data; + struct bfq_sched_data *sched_data; + int prio_changed; + bool in_groups_with_pending_reqs; +}; + +struct bfq_sched_data { + struct bfq_entity *in_service_entity; + struct bfq_entity *next_in_service; + struct bfq_service_tree service_tree[3]; + long unsigned int bfq_class_idle_last_service; +}; + +struct bfq_weight_counter { + unsigned int weight; + unsigned int num_active; + struct rb_node weights_node; +}; + +struct bfq_ttime { + u64 last_end_request; + u64 ttime_total; + long unsigned int ttime_samples; + u64 ttime_mean; +}; + +struct bfq_data; + +struct bfq_io_cq; + +struct bfq_queue { + int ref; + struct bfq_data *bfqd; + short unsigned int ioprio; + short unsigned int ioprio_class; + short unsigned int new_ioprio; + short unsigned int new_ioprio_class; + u64 last_serv_time_ns; + unsigned int inject_limit; + long unsigned int decrease_time_jif; + struct bfq_queue *new_bfqq; + struct rb_node pos_node; + struct rb_root *pos_root; + struct rb_root sort_list; + struct request *next_rq; + int queued[2]; + int allocated; + int meta_pending; + struct list_head fifo; + struct bfq_entity entity; + struct bfq_weight_counter *weight_counter; + int max_budget; + long unsigned int budget_timeout; + int dispatched; + long unsigned int flags; + struct list_head bfqq_list; + struct bfq_ttime ttime; + u32 seek_history; + struct hlist_node burst_list_node; + sector_t last_request_pos; + unsigned int requests_within_timer; + pid_t pid; + struct bfq_io_cq *bic; + long unsigned int wr_cur_max_time; + long unsigned int soft_rt_next_start; + long unsigned int last_wr_start_finish; + unsigned int wr_coeff; + long unsigned int last_idle_bklogged; + long unsigned int service_from_backlogged; + long unsigned int service_from_wr; + long unsigned int wr_start_at_switch_to_srt; + long unsigned int split_time; + long unsigned int first_IO_time; + u32 max_service_rate; + struct bfq_queue *waker_bfqq; + struct hlist_node woken_list_node; + struct hlist_head woken_list; +}; + +struct bfq_group; + +struct bfq_data { + struct request_queue *queue; + struct list_head dispatch; + struct bfq_group *root_group; + struct rb_root_cached queue_weights_tree; + unsigned int num_groups_with_pending_reqs; + unsigned int busy_queues[3]; + int wr_busy_queues; + int queued; + int rq_in_driver; + bool nonrot_with_queueing; + int max_rq_in_driver; + int hw_tag_samples; + int hw_tag; + int 
budgets_assigned; + struct hrtimer idle_slice_timer; + struct bfq_queue *in_service_queue; + sector_t last_position; + sector_t in_serv_last_pos; + u64 last_completion; + struct bfq_queue *last_completed_rq_bfqq; + u64 last_empty_occupied_ns; + bool wait_dispatch; + struct request *waited_rq; + bool rqs_injected; + u64 first_dispatch; + u64 last_dispatch; + ktime_t last_budget_start; + ktime_t last_idling_start; + long unsigned int last_idling_start_jiffies; + int peak_rate_samples; + u32 sequential_samples; + u64 tot_sectors_dispatched; + u32 last_rq_max_size; + u64 delta_from_first; + u32 peak_rate; + int bfq_max_budget; + struct list_head active_list; + struct list_head idle_list; + u64 bfq_fifo_expire[2]; + unsigned int bfq_back_penalty; + unsigned int bfq_back_max; + u32 bfq_slice_idle; + int bfq_user_max_budget; + unsigned int bfq_timeout; + unsigned int bfq_requests_within_timer; + bool strict_guarantees; + long unsigned int last_ins_in_burst; + long unsigned int bfq_burst_interval; + int burst_size; + struct bfq_entity *burst_parent_entity; + long unsigned int bfq_large_burst_thresh; + bool large_burst; + struct hlist_head burst_list; + bool low_latency; + unsigned int bfq_wr_coeff; + unsigned int bfq_wr_max_time; + unsigned int bfq_wr_rt_max_time; + unsigned int bfq_wr_min_idle_time; + long unsigned int bfq_wr_min_inter_arr_async; + unsigned int bfq_wr_max_softrt_rate; + u64 rate_dur_prod; + struct bfq_queue oom_bfqq; + spinlock_t lock; + struct bfq_io_cq *bio_bic; + struct bfq_queue *bio_bfqq; + unsigned int word_depths[4]; +}; + +struct bfq_io_cq { + struct io_cq icq; + struct bfq_queue *bfqq[2]; + int ioprio; + uint64_t blkcg_serial_nr; + bool saved_has_short_ttime; + bool saved_IO_bound; + bool saved_in_large_burst; + bool was_in_burst_list; + unsigned int saved_weight; + long unsigned int saved_wr_coeff; + long unsigned int saved_last_wr_start_finish; + long unsigned int saved_wr_start_at_switch_to_srt; + unsigned int saved_wr_cur_max_time; + struct bfq_ttime saved_ttime; +}; + +struct bfqg_stats { + struct blkg_rwstat bytes; + struct blkg_rwstat ios; +}; + +struct bfq_group { + struct blkg_policy_data pd; + char blkg_path[128]; + int ref; + struct bfq_entity entity; + struct bfq_sched_data sched_data; + void *bfqd; + struct bfq_queue *async_bfqq[16]; + struct bfq_queue *async_idle_bfqq; + struct bfq_entity *my_entity; + int active_entities; + struct rb_root rq_pos_tree; + struct bfqg_stats stats; +}; + +enum bfqq_state_flags { + BFQQF_just_created = 0, + BFQQF_busy = 1, + BFQQF_wait_request = 2, + BFQQF_non_blocking_wait_rq = 3, + BFQQF_fifo_expire = 4, + BFQQF_has_short_ttime = 5, + BFQQF_sync = 6, + BFQQF_IO_bound = 7, + BFQQF_in_large_burst = 8, + BFQQF_softrt_update = 9, + BFQQF_coop = 10, + BFQQF_split_coop = 11, + BFQQF_has_waker = 12, +}; + +enum bfqq_expiration { + BFQQE_TOO_IDLE = 0, + BFQQE_BUDGET_TIMEOUT = 1, + BFQQE_BUDGET_EXHAUSTED = 2, + BFQQE_NO_MORE_REQUESTS = 3, + BFQQE_PREEMPTED = 4, +}; + +struct bfq_group_data { + struct blkcg_policy_data pd; + unsigned int weight; +}; + +enum bip_flags { + BIP_BLOCK_INTEGRITY = 1, + BIP_MAPPED_INTEGRITY = 2, + BIP_CTRL_NOCHECK = 4, + BIP_DISK_NOCHECK = 8, + BIP_IP_CHECKSUM = 16, +}; + +enum blk_integrity_flags { + BLK_INTEGRITY_VERIFY = 1, + BLK_INTEGRITY_GENERATE = 2, + BLK_INTEGRITY_DEVICE_CAPABLE = 4, + BLK_INTEGRITY_IP_CHECKSUM = 8, +}; + +struct integrity_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct blk_integrity *, char *); + ssize_t (*store)(struct blk_integrity *, const char *, size_t); +}; + 
+enum t10_dif_type { + T10_PI_TYPE0_PROTECTION = 0, + T10_PI_TYPE1_PROTECTION = 1, + T10_PI_TYPE2_PROTECTION = 2, + T10_PI_TYPE3_PROTECTION = 3, +}; + +struct t10_pi_tuple { + __be16 guard_tag; + __be16 app_tag; + __be32 ref_tag; +}; + +typedef __be16 csum_fn(void *, unsigned int); + +struct virtio_device_id { + __u32 device; + __u32 vendor; +}; + +struct virtio_device; + +struct virtqueue { + struct list_head list; + void (*callback)(struct virtqueue *); + const char *name; + struct virtio_device *vdev; + unsigned int index; + unsigned int num_free; + void *priv; +}; + +struct vringh_config_ops; + +struct virtio_config_ops; + +struct virtio_device { + int index; + bool failed; + bool config_enabled; + bool config_change_pending; + spinlock_t config_lock; + struct device dev; + struct virtio_device_id id; + const struct virtio_config_ops *config; + const struct vringh_config_ops *vringh_config; + struct list_head vqs; + u64 features; + void *priv; +}; + +typedef void vq_callback_t(struct virtqueue *); + +struct irq_affinity___2; + +struct virtio_shm_region; + +struct virtio_config_ops { + void (*get)(struct virtio_device *, unsigned int, void *, unsigned int); + void (*set)(struct virtio_device *, unsigned int, const void *, unsigned int); + u32 (*generation)(struct virtio_device *); + u8 (*get_status)(struct virtio_device *); + void (*set_status)(struct virtio_device *, u8); + void (*reset)(struct virtio_device *); + int (*find_vqs)(struct virtio_device *, unsigned int, struct virtqueue **, vq_callback_t **, const char * const *, const bool *, struct irq_affinity___2 *); + void (*del_vqs)(struct virtio_device *); + u64 (*get_features)(struct virtio_device *); + int (*finalize_features)(struct virtio_device *); + const char * (*bus_name)(struct virtio_device *); + int (*set_vq_affinity)(struct virtqueue *, const struct cpumask *); + const struct cpumask * (*get_vq_affinity)(struct virtio_device *, int); + bool (*get_shm_region)(struct virtio_device *, struct virtio_shm_region *, u8); +}; + +struct virtio_shm_region { + u64 addr; + u64 len; +}; + +struct irq_poll; + +typedef int irq_poll_fn(struct irq_poll *, int); + +struct irq_poll { + struct list_head list; + long unsigned int state; + int weight; + irq_poll_fn *poll; +}; + +struct dim_sample { + ktime_t time; + u32 pkt_ctr; + u32 byte_ctr; + u16 event_ctr; + u32 comp_ctr; +}; + +struct dim_stats { + int ppms; + int bpms; + int epms; + int cpms; + int cpe_ratio; +}; + +struct dim { + u8 state; + struct dim_stats prev_stats; + struct dim_sample start_sample; + struct dim_sample measuring_sample; + struct work_struct work; + void *priv; + u8 profile_ix; + u8 mode; + u8 tune_state; + u8 steps_right; + u8 steps_left; + u8 tired; +}; + +enum rdma_nl_counter_mode { + RDMA_COUNTER_MODE_NONE = 0, + RDMA_COUNTER_MODE_AUTO = 1, + RDMA_COUNTER_MODE_MANUAL = 2, + RDMA_COUNTER_MODE_MAX = 3, +}; + +enum rdma_nl_counter_mask { + RDMA_COUNTER_MASK_QP_TYPE = 1, + RDMA_COUNTER_MASK_PID = 2, +}; + +enum rdma_restrack_type { + RDMA_RESTRACK_PD = 0, + RDMA_RESTRACK_CQ = 1, + RDMA_RESTRACK_QP = 2, + RDMA_RESTRACK_CM_ID = 3, + RDMA_RESTRACK_MR = 4, + RDMA_RESTRACK_CTX = 5, + RDMA_RESTRACK_COUNTER = 6, + RDMA_RESTRACK_MAX = 7, +}; + +struct rdma_restrack_entry { + bool valid; + struct kref kref; + struct completion comp; + struct task_struct *task; + const char *kern_name; + enum rdma_restrack_type type; + bool user; + u32 id; +}; + +struct rdma_link_ops { + struct list_head list; + const char *type; + int (*newlink)(const char *, struct net_device *); +}; + 
+struct auto_mode_param { + int qp_type; +}; + +struct rdma_counter_mode { + enum rdma_nl_counter_mode mode; + enum rdma_nl_counter_mask mask; + struct auto_mode_param param; +}; + +struct rdma_hw_stats; + +struct rdma_port_counter { + struct rdma_counter_mode mode; + struct rdma_hw_stats *hstats; + unsigned int num_counters; + struct mutex lock; +}; + +struct rdma_hw_stats { + struct mutex lock; + long unsigned int timestamp; + long unsigned int lifespan; + const char * const *names; + int num_counters; + u64 value[0]; +}; + +struct ib_device; + +struct rdma_counter { + struct rdma_restrack_entry res; + struct ib_device *device; + uint32_t id; + struct kref kref; + struct rdma_counter_mode mode; + struct mutex lock; + struct rdma_hw_stats *stats; + u8 port; +}; + +enum rdma_driver_id { + RDMA_DRIVER_UNKNOWN = 0, + RDMA_DRIVER_MLX5 = 1, + RDMA_DRIVER_MLX4 = 2, + RDMA_DRIVER_CXGB3 = 3, + RDMA_DRIVER_CXGB4 = 4, + RDMA_DRIVER_MTHCA = 5, + RDMA_DRIVER_BNXT_RE = 6, + RDMA_DRIVER_OCRDMA = 7, + RDMA_DRIVER_NES = 8, + RDMA_DRIVER_I40IW = 9, + RDMA_DRIVER_VMW_PVRDMA = 10, + RDMA_DRIVER_QEDR = 11, + RDMA_DRIVER_HNS = 12, + RDMA_DRIVER_USNIC = 13, + RDMA_DRIVER_RXE = 14, + RDMA_DRIVER_HFI1 = 15, + RDMA_DRIVER_QIB = 16, + RDMA_DRIVER_EFA = 17, + RDMA_DRIVER_SIW = 18, +}; + +enum ib_cq_notify_flags { + IB_CQ_SOLICITED = 1, + IB_CQ_NEXT_COMP = 2, + IB_CQ_SOLICITED_MASK = 3, + IB_CQ_REPORT_MISSED_EVENTS = 4, +}; + +struct ib_mad; + +enum rdma_link_layer { + IB_LINK_LAYER_UNSPECIFIED = 0, + IB_LINK_LAYER_INFINIBAND = 1, + IB_LINK_LAYER_ETHERNET = 2, +}; + +enum rdma_netdev_t { + RDMA_NETDEV_OPA_VNIC = 0, + RDMA_NETDEV_IPOIB = 1, +}; + +enum ib_srq_attr_mask { + IB_SRQ_MAX_WR = 1, + IB_SRQ_LIMIT = 2, +}; + +enum ib_mr_type { + IB_MR_TYPE_MEM_REG = 0, + IB_MR_TYPE_SG_GAPS = 1, + IB_MR_TYPE_DM = 2, + IB_MR_TYPE_USER = 3, + IB_MR_TYPE_DMA = 4, + IB_MR_TYPE_INTEGRITY = 5, +}; + +enum ib_uverbs_advise_mr_advice { + IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH = 0, + IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE = 1, + IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT = 2, +}; + +struct uverbs_attr_bundle; + +struct rdma_cm_id; + +struct iw_cm_id; + +struct iw_cm_conn_param; + +struct ib_qp; + +struct ib_send_wr; + +struct ib_recv_wr; + +struct ib_cq; + +struct ib_wc; + +struct ib_srq; + +struct ib_grh; + +struct ib_device_attr; + +struct ib_udata; + +struct ib_device_modify; + +struct ib_port_attr; + +struct ib_port_modify; + +struct ib_port_immutable; + +struct rdma_netdev_alloc_params; + +union ib_gid; + +struct ib_gid_attr; + +struct ib_ucontext; + +struct rdma_user_mmap_entry; + +struct ib_pd; + +struct ib_ah; + +struct rdma_ah_init_attr; + +struct rdma_ah_attr; + +struct ib_srq_init_attr; + +struct ib_srq_attr; + +struct ib_qp_init_attr; + +struct ib_qp_attr; + +struct ib_cq_init_attr; + +struct ib_mr; + +struct ib_sge; + +struct ib_mr_status; + +struct ib_mw; + +struct ib_xrcd; + +struct ib_flow; + +struct ib_flow_attr; + +struct ib_flow_action; + +struct ib_flow_action_attrs_esp; + +struct ib_wq; + +struct ib_wq_init_attr; + +struct ib_wq_attr; + +struct ib_rwq_ind_table; + +struct ib_rwq_ind_table_init_attr; + +struct ib_dm; + +struct ib_dm_alloc_attr; + +struct ib_dm_mr_attr; + +struct ib_counters; + +struct ib_counters_read_attr; + +struct ib_device_ops { + struct module *owner; + enum rdma_driver_id driver_id; + u32 uverbs_abi_ver; + unsigned int uverbs_no_driver_id_binding: 1; + int (*post_send)(struct ib_qp *, const struct ib_send_wr *, const struct ib_send_wr **); + int (*post_recv)(struct ib_qp *, const struct 
ib_recv_wr *, const struct ib_recv_wr **); + void (*drain_rq)(struct ib_qp *); + void (*drain_sq)(struct ib_qp *); + int (*poll_cq)(struct ib_cq *, int, struct ib_wc *); + int (*peek_cq)(struct ib_cq *, int); + int (*req_notify_cq)(struct ib_cq *, enum ib_cq_notify_flags); + int (*req_ncomp_notif)(struct ib_cq *, int); + int (*post_srq_recv)(struct ib_srq *, const struct ib_recv_wr *, const struct ib_recv_wr **); + int (*process_mad)(struct ib_device *, int, u8, const struct ib_wc *, const struct ib_grh *, const struct ib_mad *, struct ib_mad *, size_t *, u16 *); + int (*query_device)(struct ib_device *, struct ib_device_attr *, struct ib_udata *); + int (*modify_device)(struct ib_device *, int, struct ib_device_modify *); + void (*get_dev_fw_str)(struct ib_device *, char *); + const struct cpumask * (*get_vector_affinity)(struct ib_device *, int); + int (*query_port)(struct ib_device *, u8, struct ib_port_attr *); + int (*modify_port)(struct ib_device *, u8, int, struct ib_port_modify *); + int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *); + enum rdma_link_layer (*get_link_layer)(struct ib_device *, u8); + struct net_device * (*get_netdev)(struct ib_device *, u8); + struct net_device * (*alloc_rdma_netdev)(struct ib_device *, u8, enum rdma_netdev_t, const char *, unsigned char, void (*)(struct net_device *)); + int (*rdma_netdev_get_params)(struct ib_device *, u8, enum rdma_netdev_t, struct rdma_netdev_alloc_params *); + int (*query_gid)(struct ib_device *, u8, int, union ib_gid *); + int (*add_gid)(const struct ib_gid_attr *, void **); + int (*del_gid)(const struct ib_gid_attr *, void **); + int (*query_pkey)(struct ib_device *, u8, u16, u16 *); + int (*alloc_ucontext)(struct ib_ucontext *, struct ib_udata *); + void (*dealloc_ucontext)(struct ib_ucontext *); + int (*mmap)(struct ib_ucontext *, struct vm_area_struct *); + void (*mmap_free)(struct rdma_user_mmap_entry *); + void (*disassociate_ucontext)(struct ib_ucontext *); + int (*alloc_pd)(struct ib_pd *, struct ib_udata *); + int (*dealloc_pd)(struct ib_pd *, struct ib_udata *); + int (*create_ah)(struct ib_ah *, struct rdma_ah_init_attr *, struct ib_udata *); + int (*modify_ah)(struct ib_ah *, struct rdma_ah_attr *); + int (*query_ah)(struct ib_ah *, struct rdma_ah_attr *); + int (*destroy_ah)(struct ib_ah *, u32); + int (*create_srq)(struct ib_srq *, struct ib_srq_init_attr *, struct ib_udata *); + int (*modify_srq)(struct ib_srq *, struct ib_srq_attr *, enum ib_srq_attr_mask, struct ib_udata *); + int (*query_srq)(struct ib_srq *, struct ib_srq_attr *); + int (*destroy_srq)(struct ib_srq *, struct ib_udata *); + struct ib_qp * (*create_qp)(struct ib_pd *, struct ib_qp_init_attr *, struct ib_udata *); + int (*modify_qp)(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *); + int (*query_qp)(struct ib_qp *, struct ib_qp_attr *, int, struct ib_qp_init_attr *); + int (*destroy_qp)(struct ib_qp *, struct ib_udata *); + int (*create_cq)(struct ib_cq *, const struct ib_cq_init_attr *, struct ib_udata *); + int (*modify_cq)(struct ib_cq *, u16, u16); + int (*destroy_cq)(struct ib_cq *, struct ib_udata *); + int (*resize_cq)(struct ib_cq *, int, struct ib_udata *); + struct ib_mr * (*get_dma_mr)(struct ib_pd *, int); + struct ib_mr * (*reg_user_mr)(struct ib_pd *, u64, u64, u64, int, struct ib_udata *); + int (*rereg_user_mr)(struct ib_mr *, int, u64, u64, u64, int, struct ib_pd *, struct ib_udata *); + int (*dereg_mr)(struct ib_mr *, struct ib_udata *); + struct ib_mr * (*alloc_mr)(struct ib_pd *, enum 
ib_mr_type, u32); + struct ib_mr * (*alloc_mr_integrity)(struct ib_pd *, u32, u32); + int (*advise_mr)(struct ib_pd *, enum ib_uverbs_advise_mr_advice, u32, struct ib_sge *, u32, struct uverbs_attr_bundle *); + int (*map_mr_sg)(struct ib_mr *, struct scatterlist *, int, unsigned int *); + int (*check_mr_status)(struct ib_mr *, u32, struct ib_mr_status *); + int (*alloc_mw)(struct ib_mw *, struct ib_udata *); + int (*dealloc_mw)(struct ib_mw *); + int (*attach_mcast)(struct ib_qp *, union ib_gid *, u16); + int (*detach_mcast)(struct ib_qp *, union ib_gid *, u16); + int (*alloc_xrcd)(struct ib_xrcd *, struct ib_udata *); + int (*dealloc_xrcd)(struct ib_xrcd *, struct ib_udata *); + struct ib_flow * (*create_flow)(struct ib_qp *, struct ib_flow_attr *, struct ib_udata *); + int (*destroy_flow)(struct ib_flow *); + struct ib_flow_action * (*create_flow_action_esp)(struct ib_device *, const struct ib_flow_action_attrs_esp *, struct uverbs_attr_bundle *); + int (*destroy_flow_action)(struct ib_flow_action *); + int (*modify_flow_action_esp)(struct ib_flow_action *, const struct ib_flow_action_attrs_esp *, struct uverbs_attr_bundle *); + int (*set_vf_link_state)(struct ib_device *, int, u8, int); + int (*get_vf_config)(struct ib_device *, int, u8, struct ifla_vf_info *); + int (*get_vf_stats)(struct ib_device *, int, u8, struct ifla_vf_stats *); + int (*get_vf_guid)(struct ib_device *, int, u8, struct ifla_vf_guid *, struct ifla_vf_guid *); + int (*set_vf_guid)(struct ib_device *, int, u8, u64, int); + struct ib_wq * (*create_wq)(struct ib_pd *, struct ib_wq_init_attr *, struct ib_udata *); + int (*destroy_wq)(struct ib_wq *, struct ib_udata *); + int (*modify_wq)(struct ib_wq *, struct ib_wq_attr *, u32, struct ib_udata *); + int (*create_rwq_ind_table)(struct ib_rwq_ind_table *, struct ib_rwq_ind_table_init_attr *, struct ib_udata *); + int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *); + struct ib_dm * (*alloc_dm)(struct ib_device *, struct ib_ucontext *, struct ib_dm_alloc_attr *, struct uverbs_attr_bundle *); + int (*dealloc_dm)(struct ib_dm *, struct uverbs_attr_bundle *); + struct ib_mr * (*reg_dm_mr)(struct ib_pd *, struct ib_dm *, struct ib_dm_mr_attr *, struct uverbs_attr_bundle *); + int (*create_counters)(struct ib_counters *, struct uverbs_attr_bundle *); + int (*destroy_counters)(struct ib_counters *); + int (*read_counters)(struct ib_counters *, struct ib_counters_read_attr *, struct uverbs_attr_bundle *); + int (*map_mr_sg_pi)(struct ib_mr *, struct scatterlist *, int, unsigned int *, struct scatterlist *, int, unsigned int *); + struct rdma_hw_stats * (*alloc_hw_stats)(struct ib_device *, u8); + int (*get_hw_stats)(struct ib_device *, struct rdma_hw_stats *, u8, int); + int (*init_port)(struct ib_device *, u8, struct kobject *); + int (*fill_res_mr_entry)(struct sk_buff *, struct ib_mr *); + int (*fill_res_mr_entry_raw)(struct sk_buff *, struct ib_mr *); + int (*fill_res_cq_entry)(struct sk_buff *, struct ib_cq *); + int (*fill_res_cq_entry_raw)(struct sk_buff *, struct ib_cq *); + int (*fill_res_qp_entry)(struct sk_buff *, struct ib_qp *); + int (*fill_res_qp_entry_raw)(struct sk_buff *, struct ib_qp *); + int (*fill_res_cm_id_entry)(struct sk_buff *, struct rdma_cm_id *); + int (*enable_driver)(struct ib_device *); + void (*dealloc_driver)(struct ib_device *); + void (*iw_add_ref)(struct ib_qp *); + void (*iw_rem_ref)(struct ib_qp *); + struct ib_qp * (*iw_get_qp)(struct ib_device *, int); + int (*iw_connect)(struct iw_cm_id *, struct iw_cm_conn_param *); + int 
(*iw_accept)(struct iw_cm_id *, struct iw_cm_conn_param *); + int (*iw_reject)(struct iw_cm_id *, const void *, u8); + int (*iw_create_listen)(struct iw_cm_id *, int); + int (*iw_destroy_listen)(struct iw_cm_id *); + int (*counter_bind_qp)(struct rdma_counter *, struct ib_qp *); + int (*counter_unbind_qp)(struct ib_qp *); + int (*counter_dealloc)(struct rdma_counter *); + struct rdma_hw_stats * (*counter_alloc_stats)(struct rdma_counter *); + int (*counter_update_stats)(struct rdma_counter *); + int (*fill_stat_mr_entry)(struct sk_buff *, struct ib_mr *); + int (*query_ucontext)(struct ib_ucontext *, struct uverbs_attr_bundle *); + size_t size_ib_ah; + size_t size_ib_counters; + size_t size_ib_cq; + size_t size_ib_mw; + size_t size_ib_pd; + size_t size_ib_rwq_ind_table; + size_t size_ib_srq; + size_t size_ib_ucontext; + size_t size_ib_xrcd; +}; + +struct ib_core_device { + struct device dev; + possible_net_t rdma_net; + struct kobject *ports_kobj; + struct list_head port_list; + struct ib_device *owner; +}; + +enum ib_atomic_cap { + IB_ATOMIC_NONE = 0, + IB_ATOMIC_HCA = 1, + IB_ATOMIC_GLOB = 2, +}; + +struct ib_odp_caps { + uint64_t general_caps; + struct { + uint32_t rc_odp_caps; + uint32_t uc_odp_caps; + uint32_t ud_odp_caps; + uint32_t xrc_odp_caps; + } per_transport_caps; +}; + +struct ib_rss_caps { + u32 supported_qpts; + u32 max_rwq_indirection_tables; + u32 max_rwq_indirection_table_size; +}; + +struct ib_tm_caps { + u32 max_rndv_hdr_size; + u32 max_num_tags; + u32 flags; + u32 max_ops; + u32 max_sge; +}; + +struct ib_cq_caps { + u16 max_cq_moderation_count; + u16 max_cq_moderation_period; +}; + +struct ib_device_attr { + u64 fw_ver; + __be64 sys_image_guid; + u64 max_mr_size; + u64 page_size_cap; + u32 vendor_id; + u32 vendor_part_id; + u32 hw_ver; + int max_qp; + int max_qp_wr; + u64 device_cap_flags; + int max_send_sge; + int max_recv_sge; + int max_sge_rd; + int max_cq; + int max_cqe; + int max_mr; + int max_pd; + int max_qp_rd_atom; + int max_ee_rd_atom; + int max_res_rd_atom; + int max_qp_init_rd_atom; + int max_ee_init_rd_atom; + enum ib_atomic_cap atomic_cap; + enum ib_atomic_cap masked_atomic_cap; + int max_ee; + int max_rdd; + int max_mw; + int max_raw_ipv6_qp; + int max_raw_ethy_qp; + int max_mcast_grp; + int max_mcast_qp_attach; + int max_total_mcast_qp_attach; + int max_ah; + int max_srq; + int max_srq_wr; + int max_srq_sge; + unsigned int max_fast_reg_page_list_len; + unsigned int max_pi_fast_reg_page_list_len; + u16 max_pkeys; + u8 local_ca_ack_delay; + int sig_prot_cap; + int sig_guard_cap; + struct ib_odp_caps odp_caps; + uint64_t timestamp_mask; + uint64_t hca_core_clock; + struct ib_rss_caps rss_caps; + u32 max_wq_type_rq; + u32 raw_packet_caps; + struct ib_tm_caps tm_caps; + struct ib_cq_caps cq_caps; + u64 max_dm_size; + u32 max_sgl_rd; +}; + +struct rdma_restrack_root; + +struct uapi_definition; + +struct ib_port_data; + +struct ib_device { + struct device *dma_device; + struct ib_device_ops ops; + char name[64]; + struct callback_head callback_head; + struct list_head event_handler_list; + struct rw_semaphore event_handler_rwsem; + spinlock_t qp_open_list_lock; + struct rw_semaphore client_data_rwsem; + struct xarray client_data; + struct mutex unregistration_lock; + rwlock_t cache_lock; + struct ib_port_data *port_data; + int num_comp_vectors; + union { + struct device dev; + struct ib_core_device coredev; + }; + const struct attribute_group *groups[3]; + u64 uverbs_cmd_mask; + u64 uverbs_ex_cmd_mask; + char node_desc[64]; + __be64 node_guid; + u32 
local_dma_lkey; + u16 is_switch: 1; + u16 kverbs_provider: 1; + u16 use_cq_dim: 1; + u8 node_type; + u8 phys_port_cnt; + struct ib_device_attr attrs; + struct attribute_group *hw_stats_ag; + struct rdma_hw_stats *hw_stats; + u32 index; + spinlock_t cq_pools_lock; + struct list_head cq_pools[3]; + struct rdma_restrack_root *res; + const struct uapi_definition *driver_def; + refcount_t refcount; + struct completion unreg_completion; + struct work_struct unregistration_work; + const struct rdma_link_ops *link_ops; + struct mutex compat_devs_mutex; + struct xarray compat_devs; + char iw_ifname[16]; + u32 iw_driver_flags; + u32 lag_flags; +}; + +enum ib_signature_type { + IB_SIG_TYPE_NONE = 0, + IB_SIG_TYPE_T10_DIF = 1, +}; + +enum ib_t10_dif_bg_type { + IB_T10DIF_CRC = 0, + IB_T10DIF_CSUM = 1, +}; + +struct ib_t10_dif_domain { + enum ib_t10_dif_bg_type bg_type; + u16 pi_interval; + u16 bg; + u16 app_tag; + u32 ref_tag; + bool ref_remap; + bool app_escape; + bool ref_escape; + u16 apptag_check_mask; +}; + +struct ib_sig_domain { + enum ib_signature_type sig_type; + union { + struct ib_t10_dif_domain dif; + } sig; +}; + +struct ib_sig_attrs { + u8 check_mask; + struct ib_sig_domain mem; + struct ib_sig_domain wire; + int meta_length; +}; + +enum ib_sig_err_type { + IB_SIG_BAD_GUARD = 0, + IB_SIG_BAD_REFTAG = 1, + IB_SIG_BAD_APPTAG = 2, +}; + +struct ib_sig_err { + enum ib_sig_err_type err_type; + u32 expected; + u32 actual; + u64 sig_err_offset; + u32 key; +}; + +enum ib_uverbs_flow_action_esp_keymat { + IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM = 0, +}; + +struct ib_uverbs_flow_action_esp_keymat_aes_gcm { + __u64 iv; + __u32 iv_algo; + __u32 salt; + __u32 icv_len; + __u32 key_len; + __u32 aes_key[8]; +}; + +enum ib_uverbs_flow_action_esp_replay { + IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE = 0, + IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP = 1, +}; + +struct ib_uverbs_flow_action_esp_replay_bmp { + __u32 size; +}; + +union ib_gid { + u8 raw[16]; + struct { + __be64 subnet_prefix; + __be64 interface_id; + } global; +}; + +enum ib_gid_type { + IB_GID_TYPE_IB = 0, + IB_GID_TYPE_ROCE = 1, + IB_GID_TYPE_ROCE_UDP_ENCAP = 2, + IB_GID_TYPE_SIZE = 3, +}; + +struct ib_gid_attr { + struct net_device *ndev; + struct ib_device *device; + union ib_gid gid; + enum ib_gid_type gid_type; + u16 index; + u8 port_num; +}; + +struct ib_cq_init_attr { + unsigned int cqe; + u32 comp_vector; + u32 flags; +}; + +struct ib_dm_mr_attr { + u64 length; + u64 offset; + u32 access_flags; +}; + +struct ib_dm_alloc_attr { + u64 length; + u32 alignment; + u32 flags; +}; + +enum ib_mtu { + IB_MTU_256 = 1, + IB_MTU_512 = 2, + IB_MTU_1024 = 3, + IB_MTU_2048 = 4, + IB_MTU_4096 = 5, +}; + +enum ib_port_state { + IB_PORT_NOP = 0, + IB_PORT_DOWN = 1, + IB_PORT_INIT = 2, + IB_PORT_ARMED = 3, + IB_PORT_ACTIVE = 4, + IB_PORT_ACTIVE_DEFER = 5, +}; + +struct ib_port_attr { + u64 subnet_prefix; + enum ib_port_state state; + enum ib_mtu max_mtu; + enum ib_mtu active_mtu; + u32 phys_mtu; + int gid_tbl_len; + unsigned int ip_gids: 1; + u32 port_cap_flags; + u32 max_msg_sz; + u32 bad_pkey_cntr; + u32 qkey_viol_cntr; + u16 pkey_tbl_len; + u32 sm_lid; + u32 lid; + u8 lmc; + u8 max_vl_num; + u8 sm_sl; + u8 subnet_timeout; + u8 init_type_reply; + u8 active_width; + u16 active_speed; + u8 phys_state; + u16 port_cap_flags2; +}; + +struct ib_device_modify { + u64 sys_image_guid; + char node_desc[64]; +}; + +struct ib_port_modify { + u32 set_port_cap_mask; + u32 clr_port_cap_mask; + u8 init_type; +}; + +enum ib_event_type { + IB_EVENT_CQ_ERR = 0, + 
IB_EVENT_QP_FATAL = 1, + IB_EVENT_QP_REQ_ERR = 2, + IB_EVENT_QP_ACCESS_ERR = 3, + IB_EVENT_COMM_EST = 4, + IB_EVENT_SQ_DRAINED = 5, + IB_EVENT_PATH_MIG = 6, + IB_EVENT_PATH_MIG_ERR = 7, + IB_EVENT_DEVICE_FATAL = 8, + IB_EVENT_PORT_ACTIVE = 9, + IB_EVENT_PORT_ERR = 10, + IB_EVENT_LID_CHANGE = 11, + IB_EVENT_PKEY_CHANGE = 12, + IB_EVENT_SM_CHANGE = 13, + IB_EVENT_SRQ_ERR = 14, + IB_EVENT_SRQ_LIMIT_REACHED = 15, + IB_EVENT_QP_LAST_WQE_REACHED = 16, + IB_EVENT_CLIENT_REREGISTER = 17, + IB_EVENT_GID_CHANGE = 18, + IB_EVENT_WQ_FATAL = 19, +}; + +struct ib_ucq_object; + +typedef void (*ib_comp_handler)(struct ib_cq *, void *); + +struct ib_event; + +struct ib_cq { + struct ib_device *device; + struct ib_ucq_object *uobject; + ib_comp_handler comp_handler; + void (*event_handler)(struct ib_event *, void *); + void *cq_context; + int cqe; + unsigned int cqe_used; + atomic_t usecnt; + enum ib_poll_context poll_ctx; + struct ib_wc *wc; + struct list_head pool_entry; + union { + struct irq_poll iop; + struct work_struct work; + }; + struct workqueue_struct *comp_wq; + struct dim *dim; + ktime_t timestamp; + u8 interrupt: 1; + u8 shared: 1; + unsigned int comp_vector; + struct rdma_restrack_entry res; +}; + +struct ib_uqp_object; + +enum ib_qp_type { + IB_QPT_SMI = 0, + IB_QPT_GSI = 1, + IB_QPT_RC = 2, + IB_QPT_UC = 3, + IB_QPT_UD = 4, + IB_QPT_RAW_IPV6 = 5, + IB_QPT_RAW_ETHERTYPE = 6, + IB_QPT_RAW_PACKET = 8, + IB_QPT_XRC_INI = 9, + IB_QPT_XRC_TGT = 10, + IB_QPT_MAX = 11, + IB_QPT_DRIVER = 255, + IB_QPT_RESERVED1 = 4096, + IB_QPT_RESERVED2 = 4097, + IB_QPT_RESERVED3 = 4098, + IB_QPT_RESERVED4 = 4099, + IB_QPT_RESERVED5 = 4100, + IB_QPT_RESERVED6 = 4101, + IB_QPT_RESERVED7 = 4102, + IB_QPT_RESERVED8 = 4103, + IB_QPT_RESERVED9 = 4104, + IB_QPT_RESERVED10 = 4105, +}; + +struct ib_qp_security; + +struct ib_qp { + struct ib_device *device; + struct ib_pd *pd; + struct ib_cq *send_cq; + struct ib_cq *recv_cq; + spinlock_t mr_lock; + int mrs_used; + struct list_head rdma_mrs; + struct list_head sig_mrs; + struct ib_srq *srq; + struct ib_xrcd *xrcd; + struct list_head xrcd_list; + atomic_t usecnt; + struct list_head open_list; + struct ib_qp *real_qp; + struct ib_uqp_object *uobject; + void (*event_handler)(struct ib_event *, void *); + void *qp_context; + const struct ib_gid_attr *av_sgid_attr; + const struct ib_gid_attr *alt_path_sgid_attr; + u32 qp_num; + u32 max_write_sge; + u32 max_read_sge; + enum ib_qp_type qp_type; + struct ib_rwq_ind_table *rwq_ind_tbl; + struct ib_qp_security *qp_sec; + u8 port; + bool integrity_en; + struct rdma_restrack_entry res; + struct rdma_counter *counter; +}; + +struct ib_usrq_object; + +enum ib_srq_type { + IB_SRQT_BASIC = 0, + IB_SRQT_XRC = 1, + IB_SRQT_TM = 2, +}; + +struct ib_srq { + struct ib_device *device; + struct ib_pd *pd; + struct ib_usrq_object *uobject; + void (*event_handler)(struct ib_event *, void *); + void *srq_context; + enum ib_srq_type srq_type; + atomic_t usecnt; + struct { + struct ib_cq *cq; + union { + struct { + struct ib_xrcd *xrcd; + u32 srq_num; + } xrc; + }; + } ext; +}; + +struct ib_uwq_object; + +enum ib_wq_state { + IB_WQS_RESET = 0, + IB_WQS_RDY = 1, + IB_WQS_ERR = 2, +}; + +enum ib_wq_type { + IB_WQT_RQ = 0, +}; + +struct ib_wq { + struct ib_device *device; + struct ib_uwq_object *uobject; + void *wq_context; + void (*event_handler)(struct ib_event *, void *); + struct ib_pd *pd; + struct ib_cq *cq; + u32 wq_num; + enum ib_wq_state state; + enum ib_wq_type wq_type; + atomic_t usecnt; +}; + +struct ib_event { + struct ib_device *device; + 
union { + struct ib_cq *cq; + struct ib_qp *qp; + struct ib_srq *srq; + struct ib_wq *wq; + u8 port_num; + } element; + enum ib_event_type event; +}; + +struct ib_global_route { + const struct ib_gid_attr *sgid_attr; + union ib_gid dgid; + u32 flow_label; + u8 sgid_index; + u8 hop_limit; + u8 traffic_class; +}; + +struct ib_grh { + __be32 version_tclass_flow; + __be16 paylen; + u8 next_hdr; + u8 hop_limit; + union ib_gid sgid; + union ib_gid dgid; +}; + +struct ib_mr_status { + u32 fail_status; + struct ib_sig_err sig_err; +}; + +struct rdma_ah_init_attr { + struct rdma_ah_attr *ah_attr; + u32 flags; + struct net_device *xmit_slave; +}; + +enum rdma_ah_attr_type { + RDMA_AH_ATTR_TYPE_UNDEFINED = 0, + RDMA_AH_ATTR_TYPE_IB = 1, + RDMA_AH_ATTR_TYPE_ROCE = 2, + RDMA_AH_ATTR_TYPE_OPA = 3, +}; + +struct ib_ah_attr { + u16 dlid; + u8 src_path_bits; +}; + +struct roce_ah_attr { + u8 dmac[6]; +}; + +struct opa_ah_attr { + u32 dlid; + u8 src_path_bits; + bool make_grd; +}; + +struct rdma_ah_attr { + struct ib_global_route grh; + u8 sl; + u8 static_rate; + u8 port_num; + u8 ah_flags; + enum rdma_ah_attr_type type; + union { + struct ib_ah_attr ib; + struct roce_ah_attr roce; + struct opa_ah_attr opa; + }; +}; + +enum ib_wc_status { + IB_WC_SUCCESS = 0, + IB_WC_LOC_LEN_ERR = 1, + IB_WC_LOC_QP_OP_ERR = 2, + IB_WC_LOC_EEC_OP_ERR = 3, + IB_WC_LOC_PROT_ERR = 4, + IB_WC_WR_FLUSH_ERR = 5, + IB_WC_MW_BIND_ERR = 6, + IB_WC_BAD_RESP_ERR = 7, + IB_WC_LOC_ACCESS_ERR = 8, + IB_WC_REM_INV_REQ_ERR = 9, + IB_WC_REM_ACCESS_ERR = 10, + IB_WC_REM_OP_ERR = 11, + IB_WC_RETRY_EXC_ERR = 12, + IB_WC_RNR_RETRY_EXC_ERR = 13, + IB_WC_LOC_RDD_VIOL_ERR = 14, + IB_WC_REM_INV_RD_REQ_ERR = 15, + IB_WC_REM_ABORT_ERR = 16, + IB_WC_INV_EECN_ERR = 17, + IB_WC_INV_EEC_STATE_ERR = 18, + IB_WC_FATAL_ERR = 19, + IB_WC_RESP_TIMEOUT_ERR = 20, + IB_WC_GENERAL_ERR = 21, +}; + +enum ib_wc_opcode { + IB_WC_SEND = 0, + IB_WC_RDMA_WRITE = 1, + IB_WC_RDMA_READ = 2, + IB_WC_COMP_SWAP = 3, + IB_WC_FETCH_ADD = 4, + IB_WC_BIND_MW = 5, + IB_WC_LOCAL_INV = 6, + IB_WC_LSO = 7, + IB_WC_REG_MR = 8, + IB_WC_MASKED_COMP_SWAP = 9, + IB_WC_MASKED_FETCH_ADD = 10, + IB_WC_RECV = 128, + IB_WC_RECV_RDMA_WITH_IMM = 129, +}; + +struct ib_cqe { + void (*done)(struct ib_cq *, struct ib_wc *); +}; + +struct ib_wc { + union { + u64 wr_id; + struct ib_cqe *wr_cqe; + }; + enum ib_wc_status status; + enum ib_wc_opcode opcode; + u32 vendor_err; + u32 byte_len; + struct ib_qp *qp; + union { + __be32 imm_data; + u32 invalidate_rkey; + } ex; + u32 src_qp; + u32 slid; + int wc_flags; + u16 pkey_index; + u8 sl; + u8 dlid_path_bits; + u8 port_num; + u8 smac[6]; + u16 vlan_id; + u8 network_hdr_type; +}; + +struct ib_srq_attr { + u32 max_wr; + u32 max_sge; + u32 srq_limit; +}; + +struct ib_xrcd { + struct ib_device *device; + atomic_t usecnt; + struct inode *inode; + struct rw_semaphore tgt_qps_rwsem; + struct xarray tgt_qps; +}; + +struct ib_srq_init_attr { + void (*event_handler)(struct ib_event *, void *); + void *srq_context; + struct ib_srq_attr attr; + enum ib_srq_type srq_type; + struct { + struct ib_cq *cq; + union { + struct { + struct ib_xrcd *xrcd; + } xrc; + struct { + u32 max_num_tags; + } tag_matching; + }; + } ext; +}; + +struct ib_qp_cap { + u32 max_send_wr; + u32 max_recv_wr; + u32 max_send_sge; + u32 max_recv_sge; + u32 max_inline_data; + u32 max_rdma_ctxs; +}; + +enum ib_sig_type { + IB_SIGNAL_ALL_WR = 0, + IB_SIGNAL_REQ_WR = 1, +}; + +struct ib_qp_init_attr { + void (*event_handler)(struct ib_event *, void *); + void *qp_context; + struct ib_cq *send_cq; + struct 
ib_cq *recv_cq; + struct ib_srq *srq; + struct ib_xrcd *xrcd; + struct ib_qp_cap cap; + enum ib_sig_type sq_sig_type; + enum ib_qp_type qp_type; + u32 create_flags; + u8 port_num; + struct ib_rwq_ind_table *rwq_ind_tbl; + u32 source_qpn; +}; + +struct ib_uobject; + +struct ib_rwq_ind_table { + struct ib_device *device; + struct ib_uobject *uobject; + atomic_t usecnt; + u32 ind_tbl_num; + u32 log_ind_tbl_size; + struct ib_wq **ind_tbl; +}; + +enum ib_qp_state { + IB_QPS_RESET = 0, + IB_QPS_INIT = 1, + IB_QPS_RTR = 2, + IB_QPS_RTS = 3, + IB_QPS_SQD = 4, + IB_QPS_SQE = 5, + IB_QPS_ERR = 6, +}; + +enum ib_mig_state { + IB_MIG_MIGRATED = 0, + IB_MIG_REARM = 1, + IB_MIG_ARMED = 2, +}; + +enum ib_mw_type { + IB_MW_TYPE_1 = 1, + IB_MW_TYPE_2 = 2, +}; + +struct ib_qp_attr { + enum ib_qp_state qp_state; + enum ib_qp_state cur_qp_state; + enum ib_mtu path_mtu; + enum ib_mig_state path_mig_state; + u32 qkey; + u32 rq_psn; + u32 sq_psn; + u32 dest_qp_num; + int qp_access_flags; + struct ib_qp_cap cap; + struct rdma_ah_attr ah_attr; + struct rdma_ah_attr alt_ah_attr; + u16 pkey_index; + u16 alt_pkey_index; + u8 en_sqd_async_notify; + u8 sq_draining; + u8 max_rd_atomic; + u8 max_dest_rd_atomic; + u8 min_rnr_timer; + u8 port_num; + u8 timeout; + u8 retry_cnt; + u8 rnr_retry; + u8 alt_port_num; + u8 alt_timeout; + u32 rate_limit; + struct net_device *xmit_slave; +}; + +enum ib_wr_opcode { + IB_WR_RDMA_WRITE = 0, + IB_WR_RDMA_WRITE_WITH_IMM = 1, + IB_WR_SEND = 2, + IB_WR_SEND_WITH_IMM = 3, + IB_WR_RDMA_READ = 4, + IB_WR_ATOMIC_CMP_AND_SWP = 5, + IB_WR_ATOMIC_FETCH_AND_ADD = 6, + IB_WR_BIND_MW = 8, + IB_WR_LSO = 10, + IB_WR_SEND_WITH_INV = 9, + IB_WR_RDMA_READ_WITH_INV = 11, + IB_WR_LOCAL_INV = 7, + IB_WR_MASKED_ATOMIC_CMP_AND_SWP = 12, + IB_WR_MASKED_ATOMIC_FETCH_AND_ADD = 13, + IB_WR_REG_MR = 32, + IB_WR_REG_MR_INTEGRITY = 33, + IB_WR_RESERVED1 = 240, + IB_WR_RESERVED2 = 241, + IB_WR_RESERVED3 = 242, + IB_WR_RESERVED4 = 243, + IB_WR_RESERVED5 = 244, + IB_WR_RESERVED6 = 245, + IB_WR_RESERVED7 = 246, + IB_WR_RESERVED8 = 247, + IB_WR_RESERVED9 = 248, + IB_WR_RESERVED10 = 249, +}; + +struct ib_sge { + u64 addr; + u32 length; + u32 lkey; +}; + +struct ib_send_wr { + struct ib_send_wr *next; + union { + u64 wr_id; + struct ib_cqe *wr_cqe; + }; + struct ib_sge *sg_list; + int num_sge; + enum ib_wr_opcode opcode; + int send_flags; + union { + __be32 imm_data; + u32 invalidate_rkey; + } ex; +}; + +struct ib_ah { + struct ib_device *device; + struct ib_pd *pd; + struct ib_uobject *uobject; + const struct ib_gid_attr *sgid_attr; + enum rdma_ah_attr_type type; +}; + +struct ib_mr { + struct ib_device *device; + struct ib_pd *pd; + u32 lkey; + u32 rkey; + u64 iova; + u64 length; + unsigned int page_size; + enum ib_mr_type type; + bool need_inval; + union { + struct ib_uobject *uobject; + struct list_head qp_entry; + }; + struct ib_dm *dm; + struct ib_sig_attrs *sig_attrs; + struct rdma_restrack_entry res; +}; + +struct ib_recv_wr { + struct ib_recv_wr *next; + union { + u64 wr_id; + struct ib_cqe *wr_cqe; + }; + struct ib_sge *sg_list; + int num_sge; +}; + +struct ib_rdmacg_object {}; + +struct ib_uverbs_file; + +struct ib_ucontext { + struct ib_device *device; + struct ib_uverbs_file *ufile; + bool cleanup_retryable; + struct ib_rdmacg_object cg_obj; + struct rdma_restrack_entry res; + struct xarray mmap_xa; +}; + +struct uverbs_api_object; + +struct ib_uobject { + u64 user_handle; + struct ib_uverbs_file *ufile; + struct ib_ucontext *context; + void *object; + struct list_head list; + struct ib_rdmacg_object cg_obj; 
+ int id; + struct kref ref; + atomic_t usecnt; + struct callback_head rcu; + const struct uverbs_api_object *uapi_object; +}; + +struct ib_udata { + const void *inbuf; + void *outbuf; + size_t inlen; + size_t outlen; +}; + +struct ib_pd { + u32 local_dma_lkey; + u32 flags; + struct ib_device *device; + struct ib_uobject *uobject; + atomic_t usecnt; + u32 unsafe_global_rkey; + struct ib_mr *__internal_mr; + struct rdma_restrack_entry res; +}; + +struct ib_wq_init_attr { + void *wq_context; + enum ib_wq_type wq_type; + u32 max_wr; + u32 max_sge; + struct ib_cq *cq; + void (*event_handler)(struct ib_event *, void *); + u32 create_flags; +}; + +struct ib_wq_attr { + enum ib_wq_state wq_state; + enum ib_wq_state curr_wq_state; + u32 flags; + u32 flags_mask; +}; + +struct ib_rwq_ind_table_init_attr { + u32 log_ind_tbl_size; + struct ib_wq **ind_tbl; +}; + +enum port_pkey_state { + IB_PORT_PKEY_NOT_VALID = 0, + IB_PORT_PKEY_VALID = 1, + IB_PORT_PKEY_LISTED = 2, +}; + +struct ib_port_pkey { + enum port_pkey_state state; + u16 pkey_index; + u8 port_num; + struct list_head qp_list; + struct list_head to_error_list; + struct ib_qp_security *sec; +}; + +struct ib_ports_pkeys; + +struct ib_qp_security { + struct ib_qp *qp; + struct ib_device *dev; + struct mutex mutex; + struct ib_ports_pkeys *ports_pkeys; + struct list_head shared_qp_list; + void *security; + bool destroying; + atomic_t error_list_count; + struct completion error_complete; + int error_comps_pending; +}; + +struct ib_ports_pkeys { + struct ib_port_pkey main; + struct ib_port_pkey alt; +}; + +struct ib_dm { + struct ib_device *device; + u32 length; + u32 flags; + struct ib_uobject *uobject; + atomic_t usecnt; +}; + +struct ib_mw { + struct ib_device *device; + struct ib_pd *pd; + struct ib_uobject *uobject; + u32 rkey; + enum ib_mw_type type; +}; + +enum ib_flow_attr_type { + IB_FLOW_ATTR_NORMAL = 0, + IB_FLOW_ATTR_ALL_DEFAULT = 1, + IB_FLOW_ATTR_MC_DEFAULT = 2, + IB_FLOW_ATTR_SNIFFER = 3, +}; + +enum ib_flow_spec_type { + IB_FLOW_SPEC_ETH = 32, + IB_FLOW_SPEC_IB = 34, + IB_FLOW_SPEC_IPV4 = 48, + IB_FLOW_SPEC_IPV6 = 49, + IB_FLOW_SPEC_ESP = 52, + IB_FLOW_SPEC_TCP = 64, + IB_FLOW_SPEC_UDP = 65, + IB_FLOW_SPEC_VXLAN_TUNNEL = 80, + IB_FLOW_SPEC_GRE = 81, + IB_FLOW_SPEC_MPLS = 96, + IB_FLOW_SPEC_INNER = 256, + IB_FLOW_SPEC_ACTION_TAG = 4096, + IB_FLOW_SPEC_ACTION_DROP = 4097, + IB_FLOW_SPEC_ACTION_HANDLE = 4098, + IB_FLOW_SPEC_ACTION_COUNT = 4099, +}; + +struct ib_flow_eth_filter { + u8 dst_mac[6]; + u8 src_mac[6]; + __be16 ether_type; + __be16 vlan_tag; + u8 real_sz[0]; +}; + +struct ib_flow_spec_eth { + u32 type; + u16 size; + struct ib_flow_eth_filter val; + struct ib_flow_eth_filter mask; +}; + +struct ib_flow_ib_filter { + __be16 dlid; + __u8 sl; + u8 real_sz[0]; +}; + +struct ib_flow_spec_ib { + u32 type; + u16 size; + struct ib_flow_ib_filter val; + struct ib_flow_ib_filter mask; +}; + +struct ib_flow_ipv4_filter { + __be32 src_ip; + __be32 dst_ip; + u8 proto; + u8 tos; + u8 ttl; + u8 flags; + u8 real_sz[0]; +}; + +struct ib_flow_spec_ipv4 { + u32 type; + u16 size; + struct ib_flow_ipv4_filter val; + struct ib_flow_ipv4_filter mask; +}; + +struct ib_flow_ipv6_filter { + u8 src_ip[16]; + u8 dst_ip[16]; + __be32 flow_label; + u8 next_hdr; + u8 traffic_class; + u8 hop_limit; + u8 real_sz[0]; +}; + +struct ib_flow_spec_ipv6 { + u32 type; + u16 size; + struct ib_flow_ipv6_filter val; + struct ib_flow_ipv6_filter mask; +}; + +struct ib_flow_tcp_udp_filter { + __be16 dst_port; + __be16 src_port; + u8 real_sz[0]; +}; + +struct 
ib_flow_spec_tcp_udp { + u32 type; + u16 size; + struct ib_flow_tcp_udp_filter val; + struct ib_flow_tcp_udp_filter mask; +}; + +struct ib_flow_tunnel_filter { + __be32 tunnel_id; + u8 real_sz[0]; +}; + +struct ib_flow_spec_tunnel { + u32 type; + u16 size; + struct ib_flow_tunnel_filter val; + struct ib_flow_tunnel_filter mask; +}; + +struct ib_flow_esp_filter { + __be32 spi; + __be32 seq; + u8 real_sz[0]; +}; + +struct ib_flow_spec_esp { + u32 type; + u16 size; + struct ib_flow_esp_filter val; + struct ib_flow_esp_filter mask; +}; + +struct ib_flow_gre_filter { + __be16 c_ks_res0_ver; + __be16 protocol; + __be32 key; + u8 real_sz[0]; +}; + +struct ib_flow_spec_gre { + u32 type; + u16 size; + struct ib_flow_gre_filter val; + struct ib_flow_gre_filter mask; +}; + +struct ib_flow_mpls_filter { + __be32 tag; + u8 real_sz[0]; +}; + +struct ib_flow_spec_mpls { + u32 type; + u16 size; + struct ib_flow_mpls_filter val; + struct ib_flow_mpls_filter mask; +}; + +struct ib_flow_spec_action_tag { + enum ib_flow_spec_type type; + u16 size; + u32 tag_id; +}; + +struct ib_flow_spec_action_drop { + enum ib_flow_spec_type type; + u16 size; +}; + +struct ib_flow_spec_action_handle { + enum ib_flow_spec_type type; + u16 size; + struct ib_flow_action *act; +}; + +enum ib_flow_action_type { + IB_FLOW_ACTION_UNSPECIFIED = 0, + IB_FLOW_ACTION_ESP = 1, +}; + +struct ib_flow_action { + struct ib_device *device; + struct ib_uobject *uobject; + enum ib_flow_action_type type; + atomic_t usecnt; +}; + +struct ib_flow_spec_action_count { + enum ib_flow_spec_type type; + u16 size; + struct ib_counters *counters; +}; + +struct ib_counters { + struct ib_device *device; + struct ib_uobject *uobject; + atomic_t usecnt; +}; + +union ib_flow_spec { + struct { + u32 type; + u16 size; + }; + struct ib_flow_spec_eth eth; + struct ib_flow_spec_ib ib; + struct ib_flow_spec_ipv4 ipv4; + struct ib_flow_spec_tcp_udp tcp_udp; + struct ib_flow_spec_ipv6 ipv6; + struct ib_flow_spec_tunnel tunnel; + struct ib_flow_spec_esp esp; + struct ib_flow_spec_gre gre; + struct ib_flow_spec_mpls mpls; + struct ib_flow_spec_action_tag flow_tag; + struct ib_flow_spec_action_drop drop; + struct ib_flow_spec_action_handle action; + struct ib_flow_spec_action_count flow_count; +}; + +struct ib_flow_attr { + enum ib_flow_attr_type type; + u16 size; + u16 priority; + u32 flags; + u8 num_of_specs; + u8 port; + union ib_flow_spec flows[0]; +}; + +struct ib_flow { + struct ib_qp *qp; + struct ib_device *device; + struct ib_uobject *uobject; +}; + +struct ib_flow_action_attrs_esp_keymats { + enum ib_uverbs_flow_action_esp_keymat protocol; + union { + struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm; + } keymat; +}; + +struct ib_flow_action_attrs_esp_replays { + enum ib_uverbs_flow_action_esp_replay protocol; + union { + struct ib_uverbs_flow_action_esp_replay_bmp bmp; + } replay; +}; + +struct ib_flow_spec_list { + struct ib_flow_spec_list *next; + union ib_flow_spec spec; +}; + +struct ib_flow_action_attrs_esp { + struct ib_flow_action_attrs_esp_keymats *keymat; + struct ib_flow_action_attrs_esp_replays *replay; + struct ib_flow_spec_list *encap; + u32 esn; + u32 spi; + u32 seq; + u32 tfc_pad; + u64 flags; + u64 hard_limit_pkts; +}; + +struct ib_pkey_cache; + +struct ib_gid_table; + +struct ib_port_cache { + u64 subnet_prefix; + struct ib_pkey_cache *pkey; + struct ib_gid_table *gid; + u8 lmc; + enum ib_port_state port_state; +}; + +struct ib_port_immutable { + int pkey_tbl_len; + int gid_tbl_len; + u32 core_cap_flags; + u32 max_mad_size; +}; + 
+struct ib_port_data { + struct ib_device *ib_dev; + struct ib_port_immutable immutable; + spinlock_t pkey_list_lock; + struct list_head pkey_list; + struct ib_port_cache cache; + spinlock_t netdev_lock; + struct net_device *netdev; + struct hlist_node ndev_hash_link; + struct rdma_port_counter port_counter; + struct rdma_hw_stats *hw_stats; +}; + +struct rdma_netdev_alloc_params { + size_t sizeof_priv; + unsigned int txqs; + unsigned int rxqs; + void *param; + int (*initialize_rdma_netdev)(struct ib_device *, u8, struct net_device *, void *); +}; + +struct ib_counters_read_attr { + u64 *counters_buff; + u32 ncounters; + u32 flags; +}; + +struct rdma_user_mmap_entry { + struct kref ref; + struct ib_ucontext *ucontext; + long unsigned int start_pgoff; + size_t npages; + bool driver_removed; +}; + +enum blk_zone_type { + BLK_ZONE_TYPE_CONVENTIONAL = 1, + BLK_ZONE_TYPE_SEQWRITE_REQ = 2, + BLK_ZONE_TYPE_SEQWRITE_PREF = 3, +}; + +enum blk_zone_cond { + BLK_ZONE_COND_NOT_WP = 0, + BLK_ZONE_COND_EMPTY = 1, + BLK_ZONE_COND_IMP_OPEN = 2, + BLK_ZONE_COND_EXP_OPEN = 3, + BLK_ZONE_COND_CLOSED = 4, + BLK_ZONE_COND_READONLY = 13, + BLK_ZONE_COND_FULL = 14, + BLK_ZONE_COND_OFFLINE = 15, +}; + +enum blk_zone_report_flags { + BLK_ZONE_REP_CAPACITY = 1, +}; + +struct blk_zone_report { + __u64 sector; + __u32 nr_zones; + __u32 flags; + struct blk_zone zones[0]; +}; + +struct blk_zone_range { + __u64 sector; + __u64 nr_sectors; +}; + +struct zone_report_args { + struct blk_zone *zones; +}; + +struct blk_revalidate_zone_args { + struct gendisk *disk; + long unsigned int *conv_zones_bitmap; + long unsigned int *seq_zones_wlock; + unsigned int nr_zones; + sector_t zone_sectors; + sector_t sector; +}; + +enum wbt_flags { + WBT_TRACKED = 1, + WBT_READ = 2, + WBT_KSWAPD = 4, + WBT_DISCARD = 8, + WBT_NR_BITS = 4, +}; + +enum { + WBT_STATE_ON_DEFAULT = 1, + WBT_STATE_ON_MANUAL = 2, +}; + +struct rq_wb { + unsigned int wb_background; + unsigned int wb_normal; + short int enable_state; + unsigned int unknown_cnt; + u64 win_nsec; + u64 cur_win_nsec; + struct blk_stat_callback *cb; + u64 sync_issue; + void *sync_cookie; + unsigned int wc; + long unsigned int last_issue; + long unsigned int last_comp; + long unsigned int min_lat_nsec; + struct rq_qos rqos; + struct rq_wait rq_wait[3]; + struct rq_depth rq_depth; +}; + +struct trace_event_raw_wbt_stat { + struct trace_entry ent; + char name[32]; + s64 rmean; + u64 rmin; + u64 rmax; + s64 rnr_samples; + s64 rtime; + s64 wmean; + u64 wmin; + u64 wmax; + s64 wnr_samples; + s64 wtime; + char __data[0]; +}; + +struct trace_event_raw_wbt_lat { + struct trace_entry ent; + char name[32]; + long unsigned int lat; + char __data[0]; +}; + +struct trace_event_raw_wbt_step { + struct trace_entry ent; + char name[32]; + const char *msg; + int step; + long unsigned int window; + unsigned int bg; + unsigned int normal; + unsigned int max; + char __data[0]; +}; + +struct trace_event_raw_wbt_timer { + struct trace_entry ent; + char name[32]; + unsigned int status; + int step; + unsigned int inflight; + char __data[0]; +}; + +struct trace_event_data_offsets_wbt_stat {}; + +struct trace_event_data_offsets_wbt_lat {}; + +struct trace_event_data_offsets_wbt_step {}; + +struct trace_event_data_offsets_wbt_timer {}; + +typedef void (*btf_trace_wbt_stat)(void *, struct backing_dev_info *, struct blk_rq_stat *); + +typedef void (*btf_trace_wbt_lat)(void *, struct backing_dev_info *, long unsigned int); + +typedef void (*btf_trace_wbt_step)(void *, struct backing_dev_info *, const char *, int, long 
unsigned int, unsigned int, unsigned int, unsigned int); + +typedef void (*btf_trace_wbt_timer)(void *, struct backing_dev_info *, unsigned int, int, unsigned int); + +enum { + RWB_DEF_DEPTH = 16, + RWB_WINDOW_NSEC = 100000000, + RWB_MIN_WRITE_SAMPLES = 3, + RWB_UNKNOWN_BUMP = 5, +}; + +enum { + LAT_OK = 1, + LAT_UNKNOWN = 2, + LAT_UNKNOWN_WRITES = 3, + LAT_EXCEEDED = 4, +}; + +struct wbt_wait_data { + struct rq_wb *rwb; + enum wbt_flags wb_acct; + long unsigned int rw; +}; + +struct blk_ksm_keyslot { + atomic_t slot_refs; + struct list_head idle_slot_node; + struct hlist_node hash_node; + const struct blk_crypto_key *key; + struct blk_keyslot_manager *ksm; +}; + +struct blk_ksm_ll_ops { + int (*keyslot_program)(struct blk_keyslot_manager *, const struct blk_crypto_key *, unsigned int); + int (*keyslot_evict)(struct blk_keyslot_manager *, const struct blk_crypto_key *, unsigned int); +}; + +struct blk_keyslot_manager { + struct blk_ksm_ll_ops ksm_ll_ops; + unsigned int max_dun_bytes_supported; + unsigned int crypto_modes_supported[4]; + struct device *dev; + unsigned int num_slots; + struct rw_semaphore lock; + wait_queue_head_t idle_slots_wait_queue; + struct list_head idle_slots; + spinlock_t idle_slots_lock; + struct hlist_head *slot_hashtable; + unsigned int log_slot_ht_size; + struct blk_ksm_keyslot *slots; +}; + +struct blk_crypto_mode { + const char *cipher_str; + unsigned int keysize; + unsigned int ivsize; +}; + +struct bio_fallback_crypt_ctx { + struct bio_crypt_ctx crypt_ctx; + struct bvec_iter crypt_iter; + union { + struct { + struct work_struct work; + struct bio *bio; + }; + struct { + void *bi_private_orig; + bio_end_io_t *bi_end_io_orig; + }; + }; +}; + +struct blk_crypto_keyslot { + enum blk_crypto_mode_num crypto_mode; + struct crypto_skcipher *tfms[4]; +}; + +union blk_crypto_iv { + __le64 dun[4]; + u8 bytes[32]; +}; + +typedef void (*swap_func_t)(void *, void *, int); + +typedef int (*cmp_r_func_t)(const void *, const void *, const void *); + +struct siprand_state { + long unsigned int v0; + long unsigned int v1; + long unsigned int v2; + long unsigned int v3; +}; + +typedef __kernel_long_t __kernel_ptrdiff_t; + +typedef __kernel_ptrdiff_t ptrdiff_t; + +struct region { + unsigned int start; + unsigned int off; + unsigned int group_len; + unsigned int end; +}; + +enum { + REG_OP_ISFREE = 0, + REG_OP_ALLOC = 1, + REG_OP_RELEASE = 2, +}; + +typedef struct scatterlist *sg_alloc_fn(unsigned int, gfp_t); + +typedef void sg_free_fn(struct scatterlist *, unsigned int); + +struct sg_page_iter { + struct scatterlist *sg; + unsigned int sg_pgoffset; + unsigned int __nents; + int __pg_advance; +}; + +struct sg_dma_page_iter { + struct sg_page_iter base; +}; + +struct sg_mapping_iter { + struct page *page; + void *addr; + size_t length; + size_t consumed; + struct sg_page_iter piter; + unsigned int __offset; + unsigned int __remaining; + unsigned int __flags; +}; + +typedef int (*cmp_func)(void *, const struct list_head *, const struct list_head *); + +struct rhltable { + struct rhashtable ht; +}; + +struct rhashtable_walker { + struct list_head list; + struct bucket_table *tbl; +}; + +struct rhashtable_iter { + struct rhashtable *ht; + struct rhash_head *p; + struct rhlist_head *list; + struct rhashtable_walker walker; + unsigned int slot; + unsigned int skip; + bool end_of_table; +}; + +union nested_table { + union nested_table *table; + struct rhash_lock_head *bucket; +}; + +struct once_work { + struct work_struct work; + struct static_key_true *key; +}; + +struct genradix_iter 
{ + size_t offset; + size_t pos; +}; + +struct genradix_node { + union { + struct genradix_node *children[512]; + u8 data[4096]; + }; +}; + +struct reciprocal_value_adv { + u32 m; + u8 sh; + u8 exp; + bool is_wide_m; +}; + +enum devm_ioremap_type { + DEVM_IOREMAP = 0, + DEVM_IOREMAP_UC = 1, + DEVM_IOREMAP_WC = 2, +}; + +struct pcim_iomap_devres { + void *table[6]; +}; + +struct btree_head { + long unsigned int *node; + mempool_t *mempool; + int height; +}; + +struct btree_geo { + int keylen; + int no_pairs; + int no_longs; +}; + +typedef void (*visitor128_t)(void *, long unsigned int, u64, u64, size_t); + +typedef void (*visitorl_t)(void *, long unsigned int, long unsigned int, size_t); + +typedef void (*visitor32_t)(void *, long unsigned int, u32, size_t); + +typedef void (*visitor64_t)(void *, long unsigned int, u64, size_t); + +enum assoc_array_walk_status { + assoc_array_walk_tree_empty = 0, + assoc_array_walk_found_terminal_node = 1, + assoc_array_walk_found_wrong_shortcut = 2, +}; + +struct assoc_array_walk_result { + struct { + struct assoc_array_node *node; + int level; + int slot; + } terminal_node; + struct { + struct assoc_array_shortcut *shortcut; + int level; + int sc_level; + long unsigned int sc_segments; + long unsigned int dissimilarity; + } wrong_shortcut; +}; + +struct assoc_array_delete_collapse_context { + struct assoc_array_node *node; + const void *skip_leaf; + int slot; +}; + +struct linear_range { + unsigned int min; + unsigned int min_sel; + unsigned int max_sel; + unsigned int step; +}; + +enum packing_op { + PACK = 0, + UNPACK = 1, +}; + +struct crc_test { + u32 crc; + u32 start; + u32 length; + u32 crc_le; + u32 crc_be; + u32 crc32c_le; +}; + +struct xxh32_state { + uint32_t total_len_32; + uint32_t large_len; + uint32_t v1; + uint32_t v2; + uint32_t v3; + uint32_t v4; + uint32_t mem32[4]; + uint32_t memsize; +}; + +struct xxh64_state { + uint64_t total_len; + uint64_t v1; + uint64_t v2; + uint64_t v3; + uint64_t v4; + uint64_t mem64[4]; + uint32_t memsize; +}; + +struct gen_pool_chunk { + struct list_head next_chunk; + atomic_long_t avail; + phys_addr_t phys_addr; + void *owner; + long unsigned int start_addr; + long unsigned int end_addr; + long unsigned int bits[0]; +}; + +struct genpool_data_align { + int align; +}; + +struct genpool_data_fixed { + long unsigned int offset; +}; + +typedef struct z_stream_s z_stream; + +typedef z_stream *z_streamp; + +typedef struct { + unsigned char op; + unsigned char bits; + short unsigned int val; +} code; + +typedef enum { + HEAD = 0, + FLAGS = 1, + TIME = 2, + OS = 3, + EXLEN = 4, + EXTRA = 5, + NAME = 6, + COMMENT = 7, + HCRC = 8, + DICTID = 9, + DICT = 10, + TYPE = 11, + TYPEDO = 12, + STORED = 13, + COPY = 14, + TABLE = 15, + LENLENS = 16, + CODELENS = 17, + LEN = 18, + LENEXT = 19, + DIST = 20, + DISTEXT = 21, + MATCH = 22, + LIT = 23, + CHECK = 24, + LENGTH = 25, + DONE = 26, + BAD = 27, + MEM = 28, + SYNC = 29, +} inflate_mode; + +struct inflate_state { + inflate_mode mode; + int last; + int wrap; + int havedict; + int flags; + unsigned int dmax; + long unsigned int check; + long unsigned int total; + unsigned int wbits; + unsigned int wsize; + unsigned int whave; + unsigned int write; + unsigned char *window; + long unsigned int hold; + unsigned int bits; + unsigned int length; + unsigned int offset; + unsigned int extra; + const code *lencode; + const code *distcode; + unsigned int lenbits; + unsigned int distbits; + unsigned int ncode; + unsigned int nlen; + unsigned int ndist; + unsigned int have; + code 
*next; + short unsigned int lens[320]; + short unsigned int work[288]; + code codes[2048]; +}; + +union uu { + short unsigned int us; + unsigned char b[2]; +}; + +typedef unsigned int uInt; + +struct inflate_workspace { + struct inflate_state inflate_state; + unsigned char working_window[32768]; +}; + +typedef enum { + CODES = 0, + LENS = 1, + DISTS = 2, +} codetype; + +typedef unsigned char uch; + +typedef short unsigned int ush; + +typedef long unsigned int ulg; + +struct ct_data_s { + union { + ush freq; + ush code; + } fc; + union { + ush dad; + ush len; + } dl; +}; + +typedef struct ct_data_s ct_data; + +struct static_tree_desc_s { + const ct_data *static_tree; + const int *extra_bits; + int extra_base; + int elems; + int max_length; +}; + +typedef struct static_tree_desc_s static_tree_desc; + +struct tree_desc_s { + ct_data *dyn_tree; + int max_code; + static_tree_desc *stat_desc; +}; + +typedef ush Pos; + +typedef unsigned int IPos; + +struct deflate_state { + z_streamp strm; + int status; + Byte *pending_buf; + ulg pending_buf_size; + Byte *pending_out; + int pending; + int noheader; + Byte data_type; + Byte method; + int last_flush; + uInt w_size; + uInt w_bits; + uInt w_mask; + Byte *window; + ulg window_size; + Pos *prev; + Pos *head; + uInt ins_h; + uInt hash_size; + uInt hash_bits; + uInt hash_mask; + uInt hash_shift; + long int block_start; + uInt match_length; + IPos prev_match; + int match_available; + uInt strstart; + uInt match_start; + uInt lookahead; + uInt prev_length; + uInt max_chain_length; + uInt max_lazy_match; + int level; + int strategy; + uInt good_match; + int nice_match; + struct ct_data_s dyn_ltree[573]; + struct ct_data_s dyn_dtree[61]; + struct ct_data_s bl_tree[39]; + struct tree_desc_s l_desc; + struct tree_desc_s d_desc; + struct tree_desc_s bl_desc; + ush bl_count[16]; + int heap[573]; + int heap_len; + int heap_max; + uch depth[573]; + uch *l_buf; + uInt lit_bufsize; + uInt last_lit; + ush *d_buf; + ulg opt_len; + ulg static_len; + ulg compressed_len; + uInt matches; + int last_eob_len; + ush bi_buf; + int bi_valid; +}; + +typedef struct deflate_state deflate_state; + +typedef enum { + need_more = 0, + block_done = 1, + finish_started = 2, + finish_done = 3, +} block_state; + +typedef block_state (*compress_func)(deflate_state *, int); + +struct deflate_workspace { + deflate_state deflate_memory; + Byte *window_memory; + Pos *prev_memory; + Pos *head_memory; + char *overlay_memory; +}; + +typedef struct deflate_workspace deflate_workspace; + +struct config_s { + ush good_length; + ush max_lazy; + ush nice_length; + ush max_chain; + compress_func func; +}; + +typedef struct config_s config; + +typedef struct tree_desc_s tree_desc; + +typedef struct { + uint32_t hashTable[4096]; + uint32_t currentOffset; + uint32_t initCheck; + const uint8_t *dictionary; + uint8_t *bufferStart; + uint32_t dictSize; +} LZ4_stream_t_internal; + +typedef union { + long long unsigned int table[2052]; + LZ4_stream_t_internal internal_donotuse; +} LZ4_stream_t; + +typedef uint8_t BYTE; + +typedef uint16_t U16; + +typedef uint32_t U32; + +typedef uint64_t U64; + +typedef uintptr_t uptrval; + +typedef enum { + noLimit = 0, + limitedOutput = 1, +} limitedOutput_directive; + +typedef enum { + byPtr = 0, + byU32 = 1, + byU16 = 2, +} tableType_t; + +typedef enum { + noDict = 0, + withPrefix64k = 1, + usingExtDict = 2, +} dict_directive; + +typedef enum { + noDictIssue = 0, + dictSmall = 1, +} dictIssue_directive; + +typedef struct { + const uint8_t *externalDict; + size_t 
extDictSize; + const uint8_t *prefixEnd; + size_t prefixSize; +} LZ4_streamDecode_t_internal; + +typedef union { + long long unsigned int table[4]; + LZ4_streamDecode_t_internal internal_donotuse; +} LZ4_streamDecode_t; + +typedef enum { + endOnOutputSize = 0, + endOnInputSize = 1, +} endCondition_directive; + +typedef enum { + decode_full_block = 0, + partial_decode = 1, +} earlyEnd_directive; + +typedef struct { + size_t bitContainer; + int bitPos; + char *startPtr; + char *ptr; + char *endPtr; +} BIT_CStream_t; + +typedef unsigned int FSE_CTable; + +typedef struct { + ptrdiff_t value; + const void *stateTable; + const void *symbolTT; + unsigned int stateLog; +} FSE_CState_t; + +typedef struct { + int deltaFindState; + U32 deltaNbBits; +} FSE_symbolCompressionTransform; + +typedef int16_t S16; + +struct HUF_CElt_s { + U16 val; + BYTE nbBits; +}; + +typedef struct HUF_CElt_s HUF_CElt; + +typedef enum { + HUF_repeat_none = 0, + HUF_repeat_check = 1, + HUF_repeat_valid = 2, +} HUF_repeat; + +struct nodeElt_s { + U32 count; + U16 parent; + BYTE byte; + BYTE nbBits; +}; + +typedef struct nodeElt_s nodeElt; + +typedef struct { + U32 base; + U32 curr; +} rankPos; + +typedef enum { + ZSTDcs_created = 0, + ZSTDcs_init = 1, + ZSTDcs_ongoing = 2, + ZSTDcs_ending = 3, +} ZSTD_compressionStage_e; + +typedef void * (*ZSTD_allocFunction)(void *, size_t); + +typedef void (*ZSTD_freeFunction)(void *, void *); + +typedef struct { + ZSTD_allocFunction customAlloc; + ZSTD_freeFunction customFree; + void *opaque; +} ZSTD_customMem; + +typedef struct { + U32 price; + U32 off; + U32 mlen; + U32 litlen; + U32 rep[3]; +} ZSTD_optimal_t; + +typedef struct { + U32 off; + U32 len; +} ZSTD_match_t; + +struct seqDef_s; + +typedef struct seqDef_s seqDef; + +typedef struct { + seqDef *sequencesStart; + seqDef *sequences; + BYTE *litStart; + BYTE *lit; + BYTE *llCode; + BYTE *mlCode; + BYTE *ofCode; + U32 longLengthID; + U32 longLengthPos; + ZSTD_optimal_t *priceTable; + ZSTD_match_t *matchTable; + U32 *matchLengthFreq; + U32 *litLengthFreq; + U32 *litFreq; + U32 *offCodeFreq; + U32 matchLengthSum; + U32 matchSum; + U32 litLengthSum; + U32 litSum; + U32 offCodeSum; + U32 log2matchLengthSum; + U32 log2matchSum; + U32 log2litLengthSum; + U32 log2litSum; + U32 log2offCodeSum; + U32 factor; + U32 staticPrices; + U32 cachedPrice; + U32 cachedLitLength; + const BYTE *cachedLiterals; +} seqStore_t; + +struct HUF_CElt_s___2; + +typedef struct HUF_CElt_s___2 HUF_CElt___2; + +struct ZSTD_CCtx_s___2 { + const BYTE *nextSrc; + const BYTE *base; + const BYTE *dictBase; + U32 dictLimit; + U32 lowLimit; + U32 nextToUpdate; + U32 nextToUpdate3; + U32 hashLog3; + U32 loadedDictEnd; + U32 forceWindow; + U32 forceRawDict; + ZSTD_compressionStage_e stage; + U32 rep[3]; + U32 repToConfirm[3]; + U32 dictID; + ZSTD_parameters params; + void *workSpace; + size_t workSpaceSize; + size_t blockSize; + U64 frameContentSize; + struct xxh64_state xxhState; + ZSTD_customMem customMem; + seqStore_t seqStore; + U32 *hashTable; + U32 *hashTable3; + U32 *chainTable; + HUF_CElt___2 *hufTable; + U32 flagStaticTables; + HUF_repeat flagStaticHufTable; + FSE_CTable offcodeCTable[187]; + FSE_CTable matchlengthCTable[363]; + FSE_CTable litlengthCTable[329]; + unsigned int tmpCounters[1536]; +}; + +typedef struct ZSTD_CCtx_s___2 ZSTD_CCtx___2; + +struct ZSTD_CDict_s { + void *dictBuffer; + const void *dictContent; + size_t dictContentSize; + ZSTD_CCtx___2 *refContext; +}; + +typedef struct ZSTD_CDict_s ZSTD_CDict; + +struct ZSTD_inBuffer_s { + const void *src; + 
size_t size; + size_t pos; +}; + +typedef struct ZSTD_inBuffer_s ZSTD_inBuffer; + +struct ZSTD_outBuffer_s { + void *dst; + size_t size; + size_t pos; +}; + +typedef struct ZSTD_outBuffer_s ZSTD_outBuffer; + +typedef enum { + zcss_init = 0, + zcss_load = 1, + zcss_flush = 2, + zcss_final = 3, +} ZSTD_cStreamStage; + +struct ZSTD_CStream_s { + ZSTD_CCtx___2 *cctx; + ZSTD_CDict *cdictLocal; + const ZSTD_CDict *cdict; + char *inBuff; + size_t inBuffSize; + size_t inToCompress; + size_t inBuffPos; + size_t inBuffTarget; + size_t blockSize; + char *outBuff; + size_t outBuffSize; + size_t outBuffContentSize; + size_t outBuffFlushedSize; + ZSTD_cStreamStage stage; + U32 checksum; + U32 frameEnded; + U64 pledgedSrcSize; + U64 inputProcessed; + ZSTD_parameters params; + ZSTD_customMem customMem; +}; + +typedef struct ZSTD_CStream_s ZSTD_CStream; + +typedef int32_t S32; + +typedef enum { + set_basic = 0, + set_rle = 1, + set_compressed = 2, + set_repeat = 3, +} symbolEncodingType_e; + +struct seqDef_s { + U32 offset; + U16 litLength; + U16 matchLength; +}; + +typedef enum { + ZSTDcrp_continue = 0, + ZSTDcrp_noMemset = 1, + ZSTDcrp_fullReset = 2, +} ZSTD_compResetPolicy_e; + +typedef void (*ZSTD_blockCompressor)(ZSTD_CCtx___2 *, const void *, size_t); + +typedef enum { + zsf_gather = 0, + zsf_flush = 1, + zsf_end = 2, +} ZSTD_flush_e; + +typedef size_t (*searchMax_f)(ZSTD_CCtx___2 *, const BYTE *, const BYTE *, size_t *, U32, U32); + +typedef struct { + size_t bitContainer; + unsigned int bitsConsumed; + const char *ptr; + const char *start; +} BIT_DStream_t; + +typedef enum { + BIT_DStream_unfinished = 0, + BIT_DStream_endOfBuffer = 1, + BIT_DStream_completed = 2, + BIT_DStream_overflow = 3, +} BIT_DStream_status; + +typedef unsigned int FSE_DTable; + +typedef struct { + size_t state; + const void *table; +} FSE_DState_t; + +typedef struct { + U16 tableLog; + U16 fastMode; +} FSE_DTableHeader; + +typedef struct { + short unsigned int newState; + unsigned char symbol; + unsigned char nbBits; +} FSE_decode_t; + +typedef struct { + void *ptr; + const void *end; +} ZSTD_stack; + +typedef U32 HUF_DTable; + +typedef struct { + BYTE maxTableLog; + BYTE tableType; + BYTE tableLog; + BYTE reserved; +} DTableDesc; + +typedef struct { + BYTE byte; + BYTE nbBits; +} HUF_DEltX2; + +typedef struct { + U16 sequence; + BYTE nbBits; + BYTE length; +} HUF_DEltX4; + +typedef struct { + BYTE symbol; + BYTE weight; +} sortedSymbol_t; + +typedef U32 rankValCol_t[13]; + +typedef struct { + U32 tableTime; + U32 decode256Time; +} algo_time_t; + +typedef struct { + FSE_DTable LLTable[513]; + FSE_DTable OFTable[257]; + FSE_DTable MLTable[513]; + HUF_DTable hufTable[4097]; + U64 workspace[384]; + U32 rep[3]; +} ZSTD_entropyTables_t; + +typedef struct { + long long unsigned int frameContentSize; + unsigned int windowSize; + unsigned int dictID; + unsigned int checksumFlag; +} ZSTD_frameParams; + +typedef enum { + bt_raw = 0, + bt_rle = 1, + bt_compressed = 2, + bt_reserved = 3, +} blockType_e; + +typedef enum { + ZSTDds_getFrameHeaderSize = 0, + ZSTDds_decodeFrameHeader = 1, + ZSTDds_decodeBlockHeader = 2, + ZSTDds_decompressBlock = 3, + ZSTDds_decompressLastBlock = 4, + ZSTDds_checkChecksum = 5, + ZSTDds_decodeSkippableHeader = 6, + ZSTDds_skipFrame = 7, +} ZSTD_dStage; + +struct ZSTD_DCtx_s___2 { + const FSE_DTable *LLTptr; + const FSE_DTable *MLTptr; + const FSE_DTable *OFTptr; + const HUF_DTable *HUFptr; + ZSTD_entropyTables_t entropy; + const void *previousDstEnd; + const void *base; + const void *vBase; + const void 
*dictEnd; + size_t expected; + ZSTD_frameParams fParams; + blockType_e bType; + ZSTD_dStage stage; + U32 litEntropy; + U32 fseEntropy; + struct xxh64_state xxhState; + size_t headerSize; + U32 dictID; + const BYTE *litPtr; + ZSTD_customMem customMem; + size_t litSize; + size_t rleSize; + BYTE litBuffer[131080]; + BYTE headerBuffer[18]; +}; + +typedef struct ZSTD_DCtx_s___2 ZSTD_DCtx___2; + +struct ZSTD_DDict_s { + void *dictBuffer; + const void *dictContent; + size_t dictSize; + ZSTD_entropyTables_t entropy; + U32 dictID; + U32 entropyPresent; + ZSTD_customMem cMem; +}; + +typedef struct ZSTD_DDict_s ZSTD_DDict; + +typedef enum { + zdss_init = 0, + zdss_loadHeader = 1, + zdss_read = 2, + zdss_load = 3, + zdss_flush = 4, +} ZSTD_dStreamStage; + +struct ZSTD_DStream_s { + ZSTD_DCtx___2 *dctx; + ZSTD_DDict *ddictLocal; + const ZSTD_DDict *ddict; + ZSTD_frameParams fParams; + ZSTD_dStreamStage stage; + char *inBuff; + size_t inBuffSize; + size_t inPos; + size_t maxWindowSize; + char *outBuff; + size_t outBuffSize; + size_t outStart; + size_t outEnd; + size_t blockSize; + BYTE headerBuffer[18]; + size_t lhSize; + ZSTD_customMem customMem; + void *legacyContext; + U32 previousLegacyVersion; + U32 legacyVersion; + U32 hostageByte; +}; + +typedef struct ZSTD_DStream_s ZSTD_DStream; + +typedef enum { + ZSTDnit_frameHeader = 0, + ZSTDnit_blockHeader = 1, + ZSTDnit_block = 2, + ZSTDnit_lastBlock = 3, + ZSTDnit_checksum = 4, + ZSTDnit_skippableFrame = 5, +} ZSTD_nextInputType_e; + +typedef uintptr_t uPtrDiff; + +typedef struct { + blockType_e blockType; + U32 lastBlock; + U32 origSize; +} blockProperties_t; + +typedef union { + FSE_decode_t realData; + U32 alignedBy4; +} FSE_decode_t4; + +typedef struct { + size_t litLength; + size_t matchLength; + size_t offset; + const BYTE *match; +} seq_t; + +typedef struct { + BIT_DStream_t DStream; + FSE_DState_t stateLL; + FSE_DState_t stateOffb; + FSE_DState_t stateML; + size_t prevOffset[3]; + const BYTE *base; + size_t pos; + uPtrDiff gotoDict; +} seqState_t; + +enum xz_mode { + XZ_SINGLE = 0, + XZ_PREALLOC = 1, + XZ_DYNALLOC = 2, +}; + +enum xz_ret { + XZ_OK = 0, + XZ_STREAM_END = 1, + XZ_UNSUPPORTED_CHECK = 2, + XZ_MEM_ERROR = 3, + XZ_MEMLIMIT_ERROR = 4, + XZ_FORMAT_ERROR = 5, + XZ_OPTIONS_ERROR = 6, + XZ_DATA_ERROR = 7, + XZ_BUF_ERROR = 8, +}; + +struct xz_buf { + const uint8_t *in; + size_t in_pos; + size_t in_size; + uint8_t *out; + size_t out_pos; + size_t out_size; +}; + +typedef uint64_t vli_type; + +enum xz_check { + XZ_CHECK_NONE = 0, + XZ_CHECK_CRC32 = 1, + XZ_CHECK_CRC64 = 4, + XZ_CHECK_SHA256 = 10, +}; + +struct xz_dec_hash { + vli_type unpadded; + vli_type uncompressed; + uint32_t crc32; +}; + +struct xz_dec_lzma2; + +struct xz_dec_bcj; + +struct xz_dec { + enum { + SEQ_STREAM_HEADER = 0, + SEQ_BLOCK_START = 1, + SEQ_BLOCK_HEADER = 2, + SEQ_BLOCK_UNCOMPRESS = 3, + SEQ_BLOCK_PADDING = 4, + SEQ_BLOCK_CHECK = 5, + SEQ_INDEX = 6, + SEQ_INDEX_PADDING = 7, + SEQ_INDEX_CRC32 = 8, + SEQ_STREAM_FOOTER = 9, + } sequence; + uint32_t pos; + vli_type vli; + size_t in_start; + size_t out_start; + uint32_t crc32; + enum xz_check check_type; + enum xz_mode mode; + bool allow_buf_error; + struct { + vli_type compressed; + vli_type uncompressed; + uint32_t size; + } block_header; + struct { + vli_type compressed; + vli_type uncompressed; + vli_type count; + struct xz_dec_hash hash; + } block; + struct { + enum { + SEQ_INDEX_COUNT = 0, + SEQ_INDEX_UNPADDED = 1, + SEQ_INDEX_UNCOMPRESSED = 2, + } sequence; + vli_type size; + vli_type count; + struct xz_dec_hash 
hash; + } index; + struct { + size_t pos; + size_t size; + uint8_t buf[1024]; + } temp; + struct xz_dec_lzma2 *lzma2; + struct xz_dec_bcj *bcj; + bool bcj_active; +}; + +enum lzma_state { + STATE_LIT_LIT = 0, + STATE_MATCH_LIT_LIT = 1, + STATE_REP_LIT_LIT = 2, + STATE_SHORTREP_LIT_LIT = 3, + STATE_MATCH_LIT = 4, + STATE_REP_LIT = 5, + STATE_SHORTREP_LIT = 6, + STATE_LIT_MATCH = 7, + STATE_LIT_LONGREP = 8, + STATE_LIT_SHORTREP = 9, + STATE_NONLIT_MATCH = 10, + STATE_NONLIT_REP = 11, +}; + +struct dictionary { + uint8_t *buf; + size_t start; + size_t pos; + size_t full; + size_t limit; + size_t end; + uint32_t size; + uint32_t size_max; + uint32_t allocated; + enum xz_mode mode; +}; + +struct rc_dec { + uint32_t range; + uint32_t code; + uint32_t init_bytes_left; + const uint8_t *in; + size_t in_pos; + size_t in_limit; +}; + +struct lzma_len_dec { + uint16_t choice; + uint16_t choice2; + uint16_t low[128]; + uint16_t mid[128]; + uint16_t high[256]; +}; + +struct lzma_dec { + uint32_t rep0; + uint32_t rep1; + uint32_t rep2; + uint32_t rep3; + enum lzma_state state; + uint32_t len; + uint32_t lc; + uint32_t literal_pos_mask; + uint32_t pos_mask; + uint16_t is_match[192]; + uint16_t is_rep[12]; + uint16_t is_rep0[12]; + uint16_t is_rep1[12]; + uint16_t is_rep2[12]; + uint16_t is_rep0_long[192]; + uint16_t dist_slot[256]; + uint16_t dist_special[114]; + uint16_t dist_align[16]; + struct lzma_len_dec match_len_dec; + struct lzma_len_dec rep_len_dec; + uint16_t literal[12288]; +}; + +enum lzma2_seq { + SEQ_CONTROL = 0, + SEQ_UNCOMPRESSED_1 = 1, + SEQ_UNCOMPRESSED_2 = 2, + SEQ_COMPRESSED_0 = 3, + SEQ_COMPRESSED_1 = 4, + SEQ_PROPERTIES = 5, + SEQ_LZMA_PREPARE = 6, + SEQ_LZMA_RUN = 7, + SEQ_COPY = 8, +}; + +struct lzma2_dec { + enum lzma2_seq sequence; + enum lzma2_seq next_sequence; + uint32_t uncompressed; + uint32_t compressed; + bool need_dict_reset; + bool need_props; +}; + +struct xz_dec_lzma2___2 { + struct rc_dec rc; + struct dictionary dict; + struct lzma2_dec lzma2; + struct lzma_dec lzma; + struct { + uint32_t size; + uint8_t buf[63]; + } temp; +}; + +struct xz_dec_bcj___2 { + enum { + BCJ_X86 = 4, + BCJ_POWERPC = 5, + BCJ_IA64 = 6, + BCJ_ARM = 7, + BCJ_ARMTHUMB = 8, + BCJ_SPARC = 9, + } type; + enum xz_ret ret; + bool single_call; + uint32_t pos; + uint32_t x86_prev_mask; + uint8_t *out; + size_t out_pos; + size_t out_size; + struct { + size_t filtered; + size_t size; + uint8_t buf[16]; + } temp; +}; + +struct ts_state { + unsigned int offset; + char cb[40]; +}; + +struct ts_config; + +struct ts_ops { + const char *name; + struct ts_config * (*init)(const void *, unsigned int, gfp_t, int); + unsigned int (*find)(struct ts_config *, struct ts_state *); + void (*destroy)(struct ts_config *); + void * (*get_pattern)(struct ts_config *); + unsigned int (*get_pattern_len)(struct ts_config *); + struct module *owner; + struct list_head list; +}; + +struct ts_config { + struct ts_ops *ops; + int flags; + unsigned int (*get_next_block)(unsigned int, const u8 **, struct ts_config *, struct ts_state *); + void (*finish)(struct ts_config *, struct ts_state *); +}; + +struct ts_linear_state { + unsigned int len; + const void *data; +}; + +struct ei_entry { + struct list_head list; + long unsigned int start_addr; + long unsigned int end_addr; + int etype; + void *priv; +}; + +struct nla_bitfield32 { + __u32 value; + __u32 selector; +}; + +enum nla_policy_validation { + NLA_VALIDATE_NONE = 0, + NLA_VALIDATE_RANGE = 1, + NLA_VALIDATE_RANGE_WARN_TOO_LONG = 2, + NLA_VALIDATE_MIN = 3, + NLA_VALIDATE_MAX = 
4, + NLA_VALIDATE_MASK = 5, + NLA_VALIDATE_RANGE_PTR = 6, + NLA_VALIDATE_FUNCTION = 7, +}; + +enum netlink_validation { + NL_VALIDATE_LIBERAL = 0, + NL_VALIDATE_TRAILING = 1, + NL_VALIDATE_MAXTYPE = 2, + NL_VALIDATE_UNSPEC = 4, + NL_VALIDATE_STRICT_ATTRS = 8, + NL_VALIDATE_NESTED = 16, +}; + +struct cpu_rmap { + struct kref refcount; + u16 size; + u16 used; + void **obj; + struct { + u16 index; + u16 dist; + } near[0]; +}; + +struct irq_glue { + struct irq_affinity_notify notify; + struct cpu_rmap *rmap; + u16 index; +}; + +typedef mpi_limb_t *mpi_ptr_t; + +typedef int mpi_size_t; + +typedef mpi_limb_t UWtype; + +typedef unsigned int UHWtype; + +enum gcry_mpi_constants { + MPI_C_ZERO = 0, + MPI_C_ONE = 1, + MPI_C_TWO = 2, + MPI_C_THREE = 3, + MPI_C_FOUR = 4, + MPI_C_EIGHT = 5, +}; + +struct barrett_ctx_s; + +typedef struct barrett_ctx_s *mpi_barrett_t; + +struct gcry_mpi_point { + MPI x; + MPI y; + MPI z; +}; + +typedef struct gcry_mpi_point *MPI_POINT; + +enum gcry_mpi_ec_models { + MPI_EC_WEIERSTRASS = 0, + MPI_EC_MONTGOMERY = 1, + MPI_EC_EDWARDS = 2, +}; + +enum ecc_dialects { + ECC_DIALECT_STANDARD = 0, + ECC_DIALECT_ED25519 = 1, + ECC_DIALECT_SAFECURVE = 2, +}; + +struct mpi_ec_ctx { + enum gcry_mpi_ec_models model; + enum ecc_dialects dialect; + int flags; + unsigned int nbits; + MPI p; + MPI a; + MPI b; + MPI_POINT G; + MPI n; + unsigned int h; + MPI_POINT Q; + MPI d; + const char *name; + struct { + struct { + unsigned int a_is_pminus3: 1; + unsigned int two_inv_p: 1; + } valid; + int a_is_pminus3; + MPI two_inv_p; + mpi_barrett_t p_barrett; + MPI scratch[11]; + } t; + void (*addm)(MPI, MPI, MPI, struct mpi_ec_ctx *); + void (*subm)(MPI, MPI, MPI, struct mpi_ec_ctx *); + void (*mulm)(MPI, MPI, MPI, struct mpi_ec_ctx *); + void (*pow2)(MPI, const MPI, struct mpi_ec_ctx *); + void (*mul2)(MPI, MPI, struct mpi_ec_ctx *); +}; + +struct field_table { + const char *p; + void (*addm)(MPI, MPI, MPI, struct mpi_ec_ctx *); + void (*subm)(MPI, MPI, MPI, struct mpi_ec_ctx *); + void (*mulm)(MPI, MPI, MPI, struct mpi_ec_ctx *); + void (*mul2)(MPI, MPI, struct mpi_ec_ctx *); + void (*pow2)(MPI, const MPI, struct mpi_ec_ctx *); +}; + +enum gcry_mpi_format { + GCRYMPI_FMT_NONE = 0, + GCRYMPI_FMT_STD = 1, + GCRYMPI_FMT_PGP = 2, + GCRYMPI_FMT_SSH = 3, + GCRYMPI_FMT_HEX = 4, + GCRYMPI_FMT_USG = 5, + GCRYMPI_FMT_OPAQUE = 8, +}; + +struct barrett_ctx_s___2; + +typedef struct barrett_ctx_s___2 *mpi_barrett_t___2; + +struct barrett_ctx_s___2 { + MPI m; + int m_copied; + int k; + MPI y; + MPI r1; + MPI r2; + MPI r3; +}; + +struct karatsuba_ctx { + struct karatsuba_ctx *next; + mpi_ptr_t tspace; + mpi_size_t tspace_size; + mpi_ptr_t tp; + mpi_size_t tp_size; +}; + +typedef long int mpi_limb_signed_t; + +enum dim_tune_state { + DIM_PARKING_ON_TOP = 0, + DIM_PARKING_TIRED = 1, + DIM_GOING_RIGHT = 2, + DIM_GOING_LEFT = 3, +}; + +struct dim_cq_moder { + u16 usec; + u16 pkts; + u16 comps; + u8 cq_period_mode; +}; + +enum dim_cq_period_mode { + DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0, + DIM_CQ_PERIOD_MODE_START_FROM_CQE = 1, + DIM_CQ_PERIOD_NUM_MODES = 2, +}; + +enum dim_state { + DIM_START_MEASURE = 0, + DIM_MEASURE_IN_PROGRESS = 1, + DIM_APPLY_NEW_PROFILE = 2, +}; + +enum dim_stats_state { + DIM_STATS_WORSE = 0, + DIM_STATS_SAME = 1, + DIM_STATS_BETTER = 2, +}; + +enum dim_step_result { + DIM_STEPPED = 0, + DIM_TOO_TIRED = 1, + DIM_ON_EDGE = 2, +}; + +enum pubkey_algo { + PUBKEY_ALGO_RSA = 0, + PUBKEY_ALGO_MAX = 1, +}; + +struct pubkey_hdr { + uint8_t version; + uint32_t timestamp; + uint8_t algo; + uint8_t 
nmpi; + char mpi[0]; +} __attribute__((packed)); + +struct signature_hdr { + uint8_t version; + uint32_t timestamp; + uint8_t algo; + uint8_t hash; + uint8_t keyid[8]; + uint8_t nmpi; + char mpi[0]; +} __attribute__((packed)); + +struct sg_splitter { + struct scatterlist *in_sg0; + int nents; + off_t skip_sg0; + unsigned int length_last_sg; + struct scatterlist *out_sg; +}; + +struct sg_pool { + size_t size; + char *name; + struct kmem_cache *slab; + mempool_t *pool; +}; + +enum { + IRQ_POLL_F_SCHED = 0, + IRQ_POLL_F_DISABLE = 1, +}; + +struct font_desc { + int idx; + const char *name; + int width; + int height; + const void *data; + int pref; +}; + +struct font_data { + unsigned int extra[4]; + const unsigned char data[0]; +}; + +typedef u16 ucs2_char_t; + +struct firmware { + size_t size; + const u8 *data; + void *priv; +}; + +struct pldmfw_record { + struct list_head entry; + struct list_head descs; + const u8 *version_string; + u8 version_type; + u8 version_len; + u16 package_data_len; + u32 device_update_flags; + const u8 *package_data; + long unsigned int *component_bitmap; + u16 component_bitmap_len; +}; + +struct pldmfw_desc_tlv { + struct list_head entry; + const u8 *data; + u16 type; + u16 size; +}; + +struct pldmfw_component { + struct list_head entry; + u16 classification; + u16 identifier; + u16 options; + u16 activation_method; + u32 comparison_stamp; + u32 component_size; + const u8 *component_data; + const u8 *version_string; + u8 version_type; + u8 version_len; + u8 index; +}; + +struct pldmfw_ops; + +struct pldmfw { + const struct pldmfw_ops *ops; + struct device *dev; +}; + +struct pldmfw_ops { + bool (*match_record)(struct pldmfw *, struct pldmfw_record *); + int (*send_package_data)(struct pldmfw *, const u8 *, u16); + int (*send_component_table)(struct pldmfw *, struct pldmfw_component *, u8); + int (*flash_component)(struct pldmfw *, struct pldmfw_component *); + int (*finalize_update)(struct pldmfw *); +}; + +struct __pldm_timestamp { + u8 b[13]; +}; + +struct __pldm_header { + uuid_t id; + u8 revision; + __le16 size; + struct __pldm_timestamp release_date; + __le16 component_bitmap_len; + u8 version_type; + u8 version_len; + u8 version_string[0]; +} __attribute__((packed)); + +struct __pldmfw_record_info { + __le16 record_len; + u8 descriptor_count; + __le32 device_update_flags; + u8 version_type; + u8 version_len; + __le16 package_data_len; + u8 variable_record_data[0]; +} __attribute__((packed)); + +struct __pldmfw_desc_tlv { + __le16 type; + __le16 size; + u8 data[0]; +}; + +struct __pldmfw_record_area { + u8 record_count; + u8 records[0]; +}; + +struct __pldmfw_component_info { + __le16 classification; + __le16 identifier; + __le32 comparison_stamp; + __le16 options; + __le16 activation_method; + __le32 location_offset; + __le32 size; + u8 version_type; + u8 version_len; + u8 version_string[0]; +} __attribute__((packed)); + +struct __pldmfw_component_area { + __le16 component_image_count; + u8 components[0]; +}; + +struct pldmfw_priv { + struct pldmfw *context; + const struct firmware *fw; + size_t offset; + struct list_head records; + struct list_head components; + const struct __pldm_header *header; + u16 total_header_size; + u16 component_bitmap_len; + u16 bitmap_size; + u16 component_count; + const u8 *component_start; + const u8 *record_start; + u8 record_count; + u32 header_crc; + struct pldmfw_record *matching_record; +}; + +struct pldm_pci_record_id { + int vendor; + int device; + int subsystem_vendor; + int subsystem_device; +}; + +typedef long 
unsigned int cycles_t; + +struct compress_format { + unsigned char magic[2]; + const char *name; + decompress_fn decompressor; +}; + +struct group_data { + int limit[21]; + int base[20]; + int permute[258]; + int minLen; + int maxLen; +}; + +struct bunzip_data { + int writeCopies; + int writePos; + int writeRunCountdown; + int writeCount; + int writeCurrent; + long int (*fill)(void *, long unsigned int); + long int inbufCount; + long int inbufPos; + unsigned char *inbuf; + unsigned int inbufBitCount; + unsigned int inbufBits; + unsigned int crc32Table[256]; + unsigned int headerCRC; + unsigned int totalCRC; + unsigned int writeCRC; + unsigned int *dbuf; + unsigned int dbufSize; + unsigned char selectors[32768]; + struct group_data groups[6]; + int io_error; + int byteCount[256]; + unsigned char symToByte[256]; + unsigned char mtfSymbol[256]; +}; + +struct rc { + long int (*fill)(void *, long unsigned int); + uint8_t *ptr; + uint8_t *buffer; + uint8_t *buffer_end; + long int buffer_size; + uint32_t code; + uint32_t range; + uint32_t bound; + void (*error)(char *); +}; + +struct lzma_header { + uint8_t pos; + uint32_t dict_size; + uint64_t dst_size; +} __attribute__((packed)); + +struct writer { + uint8_t *buffer; + uint8_t previous_byte; + size_t buffer_pos; + int bufsize; + size_t global_pos; + long int (*flush)(void *, long unsigned int); + struct lzma_header *header; +}; + +struct cstate { + int state; + uint32_t rep0; + uint32_t rep1; + uint32_t rep2; + uint32_t rep3; +}; + +struct xz_dec___2; + +typedef enum { + ZSTD_error_no_error = 0, + ZSTD_error_GENERIC = 1, + ZSTD_error_prefix_unknown = 2, + ZSTD_error_version_unsupported = 3, + ZSTD_error_parameter_unknown = 4, + ZSTD_error_frameParameter_unsupported = 5, + ZSTD_error_frameParameter_unsupportedBy32bits = 6, + ZSTD_error_frameParameter_windowTooLarge = 7, + ZSTD_error_compressionParameter_unsupported = 8, + ZSTD_error_init_missing = 9, + ZSTD_error_memory_allocation = 10, + ZSTD_error_stage_wrong = 11, + ZSTD_error_dstSize_tooSmall = 12, + ZSTD_error_srcSize_wrong = 13, + ZSTD_error_corruption_detected = 14, + ZSTD_error_checksum_wrong = 15, + ZSTD_error_tableLog_tooLarge = 16, + ZSTD_error_maxSymbolValue_tooLarge = 17, + ZSTD_error_maxSymbolValue_tooSmall = 18, + ZSTD_error_dictionary_corrupted = 19, + ZSTD_error_dictionary_wrong = 20, + ZSTD_error_dictionaryCreation_failed = 21, + ZSTD_error_maxCode = 22, +} ZSTD_ErrorCode; + +struct ZSTD_DStream_s___2; + +typedef struct ZSTD_DStream_s___2 ZSTD_DStream___2; + +struct cpio_data { + void *data; + size_t size; + char name[18]; +}; + +enum cpio_fields { + C_MAGIC = 0, + C_INO = 1, + C_MODE = 2, + C_UID = 3, + C_GID = 4, + C_NLINK = 5, + C_MTIME = 6, + C_FILESIZE = 7, + C_MAJ = 8, + C_MIN = 9, + C_RMAJ = 10, + C_RMIN = 11, + C_NAMESIZE = 12, + C_CHKSUM = 13, + C_NFIELDS = 14, +}; + +enum { + ASSUME_PERFECT = 255, + ASSUME_VALID_DTB = 1, + ASSUME_VALID_INPUT = 2, + ASSUME_LATEST = 4, + ASSUME_NO_ROLLBACK = 8, + ASSUME_LIBFDT_ORDER = 16, + ASSUME_LIBFDT_FLAWLESS = 32, +}; + +struct fdt_reserve_entry { + fdt64_t address; + fdt64_t size; +}; + +struct fdt_node_header { + fdt32_t tag; + char name[0]; +}; + +struct fdt_property { + fdt32_t tag; + fdt32_t len; + fdt32_t nameoff; + char data[0]; +}; + +struct fdt_errtabent { + const char *str; +}; + +struct fprop_local_single { + long unsigned int events; + unsigned int period; + raw_spinlock_t lock; +}; + +struct ida_bitmap { + long unsigned int bitmap[16]; +}; + +struct klist_waiter { + struct list_head list; + struct klist_node *node; + 
struct task_struct *process; + int woken; +}; + +struct uevent_sock { + struct list_head list; + struct sock *sk; +}; + +enum { + LOGIC_PIO_INDIRECT = 0, + LOGIC_PIO_CPU_MMIO = 1, +}; + +struct logic_pio_host_ops; + +struct logic_pio_hwaddr { + struct list_head list; + struct fwnode_handle *fwnode; + resource_size_t hw_start; + resource_size_t io_start; + resource_size_t size; + long unsigned int flags; + void *hostdata; + const struct logic_pio_host_ops *ops; +}; + +struct logic_pio_host_ops { + u32 (*in)(void *, long unsigned int, size_t); + void (*out)(void *, long unsigned int, u32, size_t); + u32 (*ins)(void *, long unsigned int, void *, size_t, unsigned int); + void (*outs)(void *, long unsigned int, const void *, size_t, unsigned int); +}; + +struct radix_tree_preload { + unsigned int nr; + struct xa_node *nodes; +}; + +typedef struct { + long unsigned int key[2]; +} hsiphash_key_t; + +enum format_type { + FORMAT_TYPE_NONE = 0, + FORMAT_TYPE_WIDTH = 1, + FORMAT_TYPE_PRECISION = 2, + FORMAT_TYPE_CHAR = 3, + FORMAT_TYPE_STR = 4, + FORMAT_TYPE_PTR = 5, + FORMAT_TYPE_PERCENT_CHAR = 6, + FORMAT_TYPE_INVALID = 7, + FORMAT_TYPE_LONG_LONG = 8, + FORMAT_TYPE_ULONG = 9, + FORMAT_TYPE_LONG = 10, + FORMAT_TYPE_UBYTE = 11, + FORMAT_TYPE_BYTE = 12, + FORMAT_TYPE_USHORT = 13, + FORMAT_TYPE_SHORT = 14, + FORMAT_TYPE_UINT = 15, + FORMAT_TYPE_INT = 16, + FORMAT_TYPE_SIZE_T = 17, + FORMAT_TYPE_PTRDIFF = 18, +}; + +struct printf_spec { + unsigned int type: 8; + int field_width: 24; + unsigned int flags: 8; + unsigned int base: 8; + int precision: 16; +}; + +struct minmax_sample { + u32 t; + u32 v; +}; + +struct minmax { + struct minmax_sample s[3]; +}; + +struct xa_limit { + u32 max; + u32 min; +}; + +typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *); + +typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *, const long unsigned int); + +struct acpi_probe_entry; + +typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *, struct acpi_probe_entry *); + +struct acpi_probe_entry { + __u8 id[5]; + __u8 type; + acpi_probe_entry_validate_subtbl subtable_valid; + union { + acpi_tbl_table_handler probe_table; + acpi_tbl_entry_handler probe_subtbl; + }; + kernel_ulong_t driver_data; +}; + +typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *); + +typedef int (*of_init_fn_2)(struct device_node *, struct device_node *); + +struct armctrl_ic { + void *base; + void *pending[3]; + void *enable[3]; + void *disable[3]; + struct irq_domain *domain; +}; + +struct bcm2836_arm_irqchip_intc { + struct irq_domain *domain; + void *base; +}; + +struct tegra_ictlr_soc { + unsigned int num_ictlrs; +}; + +struct tegra_ictlr_info { + void *base[6]; + u32 cop_ier[6]; + u32 cop_iep[6]; + u32 cpu_ier[6]; + u32 cpu_iep[6]; + u32 ictlr_wake_mask[6]; +}; + +struct sun4i_irq_chip_data { + void *irq_base; + struct irq_domain *irq_domain; + u32 enable_reg_offset; + u32 mask_reg_offset; +}; + +enum { + SUNXI_SRC_TYPE_LEVEL_LOW = 0, + SUNXI_SRC_TYPE_EDGE_FALLING = 1, + SUNXI_SRC_TYPE_LEVEL_HIGH = 2, + SUNXI_SRC_TYPE_EDGE_RISING = 3, +}; + +struct sunxi_sc_nmi_reg_offs { + u32 ctrl; + u32 pend; + u32 enable; +}; + +struct acpi_madt_generic_distributor { + struct acpi_subtable_header header; + u16 reserved; + u32 gic_id; + u64 base_address; + u32 global_irq_base; + u8 version; + u8 reserved2[3]; +}; + +enum acpi_madt_gic_version { + ACPI_MADT_GIC_VERSION_NONE = 0, + ACPI_MADT_GIC_VERSION_V1 = 1, + ACPI_MADT_GIC_VERSION_V2 = 2, + ACPI_MADT_GIC_VERSION_V3 = 3, + 
ACPI_MADT_GIC_VERSION_V4 = 4, + ACPI_MADT_GIC_VERSION_RESERVED = 5, +}; + +enum acpi_irq_model_id { + ACPI_IRQ_MODEL_PIC = 0, + ACPI_IRQ_MODEL_IOAPIC = 1, + ACPI_IRQ_MODEL_IOSAPIC = 2, + ACPI_IRQ_MODEL_PLATFORM = 3, + ACPI_IRQ_MODEL_GIC = 4, + ACPI_IRQ_MODEL_COUNT = 5, +}; + +union gic_base { + void *common_base; + void **percpu_base; +}; + +struct gic_chip_data { + struct irq_chip chip; + union gic_base dist_base; + union gic_base cpu_base; + void *raw_dist_base; + void *raw_cpu_base; + u32 percpu_offset; + u32 saved_spi_enable[32]; + u32 saved_spi_active[32]; + u32 saved_spi_conf[64]; + u32 saved_spi_target[255]; + u32 *saved_ppi_enable; + u32 *saved_ppi_active; + u32 *saved_ppi_conf; + struct irq_domain *domain; + unsigned int gic_irqs; +}; + +struct gic_quirk { + const char *desc; + const char *compatible; + bool (*init)(void *); + u32 iidr; + u32 mask; +}; + +struct clk_bulk_data { + const char *id; + struct clk *clk; +}; + +struct gic_clk_data { + unsigned int num_clocks; + const char * const *clocks; +}; + +struct gic_chip_data___2; + +struct gic_chip_pm { + struct gic_chip_data___2 *chip_data; + const struct gic_clk_data *clk_data; + struct clk_bulk_data *clks; +}; + +struct acpi_table_madt { + struct acpi_table_header header; + u32 address; + u32 flags; +}; + +struct acpi_madt_generic_msi_frame { + struct acpi_subtable_header header; + u16 reserved; + u32 msi_frame_id; + u64 base_address; + u32 flags; + u16 spi_count; + u16 spi_base; +}; + +struct v2m_data { + struct list_head entry; + struct fwnode_handle *fwnode; + struct resource res; + void *base; + u32 spi_start; + u32 nr_spis; + u32 spi_offset; + long unsigned int *bm; + u32 flags; +}; + +struct acpi_madt_generic_redistributor { + struct acpi_subtable_header header; + u16 reserved; + u64 base_address; + u32 length; +} __attribute__((packed)); + +struct rdists { + struct { + raw_spinlock_t rd_lock; + void *rd_base; + struct page *pend_page; + phys_addr_t phys_base; + bool lpi_enabled; + cpumask_t *vpe_table_mask; + void *vpe_l1_base; + } *rdist; + phys_addr_t prop_table_pa; + void *prop_table_va; + u64 flags; + u32 gicd_typer; + u32 gicd_typer2; + bool has_vlpis; + bool has_rvpeid; + bool has_direct_lpi; + bool has_vpend_valid_dirty; +}; + +struct partition_affinity { + cpumask_t mask; + void *partition_id; +}; + +struct redist_region { + void *redist_base; + phys_addr_t phys_base; + bool single_redist; +}; + +struct partition_desc; + +struct gic_chip_data___3 { + struct fwnode_handle *fwnode; + void *dist_base; + struct redist_region *redist_regions; + struct rdists rdists; + struct irq_domain *domain; + u64 redist_stride; + u32 nr_redist_regions; + u64 flags; + bool has_rss; + unsigned int ppi_nr; + struct partition_desc **ppi_descs; +}; + +enum gic_intid_range { + SGI_RANGE = 0, + PPI_RANGE = 1, + SPI_RANGE = 2, + EPPI_RANGE = 3, + ESPI_RANGE = 4, + LPI_RANGE = 5, + __INVALID_RANGE__ = 6, +}; + +struct mbi_range { + u32 spi_start; + u32 nr_spis; + long unsigned int *bm; +}; + +struct acpi_madt_generic_translator { + struct acpi_subtable_header header; + u16 reserved; + u32 translation_id; + u64 base_address; + u32 reserved2; +} __attribute__((packed)); + +struct acpi_srat_gic_its_affinity { + struct acpi_subtable_header header; + u32 proximity_domain; + u16 reserved; + u32 its_id; +} __attribute__((packed)); + +enum its_vcpu_info_cmd_type { + MAP_VLPI = 0, + GET_VLPI = 1, + PROP_UPDATE_VLPI = 2, + PROP_UPDATE_AND_INV_VLPI = 3, + SCHEDULE_VPE = 4, + DESCHEDULE_VPE = 5, + INVALL_VPE = 6, + PROP_UPDATE_VSGI = 7, +}; + +struct 
its_cmd_info { + enum its_vcpu_info_cmd_type cmd_type; + union { + struct its_vlpi_map *map; + u8 config; + bool req_db; + struct { + bool g0en; + bool g1en; + }; + struct { + u8 priority; + bool group; + }; + }; +}; + +struct its_collection___2 { + u64 target_address; + u16 col_id; +}; + +struct its_baser { + void *base; + u64 val; + u32 order; + u32 psz; +}; + +struct its_cmd_block; + +struct its_device___2; + +struct its_node { + raw_spinlock_t lock; + struct mutex dev_alloc_lock; + struct list_head entry; + void *base; + void *sgir_base; + phys_addr_t phys_base; + struct its_cmd_block *cmd_base; + struct its_cmd_block *cmd_write; + struct its_baser tables[8]; + struct its_collection___2 *collections; + struct fwnode_handle *fwnode_handle; + u64 (*get_msi_base)(struct its_device___2 *); + u64 typer; + u64 cbaser_save; + u32 ctlr_save; + u32 mpidr; + struct list_head its_device_list; + u64 flags; + long unsigned int list_nr; + int numa_node; + unsigned int msi_domain_flags; + u32 pre_its_base; + int vlpi_redist_offset; +}; + +struct its_cmd_block { + union { + u64 raw_cmd[4]; + __le64 raw_cmd_le[4]; + }; +}; + +struct event_lpi_map { + long unsigned int *lpi_map; + u16 *col_map; + irq_hw_number_t lpi_base; + int nr_lpis; + raw_spinlock_t vlpi_lock; + struct its_vm *vm; + struct its_vlpi_map *vlpi_maps; + int nr_vlpis; +}; + +struct its_device___2 { + struct list_head entry; + struct its_node *its; + struct event_lpi_map event_map; + void *itt; + u32 nr_ites; + u32 device_id; + bool shared; +}; + +struct cpu_lpi_count { + atomic_t managed; + atomic_t unmanaged; +}; + +struct its_cmd_desc { + union { + struct { + struct its_device___2 *dev; + u32 event_id; + } its_inv_cmd; + struct { + struct its_device___2 *dev; + u32 event_id; + } its_clear_cmd; + struct { + struct its_device___2 *dev; + u32 event_id; + } its_int_cmd; + struct { + struct its_device___2 *dev; + int valid; + } its_mapd_cmd; + struct { + struct its_collection___2 *col; + int valid; + } its_mapc_cmd; + struct { + struct its_device___2 *dev; + u32 phys_id; + u32 event_id; + } its_mapti_cmd; + struct { + struct its_device___2 *dev; + struct its_collection___2 *col; + u32 event_id; + } its_movi_cmd; + struct { + struct its_device___2 *dev; + u32 event_id; + } its_discard_cmd; + struct { + struct its_collection___2 *col; + } its_invall_cmd; + struct { + struct its_vpe *vpe; + } its_vinvall_cmd; + struct { + struct its_vpe *vpe; + struct its_collection___2 *col; + bool valid; + } its_vmapp_cmd; + struct { + struct its_vpe *vpe; + struct its_device___2 *dev; + u32 virt_id; + u32 event_id; + bool db_enabled; + } its_vmapti_cmd; + struct { + struct its_vpe *vpe; + struct its_device___2 *dev; + u32 event_id; + bool db_enabled; + } its_vmovi_cmd; + struct { + struct its_vpe *vpe; + struct its_collection___2 *col; + u16 seq_num; + u16 its_list; + } its_vmovp_cmd; + struct { + struct its_vpe *vpe; + } its_invdb_cmd; + struct { + struct its_vpe *vpe; + u8 sgi; + u8 priority; + bool enable; + bool group; + bool clear; + } its_vsgi_cmd; + }; +}; + +typedef struct its_collection___2 * (*its_cmd_builder_t)(struct its_node *, struct its_cmd_block *, struct its_cmd_desc *); + +typedef struct its_vpe * (*its_cmd_vbuilder_t)(struct its_node *, struct its_cmd_block *, struct its_cmd_desc *); + +struct lpi_range { + struct list_head entry; + u32 base_id; + u32 span; +}; + +struct its_srat_map { + u32 numa_node; + u32 its_id; +}; + +struct msi_controller { + struct module *owner; + struct device *dev; + struct device_node *of_node; + struct 
list_head list; + int (*setup_irq)(struct msi_controller *, struct pci_dev *, struct msi_desc *); + int (*setup_irqs)(struct msi_controller *, struct pci_dev *, int, int); + void (*teardown_irq)(struct msi_controller *, unsigned int); +}; + +struct partition_desc___2 { + int nr_parts; + struct partition_affinity *parts; + struct irq_domain *domain; + struct irq_desc *chained_desc; + long unsigned int *bitmap; + struct irq_domain_ops ops; +}; + +struct mbigen_device { + struct platform_device *pdev; + void *base; +}; + +struct mtk_sysirq_chip_data { + raw_spinlock_t lock; + u32 nr_intpol_bases; + void **intpol_bases; + u32 *intpol_words; + u8 *intpol_idx; + u16 *which_word; +}; + +struct mtk_cirq_chip_data { + void *base; + unsigned int ext_irq_start; + unsigned int ext_irq_end; + struct irq_domain *domain; +}; + +struct mvebu_gicp_spi_range { + unsigned int start; + unsigned int count; +}; + +struct mvebu_gicp { + struct mvebu_gicp_spi_range *spi_ranges; + unsigned int spi_ranges_cnt; + unsigned int spi_cnt; + long unsigned int *spi_bitmap; + spinlock_t spi_lock; + struct resource *res; + struct device *dev; +}; + +struct mvebu_icu_subset_data { + unsigned int icu_group; + unsigned int offset_set_ah; + unsigned int offset_set_al; + unsigned int offset_clr_ah; + unsigned int offset_clr_al; +}; + +struct mvebu_icu { + void *base; + struct device *dev; +}; + +struct mvebu_icu_msi_data { + struct mvebu_icu *icu; + atomic_t initialized; + const struct mvebu_icu_subset_data *subset_data; +}; + +struct mvebu_icu_irq_data { + struct mvebu_icu *icu; + unsigned int icu_group; + unsigned int type; +}; + +struct odmi_data { + struct resource res; + void *base; + unsigned int spi_base; +}; + +struct mvebu_pic { + void *base; + u32 parent_irq; + struct irq_domain *domain; + struct irq_chip irq_chip; +}; + +struct mvebu_sei_interrupt_range { + u32 first; + u32 size; +}; + +struct mvebu_sei_caps { + struct mvebu_sei_interrupt_range ap_range; + struct mvebu_sei_interrupt_range cp_range; +}; + +struct mvebu_sei { + struct device *dev; + void *base; + struct resource *res; + struct irq_domain *sei_domain; + struct irq_domain *ap_domain; + struct irq_domain *cp_domain; + const struct mvebu_sei_caps *caps; + struct mutex cp_msi_lock; + long unsigned int cp_msi_bitmap[1]; + raw_spinlock_t mask_lock; +}; + +struct meson_gpio_irq_controller; + +struct irq_ctl_ops { + void (*gpio_irq_sel_pin)(struct meson_gpio_irq_controller *, unsigned int, long unsigned int); + void (*gpio_irq_init)(struct meson_gpio_irq_controller *); +}; + +struct meson_gpio_irq_params; + +struct meson_gpio_irq_controller { + const struct meson_gpio_irq_params *params; + void *base; + u32 channel_irqs[8]; + long unsigned int channel_map[1]; + spinlock_t lock; +}; + +struct meson_gpio_irq_params { + unsigned int nr_hwirq; + bool support_edge_both; + unsigned int edge_both_offset; + unsigned int edge_single_offset; + unsigned int pol_low_offset; + unsigned int pin_sel_mask; + struct irq_ctl_ops ops; +}; + +struct regmap_irq_type { + unsigned int type_reg_offset; + unsigned int type_reg_mask; + unsigned int type_rising_val; + unsigned int type_falling_val; + unsigned int type_level_low_val; + unsigned int type_level_high_val; + unsigned int types_supported; +}; + +struct regmap_irq { + unsigned int reg_offset; + unsigned int mask; + struct regmap_irq_type type; +}; + +struct regmap_irq_sub_irq_map { + unsigned int num_regs; + unsigned int *offset; +}; + +struct regmap_irq_chip { + const char *name; + unsigned int main_status; + unsigned int 
num_main_status_bits; + struct regmap_irq_sub_irq_map *sub_reg_offsets; + int num_main_regs; + unsigned int status_base; + unsigned int mask_base; + unsigned int unmask_base; + unsigned int ack_base; + unsigned int wake_base; + unsigned int type_base; + unsigned int irq_reg_stride; + bool mask_writeonly: 1; + bool init_ack_masked: 1; + bool mask_invert: 1; + bool use_ack: 1; + bool ack_invert: 1; + bool clear_ack: 1; + bool wake_invert: 1; + bool runtime_pm: 1; + bool type_invert: 1; + bool type_in_mask: 1; + bool clear_on_unmask: 1; + int num_regs; + const struct regmap_irq *irqs; + int num_irqs; + int num_type_reg; + unsigned int type_reg_stride; + int (*handle_pre_irq)(void *); + int (*handle_post_irq)(void *); + void *irq_drv_data; +}; + +struct gpio_desc; + +struct regulator_init_data; + +struct arizona_ldo1_pdata { + const struct regulator_init_data *init_data; +}; + +struct regulator_state { + int uV; + int min_uV; + int max_uV; + unsigned int mode; + int enabled; + bool changeable; +}; + +struct regulation_constraints { + const char *name; + int min_uV; + int max_uV; + int uV_offset; + int min_uA; + int max_uA; + int ilim_uA; + int system_load; + u32 *max_spread; + int max_uV_step; + unsigned int valid_modes_mask; + unsigned int valid_ops_mask; + int input_uV; + struct regulator_state state_disk; + struct regulator_state state_mem; + struct regulator_state state_standby; + suspend_state_t initial_state; + unsigned int initial_mode; + unsigned int ramp_delay; + unsigned int settling_time; + unsigned int settling_time_up; + unsigned int settling_time_down; + unsigned int enable_time; + unsigned int active_discharge; + unsigned int always_on: 1; + unsigned int boot_on: 1; + unsigned int apply_uV: 1; + unsigned int ramp_disable: 1; + unsigned int soft_start: 1; + unsigned int pull_down: 1; + unsigned int over_current_protection: 1; +}; + +struct regulator_consumer_supply; + +struct regulator_init_data { + const char *supply_regulator; + struct regulation_constraints constraints; + int num_consumer_supplies; + struct regulator_consumer_supply *consumer_supplies; + int (*regulator_init)(void *); + void *driver_data; +}; + +struct arizona_micsupp_pdata { + const struct regulator_init_data *init_data; +}; + +struct regulator; + +struct regulator_bulk_data { + const char *supply; + struct regulator *consumer; + int ret; +}; + +struct regulator_consumer_supply { + const char *dev_name; + const char *supply; +}; + +struct madera_codec_pdata { + u32 max_channels_clocked[4]; + u32 dmic_ref[6]; + u32 inmode[24]; + bool out_mono[6]; + u32 pdm_fmt[2]; + u32 pdm_mute[2]; +}; + +struct pinctrl_map; + +struct madera_pdata { + struct gpio_desc *reset; + struct arizona_ldo1_pdata ldo1; + struct arizona_micsupp_pdata micvdd; + unsigned int irq_flags; + int gpio_base; + const struct pinctrl_map *gpio_configs; + int n_gpio_configs; + u32 gpsw[2]; + struct madera_codec_pdata codec; +}; + +enum pinctrl_map_type { + PIN_MAP_TYPE_INVALID = 0, + PIN_MAP_TYPE_DUMMY_STATE = 1, + PIN_MAP_TYPE_MUX_GROUP = 2, + PIN_MAP_TYPE_CONFIGS_PIN = 3, + PIN_MAP_TYPE_CONFIGS_GROUP = 4, +}; + +struct pinctrl_map_mux { + const char *group; + const char *function; +}; + +struct pinctrl_map_configs { + const char *group_or_pin; + long unsigned int *configs; + unsigned int num_configs; +}; + +struct pinctrl_map { + const char *dev_name; + const char *name; + enum pinctrl_map_type type; + const char *ctrl_dev_name; + union { + struct pinctrl_map_mux mux; + struct pinctrl_map_configs configs; + } data; +}; + +enum madera_type { + 
CS47L35 = 1, + CS47L85 = 2, + CS47L90 = 3, + CS47L91 = 4, + CS47L92 = 5, + CS47L93 = 6, + WM1840 = 7, + CS47L15 = 8, + CS42L92 = 9, +}; + +enum { + MADERA_MCLK1 = 0, + MADERA_MCLK2 = 1, + MADERA_MCLK3 = 2, + MADERA_NUM_MCLK = 3, +}; + +struct regmap; + +struct regmap_irq_chip_data; + +struct snd_soc_dapm_context; + +struct madera { + struct regmap *regmap; + struct regmap *regmap_32bit; + struct device *dev; + enum madera_type type; + unsigned int rev; + const char *type_name; + int num_core_supplies; + struct regulator_bulk_data core_supplies[2]; + struct regulator *dcvdd; + bool internal_dcvdd; + struct madera_pdata pdata; + struct device *irq_dev; + struct regmap_irq_chip_data *irq_data; + int irq; + struct clk_bulk_data mclk[3]; + unsigned int num_micbias; + unsigned int num_childbias[4]; + struct snd_soc_dapm_context *dapm; + struct mutex dapm_ptr_lock; + unsigned int hp_ena; + bool out_clamp[3]; + bool out_shorted[3]; + struct blocking_notifier_head notifier; +}; + +struct mst_intc_chip_data { + raw_spinlock_t lock; + unsigned int irq_start; + unsigned int nr_irqs; + void *base; + bool no_eoi; +}; + +struct of_dev_auxdata { + char *compatible; + resource_size_t phys_addr; + char *name; + void *platform_data; +}; + +struct circ_buf { + char *buf; + int head; + int tail; +}; + +struct serial_rs485 { + __u32 flags; + __u32 delay_rts_before_send; + __u32 delay_rts_after_send; + __u32 padding[5]; +}; + +struct serial_iso7816 { + __u32 flags; + __u32 tg; + __u32 sc_fi; + __u32 sc_di; + __u32 clk; + __u32 reserved[5]; +}; + +struct uart_port; + +struct uart_ops { + unsigned int (*tx_empty)(struct uart_port *); + void (*set_mctrl)(struct uart_port *, unsigned int); + unsigned int (*get_mctrl)(struct uart_port *); + void (*stop_tx)(struct uart_port *); + void (*start_tx)(struct uart_port *); + void (*throttle)(struct uart_port *); + void (*unthrottle)(struct uart_port *); + void (*send_xchar)(struct uart_port *, char); + void (*stop_rx)(struct uart_port *); + void (*enable_ms)(struct uart_port *); + void (*break_ctl)(struct uart_port *, int); + int (*startup)(struct uart_port *); + void (*shutdown)(struct uart_port *); + void (*flush_buffer)(struct uart_port *); + void (*set_termios)(struct uart_port *, struct ktermios *, struct ktermios *); + void (*set_ldisc)(struct uart_port *, struct ktermios *); + void (*pm)(struct uart_port *, unsigned int, unsigned int); + const char * (*type)(struct uart_port *); + void (*release_port)(struct uart_port *); + int (*request_port)(struct uart_port *); + void (*config_port)(struct uart_port *, int); + int (*verify_port)(struct uart_port *, struct serial_struct *); + int (*ioctl)(struct uart_port *, unsigned int, long unsigned int); + int (*poll_init)(struct uart_port *); + void (*poll_put_char)(struct uart_port *, unsigned char); + int (*poll_get_char)(struct uart_port *); +}; + +struct uart_icount { + __u32 cts; + __u32 dsr; + __u32 rng; + __u32 dcd; + __u32 rx; + __u32 tx; + __u32 frame; + __u32 overrun; + __u32 parity; + __u32 brk; + __u32 buf_overrun; +}; + +typedef unsigned int upf_t; + +typedef unsigned int upstat_t; + +struct uart_state; + +struct uart_port { + spinlock_t lock; + long unsigned int iobase; + unsigned char *membase; + unsigned int (*serial_in)(struct uart_port *, int); + void (*serial_out)(struct uart_port *, int, int); + void (*set_termios)(struct uart_port *, struct ktermios *, struct ktermios *); + void (*set_ldisc)(struct uart_port *, struct ktermios *); + unsigned int (*get_mctrl)(struct uart_port *); + void (*set_mctrl)(struct 
uart_port *, unsigned int); + unsigned int (*get_divisor)(struct uart_port *, unsigned int, unsigned int *); + void (*set_divisor)(struct uart_port *, unsigned int, unsigned int, unsigned int); + int (*startup)(struct uart_port *); + void (*shutdown)(struct uart_port *); + void (*throttle)(struct uart_port *); + void (*unthrottle)(struct uart_port *); + int (*handle_irq)(struct uart_port *); + void (*pm)(struct uart_port *, unsigned int, unsigned int); + void (*handle_break)(struct uart_port *); + int (*rs485_config)(struct uart_port *, struct serial_rs485 *); + int (*iso7816_config)(struct uart_port *, struct serial_iso7816 *); + unsigned int irq; + long unsigned int irqflags; + unsigned int uartclk; + unsigned int fifosize; + unsigned char x_char; + unsigned char regshift; + unsigned char iotype; + unsigned char quirks; + unsigned int read_status_mask; + unsigned int ignore_status_mask; + struct uart_state *state; + struct uart_icount icount; + struct console *cons; + upf_t flags; + upstat_t status; + int hw_stopped; + unsigned int mctrl; + unsigned int timeout; + unsigned int type; + const struct uart_ops *ops; + unsigned int custom_divisor; + unsigned int line; + unsigned int minor; + resource_size_t mapbase; + resource_size_t mapsize; + struct device *dev; + long unsigned int sysrq; + unsigned int sysrq_ch; + unsigned char has_sysrq; + unsigned char sysrq_seq; + unsigned char hub6; + unsigned char suspended; + unsigned char console_reinit; + const char *name; + struct attribute_group *attr_group; + const struct attribute_group **tty_groups; + struct serial_rs485 rs485; + struct gpio_desc *rs485_term_gpio; + struct serial_iso7816 iso7816; + void *private_data; +}; + +enum uart_pm_state { + UART_PM_STATE_ON = 0, + UART_PM_STATE_OFF = 3, + UART_PM_STATE_UNDEFINED = 4, +}; + +struct uart_state { + struct tty_port port; + enum uart_pm_state pm_state; + struct circ_buf xmit; + atomic_t refcount; + wait_queue_head_t remove_wait; + struct uart_port *uart_port; +}; + +struct plat_serial8250_port { + long unsigned int iobase; + void *membase; + resource_size_t mapbase; + unsigned int irq; + long unsigned int irqflags; + unsigned int uartclk; + void *private_data; + unsigned char regshift; + unsigned char iotype; + unsigned char hub6; + unsigned char has_sysrq; + upf_t flags; + unsigned int type; + unsigned int (*serial_in)(struct uart_port *, int); + void (*serial_out)(struct uart_port *, int, int); + void (*set_termios)(struct uart_port *, struct ktermios *, struct ktermios *); + void (*set_ldisc)(struct uart_port *, struct ktermios *); + unsigned int (*get_mctrl)(struct uart_port *); + int (*handle_irq)(struct uart_port *); + void (*pm)(struct uart_port *, unsigned int, unsigned int); + void (*handle_break)(struct uart_port *); +}; + +struct lpc_cycle_para { + unsigned int opflags; + unsigned int csize; +}; + +struct hisi_lpc_dev { + spinlock_t cycle_lock; + void *membase; + struct logic_pio_hwaddr *io_host; +}; + +struct hisi_lpc_acpi_cell { + const char *hid; + const char *name; + void *pdata; + size_t pdata_size; +}; + +enum regcache_type { + REGCACHE_NONE = 0, + REGCACHE_RBTREE = 1, + REGCACHE_COMPRESSED = 2, + REGCACHE_FLAT = 3, +}; + +struct reg_default { + unsigned int reg; + unsigned int def; +}; + +enum regmap_endian { + REGMAP_ENDIAN_DEFAULT = 0, + REGMAP_ENDIAN_BIG = 1, + REGMAP_ENDIAN_LITTLE = 2, + REGMAP_ENDIAN_NATIVE = 3, +}; + +struct regmap_range { + unsigned int range_min; + unsigned int range_max; +}; + +struct regmap_access_table { + const struct regmap_range *yes_ranges; + 
unsigned int n_yes_ranges; + const struct regmap_range *no_ranges; + unsigned int n_no_ranges; +}; + +typedef void (*regmap_lock)(void *); + +typedef void (*regmap_unlock)(void *); + +struct regmap_range_cfg; + +struct regmap_config { + const char *name; + int reg_bits; + int reg_stride; + int pad_bits; + int val_bits; + bool (*writeable_reg)(struct device *, unsigned int); + bool (*readable_reg)(struct device *, unsigned int); + bool (*volatile_reg)(struct device *, unsigned int); + bool (*precious_reg)(struct device *, unsigned int); + bool (*writeable_noinc_reg)(struct device *, unsigned int); + bool (*readable_noinc_reg)(struct device *, unsigned int); + bool disable_locking; + regmap_lock lock; + regmap_unlock unlock; + void *lock_arg; + int (*reg_read)(void *, unsigned int, unsigned int *); + int (*reg_write)(void *, unsigned int, unsigned int); + bool fast_io; + unsigned int max_register; + const struct regmap_access_table *wr_table; + const struct regmap_access_table *rd_table; + const struct regmap_access_table *volatile_table; + const struct regmap_access_table *precious_table; + const struct regmap_access_table *wr_noinc_table; + const struct regmap_access_table *rd_noinc_table; + const struct reg_default *reg_defaults; + unsigned int num_reg_defaults; + enum regcache_type cache_type; + const void *reg_defaults_raw; + unsigned int num_reg_defaults_raw; + long unsigned int read_flag_mask; + long unsigned int write_flag_mask; + bool zero_flag_mask; + bool use_single_read; + bool use_single_write; + bool can_multi_write; + enum regmap_endian reg_format_endian; + enum regmap_endian val_format_endian; + const struct regmap_range_cfg *ranges; + unsigned int num_ranges; + bool use_hwlock; + unsigned int hwlock_id; + unsigned int hwlock_mode; + bool can_sleep; +}; + +struct regmap_range_cfg { + const char *name; + unsigned int range_min; + unsigned int range_max; + unsigned int selector_reg; + unsigned int selector_mask; + int selector_shift; + unsigned int window_start; + unsigned int window_len; +}; + +struct vexpress_syscfg { + struct device *dev; + void *base; + struct list_head funcs; +}; + +struct vexpress_syscfg_func { + struct list_head list; + struct vexpress_syscfg *syscfg; + struct regmap *regmap; + int num_templates; + u32 template[0]; +}; + +struct vexpress_config_bridge_ops { + struct regmap * (*regmap_init)(struct device *, void *); + void (*regmap_exit)(struct regmap *, void *); +}; + +struct vexpress_config_bridge { + struct vexpress_config_bridge_ops *ops; + void *context; +}; + +enum device_link_state { + DL_STATE_NONE = 4294967295, + DL_STATE_DORMANT = 0, + DL_STATE_AVAILABLE = 1, + DL_STATE_CONSUMER_PROBE = 2, + DL_STATE_ACTIVE = 3, + DL_STATE_SUPPLIER_UNBIND = 4, +}; + +struct device_link { + struct device *supplier; + struct list_head s_node; + struct device *consumer; + struct list_head c_node; + struct device link_dev; + enum device_link_state status; + u32 flags; + refcount_t rpm_active; + struct kref kref; + struct callback_head callback_head; + bool supplier_preactivated; +}; + +struct phy_configure_opts_dp { + unsigned int link_rate; + unsigned int lanes; + unsigned int voltage[4]; + unsigned int pre[4]; + u8 ssc: 1; + u8 set_rate: 1; + u8 set_lanes: 1; + u8 set_voltages: 1; +}; + +struct phy_configure_opts_mipi_dphy { + unsigned int clk_miss; + unsigned int clk_post; + unsigned int clk_pre; + unsigned int clk_prepare; + unsigned int clk_settle; + unsigned int clk_term_en; + unsigned int clk_trail; + unsigned int clk_zero; + unsigned int d_term_en; + 
unsigned int eot; + unsigned int hs_exit; + unsigned int hs_prepare; + unsigned int hs_settle; + unsigned int hs_skip; + unsigned int hs_trail; + unsigned int hs_zero; + unsigned int init; + unsigned int lpx; + unsigned int ta_get; + unsigned int ta_go; + unsigned int ta_sure; + unsigned int wakeup; + long unsigned int hs_clk_rate; + long unsigned int lp_clk_rate; + unsigned char lanes; +}; + +enum phy_mode { + PHY_MODE_INVALID = 0, + PHY_MODE_USB_HOST = 1, + PHY_MODE_USB_HOST_LS = 2, + PHY_MODE_USB_HOST_FS = 3, + PHY_MODE_USB_HOST_HS = 4, + PHY_MODE_USB_HOST_SS = 5, + PHY_MODE_USB_DEVICE = 6, + PHY_MODE_USB_DEVICE_LS = 7, + PHY_MODE_USB_DEVICE_FS = 8, + PHY_MODE_USB_DEVICE_HS = 9, + PHY_MODE_USB_DEVICE_SS = 10, + PHY_MODE_USB_OTG = 11, + PHY_MODE_UFS_HS_A = 12, + PHY_MODE_UFS_HS_B = 13, + PHY_MODE_PCIE = 14, + PHY_MODE_ETHERNET = 15, + PHY_MODE_MIPI_DPHY = 16, + PHY_MODE_SATA = 17, + PHY_MODE_LVDS = 18, + PHY_MODE_DP = 19, +}; + +union phy_configure_opts { + struct phy_configure_opts_mipi_dphy mipi_dphy; + struct phy_configure_opts_dp dp; +}; + +struct phy; + +struct phy_ops { + int (*init)(struct phy *); + int (*exit)(struct phy *); + int (*power_on)(struct phy *); + int (*power_off)(struct phy *); + int (*set_mode)(struct phy *, enum phy_mode, int); + int (*configure)(struct phy *, union phy_configure_opts *); + int (*validate)(struct phy *, enum phy_mode, int, union phy_configure_opts *); + int (*reset)(struct phy *); + int (*calibrate)(struct phy *); + void (*release)(struct phy *); + struct module *owner; +}; + +struct phy_attrs { + u32 bus_width; + u32 max_link_rate; + enum phy_mode mode; +}; + +struct phy { + struct device dev; + int id; + const struct phy_ops *ops; + struct mutex mutex; + int init_count; + int power_count; + struct phy_attrs attrs; + struct regulator *pwr; +}; + +struct phy_provider { + struct device *dev; + struct device_node *children; + struct module *owner; + struct list_head list; + struct phy * (*of_xlate)(struct device *, struct of_phandle_args *); +}; + +struct phy_lookup { + struct list_head node; + const char *dev_id; + const char *con_id; + struct phy *phy; +}; + +enum usb_dr_mode { + USB_DR_MODE_UNKNOWN = 0, + USB_DR_MODE_HOST = 1, + USB_DR_MODE_PERIPHERAL = 2, + USB_DR_MODE_OTG = 3, +}; + +struct phy_meson8b_usb2_match_data { + bool host_enable_aca; +}; + +struct reset_control; + +struct phy_meson8b_usb2_priv { + struct regmap *regmap; + enum usb_dr_mode dr_mode; + struct clk *clk_usb_general; + struct clk *clk_usb; + struct reset_control *reset; + const struct phy_meson8b_usb2_match_data *match; +}; + +struct phy_meson_gxl_usb2_priv { + struct regmap *regmap; + enum phy_mode mode; + int is_enabled; + struct clk *clk; + struct reset_control *reset; +}; + +enum meson_soc_id { + MESON_SOC_G12A = 0, + MESON_SOC_A1 = 1, +}; + +struct phy_meson_g12a_usb2_priv { + struct device *dev; + struct regmap *regmap; + struct clk *clk; + struct reset_control *reset; + int soc_id; +}; + +struct phy_g12a_usb3_pcie_priv { + struct regmap *regmap; + struct regmap *regmap_cr; + struct clk *clk_ref; + struct reset_control *reset; + struct phy *phy; + unsigned int mode; +}; + +struct phy_axg_pcie_priv { + struct phy *phy; + struct phy *analog; + struct regmap *regmap; + struct reset_control *reset; +}; + +struct phy_axg_mipi_pcie_analog_priv { + struct phy *phy; + unsigned int mode; + struct regmap *regmap; +}; + +enum ethtool_link_mode_bit_indices { + ETHTOOL_LINK_MODE_10baseT_Half_BIT = 0, + ETHTOOL_LINK_MODE_10baseT_Full_BIT = 1, + ETHTOOL_LINK_MODE_100baseT_Half_BIT = 
2, + ETHTOOL_LINK_MODE_100baseT_Full_BIT = 3, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT = 4, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT = 5, + ETHTOOL_LINK_MODE_Autoneg_BIT = 6, + ETHTOOL_LINK_MODE_TP_BIT = 7, + ETHTOOL_LINK_MODE_AUI_BIT = 8, + ETHTOOL_LINK_MODE_MII_BIT = 9, + ETHTOOL_LINK_MODE_FIBRE_BIT = 10, + ETHTOOL_LINK_MODE_BNC_BIT = 11, + ETHTOOL_LINK_MODE_10000baseT_Full_BIT = 12, + ETHTOOL_LINK_MODE_Pause_BIT = 13, + ETHTOOL_LINK_MODE_Asym_Pause_BIT = 14, + ETHTOOL_LINK_MODE_2500baseX_Full_BIT = 15, + ETHTOOL_LINK_MODE_Backplane_BIT = 16, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT = 17, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT = 18, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT = 19, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT = 20, + ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT = 21, + ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT = 22, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT = 23, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT = 24, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT = 25, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT = 26, + ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT = 27, + ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28, + ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29, + ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30, + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT = 31, + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT = 32, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT = 33, + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT = 34, + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT = 35, + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT = 36, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT = 40, + ETHTOOL_LINK_MODE_1000baseX_Full_BIT = 41, + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT = 42, + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT = 43, + ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44, + ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45, + ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46, + ETHTOOL_LINK_MODE_2500baseT_Full_BIT = 47, + ETHTOOL_LINK_MODE_5000baseT_Full_BIT = 48, + ETHTOOL_LINK_MODE_FEC_NONE_BIT = 49, + ETHTOOL_LINK_MODE_FEC_RS_BIT = 50, + ETHTOOL_LINK_MODE_FEC_BASER_BIT = 51, + ETHTOOL_LINK_MODE_50000baseKR_Full_BIT = 52, + ETHTOOL_LINK_MODE_50000baseSR_Full_BIT = 53, + ETHTOOL_LINK_MODE_50000baseCR_Full_BIT = 54, + ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT = 55, + ETHTOOL_LINK_MODE_50000baseDR_Full_BIT = 56, + ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT = 57, + ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT = 58, + ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT = 59, + ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT = 60, + ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT = 61, + ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT = 62, + ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT = 63, + ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT = 64, + ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT = 65, + ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT = 66, + ETHTOOL_LINK_MODE_100baseT1_Full_BIT = 67, + ETHTOOL_LINK_MODE_1000baseT1_Full_BIT = 68, + ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT = 69, + ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT = 70, + ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT = 71, + ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT = 72, + ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT = 73, + ETHTOOL_LINK_MODE_FEC_LLRS_BIT = 74, + ETHTOOL_LINK_MODE_100000baseKR_Full_BIT = 75, + ETHTOOL_LINK_MODE_100000baseSR_Full_BIT = 76, + ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT = 77, + ETHTOOL_LINK_MODE_100000baseCR_Full_BIT = 78, + 
ETHTOOL_LINK_MODE_100000baseDR_Full_BIT = 79, + ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT = 80, + ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT = 81, + ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT = 82, + ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT = 83, + ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT = 84, + ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT = 85, + ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT = 86, + ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT = 87, + ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT = 88, + ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT = 89, + ETHTOOL_LINK_MODE_100baseFX_Half_BIT = 90, + ETHTOOL_LINK_MODE_100baseFX_Full_BIT = 91, + __ETHTOOL_LINK_MODE_MASK_NBITS = 92, +}; + +struct mvebu_a3700_comphy_conf { + unsigned int lane; + enum phy_mode mode; + int submode; + unsigned int port; + u32 fw_mode; +}; + +struct mvebu_a3700_comphy_lane { + struct device *dev; + unsigned int id; + enum phy_mode mode; + int submode; + int port; +}; + +struct mvebu_a3700_utmi_caps { + int usb32; + const struct phy_ops *ops; +}; + +struct mvebu_a3700_utmi { + void *regs; + struct regmap *usb_misc; + const struct mvebu_a3700_utmi_caps *caps; + struct phy *phy; +}; + +struct exynos_dp_video_phy_drvdata { + u32 phy_ctrl_offset; +}; + +struct exynos_dp_video_phy { + struct regmap *regs; + const struct exynos_dp_video_phy_drvdata *drvdata; +}; + +enum exynos_mipi_phy_id { + EXYNOS_MIPI_PHY_ID_NONE = 4294967295, + EXYNOS_MIPI_PHY_ID_CSIS0 = 0, + EXYNOS_MIPI_PHY_ID_DSIM0 = 1, + EXYNOS_MIPI_PHY_ID_CSIS1 = 2, + EXYNOS_MIPI_PHY_ID_DSIM1 = 3, + EXYNOS_MIPI_PHY_ID_CSIS2 = 4, + EXYNOS_MIPI_PHYS_NUM = 5, +}; + +enum exynos_mipi_phy_regmap_id { + EXYNOS_MIPI_REGMAP_PMU = 0, + EXYNOS_MIPI_REGMAP_DISP = 1, + EXYNOS_MIPI_REGMAP_CAM0 = 2, + EXYNOS_MIPI_REGMAP_CAM1 = 3, + EXYNOS_MIPI_REGMAPS_NUM = 4, +}; + +struct exynos_mipi_phy_desc { + enum exynos_mipi_phy_id coupled_phy_id; + u32 enable_val; + unsigned int enable_reg; + enum exynos_mipi_phy_regmap_id enable_map; + u32 resetn_val; + unsigned int resetn_reg; + enum exynos_mipi_phy_regmap_id resetn_map; +}; + +struct mipi_phy_device_desc { + int num_phys; + int num_regmaps; + const char *regmap_names[4]; + struct exynos_mipi_phy_desc phys[5]; +}; + +struct video_phy_desc { + struct phy *phy; + unsigned int index; + const struct exynos_mipi_phy_desc *data; +}; + +struct exynos_mipi_video_phy { + struct regmap *regmaps[4]; + int num_phys; + struct video_phy_desc phys[5]; + spinlock_t slock; +}; + +struct pinctrl; + +struct pinctrl_state; + +struct dev_pin_info { + struct pinctrl *p; + struct pinctrl_state *default_state; + struct pinctrl_state *init_state; + struct pinctrl_state *sleep_state; + struct pinctrl_state *idle_state; +}; + +struct pinctrl { + struct list_head node; + struct device *dev; + struct list_head states; + struct pinctrl_state *state; + struct list_head dt_maps; + struct kref users; +}; + +struct pinctrl_state { + struct list_head node; + const char *name; + struct list_head settings; +}; + +struct pinctrl_pin_desc { + unsigned int number; + const char *name; + void *drv_data; +}; + +struct gpio_chip; + +struct pinctrl_gpio_range { + struct list_head node; + const char *name; + unsigned int id; + unsigned int base; + unsigned int pin_base; + const unsigned int *pins; + unsigned int npins; + struct gpio_chip *gc; +}; + +struct gpio_irq_chip { + struct irq_chip *chip; + struct irq_domain *domain; + const struct irq_domain_ops *domain_ops; + struct fwnode_handle *fwnode; + struct irq_domain *parent_domain; + int (*child_to_parent_hwirq)(struct gpio_chip *, 
unsigned int, unsigned int, unsigned int *, unsigned int *); + void * (*populate_parent_alloc_arg)(struct gpio_chip *, unsigned int, unsigned int); + unsigned int (*child_offset_to_irq)(struct gpio_chip *, unsigned int); + struct irq_domain_ops child_irq_domain_ops; + irq_flow_handler_t handler; + unsigned int default_type; + struct lock_class_key *lock_key; + struct lock_class_key *request_key; + irq_flow_handler_t parent_handler; + void *parent_handler_data; + unsigned int num_parents; + unsigned int *parents; + unsigned int *map; + bool threaded; + int (*init_hw)(struct gpio_chip *); + void (*init_valid_mask)(struct gpio_chip *, long unsigned int *, unsigned int); + long unsigned int *valid_mask; + unsigned int first; + void (*irq_enable)(struct irq_data *); + void (*irq_disable)(struct irq_data *); + void (*irq_unmask)(struct irq_data *); + void (*irq_mask)(struct irq_data *); +}; + +struct gpio_device; + +struct gpio_chip { + const char *label; + struct gpio_device *gpiodev; + struct device *parent; + struct module *owner; + int (*request)(struct gpio_chip *, unsigned int); + void (*free)(struct gpio_chip *, unsigned int); + int (*get_direction)(struct gpio_chip *, unsigned int); + int (*direction_input)(struct gpio_chip *, unsigned int); + int (*direction_output)(struct gpio_chip *, unsigned int, int); + int (*get)(struct gpio_chip *, unsigned int); + int (*get_multiple)(struct gpio_chip *, long unsigned int *, long unsigned int *); + void (*set)(struct gpio_chip *, unsigned int, int); + void (*set_multiple)(struct gpio_chip *, long unsigned int *, long unsigned int *); + int (*set_config)(struct gpio_chip *, unsigned int, long unsigned int); + int (*to_irq)(struct gpio_chip *, unsigned int); + void (*dbg_show)(struct seq_file *, struct gpio_chip *); + int (*init_valid_mask)(struct gpio_chip *, long unsigned int *, unsigned int); + int (*add_pin_ranges)(struct gpio_chip *); + int base; + u16 ngpio; + const char * const *names; + bool can_sleep; + long unsigned int (*read_reg)(void *); + void (*write_reg)(void *, long unsigned int); + bool be_bits; + void *reg_dat; + void *reg_set; + void *reg_clr; + void *reg_dir_out; + void *reg_dir_in; + bool bgpio_dir_unreadable; + int bgpio_bits; + spinlock_t bgpio_lock; + long unsigned int bgpio_data; + long unsigned int bgpio_dir; + struct gpio_irq_chip irq; + long unsigned int *valid_mask; + struct device_node *of_node; + unsigned int of_gpio_n_cells; + int (*of_xlate)(struct gpio_chip *, const struct of_phandle_args *, u32 *); +}; + +struct pinctrl_dev; + +struct pinctrl_ops { + int (*get_groups_count)(struct pinctrl_dev *); + const char * (*get_group_name)(struct pinctrl_dev *, unsigned int); + int (*get_group_pins)(struct pinctrl_dev *, unsigned int, const unsigned int **, unsigned int *); + void (*pin_dbg_show)(struct pinctrl_dev *, struct seq_file *, unsigned int); + int (*dt_node_to_map)(struct pinctrl_dev *, struct device_node *, struct pinctrl_map **, unsigned int *); + void (*dt_free_map)(struct pinctrl_dev *, struct pinctrl_map *, unsigned int); +}; + +struct pinctrl_desc; + +struct pinctrl_dev { + struct list_head node; + struct pinctrl_desc *desc; + struct xarray pin_desc_tree; + struct xarray pin_group_tree; + unsigned int num_groups; + struct xarray pin_function_tree; + unsigned int num_functions; + struct list_head gpio_ranges; + struct device *dev; + struct module *owner; + void *driver_data; + struct pinctrl *p; + struct pinctrl_state *hog_default; + struct pinctrl_state *hog_sleep; + struct mutex mutex; + struct dentry 
*device_root; +}; + +struct pinmux_ops; + +struct pinconf_ops; + +struct pinconf_generic_params; + +struct pin_config_item; + +struct pinctrl_desc { + const char *name; + const struct pinctrl_pin_desc *pins; + unsigned int npins; + const struct pinctrl_ops *pctlops; + const struct pinmux_ops *pmxops; + const struct pinconf_ops *confops; + struct module *owner; + unsigned int num_custom_params; + const struct pinconf_generic_params *custom_params; + const struct pin_config_item *custom_conf_items; + bool link_consumers; +}; + +struct pinmux_ops { + int (*request)(struct pinctrl_dev *, unsigned int); + int (*free)(struct pinctrl_dev *, unsigned int); + int (*get_functions_count)(struct pinctrl_dev *); + const char * (*get_function_name)(struct pinctrl_dev *, unsigned int); + int (*get_function_groups)(struct pinctrl_dev *, unsigned int, const char * const **, unsigned int *); + int (*set_mux)(struct pinctrl_dev *, unsigned int, unsigned int); + int (*gpio_request_enable)(struct pinctrl_dev *, struct pinctrl_gpio_range *, unsigned int); + void (*gpio_disable_free)(struct pinctrl_dev *, struct pinctrl_gpio_range *, unsigned int); + int (*gpio_set_direction)(struct pinctrl_dev *, struct pinctrl_gpio_range *, unsigned int, bool); + bool strict; +}; + +struct pinconf_ops { + bool is_generic; + int (*pin_config_get)(struct pinctrl_dev *, unsigned int, long unsigned int *); + int (*pin_config_set)(struct pinctrl_dev *, unsigned int, long unsigned int *, unsigned int); + int (*pin_config_group_get)(struct pinctrl_dev *, unsigned int, long unsigned int *); + int (*pin_config_group_set)(struct pinctrl_dev *, unsigned int, long unsigned int *, unsigned int); + void (*pin_config_dbg_show)(struct pinctrl_dev *, struct seq_file *, unsigned int); + void (*pin_config_group_dbg_show)(struct pinctrl_dev *, struct seq_file *, unsigned int); + void (*pin_config_config_dbg_show)(struct pinctrl_dev *, struct seq_file *, long unsigned int); +}; + +enum pin_config_param { + PIN_CONFIG_BIAS_BUS_HOLD = 0, + PIN_CONFIG_BIAS_DISABLE = 1, + PIN_CONFIG_BIAS_HIGH_IMPEDANCE = 2, + PIN_CONFIG_BIAS_PULL_DOWN = 3, + PIN_CONFIG_BIAS_PULL_PIN_DEFAULT = 4, + PIN_CONFIG_BIAS_PULL_UP = 5, + PIN_CONFIG_DRIVE_OPEN_DRAIN = 6, + PIN_CONFIG_DRIVE_OPEN_SOURCE = 7, + PIN_CONFIG_DRIVE_PUSH_PULL = 8, + PIN_CONFIG_DRIVE_STRENGTH = 9, + PIN_CONFIG_DRIVE_STRENGTH_UA = 10, + PIN_CONFIG_INPUT_DEBOUNCE = 11, + PIN_CONFIG_INPUT_ENABLE = 12, + PIN_CONFIG_INPUT_SCHMITT = 13, + PIN_CONFIG_INPUT_SCHMITT_ENABLE = 14, + PIN_CONFIG_LOW_POWER_MODE = 15, + PIN_CONFIG_OUTPUT_ENABLE = 16, + PIN_CONFIG_OUTPUT = 17, + PIN_CONFIG_POWER_SOURCE = 18, + PIN_CONFIG_SLEEP_HARDWARE_STATE = 19, + PIN_CONFIG_SLEW_RATE = 20, + PIN_CONFIG_SKEW_DELAY = 21, + PIN_CONFIG_PERSIST_STATE = 22, + PIN_CONFIG_END = 127, + PIN_CONFIG_MAX = 255, +}; + +struct pinconf_generic_params { + const char * const property; + enum pin_config_param param; + u32 default_value; +}; + +struct pin_config_item { + const enum pin_config_param param; + const char * const display; + const char * const format; + bool has_arg; +}; + +struct gpio_desc___2; + +struct gpio_device { + int id; + struct device dev; + struct cdev chrdev; + struct device *mockdev; + struct module *owner; + struct gpio_chip *chip; + struct gpio_desc___2 *descs; + int base; + u16 ngpio; + const char *label; + void *data; + struct list_head list; + struct blocking_notifier_head notifier; + struct list_head pin_ranges; +}; + +struct gpio_desc___2 { + struct gpio_device *gdev; + long unsigned int flags; + const char *label; + 
const char *name; + struct device_node *hog; + unsigned int debounce_period_us; +}; + +struct pinctrl_setting_mux { + unsigned int group; + unsigned int func; +}; + +struct pinctrl_setting_configs { + unsigned int group_or_pin; + long unsigned int *configs; + unsigned int num_configs; +}; + +struct pinctrl_setting { + struct list_head node; + enum pinctrl_map_type type; + struct pinctrl_dev *pctldev; + const char *dev_name; + union { + struct pinctrl_setting_mux mux; + struct pinctrl_setting_configs configs; + } data; +}; + +struct pin_desc { + struct pinctrl_dev *pctldev; + const char *name; + bool dynamic_name; + void *drv_data; + unsigned int mux_usecount; + const char *mux_owner; + const struct pinctrl_setting_mux *mux_setting; + const char *gpio_owner; +}; + +struct pinctrl_maps { + struct list_head node; + const struct pinctrl_map *maps; + unsigned int num_maps; +}; + +struct group_desc { + const char *name; + int *pins; + int num_pins; + void *data; +}; + +struct pctldev; + +struct function_desc { + const char *name; + const char **group_names; + int num_group_names; + void *data; +}; + +struct pinctrl_dt_map { + struct list_head node; + struct pinctrl_dev *pctldev; + struct pinctrl_map *map; + unsigned int num_maps; +}; + +struct amd_pingroup { + const char *name; + const unsigned int *pins; + unsigned int npins; +}; + +struct amd_gpio { + raw_spinlock_t lock; + void *base; + const struct amd_pingroup *groups; + u32 ngroups; + struct pinctrl_dev *pctrl; + struct gpio_chip gc; + unsigned int hwbank_num; + struct resource *res; + struct platform_device *pdev; + u32 *saved_regs; +}; + +struct meson_pmx_group { + const char *name; + const unsigned int *pins; + unsigned int num_pins; + const void *data; +}; + +struct meson_pmx_func { + const char *name; + const char * const *groups; + unsigned int num_groups; +}; + +struct meson_reg_desc { + unsigned int reg; + unsigned int bit; +}; + +enum meson_reg_type { + REG_PULLEN = 0, + REG_PULL = 1, + REG_DIR = 2, + REG_OUT = 3, + REG_IN = 4, + REG_DS = 5, + NUM_REG = 6, +}; + +enum meson_pinconf_drv { + MESON_PINCONF_DRV_500UA = 0, + MESON_PINCONF_DRV_2500UA = 1, + MESON_PINCONF_DRV_3000UA = 2, + MESON_PINCONF_DRV_4000UA = 3, +}; + +struct meson_bank { + const char *name; + unsigned int first; + unsigned int last; + int irq_first; + int irq_last; + struct meson_reg_desc regs[6]; +}; + +struct meson_pinctrl; + +struct meson_pinctrl_data { + const char *name; + const struct pinctrl_pin_desc *pins; + struct meson_pmx_group *groups; + struct meson_pmx_func *funcs; + unsigned int num_pins; + unsigned int num_groups; + unsigned int num_funcs; + struct meson_bank *banks; + unsigned int num_banks; + const struct pinmux_ops *pmx_ops; + void *pmx_data; + int (*parse_dt)(struct meson_pinctrl *); +}; + +struct meson_pinctrl { + struct device *dev; + struct pinctrl_dev *pcdev; + struct pinctrl_desc desc; + struct meson_pinctrl_data *data; + struct regmap *reg_mux; + struct regmap *reg_pullen; + struct regmap *reg_pull; + struct regmap *reg_gpio; + struct regmap *reg_ds; + struct gpio_chip chip; + struct device_node *of_node; +}; + +struct meson8_pmx_data { + bool is_gpio; + unsigned int reg; + unsigned int bit; +}; + +struct meson_pmx_bank { + const char *name; + unsigned int first; + unsigned int last; + unsigned int reg; + unsigned int offset; +}; + +struct meson_axg_pmx_data { + struct meson_pmx_bank *pmx_banks; + unsigned int num_pmx_banks; +}; + +struct meson_pmx_axg_data { + unsigned int func; +}; + +enum rockchip_pinctrl_type { + PX30 = 0, + RV1108 = 
1, + RK2928 = 2, + RK3066B = 3, + RK3128 = 4, + RK3188 = 5, + RK3288 = 6, + RK3308 = 7, + RK3368 = 8, + RK3399 = 9, +}; + +struct rockchip_iomux { + int type; + int offset; +}; + +enum rockchip_pin_drv_type { + DRV_TYPE_IO_DEFAULT = 0, + DRV_TYPE_IO_1V8_OR_3V0 = 1, + DRV_TYPE_IO_1V8_ONLY = 2, + DRV_TYPE_IO_1V8_3V0_AUTO = 3, + DRV_TYPE_IO_3V3_ONLY = 4, + DRV_TYPE_MAX = 5, +}; + +enum rockchip_pin_pull_type { + PULL_TYPE_IO_DEFAULT = 0, + PULL_TYPE_IO_1V8_ONLY = 1, + PULL_TYPE_MAX = 2, +}; + +struct rockchip_drv { + enum rockchip_pin_drv_type drv_type; + int offset; +}; + +struct rockchip_pinctrl; + +struct rockchip_pin_bank { + void *reg_base; + struct regmap *regmap_pull; + struct clk *clk; + int irq; + u32 saved_masks; + u32 pin_base; + u8 nr_pins; + char *name; + u8 bank_num; + struct rockchip_iomux iomux[4]; + struct rockchip_drv drv[4]; + enum rockchip_pin_pull_type pull_type[4]; + bool valid; + struct device_node *of_node; + struct rockchip_pinctrl *drvdata; + struct irq_domain *domain; + struct gpio_chip gpio_chip; + struct pinctrl_gpio_range grange; + raw_spinlock_t slock; + u32 toggle_edge_mode; + u32 recalced_mask; + u32 route_mask; +}; + +struct rockchip_pin_ctrl; + +struct rockchip_pin_group; + +struct rockchip_pmx_func; + +struct rockchip_pinctrl { + struct regmap *regmap_base; + int reg_size; + struct regmap *regmap_pull; + struct regmap *regmap_pmu; + struct device *dev; + struct rockchip_pin_ctrl *ctrl; + struct pinctrl_desc pctl; + struct pinctrl_dev *pctl_dev; + struct rockchip_pin_group *groups; + unsigned int ngroups; + struct rockchip_pmx_func *functions; + unsigned int nfunctions; +}; + +struct rockchip_mux_recalced_data { + u8 num; + u8 pin; + u32 reg; + u8 bit; + u8 mask; +}; + +enum rockchip_mux_route_location { + ROCKCHIP_ROUTE_SAME = 0, + ROCKCHIP_ROUTE_PMU = 1, + ROCKCHIP_ROUTE_GRF = 2, +}; + +struct rockchip_mux_route_data { + u8 bank_num; + u8 pin; + u8 func; + enum rockchip_mux_route_location route_location; + u32 route_offset; + u32 route_val; +}; + +struct rockchip_pin_ctrl { + struct rockchip_pin_bank *pin_banks; + u32 nr_banks; + u32 nr_pins; + char *label; + enum rockchip_pinctrl_type type; + int grf_mux_offset; + int pmu_mux_offset; + int grf_drv_offset; + int pmu_drv_offset; + struct rockchip_mux_recalced_data *iomux_recalced; + u32 niomux_recalced; + struct rockchip_mux_route_data *iomux_routes; + u32 niomux_routes; + void (*pull_calc_reg)(struct rockchip_pin_bank *, int, struct regmap **, int *, u8 *); + void (*drv_calc_reg)(struct rockchip_pin_bank *, int, struct regmap **, int *, u8 *); + int (*schmitt_calc_reg)(struct rockchip_pin_bank *, int, struct regmap **, int *, u8 *); +}; + +struct rockchip_pin_config { + unsigned int func; + long unsigned int *configs; + unsigned int nconfigs; +}; + +struct rockchip_pin_group { + const char *name; + unsigned int npins; + unsigned int *pins; + struct rockchip_pin_config *data; +}; + +struct rockchip_pmx_func { + const char *name; + const char **groups; + u8 ngroups; +}; + +struct pcs_pdata { + int irq; + void (*rearm)(); +}; + +struct pcs_func_vals { + void *reg; + unsigned int val; + unsigned int mask; +}; + +struct pcs_conf_vals { + enum pin_config_param param; + unsigned int val; + unsigned int enable; + unsigned int disable; + unsigned int mask; +}; + +struct pcs_conf_type { + const char *name; + enum pin_config_param param; +}; + +struct pcs_function { + const char *name; + struct pcs_func_vals *vals; + unsigned int nvals; + const char **pgnames; + int npgnames; + struct pcs_conf_vals *conf; + int 
nconfs; + struct list_head node; +}; + +struct pcs_gpiofunc_range { + unsigned int offset; + unsigned int npins; + unsigned int gpiofunc; + struct list_head node; +}; + +struct pcs_data { + struct pinctrl_pin_desc *pa; + int cur; +}; + +struct pcs_soc_data { + unsigned int flags; + int irq; + unsigned int irq_enable_mask; + unsigned int irq_status_mask; + void (*rearm)(); +}; + +struct pcs_device { + struct resource *res; + void *base; + void *saved_vals; + unsigned int size; + struct device *dev; + struct device_node *np; + struct pinctrl_dev *pctl; + unsigned int flags; + struct property *missing_nr_pinctrl_cells; + struct pcs_soc_data socdata; + raw_spinlock_t lock; + struct mutex mutex; + unsigned int width; + unsigned int fmask; + unsigned int fshift; + unsigned int foff; + unsigned int fmax; + bool bits_per_mux; + unsigned int bits_per_pin; + struct pcs_data pins; + struct list_head gpiofuncs; + struct list_head irqs; + struct irq_chip chip; + struct irq_domain *domain; + struct pinctrl_desc desc; + unsigned int (*read)(void *); + void (*write)(unsigned int, void *); +}; + +struct pcs_interrupt { + void *reg; + irq_hw_number_t hwirq; + unsigned int irq; + struct list_head node; +}; + +struct tegra_pinctrl_soc_data; + +struct tegra_pmx { + struct device *dev; + struct pinctrl_dev *pctl; + const struct tegra_pinctrl_soc_data *soc; + const char **group_pins; + int nbanks; + void **regs; + u32 *backup_regs; +}; + +struct tegra_function; + +struct tegra_pingroup; + +struct tegra_pinctrl_soc_data { + unsigned int ngpios; + const char *gpio_compatible; + const struct pinctrl_pin_desc *pins; + unsigned int npins; + struct tegra_function *functions; + unsigned int nfunctions; + const struct tegra_pingroup *groups; + unsigned int ngroups; + bool hsm_in_mux; + bool schmitt_in_mux; + bool drvtype_in_mux; + bool sfsel_in_mux; +}; + +enum tegra_pinconf_param { + TEGRA_PINCONF_PARAM_PULL = 0, + TEGRA_PINCONF_PARAM_TRISTATE = 1, + TEGRA_PINCONF_PARAM_ENABLE_INPUT = 2, + TEGRA_PINCONF_PARAM_OPEN_DRAIN = 3, + TEGRA_PINCONF_PARAM_LOCK = 4, + TEGRA_PINCONF_PARAM_IORESET = 5, + TEGRA_PINCONF_PARAM_RCV_SEL = 6, + TEGRA_PINCONF_PARAM_HIGH_SPEED_MODE = 7, + TEGRA_PINCONF_PARAM_SCHMITT = 8, + TEGRA_PINCONF_PARAM_LOW_POWER_MODE = 9, + TEGRA_PINCONF_PARAM_DRIVE_DOWN_STRENGTH = 10, + TEGRA_PINCONF_PARAM_DRIVE_UP_STRENGTH = 11, + TEGRA_PINCONF_PARAM_SLEW_RATE_FALLING = 12, + TEGRA_PINCONF_PARAM_SLEW_RATE_RISING = 13, + TEGRA_PINCONF_PARAM_DRIVE_TYPE = 14, +}; + +struct tegra_function { + const char *name; + const char **groups; + unsigned int ngroups; +}; + +struct tegra_pingroup { + const char *name; + const unsigned int *pins; + u8 npins; + u8 funcs[4]; + s32 mux_reg; + s32 pupd_reg; + s32 tri_reg; + s32 drv_reg; + u32 mux_bank: 2; + u32 pupd_bank: 2; + u32 tri_bank: 2; + u32 drv_bank: 2; + s32 mux_bit: 6; + s32 pupd_bit: 6; + s32 tri_bit: 6; + s32 einput_bit: 6; + s32 odrain_bit: 6; + s32 lock_bit: 6; + s32 ioreset_bit: 6; + s32 rcv_sel_bit: 6; + s32 hsm_bit: 6; + char: 2; + s32 sfsel_bit: 6; + s32 schmitt_bit: 6; + s32 lpmd_bit: 6; + s32 drvdn_bit: 6; + s32 drvup_bit: 6; + char: 2; + s32 slwr_bit: 6; + s32 slwf_bit: 6; + s32 drvtype_bit: 6; + s32 drvdn_width: 6; + s32 drvup_width: 6; + char: 2; + s32 slwr_width: 6; + s32 slwf_width: 6; + u32 parked_bitmask; +}; + +struct cfg_param { + const char *property; + enum tegra_pinconf_param param; +}; + +enum tegra_mux { + TEGRA_MUX_BLINK = 0, + TEGRA_MUX_CCLA = 1, + TEGRA_MUX_CEC = 2, + TEGRA_MUX_CLDVFS = 3, + TEGRA_MUX_CLK = 4, + TEGRA_MUX_CLK12 = 5, + 
TEGRA_MUX_CPU = 6, + TEGRA_MUX_CSI = 7, + TEGRA_MUX_DAP = 8, + TEGRA_MUX_DAP1 = 9, + TEGRA_MUX_DAP2 = 10, + TEGRA_MUX_DEV3 = 11, + TEGRA_MUX_DISPLAYA = 12, + TEGRA_MUX_DISPLAYA_ALT = 13, + TEGRA_MUX_DISPLAYB = 14, + TEGRA_MUX_DP = 15, + TEGRA_MUX_DSI_B = 16, + TEGRA_MUX_DTV = 17, + TEGRA_MUX_EXTPERIPH1 = 18, + TEGRA_MUX_EXTPERIPH2 = 19, + TEGRA_MUX_EXTPERIPH3 = 20, + TEGRA_MUX_GMI = 21, + TEGRA_MUX_GMI_ALT = 22, + TEGRA_MUX_HDA = 23, + TEGRA_MUX_HSI = 24, + TEGRA_MUX_I2C1 = 25, + TEGRA_MUX_I2C2 = 26, + TEGRA_MUX_I2C3 = 27, + TEGRA_MUX_I2C4 = 28, + TEGRA_MUX_I2CPWR = 29, + TEGRA_MUX_I2S0 = 30, + TEGRA_MUX_I2S1 = 31, + TEGRA_MUX_I2S2 = 32, + TEGRA_MUX_I2S3 = 33, + TEGRA_MUX_I2S4 = 34, + TEGRA_MUX_IRDA = 35, + TEGRA_MUX_KBC = 36, + TEGRA_MUX_OWR = 37, + TEGRA_MUX_PE = 38, + TEGRA_MUX_PE0 = 39, + TEGRA_MUX_PE1 = 40, + TEGRA_MUX_PMI = 41, + TEGRA_MUX_PWM0 = 42, + TEGRA_MUX_PWM1 = 43, + TEGRA_MUX_PWM2 = 44, + TEGRA_MUX_PWM3 = 45, + TEGRA_MUX_PWRON = 46, + TEGRA_MUX_RESET_OUT_N = 47, + TEGRA_MUX_RSVD1 = 48, + TEGRA_MUX_RSVD2 = 49, + TEGRA_MUX_RSVD3 = 50, + TEGRA_MUX_RSVD4 = 51, + TEGRA_MUX_RTCK = 52, + TEGRA_MUX_SATA = 53, + TEGRA_MUX_SDMMC1 = 54, + TEGRA_MUX_SDMMC2 = 55, + TEGRA_MUX_SDMMC3 = 56, + TEGRA_MUX_SDMMC4 = 57, + TEGRA_MUX_SOC = 58, + TEGRA_MUX_SPDIF = 59, + TEGRA_MUX_SPI1 = 60, + TEGRA_MUX_SPI2 = 61, + TEGRA_MUX_SPI3 = 62, + TEGRA_MUX_SPI4 = 63, + TEGRA_MUX_SPI5 = 64, + TEGRA_MUX_SPI6 = 65, + TEGRA_MUX_SYS = 66, + TEGRA_MUX_TMDS = 67, + TEGRA_MUX_TRACE = 68, + TEGRA_MUX_UARTA = 69, + TEGRA_MUX_UARTB = 70, + TEGRA_MUX_UARTC = 71, + TEGRA_MUX_UARTD = 72, + TEGRA_MUX_ULPI = 73, + TEGRA_MUX_USB = 74, + TEGRA_MUX_VGP1 = 75, + TEGRA_MUX_VGP2 = 76, + TEGRA_MUX_VGP3 = 77, + TEGRA_MUX_VGP4 = 78, + TEGRA_MUX_VGP5 = 79, + TEGRA_MUX_VGP6 = 80, + TEGRA_MUX_VI = 81, + TEGRA_MUX_VI_ALT1 = 82, + TEGRA_MUX_VI_ALT3 = 83, + TEGRA_MUX_VIMCLK2 = 84, + TEGRA_MUX_VIMCLK2_ALT = 85, +}; + +enum tegra_mux___2 { + TEGRA_MUX_AUD = 0, + TEGRA_MUX_BCL = 1, + TEGRA_MUX_BLINK___2 = 2, + TEGRA_MUX_CCLA___2 = 3, + TEGRA_MUX_CEC___2 = 4, + TEGRA_MUX_CLDVFS___2 = 5, + TEGRA_MUX_CLK___2 = 6, + TEGRA_MUX_CORE = 7, + TEGRA_MUX_CPU___2 = 8, + TEGRA_MUX_DISPLAYA___2 = 9, + TEGRA_MUX_DISPLAYB___2 = 10, + TEGRA_MUX_DMIC1 = 11, + TEGRA_MUX_DMIC2 = 12, + TEGRA_MUX_DMIC3 = 13, + TEGRA_MUX_DP___2 = 14, + TEGRA_MUX_DTV___2 = 15, + TEGRA_MUX_EXTPERIPH3___2 = 16, + TEGRA_MUX_I2C1___2 = 17, + TEGRA_MUX_I2C2___2 = 18, + TEGRA_MUX_I2C3___2 = 19, + TEGRA_MUX_I2CPMU = 20, + TEGRA_MUX_I2CVI = 21, + TEGRA_MUX_I2S1___2 = 22, + TEGRA_MUX_I2S2___2 = 23, + TEGRA_MUX_I2S3___2 = 24, + TEGRA_MUX_I2S4A = 25, + TEGRA_MUX_I2S4B = 26, + TEGRA_MUX_I2S5A = 27, + TEGRA_MUX_I2S5B = 28, + TEGRA_MUX_IQC0 = 29, + TEGRA_MUX_IQC1 = 30, + TEGRA_MUX_JTAG = 31, + TEGRA_MUX_PE___2 = 32, + TEGRA_MUX_PE0___2 = 33, + TEGRA_MUX_PE1___2 = 34, + TEGRA_MUX_PMI___2 = 35, + TEGRA_MUX_PWM0___2 = 36, + TEGRA_MUX_PWM1___2 = 37, + TEGRA_MUX_PWM2___2 = 38, + TEGRA_MUX_PWM3___2 = 39, + TEGRA_MUX_QSPI = 40, + TEGRA_MUX_RSVD0 = 41, + TEGRA_MUX_RSVD1___2 = 42, + TEGRA_MUX_RSVD2___2 = 43, + TEGRA_MUX_RSVD3___2 = 44, + TEGRA_MUX_SATA___2 = 45, + TEGRA_MUX_SDMMC1___2 = 46, + TEGRA_MUX_SDMMC3___2 = 47, + TEGRA_MUX_SHUTDOWN = 48, + TEGRA_MUX_SOC___2 = 49, + TEGRA_MUX_SOR0 = 50, + TEGRA_MUX_SOR1 = 51, + TEGRA_MUX_SPDIF___2 = 52, + TEGRA_MUX_SPI1___2 = 53, + TEGRA_MUX_SPI2___2 = 54, + TEGRA_MUX_SPI3___2 = 55, + TEGRA_MUX_SPI4___2 = 56, + TEGRA_MUX_SYS___2 = 57, + TEGRA_MUX_TOUCH = 58, + TEGRA_MUX_UART = 59, + TEGRA_MUX_UARTA___2 = 60, + TEGRA_MUX_UARTB___2 = 61, + TEGRA_MUX_UARTC___2 = 62, 
+ TEGRA_MUX_UARTD___2 = 63, + TEGRA_MUX_USB___2 = 64, + TEGRA_MUX_VGP1___2 = 65, + TEGRA_MUX_VGP2___2 = 66, + TEGRA_MUX_VGP3___2 = 67, + TEGRA_MUX_VGP4___2 = 68, + TEGRA_MUX_VGP5___2 = 69, + TEGRA_MUX_VGP6___2 = 70, + TEGRA_MUX_VIMCLK = 71, + TEGRA_MUX_VIMCLK2___2 = 72, +}; + +struct tegra_xusb_padctl_function { + const char *name; + const char * const *groups; + unsigned int num_groups; +}; + +struct tegra_xusb_padctl_lane; + +struct tegra_xusb_padctl_soc { + const struct pinctrl_pin_desc *pins; + unsigned int num_pins; + const struct tegra_xusb_padctl_function *functions; + unsigned int num_functions; + const struct tegra_xusb_padctl_lane *lanes; + unsigned int num_lanes; +}; + +struct tegra_xusb_padctl_lane { + const char *name; + unsigned int offset; + unsigned int shift; + unsigned int mask; + unsigned int iddq; + const unsigned int *funcs; + unsigned int num_funcs; +}; + +struct tegra_xusb_padctl { + struct device *dev; + void *regs; + struct mutex lock; + struct reset_control *rst; + const struct tegra_xusb_padctl_soc *soc; + struct pinctrl_dev *pinctrl; + struct pinctrl_desc desc; + struct phy_provider *provider; + struct phy *phys[2]; + unsigned int enable; +}; + +enum tegra_xusb_padctl_param { + TEGRA_XUSB_PADCTL_IDDQ = 0, +}; + +struct tegra_xusb_padctl_property { + const char *name; + enum tegra_xusb_padctl_param param; +}; + +enum tegra124_function { + TEGRA124_FUNC_SNPS = 0, + TEGRA124_FUNC_XUSB = 1, + TEGRA124_FUNC_UART = 2, + TEGRA124_FUNC_PCIE = 3, + TEGRA124_FUNC_USB3 = 4, + TEGRA124_FUNC_SATA = 5, + TEGRA124_FUNC_RSVD = 6, +}; + +struct bcm2835_pinctrl { + struct device *dev; + void *base; + int *wake_irq; + long unsigned int enabled_irq_map[2]; + unsigned int irq_type[58]; + struct pinctrl_dev *pctl_dev; + struct gpio_chip gpio_chip; + struct pinctrl_desc pctl_desc; + struct pinctrl_gpio_range gpio_range; + raw_spinlock_t irq_lock[2]; +}; + +enum bcm2835_fsel { + BCM2835_FSEL_COUNT = 8, + BCM2835_FSEL_MASK = 7, +}; + +struct bcm_plat_data { + const struct gpio_chip *gpio_chip; + const struct pinctrl_desc *pctl_desc; + const struct pinctrl_gpio_range *gpio_range; +}; + +struct mvebu_mpp_ctrl_data { + union { + void *base; + struct { + struct regmap *map; + u32 offset; + } regmap; + }; +}; + +struct mvebu_mpp_ctrl { + const char *name; + u8 pid; + u8 npins; + unsigned int *pins; + int (*mpp_get)(struct mvebu_mpp_ctrl_data *, unsigned int, long unsigned int *); + int (*mpp_set)(struct mvebu_mpp_ctrl_data *, unsigned int, long unsigned int); + int (*mpp_gpio_req)(struct mvebu_mpp_ctrl_data *, unsigned int); + int (*mpp_gpio_dir)(struct mvebu_mpp_ctrl_data *, unsigned int, bool); +}; + +struct mvebu_mpp_ctrl_setting { + u8 val; + const char *name; + const char *subname; + u8 variant; + u8 flags; +}; + +struct mvebu_mpp_mode { + u8 pid; + struct mvebu_mpp_ctrl_setting *settings; +}; + +struct mvebu_pinctrl_soc_info { + u8 variant; + const struct mvebu_mpp_ctrl *controls; + struct mvebu_mpp_ctrl_data *control_data; + int ncontrols; + struct mvebu_mpp_mode *modes; + int nmodes; + struct pinctrl_gpio_range *gpioranges; + int ngpioranges; +}; + +struct mvebu_pinctrl_function { + const char *name; + const char **groups; + unsigned int num_groups; +}; + +struct mvebu_pinctrl_group { + const char *name; + const struct mvebu_mpp_ctrl *ctrl; + struct mvebu_mpp_ctrl_data *data; + struct mvebu_mpp_ctrl_setting *settings; + unsigned int num_settings; + unsigned int gid; + unsigned int *pins; + unsigned int npins; +}; + +struct mvebu_pinctrl { + struct device *dev; + struct pinctrl_dev 
*pctldev; + struct pinctrl_desc desc; + struct mvebu_pinctrl_group *groups; + unsigned int num_groups; + struct mvebu_pinctrl_function *functions; + unsigned int num_functions; + u8 variant; +}; + +enum { + V_ARMADA_7K = 1, + V_ARMADA_8K_CPM = 2, + V_ARMADA_8K_CPS = 4, + V_CP115_STANDALONE = 8, + V_ARMADA_7K_8K_CPM = 3, + V_ARMADA_7K_8K_CPS = 5, +}; + +struct armada_37xx_pin_group { + const char *name; + unsigned int start_pin; + unsigned int npins; + u32 reg_mask; + u32 val[3]; + unsigned int extra_pin; + unsigned int extra_npins; + const char *funcs[3]; + unsigned int *pins; +}; + +struct armada_37xx_pin_data { + u8 nr_pins; + char *name; + struct armada_37xx_pin_group *groups; + int ngroups; +}; + +struct armada_37xx_pmx_func { + const char *name; + const char **groups; + unsigned int ngroups; +}; + +struct armada_37xx_pm_state { + u32 out_en_l; + u32 out_en_h; + u32 out_val_l; + u32 out_val_h; + u32 irq_en_l; + u32 irq_en_h; + u32 irq_pol_l; + u32 irq_pol_h; + u32 selection; +}; + +struct armada_37xx_pinctrl { + struct regmap *regmap; + void *base; + const struct armada_37xx_pin_data *data; + struct device *dev; + struct gpio_chip gpio_chip; + struct irq_chip irq_chip; + spinlock_t irq_lock; + struct pinctrl_desc pctl; + struct pinctrl_dev *pctl_dev; + struct armada_37xx_pin_group *groups; + unsigned int ngroups; + struct armada_37xx_pmx_func *funcs; + unsigned int nfuncs; + struct armada_37xx_pm_state pm; +}; + +struct msm_function { + const char *name; + const char * const *groups; + unsigned int ngroups; +}; + +struct msm_pingroup { + const char *name; + const unsigned int *pins; + unsigned int npins; + unsigned int *funcs; + unsigned int nfuncs; + u32 ctl_reg; + u32 io_reg; + u32 intr_cfg_reg; + u32 intr_status_reg; + u32 intr_target_reg; + unsigned int tile: 2; + unsigned int mux_bit: 5; + unsigned int pull_bit: 5; + unsigned int drv_bit: 5; + unsigned int od_bit: 5; + unsigned int oe_bit: 5; + unsigned int in_bit: 5; + unsigned int out_bit: 5; + unsigned int intr_enable_bit: 5; + unsigned int intr_status_bit: 5; + unsigned int intr_ack_high: 1; + unsigned int intr_target_bit: 5; + unsigned int intr_target_kpss_val: 5; + unsigned int intr_raw_status_bit: 5; + char: 1; + unsigned int intr_polarity_bit: 5; + unsigned int intr_detection_bit: 5; + unsigned int intr_detection_width: 5; +}; + +struct msm_gpio_wakeirq_map { + unsigned int gpio; + unsigned int wakeirq; +}; + +struct msm_pinctrl_soc_data { + const struct pinctrl_pin_desc *pins; + unsigned int npins; + const struct msm_function *functions; + unsigned int nfunctions; + const struct msm_pingroup *groups; + unsigned int ngroups; + unsigned int ngpios; + bool pull_no_keeper; + const char * const *tiles; + unsigned int ntiles; + const int *reserved_gpios; + const struct msm_gpio_wakeirq_map *wakeirq_map; + unsigned int nwakeirq_map; + bool wakeirq_dual_edge_errata; + unsigned int gpio_func; +}; + +struct msm_pinctrl { + struct device *dev; + struct pinctrl_dev *pctrl; + struct gpio_chip chip; + struct pinctrl_desc desc; + struct notifier_block restart_nb; + struct irq_chip irq_chip; + int irq; + bool intr_target_use_scm; + raw_spinlock_t lock; + long unsigned int dual_edge_irqs[5]; + long unsigned int enabled_irqs[5]; + long unsigned int skip_wake_irqs[5]; + long unsigned int disabled_for_mux[5]; + const struct msm_pinctrl_soc_data *soc; + void *regs[4]; + u32 phys_base[4]; +}; + +enum msm8916_functions { + MSM_MUX_adsp_ext = 0, + MSM_MUX_alsp_int = 1, + MSM_MUX_atest_bbrx0 = 2, + MSM_MUX_atest_bbrx1 = 3, + MSM_MUX_atest_char = 
4, + MSM_MUX_atest_char0 = 5, + MSM_MUX_atest_char1 = 6, + MSM_MUX_atest_char2 = 7, + MSM_MUX_atest_char3 = 8, + MSM_MUX_atest_combodac = 9, + MSM_MUX_atest_gpsadc0 = 10, + MSM_MUX_atest_gpsadc1 = 11, + MSM_MUX_atest_tsens = 12, + MSM_MUX_atest_wlan0 = 13, + MSM_MUX_atest_wlan1 = 14, + MSM_MUX_backlight_en = 15, + MSM_MUX_bimc_dte0 = 16, + MSM_MUX_bimc_dte1 = 17, + MSM_MUX_blsp_i2c1 = 18, + MSM_MUX_blsp_i2c2 = 19, + MSM_MUX_blsp_i2c3 = 20, + MSM_MUX_blsp_i2c4 = 21, + MSM_MUX_blsp_i2c5 = 22, + MSM_MUX_blsp_i2c6 = 23, + MSM_MUX_blsp_spi1 = 24, + MSM_MUX_blsp_spi1_cs1 = 25, + MSM_MUX_blsp_spi1_cs2 = 26, + MSM_MUX_blsp_spi1_cs3 = 27, + MSM_MUX_blsp_spi2 = 28, + MSM_MUX_blsp_spi2_cs1 = 29, + MSM_MUX_blsp_spi2_cs2 = 30, + MSM_MUX_blsp_spi2_cs3 = 31, + MSM_MUX_blsp_spi3 = 32, + MSM_MUX_blsp_spi3_cs1 = 33, + MSM_MUX_blsp_spi3_cs2 = 34, + MSM_MUX_blsp_spi3_cs3 = 35, + MSM_MUX_blsp_spi4 = 36, + MSM_MUX_blsp_spi5 = 37, + MSM_MUX_blsp_spi6 = 38, + MSM_MUX_blsp_uart1 = 39, + MSM_MUX_blsp_uart2 = 40, + MSM_MUX_blsp_uim1 = 41, + MSM_MUX_blsp_uim2 = 42, + MSM_MUX_cam1_rst = 43, + MSM_MUX_cam1_standby = 44, + MSM_MUX_cam_mclk0 = 45, + MSM_MUX_cam_mclk1 = 46, + MSM_MUX_cci_async = 47, + MSM_MUX_cci_i2c = 48, + MSM_MUX_cci_timer0 = 49, + MSM_MUX_cci_timer1 = 50, + MSM_MUX_cci_timer2 = 51, + MSM_MUX_cdc_pdm0 = 52, + MSM_MUX_codec_mad = 53, + MSM_MUX_dbg_out = 54, + MSM_MUX_display_5v = 55, + MSM_MUX_dmic0_clk = 56, + MSM_MUX_dmic0_data = 57, + MSM_MUX_dsi_rst = 58, + MSM_MUX_ebi0_wrcdc = 59, + MSM_MUX_euro_us = 60, + MSM_MUX_ext_lpass = 61, + MSM_MUX_flash_strobe = 62, + MSM_MUX_gcc_gp1_clk_a = 63, + MSM_MUX_gcc_gp1_clk_b = 64, + MSM_MUX_gcc_gp2_clk_a = 65, + MSM_MUX_gcc_gp2_clk_b = 66, + MSM_MUX_gcc_gp3_clk_a = 67, + MSM_MUX_gcc_gp3_clk_b = 68, + MSM_MUX_gpio = 69, + MSM_MUX_gsm0_tx0 = 70, + MSM_MUX_gsm0_tx1 = 71, + MSM_MUX_gsm1_tx0 = 72, + MSM_MUX_gsm1_tx1 = 73, + MSM_MUX_gyro_accl = 74, + MSM_MUX_kpsns0 = 75, + MSM_MUX_kpsns1 = 76, + MSM_MUX_kpsns2 = 77, + MSM_MUX_ldo_en = 78, + MSM_MUX_ldo_update = 79, + MSM_MUX_mag_int = 80, + MSM_MUX_mdp_vsync = 81, + MSM_MUX_modem_tsync = 82, + MSM_MUX_m_voc = 83, + MSM_MUX_nav_pps = 84, + MSM_MUX_nav_tsync = 85, + MSM_MUX_pa_indicator = 86, + MSM_MUX_pbs0 = 87, + MSM_MUX_pbs1 = 88, + MSM_MUX_pbs2 = 89, + MSM_MUX_pri_mi2s = 90, + MSM_MUX_pri_mi2s_ws = 91, + MSM_MUX_prng_rosc = 92, + MSM_MUX_pwr_crypto_enabled_a = 93, + MSM_MUX_pwr_crypto_enabled_b = 94, + MSM_MUX_pwr_modem_enabled_a = 95, + MSM_MUX_pwr_modem_enabled_b = 96, + MSM_MUX_pwr_nav_enabled_a = 97, + MSM_MUX_pwr_nav_enabled_b = 98, + MSM_MUX_qdss_ctitrig_in_a0 = 99, + MSM_MUX_qdss_ctitrig_in_a1 = 100, + MSM_MUX_qdss_ctitrig_in_b0 = 101, + MSM_MUX_qdss_ctitrig_in_b1 = 102, + MSM_MUX_qdss_ctitrig_out_a0 = 103, + MSM_MUX_qdss_ctitrig_out_a1 = 104, + MSM_MUX_qdss_ctitrig_out_b0 = 105, + MSM_MUX_qdss_ctitrig_out_b1 = 106, + MSM_MUX_qdss_traceclk_a = 107, + MSM_MUX_qdss_traceclk_b = 108, + MSM_MUX_qdss_tracectl_a = 109, + MSM_MUX_qdss_tracectl_b = 110, + MSM_MUX_qdss_tracedata_a = 111, + MSM_MUX_qdss_tracedata_b = 112, + MSM_MUX_reset_n = 113, + MSM_MUX_sd_card = 114, + MSM_MUX_sd_write = 115, + MSM_MUX_sec_mi2s = 116, + MSM_MUX_smb_int = 117, + MSM_MUX_ssbi_wtr0 = 118, + MSM_MUX_ssbi_wtr1 = 119, + MSM_MUX_uim1 = 120, + MSM_MUX_uim2 = 121, + MSM_MUX_uim3 = 122, + MSM_MUX_uim_batt = 123, + MSM_MUX_wcss_bt = 124, + MSM_MUX_wcss_fm = 125, + MSM_MUX_wcss_wlan = 126, + MSM_MUX_webcam1_rst = 127, + MSM_MUX_NA = 128, +}; + +enum msm8994_functions { + MSM_MUX_audio_ref_clk = 0, + MSM_MUX_blsp_i2c1___2 = 1, + 
MSM_MUX_blsp_i2c2___2 = 2, + MSM_MUX_blsp_i2c3___2 = 3, + MSM_MUX_blsp_i2c4___2 = 4, + MSM_MUX_blsp_i2c5___2 = 5, + MSM_MUX_blsp_i2c6___2 = 6, + MSM_MUX_blsp_i2c7 = 7, + MSM_MUX_blsp_i2c8 = 8, + MSM_MUX_blsp_i2c9 = 9, + MSM_MUX_blsp_i2c10 = 10, + MSM_MUX_blsp_i2c11 = 11, + MSM_MUX_blsp_i2c12 = 12, + MSM_MUX_blsp_spi1___2 = 13, + MSM_MUX_blsp_spi1_cs1___2 = 14, + MSM_MUX_blsp_spi1_cs2___2 = 15, + MSM_MUX_blsp_spi1_cs3___2 = 16, + MSM_MUX_blsp_spi2___2 = 17, + MSM_MUX_blsp_spi2_cs1___2 = 18, + MSM_MUX_blsp_spi2_cs2___2 = 19, + MSM_MUX_blsp_spi2_cs3___2 = 20, + MSM_MUX_blsp_spi3___2 = 21, + MSM_MUX_blsp_spi4___2 = 22, + MSM_MUX_blsp_spi5___2 = 23, + MSM_MUX_blsp_spi6___2 = 24, + MSM_MUX_blsp_spi7 = 25, + MSM_MUX_blsp_spi8 = 26, + MSM_MUX_blsp_spi9 = 27, + MSM_MUX_blsp_spi10 = 28, + MSM_MUX_blsp_spi10_cs1 = 29, + MSM_MUX_blsp_spi10_cs2 = 30, + MSM_MUX_blsp_spi10_cs3 = 31, + MSM_MUX_blsp_spi11 = 32, + MSM_MUX_blsp_spi12 = 33, + MSM_MUX_blsp_uart1___2 = 34, + MSM_MUX_blsp_uart2___2 = 35, + MSM_MUX_blsp_uart3 = 36, + MSM_MUX_blsp_uart4 = 37, + MSM_MUX_blsp_uart5 = 38, + MSM_MUX_blsp_uart6 = 39, + MSM_MUX_blsp_uart7 = 40, + MSM_MUX_blsp_uart8 = 41, + MSM_MUX_blsp_uart9 = 42, + MSM_MUX_blsp_uart10 = 43, + MSM_MUX_blsp_uart11 = 44, + MSM_MUX_blsp_uart12 = 45, + MSM_MUX_blsp_uim1___2 = 46, + MSM_MUX_blsp_uim2___2 = 47, + MSM_MUX_blsp_uim3 = 48, + MSM_MUX_blsp_uim4 = 49, + MSM_MUX_blsp_uim5 = 50, + MSM_MUX_blsp_uim6 = 51, + MSM_MUX_blsp_uim7 = 52, + MSM_MUX_blsp_uim8 = 53, + MSM_MUX_blsp_uim9 = 54, + MSM_MUX_blsp_uim10 = 55, + MSM_MUX_blsp_uim11 = 56, + MSM_MUX_blsp_uim12 = 57, + MSM_MUX_blsp11_i2c_scl_b = 58, + MSM_MUX_blsp11_i2c_sda_b = 59, + MSM_MUX_blsp11_uart_rx_b = 60, + MSM_MUX_blsp11_uart_tx_b = 61, + MSM_MUX_cam_mclk0___2 = 62, + MSM_MUX_cam_mclk1___2 = 63, + MSM_MUX_cam_mclk2 = 64, + MSM_MUX_cam_mclk3 = 65, + MSM_MUX_cci_async_in0 = 66, + MSM_MUX_cci_async_in1 = 67, + MSM_MUX_cci_async_in2 = 68, + MSM_MUX_cci_i2c0 = 69, + MSM_MUX_cci_i2c1 = 70, + MSM_MUX_cci_timer0___2 = 71, + MSM_MUX_cci_timer1___2 = 72, + MSM_MUX_cci_timer2___2 = 73, + MSM_MUX_cci_timer3 = 74, + MSM_MUX_cci_timer4 = 75, + MSM_MUX_gcc_gp1_clk_a___2 = 76, + MSM_MUX_gcc_gp1_clk_b___2 = 77, + MSM_MUX_gcc_gp2_clk_a___2 = 78, + MSM_MUX_gcc_gp2_clk_b___2 = 79, + MSM_MUX_gcc_gp3_clk_a___2 = 80, + MSM_MUX_gcc_gp3_clk_b___2 = 81, + MSM_MUX_gp_mn = 82, + MSM_MUX_gp_pdm0 = 83, + MSM_MUX_gp_pdm1 = 84, + MSM_MUX_gp_pdm2 = 85, + MSM_MUX_gp0_clk = 86, + MSM_MUX_gp1_clk = 87, + MSM_MUX_gps_tx = 88, + MSM_MUX_gsm_tx = 89, + MSM_MUX_hdmi_cec = 90, + MSM_MUX_hdmi_ddc = 91, + MSM_MUX_hdmi_hpd = 92, + MSM_MUX_hdmi_rcv = 93, + MSM_MUX_mdp_vsync___2 = 94, + MSM_MUX_mss_lte = 95, + MSM_MUX_nav_pps___2 = 96, + MSM_MUX_nav_tsync___2 = 97, + MSM_MUX_qdss_cti_trig_in_a = 98, + MSM_MUX_qdss_cti_trig_in_b = 99, + MSM_MUX_qdss_cti_trig_in_c = 100, + MSM_MUX_qdss_cti_trig_in_d = 101, + MSM_MUX_qdss_cti_trig_out_a = 102, + MSM_MUX_qdss_cti_trig_out_b = 103, + MSM_MUX_qdss_cti_trig_out_c = 104, + MSM_MUX_qdss_cti_trig_out_d = 105, + MSM_MUX_qdss_traceclk_a___2 = 106, + MSM_MUX_qdss_traceclk_b___2 = 107, + MSM_MUX_qdss_tracectl_a___2 = 108, + MSM_MUX_qdss_tracectl_b___2 = 109, + MSM_MUX_qdss_tracedata_a___2 = 110, + MSM_MUX_qdss_tracedata_b___2 = 111, + MSM_MUX_qua_mi2s = 112, + MSM_MUX_pci_e0 = 113, + MSM_MUX_pci_e1 = 114, + MSM_MUX_pri_mi2s___2 = 115, + MSM_MUX_sdc4 = 116, + MSM_MUX_sec_mi2s___2 = 117, + MSM_MUX_slimbus = 118, + MSM_MUX_spkr_i2s = 119, + MSM_MUX_ter_mi2s = 120, + MSM_MUX_tsif1 = 121, + MSM_MUX_tsif2 = 122, + MSM_MUX_uim1___2 = 123, + 
MSM_MUX_uim2___2 = 124, + MSM_MUX_uim3___2 = 125, + MSM_MUX_uim4 = 126, + MSM_MUX_uim_batt_alarm = 127, + MSM_MUX_gpio___2 = 128, + MSM_MUX_NA___2 = 129, +}; + +enum msm8996_functions { + msm_mux_adsp_ext = 0, + msm_mux_atest_bbrx0 = 1, + msm_mux_atest_bbrx1 = 2, + msm_mux_atest_char = 3, + msm_mux_atest_char0 = 4, + msm_mux_atest_char1 = 5, + msm_mux_atest_char2 = 6, + msm_mux_atest_char3 = 7, + msm_mux_atest_gpsadc0 = 8, + msm_mux_atest_gpsadc1 = 9, + msm_mux_atest_tsens = 10, + msm_mux_atest_tsens2 = 11, + msm_mux_atest_usb1 = 12, + msm_mux_atest_usb10 = 13, + msm_mux_atest_usb11 = 14, + msm_mux_atest_usb12 = 15, + msm_mux_atest_usb13 = 16, + msm_mux_atest_usb2 = 17, + msm_mux_atest_usb20 = 18, + msm_mux_atest_usb21 = 19, + msm_mux_atest_usb22 = 20, + msm_mux_atest_usb23 = 21, + msm_mux_audio_ref = 22, + msm_mux_bimc_dte0 = 23, + msm_mux_bimc_dte1 = 24, + msm_mux_blsp10_spi = 25, + msm_mux_blsp11_i2c_scl_b = 26, + msm_mux_blsp11_i2c_sda_b = 27, + msm_mux_blsp11_uart_rx_b = 28, + msm_mux_blsp11_uart_tx_b = 29, + msm_mux_blsp1_spi = 30, + msm_mux_blsp2_spi = 31, + msm_mux_blsp_i2c1 = 32, + msm_mux_blsp_i2c10 = 33, + msm_mux_blsp_i2c11 = 34, + msm_mux_blsp_i2c12 = 35, + msm_mux_blsp_i2c2 = 36, + msm_mux_blsp_i2c3 = 37, + msm_mux_blsp_i2c4 = 38, + msm_mux_blsp_i2c5 = 39, + msm_mux_blsp_i2c6 = 40, + msm_mux_blsp_i2c7 = 41, + msm_mux_blsp_i2c8 = 42, + msm_mux_blsp_i2c9 = 43, + msm_mux_blsp_spi1 = 44, + msm_mux_blsp_spi10 = 45, + msm_mux_blsp_spi11 = 46, + msm_mux_blsp_spi12 = 47, + msm_mux_blsp_spi2 = 48, + msm_mux_blsp_spi3 = 49, + msm_mux_blsp_spi4 = 50, + msm_mux_blsp_spi5 = 51, + msm_mux_blsp_spi6 = 52, + msm_mux_blsp_spi7 = 53, + msm_mux_blsp_spi8 = 54, + msm_mux_blsp_spi9 = 55, + msm_mux_blsp_uart1 = 56, + msm_mux_blsp_uart10 = 57, + msm_mux_blsp_uart11 = 58, + msm_mux_blsp_uart12 = 59, + msm_mux_blsp_uart2 = 60, + msm_mux_blsp_uart3 = 61, + msm_mux_blsp_uart4 = 62, + msm_mux_blsp_uart5 = 63, + msm_mux_blsp_uart6 = 64, + msm_mux_blsp_uart7 = 65, + msm_mux_blsp_uart8 = 66, + msm_mux_blsp_uart9 = 67, + msm_mux_blsp_uim1 = 68, + msm_mux_blsp_uim10 = 69, + msm_mux_blsp_uim11 = 70, + msm_mux_blsp_uim12 = 71, + msm_mux_blsp_uim2 = 72, + msm_mux_blsp_uim3 = 73, + msm_mux_blsp_uim4 = 74, + msm_mux_blsp_uim5 = 75, + msm_mux_blsp_uim6 = 76, + msm_mux_blsp_uim7 = 77, + msm_mux_blsp_uim8 = 78, + msm_mux_blsp_uim9 = 79, + msm_mux_btfm_slimbus = 80, + msm_mux_cam_mclk = 81, + msm_mux_cci_async = 82, + msm_mux_cci_i2c = 83, + msm_mux_cci_timer0 = 84, + msm_mux_cci_timer1 = 85, + msm_mux_cci_timer2 = 86, + msm_mux_cci_timer3 = 87, + msm_mux_cci_timer4 = 88, + msm_mux_cri_trng = 89, + msm_mux_cri_trng0 = 90, + msm_mux_cri_trng1 = 91, + msm_mux_dac_calib0 = 92, + msm_mux_dac_calib1 = 93, + msm_mux_dac_calib10 = 94, + msm_mux_dac_calib11 = 95, + msm_mux_dac_calib12 = 96, + msm_mux_dac_calib13 = 97, + msm_mux_dac_calib14 = 98, + msm_mux_dac_calib15 = 99, + msm_mux_dac_calib16 = 100, + msm_mux_dac_calib17 = 101, + msm_mux_dac_calib18 = 102, + msm_mux_dac_calib19 = 103, + msm_mux_dac_calib2 = 104, + msm_mux_dac_calib20 = 105, + msm_mux_dac_calib21 = 106, + msm_mux_dac_calib22 = 107, + msm_mux_dac_calib23 = 108, + msm_mux_dac_calib24 = 109, + msm_mux_dac_calib25 = 110, + msm_mux_dac_calib26 = 111, + msm_mux_dac_calib3 = 112, + msm_mux_dac_calib4 = 113, + msm_mux_dac_calib5 = 114, + msm_mux_dac_calib6 = 115, + msm_mux_dac_calib7 = 116, + msm_mux_dac_calib8 = 117, + msm_mux_dac_calib9 = 118, + msm_mux_dac_gpio = 119, + msm_mux_dbg_out = 120, + msm_mux_ddr_bist = 121, + msm_mux_edp_hot = 122, + msm_mux_edp_lcd = 
123, + msm_mux_gcc_gp1_clk_a = 124, + msm_mux_gcc_gp1_clk_b = 125, + msm_mux_gcc_gp2_clk_a = 126, + msm_mux_gcc_gp2_clk_b = 127, + msm_mux_gcc_gp3_clk_a = 128, + msm_mux_gcc_gp3_clk_b = 129, + msm_mux_gsm_tx = 130, + msm_mux_hdmi_cec = 131, + msm_mux_hdmi_ddc = 132, + msm_mux_hdmi_hot = 133, + msm_mux_hdmi_rcv = 134, + msm_mux_isense_dbg = 135, + msm_mux_ldo_en = 136, + msm_mux_ldo_update = 137, + msm_mux_lpass_slimbus = 138, + msm_mux_m_voc = 139, + msm_mux_mdp_vsync = 140, + msm_mux_mdp_vsync_p_b = 141, + msm_mux_mdp_vsync_s_b = 142, + msm_mux_modem_tsync = 143, + msm_mux_mss_lte = 144, + msm_mux_nav_dr = 145, + msm_mux_nav_pps = 146, + msm_mux_pa_indicator = 147, + msm_mux_pci_e0 = 148, + msm_mux_pci_e1 = 149, + msm_mux_pci_e2 = 150, + msm_mux_pll_bypassnl = 151, + msm_mux_pll_reset = 152, + msm_mux_pri_mi2s = 153, + msm_mux_prng_rosc = 154, + msm_mux_pwr_crypto = 155, + msm_mux_pwr_modem = 156, + msm_mux_pwr_nav = 157, + msm_mux_qdss_cti = 158, + msm_mux_qdss_cti_trig_in_a = 159, + msm_mux_qdss_cti_trig_in_b = 160, + msm_mux_qdss_cti_trig_out_a = 161, + msm_mux_qdss_cti_trig_out_b = 162, + msm_mux_qdss_stm0 = 163, + msm_mux_qdss_stm1 = 164, + msm_mux_qdss_stm10 = 165, + msm_mux_qdss_stm11 = 166, + msm_mux_qdss_stm12 = 167, + msm_mux_qdss_stm13 = 168, + msm_mux_qdss_stm14 = 169, + msm_mux_qdss_stm15 = 170, + msm_mux_qdss_stm16 = 171, + msm_mux_qdss_stm17 = 172, + msm_mux_qdss_stm18 = 173, + msm_mux_qdss_stm19 = 174, + msm_mux_qdss_stm2 = 175, + msm_mux_qdss_stm20 = 176, + msm_mux_qdss_stm21 = 177, + msm_mux_qdss_stm22 = 178, + msm_mux_qdss_stm23 = 179, + msm_mux_qdss_stm24 = 180, + msm_mux_qdss_stm25 = 181, + msm_mux_qdss_stm26 = 182, + msm_mux_qdss_stm27 = 183, + msm_mux_qdss_stm28 = 184, + msm_mux_qdss_stm29 = 185, + msm_mux_qdss_stm3 = 186, + msm_mux_qdss_stm30 = 187, + msm_mux_qdss_stm31 = 188, + msm_mux_qdss_stm4 = 189, + msm_mux_qdss_stm5 = 190, + msm_mux_qdss_stm6 = 191, + msm_mux_qdss_stm7 = 192, + msm_mux_qdss_stm8 = 193, + msm_mux_qdss_stm9 = 194, + msm_mux_qdss_traceclk_a = 195, + msm_mux_qdss_traceclk_b = 196, + msm_mux_qdss_tracectl_a = 197, + msm_mux_qdss_tracectl_b = 198, + msm_mux_qdss_tracedata_11 = 199, + msm_mux_qdss_tracedata_12 = 200, + msm_mux_qdss_tracedata_a = 201, + msm_mux_qdss_tracedata_b = 202, + msm_mux_qspi0 = 203, + msm_mux_qspi1 = 204, + msm_mux_qspi2 = 205, + msm_mux_qspi3 = 206, + msm_mux_qspi_clk = 207, + msm_mux_qspi_cs = 208, + msm_mux_qua_mi2s = 209, + msm_mux_sd_card = 210, + msm_mux_sd_write = 211, + msm_mux_sdc40 = 212, + msm_mux_sdc41 = 213, + msm_mux_sdc42 = 214, + msm_mux_sdc43 = 215, + msm_mux_sdc4_clk = 216, + msm_mux_sdc4_cmd = 217, + msm_mux_sec_mi2s = 218, + msm_mux_spkr_i2s = 219, + msm_mux_ssbi1 = 220, + msm_mux_ssbi2 = 221, + msm_mux_ssc_irq = 222, + msm_mux_ter_mi2s = 223, + msm_mux_tsense_pwm1 = 224, + msm_mux_tsense_pwm2 = 225, + msm_mux_tsif1_clk = 226, + msm_mux_tsif1_data = 227, + msm_mux_tsif1_en = 228, + msm_mux_tsif1_error = 229, + msm_mux_tsif1_sync = 230, + msm_mux_tsif2_clk = 231, + msm_mux_tsif2_data = 232, + msm_mux_tsif2_en = 233, + msm_mux_tsif2_error = 234, + msm_mux_tsif2_sync = 235, + msm_mux_uim1 = 236, + msm_mux_uim2 = 237, + msm_mux_uim3 = 238, + msm_mux_uim4 = 239, + msm_mux_uim_batt = 240, + msm_mux_vfr_1 = 241, + msm_mux_gpio = 242, + msm_mux_NA = 243, +}; + +enum pincfg_type { + PINCFG_TYPE_FUNC = 0, + PINCFG_TYPE_DAT = 1, + PINCFG_TYPE_PUD = 2, + PINCFG_TYPE_DRV = 3, + PINCFG_TYPE_CON_PDN = 4, + PINCFG_TYPE_PUD_PDN = 5, + PINCFG_TYPE_NUM = 6, +}; + +enum eint_type { + EINT_TYPE_NONE = 0, + EINT_TYPE_GPIO = 1, 
+ EINT_TYPE_WKUP = 2, + EINT_TYPE_WKUP_MUX = 3, +}; + +struct samsung_pin_bank_type { + u8 fld_width[6]; + u8 reg_offset[6]; +}; + +struct samsung_pin_bank_data { + const struct samsung_pin_bank_type *type; + u32 pctl_offset; + u8 pctl_res_idx; + u8 nr_pins; + u8 eint_func; + enum eint_type eint_type; + u32 eint_mask; + u32 eint_offset; + const char *name; +}; + +struct samsung_pinctrl_drv_data; + +struct exynos_irq_chip; + +struct samsung_pin_bank { + const struct samsung_pin_bank_type *type; + void *pctl_base; + u32 pctl_offset; + u8 nr_pins; + void *eint_base; + u8 eint_func; + enum eint_type eint_type; + u32 eint_mask; + u32 eint_offset; + const char *name; + u32 pin_base; + void *soc_priv; + struct device_node *of_node; + struct samsung_pinctrl_drv_data *drvdata; + struct irq_domain *irq_domain; + struct gpio_chip gpio_chip; + struct pinctrl_gpio_range grange; + struct exynos_irq_chip *irq_chip; + spinlock_t slock; + u32 pm_save[7]; +}; + +struct samsung_pin_group; + +struct samsung_pmx_func; + +struct samsung_retention_ctrl; + +struct samsung_pinctrl_drv_data { + struct list_head node; + void *virt_base; + struct device *dev; + int irq; + struct pinctrl_desc pctl; + struct pinctrl_dev *pctl_dev; + const struct samsung_pin_group *pin_groups; + unsigned int nr_groups; + const struct samsung_pmx_func *pmx_functions; + unsigned int nr_functions; + struct samsung_pin_bank *pin_banks; + unsigned int nr_banks; + unsigned int pin_base; + unsigned int nr_pins; + struct samsung_retention_ctrl *retention_ctrl; + void (*suspend)(struct samsung_pinctrl_drv_data *); + void (*resume)(struct samsung_pinctrl_drv_data *); +}; + +struct samsung_retention_ctrl { + const u32 *regs; + int nr_regs; + u32 value; + atomic_t *refcnt; + void *priv; + void (*enable)(struct samsung_pinctrl_drv_data *); + void (*disable)(struct samsung_pinctrl_drv_data *); +}; + +struct samsung_retention_data { + const u32 *regs; + int nr_regs; + u32 value; + atomic_t *refcnt; + struct samsung_retention_ctrl * (*init)(struct samsung_pinctrl_drv_data *, const struct samsung_retention_data *); +}; + +struct samsung_pin_ctrl { + const struct samsung_pin_bank_data *pin_banks; + unsigned int nr_banks; + unsigned int nr_ext_resources; + const struct samsung_retention_data *retention_data; + int (*eint_gpio_init)(struct samsung_pinctrl_drv_data *); + int (*eint_wkup_init)(struct samsung_pinctrl_drv_data *); + void (*suspend)(struct samsung_pinctrl_drv_data *); + void (*resume)(struct samsung_pinctrl_drv_data *); +}; + +struct samsung_pin_group { + const char *name; + const unsigned int *pins; + u8 num_pins; + u8 func; +}; + +struct samsung_pmx_func { + const char *name; + const char **groups; + u8 num_groups; + u32 val; +}; + +struct samsung_pinctrl_of_match_data { + const struct samsung_pin_ctrl *ctrl; + unsigned int num_ctrl; +}; + +struct pin_config { + const char *property; + enum pincfg_type param; +}; + +struct exynos_irq_chip { + struct irq_chip chip; + u32 eint_con; + u32 eint_mask; + u32 eint_pend; + u32 *eint_wake_mask_value; + u32 eint_wake_mask_reg; + void (*set_eint_wakeup_mask)(struct samsung_pinctrl_drv_data *, struct exynos_irq_chip *); +}; + +struct exynos_weint_data { + unsigned int irq; + struct samsung_pin_bank *bank; +}; + +struct exynos_muxed_weint_data { + unsigned int nr_banks; + struct samsung_pin_bank *banks[0]; +}; + +struct exynos_eint_gpio_save { + u32 eint_con; + u32 eint_fltcon0; + u32 eint_fltcon1; + u32 eint_mask; +}; + +enum sunxi_desc_bias_voltage { + BIAS_VOLTAGE_NONE = 0, + BIAS_VOLTAGE_GRP_CONFIG = 
1, + BIAS_VOLTAGE_PIO_POW_MODE_SEL = 2, +}; + +struct sunxi_desc_function { + long unsigned int variant; + const char *name; + u8 muxval; + u8 irqbank; + u8 irqnum; +}; + +struct sunxi_desc_pin { + struct pinctrl_pin_desc pin; + long unsigned int variant; + struct sunxi_desc_function *functions; +}; + +struct sunxi_pinctrl_desc { + const struct sunxi_desc_pin *pins; + int npins; + unsigned int pin_base; + unsigned int irq_banks; + const unsigned int *irq_bank_map; + bool irq_read_needs_mux; + bool disable_strict_mode; + enum sunxi_desc_bias_voltage io_bias_cfg_variant; +}; + +struct sunxi_pinctrl_function { + const char *name; + const char **groups; + unsigned int ngroups; +}; + +struct sunxi_pinctrl_group { + const char *name; + unsigned int pin; +}; + +struct sunxi_pinctrl_regulator { + struct regulator *regulator; + refcount_t refcount; +}; + +struct sunxi_pinctrl { + void *membase; + struct gpio_chip *chip; + const struct sunxi_pinctrl_desc *desc; + struct device *dev; + struct sunxi_pinctrl_regulator regulators[9]; + struct irq_domain *domain; + struct sunxi_pinctrl_function *functions; + unsigned int nfunctions; + struct sunxi_pinctrl_group *groups; + unsigned int ngroups; + int *irq; + unsigned int *irq_array; + raw_spinlock_t lock; + struct pinctrl_dev *pctl_dev; + long unsigned int variant; +}; + +struct mtk_eint_regs { + unsigned int stat; + unsigned int ack; + unsigned int mask; + unsigned int mask_set; + unsigned int mask_clr; + unsigned int sens; + unsigned int sens_set; + unsigned int sens_clr; + unsigned int soft; + unsigned int soft_set; + unsigned int soft_clr; + unsigned int pol; + unsigned int pol_set; + unsigned int pol_clr; + unsigned int dom_en; + unsigned int dbnc_ctrl; + unsigned int dbnc_set; + unsigned int dbnc_clr; +}; + +struct mtk_eint_hw { + u8 port_mask; + u8 ports; + unsigned int ap_num; + unsigned int db_cnt; +}; + +struct mtk_eint_xt { + int (*get_gpio_n)(void *, long unsigned int, unsigned int *, struct gpio_chip **); + int (*get_gpio_state)(void *, long unsigned int); + int (*set_gpio_as_eint)(void *, long unsigned int); +}; + +struct mtk_eint { + struct device *dev; + void *base; + struct irq_domain *domain; + int irq; + int *dual_edge; + u32 *wake_mask; + u32 *cur_mask; + const struct mtk_eint_hw *hw; + const struct mtk_eint_regs *regs; + void *pctl; + const struct mtk_eint_xt *gpio_xlate; +}; + +struct mtk_desc_function { + const char *name; + unsigned char muxval; +}; + +struct mtk_desc_eint { + unsigned char eintmux; + unsigned char eintnum; +}; + +struct mtk_desc_pin { + struct pinctrl_pin_desc pin; + const struct mtk_desc_eint eint; + const struct mtk_desc_function *functions; +}; + +struct mtk_pinctrl_group { + const char *name; + long unsigned int config; + unsigned int pin; +}; + +struct mtk_drv_group_desc { + unsigned char min_drv; + unsigned char max_drv; + unsigned char low_bit; + unsigned char high_bit; + unsigned char step; +}; + +struct mtk_pin_drv_grp { + short unsigned int pin; + short unsigned int offset; + unsigned char bit; + unsigned char grp; +}; + +struct mtk_pin_spec_pupd_set_samereg { + short unsigned int pin; + short unsigned int offset; + unsigned char pupd_bit; + unsigned char r1_bit; + unsigned char r0_bit; +}; + +struct mtk_pin_ies_smt_set { + short unsigned int start; + short unsigned int end; + short unsigned int offset; + unsigned char bit; +}; + +struct mtk_pinctrl_devdata { + const struct mtk_desc_pin *pins; + unsigned int npins; + const struct mtk_drv_group_desc *grp_desc; + unsigned int n_grp_cls; + const struct 
mtk_pin_drv_grp *pin_drv_grp; + unsigned int n_pin_drv_grps; + int (*spec_pull_set)(struct regmap *, unsigned int, unsigned char, bool, unsigned int); + int (*spec_ies_smt_set)(struct regmap *, unsigned int, unsigned char, int, enum pin_config_param); + void (*spec_pinmux_set)(struct regmap *, unsigned int, unsigned int); + void (*spec_dir_set)(unsigned int *, unsigned int); + unsigned int dir_offset; + unsigned int ies_offset; + unsigned int smt_offset; + unsigned int pullen_offset; + unsigned int pullsel_offset; + unsigned int drv_offset; + unsigned int dout_offset; + unsigned int din_offset; + unsigned int pinmux_offset; + short unsigned int type1_start; + short unsigned int type1_end; + unsigned char port_shf; + unsigned char port_mask; + unsigned char port_align; + struct mtk_eint_hw eint_hw; + struct mtk_eint_regs *eint_regs; +}; + +struct mtk_pinctrl { + struct regmap *regmap1; + struct regmap *regmap2; + struct pinctrl_desc pctl_desc; + struct device *dev; + struct gpio_chip *chip; + struct mtk_pinctrl_group *groups; + unsigned int ngroups; + const char **grp_names; + struct pinctrl_dev *pctl_dev; + const struct mtk_pinctrl_devdata *devdata; + struct mtk_eint *eint; +}; + +enum { + PINCTRL_PIN_REG_MODE = 0, + PINCTRL_PIN_REG_DIR = 1, + PINCTRL_PIN_REG_DI = 2, + PINCTRL_PIN_REG_DO = 3, + PINCTRL_PIN_REG_SR = 4, + PINCTRL_PIN_REG_SMT = 5, + PINCTRL_PIN_REG_PD = 6, + PINCTRL_PIN_REG_PU = 7, + PINCTRL_PIN_REG_E4 = 8, + PINCTRL_PIN_REG_E8 = 9, + PINCTRL_PIN_REG_TDSEL = 10, + PINCTRL_PIN_REG_RDSEL = 11, + PINCTRL_PIN_REG_DRV = 12, + PINCTRL_PIN_REG_PUPD = 13, + PINCTRL_PIN_REG_R0 = 14, + PINCTRL_PIN_REG_R1 = 15, + PINCTRL_PIN_REG_IES = 16, + PINCTRL_PIN_REG_PULLEN = 17, + PINCTRL_PIN_REG_PULLSEL = 18, + PINCTRL_PIN_REG_DRV_EN = 19, + PINCTRL_PIN_REG_DRV_E0 = 20, + PINCTRL_PIN_REG_DRV_E1 = 21, + PINCTRL_PIN_REG_MAX = 22, +}; + +enum { + DRV_FIXED = 0, + DRV_GRP0 = 1, + DRV_GRP1 = 2, + DRV_GRP2 = 3, + DRV_GRP3 = 4, + DRV_GRP4 = 5, + DRV_GRP_MAX = 6, +}; + +struct mtk_pin_field { + u8 index; + u32 offset; + u32 mask; + u8 bitpos; + u8 next; +}; + +struct mtk_pin_field_calc { + u16 s_pin; + u16 e_pin; + u8 i_base; + u32 s_addr; + u8 x_addrs; + u8 s_bit; + u8 x_bits; + u8 sz_reg; + u8 fixed; +}; + +struct mtk_pin_reg_calc { + const struct mtk_pin_field_calc *range; + unsigned int nranges; +}; + +struct mtk_func_desc { + const char *name; + u8 muxval; +}; + +struct mtk_eint_desc { + u16 eint_m; + u16 eint_n; +}; + +struct mtk_pin_desc { + unsigned int number; + const char *name; + struct mtk_eint_desc eint; + u8 drv_n; + struct mtk_func_desc *funcs; +}; + +struct mtk_pinctrl___2; + +struct mtk_pin_soc { + const struct mtk_pin_reg_calc *reg_cal; + const struct mtk_pin_desc *pins; + unsigned int npins; + const struct group_desc *grps; + unsigned int ngrps; + const struct function_desc *funcs; + unsigned int nfuncs; + const struct mtk_eint_regs *eint_regs; + const struct mtk_eint_hw *eint_hw; + u8 gpio_m; + bool ies_present; + const char * const *base_names; + unsigned int nbase_names; + int (*bias_disable_set)(struct mtk_pinctrl___2 *, const struct mtk_pin_desc *); + int (*bias_disable_get)(struct mtk_pinctrl___2 *, const struct mtk_pin_desc *, int *); + int (*bias_set)(struct mtk_pinctrl___2 *, const struct mtk_pin_desc *, bool); + int (*bias_get)(struct mtk_pinctrl___2 *, const struct mtk_pin_desc *, bool, int *); + int (*bias_set_combo)(struct mtk_pinctrl___2 *, const struct mtk_pin_desc *, u32, u32); + int (*bias_get_combo)(struct mtk_pinctrl___2 *, const struct mtk_pin_desc *, u32 *, u32 
*); + int (*drive_set)(struct mtk_pinctrl___2 *, const struct mtk_pin_desc *, u32); + int (*drive_get)(struct mtk_pinctrl___2 *, const struct mtk_pin_desc *, int *); + int (*adv_pull_set)(struct mtk_pinctrl___2 *, const struct mtk_pin_desc *, bool, u32); + int (*adv_pull_get)(struct mtk_pinctrl___2 *, const struct mtk_pin_desc *, bool, u32 *); + int (*adv_drive_set)(struct mtk_pinctrl___2 *, const struct mtk_pin_desc *, u32); + int (*adv_drive_get)(struct mtk_pinctrl___2 *, const struct mtk_pin_desc *, u32 *); + void *driver_data; +}; + +struct mtk_pinctrl___2 { + struct pinctrl_dev *pctrl; + void **base; + u8 nbase; + struct device *dev; + struct gpio_chip chip; + const struct mtk_pin_soc *soc; + struct mtk_eint *eint; + struct mtk_pinctrl_group *groups; + const char **grp_names; +}; + +struct mtk_drive_desc { + u8 min; + u8 max; + u8 step; + u8 scal; +}; + +struct mt6397_chip { + struct device *dev; + struct regmap *regmap; + struct notifier_block pm_nb; + int irq; + struct irq_domain *irq_domain; + struct mutex irqlock; + u16 wake_mask[2]; + u16 irq_masks_cur[2]; + u16 irq_masks_cache[2]; + u16 int_con[2]; + u16 int_status[2]; + u16 chip_id; + void *irq_data; +}; + +struct madera_pin_groups { + const char *name; + const unsigned int *pins; + unsigned int n_pins; +}; + +struct madera_pin_chip { + unsigned int n_pins; + const struct madera_pin_groups *pin_groups; + unsigned int n_pin_groups; +}; + +struct madera_pin_private { + struct madera *madera; + const struct madera_pin_chip *chip; + struct device *dev; + struct pinctrl_dev *pctl; +}; + +struct gpio_pin_range { + struct list_head node; + struct pinctrl_dev *pctldev; + struct pinctrl_gpio_range range; +}; + +struct gpio_array; + +struct gpio_descs { + struct gpio_array *info; + unsigned int ndescs; + struct gpio_desc___2 *desc[0]; +}; + +struct gpio_array { + struct gpio_desc___2 **desc; + unsigned int size; + struct gpio_chip *chip; + long unsigned int *get_mask; + long unsigned int *set_mask; + long unsigned int invert_mask[0]; +}; + +enum gpiod_flags { + GPIOD_ASIS = 0, + GPIOD_IN = 1, + GPIOD_OUT_LOW = 3, + GPIOD_OUT_HIGH = 7, + GPIOD_OUT_LOW_OPEN_DRAIN = 11, + GPIOD_OUT_HIGH_OPEN_DRAIN = 15, +}; + +enum gpio_lookup_flags { + GPIO_ACTIVE_HIGH = 0, + GPIO_ACTIVE_LOW = 1, + GPIO_OPEN_DRAIN = 2, + GPIO_OPEN_SOURCE = 4, + GPIO_PERSISTENT = 0, + GPIO_TRANSITORY = 8, + GPIO_PULL_UP = 16, + GPIO_PULL_DOWN = 32, + GPIO_LOOKUP_FLAGS_DEFAULT = 0, +}; + +struct gpiod_lookup { + const char *key; + u16 chip_hwnum; + const char *con_id; + unsigned int idx; + long unsigned int flags; +}; + +struct gpiod_lookup_table { + struct list_head list; + const char *dev_id; + struct gpiod_lookup table[0]; +}; + +struct gpiod_hog { + struct list_head list; + const char *chip_label; + u16 chip_hwnum; + const char *line_name; + long unsigned int lflags; + int dflags; +}; + +enum { + GPIOLINE_CHANGED_REQUESTED = 1, + GPIOLINE_CHANGED_RELEASED = 2, + GPIOLINE_CHANGED_CONFIG = 3, +}; + +struct acpi_gpio_info { + struct acpi_device *adev; + enum gpiod_flags flags; + bool gpioint; + int pin_config; + int polarity; + int triggering; + unsigned int quirks; +}; + +struct trace_event_raw_gpio_direction { + struct trace_entry ent; + unsigned int gpio; + int in; + int err; + char __data[0]; +}; + +struct trace_event_raw_gpio_value { + struct trace_entry ent; + unsigned int gpio; + int get; + int value; + char __data[0]; +}; + +struct trace_event_data_offsets_gpio_direction {}; + +struct trace_event_data_offsets_gpio_value {}; + +typedef void 
(*btf_trace_gpio_direction)(void *, unsigned int, int, int); + +typedef void (*btf_trace_gpio_value)(void *, unsigned int, int, int); + +struct devres; + +struct gpio { + unsigned int gpio; + long unsigned int flags; + const char *label; +}; + +struct of_reconfig_data { + struct device_node *dn; + struct property *prop; + struct property *old_prop; +}; + +enum of_reconfig_change { + OF_RECONFIG_NO_CHANGE = 0, + OF_RECONFIG_CHANGE_ADD = 1, + OF_RECONFIG_CHANGE_REMOVE = 2, +}; + +enum of_gpio_flags { + OF_GPIO_ACTIVE_LOW = 1, + OF_GPIO_SINGLE_ENDED = 2, + OF_GPIO_OPEN_DRAIN = 4, + OF_GPIO_TRANSITORY = 8, + OF_GPIO_PULL_UP = 16, + OF_GPIO_PULL_DOWN = 32, +}; + +struct of_mm_gpio_chip { + struct gpio_chip gc; + void (*save_regs)(struct of_mm_gpio_chip *); + void *regs; +}; + +struct gpiochip_info { + char name[32]; + char label[32]; + __u32 lines; +}; + +enum gpio_v2_line_flag { + GPIO_V2_LINE_FLAG_USED = 1, + GPIO_V2_LINE_FLAG_ACTIVE_LOW = 2, + GPIO_V2_LINE_FLAG_INPUT = 4, + GPIO_V2_LINE_FLAG_OUTPUT = 8, + GPIO_V2_LINE_FLAG_EDGE_RISING = 16, + GPIO_V2_LINE_FLAG_EDGE_FALLING = 32, + GPIO_V2_LINE_FLAG_OPEN_DRAIN = 64, + GPIO_V2_LINE_FLAG_OPEN_SOURCE = 128, + GPIO_V2_LINE_FLAG_BIAS_PULL_UP = 256, + GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN = 512, + GPIO_V2_LINE_FLAG_BIAS_DISABLED = 1024, +}; + +struct gpio_v2_line_values { + __u64 bits; + __u64 mask; +}; + +enum gpio_v2_line_attr_id { + GPIO_V2_LINE_ATTR_ID_FLAGS = 1, + GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES = 2, + GPIO_V2_LINE_ATTR_ID_DEBOUNCE = 3, +}; + +struct gpio_v2_line_attribute { + __u32 id; + __u32 padding; + union { + __u64 flags; + __u64 values; + __u32 debounce_period_us; + }; +}; + +struct gpio_v2_line_config_attribute { + struct gpio_v2_line_attribute attr; + __u64 mask; +}; + +struct gpio_v2_line_config { + __u64 flags; + __u32 num_attrs; + __u32 padding[5]; + struct gpio_v2_line_config_attribute attrs[10]; +}; + +struct gpio_v2_line_request { + __u32 offsets[64]; + char consumer[32]; + struct gpio_v2_line_config config; + __u32 num_lines; + __u32 event_buffer_size; + __u32 padding[5]; + __s32 fd; +}; + +struct gpio_v2_line_info { + char name[32]; + char consumer[32]; + __u32 offset; + __u32 num_attrs; + __u64 flags; + struct gpio_v2_line_attribute attrs[10]; + __u32 padding[4]; +}; + +enum gpio_v2_line_changed_type { + GPIO_V2_LINE_CHANGED_REQUESTED = 1, + GPIO_V2_LINE_CHANGED_RELEASED = 2, + GPIO_V2_LINE_CHANGED_CONFIG = 3, +}; + +struct gpio_v2_line_info_changed { + struct gpio_v2_line_info info; + __u64 timestamp_ns; + __u32 event_type; + __u32 padding[5]; +}; + +enum gpio_v2_line_event_id { + GPIO_V2_LINE_EVENT_RISING_EDGE = 1, + GPIO_V2_LINE_EVENT_FALLING_EDGE = 2, +}; + +struct gpio_v2_line_event { + __u64 timestamp_ns; + __u32 id; + __u32 offset; + __u32 seqno; + __u32 line_seqno; + __u32 padding[6]; +}; + +struct gpioline_info { + __u32 line_offset; + __u32 flags; + char name[32]; + char consumer[32]; +}; + +struct gpioline_info_changed { + struct gpioline_info info; + __u64 timestamp; + __u32 event_type; + __u32 padding[5]; +}; + +struct gpiohandle_request { + __u32 lineoffsets[64]; + __u32 flags; + __u8 default_values[64]; + char consumer_label[32]; + __u32 lines; + int fd; +}; + +struct gpiohandle_config { + __u32 flags; + __u8 default_values[64]; + __u32 padding[4]; +}; + +struct gpiohandle_data { + __u8 values[64]; +}; + +struct gpioevent_request { + __u32 lineoffset; + __u32 handleflags; + __u32 eventflags; + char consumer_label[32]; + int fd; +}; + +struct gpioevent_data { + __u64 timestamp; + __u32 id; +}; + +struct 
linehandle_state { + struct gpio_device *gdev; + const char *label; + struct gpio_desc___2 *descs[64]; + u32 num_descs; +}; + +struct linereq; + +struct line { + struct gpio_desc___2 *desc; + struct linereq *req; + unsigned int irq; + u64 eflags; + u64 timestamp_ns; + u32 req_seqno; + u32 line_seqno; + struct delayed_work work; + unsigned int sw_debounced; + unsigned int level; +}; + +struct linereq { + struct gpio_device *gdev; + const char *label; + u32 num_lines; + wait_queue_head_t wait; + u32 event_buffer_size; + struct { + union { + struct __kfifo kfifo; + struct gpio_v2_line_event *type; + const struct gpio_v2_line_event *const_type; + char (*rectype)[0]; + struct gpio_v2_line_event *ptr; + const struct gpio_v2_line_event *ptr_const; + }; + struct gpio_v2_line_event buf[0]; + } events; + atomic_t seqno; + struct mutex config_mutex; + struct line lines[0]; +}; + +struct lineevent_state { + struct gpio_device *gdev; + const char *label; + struct gpio_desc___2 *desc; + u32 eflags; + int irq; + wait_queue_head_t wait; + struct { + union { + struct __kfifo kfifo; + struct gpioevent_data *type; + const struct gpioevent_data *const_type; + char (*rectype)[0]; + struct gpioevent_data *ptr; + const struct gpioevent_data *ptr_const; + }; + struct gpioevent_data buf[16]; + } events; + u64 timestamp; +}; + +struct gpio_chardev_data { + struct gpio_device *gdev; + wait_queue_head_t wait; + struct { + union { + struct __kfifo kfifo; + struct gpio_v2_line_info_changed *type; + const struct gpio_v2_line_info_changed *const_type; + char (*rectype)[0]; + struct gpio_v2_line_info_changed *ptr; + const struct gpio_v2_line_info_changed *ptr_const; + }; + struct gpio_v2_line_info_changed buf[32]; + } events; + struct notifier_block lineinfo_changed_nb; + long unsigned int *watched_lines; + atomic_t watch_abi_version; +}; + +struct class_attribute { + struct attribute attr; + ssize_t (*show)(struct class *, struct class_attribute *, char *); + ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t); +}; + +struct gpiod_data { + struct gpio_desc___2 *desc; + struct mutex mutex; + struct kernfs_node *value_kn; + int irq; + unsigned char irq_flags; + bool direction_can_change; +}; + +enum dmi_field { + DMI_NONE = 0, + DMI_BIOS_VENDOR = 1, + DMI_BIOS_VERSION = 2, + DMI_BIOS_DATE = 3, + DMI_BIOS_RELEASE = 4, + DMI_EC_FIRMWARE_RELEASE = 5, + DMI_SYS_VENDOR = 6, + DMI_PRODUCT_NAME = 7, + DMI_PRODUCT_VERSION = 8, + DMI_PRODUCT_SERIAL = 9, + DMI_PRODUCT_UUID = 10, + DMI_PRODUCT_SKU = 11, + DMI_PRODUCT_FAMILY = 12, + DMI_BOARD_VENDOR = 13, + DMI_BOARD_NAME = 14, + DMI_BOARD_VERSION = 15, + DMI_BOARD_SERIAL = 16, + DMI_BOARD_ASSET_TAG = 17, + DMI_CHASSIS_VENDOR = 18, + DMI_CHASSIS_TYPE = 19, + DMI_CHASSIS_VERSION = 20, + DMI_CHASSIS_SERIAL = 21, + DMI_CHASSIS_ASSET_TAG = 22, + DMI_STRING_MAX = 23, + DMI_OEM_STRING = 24, +}; + +struct dmi_strmatch { + unsigned char slot: 7; + unsigned char exact_match: 1; + char substr[79]; +}; + +struct dmi_system_id { + int (*callback)(const struct dmi_system_id *); + const char *ident; + struct dmi_strmatch matches[4]; + void *driver_data; +}; + +typedef u8 acpi_adr_space_type; + +struct acpi_connection_info { + u8 *connection; + u16 length; + u8 access_length; +}; + +struct acpi_resource_irq { + u8 descriptor_length; + u8 triggering; + u8 polarity; + u8 shareable; + u8 wake_capable; + u8 interrupt_count; + u8 interrupts[1]; +}; + +struct acpi_resource_dma { + u8 type; + u8 bus_master; + u8 transfer; + u8 channel_count; + u8 channels[1]; +}; + +struct 
acpi_resource_start_dependent { + u8 descriptor_length; + u8 compatibility_priority; + u8 performance_robustness; +}; + +struct acpi_resource_io { + u8 io_decode; + u8 alignment; + u8 address_length; + u16 minimum; + u16 maximum; +} __attribute__((packed)); + +struct acpi_resource_fixed_io { + u16 address; + u8 address_length; +} __attribute__((packed)); + +struct acpi_resource_fixed_dma { + u16 request_lines; + u16 channels; + u8 width; +} __attribute__((packed)); + +struct acpi_resource_vendor { + u16 byte_length; + u8 byte_data[1]; +} __attribute__((packed)); + +struct acpi_resource_vendor_typed { + u16 byte_length; + u8 uuid_subtype; + u8 uuid[16]; + u8 byte_data[1]; +}; + +struct acpi_resource_end_tag { + u8 checksum; +}; + +struct acpi_resource_memory24 { + u8 write_protect; + u16 minimum; + u16 maximum; + u16 alignment; + u16 address_length; +} __attribute__((packed)); + +struct acpi_resource_memory32 { + u8 write_protect; + u32 minimum; + u32 maximum; + u32 alignment; + u32 address_length; +} __attribute__((packed)); + +struct acpi_resource_fixed_memory32 { + u8 write_protect; + u32 address; + u32 address_length; +} __attribute__((packed)); + +struct acpi_memory_attribute { + u8 write_protect; + u8 caching; + u8 range_type; + u8 translation; +}; + +struct acpi_io_attribute { + u8 range_type; + u8 translation; + u8 translation_type; + u8 reserved1; +}; + +union acpi_resource_attribute { + struct acpi_memory_attribute mem; + struct acpi_io_attribute io; + u8 type_specific; +}; + +struct acpi_resource_label { + u16 string_length; + char *string_ptr; +} __attribute__((packed)); + +struct acpi_resource_source { + u8 index; + u16 string_length; + char *string_ptr; +} __attribute__((packed)); + +struct acpi_address16_attribute { + u16 granularity; + u16 minimum; + u16 maximum; + u16 translation_offset; + u16 address_length; +}; + +struct acpi_address32_attribute { + u32 granularity; + u32 minimum; + u32 maximum; + u32 translation_offset; + u32 address_length; +}; + +struct acpi_address64_attribute { + u64 granularity; + u64 minimum; + u64 maximum; + u64 translation_offset; + u64 address_length; +}; + +struct acpi_resource_address { + u8 resource_type; + u8 producer_consumer; + u8 decode; + u8 min_address_fixed; + u8 max_address_fixed; + union acpi_resource_attribute info; +}; + +struct acpi_resource_address16 { + u8 resource_type; + u8 producer_consumer; + u8 decode; + u8 min_address_fixed; + u8 max_address_fixed; + union acpi_resource_attribute info; + struct acpi_address16_attribute address; + struct acpi_resource_source resource_source; +} __attribute__((packed)); + +struct acpi_resource_address32 { + u8 resource_type; + u8 producer_consumer; + u8 decode; + u8 min_address_fixed; + u8 max_address_fixed; + union acpi_resource_attribute info; + struct acpi_address32_attribute address; + struct acpi_resource_source resource_source; +} __attribute__((packed)); + +struct acpi_resource_address64 { + u8 resource_type; + u8 producer_consumer; + u8 decode; + u8 min_address_fixed; + u8 max_address_fixed; + union acpi_resource_attribute info; + struct acpi_address64_attribute address; + struct acpi_resource_source resource_source; +} __attribute__((packed)); + +struct acpi_resource_extended_address64 { + u8 resource_type; + u8 producer_consumer; + u8 decode; + u8 min_address_fixed; + u8 max_address_fixed; + union acpi_resource_attribute info; + u8 revision_ID; + struct acpi_address64_attribute address; + u64 type_specific; +} __attribute__((packed)); + +struct acpi_resource_extended_irq { + u8 
producer_consumer; + u8 triggering; + u8 polarity; + u8 shareable; + u8 wake_capable; + u8 interrupt_count; + struct acpi_resource_source resource_source; + u32 interrupts[1]; +} __attribute__((packed)); + +struct acpi_resource_generic_register { + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 access_size; + u64 address; +} __attribute__((packed)); + +struct acpi_resource_gpio { + u8 revision_id; + u8 connection_type; + u8 producer_consumer; + u8 pin_config; + u8 shareable; + u8 wake_capable; + u8 io_restriction; + u8 triggering; + u8 polarity; + u16 drive_strength; + u16 debounce_timeout; + u16 pin_table_length; + u16 vendor_length; + struct acpi_resource_source resource_source; + u16 *pin_table; + u8 *vendor_data; +} __attribute__((packed)); + +struct acpi_resource_common_serialbus { + u8 revision_id; + u8 type; + u8 producer_consumer; + u8 slave_mode; + u8 connection_sharing; + u8 type_revision_id; + u16 type_data_length; + u16 vendor_length; + struct acpi_resource_source resource_source; + u8 *vendor_data; +} __attribute__((packed)); + +struct acpi_resource_i2c_serialbus { + u8 revision_id; + u8 type; + u8 producer_consumer; + u8 slave_mode; + u8 connection_sharing; + u8 type_revision_id; + u16 type_data_length; + u16 vendor_length; + struct acpi_resource_source resource_source; + u8 *vendor_data; + u8 access_mode; + u16 slave_address; + u32 connection_speed; +} __attribute__((packed)); + +struct acpi_resource_spi_serialbus { + u8 revision_id; + u8 type; + u8 producer_consumer; + u8 slave_mode; + u8 connection_sharing; + u8 type_revision_id; + u16 type_data_length; + u16 vendor_length; + struct acpi_resource_source resource_source; + u8 *vendor_data; + u8 wire_mode; + u8 device_polarity; + u8 data_bit_length; + u8 clock_phase; + u8 clock_polarity; + u16 device_selection; + u32 connection_speed; +} __attribute__((packed)); + +struct acpi_resource_uart_serialbus { + u8 revision_id; + u8 type; + u8 producer_consumer; + u8 slave_mode; + u8 connection_sharing; + u8 type_revision_id; + u16 type_data_length; + u16 vendor_length; + struct acpi_resource_source resource_source; + u8 *vendor_data; + u8 endian; + u8 data_bits; + u8 stop_bits; + u8 flow_control; + u8 parity; + u8 lines_enabled; + u16 rx_fifo_size; + u16 tx_fifo_size; + u32 default_baud_rate; +} __attribute__((packed)); + +struct acpi_resource_pin_function { + u8 revision_id; + u8 pin_config; + u8 shareable; + u16 function_number; + u16 pin_table_length; + u16 vendor_length; + struct acpi_resource_source resource_source; + u16 *pin_table; + u8 *vendor_data; +} __attribute__((packed)); + +struct acpi_resource_pin_config { + u8 revision_id; + u8 producer_consumer; + u8 shareable; + u8 pin_config_type; + u32 pin_config_value; + u16 pin_table_length; + u16 vendor_length; + struct acpi_resource_source resource_source; + u16 *pin_table; + u8 *vendor_data; +} __attribute__((packed)); + +struct acpi_resource_pin_group { + u8 revision_id; + u8 producer_consumer; + u16 pin_table_length; + u16 vendor_length; + u16 *pin_table; + struct acpi_resource_label resource_label; + u8 *vendor_data; +} __attribute__((packed)); + +struct acpi_resource_pin_group_function { + u8 revision_id; + u8 producer_consumer; + u8 shareable; + u16 function_number; + u16 vendor_length; + struct acpi_resource_source resource_source; + struct acpi_resource_label resource_source_label; + u8 *vendor_data; +} __attribute__((packed)); + +struct acpi_resource_pin_group_config { + u8 revision_id; + u8 producer_consumer; + u8 shareable; + u8 pin_config_type; + u32 
pin_config_value; + u16 vendor_length; + struct acpi_resource_source resource_source; + struct acpi_resource_label resource_source_label; + u8 *vendor_data; +} __attribute__((packed)); + +union acpi_resource_data { + struct acpi_resource_irq irq; + struct acpi_resource_dma dma; + struct acpi_resource_start_dependent start_dpf; + struct acpi_resource_io io; + struct acpi_resource_fixed_io fixed_io; + struct acpi_resource_fixed_dma fixed_dma; + struct acpi_resource_vendor vendor; + struct acpi_resource_vendor_typed vendor_typed; + struct acpi_resource_end_tag end_tag; + struct acpi_resource_memory24 memory24; + struct acpi_resource_memory32 memory32; + struct acpi_resource_fixed_memory32 fixed_memory32; + struct acpi_resource_address16 address16; + struct acpi_resource_address32 address32; + struct acpi_resource_address64 address64; + struct acpi_resource_extended_address64 ext_address64; + struct acpi_resource_extended_irq extended_irq; + struct acpi_resource_generic_register generic_reg; + struct acpi_resource_gpio gpio; + struct acpi_resource_i2c_serialbus i2c_serial_bus; + struct acpi_resource_spi_serialbus spi_serial_bus; + struct acpi_resource_uart_serialbus uart_serial_bus; + struct acpi_resource_common_serialbus common_serial_bus; + struct acpi_resource_pin_function pin_function; + struct acpi_resource_pin_config pin_config; + struct acpi_resource_pin_group pin_group; + struct acpi_resource_pin_group_function pin_group_function; + struct acpi_resource_pin_group_config pin_group_config; + struct acpi_resource_address address; +}; + +struct acpi_resource { + u32 type; + u32 length; + union acpi_resource_data data; +} __attribute__((packed)); + +struct acpi_gpiolib_dmi_quirk { + bool no_edge_events_on_boot; + char *ignore_wake; +}; + +struct acpi_gpio_event { + struct list_head node; + acpi_handle handle; + irq_handler_t handler; + unsigned int pin; + unsigned int irq; + long unsigned int irqflags; + bool irq_is_wake; + bool irq_requested; + struct gpio_desc___2 *desc; +}; + +struct acpi_gpio_connection { + struct list_head node; + unsigned int pin; + struct gpio_desc___2 *desc; +}; + +struct acpi_gpio_chip { + struct acpi_connection_info conn_info; + struct list_head conns; + struct mutex conn_lock; + struct gpio_chip *chip; + struct list_head events; + struct list_head deferred_req_irqs_list_entry; +}; + +struct acpi_gpio_lookup { + struct acpi_gpio_info info; + int index; + int pin_index; + bool active_low; + struct gpio_desc___2 *desc; + int n; +}; + +struct bgpio_pdata { + const char *label; + int base; + int ngpio; +}; + +enum pwm_polarity { + PWM_POLARITY_NORMAL = 0, + PWM_POLARITY_INVERSED = 1, +}; + +struct pwm_args { + u64 period; + enum pwm_polarity polarity; +}; + +struct pwm_state { + u64 period; + u64 duty_cycle; + enum pwm_polarity polarity; + bool enabled; +}; + +struct pwm_chip; + +struct pwm_device { + const char *label; + long unsigned int flags; + unsigned int hwpwm; + unsigned int pwm; + struct pwm_chip *chip; + void *chip_data; + struct pwm_args args; + struct pwm_state state; + struct pwm_state last; +}; + +struct pwm_ops; + +struct pwm_chip { + struct device *dev; + const struct pwm_ops *ops; + int base; + unsigned int npwm; + struct pwm_device * (*of_xlate)(struct pwm_chip *, const struct of_phandle_args *); + unsigned int of_pwm_n_cells; + struct list_head list; + struct pwm_device *pwms; +}; + +struct pwm_capture; + +struct pwm_ops { + int (*request)(struct pwm_chip *, struct pwm_device *); + void (*free)(struct pwm_chip *, struct pwm_device *); + int 
(*capture)(struct pwm_chip *, struct pwm_device *, struct pwm_capture *, long unsigned int); + int (*apply)(struct pwm_chip *, struct pwm_device *, const struct pwm_state *); + void (*get_state)(struct pwm_chip *, struct pwm_device *, struct pwm_state *); + struct module *owner; + int (*config)(struct pwm_chip *, struct pwm_device *, int, int); + int (*set_polarity)(struct pwm_chip *, struct pwm_device *, enum pwm_polarity); + int (*enable)(struct pwm_chip *, struct pwm_device *); + void (*disable)(struct pwm_chip *, struct pwm_device *); +}; + +struct pwm_capture { + unsigned int period; + unsigned int duty_cycle; +}; + +struct mvebu_gpio_chip; + +struct mvebu_pwm { + void *membase; + long unsigned int clk_rate; + struct gpio_desc *gpiod; + struct pwm_chip chip; + spinlock_t lock; + struct mvebu_gpio_chip *mvchip; + u32 blink_select; + u32 blink_on_duration; + u32 blink_off_duration; +}; + +struct mvebu_gpio_chip { + struct gpio_chip chip; + struct regmap *regs; + u32 offset; + struct regmap *percpu_regs; + int irqbase; + struct irq_domain *domain; + int soc_variant; + struct clk *clk; + struct mvebu_pwm *mvpwm; + u32 out_reg; + u32 io_conf_reg; + u32 blink_en_reg; + u32 in_pol_reg; + u32 edge_mask_regs[4]; + u32 level_mask_regs[4]; +}; + +struct amba_id { + unsigned int id; + unsigned int mask; + void *data; +}; + +struct amba_cs_uci_id { + unsigned int devarch; + unsigned int devarch_mask; + unsigned int devtype; + void *data; +}; + +struct amba_device { + struct device dev; + struct resource res; + struct clk *pclk; + struct device_dma_parameters dma_parms; + unsigned int periphid; + unsigned int cid; + struct amba_cs_uci_id uci; + unsigned int irq[9]; + char *driver_override; +}; + +struct amba_driver { + struct device_driver drv; + int (*probe)(struct amba_device *, const struct amba_id *); + int (*remove)(struct amba_device *); + void (*shutdown)(struct amba_device *); + const struct amba_id *id_table; +}; + +struct pl061_context_save_regs { + u8 gpio_data; + u8 gpio_dir; + u8 gpio_is; + u8 gpio_ibe; + u8 gpio_iev; + u8 gpio_ie; +}; + +struct pl061 { + raw_spinlock_t lock; + void *base; + struct gpio_chip gc; + struct irq_chip irq_chip; + int parent_irq; + struct pl061_context_save_regs csave_regs; +}; + +enum rpi_firmware_property_tag { + RPI_FIRMWARE_PROPERTY_END = 0, + RPI_FIRMWARE_GET_FIRMWARE_REVISION = 1, + RPI_FIRMWARE_SET_CURSOR_INFO = 32784, + RPI_FIRMWARE_SET_CURSOR_STATE = 32785, + RPI_FIRMWARE_GET_BOARD_MODEL = 65537, + RPI_FIRMWARE_GET_BOARD_REVISION = 65538, + RPI_FIRMWARE_GET_BOARD_MAC_ADDRESS = 65539, + RPI_FIRMWARE_GET_BOARD_SERIAL = 65540, + RPI_FIRMWARE_GET_ARM_MEMORY = 65541, + RPI_FIRMWARE_GET_VC_MEMORY = 65542, + RPI_FIRMWARE_GET_CLOCKS = 65543, + RPI_FIRMWARE_GET_POWER_STATE = 131073, + RPI_FIRMWARE_GET_TIMING = 131074, + RPI_FIRMWARE_SET_POWER_STATE = 163841, + RPI_FIRMWARE_GET_CLOCK_STATE = 196609, + RPI_FIRMWARE_GET_CLOCK_RATE = 196610, + RPI_FIRMWARE_GET_VOLTAGE = 196611, + RPI_FIRMWARE_GET_MAX_CLOCK_RATE = 196612, + RPI_FIRMWARE_GET_MAX_VOLTAGE = 196613, + RPI_FIRMWARE_GET_TEMPERATURE = 196614, + RPI_FIRMWARE_GET_MIN_CLOCK_RATE = 196615, + RPI_FIRMWARE_GET_MIN_VOLTAGE = 196616, + RPI_FIRMWARE_GET_TURBO = 196617, + RPI_FIRMWARE_GET_MAX_TEMPERATURE = 196618, + RPI_FIRMWARE_GET_STC = 196619, + RPI_FIRMWARE_ALLOCATE_MEMORY = 196620, + RPI_FIRMWARE_LOCK_MEMORY = 196621, + RPI_FIRMWARE_UNLOCK_MEMORY = 196622, + RPI_FIRMWARE_RELEASE_MEMORY = 196623, + RPI_FIRMWARE_EXECUTE_CODE = 196624, + RPI_FIRMWARE_EXECUTE_QPU = 196625, + RPI_FIRMWARE_SET_ENABLE_QPU = 196626, 
+ RPI_FIRMWARE_GET_DISPMANX_RESOURCE_MEM_HANDLE = 196628, + RPI_FIRMWARE_GET_EDID_BLOCK = 196640, + RPI_FIRMWARE_GET_CUSTOMER_OTP = 196641, + RPI_FIRMWARE_GET_DOMAIN_STATE = 196656, + RPI_FIRMWARE_GET_THROTTLED = 196678, + RPI_FIRMWARE_GET_CLOCK_MEASURED = 196679, + RPI_FIRMWARE_NOTIFY_REBOOT = 196680, + RPI_FIRMWARE_SET_CLOCK_STATE = 229377, + RPI_FIRMWARE_SET_CLOCK_RATE = 229378, + RPI_FIRMWARE_SET_VOLTAGE = 229379, + RPI_FIRMWARE_SET_TURBO = 229385, + RPI_FIRMWARE_SET_CUSTOMER_OTP = 229409, + RPI_FIRMWARE_SET_DOMAIN_STATE = 229424, + RPI_FIRMWARE_GET_GPIO_STATE = 196673, + RPI_FIRMWARE_SET_GPIO_STATE = 229441, + RPI_FIRMWARE_SET_SDHOST_CLOCK = 229442, + RPI_FIRMWARE_GET_GPIO_CONFIG = 196675, + RPI_FIRMWARE_SET_GPIO_CONFIG = 229443, + RPI_FIRMWARE_GET_PERIPH_REG = 196677, + RPI_FIRMWARE_SET_PERIPH_REG = 229445, + RPI_FIRMWARE_GET_POE_HAT_VAL = 196681, + RPI_FIRMWARE_SET_POE_HAT_VAL = 196688, + RPI_FIRMWARE_NOTIFY_XHCI_RESET = 196696, + RPI_FIRMWARE_FRAMEBUFFER_ALLOCATE = 262145, + RPI_FIRMWARE_FRAMEBUFFER_BLANK = 262146, + RPI_FIRMWARE_FRAMEBUFFER_GET_PHYSICAL_WIDTH_HEIGHT = 262147, + RPI_FIRMWARE_FRAMEBUFFER_GET_VIRTUAL_WIDTH_HEIGHT = 262148, + RPI_FIRMWARE_FRAMEBUFFER_GET_DEPTH = 262149, + RPI_FIRMWARE_FRAMEBUFFER_GET_PIXEL_ORDER = 262150, + RPI_FIRMWARE_FRAMEBUFFER_GET_ALPHA_MODE = 262151, + RPI_FIRMWARE_FRAMEBUFFER_GET_PITCH = 262152, + RPI_FIRMWARE_FRAMEBUFFER_GET_VIRTUAL_OFFSET = 262153, + RPI_FIRMWARE_FRAMEBUFFER_GET_OVERSCAN = 262154, + RPI_FIRMWARE_FRAMEBUFFER_GET_PALETTE = 262155, + RPI_FIRMWARE_FRAMEBUFFER_GET_TOUCHBUF = 262159, + RPI_FIRMWARE_FRAMEBUFFER_GET_GPIOVIRTBUF = 262160, + RPI_FIRMWARE_FRAMEBUFFER_RELEASE = 294913, + RPI_FIRMWARE_FRAMEBUFFER_TEST_PHYSICAL_WIDTH_HEIGHT = 278531, + RPI_FIRMWARE_FRAMEBUFFER_TEST_VIRTUAL_WIDTH_HEIGHT = 278532, + RPI_FIRMWARE_FRAMEBUFFER_TEST_DEPTH = 278533, + RPI_FIRMWARE_FRAMEBUFFER_TEST_PIXEL_ORDER = 278534, + RPI_FIRMWARE_FRAMEBUFFER_TEST_ALPHA_MODE = 278535, + RPI_FIRMWARE_FRAMEBUFFER_TEST_VIRTUAL_OFFSET = 278537, + RPI_FIRMWARE_FRAMEBUFFER_TEST_OVERSCAN = 278538, + RPI_FIRMWARE_FRAMEBUFFER_TEST_PALETTE = 278539, + RPI_FIRMWARE_FRAMEBUFFER_TEST_VSYNC = 278542, + RPI_FIRMWARE_FRAMEBUFFER_SET_PHYSICAL_WIDTH_HEIGHT = 294915, + RPI_FIRMWARE_FRAMEBUFFER_SET_VIRTUAL_WIDTH_HEIGHT = 294916, + RPI_FIRMWARE_FRAMEBUFFER_SET_DEPTH = 294917, + RPI_FIRMWARE_FRAMEBUFFER_SET_PIXEL_ORDER = 294918, + RPI_FIRMWARE_FRAMEBUFFER_SET_ALPHA_MODE = 294919, + RPI_FIRMWARE_FRAMEBUFFER_SET_VIRTUAL_OFFSET = 294921, + RPI_FIRMWARE_FRAMEBUFFER_SET_OVERSCAN = 294922, + RPI_FIRMWARE_FRAMEBUFFER_SET_PALETTE = 294923, + RPI_FIRMWARE_FRAMEBUFFER_SET_TOUCHBUF = 294943, + RPI_FIRMWARE_FRAMEBUFFER_SET_GPIOVIRTBUF = 294944, + RPI_FIRMWARE_FRAMEBUFFER_SET_VSYNC = 294926, + RPI_FIRMWARE_FRAMEBUFFER_SET_BACKLIGHT = 294927, + RPI_FIRMWARE_VCHIQ_INIT = 294928, + RPI_FIRMWARE_GET_COMMAND_LINE = 327681, + RPI_FIRMWARE_GET_DMA_CHANNELS = 393217, +}; + +struct rpi_firmware; + +struct rpi_exp_gpio { + struct gpio_chip gc; + struct rpi_firmware *fw; +}; + +struct gpio_set_config { + u32 gpio; + u32 direction; + u32 polarity; + u32 term_en; + u32 term_pull_up; + u32 state; +}; + +struct gpio_get_config { + u32 gpio; + u32 direction; + u32 polarity; + u32 term_en; + u32 term_pull_up; +}; + +struct gpio_get_set_state { + u32 gpio; + u32 state; +}; + +struct tegra_gpio_port { + const char *name; + unsigned int bank; + unsigned int port; + unsigned int pins; +}; + +struct tegra186_pin_range { + unsigned int offset; + const char *group; +}; + +struct tegra_gpio_soc { + const struct 
tegra_gpio_port *ports; + unsigned int num_ports; + const char *name; + unsigned int instance; + const struct tegra186_pin_range *pin_ranges; + unsigned int num_pin_ranges; + const char *pinmux; +}; + +struct tegra_gpio { + struct gpio_chip gpio; + struct irq_chip intc; + unsigned int num_irq; + unsigned int *irq; + const struct tegra_gpio_soc *soc; + void *secure; + void *base; +}; + +struct tegra_gpio_info; + +struct tegra_gpio_bank { + unsigned int bank; + unsigned int irq; + spinlock_t lvl_lock[4]; + spinlock_t dbc_lock[4]; + u32 cnf[4]; + u32 out[4]; + u32 oe[4]; + u32 int_enb[4]; + u32 int_lvl[4]; + u32 wake_enb[4]; + u32 dbc_enb[4]; + u32 dbc_cnt[4]; + struct tegra_gpio_info *tgi; +}; + +struct tegra_gpio_soc_config; + +struct tegra_gpio_info { + struct device *dev; + void *regs; + struct irq_domain *irq_domain; + struct tegra_gpio_bank *bank_info; + const struct tegra_gpio_soc_config *soc; + struct gpio_chip gc; + struct irq_chip ic; + u32 bank_count; +}; + +struct tegra_gpio_soc_config { + bool debounce_supported; + u32 bank_stride; + u32 upper_offset; +}; + +struct msix_entry { + u32 vector; + u16 entry; +}; + +struct thunderx_gpio; + +struct thunderx_line { + struct thunderx_gpio *txgpio; + unsigned int line; + unsigned int fil_bits; +}; + +struct thunderx_gpio { + struct gpio_chip chip; + u8 *register_base; + struct msix_entry *msix_entries; + struct thunderx_line *line_entries; + raw_spinlock_t lock; + long unsigned int invert_mask[2]; + long unsigned int od_mask[2]; + int base_msi; +}; + +struct xgene_gpio { + struct gpio_chip chip; + void *base; + spinlock_t lock; + u32 set_dr_val[3]; +}; + +enum { + PWMF_REQUESTED = 1, + PWMF_EXPORTED = 2, +}; + +struct pwm_lookup { + struct list_head list; + const char *provider; + unsigned int index; + const char *dev_id; + const char *con_id; + unsigned int period; + enum pwm_polarity polarity; + const char *module; +}; + +struct trace_event_raw_pwm { + struct trace_entry ent; + struct pwm_device *pwm; + u64 period; + u64 duty_cycle; + enum pwm_polarity polarity; + bool enabled; + char __data[0]; +}; + +struct trace_event_data_offsets_pwm {}; + +typedef void (*btf_trace_pwm_apply)(void *, struct pwm_device *, const struct pwm_state *); + +typedef void (*btf_trace_pwm_get)(void *, struct pwm_device *, const struct pwm_state *); + +struct pwm_export { + struct device child; + struct pwm_device *pwm; + struct mutex lock; + struct pwm_state suspend; +}; + +enum { + pci_channel_io_normal = 1, + pci_channel_io_frozen = 2, + pci_channel_io_perm_failure = 3, +}; + +struct pci_sriov { + int pos; + int nres; + u32 cap; + u16 ctrl; + u16 total_VFs; + u16 initial_VFs; + u16 num_VFs; + u16 offset; + u16 stride; + u16 vf_device; + u32 pgsz; + u8 link; + u8 max_VF_buses; + u16 driver_max_VFs; + struct pci_dev *dev; + struct pci_dev *self; + u32 class; + u8 hdr_type; + u16 subsystem_vendor; + u16 subsystem_device; + resource_size_t barsz[6]; + bool drivers_autoprobe; +}; + +struct pci_bus_resource { + struct list_head list; + struct resource *res; + unsigned int flags; +}; + +typedef u64 pci_bus_addr_t; + +struct pci_bus_region { + pci_bus_addr_t start; + pci_bus_addr_t end; +}; + +enum pci_fixup_pass { + pci_fixup_early = 0, + pci_fixup_header = 1, + pci_fixup_final = 2, + pci_fixup_enable = 3, + pci_fixup_resume = 4, + pci_fixup_suspend = 5, + pci_fixup_resume_early = 6, + pci_fixup_suspend_late = 7, +}; + +struct hotplug_slot_ops; + +struct hotplug_slot { + const struct hotplug_slot_ops *ops; + struct list_head slot_list; + struct pci_slot *pci_slot; 
+ struct module *owner; + const char *mod_name; +}; + +enum pci_dev_flags { + PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = 1, + PCI_DEV_FLAGS_NO_D3 = 2, + PCI_DEV_FLAGS_ASSIGNED = 4, + PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = 8, + PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = 32, + PCI_DEV_FLAGS_NO_BUS_RESET = 64, + PCI_DEV_FLAGS_NO_PM_RESET = 128, + PCI_DEV_FLAGS_VPD_REF_F0 = 256, + PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = 512, + PCI_DEV_FLAGS_NO_FLR_RESET = 1024, + PCI_DEV_FLAGS_NO_RELAXED_ORDERING = 2048, +}; + +enum pci_bus_flags { + PCI_BUS_FLAGS_NO_MSI = 1, + PCI_BUS_FLAGS_NO_MMRBC = 2, + PCI_BUS_FLAGS_NO_AERSID = 4, + PCI_BUS_FLAGS_NO_EXTCFG = 8, +}; + +enum pci_bus_speed { + PCI_SPEED_33MHz = 0, + PCI_SPEED_66MHz = 1, + PCI_SPEED_66MHz_PCIX = 2, + PCI_SPEED_100MHz_PCIX = 3, + PCI_SPEED_133MHz_PCIX = 4, + PCI_SPEED_66MHz_PCIX_ECC = 5, + PCI_SPEED_100MHz_PCIX_ECC = 6, + PCI_SPEED_133MHz_PCIX_ECC = 7, + PCI_SPEED_66MHz_PCIX_266 = 9, + PCI_SPEED_100MHz_PCIX_266 = 10, + PCI_SPEED_133MHz_PCIX_266 = 11, + AGP_UNKNOWN = 12, + AGP_1X = 13, + AGP_2X = 14, + AGP_4X = 15, + AGP_8X = 16, + PCI_SPEED_66MHz_PCIX_533 = 17, + PCI_SPEED_100MHz_PCIX_533 = 18, + PCI_SPEED_133MHz_PCIX_533 = 19, + PCIE_SPEED_2_5GT = 20, + PCIE_SPEED_5_0GT = 21, + PCIE_SPEED_8_0GT = 22, + PCIE_SPEED_16_0GT = 23, + PCIE_SPEED_32_0GT = 24, + PCI_SPEED_UNKNOWN = 255, +}; + +enum { + PCI_REASSIGN_ALL_RSRC = 1, + PCI_REASSIGN_ALL_BUS = 2, + PCI_PROBE_ONLY = 4, + PCI_CAN_SKIP_ISA_ALIGN = 8, + PCI_ENABLE_PROC_DOMAINS = 16, + PCI_COMPAT_DOMAIN_0 = 32, + PCI_SCAN_ALL_PCIE_DEVS = 64, +}; + +enum pcie_bus_config_types { + PCIE_BUS_TUNE_OFF = 0, + PCIE_BUS_DEFAULT = 1, + PCIE_BUS_SAFE = 2, + PCIE_BUS_PERFORMANCE = 3, + PCIE_BUS_PEER2PEER = 4, +}; + +struct hotplug_slot_ops { + int (*enable_slot)(struct hotplug_slot *); + int (*disable_slot)(struct hotplug_slot *); + int (*set_attention_status)(struct hotplug_slot *, u8); + int (*hardware_test)(struct hotplug_slot *, u32); + int (*get_power_status)(struct hotplug_slot *, u8 *); + int (*get_attention_status)(struct hotplug_slot *, u8 *); + int (*get_latch_status)(struct hotplug_slot *, u8 *); + int (*get_adapter_status)(struct hotplug_slot *, u8 *); + int (*reset_slot)(struct hotplug_slot *, int); +}; + +enum pci_bar_type { + pci_bar_unknown = 0, + pci_bar_io = 1, + pci_bar_mem32 = 2, + pci_bar_mem64 = 3, +}; + +struct pci_domain_busn_res { + struct list_head list; + struct resource res; + int domain_nr; +}; + +struct bus_attribute { + struct attribute attr; + ssize_t (*show)(struct bus_type *, char *); + ssize_t (*store)(struct bus_type *, const char *, size_t); +}; + +enum pcie_reset_state { + pcie_deassert_reset = 1, + pcie_warm_reset = 2, + pcie_hot_reset = 3, +}; + +enum pcie_link_width { + PCIE_LNK_WIDTH_RESRV = 0, + PCIE_LNK_X1 = 1, + PCIE_LNK_X2 = 2, + PCIE_LNK_X4 = 4, + PCIE_LNK_X8 = 8, + PCIE_LNK_X12 = 12, + PCIE_LNK_X16 = 16, + PCIE_LNK_X32 = 32, + PCIE_LNK_WIDTH_UNKNOWN = 255, +}; + +struct pci_cap_saved_data { + u16 cap_nr; + bool cap_extended; + unsigned int size; + u32 data[0]; +}; + +struct pci_cap_saved_state { + struct hlist_node next; + struct pci_cap_saved_data cap; +}; + +typedef int (*arch_set_vga_state_t)(struct pci_dev *, bool, unsigned int, u32); + +struct pci_platform_pm_ops { + bool (*bridge_d3)(struct pci_dev *); + bool (*is_manageable)(struct pci_dev *); + int (*set_state)(struct pci_dev *, pci_power_t); + pci_power_t (*get_state)(struct pci_dev *); + void (*refresh_state)(struct pci_dev *); + pci_power_t (*choose_state)(struct pci_dev *); + int (*set_wakeup)(struct pci_dev *, bool); + bool 
(*need_resume)(struct pci_dev *); +}; + +struct pci_pme_device { + struct list_head list; + struct pci_dev *dev; +}; + +struct pci_saved_state { + u32 config_space[16]; + struct pci_cap_saved_data cap[0]; +}; + +struct pci_devres { + unsigned int enabled: 1; + unsigned int pinned: 1; + unsigned int orig_intx: 1; + unsigned int restore_intx: 1; + unsigned int mwi: 1; + u32 region_mask; +}; + +struct driver_attribute { + struct attribute attr; + ssize_t (*show)(struct device_driver *, char *); + ssize_t (*store)(struct device_driver *, const char *, size_t); +}; + +enum pci_ers_result { + PCI_ERS_RESULT_NONE = 1, + PCI_ERS_RESULT_CAN_RECOVER = 2, + PCI_ERS_RESULT_NEED_RESET = 3, + PCI_ERS_RESULT_DISCONNECT = 4, + PCI_ERS_RESULT_RECOVERED = 5, + PCI_ERS_RESULT_NO_AER_DRIVER = 6, +}; + +enum dev_dma_attr { + DEV_DMA_NOT_SUPPORTED = 0, + DEV_DMA_NON_COHERENT = 1, + DEV_DMA_COHERENT = 2, +}; + +struct pcie_device { + int irq; + struct pci_dev *port; + u32 service; + void *priv_data; + struct device device; +}; + +struct pcie_port_service_driver { + const char *name; + int (*probe)(struct pcie_device *); + void (*remove)(struct pcie_device *); + int (*suspend)(struct pcie_device *); + int (*resume_noirq)(struct pcie_device *); + int (*resume)(struct pcie_device *); + int (*runtime_suspend)(struct pcie_device *); + int (*runtime_resume)(struct pcie_device *); + void (*error_resume)(struct pci_dev *); + int port_type; + u32 service; + struct device_driver driver; +}; + +struct pci_dynid { + struct list_head node; + struct pci_device_id id; +}; + +struct drv_dev_and_id { + struct pci_driver *drv; + struct pci_dev *dev; + const struct pci_device_id *id; +}; + +enum pci_mmap_state { + pci_mmap_io = 0, + pci_mmap_mem = 1, +}; + +enum pci_mmap_api { + PCI_MMAP_SYSFS = 0, + PCI_MMAP_PROCFS = 1, +}; + +struct pci_vpd_ops; + +struct pci_vpd { + const struct pci_vpd_ops *ops; + struct bin_attribute *attr; + struct mutex lock; + unsigned int len; + u16 flag; + u8 cap; + unsigned int busy: 1; + unsigned int valid: 1; +}; + +struct pci_vpd_ops { + ssize_t (*read)(struct pci_dev *, loff_t, size_t, void *); + ssize_t (*write)(struct pci_dev *, loff_t, size_t, const void *); + int (*set_size)(struct pci_dev *, size_t); +}; + +struct pci_dev_resource { + struct list_head list; + struct resource *res; + struct pci_dev *dev; + resource_size_t start; + resource_size_t end; + resource_size_t add_size; + resource_size_t min_align; + long unsigned int flags; +}; + +enum release_type { + leaf_only = 0, + whole_subtree = 1, +}; + +enum enable_type { + undefined = 4294967295, + user_disabled = 0, + auto_disabled = 1, + user_enabled = 2, + auto_enabled = 3, +}; + +struct portdrv_service_data { + struct pcie_port_service_driver *drv; + struct device *dev; + u32 service; +}; + +typedef int (*pcie_pm_callback_t)(struct pcie_device *); + +struct aspm_latency { + u32 l0s; + u32 l1; +}; + +struct pcie_link_state { + struct pci_dev *pdev; + struct pci_dev *downstream; + struct pcie_link_state *root; + struct pcie_link_state *parent; + struct list_head sibling; + u32 aspm_support: 7; + u32 aspm_enabled: 7; + u32 aspm_capable: 7; + u32 aspm_default: 7; + char: 4; + u32 aspm_disable: 7; + u32 clkpm_capable: 1; + u32 clkpm_enabled: 1; + u32 clkpm_default: 1; + u32 clkpm_disable: 1; + struct aspm_latency latency_up; + struct aspm_latency latency_dw; + struct aspm_latency acceptable[8]; +}; + +enum { + CPER_SEV_RECOVERABLE = 0, + CPER_SEV_FATAL = 1, + CPER_SEV_CORRECTED = 2, + CPER_SEV_INFORMATIONAL = 3, +}; + +struct aer_stats { + u64 
dev_cor_errs[16]; + u64 dev_fatal_errs[27]; + u64 dev_nonfatal_errs[27]; + u64 dev_total_cor_errs; + u64 dev_total_fatal_errs; + u64 dev_total_nonfatal_errs; + u64 rootport_total_cor_errs; + u64 rootport_total_fatal_errs; + u64 rootport_total_nonfatal_errs; +}; + +struct aer_header_log_regs { + unsigned int dw0; + unsigned int dw1; + unsigned int dw2; + unsigned int dw3; +}; + +struct aer_capability_regs { + u32 header; + u32 uncor_status; + u32 uncor_mask; + u32 uncor_severity; + u32 cor_status; + u32 cor_mask; + u32 cap_control; + struct aer_header_log_regs header_log; + u32 root_command; + u32 root_status; + u16 cor_err_source; + u16 uncor_err_source; +}; + +struct aer_err_info { + struct pci_dev *dev[5]; + int error_dev_num; + unsigned int id: 16; + unsigned int severity: 2; + unsigned int __pad1: 5; + unsigned int multi_error_valid: 1; + unsigned int first_error: 5; + unsigned int __pad2: 2; + unsigned int tlp_header_valid: 1; + unsigned int status; + unsigned int mask; + struct aer_header_log_regs tlp; +}; + +struct aer_err_source { + unsigned int status; + unsigned int id; +}; + +struct aer_rpc { + struct pci_dev *rpd; + struct { + union { + struct __kfifo kfifo; + struct aer_err_source *type; + const struct aer_err_source *const_type; + char (*rectype)[0]; + struct aer_err_source *ptr; + const struct aer_err_source *ptr_const; + }; + struct aer_err_source buf[128]; + } aer_fifo; +}; + +struct aer_recover_entry { + u8 bus; + u8 devfn; + u16 domain; + int severity; + struct aer_capability_regs *regs; +}; + +struct pcie_pme_service_data { + spinlock_t lock; + struct pcie_device *srv; + struct work_struct work; + bool noirq; +}; + +struct pci_slot_attribute { + struct attribute attr; + ssize_t (*show)(struct pci_slot *, char *); + ssize_t (*store)(struct pci_slot *, const char *, size_t); +}; + +struct acpi_buffer { + acpi_size length; + void *pointer; +}; + +struct acpi_bus_type { + struct list_head list; + const char *name; + bool (*match)(struct device *); + struct acpi_device * (*find_companion)(struct device *); + void (*setup)(struct device *); + void (*cleanup)(struct device *); +}; + +enum pm_qos_flags_status { + PM_QOS_FLAGS_UNDEFINED = 4294967295, + PM_QOS_FLAGS_NONE = 0, + PM_QOS_FLAGS_SOME = 1, + PM_QOS_FLAGS_ALL = 2, +}; + +struct hpx_type0 { + u32 revision; + u8 cache_line_size; + u8 latency_timer; + u8 enable_serr; + u8 enable_perr; +}; + +struct hpx_type1 { + u32 revision; + u8 max_mem_read; + u8 avg_max_split; + u16 tot_max_split; +}; + +struct hpx_type2 { + u32 revision; + u32 unc_err_mask_and; + u32 unc_err_mask_or; + u32 unc_err_sever_and; + u32 unc_err_sever_or; + u32 cor_err_mask_and; + u32 cor_err_mask_or; + u32 adv_err_cap_and; + u32 adv_err_cap_or; + u16 pci_exp_devctl_and; + u16 pci_exp_devctl_or; + u16 pci_exp_lnkctl_and; + u16 pci_exp_lnkctl_or; + u32 sec_unc_err_sever_and; + u32 sec_unc_err_sever_or; + u32 sec_unc_err_mask_and; + u32 sec_unc_err_mask_or; +}; + +struct hpx_type3 { + u16 device_type; + u16 function_type; + u16 config_space_location; + u16 pci_exp_cap_id; + u16 pci_exp_cap_ver; + u16 pci_exp_vendor_id; + u16 dvsec_id; + u16 dvsec_rev; + u16 match_offset; + u32 match_mask_and; + u32 match_value; + u16 reg_offset; + u32 reg_mask_and; + u32 reg_mask_or; +}; + +enum hpx_type3_dev_type { + HPX_TYPE_ENDPOINT = 1, + HPX_TYPE_LEG_END = 2, + HPX_TYPE_RC_END = 4, + HPX_TYPE_RC_EC = 8, + HPX_TYPE_ROOT_PORT = 16, + HPX_TYPE_UPSTREAM = 32, + HPX_TYPE_DOWNSTREAM = 64, + HPX_TYPE_PCI_BRIDGE = 128, + HPX_TYPE_PCIE_BRIDGE = 256, +}; + +enum hpx_type3_fn_type { 
+ HPX_FN_NORMAL = 1, + HPX_FN_SRIOV_PHYS = 2, + HPX_FN_SRIOV_VIRT = 4, +}; + +enum hpx_type3_cfg_loc { + HPX_CFG_PCICFG = 0, + HPX_CFG_PCIE_CAP = 1, + HPX_CFG_PCIE_CAP_EXT = 2, + HPX_CFG_VEND_CAP = 3, + HPX_CFG_DVSEC = 4, + HPX_CFG_MAX = 5, +}; + +struct of_bus; + +struct of_pci_range_parser { + struct device_node *node; + struct of_bus *bus; + const __be32 *range; + const __be32 *end; + int na; + int ns; + int pna; + bool dma; +}; + +struct of_pci_range { + union { + u64 pci_addr; + u64 bus_addr; + }; + u64 cpu_addr; + u64 size; + u32 flags; +}; + +struct pci_fixup { + u16 vendor; + u16 device; + u32 class; + unsigned int class_shift; + int hook_offset; +}; + +enum { + NVME_REG_CAP = 0, + NVME_REG_VS = 8, + NVME_REG_INTMS = 12, + NVME_REG_INTMC = 16, + NVME_REG_CC = 20, + NVME_REG_CSTS = 28, + NVME_REG_NSSR = 32, + NVME_REG_AQA = 36, + NVME_REG_ASQ = 40, + NVME_REG_ACQ = 48, + NVME_REG_CMBLOC = 56, + NVME_REG_CMBSZ = 60, + NVME_REG_BPINFO = 64, + NVME_REG_BPRSEL = 68, + NVME_REG_BPMBL = 72, + NVME_REG_PMRCAP = 3584, + NVME_REG_PMRCTL = 3588, + NVME_REG_PMRSTS = 3592, + NVME_REG_PMREBS = 3596, + NVME_REG_PMRSWTP = 3600, + NVME_REG_DBS = 4096, +}; + +enum { + NVME_CC_ENABLE = 1, + NVME_CC_EN_SHIFT = 0, + NVME_CC_CSS_SHIFT = 4, + NVME_CC_MPS_SHIFT = 7, + NVME_CC_AMS_SHIFT = 11, + NVME_CC_SHN_SHIFT = 14, + NVME_CC_IOSQES_SHIFT = 16, + NVME_CC_IOCQES_SHIFT = 20, + NVME_CC_CSS_NVM = 0, + NVME_CC_CSS_CSI = 96, + NVME_CC_CSS_MASK = 112, + NVME_CC_AMS_RR = 0, + NVME_CC_AMS_WRRU = 2048, + NVME_CC_AMS_VS = 14336, + NVME_CC_SHN_NONE = 0, + NVME_CC_SHN_NORMAL = 16384, + NVME_CC_SHN_ABRUPT = 32768, + NVME_CC_SHN_MASK = 49152, + NVME_CC_IOSQES = 393216, + NVME_CC_IOCQES = 4194304, + NVME_CAP_CSS_NVM = 1, + NVME_CAP_CSS_CSI = 64, + NVME_CSTS_RDY = 1, + NVME_CSTS_CFS = 2, + NVME_CSTS_NSSRO = 16, + NVME_CSTS_PP = 32, + NVME_CSTS_SHST_NORMAL = 0, + NVME_CSTS_SHST_OCCUR = 4, + NVME_CSTS_SHST_CMPLT = 8, + NVME_CSTS_SHST_MASK = 12, +}; + +enum { + NVME_AEN_BIT_NS_ATTR = 8, + NVME_AEN_BIT_FW_ACT = 9, + NVME_AEN_BIT_ANA_CHANGE = 11, + NVME_AEN_BIT_DISC_CHANGE = 31, +}; + +enum { + SWITCHTEC_GAS_MRPC_OFFSET = 0, + SWITCHTEC_GAS_TOP_CFG_OFFSET = 4096, + SWITCHTEC_GAS_SW_EVENT_OFFSET = 6144, + SWITCHTEC_GAS_SYS_INFO_OFFSET = 8192, + SWITCHTEC_GAS_FLASH_INFO_OFFSET = 8704, + SWITCHTEC_GAS_PART_CFG_OFFSET = 16384, + SWITCHTEC_GAS_NTB_OFFSET = 65536, + SWITCHTEC_GAS_PFF_CSR_OFFSET = 1261568, +}; + +enum { + SWITCHTEC_NTB_REG_INFO_OFFSET = 0, + SWITCHTEC_NTB_REG_CTRL_OFFSET = 16384, + SWITCHTEC_NTB_REG_DBMSG_OFFSET = 409600, +}; + +struct nt_partition_info { + u32 xlink_enabled; + u32 target_part_low; + u32 target_part_high; + u32 reserved; +}; + +struct ntb_info_regs { + u8 partition_count; + u8 partition_id; + u16 reserved1; + u64 ep_map; + u16 requester_id; + u16 reserved2; + u32 reserved3[4]; + struct nt_partition_info ntp_info[48]; +} __attribute__((packed)); + +struct ntb_ctrl_regs { + u32 partition_status; + u32 partition_op; + u32 partition_ctrl; + u32 bar_setup; + u32 bar_error; + u16 lut_table_entries; + u16 lut_table_offset; + u32 lut_error; + u16 req_id_table_size; + u16 req_id_table_offset; + u32 req_id_error; + u32 reserved1[7]; + struct { + u32 ctl; + u32 win_size; + u64 xlate_addr; + } bar_entry[6]; + struct { + u32 win_size; + u32 reserved[3]; + } bar_ext_entry[6]; + u32 reserved2[192]; + u32 req_id_table[512]; + u32 reserved3[256]; + u64 lut_entry[512]; +}; + +struct pci_dev_reset_methods { + u16 vendor; + u16 device; + int (*reset)(struct pci_dev *, int); +}; + +struct acs_on_id { + short unsigned 
int vendor; + short unsigned int device; +}; + +struct pci_dev_acs_enabled { + u16 vendor; + u16 device; + int (*acs_enabled)(struct pci_dev *, u16); +}; + +struct pci_dev_acs_ops { + u16 vendor; + u16 device; + int (*enable_acs)(struct pci_dev *); + int (*disable_acs_redir)(struct pci_dev *); +}; + +struct slot { + u8 number; + unsigned int devfn; + struct pci_bus *bus; + struct pci_dev *dev; + unsigned int latch_status: 1; + unsigned int adapter_status: 1; + unsigned int extracting; + struct hotplug_slot hotplug_slot; + struct list_head slot_list; +}; + +struct cpci_hp_controller_ops { + int (*query_enum)(); + int (*enable_irq)(); + int (*disable_irq)(); + int (*check_irq)(void *); + int (*hardware_test)(struct slot *, u32); + u8 (*get_power)(struct slot *); + int (*set_power)(struct slot *, int); +}; + +struct cpci_hp_controller { + unsigned int irq; + long unsigned int irq_flags; + char *devname; + void *dev_id; + char *name; + struct cpci_hp_controller_ops *ops; +}; + +struct controller { + struct pcie_device *pcie; + u32 slot_cap; + unsigned int inband_presence_disabled: 1; + u16 slot_ctrl; + struct mutex ctrl_lock; + long unsigned int cmd_started; + unsigned int cmd_busy: 1; + wait_queue_head_t queue; + atomic_t pending_events; + unsigned int notification_enabled: 1; + unsigned int power_fault_detected; + struct task_struct *poll_thread; + u8 state; + struct mutex state_lock; + struct delayed_work button_work; + struct hotplug_slot hotplug_slot; + struct rw_semaphore reset_lock; + unsigned int ist_running; + int request_result; + wait_queue_head_t requester; +}; + +struct acpiphp_slot; + +struct slot___2 { + struct hotplug_slot hotplug_slot; + struct acpiphp_slot *acpi_slot; + unsigned int sun; +}; + +struct acpiphp_slot { + struct list_head node; + struct pci_bus *bus; + struct list_head funcs; + struct slot___2 *slot; + u8 device; + u32 flags; +}; + +struct acpiphp_attention_info { + int (*set_attn)(struct hotplug_slot *, u8); + int (*get_attn)(struct hotplug_slot *, u8 *); + struct module *owner; +}; + +struct acpiphp_context; + +struct acpiphp_bridge { + struct list_head list; + struct list_head slots; + struct kref ref; + struct acpiphp_context *context; + int nr_slots; + struct pci_bus *pci_bus; + struct pci_dev *pci_dev; + bool is_going_away; +}; + +struct acpiphp_func { + struct acpiphp_bridge *parent; + struct acpiphp_slot *slot; + struct list_head sibling; + u8 function; + u32 flags; +}; + +struct acpiphp_context { + struct acpi_hotplug_context hp; + struct acpiphp_func func; + struct acpiphp_bridge *bridge; + unsigned int refcount; +}; + +struct acpiphp_root_context { + struct acpi_hotplug_context hp; + struct acpiphp_bridge *root_bridge; +}; + +struct pci_bridge_emul_conf { + __le16 vendor; + __le16 device; + __le16 command; + __le16 status; + __le32 class_revision; + u8 cache_line_size; + u8 latency_timer; + u8 header_type; + u8 bist; + __le32 bar[2]; + u8 primary_bus; + u8 secondary_bus; + u8 subordinate_bus; + u8 secondary_latency_timer; + u8 iobase; + u8 iolimit; + __le16 secondary_status; + __le16 membase; + __le16 memlimit; + __le16 pref_mem_base; + __le16 pref_mem_limit; + __le32 prefbaseupper; + __le32 preflimitupper; + __le16 iobaseupper; + __le16 iolimitupper; + u8 capabilities_pointer; + u8 reserve[3]; + __le32 romaddr; + u8 intline; + u8 intpin; + __le16 bridgectrl; +}; + +struct pci_bridge_emul_pcie_conf { + u8 cap_id; + u8 next; + __le16 cap; + __le32 devcap; + __le16 devctl; + __le16 devsta; + __le32 lnkcap; + __le16 lnkctl; + __le16 lnksta; + __le32 
slotcap; + __le16 slotctl; + __le16 slotsta; + __le16 rootctl; + __le16 rsvd; + __le32 rootsta; + __le32 devcap2; + __le16 devctl2; + __le16 devsta2; + __le32 lnkcap2; + __le16 lnkctl2; + __le16 lnksta2; + __le32 slotcap2; + __le16 slotctl2; + __le16 slotsta2; +}; + +typedef enum { + PCI_BRIDGE_EMUL_HANDLED = 0, + PCI_BRIDGE_EMUL_NOT_HANDLED = 1, +} pci_bridge_emul_read_status_t; + +struct pci_bridge_emul; + +struct pci_bridge_emul_ops { + pci_bridge_emul_read_status_t (*read_base)(struct pci_bridge_emul *, int, u32 *); + pci_bridge_emul_read_status_t (*read_pcie)(struct pci_bridge_emul *, int, u32 *); + void (*write_base)(struct pci_bridge_emul *, int, u32, u32, u32); + void (*write_pcie)(struct pci_bridge_emul *, int, u32, u32, u32); +}; + +struct pci_bridge_reg_behavior; + +struct pci_bridge_emul { + struct pci_bridge_emul_conf conf; + struct pci_bridge_emul_pcie_conf pcie_conf; + struct pci_bridge_emul_ops *ops; + struct pci_bridge_reg_behavior *pci_regs_behavior; + struct pci_bridge_reg_behavior *pcie_cap_regs_behavior; + void *data; + bool has_pcie; +}; + +struct pci_bridge_reg_behavior { + u32 ro; + u32 rw; + u32 w1c; +}; + +enum { + PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR = 1, +}; + +enum dmi_device_type { + DMI_DEV_TYPE_ANY = 0, + DMI_DEV_TYPE_OTHER = 1, + DMI_DEV_TYPE_UNKNOWN = 2, + DMI_DEV_TYPE_VIDEO = 3, + DMI_DEV_TYPE_SCSI = 4, + DMI_DEV_TYPE_ETHERNET = 5, + DMI_DEV_TYPE_TOKENRING = 6, + DMI_DEV_TYPE_SOUND = 7, + DMI_DEV_TYPE_PATA = 8, + DMI_DEV_TYPE_SATA = 9, + DMI_DEV_TYPE_SAS = 10, + DMI_DEV_TYPE_IPMI = 4294967295, + DMI_DEV_TYPE_OEM_STRING = 4294967294, + DMI_DEV_TYPE_DEV_ONBOARD = 4294967293, + DMI_DEV_TYPE_DEV_SLOT = 4294967292, +}; + +struct dmi_device { + struct list_head list; + int type; + const char *name; + void *device_data; +}; + +struct dmi_dev_onboard { + struct dmi_device dev; + int instance; + int segment; + int bus; + int devfn; +}; + +enum smbios_attr_enum { + SMBIOS_ATTR_NONE = 0, + SMBIOS_ATTR_LABEL_SHOW = 1, + SMBIOS_ATTR_INSTANCE_SHOW = 2, +}; + +enum acpi_attr_enum { + ACPI_ATTR_LABEL_SHOW = 0, + ACPI_ATTR_INDEX_SHOW = 1, +}; + +struct pci_epf_device_id { + char name[20]; + kernel_ulong_t driver_data; +}; + +enum pci_interrupt_pin { + PCI_INTERRUPT_UNKNOWN = 0, + PCI_INTERRUPT_INTA = 1, + PCI_INTERRUPT_INTB = 2, + PCI_INTERRUPT_INTC = 3, + PCI_INTERRUPT_INTD = 4, +}; + +enum pci_barno { + BAR_0 = 0, + BAR_1 = 1, + BAR_2 = 2, + BAR_3 = 3, + BAR_4 = 4, + BAR_5 = 5, +}; + +struct pci_epf_header { + u16 vendorid; + u16 deviceid; + u8 revid; + u8 progif_code; + u8 subclass_code; + u8 baseclass_code; + u8 cache_line_size; + u16 subsys_vendor_id; + u16 subsys_id; + enum pci_interrupt_pin interrupt_pin; +}; + +struct pci_epf; + +struct pci_epf_ops { + int (*bind)(struct pci_epf *); + void (*unbind)(struct pci_epf *); +}; + +struct pci_epf_bar { + dma_addr_t phys_addr; + void *addr; + size_t size; + enum pci_barno barno; + int flags; +}; + +struct pci_epc; + +struct pci_epf_driver; + +struct pci_epf { + struct device dev; + const char *name; + struct pci_epf_header *header; + struct pci_epf_bar bar[6]; + u8 msi_interrupts; + u16 msix_interrupts; + u8 func_no; + struct pci_epc *epc; + struct pci_epf_driver *driver; + struct list_head list; + struct notifier_block nb; + struct mutex lock; +}; + +struct pci_epf_driver { + int (*probe)(struct pci_epf *); + int (*remove)(struct pci_epf *); + struct device_driver driver; + struct pci_epf_ops *ops; + struct module *owner; + struct list_head epf_group; + const struct pci_epf_device_id *id_table; +}; + +struct pci_epc_ops; 
+ +struct pci_epc_mem; + +struct pci_epc { + struct device dev; + struct list_head pci_epf; + const struct pci_epc_ops *ops; + struct pci_epc_mem **windows; + struct pci_epc_mem *mem; + unsigned int num_windows; + u8 max_functions; + struct config_group *group; + struct mutex lock; + long unsigned int function_num_map; + struct atomic_notifier_head notifier; +}; + +enum pci_epc_irq_type { + PCI_EPC_IRQ_UNKNOWN = 0, + PCI_EPC_IRQ_LEGACY = 1, + PCI_EPC_IRQ_MSI = 2, + PCI_EPC_IRQ_MSIX = 3, +}; + +struct pci_epc_features; + +struct pci_epc_ops { + int (*write_header)(struct pci_epc *, u8, struct pci_epf_header *); + int (*set_bar)(struct pci_epc *, u8, struct pci_epf_bar *); + void (*clear_bar)(struct pci_epc *, u8, struct pci_epf_bar *); + int (*map_addr)(struct pci_epc *, u8, phys_addr_t, u64, size_t); + void (*unmap_addr)(struct pci_epc *, u8, phys_addr_t); + int (*set_msi)(struct pci_epc *, u8, u8); + int (*get_msi)(struct pci_epc *, u8); + int (*set_msix)(struct pci_epc *, u8, u16, enum pci_barno, u32); + int (*get_msix)(struct pci_epc *, u8); + int (*raise_irq)(struct pci_epc *, u8, enum pci_epc_irq_type, u16); + int (*start)(struct pci_epc *); + void (*stop)(struct pci_epc *); + const struct pci_epc_features * (*get_features)(struct pci_epc *, u8); + struct module *owner; +}; + +struct pci_epc_features { + unsigned int linkup_notifier: 1; + unsigned int core_init_notifier: 1; + unsigned int msi_capable: 1; + unsigned int msix_capable: 1; + u8 reserved_bar; + u8 bar_fixed_64bit; + u64 bar_fixed_size[6]; + size_t align; +}; + +struct pci_epc_mem_window { + phys_addr_t phys_base; + size_t size; + size_t page_size; +}; + +struct pci_epc_mem { + struct pci_epc_mem_window window; + long unsigned int *bitmap; + int pages; + struct mutex lock; +}; + +struct pci_epf_group { + struct config_group group; + struct pci_epf *epf; + int index; +}; + +struct pci_epc_group { + struct config_group group; + struct pci_epc *epc; + bool start; +}; + +enum pci_notify_event { + CORE_INIT = 0, + LINK_UP = 1, +}; + +struct advk_pcie { + struct platform_device *pdev; + void *base; + struct irq_domain *irq_domain; + struct irq_chip irq_chip; + struct irq_domain *msi_domain; + struct irq_domain *msi_inner_domain; + struct irq_chip msi_bottom_irq_chip; + struct irq_chip msi_irq_chip; + struct msi_domain_info msi_domain_info; + long unsigned int msi_used[1]; + struct mutex msi_used_lock; + u16 msi_msg; + int link_gen; + struct pci_bridge_emul bridge; + struct gpio_desc *reset_gpio; + struct phy *phy; +}; + +struct tegra_msi { + struct msi_controller chip; + long unsigned int used[4]; + struct irq_domain *domain; + struct mutex lock; + void *virt; + dma_addr_t phys; + int irq; +}; + +struct tegra_pcie_port_soc { + struct { + u8 turnoff_bit; + u8 ack_bit; + } pme; +}; + +struct tegra_pcie_soc { + unsigned int num_ports; + const struct tegra_pcie_port_soc *ports; + unsigned int msi_base_shift; + long unsigned int afi_pex2_ctrl; + u32 pads_pll_ctl; + u32 tx_ref_sel; + u32 pads_refclk_cfg0; + u32 pads_refclk_cfg1; + u32 update_fc_threshold; + bool has_pex_clkreq_en; + bool has_pex_bias_ctrl; + bool has_intr_prsnt_sense; + bool has_cml_clk; + bool has_gen2; + bool force_pca_enable; + bool program_uphy; + bool update_clamp_threshold; + bool program_deskew_time; + bool update_fc_timer; + bool has_cache_bars; + struct { + struct { + u32 rp_ectl_2_r1; + u32 rp_ectl_4_r1; + u32 rp_ectl_5_r1; + u32 rp_ectl_6_r1; + u32 rp_ectl_2_r2; + u32 rp_ectl_4_r2; + u32 rp_ectl_5_r2; + u32 rp_ectl_6_r2; + } regs; + bool enable; + } ectl; 
+}; + +struct tegra_pcie { + struct device *dev; + void *pads; + void *afi; + void *cfg; + int irq; + struct resource cs; + struct clk *pex_clk; + struct clk *afi_clk; + struct clk *pll_e; + struct clk *cml_clk; + struct reset_control *pex_rst; + struct reset_control *afi_rst; + struct reset_control *pcie_xrst; + bool legacy_phy; + struct phy *phy; + struct tegra_msi msi; + struct list_head ports; + u32 xbar_config; + struct regulator_bulk_data *supplies; + unsigned int num_supplies; + const struct tegra_pcie_soc *soc; + struct dentry *debugfs; +}; + +struct tegra_pcie_port { + struct tegra_pcie *pcie; + struct device_node *np; + struct list_head list; + struct resource regs; + void *base; + unsigned int index; + unsigned int lanes; + struct phy **phys; + struct gpio_desc *reset_gpio; +}; + +struct xgene_msi; + +struct xgene_msi_group { + struct xgene_msi *msi; + int gic_irq; + u32 msi_grp; +}; + +struct xgene_msi { + struct device_node *node; + struct irq_domain *inner_domain; + struct irq_domain *msi_domain; + u64 msi_addr; + void *msi_regs; + long unsigned int *bitmap; + struct mutex bitmap_lock; + struct xgene_msi_group *msi_groups; + int num_cpus; +}; + +struct rockchip_pcie { + void *reg_base; + void *apb_base; + bool legacy_phy; + struct phy *phys[4]; + struct reset_control *core_rst; + struct reset_control *mgmt_rst; + struct reset_control *mgmt_sticky_rst; + struct reset_control *pipe_rst; + struct reset_control *pm_rst; + struct reset_control *aclk_rst; + struct reset_control *pclk_rst; + struct clk *aclk_pcie; + struct clk *aclk_perf_pcie; + struct clk *hclk_pcie; + struct clk *clk_pcie_pm; + struct regulator *vpcie12v; + struct regulator *vpcie3v3; + struct regulator *vpcie1v8; + struct regulator *vpcie0v9; + struct gpio_desc *ep_gpio; + u32 lanes; + u8 lanes_map; + int link_gen; + struct device *dev; + struct irq_domain *irq_domain; + int offset; + void *msg_region; + phys_addr_t msg_bus_addr; + bool is_rc; + struct resource *mem_res; +}; + +struct rockchip_pcie_ep { + struct rockchip_pcie rockchip; + struct pci_epc *epc; + u32 max_regions; + long unsigned int ob_region_map; + phys_addr_t *ob_addr; + phys_addr_t irq_phys_addr; + void *irq_cpu_addr; + u64 irq_pci_addr; + u8 irq_pci_fn; + u8 irq_pending; +}; + +struct mtk_pcie_port; + +struct mtk_pcie_soc { + bool need_fix_class_id; + bool need_fix_device_id; + unsigned int device_id; + struct pci_ops *ops; + int (*startup)(struct mtk_pcie_port *); + int (*setup_irq)(struct mtk_pcie_port *, struct device_node *); +}; + +struct mtk_pcie; + +struct mtk_pcie_port { + void *base; + struct list_head list; + struct mtk_pcie *pcie; + struct reset_control *reset; + struct clk *sys_ck; + struct clk *ahb_ck; + struct clk *axi_ck; + struct clk *aux_ck; + struct clk *obff_ck; + struct clk *pipe_ck; + struct phy *phy; + u32 slot; + int irq; + struct irq_domain *irq_domain; + struct irq_domain *inner_domain; + struct irq_domain *msi_domain; + struct mutex lock; + long unsigned int msi_irq_in_use[1]; +}; + +struct mtk_pcie { + struct device *dev; + void *base; + struct clk *free_ck; + struct list_head ports; + const struct mtk_pcie_soc *soc; +}; + +enum { + RGR1_SW_INIT_1 = 0, + EXT_CFG_INDEX = 1, + EXT_CFG_DATA = 2, +}; + +enum pcie_type { + GENERIC = 0, + BCM7278 = 1, + BCM2711 = 2, +}; + +struct brcm_pcie; + +struct pcie_cfg_data { + const int *offsets; + const enum pcie_type type; + void (*perst_set)(struct brcm_pcie *, u32); + void (*bridge_sw_init_set)(struct brcm_pcie *, u32); +}; + +struct brcm_msi; + +struct brcm_pcie { + struct 
device *dev; + void *base; + struct clk *clk; + struct device_node *np; + bool ssc; + int gen; + u64 msi_target_addr; + struct brcm_msi *msi; + const int *reg_offsets; + enum pcie_type type; + struct reset_control *rescal; + int num_memc; + u64 memc_size[3]; + u32 hw_rev; + void (*perst_set)(struct brcm_pcie *, u32); + void (*bridge_sw_init_set)(struct brcm_pcie *, u32); +}; + +struct brcm_msi { + struct device *dev; + void *base; + struct device_node *np; + struct irq_domain *msi_domain; + struct irq_domain *inner_domain; + struct mutex lock; + u64 target_addr; + int irq; + long unsigned int used; + bool legacy; + int legacy_shift; + int nr; + void *intr_base; +}; + +enum dw_pcie_region_type { + DW_PCIE_REGION_UNKNOWN = 0, + DW_PCIE_REGION_INBOUND = 1, + DW_PCIE_REGION_OUTBOUND = 2, +}; + +struct pcie_port; + +struct dw_pcie_host_ops { + int (*host_init)(struct pcie_port *); + void (*set_num_vectors)(struct pcie_port *); + int (*msi_host_init)(struct pcie_port *); +}; + +struct pcie_port { + u64 cfg0_base; + void *va_cfg0_base; + u32 cfg0_size; + resource_size_t io_base; + phys_addr_t io_bus_addr; + u32 io_size; + int irq; + const struct dw_pcie_host_ops *ops; + int msi_irq; + struct irq_domain *irq_domain; + struct irq_domain *msi_domain; + u16 msi_msg; + dma_addr_t msi_data; + struct irq_chip *msi_irq_chip; + u32 num_vectors; + u32 irq_mask[8]; + struct pci_host_bridge *bridge; + raw_spinlock_t lock; + long unsigned int msi_irq_in_use[4]; +}; + +enum dw_pcie_as_type { + DW_PCIE_AS_UNKNOWN = 0, + DW_PCIE_AS_MEM = 1, + DW_PCIE_AS_IO = 2, +}; + +struct dw_pcie_ep; + +struct dw_pcie_ep_ops { + void (*ep_init)(struct dw_pcie_ep *); + int (*raise_irq)(struct dw_pcie_ep *, u8, enum pci_epc_irq_type, u16); + const struct pci_epc_features * (*get_features)(struct dw_pcie_ep *); + unsigned int (*func_conf_select)(struct dw_pcie_ep *, u8); +}; + +struct dw_pcie_ep { + struct pci_epc *epc; + struct list_head func_list; + const struct dw_pcie_ep_ops *ops; + phys_addr_t phys_base; + size_t addr_size; + size_t page_size; + u8 bar_to_atu[6]; + phys_addr_t *outbound_addr; + long unsigned int *ib_window_map; + long unsigned int *ob_window_map; + u32 num_ib_windows; + u32 num_ob_windows; + void *msi_mem; + phys_addr_t msi_mem_phys; + struct pci_epf_bar *epf_bar[6]; +}; + +struct dw_pcie; + +struct dw_pcie_ops { + u64 (*cpu_addr_fixup)(struct dw_pcie *, u64); + u32 (*read_dbi)(struct dw_pcie *, void *, u32, size_t); + void (*write_dbi)(struct dw_pcie *, void *, u32, size_t, u32); + void (*write_dbi2)(struct dw_pcie *, void *, u32, size_t, u32); + int (*link_up)(struct dw_pcie *); + int (*start_link)(struct dw_pcie *); + void (*stop_link)(struct dw_pcie *); +}; + +struct dw_pcie { + struct device *dev; + void *dbi_base; + void *dbi_base2; + void *atu_base; + u32 num_viewport; + u8 iatu_unroll_enabled; + struct pcie_port pp; + struct dw_pcie_ep ep; + const struct dw_pcie_ops *ops; + unsigned int version; + int num_lanes; + int link_gen; + u8 n_fts[2]; +}; + +struct pci_epf_msix_tbl { + u64 msg_addr; + u32 msg_data; + u32 vector_ctrl; +}; + +struct dw_pcie_ep_func { + struct list_head list; + u8 func_no; + u8 msi_cap; + u8 msix_cap; +}; + +enum dw_pcie_device_mode { + DW_PCIE_UNKNOWN_TYPE = 0, + DW_PCIE_EP_TYPE = 1, + DW_PCIE_LEG_EP_TYPE = 2, + DW_PCIE_RC_TYPE = 3, +}; + +struct dw_plat_pcie { + struct dw_pcie *pci; + struct regmap *regmap; + enum dw_pcie_device_mode mode; +}; + +struct dw_plat_pcie_of_data { + enum dw_pcie_device_mode mode; +}; + +struct qcom_pcie_resources_2_1_0 { + struct clk_bulk_data 
clks[5]; + struct reset_control *pci_reset; + struct reset_control *axi_reset; + struct reset_control *ahb_reset; + struct reset_control *por_reset; + struct reset_control *phy_reset; + struct reset_control *ext_reset; + struct regulator_bulk_data supplies[3]; +}; + +struct qcom_pcie_resources_1_0_0 { + struct clk *iface; + struct clk *aux; + struct clk *master_bus; + struct clk *slave_bus; + struct reset_control *core; + struct regulator *vdda; +}; + +struct qcom_pcie_resources_2_3_2 { + struct clk *aux_clk; + struct clk *master_clk; + struct clk *slave_clk; + struct clk *cfg_clk; + struct clk *pipe_clk; + struct regulator_bulk_data supplies[2]; +}; + +struct qcom_pcie_resources_2_4_0 { + struct clk_bulk_data clks[4]; + int num_clks; + struct reset_control *axi_m_reset; + struct reset_control *axi_s_reset; + struct reset_control *pipe_reset; + struct reset_control *axi_m_vmid_reset; + struct reset_control *axi_s_xpu_reset; + struct reset_control *parf_reset; + struct reset_control *phy_reset; + struct reset_control *axi_m_sticky_reset; + struct reset_control *pipe_sticky_reset; + struct reset_control *pwr_reset; + struct reset_control *ahb_reset; + struct reset_control *phy_ahb_reset; +}; + +struct qcom_pcie_resources_2_3_3 { + struct clk *iface; + struct clk *axi_m_clk; + struct clk *axi_s_clk; + struct clk *ahb_clk; + struct clk *aux_clk; + struct reset_control *rst[7]; +}; + +struct qcom_pcie_resources_2_7_0 { + struct clk_bulk_data clks[6]; + struct regulator_bulk_data supplies[2]; + struct reset_control *pci_reset; + struct clk *pipe_clk; +}; + +union qcom_pcie_resources { + struct qcom_pcie_resources_1_0_0 v1_0_0; + struct qcom_pcie_resources_2_1_0 v2_1_0; + struct qcom_pcie_resources_2_3_2 v2_3_2; + struct qcom_pcie_resources_2_3_3 v2_3_3; + struct qcom_pcie_resources_2_4_0 v2_4_0; + struct qcom_pcie_resources_2_7_0 v2_7_0; +}; + +struct qcom_pcie; + +struct qcom_pcie_ops { + int (*get_resources)(struct qcom_pcie *); + int (*init)(struct qcom_pcie *); + int (*post_init)(struct qcom_pcie *); + void (*deinit)(struct qcom_pcie *); + void (*post_deinit)(struct qcom_pcie *); + void (*ltssm_enable)(struct qcom_pcie *); +}; + +struct qcom_pcie { + struct dw_pcie *pci; + void *parf; + void *elbi; + union qcom_pcie_resources res; + struct phy *phy; + struct gpio_desc *reset; + const struct qcom_pcie_ops *ops; +}; + +struct armada8k_pcie { + struct dw_pcie *pci; + struct clk *clk; + struct clk *clk_reg; + struct phy *phy[4]; + unsigned int phy_count; +}; + +struct kirin_pcie { + struct dw_pcie *pci; + void *apb_base; + void *phy_base; + struct regmap *crgctrl; + struct regmap *sysctrl; + struct clk *apb_sys_clk; + struct clk *apb_phy_clk; + struct clk *phy_ref_clk; + struct clk *pcie_aclk; + struct clk *pcie_aux_clk; + int gpio_id_reset; +}; + +struct histb_pcie { + struct dw_pcie *pci; + struct clk *aux_clk; + struct clk *pipe_clk; + struct clk *sys_clk; + struct clk *bus_clk; + struct phy *phy; + struct reset_control *soft_reset; + struct reset_control *sys_reset; + struct reset_control *bus_reset; + void *ctrl; + int reset_gpio; + struct regulator *vpcie; +}; + +enum pcie_data_rate { + PCIE_GEN1 = 0, + PCIE_GEN2 = 1, + PCIE_GEN3 = 2, + PCIE_GEN4 = 3, +}; + +struct meson_pcie_clk_res { + struct clk *clk; + struct clk *port_clk; + struct clk *general_clk; +}; + +struct meson_pcie_rc_reset { + struct reset_control *port; + struct reset_control *apb; +}; + +struct meson_pcie { + struct dw_pcie pci; + void *cfg_base; + struct meson_pcie_clk_res clk_res; + struct meson_pcie_rc_reset mrst; + 
struct gpio_desc *reset_gpio; + struct phy *phy; +}; + +struct al_pcie_acpi { + void *dbi_base; +}; + +struct thunder_pem_pci { + u32 ea_entry[3]; + void *pem_reg_base; +}; + +struct xgene_pcie_port { + struct device_node *node; + struct device *dev; + struct clk *clk; + void *csr_base; + void *cfg_base; + long unsigned int cfg_addr; + bool link_up; + u32 version; +}; + +struct rio_device_id { + __u16 did; + __u16 vid; + __u16 asm_did; + __u16 asm_vid; +}; + +typedef s32 dma_cookie_t; + +enum dma_status { + DMA_COMPLETE = 0, + DMA_IN_PROGRESS = 1, + DMA_PAUSED = 2, + DMA_ERROR = 3, + DMA_OUT_OF_ORDER = 4, +}; + +enum dma_transaction_type { + DMA_MEMCPY = 0, + DMA_XOR = 1, + DMA_PQ = 2, + DMA_XOR_VAL = 3, + DMA_PQ_VAL = 4, + DMA_MEMSET = 5, + DMA_MEMSET_SG = 6, + DMA_INTERRUPT = 7, + DMA_PRIVATE = 8, + DMA_ASYNC_TX = 9, + DMA_SLAVE = 10, + DMA_CYCLIC = 11, + DMA_INTERLEAVE = 12, + DMA_COMPLETION_NO_ORDER = 13, + DMA_REPEAT = 14, + DMA_LOAD_EOT = 15, + DMA_TX_TYPE_END = 16, +}; + +enum dma_transfer_direction { + DMA_MEM_TO_MEM = 0, + DMA_MEM_TO_DEV = 1, + DMA_DEV_TO_MEM = 2, + DMA_DEV_TO_DEV = 3, + DMA_TRANS_NONE = 4, +}; + +struct data_chunk { + size_t size; + size_t icg; + size_t dst_icg; + size_t src_icg; +}; + +struct dma_interleaved_template { + dma_addr_t src_start; + dma_addr_t dst_start; + enum dma_transfer_direction dir; + bool src_inc; + bool dst_inc; + bool src_sgl; + bool dst_sgl; + size_t numf; + size_t frame_size; + struct data_chunk sgl[0]; +}; + +enum dma_ctrl_flags { + DMA_PREP_INTERRUPT = 1, + DMA_CTRL_ACK = 2, + DMA_PREP_PQ_DISABLE_P = 4, + DMA_PREP_PQ_DISABLE_Q = 8, + DMA_PREP_CONTINUE = 16, + DMA_PREP_FENCE = 32, + DMA_CTRL_REUSE = 64, + DMA_PREP_CMD = 128, + DMA_PREP_REPEAT = 256, + DMA_PREP_LOAD_EOT = 512, +}; + +enum sum_check_bits { + SUM_CHECK_P = 0, + SUM_CHECK_Q = 1, +}; + +enum sum_check_flags { + SUM_CHECK_P_RESULT = 1, + SUM_CHECK_Q_RESULT = 2, +}; + +typedef struct { + long unsigned int bits[1]; +} dma_cap_mask_t; + +enum dma_desc_metadata_mode { + DESC_METADATA_NONE = 0, + DESC_METADATA_CLIENT = 1, + DESC_METADATA_ENGINE = 2, +}; + +struct dma_chan_percpu { + long unsigned int memcpy_count; + long unsigned int bytes_transferred; +}; + +struct dma_router { + struct device *dev; + void (*route_free)(struct device *, void *); +}; + +struct dma_device; + +struct dma_chan_dev; + +struct dma_chan { + struct dma_device *device; + struct device *slave; + dma_cookie_t cookie; + dma_cookie_t completed_cookie; + int chan_id; + struct dma_chan_dev *dev; + const char *name; + char *dbg_client_name; + struct list_head device_node; + struct dma_chan_percpu *local; + int client_count; + int table_count; + struct dma_router *router; + void *route_data; + void *private; +}; + +typedef bool (*dma_filter_fn)(struct dma_chan *, void *); + +struct dma_slave_map; + +struct dma_filter { + dma_filter_fn fn; + int mapcnt; + const struct dma_slave_map *map; +}; + +enum dmaengine_alignment { + DMAENGINE_ALIGN_1_BYTE = 0, + DMAENGINE_ALIGN_2_BYTES = 1, + DMAENGINE_ALIGN_4_BYTES = 2, + DMAENGINE_ALIGN_8_BYTES = 3, + DMAENGINE_ALIGN_16_BYTES = 4, + DMAENGINE_ALIGN_32_BYTES = 5, + DMAENGINE_ALIGN_64_BYTES = 6, +}; + +enum dma_residue_granularity { + DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0, + DMA_RESIDUE_GRANULARITY_SEGMENT = 1, + DMA_RESIDUE_GRANULARITY_BURST = 2, +}; + +struct dma_async_tx_descriptor; + +struct dma_slave_caps; + +struct dma_slave_config; + +struct dma_tx_state; + +struct dma_device { + struct kref ref; + unsigned int chancnt; + unsigned int privatecnt; + struct list_head 
channels; + struct list_head global_node; + struct dma_filter filter; + dma_cap_mask_t cap_mask; + enum dma_desc_metadata_mode desc_metadata_modes; + short unsigned int max_xor; + short unsigned int max_pq; + enum dmaengine_alignment copy_align; + enum dmaengine_alignment xor_align; + enum dmaengine_alignment pq_align; + enum dmaengine_alignment fill_align; + int dev_id; + struct device *dev; + struct module *owner; + struct ida chan_ida; + struct mutex chan_mutex; + u32 src_addr_widths; + u32 dst_addr_widths; + u32 directions; + u32 min_burst; + u32 max_burst; + u32 max_sg_burst; + bool descriptor_reuse; + enum dma_residue_granularity residue_granularity; + int (*device_alloc_chan_resources)(struct dma_chan *); + void (*device_free_chan_resources)(struct dma_chan *); + struct dma_async_tx_descriptor * (*device_prep_dma_memcpy)(struct dma_chan *, dma_addr_t, dma_addr_t, size_t, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_xor)(struct dma_chan *, dma_addr_t, dma_addr_t *, unsigned int, size_t, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_xor_val)(struct dma_chan *, dma_addr_t *, unsigned int, size_t, enum sum_check_flags *, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_pq)(struct dma_chan *, dma_addr_t *, dma_addr_t *, unsigned int, const unsigned char *, size_t, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_pq_val)(struct dma_chan *, dma_addr_t *, dma_addr_t *, unsigned int, const unsigned char *, size_t, enum sum_check_flags *, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_memset)(struct dma_chan *, dma_addr_t, int, size_t, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_memset_sg)(struct dma_chan *, struct scatterlist *, unsigned int, int, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_interrupt)(struct dma_chan *, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_slave_sg)(struct dma_chan *, struct scatterlist *, unsigned int, enum dma_transfer_direction, long unsigned int, void *); + struct dma_async_tx_descriptor * (*device_prep_dma_cyclic)(struct dma_chan *, dma_addr_t, size_t, size_t, enum dma_transfer_direction, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_interleaved_dma)(struct dma_chan *, struct dma_interleaved_template *, long unsigned int); + struct dma_async_tx_descriptor * (*device_prep_dma_imm_data)(struct dma_chan *, dma_addr_t, u64, long unsigned int); + void (*device_caps)(struct dma_chan *, struct dma_slave_caps *); + int (*device_config)(struct dma_chan *, struct dma_slave_config *); + int (*device_pause)(struct dma_chan *); + int (*device_resume)(struct dma_chan *); + int (*device_terminate_all)(struct dma_chan *); + void (*device_synchronize)(struct dma_chan *); + enum dma_status (*device_tx_status)(struct dma_chan *, dma_cookie_t, struct dma_tx_state *); + void (*device_issue_pending)(struct dma_chan *); + void (*device_release)(struct dma_device *); + void (*dbg_summary_show)(struct seq_file *, struct dma_device *); + struct dentry *dbg_dev_root; +}; + +struct dma_chan_dev { + struct dma_chan *chan; + struct device device; + int dev_id; +}; + +enum dma_slave_buswidth { + DMA_SLAVE_BUSWIDTH_UNDEFINED = 0, + DMA_SLAVE_BUSWIDTH_1_BYTE = 1, + DMA_SLAVE_BUSWIDTH_2_BYTES = 2, + DMA_SLAVE_BUSWIDTH_3_BYTES = 3, + DMA_SLAVE_BUSWIDTH_4_BYTES = 4, + DMA_SLAVE_BUSWIDTH_8_BYTES = 8, + DMA_SLAVE_BUSWIDTH_16_BYTES = 16, + DMA_SLAVE_BUSWIDTH_32_BYTES = 
32, + DMA_SLAVE_BUSWIDTH_64_BYTES = 64, +}; + +struct dma_slave_config { + enum dma_transfer_direction direction; + phys_addr_t src_addr; + phys_addr_t dst_addr; + enum dma_slave_buswidth src_addr_width; + enum dma_slave_buswidth dst_addr_width; + u32 src_maxburst; + u32 dst_maxburst; + u32 src_port_window_size; + u32 dst_port_window_size; + bool device_fc; + unsigned int slave_id; +}; + +struct dma_slave_caps { + u32 src_addr_widths; + u32 dst_addr_widths; + u32 directions; + u32 min_burst; + u32 max_burst; + u32 max_sg_burst; + bool cmd_pause; + bool cmd_resume; + bool cmd_terminate; + enum dma_residue_granularity residue_granularity; + bool descriptor_reuse; +}; + +typedef void (*dma_async_tx_callback)(void *); + +enum dmaengine_tx_result { + DMA_TRANS_NOERROR = 0, + DMA_TRANS_READ_FAILED = 1, + DMA_TRANS_WRITE_FAILED = 2, + DMA_TRANS_ABORTED = 3, +}; + +struct dmaengine_result { + enum dmaengine_tx_result result; + u32 residue; +}; + +typedef void (*dma_async_tx_callback_result)(void *, const struct dmaengine_result *); + +struct dmaengine_unmap_data { + u16 map_cnt; + u8 to_cnt; + u8 from_cnt; + u8 bidi_cnt; + struct device *dev; + struct kref kref; + size_t len; + dma_addr_t addr[0]; +}; + +struct dma_descriptor_metadata_ops { + int (*attach)(struct dma_async_tx_descriptor *, void *, size_t); + void * (*get_ptr)(struct dma_async_tx_descriptor *, size_t *, size_t *); + int (*set_len)(struct dma_async_tx_descriptor *, size_t); +}; + +struct dma_async_tx_descriptor { + dma_cookie_t cookie; + enum dma_ctrl_flags flags; + dma_addr_t phys; + struct dma_chan *chan; + dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *); + int (*desc_free)(struct dma_async_tx_descriptor *); + dma_async_tx_callback callback; + dma_async_tx_callback_result callback_result; + void *callback_param; + struct dmaengine_unmap_data *unmap; + enum dma_desc_metadata_mode desc_metadata_mode; + struct dma_descriptor_metadata_ops *metadata_ops; + struct dma_async_tx_descriptor *next; + struct dma_async_tx_descriptor *parent; + spinlock_t lock; +}; + +struct dma_tx_state { + dma_cookie_t last; + dma_cookie_t used; + u32 residue; + u32 in_flight_bytes; +}; + +struct dma_slave_map { + const char *devname; + const char *slave; + void *param; +}; + +struct rio_switch_ops; + +struct rio_dev; + +struct rio_switch { + struct list_head node; + u8 *route_table; + u32 port_ok; + struct rio_switch_ops *ops; + spinlock_t lock; + struct rio_dev *nextdev[0]; +}; + +struct rio_mport; + +struct rio_switch_ops { + struct module *owner; + int (*add_entry)(struct rio_mport *, u16, u8, u16, u16, u8); + int (*get_entry)(struct rio_mport *, u16, u8, u16, u16, u8 *); + int (*clr_table)(struct rio_mport *, u16, u8, u16); + int (*set_domain)(struct rio_mport *, u16, u8, u8); + int (*get_domain)(struct rio_mport *, u16, u8, u8 *); + int (*em_init)(struct rio_dev *); + int (*em_handle)(struct rio_dev *, u8); +}; + +struct rio_net; + +struct rio_driver; + +union rio_pw_msg; + +struct rio_dev { + struct list_head global_list; + struct list_head net_list; + struct rio_net *net; + bool do_enum; + u16 did; + u16 vid; + u32 device_rev; + u16 asm_did; + u16 asm_vid; + u16 asm_rev; + u16 efptr; + u32 pef; + u32 swpinfo; + u32 src_ops; + u32 dst_ops; + u32 comp_tag; + u32 phys_efptr; + u32 phys_rmap; + u32 em_efptr; + u64 dma_mask; + struct rio_driver *driver; + struct device dev; + struct resource riores[16]; + int (*pwcback)(struct rio_dev *, union rio_pw_msg *, int); + u16 destid; + u8 hopcount; + struct rio_dev *prev; + atomic_t state; + struct 
rio_switch rswitch[0]; +}; + +struct rio_msg { + struct resource *res; + void (*mcback)(struct rio_mport *, void *, int, int); +}; + +struct rio_ops; + +struct rio_scan; + +struct rio_mport { + struct list_head dbells; + struct list_head pwrites; + struct list_head node; + struct list_head nnode; + struct rio_net *net; + struct mutex lock; + struct resource iores; + struct resource riores[16]; + struct rio_msg inb_msg[4]; + struct rio_msg outb_msg[4]; + int host_deviceid; + struct rio_ops *ops; + unsigned char id; + unsigned char index; + unsigned int sys_size; + u32 phys_efptr; + u32 phys_rmap; + unsigned char name[40]; + struct device dev; + void *priv; + struct dma_device dma; + struct rio_scan *nscan; + atomic_t state; + unsigned int pwe_refcnt; +}; + +enum rio_device_state { + RIO_DEVICE_INITIALIZING = 0, + RIO_DEVICE_RUNNING = 1, + RIO_DEVICE_GONE = 2, + RIO_DEVICE_SHUTDOWN = 3, +}; + +struct rio_net { + struct list_head node; + struct list_head devices; + struct list_head switches; + struct list_head mports; + struct rio_mport *hport; + unsigned char id; + struct device dev; + void *enum_data; + void (*release)(struct rio_net *); +}; + +struct rio_driver { + struct list_head node; + char *name; + const struct rio_device_id *id_table; + int (*probe)(struct rio_dev *, const struct rio_device_id *); + void (*remove)(struct rio_dev *); + void (*shutdown)(struct rio_dev *); + int (*suspend)(struct rio_dev *, u32); + int (*resume)(struct rio_dev *); + int (*enable_wake)(struct rio_dev *, u32, int); + struct device_driver driver; +}; + +union rio_pw_msg { + struct { + u32 comptag; + u32 errdetect; + u32 is_port; + u32 ltlerrdet; + u32 padding[12]; + } em; + u32 raw[16]; +}; + +struct rio_dbell { + struct list_head node; + struct resource *res; + void (*dinb)(struct rio_mport *, void *, u16, u16, u16); + void *dev_id; +}; + +struct rio_mport_attr; + +struct rio_ops { + int (*lcread)(struct rio_mport *, int, u32, int, u32 *); + int (*lcwrite)(struct rio_mport *, int, u32, int, u32); + int (*cread)(struct rio_mport *, int, u16, u8, u32, int, u32 *); + int (*cwrite)(struct rio_mport *, int, u16, u8, u32, int, u32); + int (*dsend)(struct rio_mport *, int, u16, u16); + int (*pwenable)(struct rio_mport *, int); + int (*open_outb_mbox)(struct rio_mport *, void *, int, int); + void (*close_outb_mbox)(struct rio_mport *, int); + int (*open_inb_mbox)(struct rio_mport *, void *, int, int); + void (*close_inb_mbox)(struct rio_mport *, int); + int (*add_outb_message)(struct rio_mport *, struct rio_dev *, int, void *, size_t); + int (*add_inb_buffer)(struct rio_mport *, int, void *); + void * (*get_inb_message)(struct rio_mport *, int); + int (*map_inb)(struct rio_mport *, dma_addr_t, u64, u64, u32); + void (*unmap_inb)(struct rio_mport *, dma_addr_t); + int (*query_mport)(struct rio_mport *, struct rio_mport_attr *); + int (*map_outb)(struct rio_mport *, u16, u64, u32, u32, dma_addr_t *); + void (*unmap_outb)(struct rio_mport *, u16, u64); +}; + +struct rio_scan { + struct module *owner; + int (*enumerate)(struct rio_mport *, u32); + int (*discover)(struct rio_mport *, u32); +}; + +struct rio_mport_attr { + int flags; + int link_speed; + int link_width; + int dma_max_sge; + int dma_max_size; + int dma_align; +}; + +enum rio_write_type { + RDW_DEFAULT = 0, + RDW_ALL_NWRITE = 1, + RDW_ALL_NWRITE_R = 2, + RDW_LAST_NWRITE_R = 3, +}; + +struct rio_dma_ext { + u16 destid; + u64 rio_addr; + u8 rio_addr_u; + enum rio_write_type wr_type; +}; + +struct rio_dma_data { + struct scatterlist *sg; + unsigned int 
sg_len; + u64 rio_addr; + u8 rio_addr_u; + enum rio_write_type wr_type; +}; + +struct rio_scan_node { + int mport_id; + struct list_head node; + struct rio_scan *ops; +}; + +struct rio_pwrite { + struct list_head node; + int (*pwcback)(struct rio_mport *, void *, union rio_pw_msg *, int); + void *context; +}; + +struct rio_disc_work { + struct work_struct work; + struct rio_mport *mport; +}; + +enum rio_link_speed { + RIO_LINK_DOWN = 0, + RIO_LINK_125 = 1, + RIO_LINK_250 = 2, + RIO_LINK_312 = 3, + RIO_LINK_500 = 4, + RIO_LINK_625 = 5, +}; + +enum rio_mport_flags { + RIO_MPORT_DMA = 1, + RIO_MPORT_DMA_SG = 2, + RIO_MPORT_IBSG = 4, +}; + +struct kfifo { + union { + struct __kfifo kfifo; + unsigned char *type; + const unsigned char *const_type; + char (*rectype)[0]; + void *ptr; + const void *ptr_const; + }; + unsigned char buf[0]; +}; + +enum { + DBG_NONE = 0, + DBG_INIT = 1, + DBG_EXIT = 2, + DBG_MPORT = 4, + DBG_MAINT = 8, + DBG_DMA = 16, + DBG_DMAV = 32, + DBG_IBW = 64, + DBG_EVENT = 128, + DBG_OBW = 256, + DBG_DBELL = 512, + DBG_OMSG = 1024, + DBG_IMSG = 2048, + DBG_ALL = 4294967295, +}; + +struct tsi721_dma_desc { + __le32 type_id; + __le32 bcount; + union { + __le32 raddr_lo; + __le32 next_lo; + }; + union { + __le32 raddr_hi; + __le32 next_hi; + }; + union { + struct { + __le32 bufptr_lo; + __le32 bufptr_hi; + __le32 s_dist; + __le32 s_size; + } t1; + __le32 data[4]; + u32 reserved[4]; + }; +}; + +struct tsi721_imsg_desc { + __le32 type_id; + __le32 msg_info; + __le32 bufptr_lo; + __le32 bufptr_hi; + u32 reserved[12]; +}; + +struct tsi721_omsg_desc { + __le32 type_id; + __le32 msg_info; + union { + __le32 bufptr_lo; + __le32 next_lo; + }; + union { + __le32 bufptr_hi; + __le32 next_hi; + }; +}; + +enum dma_dtype { + DTYPE1 = 1, + DTYPE2 = 2, + DTYPE3 = 3, + DTYPE4 = 4, + DTYPE5 = 5, + DTYPE6 = 6, +}; + +enum dma_rtype { + NREAD = 0, + LAST_NWRITE_R = 1, + ALL_NWRITE = 2, + ALL_NWRITE_R = 3, + MAINT_RD = 4, + MAINT_WR = 5, +}; + +struct tsi721_tx_desc { + struct dma_async_tx_descriptor txd; + u16 destid; + u64 rio_addr; + u8 rio_addr_u; + enum dma_rtype rtype; + struct list_head desc_node; + struct scatterlist *sg; + unsigned int sg_len; + enum dma_status status; +}; + +struct tsi721_bdma_chan { + int id; + void *regs; + int bd_num; + void *bd_base; + dma_addr_t bd_phys; + void *sts_base; + dma_addr_t sts_phys; + int sts_size; + u32 sts_rdptr; + u32 wr_count; + u32 wr_count_next; + struct dma_chan dchan; + struct tsi721_tx_desc *tx_desc; + spinlock_t lock; + struct tsi721_tx_desc *active_tx; + struct list_head queue; + struct list_head free_list; + struct tasklet_struct tasklet; + bool active; +}; + +struct tsi721_bdma_maint { + int ch_id; + int bd_num; + void *bd_base; + dma_addr_t bd_phys; + void *sts_base; + dma_addr_t sts_phys; + int sts_size; +}; + +struct tsi721_imsg_ring { + u32 size; + void *buf_base; + dma_addr_t buf_phys; + void *imfq_base; + dma_addr_t imfq_phys; + void *imd_base; + dma_addr_t imd_phys; + void *imq_base[512]; + u32 rx_slot; + void *dev_id; + u32 fq_wrptr; + u32 desc_rdptr; + spinlock_t lock; +}; + +struct tsi721_omsg_ring { + u32 size; + void *omd_base; + dma_addr_t omd_phys; + void *omq_base[512]; + dma_addr_t omq_phys[512]; + void *sts_base; + dma_addr_t sts_phys; + u32 sts_size; + u32 sts_rdptr; + u32 tx_slot; + void *dev_id; + u32 wr_count; + spinlock_t lock; +}; + +enum tsi721_flags { + TSI721_USING_MSI = 1, + TSI721_USING_MSIX = 2, + TSI721_IMSGID_SET = 4, +}; + +enum tsi721_msix_vect { + TSI721_VECT_IDB = 0, + TSI721_VECT_PWRX = 1, + 
TSI721_VECT_OMB0_DONE = 2, + TSI721_VECT_OMB1_DONE = 3, + TSI721_VECT_OMB2_DONE = 4, + TSI721_VECT_OMB3_DONE = 5, + TSI721_VECT_OMB0_INT = 6, + TSI721_VECT_OMB1_INT = 7, + TSI721_VECT_OMB2_INT = 8, + TSI721_VECT_OMB3_INT = 9, + TSI721_VECT_IMB0_RCV = 10, + TSI721_VECT_IMB1_RCV = 11, + TSI721_VECT_IMB2_RCV = 12, + TSI721_VECT_IMB3_RCV = 13, + TSI721_VECT_IMB0_INT = 14, + TSI721_VECT_IMB1_INT = 15, + TSI721_VECT_IMB2_INT = 16, + TSI721_VECT_IMB3_INT = 17, + TSI721_VECT_DMA0_DONE = 18, + TSI721_VECT_DMA1_DONE = 19, + TSI721_VECT_DMA2_DONE = 20, + TSI721_VECT_DMA3_DONE = 21, + TSI721_VECT_DMA4_DONE = 22, + TSI721_VECT_DMA5_DONE = 23, + TSI721_VECT_DMA6_DONE = 24, + TSI721_VECT_DMA7_DONE = 25, + TSI721_VECT_DMA0_INT = 26, + TSI721_VECT_DMA1_INT = 27, + TSI721_VECT_DMA2_INT = 28, + TSI721_VECT_DMA3_INT = 29, + TSI721_VECT_DMA4_INT = 30, + TSI721_VECT_DMA5_INT = 31, + TSI721_VECT_DMA6_INT = 32, + TSI721_VECT_DMA7_INT = 33, + TSI721_VECT_MAX = 34, +}; + +struct msix_irq { + u16 vector; + char irq_name[64]; +}; + +struct tsi721_ib_win_mapping { + struct list_head node; + dma_addr_t lstart; +}; + +struct tsi721_ib_win { + u64 rstart; + u32 size; + dma_addr_t lstart; + bool active; + bool xlat; + struct list_head mappings; +}; + +struct tsi721_obw_bar { + u64 base; + u64 size; + u64 free; +}; + +struct tsi721_ob_win { + u64 base; + u32 size; + u16 destid; + u64 rstart; + bool active; + struct tsi721_obw_bar *pbar; +}; + +struct tsi721_device { + struct pci_dev *pdev; + struct rio_mport mport; + u32 flags; + void *regs; + struct msix_irq msix[34]; + void *odb_base; + void *idb_base; + dma_addr_t idb_dma; + struct work_struct idb_work; + u32 db_discard_count; + struct work_struct pw_work; + struct kfifo pw_fifo; + spinlock_t pw_fifo_lock; + u32 pw_discard_count; + struct tsi721_bdma_maint mdma; + struct tsi721_bdma_chan bdma[8]; + int imsg_init[8]; + struct tsi721_imsg_ring imsg_ring[8]; + int omsg_init[4]; + struct tsi721_omsg_ring omsg_ring[4]; + struct tsi721_ib_win ib_win[8]; + int ibwin_cnt; + struct tsi721_obw_bar p2r_bar[2]; + struct tsi721_ob_win ob_win[8]; + int obwin_cnt; +}; + +enum hdmi_infoframe_type { + HDMI_INFOFRAME_TYPE_VENDOR = 129, + HDMI_INFOFRAME_TYPE_AVI = 130, + HDMI_INFOFRAME_TYPE_SPD = 131, + HDMI_INFOFRAME_TYPE_AUDIO = 132, + HDMI_INFOFRAME_TYPE_DRM = 135, +}; + +struct hdmi_any_infoframe { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; +}; + +enum hdmi_colorspace { + HDMI_COLORSPACE_RGB = 0, + HDMI_COLORSPACE_YUV422 = 1, + HDMI_COLORSPACE_YUV444 = 2, + HDMI_COLORSPACE_YUV420 = 3, + HDMI_COLORSPACE_RESERVED4 = 4, + HDMI_COLORSPACE_RESERVED5 = 5, + HDMI_COLORSPACE_RESERVED6 = 6, + HDMI_COLORSPACE_IDO_DEFINED = 7, +}; + +enum hdmi_scan_mode { + HDMI_SCAN_MODE_NONE = 0, + HDMI_SCAN_MODE_OVERSCAN = 1, + HDMI_SCAN_MODE_UNDERSCAN = 2, + HDMI_SCAN_MODE_RESERVED = 3, +}; + +enum hdmi_colorimetry { + HDMI_COLORIMETRY_NONE = 0, + HDMI_COLORIMETRY_ITU_601 = 1, + HDMI_COLORIMETRY_ITU_709 = 2, + HDMI_COLORIMETRY_EXTENDED = 3, +}; + +enum hdmi_picture_aspect { + HDMI_PICTURE_ASPECT_NONE = 0, + HDMI_PICTURE_ASPECT_4_3 = 1, + HDMI_PICTURE_ASPECT_16_9 = 2, + HDMI_PICTURE_ASPECT_64_27 = 3, + HDMI_PICTURE_ASPECT_256_135 = 4, + HDMI_PICTURE_ASPECT_RESERVED = 5, +}; + +enum hdmi_active_aspect { + HDMI_ACTIVE_ASPECT_16_9_TOP = 2, + HDMI_ACTIVE_ASPECT_14_9_TOP = 3, + HDMI_ACTIVE_ASPECT_16_9_CENTER = 4, + HDMI_ACTIVE_ASPECT_PICTURE = 8, + HDMI_ACTIVE_ASPECT_4_3 = 9, + HDMI_ACTIVE_ASPECT_16_9 = 10, + HDMI_ACTIVE_ASPECT_14_9 = 11, + HDMI_ACTIVE_ASPECT_4_3_SP_14_9 = 
13, + HDMI_ACTIVE_ASPECT_16_9_SP_14_9 = 14, + HDMI_ACTIVE_ASPECT_16_9_SP_4_3 = 15, +}; + +enum hdmi_extended_colorimetry { + HDMI_EXTENDED_COLORIMETRY_XV_YCC_601 = 0, + HDMI_EXTENDED_COLORIMETRY_XV_YCC_709 = 1, + HDMI_EXTENDED_COLORIMETRY_S_YCC_601 = 2, + HDMI_EXTENDED_COLORIMETRY_OPYCC_601 = 3, + HDMI_EXTENDED_COLORIMETRY_OPRGB = 4, + HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM = 5, + HDMI_EXTENDED_COLORIMETRY_BT2020 = 6, + HDMI_EXTENDED_COLORIMETRY_RESERVED = 7, +}; + +enum hdmi_quantization_range { + HDMI_QUANTIZATION_RANGE_DEFAULT = 0, + HDMI_QUANTIZATION_RANGE_LIMITED = 1, + HDMI_QUANTIZATION_RANGE_FULL = 2, + HDMI_QUANTIZATION_RANGE_RESERVED = 3, +}; + +enum hdmi_nups { + HDMI_NUPS_UNKNOWN = 0, + HDMI_NUPS_HORIZONTAL = 1, + HDMI_NUPS_VERTICAL = 2, + HDMI_NUPS_BOTH = 3, +}; + +enum hdmi_ycc_quantization_range { + HDMI_YCC_QUANTIZATION_RANGE_LIMITED = 0, + HDMI_YCC_QUANTIZATION_RANGE_FULL = 1, +}; + +enum hdmi_content_type { + HDMI_CONTENT_TYPE_GRAPHICS = 0, + HDMI_CONTENT_TYPE_PHOTO = 1, + HDMI_CONTENT_TYPE_CINEMA = 2, + HDMI_CONTENT_TYPE_GAME = 3, +}; + +enum hdmi_metadata_type { + HDMI_STATIC_METADATA_TYPE1 = 1, +}; + +enum hdmi_eotf { + HDMI_EOTF_TRADITIONAL_GAMMA_SDR = 0, + HDMI_EOTF_TRADITIONAL_GAMMA_HDR = 1, + HDMI_EOTF_SMPTE_ST2084 = 2, + HDMI_EOTF_BT_2100_HLG = 3, +}; + +struct hdmi_avi_infoframe { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; + enum hdmi_colorspace colorspace; + enum hdmi_scan_mode scan_mode; + enum hdmi_colorimetry colorimetry; + enum hdmi_picture_aspect picture_aspect; + enum hdmi_active_aspect active_aspect; + bool itc; + enum hdmi_extended_colorimetry extended_colorimetry; + enum hdmi_quantization_range quantization_range; + enum hdmi_nups nups; + unsigned char video_code; + enum hdmi_ycc_quantization_range ycc_quantization_range; + enum hdmi_content_type content_type; + unsigned char pixel_repeat; + short unsigned int top_bar; + short unsigned int bottom_bar; + short unsigned int left_bar; + short unsigned int right_bar; +}; + +struct hdmi_drm_infoframe { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; + enum hdmi_eotf eotf; + enum hdmi_metadata_type metadata_type; + struct { + u16 x; + u16 y; + } display_primaries[3]; + struct { + u16 x; + u16 y; + } white_point; + u16 max_display_mastering_luminance; + u16 min_display_mastering_luminance; + u16 max_cll; + u16 max_fall; +}; + +enum hdmi_spd_sdi { + HDMI_SPD_SDI_UNKNOWN = 0, + HDMI_SPD_SDI_DSTB = 1, + HDMI_SPD_SDI_DVDP = 2, + HDMI_SPD_SDI_DVHS = 3, + HDMI_SPD_SDI_HDDVR = 4, + HDMI_SPD_SDI_DVC = 5, + HDMI_SPD_SDI_DSC = 6, + HDMI_SPD_SDI_VCD = 7, + HDMI_SPD_SDI_GAME = 8, + HDMI_SPD_SDI_PC = 9, + HDMI_SPD_SDI_BD = 10, + HDMI_SPD_SDI_SACD = 11, + HDMI_SPD_SDI_HDDVD = 12, + HDMI_SPD_SDI_PMP = 13, +}; + +struct hdmi_spd_infoframe { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; + char vendor[8]; + char product[16]; + enum hdmi_spd_sdi sdi; +}; + +enum hdmi_audio_coding_type { + HDMI_AUDIO_CODING_TYPE_STREAM = 0, + HDMI_AUDIO_CODING_TYPE_PCM = 1, + HDMI_AUDIO_CODING_TYPE_AC3 = 2, + HDMI_AUDIO_CODING_TYPE_MPEG1 = 3, + HDMI_AUDIO_CODING_TYPE_MP3 = 4, + HDMI_AUDIO_CODING_TYPE_MPEG2 = 5, + HDMI_AUDIO_CODING_TYPE_AAC_LC = 6, + HDMI_AUDIO_CODING_TYPE_DTS = 7, + HDMI_AUDIO_CODING_TYPE_ATRAC = 8, + HDMI_AUDIO_CODING_TYPE_DSD = 9, + HDMI_AUDIO_CODING_TYPE_EAC3 = 10, + HDMI_AUDIO_CODING_TYPE_DTS_HD = 11, + HDMI_AUDIO_CODING_TYPE_MLP = 12, + HDMI_AUDIO_CODING_TYPE_DST = 13, + HDMI_AUDIO_CODING_TYPE_WMA_PRO = 14, + 
HDMI_AUDIO_CODING_TYPE_CXT = 15, +}; + +enum hdmi_audio_sample_size { + HDMI_AUDIO_SAMPLE_SIZE_STREAM = 0, + HDMI_AUDIO_SAMPLE_SIZE_16 = 1, + HDMI_AUDIO_SAMPLE_SIZE_20 = 2, + HDMI_AUDIO_SAMPLE_SIZE_24 = 3, +}; + +enum hdmi_audio_sample_frequency { + HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM = 0, + HDMI_AUDIO_SAMPLE_FREQUENCY_32000 = 1, + HDMI_AUDIO_SAMPLE_FREQUENCY_44100 = 2, + HDMI_AUDIO_SAMPLE_FREQUENCY_48000 = 3, + HDMI_AUDIO_SAMPLE_FREQUENCY_88200 = 4, + HDMI_AUDIO_SAMPLE_FREQUENCY_96000 = 5, + HDMI_AUDIO_SAMPLE_FREQUENCY_176400 = 6, + HDMI_AUDIO_SAMPLE_FREQUENCY_192000 = 7, +}; + +enum hdmi_audio_coding_type_ext { + HDMI_AUDIO_CODING_TYPE_EXT_CT = 0, + HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC = 1, + HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC_V2 = 2, + HDMI_AUDIO_CODING_TYPE_EXT_MPEG_SURROUND = 3, + HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC = 4, + HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_V2 = 5, + HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC = 6, + HDMI_AUDIO_CODING_TYPE_EXT_DRA = 7, + HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_SURROUND = 8, + HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC_SURROUND = 10, +}; + +struct hdmi_audio_infoframe { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; + unsigned char channels; + enum hdmi_audio_coding_type coding_type; + enum hdmi_audio_sample_size sample_size; + enum hdmi_audio_sample_frequency sample_frequency; + enum hdmi_audio_coding_type_ext coding_type_ext; + unsigned char channel_allocation; + unsigned char level_shift_value; + bool downmix_inhibit; +}; + +enum hdmi_3d_structure { + HDMI_3D_STRUCTURE_INVALID = 4294967295, + HDMI_3D_STRUCTURE_FRAME_PACKING = 0, + HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE = 1, + HDMI_3D_STRUCTURE_LINE_ALTERNATIVE = 2, + HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL = 3, + HDMI_3D_STRUCTURE_L_DEPTH = 4, + HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH = 5, + HDMI_3D_STRUCTURE_TOP_AND_BOTTOM = 6, + HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF = 8, +}; + +struct hdmi_vendor_infoframe { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; + unsigned int oui; + u8 vic; + enum hdmi_3d_structure s3d_struct; + unsigned int s3d_ext_data; +}; + +union hdmi_vendor_any_infoframe { + struct { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; + unsigned int oui; + } any; + struct hdmi_vendor_infoframe hdmi; +}; + +union hdmi_infoframe { + struct hdmi_any_infoframe any; + struct hdmi_avi_infoframe avi; + struct hdmi_spd_infoframe spd; + union hdmi_vendor_any_infoframe vendor; + struct hdmi_audio_infoframe audio; + struct hdmi_drm_infoframe drm; +}; + +enum con_scroll { + SM_UP = 0, + SM_DOWN = 1, +}; + +enum vc_intensity { + VCI_HALF_BRIGHT = 0, + VCI_NORMAL = 1, + VCI_BOLD = 2, + VCI_MASK = 3, +}; + +struct vc_data; + +struct console_font; + +struct consw { + struct module *owner; + const char * (*con_startup)(); + void (*con_init)(struct vc_data *, int); + void (*con_deinit)(struct vc_data *); + void (*con_clear)(struct vc_data *, int, int, int, int); + void (*con_putc)(struct vc_data *, int, int, int); + void (*con_putcs)(struct vc_data *, const short unsigned int *, int, int, int); + void (*con_cursor)(struct vc_data *, int); + bool (*con_scroll)(struct vc_data *, unsigned int, unsigned int, enum con_scroll, unsigned int); + int (*con_switch)(struct vc_data *); + int (*con_blank)(struct vc_data *, int, int); + int (*con_font_set)(struct vc_data *, struct console_font *, unsigned int); + int (*con_font_get)(struct vc_data *, struct console_font *); + int (*con_font_default)(struct vc_data *, struct 
console_font *, char *); + int (*con_font_copy)(struct vc_data *, int); + int (*con_resize)(struct vc_data *, unsigned int, unsigned int, unsigned int); + void (*con_set_palette)(struct vc_data *, const unsigned char *); + void (*con_scrolldelta)(struct vc_data *, int); + int (*con_set_origin)(struct vc_data *); + void (*con_save_screen)(struct vc_data *); + u8 (*con_build_attr)(struct vc_data *, u8, enum vc_intensity, bool, bool, bool, bool); + void (*con_invert_region)(struct vc_data *, u16 *, int); + u16 * (*con_screen_pos)(const struct vc_data *, int); + long unsigned int (*con_getxy)(struct vc_data *, long unsigned int, int *, int *); + void (*con_flush_scrollback)(struct vc_data *); + int (*con_debug_enter)(struct vc_data *); + int (*con_debug_leave)(struct vc_data *); +}; + +struct vc_state { + unsigned int x; + unsigned int y; + unsigned char color; + unsigned char Gx_charset[2]; + unsigned int charset: 1; + enum vc_intensity intensity; + bool italic; + bool underline; + bool blink; + bool reverse; +}; + +struct console_font { + unsigned int width; + unsigned int height; + unsigned int charcount; + unsigned char *data; +}; + +struct vt_mode { + char mode; + char waitv; + short int relsig; + short int acqsig; + short int frsig; +}; + +struct uni_pagedir; + +struct uni_screen; + +struct vc_data { + struct tty_port port; + struct vc_state state; + struct vc_state saved_state; + short unsigned int vc_num; + unsigned int vc_cols; + unsigned int vc_rows; + unsigned int vc_size_row; + unsigned int vc_scan_lines; + long unsigned int vc_origin; + long unsigned int vc_scr_end; + long unsigned int vc_visible_origin; + unsigned int vc_top; + unsigned int vc_bottom; + const struct consw *vc_sw; + short unsigned int *vc_screenbuf; + unsigned int vc_screenbuf_size; + unsigned char vc_mode; + unsigned char vc_attr; + unsigned char vc_def_color; + unsigned char vc_ulcolor; + unsigned char vc_itcolor; + unsigned char vc_halfcolor; + unsigned int vc_cursor_type; + short unsigned int vc_complement_mask; + short unsigned int vc_s_complement_mask; + long unsigned int vc_pos; + short unsigned int vc_hi_font_mask; + struct console_font vc_font; + short unsigned int vc_video_erase_char; + unsigned int vc_state; + unsigned int vc_npar; + unsigned int vc_par[16]; + struct vt_mode vt_mode; + struct pid *vt_pid; + int vt_newvt; + wait_queue_head_t paste_wait; + unsigned int vc_disp_ctrl: 1; + unsigned int vc_toggle_meta: 1; + unsigned int vc_decscnm: 1; + unsigned int vc_decom: 1; + unsigned int vc_decawm: 1; + unsigned int vc_deccm: 1; + unsigned int vc_decim: 1; + unsigned int vc_priv: 3; + unsigned int vc_need_wrap: 1; + unsigned int vc_can_do_color: 1; + unsigned int vc_report_mouse: 2; + unsigned char vc_utf: 1; + unsigned char vc_utf_count; + int vc_utf_char; + long unsigned int vc_tab_stop[4]; + unsigned char vc_palette[48]; + short unsigned int *vc_translate; + unsigned int vc_resize_user; + unsigned int vc_bell_pitch; + unsigned int vc_bell_duration; + short unsigned int vc_cur_blink_ms; + struct vc_data **vc_display_fg; + struct uni_pagedir *vc_uni_pagedir; + struct uni_pagedir **vc_uni_pagedir_loc; + struct uni_screen *vc_uni_screen; +}; + +struct linux_logo { + int type; + unsigned int width; + unsigned int height; + unsigned int clutsize; + const unsigned char *clut; + const unsigned char *data; +}; + +struct fb_fix_screeninfo { + char id[16]; + long unsigned int smem_start; + __u32 smem_len; + __u32 type; + __u32 type_aux; + __u32 visual; + __u16 xpanstep; + __u16 ypanstep; + __u16 ywrapstep; + 
__u32 line_length; + long unsigned int mmio_start; + __u32 mmio_len; + __u32 accel; + __u16 capabilities; + __u16 reserved[2]; +}; + +struct fb_bitfield { + __u32 offset; + __u32 length; + __u32 msb_right; +}; + +struct fb_var_screeninfo { + __u32 xres; + __u32 yres; + __u32 xres_virtual; + __u32 yres_virtual; + __u32 xoffset; + __u32 yoffset; + __u32 bits_per_pixel; + __u32 grayscale; + struct fb_bitfield red; + struct fb_bitfield green; + struct fb_bitfield blue; + struct fb_bitfield transp; + __u32 nonstd; + __u32 activate; + __u32 height; + __u32 width; + __u32 accel_flags; + __u32 pixclock; + __u32 left_margin; + __u32 right_margin; + __u32 upper_margin; + __u32 lower_margin; + __u32 hsync_len; + __u32 vsync_len; + __u32 sync; + __u32 vmode; + __u32 rotate; + __u32 colorspace; + __u32 reserved[4]; +}; + +struct fb_cmap { + __u32 start; + __u32 len; + __u16 *red; + __u16 *green; + __u16 *blue; + __u16 *transp; +}; + +enum { + FB_BLANK_UNBLANK = 0, + FB_BLANK_NORMAL = 1, + FB_BLANK_VSYNC_SUSPEND = 2, + FB_BLANK_HSYNC_SUSPEND = 3, + FB_BLANK_POWERDOWN = 4, +}; + +struct fb_copyarea { + __u32 dx; + __u32 dy; + __u32 width; + __u32 height; + __u32 sx; + __u32 sy; +}; + +struct fb_fillrect { + __u32 dx; + __u32 dy; + __u32 width; + __u32 height; + __u32 color; + __u32 rop; +}; + +struct fb_image { + __u32 dx; + __u32 dy; + __u32 width; + __u32 height; + __u32 fg_color; + __u32 bg_color; + __u8 depth; + const char *data; + struct fb_cmap cmap; +}; + +struct fbcurpos { + __u16 x; + __u16 y; +}; + +struct fb_cursor { + __u16 set; + __u16 enable; + __u16 rop; + const char *mask; + struct fbcurpos hot; + struct fb_image image; +}; + +struct fb_chroma { + __u32 redx; + __u32 greenx; + __u32 bluex; + __u32 whitex; + __u32 redy; + __u32 greeny; + __u32 bluey; + __u32 whitey; +}; + +struct fb_videomode; + +struct fb_monspecs { + struct fb_chroma chroma; + struct fb_videomode *modedb; + __u8 manufacturer[4]; + __u8 monitor[14]; + __u8 serial_no[14]; + __u8 ascii[14]; + __u32 modedb_len; + __u32 model; + __u32 serial; + __u32 year; + __u32 week; + __u32 hfmin; + __u32 hfmax; + __u32 dclkmin; + __u32 dclkmax; + __u16 input; + __u16 dpms; + __u16 signal; + __u16 vfmin; + __u16 vfmax; + __u16 gamma; + __u16 gtf: 1; + __u16 misc; + __u8 version; + __u8 revision; + __u8 max_x; + __u8 max_y; +}; + +struct fb_videomode { + const char *name; + u32 refresh; + u32 xres; + u32 yres; + u32 pixclock; + u32 left_margin; + u32 right_margin; + u32 upper_margin; + u32 lower_margin; + u32 hsync_len; + u32 vsync_len; + u32 sync; + u32 vmode; + u32 flag; +}; + +struct fb_info; + +struct fb_event { + struct fb_info *info; + void *data; +}; + +struct fb_pixmap { + u8 *addr; + u32 size; + u32 offset; + u32 buf_align; + u32 scan_align; + u32 access_align; + u32 flags; + u32 blit_x; + u32 blit_y; + void (*writeio)(struct fb_info *, void *, void *, unsigned int); + void (*readio)(struct fb_info *, void *, void *, unsigned int); +}; + +struct backlight_device; + +struct fb_deferred_io; + +struct fb_ops; + +struct fb_tile_ops; + +struct apertures_struct; + +struct fb_info { + atomic_t count; + int node; + int flags; + int fbcon_rotate_hint; + struct mutex lock; + struct mutex mm_lock; + struct fb_var_screeninfo var; + struct fb_fix_screeninfo fix; + struct fb_monspecs monspecs; + struct work_struct queue; + struct fb_pixmap pixmap; + struct fb_pixmap sprite; + struct fb_cmap cmap; + struct list_head modelist; + struct fb_videomode *mode; + struct backlight_device *bl_dev; + struct mutex bl_curve_mutex; + u8 bl_curve[128]; + 
struct delayed_work deferred_work; + struct fb_deferred_io *fbdefio; + const struct fb_ops *fbops; + struct device *device; + struct device *dev; + int class_flag; + struct fb_tile_ops *tileops; + union { + char *screen_base; + char *screen_buffer; + }; + long unsigned int screen_size; + void *pseudo_palette; + u32 state; + void *fbcon_par; + void *par; + struct apertures_struct *apertures; + bool skip_vt_switch; +}; + +struct fb_blit_caps { + u32 x; + u32 y; + u32 len; + u32 flags; +}; + +struct fb_deferred_io { + long unsigned int delay; + struct mutex lock; + struct list_head pagelist; + void (*first_io)(struct fb_info *); + void (*deferred_io)(struct fb_info *, struct list_head *); +}; + +struct fb_ops { + struct module *owner; + int (*fb_open)(struct fb_info *, int); + int (*fb_release)(struct fb_info *, int); + ssize_t (*fb_read)(struct fb_info *, char *, size_t, loff_t *); + ssize_t (*fb_write)(struct fb_info *, const char *, size_t, loff_t *); + int (*fb_check_var)(struct fb_var_screeninfo *, struct fb_info *); + int (*fb_set_par)(struct fb_info *); + int (*fb_setcolreg)(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, struct fb_info *); + int (*fb_setcmap)(struct fb_cmap *, struct fb_info *); + int (*fb_blank)(int, struct fb_info *); + int (*fb_pan_display)(struct fb_var_screeninfo *, struct fb_info *); + void (*fb_fillrect)(struct fb_info *, const struct fb_fillrect *); + void (*fb_copyarea)(struct fb_info *, const struct fb_copyarea *); + void (*fb_imageblit)(struct fb_info *, const struct fb_image *); + int (*fb_cursor)(struct fb_info *, struct fb_cursor *); + int (*fb_sync)(struct fb_info *); + int (*fb_ioctl)(struct fb_info *, unsigned int, long unsigned int); + int (*fb_compat_ioctl)(struct fb_info *, unsigned int, long unsigned int); + int (*fb_mmap)(struct fb_info *, struct vm_area_struct *); + void (*fb_get_caps)(struct fb_info *, struct fb_blit_caps *, struct fb_var_screeninfo *); + void (*fb_destroy)(struct fb_info *); + int (*fb_debug_enter)(struct fb_info *); + int (*fb_debug_leave)(struct fb_info *); +}; + +struct fb_tilemap { + __u32 width; + __u32 height; + __u32 depth; + __u32 length; + const __u8 *data; +}; + +struct fb_tilerect { + __u32 sx; + __u32 sy; + __u32 width; + __u32 height; + __u32 index; + __u32 fg; + __u32 bg; + __u32 rop; +}; + +struct fb_tilearea { + __u32 sx; + __u32 sy; + __u32 dx; + __u32 dy; + __u32 width; + __u32 height; +}; + +struct fb_tileblit { + __u32 sx; + __u32 sy; + __u32 width; + __u32 height; + __u32 fg; + __u32 bg; + __u32 length; + __u32 *indices; +}; + +struct fb_tilecursor { + __u32 sx; + __u32 sy; + __u32 mode; + __u32 shape; + __u32 fg; + __u32 bg; +}; + +struct fb_tile_ops { + void (*fb_settile)(struct fb_info *, struct fb_tilemap *); + void (*fb_tilecopy)(struct fb_info *, struct fb_tilearea *); + void (*fb_tilefill)(struct fb_info *, struct fb_tilerect *); + void (*fb_tileblit)(struct fb_info *, struct fb_tileblit *); + void (*fb_tilecursor)(struct fb_info *, struct fb_tilecursor *); + int (*fb_get_tilemax)(struct fb_info *); +}; + +struct aperture { + resource_size_t base; + resource_size_t size; +}; + +struct apertures_struct { + unsigned int count; + struct aperture ranges[0]; +}; + +enum backlight_type { + BACKLIGHT_RAW = 1, + BACKLIGHT_PLATFORM = 2, + BACKLIGHT_FIRMWARE = 3, + BACKLIGHT_TYPE_MAX = 4, +}; + +enum backlight_scale { + BACKLIGHT_SCALE_UNKNOWN = 0, + BACKLIGHT_SCALE_LINEAR = 1, + BACKLIGHT_SCALE_NON_LINEAR = 2, +}; + +struct backlight_properties { + int brightness; + int max_brightness; 
+ int power; + int fb_blank; + enum backlight_type type; + unsigned int state; + enum backlight_scale scale; +}; + +struct backlight_ops; + +struct backlight_device { + struct backlight_properties props; + struct mutex update_lock; + struct mutex ops_lock; + const struct backlight_ops *ops; + struct notifier_block fb_notif; + struct list_head entry; + struct device dev; + bool fb_bl_on[32]; + int use_count; +}; + +enum backlight_update_reason { + BACKLIGHT_UPDATE_HOTKEY = 0, + BACKLIGHT_UPDATE_SYSFS = 1, +}; + +enum backlight_notification { + BACKLIGHT_REGISTERED = 0, + BACKLIGHT_UNREGISTERED = 1, +}; + +struct backlight_ops { + unsigned int options; + int (*update_status)(struct backlight_device *); + int (*get_brightness)(struct backlight_device *); + int (*check_fb)(struct backlight_device *, struct fb_info *); +}; + +struct fb_cmap_user { + __u32 start; + __u32 len; + __u16 *red; + __u16 *green; + __u16 *blue; + __u16 *transp; +}; + +struct fb_modelist { + struct list_head list; + struct fb_videomode mode; +}; + +struct logo_data { + int depth; + int needs_directpalette; + int needs_truepalette; + int needs_cmapreset; + const struct linux_logo *logo; +}; + +struct fb_fix_screeninfo32 { + char id[16]; + compat_caddr_t smem_start; + u32 smem_len; + u32 type; + u32 type_aux; + u32 visual; + u16 xpanstep; + u16 ypanstep; + u16 ywrapstep; + u32 line_length; + compat_caddr_t mmio_start; + u32 mmio_len; + u32 accel; + u16 reserved[3]; +}; + +struct fb_cmap32 { + u32 start; + u32 len; + compat_caddr_t red; + compat_caddr_t green; + compat_caddr_t blue; + compat_caddr_t transp; +}; + +struct dmt_videomode { + u32 dmt_id; + u32 std_2byte_code; + u32 cvt_3byte_code; + const struct fb_videomode *mode; +}; + +enum display_flags { + DISPLAY_FLAGS_HSYNC_LOW = 1, + DISPLAY_FLAGS_HSYNC_HIGH = 2, + DISPLAY_FLAGS_VSYNC_LOW = 4, + DISPLAY_FLAGS_VSYNC_HIGH = 8, + DISPLAY_FLAGS_DE_LOW = 16, + DISPLAY_FLAGS_DE_HIGH = 32, + DISPLAY_FLAGS_PIXDATA_POSEDGE = 64, + DISPLAY_FLAGS_PIXDATA_NEGEDGE = 128, + DISPLAY_FLAGS_INTERLACED = 256, + DISPLAY_FLAGS_DOUBLESCAN = 512, + DISPLAY_FLAGS_DOUBLECLK = 1024, + DISPLAY_FLAGS_SYNC_POSEDGE = 2048, + DISPLAY_FLAGS_SYNC_NEGEDGE = 4096, +}; + +struct videomode { + long unsigned int pixelclock; + u32 hactive; + u32 hfront_porch; + u32 hback_porch; + u32 hsync_len; + u32 vactive; + u32 vfront_porch; + u32 vback_porch; + u32 vsync_len; + enum display_flags flags; +}; + +struct broken_edid { + u8 manufacturer[4]; + u32 model; + u32 fix; +}; + +struct __fb_timings { + u32 dclk; + u32 hfreq; + u32 vfreq; + u32 hactive; + u32 vactive; + u32 hblank; + u32 vblank; + u32 htotal; + u32 vtotal; +}; + +typedef unsigned int u_int; + +struct fb_cvt_data { + u32 xres; + u32 yres; + u32 refresh; + u32 f_refresh; + u32 pixclock; + u32 hperiod; + u32 hblank; + u32 hfreq; + u32 htotal; + u32 vtotal; + u32 vsync; + u32 hsync; + u32 h_front_porch; + u32 h_back_porch; + u32 v_front_porch; + u32 v_back_porch; + u32 h_margin; + u32 v_margin; + u32 interlace; + u32 aspect_ratio; + u32 active_pixels; + u32 flags; + u32 status; +}; + +typedef unsigned char u_char; + +typedef short unsigned int u_short; + +struct fb_con2fbmap { + __u32 console; + __u32 framebuffer; +}; + +struct vc { + struct vc_data *d; + struct work_struct SAK_work; +}; + +struct fbcon_display { + const u_char *fontdata; + int userfont; + u_short scrollmode; + u_short inverse; + short int yscroll; + int vrows; + int cursor_shape; + int con_rotate; + u32 xres_virtual; + u32 yres_virtual; + u32 height; + u32 width; + u32 bits_per_pixel; 
+ u32 grayscale; + u32 nonstd; + u32 accel_flags; + u32 rotate; + struct fb_bitfield red; + struct fb_bitfield green; + struct fb_bitfield blue; + struct fb_bitfield transp; + const struct fb_videomode *mode; +}; + +struct fbcon_ops { + void (*bmove)(struct vc_data *, struct fb_info *, int, int, int, int, int, int); + void (*clear)(struct vc_data *, struct fb_info *, int, int, int, int); + void (*putcs)(struct vc_data *, struct fb_info *, const short unsigned int *, int, int, int, int, int); + void (*clear_margins)(struct vc_data *, struct fb_info *, int, int); + void (*cursor)(struct vc_data *, struct fb_info *, int, int, int); + int (*update_start)(struct fb_info *); + int (*rotate_font)(struct fb_info *, struct vc_data *); + struct fb_var_screeninfo var; + struct timer_list cursor_timer; + struct fb_cursor cursor_state; + struct fbcon_display *p; + struct fb_info *info; + int currcon; + int cur_blink_jiffies; + int cursor_flash; + int cursor_reset; + int blank_state; + int graphics; + int save_graphics; + int flags; + int rotate; + int cur_rotate; + char *cursor_data; + u8 *fontbuffer; + u8 *fontdata; + u8 *cursor_src; + u32 cursor_size; + u32 fd_size; +}; + +enum { + FBCON_LOGO_CANSHOW = 4294967295, + FBCON_LOGO_DRAW = 4294967294, + FBCON_LOGO_DONTSHOW = 4294967293, +}; + +enum { + CLCD_CAP_RGB444 = 1, + CLCD_CAP_RGB5551 = 2, + CLCD_CAP_RGB565 = 4, + CLCD_CAP_RGB888 = 8, + CLCD_CAP_BGR444 = 16, + CLCD_CAP_BGR5551 = 32, + CLCD_CAP_BGR565 = 64, + CLCD_CAP_BGR888 = 128, + CLCD_CAP_444 = 17, + CLCD_CAP_5551 = 34, + CLCD_CAP_565 = 68, + CLCD_CAP_888 = 136, + CLCD_CAP_RGB = 15, + CLCD_CAP_BGR = 240, + CLCD_CAP_ALL = 255, +}; + +struct clcd_panel { + struct fb_videomode mode; + short int width; + short int height; + u32 tim2; + u32 tim3; + u32 cntl; + u32 caps; + unsigned int bpp: 8; + unsigned int fixedtimings: 1; + unsigned int grayscale: 1; + unsigned int connector; + struct backlight_device *backlight; + bool bgr_connection; +}; + +struct clcd_regs { + u32 tim0; + u32 tim1; + u32 tim2; + u32 tim3; + u32 cntl; + long unsigned int pixclock; +}; + +struct clcd_fb; + +struct clcd_board { + const char *name; + u32 caps; + int (*check)(struct clcd_fb *, struct fb_var_screeninfo *); + void (*decode)(struct clcd_fb *, struct clcd_regs *); + void (*disable)(struct clcd_fb *); + void (*enable)(struct clcd_fb *); + int (*setup)(struct clcd_fb *); + int (*mmap)(struct clcd_fb *, struct vm_area_struct *); + void (*remove)(struct clcd_fb *); +}; + +struct clcd_fb { + struct fb_info fb; + struct amba_device *dev; + struct clk *clk; + struct clcd_panel *panel; + struct clcd_board *board; + void *board_data; + void *regs; + u16 off_ienb; + u16 off_cntl; + u32 clcd_cntl; + u32 cmap[16]; + bool clk_enabled; +}; + +struct timing_entry { + u32 min; + u32 typ; + u32 max; +}; + +struct display_timing { + struct timing_entry pixelclock; + struct timing_entry hactive; + struct timing_entry hfront_porch; + struct timing_entry hback_porch; + struct timing_entry hsync_len; + struct timing_entry vactive; + struct timing_entry vfront_porch; + struct timing_entry vback_porch; + struct timing_entry vsync_len; + enum display_flags flags; +}; + +struct xenfb_update { + uint8_t type; + int32_t x; + int32_t y; + int32_t width; + int32_t height; +}; + +struct xenfb_resize { + uint8_t type; + int32_t width; + int32_t height; + int32_t stride; + int32_t depth; + int32_t offset; +}; + +union xenfb_out_event { + uint8_t type; + struct xenfb_update update; + struct xenfb_resize resize; + char pad[40]; +}; + +struct xenfb_page { + 
uint32_t in_cons; + uint32_t in_prod; + uint32_t out_cons; + uint32_t out_prod; + int32_t width; + int32_t height; + uint32_t line_length; + uint32_t mem_length; + uint8_t depth; + long unsigned int pd[256]; +}; + +enum xenbus_state { + XenbusStateUnknown = 0, + XenbusStateInitialising = 1, + XenbusStateInitWait = 2, + XenbusStateInitialised = 3, + XenbusStateConnected = 4, + XenbusStateClosing = 5, + XenbusStateClosed = 6, + XenbusStateReconfiguring = 7, + XenbusStateReconfigured = 8, +}; + +struct xenbus_watch { + struct list_head list; + const char *node; + unsigned int nr_pending; + bool (*will_handle)(struct xenbus_watch *, const char *, const char *); + void (*callback)(struct xenbus_watch *, const char *, const char *); +}; + +struct xenbus_device { + const char *devicetype; + const char *nodename; + const char *otherend; + int otherend_id; + struct xenbus_watch otherend_watch; + struct device dev; + enum xenbus_state state; + struct completion down; + struct work_struct work; + struct semaphore reclaim_sem; +}; + +struct xenbus_device_id { + char devicetype[32]; +}; + +struct xenbus_driver { + const char *name; + const struct xenbus_device_id *ids; + bool allow_rebind; + int (*probe)(struct xenbus_device *, const struct xenbus_device_id *); + void (*otherend_changed)(struct xenbus_device *, enum xenbus_state); + int (*remove)(struct xenbus_device *); + int (*suspend)(struct xenbus_device *); + int (*resume)(struct xenbus_device *); + int (*uevent)(struct xenbus_device *, struct kobj_uevent_env *); + struct device_driver driver; + int (*read_otherend_details)(struct xenbus_device *); + int (*is_ready)(struct xenbus_device *); + void (*reclaim_memory)(struct xenbus_device *); +}; + +struct xenbus_transaction { + u32 id; +}; + +struct xenfb_info { + unsigned char *fb; + struct fb_info *fb_info; + int x1; + int y1; + int x2; + int y2; + spinlock_t dirty_lock; + int nr_pages; + int irq; + struct xenfb_page *page; + long unsigned int *gfns; + int update_wanted; + int feature_resize; + struct xenfb_resize resize; + int resize_dpy; + spinlock_t resize_lock; + struct xenbus_device *xbdev; +}; + +enum { + KPARAM_MEM = 0, + KPARAM_WIDTH = 1, + KPARAM_HEIGHT = 2, + KPARAM_CNT = 3, +}; + +struct acpi_table_bgrt { + struct acpi_table_header header; + u16 version; + u8 status; + u8 image_type; + u64 image_address; + u32 image_offset_x; + u32 image_offset_y; +}; + +enum drm_panel_orientation { + DRM_MODE_PANEL_ORIENTATION_UNKNOWN = 4294967295, + DRM_MODE_PANEL_ORIENTATION_NORMAL = 0, + DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP = 1, + DRM_MODE_PANEL_ORIENTATION_LEFT_UP = 2, + DRM_MODE_PANEL_ORIENTATION_RIGHT_UP = 3, +}; + +struct bmp_file_header { + u16 id; + u32 file_size; + u32 reserved; + u32 bitmap_offset; +} __attribute__((packed)); + +struct bmp_dib_header { + u32 dib_header_size; + s32 width; + s32 height; + u16 planes; + u16 bpp; + u32 compression; + u32 bitmap_size; + u32 horz_resolution; + u32 vert_resolution; + u32 colors_used; + u32 colors_important; +}; + +struct simplefb_format { + const char *name; + u32 bits_per_pixel; + struct fb_bitfield red; + struct fb_bitfield green; + struct fb_bitfield blue; + struct fb_bitfield transp; + u32 fourcc; +}; + +struct simplefb_platform_data { + u32 width; + u32 height; + u32 stride; + const char *format; +}; + +struct simplefb_params { + u32 width; + u32 height; + u32 stride; + struct simplefb_format *format; +}; + +struct simplefb_par { + u32 palette[16]; + bool clks_enabled; + unsigned int clk_count; + struct clk **clks; + bool regulators_enabled; + 
u32 regulator_count; + struct regulator **regulators; +}; + +struct display_timings { + unsigned int num_timings; + unsigned int native_mode; + struct display_timing **timings; +}; + +enum ipmi_addr_src { + SI_INVALID = 0, + SI_HOTMOD = 1, + SI_HARDCODED = 2, + SI_SPMI = 3, + SI_ACPI = 4, + SI_SMBIOS = 5, + SI_PCI = 6, + SI_DEVICETREE = 7, + SI_PLATFORM = 8, + SI_LAST = 9, +}; + +struct dmi_header { + u8 type; + u8 length; + u16 handle; +}; + +enum si_type { + SI_TYPE_INVALID = 0, + SI_KCS = 1, + SI_SMIC = 2, + SI_BT = 3, +}; + +enum ipmi_addr_space { + IPMI_IO_ADDR_SPACE = 0, + IPMI_MEM_ADDR_SPACE = 1, +}; + +enum ipmi_plat_interface_type { + IPMI_PLAT_IF_SI = 0, + IPMI_PLAT_IF_SSIF = 1, +}; + +struct ipmi_plat_data { + enum ipmi_plat_interface_type iftype; + unsigned int type; + unsigned int space; + long unsigned int addr; + unsigned int regspacing; + unsigned int regsize; + unsigned int regshift; + unsigned int irq; + unsigned int slave_addr; + enum ipmi_addr_src addr_source; +}; + +struct ipmi_dmi_info { + enum si_type si_type; + unsigned int space; + long unsigned int addr; + u8 slave_addr; + struct ipmi_dmi_info *next; +}; + +typedef u16 acpi_owner_id; + +union acpi_name_union { + u32 integer; + char ascii[4]; +}; + +struct acpi_table_desc { + acpi_physical_address address; + struct acpi_table_header *pointer; + u32 length; + union acpi_name_union signature; + acpi_owner_id owner_id; + u8 flags; + u16 validation_count; +}; + +struct acpi_madt_local_apic { + struct acpi_subtable_header header; + u8 processor_id; + u8 id; + u32 lapic_flags; +}; + +struct acpi_madt_io_apic { + struct acpi_subtable_header header; + u8 id; + u8 reserved; + u32 address; + u32 global_irq_base; +}; + +struct acpi_madt_interrupt_override { + struct acpi_subtable_header header; + u8 bus; + u8 source_irq; + u32 global_irq; + u16 inti_flags; +} __attribute__((packed)); + +struct acpi_madt_nmi_source { + struct acpi_subtable_header header; + u16 inti_flags; + u32 global_irq; +}; + +struct acpi_madt_local_apic_nmi { + struct acpi_subtable_header header; + u8 processor_id; + u16 inti_flags; + u8 lint; +} __attribute__((packed)); + +struct acpi_madt_local_apic_override { + struct acpi_subtable_header header; + u16 reserved; + u64 address; +} __attribute__((packed)); + +struct acpi_madt_io_sapic { + struct acpi_subtable_header header; + u8 id; + u8 reserved; + u32 global_irq_base; + u64 address; +}; + +struct acpi_madt_local_sapic { + struct acpi_subtable_header header; + u8 processor_id; + u8 id; + u8 eid; + u8 reserved[3]; + u32 lapic_flags; + u32 uid; + char uid_string[1]; +} __attribute__((packed)); + +struct acpi_madt_interrupt_source { + struct acpi_subtable_header header; + u16 inti_flags; + u8 type; + u8 id; + u8 eid; + u8 io_sapic_vector; + u32 global_irq; + u32 flags; +}; + +struct acpi_madt_local_x2apic { + struct acpi_subtable_header header; + u16 reserved; + u32 local_apic_id; + u32 lapic_flags; + u32 uid; +}; + +struct acpi_madt_local_x2apic_nmi { + struct acpi_subtable_header header; + u16 inti_flags; + u32 uid; + u8 lint; + u8 reserved[3]; +}; + +struct acpi_subtable_proc { + int id; + acpi_tbl_entry_handler handler; + int count; +}; + +enum acpi_subtable_type { + ACPI_SUBTABLE_COMMON = 0, + ACPI_SUBTABLE_HMAT = 1, +}; + +struct acpi_subtable_entry { + union acpi_subtable_headers *hdr; + enum acpi_subtable_type type; +}; + +typedef char *acpi_string; + +struct acpi_osi_entry { + char string[64]; + bool enable; +}; + +struct acpi_osi_config { + u8 default_disabling; + unsigned int linux_enable: 1; + 
unsigned int linux_dmi: 1; + unsigned int linux_cmdline: 1; + unsigned int darwin_enable: 1; + unsigned int darwin_dmi: 1; + unsigned int darwin_cmdline: 1; +}; + +struct acpi_predefined_names { + const char *name; + u8 type; + char *val; +}; + +typedef u32 (*acpi_osd_handler)(void *); + +typedef void (*acpi_osd_exec_callback)(void *); + +typedef u32 (*acpi_gpe_handler)(acpi_handle, u32, void *); + +typedef void (*acpi_notify_handler)(acpi_handle, u32, void *); + +typedef void (*acpi_object_handler)(acpi_handle, void *); + +typedef acpi_status (*acpi_adr_space_handler)(u32, acpi_physical_address, u32, u64 *, void *, void *); + +typedef acpi_status (*acpi_adr_space_setup)(acpi_handle, u32, void *, void **); + +struct acpi_pci_id { + u16 segment; + u16 bus; + u16 device; + u16 function; +}; + +struct acpi_mem_mapping { + acpi_physical_address physical_address; + u8 *logical_address; + acpi_size length; + struct acpi_mem_mapping *next_mm; +}; + +struct acpi_mem_space_context { + u32 length; + acpi_physical_address address; + struct acpi_mem_mapping *cur_mm; + struct acpi_mem_mapping *first_mm; +}; + +typedef enum { + OSL_GLOBAL_LOCK_HANDLER = 0, + OSL_NOTIFY_HANDLER = 1, + OSL_GPE_HANDLER = 2, + OSL_DEBUGGER_MAIN_THREAD = 3, + OSL_DEBUGGER_EXEC_THREAD = 4, + OSL_EC_POLL_HANDLER = 5, + OSL_EC_BURST_HANDLER = 6, +} acpi_execute_type; + +union acpi_operand_object; + +struct acpi_namespace_node { + union acpi_operand_object *object; + u8 descriptor_type; + u8 type; + u16 flags; + union acpi_name_union name; + struct acpi_namespace_node *parent; + struct acpi_namespace_node *child; + struct acpi_namespace_node *peer; + acpi_owner_id owner_id; +}; + +struct acpi_object_common { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; +}; + +struct acpi_object_integer { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 fill[3]; + u64 value; +}; + +struct acpi_object_string { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + char *pointer; + u32 length; +}; + +struct acpi_object_buffer { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 *pointer; + u32 length; + u32 aml_length; + u8 *aml_start; + struct acpi_namespace_node *node; +}; + +struct acpi_object_package { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + struct acpi_namespace_node *node; + union acpi_operand_object **elements; + u8 *aml_start; + u32 aml_length; + u32 count; +}; + +struct acpi_object_event { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + void *os_semaphore; +}; + +struct acpi_walk_state; + +typedef acpi_status (*acpi_internal_method)(struct acpi_walk_state *); + +struct acpi_object_method { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 info_flags; + u8 param_count; + u8 sync_level; + union acpi_operand_object *mutex; + union acpi_operand_object *node; + u8 *aml_start; + union { + acpi_internal_method implementation; + union acpi_operand_object *handler; + } dispatch; + u32 aml_length; + acpi_owner_id owner_id; + u8 thread_count; +}; + +struct acpi_thread_state; + +struct acpi_object_mutex { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 
flags; + u8 sync_level; + u16 acquisition_depth; + void *os_mutex; + u64 thread_id; + struct acpi_thread_state *owner_thread; + union acpi_operand_object *prev; + union acpi_operand_object *next; + struct acpi_namespace_node *node; + u8 original_sync_level; +}; + +struct acpi_object_region { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 space_id; + struct acpi_namespace_node *node; + union acpi_operand_object *handler; + union acpi_operand_object *next; + acpi_physical_address address; + u32 length; +}; + +struct acpi_object_notify_common { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + union acpi_operand_object *notify_list[2]; + union acpi_operand_object *handler; +}; + +struct acpi_gpe_block_info; + +struct acpi_object_device { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + union acpi_operand_object *notify_list[2]; + union acpi_operand_object *handler; + struct acpi_gpe_block_info *gpe_block; +}; + +struct acpi_object_power_resource { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + union acpi_operand_object *notify_list[2]; + union acpi_operand_object *handler; + u32 system_level; + u32 resource_order; +}; + +struct acpi_object_processor { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 proc_id; + u8 length; + union acpi_operand_object *notify_list[2]; + union acpi_operand_object *handler; + acpi_io_address address; +}; + +struct acpi_object_thermal_zone { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + union acpi_operand_object *notify_list[2]; + union acpi_operand_object *handler; +}; + +struct acpi_object_field_common { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 field_flags; + u8 attribute; + u8 access_byte_width; + struct acpi_namespace_node *node; + u32 bit_length; + u32 base_byte_offset; + u32 value; + u8 start_field_bit_offset; + u8 access_length; + union acpi_operand_object *region_obj; +}; + +struct acpi_object_region_field { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 field_flags; + u8 attribute; + u8 access_byte_width; + struct acpi_namespace_node *node; + u32 bit_length; + u32 base_byte_offset; + u32 value; + u8 start_field_bit_offset; + u8 access_length; + u16 resource_length; + union acpi_operand_object *region_obj; + u8 *resource_buffer; + u16 pin_number_index; + u8 *internal_pcc_buffer; +}; + +struct acpi_object_buffer_field { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 field_flags; + u8 attribute; + u8 access_byte_width; + struct acpi_namespace_node *node; + u32 bit_length; + u32 base_byte_offset; + u32 value; + u8 start_field_bit_offset; + u8 access_length; + u8 is_create_field; + union acpi_operand_object *buffer_obj; +}; + +struct acpi_object_bank_field { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 field_flags; + u8 attribute; + u8 access_byte_width; + struct acpi_namespace_node *node; + u32 bit_length; + u32 base_byte_offset; + u32 value; + u8 start_field_bit_offset; + u8 access_length; + union 
acpi_operand_object *region_obj; + union acpi_operand_object *bank_obj; +}; + +struct acpi_object_index_field { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 field_flags; + u8 attribute; + u8 access_byte_width; + struct acpi_namespace_node *node; + u32 bit_length; + u32 base_byte_offset; + u32 value; + u8 start_field_bit_offset; + u8 access_length; + union acpi_operand_object *index_obj; + union acpi_operand_object *data_obj; +}; + +struct acpi_object_notify_handler { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + struct acpi_namespace_node *node; + u32 handler_type; + acpi_notify_handler handler; + void *context; + union acpi_operand_object *next[2]; +}; + +struct acpi_object_addr_handler { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 space_id; + u8 handler_flags; + acpi_adr_space_handler handler; + struct acpi_namespace_node *node; + void *context; + acpi_adr_space_setup setup; + union acpi_operand_object *region_list; + union acpi_operand_object *next; +}; + +struct acpi_object_reference { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + u8 class; + u8 target_type; + u8 resolved; + void *object; + struct acpi_namespace_node *node; + union acpi_operand_object **where; + u8 *index_pointer; + u8 *aml; + u32 value; +}; + +struct acpi_object_extra { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + struct acpi_namespace_node *method_REG; + struct acpi_namespace_node *scope_node; + void *region_context; + u8 *aml_start; + u32 aml_length; +}; + +struct acpi_object_data { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + acpi_object_handler handler; + void *pointer; +}; + +struct acpi_object_cache_list { + union acpi_operand_object *next_object; + u8 descriptor_type; + u8 type; + u16 reference_count; + u8 flags; + union acpi_operand_object *next; +}; + +union acpi_operand_object { + struct acpi_object_common common; + struct acpi_object_integer integer; + struct acpi_object_string string; + struct acpi_object_buffer buffer; + struct acpi_object_package package; + struct acpi_object_event event; + struct acpi_object_method method; + struct acpi_object_mutex mutex; + struct acpi_object_region region; + struct acpi_object_notify_common common_notify; + struct acpi_object_device device; + struct acpi_object_power_resource power_resource; + struct acpi_object_processor processor; + struct acpi_object_thermal_zone thermal_zone; + struct acpi_object_field_common common_field; + struct acpi_object_region_field field; + struct acpi_object_buffer_field buffer_field; + struct acpi_object_bank_field bank_field; + struct acpi_object_index_field index_field; + struct acpi_object_notify_handler notify; + struct acpi_object_addr_handler address_space; + struct acpi_object_reference reference; + struct acpi_object_extra extra; + struct acpi_object_data data; + struct acpi_object_cache_list cache; + struct acpi_namespace_node node; +}; + +union acpi_parse_object; + +union acpi_generic_state; + +struct acpi_parse_state { + u8 *aml_start; + u8 *aml; + u8 *aml_end; + u8 *pkg_start; + u8 *pkg_end; + union acpi_parse_object *start_op; + struct acpi_namespace_node *start_node; + union acpi_generic_state *scope; + union 
acpi_parse_object *start_scope; + u32 aml_size; +}; + +typedef acpi_status (*acpi_parse_downwards)(struct acpi_walk_state *, union acpi_parse_object **); + +typedef acpi_status (*acpi_parse_upwards)(struct acpi_walk_state *); + +struct acpi_opcode_info; + +struct acpi_walk_state { + struct acpi_walk_state *next; + u8 descriptor_type; + u8 walk_type; + u16 opcode; + u8 next_op_info; + u8 num_operands; + u8 operand_index; + acpi_owner_id owner_id; + u8 last_predicate; + u8 current_result; + u8 return_used; + u8 scope_depth; + u8 pass_number; + u8 namespace_override; + u8 result_size; + u8 result_count; + u8 *aml; + u32 arg_types; + u32 method_breakpoint; + u32 user_breakpoint; + u32 parse_flags; + struct acpi_parse_state parser_state; + u32 prev_arg_types; + u32 arg_count; + u16 method_nesting_depth; + u8 method_is_nested; + struct acpi_namespace_node arguments[7]; + struct acpi_namespace_node local_variables[8]; + union acpi_operand_object *operands[9]; + union acpi_operand_object **params; + u8 *aml_last_while; + union acpi_operand_object **caller_return_desc; + union acpi_generic_state *control_state; + struct acpi_namespace_node *deferred_node; + union acpi_operand_object *implicit_return_obj; + struct acpi_namespace_node *method_call_node; + union acpi_parse_object *method_call_op; + union acpi_operand_object *method_desc; + struct acpi_namespace_node *method_node; + char *method_pathname; + union acpi_parse_object *op; + const struct acpi_opcode_info *op_info; + union acpi_parse_object *origin; + union acpi_operand_object *result_obj; + union acpi_generic_state *results; + union acpi_operand_object *return_desc; + union acpi_generic_state *scope_info; + union acpi_parse_object *prev_op; + union acpi_parse_object *next_op; + struct acpi_thread_state *thread; + acpi_parse_downwards descending_callback; + acpi_parse_upwards ascending_callback; +}; + +struct acpi_gpe_handler_info { + acpi_gpe_handler address; + void *context; + struct acpi_namespace_node *method_node; + u8 original_flags; + u8 originally_enabled; +}; + +struct acpi_gpe_notify_info { + struct acpi_namespace_node *device_node; + struct acpi_gpe_notify_info *next; +}; + +union acpi_gpe_dispatch_info { + struct acpi_namespace_node *method_node; + struct acpi_gpe_handler_info *handler; + struct acpi_gpe_notify_info *notify_list; +}; + +struct acpi_gpe_register_info; + +struct acpi_gpe_event_info { + union acpi_gpe_dispatch_info dispatch; + struct acpi_gpe_register_info *register_info; + u8 flags; + u8 gpe_number; + u8 runtime_count; + u8 disable_for_dispatch; +}; + +struct acpi_gpe_address { + u8 space_id; + u64 address; +}; + +struct acpi_gpe_register_info { + struct acpi_gpe_address status_address; + struct acpi_gpe_address enable_address; + u16 base_gpe_number; + u8 enable_for_wake; + u8 enable_for_run; + u8 mask_for_run; + u8 enable_mask; +}; + +struct acpi_gpe_xrupt_info; + +struct acpi_gpe_block_info { + struct acpi_namespace_node *node; + struct acpi_gpe_block_info *previous; + struct acpi_gpe_block_info *next; + struct acpi_gpe_xrupt_info *xrupt_block; + struct acpi_gpe_register_info *register_info; + struct acpi_gpe_event_info *event_info; + u64 address; + u32 register_count; + u16 gpe_count; + u16 block_base_number; + u8 space_id; + u8 initialized; +}; + +struct acpi_gpe_xrupt_info { + struct acpi_gpe_xrupt_info *previous; + struct acpi_gpe_xrupt_info *next; + struct acpi_gpe_block_info *gpe_block_list_head; + u32 interrupt_number; +}; + +struct acpi_common_state { + void *next; + u8 descriptor_type; + u8 flags; + u16 
value; + u16 state; +}; + +struct acpi_update_state { + void *next; + u8 descriptor_type; + u8 flags; + u16 value; + u16 state; + union acpi_operand_object *object; +}; + +struct acpi_pkg_state { + void *next; + u8 descriptor_type; + u8 flags; + u16 value; + u16 state; + u32 index; + union acpi_operand_object *source_object; + union acpi_operand_object *dest_object; + struct acpi_walk_state *walk_state; + void *this_target_obj; + u32 num_packages; +}; + +struct acpi_control_state { + void *next; + u8 descriptor_type; + u8 flags; + u16 value; + u16 state; + u16 opcode; + union acpi_parse_object *predicate_op; + u8 *aml_predicate_start; + u8 *package_end; + u64 loop_timeout; +}; + +union acpi_parse_value { + u64 integer; + u32 size; + char *string; + u8 *buffer; + char *name; + union acpi_parse_object *arg; +}; + +struct acpi_parse_obj_common { + union acpi_parse_object *parent; + u8 descriptor_type; + u8 flags; + u16 aml_opcode; + u8 *aml; + union acpi_parse_object *next; + struct acpi_namespace_node *node; + union acpi_parse_value value; + u8 arg_list_length; +}; + +struct acpi_parse_obj_named { + union acpi_parse_object *parent; + u8 descriptor_type; + u8 flags; + u16 aml_opcode; + u8 *aml; + union acpi_parse_object *next; + struct acpi_namespace_node *node; + union acpi_parse_value value; + u8 arg_list_length; + char *path; + u8 *data; + u32 length; + u32 name; +}; + +struct acpi_parse_obj_asl { + union acpi_parse_object *parent; + u8 descriptor_type; + u8 flags; + u16 aml_opcode; + u8 *aml; + union acpi_parse_object *next; + struct acpi_namespace_node *node; + union acpi_parse_value value; + u8 arg_list_length; + union acpi_parse_object *child; + union acpi_parse_object *parent_method; + char *filename; + u8 file_changed; + char *parent_filename; + char *external_name; + char *namepath; + char name_seg[4]; + u32 extra_value; + u32 column; + u32 line_number; + u32 logical_line_number; + u32 logical_byte_offset; + u32 end_line; + u32 end_logical_line; + u32 acpi_btype; + u32 aml_length; + u32 aml_subtree_length; + u32 final_aml_length; + u32 final_aml_offset; + u32 compile_flags; + u16 parse_opcode; + u8 aml_opcode_length; + u8 aml_pkg_len_bytes; + u8 extra; + char parse_op_name[20]; +}; + +union acpi_parse_object { + struct acpi_parse_obj_common common; + struct acpi_parse_obj_named named; + struct acpi_parse_obj_asl asl; +}; + +struct acpi_scope_state { + void *next; + u8 descriptor_type; + u8 flags; + u16 value; + u16 state; + struct acpi_namespace_node *node; +}; + +struct acpi_pscope_state { + void *next; + u8 descriptor_type; + u8 flags; + u16 value; + u16 state; + u32 arg_count; + union acpi_parse_object *op; + u8 *arg_end; + u8 *pkg_end; + u32 arg_list; +}; + +struct acpi_thread_state { + void *next; + u8 descriptor_type; + u8 flags; + u16 value; + u16 state; + u8 current_sync_level; + struct acpi_walk_state *walk_state_list; + union acpi_operand_object *acquired_mutex_list; + u64 thread_id; +}; + +struct acpi_result_values { + void *next; + u8 descriptor_type; + u8 flags; + u16 value; + u16 state; + union acpi_operand_object *obj_desc[8]; +}; + +struct acpi_global_notify_handler { + acpi_notify_handler handler; + void *context; +}; + +struct acpi_notify_info { + void *next; + u8 descriptor_type; + u8 flags; + u16 value; + u16 state; + u8 handler_list_id; + struct acpi_namespace_node *node; + union acpi_operand_object *handler_list_head; + struct acpi_global_notify_handler *global; +}; + +union acpi_generic_state { + struct acpi_common_state common; + struct acpi_control_state 
control; + struct acpi_update_state update; + struct acpi_scope_state scope; + struct acpi_pscope_state parse_scope; + struct acpi_pkg_state pkg; + struct acpi_thread_state thread; + struct acpi_result_values results; + struct acpi_notify_info notify; +}; + +struct acpi_opcode_info { + u32 parse_args; + u32 runtime_args; + u16 flags; + u8 object_type; + u8 class; + u8 type; +}; + +struct acpi_os_dpc { + acpi_osd_exec_callback function; + void *context; + struct work_struct work; +}; + +struct acpi_ioremap { + struct list_head list; + void *virt; + acpi_physical_address phys; + acpi_size size; + union { + long unsigned int refcount; + struct rcu_work rwork; + } track; +}; + +struct acpi_hp_work { + struct work_struct work; + struct acpi_device *adev; + u32 src; +}; + +struct acpi_object_list { + u32 count; + union acpi_object *pointer; +}; + +struct acpi_pld_info { + u8 revision; + u8 ignore_color; + u8 red; + u8 green; + u8 blue; + u16 width; + u16 height; + u8 user_visible; + u8 dock; + u8 lid; + u8 panel; + u8 vertical_position; + u8 horizontal_position; + u8 shape; + u8 group_orientation; + u8 group_token; + u8 group_position; + u8 bay; + u8 ejectable; + u8 ospm_eject_required; + u8 cabinet_number; + u8 card_cage_number; + u8 reference; + u8 rotation; + u8 order; + u8 reserved; + u16 vertical_offset; + u16 horizontal_offset; +}; + +struct acpi_handle_list { + u32 count; + acpi_handle handles[10]; +}; + +enum acpi_predicate { + all_versions = 0, + less_than_or_equal = 1, + equal = 2, + greater_than_or_equal = 3, +}; + +struct acpi_platform_list { + char oem_id[7]; + char oem_table_id[9]; + u32 oem_revision; + char *table; + enum acpi_predicate pred; + char *reason; + u32 data; +}; + +struct acpi_device_bus_id { + const char *bus_id; + unsigned int instance_no; + struct list_head node; +}; + +struct acpi_dev_match_info { + struct acpi_device_id hid[2]; + const char *uid; + s64 hrv; +}; + +struct nvs_region { + __u64 phys_start; + __u64 size; + struct list_head node; +}; + +struct acpi_wakeup_handler { + struct list_head list_node; + bool (*wakeup)(void *); + void *context; +}; + +struct acpi_hardware_id { + struct list_head list; + const char *id; +}; + +struct acpi_data_node { + const char *name; + acpi_handle handle; + struct fwnode_handle fwnode; + struct fwnode_handle *parent; + struct acpi_device_data data; + struct list_head sibling; + struct kobject kobj; + struct completion kobj_done; +}; + +struct acpi_data_node_attr { + struct attribute attr; + ssize_t (*show)(struct acpi_data_node *, char *); + ssize_t (*store)(struct acpi_data_node *, const char *, size_t); +}; + +struct pm_domain_data { + struct list_head list_node; + struct device *dev; +}; + +typedef u32 (*acpi_event_handler)(void *); + +enum acpi_bus_device_type { + ACPI_BUS_TYPE_DEVICE = 0, + ACPI_BUS_TYPE_POWER = 1, + ACPI_BUS_TYPE_PROCESSOR = 2, + ACPI_BUS_TYPE_THERMAL = 3, + ACPI_BUS_TYPE_POWER_BUTTON = 4, + ACPI_BUS_TYPE_SLEEP_BUTTON = 5, + ACPI_BUS_TYPE_ECDT_EC = 6, + ACPI_BUS_DEVICE_TYPE_COUNT = 7, +}; + +struct acpi_device_physical_node { + unsigned int node_id; + struct list_head node; + struct device *dev; + bool put_online: 1; +}; + +struct acpi_osc_context { + char *uuid_str; + int rev; + struct acpi_buffer cap; + struct acpi_buffer ret; +}; + +struct acpi_pnp_device_id { + u32 length; + char *string; +}; + +struct acpi_pnp_device_id_list { + u32 count; + u32 list_size; + struct acpi_pnp_device_id ids[0]; +}; + +struct acpi_device_info { + u32 info_size; + u32 name; + acpi_object_type type; + u8 param_count; + 
u16 valid; + u8 flags; + u8 highest_dstates[4]; + u8 lowest_dstates[5]; + u64 address; + struct acpi_pnp_device_id hardware_id; + struct acpi_pnp_device_id unique_id; + struct acpi_pnp_device_id class_code; + struct acpi_pnp_device_id_list compatible_id_list; +}; + +struct acpi_table_spcr { + struct acpi_table_header header; + u8 interface_type; + u8 reserved[3]; + struct acpi_generic_address serial_port; + u8 interrupt_type; + u8 pc_interrupt; + u32 interrupt; + u8 baud_rate; + u8 parity; + u8 stop_bits; + u8 flow_control; + u8 terminal_type; + u8 reserved1; + u16 pci_device_id; + u16 pci_vendor_id; + u8 pci_bus; + u8 pci_device; + u8 pci_function; + u32 pci_flags; + u8 pci_segment; + u32 reserved2; +} __attribute__((packed)); + +struct acpi_table_stao { + struct acpi_table_header header; + u8 ignore_uart; +} __attribute__((packed)); + +enum acpi_reconfig_event { + ACPI_RECONFIG_DEVICE_ADD = 0, + ACPI_RECONFIG_DEVICE_REMOVE = 1, +}; + +struct acpi_dep_data { + struct list_head node; + acpi_handle master; + acpi_handle slave; +}; + +struct acpi_table_events_work { + struct work_struct work; + void *table; + u32 event; +}; + +struct resource_win { + struct resource res; + resource_size_t offset; +}; + +struct res_proc_context { + struct list_head *list; + int (*preproc)(struct acpi_resource *, void *); + void *preproc_data; + int count; + int error; +}; + +struct acpi_processor_errata { + u8 smp; + struct { + u8 throttle: 1; + u8 fdma: 1; + u8 reserved: 6; + u32 bmisx; + } piix4; +}; + +typedef u32 acpi_event_status; + +struct acpi_table_ecdt { + struct acpi_table_header header; + struct acpi_generic_address control; + struct acpi_generic_address data; + u32 uid; + u8 gpe; + u8 id[1]; +} __attribute__((packed)); + +struct transaction; + +struct acpi_ec { + acpi_handle handle; + int gpe; + int irq; + long unsigned int command_addr; + long unsigned int data_addr; + bool global_lock; + long unsigned int flags; + long unsigned int reference_count; + struct mutex mutex; + wait_queue_head_t wait; + struct list_head list; + struct transaction *curr; + spinlock_t lock; + struct work_struct work; + long unsigned int timestamp; + long unsigned int nr_pending_queries; + bool busy_polling; + unsigned int polling_guard; +}; + +struct transaction { + const u8 *wdata; + u8 *rdata; + short unsigned int irq_count; + u8 command; + u8 wi; + u8 ri; + u8 wlen; + u8 rlen; + u8 flags; +}; + +typedef int (*acpi_ec_query_func)(void *); + +enum ec_command { + ACPI_EC_COMMAND_READ = 128, + ACPI_EC_COMMAND_WRITE = 129, + ACPI_EC_BURST_ENABLE = 130, + ACPI_EC_BURST_DISABLE = 131, + ACPI_EC_COMMAND_QUERY = 132, +}; + +enum { + EC_FLAGS_QUERY_ENABLED = 0, + EC_FLAGS_QUERY_PENDING = 1, + EC_FLAGS_QUERY_GUARDING = 2, + EC_FLAGS_EVENT_HANDLER_INSTALLED = 3, + EC_FLAGS_EC_HANDLER_INSTALLED = 4, + EC_FLAGS_QUERY_METHODS_INSTALLED = 5, + EC_FLAGS_STARTED = 6, + EC_FLAGS_STOPPED = 7, + EC_FLAGS_EVENTS_MASKED = 8, +}; + +struct acpi_ec_query_handler { + struct list_head node; + acpi_ec_query_func func; + acpi_handle handle; + void *data; + u8 query_bit; + struct kref kref; +}; + +struct acpi_ec_query { + struct transaction transaction; + struct work_struct work; + struct acpi_ec_query_handler *handler; +}; + +struct dock_station { + acpi_handle handle; + long unsigned int last_dock_time; + u32 flags; + struct list_head dependent_devices; + struct list_head sibling; + struct platform_device *dock_device; +}; + +struct dock_dependent_device { + struct list_head list; + struct acpi_device *adev; +}; + +enum dock_callback_type { + 
DOCK_CALL_HANDLER = 0, + DOCK_CALL_FIXUP = 1, + DOCK_CALL_UEVENT = 2, +}; + +struct pci_osc_bit_struct { + u32 bit; + char *desc; +}; + +struct acpi_handle_node { + struct list_head node; + acpi_handle handle; +}; + +struct acpi_pci_link_irq { + u32 active; + u8 triggering; + u8 polarity; + u8 resource_type; + u8 possible_count; + u32 possible[16]; + u8 initialized: 1; + u8 reserved: 7; +}; + +struct acpi_pci_link { + struct list_head list; + struct acpi_device *device; + struct acpi_pci_link_irq irq; + int refcnt; +}; + +struct acpi_pci_routing_table { + u32 length; + u32 pin; + u64 address; + u32 source_index; + char source[4]; +}; + +struct acpi_prt_entry { + struct acpi_pci_id id; + u8 pin; + acpi_handle link; + u32 index; +}; + +struct prt_quirk { + const struct dmi_system_id *system; + unsigned int segment; + unsigned int bus; + unsigned int device; + unsigned char pin; + const char *source; + const char *actual_source; +}; + +struct apd_private_data; + +struct apd_device_desc { + unsigned int fixed_clk_rate; + struct property_entry *properties; + int (*setup)(struct apd_private_data *); +}; + +struct apd_private_data { + struct clk *clk; + struct acpi_device *adev; + const struct apd_device_desc *dev_desc; +}; + +struct acpi_power_dependent_device { + struct device *dev; + struct list_head node; +}; + +struct acpi_power_resource { + struct acpi_device device; + struct list_head list_node; + char *name; + u32 system_level; + u32 order; + unsigned int ref_count; + bool wakeup_enabled; + struct mutex resource_lock; + struct list_head dependents; +}; + +struct acpi_power_resource_entry { + struct list_head node; + struct acpi_power_resource *resource; +}; + +struct acpi_bus_event { + struct list_head node; + acpi_device_class device_class; + acpi_bus_id bus_id; + u32 type; + u32 data; +}; + +struct acpi_genl_event { + acpi_device_class device_class; + char bus_id[15]; + u32 type; + u32 data; +}; + +enum { + ACPI_GENL_ATTR_UNSPEC = 0, + ACPI_GENL_ATTR_EVENT = 1, + __ACPI_GENL_ATTR_MAX = 2, +}; + +enum { + ACPI_GENL_CMD_UNSPEC = 0, + ACPI_GENL_CMD_EVENT = 1, + __ACPI_GENL_CMD_MAX = 2, +}; + +struct acpi_ged_device { + struct device *dev; + struct list_head event_list; +}; + +struct acpi_ged_event { + struct list_head node; + struct device *dev; + unsigned int gsi; + unsigned int irq; + acpi_handle handle; +}; + +typedef void (*acpi_gbl_event_handler)(u32, acpi_handle, u32, void *); + +struct acpi_table_bert { + struct acpi_table_header header; + u32 region_length; + u64 address; +}; + +struct acpi_table_attr { + struct bin_attribute attr; + char name[4]; + int instance; + char filename[8]; + struct list_head node; +}; + +struct acpi_data_attr { + struct bin_attribute attr; + u64 addr; +}; + +struct acpi_data_obj { + char *name; + int (*fn)(void *, struct acpi_data_attr *); +}; + +struct event_counter { + u32 count; + u32 flags; +}; + +struct acpi_device_properties { + const guid_t *guid; + const union acpi_object *properties; + struct list_head list; +}; + +struct acpi_lpat { + int temp; + int raw; +}; + +struct acpi_lpat_conversion_table { + struct acpi_lpat *lpat; + int lpat_count; +}; + +struct acpi_irq_parse_one_ctx { + int rc; + unsigned int index; + long unsigned int *res_flags; + struct irq_fwspec *fwspec; +}; + +enum { + ACPI_REFCLASS_LOCAL = 0, + ACPI_REFCLASS_ARG = 1, + ACPI_REFCLASS_REFOF = 2, + ACPI_REFCLASS_INDEX = 3, + ACPI_REFCLASS_TABLE = 4, + ACPI_REFCLASS_NAME = 5, + ACPI_REFCLASS_DEBUG = 6, + ACPI_REFCLASS_MAX = 6, +}; + +struct acpi_common_descriptor { + void 
*common_pointer; + u8 descriptor_type; +}; + +union acpi_descriptor { + struct acpi_common_descriptor common; + union acpi_operand_object object; + struct acpi_namespace_node node; + union acpi_parse_object op; +}; + +struct acpi_create_field_info { + struct acpi_namespace_node *region_node; + struct acpi_namespace_node *field_node; + struct acpi_namespace_node *register_node; + struct acpi_namespace_node *data_register_node; + struct acpi_namespace_node *connection_node; + u8 *resource_buffer; + u32 bank_value; + u32 field_bit_position; + u32 field_bit_length; + u16 resource_length; + u16 pin_number_index; + u8 field_flags; + u8 attribute; + u8 field_type; + u8 access_length; +}; + +struct acpi_init_walk_info { + u32 table_index; + u32 object_count; + u32 method_count; + u32 serial_method_count; + u32 non_serial_method_count; + u32 serialized_method_count; + u32 device_count; + u32 op_region_count; + u32 field_count; + u32 buffer_count; + u32 package_count; + u32 op_region_init; + u32 field_init; + u32 buffer_init; + u32 package_init; + acpi_owner_id owner_id; +}; + +typedef u32 acpi_name; + +typedef acpi_status (*acpi_exception_handler)(acpi_status, acpi_name, u16, u32, void *); + +struct acpi_name_info { + char name[4]; + u16 argument_list; + u8 expected_btypes; +} __attribute__((packed)); + +struct acpi_package_info { + u8 type; + u8 object_type1; + u8 count1; + u8 object_type2; + u8 count2; + u16 reserved; +} __attribute__((packed)); + +struct acpi_package_info2 { + u8 type; + u8 count; + u8 object_type[4]; + u8 reserved; +}; + +struct acpi_package_info3 { + u8 type; + u8 count; + u8 object_type[2]; + u8 tail_object_type; + u16 reserved; +} __attribute__((packed)); + +struct acpi_package_info4 { + u8 type; + u8 object_type1; + u8 count1; + u8 sub_object_types; + u8 pkg_count; + u16 reserved; +} __attribute__((packed)); + +union acpi_predefined_info { + struct acpi_name_info info; + struct acpi_package_info ret_info; + struct acpi_package_info2 ret_info2; + struct acpi_package_info3 ret_info3; + struct acpi_package_info4 ret_info4; +}; + +struct acpi_evaluate_info { + struct acpi_namespace_node *prefix_node; + const char *relative_pathname; + union acpi_operand_object **parameters; + struct acpi_namespace_node *node; + union acpi_operand_object *obj_desc; + char *full_pathname; + const union acpi_predefined_info *predefined; + union acpi_operand_object *return_object; + union acpi_operand_object *parent_package; + u32 return_flags; + u32 return_btype; + u16 param_count; + u16 node_flags; + u8 pass_number; + u8 return_object_type; + u8 flags; +}; + +enum { + AML_FIELD_ACCESS_ANY = 0, + AML_FIELD_ACCESS_BYTE = 1, + AML_FIELD_ACCESS_WORD = 2, + AML_FIELD_ACCESS_DWORD = 3, + AML_FIELD_ACCESS_QWORD = 4, + AML_FIELD_ACCESS_BUFFER = 5, +}; + +typedef enum { + ACPI_IMODE_LOAD_PASS1 = 1, + ACPI_IMODE_LOAD_PASS2 = 2, + ACPI_IMODE_EXECUTE = 3, +} acpi_interpreter_mode; + +typedef acpi_status (*acpi_execute_op)(struct acpi_walk_state *); + +struct acpi_reg_walk_info { + u32 function; + u32 reg_run_count; + acpi_adr_space_type space_id; +}; + +enum { + AML_FIELD_UPDATE_PRESERVE = 0, + AML_FIELD_UPDATE_WRITE_AS_ONES = 32, + AML_FIELD_UPDATE_WRITE_AS_ZEROS = 64, +}; + +struct acpi_signal_fatal_info { + u32 type; + u32 code; + u32 argument; +}; + +enum { + MATCH_MTR = 0, + MATCH_MEQ = 1, + MATCH_MLE = 2, + MATCH_MLT = 3, + MATCH_MGE = 4, + MATCH_MGT = 5, +}; + +enum { + AML_FIELD_ATTRIB_QUICK = 2, + AML_FIELD_ATTRIB_SEND_RECEIVE = 4, + AML_FIELD_ATTRIB_BYTE = 6, + AML_FIELD_ATTRIB_WORD = 8, + 
AML_FIELD_ATTRIB_BLOCK = 10, + AML_FIELD_ATTRIB_BYTES = 11, + AML_FIELD_ATTRIB_PROCESS_CALL = 12, + AML_FIELD_ATTRIB_BLOCK_PROCESS_CALL = 13, + AML_FIELD_ATTRIB_RAW_BYTES = 14, + AML_FIELD_ATTRIB_RAW_PROCESS_BYTES = 15, +}; + +typedef enum { + ACPI_TRACE_AML_METHOD = 0, + ACPI_TRACE_AML_OPCODE = 1, + ACPI_TRACE_AML_REGION = 2, +} acpi_trace_event_type; + +struct acpi_port_info { + char *name; + u16 start; + u16 end; + u8 osi_dependency; +}; + +struct acpi_pci_device { + acpi_handle device; + struct acpi_pci_device *next; +}; + +typedef acpi_status (*acpi_init_handler)(acpi_handle, u32); + +struct acpi_device_walk_info { + struct acpi_table_desc *table_desc; + struct acpi_evaluate_info *evaluate_info; + u32 device_count; + u32 num_STA; + u32 num_INI; +}; + +struct acpi_table_list { + struct acpi_table_desc *tables; + u32 current_table_count; + u32 max_table_count; + u8 flags; +}; + +enum acpi_return_package_types { + ACPI_PTYPE1_FIXED = 1, + ACPI_PTYPE1_VAR = 2, + ACPI_PTYPE1_OPTION = 3, + ACPI_PTYPE2 = 4, + ACPI_PTYPE2_COUNT = 5, + ACPI_PTYPE2_PKG_COUNT = 6, + ACPI_PTYPE2_FIXED = 7, + ACPI_PTYPE2_MIN = 8, + ACPI_PTYPE2_REV_FIXED = 9, + ACPI_PTYPE2_FIX_VAR = 10, + ACPI_PTYPE2_VAR_VAR = 11, + ACPI_PTYPE2_UUID_PAIR = 12, + ACPI_PTYPE_CUSTOM = 13, +}; + +typedef acpi_status (*acpi_object_converter)(struct acpi_namespace_node *, union acpi_operand_object *, union acpi_operand_object **); + +struct acpi_simple_repair_info { + char name[4]; + u32 unexpected_btypes; + u32 package_index; + acpi_object_converter object_converter; +}; + +typedef acpi_status (*acpi_repair_function)(struct acpi_evaluate_info *, union acpi_operand_object **); + +struct acpi_repair_info { + char name[4]; + acpi_repair_function repair_function; +}; + +struct acpi_namestring_info { + const char *external_name; + const char *next_external_char; + char *internal_name; + u32 length; + u32 num_segments; + u32 num_carats; + u8 fully_qualified; +}; + +typedef acpi_status (*acpi_walk_callback)(acpi_handle, u32, void *, void **); + +struct acpi_rw_lock { + void *writer_mutex; + void *reader_mutex; + u32 num_readers; +}; + +struct acpi_get_devices_info { + acpi_walk_callback user_function; + void *context; + const char *hid; +}; + +struct aml_resource_small_header { + u8 descriptor_type; +}; + +struct aml_resource_irq { + u8 descriptor_type; + u16 irq_mask; + u8 flags; +} __attribute__((packed)); + +struct aml_resource_dma { + u8 descriptor_type; + u8 dma_channel_mask; + u8 flags; +}; + +struct aml_resource_start_dependent { + u8 descriptor_type; + u8 flags; +}; + +struct aml_resource_end_dependent { + u8 descriptor_type; +}; + +struct aml_resource_io { + u8 descriptor_type; + u8 flags; + u16 minimum; + u16 maximum; + u8 alignment; + u8 address_length; +}; + +struct aml_resource_fixed_io { + u8 descriptor_type; + u16 address; + u8 address_length; +} __attribute__((packed)); + +struct aml_resource_vendor_small { + u8 descriptor_type; +}; + +struct aml_resource_end_tag { + u8 descriptor_type; + u8 checksum; +}; + +struct aml_resource_fixed_dma { + u8 descriptor_type; + u16 request_lines; + u16 channels; + u8 width; +} __attribute__((packed)); + +struct aml_resource_large_header { + u8 descriptor_type; + u16 resource_length; +} __attribute__((packed)); + +struct aml_resource_memory24 { + u8 descriptor_type; + u16 resource_length; + u8 flags; + u16 minimum; + u16 maximum; + u16 alignment; + u16 address_length; +} __attribute__((packed)); + +struct aml_resource_vendor_large { + u8 descriptor_type; + u16 resource_length; +} 
__attribute__((packed)); + +struct aml_resource_memory32 { + u8 descriptor_type; + u16 resource_length; + u8 flags; + u32 minimum; + u32 maximum; + u32 alignment; + u32 address_length; +} __attribute__((packed)); + +struct aml_resource_fixed_memory32 { + u8 descriptor_type; + u16 resource_length; + u8 flags; + u32 address; + u32 address_length; +} __attribute__((packed)); + +struct aml_resource_address { + u8 descriptor_type; + u16 resource_length; + u8 resource_type; + u8 flags; + u8 specific_flags; +} __attribute__((packed)); + +struct aml_resource_extended_address64 { + u8 descriptor_type; + u16 resource_length; + u8 resource_type; + u8 flags; + u8 specific_flags; + u8 revision_ID; + u8 reserved; + u64 granularity; + u64 minimum; + u64 maximum; + u64 translation_offset; + u64 address_length; + u64 type_specific; +} __attribute__((packed)); + +struct aml_resource_address64 { + u8 descriptor_type; + u16 resource_length; + u8 resource_type; + u8 flags; + u8 specific_flags; + u64 granularity; + u64 minimum; + u64 maximum; + u64 translation_offset; + u64 address_length; +} __attribute__((packed)); + +struct aml_resource_address32 { + u8 descriptor_type; + u16 resource_length; + u8 resource_type; + u8 flags; + u8 specific_flags; + u32 granularity; + u32 minimum; + u32 maximum; + u32 translation_offset; + u32 address_length; +} __attribute__((packed)); + +struct aml_resource_address16 { + u8 descriptor_type; + u16 resource_length; + u8 resource_type; + u8 flags; + u8 specific_flags; + u16 granularity; + u16 minimum; + u16 maximum; + u16 translation_offset; + u16 address_length; +} __attribute__((packed)); + +struct aml_resource_extended_irq { + u8 descriptor_type; + u16 resource_length; + u8 flags; + u8 interrupt_count; + u32 interrupts[1]; +} __attribute__((packed)); + +struct aml_resource_generic_register { + u8 descriptor_type; + u16 resource_length; + u8 address_space_id; + u8 bit_width; + u8 bit_offset; + u8 access_size; + u64 address; +} __attribute__((packed)); + +struct aml_resource_gpio { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u8 connection_type; + u16 flags; + u16 int_flags; + u8 pin_config; + u16 drive_strength; + u16 debounce_timeout; + u16 pin_table_offset; + u8 res_source_index; + u16 res_source_offset; + u16 vendor_offset; + u16 vendor_length; +} __attribute__((packed)); + +struct aml_resource_common_serialbus { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u8 res_source_index; + u8 type; + u8 flags; + u16 type_specific_flags; + u8 type_revision_id; + u16 type_data_length; +} __attribute__((packed)); + +struct aml_resource_i2c_serialbus { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u8 res_source_index; + u8 type; + u8 flags; + u16 type_specific_flags; + u8 type_revision_id; + u16 type_data_length; + u32 connection_speed; + u16 slave_address; +} __attribute__((packed)); + +struct aml_resource_spi_serialbus { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u8 res_source_index; + u8 type; + u8 flags; + u16 type_specific_flags; + u8 type_revision_id; + u16 type_data_length; + u32 connection_speed; + u8 data_bit_length; + u8 clock_phase; + u8 clock_polarity; + u16 device_selection; +} __attribute__((packed)); + +struct aml_resource_uart_serialbus { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u8 res_source_index; + u8 type; + u8 flags; + u16 type_specific_flags; + u8 type_revision_id; + u16 type_data_length; + u32 default_baud_rate; + u16 rx_fifo_size; + u16 tx_fifo_size; + u8 
parity; + u8 lines_enabled; +} __attribute__((packed)); + +struct aml_resource_pin_function { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u16 flags; + u8 pin_config; + u16 function_number; + u16 pin_table_offset; + u8 res_source_index; + u16 res_source_offset; + u16 vendor_offset; + u16 vendor_length; +} __attribute__((packed)); + +struct aml_resource_pin_config { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u16 flags; + u8 pin_config_type; + u32 pin_config_value; + u16 pin_table_offset; + u8 res_source_index; + u16 res_source_offset; + u16 vendor_offset; + u16 vendor_length; +} __attribute__((packed)); + +struct aml_resource_pin_group { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u16 flags; + u16 pin_table_offset; + u16 label_offset; + u16 vendor_offset; + u16 vendor_length; +} __attribute__((packed)); + +struct aml_resource_pin_group_function { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u16 flags; + u16 function_number; + u8 res_source_index; + u16 res_source_offset; + u16 res_source_label_offset; + u16 vendor_offset; + u16 vendor_length; +} __attribute__((packed)); + +struct aml_resource_pin_group_config { + u8 descriptor_type; + u16 resource_length; + u8 revision_id; + u16 flags; + u8 pin_config_type; + u32 pin_config_value; + u8 res_source_index; + u16 res_source_offset; + u16 res_source_label_offset; + u16 vendor_offset; + u16 vendor_length; +} __attribute__((packed)); + +union aml_resource { + u8 descriptor_type; + struct aml_resource_small_header small_header; + struct aml_resource_large_header large_header; + struct aml_resource_irq irq; + struct aml_resource_dma dma; + struct aml_resource_start_dependent start_dpf; + struct aml_resource_end_dependent end_dpf; + struct aml_resource_io io; + struct aml_resource_fixed_io fixed_io; + struct aml_resource_fixed_dma fixed_dma; + struct aml_resource_vendor_small vendor_small; + struct aml_resource_end_tag end_tag; + struct aml_resource_memory24 memory24; + struct aml_resource_generic_register generic_reg; + struct aml_resource_vendor_large vendor_large; + struct aml_resource_memory32 memory32; + struct aml_resource_fixed_memory32 fixed_memory32; + struct aml_resource_address16 address16; + struct aml_resource_address32 address32; + struct aml_resource_address64 address64; + struct aml_resource_extended_address64 ext_address64; + struct aml_resource_extended_irq extended_irq; + struct aml_resource_gpio gpio; + struct aml_resource_i2c_serialbus i2c_serial_bus; + struct aml_resource_spi_serialbus spi_serial_bus; + struct aml_resource_uart_serialbus uart_serial_bus; + struct aml_resource_common_serialbus common_serial_bus; + struct aml_resource_pin_function pin_function; + struct aml_resource_pin_config pin_config; + struct aml_resource_pin_group pin_group; + struct aml_resource_pin_group_function pin_group_function; + struct aml_resource_pin_group_config pin_group_config; + struct aml_resource_address address; + u32 dword_item; + u16 word_item; + u8 byte_item; +}; + +struct acpi_rsconvert_info { + u8 opcode; + u8 resource_offset; + u8 aml_offset; + u8 value; +}; + +enum { + ACPI_RSC_INITGET = 0, + ACPI_RSC_INITSET = 1, + ACPI_RSC_FLAGINIT = 2, + ACPI_RSC_1BITFLAG = 3, + ACPI_RSC_2BITFLAG = 4, + ACPI_RSC_3BITFLAG = 5, + ACPI_RSC_ADDRESS = 6, + ACPI_RSC_BITMASK = 7, + ACPI_RSC_BITMASK16 = 8, + ACPI_RSC_COUNT = 9, + ACPI_RSC_COUNT16 = 10, + ACPI_RSC_COUNT_GPIO_PIN = 11, + ACPI_RSC_COUNT_GPIO_RES = 12, + ACPI_RSC_COUNT_GPIO_VEN = 13, + ACPI_RSC_COUNT_SERIAL_RES 
= 14, + ACPI_RSC_COUNT_SERIAL_VEN = 15, + ACPI_RSC_DATA8 = 16, + ACPI_RSC_EXIT_EQ = 17, + ACPI_RSC_EXIT_LE = 18, + ACPI_RSC_EXIT_NE = 19, + ACPI_RSC_LENGTH = 20, + ACPI_RSC_MOVE_GPIO_PIN = 21, + ACPI_RSC_MOVE_GPIO_RES = 22, + ACPI_RSC_MOVE_SERIAL_RES = 23, + ACPI_RSC_MOVE_SERIAL_VEN = 24, + ACPI_RSC_MOVE8 = 25, + ACPI_RSC_MOVE16 = 26, + ACPI_RSC_MOVE32 = 27, + ACPI_RSC_MOVE64 = 28, + ACPI_RSC_SET8 = 29, + ACPI_RSC_SOURCE = 30, + ACPI_RSC_SOURCEX = 31, +}; + +typedef u16 acpi_rs_length; + +typedef u32 acpi_rsdesc_size; + +struct acpi_vendor_uuid { + u8 subtype; + u8 data[16]; +}; + +typedef acpi_status (*acpi_walk_resource_callback)(struct acpi_resource *, void *); + +struct acpi_vendor_walk_info { + struct acpi_vendor_uuid *uuid; + struct acpi_buffer *buffer; + acpi_status status; +}; + +typedef acpi_status (*acpi_table_handler)(u32, void *, void *); + +struct acpi_fadt_info { + const char *name; + u16 address64; + u16 address32; + u16 length; + u8 default_length; + u8 flags; +}; + +struct acpi_fadt_pm_info { + struct acpi_generic_address *target; + u16 source; + u8 register_num; +}; + +struct acpi_table_rsdp { + char signature[8]; + u8 checksum; + char oem_id[6]; + u8 revision; + u32 rsdt_physical_address; + u32 length; + u64 xsdt_physical_address; + u8 extended_checksum; + u8 reserved[3]; +} __attribute__((packed)); + +struct acpi_address_range { + struct acpi_address_range *next; + struct acpi_namespace_node *region_node; + acpi_physical_address start_address; + acpi_physical_address end_address; +}; + +struct acpi_pkg_info { + u8 *free_space; + acpi_size length; + u32 object_space; + u32 num_packages; +}; + +struct acpi_exception_info { + char *name; +}; + +typedef u32 (*acpi_sci_handler)(void *); + +typedef u32 (*acpi_interface_handler)(acpi_string, u32); + +struct acpi_mutex_info { + void *mutex; + u32 use_count; + u64 thread_id; +}; + +struct acpi_sci_handler_info { + struct acpi_sci_handler_info *next; + acpi_sci_handler address; + void *context; +}; + +struct acpi_comment_node { + char *comment; + struct acpi_comment_node *next; +}; + +struct acpi_interface_info { + char *name; + struct acpi_interface_info *next; + u8 flags; + u8 value; +}; + +typedef acpi_status (*acpi_pkg_callback)(u8, union acpi_operand_object *, union acpi_generic_state *, void *); + +typedef u32 acpi_mutex_handle; + +typedef acpi_status (*acpi_walk_aml_callback)(u8 *, u32, u32, u8, void **); + +struct acpi_table_mcfg { + struct acpi_table_header header; + u8 reserved[8]; +}; + +struct acpi_mcfg_allocation { + u64 address; + u16 pci_segment; + u8 start_bus_number; + u8 end_bus_number; + u32 reserved; +}; + +struct mcfg_entry { + struct list_head list; + phys_addr_t addr; + u16 segment; + u8 bus_start; + u8 bus_end; +}; + +struct mcfg_fixup { + char oem_id[7]; + char oem_table_id[9]; + u32 oem_revision; + u16 segment; + struct resource bus_range; + const struct pci_ecam_ops *ops; + struct resource cfgres; +}; + +struct acpi_pci_slot { + struct pci_slot *pci_slot; + struct list_head list; +}; + +struct acpi_power_register { + u8 descriptor; + u16 length; + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 access_size; + u64 address; +} __attribute__((packed)); + +struct acpi_lpi_states_array { + unsigned int size; + unsigned int composite_states_size; + struct acpi_lpi_state *entries; + struct acpi_lpi_state *composite_states[8]; +}; + +struct container_dev { + struct device dev; + int (*offline)(struct container_dev *); +}; + +struct acpi_table_slit { + struct acpi_table_header header; + u64 locality_count; + 
u8 entry[1]; +} __attribute__((packed)); + +struct acpi_table_srat { + struct acpi_table_header header; + u32 table_revision; + u64 reserved; +}; + +struct acpi_srat_cpu_affinity { + struct acpi_subtable_header header; + u8 proximity_domain_lo; + u8 apic_id; + u32 flags; + u8 local_sapic_eid; + u8 proximity_domain_hi[3]; + u32 clock_domain; +}; + +struct acpi_srat_mem_affinity { + struct acpi_subtable_header header; + u32 proximity_domain; + u16 reserved; + u64 base_address; + u64 length; + u32 reserved1; + u32 flags; + u64 reserved2; +} __attribute__((packed)); + +struct acpi_srat_x2apic_cpu_affinity { + struct acpi_subtable_header header; + u16 reserved; + u32 proximity_domain; + u32 apic_id; + u32 flags; + u32 clock_domain; + u32 reserved2; +}; + +struct acpi_srat_generic_affinity { + struct acpi_subtable_header header; + u8 reserved; + u8 device_handle_type; + u32 proximity_domain; + u8 device_handle[16]; + u32 flags; + u32 reserved1; +}; + +enum acpi_hmat_type { + ACPI_HMAT_TYPE_PROXIMITY = 0, + ACPI_HMAT_TYPE_LOCALITY = 1, + ACPI_HMAT_TYPE_CACHE = 2, + ACPI_HMAT_TYPE_RESERVED = 3, +}; + +struct acpi_hmat_proximity_domain { + struct acpi_hmat_structure header; + u16 flags; + u16 reserved1; + u32 processor_PD; + u32 memory_PD; + u32 reserved2; + u64 reserved3; + u64 reserved4; +}; + +struct acpi_hmat_locality { + struct acpi_hmat_structure header; + u8 flags; + u8 data_type; + u16 reserved1; + u32 number_of_initiator_Pds; + u32 number_of_target_Pds; + u32 reserved2; + u64 entry_base_unit; +}; + +struct acpi_hmat_cache { + struct acpi_hmat_structure header; + u32 memory_PD; + u32 reserved1; + u64 cache_size; + u32 cache_attributes; + u16 reserved2; + u16 number_of_SMBIOShandles; +}; + +struct node_hmem_attrs { + unsigned int read_bandwidth; + unsigned int write_bandwidth; + unsigned int read_latency; + unsigned int write_latency; +}; + +enum cache_indexing { + NODE_CACHE_DIRECT_MAP = 0, + NODE_CACHE_INDEXED = 1, + NODE_CACHE_OTHER = 2, +}; + +enum cache_write_policy { + NODE_CACHE_WRITE_BACK = 0, + NODE_CACHE_WRITE_THROUGH = 1, + NODE_CACHE_WRITE_OTHER = 2, +}; + +struct node_cache_attrs { + enum cache_indexing indexing; + enum cache_write_policy write_policy; + u64 size; + u16 line_size; + u8 level; +}; + +enum locality_types { + WRITE_LATENCY = 0, + READ_LATENCY = 1, + WRITE_BANDWIDTH = 2, + READ_BANDWIDTH = 3, +}; + +struct memory_locality { + struct list_head node; + struct acpi_hmat_locality *hmat_loc; +}; + +struct target_cache { + struct list_head node; + struct node_cache_attrs cache_attrs; +}; + +struct memory_target { + struct list_head node; + unsigned int memory_pxm; + unsigned int processor_pxm; + struct resource memregions; + struct node_hmem_attrs hmem_attrs[2]; + struct list_head caches; + struct node_cache_attrs cache_attrs; + bool registered; +}; + +struct memory_initiator { + struct list_head node; + unsigned int processor_pxm; + bool has_cpu; +}; + +struct acpi_memory_info { + struct list_head list; + u64 start_addr; + u64 length; + short unsigned int caching; + short unsigned int write_protect; + unsigned int enabled: 1; +}; + +struct acpi_memory_device { + struct acpi_device *device; + struct list_head res_list; +}; + +struct acpi_pcct_hw_reduced { + struct acpi_subtable_header header; + u32 platform_interrupt; + u8 flags; + u8 reserved; + u64 base_address; + u64 length; + struct acpi_generic_address doorbell_register; + u64 preserve_mask; + u64 write_mask; + u32 latency; + u32 max_access_rate; + u16 min_turnaround_time; +} __attribute__((packed)); + +struct 
acpi_pcct_shared_memory { + u32 signature; + u16 command; + u16 status; +}; + +struct mbox_chan; + +struct mbox_chan_ops { + int (*send_data)(struct mbox_chan *, void *); + int (*flush)(struct mbox_chan *, long unsigned int); + int (*startup)(struct mbox_chan *); + void (*shutdown)(struct mbox_chan *); + bool (*last_tx_done)(struct mbox_chan *); + bool (*peek_data)(struct mbox_chan *); +}; + +struct mbox_controller; + +struct mbox_client; + +struct mbox_chan { + struct mbox_controller *mbox; + unsigned int txdone_method; + struct mbox_client *cl; + struct completion tx_complete; + void *active_req; + unsigned int msg_count; + unsigned int msg_free; + void *msg_data[20]; + spinlock_t lock; + void *con_priv; +}; + +struct mbox_controller { + struct device *dev; + const struct mbox_chan_ops *ops; + struct mbox_chan *chans; + int num_chans; + bool txdone_irq; + bool txdone_poll; + unsigned int txpoll_period; + struct mbox_chan * (*of_xlate)(struct mbox_controller *, const struct of_phandle_args *); + struct hrtimer poll_hrt; + struct list_head node; +}; + +struct mbox_client { + struct device *dev; + bool tx_block; + long unsigned int tx_tout; + bool knows_txdone; + void (*rx_callback)(struct mbox_client *, void *); + void (*tx_prepare)(struct mbox_client *, void *); + void (*tx_done)(struct mbox_client *, void *, int); +}; + +struct cpc_reg { + u8 descriptor; + u16 length; + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 access_width; + u64 address; +} __attribute__((packed)); + +struct cpc_register_resource { + acpi_object_type type; + u64 *sys_mem_vaddr; + union { + struct cpc_reg reg; + u64 int_value; + } cpc_entry; +}; + +struct cpc_desc { + int num_entries; + int version; + int cpu_id; + int write_cmd_status; + int write_cmd_id; + struct cpc_register_resource cpc_regs[21]; + struct acpi_psd_package domain_info; + struct kobject kobj; +}; + +enum cppc_regs { + HIGHEST_PERF = 0, + NOMINAL_PERF = 1, + LOW_NON_LINEAR_PERF = 2, + LOWEST_PERF = 3, + GUARANTEED_PERF = 4, + DESIRED_PERF = 5, + MIN_PERF = 6, + MAX_PERF = 7, + PERF_REDUC_TOLERANCE = 8, + TIME_WINDOW = 9, + CTR_WRAP_TIME = 10, + REFERENCE_CTR = 11, + DELIVERED_CTR = 12, + PERF_LIMITED = 13, + ENABLE = 14, + AUTO_SEL_ENABLE = 15, + AUTO_ACT_WINDOW = 16, + ENERGY_PERF = 17, + REFERENCE_PERF = 18, + LOWEST_FREQ = 19, + NOMINAL_FREQ = 20, +}; + +struct cppc_perf_caps { + u32 guaranteed_perf; + u32 highest_perf; + u32 nominal_perf; + u32 lowest_perf; + u32 lowest_nonlinear_perf; + u32 lowest_freq; + u32 nominal_freq; +}; + +struct cppc_perf_ctrls { + u32 max_perf; + u32 min_perf; + u32 desired_perf; +}; + +struct cppc_perf_fb_ctrs { + u64 reference; + u64 delivered; + u64 reference_perf; + u64 wraparound_time; +}; + +struct cppc_cpudata { + int cpu; + struct cppc_perf_caps perf_caps; + struct cppc_perf_ctrls perf_ctrls; + struct cppc_perf_fb_ctrs perf_fb_ctrs; + struct cpufreq_policy *cur_policy; + unsigned int shared_type; + cpumask_var_t shared_cpu_map; +}; + +struct cppc_pcc_data { + struct mbox_chan *pcc_channel; + void *pcc_comm_addr; + bool pcc_channel_acquired; + unsigned int deadline_us; + unsigned int pcc_mpar; + unsigned int pcc_mrtt; + unsigned int pcc_nominal; + bool pending_pcc_write_cmd; + bool platform_owns_pcc; + unsigned int pcc_write_cnt; + struct rw_semaphore pcc_lock; + wait_queue_head_t pcc_write_wait_q; + ktime_t last_cmd_cmpl_time; + ktime_t last_mpar_reset; + int mpar_count; + int refcount; +}; + +struct cppc_attr { + struct attribute attr; + ssize_t (*show)(struct kobject *, struct attribute *, char *); + 
ssize_t (*store)(struct kobject *, struct attribute *, const char *, ssize_t); +}; + +enum acpi_pptt_type { + ACPI_PPTT_TYPE_PROCESSOR = 0, + ACPI_PPTT_TYPE_CACHE = 1, + ACPI_PPTT_TYPE_ID = 2, + ACPI_PPTT_TYPE_RESERVED = 3, +}; + +struct acpi_pptt_processor { + struct acpi_subtable_header header; + u16 reserved; + u32 flags; + u32 parent; + u32 acpi_processor_id; + u32 number_of_priv_resources; +}; + +struct acpi_pptt_cache { + struct acpi_subtable_header header; + u16 reserved; + u32 flags; + u32 next_level_of_cache; + u32 size; + u32 number_of_sets; + u8 associativity; + u8 attributes; + u16 line_size; +}; + +struct acpi_whea_header { + u8 action; + u8 instruction; + u8 flags; + u8 reserved; + struct acpi_generic_address register_region; + u64 value; + u64 mask; +} __attribute__((packed)); + +struct acpi_hest_header { + u16 type; + u16 source_id; +}; + +struct cper_sec_mem_err { + u64 validation_bits; + u64 error_status; + u64 physical_addr; + u64 physical_addr_mask; + u16 node; + u16 card; + u16 module; + u16 bank; + u16 device; + u16 row; + u16 column; + u16 bit_pos; + u64 requestor_id; + u64 responder_id; + u64 target_id; + u8 error_type; + u8 extended; + u16 rank; + u16 mem_array_handle; + u16 mem_dev_handle; +}; + +struct apei_exec_context; + +typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *, struct acpi_whea_header *); + +struct apei_exec_ins_type; + +struct apei_exec_context { + u32 ip; + u64 value; + u64 var1; + u64 var2; + u64 src_base; + u64 dst_base; + struct apei_exec_ins_type *ins_table; + u32 instructions; + struct acpi_whea_header *action_table; + u32 entries; +}; + +struct apei_exec_ins_type { + u32 flags; + apei_exec_ins_func_t run; +}; + +struct apei_resources { + struct list_head iomem; + struct list_head ioport; +}; + +typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *, struct acpi_whea_header *, void *); + +struct apei_res { + struct list_head list; + long unsigned int start; + long unsigned int end; +}; + +struct acpi_table_hest { + struct acpi_table_header header; + u32 error_source_count; +}; + +enum acpi_hest_types { + ACPI_HEST_TYPE_IA32_CHECK = 0, + ACPI_HEST_TYPE_IA32_CORRECTED_CHECK = 1, + ACPI_HEST_TYPE_IA32_NMI = 2, + ACPI_HEST_TYPE_NOT_USED3 = 3, + ACPI_HEST_TYPE_NOT_USED4 = 4, + ACPI_HEST_TYPE_NOT_USED5 = 5, + ACPI_HEST_TYPE_AER_ROOT_PORT = 6, + ACPI_HEST_TYPE_AER_ENDPOINT = 7, + ACPI_HEST_TYPE_AER_BRIDGE = 8, + ACPI_HEST_TYPE_GENERIC_ERROR = 9, + ACPI_HEST_TYPE_GENERIC_ERROR_V2 = 10, + ACPI_HEST_TYPE_IA32_DEFERRED_CHECK = 11, + ACPI_HEST_TYPE_RESERVED = 12, +}; + +struct acpi_hest_notify { + u8 type; + u8 length; + u16 config_write_enable; + u32 poll_interval; + u32 vector; + u32 polling_threshold_value; + u32 polling_threshold_window; + u32 error_threshold_value; + u32 error_threshold_window; +}; + +struct acpi_hest_ia_machine_check { + struct acpi_hest_header header; + u16 reserved1; + u8 flags; + u8 enabled; + u32 records_to_preallocate; + u32 max_sections_per_record; + u64 global_capability_data; + u64 global_control_data; + u8 num_hardware_banks; + u8 reserved3[7]; +}; + +struct acpi_hest_ia_corrected { + struct acpi_hest_header header; + u16 reserved1; + u8 flags; + u8 enabled; + u32 records_to_preallocate; + u32 max_sections_per_record; + struct acpi_hest_notify notify; + u8 num_hardware_banks; + u8 reserved2[3]; +}; + +struct acpi_hest_ia_deferred_check { + struct acpi_hest_header header; + u16 reserved1; + u8 flags; + u8 enabled; + u32 records_to_preallocate; + u32 max_sections_per_record; + struct acpi_hest_notify 
notify; + u8 num_hardware_banks; + u8 reserved2[3]; +}; + +enum hest_status { + HEST_ENABLED = 0, + HEST_DISABLED = 1, + HEST_NOT_FOUND = 2, +}; + +typedef int (*apei_hest_func_t)(struct acpi_hest_header *, void *); + +struct acpi_table_erst { + struct acpi_table_header header; + u32 header_length; + u32 reserved; + u32 entries; +}; + +enum acpi_erst_actions { + ACPI_ERST_BEGIN_WRITE = 0, + ACPI_ERST_BEGIN_READ = 1, + ACPI_ERST_BEGIN_CLEAR = 2, + ACPI_ERST_END = 3, + ACPI_ERST_SET_RECORD_OFFSET = 4, + ACPI_ERST_EXECUTE_OPERATION = 5, + ACPI_ERST_CHECK_BUSY_STATUS = 6, + ACPI_ERST_GET_COMMAND_STATUS = 7, + ACPI_ERST_GET_RECORD_ID = 8, + ACPI_ERST_SET_RECORD_ID = 9, + ACPI_ERST_GET_RECORD_COUNT = 10, + ACPI_ERST_BEGIN_DUMMY_WRIITE = 11, + ACPI_ERST_NOT_USED = 12, + ACPI_ERST_GET_ERROR_RANGE = 13, + ACPI_ERST_GET_ERROR_LENGTH = 14, + ACPI_ERST_GET_ERROR_ATTRIBUTES = 15, + ACPI_ERST_EXECUTE_TIMINGS = 16, + ACPI_ERST_ACTION_RESERVED = 17, +}; + +enum acpi_erst_instructions { + ACPI_ERST_READ_REGISTER = 0, + ACPI_ERST_READ_REGISTER_VALUE = 1, + ACPI_ERST_WRITE_REGISTER = 2, + ACPI_ERST_WRITE_REGISTER_VALUE = 3, + ACPI_ERST_NOOP = 4, + ACPI_ERST_LOAD_VAR1 = 5, + ACPI_ERST_LOAD_VAR2 = 6, + ACPI_ERST_STORE_VAR1 = 7, + ACPI_ERST_ADD = 8, + ACPI_ERST_SUBTRACT = 9, + ACPI_ERST_ADD_VALUE = 10, + ACPI_ERST_SUBTRACT_VALUE = 11, + ACPI_ERST_STALL = 12, + ACPI_ERST_STALL_WHILE_TRUE = 13, + ACPI_ERST_SKIP_NEXT_IF_TRUE = 14, + ACPI_ERST_GOTO = 15, + ACPI_ERST_SET_SRC_ADDRESS_BASE = 16, + ACPI_ERST_SET_DST_ADDRESS_BASE = 17, + ACPI_ERST_MOVE_DATA = 18, + ACPI_ERST_INSTRUCTION_RESERVED = 19, +}; + +struct cper_record_header { + char signature[4]; + u16 revision; + u32 signature_end; + u16 section_count; + u32 error_severity; + u32 validation_bits; + u32 record_length; + u64 timestamp; + guid_t platform_id; + guid_t partition_id; + guid_t creator_id; + guid_t notification_type; + u64 record_id; + u32 flags; + u64 persistence_information; + u8 reserved[12]; +} __attribute__((packed)); + +struct cper_section_descriptor { + u32 section_offset; + u32 section_length; + u16 revision; + u8 validation_bits; + u8 reserved; + u32 flags; + guid_t section_type; + guid_t fru_id; + u32 section_severity; + u8 fru_text[20]; +}; + +struct erst_erange { + u64 base; + u64 size; + void *vaddr; + u32 attr; +}; + +struct erst_record_id_cache { + struct mutex lock; + u64 *entries; + int len; + int size; + int refcount; +}; + +struct cper_pstore_record { + struct cper_record_header hdr; + struct cper_section_descriptor sec_hdr; + char data[0]; +}; + +struct acpi_bert_region { + u32 block_status; + u32 raw_data_offset; + u32 raw_data_length; + u32 data_length; + u32 error_severity; +}; + +struct acpi_hest_generic_status { + u32 block_status; + u32 raw_data_offset; + u32 raw_data_length; + u32 data_length; + u32 error_severity; +}; + +struct pmic_table { + int address; + int reg; + int bit; +}; + +struct intel_pmic_opregion_data { + int (*get_power)(struct regmap *, int, int, u64 *); + int (*update_power)(struct regmap *, int, int, bool); + int (*get_raw_temp)(struct regmap *, int); + int (*update_aux)(struct regmap *, int, int); + int (*get_policy)(struct regmap *, int, int, u64 *); + int (*update_policy)(struct regmap *, int, int, int); + int (*exec_mipi_pmic_seq_element)(struct regmap *, u16, u32, u32, u32); + struct pmic_table *power_table; + int power_table_count; + struct pmic_table *thermal_table; + int thermal_table_count; + int pmic_i2c_address; +}; + +struct intel_pmic_regs_handler_ctx { + unsigned int val; + u16 addr; +}; + 
+struct intel_pmic_opregion { + struct mutex lock; + struct acpi_lpat_conversion_table *lpat_table; + struct regmap *regmap; + struct intel_pmic_opregion_data *data; + struct intel_pmic_regs_handler_ctx ctx; +}; + +struct acpi_table_iort { + struct acpi_table_header header; + u32 node_count; + u32 node_offset; + u32 reserved; +}; + +struct acpi_iort_node { + u8 type; + u16 length; + u8 revision; + u32 reserved; + u32 mapping_count; + u32 mapping_offset; + char node_data[1]; +} __attribute__((packed)); + +enum acpi_iort_node_type { + ACPI_IORT_NODE_ITS_GROUP = 0, + ACPI_IORT_NODE_NAMED_COMPONENT = 1, + ACPI_IORT_NODE_PCI_ROOT_COMPLEX = 2, + ACPI_IORT_NODE_SMMU = 3, + ACPI_IORT_NODE_SMMU_V3 = 4, + ACPI_IORT_NODE_PMCG = 5, +}; + +struct acpi_iort_id_mapping { + u32 input_base; + u32 id_count; + u32 output_base; + u32 output_reference; + u32 flags; +}; + +struct acpi_iort_its_group { + u32 its_count; + u32 identifiers[1]; +}; + +struct acpi_iort_named_component { + u32 node_flags; + u64 memory_properties; + u8 memory_address_limit; + char device_name[1]; +} __attribute__((packed)); + +struct acpi_iort_root_complex { + u64 memory_properties; + u32 ats_attribute; + u32 pci_segment_number; + u8 memory_address_limit; + u8 reserved[3]; +} __attribute__((packed)); + +struct acpi_iort_smmu { + u64 base_address; + u64 span; + u32 model; + u32 flags; + u32 global_interrupt_offset; + u32 context_interrupt_count; + u32 context_interrupt_offset; + u32 pmu_interrupt_count; + u32 pmu_interrupt_offset; + u64 interrupts[1]; +} __attribute__((packed)); + +struct acpi_iort_smmu_v3 { + u64 base_address; + u32 flags; + u32 reserved; + u64 vatos_address; + u32 model; + u32 event_gsiv; + u32 pri_gsiv; + u32 gerr_gsiv; + u32 sync_gsiv; + u32 pxm; + u32 id_mapping_index; +} __attribute__((packed)); + +struct acpi_iort_pmcg { + u64 page0_base_address; + u32 overflow_gsiv; + u32 node_reference; + u64 page1_base_address; +}; + +struct iort_its_msi_chip { + struct list_head list; + struct fwnode_handle *fw_node; + phys_addr_t base_addr; + u32 translation_id; +}; + +struct iort_fwnode { + struct list_head list; + struct acpi_iort_node *iort_node; + struct fwnode_handle *fwnode; +}; + +typedef acpi_status (*iort_find_node_callback)(struct acpi_iort_node *, void *); + +struct iort_pci_alias_info { + struct device *dev; + struct acpi_iort_node *node; +}; + +struct iort_dev_config { + const char *name; + int (*dev_init)(struct acpi_iort_node *); + void (*dev_dma_configure)(struct device *, struct acpi_iort_node *); + int (*dev_count_resources)(struct acpi_iort_node *); + void (*dev_init_resources)(struct resource *, struct acpi_iort_node *); + int (*dev_set_proximity)(struct device *, struct acpi_iort_node *); + int (*dev_add_platdata)(struct platform_device *); +}; + +enum arch_timer_ppi_nr { + ARCH_TIMER_PHYS_SECURE_PPI = 0, + ARCH_TIMER_PHYS_NONSECURE_PPI = 1, + ARCH_TIMER_VIRT_PPI = 2, + ARCH_TIMER_HYP_PPI = 3, + ARCH_TIMER_MAX_TIMER_PPI = 4, +}; + +struct arch_timer_mem_frame { + bool valid; + phys_addr_t cntbase; + size_t size; + int phys_irq; + int virt_irq; +}; + +struct arch_timer_mem { + phys_addr_t cntctlbase; + size_t size; + struct arch_timer_mem_frame frame[8]; +}; + +struct acpi_table_gtdt { + struct acpi_table_header header; + u64 counter_block_addresss; + u32 reserved; + u32 secure_el1_interrupt; + u32 secure_el1_flags; + u32 non_secure_el1_interrupt; + u32 non_secure_el1_flags; + u32 virtual_timer_interrupt; + u32 virtual_timer_flags; + u32 non_secure_el2_interrupt; + u32 non_secure_el2_flags; + u64 
counter_read_block_address; + u32 platform_timer_count; + u32 platform_timer_offset; +} __attribute__((packed)); + +struct acpi_gtdt_header { + u8 type; + u16 length; +} __attribute__((packed)); + +enum acpi_gtdt_type { + ACPI_GTDT_TYPE_TIMER_BLOCK = 0, + ACPI_GTDT_TYPE_WATCHDOG = 1, + ACPI_GTDT_TYPE_RESERVED = 2, +}; + +struct acpi_gtdt_timer_block { + struct acpi_gtdt_header header; + u8 reserved; + u64 block_address; + u32 timer_count; + u32 timer_offset; +} __attribute__((packed)); + +struct acpi_gtdt_timer_entry { + u8 frame_number; + u8 reserved[3]; + u64 base_address; + u64 el0_base_address; + u32 timer_interrupt; + u32 timer_flags; + u32 virtual_timer_interrupt; + u32 virtual_timer_flags; + u32 common_flags; +} __attribute__((packed)); + +struct acpi_gtdt_watchdog { + struct acpi_gtdt_header header; + u8 reserved; + u64 refresh_frame_address; + u64 control_frame_address; + u32 timer_interrupt; + u32 timer_flags; +} __attribute__((packed)); + +struct acpi_gtdt_descriptor { + struct acpi_table_gtdt *gtdt; + void *gtdt_end; + void *platform_timer; +}; + +struct pnp_device_id { + __u8 id[8]; + kernel_ulong_t driver_data; +}; + +struct pnp_card_device_id { + __u8 id[8]; + kernel_ulong_t driver_data; + struct { + __u8 id[8]; + } devs[8]; +}; + +struct pnp_protocol; + +struct pnp_id; + +struct pnp_card { + struct device dev; + unsigned char number; + struct list_head global_list; + struct list_head protocol_list; + struct list_head devices; + struct pnp_protocol *protocol; + struct pnp_id *id; + char name[50]; + unsigned char pnpver; + unsigned char productver; + unsigned int serial; + unsigned char checksum; + struct proc_dir_entry *procdir; +}; + +struct pnp_dev; + +struct pnp_protocol { + struct list_head protocol_list; + char *name; + int (*get)(struct pnp_dev *); + int (*set)(struct pnp_dev *); + int (*disable)(struct pnp_dev *); + bool (*can_wakeup)(struct pnp_dev *); + int (*suspend)(struct pnp_dev *, pm_message_t); + int (*resume)(struct pnp_dev *); + unsigned char number; + struct device dev; + struct list_head cards; + struct list_head devices; +}; + +struct pnp_id { + char id[8]; + struct pnp_id *next; +}; + +struct pnp_card_driver; + +struct pnp_card_link { + struct pnp_card *card; + struct pnp_card_driver *driver; + void *driver_data; + pm_message_t pm_state; +}; + +struct pnp_driver { + const char *name; + const struct pnp_device_id *id_table; + unsigned int flags; + int (*probe)(struct pnp_dev *, const struct pnp_device_id *); + void (*remove)(struct pnp_dev *); + void (*shutdown)(struct pnp_dev *); + int (*suspend)(struct pnp_dev *, pm_message_t); + int (*resume)(struct pnp_dev *); + struct device_driver driver; +}; + +struct pnp_card_driver { + struct list_head global_list; + char *name; + const struct pnp_card_device_id *id_table; + unsigned int flags; + int (*probe)(struct pnp_card_link *, const struct pnp_card_device_id *); + void (*remove)(struct pnp_card_link *); + int (*suspend)(struct pnp_card_link *, pm_message_t); + int (*resume)(struct pnp_card_link *); + struct pnp_driver link; +}; + +struct pnp_dev { + struct device dev; + u64 dma_mask; + unsigned int number; + int status; + struct list_head global_list; + struct list_head protocol_list; + struct list_head card_list; + struct list_head rdev_list; + struct pnp_protocol *protocol; + struct pnp_card *card; + struct pnp_driver *driver; + struct pnp_card_link *card_link; + struct pnp_id *id; + int active; + int capabilities; + unsigned int num_dependent_sets; + struct list_head resources; + struct list_head 
options; + char name[50]; + int flags; + struct proc_dir_entry *procent; + void *data; +}; + +struct pnp_resource { + struct list_head list; + struct resource res; +}; + +struct pnp_port { + resource_size_t min; + resource_size_t max; + resource_size_t align; + resource_size_t size; + unsigned char flags; +}; + +typedef struct { + long unsigned int bits[4]; +} pnp_irq_mask_t; + +struct pnp_irq { + pnp_irq_mask_t map; + unsigned char flags; +}; + +struct pnp_dma { + unsigned char map; + unsigned char flags; +}; + +struct pnp_mem { + resource_size_t min; + resource_size_t max; + resource_size_t align; + resource_size_t size; + unsigned char flags; +}; + +struct pnp_option { + struct list_head list; + unsigned int flags; + long unsigned int type; + union { + struct pnp_port port; + struct pnp_irq irq; + struct pnp_dma dma; + struct pnp_mem mem; + } u; +}; + +struct pnp_info_buffer { + char *buffer; + char *curr; + long unsigned int size; + long unsigned int len; + int stop; + int error; +}; + +typedef struct pnp_info_buffer pnp_info_buffer_t; + +struct pnp_fixup { + char id[7]; + void (*quirk_function)(struct pnp_dev *); +}; + +struct acpipnp_parse_option_s { + struct pnp_dev *dev; + unsigned int option_flags; +}; + +struct deferred_device { + struct amba_device *dev; + struct resource *parent; + struct list_head node; +}; + +struct find_data { + struct amba_device *dev; + struct device *parent; + const char *busid; + unsigned int id; + unsigned int mask; +}; + +struct tegra_ahb { + void *regs; + struct device *dev; + u32 ctx[0]; +}; + +struct clk_bulk_devres { + struct clk_bulk_data *clks; + int num_clks; +}; + +struct clk_hw; + +struct clk_lookup { + struct list_head node; + const char *dev_id; + const char *con_id; + struct clk *clk; + struct clk_hw *clk_hw; +}; + +struct clk_core; + +struct clk_init_data; + +struct clk_hw { + struct clk_core *core; + struct clk *clk; + const struct clk_init_data *init; +}; + +struct clk_rate_request { + long unsigned int rate; + long unsigned int min_rate; + long unsigned int max_rate; + long unsigned int best_parent_rate; + struct clk_hw *best_parent_hw; +}; + +struct clk_duty { + unsigned int num; + unsigned int den; +}; + +struct clk_ops { + int (*prepare)(struct clk_hw *); + void (*unprepare)(struct clk_hw *); + int (*is_prepared)(struct clk_hw *); + void (*unprepare_unused)(struct clk_hw *); + int (*enable)(struct clk_hw *); + void (*disable)(struct clk_hw *); + int (*is_enabled)(struct clk_hw *); + void (*disable_unused)(struct clk_hw *); + int (*save_context)(struct clk_hw *); + void (*restore_context)(struct clk_hw *); + long unsigned int (*recalc_rate)(struct clk_hw *, long unsigned int); + long int (*round_rate)(struct clk_hw *, long unsigned int, long unsigned int *); + int (*determine_rate)(struct clk_hw *, struct clk_rate_request *); + int (*set_parent)(struct clk_hw *, u8); + u8 (*get_parent)(struct clk_hw *); + int (*set_rate)(struct clk_hw *, long unsigned int, long unsigned int); + int (*set_rate_and_parent)(struct clk_hw *, long unsigned int, long unsigned int, u8); + long unsigned int (*recalc_accuracy)(struct clk_hw *, long unsigned int); + int (*get_phase)(struct clk_hw *); + int (*set_phase)(struct clk_hw *, int); + int (*get_duty_cycle)(struct clk_hw *, struct clk_duty *); + int (*set_duty_cycle)(struct clk_hw *, struct clk_duty *); + int (*init)(struct clk_hw *); + void (*terminate)(struct clk_hw *); + void (*debug_init)(struct clk_hw *, struct dentry *); +}; + +struct clk_parent_data { + const struct clk_hw *hw; + const char 
*fw_name; + const char *name; + int index; +}; + +struct clk_init_data { + const char *name; + const struct clk_ops *ops; + const char * const *parent_names; + const struct clk_parent_data *parent_data; + const struct clk_hw **parent_hws; + u8 num_parents; + long unsigned int flags; +}; + +struct clk_lookup_alloc { + struct clk_lookup cl; + char dev_id[20]; + char con_id[16]; +}; + +struct clk_notifier { + struct clk *clk; + struct srcu_notifier_head notifier_head; + struct list_head node; +}; + +struct clk { + struct clk_core *core; + struct device *dev; + const char *dev_id; + const char *con_id; + long unsigned int min_rate; + long unsigned int max_rate; + unsigned int exclusive_count; + struct hlist_node clks_node; +}; + +struct clk_notifier_data { + struct clk *clk; + long unsigned int old_rate; + long unsigned int new_rate; +}; + +struct clk_parent_map; + +struct clk_core { + const char *name; + const struct clk_ops *ops; + struct clk_hw *hw; + struct module *owner; + struct device *dev; + struct device_node *of_node; + struct clk_core *parent; + struct clk_parent_map *parents; + u8 num_parents; + u8 new_parent_index; + long unsigned int rate; + long unsigned int req_rate; + long unsigned int new_rate; + struct clk_core *new_parent; + struct clk_core *new_child; + long unsigned int flags; + bool orphan; + bool rpm_enabled; + unsigned int enable_count; + unsigned int prepare_count; + unsigned int protect_count; + long unsigned int min_rate; + long unsigned int max_rate; + long unsigned int accuracy; + int phase; + struct clk_duty duty; + struct hlist_head children; + struct hlist_node child_node; + struct hlist_head clks; + unsigned int notifier_count; + struct dentry *dentry; + struct hlist_node debug_node; + struct kref ref; +}; + +struct clk_onecell_data { + struct clk **clks; + unsigned int clk_num; +}; + +struct clk_hw_onecell_data { + unsigned int num; + struct clk_hw *hws[0]; +}; + +struct clk_parent_map { + const struct clk_hw *hw; + struct clk_core *core; + const char *fw_name; + const char *name; + int index; +}; + +struct trace_event_raw_clk { + struct trace_entry ent; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_clk_rate { + struct trace_entry ent; + u32 __data_loc_name; + long unsigned int rate; + char __data[0]; +}; + +struct trace_event_raw_clk_parent { + struct trace_entry ent; + u32 __data_loc_name; + u32 __data_loc_pname; + char __data[0]; +}; + +struct trace_event_raw_clk_phase { + struct trace_entry ent; + u32 __data_loc_name; + int phase; + char __data[0]; +}; + +struct trace_event_raw_clk_duty_cycle { + struct trace_entry ent; + u32 __data_loc_name; + unsigned int num; + unsigned int den; + char __data[0]; +}; + +struct trace_event_data_offsets_clk { + u32 name; +}; + +struct trace_event_data_offsets_clk_rate { + u32 name; +}; + +struct trace_event_data_offsets_clk_parent { + u32 name; + u32 pname; +}; + +struct trace_event_data_offsets_clk_phase { + u32 name; +}; + +struct trace_event_data_offsets_clk_duty_cycle { + u32 name; +}; + +typedef void (*btf_trace_clk_enable)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_enable_complete)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_disable)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_disable_complete)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_prepare)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_prepare_complete)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_unprepare)(void *, struct clk_core *); + 
+typedef void (*btf_trace_clk_unprepare_complete)(void *, struct clk_core *); + +typedef void (*btf_trace_clk_set_rate)(void *, struct clk_core *, long unsigned int); + +typedef void (*btf_trace_clk_set_rate_complete)(void *, struct clk_core *, long unsigned int); + +typedef void (*btf_trace_clk_set_parent)(void *, struct clk_core *, struct clk_core *); + +typedef void (*btf_trace_clk_set_parent_complete)(void *, struct clk_core *, struct clk_core *); + +typedef void (*btf_trace_clk_set_phase)(void *, struct clk_core *, int); + +typedef void (*btf_trace_clk_set_phase_complete)(void *, struct clk_core *, int); + +typedef void (*btf_trace_clk_set_duty_cycle)(void *, struct clk_core *, struct clk_duty *); + +typedef void (*btf_trace_clk_set_duty_cycle_complete)(void *, struct clk_core *, struct clk_duty *); + +struct of_clk_provider { + struct list_head link; + struct device_node *node; + struct clk * (*get)(struct of_phandle_args *, void *); + struct clk_hw * (*get_hw)(struct of_phandle_args *, void *); + void *data; +}; + +struct clock_provider { + void (*clk_init_cb)(struct device_node *); + struct device_node *np; + struct list_head node; +}; + +struct clk_div_table { + unsigned int val; + unsigned int div; +}; + +struct clk_divider { + struct clk_hw hw; + void *reg; + u8 shift; + u8 width; + u8 flags; + const struct clk_div_table *table; + spinlock_t *lock; +}; + +typedef void (*of_init_fn_1)(struct device_node *); + +struct clk_fixed_factor { + struct clk_hw hw; + unsigned int mult; + unsigned int div; +}; + +struct clk_fixed_rate { + struct clk_hw hw; + long unsigned int fixed_rate; + long unsigned int fixed_accuracy; + long unsigned int flags; +}; + +struct clk_gate { + struct clk_hw hw; + void *reg; + u8 bit_idx; + u8 flags; + spinlock_t *lock; +}; + +struct clk_multiplier { + struct clk_hw hw; + void *reg; + u8 shift; + u8 width; + u8 flags; + spinlock_t *lock; +}; + +struct clk_mux { + struct clk_hw hw; + void *reg; + u32 *table; + u32 mask; + u8 shift; + u8 flags; + spinlock_t *lock; +}; + +struct clk_composite { + struct clk_hw hw; + struct clk_ops ops; + struct clk_hw *mux_hw; + struct clk_hw *rate_hw; + struct clk_hw *gate_hw; + const struct clk_ops *mux_ops; + const struct clk_ops *rate_ops; + const struct clk_ops *gate_ops; +}; + +struct clk_fractional_divider { + struct clk_hw hw; + void *reg; + u8 mshift; + u8 mwidth; + u32 mmask; + u8 nshift; + u8 nwidth; + u32 nmask; + u8 flags; + void (*approximation)(struct clk_hw *, long unsigned int, long unsigned int *, long unsigned int *, long unsigned int *); + spinlock_t *lock; +}; + +struct clk_gpio { + struct clk_hw hw; + struct gpio_desc *gpiod; +}; + +struct ccsr_guts { + u32 porpllsr; + u32 porbmsr; + u32 porimpscr; + u32 pordevsr; + u32 pordbgmsr; + u32 pordevsr2; + u8 res018[8]; + u32 porcir; + u8 res024[12]; + u32 gpiocr; + u8 res034[12]; + u32 gpoutdr; + u8 res044[12]; + u32 gpindr; + u8 res054[12]; + u32 pmuxcr; + u32 pmuxcr2; + u32 dmuxcr; + u8 res06c[4]; + u32 devdisr; + u32 devdisr2; + u8 res078[4]; + u32 pmjcr; + u32 powmgtcsr; + u32 pmrccr; + u32 pmpdccr; + u32 pmcdr; + u32 mcpsumr; + u32 rstrscr; + u32 ectrstcr; + u32 autorstsr; + u32 pvr; + u32 svr; + u8 res0a8[8]; + u32 rstcr; + u8 res0b4[12]; + u32 iovselsr; + u8 res0c4[60]; + u32 rcwsr[16]; + u8 res140[228]; + u32 iodelay1; + u32 iodelay2; + u8 res22c[984]; + u32 pamubypenr; + u8 res608[504]; + u32 clkdvdr; + u8 res804[252]; + u32 ircr; + u8 res904[4]; + u32 dmacr; + u8 res90c[8]; + u32 elbccr; + u8 res918[520]; + u32 ddr1clkdr; + u32 ddr2clkdr; + u32 
ddrclkdr; + u8 resb2c[724]; + u32 clkocr; + u8 rese04[12]; + u32 ddrdllcr; + u8 rese14[12]; + u32 lbcdllcr; + u32 cpfor; + u8 rese28[220]; + u32 srds1cr0; + u32 srds1cr1; + u8 resf0c[32]; + u32 itcr; + u8 resf30[16]; + u32 srds2cr0; + u32 srds2cr1; +}; + +struct clockgen_pll_div { + struct clk *clk; + char name[32]; +}; + +struct clockgen_pll { + struct clockgen_pll_div div[32]; +}; + +struct clockgen_sourceinfo { + u32 flags; + int pll; + int div; +}; + +struct clockgen_muxinfo { + struct clockgen_sourceinfo clksel[16]; +}; + +struct clockgen; + +struct clockgen_chipinfo { + const char *compat; + const char *guts_compat; + const struct clockgen_muxinfo *cmux_groups[2]; + const struct clockgen_muxinfo *hwaccel[5]; + void (*init_periph)(struct clockgen *); + int cmux_to_group[9]; + u32 pll_mask; + u32 flags; +}; + +struct clockgen { + struct device_node *node; + void *regs; + struct clockgen_chipinfo info; + struct clk *sysclk; + struct clk *coreclk; + struct clockgen_pll pll[6]; + struct clk *cmux[8]; + struct clk *hwaccel[5]; + struct clk *fman[2]; + struct ccsr_guts *guts; +}; + +struct mux_hwclock { + struct clk_hw hw; + struct clockgen *cg; + const struct clockgen_muxinfo *info; + u32 *reg; + u8 parent_to_clksel[16]; + s8 clksel_to_parent[16]; + int num_parents; +}; + +struct scpi_opp { + u32 freq; + u32 m_volt; +}; + +struct scpi_dvfs_info { + unsigned int count; + unsigned int latency; + struct scpi_opp *opps; +}; + +struct scpi_sensor_info { + u16 sensor_id; + u8 class; + u8 trigger_type; + char name[20]; +}; + +struct scpi_ops { + u32 (*get_version)(); + int (*clk_get_range)(u16, long unsigned int *, long unsigned int *); + long unsigned int (*clk_get_val)(u16); + int (*clk_set_val)(u16, long unsigned int); + int (*dvfs_get_idx)(u8); + int (*dvfs_set_idx)(u8, u8); + struct scpi_dvfs_info * (*dvfs_get_info)(u8); + int (*device_domain_id)(struct device *); + int (*get_transition_latency)(struct device *); + int (*add_opps_to_device)(struct device *); + int (*sensor_get_capability)(u16 *); + int (*sensor_get_info)(u16, struct scpi_sensor_info *); + int (*sensor_get_value)(u16, u64 *); + int (*device_get_power_state)(u16); + int (*device_set_power_state)(u16, u8); +}; + +struct scpi_clk { + u32 id; + struct clk_hw hw; + struct scpi_dvfs_info *info; + struct scpi_ops *scpi_ops; +}; + +struct scpi_clk_data { + struct scpi_clk **clk; + unsigned int clk_num; +}; + +enum xgene_pll_type { + PLL_TYPE_PCP = 0, + PLL_TYPE_SOC = 1, +}; + +struct xgene_clk_pll { + struct clk_hw hw; + void *reg; + spinlock_t *lock; + u32 pll_offset; + enum xgene_pll_type type; + int version; +}; + +struct xgene_clk_pmd { + struct clk_hw hw; + void *reg; + u8 shift; + u32 mask; + u64 denom; + u32 flags; + spinlock_t *lock; +}; + +struct xgene_dev_parameters { + void *csr_reg; + u32 reg_clk_offset; + u32 reg_clk_mask; + u32 reg_csr_offset; + u32 reg_csr_mask; + void *divider_reg; + u32 reg_divider_offset; + u32 reg_divider_shift; + u32 reg_divider_width; +}; + +struct xgene_clk { + struct clk_hw hw; + spinlock_t *lock; + struct xgene_dev_parameters param; +}; + +struct reset_controller_dev; + +struct reset_control_ops { + int (*reset)(struct reset_controller_dev *, long unsigned int); + int (*assert)(struct reset_controller_dev *, long unsigned int); + int (*deassert)(struct reset_controller_dev *, long unsigned int); + int (*status)(struct reset_controller_dev *, long unsigned int); +}; + +struct reset_controller_dev { + const struct reset_control_ops *ops; + struct module *owner; + struct list_head list; + struct 
list_head reset_control_head; + struct device *dev; + struct device_node *of_node; + int of_reset_n_cells; + int (*of_xlate)(struct reset_controller_dev *, const struct of_phandle_args *); + unsigned int nr_resets; +}; + +struct reset_simple_data { + spinlock_t lock; + void *membase; + struct reset_controller_dev rcdev; + bool active_low; + bool status_active_low; + unsigned int reset_us; +}; + +struct clk_dvp { + struct clk_hw_onecell_data *data; + struct reset_simple_data reset; +}; + +struct bcm2835_cprman { + struct device *dev; + void *regs; + spinlock_t regs_lock; + unsigned int soc; + const char *real_parent_names[7]; + struct clk_hw_onecell_data onecell; +}; + +struct cprman_plat_data { + unsigned int soc; +}; + +struct bcm2835_pll_ana_bits; + +struct bcm2835_pll_data { + const char *name; + u32 cm_ctrl_reg; + u32 a2w_ctrl_reg; + u32 frac_reg; + u32 ana_reg_base; + u32 reference_enable_mask; + u32 lock_mask; + u32 flags; + const struct bcm2835_pll_ana_bits *ana; + long unsigned int min_rate; + long unsigned int max_rate; + long unsigned int max_fb_rate; +}; + +struct bcm2835_pll_ana_bits { + u32 mask0; + u32 set0; + u32 mask1; + u32 set1; + u32 mask3; + u32 set3; + u32 fb_prediv_mask; +}; + +struct bcm2835_pll_divider_data { + const char *name; + const char *source_pll; + u32 cm_reg; + u32 a2w_reg; + u32 load_mask; + u32 hold_mask; + u32 fixed_divider; + u32 flags; +}; + +struct bcm2835_clock_data { + const char *name; + const char * const *parents; + int num_mux_parents; + unsigned int set_rate_parent; + u32 ctl_reg; + u32 div_reg; + u32 int_bits; + u32 frac_bits; + u32 flags; + bool is_vpu_clock; + bool is_mash_clock; + bool low_jitter; + u32 tcnt_mux; +}; + +struct bcm2835_gate_data { + const char *name; + const char *parent; + u32 ctl_reg; +}; + +struct bcm2835_pll { + struct clk_hw hw; + struct bcm2835_cprman *cprman; + const struct bcm2835_pll_data *data; +}; + +struct bcm2835_pll_divider { + struct clk_divider div; + struct bcm2835_cprman *cprman; + const struct bcm2835_pll_divider_data *data; +}; + +struct bcm2835_clock { + struct clk_hw hw; + struct bcm2835_cprman *cprman; + const struct bcm2835_clock_data *data; +}; + +struct bcm2835_clk_desc { + struct clk_hw * (*clk_register)(struct bcm2835_cprman *, const void *); + unsigned int supported; + const void *data; +}; + +struct hisi_clock_data { + struct clk_onecell_data clk_data; + void *base; +}; + +struct hisi_fixed_rate_clock { + unsigned int id; + char *name; + const char *parent_name; + long unsigned int flags; + long unsigned int fixed_rate; +}; + +struct hisi_fixed_factor_clock { + unsigned int id; + char *name; + const char *parent_name; + long unsigned int mult; + long unsigned int div; + long unsigned int flags; +}; + +struct hisi_mux_clock { + unsigned int id; + const char *name; + const char * const *parent_names; + u8 num_parents; + long unsigned int flags; + long unsigned int offset; + u8 shift; + u8 width; + u8 mux_flags; + u32 *table; + const char *alias; +}; + +struct hisi_phase_clock { + unsigned int id; + const char *name; + const char *parent_names; + long unsigned int flags; + long unsigned int offset; + u8 shift; + u8 width; + u32 *phase_degrees; + u32 *phase_regvals; + u8 phase_num; +}; + +struct hisi_divider_clock { + unsigned int id; + const char *name; + const char *parent_name; + long unsigned int flags; + long unsigned int offset; + u8 shift; + u8 width; + u8 div_flags; + struct clk_div_table *table; + const char *alias; +}; + +struct hi6220_divider_clock { + unsigned int id; + const char *name; 
+ const char *parent_name; + long unsigned int flags; + long unsigned int offset; + u8 shift; + u8 width; + u32 mask_bit; + const char *alias; +}; + +struct hisi_gate_clock { + unsigned int id; + const char *name; + const char *parent_name; + long unsigned int flags; + long unsigned int offset; + u8 bit_idx; + u8 gate_flags; + const char *alias; +}; + +struct clkgate_separated { + struct clk_hw hw; + void *enable; + u8 bit_idx; + u8 flags; + spinlock_t *lock; +}; + +struct hi6220_clk_divider { + struct clk_hw hw; + void *reg; + u8 shift; + u8 width; + u32 mask; + const struct clk_div_table *table; + spinlock_t *lock; +}; + +struct clk_hisi_phase { + struct clk_hw hw; + void *reg; + u32 *phase_degrees; + u32 *phase_regvals; + u8 phase_num; + u32 mask; + u8 shift; + u8 flags; + spinlock_t *lock; +}; + +struct hisi_crg_funcs { + struct hisi_clock_data * (*register_clks)(struct platform_device *); + void (*unregister_clks)(struct platform_device *); +}; + +struct hisi_reset_controller; + +struct hisi_crg_dev { + struct hisi_clock_data *clk_data; + struct hisi_reset_controller *rstc; + const struct hisi_crg_funcs *funcs; +}; + +struct hi3519_crg_data { + struct hisi_clock_data *clk_data; + struct hisi_reset_controller *rstc; +}; + +struct hisi_reset_controller___2 { + spinlock_t lock; + void *membase; + struct reset_controller_dev rcdev; +}; + +struct mbox_chan___2; + +struct hi6220_stub_clk { + u32 id; + struct device *dev; + struct clk_hw hw; + struct regmap *dfs_map; + struct mbox_client cl; + struct mbox_chan___2 *mbox; +}; + +struct hi6220_mbox_msg { + unsigned char type; + unsigned char cmd; + unsigned char obj; + unsigned char src; + unsigned char para[4]; +}; + +union hi6220_mbox_data { + unsigned int data[8]; + struct hi6220_mbox_msg msg; +}; + +struct hi3660_stub_clk_chan { + struct mbox_client cl; + struct mbox_chan___2 *mbox; +}; + +struct hi3660_stub_clk { + unsigned int id; + struct clk_hw hw; + unsigned int cmd; + unsigned int msg[8]; + unsigned int rate; +}; + +struct mtk_fixed_clk { + int id; + const char *name; + const char *parent; + long unsigned int rate; +}; + +struct mtk_fixed_factor { + int id; + const char *name; + const char *parent_name; + int mult; + int div; +}; + +struct mtk_composite { + int id; + const char *name; + const char * const *parent_names; + const char *parent; + unsigned int flags; + uint32_t mux_reg; + uint32_t divider_reg; + uint32_t gate_reg; + signed char mux_shift; + signed char mux_width; + signed char gate_shift; + signed char divider_shift; + signed char divider_width; + u8 mux_flags; + signed char num_parents; +}; + +struct mtk_gate_regs { + u32 sta_ofs; + u32 clr_ofs; + u32 set_ofs; +}; + +struct mtk_gate { + int id; + const char *name; + const char *parent_name; + const struct mtk_gate_regs *regs; + int shift; + const struct clk_ops *ops; + long unsigned int flags; +}; + +struct mtk_clk_divider { + int id; + const char *name; + const char *parent_name; + long unsigned int flags; + u32 div_reg; + unsigned char div_shift; + unsigned char div_width; + unsigned char clk_divider_flags; + const struct clk_div_table *clk_div_table; +}; + +struct mtk_pll_div_table { + u32 div; + long unsigned int freq; +}; + +struct mtk_pll_data { + int id; + const char *name; + uint32_t reg; + uint32_t pwr_reg; + uint32_t en_mask; + uint32_t pd_reg; + uint32_t tuner_reg; + uint32_t tuner_en_reg; + uint8_t tuner_en_bit; + int pd_shift; + unsigned int flags; + const struct clk_ops *ops; + u32 rst_bar_mask; + long unsigned int fmin; + long unsigned int fmax; + int 
pcwbits; + int pcwibits; + uint32_t pcw_reg; + int pcw_shift; + uint32_t pcw_chg_reg; + const struct mtk_pll_div_table *div_table; + const char *parent_name; +}; + +struct mtk_clk_pll { + struct clk_hw hw; + void *base_addr; + void *pd_addr; + void *pwr_addr; + void *tuner_addr; + void *tuner_en_addr; + void *pcw_addr; + void *pcw_chg_addr; + const struct mtk_pll_data *data; +}; + +struct mtk_clk_gate { + struct clk_hw hw; + struct regmap *regmap; + int set_ofs; + int clr_ofs; + int sta_ofs; + u8 bit; +}; + +struct mtk_ref2usb_tx { + struct clk_hw hw; + void *base_addr; +}; + +struct mtk_clk_cpumux { + struct clk_hw hw; + struct regmap *regmap; + u32 reg; + u32 mask; + u8 shift; +}; + +struct mtk_reset { + struct regmap *regmap; + int regofs; + struct reset_controller_dev rcdev; +}; + +struct mtk_mux; + +struct mtk_clk_mux { + struct clk_hw hw; + struct regmap *regmap; + const struct mtk_mux *data; + spinlock_t *lock; +}; + +struct mtk_mux { + int id; + const char *name; + const char * const *parent_names; + unsigned int flags; + u32 mux_ofs; + u32 set_ofs; + u32 clr_ofs; + u32 upd_ofs; + u8 mux_shift; + u8 mux_width; + u8 gate_shift; + s8 upd_shift; + const struct clk_ops *ops; + signed char num_parents; +}; + +struct clk_mt8167_mm_driver_data { + const struct mtk_gate *gates_clk; + int gates_num; +}; + +struct mtk_clk_usb { + int id; + const char *name; + const char *parent; + u32 reg_ofs; +}; + +struct clk_mt8173_mm_driver_data { + const struct mtk_gate *gates_clk; + int gates_num; +}; + +struct clk_regmap { + struct clk_hw hw; + struct regmap *map; + void *data; +}; + +struct meson_aoclk_data { + const unsigned int reset_reg; + const int num_reset; + const unsigned int *reset; + const int num_clks; + struct clk_regmap **clks; + const struct clk_hw_onecell_data *hw_data; +}; + +struct meson_aoclk_reset_controller { + struct reset_controller_dev reset; + const struct meson_aoclk_data *data; + struct regmap *regmap; +}; + +struct parm { + u16 reg_off; + u8 shift; + u8 width; +}; + +struct meson_clk_cpu_dyndiv_data { + struct parm div; + struct parm dyn; +}; + +struct meson_clk_dualdiv_param { + unsigned int n1; + unsigned int n2; + unsigned int m1; + unsigned int m2; + unsigned int dual; +}; + +struct meson_clk_dualdiv_data { + struct parm n1; + struct parm n2; + struct parm m1; + struct parm m2; + struct parm dual; + const struct meson_clk_dualdiv_param *table; +}; + +struct reg_sequence { + unsigned int reg; + unsigned int def; + unsigned int delay_us; +}; + +struct meson_eeclkc_data { + struct clk_regmap * const *regmap_clks; + unsigned int regmap_clk_num; + const struct reg_sequence *init_regs; + unsigned int init_count; + struct clk_hw_onecell_data *hw_onecell_data; +}; + +struct meson_clk_mpll_data { + struct parm sdm; + struct parm sdm_en; + struct parm n2; + struct parm ssen; + struct parm misc; + const struct reg_sequence *init_regs; + unsigned int init_count; + spinlock_t *lock; + u8 flags; +}; + +struct pll_params_table { + unsigned int m; + unsigned int n; +}; + +struct pll_mult_range { + unsigned int min; + unsigned int max; +}; + +struct meson_clk_pll_data { + struct parm en; + struct parm m; + struct parm n; + struct parm frac; + struct parm l; + struct parm rst; + const struct reg_sequence *init_regs; + unsigned int init_count; + const struct pll_params_table *table; + const struct pll_mult_range *range; + u8 flags; +}; + +struct clk_regmap_gate_data { + unsigned int offset; + u8 bit_idx; + u8 flags; +}; + +struct clk_regmap_div_data { + unsigned int offset; + u8 shift; + 
u8 width; + u8 flags; + const struct clk_div_table *table; +}; + +struct clk_regmap_mux_data { + unsigned int offset; + u32 *table; + u32 mask; + u8 shift; + u8 flags; +}; + +struct meson_vid_pll_div_data { + struct parm val; + struct parm sel; +}; + +struct vid_pll_div { + unsigned int shift_val; + unsigned int shift_sel; + unsigned int divider; + unsigned int multiplier; +}; + +struct g12a_cpu_clk_postmux_nb_data { + struct notifier_block nb; + struct clk_hw *xtal; + struct clk_hw *cpu_clk_dyn; + struct clk_hw *cpu_clk_postmux0; + struct clk_hw *cpu_clk_postmux1; + struct clk_hw *cpu_clk_premux1; +}; + +struct g12a_sys_pll_nb_data { + struct notifier_block nb; + struct clk_hw *sys_pll; + struct clk_hw *cpu_clk; + struct clk_hw *cpu_clk_dyn; +}; + +struct meson_g12a_data { + const struct meson_eeclkc_data eeclkc_data; + int (*dvfs_setup)(struct platform_device *); +}; + +struct tbg_def { + char *name; + u32 refdiv_offset; + u32 fbdiv_offset; + u32 vcodiv_reg; + u32 vcodiv_offset; +}; + +struct clk_periph_driver_data { + struct clk_hw_onecell_data *hw_data; + spinlock_t lock; + void *reg; + u32 tbg_sel; + u32 div_sel0; + u32 div_sel1; + u32 div_sel2; + u32 clk_sel; + u32 clk_dis; +}; + +struct clk_double_div { + struct clk_hw hw; + void *reg1; + u8 shift1; + void *reg2; + u8 shift2; +}; + +struct clk_pm_cpu { + struct clk_hw hw; + void *reg_mux; + u8 shift_mux; + u32 mask_mux; + void *reg_div; + u8 shift_div; + struct regmap *nb_pm_base; +}; + +struct clk_periph_data { + const char *name; + const char * const *parent_names; + int num_parents; + struct clk_hw *mux_hw; + struct clk_hw *rate_hw; + struct clk_hw *gate_hw; + struct clk_hw *muxrate_hw; + bool is_double_div; +}; + +struct cpu_dfs_regs { + unsigned int divider_reg; + unsigned int force_reg; + unsigned int ratio_reg; + unsigned int ratio_state_reg; + unsigned int divider_mask; + unsigned int cluster_offset; + unsigned int force_mask; + int divider_offset; + int divider_ratio; + int ratio_offset; + int ratio_state_offset; + int ratio_state_cluster_offset; +}; + +struct ap_cpu_clk { + unsigned int cluster; + const char *clk_name; + struct device *dev; + struct clk_hw hw; + struct regmap *pll_cr_base; + const struct cpu_dfs_regs *pll_regs; +}; + +enum { + CP110_CLK_TYPE_CORE = 0, + CP110_CLK_TYPE_GATABLE = 1, +}; + +struct cp110_gate_clk { + struct clk_hw hw; + struct regmap *regmap; + u8 bit_idx; +}; + +struct clk_regmap___2; + +struct qcom_reset_map; + +struct gdsc; + +struct qcom_cc_desc { + const struct regmap_config *config; + struct clk_regmap___2 **clks; + size_t num_clks; + const struct qcom_reset_map *resets; + size_t num_resets; + struct gdsc **gdscs; + size_t num_gdscs; + struct clk_hw **clk_hws; + size_t num_clk_hws; +}; + +struct clk_regmap___2 { + struct clk_hw hw; + struct regmap *regmap; + unsigned int enable_reg; + unsigned int enable_mask; + bool enable_is_inverted; +}; + +struct qcom_reset_map { + unsigned int reg; + u8 bit; +}; + +enum gpd_status { + GENPD_STATE_ON = 0, + GENPD_STATE_OFF = 1, +}; + +struct opp_table; + +struct dev_pm_opp; + +struct gpd_dev_ops { + int (*start)(struct device *); + int (*stop)(struct device *); +}; + +struct dev_power_governor; + +struct genpd_power_state; + +struct genpd_lock_ops; + +struct generic_pm_domain { + struct device dev; + struct dev_pm_domain domain; + struct list_head gpd_list_node; + struct list_head parent_links; + struct list_head child_links; + struct list_head dev_list; + struct dev_power_governor *gov; + struct work_struct power_off_work; + struct fwnode_handle 
*provider; + bool has_provider; + const char *name; + atomic_t sd_count; + enum gpd_status status; + unsigned int device_count; + unsigned int suspended_count; + unsigned int prepared_count; + unsigned int performance_state; + cpumask_var_t cpus; + int (*power_off)(struct generic_pm_domain *); + int (*power_on)(struct generic_pm_domain *); + struct raw_notifier_head power_notifiers; + struct opp_table *opp_table; + unsigned int (*opp_to_performance_state)(struct generic_pm_domain *, struct dev_pm_opp *); + int (*set_performance_state)(struct generic_pm_domain *, unsigned int); + struct gpd_dev_ops dev_ops; + s64 max_off_time_ns; + bool max_off_time_changed; + bool cached_power_down_ok; + bool cached_power_down_state_idx; + int (*attach_dev)(struct generic_pm_domain *, struct device *); + void (*detach_dev)(struct generic_pm_domain *, struct device *); + unsigned int flags; + struct genpd_power_state *states; + void (*free_states)(struct genpd_power_state *, unsigned int); + unsigned int state_count; + unsigned int state_idx; + ktime_t on_time; + ktime_t accounting_time; + const struct genpd_lock_ops *lock_ops; + union { + struct mutex mlock; + struct { + spinlock_t slock; + long unsigned int lock_flags; + }; + }; +}; + +struct gdsc { + struct generic_pm_domain pd; + struct generic_pm_domain *parent; + struct regmap *regmap; + unsigned int gdscr; + unsigned int gds_hw_ctrl; + unsigned int clamp_io_ctrl; + unsigned int *cxcs; + unsigned int cxc_count; + const u8 pwrsts; + const u8 flags; + struct reset_controller_dev *rcdev; + unsigned int *resets; + unsigned int reset_count; + const char *supply; + struct regulator *rsupply; +}; + +struct parent_map { + u8 src; + u8 cfg; +}; + +struct freq_tbl { + long unsigned int freq; + u8 src; + u8 pre_div; + u16 m; + u16 n; +}; + +struct qcom_reset_controller { + const struct qcom_reset_map *reset_map; + struct regmap *regmap; + struct reset_controller_dev rcdev; +}; + +struct dev_power_governor { + bool (*power_down_ok)(struct dev_pm_domain *); + bool (*suspend_ok)(struct device *); +}; + +struct genpd_power_state { + s64 power_off_latency_ns; + s64 power_on_latency_ns; + s64 residency_ns; + u64 usage; + u64 rejected; + struct fwnode_handle *fwnode; + ktime_t idle_time; + void *data; +}; + +struct genpd_lock_ops { + void (*lock)(struct generic_pm_domain *); + void (*lock_nested)(struct generic_pm_domain *, int); + int (*lock_interruptible)(struct generic_pm_domain *); + void (*unlock)(struct generic_pm_domain *); +}; + +struct gdsc_desc { + struct device *dev; + struct gdsc **scs; + size_t num; +}; + +struct qcom_cc { + struct qcom_reset_controller reset; + struct clk_regmap___2 **rclks; + size_t num_rclks; +}; + +enum { + CLK_ALPHA_PLL_TYPE_DEFAULT = 0, + CLK_ALPHA_PLL_TYPE_HUAYRA = 1, + CLK_ALPHA_PLL_TYPE_BRAMMO = 2, + CLK_ALPHA_PLL_TYPE_FABIA = 3, + CLK_ALPHA_PLL_TYPE_TRION = 4, + CLK_ALPHA_PLL_TYPE_LUCID = 4, + CLK_ALPHA_PLL_TYPE_MAX = 5, +}; + +enum { + PLL_OFF_L_VAL = 0, + PLL_OFF_CAL_L_VAL = 1, + PLL_OFF_ALPHA_VAL = 2, + PLL_OFF_ALPHA_VAL_U = 3, + PLL_OFF_USER_CTL = 4, + PLL_OFF_USER_CTL_U = 5, + PLL_OFF_USER_CTL_U1 = 6, + PLL_OFF_CONFIG_CTL = 7, + PLL_OFF_CONFIG_CTL_U = 8, + PLL_OFF_CONFIG_CTL_U1 = 9, + PLL_OFF_TEST_CTL = 10, + PLL_OFF_TEST_CTL_U = 11, + PLL_OFF_TEST_CTL_U1 = 12, + PLL_OFF_STATUS = 13, + PLL_OFF_OPMODE = 14, + PLL_OFF_FRAC = 15, + PLL_OFF_CAL_VAL = 16, + PLL_OFF_MAX_REGS = 17, +}; + +struct pll_vco { + long unsigned int min_freq; + long unsigned int max_freq; + u32 val; +}; + +struct clk_alpha_pll { + u32 offset; + const u8 
*regs; + const struct pll_vco *vco_table; + size_t num_vco; + u8 flags; + struct clk_regmap___2 clkr; +}; + +struct clk_alpha_pll_postdiv { + u32 offset; + u8 width; + const u8 *regs; + struct clk_regmap___2 clkr; + int post_div_shift; + const struct clk_div_table *post_div_table; + size_t num_post_div; +}; + +struct alpha_pll_config { + u32 l; + u32 alpha; + u32 alpha_hi; + u32 config_ctl_val; + u32 config_ctl_hi_val; + u32 config_ctl_hi1_val; + u32 user_ctl_val; + u32 user_ctl_hi_val; + u32 user_ctl_hi1_val; + u32 test_ctl_val; + u32 test_ctl_hi_val; + u32 test_ctl_hi1_val; + u32 main_output_mask; + u32 aux_output_mask; + u32 aux2_output_mask; + u32 early_output_mask; + u32 alpha_en_mask; + u32 alpha_mode_mask; + u32 pre_div_val; + u32 pre_div_mask; + u32 post_div_val; + u32 post_div_mask; + u32 vco_val; + u32 vco_mask; +}; + +struct pll_freq_tbl { + long unsigned int freq; + u16 l; + u16 m; + u16 n; + u32 ibits; +}; + +struct clk_pll { + u32 l_reg; + u32 m_reg; + u32 n_reg; + u32 config_reg; + u32 mode_reg; + u32 status_reg; + u8 status_bit; + u8 post_div_width; + u8 post_div_shift; + const struct pll_freq_tbl *freq_tbl; + struct clk_regmap___2 clkr; +}; + +struct pll_config { + u16 l; + u32 m; + u32 n; + u32 vco_val; + u32 vco_mask; + u32 pre_div_val; + u32 pre_div_mask; + u32 post_div_val; + u32 post_div_mask; + u32 mn_ena_mask; + u32 main_output_mask; + u32 aux_output_mask; +}; + +struct mn { + u8 mnctr_en_bit; + u8 mnctr_reset_bit; + u8 mnctr_mode_shift; + u8 n_val_shift; + u8 m_val_shift; + u8 width; + bool reset_in_cc; +}; + +struct pre_div { + u8 pre_div_shift; + u8 pre_div_width; +}; + +struct src_sel { + u8 src_sel_shift; + const struct parent_map *parent_map; +}; + +struct clk_rcg { + u32 ns_reg; + u32 md_reg; + struct mn mn; + struct pre_div p; + struct src_sel s; + const struct freq_tbl *freq_tbl; + struct clk_regmap___2 clkr; +}; + +struct clk_dyn_rcg { + u32 ns_reg[2]; + u32 md_reg[2]; + u32 bank_reg; + u8 mux_sel_bit; + struct mn mn[2]; + struct pre_div p[2]; + struct src_sel s[2]; + const struct freq_tbl *freq_tbl; + struct clk_regmap___2 clkr; +}; + +struct frac_entry { + int num; + int den; +}; + +struct clk_rcg2 { + u32 cmd_rcgr; + u8 mnd_width; + u8 hid_width; + u8 safe_src_index; + const struct parent_map *parent_map; + const struct freq_tbl *freq_tbl; + struct clk_regmap___2 clkr; + u8 cfg_off; +}; + +struct clk_rcg_dfs_data { + struct clk_rcg2 *rcg; + struct clk_init_data *init; +}; + +enum freq_policy { + FLOOR = 0, + CEIL = 1, +}; + +struct clk_branch { + u32 hwcg_reg; + u32 halt_reg; + u8 hwcg_bit; + u8 halt_bit; + u8 halt_check; + struct clk_regmap___2 clkr; +}; + +struct clk_regmap_div { + u32 reg; + u32 shift; + u32 width; + struct clk_regmap___2 clkr; +}; + +struct clk_regmap_mux { + u32 reg; + u32 shift; + u32 width; + const struct parent_map *parent_map; + struct clk_regmap___2 clkr; +}; + +struct clk_regmap_mux_div { + u32 reg_offset; + u32 hid_width; + u32 hid_shift; + u32 src_width; + u32 src_shift; + u32 div; + u32 src; + const u32 *parent_map; + struct clk_regmap___2 clkr; + struct clk *pclk; + struct notifier_block clk_nb; +}; + +struct hfpll_data { + u32 mode_reg; + u32 l_reg; + u32 m_reg; + u32 n_reg; + u32 user_reg; + u32 droop_reg; + u32 config_reg; + u32 status_reg; + u8 lock_bit; + u32 droop_val; + u32 config_val; + u32 user_val; + u32 user_vco_mask; + long unsigned int low_vco_max_rate; + long unsigned int min_rate; + long unsigned int max_rate; +}; + +struct clk_hfpll { + const struct hfpll_data *d; + int init_done; + struct clk_regmap___2 
clkr; + spinlock_t lock; +}; + +typedef struct generic_pm_domain * (*genpd_xlate_t)(struct of_phandle_args *, void *); + +struct genpd_onecell_data { + struct generic_pm_domain **domains; + unsigned int num_domains; + genpd_xlate_t xlate; +}; + +enum gdsc_status { + GDSC_OFF = 0, + GDSC_ON = 1, +}; + +enum { + P_XO = 0, + P_GPLL0 = 1, + P_GPLL0_AUX = 2, + P_BIMC = 3, + P_GPLL1 = 4, + P_GPLL1_AUX = 5, + P_GPLL2 = 6, + P_GPLL2_AUX = 7, + P_SLEEP_CLK = 8, + P_DSI0_PHYPLL_BYTE = 9, + P_DSI0_PHYPLL_DSI = 10, + P_EXT_PRI_I2S = 11, + P_EXT_SEC_I2S = 12, + P_EXT_MCLK = 13, +}; + +enum { + P_XO___2 = 0, + P_GPLL0___2 = 1, + P_GPLL4 = 2, +}; + +enum { + P_XO___3 = 0, + P_GPLL0___3 = 1, + P_GPLL2___2 = 2, + P_GPLL3 = 3, + P_GPLL1___2 = 4, + P_GPLL2_EARLY = 5, + P_GPLL0_EARLY_DIV = 6, + P_SLEEP_CLK___2 = 7, + P_GPLL4___2 = 8, + P_AUD_REF_CLK = 9, + P_GPLL1_EARLY_DIV = 10, +}; + +enum { + P_XO___4 = 0, + P_MMPLL0 = 1, + P_GPLL0___4 = 2, + P_GPLL0_DIV = 3, + P_MMPLL1 = 4, + P_MMPLL9 = 5, + P_MMPLL2 = 6, + P_MMPLL8 = 7, + P_MMPLL3 = 8, + P_DSI0PLL = 9, + P_DSI1PLL = 10, + P_MMPLL5 = 11, + P_HDMIPLL = 12, + P_DSI0PLL_BYTE = 13, + P_DSI1PLL_BYTE = 14, + P_MMPLL4 = 15, +}; + +enum rockchip_pll_type { + pll_rk3036 = 0, + pll_rk3066 = 1, + pll_rk3328 = 2, + pll_rk3399 = 3, +}; + +struct rockchip_clk_provider { + void *reg_base; + struct clk_onecell_data clk_data; + struct device_node *cru_node; + struct regmap *grf; + spinlock_t lock; +}; + +struct rockchip_pll_rate_table { + long unsigned int rate; + unsigned int nr; + unsigned int nf; + unsigned int no; + unsigned int nb; + unsigned int fbdiv; + unsigned int postdiv1; + unsigned int refdiv; + unsigned int postdiv2; + unsigned int dsmpd; + unsigned int frac; +}; + +struct rockchip_pll_clock { + unsigned int id; + const char *name; + const char * const *parent_names; + u8 num_parents; + long unsigned int flags; + int con_offset; + int mode_offset; + int mode_shift; + int lock_shift; + enum rockchip_pll_type type; + u8 pll_flags; + struct rockchip_pll_rate_table *rate_table; +}; + +struct rockchip_cpuclk_clksel { + int reg; + u32 val; +}; + +struct rockchip_cpuclk_rate_table { + long unsigned int prate; + struct rockchip_cpuclk_clksel divs[2]; +}; + +struct rockchip_cpuclk_reg_data { + int core_reg; + u8 div_core_shift; + u32 div_core_mask; + u8 mux_core_alt; + u8 mux_core_main; + u8 mux_core_shift; + u32 mux_core_mask; +}; + +enum rockchip_clk_branch_type { + branch_composite = 0, + branch_mux = 1, + branch_muxgrf = 2, + branch_divider = 3, + branch_fraction_divider = 4, + branch_gate = 5, + branch_mmc = 6, + branch_inverter = 7, + branch_factor = 8, + branch_ddrclk = 9, + branch_half_divider = 10, +}; + +struct rockchip_clk_branch { + unsigned int id; + enum rockchip_clk_branch_type branch_type; + const char *name; + const char * const *parent_names; + u8 num_parents; + long unsigned int flags; + int muxdiv_offset; + u8 mux_shift; + u8 mux_width; + u8 mux_flags; + int div_offset; + u8 div_shift; + u8 div_width; + u8 div_flags; + struct clk_div_table *div_table; + int gate_offset; + u8 gate_shift; + u8 gate_flags; + struct rockchip_clk_branch *child; +}; + +struct rockchip_clk_frac { + struct notifier_block clk_nb; + struct clk_fractional_divider div; + struct clk_gate gate; + struct clk_mux mux; + const struct clk_ops *mux_ops; + int mux_frac_idx; + bool rate_change_remuxed; + int rate_change_idx; +}; + +struct rockchip_clk_pll { + struct clk_hw hw; + struct clk_mux pll_mux; + const struct clk_ops *pll_mux_ops; + struct notifier_block clk_nb; + void 
*reg_base; + int lock_offset; + unsigned int lock_shift; + enum rockchip_pll_type type; + u8 flags; + const struct rockchip_pll_rate_table *rate_table; + unsigned int rate_count; + spinlock_t *lock; + struct rockchip_clk_provider *ctx; +}; + +struct rockchip_cpuclk { + struct clk_hw hw; + struct clk_mux cpu_mux; + const struct clk_ops *cpu_mux_ops; + struct clk *alt_parent; + void *reg_base; + struct notifier_block clk_nb; + unsigned int rate_count; + struct rockchip_cpuclk_rate_table *rate_table; + const struct rockchip_cpuclk_reg_data *reg_data; + spinlock_t *lock; +}; + +struct rockchip_inv_clock { + struct clk_hw hw; + void *reg; + int shift; + int flags; + spinlock_t *lock; +}; + +struct rockchip_mmc_clock { + struct clk_hw hw; + void *reg; + int id; + int shift; + int cached_phase; + struct notifier_block clk_rate_change_nb; +}; + +struct rockchip_muxgrf_clock { + struct clk_hw hw; + struct regmap *regmap; + u32 reg; + u32 shift; + u32 width; + int flags; +}; + +struct rockchip_ddrclk { + struct clk_hw hw; + void *reg_base; + int mux_offset; + int mux_shift; + int mux_width; + int div_shift; + int div_width; + int ddr_flag; + spinlock_t *lock; +}; + +struct rockchip_softrst { + struct reset_controller_dev rcdev; + void *reg_base; + int num_regs; + int num_per_reg; + u8 flags; + spinlock_t lock; +}; + +enum px30_plls { + apll = 0, + dpll = 1, + cpll = 2, + npll = 3, + apll_b_h = 4, + apll_b_l = 5, +}; + +enum px30_pmu_plls { + gpll = 0, +}; + +enum rv1108_plls { + apll___2 = 0, + dpll___2 = 1, + gpll___2 = 2, +}; + +enum rk3036_plls { + apll___3 = 0, + dpll___3 = 1, + gpll___3 = 2, +}; + +enum rk3128_plls { + apll___4 = 0, + dpll___4 = 1, + cpll___2 = 2, + gpll___4 = 3, +}; + +enum rk3188_plls { + apll___5 = 0, + cpll___3 = 1, + dpll___5 = 2, + gpll___5 = 3, +}; + +enum rk3228_plls { + apll___6 = 0, + dpll___6 = 1, + cpll___4 = 2, + gpll___6 = 3, +}; + +enum rk3308_plls { + apll___7 = 0, + dpll___7 = 1, + vpll0 = 2, + vpll1 = 3, +}; + +enum rk3328_plls { + apll___8 = 0, + dpll___8 = 1, + cpll___5 = 2, + gpll___7 = 3, + npll___2 = 4, +}; + +enum rk3368_plls { + apllb = 0, + aplll = 1, + dpll___9 = 2, + cpll___6 = 3, + gpll___8 = 4, + npll___3 = 5, +}; + +enum rk3399_plls { + lpll = 0, + bpll = 1, + dpll___10 = 2, + cpll___7 = 3, + gpll___9 = 4, + npll___4 = 5, + vpll = 6, +}; + +enum rk3399_pmu_plls { + ppll = 0, +}; + +struct clk_rk3399_inits { + void (*inits)(struct device_node *); +}; + +enum samsung_pll_type { + pll_2126 = 0, + pll_3000 = 1, + pll_35xx = 2, + pll_36xx = 3, + pll_2550 = 4, + pll_2650 = 5, + pll_4500 = 6, + pll_4502 = 7, + pll_4508 = 8, + pll_4600 = 9, + pll_4650 = 10, + pll_4650c = 11, + pll_6552 = 12, + pll_6552_s3c2416 = 13, + pll_6553 = 14, + pll_s3c2410_mpll = 15, + pll_s3c2410_upll = 16, + pll_s3c2440_mpll = 17, + pll_2550x = 18, + pll_2550xx = 19, + pll_2650x = 20, + pll_2650xx = 21, + pll_1450x = 22, + pll_1451x = 23, + pll_1452x = 24, + pll_1460x = 25, +}; + +struct samsung_pll_rate_table { + unsigned int rate; + unsigned int pdiv; + unsigned int mdiv; + unsigned int sdiv; + unsigned int kdiv; + unsigned int afc; + unsigned int mfr; + unsigned int mrr; + unsigned int vsel; +}; + +struct samsung_clk_provider { + void *reg_base; + struct device *dev; + spinlock_t lock; + struct clk_hw_onecell_data clk_data; +}; + +struct samsung_clock_alias { + unsigned int id; + const char *dev_name; + const char *alias; +}; + +struct samsung_fixed_rate_clock { + unsigned int id; + char *name; + const char *parent_name; + long unsigned int flags; + long unsigned int 
fixed_rate; +}; + +struct samsung_fixed_factor_clock { + unsigned int id; + char *name; + const char *parent_name; + long unsigned int mult; + long unsigned int div; + long unsigned int flags; +}; + +struct samsung_mux_clock { + unsigned int id; + const char *name; + const char * const *parent_names; + u8 num_parents; + long unsigned int flags; + long unsigned int offset; + u8 shift; + u8 width; + u8 mux_flags; +}; + +struct samsung_div_clock { + unsigned int id; + const char *name; + const char *parent_name; + long unsigned int flags; + long unsigned int offset; + u8 shift; + u8 width; + u8 div_flags; + struct clk_div_table *table; +}; + +struct samsung_gate_clock { + unsigned int id; + const char *name; + const char *parent_name; + long unsigned int flags; + long unsigned int offset; + u8 bit_idx; + u8 gate_flags; +}; + +struct samsung_clk_reg_dump { + u32 offset; + u32 value; +}; + +struct samsung_pll_clock { + unsigned int id; + const char *name; + const char *parent_name; + long unsigned int flags; + int con_offset; + int lock_offset; + enum samsung_pll_type type; + const struct samsung_pll_rate_table *rate_table; +}; + +struct samsung_clock_reg_cache { + struct list_head node; + void *reg_base; + struct samsung_clk_reg_dump *rdump; + unsigned int rd_num; + const struct samsung_clk_reg_dump *rsuspend; + unsigned int rsuspend_num; +}; + +struct samsung_cmu_info { + const struct samsung_pll_clock *pll_clks; + unsigned int nr_pll_clks; + const struct samsung_mux_clock *mux_clks; + unsigned int nr_mux_clks; + const struct samsung_div_clock *div_clks; + unsigned int nr_div_clks; + const struct samsung_gate_clock *gate_clks; + unsigned int nr_gate_clks; + const struct samsung_fixed_rate_clock *fixed_clks; + unsigned int nr_fixed_clks; + const struct samsung_fixed_factor_clock *fixed_factor_clks; + unsigned int nr_fixed_factor_clks; + unsigned int nr_clk_ids; + const long unsigned int *clk_regs; + unsigned int nr_clk_regs; + const struct samsung_clk_reg_dump *suspend_regs; + unsigned int nr_suspend_regs; + const char *clk_name; +}; + +struct samsung_clk_pll { + struct clk_hw hw; + void *lock_reg; + void *con_reg; + short unsigned int enable_offs; + short unsigned int lock_offs; + enum samsung_pll_type type; + unsigned int rate_count; + const struct samsung_pll_rate_table *rate_table; +}; + +struct exynos_cpuclk_cfg_data { + long unsigned int prate; + long unsigned int div0; + long unsigned int div1; +}; + +struct exynos_cpuclk { + struct clk_hw hw; + const struct clk_hw *alt_parent; + void *ctrl_base; + spinlock_t *lock; + const struct exynos_cpuclk_cfg_data *cfg; + const long unsigned int num_cfgs; + struct notifier_block clk_nb; + long unsigned int flags; +}; + +struct exynos5433_cmu_data { + struct samsung_clk_reg_dump *clk_save; + unsigned int nr_clk_save; + const struct samsung_clk_reg_dump *clk_suspend; + unsigned int nr_clk_suspend; + struct clk *clk; + struct clk **pclks; + int nr_pclks; + struct samsung_clk_provider ctx; +}; + +struct exynos_audss_clk_drvdata { + unsigned int has_adma_clk: 1; + unsigned int has_mst_clk: 1; + unsigned int enable_epll: 1; + unsigned int num_clks; +}; + +struct exynos_clkout { + struct clk_gate gate; + struct clk_mux mux; + spinlock_t slock; + void *reg; + u32 pmu_debug_save; + struct clk_hw_onecell_data data; +}; + +struct clk_factors_config { + u8 nshift; + u8 nwidth; + u8 kshift; + u8 kwidth; + u8 mshift; + u8 mwidth; + u8 pshift; + u8 pwidth; + u8 n_start; +}; + +struct factors_request { + long unsigned int rate; + long unsigned int parent_rate; + 
u8 parent_index; + u8 n; + u8 k; + u8 m; + u8 p; +}; + +struct factors_data { + int enable; + int mux; + int muxmask; + const struct clk_factors_config *table; + void (*getter)(struct factors_request *); + void (*recalc)(struct factors_request *); + const char *name; +}; + +struct clk_factors { + struct clk_hw hw; + void *reg; + const struct clk_factors_config *config; + void (*get_factors)(struct factors_request *); + void (*recalc)(struct factors_request *); + spinlock_t *lock; + struct clk_mux *mux; + struct clk_gate *gate; +}; + +struct mux_data { + u8 shift; +}; + +struct div_data { + u8 shift; + u8 pow; + u8 width; + const struct clk_div_table *table; +}; + +struct divs_data { + const struct factors_data *factors; + int ndivs; + struct { + u8 self; + u8 fixed; + struct clk_div_table *table; + u8 shift; + u8 pow; + u8 gate; + bool critical; + } div[4]; +}; + +struct ve_reset_data { + void *reg; + spinlock_t *lock; + struct reset_controller_dev rcdev; +}; + +struct mmc_phase { + struct clk_hw hw; + u8 offset; + void *reg; + spinlock_t *lock; +}; + +struct sun4i_a10_display_clk_data { + bool has_div; + u8 num_rst; + u8 parents; + u8 offset_en; + u8 offset_div; + u8 offset_mux; + u8 offset_rst; + u8 width_div; + u8 width_mux; + u32 flags; +}; + +struct reset_data { + void *reg; + spinlock_t *lock; + struct reset_controller_dev rcdev; + u8 offset; +}; + +struct tcon_ch1_clk { + struct clk_hw hw; + spinlock_t lock; + void *reg; +}; + +enum { + AHB1 = 0, + AHB2 = 1, + APB1 = 2, + APB2 = 3, + PARENT_MAX = 4, +}; + +struct sun9i_mmc_clk_data { + spinlock_t lock; + void *membase; + struct clk *clk; + struct reset_control *reset; + struct clk_onecell_data clk_data; + struct reset_controller_dev rcdev; +}; + +struct usb_reset_data { + void *reg; + spinlock_t *lock; + struct clk *clk; + struct reset_controller_dev rcdev; +}; + +struct usb_clk_data { + u32 clk_mask; + u32 reset_mask; + bool reset_needs_clk; +}; + +struct sun9i_a80_cpus_clk { + struct clk_hw hw; + void *reg; +}; + +struct gates_data { + long unsigned int mask[1]; +}; + +struct ccu_common { + void *base; + u16 reg; + u16 lock_reg; + u32 prediv; + long unsigned int features; + spinlock_t *lock; + struct clk_hw hw; +}; + +struct ccu_reset_map; + +struct sunxi_ccu_desc { + struct ccu_common **ccu_clks; + long unsigned int num_ccu_clks; + struct clk_hw_onecell_data *hw_clks; + struct ccu_reset_map *resets; + long unsigned int num_resets; +}; + +struct ccu_reset_map { + u16 reg; + u32 bit; +}; + +struct ccu_pll_nb { + struct notifier_block clk_nb; + struct ccu_common *common; + u32 enable; + u32 lock; +}; + +struct ccu_reset { + void *base; + struct ccu_reset_map *reset_map; + spinlock_t *lock; + struct reset_controller_dev rcdev; +}; + +struct ccu_mux_fixed_prediv { + u8 index; + u16 div; +}; + +struct ccu_mux_var_prediv { + u8 index; + u8 shift; + u8 width; +}; + +struct ccu_mux_internal { + u8 shift; + u8 width; + const u8 *table; + const struct ccu_mux_fixed_prediv *fixed_predivs; + u8 n_predivs; + const struct ccu_mux_var_prediv *var_predivs; + u8 n_var_predivs; +}; + +struct ccu_div_internal { + u8 shift; + u8 width; + u32 max; + u32 offset; + u32 flags; + struct clk_div_table *table; +}; + +struct ccu_div { + u32 enable; + struct ccu_div_internal div; + struct ccu_mux_internal mux; + struct ccu_common common; + unsigned int fixed_post_div; +}; + +struct ccu_frac_internal { + u32 enable; + u32 select; + long unsigned int rates[2]; +}; + +struct ccu_gate { + u32 enable; + struct ccu_common common; +}; + +struct ccu_mux { + u16 reg; 
+ u32 enable; + struct ccu_mux_internal mux; + struct ccu_common common; +}; + +struct ccu_mux_nb { + struct notifier_block clk_nb; + struct ccu_common *common; + struct ccu_mux_internal *cm; + u32 delay_us; + u8 bypass_index; + u8 original_index; +}; + +struct ccu_mult_internal { + u8 offset; + u8 shift; + u8 width; + u8 min; + u8 max; +}; + +struct ccu_mult { + u32 enable; + u32 lock; + struct ccu_frac_internal frac; + struct ccu_mult_internal mult; + struct ccu_mux_internal mux; + struct ccu_common common; +}; + +struct _ccu_mult { + long unsigned int mult; + long unsigned int min; + long unsigned int max; +}; + +struct ccu_phase { + u8 shift; + u8 width; + struct ccu_common common; +}; + +struct ccu_sdm_setting { + long unsigned int rate; + u32 pattern; + u32 m; + u32 n; +}; + +struct ccu_sdm_internal { + struct ccu_sdm_setting *table; + u32 table_size; + u32 enable; + u32 tuning_enable; + u16 tuning_reg; +}; + +struct ccu_nk { + u16 reg; + u32 enable; + u32 lock; + struct ccu_mult_internal n; + struct ccu_mult_internal k; + unsigned int fixed_post_div; + struct ccu_common common; +}; + +struct _ccu_nk { + long unsigned int n; + long unsigned int min_n; + long unsigned int max_n; + long unsigned int k; + long unsigned int min_k; + long unsigned int max_k; +}; + +struct ccu_nkm { + u32 enable; + u32 lock; + struct ccu_mult_internal n; + struct ccu_mult_internal k; + struct ccu_div_internal m; + struct ccu_mux_internal mux; + unsigned int fixed_post_div; + struct ccu_common common; +}; + +struct _ccu_nkm { + long unsigned int n; + long unsigned int min_n; + long unsigned int max_n; + long unsigned int k; + long unsigned int min_k; + long unsigned int max_k; + long unsigned int m; + long unsigned int min_m; + long unsigned int max_m; +}; + +struct ccu_nkmp { + u32 enable; + u32 lock; + struct ccu_mult_internal n; + struct ccu_mult_internal k; + struct ccu_div_internal m; + struct ccu_div_internal p; + unsigned int fixed_post_div; + unsigned int max_rate; + struct ccu_common common; +}; + +struct _ccu_nkmp { + long unsigned int n; + long unsigned int min_n; + long unsigned int max_n; + long unsigned int k; + long unsigned int min_k; + long unsigned int max_k; + long unsigned int m; + long unsigned int min_m; + long unsigned int max_m; + long unsigned int p; + long unsigned int min_p; + long unsigned int max_p; +}; + +struct ccu_nm { + u32 enable; + u32 lock; + struct ccu_mult_internal n; + struct ccu_div_internal m; + struct ccu_frac_internal frac; + struct ccu_sdm_internal sdm; + unsigned int fixed_post_div; + unsigned int min_rate; + unsigned int max_rate; + struct ccu_common common; +}; + +struct _ccu_nm { + long unsigned int n; + long unsigned int min_n; + long unsigned int max_n; + long unsigned int m; + long unsigned int min_m; + long unsigned int max_m; +}; + +struct ccu_mp { + u32 enable; + struct ccu_div_internal m; + struct ccu_div_internal p; + struct ccu_mux_internal mux; + unsigned int fixed_post_div; + struct ccu_common common; +}; + +struct tegra_cpu_car_ops { + void (*wait_for_reset)(u32); + void (*put_in_reset)(u32); + void (*out_of_reset)(u32); + void (*enable_clock)(u32); + void (*disable_clock)(u32); + bool (*rail_off_ready)(); + void (*suspend)(); + void (*resume)(); +}; + +struct tegra_clk_periph_regs { + u32 enb_reg; + u32 enb_set_reg; + u32 enb_clr_reg; + u32 rst_reg; + u32 rst_set_reg; + u32 rst_clr_reg; +}; + +struct tegra_clk_init_table { + unsigned int clk_id; + unsigned int parent_id; + long unsigned int rate; + int state; +}; + +struct tegra_clk_duplicate { + 
int clk_id; + struct clk_lookup lookup; +}; + +struct tegra_clk { + int dt_id; + bool present; +}; + +struct tegra_devclk { + int dt_id; + char *dev_id; + char *con_id; +}; + +typedef void (*tegra_clk_apply_init_table_func)(); + +struct tegra_clk_sync_source { + struct clk_hw hw; + long unsigned int rate; + long unsigned int max_rate; +}; + +struct i2c_msg { + __u16 addr; + __u16 flags; + __u16 len; + __u8 *buf; +}; + +union i2c_smbus_data { + __u8 byte; + __u16 word; + __u8 block[34]; +}; + +enum i2c_slave_event { + I2C_SLAVE_READ_REQUESTED = 0, + I2C_SLAVE_WRITE_REQUESTED = 1, + I2C_SLAVE_READ_PROCESSED = 2, + I2C_SLAVE_WRITE_RECEIVED = 3, + I2C_SLAVE_STOP = 4, +}; + +struct i2c_client; + +typedef int (*i2c_slave_cb_t)(struct i2c_client *, enum i2c_slave_event, u8 *); + +struct i2c_adapter; + +struct i2c_client { + short unsigned int flags; + short unsigned int addr; + char name[20]; + struct i2c_adapter *adapter; + struct device dev; + int init_irq; + int irq; + struct list_head detected; + i2c_slave_cb_t slave_cb; +}; + +struct i2c_algorithm; + +struct i2c_lock_operations; + +struct i2c_bus_recovery_info; + +struct i2c_adapter_quirks; + +struct i2c_adapter { + struct module *owner; + unsigned int class; + const struct i2c_algorithm *algo; + void *algo_data; + const struct i2c_lock_operations *lock_ops; + struct rt_mutex bus_lock; + struct rt_mutex mux_lock; + int timeout; + int retries; + struct device dev; + long unsigned int locked_flags; + int nr; + char name[48]; + struct completion dev_released; + struct mutex userspace_clients_lock; + struct list_head userspace_clients; + struct i2c_bus_recovery_info *bus_recovery_info; + const struct i2c_adapter_quirks *quirks; + struct irq_domain *host_notify_domain; +}; + +struct i2c_algorithm { + int (*master_xfer)(struct i2c_adapter *, struct i2c_msg *, int); + int (*master_xfer_atomic)(struct i2c_adapter *, struct i2c_msg *, int); + int (*smbus_xfer)(struct i2c_adapter *, u16, short unsigned int, char, u8, int, union i2c_smbus_data *); + int (*smbus_xfer_atomic)(struct i2c_adapter *, u16, short unsigned int, char, u8, int, union i2c_smbus_data *); + u32 (*functionality)(struct i2c_adapter *); + int (*reg_slave)(struct i2c_client *); + int (*unreg_slave)(struct i2c_client *); +}; + +struct i2c_lock_operations { + void (*lock_bus)(struct i2c_adapter *, unsigned int); + int (*trylock_bus)(struct i2c_adapter *, unsigned int); + void (*unlock_bus)(struct i2c_adapter *, unsigned int); +}; + +struct i2c_bus_recovery_info { + int (*recover_bus)(struct i2c_adapter *); + int (*get_scl)(struct i2c_adapter *); + void (*set_scl)(struct i2c_adapter *, int); + int (*get_sda)(struct i2c_adapter *); + void (*set_sda)(struct i2c_adapter *, int); + int (*get_bus_free)(struct i2c_adapter *); + void (*prepare_recovery)(struct i2c_adapter *); + void (*unprepare_recovery)(struct i2c_adapter *); + struct gpio_desc *scl_gpiod; + struct gpio_desc *sda_gpiod; + struct pinctrl *pinctrl; + struct pinctrl_state *pins_default; + struct pinctrl_state *pins_gpio; +}; + +struct i2c_adapter_quirks { + u64 flags; + int max_num_msgs; + u16 max_write_len; + u16 max_read_len; + u16 max_comb_1st_msg_len; + u16 max_comb_2nd_msg_len; +}; + +struct rail_alignment { + int offset_uv; + int step_uv; +}; + +struct cvb_coefficients { + int c0; + int c1; + int c2; +}; + +struct cvb_table_freq_entry { + long unsigned int freq; + struct cvb_coefficients coefficients; +}; + +struct cvb_cpu_dfll_data { + u32 tune0_low; + u32 tune0_high; + u32 tune1; + unsigned int tune_high_min_millivolts; 
+}; + +struct cvb_table { + int speedo_id; + int process_id; + int min_millivolts; + int max_millivolts; + int speedo_scale; + int voltage_scale; + struct cvb_table_freq_entry entries[40]; + struct cvb_cpu_dfll_data cpu_dfll_data; +}; + +struct tegra_dfll_soc_data { + struct device *dev; + long unsigned int max_freq; + const struct cvb_table *cvb; + struct rail_alignment alignment; + void (*init_clock_trimmers)(); + void (*set_clock_trimmers_high)(); + void (*set_clock_trimmers_low)(); +}; + +enum dfll_ctrl_mode { + DFLL_UNINITIALIZED = 0, + DFLL_DISABLED = 1, + DFLL_OPEN_LOOP = 2, + DFLL_CLOSED_LOOP = 3, +}; + +enum dfll_tune_range { + DFLL_TUNE_UNINITIALIZED = 0, + DFLL_TUNE_LOW = 1, +}; + +enum tegra_dfll_pmu_if { + TEGRA_DFLL_PMU_I2C = 0, + TEGRA_DFLL_PMU_PWM = 1, +}; + +struct dfll_rate_req { + long unsigned int rate; + long unsigned int dvco_target_rate; + int lut_index; + u8 mult_bits; + u8 scale_bits; +}; + +struct tegra_dfll { + struct device *dev; + struct tegra_dfll_soc_data *soc; + void *base; + void *i2c_base; + void *i2c_controller_base; + void *lut_base; + struct regulator *vdd_reg; + struct clk *soc_clk; + struct clk *ref_clk; + struct clk *i2c_clk; + struct clk *dfll_clk; + struct reset_control *dvco_rst; + long unsigned int ref_rate; + long unsigned int i2c_clk_rate; + long unsigned int dvco_rate_min; + enum dfll_ctrl_mode mode; + enum dfll_tune_range tune_range; + struct dentry *debugfs_dir; + struct clk_hw dfll_clk_hw; + const char *output_clock_name; + struct dfll_rate_req last_req; + long unsigned int last_unrounded_rate; + u32 droop_ctrl; + u32 sample_rate; + u32 force_mode; + u32 cf; + u32 ci; + u32 cg; + bool cg_scale; + u32 i2c_fs_rate; + u32 i2c_reg; + u32 i2c_slave_addr; + unsigned int lut[33]; + long unsigned int lut_uv[33]; + int lut_size; + u8 lut_bottom; + u8 lut_min; + u8 lut_max; + u8 lut_safe; + enum tegra_dfll_pmu_if pmu_if; + long unsigned int pwm_rate; + struct pinctrl *pwm_pin; + struct pinctrl_state *pwm_enable_state; + struct pinctrl_state *pwm_disable_state; + u32 reg_init_uV; +}; + +struct tegra_clk_frac_div { + struct clk_hw hw; + void *reg; + u8 flags; + u8 shift; + u8 width; + u8 frac_width; + spinlock_t *lock; +}; + +struct tegra_clk_periph_gate { + u32 magic; + struct clk_hw hw; + void *clk_base; + u8 flags; + int clk_num; + int *enable_refcnt; + const struct tegra_clk_periph_regs *regs; +}; + +struct tegra_clk_periph { + u32 magic; + struct clk_hw hw; + struct clk_mux mux; + struct tegra_clk_frac_div divider; + struct tegra_clk_periph_gate gate; + const struct clk_ops *mux_ops; + const struct clk_ops *div_ops; + const struct clk_ops *gate_ops; +}; + +struct tegra_periph_init_data { + const char *name; + int clk_id; + union { + const char * const *parent_names; + const char *parent_name; + } p; + int num_parents; + struct tegra_clk_periph periph; + u32 offset; + const char *con_id; + const char *dev_id; + long unsigned int flags; +}; + +struct tegra_clk_periph_fixed { + struct clk_hw hw; + void *base; + const struct tegra_clk_periph_regs *regs; + unsigned int mul; + unsigned int div; + unsigned int num; +}; + +struct tegra_clk_pll_freq_table { + long unsigned int input_rate; + long unsigned int output_rate; + u32 n; + u32 m; + u8 p; + u8 cpcon; + u16 sdm_data; +}; + +struct pdiv_map { + u8 pdiv; + u8 hw_val; +}; + +struct div_nmp { + u8 divn_shift; + u8 divn_width; + u8 divm_shift; + u8 divm_width; + u8 divp_shift; + u8 divp_width; + u8 override_divn_shift; + u8 override_divm_shift; + u8 override_divp_shift; +}; + +struct tegra_clk_pll; + 
+struct tegra_clk_pll_params { + long unsigned int input_min; + long unsigned int input_max; + long unsigned int cf_min; + long unsigned int cf_max; + long unsigned int vco_min; + long unsigned int vco_max; + u32 base_reg; + u32 misc_reg; + u32 lock_reg; + u32 lock_mask; + u32 lock_enable_bit_idx; + u32 iddq_reg; + u32 iddq_bit_idx; + u32 reset_reg; + u32 reset_bit_idx; + u32 sdm_din_reg; + u32 sdm_din_mask; + u32 sdm_ctrl_reg; + u32 sdm_ctrl_en_mask; + u32 ssc_ctrl_reg; + u32 ssc_ctrl_en_mask; + u32 aux_reg; + u32 dyn_ramp_reg; + u32 ext_misc_reg[6]; + u32 pmc_divnm_reg; + u32 pmc_divp_reg; + u32 flags; + int stepa_shift; + int stepb_shift; + int lock_delay; + int max_p; + bool defaults_set; + const struct pdiv_map *pdiv_tohw; + struct div_nmp *div_nmp; + struct tegra_clk_pll_freq_table *freq_table; + long unsigned int fixed_rate; + u16 mdiv_default; + u32 (*round_p_to_pdiv)(u32, u32 *); + void (*set_gain)(struct tegra_clk_pll_freq_table *); + int (*calc_rate)(struct clk_hw *, struct tegra_clk_pll_freq_table *, long unsigned int, long unsigned int); + long unsigned int (*adjust_vco)(struct tegra_clk_pll_params *, long unsigned int); + void (*set_defaults)(struct tegra_clk_pll *); + int (*dyn_ramp)(struct tegra_clk_pll *, struct tegra_clk_pll_freq_table *); + int (*pre_rate_change)(); + void (*post_rate_change)(); +}; + +struct tegra_clk_pll { + struct clk_hw hw; + void *clk_base; + void *pmc; + spinlock_t *lock; + struct tegra_clk_pll_params *params; +}; + +struct utmi_clk_param { + u32 osc_frequency; + u8 enable_delay_count; + u8 stable_count; + u8 active_delay_count; + u8 xtal_freq_count; +}; + +struct tegra_clk_pll_out { + struct clk_hw hw; + void *reg; + u8 enb_bit_idx; + u8 rst_bit_idx; + spinlock_t *lock; + u8 flags; +}; + +struct tegra_sdmmc_mux { + struct clk_hw hw; + void *reg; + spinlock_t *lock; + const struct clk_ops *gate_ops; + struct tegra_clk_periph_gate gate; + u8 div_flags; +}; + +struct tegra_clk_super_mux { + struct clk_hw hw; + void *reg; + struct tegra_clk_frac_div frac_div; + const struct clk_ops *div_ops; + u8 width; + u8 flags; + u8 div2_index; + u8 pllx_index; + spinlock_t *lock; +}; + +struct tegra_audio_clk_info { + char *name; + struct tegra_clk_pll_params *pll_params; + int clk_id; + char *parent; +}; + +enum clk_id { + tegra_clk_actmon = 0, + tegra_clk_adx = 1, + tegra_clk_adx1 = 2, + tegra_clk_afi = 3, + tegra_clk_amx = 4, + tegra_clk_amx1 = 5, + tegra_clk_apb2ape = 6, + tegra_clk_ahbdma = 7, + tegra_clk_apbdma = 8, + tegra_clk_apbif = 9, + tegra_clk_ape = 10, + tegra_clk_audio0 = 11, + tegra_clk_audio0_2x = 12, + tegra_clk_audio0_mux = 13, + tegra_clk_audio1 = 14, + tegra_clk_audio1_2x = 15, + tegra_clk_audio1_mux = 16, + tegra_clk_audio2 = 17, + tegra_clk_audio2_2x = 18, + tegra_clk_audio2_mux = 19, + tegra_clk_audio3 = 20, + tegra_clk_audio3_2x = 21, + tegra_clk_audio3_mux = 22, + tegra_clk_audio4 = 23, + tegra_clk_audio4_2x = 24, + tegra_clk_audio4_mux = 25, + tegra_clk_bsea = 26, + tegra_clk_bsev = 27, + tegra_clk_cclk_g = 28, + tegra_clk_cclk_lp = 29, + tegra_clk_cilab = 30, + tegra_clk_cilcd = 31, + tegra_clk_cile = 32, + tegra_clk_clk_32k = 33, + tegra_clk_clk72Mhz = 34, + tegra_clk_clk72Mhz_8 = 35, + tegra_clk_clk_m = 36, + tegra_clk_osc = 37, + tegra_clk_osc_div2 = 38, + tegra_clk_osc_div4 = 39, + tegra_clk_cml0 = 40, + tegra_clk_cml1 = 41, + tegra_clk_csi = 42, + tegra_clk_csite = 43, + tegra_clk_csite_8 = 44, + tegra_clk_csus = 45, + tegra_clk_cve = 46, + tegra_clk_dam0 = 47, + tegra_clk_dam1 = 48, + tegra_clk_dam2 = 49, + tegra_clk_d_audio = 
50, + tegra_clk_dbgapb = 51, + tegra_clk_dds = 52, + tegra_clk_dfll_ref = 53, + tegra_clk_dfll_soc = 54, + tegra_clk_disp1 = 55, + tegra_clk_disp1_8 = 56, + tegra_clk_disp2 = 57, + tegra_clk_disp2_8 = 58, + tegra_clk_dp2 = 59, + tegra_clk_dpaux = 60, + tegra_clk_dpaux1 = 61, + tegra_clk_dsialp = 62, + tegra_clk_dsia_mux = 63, + tegra_clk_dsiblp = 64, + tegra_clk_dsib_mux = 65, + tegra_clk_dtv = 66, + tegra_clk_emc = 67, + tegra_clk_entropy = 68, + tegra_clk_entropy_8 = 69, + tegra_clk_epp = 70, + tegra_clk_epp_8 = 71, + tegra_clk_extern1 = 72, + tegra_clk_extern2 = 73, + tegra_clk_extern3 = 74, + tegra_clk_fuse = 75, + tegra_clk_fuse_burn = 76, + tegra_clk_gpu = 77, + tegra_clk_gr2d = 78, + tegra_clk_gr2d_8 = 79, + tegra_clk_gr3d = 80, + tegra_clk_gr3d_8 = 81, + tegra_clk_hclk = 82, + tegra_clk_hda = 83, + tegra_clk_hda_8 = 84, + tegra_clk_hda2codec_2x = 85, + tegra_clk_hda2codec_2x_8 = 86, + tegra_clk_hda2hdmi = 87, + tegra_clk_hdmi = 88, + tegra_clk_hdmi_audio = 89, + tegra_clk_host1x = 90, + tegra_clk_host1x_8 = 91, + tegra_clk_host1x_9 = 92, + tegra_clk_hsic_trk = 93, + tegra_clk_i2c1 = 94, + tegra_clk_i2c2 = 95, + tegra_clk_i2c3 = 96, + tegra_clk_i2c4 = 97, + tegra_clk_i2c5 = 98, + tegra_clk_i2c6 = 99, + tegra_clk_i2cslow = 100, + tegra_clk_i2s0 = 101, + tegra_clk_i2s0_sync = 102, + tegra_clk_i2s1 = 103, + tegra_clk_i2s1_sync = 104, + tegra_clk_i2s2 = 105, + tegra_clk_i2s2_sync = 106, + tegra_clk_i2s3 = 107, + tegra_clk_i2s3_sync = 108, + tegra_clk_i2s4 = 109, + tegra_clk_i2s4_sync = 110, + tegra_clk_isp = 111, + tegra_clk_isp_8 = 112, + tegra_clk_isp_9 = 113, + tegra_clk_ispb = 114, + tegra_clk_kbc = 115, + tegra_clk_kfuse = 116, + tegra_clk_la = 117, + tegra_clk_maud = 118, + tegra_clk_mipi = 119, + tegra_clk_mipibif = 120, + tegra_clk_mipi_cal = 121, + tegra_clk_mpe = 122, + tegra_clk_mselect = 123, + tegra_clk_msenc = 124, + tegra_clk_ndflash = 125, + tegra_clk_ndflash_8 = 126, + tegra_clk_ndspeed = 127, + tegra_clk_ndspeed_8 = 128, + tegra_clk_nor = 129, + tegra_clk_nvdec = 130, + tegra_clk_nvenc = 131, + tegra_clk_nvjpg = 132, + tegra_clk_owr = 133, + tegra_clk_owr_8 = 134, + tegra_clk_pcie = 135, + tegra_clk_pclk = 136, + tegra_clk_pll_a = 137, + tegra_clk_pll_a_out0 = 138, + tegra_clk_pll_a1 = 139, + tegra_clk_pll_c = 140, + tegra_clk_pll_c2 = 141, + tegra_clk_pll_c3 = 142, + tegra_clk_pll_c4 = 143, + tegra_clk_pll_c4_out0 = 144, + tegra_clk_pll_c4_out1 = 145, + tegra_clk_pll_c4_out2 = 146, + tegra_clk_pll_c4_out3 = 147, + tegra_clk_pll_c_out1 = 148, + tegra_clk_pll_d = 149, + tegra_clk_pll_d2 = 150, + tegra_clk_pll_d2_out0 = 151, + tegra_clk_pll_d_out0 = 152, + tegra_clk_pll_dp = 153, + tegra_clk_pll_e_out0 = 154, + tegra_clk_pll_g_ref = 155, + tegra_clk_pll_m = 156, + tegra_clk_pll_m_out1 = 157, + tegra_clk_pll_mb = 158, + tegra_clk_pll_p = 159, + tegra_clk_pll_p_out1 = 160, + tegra_clk_pll_p_out2 = 161, + tegra_clk_pll_p_out2_int = 162, + tegra_clk_pll_p_out3 = 163, + tegra_clk_pll_p_out4 = 164, + tegra_clk_pll_p_out4_cpu = 165, + tegra_clk_pll_p_out5 = 166, + tegra_clk_pll_p_out_hsio = 167, + tegra_clk_pll_p_out_xusb = 168, + tegra_clk_pll_p_out_cpu = 169, + tegra_clk_pll_p_out_adsp = 170, + tegra_clk_pll_ref = 171, + tegra_clk_pll_re_out = 172, + tegra_clk_pll_re_vco = 173, + tegra_clk_pll_u = 174, + tegra_clk_pll_u_out = 175, + tegra_clk_pll_u_out1 = 176, + tegra_clk_pll_u_out2 = 177, + tegra_clk_pll_u_12m = 178, + tegra_clk_pll_u_480m = 179, + tegra_clk_pll_u_48m = 180, + tegra_clk_pll_u_60m = 181, + tegra_clk_pll_x = 182, + tegra_clk_pll_x_out0 = 183, + tegra_clk_pwm = 
184, + tegra_clk_qspi = 185, + tegra_clk_rtc = 186, + tegra_clk_sata = 187, + tegra_clk_sata_8 = 188, + tegra_clk_sata_cold = 189, + tegra_clk_sata_oob = 190, + tegra_clk_sata_oob_8 = 191, + tegra_clk_sbc1 = 192, + tegra_clk_sbc1_8 = 193, + tegra_clk_sbc1_9 = 194, + tegra_clk_sbc2 = 195, + tegra_clk_sbc2_8 = 196, + tegra_clk_sbc2_9 = 197, + tegra_clk_sbc3 = 198, + tegra_clk_sbc3_8 = 199, + tegra_clk_sbc3_9 = 200, + tegra_clk_sbc4 = 201, + tegra_clk_sbc4_8 = 202, + tegra_clk_sbc4_9 = 203, + tegra_clk_sbc5 = 204, + tegra_clk_sbc5_8 = 205, + tegra_clk_sbc6 = 206, + tegra_clk_sbc6_8 = 207, + tegra_clk_sclk = 208, + tegra_clk_sdmmc_legacy = 209, + tegra_clk_sdmmc1 = 210, + tegra_clk_sdmmc1_8 = 211, + tegra_clk_sdmmc1_9 = 212, + tegra_clk_sdmmc2 = 213, + tegra_clk_sdmmc2_8 = 214, + tegra_clk_sdmmc3 = 215, + tegra_clk_sdmmc3_8 = 216, + tegra_clk_sdmmc3_9 = 217, + tegra_clk_sdmmc4 = 218, + tegra_clk_sdmmc4_8 = 219, + tegra_clk_se = 220, + tegra_clk_se_10 = 221, + tegra_clk_soc_therm = 222, + tegra_clk_soc_therm_8 = 223, + tegra_clk_sor0 = 224, + tegra_clk_sor0_out = 225, + tegra_clk_sor1 = 226, + tegra_clk_sor1_out = 227, + tegra_clk_spdif = 228, + tegra_clk_spdif_2x = 229, + tegra_clk_spdif_in = 230, + tegra_clk_spdif_in_8 = 231, + tegra_clk_spdif_in_sync = 232, + tegra_clk_spdif_mux = 233, + tegra_clk_spdif_out = 234, + tegra_clk_timer = 235, + tegra_clk_trace = 236, + tegra_clk_tsec = 237, + tegra_clk_tsec_8 = 238, + tegra_clk_tsecb = 239, + tegra_clk_tsensor = 240, + tegra_clk_tvdac = 241, + tegra_clk_tvo = 242, + tegra_clk_uarta = 243, + tegra_clk_uarta_8 = 244, + tegra_clk_uartb = 245, + tegra_clk_uartb_8 = 246, + tegra_clk_uartc = 247, + tegra_clk_uartc_8 = 248, + tegra_clk_uartd = 249, + tegra_clk_uartd_8 = 250, + tegra_clk_uarte = 251, + tegra_clk_uarte_8 = 252, + tegra_clk_uartape = 253, + tegra_clk_usb2 = 254, + tegra_clk_usb2_hsic_trk = 255, + tegra_clk_usb2_trk = 256, + tegra_clk_usb3 = 257, + tegra_clk_usbd = 258, + tegra_clk_vcp = 259, + tegra_clk_vde = 260, + tegra_clk_vde_8 = 261, + tegra_clk_vfir = 262, + tegra_clk_vi = 263, + tegra_clk_vi_8 = 264, + tegra_clk_vi_9 = 265, + tegra_clk_vi_10 = 266, + tegra_clk_vi_i2c = 267, + tegra_clk_vic03 = 268, + tegra_clk_vic03_8 = 269, + tegra_clk_vim2_clk = 270, + tegra_clk_vimclk_sync = 271, + tegra_clk_vi_sensor = 272, + tegra_clk_vi_sensor_8 = 273, + tegra_clk_vi_sensor_9 = 274, + tegra_clk_vi_sensor2 = 275, + tegra_clk_vi_sensor2_8 = 276, + tegra_clk_xusb_dev = 277, + tegra_clk_xusb_dev_src = 278, + tegra_clk_xusb_dev_src_8 = 279, + tegra_clk_xusb_falcon_src = 280, + tegra_clk_xusb_falcon_src_8 = 281, + tegra_clk_xusb_fs_src = 282, + tegra_clk_xusb_gate = 283, + tegra_clk_xusb_host = 284, + tegra_clk_xusb_host_src = 285, + tegra_clk_xusb_host_src_8 = 286, + tegra_clk_xusb_hs_src = 287, + tegra_clk_xusb_hs_src_4 = 288, + tegra_clk_xusb_ss = 289, + tegra_clk_xusb_ss_src = 290, + tegra_clk_xusb_ss_src_8 = 291, + tegra_clk_xusb_ss_div2 = 292, + tegra_clk_xusb_ssp_src = 293, + tegra_clk_sclk_mux = 294, + tegra_clk_sor_safe = 295, + tegra_clk_cec = 296, + tegra_clk_ispa = 297, + tegra_clk_dmic1 = 298, + tegra_clk_dmic2 = 299, + tegra_clk_dmic3 = 300, + tegra_clk_dmic1_sync_clk = 301, + tegra_clk_dmic2_sync_clk = 302, + tegra_clk_dmic3_sync_clk = 303, + tegra_clk_dmic1_sync_clk_mux = 304, + tegra_clk_dmic2_sync_clk_mux = 305, + tegra_clk_dmic3_sync_clk_mux = 306, + tegra_clk_iqc1 = 307, + tegra_clk_iqc2 = 308, + tegra_clk_pll_a_out_adsp = 309, + tegra_clk_pll_a_out0_out_adsp = 310, + tegra_clk_adsp = 311, + tegra_clk_adsp_neon = 312, + 
tegra_clk_max = 313, +}; + +struct tegra_sync_source_initdata { + char *name; + long unsigned int rate; + long unsigned int max_rate; + int clk_id; +}; + +struct tegra_audio_clk_initdata { + char *gate_name; + char *mux_name; + u32 offset; + int gate_clk_id; + int mux_clk_id; +}; + +struct tegra_audio2x_clk_initdata { + char *parent; + char *gate_name; + char *name_2x; + char *div_name; + int clk_id; + int clk_num; + u8 div_offset; +}; + +struct pll_out_data { + char *div_name; + char *pll_out_name; + u32 offset; + int clk_id; + u8 div_shift; + u8 div_flags; + u8 rst_shift; + spinlock_t *lock; +}; + +enum tegra_super_gen { + gen4 = 4, + gen5 = 5, +}; + +struct tegra_super_gen_info { + enum tegra_super_gen gen; + const char **sclk_parents; + const char **cclk_g_parents; + const char **cclk_lp_parents; + int num_sclk_parents; + int num_cclk_g_parents; + int num_cclk_lp_parents; +}; + +enum tegra_revision { + TEGRA_REVISION_UNKNOWN = 0, + TEGRA_REVISION_A01 = 1, + TEGRA_REVISION_A02 = 2, + TEGRA_REVISION_A03 = 3, + TEGRA_REVISION_A03p = 4, + TEGRA_REVISION_A04 = 5, + TEGRA_REVISION_MAX = 6, +}; + +struct tegra_sku_info { + int sku_id; + int cpu_process_id; + int cpu_speedo_id; + int cpu_speedo_value; + int cpu_iddq_value; + int soc_process_id; + int soc_speedo_id; + int soc_speedo_value; + int gpu_process_id; + int gpu_speedo_id; + int gpu_speedo_value; + enum tegra_revision revision; +}; + +struct dfll_fcpu_data { + const long unsigned int *cpu_max_freq_table; + unsigned int cpu_max_freq_table_size; + const struct cvb_table *cpu_cvb_tables; + unsigned int cpu_cvb_tables_size; +}; + +struct cpu_clk_suspend_context { + u32 clk_csite_src; + u32 cclkg_burst; + u32 cclkg_divider; +}; + +enum { + DOWN___2 = 0, + UP___2 = 1, +}; + +struct cpu_clk_suspend_context___2 { + u32 clk_csite_src; +}; + +struct tegra210_domain_mbist_war { + void (*handle_lvl2_ovr)(struct tegra210_domain_mbist_war *); + const u32 lvl2_offset; + const u32 lvl2_mask; + const unsigned int num_clks; + const unsigned int *clk_init_data; + struct clk_bulk_data *clks; +}; + +struct utmi_clk_param___2 { + u32 osc_frequency; + u8 enable_delay_count; + u16 stable_count; + u8 active_delay_count; + u16 xtal_freq_count; +}; + +struct tegra210_clk_emc_config { + long unsigned int rate; + bool same_freq; + u32 value; + long unsigned int parent_rate; + u8 parent; +}; + +struct tegra210_clk_emc_provider { + struct module *owner; + struct device *dev; + struct tegra210_clk_emc_config *configs; + unsigned int num_configs; + int (*set_rate)(struct device *, const struct tegra210_clk_emc_config *); +}; + +struct tegra210_clk_emc { + struct clk_hw hw; + void *regs; + struct tegra210_clk_emc_provider *provider; + struct clk *parents[8]; +}; + +enum { + CMD_CLK_GET_RATE = 1, + CMD_CLK_SET_RATE = 2, + CMD_CLK_ROUND_RATE = 3, + CMD_CLK_GET_PARENT = 4, + CMD_CLK_SET_PARENT = 5, + CMD_CLK_IS_ENABLED = 6, + CMD_CLK_ENABLE = 7, + CMD_CLK_DISABLE = 8, + CMD_CLK_GET_ALL_INFO = 14, + CMD_CLK_GET_MAX_CLK_ID = 15, + CMD_CLK_GET_FMAX_AT_VMIN = 16, + CMD_CLK_MAX = 17, +}; + +struct cmd_clk_get_rate_request {}; + +struct cmd_clk_get_rate_response { + int64_t rate; +}; + +struct cmd_clk_set_rate_request { + int32_t unused; + int64_t rate; +} __attribute__((packed)); + +struct cmd_clk_set_rate_response { + int64_t rate; +}; + +struct cmd_clk_round_rate_request { + int32_t unused; + int64_t rate; +} __attribute__((packed)); + +struct cmd_clk_round_rate_response { + int64_t rate; +}; + +struct cmd_clk_get_parent_request {}; + +struct cmd_clk_get_parent_response { + 
uint32_t parent_id; +}; + +struct cmd_clk_set_parent_request { + uint32_t parent_id; +}; + +struct cmd_clk_set_parent_response { + uint32_t parent_id; +}; + +struct cmd_clk_is_enabled_request {}; + +struct cmd_clk_is_enabled_response { + int32_t state; +}; + +struct cmd_clk_enable_request {}; + +struct cmd_clk_disable_request {}; + +struct cmd_clk_get_all_info_request {}; + +struct cmd_clk_get_all_info_response { + uint32_t flags; + uint32_t parent; + uint32_t parents[16]; + uint8_t num_parents; + uint8_t name[40]; +} __attribute__((packed)); + +struct cmd_clk_get_max_clk_id_request {}; + +struct cmd_clk_get_max_clk_id_response { + uint32_t max_id; +}; + +struct cmd_clk_get_fmax_at_vmin_request {}; + +struct mrq_clk_request { + uint32_t cmd_and_id; + union { + struct cmd_clk_get_rate_request clk_get_rate; + struct cmd_clk_set_rate_request clk_set_rate; + struct cmd_clk_round_rate_request clk_round_rate; + struct cmd_clk_get_parent_request clk_get_parent; + struct cmd_clk_set_parent_request clk_set_parent; + struct cmd_clk_enable_request clk_enable; + struct cmd_clk_disable_request clk_disable; + struct cmd_clk_is_enabled_request clk_is_enabled; + struct cmd_clk_get_all_info_request clk_get_all_info; + struct cmd_clk_get_max_clk_id_request clk_get_max_clk_id; + struct cmd_clk_get_fmax_at_vmin_request clk_get_fmax_at_vmin; + }; +} __attribute__((packed)); + +struct tegra_bpmp_ops; + +struct tegra_bpmp_soc { + struct { + struct { + unsigned int offset; + unsigned int count; + unsigned int timeout; + } cpu_tx; + struct { + unsigned int offset; + unsigned int count; + unsigned int timeout; + } thread; + struct { + unsigned int offset; + unsigned int count; + unsigned int timeout; + } cpu_rx; + } channels; + const struct tegra_bpmp_ops *ops; + unsigned int num_resets; +}; + +struct tegra_bpmp; + +struct tegra_bpmp_channel; + +struct tegra_bpmp_ops { + int (*init)(struct tegra_bpmp *); + void (*deinit)(struct tegra_bpmp *); + bool (*is_response_ready)(struct tegra_bpmp_channel *); + bool (*is_request_ready)(struct tegra_bpmp_channel *); + int (*ack_response)(struct tegra_bpmp_channel *); + int (*ack_request)(struct tegra_bpmp_channel *); + bool (*is_response_channel_free)(struct tegra_bpmp_channel *); + bool (*is_request_channel_free)(struct tegra_bpmp_channel *); + int (*post_response)(struct tegra_bpmp_channel *); + int (*post_request)(struct tegra_bpmp_channel *); + int (*ring_doorbell)(struct tegra_bpmp *); + int (*resume)(struct tegra_bpmp *); +}; + +struct tegra_bpmp_mb_data { + u32 code; + u32 flags; + u8 data[120]; +}; + +struct tegra_ivc; + +struct tegra_bpmp_channel { + struct tegra_bpmp *bpmp; + struct tegra_bpmp_mb_data *ib; + struct tegra_bpmp_mb_data *ob; + struct completion completion; + struct tegra_ivc *ivc; + unsigned int index; +}; + +struct tegra_bpmp_clk; + +struct tegra_bpmp { + const struct tegra_bpmp_soc *soc; + struct device *dev; + void *priv; + struct { + struct mbox_client client; + struct mbox_chan___2 *channel; + } mbox; + spinlock_t atomic_tx_lock; + struct tegra_bpmp_channel *tx_channel; + struct tegra_bpmp_channel *rx_channel; + struct tegra_bpmp_channel *threaded_channels; + struct { + long unsigned int *allocated; + long unsigned int *busy; + unsigned int count; + struct semaphore lock; + } threaded; + struct list_head mrqs; + spinlock_t lock; + struct tegra_bpmp_clk **clocks; + unsigned int num_clocks; + struct reset_controller_dev rstc; + struct genpd_onecell_data genpd; + struct dentry *debugfs_mirror; +}; + +struct tegra_bpmp_clk { + struct clk_hw hw; + 
struct tegra_bpmp *bpmp; + unsigned int id; + unsigned int num_parents; + unsigned int *parents; +}; + +struct tegra_bpmp_message { + unsigned int mrq; + struct { + const void *data; + size_t size; + } tx; + struct { + void *data; + size_t size; + int ret; + } rx; +}; + +struct tegra_bpmp_clk_info { + unsigned int id; + char name[40]; + unsigned int parents[16]; + unsigned int num_parents; + long unsigned int flags; +}; + +struct tegra_bpmp_clk_message { + unsigned int cmd; + unsigned int id; + struct { + const void *data; + size_t size; + } tx; + struct { + void *data; + size_t size; + int ret; + } rx; +}; + +struct clk_sp810; + +struct clk_sp810_timerclken { + struct clk_hw hw; + struct clk *clk; + struct clk_sp810 *sp810; + int channel; +}; + +struct clk_sp810 { + struct device_node *node; + void *base; + spinlock_t lock; + struct clk_sp810_timerclken timerclken[4]; +}; + +struct vexpress_osc { + struct regmap *reg; + struct clk_hw hw; + long unsigned int rate_min; + long unsigned int rate_max; +}; + +struct dma_chan_tbl_ent { + struct dma_chan *chan; +}; + +struct dmaengine_unmap_pool { + struct kmem_cache *cache; + const char *name; + mempool_t *pool; + size_t size; +}; + +struct dmaengine_desc_callback { + dma_async_tx_callback callback; + dma_async_tx_callback_result callback_result; + void *callback_param; +}; + +struct virt_dma_desc { + struct dma_async_tx_descriptor tx; + struct dmaengine_result tx_result; + struct list_head node; +}; + +struct virt_dma_chan { + struct dma_chan chan; + struct tasklet_struct task; + void (*desc_free)(struct virt_dma_desc *); + spinlock_t lock; + struct list_head desc_allocated; + struct list_head desc_submitted; + struct list_head desc_issued; + struct list_head desc_completed; + struct list_head desc_terminated; + struct virt_dma_desc *cyclic; +}; + +struct acpi_table_csrt { + struct acpi_table_header header; +}; + +struct acpi_csrt_group { + u32 length; + u32 vendor_id; + u32 subvendor_id; + u16 device_id; + u16 subdevice_id; + u16 revision; + u16 reserved; + u32 shared_info_length; +}; + +struct acpi_csrt_shared_info { + u16 major_version; + u16 minor_version; + u32 mmio_base_low; + u32 mmio_base_high; + u32 gsi_interrupt; + u8 interrupt_polarity; + u8 interrupt_mode; + u8 num_channels; + u8 dma_address_width; + u16 base_request_line; + u16 num_handshake_signals; + u32 max_block_size; +}; + +struct acpi_dma_spec { + int chan_id; + int slave_id; + struct device *dev; +}; + +struct acpi_dma { + struct list_head dma_controllers; + struct device *dev; + struct dma_chan * (*acpi_dma_xlate)(struct acpi_dma_spec *, struct acpi_dma *); + void *data; + short unsigned int base_request_line; + short unsigned int end_request_line; +}; + +struct acpi_dma_filter_info { + dma_cap_mask_t dma_cap; + dma_filter_fn filter_fn; +}; + +struct acpi_dma_parser_data { + struct acpi_dma_spec dma_spec; + size_t index; + size_t n; +}; + +struct of_dma { + struct list_head of_dma_controllers; + struct device_node *of_node; + struct dma_chan * (*of_dma_xlate)(struct of_phandle_args *, struct of_dma *); + void * (*of_dma_route_allocate)(struct of_phandle_args *, struct of_dma *); + struct dma_router *dma_router; + void *of_dma_data; +}; + +struct of_dma_filter_info { + dma_cap_mask_t dma_cap; + dma_filter_fn filter_fn; +}; + +struct mv_xor_v2_descriptor { + u16 desc_id; + u16 flags; + u32 crc32_result; + u32 desc_ctrl; + u32 buff_size; + u32 fill_pattern_src_addr[4]; + u32 data_buff_addr[12]; + u32 reserved[12]; +}; + +struct mv_xor_v2_sw_desc; + +struct mv_xor_v2_device { + 
spinlock_t lock; + void *dma_base; + void *glob_base; + struct clk *clk; + struct clk *reg_clk; + struct tasklet_struct irq_tasklet; + struct list_head free_sw_desc; + struct dma_device dmadev; + struct dma_chan dmachan; + dma_addr_t hw_desq; + struct mv_xor_v2_descriptor *hw_desq_virt; + struct mv_xor_v2_sw_desc *sw_desq; + int desc_size; + unsigned int npendings; + unsigned int hw_queue_idx; + struct msi_desc *msi_desc; +}; + +struct mv_xor_v2_sw_desc { + int idx; + struct dma_async_tx_descriptor async_tx; + struct mv_xor_v2_descriptor hw_desc; + struct list_head free_list; +}; + +struct bam_desc_hw { + __le32 addr; + __le16 size; + __le16 flags; +}; + +struct bam_async_desc { + struct virt_dma_desc vd; + u32 num_desc; + u32 xfer_len; + u16 flags; + struct bam_desc_hw *curr_desc; + struct list_head desc_node; + enum dma_transfer_direction dir; + size_t length; + struct bam_desc_hw desc[0]; +}; + +enum bam_reg { + BAM_CTRL = 0, + BAM_REVISION = 1, + BAM_NUM_PIPES = 2, + BAM_DESC_CNT_TRSHLD = 3, + BAM_IRQ_SRCS = 4, + BAM_IRQ_SRCS_MSK = 5, + BAM_IRQ_SRCS_UNMASKED = 6, + BAM_IRQ_STTS = 7, + BAM_IRQ_CLR = 8, + BAM_IRQ_EN = 9, + BAM_CNFG_BITS = 10, + BAM_IRQ_SRCS_EE = 11, + BAM_IRQ_SRCS_MSK_EE = 12, + BAM_P_CTRL = 13, + BAM_P_RST = 14, + BAM_P_HALT = 15, + BAM_P_IRQ_STTS = 16, + BAM_P_IRQ_CLR = 17, + BAM_P_IRQ_EN = 18, + BAM_P_EVNT_DEST_ADDR = 19, + BAM_P_EVNT_REG = 20, + BAM_P_SW_OFSTS = 21, + BAM_P_DATA_FIFO_ADDR = 22, + BAM_P_DESC_FIFO_ADDR = 23, + BAM_P_EVNT_GEN_TRSHLD = 24, + BAM_P_FIFO_SIZES = 25, +}; + +struct reg_offset_data { + u32 base_offset; + unsigned int pipe_mult; + unsigned int evnt_mult; + unsigned int ee_mult; +}; + +struct bam_device; + +struct bam_chan { + struct virt_dma_chan vc; + struct bam_device *bdev; + u32 id; + struct dma_slave_config slave; + struct bam_desc_hw *fifo_virt; + dma_addr_t fifo_phys; + short unsigned int head; + short unsigned int tail; + unsigned int initialized; + unsigned int paused; + unsigned int reconfigure; + struct list_head desc_list; + struct list_head node; +}; + +struct bam_device { + void *regs; + struct device *dev; + struct dma_device common; + struct bam_chan *channels; + u32 num_channels; + u32 num_ees; + u32 ee; + bool controlled_remotely; + const struct reg_offset_data *layout; + struct clk *bamclk; + int irq; + struct tasklet_struct task; +}; + +struct bcm2835_pm { + struct device *dev; + void *base; + void *asb; +}; + +struct bcm2835_power; + +struct bcm2835_power_domain { + struct generic_pm_domain base; + struct bcm2835_power *power; + u32 domain; + struct clk *clk; +}; + +struct bcm2835_power { + struct device *dev; + void *base; + void *asb; + struct genpd_onecell_data pd_xlate; + struct bcm2835_power_domain domains[13]; + struct reset_controller_dev reset; +}; + +struct rpi_power_domain { + u32 domain; + bool enabled; + bool old_interface; + struct generic_pm_domain base; + struct rpi_firmware *fw; +}; + +struct rpi_power_domains { + bool has_new_interface; + struct genpd_onecell_data xlate; + struct rpi_firmware *fw; + struct rpi_power_domain domains[23]; +}; + +struct rpi_power_domain_packet { + u32 domain; + u32 on; +}; + +struct soc_device_attribute { + const char *machine; + const char *family; + const char *revision; + const char *serial_number; + const char *soc_id; + const void *data; + const struct attribute_group *custom_attr_group; +}; + +struct soc_device; + +enum cpubiuctrl_regs { + CPU_CREDIT_REG = 0, + CPU_MCP_FLOW_REG = 1, + CPU_WRITEBACK_CTRL_REG = 2, + RAC_CONFIG0_REG = 3, + RAC_CONFIG1_REG = 4, + 
NUM_CPU_BIUCTRL_REGS = 5, +}; + +enum mtk_ddp_comp_id { + DDP_COMPONENT_AAL0 = 0, + DDP_COMPONENT_AAL1 = 1, + DDP_COMPONENT_BLS = 2, + DDP_COMPONENT_CCORR = 3, + DDP_COMPONENT_COLOR0 = 4, + DDP_COMPONENT_COLOR1 = 5, + DDP_COMPONENT_DITHER = 6, + DDP_COMPONENT_DPI0 = 7, + DDP_COMPONENT_DPI1 = 8, + DDP_COMPONENT_DSI0 = 9, + DDP_COMPONENT_DSI1 = 10, + DDP_COMPONENT_DSI2 = 11, + DDP_COMPONENT_DSI3 = 12, + DDP_COMPONENT_GAMMA = 13, + DDP_COMPONENT_OD0 = 14, + DDP_COMPONENT_OD1 = 15, + DDP_COMPONENT_OVL0 = 16, + DDP_COMPONENT_OVL_2L0 = 17, + DDP_COMPONENT_OVL_2L1 = 18, + DDP_COMPONENT_OVL1 = 19, + DDP_COMPONENT_PWM0 = 20, + DDP_COMPONENT_PWM1 = 21, + DDP_COMPONENT_PWM2 = 22, + DDP_COMPONENT_RDMA0 = 23, + DDP_COMPONENT_RDMA1 = 24, + DDP_COMPONENT_RDMA2 = 25, + DDP_COMPONENT_UFOE = 26, + DDP_COMPONENT_WDMA0 = 27, + DDP_COMPONENT_WDMA1 = 28, + DDP_COMPONENT_ID_MAX = 29, +}; + +struct mtk_mmsys_driver_data { + const char *clk_driver; +}; + +struct meson_msr; + +struct meson_msr_id { + struct meson_msr *priv; + unsigned int id; + const char *name; +}; + +struct meson_msr { + struct regmap *regmap; + struct meson_msr_id msr_table[128]; +}; + +struct meson_gx_soc_id { + const char *name; + unsigned int id; +}; + +struct meson_gx_package_id { + const char *name; + unsigned int major_id; + unsigned int pack_id; + unsigned int pack_mask; +}; + +struct meson_gx_pwrc_vpu { + struct generic_pm_domain genpd; + struct regmap *regmap_ao; + struct regmap *regmap_hhi; + struct reset_control *rstc; + struct clk *vpu_clk; + struct clk *vapb_clk; +}; + +struct meson_ee_pwrc_mem_domain { + unsigned int reg; + unsigned int mask; +}; + +struct meson_ee_pwrc_top_domain { + unsigned int sleep_reg; + unsigned int sleep_mask; + unsigned int iso_reg; + unsigned int iso_mask; +}; + +struct meson_ee_pwrc_domain; + +struct meson_ee_pwrc_domain_desc { + char *name; + unsigned int reset_names_count; + unsigned int clk_names_count; + struct meson_ee_pwrc_top_domain *top_pd; + unsigned int mem_pd_count; + struct meson_ee_pwrc_mem_domain *mem_pd; + bool (*get_power)(struct meson_ee_pwrc_domain *); +}; + +struct meson_ee_pwrc; + +struct meson_ee_pwrc_domain { + struct generic_pm_domain base; + bool enabled; + struct meson_ee_pwrc *pwrc; + struct meson_ee_pwrc_domain_desc desc; + struct clk_bulk_data *clks; + int num_clks; + struct reset_control *rstc; + int num_rstc; +}; + +struct meson_ee_pwrc_domain_data { + unsigned int count; + struct meson_ee_pwrc_domain_desc *domains; +}; + +struct meson_ee_pwrc { + struct regmap *regmap_ao; + struct regmap *regmap_hhi; + struct meson_ee_pwrc_domain *domains; + struct genpd_onecell_data xlate; +}; + +enum { + SM_EFUSE_READ = 0, + SM_EFUSE_WRITE = 1, + SM_EFUSE_USER_MAX = 2, + SM_GET_CHIP_ID = 3, + SM_A1_PWRC_SET = 4, + SM_A1_PWRC_GET = 5, +}; + +struct meson_secure_pwrc; + +struct meson_secure_pwrc_domain { + struct generic_pm_domain base; + unsigned int index; + struct meson_secure_pwrc *pwrc; +}; + +struct meson_sm_firmware; + +struct meson_secure_pwrc { + struct meson_secure_pwrc_domain *domains; + struct genpd_onecell_data xlate; + struct meson_sm_firmware *fw; +}; + +struct meson_secure_pwrc_domain_desc { + unsigned int index; + unsigned int flags; + char *name; + bool (*is_off)(struct meson_secure_pwrc_domain *); +}; + +struct meson_secure_pwrc_domain_data { + unsigned int count; + struct meson_secure_pwrc_domain_desc *domains; +}; + +enum cmd_db_hw_type { + CMD_DB_HW_INVALID = 0, + CMD_DB_HW_MIN = 3, + CMD_DB_HW_ARC = 3, + CMD_DB_HW_VRM = 4, + CMD_DB_HW_BCM = 5, + CMD_DB_HW_MAX = 5, + 
CMD_DB_HW_ALL = 255, +}; + +struct entry_header { + u8 id[8]; + __le32 priority[2]; + __le32 addr; + __le16 len; + __le16 offset; +}; + +struct rsc_hdr { + __le16 slv_id; + __le16 header_offset; + __le16 data_offset; + __le16 cnt; + __le16 version; + __le16 reserved[3]; +}; + +struct cmd_db_header { + __le32 version; + u8 magic[4]; + struct rsc_hdr header[8]; + __le32 checksum; + __le32 reserved; + u8 data[0]; +}; + +struct smem_proc_comm { + __le32 command; + __le32 status; + __le32 params[2]; +}; + +struct smem_global_entry { + __le32 allocated; + __le32 offset; + __le32 size; + __le32 aux_base; +}; + +struct smem_header { + struct smem_proc_comm proc_comm[4]; + __le32 version[32]; + __le32 initialized; + __le32 free_offset; + __le32 available; + __le32 reserved; + struct smem_global_entry toc[512]; +}; + +struct smem_ptable_entry { + __le32 offset; + __le32 size; + __le32 flags; + __le16 host0; + __le16 host1; + __le32 cacheline; + __le32 reserved[7]; +}; + +struct smem_ptable { + u8 magic[4]; + __le32 version; + __le32 num_entries; + __le32 reserved[5]; + struct smem_ptable_entry entry[0]; +}; + +struct smem_partition_header { + u8 magic[4]; + __le16 host0; + __le16 host1; + __le32 size; + __le32 offset_free_uncached; + __le32 offset_free_cached; + __le32 reserved[3]; +}; + +struct smem_private_entry { + u16 canary; + __le16 item; + __le32 size; + __le16 padding_data; + __le16 padding_hdr; + __le32 reserved; +}; + +struct smem_info { + u8 magic[4]; + __le32 size; + __le32 base_addr; + __le32 reserved; + __le16 num_items; +}; + +struct smem_region { + u32 aux_base; + void *virt_base; + size_t size; +}; + +struct hwspinlock; + +struct qcom_smem { + struct device *dev; + struct hwspinlock *hwlock; + struct smem_partition_header *global_partition; + size_t global_cacheline; + struct smem_partition_header *partitions[11]; + size_t cacheline[11]; + u32 item_count; + struct platform_device *socinfo; + unsigned int num_regions; + struct smem_region regions[0]; +}; + +struct rockchip_grf_value { + const char *desc; + u32 reg; + u32 val; +}; + +struct rockchip_grf_info { + const struct rockchip_grf_value *values; + int num_values; +}; + +struct rockchip_domain_info { + int pwr_mask; + int status_mask; + int req_mask; + int idle_mask; + int ack_mask; + bool active_wakeup; + int pwr_w_mask; + int req_w_mask; +}; + +struct rockchip_pmu_info { + u32 pwr_offset; + u32 status_offset; + u32 req_offset; + u32 idle_offset; + u32 ack_offset; + u32 core_pwrcnt_offset; + u32 gpu_pwrcnt_offset; + unsigned int core_power_transition_time; + unsigned int gpu_power_transition_time; + int num_domains; + const struct rockchip_domain_info *domain_info; +}; + +struct rockchip_pmu; + +struct rockchip_pm_domain { + struct generic_pm_domain genpd; + const struct rockchip_domain_info *info; + struct rockchip_pmu *pmu; + int num_qos; + struct regmap **qos_regmap; + u32 *qos_save_regs[5]; + int num_clks; + struct clk_bulk_data *clks; +}; + +struct rockchip_pmu { + struct device *dev; + struct regmap *regmap; + const struct rockchip_pmu_info *info; + struct mutex mutex; + struct genpd_onecell_data genpd_data; + struct generic_pm_domain *domains[0]; +}; + +struct exynos_soc_id { + const char *name; + unsigned int id; +}; + +enum sys_powerdown { + SYS_AFTR = 0, + SYS_LPA = 1, + SYS_SLEEP = 2, + NUM_SYS_POWERDOWN = 3, +}; + +struct exynos_pmu_conf { + unsigned int offset; + u8 val[3]; +}; + +struct exynos_pmu_data { + const struct exynos_pmu_conf *pmu_config; + void (*pmu_init)(); + void (*powerdown_conf)(enum sys_powerdown); 
+ void (*powerdown_conf_extra)(enum sys_powerdown); +}; + +struct exynos_pmu_context { + struct device *dev; + const struct exynos_pmu_data *pmu_data; +}; + +struct exynos_pm_domain_config { + u32 local_pwr_cfg; +}; + +struct exynos_pm_domain { + void *base; + bool is_off; + struct generic_pm_domain pd; + u32 local_pwr_cfg; +}; + +struct sunxi_sram_func { + char *func; + u8 val; + u32 reg_val; +}; + +struct sunxi_sram_data { + char *name; + u8 reg; + u8 offset; + u8 width; + struct sunxi_sram_func *func; + struct list_head list; +}; + +struct sunxi_sram_desc { + struct sunxi_sram_data data; + bool claimed; +}; + +struct sunxi_sramc_variant { + bool has_emac_clock; +}; + +struct nvmem_cell_info { + const char *name; + unsigned int offset; + unsigned int bytes; + unsigned int bit_offset; + unsigned int nbits; +}; + +struct nvmem_cell_lookup { + const char *nvmem_name; + const char *cell_name; + const char *dev_id; + const char *con_id; + struct list_head node; +}; + +typedef int (*nvmem_reg_read_t)(void *, unsigned int, void *, size_t); + +typedef int (*nvmem_reg_write_t)(void *, unsigned int, void *, size_t); + +enum nvmem_type { + NVMEM_TYPE_UNKNOWN = 0, + NVMEM_TYPE_EEPROM = 1, + NVMEM_TYPE_OTP = 2, + NVMEM_TYPE_BATTERY_BACKED = 3, +}; + +struct nvmem_config { + struct device *dev; + const char *name; + int id; + struct module *owner; + struct gpio_desc *wp_gpio; + const struct nvmem_cell_info *cells; + int ncells; + enum nvmem_type type; + bool read_only; + bool root_only; + bool no_of_node; + nvmem_reg_read_t reg_read; + nvmem_reg_write_t reg_write; + int size; + int word_size; + int stride; + void *priv; + bool compat; + struct device *base_dev; +}; + +struct tegra_fuse; + +struct tegra_fuse_info { + u32 (*read)(struct tegra_fuse *, unsigned int); + unsigned int size; + unsigned int spare; +}; + +struct nvmem_device; + +struct tegra_fuse_soc; + +struct tegra_fuse { + struct device *dev; + void *base; + phys_addr_t phys; + struct clk *clk; + u32 (*read_early)(struct tegra_fuse *, unsigned int); + u32 (*read)(struct tegra_fuse *, unsigned int); + const struct tegra_fuse_soc *soc; + struct { + struct mutex lock; + struct completion wait; + struct dma_chan *chan; + struct dma_slave_config config; + dma_addr_t phys; + u32 *virt; + } apbdma; + struct nvmem_device *nvmem; + struct nvmem_cell_lookup *lookups; +}; + +struct tegra_fuse_soc { + void (*init)(struct tegra_fuse *); + void (*speedo_init)(struct tegra_sku_info *); + int (*probe)(struct tegra_fuse *); + const struct tegra_fuse_info *info; + const struct nvmem_cell_lookup *lookups; + unsigned int num_lookups; + const struct attribute_group *soc_attr_group; +}; + +enum { + THRESHOLD_INDEX_0 = 0, + THRESHOLD_INDEX_1 = 1, + THRESHOLD_INDEX_COUNT = 2, +}; + +enum tegra_suspend_mode { + TEGRA_SUSPEND_NONE = 0, + TEGRA_SUSPEND_LP2 = 1, + TEGRA_SUSPEND_LP1 = 2, + TEGRA_SUSPEND_LP0 = 3, + TEGRA_MAX_SUSPEND_MODE = 4, +}; + +enum tegra_io_pad { + TEGRA_IO_PAD_AUDIO = 0, + TEGRA_IO_PAD_AUDIO_HV = 1, + TEGRA_IO_PAD_BB = 2, + TEGRA_IO_PAD_CAM = 3, + TEGRA_IO_PAD_COMP = 4, + TEGRA_IO_PAD_CONN = 5, + TEGRA_IO_PAD_CSIA = 6, + TEGRA_IO_PAD_CSIB = 7, + TEGRA_IO_PAD_CSIC = 8, + TEGRA_IO_PAD_CSID = 9, + TEGRA_IO_PAD_CSIE = 10, + TEGRA_IO_PAD_CSIF = 11, + TEGRA_IO_PAD_CSIG = 12, + TEGRA_IO_PAD_CSIH = 13, + TEGRA_IO_PAD_DAP3 = 14, + TEGRA_IO_PAD_DAP5 = 15, + TEGRA_IO_PAD_DBG = 16, + TEGRA_IO_PAD_DEBUG_NONAO = 17, + TEGRA_IO_PAD_DMIC = 18, + TEGRA_IO_PAD_DMIC_HV = 19, + TEGRA_IO_PAD_DP = 20, + TEGRA_IO_PAD_DSI = 21, + TEGRA_IO_PAD_DSIB = 22, + 
TEGRA_IO_PAD_DSIC = 23, + TEGRA_IO_PAD_DSID = 24, + TEGRA_IO_PAD_EDP = 25, + TEGRA_IO_PAD_EMMC = 26, + TEGRA_IO_PAD_EMMC2 = 27, + TEGRA_IO_PAD_EQOS = 28, + TEGRA_IO_PAD_GPIO = 29, + TEGRA_IO_PAD_GP_PWM2 = 30, + TEGRA_IO_PAD_GP_PWM3 = 31, + TEGRA_IO_PAD_HDMI = 32, + TEGRA_IO_PAD_HDMI_DP0 = 33, + TEGRA_IO_PAD_HDMI_DP1 = 34, + TEGRA_IO_PAD_HDMI_DP2 = 35, + TEGRA_IO_PAD_HDMI_DP3 = 36, + TEGRA_IO_PAD_HSIC = 37, + TEGRA_IO_PAD_HV = 38, + TEGRA_IO_PAD_LVDS = 39, + TEGRA_IO_PAD_MIPI_BIAS = 40, + TEGRA_IO_PAD_NAND = 41, + TEGRA_IO_PAD_PEX_BIAS = 42, + TEGRA_IO_PAD_PEX_CLK_BIAS = 43, + TEGRA_IO_PAD_PEX_CLK1 = 44, + TEGRA_IO_PAD_PEX_CLK2 = 45, + TEGRA_IO_PAD_PEX_CLK3 = 46, + TEGRA_IO_PAD_PEX_CLK_2_BIAS = 47, + TEGRA_IO_PAD_PEX_CLK_2 = 48, + TEGRA_IO_PAD_PEX_CNTRL = 49, + TEGRA_IO_PAD_PEX_CTL2 = 50, + TEGRA_IO_PAD_PEX_L0_RST_N = 51, + TEGRA_IO_PAD_PEX_L1_RST_N = 52, + TEGRA_IO_PAD_PEX_L5_RST_N = 53, + TEGRA_IO_PAD_PWR_CTL = 54, + TEGRA_IO_PAD_SDMMC1 = 55, + TEGRA_IO_PAD_SDMMC1_HV = 56, + TEGRA_IO_PAD_SDMMC2 = 57, + TEGRA_IO_PAD_SDMMC2_HV = 58, + TEGRA_IO_PAD_SDMMC3 = 59, + TEGRA_IO_PAD_SDMMC3_HV = 60, + TEGRA_IO_PAD_SDMMC4 = 61, + TEGRA_IO_PAD_SOC_GPIO10 = 62, + TEGRA_IO_PAD_SOC_GPIO12 = 63, + TEGRA_IO_PAD_SOC_GPIO13 = 64, + TEGRA_IO_PAD_SOC_GPIO53 = 65, + TEGRA_IO_PAD_SPI = 66, + TEGRA_IO_PAD_SPI_HV = 67, + TEGRA_IO_PAD_SYS_DDC = 68, + TEGRA_IO_PAD_UART = 69, + TEGRA_IO_PAD_UART4 = 70, + TEGRA_IO_PAD_UART5 = 71, + TEGRA_IO_PAD_UFS = 72, + TEGRA_IO_PAD_USB0 = 73, + TEGRA_IO_PAD_USB1 = 74, + TEGRA_IO_PAD_USB2 = 75, + TEGRA_IO_PAD_USB3 = 76, + TEGRA_IO_PAD_USB_BIAS = 77, + TEGRA_IO_PAD_AO_HV = 78, +}; + +struct pmc_clk { + struct clk_hw hw; + long unsigned int offs; + u32 mux_shift; + u32 force_en_shift; +}; + +struct pmc_clk_gate { + struct clk_hw hw; + long unsigned int offs; + u32 shift; +}; + +struct pmc_clk_init_data { + char *name; + const char * const *parents; + int num_parents; + int clk_id; + u8 mux_shift; + u8 force_en_shift; +}; + +struct tegra_pmc; + +struct tegra_powergate { + struct generic_pm_domain genpd; + struct tegra_pmc *pmc; + unsigned int id; + struct clk **clks; + unsigned int num_clks; + struct reset_control *reset; +}; + +struct tegra_pmc_soc; + +struct tegra_pmc { + struct device *dev; + void *base; + void *wake; + void *aotag; + void *scratch; + struct clk *clk; + struct dentry *debugfs; + const struct tegra_pmc_soc *soc; + bool tz_only; + long unsigned int rate; + enum tegra_suspend_mode suspend_mode; + u32 cpu_good_time; + u32 cpu_off_time; + u32 core_osc_time; + u32 core_pmu_time; + u32 core_off_time; + bool corereq_high; + bool sysclkreq_high; + bool combined_req; + bool cpu_pwr_good_en; + u32 lp0_vec_phys; + u32 lp0_vec_size; + long unsigned int powergates_available[1]; + struct mutex powergates_lock; + struct pinctrl_dev *pctl_dev; + struct irq_domain *domain; + struct irq_chip irq; + struct notifier_block clk_nb; +}; + +struct tegra_io_pad_soc { + enum tegra_io_pad id; + unsigned int dpd; + unsigned int voltage; + const char *name; +}; + +struct tegra_pmc_regs { + unsigned int scratch0; + unsigned int dpd_req; + unsigned int dpd_status; + unsigned int dpd2_req; + unsigned int dpd2_status; + unsigned int rst_status; + unsigned int rst_source_shift; + unsigned int rst_source_mask; + unsigned int rst_level_shift; + unsigned int rst_level_mask; +}; + +struct tegra_wake_event { + const char *name; + unsigned int id; + unsigned int irq; + struct { + unsigned int instance; + unsigned int pin; + } gpio; +}; + +struct tegra_pmc_soc { + unsigned int num_powergates; + const char 
* const *powergates; + unsigned int num_cpu_powergates; + const u8 *cpu_powergates; + bool has_tsense_reset; + bool has_gpu_clamps; + bool needs_mbist_war; + bool has_impl_33v_pwr; + bool maybe_tz_only; + const struct tegra_io_pad_soc *io_pads; + unsigned int num_io_pads; + const struct pinctrl_pin_desc *pin_descs; + unsigned int num_pin_descs; + const struct tegra_pmc_regs *regs; + void (*init)(struct tegra_pmc *); + void (*setup_irq_polarity)(struct tegra_pmc *, struct device_node *, bool); + int (*irq_set_wake)(struct irq_data *, unsigned int); + int (*irq_set_type)(struct irq_data *, unsigned int); + const char * const *reset_sources; + unsigned int num_reset_sources; + const char * const *reset_levels; + unsigned int num_reset_levels; + const struct tegra_wake_event *wake_events; + unsigned int num_wake_events; + const struct pmc_clk_init_data *pmc_clks_data; + unsigned int num_pmc_clks; + bool has_blink_output; +}; + +enum mrq_pg_cmd { + CMD_PG_QUERY_ABI = 0, + CMD_PG_SET_STATE = 1, + CMD_PG_GET_STATE = 2, + CMD_PG_GET_NAME = 3, + CMD_PG_GET_MAX_ID = 4, +}; + +enum pg_states { + PG_STATE_OFF = 0, + PG_STATE_ON = 1, + PG_STATE_RUNNING = 2, +}; + +struct cmd_pg_query_abi_request { + uint32_t type; +}; + +struct cmd_pg_set_state_request { + uint32_t state; +}; + +struct cmd_pg_get_state_response { + uint32_t state; +}; + +struct cmd_pg_get_name_response { + uint8_t name[40]; +}; + +struct cmd_pg_get_max_id_response { + uint32_t max_id; +}; + +struct mrq_pg_request { + uint32_t cmd; + uint32_t id; + union { + struct cmd_pg_query_abi_request query_abi; + struct cmd_pg_set_state_request set_state; + }; +}; + +struct mrq_pg_response { + union { + struct cmd_pg_get_state_response get_state; + struct cmd_pg_get_name_response get_name; + struct cmd_pg_get_max_id_response get_max_id; + }; +}; + +struct tegra_powergate_info { + unsigned int id; + char *name; +}; + +struct tegra_powergate___2 { + struct generic_pm_domain genpd; + struct tegra_bpmp *bpmp; + unsigned int id; +}; + +typedef struct { + union { + xen_pfn_t *p; + uint64_t q; + }; +} __guest_handle_xen_pfn_t; + +struct grant_entry_v1 { + uint16_t flags; + domid_t domid; + uint32_t frame; +}; + +struct grant_entry_header { + uint16_t flags; + domid_t domid; +}; + +union grant_entry_v2 { + struct grant_entry_header hdr; + struct { + struct grant_entry_header hdr; + uint32_t pad0; + uint64_t frame; + } full_page; + struct { + struct grant_entry_header hdr; + uint16_t page_off; + uint16_t length; + uint64_t frame; + } sub_page; + struct { + struct grant_entry_header hdr; + domid_t trans_domid; + uint16_t pad0; + grant_ref_t gref; + } transitive; + uint32_t __spacer[4]; +}; + +struct gnttab_setup_table { + domid_t dom; + uint32_t nr_frames; + int16_t status; + __guest_handle_xen_pfn_t frame_list; +}; + +struct gnttab_copy { + struct { + union { + grant_ref_t ref; + xen_pfn_t gmfn; + } u; + domid_t domid; + uint16_t offset; + } source; + struct { + union { + grant_ref_t ref; + xen_pfn_t gmfn; + } u; + domid_t domid; + uint16_t offset; + } dest; + uint16_t len; + uint16_t flags; + int16_t status; +}; + +struct gnttab_query_size { + domid_t dom; + uint32_t nr_frames; + uint32_t max_nr_frames; + int16_t status; +}; + +struct gnttab_set_version { + uint32_t version; +}; + +struct gnttab_get_status_frames { + uint32_t nr_frames; + domid_t dom; + int16_t status; + __guest_handle_uint64_t frame_list; +}; + +struct gnttab_free_callback { + struct gnttab_free_callback *next; + void (*fn)(void *); + void *arg; + u16 count; +}; + +struct 
gntab_unmap_queue_data; + +typedef void (*gnttab_unmap_refs_done)(int, struct gntab_unmap_queue_data *); + +struct gntab_unmap_queue_data { + struct delayed_work gnttab_work; + void *data; + gnttab_unmap_refs_done done; + struct gnttab_unmap_grant_ref *unmap_ops; + struct gnttab_unmap_grant_ref *kunmap_ops; + struct page **pages; + unsigned int count; + unsigned int age; +}; + +struct gnttab_page_cache { + spinlock_t lock; + struct list_head pages; + unsigned int num_pages; +}; + +struct xen_page_foreign { + domid_t domid; + grant_ref_t gref; +}; + +typedef void (*xen_grant_fn_t)(long unsigned int, unsigned int, unsigned int, void *); + +struct gnttab_ops { + unsigned int version; + unsigned int grefs_per_grant_frame; + int (*map_frames)(xen_pfn_t *, unsigned int); + void (*unmap_frames)(); + void (*update_entry)(grant_ref_t, domid_t, long unsigned int, unsigned int); + int (*end_foreign_access_ref)(grant_ref_t, int); + long unsigned int (*end_foreign_transfer_ref)(grant_ref_t); + int (*query_foreign_access)(grant_ref_t); +}; + +struct unmap_refs_callback_data { + struct completion completion; + int result; +}; + +struct deferred_entry { + struct list_head list; + grant_ref_t ref; + bool ro; + uint16_t warn_delay; + struct page *page; +}; + +struct xen_feature_info { + unsigned int submap_idx; + uint32_t submap; +}; + +struct balloon_stats { + long unsigned int current_pages; + long unsigned int target_pages; + long unsigned int target_unpopulated; + long unsigned int balloon_low; + long unsigned int balloon_high; + long unsigned int total_pages; + long unsigned int schedule_delay; + long unsigned int max_schedule_delay; + long unsigned int retry_count; + long unsigned int max_retry_count; +}; + +enum bp_state { + BP_DONE = 0, + BP_WAIT = 1, + BP_EAGAIN = 2, + BP_ECANCELED = 3, +}; + +enum shutdown_state { + SHUTDOWN_INVALID = 4294967295, + SHUTDOWN_POWEROFF = 0, + SHUTDOWN_SUSPEND = 2, + SHUTDOWN_HALT = 4, +}; + +struct suspend_info { + int cancelled; +}; + +struct shutdown_handler { + const char command[11]; + bool flag; + void (*cb)(); +}; + +struct vcpu_runstate_info { + int state; + uint64_t state_entry_time; + uint64_t time[4]; +}; + +typedef struct { + union { + struct vcpu_runstate_info *p; + uint64_t q; + }; +} __guest_handle_vcpu_runstate_info; + +struct vcpu_register_runstate_memory_area { + union { + __guest_handle_vcpu_runstate_info h; + struct vcpu_runstate_info *v; + uint64_t p; + } addr; +}; + +struct xen_memory_reservation { + __guest_handle_xen_pfn_t extent_start; + xen_ulong_t nr_extents; + unsigned int extent_order; + unsigned int address_bits; + domid_t domid; +}; + +typedef uint32_t evtchn_port_t; + +typedef struct { + union { + evtchn_port_t *p; + uint64_t q; + }; +} __guest_handle_evtchn_port_t; + +struct evtchn_bind_interdomain { + domid_t remote_dom; + evtchn_port_t remote_port; + evtchn_port_t local_port; +}; + +struct evtchn_bind_virq { + uint32_t virq; + uint32_t vcpu; + evtchn_port_t port; +}; + +struct evtchn_bind_pirq { + uint32_t pirq; + uint32_t flags; + evtchn_port_t port; +}; + +struct evtchn_bind_ipi { + uint32_t vcpu; + evtchn_port_t port; +}; + +struct evtchn_close { + evtchn_port_t port; +}; + +struct evtchn_send { + evtchn_port_t port; +}; + +struct evtchn_status { + domid_t dom; + evtchn_port_t port; + uint32_t status; + uint32_t vcpu; + union { + struct { + domid_t dom; + } unbound; + struct { + domid_t dom; + evtchn_port_t port; + } interdomain; + uint32_t pirq; + uint32_t virq; + } u; +}; + +struct evtchn_bind_vcpu { + evtchn_port_t port; + 
uint32_t vcpu; +}; + +struct evtchn_set_priority { + evtchn_port_t port; + uint32_t priority; +}; + +struct sched_poll { + __guest_handle_evtchn_port_t ports; + unsigned int nr_ports; + uint64_t timeout; +}; + +enum ipi_vector { + XEN_PLACEHOLDER_VECTOR = 0, + XEN_NR_IPIS = 1, +}; + +struct physdev_eoi { + uint32_t irq; +}; + +struct physdev_irq_status_query { + uint32_t irq; + uint32_t flags; +}; + +struct physdev_irq { + uint32_t irq; + uint32_t vector; +}; + +struct physdev_map_pirq { + domid_t domid; + int type; + int index; + int pirq; + int bus; + int devfn; + int entry_nr; + uint64_t table_base; +}; + +struct physdev_unmap_pirq { + domid_t domid; + int pirq; +}; + +struct physdev_get_free_pirq { + int type; + uint32_t pirq; +}; + +struct evtchn_loop_ctrl; + +struct evtchn_ops { + unsigned int (*max_channels)(); + unsigned int (*nr_channels)(); + int (*setup)(evtchn_port_t); + void (*bind_to_cpu)(evtchn_port_t, unsigned int, unsigned int); + void (*clear_pending)(evtchn_port_t); + void (*set_pending)(evtchn_port_t); + bool (*is_pending)(evtchn_port_t); + bool (*test_and_set_mask)(evtchn_port_t); + void (*mask)(evtchn_port_t); + void (*unmask)(evtchn_port_t); + void (*handle_events)(unsigned int, struct evtchn_loop_ctrl *); + void (*resume)(); + int (*percpu_init)(unsigned int); + int (*percpu_deinit)(unsigned int); +}; + +struct evtchn_loop_ctrl { + ktime_t timeout; + unsigned int count; + bool defer_eoi; +}; + +enum xen_irq_type { + IRQT_UNBOUND = 0, + IRQT_PIRQ = 1, + IRQT_VIRQ = 2, + IRQT_IPI = 3, + IRQT_EVTCHN = 4, +}; + +struct irq_info { + struct list_head list; + struct list_head eoi_list; + short int refcnt; + short int spurious_cnt; + enum xen_irq_type type; + unsigned int irq; + evtchn_port_t evtchn; + short unsigned int cpu; + short unsigned int eoi_cpu; + unsigned int irq_epoch; + u64 eoi_time; + union { + short unsigned int virq; + enum ipi_vector ipi; + struct { + short unsigned int pirq; + short unsigned int gsi; + unsigned char vector; + unsigned char flags; + uint16_t domid; + } pirq; + } u; +}; + +struct lateeoi_work { + struct delayed_work delayed; + spinlock_t eoi_list_lock; + struct list_head eoi_list; +}; + +struct evtchn_unmask { + evtchn_port_t port; +}; + +struct evtchn_init_control { + uint64_t control_gfn; + uint32_t offset; + uint32_t vcpu; + uint8_t link_bits; + uint8_t _pad[7]; +}; + +struct evtchn_expand_array { + uint64_t array_gfn; +}; + +typedef uint32_t event_word_t; + +struct evtchn_fifo_control_block { + uint32_t ready; + uint32_t _rsvd; + event_word_t head[16]; +}; + +struct evtchn_fifo_queue { + uint32_t head[16]; +}; + +struct evtchn_alloc_unbound { + domid_t dom; + domid_t remote_dom; + evtchn_port_t port; +}; + +struct xenbus_map_node { + struct list_head next; + union { + struct { + struct vm_struct *area; + } pv; + struct { + struct page *pages[16]; + long unsigned int addrs[16]; + void *addr; + } hvm; + }; + grant_handle_t handles[16]; + unsigned int nr_handles; +}; + +struct map_ring_valloc { + struct xenbus_map_node *node; + long unsigned int addrs[16]; + phys_addr_t phys_addrs[16]; + struct gnttab_map_grant_ref map[16]; + struct gnttab_unmap_grant_ref unmap[16]; + unsigned int idx; +}; + +struct xenbus_ring_ops { + int (*map)(struct xenbus_device *, struct map_ring_valloc *, grant_ref_t *, unsigned int, void **); + int (*unmap)(struct xenbus_device *, void *); +}; + +struct unmap_ring_hvm { + unsigned int idx; + long unsigned int addrs[16]; +}; + +enum xsd_sockmsg_type { + XS_DEBUG = 0, + XS_DIRECTORY = 1, + XS_READ = 2, + XS_GET_PERMS 
= 3, + XS_WATCH = 4, + XS_UNWATCH = 5, + XS_TRANSACTION_START = 6, + XS_TRANSACTION_END = 7, + XS_INTRODUCE = 8, + XS_RELEASE = 9, + XS_GET_DOMAIN_PATH = 10, + XS_WRITE = 11, + XS_MKDIR = 12, + XS_RM = 13, + XS_SET_PERMS = 14, + XS_WATCH_EVENT = 15, + XS_ERROR = 16, + XS_IS_DOMAIN_INTRODUCED = 17, + XS_RESUME = 18, + XS_SET_TARGET = 19, + XS_RESTRICT = 20, + XS_RESET_WATCHES = 21, +}; + +struct xsd_sockmsg { + uint32_t type; + uint32_t req_id; + uint32_t tx_id; + uint32_t len; +}; + +typedef uint32_t XENSTORE_RING_IDX; + +struct xenstore_domain_interface { + char req[1024]; + char rsp[1024]; + XENSTORE_RING_IDX req_cons; + XENSTORE_RING_IDX req_prod; + XENSTORE_RING_IDX rsp_cons; + XENSTORE_RING_IDX rsp_prod; +}; + +struct xs_watch_event { + struct list_head list; + unsigned int len; + struct xenbus_watch *handle; + const char *path; + const char *token; + char body[0]; +}; + +enum xb_req_state { + xb_req_state_queued = 0, + xb_req_state_wait_reply = 1, + xb_req_state_got_reply = 2, + xb_req_state_aborted = 3, +}; + +struct xb_req_data { + struct list_head list; + wait_queue_head_t wq; + struct xsd_sockmsg msg; + uint32_t caller_req_id; + enum xsd_sockmsg_type type; + char *body; + const struct kvec *vec; + int num_vecs; + int err; + enum xb_req_state state; + bool user_req; + void (*cb)(struct xb_req_data *); + void *par; +}; + +enum xenstore_init { + XS_UNKNOWN = 0, + XS_PV = 1, + XS_HVM = 2, + XS_LOCAL = 3, +}; + +struct xen_bus_type { + char *root; + unsigned int levels; + int (*get_bus_id)(char *, const char *); + int (*probe)(struct xen_bus_type *, const char *, const char *); + bool (*otherend_will_handle)(struct xenbus_watch *, const char *, const char *); + void (*otherend_changed)(struct xenbus_watch *, const char *, const char *); + struct bus_type bus; +}; + +struct xb_find_info { + struct xenbus_device *dev; + const char *nodename; +}; + +struct xenbus_transaction_holder { + struct list_head list; + struct xenbus_transaction handle; + unsigned int generation_id; +}; + +struct read_buffer { + struct list_head list; + unsigned int cons; + unsigned int len; + char msg[0]; +}; + +struct xenbus_file_priv { + struct mutex msgbuffer_mutex; + struct list_head transactions; + struct list_head watches; + unsigned int len; + union { + struct xsd_sockmsg msg; + char buffer[4096]; + } u; + struct mutex reply_mutex; + struct list_head read_buffers; + wait_queue_head_t read_waitq; + struct kref kref; + struct work_struct wq; +}; + +struct watch_adapter { + struct list_head list; + struct xenbus_watch watch; + struct xenbus_file_priv *dev_data; + char *token; +}; + +typedef struct { + union { + int *p; + uint64_t q; + }; +} __guest_handle_int; + +typedef struct { + union { + xen_ulong_t *p; + uint64_t q; + }; +} __guest_handle_xen_ulong_t; + +struct xen_add_to_physmap_range { + domid_t domid; + uint16_t space; + uint16_t size; + domid_t foreign_domid; + __guest_handle_xen_ulong_t idxs; + __guest_handle_xen_pfn_t gpfns; + __guest_handle_int errs; +}; + +struct xen_remove_from_physmap { + domid_t domid; + xen_pfn_t gpfn; +}; + +struct physdev_manage_pci { + uint8_t bus; + uint8_t devfn; +}; + +struct physdev_manage_pci_ext { + uint8_t bus; + uint8_t devfn; + unsigned int is_extfn; + unsigned int is_virtfn; + struct { + uint8_t bus; + uint8_t devfn; + } physfn; +}; + +struct physdev_pci_device_add { + uint16_t seg; + uint8_t bus; + uint8_t devfn; + uint32_t flags; + struct { + uint8_t bus; + uint8_t devfn; + } physfn; + uint32_t optarr[0]; +}; + +struct physdev_pci_device { + uint16_t seg; + 
uint8_t bus; + uint8_t devfn; +}; + +struct usb_device_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 bcdUSB; + __u8 bDeviceClass; + __u8 bDeviceSubClass; + __u8 bDeviceProtocol; + __u8 bMaxPacketSize0; + __le16 idVendor; + __le16 idProduct; + __le16 bcdDevice; + __u8 iManufacturer; + __u8 iProduct; + __u8 iSerialNumber; + __u8 bNumConfigurations; +}; + +struct usb_config_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 wTotalLength; + __u8 bNumInterfaces; + __u8 bConfigurationValue; + __u8 iConfiguration; + __u8 bmAttributes; + __u8 bMaxPower; +} __attribute__((packed)); + +struct usb_interface_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bInterfaceNumber; + __u8 bAlternateSetting; + __u8 bNumEndpoints; + __u8 bInterfaceClass; + __u8 bInterfaceSubClass; + __u8 bInterfaceProtocol; + __u8 iInterface; +}; + +struct usb_endpoint_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bEndpointAddress; + __u8 bmAttributes; + __le16 wMaxPacketSize; + __u8 bInterval; + __u8 bRefresh; + __u8 bSynchAddress; +} __attribute__((packed)); + +struct usb_ssp_isoc_ep_comp_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 wReseved; + __le32 dwBytesPerInterval; +}; + +struct usb_ss_ep_comp_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bMaxBurst; + __u8 bmAttributes; + __le16 wBytesPerInterval; +}; + +struct usb_interface_assoc_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bFirstInterface; + __u8 bInterfaceCount; + __u8 bFunctionClass; + __u8 bFunctionSubClass; + __u8 bFunctionProtocol; + __u8 iFunction; +}; + +struct usb_bos_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 wTotalLength; + __u8 bNumDeviceCaps; +} __attribute__((packed)); + +struct usb_ext_cap_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __le32 bmAttributes; +} __attribute__((packed)); + +struct usb_ss_cap_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __u8 bmAttributes; + __le16 wSpeedSupported; + __u8 bFunctionalitySupport; + __u8 bU1devExitLat; + __le16 bU2DevExitLat; +}; + +struct usb_ss_container_id_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __u8 bReserved; + __u8 ContainerID[16]; +}; + +struct usb_ssp_cap_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __u8 bReserved; + __le32 bmAttributes; + __le16 wFunctionalitySupport; + __le16 wReserved; + __le32 bmSublinkSpeedAttr[1]; +}; + +struct usb_ptm_cap_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; +}; + +enum usb_device_speed { + USB_SPEED_UNKNOWN = 0, + USB_SPEED_LOW = 1, + USB_SPEED_FULL = 2, + USB_SPEED_HIGH = 3, + USB_SPEED_WIRELESS = 4, + USB_SPEED_SUPER = 5, + USB_SPEED_SUPER_PLUS = 6, +}; + +enum usb_device_state { + USB_STATE_NOTATTACHED = 0, + USB_STATE_ATTACHED = 1, + USB_STATE_POWERED = 2, + USB_STATE_RECONNECTING = 3, + USB_STATE_UNAUTHENTICATED = 4, + USB_STATE_DEFAULT = 5, + USB_STATE_ADDRESS = 6, + USB_STATE_CONFIGURED = 7, + USB_STATE_SUSPENDED = 8, +}; + +enum usb3_link_state { + USB3_LPM_U0 = 0, + USB3_LPM_U1 = 1, + USB3_LPM_U2 = 2, + USB3_LPM_U3 = 3, +}; + +struct ep_device; + +struct usb_host_endpoint { + struct usb_endpoint_descriptor desc; + struct usb_ss_ep_comp_descriptor ss_ep_comp; + struct usb_ssp_isoc_ep_comp_descriptor ssp_isoc_ep_comp; + char: 8; + struct list_head urb_list; + void *hcpriv; + struct ep_device *ep_dev; + unsigned char *extra; + int extralen; + int enabled; + int streams; 
+ int: 32; +} __attribute__((packed)); + +struct usb_host_interface { + struct usb_interface_descriptor desc; + int extralen; + unsigned char *extra; + struct usb_host_endpoint *endpoint; + char *string; +}; + +enum usb_interface_condition { + USB_INTERFACE_UNBOUND = 0, + USB_INTERFACE_BINDING = 1, + USB_INTERFACE_BOUND = 2, + USB_INTERFACE_UNBINDING = 3, +}; + +struct usb_interface { + struct usb_host_interface *altsetting; + struct usb_host_interface *cur_altsetting; + unsigned int num_altsetting; + struct usb_interface_assoc_descriptor *intf_assoc; + int minor; + enum usb_interface_condition condition; + unsigned int sysfs_files_created: 1; + unsigned int ep_devs_created: 1; + unsigned int unregistering: 1; + unsigned int needs_remote_wakeup: 1; + unsigned int needs_altsetting0: 1; + unsigned int needs_binding: 1; + unsigned int resetting_device: 1; + unsigned int authorized: 1; + struct device dev; + struct device *usb_dev; + struct work_struct reset_ws; +}; + +struct usb_interface_cache { + unsigned int num_altsetting; + struct kref ref; + struct usb_host_interface altsetting[0]; +}; + +struct usb_host_config { + struct usb_config_descriptor desc; + char *string; + struct usb_interface_assoc_descriptor *intf_assoc[16]; + struct usb_interface *interface[32]; + struct usb_interface_cache *intf_cache[32]; + unsigned char *extra; + int extralen; +}; + +struct usb_host_bos { + struct usb_bos_descriptor *desc; + struct usb_ext_cap_descriptor *ext_cap; + struct usb_ss_cap_descriptor *ss_cap; + struct usb_ssp_cap_descriptor *ssp_cap; + struct usb_ss_container_id_descriptor *ss_id; + struct usb_ptm_cap_descriptor *ptm_cap; +}; + +struct usb_devmap { + long unsigned int devicemap[2]; +}; + +struct mon_bus; + +struct usb_device; + +struct usb_bus { + struct device *controller; + struct device *sysdev; + int busnum; + const char *bus_name; + u8 uses_pio_for_control; + u8 otg_port; + unsigned int is_b_host: 1; + unsigned int b_hnp_enable: 1; + unsigned int no_stop_on_short: 1; + unsigned int no_sg_constraint: 1; + unsigned int sg_tablesize; + int devnum_next; + struct mutex devnum_next_mutex; + struct usb_devmap devmap; + struct usb_device *root_hub; + struct usb_bus *hs_companion; + int bandwidth_allocated; + int bandwidth_int_reqs; + int bandwidth_isoc_reqs; + unsigned int resuming_ports; + struct mon_bus *mon_bus; + int monitored; +}; + +struct wusb_dev; + +enum usb_device_removable { + USB_DEVICE_REMOVABLE_UNKNOWN = 0, + USB_DEVICE_REMOVABLE = 1, + USB_DEVICE_FIXED = 2, +}; + +struct usb2_lpm_parameters { + unsigned int besl; + int timeout; +}; + +struct usb3_lpm_parameters { + unsigned int mel; + unsigned int pel; + unsigned int sel; + int timeout; +}; + +struct usb_tt; + +struct usb_device { + int devnum; + char devpath[16]; + u32 route; + enum usb_device_state state; + enum usb_device_speed speed; + unsigned int rx_lanes; + unsigned int tx_lanes; + struct usb_tt *tt; + int ttport; + unsigned int toggle[2]; + struct usb_device *parent; + struct usb_bus *bus; + struct usb_host_endpoint ep0; + struct device dev; + struct usb_device_descriptor descriptor; + struct usb_host_bos *bos; + struct usb_host_config *config; + struct usb_host_config *actconfig; + struct usb_host_endpoint *ep_in[16]; + struct usb_host_endpoint *ep_out[16]; + char **rawdescriptors; + short unsigned int bus_mA; + u8 portnum; + u8 level; + u8 devaddr; + unsigned int can_submit: 1; + unsigned int persist_enabled: 1; + unsigned int have_langid: 1; + unsigned int authorized: 1; + unsigned int authenticated: 1; + unsigned int 
wusb: 1; + unsigned int lpm_capable: 1; + unsigned int usb2_hw_lpm_capable: 1; + unsigned int usb2_hw_lpm_besl_capable: 1; + unsigned int usb2_hw_lpm_enabled: 1; + unsigned int usb2_hw_lpm_allowed: 1; + unsigned int usb3_lpm_u1_enabled: 1; + unsigned int usb3_lpm_u2_enabled: 1; + int string_langid; + char *product; + char *manufacturer; + char *serial; + struct list_head filelist; + int maxchild; + u32 quirks; + atomic_t urbnum; + long unsigned int active_duration; + long unsigned int connect_time; + unsigned int do_remote_wakeup: 1; + unsigned int reset_resume: 1; + unsigned int port_is_suspended: 1; + struct wusb_dev *wusb_dev; + int slot_id; + enum usb_device_removable removable; + struct usb2_lpm_parameters l1_params; + struct usb3_lpm_parameters u1_params; + struct usb3_lpm_parameters u2_params; + unsigned int lpm_disable_count; + u16 hub_delay; + unsigned int use_generic_driver: 1; +}; + +struct usb_tt { + struct usb_device *hub; + int multi; + unsigned int think_time; + void *hcpriv; + spinlock_t lock; + struct list_head clear_list; + struct work_struct clear_work; +}; + +struct usb_iso_packet_descriptor { + unsigned int offset; + unsigned int length; + unsigned int actual_length; + int status; +}; + +struct usb_anchor { + struct list_head urb_list; + wait_queue_head_t wait; + spinlock_t lock; + atomic_t suspend_wakeups; + unsigned int poisoned: 1; +}; + +struct urb; + +typedef void (*usb_complete_t)(struct urb *); + +struct urb { + struct kref kref; + int unlinked; + void *hcpriv; + atomic_t use_count; + atomic_t reject; + struct list_head urb_list; + struct list_head anchor_list; + struct usb_anchor *anchor; + struct usb_device *dev; + struct usb_host_endpoint *ep; + unsigned int pipe; + unsigned int stream_id; + int status; + unsigned int transfer_flags; + void *transfer_buffer; + dma_addr_t transfer_dma; + struct scatterlist *sg; + int num_mapped_sgs; + int num_sgs; + u32 transfer_buffer_length; + u32 actual_length; + unsigned char *setup_packet; + dma_addr_t setup_dma; + int start_frame; + int number_of_packets; + int interval; + int error_count; + void *context; + usb_complete_t complete; + struct usb_iso_packet_descriptor iso_frame_desc[0]; +}; + +struct giveback_urb_bh { + bool running; + spinlock_t lock; + struct list_head head; + struct tasklet_struct bh; + struct usb_host_endpoint *completing_ep; +}; + +enum usb_dev_authorize_policy { + USB_DEVICE_AUTHORIZE_NONE = 0, + USB_DEVICE_AUTHORIZE_ALL = 1, + USB_DEVICE_AUTHORIZE_INTERNAL = 2, +}; + +struct usb_phy; + +struct usb_phy_roothub; + +struct dma_pool___2; + +struct gen_pool___2; + +struct hc_driver; + +struct usb_hcd { + struct usb_bus self; + struct kref kref; + const char *product_desc; + int speed; + char irq_descr[24]; + struct timer_list rh_timer; + struct urb *status_urb; + struct work_struct wakeup_work; + struct work_struct died_work; + const struct hc_driver *driver; + struct usb_phy *usb_phy; + struct usb_phy_roothub *phy_roothub; + long unsigned int flags; + enum usb_dev_authorize_policy dev_policy; + unsigned int rh_registered: 1; + unsigned int rh_pollable: 1; + unsigned int msix_enabled: 1; + unsigned int msi_enabled: 1; + unsigned int skip_phy_initialization: 1; + unsigned int uses_new_polling: 1; + unsigned int wireless: 1; + unsigned int has_tt: 1; + unsigned int amd_resume_bug: 1; + unsigned int can_do_streams: 1; + unsigned int tpl_support: 1; + unsigned int cant_recv_wakeups: 1; + unsigned int irq; + void *regs; + resource_size_t rsrc_start; + resource_size_t rsrc_len; + unsigned int power_budget; + 
struct giveback_urb_bh high_prio_bh; + struct giveback_urb_bh low_prio_bh; + struct mutex *address0_mutex; + struct mutex *bandwidth_mutex; + struct usb_hcd *shared_hcd; + struct usb_hcd *primary_hcd; + struct dma_pool___2 *pool[4]; + int state; + struct gen_pool___2 *localmem_pool; + long unsigned int hcd_priv[0]; +}; + +struct hc_driver { + const char *description; + const char *product_desc; + size_t hcd_priv_size; + irqreturn_t (*irq)(struct usb_hcd *); + int flags; + int (*reset)(struct usb_hcd *); + int (*start)(struct usb_hcd *); + int (*pci_suspend)(struct usb_hcd *, bool); + int (*pci_resume)(struct usb_hcd *, bool); + void (*stop)(struct usb_hcd *); + void (*shutdown)(struct usb_hcd *); + int (*get_frame_number)(struct usb_hcd *); + int (*urb_enqueue)(struct usb_hcd *, struct urb *, gfp_t); + int (*urb_dequeue)(struct usb_hcd *, struct urb *, int); + int (*map_urb_for_dma)(struct usb_hcd *, struct urb *, gfp_t); + void (*unmap_urb_for_dma)(struct usb_hcd *, struct urb *); + void (*endpoint_disable)(struct usb_hcd *, struct usb_host_endpoint *); + void (*endpoint_reset)(struct usb_hcd *, struct usb_host_endpoint *); + int (*hub_status_data)(struct usb_hcd *, char *); + int (*hub_control)(struct usb_hcd *, u16, u16, u16, char *, u16); + int (*bus_suspend)(struct usb_hcd *); + int (*bus_resume)(struct usb_hcd *); + int (*start_port_reset)(struct usb_hcd *, unsigned int); + long unsigned int (*get_resuming_ports)(struct usb_hcd *); + void (*relinquish_port)(struct usb_hcd *, int); + int (*port_handed_over)(struct usb_hcd *, int); + void (*clear_tt_buffer_complete)(struct usb_hcd *, struct usb_host_endpoint *); + int (*alloc_dev)(struct usb_hcd *, struct usb_device *); + void (*free_dev)(struct usb_hcd *, struct usb_device *); + int (*alloc_streams)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint **, unsigned int, unsigned int, gfp_t); + int (*free_streams)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint **, unsigned int, gfp_t); + int (*add_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); + int (*drop_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); + int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); + void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); + int (*address_device)(struct usb_hcd *, struct usb_device *); + int (*enable_device)(struct usb_hcd *, struct usb_device *); + int (*update_hub_device)(struct usb_hcd *, struct usb_device *, struct usb_tt *, gfp_t); + int (*reset_device)(struct usb_hcd *, struct usb_device *); + int (*update_device)(struct usb_hcd *, struct usb_device *); + int (*set_usb2_hw_lpm)(struct usb_hcd *, struct usb_device *, int); + int (*enable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state); + int (*disable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state); + int (*find_raw_port_number)(struct usb_hcd *, int); + int (*port_power)(struct usb_hcd *, int, bool); +}; + +struct physdev_dbgp_op { + uint8_t op; + uint8_t bus; + union { + struct physdev_pci_device pci; + } u; +}; + +struct dev_ext_attribute { + struct device_attribute attr; + void *var; +}; + +struct ioctl_evtchn_bind_virq { + unsigned int virq; +}; + +struct ioctl_evtchn_bind_interdomain { + unsigned int remote_domain; + unsigned int remote_port; +}; + +struct ioctl_evtchn_bind_unbound_port { + unsigned int remote_domain; +}; + +struct ioctl_evtchn_unbind { + unsigned int port; +}; + +struct ioctl_evtchn_notify { + 
unsigned int port; +}; + +struct ioctl_evtchn_restrict_domid { + domid_t domid; +}; + +struct per_user_data { + struct mutex bind_mutex; + struct rb_root evtchns; + unsigned int nr_evtchns; + unsigned int ring_size; + evtchn_port_t *ring; + unsigned int ring_cons; + unsigned int ring_prod; + unsigned int ring_overflow; + struct mutex ring_cons_mutex; + spinlock_t ring_prod_lock; + wait_queue_head_t evtchn_wait; + struct fasync_struct *evtchn_async_queue; + const char *name; + domid_t restrict_domid; +}; + +struct user_evtchn { + struct rb_node node; + struct per_user_data *user; + evtchn_port_t port; + bool enabled; +}; + +typedef uint8_t xen_domain_handle_t[16]; + +struct xen_compile_info { + char compiler[64]; + char compile_by[16]; + char compile_domain[32]; + char compile_date[32]; +}; + +struct xen_platform_parameters { + xen_ulong_t virt_start; +}; + +struct xen_build_id { + uint32_t len; + unsigned char buf[0]; +}; + +struct hyp_sysfs_attr { + struct attribute attr; + ssize_t (*show)(struct hyp_sysfs_attr *, char *); + ssize_t (*store)(struct hyp_sysfs_attr *, const char *, size_t); + void *hyp_attr_data; +}; + +enum xen_swiotlb_err { + XEN_SWIOTLB_UNKNOWN = 0, + XEN_SWIOTLB_ENOMEM = 1, + XEN_SWIOTLB_EFIXUP = 2, +}; + +typedef void (*xen_gfn_fn_t)(long unsigned int, void *); + +struct xen_remap_gfn_info; + +struct remap_data { + xen_pfn_t *fgfn; + int nr_fgfn; + pgprot_t prot; + domid_t domid; + struct vm_area_struct *vma; + int index; + struct page **pages; + struct xen_remap_gfn_info *info; + int *err_ptr; + int mapped; + int h_errs[1]; + xen_ulong_t h_idxs[1]; + xen_pfn_t h_gpfns[1]; + int h_iter; +}; + +struct map_balloon_pages { + xen_pfn_t *pfns; + unsigned int idx; +}; + +struct remap_pfn { + struct mm_struct *mm; + struct page **pages; + pgprot_t prot; + long unsigned int i; +}; + +struct fastopen_queue { + struct request_sock *rskq_rst_head; + struct request_sock *rskq_rst_tail; + spinlock_t lock; + int qlen; + int max_qlen; + struct tcp_fastopen_context *ctx; +}; + +struct request_sock_queue { + spinlock_t rskq_lock; + u8 rskq_defer_accept; + u32 synflood_warned; + atomic_t qlen; + atomic_t young; + struct request_sock *rskq_accept_head; + struct request_sock *rskq_accept_tail; + struct fastopen_queue fastopenq; +}; + +struct inet_connection_sock_af_ops { + int (*queue_xmit)(struct sock *, struct sk_buff *, struct flowi *); + void (*send_check)(struct sock *, struct sk_buff *); + int (*rebuild_header)(struct sock *); + void (*sk_rx_dst_set)(struct sock *, const struct sk_buff *); + int (*conn_request)(struct sock *, struct sk_buff *); + struct sock * (*syn_recv_sock)(const struct sock *, struct sk_buff *, struct request_sock *, struct dst_entry *, struct request_sock *, bool *); + u16 net_header_len; + u16 net_frag_header_len; + u16 sockaddr_len; + int (*setsockopt)(struct sock *, int, int, sockptr_t, unsigned int); + int (*getsockopt)(struct sock *, int, int, char *, int *); + void (*addr2sockaddr)(struct sock *, struct sockaddr *); + void (*mtu_reduced)(struct sock *); +}; + +struct inet_bind_bucket; + +struct tcp_ulp_ops; + +struct inet_connection_sock { + struct inet_sock icsk_inet; + struct request_sock_queue icsk_accept_queue; + struct inet_bind_bucket *icsk_bind_hash; + long unsigned int icsk_timeout; + struct timer_list icsk_retransmit_timer; + struct timer_list icsk_delack_timer; + __u32 icsk_rto; + __u32 icsk_rto_min; + __u32 icsk_delack_max; + __u32 icsk_pmtu_cookie; + const struct tcp_congestion_ops *icsk_ca_ops; + const struct inet_connection_sock_af_ops 
*icsk_af_ops; + const struct tcp_ulp_ops *icsk_ulp_ops; + void *icsk_ulp_data; + void (*icsk_clean_acked)(struct sock *, u32); + struct hlist_node icsk_listen_portaddr_node; + unsigned int (*icsk_sync_mss)(struct sock *, u32); + __u8 icsk_ca_state: 5; + __u8 icsk_ca_initialized: 1; + __u8 icsk_ca_setsockopt: 1; + __u8 icsk_ca_dst_locked: 1; + __u8 icsk_retransmits; + __u8 icsk_pending; + __u8 icsk_backoff; + __u8 icsk_syn_retries; + __u8 icsk_probes_out; + __u16 icsk_ext_hdr_len; + struct { + __u8 pending; + __u8 quick; + __u8 pingpong; + __u8 retry; + __u32 ato; + long unsigned int timeout; + __u32 lrcvtime; + __u16 last_seg_size; + __u16 rcv_mss; + } icsk_ack; + struct { + int enabled; + int search_high; + int search_low; + int probe_size; + u32 probe_timestamp; + } icsk_mtup; + u32 icsk_probes_tstamp; + u32 icsk_user_timeout; + u64 icsk_ca_priv[13]; +}; + +struct tcp_ulp_ops { + struct list_head list; + int (*init)(struct sock *); + void (*update)(struct sock *, struct proto *, void (*)(struct sock *)); + void (*release)(struct sock *); + int (*get_info)(const struct sock *, struct sk_buff *); + size_t (*get_info_size)(const struct sock *); + void (*clone)(const struct request_sock *, struct sock *, const gfp_t); + char name[16]; + struct module *owner; +}; + +typedef unsigned int RING_IDX; + +struct pvcalls_data_intf { + RING_IDX in_cons; + RING_IDX in_prod; + RING_IDX in_error; + uint8_t pad1[52]; + RING_IDX out_cons; + RING_IDX out_prod; + RING_IDX out_error; + uint8_t pad2[52]; + RING_IDX ring_order; + grant_ref_t ref[0]; +}; + +struct pvcalls_data { + unsigned char *in; + unsigned char *out; +}; + +struct xen_pvcalls_socket { + uint64_t id; + uint32_t domain; + uint32_t type; + uint32_t protocol; +}; + +struct xen_pvcalls_connect { + uint64_t id; + uint8_t addr[28]; + uint32_t len; + uint32_t flags; + grant_ref_t ref; + uint32_t evtchn; +}; + +struct xen_pvcalls_release { + uint64_t id; + uint8_t reuse; +}; + +struct xen_pvcalls_bind { + uint64_t id; + uint8_t addr[28]; + uint32_t len; +}; + +struct xen_pvcalls_listen { + uint64_t id; + uint32_t backlog; +}; + +struct xen_pvcalls_accept { + uint64_t id; + uint64_t id_new; + grant_ref_t ref; + uint32_t evtchn; +}; + +struct xen_pvcalls_poll { + uint64_t id; +}; + +struct xen_pvcalls_dummy { + uint8_t dummy[56]; +}; + +struct xen_pvcalls_request { + uint32_t req_id; + uint32_t cmd; + union { + struct xen_pvcalls_socket socket; + struct xen_pvcalls_connect connect; + struct xen_pvcalls_release release; + struct xen_pvcalls_bind bind; + struct xen_pvcalls_listen listen; + struct xen_pvcalls_accept accept; + struct xen_pvcalls_poll poll; + struct xen_pvcalls_dummy dummy; + } u; +}; + +struct _xen_pvcalls_socket { + uint64_t id; +}; + +struct _xen_pvcalls_connect { + uint64_t id; +}; + +struct _xen_pvcalls_release { + uint64_t id; +}; + +struct _xen_pvcalls_bind { + uint64_t id; +}; + +struct _xen_pvcalls_listen { + uint64_t id; +}; + +struct _xen_pvcalls_accept { + uint64_t id; +}; + +struct _xen_pvcalls_poll { + uint64_t id; +}; + +struct _xen_pvcalls_dummy { + uint8_t dummy[8]; +}; + +struct xen_pvcalls_response { + uint32_t req_id; + uint32_t cmd; + int32_t ret; + uint32_t pad; + union { + struct _xen_pvcalls_socket socket; + struct _xen_pvcalls_connect connect; + struct _xen_pvcalls_release release; + struct _xen_pvcalls_bind bind; + struct _xen_pvcalls_listen listen; + struct _xen_pvcalls_accept accept; + struct _xen_pvcalls_poll poll; + struct _xen_pvcalls_dummy dummy; + } u; +}; + +union xen_pvcalls_sring_entry { + struct 
xen_pvcalls_request req; + struct xen_pvcalls_response rsp; +}; + +struct xen_pvcalls_sring { + RING_IDX req_prod; + RING_IDX req_event; + RING_IDX rsp_prod; + RING_IDX rsp_event; + uint8_t pad[48]; + union xen_pvcalls_sring_entry ring[1]; +}; + +struct xen_pvcalls_back_ring { + RING_IDX rsp_prod_pvt; + RING_IDX req_cons; + unsigned int nr_ents; + struct xen_pvcalls_sring *sring; +}; + +struct pvcalls_back_global { + struct list_head frontends; + struct semaphore frontends_lock; +}; + +struct pvcalls_fedata { + struct list_head list; + struct xenbus_device *dev; + struct xen_pvcalls_sring *sring; + struct xen_pvcalls_back_ring ring; + int irq; + struct list_head socket_mappings; + struct xarray socketpass_mappings; + struct semaphore socket_lock; +}; + +struct pvcalls_ioworker { + struct work_struct register_work; + struct workqueue_struct *wq; +}; + +struct sockpass_mapping; + +struct sock_mapping { + struct list_head list; + struct pvcalls_fedata *fedata; + struct sockpass_mapping *sockpass; + struct socket *sock; + uint64_t id; + grant_ref_t ref; + struct pvcalls_data_intf *ring; + void *bytes; + struct pvcalls_data data; + uint32_t ring_order; + int irq; + atomic_t read; + atomic_t write; + atomic_t io; + atomic_t release; + atomic_t eoi; + void (*saved_data_ready)(struct sock *); + struct pvcalls_ioworker ioworker; +}; + +struct sockpass_mapping { + struct list_head list; + struct pvcalls_fedata *fedata; + struct socket *sock; + uint64_t id; + struct xen_pvcalls_request reqcopy; + spinlock_t copy_lock; + struct workqueue_struct *wq; + struct work_struct register_work; + void (*saved_data_ready)(struct sock *); +}; + +struct ww_class { + atomic_long_t stamp; + struct lock_class_key acquire_key; + struct lock_class_key mutex_key; + const char *acquire_name; + const char *mutex_name; + unsigned int is_wait_die; +}; + +enum regulator_type { + REGULATOR_VOLTAGE = 0, + REGULATOR_CURRENT = 1, +}; + +struct regulator_config; + +struct regulator_ops; + +struct regulator_desc { + const char *name; + const char *supply_name; + const char *of_match; + const char *regulators_node; + int (*of_parse_cb)(struct device_node *, const struct regulator_desc *, struct regulator_config *); + int id; + unsigned int continuous_voltage_range: 1; + unsigned int n_voltages; + unsigned int n_current_limits; + const struct regulator_ops *ops; + int irq; + enum regulator_type type; + struct module *owner; + unsigned int min_uV; + unsigned int uV_step; + unsigned int linear_min_sel; + int fixed_uV; + unsigned int ramp_delay; + int min_dropout_uV; + const struct linear_range *linear_ranges; + const unsigned int *linear_range_selectors; + int n_linear_ranges; + const unsigned int *volt_table; + const unsigned int *curr_table; + unsigned int vsel_range_reg; + unsigned int vsel_range_mask; + unsigned int vsel_reg; + unsigned int vsel_mask; + unsigned int vsel_step; + unsigned int csel_reg; + unsigned int csel_mask; + unsigned int apply_reg; + unsigned int apply_bit; + unsigned int enable_reg; + unsigned int enable_mask; + unsigned int enable_val; + unsigned int disable_val; + bool enable_is_inverted; + unsigned int bypass_reg; + unsigned int bypass_mask; + unsigned int bypass_val_on; + unsigned int bypass_val_off; + unsigned int active_discharge_on; + unsigned int active_discharge_off; + unsigned int active_discharge_mask; + unsigned int active_discharge_reg; + unsigned int soft_start_reg; + unsigned int soft_start_mask; + unsigned int soft_start_val_on; + unsigned int pull_down_reg; + unsigned int pull_down_mask; + 
unsigned int pull_down_val_on; + unsigned int enable_time; + unsigned int off_on_delay; + unsigned int poll_enabled_time; + unsigned int (*of_map_mode)(unsigned int); +}; + +struct pre_voltage_change_data { + long unsigned int old_uV; + long unsigned int min_uV; + long unsigned int max_uV; +}; + +struct regulator_voltage { + int min_uV; + int max_uV; +}; + +struct regulator_dev; + +struct regulator { + struct device *dev; + struct list_head list; + unsigned int always_on: 1; + unsigned int bypass: 1; + unsigned int device_link: 1; + int uA_load; + unsigned int enable_count; + unsigned int deferred_disables; + struct regulator_voltage voltage[5]; + const char *supply_name; + struct device_attribute dev_attr; + struct regulator_dev *rdev; + struct dentry *debugfs; +}; + +struct regulator_coupler { + struct list_head list; + int (*attach_regulator)(struct regulator_coupler *, struct regulator_dev *); + int (*detach_regulator)(struct regulator_coupler *, struct regulator_dev *); + int (*balance_voltage)(struct regulator_coupler *, struct regulator_dev *, suspend_state_t); +}; + +struct coupling_desc { + struct regulator_dev **coupled_rdevs; + struct regulator_coupler *coupler; + int n_resolved; + int n_coupled; +}; + +struct regulator_enable_gpio; + +struct regulator_dev { + const struct regulator_desc *desc; + int exclusive; + u32 use_count; + u32 open_count; + u32 bypass_count; + struct list_head list; + struct list_head consumer_list; + struct coupling_desc coupling_desc; + struct blocking_notifier_head notifier; + struct ww_mutex mutex; + struct task_struct *mutex_owner; + int ref_cnt; + struct module *owner; + struct device dev; + struct regulation_constraints *constraints; + struct regulator *supply; + const char *supply_name; + struct regmap *regmap; + struct delayed_work disable_work; + void *reg_data; + struct dentry *debugfs; + struct regulator_enable_gpio *ena_pin; + unsigned int ena_gpio_state: 1; + unsigned int is_switch: 1; + long unsigned int last_off_jiffy; +}; + +enum regulator_status { + REGULATOR_STATUS_OFF = 0, + REGULATOR_STATUS_ON = 1, + REGULATOR_STATUS_ERROR = 2, + REGULATOR_STATUS_FAST = 3, + REGULATOR_STATUS_NORMAL = 4, + REGULATOR_STATUS_IDLE = 5, + REGULATOR_STATUS_STANDBY = 6, + REGULATOR_STATUS_BYPASS = 7, + REGULATOR_STATUS_UNDEFINED = 8, +}; + +struct regulator_ops { + int (*list_voltage)(struct regulator_dev *, unsigned int); + int (*set_voltage)(struct regulator_dev *, int, int, unsigned int *); + int (*map_voltage)(struct regulator_dev *, int, int); + int (*set_voltage_sel)(struct regulator_dev *, unsigned int); + int (*get_voltage)(struct regulator_dev *); + int (*get_voltage_sel)(struct regulator_dev *); + int (*set_current_limit)(struct regulator_dev *, int, int); + int (*get_current_limit)(struct regulator_dev *); + int (*set_input_current_limit)(struct regulator_dev *, int); + int (*set_over_current_protection)(struct regulator_dev *); + int (*set_active_discharge)(struct regulator_dev *, bool); + int (*enable)(struct regulator_dev *); + int (*disable)(struct regulator_dev *); + int (*is_enabled)(struct regulator_dev *); + int (*set_mode)(struct regulator_dev *, unsigned int); + unsigned int (*get_mode)(struct regulator_dev *); + int (*get_error_flags)(struct regulator_dev *, unsigned int *); + int (*enable_time)(struct regulator_dev *); + int (*set_ramp_delay)(struct regulator_dev *, int); + int (*set_voltage_time)(struct regulator_dev *, int, int); + int (*set_voltage_time_sel)(struct regulator_dev *, unsigned int, unsigned int); + int 
(*set_soft_start)(struct regulator_dev *); + int (*get_status)(struct regulator_dev *); + unsigned int (*get_optimum_mode)(struct regulator_dev *, int, int, int); + int (*set_load)(struct regulator_dev *, int); + int (*set_bypass)(struct regulator_dev *, bool); + int (*get_bypass)(struct regulator_dev *, bool *); + int (*set_suspend_voltage)(struct regulator_dev *, int); + int (*set_suspend_enable)(struct regulator_dev *); + int (*set_suspend_disable)(struct regulator_dev *); + int (*set_suspend_mode)(struct regulator_dev *, unsigned int); + int (*resume)(struct regulator_dev *); + int (*set_pull_down)(struct regulator_dev *); +}; + +struct regulator_config { + struct device *dev; + const struct regulator_init_data *init_data; + void *driver_data; + struct device_node *of_node; + struct regmap *regmap; + struct gpio_desc *ena_gpiod; +}; + +struct regulator_enable_gpio { + struct list_head list; + struct gpio_desc *gpiod; + u32 enable_count; + u32 request_count; +}; + +enum regulator_active_discharge { + REGULATOR_ACTIVE_DISCHARGE_DEFAULT = 0, + REGULATOR_ACTIVE_DISCHARGE_DISABLE = 1, + REGULATOR_ACTIVE_DISCHARGE_ENABLE = 2, +}; + +struct trace_event_raw_regulator_basic { + struct trace_entry ent; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_regulator_range { + struct trace_entry ent; + u32 __data_loc_name; + int min; + int max; + char __data[0]; +}; + +struct trace_event_raw_regulator_value { + struct trace_entry ent; + u32 __data_loc_name; + unsigned int val; + char __data[0]; +}; + +struct trace_event_data_offsets_regulator_basic { + u32 name; +}; + +struct trace_event_data_offsets_regulator_range { + u32 name; +}; + +struct trace_event_data_offsets_regulator_value { + u32 name; +}; + +typedef void (*btf_trace_regulator_enable)(void *, const char *); + +typedef void (*btf_trace_regulator_enable_delay)(void *, const char *); + +typedef void (*btf_trace_regulator_enable_complete)(void *, const char *); + +typedef void (*btf_trace_regulator_disable)(void *, const char *); + +typedef void (*btf_trace_regulator_disable_complete)(void *, const char *); + +typedef void (*btf_trace_regulator_bypass_enable)(void *, const char *); + +typedef void (*btf_trace_regulator_bypass_enable_complete)(void *, const char *); + +typedef void (*btf_trace_regulator_bypass_disable)(void *, const char *); + +typedef void (*btf_trace_regulator_bypass_disable_complete)(void *, const char *); + +typedef void (*btf_trace_regulator_set_voltage)(void *, const char *, int, int); + +typedef void (*btf_trace_regulator_set_voltage_complete)(void *, const char *, unsigned int); + +enum regulator_get_type { + NORMAL_GET = 0, + EXCLUSIVE_GET = 1, + OPTIONAL_GET = 2, + MAX_GET_TYPE = 3, +}; + +struct regulator_map { + struct list_head list; + const char *dev_name; + const char *supply; + struct regulator_dev *regulator; +}; + +struct regulator_supply_alias { + struct list_head list; + struct device *src_dev; + const char *src_supply; + struct device *alias_dev; + const char *alias_supply; +}; + +struct summary_data { + struct seq_file *s; + struct regulator_dev *parent; + int level; +}; + +struct summary_lock_data { + struct ww_acquire_ctx *ww_ctx; + struct regulator_dev **new_contended_rdev; + struct regulator_dev **old_contended_rdev; +}; + +struct fixed_voltage_config { + const char *supply_name; + const char *input_supply; + int microvolts; + unsigned int startup_delay; + unsigned int off_on_delay; + unsigned int enabled_at_boot: 1; + struct regulator_init_data *init_data; +}; + +struct 
fixed_regulator_data { + struct fixed_voltage_config cfg; + struct regulator_init_data init_data; + struct platform_device pdev; +}; + +struct regulator_bulk_devres { + struct regulator_bulk_data *consumers; + int num_consumers; +}; + +struct regulator_supply_alias_match { + struct device *dev; + const char *id; +}; + +struct regulator_notifier_match { + struct regulator *regulator; + struct notifier_block *nb; +}; + +struct of_regulator_match { + const char *name; + void *driver_data; + struct regulator_init_data *init_data; + struct device_node *of_node; + const struct regulator_desc *desc; +}; + +struct devm_of_regulator_matches { + struct of_regulator_match *matches; + unsigned int num_matches; +}; + +struct fixed_voltage_data { + struct regulator_desc desc; + struct regulator_dev *dev; + struct clk *enable_clock; + unsigned int clk_enable_counter; +}; + +struct fixed_dev_type { + bool has_enable_clock; +}; + +struct gpio_regulator_state { + int value; + int gpios; +}; + +struct gpio_regulator_config { + const char *supply_name; + unsigned int enabled_at_boot: 1; + unsigned int startup_delay; + enum gpiod_flags *gflags; + int ngpios; + struct gpio_regulator_state *states; + int nr_states; + enum regulator_type type; + struct regulator_init_data *init_data; +}; + +struct gpio_regulator_data { + struct regulator_desc desc; + struct gpio_desc **gpiods; + int nr_gpios; + struct gpio_regulator_state *states; + int nr_states; + int state; +}; + +struct reset_control_lookup { + struct list_head list; + const char *provider; + unsigned int index; + const char *dev_id; + const char *con_id; +}; + +struct reset_control___2 { + struct reset_controller_dev *rcdev; + struct list_head list; + unsigned int id; + struct kref refcnt; + bool acquired; + bool shared; + bool array; + atomic_t deassert_count; + atomic_t triggered_count; +}; + +struct reset_control_array { + struct reset_control___2 base; + unsigned int num_rstcs; + struct reset_control___2 *rstc[0]; +}; + +enum hi6220_reset_ctrl_type { + PERIPHERAL = 0, + MEDIA = 1, + AO = 2, +}; + +struct hi6220_reset_data { + struct reset_controller_dev rc_dev; + struct regmap *regmap; +}; + +struct hi3660_reset_controller { + struct reset_controller_dev rst; + struct regmap *map; +}; + +enum mrq_reset_commands { + CMD_RESET_ASSERT = 1, + CMD_RESET_DEASSERT = 2, + CMD_RESET_MODULE = 3, + CMD_RESET_GET_MAX_ID = 4, + CMD_RESET_MAX = 5, +}; + +struct mrq_reset_request { + uint32_t cmd; + uint32_t reset_id; +}; + +struct meson_reset_param { + int reg_count; + int level_offset; +}; + +struct meson_reset { + void *reg_base; + const struct meson_reset_param *param; + struct reset_controller_dev rcdev; + spinlock_t lock; +}; + +struct reset_simple_devdata { + u32 reg_offset; + u32 nr_resets; + bool active_low; + bool status_active_low; +}; + +struct serial_struct32 { + compat_int_t type; + compat_int_t line; + compat_uint_t port; + compat_int_t irq; + compat_int_t flags; + compat_int_t xmit_fifo_size; + compat_int_t custom_divisor; + compat_int_t baud_base; + short unsigned int close_delay; + char io_type; + char reserved_char; + compat_int_t hub6; + short unsigned int closing_wait; + short unsigned int closing_wait2; + compat_uint_t iomem_base; + short unsigned int iomem_reg_shift; + unsigned int port_high; + compat_int_t reserved; +}; + +struct n_tty_data { + size_t read_head; + size_t commit_head; + size_t canon_head; + size_t echo_head; + size_t echo_commit; + size_t echo_mark; + long unsigned int char_map[4]; + long unsigned int overrun_time; + int 
num_overrun; + bool no_room; + unsigned char lnext: 1; + unsigned char erasing: 1; + unsigned char raw: 1; + unsigned char real_raw: 1; + unsigned char icanon: 1; + unsigned char push: 1; + char read_buf[4096]; + long unsigned int read_flags[64]; + unsigned char echo_buf[4096]; + size_t read_tail; + size_t line_start; + unsigned int column; + unsigned int canon_column; + size_t echo_tail; + struct mutex atomic_read_lock; + struct mutex output_lock; +}; + +enum { + ERASE = 0, + WERASE = 1, + KILL = 2, +}; + +struct termios { + tcflag_t c_iflag; + tcflag_t c_oflag; + tcflag_t c_cflag; + tcflag_t c_lflag; + cc_t c_line; + cc_t c_cc[19]; +}; + +struct termios2 { + tcflag_t c_iflag; + tcflag_t c_oflag; + tcflag_t c_cflag; + tcflag_t c_lflag; + cc_t c_line; + cc_t c_cc[19]; + speed_t c_ispeed; + speed_t c_ospeed; +}; + +struct termio { + short unsigned int c_iflag; + short unsigned int c_oflag; + short unsigned int c_cflag; + short unsigned int c_lflag; + unsigned char c_line; + unsigned char c_cc[8]; +}; + +struct ldsem_waiter { + struct list_head list; + struct task_struct *task; +}; + +struct pts_fs_info___2; + +struct tty_audit_buf { + struct mutex mutex; + dev_t dev; + unsigned int icanon: 1; + size_t valid; + unsigned char *data; +}; + +struct input_id { + __u16 bustype; + __u16 vendor; + __u16 product; + __u16 version; +}; + +struct input_absinfo { + __s32 value; + __s32 minimum; + __s32 maximum; + __s32 fuzz; + __s32 flat; + __s32 resolution; +}; + +struct input_keymap_entry { + __u8 flags; + __u8 len; + __u16 index; + __u32 keycode; + __u8 scancode[32]; +}; + +struct ff_replay { + __u16 length; + __u16 delay; +}; + +struct ff_trigger { + __u16 button; + __u16 interval; +}; + +struct ff_envelope { + __u16 attack_length; + __u16 attack_level; + __u16 fade_length; + __u16 fade_level; +}; + +struct ff_constant_effect { + __s16 level; + struct ff_envelope envelope; +}; + +struct ff_ramp_effect { + __s16 start_level; + __s16 end_level; + struct ff_envelope envelope; +}; + +struct ff_condition_effect { + __u16 right_saturation; + __u16 left_saturation; + __s16 right_coeff; + __s16 left_coeff; + __u16 deadband; + __s16 center; +}; + +struct ff_periodic_effect { + __u16 waveform; + __u16 period; + __s16 magnitude; + __s16 offset; + __u16 phase; + struct ff_envelope envelope; + __u32 custom_len; + __s16 *custom_data; +}; + +struct ff_rumble_effect { + __u16 strong_magnitude; + __u16 weak_magnitude; +}; + +struct ff_effect { + __u16 type; + __s16 id; + __u16 direction; + struct ff_trigger trigger; + struct ff_replay replay; + union { + struct ff_constant_effect constant; + struct ff_ramp_effect ramp; + struct ff_periodic_effect periodic; + struct ff_condition_effect condition[2]; + struct ff_rumble_effect rumble; + } u; +}; + +struct input_device_id { + kernel_ulong_t flags; + __u16 bustype; + __u16 vendor; + __u16 product; + __u16 version; + kernel_ulong_t evbit[1]; + kernel_ulong_t keybit[12]; + kernel_ulong_t relbit[1]; + kernel_ulong_t absbit[1]; + kernel_ulong_t mscbit[1]; + kernel_ulong_t ledbit[1]; + kernel_ulong_t sndbit[1]; + kernel_ulong_t ffbit[2]; + kernel_ulong_t swbit[1]; + kernel_ulong_t propbit[1]; + kernel_ulong_t driver_info; +}; + +struct input_value { + __u16 type; + __u16 code; + __s32 value; +}; + +enum input_clock_type { + INPUT_CLK_REAL = 0, + INPUT_CLK_MONO = 1, + INPUT_CLK_BOOT = 2, + INPUT_CLK_MAX = 3, +}; + +struct ff_device; + +struct input_dev_poller; + +struct input_mt; + +struct input_handle; + +struct input_dev { + const char *name; + const char *phys; + const char 
*uniq; + struct input_id id; + long unsigned int propbit[1]; + long unsigned int evbit[1]; + long unsigned int keybit[12]; + long unsigned int relbit[1]; + long unsigned int absbit[1]; + long unsigned int mscbit[1]; + long unsigned int ledbit[1]; + long unsigned int sndbit[1]; + long unsigned int ffbit[2]; + long unsigned int swbit[1]; + unsigned int hint_events_per_packet; + unsigned int keycodemax; + unsigned int keycodesize; + void *keycode; + int (*setkeycode)(struct input_dev *, const struct input_keymap_entry *, unsigned int *); + int (*getkeycode)(struct input_dev *, struct input_keymap_entry *); + struct ff_device *ff; + struct input_dev_poller *poller; + unsigned int repeat_key; + struct timer_list timer; + int rep[2]; + struct input_mt *mt; + struct input_absinfo *absinfo; + long unsigned int key[12]; + long unsigned int led[1]; + long unsigned int snd[1]; + long unsigned int sw[1]; + int (*open)(struct input_dev *); + void (*close)(struct input_dev *); + int (*flush)(struct input_dev *, struct file *); + int (*event)(struct input_dev *, unsigned int, unsigned int, int); + struct input_handle *grab; + spinlock_t event_lock; + struct mutex mutex; + unsigned int users; + bool going_away; + struct device dev; + struct list_head h_list; + struct list_head node; + unsigned int num_vals; + unsigned int max_vals; + struct input_value *vals; + bool devres_managed; + ktime_t timestamp[3]; +}; + +struct ff_device { + int (*upload)(struct input_dev *, struct ff_effect *, struct ff_effect *); + int (*erase)(struct input_dev *, int); + int (*playback)(struct input_dev *, int, int); + void (*set_gain)(struct input_dev *, u16); + void (*set_autocenter)(struct input_dev *, u16); + void (*destroy)(struct ff_device *); + void *private; + long unsigned int ffbit[2]; + struct mutex mutex; + int max_effects; + struct ff_effect *effects; + struct file *effect_owners[0]; +}; + +struct input_handler; + +struct input_handle { + void *private; + int open; + const char *name; + struct input_dev *dev; + struct input_handler *handler; + struct list_head d_node; + struct list_head h_node; +}; + +struct input_handler { + void *private; + void (*event)(struct input_handle *, unsigned int, unsigned int, int); + void (*events)(struct input_handle *, const struct input_value *, unsigned int); + bool (*filter)(struct input_handle *, unsigned int, unsigned int, int); + bool (*match)(struct input_handler *, struct input_dev *); + int (*connect)(struct input_handler *, struct input_dev *, const struct input_device_id *); + void (*disconnect)(struct input_handle *); + void (*start)(struct input_handle *); + bool legacy_minors; + int minor; + const char *name; + const struct input_device_id *id_table; + struct list_head h_list; + struct list_head node; +}; + +struct sysrq_state { + struct input_handle handle; + struct work_struct reinject_work; + long unsigned int key_down[12]; + unsigned int alt; + unsigned int alt_use; + unsigned int shift; + unsigned int shift_use; + bool active; + bool need_reinject; + bool reinjecting; + bool reset_canceled; + bool reset_requested; + long unsigned int reset_keybit[12]; + int reset_seq_len; + int reset_seq_cnt; + int reset_seq_version; + struct timer_list keyreset_timer; +}; + +struct consolefontdesc { + short unsigned int charcount; + short unsigned int charheight; + char *chardata; +}; + +struct unipair { + short unsigned int unicode; + short unsigned int fontpos; +}; + +struct unimapdesc { + short unsigned int entry_ct; + struct unipair *entries; +}; + +struct kbd_repeat { + int 
delay; + int period; +}; + +struct console_font_op { + unsigned int op; + unsigned int flags; + unsigned int width; + unsigned int height; + unsigned int charcount; + unsigned char *data; +}; + +struct vt_stat { + short unsigned int v_active; + short unsigned int v_signal; + short unsigned int v_state; +}; + +struct vt_sizes { + short unsigned int v_rows; + short unsigned int v_cols; + short unsigned int v_scrollsize; +}; + +struct vt_consize { + short unsigned int v_rows; + short unsigned int v_cols; + short unsigned int v_vlin; + short unsigned int v_clin; + short unsigned int v_vcol; + short unsigned int v_ccol; +}; + +struct vt_event { + unsigned int event; + unsigned int oldev; + unsigned int newev; + unsigned int pad[4]; +}; + +struct vt_setactivate { + unsigned int console; + struct vt_mode mode; +}; + +struct vt_spawn_console { + spinlock_t lock; + struct pid *pid; + int sig; +}; + +struct vt_event_wait { + struct list_head list; + struct vt_event event; + int done; +}; + +struct compat_consolefontdesc { + short unsigned int charcount; + short unsigned int charheight; + compat_caddr_t chardata; +}; + +struct compat_console_font_op { + compat_uint_t op; + compat_uint_t flags; + compat_uint_t width; + compat_uint_t height; + compat_uint_t charcount; + compat_caddr_t data; +}; + +struct compat_unimapdesc { + short unsigned int entry_ct; + compat_caddr_t entries; +}; + +struct vt_notifier_param { + struct vc_data *vc; + unsigned int c; +}; + +struct vcs_poll_data { + struct notifier_block notifier; + unsigned int cons_num; + int event; + wait_queue_head_t waitq; + struct fasync_struct *fasync; +}; + +struct tiocl_selection { + short unsigned int xs; + short unsigned int ys; + short unsigned int xe; + short unsigned int ye; + short unsigned int sel_mode; +}; + +struct vc_selection { + struct mutex lock; + struct vc_data *cons; + char *buffer; + unsigned int buf_len; + volatile int start; + int end; +}; + +enum led_brightness { + LED_OFF = 0, + LED_ON = 1, + LED_HALF = 127, + LED_FULL = 255, +}; + +struct led_hw_trigger_type { + int dummy; +}; + +struct led_pattern; + +struct led_trigger; + +struct led_classdev { + const char *name; + enum led_brightness brightness; + enum led_brightness max_brightness; + int flags; + long unsigned int work_flags; + void (*brightness_set)(struct led_classdev *, enum led_brightness); + int (*brightness_set_blocking)(struct led_classdev *, enum led_brightness); + enum led_brightness (*brightness_get)(struct led_classdev *); + int (*blink_set)(struct led_classdev *, long unsigned int *, long unsigned int *); + int (*pattern_set)(struct led_classdev *, struct led_pattern *, u32, int); + int (*pattern_clear)(struct led_classdev *); + struct device *dev; + const struct attribute_group **groups; + struct list_head node; + const char *default_trigger; + long unsigned int blink_delay_on; + long unsigned int blink_delay_off; + struct timer_list blink_timer; + int blink_brightness; + int new_blink_brightness; + void (*flash_resume)(struct led_classdev *); + struct work_struct set_brightness_work; + int delayed_set_value; + struct rw_semaphore trigger_lock; + struct led_trigger *trigger; + struct list_head trig_list; + void *trigger_data; + bool activated; + struct led_hw_trigger_type *trigger_type; + struct mutex led_access; +}; + +struct led_pattern { + u32 delta_t; + int brightness; +}; + +struct led_trigger { + const char *name; + int (*activate)(struct led_classdev *); + void (*deactivate)(struct led_classdev *); + struct led_hw_trigger_type *trigger_type; + 
rwlock_t leddev_list_lock; + struct list_head led_cdevs; + struct list_head next_trig; + const struct attribute_group **groups; +}; + +struct keyboard_notifier_param { + struct vc_data *vc; + int down; + int shift; + int ledstate; + unsigned int value; +}; + +struct kbd_struct { + unsigned char lockstate; + unsigned char slockstate; + unsigned char ledmode: 1; + unsigned char ledflagstate: 4; + char: 3; + unsigned char default_ledflagstate: 4; + unsigned char kbdmode: 3; + char: 1; + unsigned char modeflags: 5; +}; + +struct kbentry { + unsigned char kb_table; + unsigned char kb_index; + short unsigned int kb_value; +}; + +struct kbsentry { + unsigned char kb_func; + unsigned char kb_string[512]; +}; + +struct kbdiacr { + unsigned char diacr; + unsigned char base; + unsigned char result; +}; + +struct kbdiacrs { + unsigned int kb_cnt; + struct kbdiacr kbdiacr[256]; +}; + +struct kbdiacruc { + unsigned int diacr; + unsigned int base; + unsigned int result; +}; + +struct kbdiacrsuc { + unsigned int kb_cnt; + struct kbdiacruc kbdiacruc[256]; +}; + +struct kbkeycode { + unsigned int scancode; + unsigned int keycode; +}; + +typedef void k_handler_fn(struct vc_data *, unsigned char, char); + +typedef void fn_handler_fn(struct vc_data *); + +struct getset_keycode_data { + struct input_keymap_entry ke; + int error; +}; + +struct kbd_led_trigger { + struct led_trigger trigger; + unsigned int mask; +}; + +struct uni_pagedir { + u16 **uni_pgdir[32]; + long unsigned int refcount; + long unsigned int sum; + unsigned char *inverse_translations[4]; + u16 *inverse_trans_unicode; +}; + +typedef uint32_t char32_t; + +struct uni_screen { + char32_t *lines[0]; +}; + +struct con_driver { + const struct consw *con; + const char *desc; + struct device *dev; + int node; + int first; + int last; + int flag; +}; + +enum { + blank_off = 0, + blank_normal_wait = 1, + blank_vesa_wait = 2, +}; + +enum { + EPecma = 0, + EPdec = 1, + EPeq = 2, + EPgt = 3, + EPlt = 4, +}; + +struct rgb { + u8 r; + u8 g; + u8 b; +}; + +enum { + ESnormal = 0, + ESesc = 1, + ESsquare = 2, + ESgetpars = 3, + ESfunckey = 4, + EShash = 5, + ESsetG0 = 6, + ESsetG1 = 7, + ESpercent = 8, + EScsiignore = 9, + ESnonstd = 10, + ESpalette = 11, + ESosc = 12, +}; + +struct interval { + uint32_t first; + uint32_t last; +}; + +struct vc_draw_region { + long unsigned int from; + long unsigned int to; + int x; +}; + +struct hv_ops; + +struct hvc_struct { + struct tty_port port; + spinlock_t lock; + int index; + int do_wakeup; + char *outbuf; + int outbuf_size; + int n_outbuf; + uint32_t vtermno; + const struct hv_ops *ops; + int irq_requested; + int data; + struct winsize ws; + struct work_struct tty_resize; + struct list_head next; + long unsigned int flags; +}; + +struct hv_ops { + int (*get_chars)(uint32_t, char *, int); + int (*put_chars)(uint32_t, const char *, int); + int (*flush)(uint32_t, bool); + int (*notifier_add)(struct hvc_struct *, int); + void (*notifier_del)(struct hvc_struct *, int); + void (*notifier_hangup)(struct hvc_struct *, int); + int (*tiocmget)(struct hvc_struct *); + int (*tiocmset)(struct hvc_struct *, unsigned int, unsigned int); + void (*dtr_rts)(struct hvc_struct *, int); +}; + +struct earlycon_device { + struct console *con; + struct uart_port port; + char options[16]; + unsigned int baud; +}; + +struct earlycon_id { + char name[15]; + char name_term; + char compatible[128]; + int (*setup)(struct earlycon_device *, const char *); +}; + +typedef uint32_t XENCONS_RING_IDX; + +struct xencons_interface { + char in[1024]; + char 
out[2048]; + XENCONS_RING_IDX in_cons; + XENCONS_RING_IDX in_prod; + XENCONS_RING_IDX out_cons; + XENCONS_RING_IDX out_prod; +}; + +struct xencons_info { + struct list_head list; + struct xenbus_device *xbdev; + struct xencons_interface *intf; + unsigned int evtchn; + struct hvc_struct *hvc; + int irq; + int vtermno; + grant_ref_t gntref; +}; + +struct uart_driver { + struct module *owner; + const char *driver_name; + const char *dev_name; + int major; + int minor; + int nr; + struct console *cons; + struct uart_state *state; + struct tty_driver *tty_driver; +}; + +struct uart_match { + struct uart_port *port; + struct uart_driver *driver; +}; + +enum hwparam_type { + hwparam_ioport = 0, + hwparam_iomem = 1, + hwparam_ioport_or_iomem = 2, + hwparam_irq = 3, + hwparam_dma = 4, + hwparam_dma_addr = 5, + hwparam_other = 6, +}; + +enum { + PLAT8250_DEV_LEGACY = 4294967295, + PLAT8250_DEV_PLATFORM = 0, + PLAT8250_DEV_PLATFORM1 = 1, + PLAT8250_DEV_PLATFORM2 = 2, + PLAT8250_DEV_FOURPORT = 3, + PLAT8250_DEV_ACCENT = 4, + PLAT8250_DEV_BOCA = 5, + PLAT8250_DEV_EXAR_ST16C554 = 6, + PLAT8250_DEV_HUB6 = 7, + PLAT8250_DEV_AU1X00 = 8, + PLAT8250_DEV_SM501 = 9, +}; + +struct uart_8250_port; + +struct uart_8250_ops { + int (*setup_irq)(struct uart_8250_port *); + void (*release_irq)(struct uart_8250_port *); +}; + +struct mctrl_gpios; + +struct uart_8250_dma; + +struct uart_8250_em485; + +struct uart_8250_port { + struct uart_port port; + struct timer_list timer; + struct list_head list; + u32 capabilities; + short unsigned int bugs; + bool fifo_bug; + unsigned int tx_loadsz; + unsigned char acr; + unsigned char fcr; + unsigned char ier; + unsigned char lcr; + unsigned char mcr; + unsigned char mcr_mask; + unsigned char mcr_force; + unsigned char cur_iotype; + unsigned int rpm_tx_active; + unsigned char canary; + unsigned char probe; + struct mctrl_gpios *gpios; + unsigned char lsr_saved_flags; + unsigned char msr_saved_flags; + struct uart_8250_dma *dma; + const struct uart_8250_ops *ops; + int (*dl_read)(struct uart_8250_port *); + void (*dl_write)(struct uart_8250_port *, int); + struct uart_8250_em485 *em485; + void (*rs485_start_tx)(struct uart_8250_port *); + void (*rs485_stop_tx)(struct uart_8250_port *); + struct delayed_work overrun_backoff; + u32 overrun_backoff_time_ms; +}; + +struct uart_8250_em485 { + struct hrtimer start_tx_timer; + struct hrtimer stop_tx_timer; + struct hrtimer *active_timer; + struct uart_8250_port *port; + unsigned int tx_stopped: 1; +}; + +struct uart_8250_dma { + int (*tx_dma)(struct uart_8250_port *); + int (*rx_dma)(struct uart_8250_port *); + dma_filter_fn fn; + void *rx_param; + void *tx_param; + struct dma_slave_config rxconf; + struct dma_slave_config txconf; + struct dma_chan *rxchan; + struct dma_chan *txchan; + phys_addr_t rx_dma_addr; + phys_addr_t tx_dma_addr; + dma_addr_t rx_addr; + dma_addr_t tx_addr; + dma_cookie_t rx_cookie; + dma_cookie_t tx_cookie; + void *rx_buf; + size_t rx_size; + size_t tx_size; + unsigned char tx_running; + unsigned char tx_err; + unsigned char rx_running; +}; + +struct old_serial_port { + unsigned int uart; + unsigned int baud_base; + unsigned int port; + unsigned int irq; + upf_t flags; + unsigned char io_type; + unsigned char *iomem_base; + short unsigned int iomem_reg_shift; +}; + +struct irq_info___2 { + struct hlist_node node; + int irq; + spinlock_t lock; + struct list_head *head; +}; + +struct serial8250_config { + const char *name; + short unsigned int fifo_size; + short unsigned int tx_loadsz; + unsigned char fcr; + 
unsigned char rxtrig_bytes[4]; + unsigned int flags; +}; + +struct dw8250_port_data { + int line; + struct uart_8250_dma dma; + u8 dlf_size; +}; + +struct pciserial_board { + unsigned int flags; + unsigned int num_ports; + unsigned int base_baud; + unsigned int uart_offset; + unsigned int reg_shift; + unsigned int first_offset; +}; + +struct serial_private; + +struct pci_serial_quirk { + u32 vendor; + u32 device; + u32 subvendor; + u32 subdevice; + int (*probe)(struct pci_dev *); + int (*init)(struct pci_dev *); + int (*setup)(struct serial_private *, const struct pciserial_board *, struct uart_8250_port *, int); + void (*exit)(struct pci_dev *); +}; + +struct serial_private { + struct pci_dev *dev; + unsigned int nr; + struct pci_serial_quirk *quirk; + const struct pciserial_board *board; + int line[0]; +}; + +struct f815xxa_data { + spinlock_t lock; + int idx; +}; + +struct timedia_struct { + int num; + const short unsigned int *ids; +}; + +struct quatech_feature { + u16 devid; + bool amcc; +}; + +enum pci_board_num_t { + pbn_default = 0, + pbn_b0_1_115200 = 1, + pbn_b0_2_115200 = 2, + pbn_b0_4_115200 = 3, + pbn_b0_5_115200 = 4, + pbn_b0_8_115200 = 5, + pbn_b0_1_921600 = 6, + pbn_b0_2_921600 = 7, + pbn_b0_4_921600 = 8, + pbn_b0_2_1130000 = 9, + pbn_b0_4_1152000 = 10, + pbn_b0_4_1250000 = 11, + pbn_b0_2_1843200 = 12, + pbn_b0_4_1843200 = 13, + pbn_b0_1_4000000 = 14, + pbn_b0_bt_1_115200 = 15, + pbn_b0_bt_2_115200 = 16, + pbn_b0_bt_4_115200 = 17, + pbn_b0_bt_8_115200 = 18, + pbn_b0_bt_1_460800 = 19, + pbn_b0_bt_2_460800 = 20, + pbn_b0_bt_4_460800 = 21, + pbn_b0_bt_1_921600 = 22, + pbn_b0_bt_2_921600 = 23, + pbn_b0_bt_4_921600 = 24, + pbn_b0_bt_8_921600 = 25, + pbn_b1_1_115200 = 26, + pbn_b1_2_115200 = 27, + pbn_b1_4_115200 = 28, + pbn_b1_8_115200 = 29, + pbn_b1_16_115200 = 30, + pbn_b1_1_921600 = 31, + pbn_b1_2_921600 = 32, + pbn_b1_4_921600 = 33, + pbn_b1_8_921600 = 34, + pbn_b1_2_1250000 = 35, + pbn_b1_bt_1_115200 = 36, + pbn_b1_bt_2_115200 = 37, + pbn_b1_bt_4_115200 = 38, + pbn_b1_bt_2_921600 = 39, + pbn_b1_1_1382400 = 40, + pbn_b1_2_1382400 = 41, + pbn_b1_4_1382400 = 42, + pbn_b1_8_1382400 = 43, + pbn_b2_1_115200 = 44, + pbn_b2_2_115200 = 45, + pbn_b2_4_115200 = 46, + pbn_b2_8_115200 = 47, + pbn_b2_1_460800 = 48, + pbn_b2_4_460800 = 49, + pbn_b2_8_460800 = 50, + pbn_b2_16_460800 = 51, + pbn_b2_1_921600 = 52, + pbn_b2_4_921600 = 53, + pbn_b2_8_921600 = 54, + pbn_b2_8_1152000 = 55, + pbn_b2_bt_1_115200 = 56, + pbn_b2_bt_2_115200 = 57, + pbn_b2_bt_4_115200 = 58, + pbn_b2_bt_2_921600 = 59, + pbn_b2_bt_4_921600 = 60, + pbn_b3_2_115200 = 61, + pbn_b3_4_115200 = 62, + pbn_b3_8_115200 = 63, + pbn_b4_bt_2_921600 = 64, + pbn_b4_bt_4_921600 = 65, + pbn_b4_bt_8_921600 = 66, + pbn_panacom = 67, + pbn_panacom2 = 68, + pbn_panacom4 = 69, + pbn_plx_romulus = 70, + pbn_endrun_2_4000000 = 71, + pbn_oxsemi = 72, + pbn_oxsemi_1_4000000 = 73, + pbn_oxsemi_2_4000000 = 74, + pbn_oxsemi_4_4000000 = 75, + pbn_oxsemi_8_4000000 = 76, + pbn_intel_i960 = 77, + pbn_sgi_ioc3 = 78, + pbn_computone_4 = 79, + pbn_computone_6 = 80, + pbn_computone_8 = 81, + pbn_sbsxrsio = 82, + pbn_pasemi_1682M = 83, + pbn_ni8430_2 = 84, + pbn_ni8430_4 = 85, + pbn_ni8430_8 = 86, + pbn_ni8430_16 = 87, + pbn_ADDIDATA_PCIe_1_3906250 = 88, + pbn_ADDIDATA_PCIe_2_3906250 = 89, + pbn_ADDIDATA_PCIe_4_3906250 = 90, + pbn_ADDIDATA_PCIe_8_3906250 = 91, + pbn_ce4100_1_115200 = 92, + pbn_omegapci = 93, + pbn_NETMOS9900_2s_115200 = 94, + pbn_brcm_trumanage = 95, + pbn_fintek_4 = 96, + pbn_fintek_8 = 97, + pbn_fintek_12 = 98, + pbn_fintek_F81504A = 
99, + pbn_fintek_F81508A = 100, + pbn_fintek_F81512A = 101, + pbn_wch382_2 = 102, + pbn_wch384_4 = 103, + pbn_wch384_8 = 104, + pbn_pericom_PI7C9X7951 = 105, + pbn_pericom_PI7C9X7952 = 106, + pbn_pericom_PI7C9X7954 = 107, + pbn_pericom_PI7C9X7958 = 108, + pbn_sunix_pci_1s = 109, + pbn_sunix_pci_2s = 110, + pbn_sunix_pci_4s = 111, + pbn_sunix_pci_8s = 112, + pbn_sunix_pci_16s = 113, + pbn_moxa8250_2p = 114, + pbn_moxa8250_4p = 115, + pbn_moxa8250_8p = 116, +}; + +struct exar8250_platform { + int (*rs485_config)(struct uart_port *, struct serial_rs485 *); + int (*register_gpio)(struct pci_dev *, struct uart_8250_port *); +}; + +struct exar8250; + +struct exar8250_board { + unsigned int num_ports; + unsigned int reg_shift; + int (*setup)(struct exar8250 *, struct pci_dev *, struct uart_8250_port *, int); + void (*exit)(struct pci_dev *); +}; + +struct exar8250 { + unsigned int nr; + struct exar8250_board *board; + void *virt; + int line[0]; +}; + +struct bcm2835aux_data { + struct clk *clk; + int line; + u32 cntl; +}; + +struct fsl8250_data { + int line; +}; + +enum dma_rx_status { + DMA_RX_START = 0, + DMA_RX_RUNNING = 1, + DMA_RX_SHUTDOWN = 2, +}; + +struct mtk8250_data { + int line; + unsigned int rx_pos; + unsigned int clk_count; + struct clk *uart_clk; + struct clk *bus_clk; + struct uart_8250_dma *dma; + enum dma_rx_status rx_status; + int rx_wakeup_irq; +}; + +enum { + MTK_UART_FC_NONE = 0, + MTK_UART_FC_SW = 1, + MTK_UART_FC_HW = 2, +}; + +struct tegra_uart { + struct clk *clk; + struct reset_control *rst; + int line; +}; + +struct of_serial_info { + struct clk *clk; + struct reset_control *rst; + int type; + int line; +}; + +enum amba_vendor { + AMBA_VENDOR_ARM = 65, + AMBA_VENDOR_ST = 128, + AMBA_VENDOR_QCOM = 81, + AMBA_VENDOR_LSI = 182, + AMBA_VENDOR_LINUX = 254, +}; + +struct amba_pl011_data { + bool (*dma_filter)(struct dma_chan *, void *); + void *dma_rx_param; + void *dma_tx_param; + bool dma_rx_poll_enable; + unsigned int dma_rx_poll_rate; + unsigned int dma_rx_poll_timeout; + void (*init)(); + void (*exit)(); +}; + +enum { + REG_DR = 0, + REG_ST_DMAWM = 1, + REG_ST_TIMEOUT = 2, + REG_FR = 3, + REG_LCRH_RX = 4, + REG_LCRH_TX = 5, + REG_IBRD = 6, + REG_FBRD = 7, + REG_CR = 8, + REG_IFLS = 9, + REG_IMSC = 10, + REG_RIS = 11, + REG_MIS = 12, + REG_ICR = 13, + REG_DMACR = 14, + REG_ST_XFCR = 15, + REG_ST_XON1 = 16, + REG_ST_XON2 = 17, + REG_ST_XOFF1 = 18, + REG_ST_XOFF2 = 19, + REG_ST_ITCR = 20, + REG_ST_ITIP = 21, + REG_ST_ABCR = 22, + REG_ST_ABIMSC = 23, + REG_ARRAY_SIZE = 24, +}; + +struct vendor_data { + const u16 *reg_offset; + unsigned int ifls; + unsigned int fr_busy; + unsigned int fr_dsr; + unsigned int fr_cts; + unsigned int fr_ri; + unsigned int inv_fr; + bool access_32b; + bool oversampling; + bool dma_threshold; + bool cts_event_workaround; + bool always_enabled; + bool fixed_options; + unsigned int (*get_fifosize)(struct amba_device *); +}; + +struct pl011_sgbuf { + struct scatterlist sg; + char *buf; +}; + +struct pl011_dmarx_data { + struct dma_chan *chan; + struct completion complete; + bool use_buf_b; + struct pl011_sgbuf sgbuf_a; + struct pl011_sgbuf sgbuf_b; + dma_cookie_t cookie; + bool running; + struct timer_list timer; + unsigned int last_residue; + long unsigned int last_jiffies; + bool auto_poll_rate; + unsigned int poll_rate; + unsigned int poll_timeout; +}; + +struct pl011_dmatx_data { + struct dma_chan *chan; + struct scatterlist sg; + char *buf; + bool queued; +}; + +struct uart_amba_port { + struct uart_port port; + const u16 *reg_offset; + struct 
clk *clk; + const struct vendor_data *vendor; + unsigned int dmacr; + unsigned int im; + unsigned int old_status; + unsigned int fifosize; + unsigned int old_cr; + unsigned int fixed_baud; + char type[12]; + bool using_tx_dma; + bool using_rx_dma; + struct pl011_dmarx_data dmarx; + struct pl011_dmatx_data dmatx; + bool dma_probed; +}; + +struct s3c2410_uartcfg { + unsigned char hwport; + unsigned char unused; + short unsigned int flags; + upf_t uart_flags; + unsigned int clk_sel; + unsigned int has_fracval; + long unsigned int ucon; + long unsigned int ulcon; + long unsigned int ufcon; +}; + +struct s3c24xx_uart_info { + char *name; + unsigned int type; + unsigned int fifosize; + long unsigned int rx_fifomask; + long unsigned int rx_fifoshift; + long unsigned int rx_fifofull; + long unsigned int tx_fifomask; + long unsigned int tx_fifoshift; + long unsigned int tx_fifofull; + unsigned int def_clk_sel; + long unsigned int num_clks; + long unsigned int clksel_mask; + long unsigned int clksel_shift; + unsigned int has_divslot: 1; +}; + +struct s3c24xx_serial_drv_data { + struct s3c24xx_uart_info *info; + struct s3c2410_uartcfg *def_cfg; + unsigned int fifosize[4]; +}; + +struct s3c24xx_uart_dma { + unsigned int rx_chan_id; + unsigned int tx_chan_id; + struct dma_slave_config rx_conf; + struct dma_slave_config tx_conf; + struct dma_chan *rx_chan; + struct dma_chan *tx_chan; + dma_addr_t rx_addr; + dma_addr_t tx_addr; + dma_cookie_t rx_cookie; + dma_cookie_t tx_cookie; + char *rx_buf; + dma_addr_t tx_transfer_addr; + size_t rx_size; + size_t tx_size; + struct dma_async_tx_descriptor *tx_desc; + struct dma_async_tx_descriptor *rx_desc; + int tx_bytes_requested; + int rx_bytes_requested; +}; + +struct s3c24xx_uart_port { + unsigned char rx_claimed; + unsigned char tx_claimed; + unsigned char rx_enabled; + unsigned char tx_enabled; + unsigned int pm_level; + long unsigned int baudclk_rate; + unsigned int min_dma_size; + unsigned int rx_irq; + unsigned int tx_irq; + unsigned int tx_in_progress; + unsigned int tx_mode; + unsigned int rx_mode; + struct s3c24xx_uart_info *info; + struct clk *clk; + struct clk *baudclk; + struct uart_port port; + struct s3c24xx_serial_drv_data *drv_data; + struct s3c2410_uartcfg *cfg; + struct s3c24xx_uart_dma *dma; +}; + +struct samsung_early_console_data { + u32 txfull_mask; +}; + +enum { + UARTDM_1P1 = 1, + UARTDM_1P2 = 2, + UARTDM_1P3 = 3, + UARTDM_1P4 = 4, +}; + +struct msm_dma { + struct dma_chan *chan; + enum dma_data_direction dir; + dma_addr_t phys; + unsigned char *virt; + dma_cookie_t cookie; + u32 enable_bit; + unsigned int count; + struct dma_async_tx_descriptor *desc; +}; + +struct msm_port { + struct uart_port uart; + char name[16]; + struct clk *clk; + struct clk *pclk; + unsigned int imr; + int is_uartdm; + unsigned int old_snap_state; + bool break_detected; + struct msm_dma tx_dma; + struct msm_dma rx_dma; +}; + +struct msm_baud_map { + u16 divisor; + u8 code; + u8 rxstale; +}; + +struct cdns_uart { + struct uart_port *port; + struct clk *uartclk; + struct clk *pclk; + struct uart_driver *cdns_uart_driver; + unsigned int baud; + struct notifier_block clk_rate_change_nb; + u32 quirks; + bool cts_override; +}; + +struct cdns_platform_data { + u32 quirks; +}; + +enum { + UART_IRQ_SUM = 0, + UART_RX_IRQ = 0, + UART_TX_IRQ = 1, + UART_IRQ_COUNT = 2, +}; + +struct uart_regs_layout { + unsigned int rbr; + unsigned int tsh; + unsigned int ctrl; + unsigned int intr; +}; + +struct uart_flags { + unsigned int ctrl_tx_rdy_int; + unsigned int ctrl_rx_rdy_int; + 
unsigned int stat_tx_rdy; + unsigned int stat_rx_rdy; +}; + +struct mvebu_uart_driver_data { + bool is_ext; + struct uart_regs_layout regs; + struct uart_flags flags; +}; + +struct mvebu_uart_pm_regs { + unsigned int rbr; + unsigned int tsh; + unsigned int ctrl; + unsigned int intr; + unsigned int stat; + unsigned int brdv; + unsigned int osamp; +}; + +struct mvebu_uart { + struct uart_port *port; + struct clk *clk; + int irq[2]; + unsigned char *nb; + struct mvebu_uart_driver_data *data; + struct mvebu_uart_pm_regs pm_regs; +}; + +enum mctrl_gpio_idx { + UART_GPIO_CTS = 0, + UART_GPIO_DSR = 1, + UART_GPIO_DCD = 2, + UART_GPIO_RNG = 3, + UART_GPIO_RI = 3, + UART_GPIO_RTS = 4, + UART_GPIO_DTR = 5, + UART_GPIO_MAX = 6, +}; + +struct mctrl_gpios___2 { + struct uart_port *port; + struct gpio_desc *gpio[6]; + int irq[6]; + unsigned int mctrl_prev; + bool mctrl_on; +}; + +struct serdev_device; + +struct serdev_device_ops { + int (*receive_buf)(struct serdev_device *, const unsigned char *, size_t); + void (*write_wakeup)(struct serdev_device *); +}; + +struct serdev_controller; + +struct serdev_device { + struct device dev; + int nr; + struct serdev_controller *ctrl; + const struct serdev_device_ops *ops; + struct completion write_comp; + struct mutex write_lock; +}; + +struct serdev_controller_ops; + +struct serdev_controller { + struct device dev; + unsigned int nr; + struct serdev_device *serdev; + const struct serdev_controller_ops *ops; +}; + +struct serdev_device_driver { + struct device_driver driver; + int (*probe)(struct serdev_device *); + void (*remove)(struct serdev_device *); +}; + +enum serdev_parity { + SERDEV_PARITY_NONE = 0, + SERDEV_PARITY_EVEN = 1, + SERDEV_PARITY_ODD = 2, +}; + +struct serdev_controller_ops { + int (*write_buf)(struct serdev_controller *, const unsigned char *, size_t); + void (*write_flush)(struct serdev_controller *); + int (*write_room)(struct serdev_controller *); + int (*open)(struct serdev_controller *); + void (*close)(struct serdev_controller *); + void (*set_flow_control)(struct serdev_controller *, bool); + int (*set_parity)(struct serdev_controller *, enum serdev_parity); + unsigned int (*set_baudrate)(struct serdev_controller *, unsigned int); + void (*wait_until_sent)(struct serdev_controller *, long int); + int (*get_tiocm)(struct serdev_controller *); + int (*set_tiocm)(struct serdev_controller *, unsigned int, unsigned int); +}; + +struct acpi_serdev_lookup { + acpi_handle device_handle; + acpi_handle controller_handle; + int n; + int index; +}; + +struct serport { + struct tty_port *port; + struct tty_struct *tty; + struct tty_driver *tty_drv; + int tty_idx; + long unsigned int flags; +}; + +struct memdev { + const char *name; + umode_t mode; + const struct file_operations *fops; + fmode_t fmode; +}; + +struct timer_rand_state { + cycles_t last_time; + long int last_delta; + long int last_delta2; +}; + +struct trace_event_raw_add_device_randomness { + struct trace_entry ent; + int bytes; + long unsigned int IP; + char __data[0]; +}; + +struct trace_event_raw_random__mix_pool_bytes { + struct trace_entry ent; + const char *pool_name; + int bytes; + long unsigned int IP; + char __data[0]; +}; + +struct trace_event_raw_credit_entropy_bits { + struct trace_entry ent; + const char *pool_name; + int bits; + int entropy_count; + long unsigned int IP; + char __data[0]; +}; + +struct trace_event_raw_push_to_pool { + struct trace_entry ent; + const char *pool_name; + int pool_bits; + int input_bits; + char __data[0]; +}; + +struct 
trace_event_raw_debit_entropy { + struct trace_entry ent; + const char *pool_name; + int debit_bits; + char __data[0]; +}; + +struct trace_event_raw_add_input_randomness { + struct trace_entry ent; + int input_bits; + char __data[0]; +}; + +struct trace_event_raw_add_disk_randomness { + struct trace_entry ent; + dev_t dev; + int input_bits; + char __data[0]; +}; + +struct trace_event_raw_xfer_secondary_pool { + struct trace_entry ent; + const char *pool_name; + int xfer_bits; + int request_bits; + int pool_entropy; + int input_entropy; + char __data[0]; +}; + +struct trace_event_raw_random__get_random_bytes { + struct trace_entry ent; + int nbytes; + long unsigned int IP; + char __data[0]; +}; + +struct trace_event_raw_random__extract_entropy { + struct trace_entry ent; + const char *pool_name; + int nbytes; + int entropy_count; + long unsigned int IP; + char __data[0]; +}; + +struct trace_event_raw_random_read { + struct trace_entry ent; + int got_bits; + int need_bits; + int pool_left; + int input_left; + char __data[0]; +}; + +struct trace_event_raw_urandom_read { + struct trace_entry ent; + int got_bits; + int pool_left; + int input_left; + char __data[0]; +}; + +struct trace_event_raw_prandom_u32 { + struct trace_entry ent; + unsigned int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_add_device_randomness {}; + +struct trace_event_data_offsets_random__mix_pool_bytes {}; + +struct trace_event_data_offsets_credit_entropy_bits {}; + +struct trace_event_data_offsets_push_to_pool {}; + +struct trace_event_data_offsets_debit_entropy {}; + +struct trace_event_data_offsets_add_input_randomness {}; + +struct trace_event_data_offsets_add_disk_randomness {}; + +struct trace_event_data_offsets_xfer_secondary_pool {}; + +struct trace_event_data_offsets_random__get_random_bytes {}; + +struct trace_event_data_offsets_random__extract_entropy {}; + +struct trace_event_data_offsets_random_read {}; + +struct trace_event_data_offsets_urandom_read {}; + +struct trace_event_data_offsets_prandom_u32 {}; + +typedef void (*btf_trace_add_device_randomness)(void *, int, long unsigned int); + +typedef void (*btf_trace_mix_pool_bytes)(void *, const char *, int, long unsigned int); + +typedef void (*btf_trace_mix_pool_bytes_nolock)(void *, const char *, int, long unsigned int); + +typedef void (*btf_trace_credit_entropy_bits)(void *, const char *, int, int, long unsigned int); + +typedef void (*btf_trace_push_to_pool)(void *, const char *, int, int); + +typedef void (*btf_trace_debit_entropy)(void *, const char *, int); + +typedef void (*btf_trace_add_input_randomness)(void *, int); + +typedef void (*btf_trace_add_disk_randomness)(void *, dev_t, int); + +typedef void (*btf_trace_xfer_secondary_pool)(void *, const char *, int, int, int, int); + +typedef void (*btf_trace_get_random_bytes)(void *, int, long unsigned int); + +typedef void (*btf_trace_get_random_bytes_arch)(void *, int, long unsigned int); + +typedef void (*btf_trace_extract_entropy)(void *, const char *, int, int, long unsigned int); + +typedef void (*btf_trace_extract_entropy_user)(void *, const char *, int, int, long unsigned int); + +typedef void (*btf_trace_random_read)(void *, int, int, int, int); + +typedef void (*btf_trace_urandom_read)(void *, int, int, int); + +typedef void (*btf_trace_prandom_u32)(void *, unsigned int); + +struct poolinfo { + int poolbitshift; + int poolwords; + int poolbytes; + int poolfracbits; + int tap1; + int tap2; + int tap3; + int tap4; + int tap5; +}; + +struct crng_state { + __u32 state[16]; + long 
unsigned int init_time; + spinlock_t lock; +}; + +struct entropy_store { + const struct poolinfo *poolinfo; + __u32 *pool; + const char *name; + spinlock_t lock; + short unsigned int add_ptr; + short unsigned int input_rotate; + int entropy_count; + unsigned int initialized: 1; + unsigned int last_data_init: 1; + __u8 last_data[10]; +}; + +struct fast_pool { + __u32 pool[4]; + long unsigned int last; + short unsigned int reg_idx; + unsigned char count; +}; + +struct batched_entropy { + union { + u64 entropy_u64[8]; + u32 entropy_u32[16]; + }; + unsigned int position; + spinlock_t batch_lock; +}; + +struct ttyprintk_port { + struct tty_port port; + spinlock_t spinlock; +}; + +enum tpm2_startup_types { + TPM2_SU_CLEAR = 0, + TPM2_SU_STATE = 1, +}; + +enum tpm_chip_flags { + TPM_CHIP_FLAG_TPM2 = 2, + TPM_CHIP_FLAG_IRQ = 4, + TPM_CHIP_FLAG_VIRTUAL = 8, + TPM_CHIP_FLAG_HAVE_TIMEOUTS = 16, + TPM_CHIP_FLAG_ALWAYS_POWERED = 32, + TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED = 64, +}; + +enum tpm2_structures { + TPM2_ST_NO_SESSIONS = 32769, + TPM2_ST_SESSIONS = 32770, +}; + +enum tpm2_return_codes { + TPM2_RC_SUCCESS = 0, + TPM2_RC_HASH = 131, + TPM2_RC_HANDLE = 139, + TPM2_RC_INITIALIZE = 256, + TPM2_RC_FAILURE = 257, + TPM2_RC_DISABLED = 288, + TPM2_RC_COMMAND_CODE = 323, + TPM2_RC_TESTING = 2314, + TPM2_RC_REFERENCE_H0 = 2320, + TPM2_RC_RETRY = 2338, +}; + +struct tpm_header { + __be16 tag; + __be32 length; + union { + __be32 ordinal; + __be32 return_code; + }; +} __attribute__((packed)); + +struct file_priv { + struct tpm_chip *chip; + struct tpm_space *space; + struct mutex buffer_mutex; + struct timer_list user_read_timer; + struct work_struct timeout_work; + struct work_struct async_work; + wait_queue_head_t async_wait; + ssize_t response_length; + bool response_read; + bool command_enqueued; + u8 data_buffer[4096]; +}; + +enum TPM_OPS_FLAGS { + TPM_OPS_AUTO_STARTUP = 1, +}; + +enum tpm2_timeouts { + TPM2_TIMEOUT_A = 750, + TPM2_TIMEOUT_B = 2000, + TPM2_TIMEOUT_C = 200, + TPM2_TIMEOUT_D = 30, + TPM2_DURATION_SHORT = 20, + TPM2_DURATION_MEDIUM = 750, + TPM2_DURATION_LONG = 2000, + TPM2_DURATION_LONG_LONG = 300000, + TPM2_DURATION_DEFAULT = 120000, +}; + +enum tpm2_command_codes { + TPM2_CC_FIRST = 287, + TPM2_CC_HIERARCHY_CONTROL = 289, + TPM2_CC_HIERARCHY_CHANGE_AUTH = 297, + TPM2_CC_CREATE_PRIMARY = 305, + TPM2_CC_SEQUENCE_COMPLETE = 318, + TPM2_CC_SELF_TEST = 323, + TPM2_CC_STARTUP = 324, + TPM2_CC_SHUTDOWN = 325, + TPM2_CC_NV_READ = 334, + TPM2_CC_CREATE = 339, + TPM2_CC_LOAD = 343, + TPM2_CC_SEQUENCE_UPDATE = 348, + TPM2_CC_UNSEAL = 350, + TPM2_CC_CONTEXT_LOAD = 353, + TPM2_CC_CONTEXT_SAVE = 354, + TPM2_CC_FLUSH_CONTEXT = 357, + TPM2_CC_VERIFY_SIGNATURE = 375, + TPM2_CC_GET_CAPABILITY = 378, + TPM2_CC_GET_RANDOM = 379, + TPM2_CC_PCR_READ = 382, + TPM2_CC_PCR_EXTEND = 386, + TPM2_CC_EVENT_SEQUENCE_COMPLETE = 389, + TPM2_CC_HASH_SEQUENCE_START = 390, + TPM2_CC_CREATE_LOADED = 401, + TPM2_CC_LAST = 403, +}; + +struct tpm_buf { + unsigned int flags; + u8 *data; +}; + +enum tpm_timeout { + TPM_TIMEOUT = 5, + TPM_TIMEOUT_RETRY = 100, + TPM_TIMEOUT_RANGE_US = 300, + TPM_TIMEOUT_POLL = 1, + TPM_TIMEOUT_USECS_MIN = 100, + TPM_TIMEOUT_USECS_MAX = 500, +}; + +enum tpm_buf_flags { + TPM_BUF_OVERFLOW = 1, +}; + +struct stclear_flags_t { + __be16 tag; + u8 deactivated; + u8 disableForceClear; + u8 physicalPresence; + u8 physicalPresenceLock; + u8 bGlobalLock; +} __attribute__((packed)); + +struct tpm1_version { + u8 major; + u8 minor; + u8 rev_major; + u8 rev_minor; +}; + +struct tpm1_version2 { + __be16 tag; 
+ struct tpm1_version version; +}; + +struct timeout_t { + __be32 a; + __be32 b; + __be32 c; + __be32 d; +}; + +struct duration_t { + __be32 tpm_short; + __be32 tpm_medium; + __be32 tpm_long; +}; + +struct permanent_flags_t { + __be16 tag; + u8 disable; + u8 ownership; + u8 deactivated; + u8 readPubek; + u8 disableOwnerClear; + u8 allowMaintenance; + u8 physicalPresenceLifetimeLock; + u8 physicalPresenceHWEnable; + u8 physicalPresenceCMDEnable; + u8 CEKPUsed; + u8 TPMpost; + u8 TPMpostLock; + u8 FIPS; + u8 operator; + u8 enableRevokeEK; + u8 nvLocked; + u8 readSRKPub; + u8 tpmEstablished; + u8 maintenanceDone; + u8 disableFullDALogicInfo; +}; + +typedef union { + struct permanent_flags_t perm_flags; + struct stclear_flags_t stclear_flags; + __u8 owned; + __be32 num_pcrs; + struct tpm1_version version1; + struct tpm1_version2 version2; + __be32 manufacturer_id; + struct timeout_t timeout; + struct duration_t duration; +} cap_t; + +enum tpm_capabilities { + TPM_CAP_FLAG = 4, + TPM_CAP_PROP = 5, + TPM_CAP_VERSION_1_1 = 6, + TPM_CAP_VERSION_1_2 = 26, +}; + +enum tpm_sub_capabilities { + TPM_CAP_PROP_PCR = 257, + TPM_CAP_PROP_MANUFACTURER = 259, + TPM_CAP_FLAG_PERM = 264, + TPM_CAP_FLAG_VOL = 265, + TPM_CAP_PROP_OWNER = 273, + TPM_CAP_PROP_TIS_TIMEOUT = 277, + TPM_CAP_PROP_TIS_DURATION = 288, +}; + +struct tpm1_get_random_out { + __be32 rng_data_len; + u8 rng_data[128]; +}; + +enum tpm2_const { + TPM2_PLATFORM_PCR = 24, + TPM2_PCR_SELECT_MIN = 3, +}; + +enum tpm2_permanent_handles { + TPM2_RS_PW = 1073741833, +}; + +enum tpm2_capabilities { + TPM2_CAP_HANDLES = 1, + TPM2_CAP_COMMANDS = 2, + TPM2_CAP_PCRS = 5, + TPM2_CAP_TPM_PROPERTIES = 6, +}; + +enum tpm2_properties { + TPM_PT_TOTAL_COMMANDS = 297, +}; + +enum tpm2_cc_attrs { + TPM2_CC_ATTR_CHANDLES = 25, + TPM2_CC_ATTR_RHANDLE = 28, +}; + +struct tpm2_hash { + unsigned int crypto_id; + unsigned int tpm_id; +}; + +struct tpm2_pcr_read_out { + __be32 update_cnt; + __be32 pcr_selects_cnt; + __be16 hash_alg; + u8 pcr_select_size; + u8 pcr_select[3]; + __be32 digests_cnt; + __be16 digest_size; + u8 digest[0]; +} __attribute__((packed)); + +struct tpm2_null_auth_area { + __be32 handle; + __be16 nonce_size; + u8 attributes; + __be16 auth_size; +} __attribute__((packed)); + +struct tpm2_get_random_out { + __be16 size; + u8 buffer[128]; +}; + +struct tpm2_get_cap_out { + u8 more_data; + __be32 subcap_id; + __be32 property_cnt; + __be32 property_id; + __be32 value; +} __attribute__((packed)); + +struct tpm2_pcr_selection { + __be16 hash_alg; + u8 size_of_select; + u8 pcr_select[3]; +}; + +struct tpmrm_priv { + struct file_priv priv; + struct tpm_space space; +}; + +enum tpm2_handle_types { + TPM2_HT_HMAC_SESSION = 33554432, + TPM2_HT_POLICY_SESSION = 50331648, + TPM2_HT_TRANSIENT = 2147483648, +}; + +struct tpm2_context { + __be64 sequence; + __be32 saved_handle; + __be32 hierarchy; + __be16 blob_size; +} __attribute__((packed)); + +struct tpm2_cap_handles { + u8 more_data; + __be32 capability; + __be32 count; + __be32 handles[0]; +} __attribute__((packed)); + +struct tpm_readpubek_out { + u8 algorithm[4]; + u8 encscheme[2]; + u8 sigscheme[2]; + __be32 paramsize; + u8 parameters[12]; + __be32 keysize; + u8 modulus[256]; + u8 checksum[20]; +}; + +struct tcpa_event { + u32 pcr_index; + u32 event_type; + u8 pcr_value[20]; + u32 event_size; + u8 event_data[0]; +}; + +enum tcpa_event_types { + PREBOOT = 0, + POST_CODE = 1, + UNUSED = 2, + NO_ACTION = 3, + SEPARATOR = 4, + ACTION = 5, + EVENT_TAG = 6, + SCRTM_CONTENTS = 7, + SCRTM_VERSION = 8, + 
CPU_MICROCODE = 9, + PLATFORM_CONFIG_FLAGS = 10, + TABLE_OF_DEVICES = 11, + COMPACT_HASH = 12, + IPL = 13, + IPL_PARTITION_DATA = 14, + NONHOST_CODE = 15, + NONHOST_CONFIG = 16, + NONHOST_INFO = 17, +}; + +struct tcpa_pc_event { + u32 event_id; + u32 event_size; + u8 event_data[0]; +}; + +enum tcpa_pc_event_ids { + SMBIOS = 1, + BIS_CERT = 2, + POST_BIOS_ROM = 3, + ESCD = 4, + CMOS = 5, + NVRAM = 6, + OPTION_ROM_EXEC = 7, + OPTION_ROM_CONFIG = 8, + OPTION_ROM_MICROCODE = 10, + S_CRTM_VERSION = 11, + S_CRTM_CONTENTS = 12, + POST_CONTENTS = 13, + HOST_TABLE_OF_DEVICES = 14, +}; + +struct tcg_efi_specid_event_algs { + u16 alg_id; + u16 digest_size; +}; + +struct tcg_efi_specid_event_head { + u8 signature[16]; + u32 platform_class; + u8 spec_version_minor; + u8 spec_version_major; + u8 spec_errata; + u8 uintnsize; + u32 num_algs; + struct tcg_efi_specid_event_algs digest_sizes[0]; +}; + +struct tcg_pcr_event { + u32 pcr_idx; + u32 event_type; + u8 digest[20]; + u32 event_size; + u8 event[0]; +}; + +struct tcg_event_field { + u32 event_size; + u8 event[0]; +}; + +struct tcg_pcr_event2_head { + u32 pcr_idx; + u32 event_type; + u32 count; + struct tpm_digest digests[0]; +}; + +struct acpi_table_tpm2 { + struct acpi_table_header header; + u16 platform_class; + u16 reserved; + u64 control_address; + u32 start_method; +} __attribute__((packed)); + +struct acpi_tpm2_phy { + u8 start_method_specific[12]; + u32 log_area_minimum_length; + u64 log_area_start_address; +}; + +enum bios_platform_class { + BIOS_CLIENT = 0, + BIOS_SERVER = 1, +}; + +struct client_hdr { + u32 log_max_len; + u64 log_start_addr; +} __attribute__((packed)); + +struct server_hdr { + u16 reserved; + u64 log_max_len; + u64 log_start_addr; +} __attribute__((packed)); + +struct acpi_tcpa { + struct acpi_table_header hdr; + u16 platform_class; + union { + struct client_hdr client; + struct server_hdr server; + }; +} __attribute__((packed)); + +struct linux_efi_tpm_eventlog { + u32 size; + u32 final_events_preboot_size; + u8 version; + u8 log[0]; +}; + +struct efi_tcg2_final_events_table { + u64 version; + u64 nr_events; + u8 events[0]; +}; + +enum tis_access { + TPM_ACCESS_VALID = 128, + TPM_ACCESS_ACTIVE_LOCALITY = 32, + TPM_ACCESS_REQUEST_PENDING = 4, + TPM_ACCESS_REQUEST_USE = 2, +}; + +enum tis_status { + TPM_STS_VALID = 128, + TPM_STS_COMMAND_READY = 64, + TPM_STS_GO = 32, + TPM_STS_DATA_AVAIL = 16, + TPM_STS_DATA_EXPECT = 8, + TPM_STS_READ_ZERO = 35, +}; + +enum tis_int_flags { + TPM_GLOBAL_INT_ENABLE = 2147483648, + TPM_INTF_BURST_COUNT_STATIC = 256, + TPM_INTF_CMD_READY_INT = 128, + TPM_INTF_INT_EDGE_FALLING = 64, + TPM_INTF_INT_EDGE_RISING = 32, + TPM_INTF_INT_LEVEL_LOW = 16, + TPM_INTF_INT_LEVEL_HIGH = 8, + TPM_INTF_LOCALITY_CHANGE_INT = 4, + TPM_INTF_STS_VALID_INT = 2, + TPM_INTF_DATA_AVAIL_INT = 1, +}; + +enum tis_defaults { + TIS_MEM_LEN = 20480, + TIS_SHORT_TIMEOUT = 750, + TIS_LONG_TIMEOUT = 2000, +}; + +enum tpm_tis_flags { + TPM_TIS_ITPM_WORKAROUND = 1, +}; + +struct tpm_tis_phy_ops; + +struct tpm_tis_data { + u16 manufacturer_id; + int locality; + int irq; + bool irq_tested; + unsigned int flags; + void *ilb_base_addr; + u16 clkrun_enabled; + wait_queue_head_t int_queue; + wait_queue_head_t read_queue; + const struct tpm_tis_phy_ops *phy_ops; + short unsigned int rng_quality; +}; + +struct tpm_tis_phy_ops { + int (*read_bytes)(struct tpm_tis_data *, u32, u16, u8 *); + int (*write_bytes)(struct tpm_tis_data *, u32, u16, const u8 *); + int (*read16)(struct tpm_tis_data *, u32, u16 *); + int (*read32)(struct tpm_tis_data 
*, u32, u32 *); + int (*write32)(struct tpm_tis_data *, u32, u32); +}; + +struct tis_vendor_durations_override { + u32 did_vid; + struct tpm1_version version; + long unsigned int durations[3]; +}; + +struct tis_vendor_timeout_override { + u32 did_vid; + long unsigned int timeout_us[4]; +}; + +struct tpm_info { + struct resource res; + int irq; +}; + +struct tpm_tis_tcg_phy { + struct tpm_tis_data priv; + void *iobase; +}; + +enum crb_defaults { + CRB_ACPI_START_REVISION_ID = 1, + CRB_ACPI_START_INDEX = 1, +}; + +enum crb_loc_ctrl { + CRB_LOC_CTRL_REQUEST_ACCESS = 1, + CRB_LOC_CTRL_RELINQUISH = 2, +}; + +enum crb_loc_state { + CRB_LOC_STATE_LOC_ASSIGNED = 2, + CRB_LOC_STATE_TPM_REG_VALID_STS = 128, +}; + +enum crb_ctrl_req { + CRB_CTRL_REQ_CMD_READY = 1, + CRB_CTRL_REQ_GO_IDLE = 2, +}; + +enum crb_ctrl_sts { + CRB_CTRL_STS_ERROR = 1, + CRB_CTRL_STS_TPM_IDLE = 2, +}; + +enum crb_start { + CRB_START_INVOKE = 1, +}; + +enum crb_cancel { + CRB_CANCEL_INVOKE = 1, +}; + +struct crb_regs_head { + u32 loc_state; + u32 reserved1; + u32 loc_ctrl; + u32 loc_sts; + u8 reserved2[32]; + u64 intf_id; + u64 ctrl_ext; +}; + +struct crb_regs_tail { + u32 ctrl_req; + u32 ctrl_sts; + u32 ctrl_cancel; + u32 ctrl_start; + u32 ctrl_int_enable; + u32 ctrl_int_sts; + u32 ctrl_cmd_size; + u32 ctrl_cmd_pa_low; + u32 ctrl_cmd_pa_high; + u32 ctrl_rsp_size; + u64 ctrl_rsp_pa; +}; + +enum crb_status { + CRB_DRV_STS_COMPLETE = 1, +}; + +struct crb_priv { + u32 sm; + const char *hid; + struct crb_regs_head *regs_h; + struct crb_regs_tail *regs_t; + u8 *cmd; + u8 *rsp; + u32 cmd_size; + u32 smc_func_id; +}; + +struct tpm2_crb_smc { + u32 interrupt; + u8 interrupt_flags; + u8 op_flags; + u16 reserved2; + u32 smc_func_id; +}; + +enum io_pgtable_fmt { + ARM_32_LPAE_S1 = 0, + ARM_32_LPAE_S2 = 1, + ARM_64_LPAE_S1 = 2, + ARM_64_LPAE_S2 = 3, + ARM_V7S = 4, + ARM_MALI_LPAE = 5, + IO_PGTABLE_NUM_FMTS = 6, +}; + +struct iommu_flush_ops { + void (*tlb_flush_all)(void *); + void (*tlb_flush_walk)(long unsigned int, size_t, size_t, void *); + void (*tlb_flush_leaf)(long unsigned int, size_t, size_t, void *); + void (*tlb_add_page)(struct iommu_iotlb_gather *, long unsigned int, size_t, void *); +}; + +struct io_pgtable_cfg { + long unsigned int quirks; + long unsigned int pgsize_bitmap; + unsigned int ias; + unsigned int oas; + bool coherent_walk; + const struct iommu_flush_ops *tlb; + struct device *iommu_dev; + union { + struct { + u64 ttbr; + struct { + u32 ips: 3; + u32 tg: 2; + u32 sh: 2; + u32 orgn: 2; + u32 irgn: 2; + u32 tsz: 6; + } tcr; + u64 mair; + } arm_lpae_s1_cfg; + struct { + u64 vttbr; + struct { + u32 ps: 3; + u32 tg: 2; + u32 sh: 2; + u32 orgn: 2; + u32 irgn: 2; + u32 sl: 2; + u32 tsz: 6; + } vtcr; + } arm_lpae_s2_cfg; + struct { + u32 ttbr; + u32 tcr; + u32 nmrr; + u32 prrr; + } arm_v7s_cfg; + struct { + u64 transtab; + u64 memattr; + } arm_mali_lpae_cfg; + }; +}; + +struct io_pgtable_ops { + int (*map)(struct io_pgtable_ops *, long unsigned int, phys_addr_t, size_t, int, gfp_t); + size_t (*unmap)(struct io_pgtable_ops *, long unsigned int, size_t, struct iommu_iotlb_gather *); + phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *, long unsigned int); +}; + +enum arm_smmu_s2cr_privcfg { + S2CR_PRIVCFG_DEFAULT = 0, + S2CR_PRIVCFG_DIPAN = 1, + S2CR_PRIVCFG_UNPRIV = 2, + S2CR_PRIVCFG_PRIV = 3, +}; + +enum arm_smmu_s2cr_type { + S2CR_TYPE_TRANS = 0, + S2CR_TYPE_BYPASS = 1, + S2CR_TYPE_FAULT = 2, +}; + +enum arm_smmu_cbar_type { + CBAR_TYPE_S2_TRANS = 0, + CBAR_TYPE_S1_TRANS_S2_BYPASS = 1, + CBAR_TYPE_S1_TRANS_S2_FAULT = 
2, + CBAR_TYPE_S1_TRANS_S2_TRANS = 3, +}; + +enum arm_smmu_arch_version { + ARM_SMMU_V1 = 0, + ARM_SMMU_V1_64K = 1, + ARM_SMMU_V2 = 2, +}; + +enum arm_smmu_implementation { + GENERIC_SMMU = 0, + ARM_MMU500 = 1, + CAVIUM_SMMUV2 = 2, + QCOM_SMMUV2 = 3, +}; + +struct arm_smmu_s2cr { + struct iommu_group *group; + int count; + enum arm_smmu_s2cr_type type; + enum arm_smmu_s2cr_privcfg privcfg; + u8 cbndx; +}; + +struct arm_smmu_smr { + u16 mask; + u16 id; + bool valid; + bool pinned; +}; + +struct arm_smmu_impl; + +struct arm_smmu_cb; + +struct arm_smmu_device { + struct device *dev; + void *base; + unsigned int numpage; + unsigned int pgshift; + u32 features; + enum arm_smmu_arch_version version; + enum arm_smmu_implementation model; + const struct arm_smmu_impl *impl; + u32 num_context_banks; + u32 num_s2_context_banks; + long unsigned int context_map[2]; + struct arm_smmu_cb *cbs; + atomic_t irptndx; + u32 num_mapping_groups; + u16 streamid_mask; + u16 smr_mask_mask; + struct arm_smmu_smr *smrs; + struct arm_smmu_s2cr *s2crs; + struct mutex stream_map_mutex; + long unsigned int va_size; + long unsigned int ipa_size; + long unsigned int pa_size; + long unsigned int pgsize_bitmap; + u32 num_global_irqs; + u32 num_context_irqs; + unsigned int *irqs; + struct clk_bulk_data *clks; + int num_clks; + spinlock_t global_sync_lock; + struct iommu_device iommu; +}; + +struct arm_smmu_domain; + +struct arm_smmu_impl { + u32 (*read_reg)(struct arm_smmu_device *, int, int); + void (*write_reg)(struct arm_smmu_device *, int, int, u32); + u64 (*read_reg64)(struct arm_smmu_device *, int, int); + void (*write_reg64)(struct arm_smmu_device *, int, int, u64); + int (*cfg_probe)(struct arm_smmu_device *); + int (*reset)(struct arm_smmu_device *); + int (*init_context)(struct arm_smmu_domain *, struct io_pgtable_cfg *, struct device *); + void (*tlb_sync)(struct arm_smmu_device *, int, int, int); + int (*def_domain_type)(struct device *); + irqreturn_t (*global_fault)(int, void *); + irqreturn_t (*context_fault)(int, void *); + int (*alloc_context_bank)(struct arm_smmu_domain *, struct arm_smmu_device *, struct device *, int); + void (*write_s2cr)(struct arm_smmu_device *, int); +}; + +struct arm_smmu_cfg; + +struct arm_smmu_cb { + u64 ttbr[2]; + u32 tcr[2]; + u32 mair[2]; + struct arm_smmu_cfg *cfg; +}; + +enum arm_smmu_context_fmt { + ARM_SMMU_CTX_FMT_NONE = 0, + ARM_SMMU_CTX_FMT_AARCH64 = 1, + ARM_SMMU_CTX_FMT_AARCH32_L = 2, + ARM_SMMU_CTX_FMT_AARCH32_S = 3, +}; + +struct arm_smmu_cfg { + u8 cbndx; + u8 irptndx; + union { + u16 asid; + u16 vmid; + }; + enum arm_smmu_cbar_type cbar; + enum arm_smmu_context_fmt fmt; +}; + +enum arm_smmu_domain_stage { + ARM_SMMU_DOMAIN_S1 = 0, + ARM_SMMU_DOMAIN_S2 = 1, + ARM_SMMU_DOMAIN_NESTED = 2, + ARM_SMMU_DOMAIN_BYPASS = 3, +}; + +struct arm_smmu_domain { + struct arm_smmu_device *smmu; + struct io_pgtable_ops *pgtbl_ops; + const struct iommu_flush_ops *flush_ops; + struct arm_smmu_cfg cfg; + enum arm_smmu_domain_stage stage; + bool non_strict; + struct mutex init_mutex; + spinlock_t cb_lock; + struct iommu_domain domain; +}; + +struct arm_smmu_master_cfg { + struct arm_smmu_device *smmu; + s16 smendx[0]; +}; + +struct arm_smmu_match_data { + enum arm_smmu_arch_version version; + enum arm_smmu_implementation model; +}; + +struct cavium_smmu { + struct arm_smmu_device smmu; + u32 id_base; +}; + +struct nvidia_smmu { + struct arm_smmu_device smmu; + void *bases[2]; +}; + +struct qcom_smmu { + struct arm_smmu_device smmu; + bool bypass_quirk; + u8 bypass_cbndx; +}; + +enum 
pri_resp { + PRI_RESP_DENY = 0, + PRI_RESP_FAIL = 1, + PRI_RESP_SUCC = 2, +}; + +struct arm_smmu_cmdq_ent { + u8 opcode; + bool substream_valid; + union { + struct { + u32 sid; + u8 size; + u64 addr; + } prefetch; + struct { + u32 sid; + u32 ssid; + union { + bool leaf; + u8 span; + }; + } cfgi; + struct { + u8 num; + u8 scale; + u16 asid; + u16 vmid; + bool leaf; + u8 ttl; + u8 tg; + u64 addr; + } tlbi; + struct { + u32 sid; + u32 ssid; + u64 addr; + u8 size; + bool global; + } atc; + struct { + u32 sid; + u32 ssid; + u16 grpid; + enum pri_resp resp; + } pri; + struct { + u64 msiaddr; + } sync; + }; +}; + +struct arm_smmu_ll_queue { + union { + u64 val; + struct { + u32 prod; + u32 cons; + }; + struct { + atomic_t prod; + atomic_t cons; + } atomic; + u8 __pad[64]; + }; + u32 max_n_shift; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct arm_smmu_queue { + struct arm_smmu_ll_queue llq; + int irq; + __le64 *base; + dma_addr_t base_dma; + u64 q_base; + size_t ent_dwords; + u32 *prod_reg; + u32 *cons_reg; + long: 64; +}; + +struct arm_smmu_queue_poll { + ktime_t timeout; + unsigned int delay; + unsigned int spin_cnt; + bool wfe; +}; + +struct arm_smmu_cmdq { + struct arm_smmu_queue q; + atomic_long_t *valid_map; + atomic_t owner_prod; + atomic_t lock; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct arm_smmu_cmdq_batch { + u64 cmds[128]; + int num; +}; + +struct arm_smmu_evtq { + struct arm_smmu_queue q; + u32 max_stalls; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct arm_smmu_priq { + struct arm_smmu_queue q; +}; + +struct arm_smmu_strtab_l1_desc { + u8 span; + __le64 *l2ptr; + dma_addr_t l2ptr_dma; +}; + +struct arm_smmu_ctx_desc { + u16 asid; + u64 ttbr; + u64 tcr; + u64 mair; + refcount_t refs; + struct mm_struct *mm; +}; + +struct arm_smmu_l1_ctx_desc { + __le64 *l2ptr; + dma_addr_t l2ptr_dma; +}; + +struct arm_smmu_ctx_desc_cfg { + __le64 *cdtab; + dma_addr_t cdtab_dma; + struct arm_smmu_l1_ctx_desc *l1_desc; + unsigned int num_l1_ents; +}; + +struct arm_smmu_s1_cfg { + struct arm_smmu_ctx_desc_cfg cdcfg; + struct arm_smmu_ctx_desc cd; + u8 s1fmt; + u8 s1cdmax; +}; + +struct arm_smmu_s2_cfg { + u16 vmid; + u64 vttbr; + u64 vtcr; +}; + +struct arm_smmu_strtab_cfg { + __le64 *strtab; + dma_addr_t strtab_dma; + struct arm_smmu_strtab_l1_desc *l1_desc; + unsigned int num_l1_ents; + u64 strtab_base; + u32 strtab_base_cfg; +}; + +struct arm_smmu_device___2 { + struct device *dev; + void *base; + void *page1; + u32 features; + u32 options; + long: 64; + long: 64; + long: 64; + long: 64; + struct arm_smmu_cmdq cmdq; + struct arm_smmu_evtq evtq; + struct arm_smmu_priq priq; + int gerr_irq; + int combined_irq; + long unsigned int ias; + long unsigned int oas; + long unsigned int pgsize_bitmap; + unsigned int asid_bits; + unsigned int vmid_bits; + long unsigned int vmid_map[1024]; + unsigned int ssid_bits; + unsigned int sid_bits; + struct arm_smmu_strtab_cfg strtab_cfg; + struct iommu_device iommu; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct arm_smmu_domain___2; + +struct arm_smmu_master { + struct arm_smmu_device___2 *smmu; + struct device *dev; + struct arm_smmu_domain___2 *domain; + struct list_head domain_head; + u32 *sids; + unsigned int num_sids; + bool ats_enabled; + bool sva_enabled; + struct list_head bonds; + unsigned int ssid_bits; +}; + +struct arm_smmu_domain___2 { + struct 
arm_smmu_device___2 *smmu; + struct mutex init_mutex; + struct io_pgtable_ops *pgtbl_ops; + bool non_strict; + atomic_t nr_ats_masters; + enum arm_smmu_domain_stage stage; + union { + struct arm_smmu_s1_cfg s1_cfg; + struct arm_smmu_s2_cfg s2_cfg; + }; + struct iommu_domain domain; + struct list_head devices; + spinlock_t devices_lock; +}; + +enum arm_smmu_msi_index { + EVTQ_MSI_INDEX = 0, + GERROR_MSI_INDEX = 1, + PRIQ_MSI_INDEX = 2, + ARM_SMMU_MAX_MSIS = 3, +}; + +struct arm_smmu_option_prop { + u32 opt; + const char *prop; +}; + +struct iommu_group { + struct kobject kobj; + struct kobject *devices_kobj; + struct list_head devices; + struct mutex mutex; + struct blocking_notifier_head notifier; + void *iommu_data; + void (*iommu_data_release)(void *); + char *name; + int id; + struct iommu_domain *default_domain; + struct iommu_domain *domain; + struct list_head entry; +}; + +typedef unsigned int ioasid_t; + +enum iommu_fault_type { + IOMMU_FAULT_DMA_UNRECOV = 1, + IOMMU_FAULT_PAGE_REQ = 2, +}; + +enum iommu_inv_granularity { + IOMMU_INV_GRANU_DOMAIN = 0, + IOMMU_INV_GRANU_PASID = 1, + IOMMU_INV_GRANU_ADDR = 2, + IOMMU_INV_GRANU_NR = 3, +}; + +struct fsl_mc_obj_desc { + char type[16]; + int id; + u16 vendor; + u16 ver_major; + u16 ver_minor; + u8 irq_count; + u8 region_count; + u32 state; + char label[16]; + u16 flags; +}; + +struct fsl_mc_io; + +struct fsl_mc_device_irq; + +struct fsl_mc_resource; + +struct fsl_mc_device { + struct device dev; + u64 dma_mask; + u16 flags; + u32 icid; + u16 mc_handle; + struct fsl_mc_io *mc_io; + struct fsl_mc_obj_desc obj_desc; + struct resource *regions; + struct fsl_mc_device_irq **irqs; + struct fsl_mc_resource *resource; + struct device_link *consumer_link; + char *driver_override; +}; + +enum fsl_mc_pool_type { + FSL_MC_POOL_DPMCP = 0, + FSL_MC_POOL_DPBP = 1, + FSL_MC_POOL_DPCON = 2, + FSL_MC_POOL_IRQ = 3, + FSL_MC_NUM_POOL_TYPES = 4, +}; + +struct fsl_mc_resource_pool; + +struct fsl_mc_resource { + enum fsl_mc_pool_type type; + s32 id; + void *data; + struct fsl_mc_resource_pool *parent_pool; + struct list_head node; +}; + +struct fsl_mc_device_irq { + struct msi_desc *msi_desc; + struct fsl_mc_device *mc_dev; + u8 dev_irq_index; + struct fsl_mc_resource resource; +}; + +struct fsl_mc_io { + struct device *dev; + u16 flags; + u32 portal_size; + phys_addr_t portal_phys_addr; + void *portal_virt_addr; + struct fsl_mc_device *dpmcp_dev; + union { + struct mutex mutex; + raw_spinlock_t spinlock; + }; +}; + +struct group_device { + struct list_head list; + struct device *dev; + char *name; +}; + +struct iommu_group_attribute { + struct attribute attr; + ssize_t (*show)(struct iommu_group *, char *); + ssize_t (*store)(struct iommu_group *, const char *, size_t); +}; + +struct group_for_pci_data { + struct pci_dev *pdev; + struct iommu_group *group; +}; + +struct __group_domain_type { + struct device *dev; + unsigned int type; +}; + +struct trace_event_raw_iommu_group_event { + struct trace_entry ent; + int gid; + u32 __data_loc_device; + char __data[0]; +}; + +struct trace_event_raw_iommu_device_event { + struct trace_entry ent; + u32 __data_loc_device; + char __data[0]; +}; + +struct trace_event_raw_map { + struct trace_entry ent; + u64 iova; + u64 paddr; + size_t size; + char __data[0]; +}; + +struct trace_event_raw_unmap { + struct trace_entry ent; + u64 iova; + size_t size; + size_t unmapped_size; + char __data[0]; +}; + +struct trace_event_raw_iommu_error { + struct trace_entry ent; + u32 __data_loc_device; + u32 __data_loc_driver; + u64 iova; + 
int flags; + char __data[0]; +}; + +struct trace_event_data_offsets_iommu_group_event { + u32 device; +}; + +struct trace_event_data_offsets_iommu_device_event { + u32 device; +}; + +struct trace_event_data_offsets_map {}; + +struct trace_event_data_offsets_unmap {}; + +struct trace_event_data_offsets_iommu_error { + u32 device; + u32 driver; +}; + +typedef void (*btf_trace_add_device_to_group)(void *, int, struct device *); + +typedef void (*btf_trace_remove_device_from_group)(void *, int, struct device *); + +typedef void (*btf_trace_attach_device_to_domain)(void *, struct device *); + +typedef void (*btf_trace_detach_device_from_domain)(void *, struct device *); + +typedef void (*btf_trace_map)(void *, long unsigned int, phys_addr_t, size_t); + +typedef void (*btf_trace_unmap)(void *, long unsigned int, size_t, size_t); + +typedef void (*btf_trace_io_page_fault)(void *, struct device *, long unsigned int, int); + +struct iova { + struct rb_node node; + long unsigned int pfn_hi; + long unsigned int pfn_lo; +}; + +struct iova_magazine; + +struct iova_cpu_rcache; + +struct iova_rcache { + spinlock_t lock; + long unsigned int depot_size; + struct iova_magazine *depot[32]; + struct iova_cpu_rcache *cpu_rcaches; +}; + +struct iova_domain; + +typedef void (*iova_flush_cb)(struct iova_domain *); + +typedef void (*iova_entry_dtor)(long unsigned int); + +struct iova_fq; + +struct iova_domain { + spinlock_t iova_rbtree_lock; + struct rb_root rbroot; + struct rb_node *cached_node; + struct rb_node *cached32_node; + long unsigned int granule; + long unsigned int start_pfn; + long unsigned int dma_32bit_pfn; + long unsigned int max32_alloc_size; + struct iova_fq *fq; + atomic64_t fq_flush_start_cnt; + atomic64_t fq_flush_finish_cnt; + struct iova anchor; + struct iova_rcache rcaches[6]; + iova_flush_cb flush_cb; + iova_entry_dtor entry_dtor; + struct timer_list fq_timer; + atomic_t fq_timer_on; +}; + +struct iova_fq_entry { + long unsigned int iova_pfn; + long unsigned int pages; + long unsigned int data; + u64 counter; +}; + +struct iova_fq { + struct iova_fq_entry entries[256]; + unsigned int head; + unsigned int tail; + spinlock_t lock; +}; + +struct iommu_dma_msi_page { + struct list_head list; + dma_addr_t iova; + phys_addr_t phys; +}; + +enum iommu_dma_cookie_type { + IOMMU_DMA_IOVA_COOKIE = 0, + IOMMU_DMA_MSI_COOKIE = 1, +}; + +struct iommu_dma_cookie { + enum iommu_dma_cookie_type type; + union { + struct iova_domain iovad; + dma_addr_t msi_iova; + }; + struct list_head msi_page_list; + struct iommu_domain *fq_domain; +}; + +struct io_pgtable { + enum io_pgtable_fmt fmt; + void *cookie; + struct io_pgtable_cfg cfg; + struct io_pgtable_ops ops; +}; + +struct io_pgtable_init_fns { + struct io_pgtable * (*alloc)(struct io_pgtable_cfg *, void *); + void (*free)(struct io_pgtable *); +}; + +struct arm_lpae_io_pgtable { + struct io_pgtable iop; + int pgd_bits; + int start_level; + int bits_per_level; + void *pgd; +}; + +typedef u64 arm_lpae_iopte; + +struct iova_magazine { + long unsigned int size; + long unsigned int pfns[128]; +}; + +struct iova_cpu_rcache { + spinlock_t lock; + struct iova_magazine *loaded; + struct iova_magazine *prev; +}; + +struct of_pci_iommu_alias_info { + struct device *dev; + struct device_node *np; +}; + +struct rk_iommu_domain { + struct list_head iommus; + u32 *dt; + dma_addr_t dt_dma; + spinlock_t iommus_lock; + spinlock_t dt_lock; + struct iommu_domain domain; +}; + +struct rk_iommu { + struct device *dev; + void **bases; + int num_mmu; + int num_irq; + struct 
clk_bulk_data *clocks; + int num_clocks; + bool reset_disabled; + struct iommu_device iommu; + struct list_head node; + struct iommu_domain *domain; + struct iommu_group *group; +}; + +struct rk_iommudata { + struct device_link *link; + struct rk_iommu *iommu; +}; + +struct tegra_smmu_enable { + unsigned int reg; + unsigned int bit; +}; + +struct tegra_mc_timing { + long unsigned int rate; + u32 *emem_data; +}; + +struct tegra_mc_la { + unsigned int reg; + unsigned int shift; + unsigned int mask; + unsigned int def; +}; + +struct tegra_mc_client { + unsigned int id; + const char *name; + unsigned int swgroup; + unsigned int fifo_size; + struct tegra_smmu_enable smmu; + struct tegra_mc_la la; +}; + +struct tegra_smmu_swgroup { + const char *name; + unsigned int swgroup; + unsigned int reg; +}; + +struct tegra_smmu_group_soc { + const char *name; + const unsigned int *swgroups; + unsigned int num_swgroups; +}; + +struct tegra_smmu_soc { + const struct tegra_mc_client *clients; + unsigned int num_clients; + const struct tegra_smmu_swgroup *swgroups; + unsigned int num_swgroups; + const struct tegra_smmu_group_soc *groups; + unsigned int num_groups; + bool supports_round_robin_arbitration; + bool supports_request_limit; + unsigned int num_tlb_lines; + unsigned int num_asids; +}; + +struct tegra_mc_reset { + const char *name; + long unsigned int id; + unsigned int control; + unsigned int status; + unsigned int reset; + unsigned int bit; +}; + +struct tegra_mc; + +struct tegra_mc_reset_ops { + int (*hotreset_assert)(struct tegra_mc *, const struct tegra_mc_reset *); + int (*hotreset_deassert)(struct tegra_mc *, const struct tegra_mc_reset *); + int (*block_dma)(struct tegra_mc *, const struct tegra_mc_reset *); + bool (*dma_idling)(struct tegra_mc *, const struct tegra_mc_reset *); + int (*unblock_dma)(struct tegra_mc *, const struct tegra_mc_reset *); + int (*reset_status)(struct tegra_mc *, const struct tegra_mc_reset *); +}; + +struct gart_device; + +struct tegra_smmu; + +struct tegra_mc_soc; + +struct tegra_mc { + struct device *dev; + struct tegra_smmu *smmu; + struct gart_device *gart; + void *regs; + struct clk *clk; + int irq; + const struct tegra_mc_soc *soc; + long unsigned int tick; + struct tegra_mc_timing *timings; + unsigned int num_timings; + struct reset_controller_dev reset; + spinlock_t lock; +}; + +struct tegra_mc_soc { + const struct tegra_mc_client *clients; + unsigned int num_clients; + const long unsigned int *emem_regs; + unsigned int num_emem_regs; + unsigned int num_address_bits; + unsigned int atom_size; + u8 client_id_mask; + const struct tegra_smmu_soc *smmu; + u32 intmask; + const struct tegra_mc_reset_ops *reset_ops; + const struct tegra_mc_reset *resets; + unsigned int num_resets; +}; + +struct tegra_smmu { + void *regs; + struct device *dev; + struct tegra_mc *mc; + const struct tegra_smmu_soc *soc; + struct list_head groups; + long unsigned int pfn_mask; + long unsigned int tlb_mask; + long unsigned int *asids; + struct mutex lock; + struct list_head list; + struct dentry *debugfs; + struct iommu_device iommu; +}; + +struct tegra_smmu_group { + struct list_head list; + struct tegra_smmu *smmu; + const struct tegra_smmu_group_soc *soc; + struct iommu_group *group; + unsigned int swgroup; +}; + +struct tegra_smmu_as { + struct iommu_domain domain; + struct tegra_smmu *smmu; + unsigned int use_count; + spinlock_t lock; + u32 *count; + struct page **pts; + struct page *pd; + dma_addr_t pd_dma; + unsigned int id; + u32 attr; +}; + +struct mipi_dsi_msg { + u8 channel; 
+ u8 type; + u16 flags; + size_t tx_len; + const void *tx_buf; + size_t rx_len; + void *rx_buf; +}; + +struct mipi_dsi_packet { + size_t size; + u8 header[4]; + size_t payload_length; + const u8 *payload; +}; + +struct mipi_dsi_host; + +struct mipi_dsi_device; + +struct mipi_dsi_host_ops { + int (*attach)(struct mipi_dsi_host *, struct mipi_dsi_device *); + int (*detach)(struct mipi_dsi_host *, struct mipi_dsi_device *); + ssize_t (*transfer)(struct mipi_dsi_host *, const struct mipi_dsi_msg *); +}; + +struct mipi_dsi_host { + struct device *dev; + const struct mipi_dsi_host_ops *ops; + struct list_head list; +}; + +enum mipi_dsi_pixel_format { + MIPI_DSI_FMT_RGB888 = 0, + MIPI_DSI_FMT_RGB666 = 1, + MIPI_DSI_FMT_RGB666_PACKED = 2, + MIPI_DSI_FMT_RGB565 = 3, +}; + +struct mipi_dsi_device { + struct mipi_dsi_host *host; + struct device dev; + char name[20]; + unsigned int channel; + unsigned int lanes; + enum mipi_dsi_pixel_format format; + long unsigned int mode_flags; + long unsigned int hs_rate; + long unsigned int lp_rate; +}; + +struct mipi_dsi_device_info { + char type[20]; + u32 channel; + struct device_node *node; +}; + +enum mipi_dsi_dcs_tear_mode { + MIPI_DSI_DCS_TEAR_MODE_VBLANK = 0, + MIPI_DSI_DCS_TEAR_MODE_VHBLANK = 1, +}; + +struct mipi_dsi_driver { + struct device_driver driver; + int (*probe)(struct mipi_dsi_device *); + int (*remove)(struct mipi_dsi_device *); + void (*shutdown)(struct mipi_dsi_device *); +}; + +struct drm_dsc_picture_parameter_set { + u8 dsc_version; + u8 pps_identifier; + u8 pps_reserved; + u8 pps_3; + u8 pps_4; + u8 bits_per_pixel_low; + __be16 pic_height; + __be16 pic_width; + __be16 slice_height; + __be16 slice_width; + __be16 chunk_size; + u8 initial_xmit_delay_high; + u8 initial_xmit_delay_low; + __be16 initial_dec_delay; + u8 pps20_reserved; + u8 initial_scale_value; + __be16 scale_increment_interval; + u8 scale_decrement_interval_high; + u8 scale_decrement_interval_low; + u8 pps26_reserved; + u8 first_line_bpg_offset; + __be16 nfl_bpg_offset; + __be16 slice_bpg_offset; + __be16 initial_offset; + __be16 final_offset; + u8 flatness_min_qp; + u8 flatness_max_qp; + __be16 rc_model_size; + u8 rc_edge_factor; + u8 rc_quant_incr_limit0; + u8 rc_quant_incr_limit1; + u8 rc_tgt_offset; + u8 rc_buf_thresh[14]; + __be16 rc_range_parameters[15]; + u8 native_422_420; + u8 second_line_bpg_offset; + __be16 nsl_bpg_offset; + __be16 second_line_offset_adj; + u32 pps_long_94_reserved; + u32 pps_long_98_reserved; + u32 pps_long_102_reserved; + u32 pps_long_106_reserved; + u32 pps_long_110_reserved; + u32 pps_long_114_reserved; + u32 pps_long_118_reserved; + u32 pps_long_122_reserved; + __be16 pps_short_126_reserved; +} __attribute__((packed)); + +enum { + MIPI_DSI_V_SYNC_START = 1, + MIPI_DSI_V_SYNC_END = 17, + MIPI_DSI_H_SYNC_START = 33, + MIPI_DSI_H_SYNC_END = 49, + MIPI_DSI_COMPRESSION_MODE = 7, + MIPI_DSI_END_OF_TRANSMISSION = 8, + MIPI_DSI_COLOR_MODE_OFF = 2, + MIPI_DSI_COLOR_MODE_ON = 18, + MIPI_DSI_SHUTDOWN_PERIPHERAL = 34, + MIPI_DSI_TURN_ON_PERIPHERAL = 50, + MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM = 3, + MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM = 19, + MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM = 35, + MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM = 4, + MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM = 20, + MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM = 36, + MIPI_DSI_DCS_SHORT_WRITE = 5, + MIPI_DSI_DCS_SHORT_WRITE_PARAM = 21, + MIPI_DSI_DCS_READ = 6, + MIPI_DSI_EXECUTE_QUEUE = 22, + MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE = 55, + MIPI_DSI_NULL_PACKET = 9, + MIPI_DSI_BLANKING_PACKET = 25, + 
MIPI_DSI_GENERIC_LONG_WRITE = 41, + MIPI_DSI_DCS_LONG_WRITE = 57, + MIPI_DSI_PICTURE_PARAMETER_SET = 10, + MIPI_DSI_COMPRESSED_PIXEL_STREAM = 11, + MIPI_DSI_LOOSELY_PACKED_PIXEL_STREAM_YCBCR20 = 12, + MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR24 = 28, + MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16 = 44, + MIPI_DSI_PACKED_PIXEL_STREAM_30 = 13, + MIPI_DSI_PACKED_PIXEL_STREAM_36 = 29, + MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR12 = 61, + MIPI_DSI_PACKED_PIXEL_STREAM_16 = 14, + MIPI_DSI_PACKED_PIXEL_STREAM_18 = 30, + MIPI_DSI_PIXEL_STREAM_3BYTE_18 = 46, + MIPI_DSI_PACKED_PIXEL_STREAM_24 = 62, +}; + +enum { + MIPI_DCS_NOP = 0, + MIPI_DCS_SOFT_RESET = 1, + MIPI_DCS_GET_COMPRESSION_MODE = 3, + MIPI_DCS_GET_DISPLAY_ID = 4, + MIPI_DCS_GET_ERROR_COUNT_ON_DSI = 5, + MIPI_DCS_GET_RED_CHANNEL = 6, + MIPI_DCS_GET_GREEN_CHANNEL = 7, + MIPI_DCS_GET_BLUE_CHANNEL = 8, + MIPI_DCS_GET_DISPLAY_STATUS = 9, + MIPI_DCS_GET_POWER_MODE = 10, + MIPI_DCS_GET_ADDRESS_MODE = 11, + MIPI_DCS_GET_PIXEL_FORMAT = 12, + MIPI_DCS_GET_DISPLAY_MODE = 13, + MIPI_DCS_GET_SIGNAL_MODE = 14, + MIPI_DCS_GET_DIAGNOSTIC_RESULT = 15, + MIPI_DCS_ENTER_SLEEP_MODE = 16, + MIPI_DCS_EXIT_SLEEP_MODE = 17, + MIPI_DCS_ENTER_PARTIAL_MODE = 18, + MIPI_DCS_ENTER_NORMAL_MODE = 19, + MIPI_DCS_GET_IMAGE_CHECKSUM_RGB = 20, + MIPI_DCS_GET_IMAGE_CHECKSUM_CT = 21, + MIPI_DCS_EXIT_INVERT_MODE = 32, + MIPI_DCS_ENTER_INVERT_MODE = 33, + MIPI_DCS_SET_GAMMA_CURVE = 38, + MIPI_DCS_SET_DISPLAY_OFF = 40, + MIPI_DCS_SET_DISPLAY_ON = 41, + MIPI_DCS_SET_COLUMN_ADDRESS = 42, + MIPI_DCS_SET_PAGE_ADDRESS = 43, + MIPI_DCS_WRITE_MEMORY_START = 44, + MIPI_DCS_WRITE_LUT = 45, + MIPI_DCS_READ_MEMORY_START = 46, + MIPI_DCS_SET_PARTIAL_ROWS = 48, + MIPI_DCS_SET_PARTIAL_COLUMNS = 49, + MIPI_DCS_SET_SCROLL_AREA = 51, + MIPI_DCS_SET_TEAR_OFF = 52, + MIPI_DCS_SET_TEAR_ON = 53, + MIPI_DCS_SET_ADDRESS_MODE = 54, + MIPI_DCS_SET_SCROLL_START = 55, + MIPI_DCS_EXIT_IDLE_MODE = 56, + MIPI_DCS_ENTER_IDLE_MODE = 57, + MIPI_DCS_SET_PIXEL_FORMAT = 58, + MIPI_DCS_WRITE_MEMORY_CONTINUE = 60, + MIPI_DCS_SET_3D_CONTROL = 61, + MIPI_DCS_READ_MEMORY_CONTINUE = 62, + MIPI_DCS_GET_3D_CONTROL = 63, + MIPI_DCS_SET_VSYNC_TIMING = 64, + MIPI_DCS_SET_TEAR_SCANLINE = 68, + MIPI_DCS_GET_SCANLINE = 69, + MIPI_DCS_SET_DISPLAY_BRIGHTNESS = 81, + MIPI_DCS_GET_DISPLAY_BRIGHTNESS = 82, + MIPI_DCS_WRITE_CONTROL_DISPLAY = 83, + MIPI_DCS_GET_CONTROL_DISPLAY = 84, + MIPI_DCS_WRITE_POWER_SAVE = 85, + MIPI_DCS_GET_POWER_SAVE = 86, + MIPI_DCS_SET_CABC_MIN_BRIGHTNESS = 94, + MIPI_DCS_GET_CABC_MIN_BRIGHTNESS = 95, + MIPI_DCS_READ_DDB_START = 161, + MIPI_DCS_READ_PPS_START = 162, + MIPI_DCS_READ_DDB_CONTINUE = 168, + MIPI_DCS_READ_PPS_CONTINUE = 169, +}; + +struct drm_dmi_panel_orientation_data { + int width; + int height; + const char * const *bios_dates; + int orientation; +}; + +struct vga_device { + struct list_head list; + struct pci_dev *pdev; + unsigned int decodes; + unsigned int owns; + unsigned int locks; + unsigned int io_lock_cnt; + unsigned int mem_lock_cnt; + unsigned int io_norm_cnt; + unsigned int mem_norm_cnt; + bool bridge_has_one_vga; + void *cookie; + void (*irq_set_state)(void *, bool); + unsigned int (*set_vga_decode)(void *, bool); +}; + +struct vga_arb_user_card { + struct pci_dev *pdev; + unsigned int mem_cnt; + unsigned int io_cnt; +}; + +struct vga_arb_private { + struct list_head list; + struct pci_dev *target; + struct vga_arb_user_card cards[16]; + spinlock_t lock; +}; + +struct cb_id { + __u32 idx; + __u32 val; +}; + +struct cn_msg { + struct cb_id id; + __u32 seq; + __u32 ack; + __u16 len; + __u16 flags; + __u8 
data[0]; +}; + +struct cn_queue_dev { + atomic_t refcnt; + unsigned char name[32]; + struct list_head queue_list; + spinlock_t queue_lock; + struct sock *nls; +}; + +struct cn_callback_id { + unsigned char name[32]; + struct cb_id id; +}; + +struct cn_callback_entry { + struct list_head callback_entry; + refcount_t refcnt; + struct cn_queue_dev *pdev; + struct cn_callback_id id; + void (*callback)(struct cn_msg *, struct netlink_skb_parms *); + u32 seq; + u32 group; +}; + +struct cn_dev { + struct cb_id id; + u32 seq; + u32 groups; + struct sock *nls; + struct cn_queue_dev *cbdev; +}; + +enum proc_cn_mcast_op { + PROC_CN_MCAST_LISTEN = 1, + PROC_CN_MCAST_IGNORE = 2, +}; + +struct fork_proc_event { + __kernel_pid_t parent_pid; + __kernel_pid_t parent_tgid; + __kernel_pid_t child_pid; + __kernel_pid_t child_tgid; +}; + +struct exec_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; +}; + +struct id_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; + union { + __u32 ruid; + __u32 rgid; + } r; + union { + __u32 euid; + __u32 egid; + } e; +}; + +struct sid_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; +}; + +struct ptrace_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; + __kernel_pid_t tracer_pid; + __kernel_pid_t tracer_tgid; +}; + +struct comm_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; + char comm[16]; +}; + +struct coredump_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; + __kernel_pid_t parent_pid; + __kernel_pid_t parent_tgid; +}; + +struct exit_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; + __u32 exit_code; + __u32 exit_signal; + __kernel_pid_t parent_pid; + __kernel_pid_t parent_tgid; +}; + +struct proc_event { + enum what what; + __u32 cpu; + __u64 timestamp_ns; + union { + struct { + __u32 err; + } ack; + struct fork_proc_event fork; + struct exec_proc_event exec; + struct id_proc_event id; + struct sid_proc_event sid; + struct ptrace_proc_event ptrace; + struct comm_proc_event comm; + struct coredump_proc_event coredump; + struct exit_proc_event exit; + } event_data; +}; + +struct local_event { + local_lock_t lock; + __u32 count; +}; + +struct nvm_ioctl_info_tgt { + __u32 version[3]; + __u32 reserved; + char tgtname[48]; +}; + +struct nvm_ioctl_info { + __u32 version[3]; + __u16 tgtsize; + __u16 reserved16; + __u32 reserved[12]; + struct nvm_ioctl_info_tgt tgts[63]; +}; + +struct nvm_ioctl_device_info { + char devname[32]; + char bmname[48]; + __u32 bmversion[3]; + __u32 flags; + __u32 reserved[8]; +}; + +struct nvm_ioctl_get_devices { + __u32 nr_devices; + __u32 reserved[31]; + struct nvm_ioctl_device_info info[31]; +}; + +struct nvm_ioctl_create_simple { + __u32 lun_begin; + __u32 lun_end; +}; + +struct nvm_ioctl_create_extended { + __u16 lun_begin; + __u16 lun_end; + __u16 op; + __u16 rsv; +}; + +enum { + NVM_CONFIG_TYPE_SIMPLE = 0, + NVM_CONFIG_TYPE_EXTENDED = 1, +}; + +struct nvm_ioctl_create_conf { + __u32 type; + union { + struct nvm_ioctl_create_simple s; + struct nvm_ioctl_create_extended e; + }; +}; + +enum { + NVM_TARGET_FACTORY = 1, +}; + +struct nvm_ioctl_create { + char dev[32]; + char tgttype[48]; + char tgtname[32]; + __u32 flags; + struct nvm_ioctl_create_conf conf; +}; + +struct nvm_ioctl_remove { + char tgtname[32]; + __u32 flags; +}; + +struct nvm_ioctl_dev_init { + char dev[32]; + char mmtype[8]; + __u32 flags; +}; + +enum { + NVM_FACTORY_ERASE_ONLY_USER = 1, + 
NVM_FACTORY_RESET_HOST_BLKS = 2, + NVM_FACTORY_RESET_GRWN_BBLKS = 4, + NVM_FACTORY_NR_BITS = 8, +}; + +struct nvm_ioctl_dev_factory { + char dev[32]; + __u32 flags; +}; + +enum { + NVM_INFO_CMD = 32, + NVM_GET_DEVICES_CMD = 33, + NVM_DEV_CREATE_CMD = 34, + NVM_DEV_REMOVE_CMD = 35, + NVM_DEV_INIT_CMD = 36, + NVM_DEV_FACTORY_CMD = 37, + NVM_DEV_VIO_ADMIN_CMD = 65, + NVM_DEV_VIO_CMD = 66, + NVM_DEV_VIO_USER_CMD = 67, +}; + +enum { + NVM_OCSSD_SPEC_12 = 12, + NVM_OCSSD_SPEC_20 = 20, +}; + +struct ppa_addr { + union { + struct { + u64 ch: 8; + u64 lun: 8; + u64 blk: 16; + u64 reserved: 32; + } a; + struct { + u64 ch: 8; + u64 lun: 8; + u64 blk: 16; + u64 pg: 16; + u64 pl: 4; + u64 sec: 4; + u64 reserved: 8; + } g; + struct { + u64 grp: 8; + u64 pu: 8; + u64 chk: 16; + u64 sec: 24; + u64 reserved: 8; + } m; + struct { + u64 line: 63; + u64 is_cached: 1; + } c; + u64 ppa; + }; +}; + +struct nvm_dev; + +typedef int nvm_id_fn(struct nvm_dev *); + +struct nvm_addrf { + u8 ch_len; + u8 lun_len; + u8 chk_len; + u8 sec_len; + u8 rsv_len[2]; + u8 ch_offset; + u8 lun_offset; + u8 chk_offset; + u8 sec_offset; + u8 rsv_off[2]; + u64 ch_mask; + u64 lun_mask; + u64 chk_mask; + u64 sec_mask; + u64 rsv_mask[2]; +}; + +struct nvm_geo { + u8 major_ver_id; + u8 minor_ver_id; + u8 version; + int num_ch; + int num_lun; + int all_luns; + int all_chunks; + int op; + sector_t total_secs; + u32 num_chk; + u32 clba; + u16 csecs; + u16 sos; + bool ext; + u32 mdts; + u32 ws_min; + u32 ws_opt; + u32 mw_cunits; + u32 maxoc; + u32 maxocpu; + u32 mccap; + u32 trdt; + u32 trdm; + u32 tprt; + u32 tprm; + u32 tbet; + u32 tbem; + struct nvm_addrf addrf; + u8 vmnt; + u32 cap; + u32 dom; + u8 mtype; + u8 fmtype; + u16 cpar; + u32 mpos; + u8 num_pln; + u8 pln_mode; + u16 num_pg; + u16 fpg_sz; +}; + +struct nvm_dev_ops; + +struct nvm_dev { + struct nvm_dev_ops *ops; + struct list_head devices; + struct nvm_geo geo; + long unsigned int *lun_map; + void *dma_pool; + struct request_queue *q; + char name[32]; + void *private_data; + struct kref ref; + void *rmap; + struct mutex mlock; + spinlock_t lock; + struct list_head area_list; + struct list_head targets; +}; + +typedef int nvm_op_bb_tbl_fn(struct nvm_dev *, struct ppa_addr, u8 *); + +typedef int nvm_op_set_bb_fn(struct nvm_dev *, struct ppa_addr *, int, int); + +struct nvm_chk_meta; + +typedef int nvm_get_chk_meta_fn(struct nvm_dev *, sector_t, int, struct nvm_chk_meta *); + +struct nvm_chk_meta { + u8 state; + u8 type; + u8 wi; + u8 rsvd[5]; + u64 slba; + u64 cnlb; + u64 wp; +}; + +struct nvm_rq; + +typedef int nvm_submit_io_fn(struct nvm_dev *, struct nvm_rq *, void *); + +typedef void nvm_end_io_fn(struct nvm_rq *); + +struct nvm_tgt_dev; + +struct nvm_rq { + struct nvm_tgt_dev *dev; + struct bio *bio; + union { + struct ppa_addr ppa_addr; + dma_addr_t dma_ppa_list; + }; + struct ppa_addr *ppa_list; + void *meta_list; + dma_addr_t dma_meta_list; + nvm_end_io_fn *end_io; + uint8_t opcode; + uint16_t nr_ppas; + uint16_t flags; + u64 ppa_status; + int error; + int is_seq; + void *private; +}; + +typedef void *nvm_create_dma_pool_fn(struct nvm_dev *, char *, int); + +typedef void nvm_destroy_dma_pool_fn(void *); + +typedef void *nvm_dev_dma_alloc_fn(struct nvm_dev *, void *, gfp_t, dma_addr_t *); + +typedef void nvm_dev_dma_free_fn(void *, void *, dma_addr_t); + +struct nvm_dev_ops { + nvm_id_fn *identity; + nvm_op_bb_tbl_fn *get_bb_tbl; + nvm_op_set_bb_fn *set_bb_tbl; + nvm_get_chk_meta_fn *get_chk_meta; + nvm_submit_io_fn *submit_io; + nvm_create_dma_pool_fn *create_dma_pool; + 
nvm_destroy_dma_pool_fn *destroy_dma_pool; + nvm_dev_dma_alloc_fn *dev_dma_alloc; + nvm_dev_dma_free_fn *dev_dma_free; +}; + +enum { + NVM_RSP_L2P = 1, + NVM_RSP_ECC = 2, + NVM_ADDRMODE_LINEAR = 0, + NVM_ADDRMODE_CHANNEL = 1, + NVM_PLANE_SINGLE = 1, + NVM_PLANE_DOUBLE = 2, + NVM_PLANE_QUAD = 4, + NVM_RSP_SUCCESS = 0, + NVM_RSP_NOT_CHANGEABLE = 1, + NVM_RSP_ERR_FAILWRITE = 16639, + NVM_RSP_ERR_EMPTYPAGE = 17151, + NVM_RSP_ERR_FAILECC = 17025, + NVM_RSP_ERR_FAILCRC = 16388, + NVM_RSP_WARN_HIGHECC = 18176, + NVM_OP_PWRITE = 145, + NVM_OP_PREAD = 146, + NVM_OP_ERASE = 144, + NVM_IO_SNGL_ACCESS = 0, + NVM_IO_DUAL_ACCESS = 1, + NVM_IO_QUAD_ACCESS = 2, + NVM_IO_SUSPEND = 128, + NVM_IO_SLC_MODE = 256, + NVM_IO_SCRAMBLE_ENABLE = 512, + NVM_BLK_T_FREE = 0, + NVM_BLK_T_BAD = 1, + NVM_BLK_T_GRWN_BAD = 2, + NVM_BLK_T_DEV = 4, + NVM_BLK_T_HOST = 8, + NVM_ID_CAP_SLC = 1, + NVM_ID_CAP_CMD_SUSPEND = 2, + NVM_ID_CAP_SCRAMBLE = 4, + NVM_ID_CAP_ENCRYPT = 8, + NVM_ID_FMTYPE_SLC = 0, + NVM_ID_FMTYPE_MLC = 1, + NVM_ID_DCAP_BBLKMGMT = 1, + NVM_UD_DCAP_ECC = 2, +}; + +struct nvm_addrf_12 { + u8 ch_len; + u8 lun_len; + u8 blk_len; + u8 pg_len; + u8 pln_len; + u8 sec_len; + u8 ch_offset; + u8 lun_offset; + u8 blk_offset; + u8 pg_offset; + u8 pln_offset; + u8 sec_offset; + u64 ch_mask; + u64 lun_mask; + u64 blk_mask; + u64 pg_mask; + u64 pln_mask; + u64 sec_mask; +}; + +enum { + NVM_CHK_ST_FREE = 1, + NVM_CHK_ST_CLOSED = 2, + NVM_CHK_ST_OPEN = 4, + NVM_CHK_ST_OFFLINE = 8, + NVM_CHK_TP_W_SEQ = 1, + NVM_CHK_TP_W_RAN = 2, + NVM_CHK_TP_SZ_SPEC = 16, +}; + +struct nvm_tgt_type; + +struct nvm_target { + struct list_head list; + struct nvm_tgt_dev *dev; + struct nvm_tgt_type *type; + struct gendisk *disk; +}; + +struct nvm_tgt_dev { + struct nvm_geo geo; + struct ppa_addr *luns; + struct request_queue *q; + struct nvm_dev *parent; + void *map; +}; + +typedef sector_t nvm_tgt_capacity_fn(void *); + +typedef void *nvm_tgt_init_fn(struct nvm_tgt_dev *, struct gendisk *, int); + +typedef void nvm_tgt_exit_fn(void *, bool); + +typedef int nvm_tgt_sysfs_init_fn(struct gendisk *); + +typedef void nvm_tgt_sysfs_exit_fn(struct gendisk *); + +struct nvm_tgt_type { + const char *name; + unsigned int version[3]; + int flags; + const struct block_device_operations *bops; + nvm_tgt_capacity_fn *capacity; + nvm_tgt_init_fn *init; + nvm_tgt_exit_fn *exit; + nvm_tgt_sysfs_init_fn *sysfs_init; + nvm_tgt_sysfs_exit_fn *sysfs_exit; + struct list_head list; + struct module *owner; +}; + +enum { + NVM_TGT_F_DEV_L2P = 0, + NVM_TGT_F_HOST_L2P = 1, +}; + +struct nvm_ch_map { + int ch_off; + int num_lun; + int *lun_offs; +}; + +struct nvm_dev_map { + struct nvm_ch_map *chnls; + int num_ch; +}; + +struct component_ops { + int (*bind)(struct device *, struct device *, void *); + void (*unbind)(struct device *, struct device *, void *); +}; + +struct component_master_ops { + int (*bind)(struct device *); + void (*unbind)(struct device *); +}; + +struct component; + +struct component_match_array { + void *data; + int (*compare)(struct device *, void *); + int (*compare_typed)(struct device *, int, void *); + void (*release)(struct device *, void *); + struct component *component; + bool duplicate; +}; + +struct master; + +struct component { + struct list_head node; + struct master *master; + bool bound; + const struct component_ops *ops; + int subcomponent; + struct device *dev; +}; + +struct component_match { + size_t alloc; + size_t num; + struct component_match_array *compare; +}; + +struct master { + struct list_head node; + bool bound; + const 
struct component_master_ops *ops; + struct device *dev; + struct component_match *match; + struct dentry *dentry; +}; + +struct wake_irq { + struct device *dev; + unsigned int status; + int irq; + const char *name; +}; + +enum dpm_order { + DPM_ORDER_NONE = 0, + DPM_ORDER_DEV_AFTER_PARENT = 1, + DPM_ORDER_PARENT_BEFORE_DEV = 2, + DPM_ORDER_DEV_LAST = 3, +}; + +struct subsys_private { + struct kset subsys; + struct kset *devices_kset; + struct list_head interfaces; + struct mutex mutex; + struct kset *drivers_kset; + struct klist klist_devices; + struct klist klist_drivers; + struct blocking_notifier_head bus_notifier; + unsigned int drivers_autoprobe: 1; + struct bus_type *bus; + struct kset glue_dirs; + struct class *class; +}; + +struct driver_private { + struct kobject kobj; + struct klist klist_devices; + struct klist_node knode_bus; + struct module_kobject *mkobj; + struct device_driver *driver; +}; + +struct device_private { + struct klist klist_children; + struct klist_node knode_parent; + struct klist_node knode_driver; + struct klist_node knode_bus; + struct klist_node knode_class; + struct list_head deferred_probe; + struct device_driver *async_driver; + char *deferred_probe_reason; + struct device *device; + u8 dead: 1; +}; + +union device_attr_group_devres { + const struct attribute_group *group; + const struct attribute_group **groups; +}; + +struct class_dir { + struct kobject kobj; + struct class *class; +}; + +struct root_device { + struct device dev; + struct module *owner; +}; + +struct subsys_dev_iter { + struct klist_iter ki; + const struct device_type *type; +}; + +struct subsys_interface { + const char *name; + struct bus_type *subsys; + struct list_head node; + int (*add_dev)(struct device *, struct subsys_interface *); + void (*remove_dev)(struct device *, struct subsys_interface *); +}; + +struct device_attach_data { + struct device *dev; + bool check_async; + bool want_async; + bool have_async; +}; + +struct class_attribute_string { + struct class_attribute attr; + char *str; +}; + +struct class_compat { + struct kobject *kobj; +}; + +struct platform_object { + struct platform_device pdev; + char name[0]; +}; + +struct cpu_attr { + struct device_attribute attr; + const struct cpumask * const map; +}; + +typedef struct kobject *kobj_probe_t(dev_t, int *, void *); + +struct probe { + struct probe *next; + dev_t dev; + long unsigned int range; + struct module *owner; + kobj_probe_t *get; + int (*lock)(dev_t, void *); + void *data; +}; + +struct kobj_map___2 { + struct probe *probes[255]; + struct mutex *lock; +}; + +typedef int (*dr_match_t)(struct device *, void *, void *); + +struct devres_node { + struct list_head entry; + dr_release_t release; +}; + +struct devres___2 { + struct devres_node node; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + u8 data[0]; +}; + +struct devres_group { + struct devres_node node[2]; + void *id; + int color; +}; + +struct action_devres { + void *data; + void (*action)(void *); +}; + +struct pages_devres { + long unsigned int addr; + unsigned int order; +}; + +struct attribute_container { + struct list_head node; + struct klist containers; + struct class *class; + const struct attribute_group *grp; + struct device_attribute **attrs; + int (*match)(struct attribute_container *, struct device *); + long unsigned int flags; +}; + +struct internal_container { + struct klist_node node; + struct attribute_container *cont; + struct 
device classdev; +}; + +struct transport_container; + +struct transport_class { + struct class class; + int (*setup)(struct transport_container *, struct device *, struct device *); + int (*configure)(struct transport_container *, struct device *, struct device *); + int (*remove)(struct transport_container *, struct device *, struct device *); +}; + +struct transport_container { + struct attribute_container ac; + const struct attribute_group *statistics; +}; + +struct anon_transport_class { + struct transport_class tclass; + struct attribute_container container; +}; + +typedef void * (*devcon_match_fn_t)(struct fwnode_handle *, const char *, void *); + +struct mii_bus; + +struct mdio_device { + struct device dev; + struct mii_bus *bus; + char modalias[32]; + int (*bus_match)(struct device *, struct device_driver *); + void (*device_free)(struct mdio_device *); + void (*device_remove)(struct mdio_device *); + int addr; + int flags; + struct gpio_desc *reset_gpio; + struct reset_control *reset_ctrl; + unsigned int reset_assert_delay; + unsigned int reset_deassert_delay; +}; + +struct phy_c45_device_ids { + u32 devices_in_package; + u32 mmds_present; + u32 device_ids[32]; +}; + +enum phy_state { + PHY_DOWN = 0, + PHY_READY = 1, + PHY_HALTED = 2, + PHY_UP = 3, + PHY_RUNNING = 4, + PHY_NOLINK = 5, + PHY_CABLETEST = 6, +}; + +typedef enum { + PHY_INTERFACE_MODE_NA = 0, + PHY_INTERFACE_MODE_INTERNAL = 1, + PHY_INTERFACE_MODE_MII = 2, + PHY_INTERFACE_MODE_GMII = 3, + PHY_INTERFACE_MODE_SGMII = 4, + PHY_INTERFACE_MODE_TBI = 5, + PHY_INTERFACE_MODE_REVMII = 6, + PHY_INTERFACE_MODE_RMII = 7, + PHY_INTERFACE_MODE_RGMII = 8, + PHY_INTERFACE_MODE_RGMII_ID = 9, + PHY_INTERFACE_MODE_RGMII_RXID = 10, + PHY_INTERFACE_MODE_RGMII_TXID = 11, + PHY_INTERFACE_MODE_RTBI = 12, + PHY_INTERFACE_MODE_SMII = 13, + PHY_INTERFACE_MODE_XGMII = 14, + PHY_INTERFACE_MODE_XLGMII = 15, + PHY_INTERFACE_MODE_MOCA = 16, + PHY_INTERFACE_MODE_QSGMII = 17, + PHY_INTERFACE_MODE_TRGMII = 18, + PHY_INTERFACE_MODE_1000BASEX = 19, + PHY_INTERFACE_MODE_2500BASEX = 20, + PHY_INTERFACE_MODE_RXAUI = 21, + PHY_INTERFACE_MODE_XAUI = 22, + PHY_INTERFACE_MODE_10GBASER = 23, + PHY_INTERFACE_MODE_USXGMII = 24, + PHY_INTERFACE_MODE_10GKR = 25, + PHY_INTERFACE_MODE_MAX = 26, +} phy_interface_t; + +struct phylink; + +struct phy_driver; + +struct phy_package_shared; + +struct mii_timestamper; + +struct phy_device { + struct mdio_device mdio; + struct phy_driver *drv; + u32 phy_id; + struct phy_c45_device_ids c45_ids; + unsigned int is_c45: 1; + unsigned int is_internal: 1; + unsigned int is_pseudo_fixed_link: 1; + unsigned int is_gigabit_capable: 1; + unsigned int has_fixups: 1; + unsigned int suspended: 1; + unsigned int suspended_by_mdio_bus: 1; + unsigned int sysfs_links: 1; + unsigned int loopback_enabled: 1; + unsigned int downshifted_rate: 1; + unsigned int autoneg: 1; + unsigned int link: 1; + unsigned int autoneg_complete: 1; + unsigned int interrupts: 1; + enum phy_state state; + u32 dev_flags; + phy_interface_t interface; + int speed; + int duplex; + int pause; + int asym_pause; + u8 master_slave_get; + u8 master_slave_set; + u8 master_slave_state; + long unsigned int supported[2]; + long unsigned int advertising[2]; + long unsigned int lp_advertising[2]; + long unsigned int adv_old[2]; + u32 eee_broken_modes; + int irq; + void *priv; + struct phy_package_shared *shared; + struct sk_buff *skb; + void *ehdr; + struct nlattr *nest; + struct delayed_work state_queue; + struct mutex lock; + bool sfp_bus_attached; + struct sfp_bus *sfp_bus; + 
struct phylink *phylink; + struct net_device *attached_dev; + struct mii_timestamper *mii_ts; + u8 mdix; + u8 mdix_ctrl; + void (*phy_link_change)(struct phy_device *, bool); + void (*adjust_link)(struct net_device *); + const struct macsec_ops *macsec_ops; +}; + +struct phy_tdr_config { + u32 first; + u32 last; + u32 step; + s8 pair; +}; + +struct mdio_bus_stats { + u64_stats_t transfers; + u64_stats_t errors; + u64_stats_t writes; + u64_stats_t reads; + struct u64_stats_sync syncp; +}; + +struct mii_bus { + struct module *owner; + const char *name; + char id[61]; + void *priv; + int (*read)(struct mii_bus *, int, int); + int (*write)(struct mii_bus *, int, int, u16); + int (*reset)(struct mii_bus *); + struct mdio_bus_stats stats[32]; + struct mutex mdio_lock; + struct device *parent; + enum { + MDIOBUS_ALLOCATED = 1, + MDIOBUS_REGISTERED = 2, + MDIOBUS_UNREGISTERED = 3, + MDIOBUS_RELEASED = 4, + } state; + struct device dev; + struct mdio_device *mdio_map[32]; + u32 phy_mask; + u32 phy_ignore_ta_mask; + int irq[32]; + int reset_delay_us; + int reset_post_delay_us; + struct gpio_desc *reset_gpiod; + enum { + MDIOBUS_NO_CAP = 0, + MDIOBUS_C22 = 1, + MDIOBUS_C45 = 2, + MDIOBUS_C22_C45 = 3, + } probe_capabilities; + struct mutex shared_lock; + struct phy_package_shared *shared[32]; +}; + +struct mdio_driver_common { + struct device_driver driver; + int flags; +}; + +struct mii_timestamper { + bool (*rxtstamp)(struct mii_timestamper *, struct sk_buff *, int); + void (*txtstamp)(struct mii_timestamper *, struct sk_buff *, int); + int (*hwtstamp)(struct mii_timestamper *, struct ifreq *); + void (*link_state)(struct mii_timestamper *, struct phy_device *); + int (*ts_info)(struct mii_timestamper *, struct ethtool_ts_info *); + struct device *device; +}; + +struct phy_package_shared { + int addr; + refcount_t refcnt; + long unsigned int flags; + size_t priv_size; + void *priv; +}; + +struct phy_driver { + struct mdio_driver_common mdiodrv; + u32 phy_id; + char *name; + u32 phy_id_mask; + const long unsigned int * const features; + u32 flags; + const void *driver_data; + int (*soft_reset)(struct phy_device *); + int (*config_init)(struct phy_device *); + int (*probe)(struct phy_device *); + int (*get_features)(struct phy_device *); + int (*suspend)(struct phy_device *); + int (*resume)(struct phy_device *); + int (*config_aneg)(struct phy_device *); + int (*aneg_done)(struct phy_device *); + int (*read_status)(struct phy_device *); + int (*ack_interrupt)(struct phy_device *); + int (*config_intr)(struct phy_device *); + int (*did_interrupt)(struct phy_device *); + irqreturn_t (*handle_interrupt)(struct phy_device *); + void (*remove)(struct phy_device *); + int (*match_phy_device)(struct phy_device *); + int (*set_wol)(struct phy_device *, struct ethtool_wolinfo *); + void (*get_wol)(struct phy_device *, struct ethtool_wolinfo *); + void (*link_change_notify)(struct phy_device *); + int (*read_mmd)(struct phy_device *, int, u16); + int (*write_mmd)(struct phy_device *, int, u16, u16); + int (*read_page)(struct phy_device *); + int (*write_page)(struct phy_device *, int); + int (*module_info)(struct phy_device *, struct ethtool_modinfo *); + int (*module_eeprom)(struct phy_device *, struct ethtool_eeprom *, u8 *); + int (*cable_test_start)(struct phy_device *); + int (*cable_test_tdr_start)(struct phy_device *, const struct phy_tdr_config *); + int (*cable_test_get_status)(struct phy_device *, bool *); + int (*get_sset_count)(struct phy_device *); + void (*get_strings)(struct phy_device *, u8 *); 
+ void (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); + int (*get_tunable)(struct phy_device *, struct ethtool_tunable *, void *); + int (*set_tunable)(struct phy_device *, struct ethtool_tunable *, const void *); + int (*set_loopback)(struct phy_device *, bool); + int (*get_sqi)(struct phy_device *); + int (*get_sqi_max)(struct phy_device *); +}; + +struct cache_type_info { + const char *size_prop; + const char *line_size_props[2]; + const char *nr_sets_prop; +}; + +struct software_node; + +struct software_node_ref_args { + const struct software_node *node; + unsigned int nargs; + u64 args[8]; +}; + +struct software_node { + const char *name; + const struct software_node *parent; + const struct property_entry *properties; +}; + +struct swnode { + int id; + struct kobject kobj; + struct fwnode_handle fwnode; + const struct software_node *node; + struct ida child_ids; + struct list_head entry; + struct list_head children; + struct swnode *parent; + unsigned int allocated: 1; +}; + +struct req { + struct req *next; + struct completion done; + int err; + const char *name; + umode_t mode; + kuid_t uid; + kgid_t gid; + struct device *dev; +}; + +typedef int (*pm_callback_t)(struct device *); + +struct of_phandle_iterator { + const char *cells_name; + int cell_count; + const struct device_node *parent; + const __be32 *list_end; + const __be32 *phandle_end; + const __be32 *cur; + uint32_t cur_count; + phandle phandle; + struct device_node *node; +}; + +enum genpd_notication { + GENPD_NOTIFY_PRE_OFF = 0, + GENPD_NOTIFY_OFF = 1, + GENPD_NOTIFY_PRE_ON = 2, + GENPD_NOTIFY_ON = 3, +}; + +struct gpd_link { + struct generic_pm_domain *parent; + struct list_head parent_node; + struct generic_pm_domain *child; + struct list_head child_node; + unsigned int performance_state; + unsigned int prev_performance_state; +}; + +struct gpd_timing_data { + s64 suspend_latency_ns; + s64 resume_latency_ns; + s64 effective_constraint_ns; + bool constraint_changed; + bool cached_suspend_ok; +}; + +struct generic_pm_domain_data { + struct pm_domain_data base; + struct gpd_timing_data td; + struct notifier_block nb; + struct notifier_block *power_nb; + int cpu; + unsigned int performance_state; + void *data; +}; + +struct of_genpd_provider { + struct list_head link; + struct device_node *node; + genpd_xlate_t xlate; + void *data; +}; + +struct pm_clk_notifier_block { + struct notifier_block nb; + struct dev_pm_domain *pm_domain; + char *con_ids[0]; +}; + +enum pce_status { + PCE_STATUS_NONE = 0, + PCE_STATUS_ACQUIRED = 1, + PCE_STATUS_ENABLED = 2, + PCE_STATUS_ERROR = 3, +}; + +struct pm_clock_entry { + struct list_head node; + char *con_id; + struct clk *clk; + enum pce_status status; +}; + +struct firmware_fallback_config { + unsigned int force_sysfs_fallback; + unsigned int ignore_sysfs_fallback; + int old_timeout; + int loading_timeout; +}; + +struct builtin_fw { + char *name; + void *data; + long unsigned int size; +}; + +enum fw_opt { + FW_OPT_UEVENT = 1, + FW_OPT_NOWAIT = 2, + FW_OPT_USERHELPER = 4, + FW_OPT_NO_WARN = 8, + FW_OPT_NOCACHE = 16, + FW_OPT_NOFALLBACK_SYSFS = 32, + FW_OPT_FALLBACK_PLATFORM = 64, + FW_OPT_PARTIAL = 128, +}; + +enum fw_status { + FW_STATUS_UNKNOWN = 0, + FW_STATUS_LOADING = 1, + FW_STATUS_DONE = 2, + FW_STATUS_ABORTED = 3, +}; + +struct fw_state { + struct completion completion; + enum fw_status status; +}; + +struct firmware_cache; + +struct fw_priv { + struct kref ref; + struct list_head list; + struct firmware_cache *fwc; + struct fw_state fw_st; + void *data; + 
size_t size; + size_t allocated_size; + size_t offset; + u32 opt_flags; + bool is_paged_buf; + struct page **pages; + int nr_pages; + int page_array_size; + bool need_uevent; + struct list_head pending_list; + const char *fw_name; +}; + +struct firmware_cache { + spinlock_t lock; + struct list_head head; + int state; +}; + +struct firmware_work { + struct work_struct work; + struct module *module; + const char *name; + struct device *device; + void *context; + void (*cont)(const struct firmware *, void *); + u32 opt_flags; +}; + +struct fw_sysfs { + bool nowait; + struct device dev; + struct fw_priv *fw_priv; + struct firmware *fw; +}; + +typedef void (*node_registration_func_t)(struct node___2 *); + +typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *); + +struct node_access_nodes { + struct device dev; + struct list_head list_node; + unsigned int access; + struct node_hmem_attrs hmem_attrs; +}; + +struct node_cache_info { + struct device dev; + struct list_head node; + struct node_cache_attrs cache_attrs; +}; + +struct node_attr { + struct device_attribute attr; + enum node_states state; +}; + +struct for_each_memory_block_cb_data { + walk_memory_blocks_func_t func; + void *arg; +}; + +typedef int (*regmap_hw_write)(void *, const void *, size_t); + +typedef int (*regmap_hw_gather_write)(void *, const void *, size_t, const void *, size_t); + +struct regmap_async; + +typedef int (*regmap_hw_async_write)(void *, const void *, size_t, const void *, size_t, struct regmap_async *); + +struct regmap___2; + +struct regmap_async { + struct list_head list; + struct regmap___2 *map; + void *work_buf; +}; + +typedef int (*regmap_hw_read)(void *, const void *, size_t, void *, size_t); + +typedef int (*regmap_hw_reg_read)(void *, unsigned int, unsigned int *); + +typedef int (*regmap_hw_reg_write)(void *, unsigned int, unsigned int); + +typedef int (*regmap_hw_reg_update_bits)(void *, unsigned int, unsigned int, unsigned int); + +typedef struct regmap_async * (*regmap_hw_async_alloc)(); + +typedef void (*regmap_hw_free_context)(void *); + +struct regmap_bus { + bool fast_io; + regmap_hw_write write; + regmap_hw_gather_write gather_write; + regmap_hw_async_write async_write; + regmap_hw_reg_write reg_write; + regmap_hw_reg_update_bits reg_update_bits; + regmap_hw_read read; + regmap_hw_reg_read reg_read; + regmap_hw_free_context free_context; + regmap_hw_async_alloc async_alloc; + u8 read_flag_mask; + enum regmap_endian reg_format_endian_default; + enum regmap_endian val_format_endian_default; + size_t max_raw_read; + size_t max_raw_write; +}; + +struct reg_field { + unsigned int reg; + unsigned int lsb; + unsigned int msb; + unsigned int id_size; + unsigned int id_offset; +}; + +struct regmap_format { + size_t buf_size; + size_t reg_bytes; + size_t pad_bytes; + size_t val_bytes; + void (*format_write)(struct regmap___2 *, unsigned int, unsigned int); + void (*format_reg)(void *, unsigned int, unsigned int); + void (*format_val)(void *, unsigned int, unsigned int); + unsigned int (*parse_val)(const void *); + void (*parse_inplace)(void *); +}; + +struct regcache_ops; + +struct regmap___2 { + union { + struct mutex mutex; + struct { + spinlock_t spinlock; + long unsigned int spinlock_flags; + }; + }; + regmap_lock lock; + regmap_unlock unlock; + void *lock_arg; + gfp_t alloc_flags; + struct device *dev; + void *work_buf; + struct regmap_format format; + const struct regmap_bus *bus; + void *bus_context; + const char *name; + bool async; + spinlock_t async_lock; + wait_queue_head_t 
async_waitq; + struct list_head async_list; + struct list_head async_free; + int async_ret; + bool debugfs_disable; + struct dentry *debugfs; + const char *debugfs_name; + unsigned int debugfs_reg_len; + unsigned int debugfs_val_len; + unsigned int debugfs_tot_len; + struct list_head debugfs_off_cache; + struct mutex cache_lock; + unsigned int max_register; + bool (*writeable_reg)(struct device *, unsigned int); + bool (*readable_reg)(struct device *, unsigned int); + bool (*volatile_reg)(struct device *, unsigned int); + bool (*precious_reg)(struct device *, unsigned int); + bool (*writeable_noinc_reg)(struct device *, unsigned int); + bool (*readable_noinc_reg)(struct device *, unsigned int); + const struct regmap_access_table *wr_table; + const struct regmap_access_table *rd_table; + const struct regmap_access_table *volatile_table; + const struct regmap_access_table *precious_table; + const struct regmap_access_table *wr_noinc_table; + const struct regmap_access_table *rd_noinc_table; + int (*reg_read)(void *, unsigned int, unsigned int *); + int (*reg_write)(void *, unsigned int, unsigned int); + int (*reg_update_bits)(void *, unsigned int, unsigned int, unsigned int); + bool defer_caching; + long unsigned int read_flag_mask; + long unsigned int write_flag_mask; + int reg_shift; + int reg_stride; + int reg_stride_order; + const struct regcache_ops *cache_ops; + enum regcache_type cache_type; + unsigned int cache_size_raw; + unsigned int cache_word_size; + unsigned int num_reg_defaults; + unsigned int num_reg_defaults_raw; + bool cache_only; + bool cache_bypass; + bool cache_free; + struct reg_default *reg_defaults; + const void *reg_defaults_raw; + void *cache; + bool cache_dirty; + bool no_sync_defaults; + struct reg_sequence *patch; + int patch_regs; + bool use_single_read; + bool use_single_write; + bool can_multi_write; + size_t max_raw_read; + size_t max_raw_write; + struct rb_root range_tree; + void *selector_work_buf; + struct hwspinlock *hwlock; + bool can_sleep; +}; + +struct regcache_ops { + const char *name; + enum regcache_type type; + int (*init)(struct regmap___2 *); + int (*exit)(struct regmap___2 *); + void (*debugfs_init)(struct regmap___2 *); + int (*read)(struct regmap___2 *, unsigned int, unsigned int *); + int (*write)(struct regmap___2 *, unsigned int, unsigned int); + int (*sync)(struct regmap___2 *, unsigned int, unsigned int); + int (*drop)(struct regmap___2 *, unsigned int, unsigned int); +}; + +struct regmap_range_node { + struct rb_node node; + const char *name; + struct regmap___2 *map; + unsigned int range_min; + unsigned int range_max; + unsigned int selector_reg; + unsigned int selector_mask; + int selector_shift; + unsigned int window_start; + unsigned int window_len; +}; + +struct regmap_field { + struct regmap___2 *regmap; + unsigned int mask; + unsigned int shift; + unsigned int reg; + unsigned int id_size; + unsigned int id_offset; +}; + +struct trace_event_raw_regmap_reg { + struct trace_entry ent; + u32 __data_loc_name; + unsigned int reg; + unsigned int val; + char __data[0]; +}; + +struct trace_event_raw_regmap_block { + struct trace_entry ent; + u32 __data_loc_name; + unsigned int reg; + int count; + char __data[0]; +}; + +struct trace_event_raw_regcache_sync { + struct trace_entry ent; + u32 __data_loc_name; + u32 __data_loc_status; + u32 __data_loc_type; + int type; + char __data[0]; +}; + +struct trace_event_raw_regmap_bool { + struct trace_entry ent; + u32 __data_loc_name; + int flag; + char __data[0]; +}; + +struct 
trace_event_raw_regmap_async { + struct trace_entry ent; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_regcache_drop_region { + struct trace_entry ent; + u32 __data_loc_name; + unsigned int from; + unsigned int to; + char __data[0]; +}; + +struct trace_event_data_offsets_regmap_reg { + u32 name; +}; + +struct trace_event_data_offsets_regmap_block { + u32 name; +}; + +struct trace_event_data_offsets_regcache_sync { + u32 name; + u32 status; + u32 type; +}; + +struct trace_event_data_offsets_regmap_bool { + u32 name; +}; + +struct trace_event_data_offsets_regmap_async { + u32 name; +}; + +struct trace_event_data_offsets_regcache_drop_region { + u32 name; +}; + +typedef void (*btf_trace_regmap_reg_write)(void *, struct regmap___2 *, unsigned int, unsigned int); + +typedef void (*btf_trace_regmap_reg_read)(void *, struct regmap___2 *, unsigned int, unsigned int); + +typedef void (*btf_trace_regmap_reg_read_cache)(void *, struct regmap___2 *, unsigned int, unsigned int); + +typedef void (*btf_trace_regmap_hw_read_start)(void *, struct regmap___2 *, unsigned int, int); + +typedef void (*btf_trace_regmap_hw_read_done)(void *, struct regmap___2 *, unsigned int, int); + +typedef void (*btf_trace_regmap_hw_write_start)(void *, struct regmap___2 *, unsigned int, int); + +typedef void (*btf_trace_regmap_hw_write_done)(void *, struct regmap___2 *, unsigned int, int); + +typedef void (*btf_trace_regcache_sync)(void *, struct regmap___2 *, const char *, const char *); + +typedef void (*btf_trace_regmap_cache_only)(void *, struct regmap___2 *, bool); + +typedef void (*btf_trace_regmap_cache_bypass)(void *, struct regmap___2 *, bool); + +typedef void (*btf_trace_regmap_async_write_start)(void *, struct regmap___2 *, unsigned int, int); + +typedef void (*btf_trace_regmap_async_io_complete)(void *, struct regmap___2 *); + +typedef void (*btf_trace_regmap_async_complete_start)(void *, struct regmap___2 *); + +typedef void (*btf_trace_regmap_async_complete_done)(void *, struct regmap___2 *); + +typedef void (*btf_trace_regcache_drop_region)(void *, struct regmap___2 *, unsigned int, unsigned int); + +struct regcache_rbtree_node { + void *block; + long int *cache_present; + unsigned int base_reg; + unsigned int blklen; + struct rb_node node; +}; + +struct regcache_rbtree_ctx { + struct rb_root root; + struct regcache_rbtree_node *cached_rbnode; +}; + +struct regmap_debugfs_off_cache { + struct list_head list; + off_t min; + off_t max; + unsigned int base_reg; + unsigned int max_reg; +}; + +struct regmap_debugfs_node { + struct regmap___2 *map; + struct list_head link; +}; + +struct ptp_system_timestamp { + struct timespec64 pre_ts; + struct timespec64 post_ts; +}; + +struct spi_statistics { + spinlock_t lock; + long unsigned int messages; + long unsigned int transfers; + long unsigned int errors; + long unsigned int timedout; + long unsigned int spi_sync; + long unsigned int spi_sync_immediate; + long unsigned int spi_async; + long long unsigned int bytes; + long long unsigned int bytes_rx; + long long unsigned int bytes_tx; + long unsigned int transfer_bytes_histo[17]; + long unsigned int transfers_split_maxsize; +}; + +struct spi_delay { + u16 value; + u8 unit; +}; + +struct spi_controller; + +struct spi_device { + struct device dev; + struct spi_controller *controller; + struct spi_controller *master; + u32 max_speed_hz; + u8 chip_select; + u8 bits_per_word; + bool rt; + u32 mode; + int irq; + void *controller_state; + void *controller_data; + char modalias[32]; + const char 
*driver_override; + int cs_gpio; + struct gpio_desc *cs_gpiod; + struct spi_delay word_delay; + struct spi_statistics statistics; +}; + +struct spi_message; + +struct spi_transfer; + +struct spi_controller_mem_ops; + +struct spi_controller { + struct device dev; + struct list_head list; + s16 bus_num; + u16 num_chipselect; + u16 dma_alignment; + u32 mode_bits; + u32 buswidth_override_bits; + u32 bits_per_word_mask; + u32 min_speed_hz; + u32 max_speed_hz; + u16 flags; + bool slave; + size_t (*max_transfer_size)(struct spi_device *); + size_t (*max_message_size)(struct spi_device *); + struct mutex io_mutex; + spinlock_t bus_lock_spinlock; + struct mutex bus_lock_mutex; + bool bus_lock_flag; + int (*setup)(struct spi_device *); + int (*set_cs_timing)(struct spi_device *, struct spi_delay *, struct spi_delay *, struct spi_delay *); + int (*transfer)(struct spi_device *, struct spi_message *); + void (*cleanup)(struct spi_device *); + bool (*can_dma)(struct spi_controller *, struct spi_device *, struct spi_transfer *); + bool queued; + struct kthread_worker *kworker; + struct kthread_work pump_messages; + spinlock_t queue_lock; + struct list_head queue; + struct spi_message *cur_msg; + bool idling; + bool busy; + bool running; + bool rt; + bool auto_runtime_pm; + bool cur_msg_prepared; + bool cur_msg_mapped; + bool last_cs_enable; + bool last_cs_mode_high; + bool fallback; + struct completion xfer_completion; + size_t max_dma_len; + int (*prepare_transfer_hardware)(struct spi_controller *); + int (*transfer_one_message)(struct spi_controller *, struct spi_message *); + int (*unprepare_transfer_hardware)(struct spi_controller *); + int (*prepare_message)(struct spi_controller *, struct spi_message *); + int (*unprepare_message)(struct spi_controller *, struct spi_message *); + int (*slave_abort)(struct spi_controller *); + void (*set_cs)(struct spi_device *, bool); + int (*transfer_one)(struct spi_controller *, struct spi_device *, struct spi_transfer *); + void (*handle_err)(struct spi_controller *, struct spi_message *); + const struct spi_controller_mem_ops *mem_ops; + struct spi_delay cs_setup; + struct spi_delay cs_hold; + struct spi_delay cs_inactive; + int *cs_gpios; + struct gpio_desc **cs_gpiods; + bool use_gpio_descriptors; + u8 unused_native_cs; + u8 max_native_cs; + struct spi_statistics statistics; + struct dma_chan *dma_tx; + struct dma_chan *dma_rx; + void *dummy_rx; + void *dummy_tx; + int (*fw_translate_cs)(struct spi_controller *, unsigned int); + bool ptp_sts_supported; + long unsigned int irq_flags; +}; + +struct spi_message { + struct list_head transfers; + struct spi_device *spi; + unsigned int is_dma_mapped: 1; + void (*complete)(void *); + void *context; + unsigned int frame_length; + unsigned int actual_length; + int status; + struct list_head queue; + void *state; + struct list_head resources; +}; + +struct spi_transfer { + const void *tx_buf; + void *rx_buf; + unsigned int len; + dma_addr_t tx_dma; + dma_addr_t rx_dma; + struct sg_table tx_sg; + struct sg_table rx_sg; + unsigned int cs_change: 1; + unsigned int tx_nbits: 3; + unsigned int rx_nbits: 3; + u8 bits_per_word; + u16 delay_usecs; + struct spi_delay delay; + struct spi_delay cs_change_delay; + struct spi_delay word_delay; + u32 speed_hz; + u32 effective_speed_hz; + unsigned int ptp_sts_word_pre; + unsigned int ptp_sts_word_post; + struct ptp_system_timestamp *ptp_sts; + bool timestamped; + struct list_head transfer_list; + u16 error; +}; + +struct spi_mem; + +struct spi_mem_op; + +struct spi_mem_dirmap_desc; 
+ +struct spi_controller_mem_ops { + int (*adjust_op_size)(struct spi_mem *, struct spi_mem_op *); + bool (*supports_op)(struct spi_mem *, const struct spi_mem_op *); + int (*exec_op)(struct spi_mem *, const struct spi_mem_op *); + const char * (*get_name)(struct spi_mem *); + int (*dirmap_create)(struct spi_mem_dirmap_desc *); + void (*dirmap_destroy)(struct spi_mem_dirmap_desc *); + ssize_t (*dirmap_read)(struct spi_mem_dirmap_desc *, u64, size_t, void *); + ssize_t (*dirmap_write)(struct spi_mem_dirmap_desc *, u64, size_t, const void *); +}; + +struct regmap_async_spi { + struct regmap_async core; + struct spi_message m; + struct spi_transfer t[2]; +}; + +struct regmap_mmio_context { + void *regs; + unsigned int val_bytes; + bool attached_clk; + struct clk *clk; + void (*reg_write)(struct regmap_mmio_context *, unsigned int, unsigned int); + unsigned int (*reg_read)(struct regmap_mmio_context *, unsigned int); +}; + +struct regmap_irq_chip_data___2 { + struct mutex lock; + struct irq_chip irq_chip; + struct regmap___2 *map; + const struct regmap_irq_chip *chip; + int irq_base; + struct irq_domain *domain; + int irq; + int wake_count; + void *status_reg_buf; + unsigned int *main_status_buf; + unsigned int *status_buf; + unsigned int *mask_buf; + unsigned int *mask_buf_def; + unsigned int *wake_buf; + unsigned int *type_buf; + unsigned int *type_buf_def; + unsigned int irq_reg_stride; + unsigned int type_reg_stride; + bool clear_status: 1; +}; + +struct soc_device___2 { + struct device dev; + struct soc_device_attribute *attr; + int soc_dev_num; +}; + +struct devcd_entry { + struct device devcd_dev; + void *data; + size_t datalen; + struct module *owner; + ssize_t (*read)(char *, loff_t, size_t, void *, size_t); + void (*free)(void *); + struct delayed_work del_wk; + struct device *failing_dev; +}; + +typedef void (*irq_write_msi_msg_t)(struct msi_desc *, struct msi_msg *); + +struct platform_msi_priv_data { + struct device *dev; + void *host_data; + msi_alloc_info_t arg; + irq_write_msi_msg_t write_msg; + int devid; +}; + +struct brd_device { + int brd_number; + struct request_queue *brd_queue; + struct gendisk *brd_disk; + struct list_head brd_list; + spinlock_t brd_lock; + struct xarray brd_pages; +}; + +typedef unsigned int __kernel_old_dev_t; + +enum { + LO_FLAGS_READ_ONLY = 1, + LO_FLAGS_AUTOCLEAR = 4, + LO_FLAGS_PARTSCAN = 8, + LO_FLAGS_DIRECT_IO = 16, +}; + +struct loop_info { + int lo_number; + __kernel_old_dev_t lo_device; + long unsigned int lo_inode; + __kernel_old_dev_t lo_rdevice; + int lo_offset; + int lo_encrypt_type; + int lo_encrypt_key_size; + int lo_flags; + char lo_name[64]; + unsigned char lo_encrypt_key[32]; + long unsigned int lo_init[2]; + char reserved[4]; +}; + +struct loop_info64 { + __u64 lo_device; + __u64 lo_inode; + __u64 lo_rdevice; + __u64 lo_offset; + __u64 lo_sizelimit; + __u32 lo_number; + __u32 lo_encrypt_type; + __u32 lo_encrypt_key_size; + __u32 lo_flags; + __u8 lo_file_name[64]; + __u8 lo_crypt_name[64]; + __u8 lo_encrypt_key[32]; + __u64 lo_init[2]; +}; + +struct loop_config { + __u32 fd; + __u32 block_size; + struct loop_info64 info; + __u64 __reserved[8]; +}; + +enum { + Lo_unbound = 0, + Lo_bound = 1, + Lo_rundown = 2, +}; + +struct loop_func_table; + +struct loop_device { + int lo_number; + atomic_t lo_refcnt; + loff_t lo_offset; + loff_t lo_sizelimit; + int lo_flags; + int (*transfer)(struct loop_device *, int, struct page *, unsigned int, struct page *, unsigned int, int, sector_t); + char lo_file_name[64]; + char lo_crypt_name[64]; + char 
lo_encrypt_key[32]; + int lo_encrypt_key_size; + struct loop_func_table *lo_encryption; + __u32 lo_init[2]; + kuid_t lo_key_owner; + int (*ioctl)(struct loop_device *, int, long unsigned int); + struct file *lo_backing_file; + struct block_device *lo_device; + void *key_data; + gfp_t old_gfp_mask; + spinlock_t lo_lock; + int lo_state; + struct kthread_worker worker; + struct task_struct *worker_task; + bool use_dio; + bool sysfs_inited; + struct request_queue *lo_queue; + struct blk_mq_tag_set tag_set; + struct gendisk *lo_disk; +}; + +struct loop_func_table { + int number; + int (*transfer)(struct loop_device *, int, struct page *, unsigned int, struct page *, unsigned int, int, sector_t); + int (*init)(struct loop_device *, const struct loop_info64 *); + int (*release)(struct loop_device *); + int (*ioctl)(struct loop_device *, int, long unsigned int); + struct module *owner; +}; + +struct loop_cmd { + struct kthread_work work; + bool use_aio; + atomic_t ref; + long int ret; + struct kiocb iocb; + struct bio_vec *bvec; + struct cgroup_subsys_state *css; +}; + +struct compat_loop_info { + compat_int_t lo_number; + compat_dev_t lo_device; + compat_ulong_t lo_inode; + compat_dev_t lo_rdevice; + compat_int_t lo_offset; + compat_int_t lo_encrypt_type; + compat_int_t lo_encrypt_key_size; + compat_int_t lo_flags; + char lo_name[64]; + unsigned char lo_encrypt_key[32]; + compat_ulong_t lo_init[2]; + char reserved[4]; +}; + +typedef uint16_t blkif_vdev_t; + +typedef uint64_t blkif_sector_t; + +struct blkif_request_segment { + grant_ref_t gref; + uint8_t first_sect; + uint8_t last_sect; +}; + +struct blkif_request_rw { + uint8_t nr_segments; + blkif_vdev_t handle; + uint32_t _pad1; + uint64_t id; + blkif_sector_t sector_number; + struct blkif_request_segment seg[11]; +} __attribute__((packed)); + +struct blkif_request_discard { + uint8_t flag; + blkif_vdev_t _pad1; + uint32_t _pad2; + uint64_t id; + blkif_sector_t sector_number; + uint64_t nr_sectors; + uint8_t _pad3; +} __attribute__((packed)); + +struct blkif_request_other { + uint8_t _pad1; + blkif_vdev_t _pad2; + uint32_t _pad3; + uint64_t id; +} __attribute__((packed)); + +struct blkif_request_indirect { + uint8_t indirect_op; + uint16_t nr_segments; + uint32_t _pad1; + uint64_t id; + blkif_sector_t sector_number; + blkif_vdev_t handle; + uint16_t _pad2; + grant_ref_t indirect_grefs[8]; + uint32_t _pad3; +} __attribute__((packed)); + +struct blkif_request { + uint8_t operation; + union { + struct blkif_request_rw rw; + struct blkif_request_discard discard; + struct blkif_request_other other; + struct blkif_request_indirect indirect; + } u; +} __attribute__((packed)); + +struct blkif_response { + uint64_t id; + uint8_t operation; + int16_t status; +}; + +union blkif_sring_entry { + struct blkif_request req; + struct blkif_response rsp; +}; + +struct blkif_sring { + RING_IDX req_prod; + RING_IDX req_event; + RING_IDX rsp_prod; + RING_IDX rsp_event; + uint8_t pad[48]; + union blkif_sring_entry ring[1]; +}; + +struct blkif_front_ring { + RING_IDX req_prod_pvt; + RING_IDX rsp_cons; + unsigned int nr_ents; + struct blkif_sring *sring; +}; + +enum blkif_state { + BLKIF_STATE_DISCONNECTED = 0, + BLKIF_STATE_CONNECTED = 1, + BLKIF_STATE_SUSPENDED = 2, +}; + +struct grant { + grant_ref_t gref; + struct page *page; + struct list_head node; +}; + +enum blk_req_status { + REQ_WAITING = 0, + REQ_DONE = 1, + REQ_ERROR = 2, + REQ_EOPNOTSUPP = 3, +}; + +struct blk_shadow { + struct blkif_request req; + struct request *request; + struct grant **grants_used; + 
struct grant **indirect_grants; + struct scatterlist *sg; + unsigned int num_sg; + enum blk_req_status status; + long unsigned int associated_id; +}; + +struct blkif_req { + blk_status_t error; +}; + +struct blkfront_info; + +struct blkfront_ring_info { + spinlock_t ring_lock; + struct blkif_front_ring ring; + unsigned int ring_ref[16]; + unsigned int evtchn; + unsigned int irq; + struct work_struct work; + struct gnttab_free_callback callback; + struct list_head indirect_pages; + struct list_head grants; + unsigned int persistent_gnts_c; + long unsigned int shadow_free; + struct blkfront_info *dev_info; + struct blk_shadow shadow[0]; +}; + +struct blkfront_info { + struct mutex mutex; + struct xenbus_device *xbdev; + struct gendisk *gd; + u16 sector_size; + unsigned int physical_sector_size; + int vdevice; + blkif_vdev_t handle; + enum blkif_state connected; + unsigned int nr_ring_pages; + struct request_queue *rq; + unsigned int feature_flush: 1; + unsigned int feature_fua: 1; + unsigned int feature_discard: 1; + unsigned int feature_secdiscard: 1; + unsigned int feature_persistent: 1; + unsigned int discard_granularity; + unsigned int discard_alignment; + unsigned int max_indirect_segments; + int is_ready; + struct blk_mq_tag_set tag_set; + struct blkfront_ring_info *rinfo; + unsigned int nr_rings; + unsigned int rinfo_size; + struct list_head requests; + struct bio_list bio_list; + struct list_head info_list; +}; + +struct setup_rw_req { + unsigned int grant_idx; + struct blkif_request_segment *segments; + struct blkfront_ring_info *rinfo; + struct blkif_request *ring_req; + grant_ref_t gref_head; + unsigned int id; + bool need_copy; + unsigned int bvec_off; + char *bvec_data; + bool require_extra_req; + struct blkif_request *extra_ring_req; +}; + +struct copy_from_grant { + const struct blk_shadow *s; + unsigned int grant_idx; + unsigned int bvec_offset; + char *bvec_data; +}; + +struct test_struct { + char *get; + char *put; + void (*get_handler)(char *); + int (*put_handler)(char *, char *); +}; + +struct test_state { + char *name; + struct test_struct *tst; + int idx; + int (*run_test)(int, int); + int (*validate_put)(char *); +}; + +struct sram_partition { + void *base; + struct gen_pool *pool; + struct bin_attribute battr; + struct mutex lock; + struct list_head list; +}; + +struct sram_dev { + struct device *dev; + void *virt_base; + struct gen_pool *pool; + struct clk *clk; + struct sram_partition *partition; + u32 partitions; +}; + +struct sram_reserve { + struct list_head list; + u32 start; + u32 size; + bool export; + bool pool; + bool protect_exec; + const char *label; +}; + +struct mfd_cell_acpi_match; + +struct mfd_cell { + const char *name; + int id; + int level; + int (*enable)(struct platform_device *); + int (*disable)(struct platform_device *); + int (*suspend)(struct platform_device *); + int (*resume)(struct platform_device *); + void *platform_data; + size_t pdata_size; + const struct property_entry *properties; + const char *of_compatible; + const u64 of_reg; + bool use_of_reg; + const struct mfd_cell_acpi_match *acpi_match; + int num_resources; + const struct resource *resources; + bool ignore_resource_conflicts; + bool pm_runtime_no_callbacks; + const char * const *parent_supplies; + int num_parent_supplies; +}; + +struct mfd_cell_acpi_match { + const char *pnpid; + const long long unsigned int adr; +}; + +struct prcm_data { + int nsubdevs; + const struct mfd_cell *subdevs; +}; + +struct arizona_micbias { + int mV; + unsigned int ext_cap: 1; + unsigned int 
discharge: 1; + unsigned int soft_start: 1; + unsigned int bypass: 1; +}; + +struct arizona_micd_config { + unsigned int src; + unsigned int bias; + bool gpio; +}; + +struct arizona_micd_range { + int max; + int key; +}; + +struct arizona_pdata { + struct gpio_desc *reset; + struct arizona_micsupp_pdata micvdd; + struct arizona_ldo1_pdata ldo1; + int clk32k_src; + unsigned int irq_flags; + int gpio_base; + unsigned int gpio_defaults[5]; + unsigned int max_channels_clocked[3]; + bool jd_gpio5; + bool jd_gpio5_nopull; + bool jd_invert; + bool hpdet_acc_id; + bool hpdet_acc_id_line; + int hpdet_id_gpio; + unsigned int hpdet_channel; + bool micd_software_compare; + unsigned int micd_detect_debounce; + int micd_pol_gpio; + unsigned int micd_bias_start_time; + unsigned int micd_rate; + unsigned int micd_dbtime; + unsigned int micd_timeout; + bool micd_force_micbias; + const struct arizona_micd_range *micd_ranges; + int num_micd_ranges; + struct arizona_micd_config *micd_configs; + int num_micd_configs; + int dmic_ref[4]; + struct arizona_micbias micbias[3]; + int inmode[4]; + int out_mono[6]; + unsigned int out_vol_limit[12]; + unsigned int spk_mute[2]; + unsigned int spk_fmt[2]; + unsigned int hap_act; + int irq_gpio; + unsigned int gpsw; +}; + +enum { + ARIZONA_MCLK1 = 0, + ARIZONA_MCLK2 = 1, + ARIZONA_NUM_MCLK = 2, +}; + +enum arizona_type { + WM5102 = 1, + WM5110 = 2, + WM8997 = 3, + WM8280 = 4, + WM8998 = 5, + WM1814 = 6, + WM1831 = 7, + CS47L24 = 8, +}; + +struct arizona { + struct regmap *regmap; + struct device *dev; + enum arizona_type type; + unsigned int rev; + int num_core_supplies; + struct regulator_bulk_data core_supplies[2]; + struct regulator *dcvdd; + bool has_fully_powered_off; + struct arizona_pdata pdata; + unsigned int external_dcvdd: 1; + int irq; + struct irq_domain *virq; + struct regmap_irq_chip_data *aod_irq_chip; + struct regmap_irq_chip_data *irq_chip; + bool hpdet_clamp; + unsigned int hp_ena; + struct mutex clk_lock; + int clk32k_ref; + struct clk *mclk[2]; + bool ctrlif_error; + struct snd_soc_dapm_context *dapm; + int tdm_width[3]; + int tdm_slots[3]; + uint16_t dac_comp_coeff; + uint8_t dac_comp_enabled; + struct mutex dac_comp_lock; + struct blocking_notifier_head notifier; +}; + +struct arizona_sysclk_state { + unsigned int fll; + unsigned int sysclk; +}; + +enum tps65912_irqs { + TPS65912_IRQ_PWRHOLD_F = 0, + TPS65912_IRQ_VMON = 1, + TPS65912_IRQ_PWRON = 2, + TPS65912_IRQ_PWRON_LP = 3, + TPS65912_IRQ_PWRHOLD_R = 4, + TPS65912_IRQ_HOTDIE = 5, + TPS65912_IRQ_GPIO1_R = 6, + TPS65912_IRQ_GPIO1_F = 7, + TPS65912_IRQ_GPIO2_R = 8, + TPS65912_IRQ_GPIO2_F = 9, + TPS65912_IRQ_GPIO3_R = 10, + TPS65912_IRQ_GPIO3_F = 11, + TPS65912_IRQ_GPIO4_R = 12, + TPS65912_IRQ_GPIO4_F = 13, + TPS65912_IRQ_GPIO5_R = 14, + TPS65912_IRQ_GPIO5_F = 15, + TPS65912_IRQ_PGOOD_DCDC1 = 16, + TPS65912_IRQ_PGOOD_DCDC2 = 17, + TPS65912_IRQ_PGOOD_DCDC3 = 18, + TPS65912_IRQ_PGOOD_DCDC4 = 19, + TPS65912_IRQ_PGOOD_LDO1 = 20, + TPS65912_IRQ_PGOOD_LDO2 = 21, + TPS65912_IRQ_PGOOD_LDO3 = 22, + TPS65912_IRQ_PGOOD_LDO4 = 23, + TPS65912_IRQ_PGOOD_LDO5 = 24, + TPS65912_IRQ_PGOOD_LDO6 = 25, + TPS65912_IRQ_PGOOD_LDO7 = 26, + TPS65912_IRQ_PGOOD_LDO8 = 27, + TPS65912_IRQ_PGOOD_LDO9 = 28, + TPS65912_IRQ_PGOOD_LDO10 = 29, +}; + +struct tps65912 { + struct device *dev; + struct regmap *regmap; + int irq; + struct regmap_irq_chip_data *irq_data; +}; + +struct spi_device_id { + char name[32]; + kernel_ulong_t driver_data; +}; + +struct spi_driver { + const struct spi_device_id *id_table; + int (*probe)(struct 
spi_device *); + int (*remove)(struct spi_device *); + void (*shutdown)(struct spi_device *); + struct device_driver driver; +}; + +struct mfd_of_node_entry { + struct list_head list; + struct device *dev; + struct device_node *np; +}; + +struct da9052 { + struct device *dev; + struct regmap *regmap; + struct mutex auxadc_lock; + struct completion done; + int irq_base; + struct regmap_irq_chip_data *irq_data; + u8 chip_id; + int chip_irq; + int (*fix_io)(struct da9052 *, unsigned char); +}; + +struct led_platform_data; + +struct da9052_pdata { + struct led_platform_data *pled; + int (*init)(struct da9052 *); + int irq_base; + int gpio_base; + int use_for_apm; + struct regulator_init_data *regulators[14]; +}; + +enum da9052_chip_id { + DA9052 = 0, + DA9053_AA = 1, + DA9053_BA = 2, + DA9053_BB = 3, + DA9053_BC = 4, +}; + +struct syscon_platform_data { + const char *label; +}; + +struct syscon { + struct device_node *np; + struct regmap *regmap; + struct list_head list; +}; + +struct dax_device___2; + +struct dax_operations { + long int (*direct_access)(struct dax_device___2 *, long unsigned int, long int, void **, pfn_t *); + bool (*dax_supported)(struct dax_device___2 *, struct block_device *, int, sector_t, sector_t); + size_t (*copy_from_iter)(struct dax_device___2 *, long unsigned int, void *, size_t, struct iov_iter *); + size_t (*copy_to_iter)(struct dax_device___2 *, long unsigned int, void *, size_t, struct iov_iter *); + int (*zero_page_range)(struct dax_device___2 *, long unsigned int, size_t); +}; + +struct dax_device___2 { + struct hlist_node list; + struct inode inode; + struct cdev cdev; + const char *host; + void *private; + long unsigned int flags; + const struct dax_operations *ops; +}; + +enum dax_device_flags { + DAXDEV_ALIVE = 0, + DAXDEV_WRITE_CACHE = 1, + DAXDEV_SYNC = 2, +}; + +struct dax_region { + int id; + int target_node; + struct kref kref; + struct device *dev; + unsigned int align; + struct ida ida; + struct resource res; + struct device *seed; + struct device *youngest; +}; + +struct dax_mapping { + struct device dev; + int range_id; + int id; +}; + +struct dev_dax_range { + long unsigned int pgoff; + struct range range; + struct dax_mapping *mapping; +}; + +struct dev_dax { + struct dax_region *region; + struct dax_device *dax_dev; + unsigned int align; + int target_node; + int id; + struct ida ida; + struct device dev; + struct dev_pagemap *pgmap; + int nr_range; + struct dev_dax_range *ranges; +}; + +enum dev_dax_subsys { + DEV_DAX_BUS = 0, + DEV_DAX_CLASS = 1, +}; + +struct dev_dax_data { + struct dax_region *dax_region; + struct dev_pagemap *pgmap; + enum dev_dax_subsys subsys; + resource_size_t size; + int id; +}; + +struct dax_device_driver { + struct device_driver drv; + struct list_head ids; + int match_always; + int (*probe)(struct dev_dax *); + int (*remove)(struct dev_dax *); +}; + +struct dax_id { + struct list_head list; + char dev_name[30]; +}; + +enum id_action { + ID_REMOVE = 0, + ID_ADD = 1, +}; + +struct memregion_info { + int target_node; +}; + +struct seqcount_ww_mutex { + seqcount_t seqcount; +}; + +typedef struct seqcount_ww_mutex seqcount_ww_mutex_t; + +struct dma_fence_ops; + +struct dma_fence { + spinlock_t *lock; + const struct dma_fence_ops *ops; + union { + struct list_head cb_list; + ktime_t timestamp; + struct callback_head rcu; + }; + u64 context; + u64 seqno; + long unsigned int flags; + struct kref refcount; + int error; +}; + +struct dma_fence_ops { + bool use_64bit_seqno; + const char * (*get_driver_name)(struct dma_fence *); 
+ const char * (*get_timeline_name)(struct dma_fence *); + bool (*enable_signaling)(struct dma_fence *); + bool (*signaled)(struct dma_fence *); + long int (*wait)(struct dma_fence *, bool, long int); + void (*release)(struct dma_fence *); + void (*fence_value_str)(struct dma_fence *, char *, int); + void (*timeline_value_str)(struct dma_fence *, char *, int); +}; + +enum dma_fence_flag_bits { + DMA_FENCE_FLAG_SIGNALED_BIT = 0, + DMA_FENCE_FLAG_TIMESTAMP_BIT = 1, + DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT = 2, + DMA_FENCE_FLAG_USER_BITS = 3, +}; + +struct dma_fence_cb; + +typedef void (*dma_fence_func_t)(struct dma_fence *, struct dma_fence_cb *); + +struct dma_fence_cb { + struct list_head node; + dma_fence_func_t func; +}; + +struct dma_buf; + +struct dma_buf_attachment; + +struct dma_buf_ops { + bool cache_sgt_mapping; + int (*attach)(struct dma_buf *, struct dma_buf_attachment *); + void (*detach)(struct dma_buf *, struct dma_buf_attachment *); + int (*pin)(struct dma_buf_attachment *); + void (*unpin)(struct dma_buf_attachment *); + struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *, enum dma_data_direction); + void (*unmap_dma_buf)(struct dma_buf_attachment *, struct sg_table *, enum dma_data_direction); + void (*release)(struct dma_buf *); + int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction); + int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); + int (*mmap)(struct dma_buf *, struct vm_area_struct *); + void * (*vmap)(struct dma_buf *); + void (*vunmap)(struct dma_buf *, void *); +}; + +struct dma_buf_poll_cb_t { + struct dma_fence_cb cb; + wait_queue_head_t *poll; + __poll_t active; +}; + +struct dma_resv; + +struct dma_buf { + size_t size; + struct file *file; + struct list_head attachments; + const struct dma_buf_ops *ops; + struct mutex lock; + unsigned int vmapping_counter; + void *vmap_ptr; + const char *exp_name; + const char *name; + spinlock_t name_lock; + struct module *owner; + struct list_head list_node; + void *priv; + struct dma_resv *resv; + wait_queue_head_t poll; + struct dma_buf_poll_cb_t cb_excl; + struct dma_buf_poll_cb_t cb_shared; +}; + +struct dma_buf_attach_ops; + +struct dma_buf_attachment { + struct dma_buf *dmabuf; + struct device *dev; + struct list_head node; + struct sg_table *sgt; + enum dma_data_direction dir; + bool peer2peer; + const struct dma_buf_attach_ops *importer_ops; + void *importer_priv; + void *priv; +}; + +struct dma_resv_list; + +struct dma_resv { + struct ww_mutex lock; + seqcount_ww_mutex_t seq; + struct dma_fence *fence_excl; + struct dma_resv_list *fence; +}; + +struct dma_buf_attach_ops { + bool allow_peer2peer; + void (*move_notify)(struct dma_buf_attachment *); +}; + +struct dma_buf_export_info { + const char *exp_name; + struct module *owner; + const struct dma_buf_ops *ops; + size_t size; + int flags; + struct dma_resv *resv; + void *priv; +}; + +struct dma_resv_list { + struct callback_head rcu; + u32 shared_count; + u32 shared_max; + struct dma_fence *shared[0]; +}; + +struct dma_buf_sync { + __u64 flags; +}; + +struct dma_buf_list { + struct list_head head; + struct mutex lock; +}; + +struct trace_event_raw_dma_fence { + struct trace_entry ent; + u32 __data_loc_driver; + u32 __data_loc_timeline; + unsigned int context; + unsigned int seqno; + char __data[0]; +}; + +struct trace_event_data_offsets_dma_fence { + u32 driver; + u32 timeline; +}; + +typedef void (*btf_trace_dma_fence_emit)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_init)(void *, struct dma_fence *); + 
+typedef void (*btf_trace_dma_fence_destroy)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_enable_signal)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_signaled)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_wait_start)(void *, struct dma_fence *); + +typedef void (*btf_trace_dma_fence_wait_end)(void *, struct dma_fence *); + +struct default_wait_cb { + struct dma_fence_cb base; + struct task_struct *task; +}; + +struct dma_fence_array; + +struct dma_fence_array_cb { + struct dma_fence_cb cb; + struct dma_fence_array *array; +}; + +struct dma_fence_array { + struct dma_fence base; + spinlock_t lock; + unsigned int num_fences; + atomic_t num_pending; + struct dma_fence **fences; + struct irq_work work; +}; + +struct dma_fence_chain { + struct dma_fence base; + spinlock_t lock; + struct dma_fence *prev; + u64 prev_seqno; + struct dma_fence *fence; + struct dma_fence_cb cb; + struct irq_work work; +}; + +enum seqno_fence_condition { + SEQNO_FENCE_WAIT_GEQUAL = 0, + SEQNO_FENCE_WAIT_NONZERO = 1, +}; + +struct seqno_fence { + struct dma_fence base; + const struct dma_fence_ops *ops; + struct dma_buf *sync_buf; + uint32_t seqno_ofs; + enum seqno_fence_condition condition; +}; + +struct sync_file { + struct file *file; + char user_name[32]; + struct list_head sync_file_list; + wait_queue_head_t wq; + long unsigned int flags; + struct dma_fence *fence; + struct dma_fence_cb cb; +}; + +struct sync_merge_data { + char name[32]; + __s32 fd2; + __s32 fence; + __u32 flags; + __u32 pad; +}; + +struct sync_fence_info { + char obj_name[32]; + char driver_name[32]; + __s32 status; + __u32 flags; + __u64 timestamp_ns; +}; + +struct sync_file_info { + char name[32]; + __s32 status; + __u32 flags; + __u32 num_fences; + __u32 pad; + __u64 sync_fence_info; +}; + +struct scsi_lun { + __u8 scsi_lun[8]; +}; + +struct nvme_user_io { + __u8 opcode; + __u8 flags; + __u16 control; + __u16 nblocks; + __u16 rsvd; + __u64 metadata; + __u64 addr; + __u64 slba; + __u32 dsmgmt; + __u32 reftag; + __u16 apptag; + __u16 appmask; +}; + +struct nvme_passthru_cmd { + __u8 opcode; + __u8 flags; + __u16 rsvd1; + __u32 nsid; + __u32 cdw2; + __u32 cdw3; + __u64 metadata; + __u64 addr; + __u32 metadata_len; + __u32 data_len; + __u32 cdw10; + __u32 cdw11; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 cdw15; + __u32 timeout_ms; + __u32 result; +}; + +struct nvme_passthru_cmd64 { + __u8 opcode; + __u8 flags; + __u16 rsvd1; + __u32 nsid; + __u32 cdw2; + __u32 cdw3; + __u64 metadata; + __u64 addr; + __u32 metadata_len; + __u32 data_len; + __u32 cdw10; + __u32 cdw11; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 cdw15; + __u32 timeout_ms; + __u32 rsvd2; + __u64 result; +}; + +struct nvme_id_power_state { + __le16 max_power; + __u8 rsvd2; + __u8 flags; + __le32 entry_lat; + __le32 exit_lat; + __u8 read_tput; + __u8 read_lat; + __u8 write_tput; + __u8 write_lat; + __le16 idle_power; + __u8 idle_scale; + __u8 rsvd19; + __le16 active_power; + __u8 active_work_scale; + __u8 rsvd23[9]; +}; + +enum { + NVME_PS_FLAGS_MAX_POWER_SCALE = 1, + NVME_PS_FLAGS_NON_OP_STATE = 2, +}; + +enum nvme_ctrl_attr { + NVME_CTRL_ATTR_HID_128_BIT = 1, + NVME_CTRL_ATTR_TBKAS = 64, +}; + +struct nvme_id_ctrl { + __le16 vid; + __le16 ssvid; + char sn[20]; + char mn[40]; + char fr[8]; + __u8 rab; + __u8 ieee[3]; + __u8 cmic; + __u8 mdts; + __le16 cntlid; + __le32 ver; + __le32 rtd3r; + __le32 rtd3e; + __le32 oaes; + __le32 ctratt; + __u8 rsvd100[28]; + __le16 crdt1; + __le16 crdt2; + __le16 
crdt3; + __u8 rsvd134[122]; + __le16 oacs; + __u8 acl; + __u8 aerl; + __u8 frmw; + __u8 lpa; + __u8 elpe; + __u8 npss; + __u8 avscc; + __u8 apsta; + __le16 wctemp; + __le16 cctemp; + __le16 mtfa; + __le32 hmpre; + __le32 hmmin; + __u8 tnvmcap[16]; + __u8 unvmcap[16]; + __le32 rpmbs; + __le16 edstt; + __u8 dsto; + __u8 fwug; + __le16 kas; + __le16 hctma; + __le16 mntmt; + __le16 mxtmt; + __le32 sanicap; + __le32 hmminds; + __le16 hmmaxd; + __u8 rsvd338[4]; + __u8 anatt; + __u8 anacap; + __le32 anagrpmax; + __le32 nanagrpid; + __u8 rsvd352[160]; + __u8 sqes; + __u8 cqes; + __le16 maxcmd; + __le32 nn; + __le16 oncs; + __le16 fuses; + __u8 fna; + __u8 vwc; + __le16 awun; + __le16 awupf; + __u8 nvscc; + __u8 nwpc; + __le16 acwu; + __u8 rsvd534[2]; + __le32 sgls; + __le32 mnan; + __u8 rsvd544[224]; + char subnqn[256]; + __u8 rsvd1024[768]; + __le32 ioccsz; + __le32 iorcsz; + __le16 icdoff; + __u8 ctrattr; + __u8 msdbd; + __u8 rsvd1804[244]; + struct nvme_id_power_state psd[32]; + __u8 vs[1024]; +}; + +enum { + NVME_CTRL_CMIC_MULTI_CTRL = 2, + NVME_CTRL_CMIC_ANA = 8, + NVME_CTRL_ONCS_COMPARE = 1, + NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 2, + NVME_CTRL_ONCS_DSM = 4, + NVME_CTRL_ONCS_WRITE_ZEROES = 8, + NVME_CTRL_ONCS_RESERVATIONS = 32, + NVME_CTRL_ONCS_TIMESTAMP = 64, + NVME_CTRL_VWC_PRESENT = 1, + NVME_CTRL_OACS_SEC_SUPP = 1, + NVME_CTRL_OACS_DIRECTIVES = 32, + NVME_CTRL_OACS_DBBUF_SUPP = 256, + NVME_CTRL_LPA_CMD_EFFECTS_LOG = 2, + NVME_CTRL_CTRATT_128_ID = 1, + NVME_CTRL_CTRATT_NON_OP_PSP = 2, + NVME_CTRL_CTRATT_NVM_SETS = 4, + NVME_CTRL_CTRATT_READ_RECV_LVLS = 8, + NVME_CTRL_CTRATT_ENDURANCE_GROUPS = 16, + NVME_CTRL_CTRATT_PREDICTABLE_LAT = 32, + NVME_CTRL_CTRATT_NAMESPACE_GRANULARITY = 128, + NVME_CTRL_CTRATT_UUID_LIST = 512, +}; + +struct nvme_lbaf { + __le16 ms; + __u8 ds; + __u8 rp; +}; + +struct nvme_id_ns { + __le64 nsze; + __le64 ncap; + __le64 nuse; + __u8 nsfeat; + __u8 nlbaf; + __u8 flbas; + __u8 mc; + __u8 dpc; + __u8 dps; + __u8 nmic; + __u8 rescap; + __u8 fpi; + __u8 dlfeat; + __le16 nawun; + __le16 nawupf; + __le16 nacwu; + __le16 nabsn; + __le16 nabo; + __le16 nabspf; + __le16 noiob; + __u8 nvmcap[16]; + __le16 npwg; + __le16 npwa; + __le16 npdg; + __le16 npda; + __le16 nows; + __u8 rsvd74[18]; + __le32 anagrpid; + __u8 rsvd96[3]; + __u8 nsattr; + __le16 nvmsetid; + __le16 endgid; + __u8 nguid[16]; + __u8 eui64[8]; + struct nvme_lbaf lbaf[16]; + __u8 rsvd192[192]; + __u8 vs[3712]; +}; + +enum { + NVME_ID_CNS_NS = 0, + NVME_ID_CNS_CTRL = 1, + NVME_ID_CNS_NS_ACTIVE_LIST = 2, + NVME_ID_CNS_NS_DESC_LIST = 3, + NVME_ID_CNS_CS_NS = 5, + NVME_ID_CNS_CS_CTRL = 6, + NVME_ID_CNS_NS_PRESENT_LIST = 16, + NVME_ID_CNS_NS_PRESENT = 17, + NVME_ID_CNS_CTRL_NS_LIST = 18, + NVME_ID_CNS_CTRL_LIST = 19, + NVME_ID_CNS_SCNDRY_CTRL_LIST = 21, + NVME_ID_CNS_NS_GRANULARITY = 22, + NVME_ID_CNS_UUID_LIST = 23, +}; + +enum { + NVME_CSI_NVM = 0, + NVME_CSI_ZNS = 2, +}; + +enum { + NVME_DIR_IDENTIFY = 0, + NVME_DIR_STREAMS = 1, + NVME_DIR_SND_ID_OP_ENABLE = 1, + NVME_DIR_SND_ST_OP_REL_ID = 1, + NVME_DIR_SND_ST_OP_REL_RSC = 2, + NVME_DIR_RCV_ID_OP_PARAM = 1, + NVME_DIR_RCV_ST_OP_PARAM = 1, + NVME_DIR_RCV_ST_OP_STATUS = 2, + NVME_DIR_RCV_ST_OP_RESOURCE = 3, + NVME_DIR_ENDIR = 1, +}; + +enum { + NVME_NS_FEAT_THIN = 1, + NVME_NS_FEAT_ATOMICS = 2, + NVME_NS_FEAT_IO_OPT = 16, + NVME_NS_ATTR_RO = 1, + NVME_NS_FLBAS_LBA_MASK = 15, + NVME_NS_FLBAS_META_EXT = 16, + NVME_NS_NMIC_SHARED = 1, + NVME_LBAF_RP_BEST = 0, + NVME_LBAF_RP_BETTER = 1, + NVME_LBAF_RP_GOOD = 2, + NVME_LBAF_RP_DEGRADED = 3, + NVME_NS_DPC_PI_LAST = 16, 
+ NVME_NS_DPC_PI_FIRST = 8, + NVME_NS_DPC_PI_TYPE3 = 4, + NVME_NS_DPC_PI_TYPE2 = 2, + NVME_NS_DPC_PI_TYPE1 = 1, + NVME_NS_DPS_PI_FIRST = 8, + NVME_NS_DPS_PI_MASK = 7, + NVME_NS_DPS_PI_TYPE1 = 1, + NVME_NS_DPS_PI_TYPE2 = 2, + NVME_NS_DPS_PI_TYPE3 = 3, +}; + +struct nvme_ns_id_desc { + __u8 nidt; + __u8 nidl; + __le16 reserved; +}; + +enum { + NVME_NIDT_EUI64 = 1, + NVME_NIDT_NGUID = 2, + NVME_NIDT_UUID = 3, + NVME_NIDT_CSI = 4, +}; + +struct nvme_fw_slot_info_log { + __u8 afi; + __u8 rsvd1[7]; + __le64 frs[7]; + __u8 rsvd64[448]; +}; + +enum { + NVME_CMD_EFFECTS_CSUPP = 1, + NVME_CMD_EFFECTS_LBCC = 2, + NVME_CMD_EFFECTS_NCC = 4, + NVME_CMD_EFFECTS_NIC = 8, + NVME_CMD_EFFECTS_CCC = 16, + NVME_CMD_EFFECTS_CSE_MASK = 196608, + NVME_CMD_EFFECTS_UUID_SEL = 524288, +}; + +struct nvme_effects_log { + __le32 acs[256]; + __le32 iocs[256]; + __u8 resv[2048]; +}; + +enum { + NVME_AER_ERROR = 0, + NVME_AER_SMART = 1, + NVME_AER_NOTICE = 2, + NVME_AER_CSS = 6, + NVME_AER_VS = 7, +}; + +enum { + NVME_AER_NOTICE_NS_CHANGED = 0, + NVME_AER_NOTICE_FW_ACT_STARTING = 1, + NVME_AER_NOTICE_ANA = 3, + NVME_AER_NOTICE_DISC_CHANGED = 240, +}; + +enum { + NVME_AEN_CFG_NS_ATTR = 256, + NVME_AEN_CFG_FW_ACT = 512, + NVME_AEN_CFG_ANA_CHANGE = 2048, + NVME_AEN_CFG_DISC_CHANGE = 2147483648, +}; + +enum nvme_opcode { + nvme_cmd_flush = 0, + nvme_cmd_write = 1, + nvme_cmd_read = 2, + nvme_cmd_write_uncor = 4, + nvme_cmd_compare = 5, + nvme_cmd_write_zeroes = 8, + nvme_cmd_dsm = 9, + nvme_cmd_verify = 12, + nvme_cmd_resv_register = 13, + nvme_cmd_resv_report = 14, + nvme_cmd_resv_acquire = 17, + nvme_cmd_resv_release = 21, + nvme_cmd_zone_mgmt_send = 121, + nvme_cmd_zone_mgmt_recv = 122, + nvme_cmd_zone_append = 125, +}; + +struct nvme_sgl_desc { + __le64 addr; + __le32 length; + __u8 rsvd[3]; + __u8 type; +}; + +struct nvme_keyed_sgl_desc { + __le64 addr; + __u8 length[3]; + __u8 key[4]; + __u8 type; +}; + +union nvme_data_ptr { + struct { + __le64 prp1; + __le64 prp2; + }; + struct nvme_sgl_desc sgl; + struct nvme_keyed_sgl_desc ksgl; +}; + +enum { + NVME_CMD_FUSE_FIRST = 1, + NVME_CMD_FUSE_SECOND = 2, + NVME_CMD_SGL_METABUF = 64, + NVME_CMD_SGL_METASEG = 128, + NVME_CMD_SGL_ALL = 192, +}; + +struct nvme_common_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le32 cdw2[2]; + __le64 metadata; + union nvme_data_ptr dptr; + __le32 cdw10; + __le32 cdw11; + __le32 cdw12; + __le32 cdw13; + __le32 cdw14; + __le32 cdw15; +}; + +struct nvme_rw_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2; + __le64 metadata; + union nvme_data_ptr dptr; + __le64 slba; + __le16 length; + __le16 control; + __le32 dsmgmt; + __le32 reftag; + __le16 apptag; + __le16 appmask; +}; + +enum { + NVME_RW_LR = 32768, + NVME_RW_FUA = 16384, + NVME_RW_APPEND_PIREMAP = 512, + NVME_RW_DSM_FREQ_UNSPEC = 0, + NVME_RW_DSM_FREQ_TYPICAL = 1, + NVME_RW_DSM_FREQ_RARE = 2, + NVME_RW_DSM_FREQ_READS = 3, + NVME_RW_DSM_FREQ_WRITES = 4, + NVME_RW_DSM_FREQ_RW = 5, + NVME_RW_DSM_FREQ_ONCE = 6, + NVME_RW_DSM_FREQ_PREFETCH = 7, + NVME_RW_DSM_FREQ_TEMP = 8, + NVME_RW_DSM_LATENCY_NONE = 0, + NVME_RW_DSM_LATENCY_IDLE = 16, + NVME_RW_DSM_LATENCY_NORM = 32, + NVME_RW_DSM_LATENCY_LOW = 48, + NVME_RW_DSM_SEQ_REQ = 64, + NVME_RW_DSM_COMPRESSED = 128, + NVME_RW_PRINFO_PRCHK_REF = 1024, + NVME_RW_PRINFO_PRCHK_APP = 2048, + NVME_RW_PRINFO_PRCHK_GUARD = 4096, + NVME_RW_PRINFO_PRACT = 8192, + NVME_RW_DTYPE_STREAMS = 16, +}; + +struct nvme_dsm_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 
rsvd2[2]; + union nvme_data_ptr dptr; + __le32 nr; + __le32 attributes; + __u32 rsvd12[4]; +}; + +enum { + NVME_DSMGMT_IDR = 1, + NVME_DSMGMT_IDW = 2, + NVME_DSMGMT_AD = 4, +}; + +struct nvme_dsm_range { + __le32 cattr; + __le32 nlb; + __le64 slba; +}; + +struct nvme_write_zeroes_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2; + __le64 metadata; + union nvme_data_ptr dptr; + __le64 slba; + __le16 length; + __le16 control; + __le32 dsmgmt; + __le32 reftag; + __le16 apptag; + __le16 appmask; +}; + +enum nvme_zone_mgmt_action { + NVME_ZONE_CLOSE = 1, + NVME_ZONE_FINISH = 2, + NVME_ZONE_OPEN = 3, + NVME_ZONE_RESET = 4, + NVME_ZONE_OFFLINE = 5, + NVME_ZONE_SET_DESC_EXT = 16, +}; + +struct nvme_zone_mgmt_send_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le32 cdw2[2]; + __le64 metadata; + union nvme_data_ptr dptr; + __le64 slba; + __le32 cdw12; + __u8 zsa; + __u8 select_all; + __u8 rsvd13[2]; + __le32 cdw14[2]; +}; + +struct nvme_zone_mgmt_recv_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le64 rsvd2[2]; + union nvme_data_ptr dptr; + __le64 slba; + __le32 numd; + __u8 zra; + __u8 zrasf; + __u8 pr; + __u8 rsvd13; + __le32 cdw14[2]; +}; + +struct nvme_feat_auto_pst { + __le64 entries[32]; +}; + +struct nvme_feat_host_behavior { + __u8 acre; + __u8 resv1[511]; +}; + +enum { + NVME_ENABLE_ACRE = 1, +}; + +enum nvme_admin_opcode { + nvme_admin_delete_sq = 0, + nvme_admin_create_sq = 1, + nvme_admin_get_log_page = 2, + nvme_admin_delete_cq = 4, + nvme_admin_create_cq = 5, + nvme_admin_identify = 6, + nvme_admin_abort_cmd = 8, + nvme_admin_set_features = 9, + nvme_admin_get_features = 10, + nvme_admin_async_event = 12, + nvme_admin_ns_mgmt = 13, + nvme_admin_activate_fw = 16, + nvme_admin_download_fw = 17, + nvme_admin_dev_self_test = 20, + nvme_admin_ns_attach = 21, + nvme_admin_keep_alive = 24, + nvme_admin_directive_send = 25, + nvme_admin_directive_recv = 26, + nvme_admin_virtual_mgmt = 28, + nvme_admin_nvme_mi_send = 29, + nvme_admin_nvme_mi_recv = 30, + nvme_admin_dbbuf = 124, + nvme_admin_format_nvm = 128, + nvme_admin_security_send = 129, + nvme_admin_security_recv = 130, + nvme_admin_sanitize_nvm = 132, + nvme_admin_get_lba_status = 134, + nvme_admin_vendor_start = 192, +}; + +enum { + NVME_QUEUE_PHYS_CONTIG = 1, + NVME_CQ_IRQ_ENABLED = 2, + NVME_SQ_PRIO_URGENT = 0, + NVME_SQ_PRIO_HIGH = 2, + NVME_SQ_PRIO_MEDIUM = 4, + NVME_SQ_PRIO_LOW = 6, + NVME_FEAT_ARBITRATION = 1, + NVME_FEAT_POWER_MGMT = 2, + NVME_FEAT_LBA_RANGE = 3, + NVME_FEAT_TEMP_THRESH = 4, + NVME_FEAT_ERR_RECOVERY = 5, + NVME_FEAT_VOLATILE_WC = 6, + NVME_FEAT_NUM_QUEUES = 7, + NVME_FEAT_IRQ_COALESCE = 8, + NVME_FEAT_IRQ_CONFIG = 9, + NVME_FEAT_WRITE_ATOMIC = 10, + NVME_FEAT_ASYNC_EVENT = 11, + NVME_FEAT_AUTO_PST = 12, + NVME_FEAT_HOST_MEM_BUF = 13, + NVME_FEAT_TIMESTAMP = 14, + NVME_FEAT_KATO = 15, + NVME_FEAT_HCTM = 16, + NVME_FEAT_NOPSC = 17, + NVME_FEAT_RRL = 18, + NVME_FEAT_PLM_CONFIG = 19, + NVME_FEAT_PLM_WINDOW = 20, + NVME_FEAT_HOST_BEHAVIOR = 22, + NVME_FEAT_SANITIZE = 23, + NVME_FEAT_SW_PROGRESS = 128, + NVME_FEAT_HOST_ID = 129, + NVME_FEAT_RESV_MASK = 130, + NVME_FEAT_RESV_PERSIST = 131, + NVME_FEAT_WRITE_PROTECT = 132, + NVME_FEAT_VENDOR_START = 192, + NVME_FEAT_VENDOR_END = 255, + NVME_LOG_ERROR = 1, + NVME_LOG_SMART = 2, + NVME_LOG_FW_SLOT = 3, + NVME_LOG_CHANGED_NS = 4, + NVME_LOG_CMD_EFFECTS = 5, + NVME_LOG_DEVICE_SELF_TEST = 6, + NVME_LOG_TELEMETRY_HOST = 7, + NVME_LOG_TELEMETRY_CTRL = 8, + NVME_LOG_ENDURANCE_GROUP = 9, + 
NVME_LOG_ANA = 12, + NVME_LOG_DISC = 112, + NVME_LOG_RESERVATION = 128, + NVME_FWACT_REPL = 0, + NVME_FWACT_REPL_ACTV = 8, + NVME_FWACT_ACTV = 16, +}; + +struct nvme_identify { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __u8 cns; + __u8 rsvd3; + __le16 ctrlid; + __u8 rsvd11[3]; + __u8 csi; + __u32 rsvd12[4]; +}; + +struct nvme_features { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __le32 fid; + __le32 dword11; + __le32 dword12; + __le32 dword13; + __le32 dword14; + __le32 dword15; +}; + +struct nvme_create_cq { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __u64 rsvd8; + __le16 cqid; + __le16 qsize; + __le16 cq_flags; + __le16 irq_vector; + __u32 rsvd12[4]; +}; + +struct nvme_create_sq { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __u64 rsvd8; + __le16 sqid; + __le16 qsize; + __le16 sq_flags; + __le16 cqid; + __u32 rsvd12[4]; +}; + +struct nvme_delete_queue { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[9]; + __le16 qid; + __u16 rsvd10; + __u32 rsvd11[5]; +}; + +struct nvme_abort_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[9]; + __le16 sqid; + __u16 cid; + __u32 rsvd11[5]; +}; + +struct nvme_download_firmware { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + union nvme_data_ptr dptr; + __le32 numd; + __le32 offset; + __u32 rsvd12[4]; +}; + +struct nvme_format_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[4]; + __le32 cdw10; + __u32 rsvd11[5]; +}; + +struct nvme_get_log_page_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __u8 lid; + __u8 lsp; + __le16 numdl; + __le16 numdu; + __u16 rsvd11; + union { + struct { + __le32 lpol; + __le32 lpou; + }; + __le64 lpo; + }; + __u8 rsvd14[3]; + __u8 csi; + __u32 rsvd15; +}; + +struct nvme_directive_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + union nvme_data_ptr dptr; + __le32 numd; + __u8 doper; + __u8 dtype; + __le16 dspec; + __u8 endir; + __u8 tdtype; + __u16 rsvd15; + __u32 rsvd16[3]; +}; + +enum nvmf_fabrics_opcode { + nvme_fabrics_command = 127, +}; + +enum nvmf_capsule_command { + nvme_fabrics_type_property_set = 0, + nvme_fabrics_type_connect = 1, + nvme_fabrics_type_property_get = 4, +}; + +struct nvmf_common_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[35]; + __u8 ts[24]; +}; + +struct nvmf_connect_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[19]; + union nvme_data_ptr dptr; + __le16 recfmt; + __le16 qid; + __le16 sqsize; + __u8 cattr; + __u8 resv3; + __le32 kato; + __u8 resv4[12]; +}; + +struct nvmf_property_set_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[35]; + __u8 attrib; + __u8 resv3[3]; + __le32 offset; + __le64 value; + __u8 resv4[8]; +}; + +struct nvmf_property_get_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[35]; + __u8 attrib; + __u8 resv3[3]; + __le32 offset; + __u8 resv4[16]; +}; + +struct nvme_dbbuf { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __le64 prp2; + __u32 rsvd12[6]; +}; + +struct streams_directive_params { + __le16 msl; + __le16 nssa; + __le16 nsso; + __u8 rsvd[10]; + __le32 sws; + 
__le16 sgs; + __le16 nsa; + __le16 nso; + __u8 rsvd2[6]; +}; + +struct nvme_command { + union { + struct nvme_common_command common; + struct nvme_rw_command rw; + struct nvme_identify identify; + struct nvme_features features; + struct nvme_create_cq create_cq; + struct nvme_create_sq create_sq; + struct nvme_delete_queue delete_queue; + struct nvme_download_firmware dlfw; + struct nvme_format_cmd format; + struct nvme_dsm_cmd dsm; + struct nvme_write_zeroes_cmd write_zeroes; + struct nvme_zone_mgmt_send_cmd zms; + struct nvme_zone_mgmt_recv_cmd zmr; + struct nvme_abort_cmd abort; + struct nvme_get_log_page_command get_log_page; + struct nvmf_common_command fabrics; + struct nvmf_connect_command connect; + struct nvmf_property_set_command prop_set; + struct nvmf_property_get_command prop_get; + struct nvme_dbbuf dbbuf; + struct nvme_directive_cmd directive; + }; +}; + +enum { + NVME_SC_SUCCESS = 0, + NVME_SC_INVALID_OPCODE = 1, + NVME_SC_INVALID_FIELD = 2, + NVME_SC_CMDID_CONFLICT = 3, + NVME_SC_DATA_XFER_ERROR = 4, + NVME_SC_POWER_LOSS = 5, + NVME_SC_INTERNAL = 6, + NVME_SC_ABORT_REQ = 7, + NVME_SC_ABORT_QUEUE = 8, + NVME_SC_FUSED_FAIL = 9, + NVME_SC_FUSED_MISSING = 10, + NVME_SC_INVALID_NS = 11, + NVME_SC_CMD_SEQ_ERROR = 12, + NVME_SC_SGL_INVALID_LAST = 13, + NVME_SC_SGL_INVALID_COUNT = 14, + NVME_SC_SGL_INVALID_DATA = 15, + NVME_SC_SGL_INVALID_METADATA = 16, + NVME_SC_SGL_INVALID_TYPE = 17, + NVME_SC_SGL_INVALID_OFFSET = 22, + NVME_SC_SGL_INVALID_SUBTYPE = 23, + NVME_SC_SANITIZE_FAILED = 28, + NVME_SC_SANITIZE_IN_PROGRESS = 29, + NVME_SC_NS_WRITE_PROTECTED = 32, + NVME_SC_CMD_INTERRUPTED = 33, + NVME_SC_LBA_RANGE = 128, + NVME_SC_CAP_EXCEEDED = 129, + NVME_SC_NS_NOT_READY = 130, + NVME_SC_RESERVATION_CONFLICT = 131, + NVME_SC_CQ_INVALID = 256, + NVME_SC_QID_INVALID = 257, + NVME_SC_QUEUE_SIZE = 258, + NVME_SC_ABORT_LIMIT = 259, + NVME_SC_ABORT_MISSING = 260, + NVME_SC_ASYNC_LIMIT = 261, + NVME_SC_FIRMWARE_SLOT = 262, + NVME_SC_FIRMWARE_IMAGE = 263, + NVME_SC_INVALID_VECTOR = 264, + NVME_SC_INVALID_LOG_PAGE = 265, + NVME_SC_INVALID_FORMAT = 266, + NVME_SC_FW_NEEDS_CONV_RESET = 267, + NVME_SC_INVALID_QUEUE = 268, + NVME_SC_FEATURE_NOT_SAVEABLE = 269, + NVME_SC_FEATURE_NOT_CHANGEABLE = 270, + NVME_SC_FEATURE_NOT_PER_NS = 271, + NVME_SC_FW_NEEDS_SUBSYS_RESET = 272, + NVME_SC_FW_NEEDS_RESET = 273, + NVME_SC_FW_NEEDS_MAX_TIME = 274, + NVME_SC_FW_ACTIVATE_PROHIBITED = 275, + NVME_SC_OVERLAPPING_RANGE = 276, + NVME_SC_NS_INSUFFICIENT_CAP = 277, + NVME_SC_NS_ID_UNAVAILABLE = 278, + NVME_SC_NS_ALREADY_ATTACHED = 280, + NVME_SC_NS_IS_PRIVATE = 281, + NVME_SC_NS_NOT_ATTACHED = 282, + NVME_SC_THIN_PROV_NOT_SUPP = 283, + NVME_SC_CTRL_LIST_INVALID = 284, + NVME_SC_BP_WRITE_PROHIBITED = 286, + NVME_SC_PMR_SAN_PROHIBITED = 291, + NVME_SC_BAD_ATTRIBUTES = 384, + NVME_SC_INVALID_PI = 385, + NVME_SC_READ_ONLY = 386, + NVME_SC_ONCS_NOT_SUPPORTED = 387, + NVME_SC_CONNECT_FORMAT = 384, + NVME_SC_CONNECT_CTRL_BUSY = 385, + NVME_SC_CONNECT_INVALID_PARAM = 386, + NVME_SC_CONNECT_RESTART_DISC = 387, + NVME_SC_CONNECT_INVALID_HOST = 388, + NVME_SC_DISCOVERY_RESTART = 400, + NVME_SC_AUTH_REQUIRED = 401, + NVME_SC_ZONE_BOUNDARY_ERROR = 440, + NVME_SC_ZONE_FULL = 441, + NVME_SC_ZONE_READ_ONLY = 442, + NVME_SC_ZONE_OFFLINE = 443, + NVME_SC_ZONE_INVALID_WRITE = 444, + NVME_SC_ZONE_TOO_MANY_ACTIVE = 445, + NVME_SC_ZONE_TOO_MANY_OPEN = 446, + NVME_SC_ZONE_INVALID_TRANSITION = 447, + NVME_SC_WRITE_FAULT = 640, + NVME_SC_READ_ERROR = 641, + NVME_SC_GUARD_CHECK = 642, + NVME_SC_APPTAG_CHECK = 643, + NVME_SC_REFTAG_CHECK = 
644, + NVME_SC_COMPARE_FAILED = 645, + NVME_SC_ACCESS_DENIED = 646, + NVME_SC_UNWRITTEN_BLOCK = 647, + NVME_SC_ANA_PERSISTENT_LOSS = 769, + NVME_SC_ANA_INACCESSIBLE = 770, + NVME_SC_ANA_TRANSITION = 771, + NVME_SC_HOST_PATH_ERROR = 880, + NVME_SC_HOST_ABORTED_CMD = 881, + NVME_SC_CRD = 6144, + NVME_SC_DNR = 16384, +}; + +union nvme_result { + __le16 u16; + __le32 u32; + __le64 u64; +}; + +enum nvme_quirks { + NVME_QUIRK_STRIPE_SIZE = 1, + NVME_QUIRK_IDENTIFY_CNS = 2, + NVME_QUIRK_DEALLOCATE_ZEROES = 4, + NVME_QUIRK_DELAY_BEFORE_CHK_RDY = 8, + NVME_QUIRK_NO_APST = 16, + NVME_QUIRK_NO_DEEPEST_PS = 32, + NVME_QUIRK_LIGHTNVM = 64, + NVME_QUIRK_MEDIUM_PRIO_SQ = 128, + NVME_QUIRK_IGNORE_DEV_SUBNQN = 256, + NVME_QUIRK_DISABLE_WRITE_ZEROES = 512, + NVME_QUIRK_SIMPLE_SUSPEND = 1024, + NVME_QUIRK_SINGLE_VECTOR = 2048, + NVME_QUIRK_128_BYTES_SQES = 4096, + NVME_QUIRK_SHARED_TAGS = 8192, + NVME_QUIRK_NO_TEMP_THRESH_CHANGE = 16384, + NVME_QUIRK_NO_NS_DESC_LIST = 32768, +}; + +struct nvme_ctrl; + +struct nvme_request { + struct nvme_command *cmd; + union nvme_result result; + u8 retries; + u8 flags; + u16 status; + struct nvme_ctrl *ctrl; +}; + +enum nvme_ctrl_state { + NVME_CTRL_NEW = 0, + NVME_CTRL_LIVE = 1, + NVME_CTRL_RESETTING = 2, + NVME_CTRL_CONNECTING = 3, + NVME_CTRL_DELETING = 4, + NVME_CTRL_DELETING_NOIO = 5, + NVME_CTRL_DEAD = 6, +}; + +struct opal_dev; + +struct nvme_fault_inject {}; + +struct nvme_ctrl_ops; + +struct nvme_subsystem; + +struct nvmf_ctrl_options; + +struct nvme_ctrl { + bool comp_seen; + enum nvme_ctrl_state state; + bool identified; + spinlock_t lock; + struct mutex scan_lock; + const struct nvme_ctrl_ops *ops; + struct request_queue *admin_q; + struct request_queue *connect_q; + struct request_queue *fabrics_q; + struct device *dev; + int instance; + int numa_node; + struct blk_mq_tag_set *tagset; + struct blk_mq_tag_set *admin_tagset; + struct list_head namespaces; + struct rw_semaphore namespaces_rwsem; + struct device ctrl_device; + struct device *device; + struct cdev cdev; + struct work_struct reset_work; + struct work_struct delete_work; + wait_queue_head_t state_wq; + struct nvme_subsystem *subsys; + struct list_head subsys_entry; + struct opal_dev *opal_dev; + char name[12]; + u16 cntlid; + u32 ctrl_config; + u16 mtfa; + u32 queue_count; + u64 cap; + u32 max_hw_sectors; + u32 max_segments; + u32 max_integrity_segments; + u32 max_zone_append; + u16 crdt[3]; + u16 oncs; + u16 oacs; + u16 nssa; + u16 nr_streams; + u16 sqsize; + u32 max_namespaces; + atomic_t abort_limit; + u8 vwc; + u32 vs; + u32 sgls; + u16 kas; + u8 npss; + u8 apsta; + u16 wctemp; + u16 cctemp; + u32 oaes; + u32 aen_result; + u32 ctratt; + unsigned int shutdown_timeout; + unsigned int kato; + bool subsystem; + long unsigned int quirks; + struct nvme_id_power_state psd[32]; + struct nvme_effects_log *effects; + struct xarray cels; + struct work_struct scan_work; + struct work_struct async_event_work; + struct delayed_work ka_work; + struct nvme_command ka_cmd; + struct work_struct fw_act_work; + long unsigned int events; + u64 ps_max_latency_us; + bool apst_enabled; + u32 hmpre; + u32 hmmin; + u32 hmminds; + u16 hmmaxd; + u32 ioccsz; + u32 iorcsz; + u16 icdoff; + u16 maxcmd; + int nr_reconnects; + struct nvmf_ctrl_options *opts; + struct page *discard_page; + long unsigned int discard_page_busy; + struct nvme_fault_inject fault_inject; +}; + +enum { + NVME_REQ_CANCELLED = 1, + NVME_REQ_USERCMD = 2, +}; + +struct nvme_ctrl_ops { + const char *name; + struct module *module; + unsigned int flags; + int 
(*reg_read32)(struct nvme_ctrl *, u32, u32 *); + int (*reg_write32)(struct nvme_ctrl *, u32, u32); + int (*reg_read64)(struct nvme_ctrl *, u32, u64 *); + void (*free_ctrl)(struct nvme_ctrl *); + void (*submit_async_event)(struct nvme_ctrl *); + void (*delete_ctrl)(struct nvme_ctrl *); + int (*get_address)(struct nvme_ctrl *, char *, int); +}; + +struct nvme_subsystem { + int instance; + struct device dev; + struct kref ref; + struct list_head entry; + struct mutex lock; + struct list_head ctrls; + struct list_head nsheads; + char subnqn[223]; + char serial[20]; + char model[40]; + char firmware_rev[8]; + u8 cmic; + u16 vendor_id; + u16 awupf; + struct ida ns_ida; +}; + +struct nvmf_host; + +struct nvmf_ctrl_options { + unsigned int mask; + char *transport; + char *subsysnqn; + char *traddr; + char *trsvcid; + char *host_traddr; + size_t queue_size; + unsigned int nr_io_queues; + unsigned int reconnect_delay; + bool discovery_nqn; + bool duplicate_connect; + unsigned int kato; + struct nvmf_host *host; + int max_reconnects; + bool disable_sqflow; + bool hdr_digest; + bool data_digest; + unsigned int nr_write_queues; + unsigned int nr_poll_queues; + int tos; +}; + +struct nvme_ns_ids { + u8 eui64[8]; + u8 nguid[16]; + uuid_t uuid; + u8 csi; +}; + +struct nvme_ns_head { + struct list_head list; + struct srcu_struct srcu; + struct nvme_subsystem *subsys; + unsigned int ns_id; + struct nvme_ns_ids ids; + struct list_head entry; + struct kref ref; + bool shared; + int instance; + struct nvme_effects_log *effects; +}; + +enum nvme_ns_features { + NVME_NS_EXT_LBAS = 1, + NVME_NS_METADATA_SUPPORTED = 2, +}; + +struct nvme_ns { + struct list_head list; + struct nvme_ctrl *ctrl; + struct request_queue *queue; + struct gendisk *disk; + struct list_head siblings; + struct nvm_dev *ndev; + struct kref kref; + struct nvme_ns_head *head; + int lba_shift; + u16 ms; + u16 sgs; + u32 sws; + u8 pi_type; + u64 zsze; + long unsigned int features; + long unsigned int flags; + struct nvme_fault_inject fault_inject; +}; + +struct nvmf_host { + struct kref ref; + struct list_head list; + char nqn[223]; + uuid_t id; +}; + +struct trace_event_raw_nvme_setup_cmd { + struct trace_entry ent; + char disk[32]; + int ctrl_id; + int qid; + u8 opcode; + u8 flags; + u8 fctype; + u16 cid; + u32 nsid; + u64 metadata; + u8 cdw10[24]; + char __data[0]; +}; + +struct trace_event_raw_nvme_complete_rq { + struct trace_entry ent; + char disk[32]; + int ctrl_id; + int qid; + int cid; + u64 result; + u8 retries; + u8 flags; + u16 status; + char __data[0]; +}; + +struct trace_event_raw_nvme_async_event { + struct trace_entry ent; + int ctrl_id; + u32 result; + char __data[0]; +}; + +struct trace_event_raw_nvme_sq { + struct trace_entry ent; + int ctrl_id; + char disk[32]; + int qid; + u16 sq_head; + u16 sq_tail; + char __data[0]; +}; + +struct trace_event_data_offsets_nvme_setup_cmd {}; + +struct trace_event_data_offsets_nvme_complete_rq {}; + +struct trace_event_data_offsets_nvme_async_event {}; + +struct trace_event_data_offsets_nvme_sq {}; + +typedef void (*btf_trace_nvme_setup_cmd)(void *, struct request *, struct nvme_command *); + +typedef void (*btf_trace_nvme_complete_rq)(void *, struct request *); + +typedef void (*btf_trace_nvme_async_event)(void *, struct nvme_ctrl *, u32); + +typedef void (*btf_trace_nvme_sq)(void *, struct request *, __le16, int); + +enum nvme_disposition { + COMPLETE = 0, + RETRY = 1, + FAILOVER = 2, +}; + +struct nvme_core_quirk_entry { + u16 vid; + const char *mn; + const char *fr; + long unsigned int 
quirks; +}; + +struct nvm_user_vio { + __u8 opcode; + __u8 flags; + __u16 control; + __u16 nppas; + __u16 rsvd; + __u64 metadata; + __u64 addr; + __u64 ppa_list; + __u32 metadata_len; + __u32 data_len; + __u64 status; + __u32 result; + __u32 rsvd3[3]; +}; + +struct nvm_passthru_vio { + __u8 opcode; + __u8 flags; + __u8 rsvd[2]; + __u32 nsid; + __u32 cdw2; + __u32 cdw3; + __u64 metadata; + __u64 addr; + __u32 metadata_len; + __u32 data_len; + __u64 ppa_list; + __u16 nppas; + __u16 control; + __u32 cdw13; + __u32 cdw14; + __u32 cdw15; + __u64 status; + __u32 result; + __u32 timeout_ms; +}; + +enum nvme_nvm_admin_opcode { + nvme_nvm_admin_identity = 226, + nvme_nvm_admin_get_bb_tbl = 242, + nvme_nvm_admin_set_bb_tbl = 241, +}; + +enum nvme_nvm_log_page { + NVME_NVM_LOG_REPORT_CHUNK = 202, +}; + +struct nvme_nvm_ph_rw { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2; + __le64 metadata; + __le64 prp1; + __le64 prp2; + __le64 spba; + __le16 length; + __le16 control; + __le32 dsmgmt; + __le64 resv; +}; + +struct nvme_nvm_erase_blk { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd[2]; + __le64 prp1; + __le64 prp2; + __le64 spba; + __le16 length; + __le16 control; + __le32 dsmgmt; + __le64 resv; +}; + +struct nvme_nvm_identity { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd[2]; + __le64 prp1; + __le64 prp2; + __u32 rsvd11[6]; +}; + +struct nvme_nvm_getbbtbl { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd[2]; + __le64 prp1; + __le64 prp2; + __le64 spba; + __u32 rsvd4[4]; +}; + +struct nvme_nvm_setbbtbl { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le64 rsvd[2]; + __le64 prp1; + __le64 prp2; + __le64 spba; + __le16 nlb; + __u8 value; + __u8 rsvd3; + __u32 rsvd4[3]; +}; + +struct nvme_nvm_command { + union { + struct nvme_common_command common; + struct nvme_nvm_ph_rw ph_rw; + struct nvme_nvm_erase_blk erase; + struct nvme_nvm_identity identity; + struct nvme_nvm_getbbtbl get_bb; + struct nvme_nvm_setbbtbl set_bb; + }; +}; + +struct nvme_nvm_id12_grp { + __u8 mtype; + __u8 fmtype; + __le16 res16; + __u8 num_ch; + __u8 num_lun; + __u8 num_pln; + __u8 rsvd1; + __le16 num_chk; + __le16 num_pg; + __le16 fpg_sz; + __le16 csecs; + __le16 sos; + __le16 rsvd2; + __le32 trdt; + __le32 trdm; + __le32 tprt; + __le32 tprm; + __le32 tbet; + __le32 tbem; + __le32 mpos; + __le32 mccap; + __le16 cpar; + __u8 reserved[906]; +}; + +struct nvme_nvm_id12_addrf { + __u8 ch_offset; + __u8 ch_len; + __u8 lun_offset; + __u8 lun_len; + __u8 pln_offset; + __u8 pln_len; + __u8 blk_offset; + __u8 blk_len; + __u8 pg_offset; + __u8 pg_len; + __u8 sec_offset; + __u8 sec_len; + __u8 res[4]; +}; + +struct nvme_nvm_id12 { + __u8 ver_id; + __u8 vmnt; + __u8 cgrps; + __u8 res; + __le32 cap; + __le32 dom; + struct nvme_nvm_id12_addrf ppaf; + __u8 resv[228]; + struct nvme_nvm_id12_grp grp; + __u8 resv2[2880]; +}; + +struct nvme_nvm_bb_tbl { + __u8 tblid[4]; + __le16 verid; + __le16 revid; + __le32 rvsd1; + __le32 tblks; + __le32 tfact; + __le32 tgrown; + __le32 tdresv; + __le32 thresv; + __le32 rsvd2[8]; + __u8 blk[0]; +}; + +struct nvme_nvm_id20_addrf { + __u8 grp_len; + __u8 pu_len; + __u8 chk_len; + __u8 lba_len; + __u8 resv[4]; +}; + +struct nvme_nvm_id20 { + __u8 mjr; + __u8 mnr; + __u8 resv[6]; + struct nvme_nvm_id20_addrf lbaf; + __le32 mccap; + __u8 resv2[12]; + __u8 wit; + __u8 resv3[31]; + __le16 num_grp; + __le16 num_pu; + __le32 num_chk; + __le32 clba; + __u8 resv4[52]; + __le32 
ws_min; + __le32 ws_opt; + __le32 mw_cunits; + __le32 maxoc; + __le32 maxocpu; + __u8 resv5[44]; + __le32 trdt; + __le32 trdm; + __le32 twrt; + __le32 twrm; + __le32 tcrst; + __le32 tcrsm; + __u8 resv6[40]; + __u8 resv7[2816]; + __u8 vs[1024]; +}; + +struct nvme_nvm_chk_meta { + __u8 state; + __u8 type; + __u8 wi; + __u8 rsvd[5]; + __le64 slba; + __le64 cnlb; + __le64 wp; +}; + +struct nvme_zns_lbafe { + __le64 zsze; + __u8 zdes; + __u8 rsvd9[7]; +}; + +struct nvme_id_ns_zns { + __le16 zoc; + __le16 ozcs; + __le32 mar; + __le32 mor; + __le32 rrl; + __le32 frl; + __u8 rsvd20[2796]; + struct nvme_zns_lbafe lbafe[16]; + __u8 rsvd3072[768]; + __u8 vs[256]; +}; + +struct nvme_id_ctrl_zns { + __u8 zasl; + __u8 rsvd1[4095]; +}; + +struct nvme_zone_descriptor { + __u8 zt; + __u8 zs; + __u8 za; + __u8 rsvd3[5]; + __le64 zcap; + __le64 zslba; + __le64 wp; + __u8 rsvd32[32]; +}; + +enum { + NVME_ZONE_TYPE_SEQWRITE_REQ = 2, +}; + +struct nvme_zone_report { + __le64 nr_zones; + __u8 resv8[56]; + struct nvme_zone_descriptor entries[0]; +}; + +enum { + NVME_ZRA_ZONE_REPORT = 0, + NVME_ZRASF_ZONE_REPORT_ALL = 0, + NVME_REPORT_ZONE_PARTIAL = 1, +}; + +enum { + NVME_CMBSZ_SQS = 1, + NVME_CMBSZ_CQS = 2, + NVME_CMBSZ_LISTS = 4, + NVME_CMBSZ_RDS = 8, + NVME_CMBSZ_WDS = 16, + NVME_CMBSZ_SZ_SHIFT = 12, + NVME_CMBSZ_SZ_MASK = 1048575, + NVME_CMBSZ_SZU_SHIFT = 8, + NVME_CMBSZ_SZU_MASK = 15, +}; + +enum { + NVME_SGL_FMT_DATA_DESC = 0, + NVME_SGL_FMT_SEG_DESC = 2, + NVME_SGL_FMT_LAST_SEG_DESC = 3, + NVME_KEY_SGL_FMT_DATA_DESC = 4, + NVME_TRANSPORT_SGL_DATA_DESC = 5, +}; + +enum { + NVME_HOST_MEM_ENABLE = 1, + NVME_HOST_MEM_RETURN = 2, +}; + +struct nvme_host_mem_buf_desc { + __le64 addr; + __le32 size; + __u32 rsvd; +}; + +struct nvme_completion { + union nvme_result result; + __le16 sq_head; + __le16 sq_id; + __u16 command_id; + __le16 status; +}; + +struct nvme_queue; + +struct nvme_dev { + struct nvme_queue *queues; + struct blk_mq_tag_set tagset; + struct blk_mq_tag_set admin_tagset; + u32 *dbs; + struct device *dev; + struct dma_pool___2 *prp_page_pool; + struct dma_pool___2 *prp_small_pool; + unsigned int online_queues; + unsigned int max_qid; + unsigned int io_queues[3]; + unsigned int num_vecs; + u32 q_depth; + int io_sqes; + u32 db_stride; + void *bar; + long unsigned int bar_mapped_size; + struct work_struct remove_work; + struct mutex shutdown_lock; + bool subsystem; + u64 cmb_size; + bool cmb_use_sqes; + u32 cmbsz; + u32 cmbloc; + struct nvme_ctrl ctrl; + u32 last_ps; + mempool_t *iod_mempool; + u32 *dbbuf_dbs; + dma_addr_t dbbuf_dbs_dma_addr; + u32 *dbbuf_eis; + dma_addr_t dbbuf_eis_dma_addr; + u64 host_mem_size; + u32 nr_host_mem_descs; + dma_addr_t host_mem_descs_dma; + struct nvme_host_mem_buf_desc *host_mem_descs; + void **host_mem_desc_bufs; + unsigned int nr_allocated_queues; + unsigned int nr_write_queues; + unsigned int nr_poll_queues; +}; + +struct nvme_queue { + struct nvme_dev *dev; + spinlock_t sq_lock; + void *sq_cmds; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + spinlock_t cq_poll_lock; + struct nvme_completion *cqes; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 *q_db; + u32 q_depth; + u16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 qid; + u8 cq_phase; + u8 sqes; + long unsigned int flags; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; + struct completion delete_done; +}; + +struct nvme_iod { + struct nvme_request req; + struct nvme_queue *nvmeq; + bool use_sgl; + int aborted; + int npages; + int nents; + 
dma_addr_t first_dma; + unsigned int dma_len; + dma_addr_t meta_dma; + struct scatterlist *sg; +}; + +typedef void (*spi_res_release_t)(struct spi_controller *, struct spi_message *, void *); + +struct spi_res { + struct list_head entry; + spi_res_release_t release; + long long unsigned int data[0]; +}; + +struct spi_replaced_transfers; + +typedef void (*spi_replaced_release_t)(struct spi_controller *, struct spi_message *, struct spi_replaced_transfers *); + +struct spi_replaced_transfers { + spi_replaced_release_t release; + void *extradata; + struct list_head replaced_transfers; + struct list_head *replaced_after; + size_t inserted; + struct spi_transfer inserted_transfers[0]; +}; + +struct spi_board_info { + char modalias[32]; + const void *platform_data; + const struct property_entry *properties; + void *controller_data; + int irq; + u32 max_speed_hz; + u16 bus_num; + u16 chip_select; + u32 mode; +}; + +enum spi_mem_data_dir { + SPI_MEM_NO_DATA = 0, + SPI_MEM_DATA_IN = 1, + SPI_MEM_DATA_OUT = 2, +}; + +struct spi_mem_op { + struct { + u8 nbytes; + u8 buswidth; + u8 dtr: 1; + u16 opcode; + } cmd; + struct { + u8 nbytes; + u8 buswidth; + u8 dtr: 1; + u64 val; + } addr; + struct { + u8 nbytes; + u8 buswidth; + u8 dtr: 1; + } dummy; + struct { + u8 buswidth; + u8 dtr: 1; + enum spi_mem_data_dir dir; + unsigned int nbytes; + union { + void *in; + const void *out; + } buf; + } data; +}; + +struct spi_mem_dirmap_info { + struct spi_mem_op op_tmpl; + u64 offset; + u64 length; +}; + +struct spi_mem_dirmap_desc { + struct spi_mem *mem; + struct spi_mem_dirmap_info info; + unsigned int nodirmap; + void *priv; +}; + +struct spi_mem { + struct spi_device *spi; + void *drvpriv; + const char *name; +}; + +struct trace_event_raw_spi_controller { + struct trace_entry ent; + int bus_num; + char __data[0]; +}; + +struct trace_event_raw_spi_message { + struct trace_entry ent; + int bus_num; + int chip_select; + struct spi_message *msg; + char __data[0]; +}; + +struct trace_event_raw_spi_message_done { + struct trace_entry ent; + int bus_num; + int chip_select; + struct spi_message *msg; + unsigned int frame; + unsigned int actual; + char __data[0]; +}; + +struct trace_event_raw_spi_transfer { + struct trace_entry ent; + int bus_num; + int chip_select; + struct spi_transfer *xfer; + int len; + u32 __data_loc_rx_buf; + u32 __data_loc_tx_buf; + char __data[0]; +}; + +struct trace_event_data_offsets_spi_controller {}; + +struct trace_event_data_offsets_spi_message {}; + +struct trace_event_data_offsets_spi_message_done {}; + +struct trace_event_data_offsets_spi_transfer { + u32 rx_buf; + u32 tx_buf; +}; + +typedef void (*btf_trace_spi_controller_idle)(void *, struct spi_controller *); + +typedef void (*btf_trace_spi_controller_busy)(void *, struct spi_controller *); + +typedef void (*btf_trace_spi_message_submit)(void *, struct spi_message *); + +typedef void (*btf_trace_spi_message_start)(void *, struct spi_message *); + +typedef void (*btf_trace_spi_message_done)(void *, struct spi_message *); + +typedef void (*btf_trace_spi_transfer_start)(void *, struct spi_message *, struct spi_transfer *); + +typedef void (*btf_trace_spi_transfer_stop)(void *, struct spi_message *, struct spi_transfer *); + +struct boardinfo { + struct list_head list; + struct spi_board_info board_info; +}; + +struct acpi_spi_lookup { + struct spi_controller *ctlr; + u32 max_speed_hz; + u32 mode; + int irq; + u8 bits_per_word; + u8 chip_select; +}; + +struct spi_mem_driver { + struct spi_driver spidrv; + int (*probe)(struct spi_mem *); 
+ int (*remove)(struct spi_mem *); + void (*shutdown)(struct spi_mem *); +}; + +struct meson_spifc { + struct spi_controller *master; + struct regmap *regmap; + struct clk *clk; + struct device *dev; +}; + +enum orion_spi_type { + ORION_SPI = 0, + ARMADA_SPI = 1, +}; + +struct orion_spi_dev { + enum orion_spi_type typ; + long unsigned int max_hz; + unsigned int min_divisor; + unsigned int max_divisor; + u32 prescale_mask; + bool is_errata_50mhz_ac; +}; + +struct orion_direct_acc { + void *vaddr; + u32 size; +}; + +struct orion_child_options { + struct orion_direct_acc direct_access; +}; + +struct orion_spi { + struct spi_controller *master; + void *base; + struct clk *clk; + struct clk *axi_clk; + const struct orion_spi_dev *devdata; + struct orion_child_options child[8]; +}; + +enum ssp_loopback { + LOOPBACK_DISABLED = 0, + LOOPBACK_ENABLED = 1, +}; + +enum ssp_interface { + SSP_INTERFACE_MOTOROLA_SPI = 0, + SSP_INTERFACE_TI_SYNC_SERIAL = 1, + SSP_INTERFACE_NATIONAL_MICROWIRE = 2, + SSP_INTERFACE_UNIDIRECTIONAL = 3, +}; + +enum ssp_hierarchy { + SSP_MASTER = 0, + SSP_SLAVE = 1, +}; + +struct ssp_clock_params { + u8 cpsdvsr; + u8 scr; +}; + +enum ssp_rx_endian { + SSP_RX_MSB = 0, + SSP_RX_LSB = 1, +}; + +enum ssp_tx_endian { + SSP_TX_MSB = 0, + SSP_TX_LSB = 1, +}; + +enum ssp_data_size { + SSP_DATA_BITS_4 = 3, + SSP_DATA_BITS_5 = 4, + SSP_DATA_BITS_6 = 5, + SSP_DATA_BITS_7 = 6, + SSP_DATA_BITS_8 = 7, + SSP_DATA_BITS_9 = 8, + SSP_DATA_BITS_10 = 9, + SSP_DATA_BITS_11 = 10, + SSP_DATA_BITS_12 = 11, + SSP_DATA_BITS_13 = 12, + SSP_DATA_BITS_14 = 13, + SSP_DATA_BITS_15 = 14, + SSP_DATA_BITS_16 = 15, + SSP_DATA_BITS_17 = 16, + SSP_DATA_BITS_18 = 17, + SSP_DATA_BITS_19 = 18, + SSP_DATA_BITS_20 = 19, + SSP_DATA_BITS_21 = 20, + SSP_DATA_BITS_22 = 21, + SSP_DATA_BITS_23 = 22, + SSP_DATA_BITS_24 = 23, + SSP_DATA_BITS_25 = 24, + SSP_DATA_BITS_26 = 25, + SSP_DATA_BITS_27 = 26, + SSP_DATA_BITS_28 = 27, + SSP_DATA_BITS_29 = 28, + SSP_DATA_BITS_30 = 29, + SSP_DATA_BITS_31 = 30, + SSP_DATA_BITS_32 = 31, +}; + +enum ssp_mode { + INTERRUPT_TRANSFER = 0, + POLLING_TRANSFER = 1, + DMA_TRANSFER = 2, +}; + +enum ssp_rx_level_trig { + SSP_RX_1_OR_MORE_ELEM = 0, + SSP_RX_4_OR_MORE_ELEM = 1, + SSP_RX_8_OR_MORE_ELEM = 2, + SSP_RX_16_OR_MORE_ELEM = 3, + SSP_RX_32_OR_MORE_ELEM = 4, +}; + +enum ssp_tx_level_trig { + SSP_TX_1_OR_MORE_EMPTY_LOC = 0, + SSP_TX_4_OR_MORE_EMPTY_LOC = 1, + SSP_TX_8_OR_MORE_EMPTY_LOC = 2, + SSP_TX_16_OR_MORE_EMPTY_LOC = 3, + SSP_TX_32_OR_MORE_EMPTY_LOC = 4, +}; + +enum ssp_spi_clk_phase { + SSP_CLK_FIRST_EDGE = 0, + SSP_CLK_SECOND_EDGE = 1, +}; + +enum ssp_spi_clk_pol { + SSP_CLK_POL_IDLE_LOW = 0, + SSP_CLK_POL_IDLE_HIGH = 1, +}; + +enum ssp_microwire_ctrl_len { + SSP_BITS_4 = 3, + SSP_BITS_5 = 4, + SSP_BITS_6 = 5, + SSP_BITS_7 = 6, + SSP_BITS_8 = 7, + SSP_BITS_9 = 8, + SSP_BITS_10 = 9, + SSP_BITS_11 = 10, + SSP_BITS_12 = 11, + SSP_BITS_13 = 12, + SSP_BITS_14 = 13, + SSP_BITS_15 = 14, + SSP_BITS_16 = 15, + SSP_BITS_17 = 16, + SSP_BITS_18 = 17, + SSP_BITS_19 = 18, + SSP_BITS_20 = 19, + SSP_BITS_21 = 20, + SSP_BITS_22 = 21, + SSP_BITS_23 = 22, + SSP_BITS_24 = 23, + SSP_BITS_25 = 24, + SSP_BITS_26 = 25, + SSP_BITS_27 = 26, + SSP_BITS_28 = 27, + SSP_BITS_29 = 28, + SSP_BITS_30 = 29, + SSP_BITS_31 = 30, + SSP_BITS_32 = 31, +}; + +enum ssp_microwire_wait_state { + SSP_MWIRE_WAIT_ZERO = 0, + SSP_MWIRE_WAIT_ONE = 1, +}; + +enum ssp_duplex { + SSP_MICROWIRE_CHANNEL_FULL_DUPLEX = 0, + SSP_MICROWIRE_CHANNEL_HALF_DUPLEX = 1, +}; + +enum ssp_clkdelay { + SSP_FEEDBACK_CLK_DELAY_NONE = 0, + 
SSP_FEEDBACK_CLK_DELAY_1T = 1, + SSP_FEEDBACK_CLK_DELAY_2T = 2, + SSP_FEEDBACK_CLK_DELAY_3T = 3, + SSP_FEEDBACK_CLK_DELAY_4T = 4, + SSP_FEEDBACK_CLK_DELAY_5T = 5, + SSP_FEEDBACK_CLK_DELAY_6T = 6, + SSP_FEEDBACK_CLK_DELAY_7T = 7, +}; + +enum ssp_chip_select { + SSP_CHIP_SELECT = 0, + SSP_CHIP_DESELECT = 1, +}; + +struct pl022_ssp_controller { + u16 bus_id; + u8 num_chipselect; + u8 enable_dma: 1; + bool (*dma_filter)(struct dma_chan *, void *); + void *dma_rx_param; + void *dma_tx_param; + int autosuspend_delay; + bool rt; + int *chipselects; +}; + +struct pl022_config_chip { + enum ssp_interface iface; + enum ssp_hierarchy hierarchy; + bool slave_tx_disable; + struct ssp_clock_params clk_freq; + enum ssp_mode com_mode; + enum ssp_rx_level_trig rx_lev_trig; + enum ssp_tx_level_trig tx_lev_trig; + enum ssp_microwire_ctrl_len ctrl_len; + enum ssp_microwire_wait_state wait_state; + enum ssp_duplex duplex; + enum ssp_clkdelay clkdelay; + void (*cs_control)(u32); +}; + +enum ssp_reading { + READING_NULL = 0, + READING_U8 = 1, + READING_U16 = 2, + READING_U32 = 3, +}; + +enum ssp_writing { + WRITING_NULL = 0, + WRITING_U8 = 1, + WRITING_U16 = 2, + WRITING_U32 = 3, +}; + +struct vendor_data___2 { + int fifodepth; + int max_bpw; + bool unidir; + bool extended_cr; + bool pl023; + bool loopback; + bool internal_cs_ctrl; +}; + +struct chip_data; + +struct pl022 { + struct amba_device *adev; + struct vendor_data___2 *vendor; + resource_size_t phybase; + void *virtbase; + struct clk *clk; + struct spi_controller *master; + struct pl022_ssp_controller *master_info; + struct tasklet_struct pump_transfers; + struct spi_message *cur_msg; + struct spi_transfer *cur_transfer; + struct chip_data *cur_chip; + bool next_msg_cs_active; + void *tx; + void *tx_end; + void *rx; + void *rx_end; + enum ssp_reading read; + enum ssp_writing write; + u32 exp_fifo_level; + enum ssp_rx_level_trig rx_lev_trig; + enum ssp_tx_level_trig tx_lev_trig; + struct dma_chan *dma_rx_channel; + struct dma_chan *dma_tx_channel; + struct sg_table sgt_rx; + struct sg_table sgt_tx; + char *dummypage; + bool dma_running; + int cur_cs; + int *chipselects; +}; + +struct chip_data { + u32 cr0; + u16 cr1; + u16 dmacr; + u16 cpsr; + u8 n_bytes; + bool enable_dma; + enum ssp_reading read; + enum ssp_writing write; + void (*cs_control)(u32); + int xfer_type; +}; + +struct spi_qup { + void *base; + struct device *dev; + struct clk *cclk; + struct clk *iclk; + int irq; + spinlock_t lock; + int in_fifo_sz; + int out_fifo_sz; + int in_blk_sz; + int out_blk_sz; + struct spi_transfer *xfer; + struct completion done; + int error; + int w_size; + int n_words; + int tx_bytes; + int rx_bytes; + const u8 *tx_buf; + u8 *rx_buf; + int qup_v1; + int mode; + struct dma_slave_config rx_conf; + struct dma_slave_config tx_conf; +}; + +struct rockchip_spi { + struct device *dev; + struct clk *spiclk; + struct clk *apb_pclk; + void *regs; + dma_addr_t dma_addr_rx; + dma_addr_t dma_addr_tx; + const void *tx; + void *rx; + unsigned int tx_left; + unsigned int rx_left; + atomic_t state; + u32 fifo_len; + u32 freq; + u8 n_bytes; + u8 rsd; + bool cs_asserted[2]; + bool slave_abort; +}; + +struct s3c64xx_spi_csinfo { + u8 fb_delay; + unsigned int line; +}; + +struct s3c64xx_spi_info { + int src_clk_nr; + int num_cs; + bool no_cs; + int (*cfg_gpio)(); +}; + +struct s3c64xx_spi_dma_data { + struct dma_chan *ch; + dma_cookie_t cookie; + enum dma_transfer_direction direction; +}; + +struct s3c64xx_spi_port_config { + int fifo_lvl_mask[6]; + int rx_lvl_offset; + int 
tx_st_done; + int quirks; + bool high_speed; + bool clk_from_cmu; + bool clk_ioclk; +}; + +struct s3c64xx_spi_driver_data { + void *regs; + struct clk *clk; + struct clk *src_clk; + struct clk *ioclk; + struct platform_device *pdev; + struct spi_controller *master; + struct s3c64xx_spi_info *cntrlr_info; + spinlock_t lock; + long unsigned int sfr_start; + struct completion xfer_completion; + unsigned int state; + unsigned int cur_mode; + unsigned int cur_bpw; + unsigned int cur_speed; + struct s3c64xx_spi_dma_data rx_dma; + struct s3c64xx_spi_dma_data tx_dma; + struct s3c64xx_spi_port_config *port_conf; + unsigned int port_id; +}; + +struct devprobe2 { + struct net_device * (*probe)(int); + int status; +}; + +enum { + NETIF_F_SG_BIT = 0, + NETIF_F_IP_CSUM_BIT = 1, + __UNUSED_NETIF_F_1 = 2, + NETIF_F_HW_CSUM_BIT = 3, + NETIF_F_IPV6_CSUM_BIT = 4, + NETIF_F_HIGHDMA_BIT = 5, + NETIF_F_FRAGLIST_BIT = 6, + NETIF_F_HW_VLAN_CTAG_TX_BIT = 7, + NETIF_F_HW_VLAN_CTAG_RX_BIT = 8, + NETIF_F_HW_VLAN_CTAG_FILTER_BIT = 9, + NETIF_F_VLAN_CHALLENGED_BIT = 10, + NETIF_F_GSO_BIT = 11, + NETIF_F_LLTX_BIT = 12, + NETIF_F_NETNS_LOCAL_BIT = 13, + NETIF_F_GRO_BIT = 14, + NETIF_F_LRO_BIT = 15, + NETIF_F_GSO_SHIFT = 16, + NETIF_F_TSO_BIT = 16, + NETIF_F_GSO_ROBUST_BIT = 17, + NETIF_F_TSO_ECN_BIT = 18, + NETIF_F_TSO_MANGLEID_BIT = 19, + NETIF_F_TSO6_BIT = 20, + NETIF_F_FSO_BIT = 21, + NETIF_F_GSO_GRE_BIT = 22, + NETIF_F_GSO_GRE_CSUM_BIT = 23, + NETIF_F_GSO_IPXIP4_BIT = 24, + NETIF_F_GSO_IPXIP6_BIT = 25, + NETIF_F_GSO_UDP_TUNNEL_BIT = 26, + NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT = 27, + NETIF_F_GSO_PARTIAL_BIT = 28, + NETIF_F_GSO_TUNNEL_REMCSUM_BIT = 29, + NETIF_F_GSO_SCTP_BIT = 30, + NETIF_F_GSO_ESP_BIT = 31, + NETIF_F_GSO_UDP_BIT = 32, + NETIF_F_GSO_UDP_L4_BIT = 33, + NETIF_F_GSO_FRAGLIST_BIT = 34, + NETIF_F_GSO_LAST = 34, + NETIF_F_FCOE_CRC_BIT = 35, + NETIF_F_SCTP_CRC_BIT = 36, + NETIF_F_FCOE_MTU_BIT = 37, + NETIF_F_NTUPLE_BIT = 38, + NETIF_F_RXHASH_BIT = 39, + NETIF_F_RXCSUM_BIT = 40, + NETIF_F_NOCACHE_COPY_BIT = 41, + NETIF_F_LOOPBACK_BIT = 42, + NETIF_F_RXFCS_BIT = 43, + NETIF_F_RXALL_BIT = 44, + NETIF_F_HW_VLAN_STAG_TX_BIT = 45, + NETIF_F_HW_VLAN_STAG_RX_BIT = 46, + NETIF_F_HW_VLAN_STAG_FILTER_BIT = 47, + NETIF_F_HW_L2FW_DOFFLOAD_BIT = 48, + NETIF_F_HW_TC_BIT = 49, + NETIF_F_HW_ESP_BIT = 50, + NETIF_F_HW_ESP_TX_CSUM_BIT = 51, + NETIF_F_RX_UDP_TUNNEL_PORT_BIT = 52, + NETIF_F_HW_TLS_TX_BIT = 53, + NETIF_F_HW_TLS_RX_BIT = 54, + NETIF_F_GRO_HW_BIT = 55, + NETIF_F_HW_TLS_RECORD_BIT = 56, + NETIF_F_GRO_FRAGLIST_BIT = 57, + NETIF_F_HW_MACSEC_BIT = 58, + NETDEV_FEATURE_COUNT = 59, +}; + +enum { + SKBTX_HW_TSTAMP = 1, + SKBTX_SW_TSTAMP = 2, + SKBTX_IN_PROGRESS = 4, + SKBTX_DEV_ZEROCOPY = 8, + SKBTX_WIFI_STATUS = 16, + SKBTX_SHARED_FRAG = 32, + SKBTX_SCHED_TSTAMP = 64, +}; + +enum netdev_priv_flags { + IFF_802_1Q_VLAN = 1, + IFF_EBRIDGE = 2, + IFF_BONDING = 4, + IFF_ISATAP = 8, + IFF_WAN_HDLC = 16, + IFF_XMIT_DST_RELEASE = 32, + IFF_DONT_BRIDGE = 64, + IFF_DISABLE_NETPOLL = 128, + IFF_MACVLAN_PORT = 256, + IFF_BRIDGE_PORT = 512, + IFF_OVS_DATAPATH = 1024, + IFF_TX_SKB_SHARING = 2048, + IFF_UNICAST_FLT = 4096, + IFF_TEAM_PORT = 8192, + IFF_SUPP_NOFCS = 16384, + IFF_LIVE_ADDR_CHANGE = 32768, + IFF_MACVLAN = 65536, + IFF_XMIT_DST_RELEASE_PERM = 131072, + IFF_L3MDEV_MASTER = 262144, + IFF_NO_QUEUE = 524288, + IFF_OPENVSWITCH = 1048576, + IFF_L3MDEV_SLAVE = 2097152, + IFF_TEAM = 4194304, + IFF_RXFH_CONFIGURED = 8388608, + IFF_PHONY_HEADROOM = 16777216, + IFF_MACSEC = 33554432, + IFF_NO_RX_HANDLER = 67108864, + IFF_FAILOVER = 
134217728, + IFF_FAILOVER_SLAVE = 268435456, + IFF_L3MDEV_RX_HANDLER = 536870912, + IFF_LIVE_RENAME_OK = 1073741824, +}; + +struct mdio_board_info { + const char *bus_id; + char modalias[32]; + int mdio_addr; + const void *platform_data; +}; + +struct mdio_board_entry { + struct list_head list; + struct mdio_board_info board_info; +}; + +struct mdiobus_devres { + struct mii_bus *mii; +}; + +enum netdev_state_t { + __LINK_STATE_START = 0, + __LINK_STATE_PRESENT = 1, + __LINK_STATE_NOCARRIER = 2, + __LINK_STATE_LINKWATCH_PENDING = 3, + __LINK_STATE_DORMANT = 4, + __LINK_STATE_TESTING = 5, +}; + +struct mii_ioctl_data { + __u16 phy_id; + __u16 reg_num; + __u16 val_in; + __u16 val_out; +}; + +enum { + ETHTOOL_MSG_KERNEL_NONE = 0, + ETHTOOL_MSG_STRSET_GET_REPLY = 1, + ETHTOOL_MSG_LINKINFO_GET_REPLY = 2, + ETHTOOL_MSG_LINKINFO_NTF = 3, + ETHTOOL_MSG_LINKMODES_GET_REPLY = 4, + ETHTOOL_MSG_LINKMODES_NTF = 5, + ETHTOOL_MSG_LINKSTATE_GET_REPLY = 6, + ETHTOOL_MSG_DEBUG_GET_REPLY = 7, + ETHTOOL_MSG_DEBUG_NTF = 8, + ETHTOOL_MSG_WOL_GET_REPLY = 9, + ETHTOOL_MSG_WOL_NTF = 10, + ETHTOOL_MSG_FEATURES_GET_REPLY = 11, + ETHTOOL_MSG_FEATURES_SET_REPLY = 12, + ETHTOOL_MSG_FEATURES_NTF = 13, + ETHTOOL_MSG_PRIVFLAGS_GET_REPLY = 14, + ETHTOOL_MSG_PRIVFLAGS_NTF = 15, + ETHTOOL_MSG_RINGS_GET_REPLY = 16, + ETHTOOL_MSG_RINGS_NTF = 17, + ETHTOOL_MSG_CHANNELS_GET_REPLY = 18, + ETHTOOL_MSG_CHANNELS_NTF = 19, + ETHTOOL_MSG_COALESCE_GET_REPLY = 20, + ETHTOOL_MSG_COALESCE_NTF = 21, + ETHTOOL_MSG_PAUSE_GET_REPLY = 22, + ETHTOOL_MSG_PAUSE_NTF = 23, + ETHTOOL_MSG_EEE_GET_REPLY = 24, + ETHTOOL_MSG_EEE_NTF = 25, + ETHTOOL_MSG_TSINFO_GET_REPLY = 26, + ETHTOOL_MSG_CABLE_TEST_NTF = 27, + ETHTOOL_MSG_CABLE_TEST_TDR_NTF = 28, + ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY = 29, + __ETHTOOL_MSG_KERNEL_CNT = 30, + ETHTOOL_MSG_KERNEL_MAX = 29, +}; + +struct phy_setting { + u32 speed; + u8 duplex; + u8 bit; +}; + +struct ethtool_phy_ops { + int (*get_sset_count)(struct phy_device *); + int (*get_strings)(struct phy_device *, u8 *); + int (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *); + int (*start_cable_test)(struct phy_device *, struct netlink_ext_ack *); + int (*start_cable_test_tdr)(struct phy_device *, struct netlink_ext_ack *, const struct phy_tdr_config *); +}; + +struct phy_fixup { + struct list_head list; + char bus_id[64]; + u32 phy_uid; + u32 phy_uid_mask; + int (*run)(struct phy_device *); +}; + +struct sfp_eeprom_base { + u8 phys_id; + u8 phys_ext_id; + u8 connector; + u8 if_1x_copper_passive: 1; + u8 if_1x_copper_active: 1; + u8 if_1x_lx: 1; + u8 if_1x_sx: 1; + u8 e10g_base_sr: 1; + u8 e10g_base_lr: 1; + u8 e10g_base_lrm: 1; + u8 e10g_base_er: 1; + u8 sonet_oc3_short_reach: 1; + u8 sonet_oc3_smf_intermediate_reach: 1; + u8 sonet_oc3_smf_long_reach: 1; + u8 unallocated_5_3: 1; + u8 sonet_oc12_short_reach: 1; + u8 sonet_oc12_smf_intermediate_reach: 1; + u8 sonet_oc12_smf_long_reach: 1; + u8 unallocated_5_7: 1; + u8 sonet_oc48_short_reach: 1; + u8 sonet_oc48_intermediate_reach: 1; + u8 sonet_oc48_long_reach: 1; + u8 sonet_reach_bit2: 1; + u8 sonet_reach_bit1: 1; + u8 sonet_oc192_short_reach: 1; + u8 escon_smf_1310_laser: 1; + u8 escon_mmf_1310_led: 1; + u8 e1000_base_sx: 1; + u8 e1000_base_lx: 1; + u8 e1000_base_cx: 1; + u8 e1000_base_t: 1; + u8 e100_base_lx: 1; + u8 e100_base_fx: 1; + u8 e_base_bx10: 1; + u8 e_base_px: 1; + u8 fc_tech_electrical_inter_enclosure: 1; + u8 fc_tech_lc: 1; + u8 fc_tech_sa: 1; + u8 fc_ll_m: 1; + u8 fc_ll_l: 1; + u8 fc_ll_i: 1; + u8 fc_ll_s: 1; + u8 fc_ll_v: 1; + u8 unallocated_8_0: 1; + u8 
unallocated_8_1: 1; + u8 sfp_ct_passive: 1; + u8 sfp_ct_active: 1; + u8 fc_tech_ll: 1; + u8 fc_tech_sl: 1; + u8 fc_tech_sn: 1; + u8 fc_tech_electrical_intra_enclosure: 1; + u8 fc_media_sm: 1; + u8 unallocated_9_1: 1; + u8 fc_media_m5: 1; + u8 fc_media_m6: 1; + u8 fc_media_tv: 1; + u8 fc_media_mi: 1; + u8 fc_media_tp: 1; + u8 fc_media_tw: 1; + u8 fc_speed_100: 1; + u8 unallocated_10_1: 1; + u8 fc_speed_200: 1; + u8 fc_speed_3200: 1; + u8 fc_speed_400: 1; + u8 fc_speed_1600: 1; + u8 fc_speed_800: 1; + u8 fc_speed_1200: 1; + u8 encoding; + u8 br_nominal; + u8 rate_id; + u8 link_len[6]; + char vendor_name[16]; + u8 extended_cc; + char vendor_oui[3]; + char vendor_pn[16]; + char vendor_rev[4]; + union { + __be16 optical_wavelength; + __be16 cable_compliance; + struct { + u8 sff8431_app_e: 1; + u8 fc_pi_4_app_h: 1; + u8 reserved60_2: 6; + u8 reserved61: 8; + } passive; + struct { + u8 sff8431_app_e: 1; + u8 fc_pi_4_app_h: 1; + u8 sff8431_lim: 1; + u8 fc_pi_4_lim: 1; + u8 reserved60_4: 4; + u8 reserved61: 8; + } active; + }; + u8 reserved62; + u8 cc_base; +}; + +struct sfp_eeprom_ext { + __be16 options; + u8 br_max; + u8 br_min; + char vendor_sn[16]; + char datecode[8]; + u8 diagmon; + u8 enhopts; + u8 sff8472_compliance; + u8 cc_ext; +}; + +struct sfp_eeprom_id { + struct sfp_eeprom_base base; + struct sfp_eeprom_ext ext; +}; + +struct sfp_upstream_ops { + void (*attach)(void *, struct sfp_bus *); + void (*detach)(void *, struct sfp_bus *); + int (*module_insert)(void *, const struct sfp_eeprom_id *); + void (*module_remove)(void *); + int (*module_start)(void *); + void (*module_stop)(void *); + void (*link_down)(void *); + void (*link_up)(void *); + int (*connect_phy)(void *, struct phy_device *); + void (*disconnect_phy)(void *); +}; + +struct trace_event_raw_mdio_access { + struct trace_entry ent; + char busid[61]; + char read; + u8 addr; + u16 val; + unsigned int regnum; + char __data[0]; +}; + +struct trace_event_data_offsets_mdio_access {}; + +typedef void (*btf_trace_mdio_access)(void *, struct mii_bus *, char, u8, unsigned int, u16, int); + +struct mdio_bus_stat_attr { + int addr; + unsigned int field_offset; +}; + +struct mdio_driver { + struct mdio_driver_common mdiodrv; + int (*probe)(struct mdio_device *); + void (*remove)(struct mdio_device *); +}; + +struct fixed_phy_status { + int link; + int speed; + int duplex; + int pause; + int asym_pause; +}; + +struct swmii_regs { + u16 bmsr; + u16 lpa; + u16 lpagb; + u16 estat; +}; + +enum { + SWMII_SPEED_10 = 0, + SWMII_SPEED_100 = 1, + SWMII_SPEED_1000 = 2, + SWMII_DUPLEX_HALF = 0, + SWMII_DUPLEX_FULL = 1, +}; + +struct sfp; + +struct sfp_socket_ops; + +struct sfp_quirk; + +struct sfp_bus { + struct kref kref; + struct list_head node; + struct fwnode_handle *fwnode; + const struct sfp_socket_ops *socket_ops; + struct device *sfp_dev; + struct sfp *sfp; + const struct sfp_quirk *sfp_quirk; + const struct sfp_upstream_ops *upstream_ops; + void *upstream; + struct phy_device *phydev; + bool registered; + bool started; +}; + +enum { + SFF8024_ID_UNK = 0, + SFF8024_ID_SFF_8472 = 2, + SFF8024_ID_SFP = 3, + SFF8024_ID_DWDM_SFP = 11, + SFF8024_ID_QSFP_8438 = 12, + SFF8024_ID_QSFP_8436_8636 = 13, + SFF8024_ID_QSFP28_8636 = 17, + SFF8024_ENCODING_UNSPEC = 0, + SFF8024_ENCODING_8B10B = 1, + SFF8024_ENCODING_4B5B = 2, + SFF8024_ENCODING_NRZ = 3, + SFF8024_ENCODING_8472_MANCHESTER = 4, + SFF8024_ENCODING_8472_SONET = 5, + SFF8024_ENCODING_8472_64B66B = 6, + SFF8024_ENCODING_8436_MANCHESTER = 6, + SFF8024_ENCODING_8436_SONET = 4, + 
SFF8024_ENCODING_8436_64B66B = 5, + SFF8024_ENCODING_256B257B = 7, + SFF8024_ENCODING_PAM4 = 8, + SFF8024_CONNECTOR_UNSPEC = 0, + SFF8024_CONNECTOR_SC = 1, + SFF8024_CONNECTOR_FIBERJACK = 6, + SFF8024_CONNECTOR_LC = 7, + SFF8024_CONNECTOR_MT_RJ = 8, + SFF8024_CONNECTOR_MU = 9, + SFF8024_CONNECTOR_SG = 10, + SFF8024_CONNECTOR_OPTICAL_PIGTAIL = 11, + SFF8024_CONNECTOR_MPO_1X12 = 12, + SFF8024_CONNECTOR_MPO_2X16 = 13, + SFF8024_CONNECTOR_HSSDC_II = 32, + SFF8024_CONNECTOR_COPPER_PIGTAIL = 33, + SFF8024_CONNECTOR_RJ45 = 34, + SFF8024_CONNECTOR_NOSEPARATE = 35, + SFF8024_CONNECTOR_MXC_2X16 = 36, + SFF8024_ECC_UNSPEC = 0, + SFF8024_ECC_100G_25GAUI_C2M_AOC = 1, + SFF8024_ECC_100GBASE_SR4_25GBASE_SR = 2, + SFF8024_ECC_100GBASE_LR4_25GBASE_LR = 3, + SFF8024_ECC_100GBASE_ER4_25GBASE_ER = 4, + SFF8024_ECC_100GBASE_SR10 = 5, + SFF8024_ECC_100GBASE_CR4 = 11, + SFF8024_ECC_25GBASE_CR_S = 12, + SFF8024_ECC_25GBASE_CR_N = 13, + SFF8024_ECC_10GBASE_T_SFI = 22, + SFF8024_ECC_10GBASE_T_SR = 28, + SFF8024_ECC_5GBASE_T = 29, + SFF8024_ECC_2_5GBASE_T = 30, +}; + +struct sfp_socket_ops { + void (*attach)(struct sfp *); + void (*detach)(struct sfp *); + void (*start)(struct sfp *); + void (*stop)(struct sfp *); + int (*module_info)(struct sfp *, struct ethtool_modinfo *); + int (*module_eeprom)(struct sfp *, struct ethtool_eeprom *, u8 *); +}; + +struct sfp_quirk { + const char *vendor; + const char *part; + void (*modes)(const struct sfp_eeprom_id *, long unsigned int *); +}; + +struct mdio_device_id { + __u32 phy_id; + __u32 phy_id_mask; +}; + +enum { + MDIO_AN_C22 = 65504, +}; + +struct fixed_mdio_bus { + struct mii_bus *mii_bus; + struct list_head phys; +}; + +struct fixed_phy { + int addr; + struct phy_device *phydev; + struct fixed_phy_status status; + bool no_carrier; + int (*link_update)(struct net_device *, struct fixed_phy_status *); + struct list_head node; + struct gpio_desc *link_gpiod; +}; + +struct mdio_mux_child_bus; + +struct mdio_mux_parent_bus { + struct mii_bus *mii_bus; + int current_child; + int parent_id; + void *switch_data; + int (*switch_fn)(int, int, void *); + struct mdio_mux_child_bus *children; +}; + +struct mdio_mux_child_bus { + struct mii_bus *mii_bus; + struct mdio_mux_parent_bus *parent; + struct mdio_mux_child_bus *next; + int bus_number; +}; + +struct mdio_mux_mmioreg_state { + void *mux_handle; + phys_addr_t phys; + unsigned int iosize; + unsigned int mask; +}; + +struct flow_dissector { + unsigned int used_keys; + short unsigned int offset[28]; +}; + +struct flow_match { + struct flow_dissector *dissector; + void *mask; + void *key; +}; + +enum flow_action_id { + FLOW_ACTION_ACCEPT = 0, + FLOW_ACTION_DROP = 1, + FLOW_ACTION_TRAP = 2, + FLOW_ACTION_GOTO = 3, + FLOW_ACTION_REDIRECT = 4, + FLOW_ACTION_MIRRED = 5, + FLOW_ACTION_REDIRECT_INGRESS = 6, + FLOW_ACTION_MIRRED_INGRESS = 7, + FLOW_ACTION_VLAN_PUSH = 8, + FLOW_ACTION_VLAN_POP = 9, + FLOW_ACTION_VLAN_MANGLE = 10, + FLOW_ACTION_TUNNEL_ENCAP = 11, + FLOW_ACTION_TUNNEL_DECAP = 12, + FLOW_ACTION_MANGLE = 13, + FLOW_ACTION_ADD = 14, + FLOW_ACTION_CSUM = 15, + FLOW_ACTION_MARK = 16, + FLOW_ACTION_PTYPE = 17, + FLOW_ACTION_PRIORITY = 18, + FLOW_ACTION_WAKE = 19, + FLOW_ACTION_QUEUE = 20, + FLOW_ACTION_SAMPLE = 21, + FLOW_ACTION_POLICE = 22, + FLOW_ACTION_CT = 23, + FLOW_ACTION_CT_METADATA = 24, + FLOW_ACTION_MPLS_PUSH = 25, + FLOW_ACTION_MPLS_POP = 26, + FLOW_ACTION_MPLS_MANGLE = 27, + FLOW_ACTION_GATE = 28, + NUM_FLOW_ACTIONS = 29, +}; + +enum flow_action_hw_stats { + FLOW_ACTION_HW_STATS_IMMEDIATE = 1, + 
FLOW_ACTION_HW_STATS_DELAYED = 2, + FLOW_ACTION_HW_STATS_ANY = 3, + FLOW_ACTION_HW_STATS_DISABLED = 4, + FLOW_ACTION_HW_STATS_DONT_CARE = 7, +}; + +typedef void (*action_destr)(void *); + +enum flow_action_mangle_base { + FLOW_ACT_MANGLE_UNSPEC = 0, + FLOW_ACT_MANGLE_HDR_TYPE_ETH = 1, + FLOW_ACT_MANGLE_HDR_TYPE_IP4 = 2, + FLOW_ACT_MANGLE_HDR_TYPE_IP6 = 3, + FLOW_ACT_MANGLE_HDR_TYPE_TCP = 4, + FLOW_ACT_MANGLE_HDR_TYPE_UDP = 5, +}; + +struct nf_flowtable; + +struct ip_tunnel_info; + +struct psample_group; + +struct action_gate_entry; + +struct flow_action_cookie; + +struct flow_action_entry { + enum flow_action_id id; + enum flow_action_hw_stats hw_stats; + action_destr destructor; + void *destructor_priv; + union { + u32 chain_index; + struct net_device *dev; + struct { + u16 vid; + __be16 proto; + u8 prio; + } vlan; + struct { + enum flow_action_mangle_base htype; + u32 offset; + u32 mask; + u32 val; + } mangle; + struct ip_tunnel_info *tunnel; + u32 csum_flags; + u32 mark; + u16 ptype; + u32 priority; + struct { + u32 ctx; + u32 index; + u8 vf; + } queue; + struct { + struct psample_group *psample_group; + u32 rate; + u32 trunc_size; + bool truncate; + } sample; + struct { + u32 index; + u32 burst; + u64 rate_bytes_ps; + u32 mtu; + } police; + struct { + int action; + u16 zone; + struct nf_flowtable *flow_table; + } ct; + struct { + long unsigned int cookie; + u32 mark; + u32 labels[4]; + } ct_metadata; + struct { + u32 label; + __be16 proto; + u8 tc; + u8 bos; + u8 ttl; + } mpls_push; + struct { + __be16 proto; + } mpls_pop; + struct { + u32 label; + u8 tc; + u8 bos; + u8 ttl; + } mpls_mangle; + struct { + u32 index; + s32 prio; + u64 basetime; + u64 cycletime; + u64 cycletimeext; + u32 num_entries; + struct action_gate_entry *entries; + } gate; + }; + struct flow_action_cookie *cookie; +}; + +struct flow_action { + unsigned int num_entries; + struct flow_action_entry entries[0]; +}; + +struct flow_rule { + struct flow_match match; + struct flow_action action; +}; + +struct dsa_chip_data { + struct device *host_dev; + int sw_addr; + struct device *netdev[12]; + int eeprom_len; + struct device_node *of_node; + char *port_names[12]; + struct device_node *port_dn[12]; + s8 rtable[4]; +}; + +struct dsa_platform_data { + struct device *netdev; + struct net_device *of_netdev; + int nr_chips; + struct dsa_chip_data *chip; +}; + +struct phylink_link_state { + long unsigned int advertising[2]; + long unsigned int lp_advertising[2]; + phy_interface_t interface; + int speed; + int duplex; + int pause; + unsigned int link: 1; + unsigned int an_enabled: 1; + unsigned int an_complete: 1; +}; + +enum phylink_op_type { + PHYLINK_NETDEV = 0, + PHYLINK_DEV = 1, +}; + +struct phylink_config { + struct device *dev; + enum phylink_op_type type; + bool pcs_poll; + bool poll_fixed_state; + void (*get_fixed_state)(struct phylink_config *, struct phylink_link_state *); +}; + +enum devlink_port_type { + DEVLINK_PORT_TYPE_NOTSET = 0, + DEVLINK_PORT_TYPE_AUTO = 1, + DEVLINK_PORT_TYPE_ETH = 2, + DEVLINK_PORT_TYPE_IB = 3, +}; + +enum devlink_port_flavour { + DEVLINK_PORT_FLAVOUR_PHYSICAL = 0, + DEVLINK_PORT_FLAVOUR_CPU = 1, + DEVLINK_PORT_FLAVOUR_DSA = 2, + DEVLINK_PORT_FLAVOUR_PCI_PF = 3, + DEVLINK_PORT_FLAVOUR_PCI_VF = 4, + DEVLINK_PORT_FLAVOUR_VIRTUAL = 5, + DEVLINK_PORT_FLAVOUR_UNUSED = 6, +}; + +struct devlink_port_phys_attrs { + u32 port_number; + u32 split_subport_number; +}; + +struct devlink_port_pci_pf_attrs { + u32 controller; + u16 pf; + u8 external: 1; +}; + +struct devlink_port_pci_vf_attrs { + u32 
controller; + u16 pf; + u16 vf; + u8 external: 1; +}; + +struct devlink_port_attrs { + u8 split: 1; + u8 splittable: 1; + u32 lanes; + enum devlink_port_flavour flavour; + struct netdev_phys_item_id switch_id; + union { + struct devlink_port_phys_attrs phys; + struct devlink_port_pci_pf_attrs pci_pf; + struct devlink_port_pci_vf_attrs pci_vf; + }; +}; + +struct devlink; + +struct devlink_port { + struct list_head list; + struct list_head param_list; + struct list_head region_list; + struct devlink *devlink; + unsigned int index; + bool registered; + spinlock_t type_lock; + enum devlink_port_type type; + enum devlink_port_type desired_type; + void *type_dev; + struct devlink_port_attrs attrs; + u8 attrs_set: 1; + u8 switch_port: 1; + struct delayed_work type_warn_dw; + struct list_head reporter_list; + struct mutex reporters_lock; +}; + +struct dsa_device_ops; + +struct dsa_switch_tree; + +struct packet_type; + +struct dsa_switch; + +struct dsa_netdevice_ops; + +struct dsa_port { + union { + struct net_device *master; + struct net_device *slave; + }; + const struct dsa_device_ops *tag_ops; + struct dsa_switch_tree *dst; + struct sk_buff * (*rcv)(struct sk_buff *, struct net_device *, struct packet_type *); + bool (*filter)(const struct sk_buff *, struct net_device *); + enum { + DSA_PORT_TYPE_UNUSED = 0, + DSA_PORT_TYPE_CPU = 1, + DSA_PORT_TYPE_DSA = 2, + DSA_PORT_TYPE_USER = 3, + } type; + struct dsa_switch *ds; + unsigned int index; + const char *name; + struct dsa_port *cpu_dp; + const char *mac; + struct device_node *dn; + unsigned int ageing_time; + bool vlan_filtering; + u8 stp_state; + struct net_device *bridge_dev; + struct devlink_port devlink_port; + bool devlink_port_setup; + struct phylink *pl; + struct phylink_config pl_config; + struct list_head list; + void *priv; + const struct ethtool_ops *orig_ethtool_ops; + const struct dsa_netdevice_ops *netdev_ops; + bool setup; +}; + +struct packet_type { + __be16 type; + bool ignore_outgoing; + struct net_device *dev; + int (*func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); + void (*list_func)(struct list_head *, struct packet_type *, struct net_device *); + bool (*id_match)(struct packet_type *, struct sock *); + void *af_packet_priv; + struct list_head list; +}; + +struct flow_action_cookie { + u32 cookie_len; + u8 cookie[0]; +}; + +struct flow_stats { + u64 pkts; + u64 bytes; + u64 drops; + u64 lastused; + enum flow_action_hw_stats used_hw_stats; + bool used_hw_stats_valid; +}; + +enum flow_cls_command { + FLOW_CLS_REPLACE = 0, + FLOW_CLS_DESTROY = 1, + FLOW_CLS_STATS = 2, + FLOW_CLS_TMPLT_CREATE = 3, + FLOW_CLS_TMPLT_DESTROY = 4, +}; + +struct flow_cls_common_offload { + u32 chain_index; + __be16 protocol; + u32 prio; + struct netlink_ext_ack *extack; +}; + +struct flow_cls_offload { + struct flow_cls_common_offload common; + enum flow_cls_command command; + long unsigned int cookie; + struct flow_rule *rule; + struct flow_stats stats; + u32 classid; +}; + +enum devlink_sb_pool_type { + DEVLINK_SB_POOL_TYPE_INGRESS = 0, + DEVLINK_SB_POOL_TYPE_EGRESS = 1, +}; + +enum devlink_sb_threshold_type { + DEVLINK_SB_THRESHOLD_TYPE_STATIC = 0, + DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC = 1, +}; + +enum devlink_eswitch_encap_mode { + DEVLINK_ESWITCH_ENCAP_MODE_NONE = 0, + DEVLINK_ESWITCH_ENCAP_MODE_BASIC = 1, +}; + +enum devlink_param_cmode { + DEVLINK_PARAM_CMODE_RUNTIME = 0, + DEVLINK_PARAM_CMODE_DRIVERINIT = 1, + DEVLINK_PARAM_CMODE_PERMANENT = 2, + __DEVLINK_PARAM_CMODE_MAX = 3, + DEVLINK_PARAM_CMODE_MAX = 
2, +}; + +enum devlink_trap_action { + DEVLINK_TRAP_ACTION_DROP = 0, + DEVLINK_TRAP_ACTION_TRAP = 1, + DEVLINK_TRAP_ACTION_MIRROR = 2, +}; + +enum devlink_trap_type { + DEVLINK_TRAP_TYPE_DROP = 0, + DEVLINK_TRAP_TYPE_EXCEPTION = 1, + DEVLINK_TRAP_TYPE_CONTROL = 2, +}; + +enum devlink_reload_action { + DEVLINK_RELOAD_ACTION_UNSPEC = 0, + DEVLINK_RELOAD_ACTION_DRIVER_REINIT = 1, + DEVLINK_RELOAD_ACTION_FW_ACTIVATE = 2, + __DEVLINK_RELOAD_ACTION_MAX = 3, + DEVLINK_RELOAD_ACTION_MAX = 2, +}; + +enum devlink_reload_limit { + DEVLINK_RELOAD_LIMIT_UNSPEC = 0, + DEVLINK_RELOAD_LIMIT_NO_RESET = 1, + __DEVLINK_RELOAD_LIMIT_MAX = 2, + DEVLINK_RELOAD_LIMIT_MAX = 1, +}; + +enum devlink_dpipe_field_mapping_type { + DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0, + DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 1, +}; + +struct devlink_dev_stats { + u32 reload_stats[6]; + u32 remote_reload_stats[6]; +}; + +struct devlink_dpipe_headers; + +struct devlink_ops; + +struct devlink { + struct list_head list; + struct list_head port_list; + struct list_head sb_list; + struct list_head dpipe_table_list; + struct list_head resource_list; + struct list_head param_list; + struct list_head region_list; + struct list_head reporter_list; + struct mutex reporters_lock; + struct devlink_dpipe_headers *dpipe_headers; + struct list_head trap_list; + struct list_head trap_group_list; + struct list_head trap_policer_list; + const struct devlink_ops *ops; + struct xarray snapshot_ids; + struct devlink_dev_stats stats; + struct device *dev; + possible_net_t _net; + struct mutex lock; + u8 reload_failed: 1; + u8 reload_enabled: 1; + u8 registered: 1; + long: 61; + long: 64; + char priv[0]; +}; + +struct devlink_dpipe_header; + +struct devlink_dpipe_headers { + struct devlink_dpipe_header **headers; + unsigned int headers_count; +}; + +struct devlink_sb_pool_info; + +struct devlink_info_req; + +struct devlink_flash_update_params; + +struct devlink_trap; + +struct devlink_trap_group; + +struct devlink_trap_policer; + +struct devlink_ops { + u32 supported_flash_update_params; + long unsigned int reload_actions; + long unsigned int reload_limits; + int (*reload_down)(struct devlink *, bool, enum devlink_reload_action, enum devlink_reload_limit, struct netlink_ext_ack *); + int (*reload_up)(struct devlink *, enum devlink_reload_action, enum devlink_reload_limit, u32 *, struct netlink_ext_ack *); + int (*port_type_set)(struct devlink_port *, enum devlink_port_type); + int (*port_split)(struct devlink *, unsigned int, unsigned int, struct netlink_ext_ack *); + int (*port_unsplit)(struct devlink *, unsigned int, struct netlink_ext_ack *); + int (*sb_pool_get)(struct devlink *, unsigned int, u16, struct devlink_sb_pool_info *); + int (*sb_pool_set)(struct devlink *, unsigned int, u16, u32, enum devlink_sb_threshold_type, struct netlink_ext_ack *); + int (*sb_port_pool_get)(struct devlink_port *, unsigned int, u16, u32 *); + int (*sb_port_pool_set)(struct devlink_port *, unsigned int, u16, u32, struct netlink_ext_ack *); + int (*sb_tc_pool_bind_get)(struct devlink_port *, unsigned int, u16, enum devlink_sb_pool_type, u16 *, u32 *); + int (*sb_tc_pool_bind_set)(struct devlink_port *, unsigned int, u16, enum devlink_sb_pool_type, u16, u32, struct netlink_ext_ack *); + int (*sb_occ_snapshot)(struct devlink *, unsigned int); + int (*sb_occ_max_clear)(struct devlink *, unsigned int); + int (*sb_occ_port_pool_get)(struct devlink_port *, unsigned int, u16, u32 *, u32 *); + int (*sb_occ_tc_port_bind_get)(struct devlink_port *, unsigned int, u16, enum 
devlink_sb_pool_type, u32 *, u32 *); + int (*eswitch_mode_get)(struct devlink *, u16 *); + int (*eswitch_mode_set)(struct devlink *, u16, struct netlink_ext_ack *); + int (*eswitch_inline_mode_get)(struct devlink *, u8 *); + int (*eswitch_inline_mode_set)(struct devlink *, u8, struct netlink_ext_ack *); + int (*eswitch_encap_mode_get)(struct devlink *, enum devlink_eswitch_encap_mode *); + int (*eswitch_encap_mode_set)(struct devlink *, enum devlink_eswitch_encap_mode, struct netlink_ext_ack *); + int (*info_get)(struct devlink *, struct devlink_info_req *, struct netlink_ext_ack *); + int (*flash_update)(struct devlink *, struct devlink_flash_update_params *, struct netlink_ext_ack *); + int (*trap_init)(struct devlink *, const struct devlink_trap *, void *); + void (*trap_fini)(struct devlink *, const struct devlink_trap *, void *); + int (*trap_action_set)(struct devlink *, const struct devlink_trap *, enum devlink_trap_action, struct netlink_ext_ack *); + int (*trap_group_init)(struct devlink *, const struct devlink_trap_group *); + int (*trap_group_set)(struct devlink *, const struct devlink_trap_group *, const struct devlink_trap_policer *, struct netlink_ext_ack *); + int (*trap_group_action_set)(struct devlink *, const struct devlink_trap_group *, enum devlink_trap_action, struct netlink_ext_ack *); + int (*trap_policer_init)(struct devlink *, const struct devlink_trap_policer *); + void (*trap_policer_fini)(struct devlink *, const struct devlink_trap_policer *); + int (*trap_policer_set)(struct devlink *, const struct devlink_trap_policer *, u64, u64, struct netlink_ext_ack *); + int (*trap_policer_counter_get)(struct devlink *, const struct devlink_trap_policer *, u64 *); + int (*port_function_hw_addr_get)(struct devlink *, struct devlink_port *, u8 *, int *, struct netlink_ext_ack *); + int (*port_function_hw_addr_set)(struct devlink *, struct devlink_port *, const u8 *, int, struct netlink_ext_ack *); +}; + +struct devlink_sb_pool_info { + enum devlink_sb_pool_type pool_type; + u32 size; + enum devlink_sb_threshold_type threshold_type; + u32 cell_size; +}; + +struct devlink_dpipe_field { + const char *name; + unsigned int id; + unsigned int bitwidth; + enum devlink_dpipe_field_mapping_type mapping_type; +}; + +struct devlink_dpipe_header { + const char *name; + unsigned int id; + struct devlink_dpipe_field *fields; + unsigned int fields_count; + bool global; +}; + +union devlink_param_value { + u8 vu8; + u16 vu16; + u32 vu32; + char vstr[32]; + bool vbool; +}; + +struct devlink_param_gset_ctx { + union devlink_param_value val; + enum devlink_param_cmode cmode; +}; + +struct devlink_flash_update_params { + const char *file_name; + const char *component; + u32 overwrite_mask; +}; + +struct devlink_trap_policer { + u32 id; + u64 init_rate; + u64 init_burst; + u64 max_rate; + u64 min_rate; + u64 max_burst; + u64 min_burst; +}; + +struct devlink_trap_group { + const char *name; + u16 id; + bool generic; + u32 init_policer_id; +}; + +struct devlink_trap { + enum devlink_trap_type type; + enum devlink_trap_action init_action; + bool generic; + u16 id; + const char *name; + u16 init_group_id; + u32 metadata_cap; +}; + +struct switchdev_trans { + bool ph_prepare; +}; + +enum switchdev_obj_id { + SWITCHDEV_OBJ_ID_UNDEFINED = 0, + SWITCHDEV_OBJ_ID_PORT_VLAN = 1, + SWITCHDEV_OBJ_ID_PORT_MDB = 2, + SWITCHDEV_OBJ_ID_HOST_MDB = 3, + SWITCHDEV_OBJ_ID_MRP = 4, + SWITCHDEV_OBJ_ID_RING_TEST_MRP = 5, + SWITCHDEV_OBJ_ID_RING_ROLE_MRP = 6, + SWITCHDEV_OBJ_ID_RING_STATE_MRP = 7, + 
SWITCHDEV_OBJ_ID_IN_TEST_MRP = 8, + SWITCHDEV_OBJ_ID_IN_ROLE_MRP = 9, + SWITCHDEV_OBJ_ID_IN_STATE_MRP = 10, +}; + +struct switchdev_obj { + struct net_device *orig_dev; + enum switchdev_obj_id id; + u32 flags; + void *complete_priv; + void (*complete)(struct net_device *, int, void *); +}; + +struct switchdev_obj_port_vlan { + struct switchdev_obj obj; + u16 flags; + u16 vid_begin; + u16 vid_end; +}; + +struct switchdev_obj_port_mdb { + struct switchdev_obj obj; + unsigned char addr[6]; + u16 vid; +}; + +enum dsa_tag_protocol { + DSA_TAG_PROTO_NONE = 0, + DSA_TAG_PROTO_BRCM = 1, + DSA_TAG_PROTO_BRCM_PREPEND = 2, + DSA_TAG_PROTO_DSA = 3, + DSA_TAG_PROTO_EDSA = 4, + DSA_TAG_PROTO_GSWIP = 5, + DSA_TAG_PROTO_KSZ9477 = 6, + DSA_TAG_PROTO_KSZ9893 = 7, + DSA_TAG_PROTO_LAN9303 = 8, + DSA_TAG_PROTO_MTK = 9, + DSA_TAG_PROTO_QCA = 10, + DSA_TAG_PROTO_TRAILER = 11, + DSA_TAG_PROTO_8021Q = 12, + DSA_TAG_PROTO_SJA1105 = 13, + DSA_TAG_PROTO_KSZ8795 = 14, + DSA_TAG_PROTO_OCELOT = 15, + DSA_TAG_PROTO_AR9331 = 16, + DSA_TAG_PROTO_RTL4_A = 17, +}; + +struct dsa_device_ops { + struct sk_buff * (*xmit)(struct sk_buff *, struct net_device *); + struct sk_buff * (*rcv)(struct sk_buff *, struct net_device *, struct packet_type *); + void (*flow_dissect)(const struct sk_buff *, __be16 *, int *); + bool (*filter)(const struct sk_buff *, struct net_device *); + unsigned int overhead; + const char *name; + enum dsa_tag_protocol proto; + bool promisc_on_master; + bool tail_tag; +}; + +struct dsa_netdevice_ops { + int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int); +}; + +struct dsa_switch_tree { + struct list_head list; + struct raw_notifier_head nh; + unsigned int index; + struct kref refcount; + bool setup; + struct dsa_platform_data *pd; + struct list_head ports; + struct list_head rtable; +}; + +struct dsa_mall_mirror_tc_entry { + u8 to_local_port; + bool ingress; +}; + +struct dsa_mall_policer_tc_entry { + u32 burst; + u64 rate_bytes_per_sec; +}; + +struct dsa_switch_ops; + +struct dsa_switch { + bool setup; + struct device *dev; + struct dsa_switch_tree *dst; + unsigned int index; + struct notifier_block nb; + void *priv; + struct dsa_chip_data *cd; + const struct dsa_switch_ops *ops; + u32 phys_mii_mask; + struct mii_bus *slave_mii_bus; + unsigned int ageing_time_min; + unsigned int ageing_time_max; + struct devlink *devlink; + unsigned int num_tx_queues; + bool vlan_filtering_is_global; + bool configure_vlan_while_not_filtering; + bool untag_bridge_pvid; + bool vlan_filtering; + bool pcs_poll; + bool mtu_enforcement_ingress; + size_t num_ports; +}; + +struct fixed_phy_status___2; + +typedef int dsa_fdb_dump_cb_t(const unsigned char *, u16, bool, void *); + +struct dsa_switch_ops { + enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *, int, enum dsa_tag_protocol); + int (*setup)(struct dsa_switch *); + void (*teardown)(struct dsa_switch *); + u32 (*get_phy_flags)(struct dsa_switch *, int); + int (*phy_read)(struct dsa_switch *, int, int); + int (*phy_write)(struct dsa_switch *, int, int, u16); + void (*adjust_link)(struct dsa_switch *, int, struct phy_device *); + void (*fixed_link_update)(struct dsa_switch *, int, struct fixed_phy_status___2 *); + void (*phylink_validate)(struct dsa_switch *, int, long unsigned int *, struct phylink_link_state *); + int (*phylink_mac_link_state)(struct dsa_switch *, int, struct phylink_link_state *); + void (*phylink_mac_config)(struct dsa_switch *, int, unsigned int, const struct phylink_link_state *); + void (*phylink_mac_an_restart)(struct dsa_switch *, 
int); + void (*phylink_mac_link_down)(struct dsa_switch *, int, unsigned int, phy_interface_t); + void (*phylink_mac_link_up)(struct dsa_switch *, int, unsigned int, phy_interface_t, struct phy_device *, int, int, bool, bool); + void (*phylink_fixed_state)(struct dsa_switch *, int, struct phylink_link_state *); + void (*get_strings)(struct dsa_switch *, int, u32, uint8_t *); + void (*get_ethtool_stats)(struct dsa_switch *, int, uint64_t *); + int (*get_sset_count)(struct dsa_switch *, int, int); + void (*get_ethtool_phy_stats)(struct dsa_switch *, int, uint64_t *); + void (*get_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *); + int (*set_wol)(struct dsa_switch *, int, struct ethtool_wolinfo *); + int (*get_ts_info)(struct dsa_switch *, int, struct ethtool_ts_info *); + int (*suspend)(struct dsa_switch *); + int (*resume)(struct dsa_switch *); + int (*port_enable)(struct dsa_switch *, int, struct phy_device *); + void (*port_disable)(struct dsa_switch *, int); + int (*set_mac_eee)(struct dsa_switch *, int, struct ethtool_eee *); + int (*get_mac_eee)(struct dsa_switch *, int, struct ethtool_eee *); + int (*get_eeprom_len)(struct dsa_switch *); + int (*get_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *); + int (*set_eeprom)(struct dsa_switch *, struct ethtool_eeprom *, u8 *); + int (*get_regs_len)(struct dsa_switch *, int); + void (*get_regs)(struct dsa_switch *, int, struct ethtool_regs *, void *); + int (*set_ageing_time)(struct dsa_switch *, unsigned int); + int (*port_bridge_join)(struct dsa_switch *, int, struct net_device *); + void (*port_bridge_leave)(struct dsa_switch *, int, struct net_device *); + void (*port_stp_state_set)(struct dsa_switch *, int, u8); + void (*port_fast_age)(struct dsa_switch *, int); + int (*port_egress_floods)(struct dsa_switch *, int, bool, bool); + int (*port_vlan_filtering)(struct dsa_switch *, int, bool, struct switchdev_trans *); + int (*port_vlan_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *); + void (*port_vlan_add)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *); + int (*port_vlan_del)(struct dsa_switch *, int, const struct switchdev_obj_port_vlan *); + int (*port_fdb_add)(struct dsa_switch *, int, const unsigned char *, u16); + int (*port_fdb_del)(struct dsa_switch *, int, const unsigned char *, u16); + int (*port_fdb_dump)(struct dsa_switch *, int, dsa_fdb_dump_cb_t *, void *); + int (*port_mdb_prepare)(struct dsa_switch *, int, const struct switchdev_obj_port_mdb *); + void (*port_mdb_add)(struct dsa_switch *, int, const struct switchdev_obj_port_mdb *); + int (*port_mdb_del)(struct dsa_switch *, int, const struct switchdev_obj_port_mdb *); + int (*get_rxnfc)(struct dsa_switch *, int, struct ethtool_rxnfc *, u32 *); + int (*set_rxnfc)(struct dsa_switch *, int, struct ethtool_rxnfc *); + int (*cls_flower_add)(struct dsa_switch *, int, struct flow_cls_offload *, bool); + int (*cls_flower_del)(struct dsa_switch *, int, struct flow_cls_offload *, bool); + int (*cls_flower_stats)(struct dsa_switch *, int, struct flow_cls_offload *, bool); + int (*port_mirror_add)(struct dsa_switch *, int, struct dsa_mall_mirror_tc_entry *, bool); + void (*port_mirror_del)(struct dsa_switch *, int, struct dsa_mall_mirror_tc_entry *); + int (*port_policer_add)(struct dsa_switch *, int, struct dsa_mall_policer_tc_entry *); + void (*port_policer_del)(struct dsa_switch *, int); + int (*port_setup_tc)(struct dsa_switch *, int, enum tc_setup_type, void *); + int (*crosschip_bridge_join)(struct dsa_switch *, int, 
int, int, struct net_device *); + void (*crosschip_bridge_leave)(struct dsa_switch *, int, int, int, struct net_device *); + int (*port_hwtstamp_get)(struct dsa_switch *, int, struct ifreq *); + int (*port_hwtstamp_set)(struct dsa_switch *, int, struct ifreq *); + bool (*port_txtstamp)(struct dsa_switch *, int, struct sk_buff *, unsigned int); + bool (*port_rxtstamp)(struct dsa_switch *, int, struct sk_buff *, unsigned int); + int (*devlink_param_get)(struct dsa_switch *, u32, struct devlink_param_gset_ctx *); + int (*devlink_param_set)(struct dsa_switch *, u32, struct devlink_param_gset_ctx *); + int (*devlink_info_get)(struct dsa_switch *, struct devlink_info_req *, struct netlink_ext_ack *); + int (*port_change_mtu)(struct dsa_switch *, int, int); + int (*port_max_mtu)(struct dsa_switch *, int); +}; + +struct dsa_loop_pdata { + struct dsa_chip_data cd; + const char *name; + unsigned int enabled_ports; + const char *netdev; +}; + +struct ptp_clock_time { + __s64 sec; + __u32 nsec; + __u32 reserved; +}; + +struct ptp_extts_request { + unsigned int index; + unsigned int flags; + unsigned int rsv[2]; +}; + +struct ptp_perout_request { + union { + struct ptp_clock_time start; + struct ptp_clock_time phase; + }; + struct ptp_clock_time period; + unsigned int index; + unsigned int flags; + union { + struct ptp_clock_time on; + unsigned int rsv[4]; + }; +}; + +enum ptp_pin_function { + PTP_PF_NONE = 0, + PTP_PF_EXTTS = 1, + PTP_PF_PEROUT = 2, + PTP_PF_PHYSYNC = 3, +}; + +struct ptp_pin_desc { + char name[64]; + unsigned int index; + unsigned int func; + unsigned int chan; + unsigned int rsv[5]; +}; + +struct ptp_clock_request { + enum { + PTP_CLK_REQ_EXTTS = 0, + PTP_CLK_REQ_PEROUT = 1, + PTP_CLK_REQ_PPS = 2, + } type; + union { + struct ptp_extts_request extts; + struct ptp_perout_request perout; + }; +}; + +struct ptp_clock_info { + struct module *owner; + char name[16]; + s32 max_adj; + int n_alarm; + int n_ext_ts; + int n_per_out; + int n_pins; + int pps; + struct ptp_pin_desc *pin_config; + int (*adjfine)(struct ptp_clock_info *, long int); + int (*adjfreq)(struct ptp_clock_info *, s32); + int (*adjphase)(struct ptp_clock_info *, s32); + int (*adjtime)(struct ptp_clock_info *, s64); + int (*gettime64)(struct ptp_clock_info *, struct timespec64 *); + int (*gettimex64)(struct ptp_clock_info *, struct timespec64 *, struct ptp_system_timestamp *); + int (*getcrosststamp)(struct ptp_clock_info *, struct system_device_crosststamp *); + int (*settime64)(struct ptp_clock_info *, const struct timespec64 *); + int (*enable)(struct ptp_clock_info *, struct ptp_clock_request *, int); + int (*verify)(struct ptp_clock_info *, unsigned int, enum ptp_pin_function, unsigned int); + long int (*do_aux_work)(struct ptp_clock_info *); +}; + +struct ptp_clock; + +struct cavium_ptp { + struct pci_dev *pdev; + spinlock_t spin_lock; + struct cyclecounter cycle_counter; + struct timecounter time_counter; + void *reg_base; + u32 clock_rate; + struct ptp_clock_info ptp_info; + struct ptp_clock *ptp_clock; +}; + +struct mlxfw_dev_ops; + +struct mlxfw_dev { + const struct mlxfw_dev_ops *ops; + const char *psid; + u16 psid_size; + struct devlink *devlink; +}; + +enum mlxfw_fsm_state { + MLXFW_FSM_STATE_IDLE = 0, + MLXFW_FSM_STATE_LOCKED = 1, + MLXFW_FSM_STATE_INITIALIZE = 2, + MLXFW_FSM_STATE_DOWNLOAD = 3, + MLXFW_FSM_STATE_VERIFY = 4, + MLXFW_FSM_STATE_APPLY = 5, + MLXFW_FSM_STATE_ACTIVATE = 6, +}; + +enum mlxfw_fsm_state_err { + MLXFW_FSM_STATE_ERR_OK = 0, + MLXFW_FSM_STATE_ERR_ERROR = 1, + 
MLXFW_FSM_STATE_ERR_REJECTED_DIGEST_ERR = 2, + MLXFW_FSM_STATE_ERR_REJECTED_NOT_APPLICABLE = 3, + MLXFW_FSM_STATE_ERR_REJECTED_UNKNOWN_KEY = 4, + MLXFW_FSM_STATE_ERR_REJECTED_AUTH_FAILED = 5, + MLXFW_FSM_STATE_ERR_REJECTED_UNSIGNED = 6, + MLXFW_FSM_STATE_ERR_REJECTED_KEY_NOT_APPLICABLE = 7, + MLXFW_FSM_STATE_ERR_REJECTED_BAD_FORMAT = 8, + MLXFW_FSM_STATE_ERR_BLOCKED_PENDING_RESET = 9, + MLXFW_FSM_STATE_ERR_MAX = 10, +}; + +struct mlxfw_dev_ops { + int (*component_query)(struct mlxfw_dev *, u16, u32 *, u8 *, u16 *); + int (*fsm_lock)(struct mlxfw_dev *, u32 *); + int (*fsm_component_update)(struct mlxfw_dev *, u32, u16, u32); + int (*fsm_block_download)(struct mlxfw_dev *, u32, u8 *, u16, u32); + int (*fsm_component_verify)(struct mlxfw_dev *, u32, u16); + int (*fsm_activate)(struct mlxfw_dev *, u32); + int (*fsm_reactivate)(struct mlxfw_dev *, u8 *); + int (*fsm_query_state)(struct mlxfw_dev *, u32, enum mlxfw_fsm_state *, enum mlxfw_fsm_state_err *); + void (*fsm_cancel)(struct mlxfw_dev *, u32); + void (*fsm_release)(struct mlxfw_dev *, u32); +}; + +enum mlxfw_fsm_reactivate_status { + MLXFW_FSM_REACTIVATE_STATUS_OK = 0, + MLXFW_FSM_REACTIVATE_STATUS_BUSY = 1, + MLXFW_FSM_REACTIVATE_STATUS_PROHIBITED_FW_VER_ERR = 2, + MLXFW_FSM_REACTIVATE_STATUS_FIRST_PAGE_COPY_FAILED = 3, + MLXFW_FSM_REACTIVATE_STATUS_FIRST_PAGE_ERASE_FAILED = 4, + MLXFW_FSM_REACTIVATE_STATUS_FIRST_PAGE_RESTORE_FAILED = 5, + MLXFW_FSM_REACTIVATE_STATUS_CANDIDATE_FW_DEACTIVATION_FAILED = 6, + MLXFW_FSM_REACTIVATE_STATUS_FW_ALREADY_ACTIVATED = 7, + MLXFW_FSM_REACTIVATE_STATUS_ERR_DEVICE_RESET_REQUIRED = 8, + MLXFW_FSM_REACTIVATE_STATUS_ERR_FW_PROGRAMMING_NEEDED = 9, + MLXFW_FSM_REACTIVATE_STATUS_MAX = 10, +}; + +struct mlxfw_mfa2_component { + u16 index; + u32 data_size; + u8 *data; +}; + +struct mlxfw_mfa2_file; + +struct mlxfw_mfa2_tlv; + +struct mlxfw_mfa2_file___2 { + const struct firmware *fw; + const struct mlxfw_mfa2_tlv *first_dev; + u16 dev_count; + const struct mlxfw_mfa2_tlv *first_component; + u16 component_count; + const void *cb; + u32 cb_archive_size; +}; + +struct mlxfw_mfa2_tlv { + u8 version; + u8 type; + __be16 len; + u8 data[0]; +}; + +enum mlxfw_mfa2_tlv_type { + MLXFW_MFA2_TLV_MULTI_PART = 1, + MLXFW_MFA2_TLV_PACKAGE_DESCRIPTOR = 2, + MLXFW_MFA2_TLV_COMPONENT_DESCRIPTOR = 4, + MLXFW_MFA2_TLV_COMPONENT_PTR = 34, + MLXFW_MFA2_TLV_PSID = 42, +}; + +struct mlxfw_mfa2_tlv_multi { + __be16 num_extensions; + __be16 total_len; +}; + +struct mlxfw_mfa2_tlv_package_descriptor { + __be16 num_components; + __be16 num_devices; + __be32 cb_offset; + __be32 cb_archive_size; + __be32 cb_size_h; + __be32 cb_size_l; + u8 padding[3]; + u8 cv_compression; + __be32 user_data_offset; +}; + +struct mlxfw_mfa2_tlv_psid { + u8 psid[0]; +}; + +struct mlxfw_mfa2_tlv_component_ptr { + __be16 storage_id; + __be16 component_index; + __be32 storage_address; +}; + +struct mlxfw_mfa2_tlv_component_descriptor { + __be16 pldm_classification; + __be16 identifier; + __be32 cb_offset_h; + __be32 cb_offset_l; + __be32 size; +}; + +struct mlxfw_mfa2_comp_data { + struct mlxfw_mfa2_component comp; + u8 buff[0]; +}; + +struct wl1251_platform_data { + int power_gpio; + int irq; + bool use_eeprom; +}; + +enum { + SKB_GSO_TCPV4 = 1, + SKB_GSO_DODGY = 2, + SKB_GSO_TCP_ECN = 4, + SKB_GSO_TCP_FIXEDID = 8, + SKB_GSO_TCPV6 = 16, + SKB_GSO_FCOE = 32, + SKB_GSO_GRE = 64, + SKB_GSO_GRE_CSUM = 128, + SKB_GSO_IPXIP4 = 256, + SKB_GSO_IPXIP6 = 512, + SKB_GSO_UDP_TUNNEL = 1024, + SKB_GSO_UDP_TUNNEL_CSUM = 2048, + SKB_GSO_PARTIAL = 4096, + 
SKB_GSO_TUNNEL_REMCSUM = 8192, + SKB_GSO_SCTP = 16384, + SKB_GSO_ESP = 32768, + SKB_GSO_UDP = 65536, + SKB_GSO_UDP_L4 = 131072, + SKB_GSO_FRAGLIST = 262144, +}; + +enum ethtool_stringset { + ETH_SS_TEST = 0, + ETH_SS_STATS = 1, + ETH_SS_PRIV_FLAGS = 2, + ETH_SS_NTUPLE_FILTERS = 3, + ETH_SS_FEATURES = 4, + ETH_SS_RSS_HASH_FUNCS = 5, + ETH_SS_TUNABLES = 6, + ETH_SS_PHY_STATS = 7, + ETH_SS_PHY_TUNABLES = 8, + ETH_SS_LINK_MODES = 9, + ETH_SS_MSG_CLASSES = 10, + ETH_SS_WOL_MODES = 11, + ETH_SS_SOF_TIMESTAMPING = 12, + ETH_SS_TS_TX_TYPES = 13, + ETH_SS_TS_RX_FILTERS = 14, + ETH_SS_UDP_TUNNEL_TYPES = 15, + ETH_SS_COUNT = 16, +}; + +struct gro_list { + struct list_head list; + int count; +}; + +struct napi_struct { + struct list_head poll_list; + long unsigned int state; + int weight; + int defer_hard_irqs_count; + long unsigned int gro_bitmask; + int (*poll)(struct napi_struct *, int); + int poll_owner; + struct net_device *dev; + struct gro_list gro_hash[8]; + struct sk_buff *skb; + struct list_head rx_list; + int rx_count; + struct hrtimer timer; + struct list_head dev_list; + struct hlist_node napi_hash_node; + unsigned int napi_id; +}; + +enum netdev_queue_state_t { + __QUEUE_STATE_DRV_XOFF = 0, + __QUEUE_STATE_STACK_XOFF = 1, + __QUEUE_STATE_FROZEN = 2, +}; + +enum skb_free_reason { + SKB_REASON_CONSUMED = 0, + SKB_REASON_DROPPED = 1, +}; + +struct pp_alloc_cache { + u32 count; + void *cache[128]; +}; + +struct page_pool_params { + unsigned int flags; + unsigned int order; + unsigned int pool_size; + int nid; + struct device *dev; + enum dma_data_direction dma_dir; + unsigned int max_len; + unsigned int offset; +}; + +struct page_pool { + struct page_pool_params p; + struct delayed_work release_dw; + void (*disconnect)(void *); + long unsigned int defer_start; + long unsigned int defer_warn; + u32 pages_state_hold_cnt; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + struct pp_alloc_cache alloc; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct ptr_ring ring; + atomic_t pages_state_release_cnt; + refcount_t user_cnt; + u64 destroy_cnt; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct xen_netif_tx_request { + grant_ref_t gref; + uint16_t offset; + uint16_t flags; + uint16_t id; + uint16_t size; +}; + +struct xen_netif_extra_info { + uint8_t type; + uint8_t flags; + union { + struct { + uint16_t size; + uint8_t type; + uint8_t pad; + uint16_t features; + } gso; + struct { + uint8_t addr[6]; + } mcast; + struct { + uint8_t type; + uint8_t algorithm; + uint8_t value[4]; + } hash; + struct { + uint16_t headroom; + uint16_t pad[2]; + } xdp; + uint16_t pad[3]; + } u; +}; + +struct xen_netif_tx_response { + uint16_t id; + int16_t status; +}; + +struct xen_netif_rx_request { + uint16_t id; + uint16_t pad; + grant_ref_t gref; +}; + +struct xen_netif_rx_response { + uint16_t id; + uint16_t offset; + uint16_t flags; + int16_t status; +}; + +union xen_netif_tx_sring_entry { + struct xen_netif_tx_request req; + struct xen_netif_tx_response rsp; +}; + +struct xen_netif_tx_sring { + RING_IDX req_prod; + RING_IDX req_event; + RING_IDX rsp_prod; + RING_IDX rsp_event; + uint8_t pad[48]; + union xen_netif_tx_sring_entry ring[1]; +}; + +struct xen_netif_tx_front_ring { + RING_IDX req_prod_pvt; + RING_IDX rsp_cons; + unsigned int nr_ents; + struct xen_netif_tx_sring *sring; +}; + +union xen_netif_rx_sring_entry { + struct xen_netif_rx_request req; + struct xen_netif_rx_response rsp; +}; + +struct xen_netif_rx_sring { + 
RING_IDX req_prod; + RING_IDX req_event; + RING_IDX rsp_prod; + RING_IDX rsp_event; + uint8_t pad[48]; + union xen_netif_rx_sring_entry ring[1]; +}; + +struct xen_netif_rx_front_ring { + RING_IDX req_prod_pvt; + RING_IDX rsp_cons; + unsigned int nr_ents; + struct xen_netif_rx_sring *sring; +}; + +struct netfront_cb { + int pull_to; +}; + +struct netfront_stats { + u64 packets; + u64 bytes; + struct u64_stats_sync syncp; +}; + +union skb_entry { + struct sk_buff *skb; + long unsigned int link; +}; + +struct netfront_info; + +struct netfront_queue { + unsigned int id; + char name[22]; + struct netfront_info *info; + struct bpf_prog *xdp_prog; + struct napi_struct napi; + unsigned int tx_evtchn; + unsigned int rx_evtchn; + unsigned int tx_irq; + unsigned int rx_irq; + char tx_irq_name[25]; + char rx_irq_name[25]; + spinlock_t tx_lock; + struct xen_netif_tx_front_ring tx; + int tx_ring_ref; + union skb_entry tx_skbs[256]; + grant_ref_t gref_tx_head; + grant_ref_t grant_tx_ref[256]; + struct page *grant_tx_page[256]; + unsigned int tx_skb_freelist; + long: 32; + long: 64; + long: 64; + spinlock_t rx_lock; + struct xen_netif_rx_front_ring rx; + int rx_ring_ref; + struct timer_list rx_refill_timer; + struct sk_buff *rx_skbs[256]; + grant_ref_t gref_rx_head; + grant_ref_t grant_rx_ref[256]; + struct page_pool *page_pool; + long: 64; + long: 64; + long: 64; + long: 64; + struct xdp_rxq_info xdp_rxq; +}; + +struct netfront_info { + struct list_head list; + struct net_device *netdev; + struct xenbus_device *xbdev; + struct netfront_queue *queues; + struct netfront_stats *rx_stats; + struct netfront_stats *tx_stats; + bool netback_has_xdp_headroom; + bool netfront_xdp_enabled; + atomic_t rx_gso_checksum_fixup; +}; + +struct netfront_rx_info { + struct xen_netif_rx_response rx; + struct xen_netif_extra_info extras[5]; +}; + +struct xennet_gnttab_make_txreq { + struct netfront_queue *queue; + struct sk_buff *skb; + struct page *page; + struct xen_netif_tx_request *tx; + unsigned int size; +}; + +struct xennet_stat { + char name[32]; + u16 offset; +}; + +enum usb_otg_state { + OTG_STATE_UNDEFINED = 0, + OTG_STATE_B_IDLE = 1, + OTG_STATE_B_SRP_INIT = 2, + OTG_STATE_B_PERIPHERAL = 3, + OTG_STATE_B_WAIT_ACON = 4, + OTG_STATE_B_HOST = 5, + OTG_STATE_A_IDLE = 6, + OTG_STATE_A_WAIT_VRISE = 7, + OTG_STATE_A_WAIT_BCON = 8, + OTG_STATE_A_HOST = 9, + OTG_STATE_A_SUSPEND = 10, + OTG_STATE_A_PERIPHERAL = 11, + OTG_STATE_A_WAIT_VFALL = 12, + OTG_STATE_A_VBUS_ERR = 13, +}; + +struct usb_otg_caps { + u16 otg_rev; + bool hnp_support; + bool srp_support; + bool adp_support; +}; + +struct extcon_dev; + +enum usb_charger_type { + UNKNOWN_TYPE = 0, + SDP_TYPE = 1, + DCP_TYPE = 2, + CDP_TYPE = 3, + ACA_TYPE = 4, +}; + +enum usb_charger_state { + USB_CHARGER_DEFAULT = 0, + USB_CHARGER_PRESENT = 1, + USB_CHARGER_ABSENT = 2, +}; + +enum usb_phy_events { + USB_EVENT_NONE = 0, + USB_EVENT_VBUS = 1, + USB_EVENT_ID = 2, + USB_EVENT_CHARGER = 3, + USB_EVENT_ENUMERATED = 4, +}; + +enum usb_phy_type { + USB_PHY_TYPE_UNDEFINED = 0, + USB_PHY_TYPE_USB2 = 1, + USB_PHY_TYPE_USB3 = 2, +}; + +struct usb_phy___2; + +struct usb_phy_io_ops { + int (*read)(struct usb_phy___2 *, u32); + int (*write)(struct usb_phy___2 *, u32, u32); +}; + +struct usb_charger_current { + unsigned int sdp_min; + unsigned int sdp_max; + unsigned int dcp_min; + unsigned int dcp_max; + unsigned int cdp_min; + unsigned int cdp_max; + unsigned int aca_min; + unsigned int aca_max; +}; + +struct usb_otg; + +struct usb_phy___2 { + struct device *dev; + const char *label; + 
unsigned int flags; + enum usb_phy_type type; + enum usb_phy_events last_event; + struct usb_otg *otg; + struct device *io_dev; + struct usb_phy_io_ops *io_ops; + void *io_priv; + struct extcon_dev *edev; + struct extcon_dev *id_edev; + struct notifier_block vbus_nb; + struct notifier_block id_nb; + struct notifier_block type_nb; + enum usb_charger_type chg_type; + enum usb_charger_state chg_state; + struct usb_charger_current chg_cur; + struct work_struct chg_work; + struct atomic_notifier_head notifier; + u16 port_status; + u16 port_change; + struct list_head head; + int (*init)(struct usb_phy___2 *); + void (*shutdown)(struct usb_phy___2 *); + int (*set_vbus)(struct usb_phy___2 *, int); + int (*set_power)(struct usb_phy___2 *, unsigned int); + int (*set_suspend)(struct usb_phy___2 *, int); + int (*set_wakeup)(struct usb_phy___2 *, bool); + int (*notify_connect)(struct usb_phy___2 *, enum usb_device_speed); + int (*notify_disconnect)(struct usb_phy___2 *, enum usb_device_speed); + enum usb_charger_type (*charger_detect)(struct usb_phy___2 *); +}; + +struct phy_devm { + struct usb_phy___2 *phy; + struct notifier_block *nb; +}; + +enum usb_phy_interface { + USBPHY_INTERFACE_MODE_UNKNOWN = 0, + USBPHY_INTERFACE_MODE_UTMI = 1, + USBPHY_INTERFACE_MODE_UTMIW = 2, + USBPHY_INTERFACE_MODE_ULPI = 3, + USBPHY_INTERFACE_MODE_SERIAL = 4, + USBPHY_INTERFACE_MODE_HSIC = 5, +}; + +struct usb_gadget; + +struct usb_otg { + u8 default_a; + struct phy *phy; + struct usb_phy___2 *usb_phy; + struct usb_bus *host; + struct usb_gadget *gadget; + enum usb_otg_state state; + int (*set_host)(struct usb_otg *, struct usb_bus *); + int (*set_peripheral)(struct usb_otg *, struct usb_gadget *); + int (*set_vbus)(struct usb_otg *, bool); + int (*start_srp)(struct usb_otg *); + int (*start_hnp)(struct usb_otg *); +}; + +struct ulpi_info { + unsigned int id; + char *name; +}; + +enum amd_chipset_gen { + NOT_AMD_CHIPSET = 0, + AMD_CHIPSET_SB600 = 1, + AMD_CHIPSET_SB700 = 2, + AMD_CHIPSET_SB800 = 3, + AMD_CHIPSET_HUDSON2 = 4, + AMD_CHIPSET_BOLTON = 5, + AMD_CHIPSET_YANGTZE = 6, + AMD_CHIPSET_TAISHAN = 7, + AMD_CHIPSET_UNKNOWN = 8, +}; + +struct amd_chipset_type { + enum amd_chipset_gen gen; + u8 rev; +}; + +struct amd_chipset_info { + struct pci_dev *nb_dev; + struct pci_dev *smbus_dev; + int nb_type; + struct amd_chipset_type sb_type; + int isoc_reqs; + int probe_count; + bool need_pll_quirk; +}; + +struct serio_device_id { + __u8 type; + __u8 extra; + __u8 id; + __u8 proto; +}; + +struct serio_driver; + +struct serio { + void *port_data; + char name[32]; + char phys[32]; + char firmware_id[128]; + bool manual_bind; + struct serio_device_id id; + spinlock_t lock; + int (*write)(struct serio *, unsigned char); + int (*open)(struct serio *); + void (*close)(struct serio *); + int (*start)(struct serio *); + void (*stop)(struct serio *); + struct serio *parent; + struct list_head child_node; + struct list_head children; + unsigned int depth; + struct serio_driver *drv; + struct mutex drv_mutex; + struct device dev; + struct list_head node; + struct mutex *ps2_cmd_mutex; +}; + +struct serio_driver { + const char *description; + const struct serio_device_id *id_table; + bool manual_bind; + void (*write_wakeup)(struct serio *); + irqreturn_t (*interrupt)(struct serio *, unsigned char, unsigned int); + int (*connect)(struct serio *, struct serio_driver *); + int (*reconnect)(struct serio *); + int (*fast_reconnect)(struct serio *); + void (*disconnect)(struct serio *); + void (*cleanup)(struct serio *); + struct device_driver 
driver; +}; + +enum serio_event_type { + SERIO_RESCAN_PORT = 0, + SERIO_RECONNECT_PORT = 1, + SERIO_RECONNECT_SUBTREE = 2, + SERIO_REGISTER_PORT = 3, + SERIO_ATTACH_DRIVER = 4, +}; + +struct serio_event { + enum serio_event_type type; + void *object; + struct module *owner; + struct list_head node; +}; + +struct amba_kmi_port { + struct serio *io; + struct clk *clk; + void *base; + unsigned int irq; + unsigned int divisor; + unsigned int open; +}; + +struct ps2dev { + struct serio *serio; + struct mutex cmd_mutex; + wait_queue_head_t wait; + long unsigned int flags; + u8 cmdbuf[8]; + u8 cmdcnt; + u8 nak; +}; + +struct input_mt_slot { + int abs[14]; + unsigned int frame; + unsigned int key; +}; + +struct input_mt { + int trkid; + int num_slots; + int slot; + unsigned int flags; + unsigned int frame; + int *red; + struct input_mt_slot slots[0]; +}; + +union input_seq_state { + struct { + short unsigned int pos; + bool mutex_acquired; + }; + void *p; +}; + +struct input_devres { + struct input_dev *input; +}; + +struct input_event { + __kernel_ulong_t __sec; + __kernel_ulong_t __usec; + __u16 type; + __u16 code; + __s32 value; +}; + +struct input_event_compat { + compat_ulong_t sec; + compat_ulong_t usec; + __u16 type; + __u16 code; + __s32 value; +}; + +struct ff_periodic_effect_compat { + __u16 waveform; + __u16 period; + __s16 magnitude; + __s16 offset; + __u16 phase; + struct ff_envelope envelope; + __u32 custom_len; + compat_uptr_t custom_data; +}; + +struct ff_effect_compat { + __u16 type; + __s16 id; + __u16 direction; + struct ff_trigger trigger; + struct ff_replay replay; + union { + struct ff_constant_effect constant; + struct ff_ramp_effect ramp; + struct ff_periodic_effect_compat periodic; + struct ff_condition_effect condition[2]; + struct ff_rumble_effect rumble; + } u; +}; + +struct input_mt_pos { + s16 x; + s16 y; +}; + +struct input_dev_poller { + void (*poll)(struct input_dev *); + unsigned int poll_interval; + unsigned int poll_interval_max; + unsigned int poll_interval_min; + struct input_dev *input; + struct delayed_work work; +}; + +struct mousedev_hw_data { + int dx; + int dy; + int dz; + int x; + int y; + int abs_event; + long unsigned int buttons; +}; + +struct mousedev { + int open; + struct input_handle handle; + wait_queue_head_t wait; + struct list_head client_list; + spinlock_t client_lock; + struct mutex mutex; + struct device dev; + struct cdev cdev; + bool exist; + struct list_head mixdev_node; + bool opened_by_mixdev; + struct mousedev_hw_data packet; + unsigned int pkt_count; + int old_x[4]; + int old_y[4]; + int frac_dx; + int frac_dy; + long unsigned int touch; + int (*open_device)(struct mousedev *); + void (*close_device)(struct mousedev *); +}; + +enum mousedev_emul { + MOUSEDEV_EMUL_PS2 = 0, + MOUSEDEV_EMUL_IMPS = 1, + MOUSEDEV_EMUL_EXPS = 2, +}; + +struct mousedev_motion { + int dx; + int dy; + int dz; + long unsigned int buttons; +}; + +struct mousedev_client { + struct fasync_struct *fasync; + struct mousedev *mousedev; + struct list_head node; + struct mousedev_motion packets[16]; + unsigned int head; + unsigned int tail; + spinlock_t packet_lock; + int pos_x; + int pos_y; + u8 ps2[6]; + unsigned char ready; + unsigned char buffer; + unsigned char bufsiz; + unsigned char imexseq; + unsigned char impsseq; + enum mousedev_emul mode; + long unsigned int last_buttons; +}; + +enum { + FRACTION_DENOM = 128, +}; + +struct atkbd { + struct ps2dev ps2dev; + struct input_dev *dev; + char name[64]; + char phys[32]; + short unsigned int id; + short unsigned 
int keycode[512]; + long unsigned int force_release_mask[8]; + unsigned char set; + bool translated; + bool extra; + bool write; + bool softrepeat; + bool softraw; + bool scroll; + bool enabled; + unsigned char emul; + bool resend; + bool release; + long unsigned int xl_bit; + unsigned int last; + long unsigned int time; + long unsigned int err_count; + struct delayed_work event_work; + long unsigned int event_jiffies; + long unsigned int event_mask; + struct mutex mutex; + u32 function_row_physmap[24]; + int num_function_row_keys; +}; + +struct touchscreen_properties { + unsigned int max_x; + unsigned int max_y; + bool invert_x; + bool invert_y; + bool swap_x_y; +}; + +struct xenkbd_motion { + uint8_t type; + int32_t rel_x; + int32_t rel_y; + int32_t rel_z; +}; + +struct xenkbd_key { + uint8_t type; + uint8_t pressed; + uint32_t keycode; +}; + +struct xenkbd_position { + uint8_t type; + int32_t abs_x; + int32_t abs_y; + int32_t rel_z; +}; + +struct xenkbd_mtouch { + uint8_t type; + uint8_t event_type; + uint8_t contact_id; + uint8_t reserved[5]; + union { + struct { + int32_t abs_x; + int32_t abs_y; + } pos; + struct { + uint32_t major; + uint32_t minor; + } shape; + int16_t orientation; + } u; +}; + +union xenkbd_in_event { + uint8_t type; + struct xenkbd_motion motion; + struct xenkbd_key key; + struct xenkbd_position pos; + struct xenkbd_mtouch mtouch; + char pad[40]; +}; + +struct xenkbd_page { + uint32_t in_cons; + uint32_t in_prod; + uint32_t out_cons; + uint32_t out_prod; +}; + +struct xenkbd_info { + struct input_dev *kbd; + struct input_dev *ptr; + struct input_dev *mtouch; + struct xenkbd_page *page; + int gref; + int irq; + struct xenbus_device *xbdev; + char phys[32]; + int mtouch_cur_contact_id; +}; + +enum { + KPARAM_X = 0, + KPARAM_Y = 1, + KPARAM_CNT___2 = 2, +}; + +struct trace_event_raw_rtc_time_alarm_class { + struct trace_entry ent; + time64_t secs; + int err; + char __data[0]; +}; + +struct trace_event_raw_rtc_irq_set_freq { + struct trace_entry ent; + int freq; + int err; + char __data[0]; +}; + +struct trace_event_raw_rtc_irq_set_state { + struct trace_entry ent; + int enabled; + int err; + char __data[0]; +}; + +struct trace_event_raw_rtc_alarm_irq_enable { + struct trace_entry ent; + unsigned int enabled; + int err; + char __data[0]; +}; + +struct trace_event_raw_rtc_offset_class { + struct trace_entry ent; + long int offset; + int err; + char __data[0]; +}; + +struct trace_event_raw_rtc_timer_class { + struct trace_entry ent; + struct rtc_timer *timer; + ktime_t expires; + ktime_t period; + char __data[0]; +}; + +struct trace_event_data_offsets_rtc_time_alarm_class {}; + +struct trace_event_data_offsets_rtc_irq_set_freq {}; + +struct trace_event_data_offsets_rtc_irq_set_state {}; + +struct trace_event_data_offsets_rtc_alarm_irq_enable {}; + +struct trace_event_data_offsets_rtc_offset_class {}; + +struct trace_event_data_offsets_rtc_timer_class {}; + +typedef void (*btf_trace_rtc_set_time)(void *, time64_t, int); + +typedef void (*btf_trace_rtc_read_time)(void *, time64_t, int); + +typedef void (*btf_trace_rtc_set_alarm)(void *, time64_t, int); + +typedef void (*btf_trace_rtc_read_alarm)(void *, time64_t, int); + +typedef void (*btf_trace_rtc_irq_set_freq)(void *, int, int); + +typedef void (*btf_trace_rtc_irq_set_state)(void *, int, int); + +typedef void (*btf_trace_rtc_alarm_irq_enable)(void *, unsigned int, int); + +typedef void (*btf_trace_rtc_set_offset)(void *, long int, int); + +typedef void (*btf_trace_rtc_read_offset)(void *, long int, int); + +typedef 
void (*btf_trace_rtc_timer_enqueue)(void *, struct rtc_timer *); + +typedef void (*btf_trace_rtc_timer_dequeue)(void *, struct rtc_timer *); + +typedef void (*btf_trace_rtc_timer_fired)(void *, struct rtc_timer *); + +enum { + none = 0, + day = 1, + month = 2, + year = 3, +}; + +struct pl031_vendor_data { + struct rtc_class_ops ops; + bool clockwatch; + bool st_weekday; + long unsigned int irqflags; + time64_t range_min; + timeu64_t range_max; +}; + +struct pl031_local { + struct pl031_vendor_data *vendor; + struct rtc_device *rtc; + void *base; +}; + +struct sun6i_rtc_clk_data { + long unsigned int rc_osc_rate; + unsigned int fixed_prescaler: 16; + unsigned int has_prescaler: 1; + unsigned int has_out_clk: 1; + unsigned int export_iosc: 1; + unsigned int has_losc_en: 1; + unsigned int has_auto_swt: 1; +}; + +struct sun6i_rtc_dev { + struct rtc_device *rtc; + const struct sun6i_rtc_clk_data *data; + void *base; + int irq; + long unsigned int alarm; + struct clk_hw hw; + struct clk_hw *int_osc; + struct clk *losc; + struct clk *ext_losc; + spinlock_t lock; +}; + +struct xgene_rtc_dev { + struct rtc_device *rtc; + void *csr_base; + struct clk *clk; + unsigned int irq_wake; + unsigned int irq_enabled; +}; + +struct i2c_board_info { + char type[20]; + short unsigned int flags; + short unsigned int addr; + const char *dev_name; + void *platform_data; + struct device_node *of_node; + struct fwnode_handle *fwnode; + const struct property_entry *properties; + const struct resource *resources; + unsigned int num_resources; + int irq; +}; + +struct i2c_devinfo { + struct list_head list; + int busnum; + struct i2c_board_info board_info; +}; + +struct i2c_device_id { + char name[20]; + kernel_ulong_t driver_data; +}; + +struct i2c_device_identity { + u16 manufacturer_id; + u16 part_id; + u8 die_revision; +}; + +enum i2c_alert_protocol { + I2C_PROTOCOL_SMBUS_ALERT = 0, + I2C_PROTOCOL_SMBUS_HOST_NOTIFY = 1, +}; + +struct i2c_driver { + unsigned int class; + int (*probe)(struct i2c_client *, const struct i2c_device_id *); + int (*remove)(struct i2c_client *); + int (*probe_new)(struct i2c_client *); + void (*shutdown)(struct i2c_client *); + void (*alert)(struct i2c_client *, enum i2c_alert_protocol, unsigned int); + int (*command)(struct i2c_client *, unsigned int, void *); + struct device_driver driver; + const struct i2c_device_id *id_table; + int (*detect)(struct i2c_client *, struct i2c_board_info *); + const short unsigned int *address_list; + struct list_head clients; +}; + +struct i2c_timings { + u32 bus_freq_hz; + u32 scl_rise_ns; + u32 scl_fall_ns; + u32 scl_int_delay_ns; + u32 sda_fall_ns; + u32 sda_hold_ns; + u32 digital_filter_width_ns; + u32 analog_filter_cutoff_freq_hz; +}; + +struct trace_event_raw_i2c_write { + struct trace_entry ent; + int adapter_nr; + __u16 msg_nr; + __u16 addr; + __u16 flags; + __u16 len; + u32 __data_loc_buf; + char __data[0]; +}; + +struct trace_event_raw_i2c_read { + struct trace_entry ent; + int adapter_nr; + __u16 msg_nr; + __u16 addr; + __u16 flags; + __u16 len; + char __data[0]; +}; + +struct trace_event_raw_i2c_reply { + struct trace_entry ent; + int adapter_nr; + __u16 msg_nr; + __u16 addr; + __u16 flags; + __u16 len; + u32 __data_loc_buf; + char __data[0]; +}; + +struct trace_event_raw_i2c_result { + struct trace_entry ent; + int adapter_nr; + __u16 nr_msgs; + __s16 ret; + char __data[0]; +}; + +struct trace_event_data_offsets_i2c_write { + u32 buf; +}; + +struct trace_event_data_offsets_i2c_read {}; + +struct trace_event_data_offsets_i2c_reply { + u32 buf; 
+}; + +struct trace_event_data_offsets_i2c_result {}; + +typedef void (*btf_trace_i2c_write)(void *, const struct i2c_adapter *, const struct i2c_msg *, int); + +typedef void (*btf_trace_i2c_read)(void *, const struct i2c_adapter *, const struct i2c_msg *, int); + +typedef void (*btf_trace_i2c_reply)(void *, const struct i2c_adapter *, const struct i2c_msg *, int); + +typedef void (*btf_trace_i2c_result)(void *, const struct i2c_adapter *, int, int); + +struct i2c_dummy_devres { + struct i2c_client *client; +}; + +struct class_compat___2; + +struct i2c_cmd_arg { + unsigned int cmd; + void *arg; +}; + +struct i2c_smbus_alert_setup { + int irq; +}; + +struct trace_event_raw_smbus_write { + struct trace_entry ent; + int adapter_nr; + __u16 addr; + __u16 flags; + __u8 command; + __u8 len; + __u32 protocol; + __u8 buf[34]; + char __data[0]; +}; + +struct trace_event_raw_smbus_read { + struct trace_entry ent; + int adapter_nr; + __u16 flags; + __u16 addr; + __u8 command; + __u32 protocol; + __u8 buf[34]; + char __data[0]; +}; + +struct trace_event_raw_smbus_reply { + struct trace_entry ent; + int adapter_nr; + __u16 addr; + __u16 flags; + __u8 command; + __u8 len; + __u32 protocol; + __u8 buf[34]; + char __data[0]; +}; + +struct trace_event_raw_smbus_result { + struct trace_entry ent; + int adapter_nr; + __u16 addr; + __u16 flags; + __u8 read_write; + __u8 command; + __s16 res; + __u32 protocol; + char __data[0]; +}; + +struct trace_event_data_offsets_smbus_write {}; + +struct trace_event_data_offsets_smbus_read {}; + +struct trace_event_data_offsets_smbus_reply {}; + +struct trace_event_data_offsets_smbus_result {}; + +typedef void (*btf_trace_smbus_write)(void *, const struct i2c_adapter *, u16, short unsigned int, char, u8, int, const union i2c_smbus_data *); + +typedef void (*btf_trace_smbus_read)(void *, const struct i2c_adapter *, u16, short unsigned int, char, u8, int); + +typedef void (*btf_trace_smbus_reply)(void *, const struct i2c_adapter *, u16, short unsigned int, char, u8, int, const union i2c_smbus_data *, int); + +typedef void (*btf_trace_smbus_result)(void *, const struct i2c_adapter *, u16, short unsigned int, char, u8, int, int); + +struct i2c_acpi_handler_data { + struct acpi_connection_info info; + struct i2c_adapter *adapter; +}; + +struct gsb_buffer { + u8 status; + u8 len; + union { + u16 wdata; + u8 bdata; + u8 data[0]; + }; +}; + +struct i2c_acpi_lookup { + struct i2c_board_info *info; + acpi_handle adapter_handle; + acpi_handle device_handle; + acpi_handle search_handle; + int n; + int index; + u32 speed; + u32 min_speed; + u32 force_speed; +}; + +struct dw_i2c_dev { + struct device *dev; + struct regmap *map; + struct regmap *sysmap; + void *base; + void *ext; + struct completion cmd_complete; + struct clk *clk; + struct clk *pclk; + struct reset_control *rst; + struct i2c_client *slave; + u32 (*get_clk_rate_khz)(struct dw_i2c_dev *); + int cmd_err; + struct i2c_msg *msgs; + int msgs_num; + int msg_write_idx; + u32 tx_buf_len; + u8 *tx_buf; + int msg_read_idx; + u32 rx_buf_len; + u8 *rx_buf; + int msg_err; + unsigned int status; + u32 abort_source; + int irq; + u32 flags; + struct i2c_adapter adapter; + u32 functionality; + u32 master_cfg; + u32 slave_cfg; + unsigned int tx_fifo_depth; + unsigned int rx_fifo_depth; + int rx_outstanding; + struct i2c_timings timings; + u32 sda_hold_time; + u16 ss_hcnt; + u16 ss_lcnt; + u16 fs_hcnt; + u16 fs_lcnt; + u16 fp_hcnt; + u16 fp_lcnt; + u16 hs_hcnt; + u16 hs_lcnt; + int (*acquire_lock)(); + void (*release_lock)(); + bool 
shared_with_punit; + void (*disable)(struct dw_i2c_dev *); + void (*disable_int)(struct dw_i2c_dev *); + int (*init)(struct dw_i2c_dev *); + int (*set_sda_hold_time)(struct dw_i2c_dev *); + int mode; + struct i2c_bus_recovery_info rinfo; + bool suspended; +}; + +struct dw_i2c_platform_data { + unsigned int i2c_scl_freq; +}; + +struct bsc_regs { + u32 chip_address; + u32 data_in[8]; + u32 cnt_reg; + u32 ctl_reg; + u32 iic_enable; + u32 data_out[8]; + u32 ctlhi_reg; + u32 scl_param; +}; + +struct bsc_clk_param { + u32 hz; + u32 scl_mask; + u32 div_mask; +}; + +enum bsc_xfer_cmd { + CMD_WR = 0, + CMD_RD = 1, + CMD_WR_NOACK = 2, + CMD_RD_NOACK = 3, +}; + +enum bus_speeds { + SPD_375K = 0, + SPD_390K = 1, + SPD_187K = 2, + SPD_200K = 3, + SPD_93K = 4, + SPD_97K = 5, + SPD_46K = 6, + SPD_50K = 7, +}; + +struct brcmstb_i2c_dev { + struct device *device; + void *base; + int irq; + struct bsc_regs *bsc_regmap; + struct i2c_adapter adapter; + struct completion done; + u32 clk_freq_hz; + int data_regsz; +}; + +struct pps_ktime { + __s64 sec; + __s32 nsec; + __u32 flags; +}; + +struct pps_ktime_compat { + __s64 sec; + __s32 nsec; + __u32 flags; +}; + +struct pps_kinfo { + __u32 assert_sequence; + __u32 clear_sequence; + struct pps_ktime assert_tu; + struct pps_ktime clear_tu; + int current_mode; +}; + +struct pps_kinfo_compat { + __u32 assert_sequence; + __u32 clear_sequence; + struct pps_ktime_compat assert_tu; + struct pps_ktime_compat clear_tu; + int current_mode; +} __attribute__((packed)); + +struct pps_kparams { + int api_version; + int mode; + struct pps_ktime assert_off_tu; + struct pps_ktime clear_off_tu; +}; + +struct pps_fdata { + struct pps_kinfo info; + struct pps_ktime timeout; +}; + +struct pps_fdata_compat { + struct pps_kinfo_compat info; + struct pps_ktime_compat timeout; +} __attribute__((packed)); + +struct pps_bind_args { + int tsformat; + int edge; + int consumer; +}; + +struct pps_device; + +struct pps_source_info { + char name[32]; + char path[32]; + int mode; + void (*echo)(struct pps_device *, int, void *); + struct module *owner; + struct device *dev; +}; + +struct pps_device { + struct pps_source_info info; + struct pps_kparams params; + __u32 assert_sequence; + __u32 clear_sequence; + struct pps_ktime assert_tu; + struct pps_ktime clear_tu; + int current_mode; + unsigned int last_ev; + wait_queue_head_t queue; + unsigned int id; + const void *lookup_cookie; + struct cdev cdev; + struct device *dev; + struct fasync_struct *async_queue; + spinlock_t lock; +}; + +struct pps_event_time { + struct timespec64 ts_real; +}; + +struct ptp_extts_event { + struct ptp_clock_time t; + unsigned int index; + unsigned int flags; + unsigned int rsv[2]; +}; + +enum ptp_clock_events { + PTP_CLOCK_ALARM = 0, + PTP_CLOCK_EXTTS = 1, + PTP_CLOCK_PPS = 2, + PTP_CLOCK_PPSUSR = 3, +}; + +struct ptp_clock_event { + int type; + int index; + union { + u64 timestamp; + struct pps_event_time pps_times; + }; +}; + +struct timestamp_event_queue { + struct ptp_extts_event buf[128]; + int head; + int tail; + spinlock_t lock; +}; + +struct ptp_clock___2 { + struct posix_clock clock; + struct device dev; + struct ptp_clock_info *info; + dev_t devid; + int index; + struct pps_device *pps_source; + long int dialed_frequency; + struct timestamp_event_queue tsevq; + struct mutex tsevq_mux; + struct mutex pincfg_mux; + wait_queue_head_t tsev_wq; + int defunct; + struct device_attribute *pin_dev_attr; + struct attribute **pin_attr; + struct attribute_group pin_attr_group; + const struct attribute_group 
*pin_attr_groups[2]; + struct kthread_worker *kworker; + struct kthread_delayed_work aux_work; +}; + +struct ptp_clock_caps { + int max_adj; + int n_alarm; + int n_ext_ts; + int n_per_out; + int pps; + int n_pins; + int cross_timestamping; + int adjust_phase; + int rsv[12]; +}; + +struct ptp_sys_offset { + unsigned int n_samples; + unsigned int rsv[3]; + struct ptp_clock_time ts[51]; +}; + +struct ptp_sys_offset_extended { + unsigned int n_samples; + unsigned int rsv[3]; + struct ptp_clock_time ts[75]; +}; + +struct ptp_sys_offset_precise { + struct ptp_clock_time device; + struct ptp_clock_time sys_realtime; + struct ptp_clock_time sys_monoraw; + unsigned int rsv[4]; +}; + +struct gpio_restart { + struct gpio_desc *reset_gpio; + struct notifier_block restart_handler; + u32 active_delay_ms; + u32 inactive_delay_ms; + u32 wait_delay_ms; +}; + +enum vexpress_reset_func { + FUNC_RESET = 0, + FUNC_SHUTDOWN = 1, + FUNC_REBOOT = 2, +}; + +struct xgene_reboot_context { + struct device *dev; + void *csr; + u32 mask; + struct notifier_block restart_handler; +}; + +struct syscon_reboot_context { + struct regmap *map; + u32 offset; + u32 value; + u32 mask; + struct notifier_block restart_handler; +}; + +struct reboot_mode_driver { + struct device *dev; + struct list_head head; + int (*write)(struct reboot_mode_driver *, unsigned int); + struct notifier_block reboot_notifier; +}; + +struct mode_info { + const char *mode; + u32 magic; + struct list_head list; +}; + +struct syscon_reboot_mode { + struct regmap *map; + struct reboot_mode_driver reboot; + u32 offset; + u32 mask; +}; + +enum power_supply_property { + POWER_SUPPLY_PROP_STATUS = 0, + POWER_SUPPLY_PROP_CHARGE_TYPE = 1, + POWER_SUPPLY_PROP_HEALTH = 2, + POWER_SUPPLY_PROP_PRESENT = 3, + POWER_SUPPLY_PROP_ONLINE = 4, + POWER_SUPPLY_PROP_AUTHENTIC = 5, + POWER_SUPPLY_PROP_TECHNOLOGY = 6, + POWER_SUPPLY_PROP_CYCLE_COUNT = 7, + POWER_SUPPLY_PROP_VOLTAGE_MAX = 8, + POWER_SUPPLY_PROP_VOLTAGE_MIN = 9, + POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN = 10, + POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN = 11, + POWER_SUPPLY_PROP_VOLTAGE_NOW = 12, + POWER_SUPPLY_PROP_VOLTAGE_AVG = 13, + POWER_SUPPLY_PROP_VOLTAGE_OCV = 14, + POWER_SUPPLY_PROP_VOLTAGE_BOOT = 15, + POWER_SUPPLY_PROP_CURRENT_MAX = 16, + POWER_SUPPLY_PROP_CURRENT_NOW = 17, + POWER_SUPPLY_PROP_CURRENT_AVG = 18, + POWER_SUPPLY_PROP_CURRENT_BOOT = 19, + POWER_SUPPLY_PROP_POWER_NOW = 20, + POWER_SUPPLY_PROP_POWER_AVG = 21, + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN = 22, + POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN = 23, + POWER_SUPPLY_PROP_CHARGE_FULL = 24, + POWER_SUPPLY_PROP_CHARGE_EMPTY = 25, + POWER_SUPPLY_PROP_CHARGE_NOW = 26, + POWER_SUPPLY_PROP_CHARGE_AVG = 27, + POWER_SUPPLY_PROP_CHARGE_COUNTER = 28, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT = 29, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX = 30, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE = 31, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX = 32, + POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT = 33, + POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX = 34, + POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD = 35, + POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD = 36, + POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT = 37, + POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT = 38, + POWER_SUPPLY_PROP_INPUT_POWER_LIMIT = 39, + POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN = 40, + POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN = 41, + POWER_SUPPLY_PROP_ENERGY_FULL = 42, + POWER_SUPPLY_PROP_ENERGY_EMPTY = 43, + POWER_SUPPLY_PROP_ENERGY_NOW = 44, + POWER_SUPPLY_PROP_ENERGY_AVG = 45, + POWER_SUPPLY_PROP_CAPACITY = 
46, + POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN = 47, + POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX = 48, + POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN = 49, + POWER_SUPPLY_PROP_CAPACITY_LEVEL = 50, + POWER_SUPPLY_PROP_TEMP = 51, + POWER_SUPPLY_PROP_TEMP_MAX = 52, + POWER_SUPPLY_PROP_TEMP_MIN = 53, + POWER_SUPPLY_PROP_TEMP_ALERT_MIN = 54, + POWER_SUPPLY_PROP_TEMP_ALERT_MAX = 55, + POWER_SUPPLY_PROP_TEMP_AMBIENT = 56, + POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN = 57, + POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX = 58, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW = 59, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG = 60, + POWER_SUPPLY_PROP_TIME_TO_FULL_NOW = 61, + POWER_SUPPLY_PROP_TIME_TO_FULL_AVG = 62, + POWER_SUPPLY_PROP_TYPE = 63, + POWER_SUPPLY_PROP_USB_TYPE = 64, + POWER_SUPPLY_PROP_SCOPE = 65, + POWER_SUPPLY_PROP_PRECHARGE_CURRENT = 66, + POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT = 67, + POWER_SUPPLY_PROP_CALIBRATE = 68, + POWER_SUPPLY_PROP_MANUFACTURE_YEAR = 69, + POWER_SUPPLY_PROP_MANUFACTURE_MONTH = 70, + POWER_SUPPLY_PROP_MANUFACTURE_DAY = 71, + POWER_SUPPLY_PROP_MODEL_NAME = 72, + POWER_SUPPLY_PROP_MANUFACTURER = 73, + POWER_SUPPLY_PROP_SERIAL_NUMBER = 74, +}; + +enum power_supply_type { + POWER_SUPPLY_TYPE_UNKNOWN = 0, + POWER_SUPPLY_TYPE_BATTERY = 1, + POWER_SUPPLY_TYPE_UPS = 2, + POWER_SUPPLY_TYPE_MAINS = 3, + POWER_SUPPLY_TYPE_USB = 4, + POWER_SUPPLY_TYPE_USB_DCP = 5, + POWER_SUPPLY_TYPE_USB_CDP = 6, + POWER_SUPPLY_TYPE_USB_ACA = 7, + POWER_SUPPLY_TYPE_USB_TYPE_C = 8, + POWER_SUPPLY_TYPE_USB_PD = 9, + POWER_SUPPLY_TYPE_USB_PD_DRP = 10, + POWER_SUPPLY_TYPE_APPLE_BRICK_ID = 11, + POWER_SUPPLY_TYPE_WIRELESS = 12, +}; + +enum power_supply_usb_type { + POWER_SUPPLY_USB_TYPE_UNKNOWN = 0, + POWER_SUPPLY_USB_TYPE_SDP = 1, + POWER_SUPPLY_USB_TYPE_DCP = 2, + POWER_SUPPLY_USB_TYPE_CDP = 3, + POWER_SUPPLY_USB_TYPE_ACA = 4, + POWER_SUPPLY_USB_TYPE_C = 5, + POWER_SUPPLY_USB_TYPE_PD = 6, + POWER_SUPPLY_USB_TYPE_PD_DRP = 7, + POWER_SUPPLY_USB_TYPE_PD_PPS = 8, + POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID = 9, +}; + +enum power_supply_notifier_events { + PSY_EVENT_PROP_CHANGED = 0, +}; + +union power_supply_propval { + int intval; + const char *strval; +}; + +struct power_supply_config { + struct device_node *of_node; + struct fwnode_handle *fwnode; + void *drv_data; + const struct attribute_group **attr_grp; + char **supplied_to; + size_t num_supplicants; +}; + +struct power_supply; + +struct power_supply_desc { + const char *name; + enum power_supply_type type; + const enum power_supply_usb_type *usb_types; + size_t num_usb_types; + const enum power_supply_property *properties; + size_t num_properties; + int (*get_property)(struct power_supply *, enum power_supply_property, union power_supply_propval *); + int (*set_property)(struct power_supply *, enum power_supply_property, const union power_supply_propval *); + int (*property_is_writeable)(struct power_supply *, enum power_supply_property); + void (*external_power_changed)(struct power_supply *); + void (*set_charged)(struct power_supply *); + bool no_thermal; + int use_for_apm; +}; + +struct thermal_zone_device; + +struct power_supply { + const struct power_supply_desc *desc; + char **supplied_to; + size_t num_supplicants; + char **supplied_from; + size_t num_supplies; + struct device_node *of_node; + void *drv_data; + struct device dev; + struct work_struct changed_work; + struct delayed_work deferred_register_work; + spinlock_t changed_lock; + bool changed; + bool initialized; + bool removing; + atomic_t use_cnt; + struct thermal_zone_device *tzd; + struct thermal_cooling_device 
*tcd; + struct led_trigger *charging_full_trig; + char *charging_full_trig_name; + struct led_trigger *charging_trig; + char *charging_trig_name; + struct led_trigger *full_trig; + char *full_trig_name; + struct led_trigger *online_trig; + char *online_trig_name; + struct led_trigger *charging_blink_full_solid_trig; + char *charging_blink_full_solid_trig_name; +}; + +enum thermal_device_mode { + THERMAL_DEVICE_DISABLED = 0, + THERMAL_DEVICE_ENABLED = 1, +}; + +enum thermal_notify_event { + THERMAL_EVENT_UNSPECIFIED = 0, + THERMAL_EVENT_TEMP_SAMPLE = 1, + THERMAL_TRIP_VIOLATED = 2, + THERMAL_TRIP_CHANGED = 3, + THERMAL_DEVICE_DOWN = 4, + THERMAL_DEVICE_UP = 5, + THERMAL_DEVICE_POWER_CAPABILITY_CHANGED = 6, + THERMAL_TABLE_CHANGED = 7, + THERMAL_EVENT_KEEP_ALIVE = 8, +}; + +struct thermal_attr; + +struct thermal_zone_device_ops; + +struct thermal_zone_params; + +struct thermal_governor; + +struct thermal_zone_device { + int id; + char type[20]; + struct device device; + struct attribute_group trips_attribute_group; + struct thermal_attr *trip_temp_attrs; + struct thermal_attr *trip_type_attrs; + struct thermal_attr *trip_hyst_attrs; + enum thermal_device_mode mode; + void *devdata; + int trips; + long unsigned int trips_disabled; + int passive_delay; + int polling_delay; + int temperature; + int last_temperature; + int emul_temperature; + int passive; + int prev_low_trip; + int prev_high_trip; + unsigned int forced_passive; + atomic_t need_update; + struct thermal_zone_device_ops *ops; + struct thermal_zone_params *tzp; + struct thermal_governor *governor; + void *governor_data; + struct list_head thermal_instances; + struct ida ida; + struct mutex lock; + struct list_head node; + struct delayed_work poll_queue; + enum thermal_notify_event notify_event; +}; + +struct power_supply_battery_ocv_table { + int ocv; + int capacity; +}; + +struct power_supply_resistance_temp_table { + int temp; + int resistance; +}; + +struct power_supply_battery_info { + int energy_full_design_uwh; + int charge_full_design_uah; + int voltage_min_design_uv; + int voltage_max_design_uv; + int tricklecharge_current_ua; + int precharge_current_ua; + int precharge_voltage_max_uv; + int charge_term_current_ua; + int charge_restart_voltage_uv; + int overvoltage_limit_uv; + int constant_charge_current_max_ua; + int constant_charge_voltage_max_uv; + int factory_internal_resistance_uohm; + int ocv_temp[20]; + int temp_ambient_alert_min; + int temp_ambient_alert_max; + int temp_alert_min; + int temp_alert_max; + int temp_min; + int temp_max; + struct power_supply_battery_ocv_table *ocv_table[20]; + int ocv_table_size[20]; + struct power_supply_resistance_temp_table *resist_table; + int resist_table_size; +}; + +enum thermal_trip_type { + THERMAL_TRIP_ACTIVE = 0, + THERMAL_TRIP_PASSIVE = 1, + THERMAL_TRIP_HOT = 2, + THERMAL_TRIP_CRITICAL = 3, +}; + +enum thermal_trend { + THERMAL_TREND_STABLE = 0, + THERMAL_TREND_RAISING = 1, + THERMAL_TREND_DROPPING = 2, + THERMAL_TREND_RAISE_FULL = 3, + THERMAL_TREND_DROP_FULL = 4, +}; + +struct thermal_zone_device_ops { + int (*bind)(struct thermal_zone_device *, struct thermal_cooling_device *); + int (*unbind)(struct thermal_zone_device *, struct thermal_cooling_device *); + int (*get_temp)(struct thermal_zone_device *, int *); + int (*set_trips)(struct thermal_zone_device *, int, int); + int (*change_mode)(struct thermal_zone_device *, enum thermal_device_mode); + int (*get_trip_type)(struct thermal_zone_device *, int, enum thermal_trip_type *); + int (*get_trip_temp)(struct 
thermal_zone_device *, int, int *); + int (*set_trip_temp)(struct thermal_zone_device *, int, int); + int (*get_trip_hyst)(struct thermal_zone_device *, int, int *); + int (*set_trip_hyst)(struct thermal_zone_device *, int, int); + int (*get_crit_temp)(struct thermal_zone_device *, int *); + int (*set_emul_temp)(struct thermal_zone_device *, int); + int (*get_trend)(struct thermal_zone_device *, int, enum thermal_trend *); + int (*notify)(struct thermal_zone_device *, int, enum thermal_trip_type); +}; + +struct thermal_bind_params; + +struct thermal_zone_params { + char governor_name[20]; + bool no_hwmon; + int num_tbps; + struct thermal_bind_params *tbp; + u32 sustainable_power; + s32 k_po; + s32 k_pu; + s32 k_i; + s32 k_d; + s32 integral_cutoff; + int slope; + int offset; +}; + +struct thermal_governor { + char name[20]; + int (*bind_to_tz)(struct thermal_zone_device *); + void (*unbind_from_tz)(struct thermal_zone_device *); + int (*throttle)(struct thermal_zone_device *, int); + struct list_head governor_list; +}; + +struct thermal_bind_params { + struct thermal_cooling_device *cdev; + int weight; + int trip_mask; + long unsigned int *binding_limits; + int (*match)(struct thermal_zone_device *, struct thermal_cooling_device *); +}; + +struct psy_am_i_supplied_data { + struct power_supply *psy; + unsigned int count; +}; + +enum { + POWER_SUPPLY_STATUS_UNKNOWN = 0, + POWER_SUPPLY_STATUS_CHARGING = 1, + POWER_SUPPLY_STATUS_DISCHARGING = 2, + POWER_SUPPLY_STATUS_NOT_CHARGING = 3, + POWER_SUPPLY_STATUS_FULL = 4, +}; + +enum { + POWER_SUPPLY_CHARGE_TYPE_UNKNOWN = 0, + POWER_SUPPLY_CHARGE_TYPE_NONE = 1, + POWER_SUPPLY_CHARGE_TYPE_TRICKLE = 2, + POWER_SUPPLY_CHARGE_TYPE_FAST = 3, + POWER_SUPPLY_CHARGE_TYPE_STANDARD = 4, + POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE = 5, + POWER_SUPPLY_CHARGE_TYPE_CUSTOM = 6, + POWER_SUPPLY_CHARGE_TYPE_LONGLIFE = 7, +}; + +enum { + POWER_SUPPLY_HEALTH_UNKNOWN = 0, + POWER_SUPPLY_HEALTH_GOOD = 1, + POWER_SUPPLY_HEALTH_OVERHEAT = 2, + POWER_SUPPLY_HEALTH_DEAD = 3, + POWER_SUPPLY_HEALTH_OVERVOLTAGE = 4, + POWER_SUPPLY_HEALTH_UNSPEC_FAILURE = 5, + POWER_SUPPLY_HEALTH_COLD = 6, + POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE = 7, + POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE = 8, + POWER_SUPPLY_HEALTH_OVERCURRENT = 9, + POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED = 10, + POWER_SUPPLY_HEALTH_WARM = 11, + POWER_SUPPLY_HEALTH_COOL = 12, + POWER_SUPPLY_HEALTH_HOT = 13, +}; + +enum { + POWER_SUPPLY_TECHNOLOGY_UNKNOWN = 0, + POWER_SUPPLY_TECHNOLOGY_NiMH = 1, + POWER_SUPPLY_TECHNOLOGY_LION = 2, + POWER_SUPPLY_TECHNOLOGY_LIPO = 3, + POWER_SUPPLY_TECHNOLOGY_LiFe = 4, + POWER_SUPPLY_TECHNOLOGY_NiCd = 5, + POWER_SUPPLY_TECHNOLOGY_LiMn = 6, +}; + +enum { + POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN = 0, + POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL = 1, + POWER_SUPPLY_CAPACITY_LEVEL_LOW = 2, + POWER_SUPPLY_CAPACITY_LEVEL_NORMAL = 3, + POWER_SUPPLY_CAPACITY_LEVEL_HIGH = 4, + POWER_SUPPLY_CAPACITY_LEVEL_FULL = 5, +}; + +enum { + POWER_SUPPLY_SCOPE_UNKNOWN = 0, + POWER_SUPPLY_SCOPE_SYSTEM = 1, + POWER_SUPPLY_SCOPE_DEVICE = 2, +}; + +struct power_supply_attr { + const char *prop_name; + char attr_name[31]; + struct device_attribute dev_attr; + const char * const *text_values; + int text_values_len; +}; + +enum data_source { + CM_BATTERY_PRESENT = 0, + CM_NO_BATTERY = 1, + CM_FUEL_GAUGE = 2, + CM_CHARGER_STAT = 3, +}; + +enum polling_modes { + CM_POLL_DISABLE = 0, + CM_POLL_ALWAYS = 1, + CM_POLL_EXTERNAL_POWER_ONLY = 2, + CM_POLL_CHARGING_ONLY = 3, +}; + +enum cm_batt_temp { + CM_BATT_OK = 0, + CM_BATT_OVERHEAT = 1, 
+ CM_BATT_COLD = 2, +}; + +struct charger_regulator; + +struct charger_manager; + +struct charger_cable { + const char *extcon_name; + const char *name; + struct extcon_dev *extcon_dev; + u64 extcon_type; + struct work_struct wq; + struct notifier_block nb; + bool attached; + struct charger_regulator *charger; + int min_uA; + int max_uA; + struct charger_manager *cm; +}; + +struct charger_regulator { + const char *regulator_name; + struct regulator *consumer; + int externally_control; + struct charger_cable *cables; + int num_cables; + struct attribute_group attr_grp; + struct device_attribute attr_name; + struct device_attribute attr_state; + struct device_attribute attr_externally_control; + struct attribute *attrs[4]; + struct charger_manager *cm; +}; + +struct charger_desc; + +struct charger_manager { + struct list_head entry; + struct device *dev; + struct charger_desc *desc; + struct thermal_zone_device *tzd_batt; + bool charger_enabled; + int emergency_stop; + char psy_name_buf[31]; + struct power_supply_desc charger_psy_desc; + struct power_supply *charger_psy; + u64 charging_start_time; + u64 charging_end_time; + int battery_status; +}; + +struct charger_desc { + const char *psy_name; + enum polling_modes polling_mode; + unsigned int polling_interval_ms; + unsigned int fullbatt_vchkdrop_uV; + unsigned int fullbatt_uV; + unsigned int fullbatt_soc; + unsigned int fullbatt_full_capacity; + enum data_source battery_present; + const char **psy_charger_stat; + int num_charger_regulators; + struct charger_regulator *charger_regulators; + const struct attribute_group **sysfs_groups; + const char *psy_fuel_gauge; + const char *thermal_zone; + int temp_min; + int temp_max; + int temp_diff; + bool measure_battery_temp; + u32 charging_max_duration_ms; + u32 discharging_max_duration_ms; +}; + +struct thermal_attr { + struct device_attribute attr; + char name[20]; +}; + +struct devfreq_dev_status { + long unsigned int total_time; + long unsigned int busy_time; + long unsigned int current_frequency; + void *private_data; +}; + +struct trace_event_raw_thermal_temperature { + struct trace_entry ent; + u32 __data_loc_thermal_zone; + int id; + int temp_prev; + int temp; + char __data[0]; +}; + +struct trace_event_raw_cdev_update { + struct trace_entry ent; + u32 __data_loc_type; + long unsigned int target; + char __data[0]; +}; + +struct trace_event_raw_thermal_zone_trip { + struct trace_entry ent; + u32 __data_loc_thermal_zone; + int id; + int trip; + enum thermal_trip_type trip_type; + char __data[0]; +}; + +struct trace_event_raw_thermal_power_cpu_get_power { + struct trace_entry ent; + u32 __data_loc_cpumask; + long unsigned int freq; + u32 __data_loc_load; + size_t load_len; + u32 dynamic_power; + char __data[0]; +}; + +struct trace_event_raw_thermal_power_cpu_limit { + struct trace_entry ent; + u32 __data_loc_cpumask; + unsigned int freq; + long unsigned int cdev_state; + u32 power; + char __data[0]; +}; + +struct trace_event_raw_thermal_power_devfreq_get_power { + struct trace_entry ent; + u32 __data_loc_type; + long unsigned int freq; + u32 load; + u32 dynamic_power; + u32 static_power; + u32 power; + char __data[0]; +}; + +struct trace_event_raw_thermal_power_devfreq_limit { + struct trace_entry ent; + u32 __data_loc_type; + unsigned int freq; + long unsigned int cdev_state; + u32 power; + char __data[0]; +}; + +struct trace_event_data_offsets_thermal_temperature { + u32 thermal_zone; +}; + +struct trace_event_data_offsets_cdev_update { + u32 type; +}; + +struct 
trace_event_data_offsets_thermal_zone_trip { + u32 thermal_zone; +}; + +struct trace_event_data_offsets_thermal_power_cpu_get_power { + u32 cpumask; + u32 load; +}; + +struct trace_event_data_offsets_thermal_power_cpu_limit { + u32 cpumask; +}; + +struct trace_event_data_offsets_thermal_power_devfreq_get_power { + u32 type; +}; + +struct trace_event_data_offsets_thermal_power_devfreq_limit { + u32 type; +}; + +typedef void (*btf_trace_thermal_temperature)(void *, struct thermal_zone_device *); + +typedef void (*btf_trace_cdev_update)(void *, struct thermal_cooling_device *, long unsigned int); + +typedef void (*btf_trace_thermal_zone_trip)(void *, struct thermal_zone_device *, int, enum thermal_trip_type); + +typedef void (*btf_trace_thermal_power_cpu_get_power)(void *, const struct cpumask *, long unsigned int, u32 *, size_t, u32); + +typedef void (*btf_trace_thermal_power_cpu_limit)(void *, const struct cpumask *, unsigned int, long unsigned int, u32); + +typedef void (*btf_trace_thermal_power_devfreq_get_power)(void *, struct thermal_cooling_device *, struct devfreq_dev_status *, long unsigned int, u32, u32, u32); + +typedef void (*btf_trace_thermal_power_devfreq_limit)(void *, struct thermal_cooling_device *, long unsigned int, long unsigned int, u32); + +struct thermal_instance { + int id; + char name[20]; + struct thermal_zone_device *tz; + struct thermal_cooling_device *cdev; + int trip; + bool initialized; + long unsigned int upper; + long unsigned int lower; + long unsigned int target; + char attr_name[20]; + struct device_attribute attr; + char weight_attr_name[20]; + struct device_attribute weight_attr; + struct list_head tz_node; + struct list_head cdev_node; + unsigned int weight; +}; + +struct genl_dumpit_info { + const struct genl_family *family; + struct genl_ops op; + struct nlattr **attrs; +}; + +enum thermal_genl_attr { + THERMAL_GENL_ATTR_UNSPEC = 0, + THERMAL_GENL_ATTR_TZ = 1, + THERMAL_GENL_ATTR_TZ_ID = 2, + THERMAL_GENL_ATTR_TZ_TEMP = 3, + THERMAL_GENL_ATTR_TZ_TRIP = 4, + THERMAL_GENL_ATTR_TZ_TRIP_ID = 5, + THERMAL_GENL_ATTR_TZ_TRIP_TYPE = 6, + THERMAL_GENL_ATTR_TZ_TRIP_TEMP = 7, + THERMAL_GENL_ATTR_TZ_TRIP_HYST = 8, + THERMAL_GENL_ATTR_TZ_MODE = 9, + THERMAL_GENL_ATTR_TZ_NAME = 10, + THERMAL_GENL_ATTR_TZ_CDEV_WEIGHT = 11, + THERMAL_GENL_ATTR_TZ_GOV = 12, + THERMAL_GENL_ATTR_TZ_GOV_NAME = 13, + THERMAL_GENL_ATTR_CDEV = 14, + THERMAL_GENL_ATTR_CDEV_ID = 15, + THERMAL_GENL_ATTR_CDEV_CUR_STATE = 16, + THERMAL_GENL_ATTR_CDEV_MAX_STATE = 17, + THERMAL_GENL_ATTR_CDEV_NAME = 18, + THERMAL_GENL_ATTR_GOV_NAME = 19, + __THERMAL_GENL_ATTR_MAX = 20, +}; + +enum thermal_genl_sampling { + THERMAL_GENL_SAMPLING_TEMP = 0, + __THERMAL_GENL_SAMPLING_MAX = 1, +}; + +enum thermal_genl_event { + THERMAL_GENL_EVENT_UNSPEC = 0, + THERMAL_GENL_EVENT_TZ_CREATE = 1, + THERMAL_GENL_EVENT_TZ_DELETE = 2, + THERMAL_GENL_EVENT_TZ_DISABLE = 3, + THERMAL_GENL_EVENT_TZ_ENABLE = 4, + THERMAL_GENL_EVENT_TZ_TRIP_UP = 5, + THERMAL_GENL_EVENT_TZ_TRIP_DOWN = 6, + THERMAL_GENL_EVENT_TZ_TRIP_CHANGE = 7, + THERMAL_GENL_EVENT_TZ_TRIP_ADD = 8, + THERMAL_GENL_EVENT_TZ_TRIP_DELETE = 9, + THERMAL_GENL_EVENT_CDEV_ADD = 10, + THERMAL_GENL_EVENT_CDEV_DELETE = 11, + THERMAL_GENL_EVENT_CDEV_STATE_UPDATE = 12, + THERMAL_GENL_EVENT_TZ_GOV_CHANGE = 13, + __THERMAL_GENL_EVENT_MAX = 14, +}; + +enum thermal_genl_cmd { + THERMAL_GENL_CMD_UNSPEC = 0, + THERMAL_GENL_CMD_TZ_GET_ID = 1, + THERMAL_GENL_CMD_TZ_GET_TRIP = 2, + THERMAL_GENL_CMD_TZ_GET_TEMP = 3, + THERMAL_GENL_CMD_TZ_GET_GOV = 4, + THERMAL_GENL_CMD_TZ_GET_MODE = 5, + 
THERMAL_GENL_CMD_CDEV_GET = 6, + __THERMAL_GENL_CMD_MAX = 7, +}; + +struct param { + struct nlattr **attrs; + struct sk_buff *msg; + const char *name; + int tz_id; + int cdev_id; + int trip_id; + int trip_temp; + int trip_type; + int trip_hyst; + int temp; + int cdev_state; + int cdev_max_state; +}; + +typedef int (*cb_t)(struct param *); + +struct thermal_zone_of_device_ops { + int (*get_temp)(void *, int *); + int (*get_trend)(void *, int, enum thermal_trend *); + int (*set_trips)(void *, int, int); + int (*set_emul_temp)(void *, int); + int (*set_trip_temp)(void *, int, int); +}; + +struct thermal_trip { + struct device_node *np; + int temperature; + int hysteresis; + enum thermal_trip_type type; +}; + +struct __thermal_cooling_bind_param { + struct device_node *cooling_device; + long unsigned int min; + long unsigned int max; +}; + +struct __thermal_bind_params { + struct __thermal_cooling_bind_param *tcbp; + unsigned int count; + unsigned int trip_id; + unsigned int usage; +}; + +struct __thermal_zone { + int passive_delay; + int polling_delay; + int slope; + int offset; + int ntrips; + struct thermal_trip *trips; + int num_tbps; + struct __thermal_bind_params *tbps; + void *sensor_data; + const struct thermal_zone_of_device_ops *ops; +}; + +struct time_in_idle { + u64 time; + u64 timestamp; +}; + +struct cpufreq_cooling_device { + int id; + u32 last_load; + unsigned int cpufreq_state; + unsigned int max_level; + struct em_perf_domain *em; + struct cpufreq_policy *policy; + struct list_head node; + struct time_in_idle *idle_time; + struct freq_qos_request qos_req; +}; + +enum devfreq_timer { + DEVFREQ_TIMER_DEFERRABLE = 0, + DEVFREQ_TIMER_DELAYED = 1, + DEVFREQ_TIMER_NUM = 2, +}; + +struct devfreq_dev_profile { + long unsigned int initial_freq; + unsigned int polling_ms; + enum devfreq_timer timer; + int (*target)(struct device *, long unsigned int *, u32); + int (*get_dev_status)(struct device *, struct devfreq_dev_status *); + int (*get_cur_freq)(struct device *, long unsigned int *); + void (*exit)(struct device *); + long unsigned int *freq_table; + unsigned int max_state; +}; + +struct devfreq_stats { + unsigned int total_trans; + unsigned int *trans_table; + u64 *time_in_state; + u64 last_update; +}; + +struct devfreq_governor; + +struct devfreq { + struct list_head node; + struct mutex lock; + struct device dev; + struct devfreq_dev_profile *profile; + const struct devfreq_governor *governor; + char governor_name[16]; + struct notifier_block nb; + struct delayed_work work; + long unsigned int previous_freq; + struct devfreq_dev_status last_status; + void *data; + struct dev_pm_qos_request user_min_freq_req; + struct dev_pm_qos_request user_max_freq_req; + long unsigned int scaling_min_freq; + long unsigned int scaling_max_freq; + bool stop_polling; + long unsigned int suspend_freq; + long unsigned int resume_freq; + atomic_t suspend_count; + struct devfreq_stats stats; + struct srcu_notifier_head transition_notifier_list; + struct notifier_block nb_min; + struct notifier_block nb_max; +}; + +struct devfreq_governor { + struct list_head node; + const char name[16]; + const unsigned int immutable; + const unsigned int interrupt_driven; + int (*get_target_freq)(struct devfreq *, long unsigned int *); + int (*event_handler)(struct devfreq *, unsigned int, void *); +}; + +struct devfreq_cooling_power { + long unsigned int (*get_static_power)(struct devfreq *, long unsigned int); + long unsigned int (*get_dynamic_power)(struct devfreq *, long unsigned int, long unsigned int); + int 
(*get_real_power)(struct devfreq *, u32 *, long unsigned int, long unsigned int); + long unsigned int dyn_power_coeff; +}; + +struct devfreq_cooling_device { + int id; + struct thermal_cooling_device *cdev; + struct devfreq *devfreq; + long unsigned int cooling_state; + u32 *power_table; + u32 *freq_table; + size_t freq_table_size; + struct devfreq_cooling_power *power_ops; + u32 res_util; + int capped_state; + struct dev_pm_qos_request req_max_freq; +}; + +struct amlogic_thermal_soc_calib_data { + int A; + int B; + int m; + int n; +}; + +struct amlogic_thermal_data { + int u_efuse_off; + const struct amlogic_thermal_soc_calib_data *calibration_parameters; + const struct regmap_config *regmap_config; +}; + +struct amlogic_thermal { + struct platform_device *pdev; + const struct amlogic_thermal_data *data; + struct regmap *regmap; + struct regmap *sec_ao_map; + struct clk *clk; + struct thermal_zone_device *tzd; + u32 trim_info; +}; + +struct watchdog_info { + __u32 options; + __u32 firmware_version; + __u8 identity[32]; +}; + +struct watchdog_device; + +struct watchdog_ops { + struct module *owner; + int (*start)(struct watchdog_device *); + int (*stop)(struct watchdog_device *); + int (*ping)(struct watchdog_device *); + unsigned int (*status)(struct watchdog_device *); + int (*set_timeout)(struct watchdog_device *, unsigned int); + int (*set_pretimeout)(struct watchdog_device *, unsigned int); + unsigned int (*get_timeleft)(struct watchdog_device *); + int (*restart)(struct watchdog_device *, long unsigned int, void *); + long int (*ioctl)(struct watchdog_device *, unsigned int, long unsigned int); +}; + +struct watchdog_governor; + +struct watchdog_core_data; + +struct watchdog_device { + int id; + struct device *parent; + const struct attribute_group **groups; + const struct watchdog_info *info; + const struct watchdog_ops *ops; + const struct watchdog_governor *gov; + unsigned int bootstatus; + unsigned int timeout; + unsigned int pretimeout; + unsigned int min_timeout; + unsigned int max_timeout; + unsigned int min_hw_heartbeat_ms; + unsigned int max_hw_heartbeat_ms; + struct notifier_block reboot_nb; + struct notifier_block restart_nb; + void *driver_data; + struct watchdog_core_data *wd_data; + long unsigned int status; + struct list_head deferred; +}; + +struct watchdog_governor { + const char name[20]; + void (*pretimeout)(struct watchdog_device *); +}; + +struct watchdog_core_data { + struct device dev; + struct cdev cdev; + struct watchdog_device *wdd; + struct mutex lock; + ktime_t last_keepalive; + ktime_t last_hw_keepalive; + ktime_t open_deadline; + struct hrtimer timer; + struct kthread_work work; + long unsigned int status; +}; + +struct mdp_device_descriptor_s { + __u32 number; + __u32 major; + __u32 minor; + __u32 raid_disk; + __u32 state; + __u32 reserved[27]; +}; + +typedef struct mdp_device_descriptor_s mdp_disk_t; + +struct mdp_superblock_s { + __u32 md_magic; + __u32 major_version; + __u32 minor_version; + __u32 patch_version; + __u32 gvalid_words; + __u32 set_uuid0; + __u32 ctime; + __u32 level; + __u32 size; + __u32 nr_disks; + __u32 raid_disks; + __u32 md_minor; + __u32 not_persistent; + __u32 set_uuid1; + __u32 set_uuid2; + __u32 set_uuid3; + __u32 gstate_creserved[16]; + __u32 utime; + __u32 state; + __u32 active_disks; + __u32 working_disks; + __u32 failed_disks; + __u32 spare_disks; + __u32 sb_csum; + __u32 events_lo; + __u32 events_hi; + __u32 cp_events_lo; + __u32 cp_events_hi; + __u32 recovery_cp; + __u64 reshape_position; + __u32 new_level; + __u32 
delta_disks; + __u32 new_layout; + __u32 new_chunk; + __u32 gstate_sreserved[14]; + __u32 layout; + __u32 chunk_size; + __u32 root_pv; + __u32 root_block; + __u32 pstate_reserved[60]; + mdp_disk_t disks[27]; + __u32 reserved[0]; + mdp_disk_t this_disk; +}; + +typedef struct mdp_superblock_s mdp_super_t; + +struct mdp_superblock_1 { + __le32 magic; + __le32 major_version; + __le32 feature_map; + __le32 pad0; + __u8 set_uuid[16]; + char set_name[32]; + __le64 ctime; + __le32 level; + __le32 layout; + __le64 size; + __le32 chunksize; + __le32 raid_disks; + union { + __le32 bitmap_offset; + struct { + __le16 offset; + __le16 size; + } ppl; + }; + __le32 new_level; + __le64 reshape_position; + __le32 delta_disks; + __le32 new_layout; + __le32 new_chunk; + __le32 new_offset; + __le64 data_offset; + __le64 data_size; + __le64 super_offset; + union { + __le64 recovery_offset; + __le64 journal_tail; + }; + __le32 dev_number; + __le32 cnt_corrected_read; + __u8 device_uuid[16]; + __u8 devflags; + __u8 bblog_shift; + __le16 bblog_size; + __le32 bblog_offset; + __le64 utime; + __le64 events; + __le64 resync_offset; + __le32 sb_csum; + __le32 max_dev; + __u8 pad3[32]; + __le16 dev_roles[0]; +}; + +struct mdu_version_s { + int major; + int minor; + int patchlevel; +}; + +typedef struct mdu_version_s mdu_version_t; + +struct mdu_array_info_s { + int major_version; + int minor_version; + int patch_version; + unsigned int ctime; + int level; + int size; + int nr_disks; + int raid_disks; + int md_minor; + int not_persistent; + unsigned int utime; + int state; + int active_disks; + int working_disks; + int failed_disks; + int spare_disks; + int layout; + int chunk_size; +}; + +typedef struct mdu_array_info_s mdu_array_info_t; + +struct mdu_disk_info_s { + int number; + int major; + int minor; + int raid_disk; + int state; +}; + +typedef struct mdu_disk_info_s mdu_disk_info_t; + +struct mdu_bitmap_file_s { + char pathname[4096]; +}; + +typedef struct mdu_bitmap_file_s mdu_bitmap_file_t; + +struct mddev; + +struct md_rdev; + +struct md_cluster_operations { + int (*join)(struct mddev *, int); + int (*leave)(struct mddev *); + int (*slot_number)(struct mddev *); + int (*resync_info_update)(struct mddev *, sector_t, sector_t); + void (*resync_info_get)(struct mddev *, sector_t *, sector_t *); + int (*metadata_update_start)(struct mddev *); + int (*metadata_update_finish)(struct mddev *); + void (*metadata_update_cancel)(struct mddev *); + int (*resync_start)(struct mddev *); + int (*resync_finish)(struct mddev *); + int (*area_resyncing)(struct mddev *, int, sector_t, sector_t); + int (*add_new_disk)(struct mddev *, struct md_rdev *); + void (*add_new_disk_cancel)(struct mddev *); + int (*new_disk_ack)(struct mddev *, bool); + int (*remove_disk)(struct mddev *, struct md_rdev *); + void (*load_bitmaps)(struct mddev *, int); + int (*gather_bitmaps)(struct md_rdev *); + int (*resize_bitmaps)(struct mddev *, sector_t, sector_t); + int (*lock_all_bitmaps)(struct mddev *); + void (*unlock_all_bitmaps)(struct mddev *); + void (*update_size)(struct mddev *, sector_t); +}; + +struct md_cluster_info; + +struct md_personality; + +struct md_thread; + +struct bitmap; + +struct mddev { + void *private; + struct md_personality *pers; + dev_t unit; + int md_minor; + struct list_head disks; + long unsigned int flags; + long unsigned int sb_flags; + int suspended; + atomic_t active_io; + int ro; + int sysfs_active; + struct gendisk *gendisk; + struct kobject kobj; + int hold_active; + int major_version; + int minor_version; + int 
patch_version; + int persistent; + int external; + char metadata_type[17]; + int chunk_sectors; + time64_t ctime; + time64_t utime; + int level; + int layout; + char clevel[16]; + int raid_disks; + int max_disks; + sector_t dev_sectors; + sector_t array_sectors; + int external_size; + __u64 events; + int can_decrease_events; + char uuid[16]; + sector_t reshape_position; + int delta_disks; + int new_level; + int new_layout; + int new_chunk_sectors; + int reshape_backwards; + struct md_thread *thread; + struct md_thread *sync_thread; + char *last_sync_action; + sector_t curr_resync; + sector_t curr_resync_completed; + long unsigned int resync_mark; + sector_t resync_mark_cnt; + sector_t curr_mark_cnt; + sector_t resync_max_sectors; + atomic64_t resync_mismatches; + sector_t suspend_lo; + sector_t suspend_hi; + int sync_speed_min; + int sync_speed_max; + int parallel_resync; + int ok_start_degraded; + long unsigned int recovery; + int recovery_disabled; + int in_sync; + struct mutex open_mutex; + struct mutex reconfig_mutex; + atomic_t active; + atomic_t openers; + int changed; + int degraded; + atomic_t recovery_active; + wait_queue_head_t recovery_wait; + sector_t recovery_cp; + sector_t resync_min; + sector_t resync_max; + struct kernfs_node *sysfs_state; + struct kernfs_node *sysfs_action; + struct kernfs_node *sysfs_completed; + struct kernfs_node *sysfs_degraded; + struct kernfs_node *sysfs_level; + struct work_struct del_work; + spinlock_t lock; + wait_queue_head_t sb_wait; + atomic_t pending_writes; + unsigned int safemode; + unsigned int safemode_delay; + struct timer_list safemode_timer; + struct percpu_ref writes_pending; + int sync_checkers; + struct request_queue *queue; + struct bitmap *bitmap; + struct { + struct file *file; + loff_t offset; + long unsigned int space; + loff_t default_offset; + long unsigned int default_space; + struct mutex mutex; + long unsigned int chunksize; + long unsigned int daemon_sleep; + long unsigned int max_write_behind; + int external; + int nodes; + char cluster_name[64]; + } bitmap_info; + atomic_t max_corr_read_errors; + struct list_head all_mddevs; + struct attribute_group *to_remove; + struct bio_set bio_set; + struct bio_set sync_set; + mempool_t md_io_pool; + struct bio *flush_bio; + atomic_t flush_pending; + ktime_t start_flush; + ktime_t last_flush; + struct work_struct flush_work; + struct work_struct event_work; + mempool_t *serial_info_pool; + void (*sync_super)(struct mddev *, struct md_rdev *); + struct md_cluster_info *cluster_info; + unsigned int good_device_nr; + unsigned int noio_flag; + bool has_superblocks: 1; + bool fail_last_dev: 1; + bool serialize_policy: 1; +}; + +struct serial_in_rdev; + +struct md_rdev { + struct list_head same_set; + sector_t sectors; + struct mddev *mddev; + int last_events; + struct block_device *meta_bdev; + struct block_device *bdev; + struct page *sb_page; + struct page *bb_page; + int sb_loaded; + __u64 sb_events; + sector_t data_offset; + sector_t new_data_offset; + sector_t sb_start; + int sb_size; + int preferred_minor; + struct kobject kobj; + long unsigned int flags; + wait_queue_head_t blocked_wait; + int desc_nr; + int raid_disk; + int new_raid_disk; + int saved_raid_disk; + union { + sector_t recovery_offset; + sector_t journal_tail; + }; + atomic_t nr_pending; + atomic_t read_errors; + time64_t last_read_error; + atomic_t corrected_errors; + struct serial_in_rdev *serial; + struct work_struct del_work; + struct kernfs_node *sysfs_state; + struct kernfs_node *sysfs_unack_badblocks; + struct 
kernfs_node *sysfs_badblocks; + struct badblocks badblocks; + struct { + short int offset; + unsigned int size; + sector_t sector; + } ppl; +}; + +struct serial_in_rdev { + struct rb_root_cached serial_rb; + spinlock_t serial_lock; + wait_queue_head_t serial_io_wait; +}; + +enum flag_bits { + Faulty = 0, + In_sync = 1, + Bitmap_sync = 2, + WriteMostly = 3, + AutoDetected = 4, + Blocked = 5, + WriteErrorSeen = 6, + FaultRecorded = 7, + BlockedBadBlocks = 8, + WantReplacement = 9, + Replacement = 10, + Candidate = 11, + Journal = 12, + ClusterRemove = 13, + RemoveSynchronized = 14, + ExternalBbl = 15, + FailFast = 16, + LastDev = 17, + CollisionCheck = 18, +}; + +enum mddev_flags { + MD_ARRAY_FIRST_USE = 0, + MD_CLOSING = 1, + MD_JOURNAL_CLEAN = 2, + MD_HAS_JOURNAL = 3, + MD_CLUSTER_RESYNC_LOCKED = 4, + MD_FAILFAST_SUPPORTED = 5, + MD_HAS_PPL = 6, + MD_HAS_MULTIPLE_PPLS = 7, + MD_ALLOW_SB_UPDATE = 8, + MD_UPDATING_SB = 9, + MD_NOT_READY = 10, + MD_BROKEN = 11, +}; + +enum mddev_sb_flags { + MD_SB_CHANGE_DEVS = 0, + MD_SB_CHANGE_CLEAN = 1, + MD_SB_CHANGE_PENDING = 2, + MD_SB_NEED_REWRITE = 3, +}; + +struct md_personality { + char *name; + int level; + struct list_head list; + struct module *owner; + bool (*make_request)(struct mddev *, struct bio *); + int (*run)(struct mddev *); + int (*start)(struct mddev *); + void (*free)(struct mddev *, void *); + void (*status)(struct seq_file *, struct mddev *); + void (*error_handler)(struct mddev *, struct md_rdev *); + int (*hot_add_disk)(struct mddev *, struct md_rdev *); + int (*hot_remove_disk)(struct mddev *, struct md_rdev *); + int (*spare_active)(struct mddev *); + sector_t (*sync_request)(struct mddev *, sector_t, int *); + int (*resize)(struct mddev *, sector_t); + sector_t (*size)(struct mddev *, sector_t, int); + int (*check_reshape)(struct mddev *); + int (*start_reshape)(struct mddev *); + void (*finish_reshape)(struct mddev *); + void (*update_reshape_pos)(struct mddev *); + void (*quiesce)(struct mddev *, int); + void * (*takeover)(struct mddev *); + int (*change_consistency_policy)(struct mddev *, const char *); +}; + +struct md_thread { + void (*run)(struct md_thread *); + struct mddev *mddev; + wait_queue_head_t wqueue; + long unsigned int flags; + struct task_struct *tsk; + long unsigned int timeout; + void *private; +}; + +struct bitmap_page; + +struct bitmap_counts { + spinlock_t lock; + struct bitmap_page *bp; + long unsigned int pages; + long unsigned int missing_pages; + long unsigned int chunkshift; + long unsigned int chunks; +}; + +struct bitmap_storage { + struct file *file; + struct page *sb_page; + struct page **filemap; + long unsigned int *filemap_attr; + long unsigned int file_pages; + long unsigned int bytes; +}; + +struct bitmap { + struct bitmap_counts counts; + struct mddev *mddev; + __u64 events_cleared; + int need_sync; + struct bitmap_storage storage; + long unsigned int flags; + int allclean; + atomic_t behind_writes; + long unsigned int behind_writes_used; + long unsigned int daemon_lastrun; + long unsigned int last_end_sync; + atomic_t pending_writes; + wait_queue_head_t write_wait; + wait_queue_head_t overflow_wait; + wait_queue_head_t behind_wait; + struct kernfs_node *sysfs_can_clear; + int cluster_slot; +}; + +enum recovery_flags { + MD_RECOVERY_RUNNING = 0, + MD_RECOVERY_SYNC = 1, + MD_RECOVERY_RECOVER = 2, + MD_RECOVERY_INTR = 3, + MD_RECOVERY_DONE = 4, + MD_RECOVERY_NEEDED = 5, + MD_RECOVERY_REQUESTED = 6, + MD_RECOVERY_CHECK = 7, + MD_RECOVERY_RESHAPE = 8, + MD_RECOVERY_FROZEN = 9, + 
MD_RECOVERY_ERROR = 10, + MD_RECOVERY_WAIT = 11, + MD_RESYNCING_REMOTE = 12, +}; + +struct md_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct mddev *, char *); + ssize_t (*store)(struct mddev *, const char *, size_t); +}; + +struct bitmap_page { + char *map; + unsigned int hijacked: 1; + unsigned int pending: 1; + unsigned int count: 30; +}; + +struct md_io { + struct mddev *mddev; + bio_end_io_t *orig_bi_end_io; + void *orig_bi_private; + long unsigned int start_time; + struct hd_struct *part; +}; + +struct super_type { + char *name; + struct module *owner; + int (*load_super)(struct md_rdev *, struct md_rdev *, int); + int (*validate_super)(struct mddev *, struct md_rdev *); + void (*sync_super)(struct mddev *, struct md_rdev *); + long long unsigned int (*rdev_size_change)(struct md_rdev *, sector_t); + int (*allow_new_offset)(struct md_rdev *, long long unsigned int); +}; + +struct rdev_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct md_rdev *, char *); + ssize_t (*store)(struct md_rdev *, const char *, size_t); +}; + +enum array_state { + clear = 0, + inactive = 1, + suspended = 2, + readonly = 3, + read_auto = 4, + clean = 5, + active = 6, + write_pending = 7, + active_idle = 8, + broken = 9, + bad_word = 10, +}; + +struct detected_devices_node { + struct list_head list; + dev_t dev; +}; + +typedef __u16 bitmap_counter_t; + +enum bitmap_state { + BITMAP_STALE = 1, + BITMAP_WRITE_ERROR = 2, + BITMAP_HOSTENDIAN = 15, +}; + +struct bitmap_super_s { + __le32 magic; + __le32 version; + __u8 uuid[16]; + __le64 events; + __le64 events_cleared; + __le64 sync_size; + __le32 state; + __le32 chunksize; + __le32 daemon_sleep; + __le32 write_behind; + __le32 sectors_reserved; + __le32 nodes; + __u8 cluster_name[64]; + __u8 pad[120]; +}; + +typedef struct bitmap_super_s bitmap_super_t; + +enum bitmap_page_attr { + BITMAP_PAGE_DIRTY = 0, + BITMAP_PAGE_PENDING = 1, + BITMAP_PAGE_NEEDWRITE = 2, +}; + +struct md_setup_args { + int minor; + int partitioned; + int level; + int chunk; + char *device_names; +}; + +struct dm_kobject_holder { + struct kobject kobj; + struct completion completion; +}; + +enum dev_type { + DEV_UNKNOWN = 0, + DEV_X1 = 1, + DEV_X2 = 2, + DEV_X4 = 3, + DEV_X8 = 4, + DEV_X16 = 5, + DEV_X32 = 6, + DEV_X64 = 7, +}; + +enum hw_event_mc_err_type { + HW_EVENT_ERR_CORRECTED = 0, + HW_EVENT_ERR_UNCORRECTED = 1, + HW_EVENT_ERR_DEFERRED = 2, + HW_EVENT_ERR_FATAL = 3, + HW_EVENT_ERR_INFO = 4, +}; + +enum mem_type { + MEM_EMPTY = 0, + MEM_RESERVED = 1, + MEM_UNKNOWN = 2, + MEM_FPM = 3, + MEM_EDO = 4, + MEM_BEDO = 5, + MEM_SDR = 6, + MEM_RDR = 7, + MEM_DDR = 8, + MEM_RDDR = 9, + MEM_RMBS = 10, + MEM_DDR2 = 11, + MEM_FB_DDR2 = 12, + MEM_RDDR2 = 13, + MEM_XDR = 14, + MEM_DDR3 = 15, + MEM_RDDR3 = 16, + MEM_LRDDR3 = 17, + MEM_DDR4 = 18, + MEM_RDDR4 = 19, + MEM_LRDDR4 = 20, + MEM_NVDIMM = 21, +}; + +enum edac_type { + EDAC_UNKNOWN = 0, + EDAC_NONE = 1, + EDAC_RESERVED = 2, + EDAC_PARITY = 3, + EDAC_EC = 4, + EDAC_SECDED = 5, + EDAC_S2ECD2ED = 6, + EDAC_S4ECD4ED = 7, + EDAC_S8ECD8ED = 8, + EDAC_S16ECD16ED = 9, +}; + +enum scrub_type { + SCRUB_UNKNOWN = 0, + SCRUB_NONE = 1, + SCRUB_SW_PROG = 2, + SCRUB_SW_SRC = 3, + SCRUB_SW_PROG_SRC = 4, + SCRUB_SW_TUNABLE = 5, + SCRUB_HW_PROG = 6, + SCRUB_HW_SRC = 7, + SCRUB_HW_PROG_SRC = 8, + SCRUB_HW_TUNABLE = 9, +}; + +enum edac_mc_layer_type { + EDAC_MC_LAYER_BRANCH = 0, + EDAC_MC_LAYER_CHANNEL = 1, + EDAC_MC_LAYER_SLOT = 2, + EDAC_MC_LAYER_CHIP_SELECT = 3, + EDAC_MC_LAYER_ALL_MEM = 4, +}; + +struct edac_mc_layer { + enum 
edac_mc_layer_type type; + unsigned int size; + bool is_virt_csrow; +}; + +struct mem_ctl_info; + +struct dimm_info { + struct device dev; + char label[32]; + unsigned int location[3]; + struct mem_ctl_info *mci; + unsigned int idx; + u32 grain; + enum dev_type dtype; + enum mem_type mtype; + enum edac_type edac_mode; + u32 nr_pages; + unsigned int csrow; + unsigned int cschannel; + u16 smbios_handle; + u32 ce_count; + u32 ue_count; +}; + +struct mcidev_sysfs_attribute; + +struct edac_raw_error_desc { + char location[256]; + char label[296]; + long int grain; + u16 error_count; + enum hw_event_mc_err_type type; + int top_layer; + int mid_layer; + int low_layer; + long unsigned int page_frame_number; + long unsigned int offset_in_page; + long unsigned int syndrome; + const char *msg; + const char *other_detail; +}; + +struct csrow_info; + +struct mem_ctl_info { + struct device dev; + struct bus_type *bus; + struct list_head link; + struct module *owner; + long unsigned int mtype_cap; + long unsigned int edac_ctl_cap; + long unsigned int edac_cap; + long unsigned int scrub_cap; + enum scrub_type scrub_mode; + int (*set_sdram_scrub_rate)(struct mem_ctl_info *, u32); + int (*get_sdram_scrub_rate)(struct mem_ctl_info *); + void (*edac_check)(struct mem_ctl_info *); + long unsigned int (*ctl_page_to_phys)(struct mem_ctl_info *, long unsigned int); + int mc_idx; + struct csrow_info **csrows; + unsigned int nr_csrows; + unsigned int num_cschannel; + unsigned int n_layers; + struct edac_mc_layer *layers; + bool csbased; + unsigned int tot_dimms; + struct dimm_info **dimms; + struct device *pdev; + const char *mod_name; + const char *ctl_name; + const char *dev_name; + void *pvt_info; + long unsigned int start_time; + u32 ce_noinfo_count; + u32 ue_noinfo_count; + u32 ue_mc; + u32 ce_mc; + struct completion complete; + const struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes; + struct delayed_work work; + struct edac_raw_error_desc error_desc; + int op_state; + struct dentry *debugfs; + u8 fake_inject_layer[3]; + bool fake_inject_ue; + u16 fake_inject_count; +}; + +struct rank_info { + int chan_idx; + struct csrow_info *csrow; + struct dimm_info *dimm; + u32 ce_count; +}; + +struct csrow_info { + struct device dev; + long unsigned int first_page; + long unsigned int last_page; + long unsigned int page_mask; + int csrow_idx; + u32 ue_count; + u32 ce_count; + struct mem_ctl_info *mci; + u32 nr_channels; + struct rank_info **channels; +}; + +struct edac_device_counter { + u32 ue_count; + u32 ce_count; +}; + +struct edac_device_ctl_info; + +struct edac_dev_sysfs_attribute { + struct attribute attr; + ssize_t (*show)(struct edac_device_ctl_info *, char *); + ssize_t (*store)(struct edac_device_ctl_info *, const char *, size_t); +}; + +struct edac_device_instance; + +struct edac_device_ctl_info { + struct list_head link; + struct module *owner; + int dev_idx; + int log_ue; + int log_ce; + int panic_on_ue; + unsigned int poll_msec; + long unsigned int delay; + struct edac_dev_sysfs_attribute *sysfs_attributes; + struct bus_type *edac_subsys; + int op_state; + struct delayed_work work; + void (*edac_check)(struct edac_device_ctl_info *); + struct device *dev; + const char *mod_name; + const char *ctl_name; + const char *dev_name; + void *pvt_info; + long unsigned int start_time; + struct completion removal_complete; + char name[32]; + u32 nr_instances; + struct edac_device_instance *instances; + struct edac_device_counter counters; + struct kobject kobj; +}; + +struct edac_device_block; + +struct 
edac_dev_sysfs_block_attribute { + struct attribute attr; + ssize_t (*show)(struct kobject *, struct attribute *, char *); + ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t); + struct edac_device_block *block; + unsigned int value; +}; + +struct edac_device_block { + struct edac_device_instance *instance; + char name[32]; + struct edac_device_counter counters; + int nr_attribs; + struct edac_dev_sysfs_block_attribute *block_attributes; + struct kobject kobj; +}; + +struct edac_device_instance { + struct edac_device_ctl_info *ctl; + char name[35]; + struct edac_device_counter counters; + u32 nr_blocks; + struct edac_device_block *blocks; + struct kobject kobj; +}; + +struct dev_ch_attribute { + struct device_attribute attr; + unsigned int channel; +}; + +struct ctl_info_attribute { + struct attribute attr; + ssize_t (*show)(struct edac_device_ctl_info *, char *); + ssize_t (*store)(struct edac_device_ctl_info *, const char *, size_t); +}; + +struct instance_attribute { + struct attribute attr; + ssize_t (*show)(struct edac_device_instance *, char *); + ssize_t (*store)(struct edac_device_instance *, const char *, size_t); +}; + +struct edac_pci_counter { + atomic_t pe_count; + atomic_t npe_count; +}; + +struct edac_pci_ctl_info { + struct list_head link; + int pci_idx; + struct bus_type *edac_subsys; + int op_state; + struct delayed_work work; + void (*edac_check)(struct edac_pci_ctl_info *); + struct device *dev; + const char *mod_name; + const char *ctl_name; + const char *dev_name; + void *pvt_info; + long unsigned int start_time; + struct completion complete; + char name[32]; + struct edac_pci_counter counters; + struct kobject kobj; +}; + +struct edac_pci_gen_data { + int edac_idx; +}; + +struct instance_attribute___2 { + struct attribute attr; + ssize_t (*show)(struct edac_pci_ctl_info *, char *); + ssize_t (*store)(struct edac_pci_ctl_info *, const char *, size_t); +}; + +struct edac_pci_dev_attribute { + struct attribute attr; + void *value; + ssize_t (*show)(void *, char *); + ssize_t (*store)(void *, const char *, size_t); +}; + +typedef void (*pci_parity_check_fn_t)(struct pci_dev *); + +enum opp_table_access { + OPP_TABLE_ACCESS_UNKNOWN = 0, + OPP_TABLE_ACCESS_EXCLUSIVE = 1, + OPP_TABLE_ACCESS_SHARED = 2, +}; + +struct icc_path; + +struct dev_pm_opp___2; + +struct dev_pm_set_opp_data; + +struct opp_table___2 { + struct list_head node; + struct blocking_notifier_head head; + struct list_head dev_list; + struct list_head opp_list; + struct kref kref; + struct mutex lock; + struct device_node *np; + long unsigned int clock_latency_ns_max; + unsigned int voltage_tolerance_v1; + unsigned int parsed_static_opps; + enum opp_table_access shared_opp; + struct dev_pm_opp___2 *suspend_opp; + struct mutex genpd_virt_dev_lock; + struct device **genpd_virt_devs; + struct opp_table___2 **required_opp_tables; + unsigned int required_opp_count; + unsigned int *supported_hw; + unsigned int supported_hw_count; + const char *prop_name; + struct clk *clk; + struct regulator **regulators; + int regulator_count; + struct icc_path **paths; + unsigned int path_count; + bool enabled; + bool genpd_performance_state; + bool is_genpd; + int (*set_opp)(struct dev_pm_set_opp_data *); + struct dev_pm_set_opp_data *set_opp_data; + struct dentry *dentry; + char dentry_name[255]; +}; + +struct dev_pm_opp_supply; + +struct dev_pm_opp_icc_bw; + +struct dev_pm_opp___2 { + struct list_head node; + struct kref kref; + bool available; + bool dynamic; + bool turbo; + bool suspend; + unsigned 
int pstate; + long unsigned int rate; + unsigned int level; + struct dev_pm_opp_supply *supplies; + struct dev_pm_opp_icc_bw *bandwidth; + long unsigned int clock_latency_ns; + struct dev_pm_opp___2 **required_opps; + struct opp_table___2 *opp_table; + struct device_node *np; + struct dentry *dentry; +}; + +enum dev_pm_opp_event { + OPP_EVENT_ADD = 0, + OPP_EVENT_REMOVE = 1, + OPP_EVENT_ENABLE = 2, + OPP_EVENT_DISABLE = 3, + OPP_EVENT_ADJUST_VOLTAGE = 4, +}; + +struct dev_pm_opp_supply { + long unsigned int u_volt; + long unsigned int u_volt_min; + long unsigned int u_volt_max; + long unsigned int u_amp; +}; + +struct dev_pm_opp_icc_bw { + u32 avg; + u32 peak; +}; + +struct dev_pm_opp_info { + long unsigned int rate; + struct dev_pm_opp_supply *supplies; +}; + +struct dev_pm_set_opp_data { + struct dev_pm_opp_info old_opp; + struct dev_pm_opp_info new_opp; + struct regulator **regulators; + unsigned int regulator_count; + struct clk *clk; + struct device *dev; +}; + +struct opp_device { + struct list_head node; + const struct device *dev; + struct dentry *dentry; +}; + +struct cpufreq_policy_data { + struct cpufreq_cpuinfo cpuinfo; + struct cpufreq_frequency_table *freq_table; + unsigned int cpu; + unsigned int min; + unsigned int max; +}; + +struct cpufreq_freqs { + struct cpufreq_policy *policy; + unsigned int old; + unsigned int new; + u8 flags; +}; + +struct freq_attr { + struct attribute attr; + ssize_t (*show)(struct cpufreq_policy *, char *); + ssize_t (*store)(struct cpufreq_policy *, const char *, size_t); +}; + +struct cpufreq_driver { + char name[16]; + u16 flags; + void *driver_data; + int (*init)(struct cpufreq_policy *); + int (*verify)(struct cpufreq_policy_data *); + int (*setpolicy)(struct cpufreq_policy *); + int (*target)(struct cpufreq_policy *, unsigned int, unsigned int); + int (*target_index)(struct cpufreq_policy *, unsigned int); + unsigned int (*fast_switch)(struct cpufreq_policy *, unsigned int); + unsigned int (*resolve_freq)(struct cpufreq_policy *, unsigned int); + unsigned int (*get_intermediate)(struct cpufreq_policy *, unsigned int); + int (*target_intermediate)(struct cpufreq_policy *, unsigned int); + unsigned int (*get)(unsigned int); + void (*update_limits)(unsigned int); + int (*bios_limit)(int, unsigned int *); + int (*online)(struct cpufreq_policy *); + int (*offline)(struct cpufreq_policy *); + int (*exit)(struct cpufreq_policy *); + void (*stop_cpu)(struct cpufreq_policy *); + int (*suspend)(struct cpufreq_policy *); + int (*resume)(struct cpufreq_policy *); + void (*ready)(struct cpufreq_policy *); + struct freq_attr **attr; + bool boost_enabled; + int (*set_boost)(struct cpufreq_policy *, int); +}; + +struct cpufreq_stats { + unsigned int total_trans; + long long unsigned int last_time; + unsigned int max_state; + unsigned int state_num; + unsigned int last_index; + u64 *time_in_state; + unsigned int *freq_table; + unsigned int *trans_table; + unsigned int reset_pending; + long long unsigned int reset_time; +}; + +struct dbs_data { + struct gov_attr_set attr_set; + void *tuners; + unsigned int ignore_nice_load; + unsigned int sampling_rate; + unsigned int sampling_down_factor; + unsigned int up_threshold; + unsigned int io_is_busy; +}; + +struct policy_dbs_info { + struct cpufreq_policy *policy; + struct mutex update_mutex; + u64 last_sample_time; + s64 sample_delay_ns; + atomic_t work_count; + struct irq_work irq_work; + struct work_struct work; + struct dbs_data *dbs_data; + struct list_head list; + unsigned int rate_mult; + unsigned int 
idle_periods; + bool is_shared; + bool work_in_progress; +}; + +struct cpu_dbs_info { + u64 prev_cpu_idle; + u64 prev_update_time; + u64 prev_cpu_nice; + unsigned int prev_load; + struct update_util_data update_util; + struct policy_dbs_info *policy_dbs; +}; + +struct dbs_governor { + struct cpufreq_governor gov; + struct kobj_type kobj_type; + struct dbs_data *gdbs_data; + unsigned int (*gov_dbs_update)(struct cpufreq_policy *); + struct policy_dbs_info * (*alloc)(); + void (*free)(struct policy_dbs_info *); + int (*init)(struct dbs_data *); + void (*exit)(struct dbs_data *); + void (*start)(struct cpufreq_policy *); +}; + +struct cpufreq_policy___2; + +struct cpufreq_dt_platform_data { + bool have_governor_per_policy; + unsigned int (*get_intermediate)(struct cpufreq_policy___2 *, unsigned int); + int (*target_intermediate)(struct cpufreq_policy___2 *, unsigned int); + int (*suspend)(struct cpufreq_policy___2 *); + int (*resume)(struct cpufreq_policy___2 *); +}; + +struct tegra124_cpufreq_priv { + struct clk *cpu_clk; + struct clk *pllp_clk; + struct clk *pllx_clk; + struct clk *dfll_clk; + struct platform_device *cpufreq_dt_pdev; +}; + +struct cpuidle_governor { + char name[16]; + struct list_head governor_list; + unsigned int rating; + int (*enable)(struct cpuidle_driver *, struct cpuidle_device *); + void (*disable)(struct cpuidle_driver *, struct cpuidle_device *); + int (*select)(struct cpuidle_driver *, struct cpuidle_device *, bool *); + void (*reflect)(struct cpuidle_device *, int); +}; + +struct cpuidle_state_kobj { + struct cpuidle_state *state; + struct cpuidle_state_usage *state_usage; + struct completion kobj_unregister; + struct kobject kobj; + struct cpuidle_device *device; +}; + +struct cpuidle_driver_kobj { + struct cpuidle_driver *drv; + struct completion kobj_unregister; + struct kobject kobj; +}; + +struct cpuidle_device_kobj { + struct cpuidle_device *dev; + struct completion kobj_unregister; + struct kobject kobj; +}; + +struct cpuidle_attr { + struct attribute attr; + ssize_t (*show)(struct cpuidle_device *, char *); + ssize_t (*store)(struct cpuidle_device *, const char *, size_t); +}; + +struct cpuidle_state_attr { + struct attribute attr; + ssize_t (*show)(struct cpuidle_state *, struct cpuidle_state_usage *, char *); + ssize_t (*store)(struct cpuidle_state *, struct cpuidle_state_usage *, const char *, size_t); +}; + +struct cpuidle_driver_attr { + struct attribute attr; + ssize_t (*show)(struct cpuidle_driver *, char *); + ssize_t (*store)(struct cpuidle_driver *, const char *, size_t); +}; + +struct ladder_device_state { + struct { + u32 promotion_count; + u32 demotion_count; + u64 promotion_time_ns; + u64 demotion_time_ns; + } threshold; + struct { + int promotion_count; + int demotion_count; + } stats; +}; + +struct ladder_device { + struct ladder_device_state states[10]; +}; + +struct menu_device { + int needs_update; + int tick_wakeup; + u64 next_timer_ns; + unsigned int bucket; + unsigned int correction_factor[12]; + unsigned int intervals[8]; + int interval_ptr; +}; + +struct teo_idle_state { + unsigned int early_hits; + unsigned int hits; + unsigned int misses; +}; + +struct teo_cpu { + u64 time_span_ns; + u64 sleep_length_ns; + struct teo_idle_state states[10]; + int interval_idx; + u64 intervals[8]; +}; + +struct pci_dev___2; + +struct sdhci_pci_data { + struct pci_dev___2 *pdev; + int slotno; + int rst_n_gpio; + int cd_gpio; + int (*setup)(struct sdhci_pci_data *); + void (*cleanup)(struct sdhci_pci_data *); +}; + +struct led_init_data { + struct 
fwnode_handle *fwnode; + const char *default_label; + const char *devicename; + bool devname_mandatory; +}; + +struct led_properties { + u32 color; + bool color_present; + const char *function; + u32 func_enum; + bool func_enum_present; + const char *label; +}; + +enum cpu_led_event { + CPU_LED_IDLE_START = 0, + CPU_LED_IDLE_END = 1, + CPU_LED_START = 2, + CPU_LED_STOP = 3, + CPU_LED_HALTED = 4, +}; + +struct led_trigger_cpu { + bool is_active; + char name[8]; + struct led_trigger *_trig; +}; + +enum scpi_error_codes { + SCPI_SUCCESS = 0, + SCPI_ERR_PARAM = 1, + SCPI_ERR_ALIGN = 2, + SCPI_ERR_SIZE = 3, + SCPI_ERR_HANDLER = 4, + SCPI_ERR_ACCESS = 5, + SCPI_ERR_RANGE = 6, + SCPI_ERR_TIMEOUT = 7, + SCPI_ERR_NOMEM = 8, + SCPI_ERR_PWRSTATE = 9, + SCPI_ERR_SUPPORT = 10, + SCPI_ERR_DEVICE = 11, + SCPI_ERR_BUSY = 12, + SCPI_ERR_MAX = 13, +}; + +enum scpi_std_cmd { + SCPI_CMD_INVALID = 0, + SCPI_CMD_SCPI_READY = 1, + SCPI_CMD_SCPI_CAPABILITIES = 2, + SCPI_CMD_SET_CSS_PWR_STATE = 3, + SCPI_CMD_GET_CSS_PWR_STATE = 4, + SCPI_CMD_SET_SYS_PWR_STATE = 5, + SCPI_CMD_SET_CPU_TIMER = 6, + SCPI_CMD_CANCEL_CPU_TIMER = 7, + SCPI_CMD_DVFS_CAPABILITIES = 8, + SCPI_CMD_GET_DVFS_INFO = 9, + SCPI_CMD_SET_DVFS = 10, + SCPI_CMD_GET_DVFS = 11, + SCPI_CMD_GET_DVFS_STAT = 12, + SCPI_CMD_CLOCK_CAPABILITIES = 13, + SCPI_CMD_GET_CLOCK_INFO = 14, + SCPI_CMD_SET_CLOCK_VALUE = 15, + SCPI_CMD_GET_CLOCK_VALUE = 16, + SCPI_CMD_PSU_CAPABILITIES = 17, + SCPI_CMD_GET_PSU_INFO = 18, + SCPI_CMD_SET_PSU = 19, + SCPI_CMD_GET_PSU = 20, + SCPI_CMD_SENSOR_CAPABILITIES = 21, + SCPI_CMD_SENSOR_INFO = 22, + SCPI_CMD_SENSOR_VALUE = 23, + SCPI_CMD_SENSOR_CFG_PERIODIC = 24, + SCPI_CMD_SENSOR_CFG_BOUNDS = 25, + SCPI_CMD_SENSOR_ASYNC_VALUE = 26, + SCPI_CMD_SET_DEVICE_PWR_STATE = 27, + SCPI_CMD_GET_DEVICE_PWR_STATE = 28, + SCPI_CMD_COUNT = 29, +}; + +enum legacy_scpi_std_cmd { + LEGACY_SCPI_CMD_INVALID = 0, + LEGACY_SCPI_CMD_SCPI_READY = 1, + LEGACY_SCPI_CMD_SCPI_CAPABILITIES = 2, + LEGACY_SCPI_CMD_EVENT = 3, + LEGACY_SCPI_CMD_SET_CSS_PWR_STATE = 4, + LEGACY_SCPI_CMD_GET_CSS_PWR_STATE = 5, + LEGACY_SCPI_CMD_CFG_PWR_STATE_STAT = 6, + LEGACY_SCPI_CMD_GET_PWR_STATE_STAT = 7, + LEGACY_SCPI_CMD_SYS_PWR_STATE = 8, + LEGACY_SCPI_CMD_L2_READY = 9, + LEGACY_SCPI_CMD_SET_AP_TIMER = 10, + LEGACY_SCPI_CMD_CANCEL_AP_TIME = 11, + LEGACY_SCPI_CMD_DVFS_CAPABILITIES = 12, + LEGACY_SCPI_CMD_GET_DVFS_INFO = 13, + LEGACY_SCPI_CMD_SET_DVFS = 14, + LEGACY_SCPI_CMD_GET_DVFS = 15, + LEGACY_SCPI_CMD_GET_DVFS_STAT = 16, + LEGACY_SCPI_CMD_SET_RTC = 17, + LEGACY_SCPI_CMD_GET_RTC = 18, + LEGACY_SCPI_CMD_CLOCK_CAPABILITIES = 19, + LEGACY_SCPI_CMD_SET_CLOCK_INDEX = 20, + LEGACY_SCPI_CMD_SET_CLOCK_VALUE = 21, + LEGACY_SCPI_CMD_GET_CLOCK_VALUE = 22, + LEGACY_SCPI_CMD_PSU_CAPABILITIES = 23, + LEGACY_SCPI_CMD_SET_PSU = 24, + LEGACY_SCPI_CMD_GET_PSU = 25, + LEGACY_SCPI_CMD_SENSOR_CAPABILITIES = 26, + LEGACY_SCPI_CMD_SENSOR_INFO = 27, + LEGACY_SCPI_CMD_SENSOR_VALUE = 28, + LEGACY_SCPI_CMD_SENSOR_CFG_PERIODIC = 29, + LEGACY_SCPI_CMD_SENSOR_CFG_BOUNDS = 30, + LEGACY_SCPI_CMD_SENSOR_ASYNC_VALUE = 31, + LEGACY_SCPI_CMD_COUNT = 32, +}; + +enum scpi_drv_cmds { + CMD_SCPI_CAPABILITIES = 0, + CMD_GET_CLOCK_INFO = 1, + CMD_GET_CLOCK_VALUE = 2, + CMD_SET_CLOCK_VALUE = 3, + CMD_GET_DVFS = 4, + CMD_SET_DVFS = 5, + CMD_GET_DVFS_INFO = 6, + CMD_SENSOR_CAPABILITIES = 7, + CMD_SENSOR_INFO = 8, + CMD_SENSOR_VALUE = 9, + CMD_SET_DEVICE_PWR_STATE = 10, + CMD_GET_DEVICE_PWR_STATE = 11, + CMD_MAX_COUNT = 12, +}; + +struct scpi_xfer { + u32 slot; + u32 cmd; + u32 status; + const void *tx_buf; + void 
*rx_buf; + unsigned int tx_len; + unsigned int rx_len; + struct list_head node; + struct completion done; +}; + +struct scpi_chan { + struct mbox_client cl; + struct mbox_chan___2 *chan; + void *tx_payload; + void *rx_payload; + struct list_head rx_pending; + struct list_head xfers_list; + struct scpi_xfer *xfers; + spinlock_t rx_lock; + struct mutex xfers_lock; + u8 token; +}; + +struct scpi_drvinfo { + u32 protocol_version; + u32 firmware_version; + bool is_legacy; + int num_chans; + int *commands; + long unsigned int cmd_priority[1]; + atomic_t next_chan; + struct scpi_ops *scpi_ops; + struct scpi_chan *channels; + struct scpi_dvfs_info *dvfs[8]; +}; + +struct scpi_shared_mem { + __le32 command; + __le32 status; + u8 payload[0]; +}; + +struct legacy_scpi_shared_mem { + __le32 status; + u8 payload[0]; +}; + +struct scp_capabilities { + __le32 protocol_version; + __le32 event_version; + __le32 platform_version; + __le32 commands[4]; +}; + +struct clk_get_info { + __le16 id; + __le16 flags; + __le32 min_rate; + __le32 max_rate; + u8 name[20]; +}; + +struct clk_set_value { + __le16 id; + __le16 reserved; + __le32 rate; +}; + +struct legacy_clk_set_value { + __le32 rate; + __le16 id; + __le16 reserved; +}; + +struct dvfs_info { + u8 domain; + u8 opp_count; + __le16 latency; + struct { + __le32 freq; + __le32 m_volt; + } opps[16]; +}; + +struct dvfs_set { + u8 domain; + u8 index; +}; + +struct _scpi_sensor_info { + __le16 sensor_id; + u8 class; + u8 trigger_type; + char name[20]; +}; + +struct dev_pstate_set { + __le16 dev_id; + u8 pstate; +} __attribute__((packed)); + +struct scpi_pm_domain { + struct generic_pm_domain genpd; + struct scpi_ops *ops; + u32 domain; + char name[30]; +}; + +enum scpi_power_domain_state { + SCPI_PD_STATE_ON = 0, + SCPI_PD_STATE_OFF = 3, +}; + +enum dmi_entry_type { + DMI_ENTRY_BIOS = 0, + DMI_ENTRY_SYSTEM = 1, + DMI_ENTRY_BASEBOARD = 2, + DMI_ENTRY_CHASSIS = 3, + DMI_ENTRY_PROCESSOR = 4, + DMI_ENTRY_MEM_CONTROLLER = 5, + DMI_ENTRY_MEM_MODULE = 6, + DMI_ENTRY_CACHE = 7, + DMI_ENTRY_PORT_CONNECTOR = 8, + DMI_ENTRY_SYSTEM_SLOT = 9, + DMI_ENTRY_ONBOARD_DEVICE = 10, + DMI_ENTRY_OEMSTRINGS = 11, + DMI_ENTRY_SYSCONF = 12, + DMI_ENTRY_BIOS_LANG = 13, + DMI_ENTRY_GROUP_ASSOC = 14, + DMI_ENTRY_SYSTEM_EVENT_LOG = 15, + DMI_ENTRY_PHYS_MEM_ARRAY = 16, + DMI_ENTRY_MEM_DEVICE = 17, + DMI_ENTRY_32_MEM_ERROR = 18, + DMI_ENTRY_MEM_ARRAY_MAPPED_ADDR = 19, + DMI_ENTRY_MEM_DEV_MAPPED_ADDR = 20, + DMI_ENTRY_BUILTIN_POINTING_DEV = 21, + DMI_ENTRY_PORTABLE_BATTERY = 22, + DMI_ENTRY_SYSTEM_RESET = 23, + DMI_ENTRY_HW_SECURITY = 24, + DMI_ENTRY_SYSTEM_POWER_CONTROLS = 25, + DMI_ENTRY_VOLTAGE_PROBE = 26, + DMI_ENTRY_COOLING_DEV = 27, + DMI_ENTRY_TEMP_PROBE = 28, + DMI_ENTRY_ELECTRICAL_CURRENT_PROBE = 29, + DMI_ENTRY_OOB_REMOTE_ACCESS = 30, + DMI_ENTRY_BIS_ENTRY = 31, + DMI_ENTRY_SYSTEM_BOOT = 32, + DMI_ENTRY_MGMT_DEV = 33, + DMI_ENTRY_MGMT_DEV_COMPONENT = 34, + DMI_ENTRY_MGMT_DEV_THRES = 35, + DMI_ENTRY_MEM_CHANNEL = 36, + DMI_ENTRY_IPMI_DEV = 37, + DMI_ENTRY_SYS_POWER_SUPPLY = 38, + DMI_ENTRY_ADDITIONAL = 39, + DMI_ENTRY_ONBOARD_DEV_EXT = 40, + DMI_ENTRY_MGMT_CONTROLLER_HOST = 41, + DMI_ENTRY_INACTIVE = 126, + DMI_ENTRY_END_OF_TABLE = 127, +}; + +struct dmi_memdev_info { + const char *device; + const char *bank; + u64 size; + u16 handle; + u8 type; +}; + +struct dmi_device_attribute { + struct device_attribute dev_attr; + int field; +}; + +struct mafield { + const char *prefix; + int field; +}; + +struct firmware_map_entry { + u64 start; + u64 end; + const char *type; + struct list_head 
list; + struct kobject kobj; +}; + +struct memmap_attribute { + struct attribute attr; + ssize_t (*show)(struct firmware_map_entry *, char *); +}; + +enum rpi_firmware_property_status { + RPI_FIRMWARE_STATUS_REQUEST = 0, + RPI_FIRMWARE_STATUS_SUCCESS = 2147483648, + RPI_FIRMWARE_STATUS_ERROR = 2147483649, +}; + +struct rpi_firmware_property_tag_header { + u32 tag; + u32 buf_size; + u32 req_resp_size; +}; + +struct rpi_firmware___2 { + struct mbox_client cl; + struct mbox_chan___2 *chan; + struct completion c; + u32 enabled; +}; + +struct qcom_scm_hdcp_req { + u32 addr; + u32 val; +}; + +struct qcom_scm_vmperm { + int vmid; + int perm; +}; + +enum qcom_scm_ocmem_client { + QCOM_SCM_OCMEM_UNUSED_ID = 0, + QCOM_SCM_OCMEM_GRAPHICS_ID = 1, + QCOM_SCM_OCMEM_VIDEO_ID = 2, + QCOM_SCM_OCMEM_LP_AUDIO_ID = 3, + QCOM_SCM_OCMEM_SENSORS_ID = 4, + QCOM_SCM_OCMEM_OTHER_OS_ID = 5, + QCOM_SCM_OCMEM_DEBUG_ID = 6, +}; + +enum qcom_scm_ice_cipher { + QCOM_SCM_ICE_CIPHER_AES_128_XTS = 0, + QCOM_SCM_ICE_CIPHER_AES_128_CBC = 1, + QCOM_SCM_ICE_CIPHER_AES_256_XTS = 3, + QCOM_SCM_ICE_CIPHER_AES_256_CBC = 4, +}; + +enum qcom_scm_convention { + SMC_CONVENTION_UNKNOWN = 0, + SMC_CONVENTION_LEGACY = 1, + SMC_CONVENTION_ARM_32 = 2, + SMC_CONVENTION_ARM_64 = 3, +}; + +enum qcom_scm_arg_types { + QCOM_SCM_VAL = 0, + QCOM_SCM_RO = 1, + QCOM_SCM_RW = 2, + QCOM_SCM_BUFVAL = 3, +}; + +struct qcom_scm_desc { + u32 svc; + u32 cmd; + u32 arginfo; + u64 args[10]; + u32 owner; +}; + +struct qcom_scm_res { + u64 result[3]; +}; + +struct qcom_scm { + struct device *dev; + struct clk *core_clk; + struct clk *iface_clk; + struct clk *bus_clk; + struct reset_controller_dev reset; + u64 dload_mode_addr; +}; + +struct qcom_scm_current_perm_info { + __le32 vmid; + __le32 perm; + __le64 ctx; + __le32 ctx_size; + __le32 unused; +}; + +struct qcom_scm_mem_map_info { + __le64 mem_addr; + __le64 mem_size; +}; + +struct qcom_scm_wb_entry { + int flag; + void *entry; +}; + +struct arm_smccc_quirk { + int id; + union { + long unsigned int a6; + } state; +}; + +struct arm_smccc_args { + long unsigned int args[8]; +}; + +struct scm_legacy_command { + __le32 len; + __le32 buf_offset; + __le32 resp_hdr_offset; + __le32 id; + __le32 buf[0]; +}; + +struct scm_legacy_response { + __le32 len; + __le32 buf_offset; + __le32 is_complete; +}; + +struct meson_sm_cmd { + unsigned int index; + u32 smc_id; +}; + +struct meson_sm_chip { + unsigned int shmem_size; + u32 cmd_shmem_in_base; + u32 cmd_shmem_out_base; + struct meson_sm_cmd cmd[0]; +}; + +struct meson_sm_firmware___2 { + const struct meson_sm_chip *chip; + void *sm_shmem_in_base; + void *sm_shmem_out_base; +}; + +struct bmp_header { + u16 id; + u32 size; +} __attribute__((packed)); + +typedef efi_status_t efi_query_variable_store_t(u32, long unsigned int, bool); + +typedef struct { + efi_guid_t guid; + u64 table; +} efi_config_table_64_t; + +typedef struct { + efi_guid_t guid; + u32 table; +} efi_config_table_32_t; + +typedef union { + struct { + efi_guid_t guid; + void *table; + }; + efi_config_table_32_t mixed_mode; +} efi_config_table_t; + +typedef struct { + efi_guid_t guid; + long unsigned int *ptr; + const char name[16]; +} efi_config_table_type_t; + +typedef struct { + u16 version; + u16 length; + u32 runtime_services_supported; +} efi_rt_properties_table_t; + +struct efivar_operations { + efi_get_variable_t *get_variable; + efi_get_next_variable_t *get_next_variable; + efi_set_variable_t *set_variable; + efi_set_variable_t *set_variable_nonblocking; + efi_query_variable_store_t 
*query_variable_store; +}; + +struct efivars { + struct kset *kset; + struct kobject *kobject; + const struct efivar_operations *ops; +}; + +struct efi_variable { + efi_char16_t VariableName[512]; + efi_guid_t VendorGuid; + long unsigned int DataSize; + __u8 Data[1024]; + efi_status_t Status; + __u32 Attributes; +} __attribute__((packed)); + +struct efivar_entry { + struct efi_variable var; + struct list_head list; + struct kobject kobj; + bool scanning; + bool deleting; +}; + +struct linux_efi_random_seed { + u32 size; + u8 bits[0]; +}; + +struct linux_efi_memreserve { + int size; + atomic_t count; + phys_addr_t next; + struct { + phys_addr_t base; + phys_addr_t size; + } entry[0]; +}; + +struct efi_generic_dev_path { + u8 type; + u8 sub_type; + u16 length; +}; + +struct variable_validate { + efi_guid_t vendor; + char *name; + bool (*validate)(efi_char16_t *, int, u8 *, long unsigned int); +}; + +typedef struct { + u32 version; + u32 num_entries; + u32 desc_size; + u32 reserved; + efi_memory_desc_t entry[0]; +} efi_memory_attributes_table_t; + +typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *); + +typedef u64 efi_physical_addr_t; + +typedef struct { + u64 length; + u64 data; +} efi_capsule_block_desc_t; + +struct efi_memory_map_data { + phys_addr_t phys_map; + long unsigned int size; + long unsigned int desc_version; + long unsigned int desc_size; + long unsigned int flags; +}; + +struct efi_mem_range { + struct range range; + u64 attribute; +}; + +enum { + SYSTAB = 0, + MMBASE = 1, + MMSIZE = 2, + DCSIZE = 3, + DCVERS = 4, + PARAMCOUNT = 5, +}; + +struct efi_system_resource_entry_v1 { + efi_guid_t fw_class; + u32 fw_type; + u32 fw_version; + u32 lowest_supported_fw_version; + u32 capsule_flags; + u32 last_attempt_version; + u32 last_attempt_status; +}; + +struct efi_system_resource_table { + u32 fw_resource_count; + u32 fw_resource_count_max; + u64 fw_resource_version; + u8 entries[0]; +}; + +struct esre_entry { + union { + struct efi_system_resource_entry_v1 *esre1; + } esre; + struct kobject kobj; + struct list_head list; +}; + +struct esre_attribute { + struct attribute attr; + ssize_t (*show)(struct esre_entry *, char *); + ssize_t (*store)(struct esre_entry *, const char *, size_t); +}; + +struct cper_sec_proc_generic { + u64 validation_bits; + u8 proc_type; + u8 proc_isa; + u8 proc_error_type; + u8 operation; + u8 flags; + u8 level; + u16 reserved; + u64 cpu_version; + char cpu_brand[128]; + u64 proc_id; + u64 target_addr; + u64 requestor_id; + u64 responder_id; + u64 ip; +}; + +struct cper_sec_proc_arm { + u32 validation_bits; + u16 err_info_num; + u16 context_info_num; + u32 section_length; + u8 affinity_level; + u8 reserved[3]; + u64 mpidr; + u64 midr; + u32 running_state; + u32 psci_state; +}; + +struct cper_mem_err_compact { + u64 validation_bits; + u16 node; + u16 card; + u16 module; + u16 bank; + u16 device; + u16 row; + u16 column; + u16 bit_pos; + u64 requestor_id; + u64 responder_id; + u64 target_id; + u16 rank; + u16 mem_array_handle; + u16 mem_dev_handle; + u8 extended; +} __attribute__((packed)); + +struct cper_sec_pcie { + u64 validation_bits; + u32 port_type; + struct { + u8 minor; + u8 major; + u8 reserved[2]; + } version; + u16 command; + u16 status; + u32 reserved; + struct { + u16 vendor_id; + u16 device_id; + u8 class_code[3]; + u8 function; + u8 device; + u16 segment; + u8 bus; + u8 secondary_bus; + u16 slot; + u8 reserved; + } __attribute__((packed)) device_id; + struct { + u32 lower; + u32 upper; + } serial_number; + struct { + 
u16 secondary_status; + u16 control; + } bridge; + u8 capability[60]; + u8 aer_info[96]; +}; + +struct cper_sec_fw_err_rec_ref { + u8 record_type; + u8 revision; + u8 reserved[6]; + u64 record_identifier; + guid_t record_identifier_guid; +}; + +struct acpi_hest_generic_data { + u8 section_type[16]; + u32 error_severity; + u16 revision; + u8 validation_bits; + u8 flags; + u32 error_data_length; + u8 fru_id[16]; + u8 fru_text[20]; +}; + +struct acpi_hest_generic_data_v300 { + u8 section_type[16]; + u32 error_severity; + u16 revision; + u8 validation_bits; + u8 flags; + u32 error_data_length; + u8 fru_id[16]; + u8 fru_text[20]; + u64 time_stamp; +}; + +enum efi_rts_ids { + EFI_NONE = 0, + EFI_GET_TIME = 1, + EFI_SET_TIME = 2, + EFI_GET_WAKEUP_TIME = 3, + EFI_SET_WAKEUP_TIME = 4, + EFI_GET_VARIABLE = 5, + EFI_GET_NEXT_VARIABLE = 6, + EFI_SET_VARIABLE = 7, + EFI_QUERY_VARIABLE_INFO = 8, + EFI_GET_NEXT_HIGH_MONO_COUNT = 9, + EFI_RESET_SYSTEM = 10, + EFI_UPDATE_CAPSULE = 11, + EFI_QUERY_CAPSULE_CAPS = 12, +}; + +struct efi_runtime_work { + void *arg1; + void *arg2; + void *arg3; + void *arg4; + void *arg5; + efi_status_t status; + struct work_struct work; + enum efi_rts_ids efi_rts_id; + struct completion efi_rts_comp; +}; + +typedef void *efi_event_t; + +typedef void (*efi_event_notify_t)(efi_event_t, void *); + +typedef enum { + EfiTimerCancel = 0, + EfiTimerPeriodic = 1, + EfiTimerRelative = 2, +} EFI_TIMER_DELAY; + +typedef void *efi_handle_t; + +typedef struct efi_generic_dev_path efi_device_path_protocol_t; + +union efi_boot_services { + struct { + efi_table_hdr_t hdr; + void *raise_tpl; + void *restore_tpl; + efi_status_t (*allocate_pages)(int, int, long unsigned int, efi_physical_addr_t *); + efi_status_t (*free_pages)(efi_physical_addr_t, long unsigned int); + efi_status_t (*get_memory_map)(long unsigned int *, void *, long unsigned int *, long unsigned int *, u32 *); + efi_status_t (*allocate_pool)(int, long unsigned int, void **); + efi_status_t (*free_pool)(void *); + efi_status_t (*create_event)(u32, long unsigned int, efi_event_notify_t, void *, efi_event_t *); + efi_status_t (*set_timer)(efi_event_t, EFI_TIMER_DELAY, u64); + efi_status_t (*wait_for_event)(long unsigned int, efi_event_t *, long unsigned int *); + void *signal_event; + efi_status_t (*close_event)(efi_event_t); + void *check_event; + void *install_protocol_interface; + void *reinstall_protocol_interface; + void *uninstall_protocol_interface; + efi_status_t (*handle_protocol)(efi_handle_t, efi_guid_t *, void **); + void *__reserved; + void *register_protocol_notify; + efi_status_t (*locate_handle)(int, efi_guid_t *, void *, long unsigned int *, efi_handle_t *); + efi_status_t (*locate_device_path)(efi_guid_t *, efi_device_path_protocol_t **, efi_handle_t *); + efi_status_t (*install_configuration_table)(efi_guid_t *, void *); + void *load_image; + void *start_image; + efi_status_t (*exit)(efi_handle_t, efi_status_t, long unsigned int, efi_char16_t *); + void *unload_image; + efi_status_t (*exit_boot_services)(efi_handle_t, long unsigned int); + void *get_next_monotonic_count; + efi_status_t (*stall)(long unsigned int); + void *set_watchdog_timer; + void *connect_controller; + efi_status_t (*disconnect_controller)(efi_handle_t, efi_handle_t, efi_handle_t); + void *open_protocol; + void *close_protocol; + void *open_protocol_information; + void *protocols_per_handle; + void *locate_handle_buffer; + efi_status_t (*locate_protocol)(efi_guid_t *, void *, void **); + void *install_multiple_protocol_interfaces; + void 
*uninstall_multiple_protocol_interfaces; + void *calculate_crc32; + void *copy_mem; + void *set_mem; + void *create_event_ex; + }; + struct { + efi_table_hdr_t hdr; + u32 raise_tpl; + u32 restore_tpl; + u32 allocate_pages; + u32 free_pages; + u32 get_memory_map; + u32 allocate_pool; + u32 free_pool; + u32 create_event; + u32 set_timer; + u32 wait_for_event; + u32 signal_event; + u32 close_event; + u32 check_event; + u32 install_protocol_interface; + u32 reinstall_protocol_interface; + u32 uninstall_protocol_interface; + u32 handle_protocol; + u32 __reserved; + u32 register_protocol_notify; + u32 locate_handle; + u32 locate_device_path; + u32 install_configuration_table; + u32 load_image; + u32 start_image; + u32 exit; + u32 unload_image; + u32 exit_boot_services; + u32 get_next_monotonic_count; + u32 stall; + u32 set_watchdog_timer; + u32 connect_controller; + u32 disconnect_controller; + u32 open_protocol; + u32 close_protocol; + u32 open_protocol_information; + u32 protocols_per_handle; + u32 locate_handle_buffer; + u32 locate_protocol; + u32 install_multiple_protocol_interfaces; + u32 uninstall_multiple_protocol_interfaces; + u32 calculate_crc32; + u32 copy_mem; + u32 set_mem; + u32 create_event_ex; + } mixed_mode; +}; + +typedef union efi_boot_services efi_boot_services_t; + +typedef struct { + efi_table_hdr_t hdr; + u32 fw_vendor; + u32 fw_revision; + u32 con_in_handle; + u32 con_in; + u32 con_out_handle; + u32 con_out; + u32 stderr_handle; + u32 stderr; + u32 runtime; + u32 boottime; + u32 nr_tables; + u32 tables; +} efi_system_table_32_t; + +typedef struct { + u16 scan_code; + efi_char16_t unicode_char; +} efi_input_key_t; + +union efi_simple_text_input_protocol; + +typedef union efi_simple_text_input_protocol efi_simple_text_input_protocol_t; + +union efi_simple_text_input_protocol { + struct { + void *reset; + efi_status_t (*read_keystroke)(efi_simple_text_input_protocol_t *, efi_input_key_t *); + efi_event_t wait_for_key; + }; + struct { + u32 reset; + u32 read_keystroke; + u32 wait_for_key; + } mixed_mode; +}; + +union efi_simple_text_output_protocol; + +typedef union efi_simple_text_output_protocol efi_simple_text_output_protocol_t; + +union efi_simple_text_output_protocol { + struct { + void *reset; + efi_status_t (*output_string)(efi_simple_text_output_protocol_t *, efi_char16_t *); + void *test_string; + }; + struct { + u32 reset; + u32 output_string; + u32 test_string; + } mixed_mode; +}; + +typedef union { + struct { + efi_table_hdr_t hdr; + long unsigned int fw_vendor; + u32 fw_revision; + long unsigned int con_in_handle; + efi_simple_text_input_protocol_t *con_in; + long unsigned int con_out_handle; + efi_simple_text_output_protocol_t *con_out; + long unsigned int stderr_handle; + long unsigned int stderr; + efi_runtime_services_t *runtime; + efi_boot_services_t *boottime; + long unsigned int nr_tables; + long unsigned int tables; + }; + efi_system_table_32_t mixed_mode; +} efi_system_table_t; + +struct cper_arm_err_info { + u8 version; + u8 length; + u16 validation_bits; + u8 type; + u16 multiple_error; + u8 flags; + u64 error_info; + u64 virt_fault_addr; + u64 physical_fault_addr; +} __attribute__((packed)); + +struct cper_arm_ctx_info { + u16 version; + u16 type; + u32 size; +}; + +typedef long unsigned int psci_fn(long unsigned int, long unsigned int, long unsigned int, long unsigned int); + +enum psci_function { + PSCI_FN_CPU_SUSPEND = 0, + PSCI_FN_CPU_ON = 1, + PSCI_FN_CPU_OFF = 2, + PSCI_FN_MIGRATE = 3, + PSCI_FN_MAX = 4, +}; + +typedef int 
(*psci_initcall_t)(const struct device_node *); + +struct mrq_ping_request { + uint32_t challenge; +}; + +struct mrq_ping_response { + uint32_t reply; +}; + +struct mrq_query_tag_request { + uint32_t addr; +}; + +struct mrq_query_fw_tag_response { + uint8_t tag[32]; +}; + +struct mrq_query_abi_request { + uint32_t mrq; +}; + +struct mrq_query_abi_response { + int32_t status; +}; + +struct tegra_ivc_header; + +struct tegra_ivc { + struct device *peer; + struct { + struct tegra_ivc_header *channel; + unsigned int position; + dma_addr_t phys; + } rx; + struct { + struct tegra_ivc_header *channel; + unsigned int position; + dma_addr_t phys; + } tx; + void (*notify)(struct tegra_ivc *, void *); + void *notify_data; + unsigned int num_frames; + size_t frame_size; +}; + +typedef void (*tegra_bpmp_mrq_handler_t)(unsigned int, struct tegra_bpmp_channel *, void *); + +struct tegra_bpmp_mrq { + struct list_head list; + unsigned int mrq; + tegra_bpmp_mrq_handler_t handler; + void *data; +}; + +struct tegra210_bpmp { + void *atomics; + void *arb_sema; + struct irq_data *tx_irq_data; +}; + +struct tegra186_bpmp { + struct tegra_bpmp *parent; + struct { + struct gen_pool *pool; + dma_addr_t phys; + void *virt; + } tx; + struct { + struct gen_pool *pool; + dma_addr_t phys; + void *virt; + } rx; + struct { + struct mbox_client client; + struct mbox_chan___2 *channel; + } mbox; +}; + +enum mrq_debugfs_commands { + CMD_DEBUGFS_READ = 1, + CMD_DEBUGFS_WRITE = 2, + CMD_DEBUGFS_DUMPDIR = 3, + CMD_DEBUGFS_MAX = 4, +}; + +struct cmd_debugfs_fileop_request { + uint32_t fnameaddr; + uint32_t fnamelen; + uint32_t dataaddr; + uint32_t datalen; +}; + +struct cmd_debugfs_dumpdir_request { + uint32_t dataaddr; + uint32_t datalen; +}; + +struct cmd_debugfs_fileop_response { + uint32_t reserved; + uint32_t nbytes; +}; + +struct cmd_debugfs_dumpdir_response { + uint32_t reserved; + uint32_t nbytes; +}; + +struct mrq_debugfs_request { + uint32_t cmd; + union { + struct cmd_debugfs_fileop_request fop; + struct cmd_debugfs_dumpdir_request dumpdir; + }; +}; + +struct mrq_debugfs_response { + int32_t reserved; + union { + struct cmd_debugfs_fileop_response fop; + struct cmd_debugfs_dumpdir_response dumpdir; + }; +}; + +enum mrq_debug_commands { + CMD_DEBUG_OPEN_RO = 0, + CMD_DEBUG_OPEN_WO = 1, + CMD_DEBUG_READ = 2, + CMD_DEBUG_WRITE = 3, + CMD_DEBUG_CLOSE = 4, + CMD_DEBUG_MAX = 5, +}; + +struct cmd_debug_fopen_request { + char name[116]; +}; + +struct cmd_debug_fopen_response { + uint32_t fd; + uint32_t datalen; +}; + +struct cmd_debug_fread_request { + uint32_t fd; +}; + +struct cmd_debug_fread_response { + uint32_t readlen; + char data[116]; +}; + +struct cmd_debug_fwrite_request { + uint32_t fd; + uint32_t datalen; + char data[108]; +}; + +struct cmd_debug_fclose_request { + uint32_t fd; +}; + +struct mrq_debug_request { + uint32_t cmd; + union { + struct cmd_debug_fopen_request fop; + struct cmd_debug_fread_request frd; + struct cmd_debug_fwrite_request fwr; + struct cmd_debug_fclose_request fcl; + }; +}; + +struct mrq_debug_response { + union { + struct cmd_debug_fopen_response fop; + struct cmd_debug_fread_response frd; + }; +}; + +struct seqbuf { + char *buf; + size_t pos; + size_t size; +}; + +struct tegra_ivc_header { + union { + struct { + u32 count; + u32 state; + }; + u8 pad[64]; + } tx; + union { + u32 count; + u8 pad[64]; + } rx; +}; + +enum tegra_ivc_state { + TEGRA_IVC_STATE_ESTABLISHED = 0, + TEGRA_IVC_STATE_SYNC = 1, + TEGRA_IVC_STATE_ACK = 2, +}; + +struct of_timer_irq { + int irq; + int index; + int percpu; 
+ const char *name; + long unsigned int flags; + irq_handler_t handler; +}; + +struct of_timer_base { + void *base; + const char *name; + int index; +}; + +struct of_timer_clk { + struct clk *clk; + const char *name; + int index; + long unsigned int rate; + long unsigned int period; +}; + +struct timer_of { + unsigned int flags; + struct device_node *np; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct clock_event_device clkevt; + struct of_timer_base of_base; + struct of_timer_irq of_irq; + struct of_timer_clk of_clk; + void *private_data; + long: 64; + long: 64; +}; + +typedef int (*of_init_fn_1_ret)(struct device_node *); + +struct clocksource_mmio { + void *reg; + struct clocksource clksrc; +}; + +struct rk_timer { + void *base; + void *ctrl; + struct clk *clk; + struct clk *pclk; + u32 freq; + int irq; +}; + +struct rk_clkevt { + struct clock_event_device ce; + struct rk_timer timer; + long: 64; + long: 64; + long: 64; +}; + +enum arch_timer_reg { + ARCH_TIMER_REG_CTRL = 0, + ARCH_TIMER_REG_TVAL = 1, +}; + +enum arch_timer_spi_nr { + ARCH_TIMER_PHYS_SPI = 0, + ARCH_TIMER_VIRT_SPI = 1, + ARCH_TIMER_MAX_TIMER_SPI = 2, +}; + +enum arch_timer_erratum_match_type { + ate_match_dt = 0, + ate_match_local_cap_id = 1, + ate_match_acpi_oem_info = 2, +}; + +struct arch_timer_erratum_workaround { + enum arch_timer_erratum_match_type match_type; + const void *id; + const char *desc; + u32 (*read_cntp_tval_el0)(); + u32 (*read_cntv_tval_el0)(); + u64 (*read_cntpct_el0)(); + u64 (*read_cntvct_el0)(); + int (*set_next_event_phys)(long unsigned int, struct clock_event_device *); + int (*set_next_event_virt)(long unsigned int, struct clock_event_device *); + bool disable_compat_vdso; +}; + +struct arch_timer { + void *base; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct clock_event_device evt; +}; + +struct ate_acpi_oem_info { + char oem_id[7]; + char oem_table_id[9]; + u32 oem_revision; +}; + +typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *, const void *); + +struct sp804_timer { + int load; + int load_h; + int value; + int value_h; + int ctrl; + int intclr; + int ris; + int mis; + int bgload; + int bgload_h; + int timer_base[2]; + int width; +}; + +struct sp804_clkevt { + void *base; + void *load; + void *load_h; + void *value; + void *value_h; + void *ctrl; + void *intclr; + void *ris; + void *mis; + void *bgload; + void *bgload_h; + long unsigned int reload; + int width; +}; + +struct alias_prop { + struct list_head link; + const char *alias; + struct device_node *np; + int id; + char stem[0]; +}; + +struct of_endpoint { + unsigned int port; + unsigned int id; + const struct device_node *local_node; +}; + +struct supplier_bindings { + struct device_node * (*parse_prop)(struct device_node *, const char *, int); +}; + +struct of_changeset_entry { + struct list_head node; + long unsigned int action; + struct device_node *np; + struct property *prop; + struct property *old_prop; +}; + +struct of_changeset { + struct list_head entries; +}; + +struct of_bus___2 { + void (*count_cells)(const void *, int, int *, int *); + u64 (*map)(__be32 *, const __be32 *, int, int, int); + int (*translate)(__be32 *, u64, int); +}; + +struct of_bus { + const char *name; + const char *addresses; + int (*match)(struct device_node *); + void (*count_cells)(struct device_node *, int *, int *); + u64 (*map)(__be32 *, const __be32 *, int, int, int); + int (*translate)(__be32 *, u64, int); + bool has_flags; + unsigned int 
(*get_flags)(const __be32 *); +}; + +struct of_intc_desc { + struct list_head list; + of_irq_init_cb_t irq_init_cb; + struct device_node *dev; + struct device_node *interrupt_parent; +}; + +struct rmem_assigned_device { + struct device *dev; + struct reserved_mem *rmem; + struct list_head list; +}; + +enum of_overlay_notify_action { + OF_OVERLAY_PRE_APPLY = 0, + OF_OVERLAY_POST_APPLY = 1, + OF_OVERLAY_PRE_REMOVE = 2, + OF_OVERLAY_POST_REMOVE = 3, +}; + +struct of_overlay_notify_data { + struct device_node *overlay; + struct device_node *target; +}; + +struct target { + struct device_node *np; + bool in_livetree; +}; + +struct fragment { + struct device_node *overlay; + struct device_node *target; +}; + +struct overlay_changeset { + int id; + struct list_head ovcs_list; + const void *fdt; + struct device_node *overlay_tree; + int count; + struct fragment *fragments; + bool symbols_fragment; + struct of_changeset cset; +}; + +enum vchiq_reason { + VCHIQ_SERVICE_OPENED = 0, + VCHIQ_SERVICE_CLOSED = 1, + VCHIQ_MESSAGE_AVAILABLE = 2, + VCHIQ_BULK_TRANSMIT_DONE = 3, + VCHIQ_BULK_RECEIVE_DONE = 4, + VCHIQ_BULK_TRANSMIT_ABORTED = 5, + VCHIQ_BULK_RECEIVE_ABORTED = 6, +}; + +enum vchiq_status { + VCHIQ_ERROR = 4294967295, + VCHIQ_SUCCESS = 0, + VCHIQ_RETRY = 1, +}; + +enum vchiq_bulk_mode { + VCHIQ_BULK_MODE_CALLBACK = 0, + VCHIQ_BULK_MODE_BLOCKING = 1, + VCHIQ_BULK_MODE_NOCALLBACK = 2, + VCHIQ_BULK_MODE_WAITING = 3, +}; + +enum vchiq_service_option { + VCHIQ_SERVICE_OPTION_AUTOCLOSE = 0, + VCHIQ_SERVICE_OPTION_SLOT_QUOTA = 1, + VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA = 2, + VCHIQ_SERVICE_OPTION_SYNCHRONOUS = 3, + VCHIQ_SERVICE_OPTION_TRACE = 4, +}; + +struct vchiq_header { + int msgid; + unsigned int size; + char data[0]; +}; + +struct vchiq_service_base { + int fourcc; + enum vchiq_status (*callback)(enum vchiq_reason, struct vchiq_header *, unsigned int, void *); + void *userdata; +}; + +struct vchiq_service_params_kernel { + int fourcc; + enum vchiq_status (*callback)(enum vchiq_reason, struct vchiq_header *, unsigned int, void *); + void *userdata; + short int version; + short int version_min; +}; + +typedef uint32_t BITSET_T; + +enum { + DEBUG_ENTRIES = 0, + DEBUG_SLOT_HANDLER_COUNT = 1, + DEBUG_SLOT_HANDLER_LINE = 2, + DEBUG_PARSE_LINE = 3, + DEBUG_PARSE_HEADER = 4, + DEBUG_PARSE_MSGID = 5, + DEBUG_AWAIT_COMPLETION_LINE = 6, + DEBUG_DEQUEUE_MESSAGE_LINE = 7, + DEBUG_SERVICE_CALLBACK_LINE = 8, + DEBUG_MSG_QUEUE_FULL_COUNT = 9, + DEBUG_COMPLETION_QUEUE_FULL_COUNT = 10, + DEBUG_MAX = 11, +}; + +enum vchiq_connstate { + VCHIQ_CONNSTATE_DISCONNECTED = 0, + VCHIQ_CONNSTATE_CONNECTING = 1, + VCHIQ_CONNSTATE_CONNECTED = 2, + VCHIQ_CONNSTATE_PAUSING = 3, + VCHIQ_CONNSTATE_PAUSE_SENT = 4, + VCHIQ_CONNSTATE_PAUSED = 5, + VCHIQ_CONNSTATE_RESUMING = 6, + VCHIQ_CONNSTATE_PAUSE_TIMEOUT = 7, + VCHIQ_CONNSTATE_RESUME_TIMEOUT = 8, +}; + +enum { + VCHIQ_SRVSTATE_FREE = 0, + VCHIQ_SRVSTATE_HIDDEN = 1, + VCHIQ_SRVSTATE_LISTENING = 2, + VCHIQ_SRVSTATE_OPENING = 3, + VCHIQ_SRVSTATE_OPEN = 4, + VCHIQ_SRVSTATE_OPENSYNC = 5, + VCHIQ_SRVSTATE_CLOSESENT = 6, + VCHIQ_SRVSTATE_CLOSERECVD = 7, + VCHIQ_SRVSTATE_CLOSEWAIT = 8, + VCHIQ_SRVSTATE_CLOSED = 9, +}; + +enum { + VCHIQ_POLL_TERMINATE = 0, + VCHIQ_POLL_REMOVE = 1, + VCHIQ_POLL_TXNOTIFY = 2, + VCHIQ_POLL_RXNOTIFY = 3, + VCHIQ_POLL_COUNT = 4, +}; + +enum vchiq_bulk_dir { + VCHIQ_BULK_TRANSMIT = 0, + VCHIQ_BULK_RECEIVE = 1, +}; + +typedef void (*vchiq_userdata_term)(void *); + +struct vchiq_bulk { + short int mode; + short int dir; + void *userdata; + dma_addr_t data; + int 
size; + void *remote_data; + int remote_size; + int actual; +}; + +struct vchiq_bulk_queue { + int local_insert; + int remote_insert; + int process; + int remote_notify; + int remove; + struct vchiq_bulk bulks[4]; +}; + +struct remote_event { + int armed; + int fired; + u32 __unused; +}; + +struct vchiq_slot { + char data[4096]; +}; + +struct vchiq_slot_info { + short int use_count; + short int release_count; +}; + +struct service_stats_struct { + int quota_stalls; + int slot_stalls; + int bulk_stalls; + int error_count; + int ctrl_tx_count; + int ctrl_rx_count; + int bulk_tx_count; + int bulk_rx_count; + int bulk_aborted_count; + uint64_t ctrl_tx_bytes; + uint64_t ctrl_rx_bytes; + uint64_t bulk_tx_bytes; + uint64_t bulk_rx_bytes; +}; + +struct vchiq_state; + +struct vchiq_instance; + +struct vchiq_service { + struct vchiq_service_base base; + unsigned int handle; + struct kref ref_count; + struct callback_head rcu; + int srvstate; + vchiq_userdata_term userdata_term; + unsigned int localport; + unsigned int remoteport; + int public_fourcc; + int client_id; + char auto_close; + char sync; + char closing; + char trace; + atomic_t poll_flags; + short int version; + short int version_min; + short int peer_version; + struct vchiq_state *state; + struct vchiq_instance *instance; + int service_use_count; + struct vchiq_bulk_queue bulk_tx; + struct vchiq_bulk_queue bulk_rx; + struct completion remove_event; + struct completion bulk_remove_event; + struct mutex bulk_mutex; + struct service_stats_struct stats; + int msg_queue_read; + int msg_queue_write; + struct completion msg_queue_pop; + struct completion msg_queue_push; + struct vchiq_header *msg_queue[128]; +}; + +struct state_stats_struct { + int slot_stalls; + int data_stalls; + int ctrl_tx_count; + int ctrl_rx_count; + int error_count; +}; + +struct vchiq_service_quota { + short unsigned int slot_quota; + short unsigned int slot_use_count; + short unsigned int message_quota; + short unsigned int message_use_count; + struct completion quota_event; + int previous_tx_index; +}; + +struct opaque_platform_state; + +struct vchiq_shared_state; + +struct vchiq_state { + int id; + int initialised; + enum vchiq_connstate conn_state; + short int version_common; + struct vchiq_shared_state *local; + struct vchiq_shared_state *remote; + struct vchiq_slot *slot_data; + short unsigned int default_slot_quota; + short unsigned int default_message_quota; + struct completion connect; + struct mutex mutex; + struct vchiq_instance **instance; + struct task_struct *slot_handler_thread; + struct task_struct *recycle_thread; + struct task_struct *sync_thread; + wait_queue_head_t trigger_event; + wait_queue_head_t recycle_event; + wait_queue_head_t sync_trigger_event; + wait_queue_head_t sync_release_event; + char *tx_data; + char *rx_data; + struct vchiq_slot_info *rx_info; + struct mutex slot_mutex; + struct mutex recycle_mutex; + struct mutex sync_mutex; + struct mutex bulk_transfer_mutex; + int rx_pos; + int local_tx_pos; + int slot_queue_available; + int poll_needed; + int previous_data_index; + short unsigned int data_use_count; + short unsigned int data_quota; + atomic_t poll_services[128]; + int unused_service; + struct completion slot_available_event; + struct completion slot_remove_event; + struct completion data_quota_event; + struct state_stats_struct stats; + struct vchiq_service *services[4096]; + struct vchiq_service_quota service_quotas[4096]; + struct vchiq_slot_info slot_info[128]; + struct opaque_platform_state *platform_state; +}; + +struct 
vchiq_shared_state { + int initialised; + int slot_first; + int slot_last; + int slot_sync; + struct remote_event trigger; + int tx_pos; + struct remote_event recycle; + int slot_queue_recycle; + struct remote_event sync_trigger; + struct remote_event sync_release; + int slot_queue[64]; + int debug[11]; +}; + +struct vchiq_slot_zero { + int magic; + short int version; + short int version_min; + int slot_zero_size; + int slot_size; + int max_slots; + int max_slots_per_side; + int platform_data[2]; + struct vchiq_shared_state master; + struct vchiq_shared_state slave; + struct vchiq_slot_info slots[128]; +}; + +struct bulk_waiter { + struct vchiq_bulk *bulk; + struct completion event; + int actual; +}; + +struct vchiq_config { + unsigned int max_msg_size; + unsigned int bulk_threshold; + unsigned int max_outstanding_bulks; + unsigned int max_services; + short int version; + short int version_min; +}; + +struct vchiq_open_payload { + int fourcc; + int client_id; + short int version; + short int version_min; +}; + +struct vchiq_openack_payload { + short int version; +}; + +enum { + QMFLAGS_IS_BLOCKING = 1, + QMFLAGS_NO_MUTEX_LOCK = 2, + QMFLAGS_NO_MUTEX_UNLOCK = 4, +}; + +struct vchiq_element { + const void *data; + unsigned int size; +}; + +struct vchiq_completion_data_kernel { + enum vchiq_reason reason; + struct vchiq_header *header; + void *service_userdata; + void *bulk_userdata; +}; + +struct vchiq_debugfs_node { + struct dentry *dentry; +}; + +struct vchiq_instance { + struct vchiq_state *state; + struct vchiq_completion_data_kernel completions[128]; + int completion_insert; + int completion_remove; + struct completion insert_event; + struct completion remove_event; + struct mutex completion_mutex; + int connected; + int closing; + int pid; + int mark; + int use_close_delivered; + int trace; + struct list_head bulk_waiter_list; + struct mutex bulk_waiter_list_mutex; + struct vchiq_debugfs_node debugfs_node; +}; + +struct vchiq_service_params { + int fourcc; + enum vchiq_status (*callback)(enum vchiq_reason, struct vchiq_header *, unsigned int, void *); + void *userdata; + short int version; + short int version_min; +}; + +struct vchiq_create_service { + struct vchiq_service_params params; + int is_open; + int is_vchi; + unsigned int handle; +}; + +struct vchiq_queue_message { + unsigned int handle; + unsigned int count; + const struct vchiq_element *elements; +}; + +struct vchiq_queue_bulk_transfer { + unsigned int handle; + void *data; + unsigned int size; + void *userdata; + enum vchiq_bulk_mode mode; +}; + +struct vchiq_completion_data { + enum vchiq_reason reason; + struct vchiq_header *header; + void *service_userdata; + void *bulk_userdata; +}; + +struct vchiq_await_completion { + unsigned int count; + struct vchiq_completion_data *buf; + unsigned int msgbufsize; + unsigned int msgbufcount; + void **msgbufs; +}; + +struct vchiq_dequeue_message { + unsigned int handle; + int blocking; + unsigned int bufsize; + void *buf; +}; + +struct vchiq_get_config { + unsigned int config_size; + struct vchiq_config *pconfig; +}; + +struct vchiq_set_service_option { + unsigned int handle; + enum vchiq_service_option option; + int value; +}; + +enum USE_TYPE_E { + USE_TYPE_SERVICE = 0, + USE_TYPE_VCHIQ = 1, +}; + +struct vchiq_arm_state { + struct task_struct *ka_thread; + struct completion ka_evt; + atomic_t ka_use_count; + atomic_t ka_use_ack_count; + atomic_t ka_release_count; + rwlock_t susp_res_lock; + struct vchiq_state *state; + int videocore_use_count; + int peer_use_count; + int 
first_connect; +}; + +struct vchiq_drvdata { + const unsigned int cache_line_size; + struct rpi_firmware *fw; +}; + +struct user_service { + struct vchiq_service *service; + void *userdata; + struct vchiq_instance *instance; + char is_vchi; + char dequeue_pending; + char close_pending; + int message_available_pos; + int msg_insert; + int msg_remove; + struct completion insert_event; + struct completion remove_event; + struct completion close_event; + struct vchiq_header *msg_queue[128]; +}; + +struct bulk_waiter_node { + struct bulk_waiter bulk_waiter; + int pid; + struct list_head list; +}; + +struct dump_context { + char *buf; + size_t actual; + size_t space; + loff_t offset; +}; + +struct vchiq_io_copy_callback_context { + struct vchiq_element *element; + size_t element_offset; + long unsigned int elements_to_go; +}; + +struct vchiq_completion_data32 { + enum vchiq_reason reason; + compat_uptr_t header; + compat_uptr_t service_userdata; + compat_uptr_t bulk_userdata; +}; + +struct vchiq_service_params32 { + int fourcc; + compat_uptr_t callback; + compat_uptr_t userdata; + short int version; + short int version_min; +}; + +struct vchiq_create_service32 { + struct vchiq_service_params32 params; + int is_open; + int is_vchi; + unsigned int handle; +}; + +struct vchiq_element32 { + compat_uptr_t data; + unsigned int size; +}; + +struct vchiq_queue_message32 { + unsigned int handle; + unsigned int count; + compat_uptr_t elements; +}; + +struct vchiq_queue_bulk_transfer32 { + unsigned int handle; + compat_uptr_t data; + unsigned int size; + compat_uptr_t userdata; + enum vchiq_bulk_mode mode; +}; + +struct vchiq_await_completion32 { + unsigned int count; + compat_uptr_t buf; + unsigned int msgbufsize; + unsigned int msgbufcount; + compat_uptr_t msgbufs; +}; + +struct vchiq_dequeue_message32 { + unsigned int handle; + int blocking; + unsigned int bufsize; + compat_uptr_t buf; +}; + +struct vchiq_get_config32 { + unsigned int config_size; + compat_uptr_t pconfig; +}; + +struct service_data_struct { + int fourcc; + int clientid; + int use_count; +}; + +struct pagelist { + u32 length; + u16 type; + u16 offset; + u32 addrs[1]; +}; + +struct vchiq_2835_state { + int inited; + struct vchiq_arm_state arm_state; +}; + +struct vchiq_pagelist_info { + struct pagelist *pagelist; + size_t pagelist_buffer_size; + dma_addr_t dma_addr; + enum dma_data_direction dma_dir; + unsigned int num_pages; + unsigned int pages_need_release; + struct page **pages; + struct scatterlist *scatterlist; + unsigned int scatterlist_mapped; +}; + +struct vchiq_debugfs_log_entry { + const char *name; + void *plevel; +}; + +typedef void (*VCHIQ_CONNECTED_CALLBACK_T)(); + +enum ec_status { + EC_RES_SUCCESS = 0, + EC_RES_INVALID_COMMAND = 1, + EC_RES_ERROR = 2, + EC_RES_INVALID_PARAM = 3, + EC_RES_ACCESS_DENIED = 4, + EC_RES_INVALID_RESPONSE = 5, + EC_RES_INVALID_VERSION = 6, + EC_RES_INVALID_CHECKSUM = 7, + EC_RES_IN_PROGRESS = 8, + EC_RES_UNAVAILABLE = 9, + EC_RES_TIMEOUT = 10, + EC_RES_OVERFLOW = 11, + EC_RES_INVALID_HEADER = 12, + EC_RES_REQUEST_TRUNCATED = 13, + EC_RES_RESPONSE_TOO_BIG = 14, + EC_RES_BUS_ERROR = 15, + EC_RES_BUSY = 16, + EC_RES_INVALID_HEADER_VERSION = 17, + EC_RES_INVALID_HEADER_CRC = 18, + EC_RES_INVALID_DATA_CRC = 19, + EC_RES_DUP_UNAVAILABLE = 20, +}; + +enum host_event_code { + EC_HOST_EVENT_LID_CLOSED = 1, + EC_HOST_EVENT_LID_OPEN = 2, + EC_HOST_EVENT_POWER_BUTTON = 3, + EC_HOST_EVENT_AC_CONNECTED = 4, + EC_HOST_EVENT_AC_DISCONNECTED = 5, + EC_HOST_EVENT_BATTERY_LOW = 6, + EC_HOST_EVENT_BATTERY_CRITICAL 
= 7, + EC_HOST_EVENT_BATTERY = 8, + EC_HOST_EVENT_THERMAL_THRESHOLD = 9, + EC_HOST_EVENT_DEVICE = 10, + EC_HOST_EVENT_THERMAL = 11, + EC_HOST_EVENT_USB_CHARGER = 12, + EC_HOST_EVENT_KEY_PRESSED = 13, + EC_HOST_EVENT_INTERFACE_READY = 14, + EC_HOST_EVENT_KEYBOARD_RECOVERY = 15, + EC_HOST_EVENT_THERMAL_SHUTDOWN = 16, + EC_HOST_EVENT_BATTERY_SHUTDOWN = 17, + EC_HOST_EVENT_THROTTLE_START = 18, + EC_HOST_EVENT_THROTTLE_STOP = 19, + EC_HOST_EVENT_HANG_DETECT = 20, + EC_HOST_EVENT_HANG_REBOOT = 21, + EC_HOST_EVENT_PD_MCU = 22, + EC_HOST_EVENT_BATTERY_STATUS = 23, + EC_HOST_EVENT_PANIC = 24, + EC_HOST_EVENT_KEYBOARD_FASTBOOT = 25, + EC_HOST_EVENT_RTC = 26, + EC_HOST_EVENT_MKBP = 27, + EC_HOST_EVENT_USB_MUX = 28, + EC_HOST_EVENT_MODE_CHANGE = 29, + EC_HOST_EVENT_KEYBOARD_RECOVERY_HW_REINIT = 30, + EC_HOST_EVENT_WOV = 31, + EC_HOST_EVENT_INVALID = 32, +}; + +struct ec_host_request { + uint8_t struct_version; + uint8_t checksum; + uint16_t command; + uint8_t command_version; + uint8_t reserved; + uint16_t data_len; +}; + +struct ec_params_hello { + uint32_t in_data; +}; + +struct ec_response_hello { + uint32_t out_data; +}; + +struct ec_params_get_cmd_versions { + uint8_t cmd; +}; + +struct ec_response_get_cmd_versions { + uint32_t version_mask; +}; + +enum ec_comms_status { + EC_COMMS_STATUS_PROCESSING = 1, +}; + +struct ec_response_get_comms_status { + uint32_t flags; +}; + +struct ec_response_get_protocol_info { + uint32_t protocol_versions; + uint16_t max_request_packet_size; + uint16_t max_response_packet_size; + uint32_t flags; +}; + +enum ec_led_colors { + EC_LED_COLOR_RED = 0, + EC_LED_COLOR_GREEN = 1, + EC_LED_COLOR_BLUE = 2, + EC_LED_COLOR_YELLOW = 3, + EC_LED_COLOR_WHITE = 4, + EC_LED_COLOR_AMBER = 5, + EC_LED_COLOR_COUNT = 6, +}; + +enum motionsense_command { + MOTIONSENSE_CMD_DUMP = 0, + MOTIONSENSE_CMD_INFO = 1, + MOTIONSENSE_CMD_EC_RATE = 2, + MOTIONSENSE_CMD_SENSOR_ODR = 3, + MOTIONSENSE_CMD_SENSOR_RANGE = 4, + MOTIONSENSE_CMD_KB_WAKE_ANGLE = 5, + MOTIONSENSE_CMD_DATA = 6, + MOTIONSENSE_CMD_FIFO_INFO = 7, + MOTIONSENSE_CMD_FIFO_FLUSH = 8, + MOTIONSENSE_CMD_FIFO_READ = 9, + MOTIONSENSE_CMD_PERFORM_CALIB = 10, + MOTIONSENSE_CMD_SENSOR_OFFSET = 11, + MOTIONSENSE_CMD_LIST_ACTIVITIES = 12, + MOTIONSENSE_CMD_SET_ACTIVITY = 13, + MOTIONSENSE_CMD_LID_ANGLE = 14, + MOTIONSENSE_CMD_FIFO_INT_ENABLE = 15, + MOTIONSENSE_CMD_SPOOF = 16, + MOTIONSENSE_CMD_TABLET_MODE_LID_ANGLE = 17, + MOTIONSENSE_CMD_SENSOR_SCALE = 18, + MOTIONSENSE_NUM_CMDS = 19, +}; + +struct ec_response_motion_sensor_data { + uint8_t flags; + uint8_t sensor_num; + union { + int16_t data[3]; + struct { + uint16_t reserved; + uint32_t timestamp; + } __attribute__((packed)); + struct { + uint8_t activity; + uint8_t state; + int16_t add_info[2]; + }; + }; +} __attribute__((packed)); + +struct ec_response_motion_sense_fifo_info { + uint16_t size; + uint16_t count; + uint32_t timestamp; + uint16_t total_lost; + uint16_t lost[0]; +} __attribute__((packed)); + +struct ec_response_motion_sense_fifo_data { + uint32_t number_data; + struct ec_response_motion_sensor_data data[0]; +}; + +struct ec_motion_sense_activity { + uint8_t sensor_num; + uint8_t activity; + uint8_t enable; + uint8_t reserved; + uint16_t parameters[3]; +}; + +struct ec_params_motion_sense { + uint8_t cmd; + union { + struct { + uint8_t max_sensor_count; + } dump; + struct { + int16_t data; + } kb_wake_angle; + struct { + uint8_t sensor_num; + } info; + struct { + uint8_t sensor_num; + } info_3; + struct { + uint8_t sensor_num; + } data; + struct { + uint8_t sensor_num; 
+ } fifo_flush; + struct { + uint8_t sensor_num; + } perform_calib; + struct { + uint8_t sensor_num; + } list_activities; + struct { + uint8_t sensor_num; + uint8_t roundup; + uint16_t reserved; + int32_t data; + } ec_rate; + struct { + uint8_t sensor_num; + uint8_t roundup; + uint16_t reserved; + int32_t data; + } sensor_odr; + struct { + uint8_t sensor_num; + uint8_t roundup; + uint16_t reserved; + int32_t data; + } sensor_range; + struct { + uint8_t sensor_num; + uint16_t flags; + int16_t temp; + int16_t offset[3]; + } __attribute__((packed)) sensor_offset; + struct { + uint8_t sensor_num; + uint16_t flags; + int16_t temp; + uint16_t scale[3]; + } __attribute__((packed)) sensor_scale; + struct { + uint32_t max_data_vector; + } fifo_read; + struct ec_motion_sense_activity set_activity; + struct { + int8_t enable; + } fifo_int_enable; + struct { + uint8_t sensor_id; + uint8_t spoof_enable; + uint8_t reserved; + int16_t components[3]; + } __attribute__((packed)) spoof; + struct { + int16_t lid_angle; + int16_t hys_degree; + } tablet_mode_threshold; + }; +} __attribute__((packed)); + +struct ec_response_motion_sense { + union { + struct { + uint8_t module_flags; + uint8_t sensor_count; + struct ec_response_motion_sensor_data sensor[0]; + } __attribute__((packed)) dump; + struct { + uint8_t type; + uint8_t location; + uint8_t chip; + } info; + struct { + uint8_t type; + uint8_t location; + uint8_t chip; + uint32_t min_frequency; + uint32_t max_frequency; + uint32_t fifo_max_event_count; + } info_3; + struct ec_response_motion_sensor_data data; + struct { + int32_t ret; + } ec_rate; + struct { + int32_t ret; + } sensor_odr; + struct { + int32_t ret; + } sensor_range; + struct { + int32_t ret; + } kb_wake_angle; + struct { + int32_t ret; + } fifo_int_enable; + struct { + int32_t ret; + } spoof; + struct { + int16_t temp; + int16_t offset[3]; + } sensor_offset; + struct { + int16_t temp; + int16_t offset[3]; + } perform_calib; + struct { + int16_t temp; + uint16_t scale[3]; + } sensor_scale; + struct ec_response_motion_sense_fifo_info fifo_info; + struct ec_response_motion_sense_fifo_info fifo_flush; + struct ec_response_motion_sense_fifo_data fifo_read; + struct { + uint16_t reserved; + uint32_t enabled; + uint32_t disabled; + } __attribute__((packed)) list_activities; + struct { + uint16_t value; + } lid_angle; + struct { + uint16_t lid_angle; + uint16_t hys_degree; + } tablet_mode_threshold; + }; +}; + +enum ec_temp_thresholds { + EC_TEMP_THRESH_WARN = 0, + EC_TEMP_THRESH_HIGH = 1, + EC_TEMP_THRESH_HALT = 2, + EC_TEMP_THRESH_COUNT = 3, +}; + +enum ec_mkbp_event { + EC_MKBP_EVENT_KEY_MATRIX = 0, + EC_MKBP_EVENT_HOST_EVENT = 1, + EC_MKBP_EVENT_SENSOR_FIFO = 2, + EC_MKBP_EVENT_BUTTON = 3, + EC_MKBP_EVENT_SWITCH = 4, + EC_MKBP_EVENT_FINGERPRINT = 5, + EC_MKBP_EVENT_SYSRQ = 6, + EC_MKBP_EVENT_HOST_EVENT64 = 7, + EC_MKBP_EVENT_CEC_EVENT = 8, + EC_MKBP_EVENT_CEC_MESSAGE = 9, + EC_MKBP_EVENT_COUNT = 10, +}; + +union ec_response_get_next_data_v1 { + uint8_t key_matrix[16]; + uint32_t host_event; + uint64_t host_event64; + struct { + uint8_t reserved[3]; + struct ec_response_motion_sense_fifo_info info; + } __attribute__((packed)) sensor_fifo; + uint32_t buttons; + uint32_t switches; + uint32_t fp_events; + uint32_t sysrq; + uint32_t cec_events; + uint8_t cec_message[16]; +}; + +struct ec_response_get_next_event_v1 { + uint8_t event_type; + union ec_response_get_next_data_v1 data; +} __attribute__((packed)); + +struct ec_response_host_event_mask { + uint32_t mask; +}; + +enum { + 
EC_MSG_TX_HEADER_BYTES = 3, + EC_MSG_TX_TRAILER_BYTES = 1, + EC_MSG_TX_PROTO_BYTES = 4, + EC_MSG_RX_PROTO_BYTES = 3, + EC_PROTO2_MSG_BYTES = 256, + EC_MAX_MSG_BYTES = 65536, +}; + +struct cros_ec_command { + uint32_t version; + uint32_t command; + uint32_t outsize; + uint32_t insize; + uint32_t result; + uint8_t data[0]; +}; + +struct platform_device___2; + +struct cros_ec_device { + const char *phys_name; + struct device *dev; + bool was_wake_device; + struct class *cros_class; + int (*cmd_readmem)(struct cros_ec_device *, unsigned int, unsigned int, void *); + u16 max_request; + u16 max_response; + u16 max_passthru; + u16 proto_version; + void *priv; + int irq; + u8 *din; + u8 *dout; + int din_size; + int dout_size; + bool wake_enabled; + bool suspended; + int (*cmd_xfer)(struct cros_ec_device *, struct cros_ec_command *); + int (*pkt_xfer)(struct cros_ec_device *, struct cros_ec_command *); + struct mutex lock; + u8 mkbp_event_supported; + bool host_sleep_v1; + struct blocking_notifier_head event_notifier; + struct ec_response_get_next_event_v1 event_data; + int event_size; + u32 host_event_wake_mask; + u32 last_resume_result; + ktime_t last_event_time; + struct notifier_block notifier_ready; + struct platform_device___2 *ec; + struct platform_device___2 *pd; +}; + +struct cros_ec_debugfs; + +struct cros_ec_dev { + struct device class_dev; + struct cros_ec_device *ec_dev; + struct device *dev; + struct cros_ec_debugfs *debug_info; + bool has_kb_wake_angle; + u16 cmd_offset; + u32 features[2]; +}; + +struct trace_event_raw_cros_ec_request_start { + struct trace_entry ent; + uint32_t version; + uint32_t offset; + uint32_t command; + uint32_t outsize; + uint32_t insize; + char __data[0]; +}; + +struct trace_event_raw_cros_ec_request_done { + struct trace_entry ent; + uint32_t version; + uint32_t offset; + uint32_t command; + uint32_t outsize; + uint32_t insize; + uint32_t result; + int retval; + char __data[0]; +}; + +struct trace_event_data_offsets_cros_ec_request_start {}; + +struct trace_event_data_offsets_cros_ec_request_done {}; + +typedef void (*btf_trace_cros_ec_request_start)(void *, struct cros_ec_command *); + +typedef void (*btf_trace_cros_ec_request_done)(void *, struct cros_ec_command *, int); + +struct platform_mhu_link { + int irq; + void *tx_reg; + void *rx_reg; +}; + +struct platform_mhu { + void *base; + struct platform_mhu_link mlink[3]; + struct mbox_chan chan[3]; + struct mbox_controller mbox; +}; + +struct acpi_table_pcct { + struct acpi_table_header header; + u32 flags; + u64 reserved; +}; + +enum acpi_pcct_type { + ACPI_PCCT_TYPE_GENERIC_SUBSPACE = 0, + ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE = 1, + ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2 = 2, + ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE = 3, + ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE = 4, + ACPI_PCCT_TYPE_RESERVED = 5, +}; + +struct acpi_pcct_subspace { + struct acpi_subtable_header header; + u8 reserved[6]; + u64 base_address; + u64 length; + struct acpi_generic_address doorbell_register; + u64 preserve_mask; + u64 write_mask; + u32 latency; + u32 max_access_rate; + u16 min_turnaround_time; +} __attribute__((packed)); + +struct acpi_pcct_hw_reduced_type2 { + struct acpi_subtable_header header; + u32 platform_interrupt; + u8 flags; + u8 reserved; + u64 base_address; + u64 length; + struct acpi_generic_address doorbell_register; + u64 preserve_mask; + u64 write_mask; + u32 latency; + u32 max_access_rate; + u16 min_turnaround_time; + struct acpi_generic_address platform_ack_register; + u64 ack_preserve_mask; + u64 
ack_write_mask; +} __attribute__((packed)); + +struct bcm2835_mbox { + void *regs; + spinlock_t lock; + struct mbox_controller controller; +}; + +struct slimpro_mbox_chan { + struct device *dev; + struct mbox_chan *chan; + void *reg; + int irq; + u32 rx_msg[3]; +}; + +struct slimpro_mbox { + struct mbox_controller mb_ctrl; + struct slimpro_mbox_chan mc[8]; + struct mbox_chan chans[8]; +}; + +struct hi3660_chan_info { + unsigned int dst_irq; + unsigned int ack_irq; +}; + +struct hi3660_mbox { + struct device *dev; + void *base; + struct mbox_chan chan[32]; + struct hi3660_chan_info mchan[32]; + struct mbox_controller controller; +}; + +struct hi6220_mbox; + +struct hi6220_mbox_chan { + unsigned int dir; + unsigned int dst_irq; + unsigned int ack_irq; + unsigned int slot; + struct hi6220_mbox *parent; +}; + +struct hi6220_mbox { + struct device *dev; + int irq; + bool tx_irq_mode; + void *ipc; + void *base; + unsigned int chan_num; + struct hi6220_mbox_chan *mchan; + void *irq_map_chan[32]; + struct mbox_chan *chan; + struct mbox_controller controller; +}; + +struct tegra_hsp; + +struct tegra_hsp_channel { + struct tegra_hsp *hsp; + struct mbox_chan *chan; + void *regs; +}; + +struct tegra_hsp_soc; + +struct tegra_hsp_mailbox; + +struct tegra_hsp { + struct device *dev; + const struct tegra_hsp_soc *soc; + struct mbox_controller mbox_db; + struct mbox_controller mbox_sm; + void *regs; + unsigned int doorbell_irq; + unsigned int *shared_irqs; + unsigned int shared_irq; + unsigned int num_sm; + unsigned int num_as; + unsigned int num_ss; + unsigned int num_db; + unsigned int num_si; + spinlock_t lock; + struct list_head doorbells; + struct tegra_hsp_mailbox *mailboxes; + long unsigned int mask; +}; + +struct tegra_hsp_doorbell { + struct tegra_hsp_channel channel; + struct list_head list; + const char *name; + unsigned int master; + unsigned int index; +}; + +struct tegra_hsp_mailbox { + struct tegra_hsp_channel channel; + unsigned int index; + bool producer; +}; + +struct tegra_hsp_db_map { + const char *name; + unsigned int master; + unsigned int index; +}; + +struct tegra_hsp_soc { + const struct tegra_hsp_db_map *map; + bool has_per_mb_ie; +}; + +struct sun6i_msgbox { + struct mbox_controller controller; + struct clk *clk; + spinlock_t lock; + void *regs; +}; + +struct hwspinlock___2; + +struct hwspinlock_ops { + int (*trylock)(struct hwspinlock___2 *); + void (*unlock)(struct hwspinlock___2 *); + void (*relax)(struct hwspinlock___2 *); +}; + +struct hwspinlock_device; + +struct hwspinlock___2 { + struct hwspinlock_device *bank; + spinlock_t lock; + void *priv; +}; + +struct hwspinlock_device { + struct device *dev; + const struct hwspinlock_ops *ops; + int base_id; + int num_locks; + struct hwspinlock___2 lock[0]; +}; + +struct regmap_field___2; + +struct devfreq_freqs { + long unsigned int old; + long unsigned int new; +}; + +struct devfreq_passive_data { + struct devfreq *parent; + int (*get_target_freq)(struct devfreq *, long unsigned int *); + struct devfreq *this; + struct notifier_block nb; +}; + +struct trace_event_raw_devfreq_monitor { + struct trace_entry ent; + long unsigned int freq; + long unsigned int busy_time; + long unsigned int total_time; + unsigned int polling_ms; + u32 __data_loc_dev_name; + char __data[0]; +}; + +struct trace_event_data_offsets_devfreq_monitor { + u32 dev_name; +}; + +typedef void (*btf_trace_devfreq_monitor)(void *, struct devfreq *); + +struct devfreq_notifier_devres { + struct devfreq *devfreq; + struct notifier_block *nb; + unsigned int list; +}; 
+ +struct devfreq_event_desc; + +struct devfreq_event_dev { + struct list_head node; + struct device dev; + struct mutex lock; + u32 enable_count; + const struct devfreq_event_desc *desc; +}; + +struct devfreq_event_ops; + +struct devfreq_event_desc { + const char *name; + u32 event_type; + void *driver_data; + const struct devfreq_event_ops *ops; +}; + +struct devfreq_event_data { + long unsigned int load_count; + long unsigned int total_count; +}; + +struct devfreq_event_ops { + int (*enable)(struct devfreq_event_dev *); + int (*disable)(struct devfreq_event_dev *); + int (*reset)(struct devfreq_event_dev *); + int (*set_event)(struct devfreq_event_dev *); + int (*get_event)(struct devfreq_event_dev *, struct devfreq_event_data *); +}; + +struct devfreq_simple_ondemand_data { + unsigned int upthreshold; + unsigned int downdifferential; +}; + +struct userspace_data { + long unsigned int user_frequency; + bool valid; +}; + +union extcon_property_value { + int intval; +}; + +struct extcon_cable; + +struct extcon_dev___2 { + const char *name; + const unsigned int *supported_cable; + const u32 *mutually_exclusive; + struct device dev; + struct raw_notifier_head nh_all; + struct raw_notifier_head *nh; + struct list_head entry; + int max_supported; + spinlock_t lock; + u32 state; + struct device_type extcon_dev_type; + struct extcon_cable *cables; + struct attribute_group attr_g_muex; + struct attribute **attrs_muex; + struct device_attribute *d_attrs_muex; +}; + +struct extcon_cable { + struct extcon_dev___2 *edev; + int cable_index; + struct attribute_group attr_g; + struct device_attribute attr_name; + struct device_attribute attr_state; + struct attribute *attrs[3]; + union extcon_property_value usb_propval[3]; + union extcon_property_value chg_propval[1]; + union extcon_property_value jack_propval[1]; + union extcon_property_value disp_propval[2]; + long unsigned int usb_bits[1]; + long unsigned int chg_bits[1]; + long unsigned int jack_bits[1]; + long unsigned int disp_bits[1]; +}; + +struct __extcon_info { + unsigned int type; + unsigned int id; + const char *name; +}; + +struct extcon_dev_notifier_devres { + struct extcon_dev___2 *edev; + unsigned int id; + struct notifier_block *nb; +}; + +struct mtk_smi_larb_iommu { + struct device *dev; + unsigned int mmu; +}; + +enum mtk_smi_gen { + MTK_SMI_GEN1 = 0, + MTK_SMI_GEN2 = 1, +}; + +struct mtk_smi_common_plat { + enum mtk_smi_gen gen; + bool has_gals; + u32 bus_sel; +}; + +struct mtk_smi_larb_gen { + int port_in_larb[17]; + void (*config_port)(struct device *); + unsigned int larb_direct_to_common_mask; + bool has_gals; +}; + +struct mtk_smi { + struct device *dev; + struct clk *clk_apb; + struct clk *clk_smi; + struct clk *clk_gals0; + struct clk *clk_gals1; + struct clk *clk_async; + union { + void *smi_ao_base; + void *base; + }; + const struct mtk_smi_common_plat *plat; +}; + +struct mtk_smi_larb { + struct mtk_smi smi; + void *base; + struct device *smi_common_dev; + const struct mtk_smi_larb_gen *larb_gen; + int larbid; + u32 *mmu; +}; + +struct tegra186_mc_client { + const char *name; + unsigned int sid; + struct { + unsigned int override; + unsigned int security; + } regs; +}; + +struct tegra186_mc_soc { + const struct tegra186_mc_client *clients; + unsigned int num_clients; +}; + +struct tegra186_mc { + struct device *dev; + void *regs; + const struct tegra186_mc_soc *soc; +}; + +struct emc_dvfs_latency { + uint32_t freq; + uint32_t latency; +}; + +struct mrq_emc_dvfs_latency_response { + uint32_t num_pairs; + struct 
emc_dvfs_latency pairs[14]; +}; + +struct tegra186_emc_dvfs { + long unsigned int latency; + long unsigned int rate; +}; + +struct tegra186_emc { + struct tegra_bpmp *bpmp; + struct device *dev; + struct clk *clk; + struct tegra186_emc_dvfs *dvfs; + unsigned int num_dvfs; + struct { + struct dentry *root; + long unsigned int min_rate; + long unsigned int max_rate; + } debugfs; +}; + +enum vme_resource_type { + VME_MASTER = 0, + VME_SLAVE = 1, + VME_DMA = 2, + VME_LM = 3, +}; + +struct vme_dma_attr { + u32 type; + void *private; +}; + +struct vme_resource { + enum vme_resource_type type; + struct list_head *entry; +}; + +struct vme_bridge; + +struct vme_dev { + int num; + struct vme_bridge *bridge; + struct device dev; + struct list_head drv_list; + struct list_head bridge_list; +}; + +struct vme_callback { + void (*func)(int, int, void *); + void *priv_data; +}; + +struct vme_irq { + int count; + struct vme_callback callback[256]; +}; + +struct vme_slave_resource; + +struct vme_master_resource; + +struct vme_dma_list; + +struct vme_lm_resource; + +struct vme_bridge { + char name[16]; + int num; + struct list_head master_resources; + struct list_head slave_resources; + struct list_head dma_resources; + struct list_head lm_resources; + struct list_head vme_error_handlers; + struct list_head devices; + struct device *parent; + void *driver_priv; + struct list_head bus_list; + struct vme_irq irq[7]; + struct mutex irq_mtx; + int (*slave_get)(struct vme_slave_resource *, int *, long long unsigned int *, long long unsigned int *, dma_addr_t *, u32 *, u32 *); + int (*slave_set)(struct vme_slave_resource *, int, long long unsigned int, long long unsigned int, dma_addr_t, u32, u32); + int (*master_get)(struct vme_master_resource *, int *, long long unsigned int *, long long unsigned int *, u32 *, u32 *, u32 *); + int (*master_set)(struct vme_master_resource *, int, long long unsigned int, long long unsigned int, u32, u32, u32); + ssize_t (*master_read)(struct vme_master_resource *, void *, size_t, loff_t); + ssize_t (*master_write)(struct vme_master_resource *, void *, size_t, loff_t); + unsigned int (*master_rmw)(struct vme_master_resource *, unsigned int, unsigned int, unsigned int, loff_t); + int (*dma_list_add)(struct vme_dma_list *, struct vme_dma_attr *, struct vme_dma_attr *, size_t); + int (*dma_list_exec)(struct vme_dma_list *); + int (*dma_list_empty)(struct vme_dma_list *); + void (*irq_set)(struct vme_bridge *, int, int, int); + int (*irq_generate)(struct vme_bridge *, int, int); + int (*lm_set)(struct vme_lm_resource *, long long unsigned int, u32, u32); + int (*lm_get)(struct vme_lm_resource *, long long unsigned int *, u32 *, u32 *); + int (*lm_attach)(struct vme_lm_resource *, int, void (*)(void *), void *); + int (*lm_detach)(struct vme_lm_resource *, int); + int (*slot_get)(struct vme_bridge *); + void * (*alloc_consistent)(struct device *, size_t, dma_addr_t *); + void (*free_consistent)(struct device *, size_t, void *, dma_addr_t); +}; + +struct vme_driver { + const char *name; + int (*match)(struct vme_dev *); + int (*probe)(struct vme_dev *); + int (*remove)(struct vme_dev *); + struct device_driver driver; + struct list_head devices; +}; + +struct vme_master_resource { + struct list_head list; + struct vme_bridge *parent; + spinlock_t lock; + int locked; + int number; + u32 address_attr; + u32 cycle_attr; + u32 width_attr; + struct resource bus_resource; + void *kern_base; +}; + +struct vme_slave_resource { + struct list_head list; + struct vme_bridge *parent; + struct mutex 
mtx; + int locked; + int number; + u32 address_attr; + u32 cycle_attr; +}; + +struct vme_dma_pattern { + u32 pattern; + u32 type; +}; + +struct vme_dma_pci { + dma_addr_t address; +}; + +struct vme_dma_vme { + long long unsigned int address; + u32 aspace; + u32 cycle; + u32 dwidth; +}; + +struct vme_dma_resource; + +struct vme_dma_list { + struct list_head list; + struct vme_dma_resource *parent; + struct list_head entries; + struct mutex mtx; +}; + +struct vme_dma_resource { + struct list_head list; + struct vme_bridge *parent; + struct mutex mtx; + int locked; + int number; + struct list_head pending; + struct list_head running; + u32 route_attr; +}; + +struct vme_lm_resource { + struct list_head list; + struct vme_bridge *parent; + struct mutex mtx; + int locked; + int number; + int monitors; +}; + +struct vme_error_handler { + struct list_head list; + long long unsigned int start; + long long unsigned int end; + long long unsigned int first_error; + u32 aspace; + unsigned int num_errors; +}; + +struct powercap_control_type; + +struct powercap_control_type_ops { + int (*set_enable)(struct powercap_control_type *, bool); + int (*get_enable)(struct powercap_control_type *, bool *); + int (*release)(struct powercap_control_type *); +}; + +struct powercap_control_type { + struct device dev; + struct idr idr; + int nr_zones; + const struct powercap_control_type_ops *ops; + struct mutex lock; + bool allocated; + struct list_head node; +}; + +struct powercap_zone; + +struct powercap_zone_ops { + int (*get_max_energy_range_uj)(struct powercap_zone *, u64 *); + int (*get_energy_uj)(struct powercap_zone *, u64 *); + int (*reset_energy_uj)(struct powercap_zone *); + int (*get_max_power_range_uw)(struct powercap_zone *, u64 *); + int (*get_power_uw)(struct powercap_zone *, u64 *); + int (*set_enable)(struct powercap_zone *, bool); + int (*get_enable)(struct powercap_zone *, bool *); + int (*release)(struct powercap_zone *); +}; + +struct powercap_zone_constraint; + +struct powercap_zone { + int id; + char *name; + void *control_type_inst; + const struct powercap_zone_ops *ops; + struct device dev; + int const_id_cnt; + struct idr idr; + struct idr *parent_idr; + void *private_data; + struct attribute **zone_dev_attrs; + int zone_attr_count; + struct attribute_group dev_zone_attr_group; + const struct attribute_group *dev_attr_groups[2]; + bool allocated; + struct powercap_zone_constraint *constraints; +}; + +struct powercap_zone_constraint_ops; + +struct powercap_zone_constraint { + int id; + struct powercap_zone *power_zone; + const struct powercap_zone_constraint_ops *ops; +}; + +struct powercap_zone_constraint_ops { + int (*set_power_limit_uw)(struct powercap_zone *, int, u64); + int (*get_power_limit_uw)(struct powercap_zone *, int, u64 *); + int (*set_time_window_us)(struct powercap_zone *, int, u64); + int (*get_time_window_us)(struct powercap_zone *, int, u64 *); + int (*get_max_power_uw)(struct powercap_zone *, int, u64 *); + int (*get_min_power_uw)(struct powercap_zone *, int, u64 *); + int (*get_max_time_window_us)(struct powercap_zone *, int, u64 *); + int (*get_min_time_window_us)(struct powercap_zone *, int, u64 *); + const char * (*get_name)(struct powercap_zone *, int); +}; + +struct powercap_constraint_attr { + struct device_attribute power_limit_attr; + struct device_attribute time_window_attr; + struct device_attribute max_power_attr; + struct device_attribute min_power_attr; + struct device_attribute max_time_window_attr; + struct device_attribute min_time_window_attr; + struct 
device_attribute name_attr; +}; + +enum { + CCI_IF_SLAVE = 0, + CCI_IF_MASTER = 1, + CCI_IF_MAX = 2, +}; + +struct event_range { + u32 min; + u32 max; +}; + +struct cci_pmu_hw_events { + struct perf_event **events; + long unsigned int *used_mask; + raw_spinlock_t pmu_lock; +}; + +struct cci_pmu; + +struct cci_pmu_model { + char *name; + u32 fixed_hw_cntrs; + u32 num_hw_cntrs; + u32 cntr_size; + struct attribute **format_attrs; + struct attribute **event_attrs; + struct event_range event_ranges[2]; + int (*validate_hw_event)(struct cci_pmu *, long unsigned int); + int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, long unsigned int); + void (*write_counters)(struct cci_pmu *, long unsigned int *); +}; + +struct cci_pmu { + void *base; + void *ctrl_base; + struct pmu pmu; + int cpu; + int nr_irqs; + int *irqs; + long unsigned int active_irqs; + const struct cci_pmu_model *model; + struct cci_pmu_hw_events hw_events; + struct platform_device *plat_device; + int num_cntrs; + atomic_t active_events; + struct mutex reserve_mutex; +}; + +enum cci_models { + CCI400_R0 = 0, + CCI400_R1 = 1, + CCI_MODEL_MAX = 2, +}; + +enum cci400_perf_events { + CCI400_PMU_CYCLES = 255, +}; + +struct arm_ccn_component { + void *base; + u32 type; + long unsigned int pmu_events_mask[1]; + union { + struct { + long unsigned int dt_cmp_mask[1]; + } xp; + }; +}; + +struct arm_ccn_dt { + int id; + void *base; + spinlock_t config_lock; + long unsigned int pmu_counters_mask[1]; + struct { + struct arm_ccn_component *source; + struct perf_event *event; + } pmu_counters[9]; + struct { + u64 l; + u64 h; + } cmp_mask[12]; + struct hrtimer hrtimer; + unsigned int cpu; + struct hlist_node node; + struct pmu pmu; +}; + +struct arm_ccn { + struct device *dev; + void *base; + unsigned int irq; + unsigned int sbas_present: 1; + unsigned int sbsx_present: 1; + int num_nodes; + struct arm_ccn_component *node; + int num_xps; + struct arm_ccn_component *xp; + struct arm_ccn_dt dt; + int mn_id; +}; + +struct arm_ccn_pmu_event { + struct device_attribute attr; + u32 type; + u32 event; + int num_ports; + int num_vcs; + const char *def; + int mask; +}; + +struct pmu_irq_ops { + void (*enable_pmuirq)(unsigned int); + void (*disable_pmuirq)(unsigned int); + void (*free_pmuirq)(unsigned int, int, void *); +}; + +typedef int (*armpmu_init_fn)(struct arm_pmu *); + +struct pmu_probe_info { + unsigned int cpuid; + unsigned int mask; + armpmu_init_fn init; +}; + +struct hisi_pmu; + +struct hisi_uncore_ops { + void (*write_evtype)(struct hisi_pmu *, int, u32); + int (*get_event_idx)(struct perf_event *); + u64 (*read_counter)(struct hisi_pmu *, struct hw_perf_event *); + void (*write_counter)(struct hisi_pmu *, struct hw_perf_event *, u64); + void (*enable_counter)(struct hisi_pmu *, struct hw_perf_event *); + void (*disable_counter)(struct hisi_pmu *, struct hw_perf_event *); + void (*enable_counter_int)(struct hisi_pmu *, struct hw_perf_event *); + void (*disable_counter_int)(struct hisi_pmu *, struct hw_perf_event *); + void (*start_counters)(struct hisi_pmu *); + void (*stop_counters)(struct hisi_pmu *); +}; + +struct hisi_pmu_hwevents { + struct perf_event *hw_events[16]; + long unsigned int used_mask[1]; +}; + +struct hisi_pmu { + struct pmu pmu; + const struct hisi_uncore_ops *ops; + struct hisi_pmu_hwevents pmu_events; + cpumask_t associated_cpus; + int on_cpu; + int irq; + struct device *dev; + struct hlist_node node; + int sccl_id; + int ccl_id; + void *base; + u32 index_id; + int num_counters; + int counter_bits; + int 
check_event; +}; + +struct hw_pmu_info { + u32 type; + u32 enable_mask; + void *csr; +}; + +struct xgene_pmu; + +struct xgene_pmu_dev { + struct hw_pmu_info *inf; + struct xgene_pmu *parent; + struct pmu pmu; + u8 max_counters; + long unsigned int cntr_assign_mask[1]; + u64 max_period; + const struct attribute_group **attr_groups; + struct perf_event *pmu_counter_event[4]; +}; + +struct xgene_pmu_ops; + +struct xgene_pmu { + struct device *dev; + struct hlist_node node; + int version; + void *pcppmu_csr; + u32 mcb_active_mask; + u32 mc_active_mask; + u32 l3c_active_mask; + cpumask_t cpu; + int irq; + raw_spinlock_t lock; + const struct xgene_pmu_ops *ops; + struct list_head l3cpmus; + struct list_head iobpmus; + struct list_head mcbpmus; + struct list_head mcpmus; +}; + +struct xgene_pmu_ops { + void (*mask_int)(struct xgene_pmu *); + void (*unmask_int)(struct xgene_pmu *); + u64 (*read_counter)(struct xgene_pmu_dev *, int); + void (*write_counter)(struct xgene_pmu_dev *, int, u64); + void (*write_evttype)(struct xgene_pmu_dev *, int, u32); + void (*write_agentmsk)(struct xgene_pmu_dev *, u32); + void (*write_agent1msk)(struct xgene_pmu_dev *, u32); + void (*enable_counter)(struct xgene_pmu_dev *, int); + void (*disable_counter)(struct xgene_pmu_dev *, int); + void (*enable_counter_int)(struct xgene_pmu_dev *, int); + void (*disable_counter_int)(struct xgene_pmu_dev *, int); + void (*reset_counters)(struct xgene_pmu_dev *); + void (*start_counters)(struct xgene_pmu_dev *); + void (*stop_counters)(struct xgene_pmu_dev *); +}; + +struct xgene_pmu_dev_ctx { + char *name; + struct list_head next; + struct xgene_pmu_dev *pmu_dev; + struct hw_pmu_info inf; +}; + +struct xgene_pmu_data { + int id; + u32 data; +}; + +enum xgene_pmu_version { + PCP_PMU_V1 = 1, + PCP_PMU_V2 = 2, + PCP_PMU_V3 = 3, +}; + +enum xgene_pmu_dev_type { + PMU_TYPE_L3C = 0, + PMU_TYPE_IOB = 1, + PMU_TYPE_IOB_SLOW = 2, + PMU_TYPE_MCB = 3, + PMU_TYPE_MC = 4, +}; + +struct trace_event_raw_mc_event { + struct trace_entry ent; + unsigned int error_type; + u32 __data_loc_msg; + u32 __data_loc_label; + u16 error_count; + u8 mc_index; + s8 top_layer; + s8 middle_layer; + s8 lower_layer; + long int address; + u8 grain_bits; + long int syndrome; + u32 __data_loc_driver_detail; + char __data[0]; +}; + +struct trace_event_raw_arm_event { + struct trace_entry ent; + u64 mpidr; + u64 midr; + u32 running_state; + u32 psci_state; + u8 affinity; + char __data[0]; +}; + +struct trace_event_raw_non_standard_event { + struct trace_entry ent; + char sec_type[16]; + char fru_id[16]; + u32 __data_loc_fru_text; + u8 sev; + u32 len; + u32 __data_loc_buf; + char __data[0]; +}; + +struct trace_event_raw_aer_event { + struct trace_entry ent; + u32 __data_loc_dev_name; + u32 status; + u8 severity; + u8 tlp_header_valid; + u32 tlp_header[4]; + char __data[0]; +}; + +struct trace_event_raw_memory_failure_event { + struct trace_entry ent; + long unsigned int pfn; + int type; + int result; + char __data[0]; +}; + +struct trace_event_data_offsets_mc_event { + u32 msg; + u32 label; + u32 driver_detail; +}; + +struct trace_event_data_offsets_arm_event {}; + +struct trace_event_data_offsets_non_standard_event { + u32 fru_text; + u32 buf; +}; + +struct trace_event_data_offsets_aer_event { + u32 dev_name; +}; + +struct trace_event_data_offsets_memory_failure_event {}; + +typedef void (*btf_trace_mc_event)(void *, const unsigned int, const char *, const char *, const int, const u8, const s8, const s8, const s8, long unsigned int, const u8, long unsigned int, const 
char *); + +typedef void (*btf_trace_arm_event)(void *, const struct cper_sec_proc_arm *); + +typedef void (*btf_trace_non_standard_event)(void *, const guid_t *, const guid_t *, const char *, const u8, const u8 *, const u32); + +typedef void (*btf_trace_aer_event)(void *, const char *, const u32, const u8, const u8, struct aer_header_log_regs *); + +typedef void (*btf_trace_memory_failure_event)(void *, long unsigned int, int, int); + +enum { + NVMEM_ADD = 1, + NVMEM_REMOVE = 2, + NVMEM_CELL_ADD = 3, + NVMEM_CELL_REMOVE = 4, +}; + +struct nvmem_cell_table { + const char *nvmem_name; + const struct nvmem_cell_info *cells; + size_t ncells; + struct list_head node; +}; + +struct nvmem_device___2 { + struct module *owner; + struct device dev; + int stride; + int word_size; + int id; + struct kref refcnt; + size_t size; + bool read_only; + bool root_only; + int flags; + enum nvmem_type type; + struct bin_attribute eeprom; + struct device *base_dev; + struct list_head cells; + nvmem_reg_read_t reg_read; + nvmem_reg_write_t reg_write; + struct gpio_desc *wp_gpio; + void *priv; +}; + +struct nvmem_cell { + const char *name; + int offset; + int bytes; + int bit_offset; + int nbits; + struct device_node *np; + struct nvmem_device___2 *nvmem; + struct list_head node; +}; + +struct net_device_devres { + struct net_device *ndev; +}; + +struct __kernel_old_timespec { + __kernel_old_time_t tv_sec; + long int tv_nsec; +}; + +struct __kernel_sock_timeval { + __s64 tv_sec; + __s64 tv_usec; +}; + +struct mmsghdr { + struct user_msghdr msg_hdr; + unsigned int msg_len; +}; + +struct scm_timestamping_internal { + struct timespec64 ts[3]; +}; + +enum sock_shutdown_cmd { + SHUT_RD = 0, + SHUT_WR = 1, + SHUT_RDWR = 2, +}; + +struct net_proto_family { + int family; + int (*create)(struct net *, struct socket *, int, int); + struct module *owner; +}; + +enum { + SOCK_WAKE_IO = 0, + SOCK_WAKE_WAITD = 1, + SOCK_WAKE_SPACE = 2, + SOCK_WAKE_URG = 3, +}; + +struct ifconf { + int ifc_len; + union { + char *ifcu_buf; + struct ifreq *ifcu_req; + } ifc_ifcu; +}; + +struct compat_ifmap { + compat_ulong_t mem_start; + compat_ulong_t mem_end; + short unsigned int base_addr; + unsigned char irq; + unsigned char dma; + unsigned char port; +}; + +struct compat_if_settings { + unsigned int type; + unsigned int size; + compat_uptr_t ifs_ifsu; +}; + +struct compat_ifreq { + union { + char ifrn_name[16]; + } ifr_ifrn; + union { + struct sockaddr ifru_addr; + struct sockaddr ifru_dstaddr; + struct sockaddr ifru_broadaddr; + struct sockaddr ifru_netmask; + struct sockaddr ifru_hwaddr; + short int ifru_flags; + compat_int_t ifru_ivalue; + compat_int_t ifru_mtu; + struct compat_ifmap ifru_map; + char ifru_slave[16]; + char ifru_newname[16]; + compat_caddr_t ifru_data; + struct compat_if_settings ifru_settings; + } ifr_ifru; +}; + +struct compat_ifconf { + compat_int_t ifc_len; + compat_caddr_t ifcbuf; +}; + +struct compat_ethtool_rx_flow_spec { + u32 flow_type; + union ethtool_flow_union h_u; + struct ethtool_flow_ext h_ext; + union ethtool_flow_union m_u; + struct ethtool_flow_ext m_ext; + compat_u64 ring_cookie; + u32 location; +}; + +struct compat_ethtool_rxnfc { + u32 cmd; + u32 flow_type; + compat_u64 data; + struct compat_ethtool_rx_flow_spec fs; + u32 rule_cnt; + u32 rule_locs[0]; +}; + +struct libipw_device; + +struct iw_spy_data; + +struct iw_public_data { + struct iw_spy_data *spy_data; + struct libipw_device *libipw; +}; + +struct iw_param { + __s32 value; + __u8 fixed; + __u8 disabled; + __u16 flags; +}; + +struct iw_point { 
+ void *pointer; + __u16 length; + __u16 flags; +}; + +struct iw_freq { + __s32 m; + __s16 e; + __u8 i; + __u8 flags; +}; + +struct iw_quality { + __u8 qual; + __u8 level; + __u8 noise; + __u8 updated; +}; + +struct iw_discarded { + __u32 nwid; + __u32 code; + __u32 fragment; + __u32 retries; + __u32 misc; +}; + +struct iw_missed { + __u32 beacon; +}; + +struct iw_statistics { + __u16 status; + struct iw_quality qual; + struct iw_discarded discard; + struct iw_missed miss; +}; + +union iwreq_data { + char name[16]; + struct iw_point essid; + struct iw_param nwid; + struct iw_freq freq; + struct iw_param sens; + struct iw_param bitrate; + struct iw_param txpower; + struct iw_param rts; + struct iw_param frag; + __u32 mode; + struct iw_param retry; + struct iw_point encoding; + struct iw_param power; + struct iw_quality qual; + struct sockaddr ap_addr; + struct sockaddr addr; + struct iw_param param; + struct iw_point data; +}; + +struct iw_priv_args { + __u32 cmd; + __u16 set_args; + __u16 get_args; + char name[16]; +}; + +struct compat_mmsghdr { + struct compat_msghdr msg_hdr; + compat_uint_t msg_len; +}; + +struct iw_request_info { + __u16 cmd; + __u16 flags; +}; + +struct iw_spy_data { + int spy_number; + u_char spy_address[48]; + struct iw_quality spy_stat[8]; + struct iw_quality spy_thr_low; + struct iw_quality spy_thr_high; + u_char spy_thr_under[8]; +}; + +enum { + SOF_TIMESTAMPING_TX_HARDWARE = 1, + SOF_TIMESTAMPING_TX_SOFTWARE = 2, + SOF_TIMESTAMPING_RX_HARDWARE = 4, + SOF_TIMESTAMPING_RX_SOFTWARE = 8, + SOF_TIMESTAMPING_SOFTWARE = 16, + SOF_TIMESTAMPING_SYS_HARDWARE = 32, + SOF_TIMESTAMPING_RAW_HARDWARE = 64, + SOF_TIMESTAMPING_OPT_ID = 128, + SOF_TIMESTAMPING_TX_SCHED = 256, + SOF_TIMESTAMPING_TX_ACK = 512, + SOF_TIMESTAMPING_OPT_CMSG = 1024, + SOF_TIMESTAMPING_OPT_TSONLY = 2048, + SOF_TIMESTAMPING_OPT_STATS = 4096, + SOF_TIMESTAMPING_OPT_PKTINFO = 8192, + SOF_TIMESTAMPING_OPT_TX_SWHW = 16384, + SOF_TIMESTAMPING_LAST = 16384, + SOF_TIMESTAMPING_MASK = 32767, +}; + +struct scm_ts_pktinfo { + __u32 if_index; + __u32 pkt_length; + __u32 reserved[2]; +}; + +struct sock_skb_cb { + u32 dropcount; +}; + +struct sock_ee_data_rfc4884 { + __u16 len; + __u8 flags; + __u8 reserved; +}; + +struct sock_extended_err { + __u32 ee_errno; + __u8 ee_origin; + __u8 ee_type; + __u8 ee_code; + __u8 ee_pad; + __u32 ee_info; + union { + __u32 ee_data; + struct sock_ee_data_rfc4884 ee_rfc4884; + }; +}; + +struct sock_exterr_skb { + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + } header; + struct sock_extended_err ee; + u16 addr_offset; + __be16 port; + u8 opt_stats: 1; + u8 unused: 7; +}; + +struct used_address { + struct __kernel_sockaddr_storage name; + unsigned int name_len; +}; + +struct linger { + int l_onoff; + int l_linger; +}; + +struct cmsghdr { + __kernel_size_t cmsg_len; + int cmsg_level; + int cmsg_type; +}; + +struct ucred { + __u32 pid; + __u32 uid; + __u32 gid; +}; + +struct mmpin { + struct user_struct *user; + unsigned int num_pg; +}; + +struct ubuf_info { + void (*callback)(struct ubuf_info *, bool); + union { + struct { + long unsigned int desc; + void *ctx; + }; + struct { + u32 id; + u16 len; + u16 zerocopy: 1; + u32 bytelen; + }; + }; + refcount_t refcnt; + struct mmpin mmp; +}; + +struct prot_inuse { + int val[64]; +}; + +struct sd_flow_limit { + u64 count; + unsigned int num_buckets; + unsigned int history_head; + u16 history[128]; + u8 buckets[0]; +}; + +struct softnet_data { + struct list_head poll_list; + struct sk_buff_head process_queue; + unsigned int 
processed; + unsigned int time_squeeze; + unsigned int received_rps; + struct softnet_data *rps_ipi_list; + struct sd_flow_limit *flow_limit; + struct Qdisc *output_queue; + struct Qdisc **output_queue_tailp; + struct sk_buff *completion_queue; + struct sk_buff_head xfrm_backlog; + struct { + u16 recursion; + u8 more; + } xmit; + int: 32; + unsigned int input_queue_head; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + call_single_data_t csd; + struct softnet_data *rps_ipi_next; + unsigned int cpu; + unsigned int input_queue_tail; + unsigned int dropped; + struct sk_buff_head input_pkt_queue; + struct napi_struct backlog; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +enum txtime_flags { + SOF_TXTIME_DEADLINE_MODE = 1, + SOF_TXTIME_REPORT_ERRORS = 2, + SOF_TXTIME_FLAGS_LAST = 2, + SOF_TXTIME_FLAGS_MASK = 3, +}; + +struct sock_txtime { + __kernel_clockid_t clockid; + __u32 flags; +}; + +enum sk_pacing { + SK_PACING_NONE = 0, + SK_PACING_NEEDED = 1, + SK_PACING_FQ = 2, +}; + +struct sockcm_cookie { + u64 transmit_time; + u32 mark; + u16 tsflags; +}; + +struct inet_bind_bucket { + possible_net_t ib_net; + int l3mdev; + short unsigned int port; + signed char fastreuse; + signed char fastreuseport; + kuid_t fastuid; + struct in6_addr fast_v6_rcv_saddr; + __be32 fast_rcv_saddr; + short unsigned int fast_sk_family; + bool fast_ipv6_only; + struct hlist_node node; + struct hlist_head owners; +}; + +struct tcp_fastopen_cookie { + __le64 val[2]; + s8 len; + bool exp; +}; + +struct tcp_sack_block { + u32 start_seq; + u32 end_seq; +}; + +struct tcp_options_received { + int ts_recent_stamp; + u32 ts_recent; + u32 rcv_tsval; + u32 rcv_tsecr; + u16 saw_tstamp: 1; + u16 tstamp_ok: 1; + u16 dsack: 1; + u16 wscale_ok: 1; + u16 sack_ok: 3; + u16 smc_ok: 1; + u16 snd_wscale: 4; + u16 rcv_wscale: 4; + u8 saw_unknown: 1; + u8 unused: 7; + u8 num_sacks; + u16 user_mss; + u16 mss_clamp; +}; + +struct tcp_rack { + u64 mstamp; + u32 rtt_us; + u32 end_seq; + u32 last_delivered; + u8 reo_wnd_steps; + u8 reo_wnd_persist: 5; + u8 dsack_seen: 1; + u8 advanced: 1; +}; + +struct tcp_sock_af_ops; + +struct tcp_md5sig_info; + +struct tcp_fastopen_request; + +struct tcp_sock { + struct inet_connection_sock inet_conn; + u16 tcp_header_len; + u16 gso_segs; + __be32 pred_flags; + u64 bytes_received; + u32 segs_in; + u32 data_segs_in; + u32 rcv_nxt; + u32 copied_seq; + u32 rcv_wup; + u32 snd_nxt; + u32 segs_out; + u32 data_segs_out; + u64 bytes_sent; + u64 bytes_acked; + u32 dsack_dups; + u32 snd_una; + u32 snd_sml; + u32 rcv_tstamp; + u32 lsndtime; + u32 last_oow_ack_time; + u32 compressed_ack_rcv_nxt; + u32 tsoffset; + struct list_head tsq_node; + struct list_head tsorted_sent_queue; + u32 snd_wl1; + u32 snd_wnd; + u32 max_window; + u32 mss_cache; + u32 window_clamp; + u32 rcv_ssthresh; + struct tcp_rack rack; + u16 advmss; + u8 compressed_ack; + u8 dup_ack_counter: 2; + u8 tlp_retrans: 1; + u8 unused: 5; + u32 chrono_start; + u32 chrono_stat[3]; + u8 chrono_type: 2; + u8 rate_app_limited: 1; + u8 fastopen_connect: 1; + u8 fastopen_no_cookie: 1; + u8 is_sack_reneg: 1; + u8 fastopen_client_fail: 2; + u8 nonagle: 4; + u8 thin_lto: 1; + u8 recvmsg_inq: 1; + u8 repair: 1; + u8 frto: 1; + u8 repair_queue; + u8 save_syn: 2; + u8 syn_data: 1; + u8 syn_fastopen: 1; + u8 syn_fastopen_exp: 1; + u8 syn_fastopen_ch: 1; + u8 syn_data_acked: 1; + u8 is_cwnd_limited: 1; + u32 tlp_high_seq; + u32 tcp_tx_delay; + u64 tcp_wstamp_ns; + u64 tcp_clock_cache; + u64 tcp_mstamp; + 
u32 srtt_us; + u32 mdev_us; + u32 mdev_max_us; + u32 rttvar_us; + u32 rtt_seq; + struct minmax rtt_min; + u32 packets_out; + u32 retrans_out; + u32 max_packets_out; + u32 max_packets_seq; + u16 urg_data; + u8 ecn_flags; + u8 keepalive_probes; + u32 reordering; + u32 reord_seen; + u32 snd_up; + struct tcp_options_received rx_opt; + u32 snd_ssthresh; + u32 snd_cwnd; + u32 snd_cwnd_cnt; + u32 snd_cwnd_clamp; + u32 snd_cwnd_used; + u32 snd_cwnd_stamp; + u32 prior_cwnd; + u32 prr_delivered; + u32 prr_out; + u32 delivered; + u32 delivered_ce; + u32 lost; + u32 app_limited; + u64 first_tx_mstamp; + u64 delivered_mstamp; + u32 rate_delivered; + u32 rate_interval_us; + u32 rcv_wnd; + u32 write_seq; + u32 notsent_lowat; + u32 pushed_seq; + u32 lost_out; + u32 sacked_out; + struct hrtimer pacing_timer; + struct hrtimer compressed_ack_timer; + struct sk_buff *lost_skb_hint; + struct sk_buff *retransmit_skb_hint; + struct rb_root out_of_order_queue; + struct sk_buff *ooo_last_skb; + struct tcp_sack_block duplicate_sack[1]; + struct tcp_sack_block selective_acks[4]; + struct tcp_sack_block recv_sack_cache[4]; + struct sk_buff *highest_sack; + int lost_cnt_hint; + u32 prior_ssthresh; + u32 high_seq; + u32 retrans_stamp; + u32 undo_marker; + int undo_retrans; + u64 bytes_retrans; + u32 total_retrans; + u32 urg_seq; + unsigned int keepalive_time; + unsigned int keepalive_intvl; + int linger2; + u8 bpf_sock_ops_cb_flags; + u16 timeout_rehash; + u32 rcv_ooopack; + u32 rcv_rtt_last_tsecr; + struct { + u32 rtt_us; + u32 seq; + u64 time; + } rcv_rtt_est; + struct { + u32 space; + u32 seq; + u64 time; + } rcvq_space; + struct { + u32 probe_seq_start; + u32 probe_seq_end; + } mtu_probe; + u32 mtu_info; + bool is_mptcp; + bool syn_smc; + const struct tcp_sock_af_ops *af_specific; + struct tcp_md5sig_info *md5sig_info; + struct tcp_fastopen_request *fastopen_req; + struct request_sock *fastopen_rsk; + struct saved_syn *saved_syn; +}; + +struct tcp_md5sig_key; + +struct tcp_sock_af_ops { + struct tcp_md5sig_key * (*md5_lookup)(const struct sock *, const struct sock *); + int (*calc_md5_hash)(char *, const struct tcp_md5sig_key *, const struct sock *, const struct sk_buff *); + int (*md5_parse)(struct sock *, int, sockptr_t, int); +}; + +struct tcp_md5sig_info { + struct hlist_head head; + struct callback_head rcu; +}; + +struct tcp_fastopen_request { + struct tcp_fastopen_cookie cookie; + struct msghdr *data; + size_t size; + int copied; + struct ubuf_info *uarg; +}; + +union tcp_md5_addr { + struct in_addr a4; + struct in6_addr a6; +}; + +struct tcp_md5sig_key { + struct hlist_node node; + u8 keylen; + u8 family; + u8 prefixlen; + union tcp_md5_addr addr; + int l3index; + u8 key[80]; + struct callback_head rcu; +}; + +struct net_protocol { + int (*early_demux)(struct sk_buff *); + int (*early_demux_handler)(struct sk_buff *); + int (*handler)(struct sk_buff *); + int (*err_handler)(struct sk_buff *, u32); + unsigned int no_policy: 1; + unsigned int netns_ok: 1; + unsigned int icmp_strict_tag_validation: 1; +}; + +struct cgroup_cls_state { + struct cgroup_subsys_state css; + u32 classid; +}; + +enum { + SK_MEMINFO_RMEM_ALLOC = 0, + SK_MEMINFO_RCVBUF = 1, + SK_MEMINFO_WMEM_ALLOC = 2, + SK_MEMINFO_SNDBUF = 3, + SK_MEMINFO_FWD_ALLOC = 4, + SK_MEMINFO_WMEM_QUEUED = 5, + SK_MEMINFO_OPTMEM = 6, + SK_MEMINFO_BACKLOG = 7, + SK_MEMINFO_DROPS = 8, + SK_MEMINFO_VARS = 9, +}; + +enum sknetlink_groups { + SKNLGRP_NONE = 0, + SKNLGRP_INET_TCP_DESTROY = 1, + SKNLGRP_INET_UDP_DESTROY = 2, + SKNLGRP_INET6_TCP_DESTROY = 3, + 
SKNLGRP_INET6_UDP_DESTROY = 4, + __SKNLGRP_MAX = 5, +}; + +struct inet_request_sock { + struct request_sock req; + u16 snd_wscale: 4; + u16 rcv_wscale: 4; + u16 tstamp_ok: 1; + u16 sack_ok: 1; + u16 wscale_ok: 1; + u16 ecn_ok: 1; + u16 acked: 1; + u16 no_srccheck: 1; + u16 smc_ok: 1; + u32 ir_mark; + union { + struct ip_options_rcu *ireq_opt; + struct { + struct ipv6_txoptions *ipv6_opt; + struct sk_buff *pktopts; + }; + }; +}; + +struct tcp_request_sock_ops; + +struct tcp_request_sock { + struct inet_request_sock req; + const struct tcp_request_sock_ops *af_specific; + u64 snt_synack; + bool tfo_listener; + bool is_mptcp; + bool drop_req; + u32 txhash; + u32 rcv_isn; + u32 snt_isn; + u32 ts_off; + u32 last_oow_ack_time; + u32 rcv_nxt; + u8 syn_tos; +}; + +enum tcp_synack_type { + TCP_SYNACK_NORMAL = 0, + TCP_SYNACK_FASTOPEN = 1, + TCP_SYNACK_COOKIE = 2, +}; + +struct tcp_request_sock_ops { + u16 mss_clamp; + struct tcp_md5sig_key * (*req_md5_lookup)(const struct sock *, const struct sock *); + int (*calc_md5_hash)(char *, const struct tcp_md5sig_key *, const struct sock *, const struct sk_buff *); + void (*init_req)(struct request_sock *, const struct sock *, struct sk_buff *); + __u32 (*cookie_init_seq)(const struct sk_buff *, __u16 *); + struct dst_entry * (*route_req)(const struct sock *, struct flowi *, const struct request_sock *); + u32 (*init_seq)(const struct sk_buff *); + u32 (*init_ts_off)(const struct net *, const struct sk_buff *); + int (*send_synack)(const struct sock *, struct dst_entry *, struct flowi *, struct request_sock *, struct tcp_fastopen_cookie *, enum tcp_synack_type, struct sk_buff *); +}; + +struct nf_conntrack { + atomic_t use; +}; + +enum { + SKB_FCLONE_UNAVAILABLE = 0, + SKB_FCLONE_ORIG = 1, + SKB_FCLONE_CLONE = 2, +}; + +struct sk_buff_fclones { + struct sk_buff skb1; + struct sk_buff skb2; + refcount_t fclone_ref; +}; + +struct skb_seq_state { + __u32 lower_offset; + __u32 upper_offset; + __u32 frag_idx; + __u32 stepped_offset; + struct sk_buff *root_skb; + struct sk_buff *cur_skb; + __u8 *frag_data; +}; + +struct skb_checksum_ops { + __wsum (*update)(const void *, int, __wsum); + __wsum (*combine)(__wsum, __wsum, int, int); +}; + +struct skb_gso_cb { + union { + int mac_offset; + int data_offset; + }; + int encap_level; + __wsum csum; + __u16 csum_start; +}; + +struct napi_gro_cb { + void *frag0; + unsigned int frag0_len; + int data_offset; + u16 flush; + u16 flush_id; + u16 count; + u16 gro_remcsum_start; + long unsigned int age; + u16 proto; + u8 same_flow: 1; + u8 encap_mark: 1; + u8 csum_valid: 1; + u8 csum_cnt: 3; + u8 free: 2; + u8 is_ipv6: 1; + u8 is_fou: 1; + u8 is_atomic: 1; + u8 recursion_counter: 4; + u8 is_flist: 1; + __wsum csum; + struct sk_buff *last; +}; + +struct vlan_hdr { + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; + +struct vlan_ethhdr { + unsigned char h_dest[6]; + unsigned char h_source[6]; + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; + +struct qdisc_walker { + int stop; + int skip; + int count; + int (*fn)(struct Qdisc *, long unsigned int, struct qdisc_walker *); +}; + +struct ip_auth_hdr { + __u8 nexthdr; + __u8 hdrlen; + __be16 reserved; + __be32 spi; + __be32 seq_no; + __u8 auth_data[0]; +}; + +struct frag_hdr { + __u8 nexthdr; + __u8 reserved; + __be16 frag_off; + __be32 identification; +}; + +enum { + SCM_TSTAMP_SND = 0, + SCM_TSTAMP_SCHED = 1, + SCM_TSTAMP_ACK = 2, +}; + +struct mpls_shim_hdr { + __be32 label_stack_entry; +}; + +struct napi_alloc_cache { + struct 
page_frag_cache page; + unsigned int skb_count; + void *skb_cache[64]; +}; + +struct ahash_request___2; + +struct scm_cookie { + struct pid *pid; + struct scm_fp_list *fp; + struct scm_creds creds; + u32 secid; +}; + +struct scm_timestamping { + struct __kernel_old_timespec ts[3]; +}; + +struct scm_timestamping64 { + struct __kernel_timespec ts[3]; +}; + +enum { + TCA_STATS_UNSPEC = 0, + TCA_STATS_BASIC = 1, + TCA_STATS_RATE_EST = 2, + TCA_STATS_QUEUE = 3, + TCA_STATS_APP = 4, + TCA_STATS_RATE_EST64 = 5, + TCA_STATS_PAD = 6, + TCA_STATS_BASIC_HW = 7, + TCA_STATS_PKT64 = 8, + __TCA_STATS_MAX = 9, +}; + +struct gnet_stats_basic { + __u64 bytes; + __u32 packets; +}; + +struct gnet_stats_rate_est { + __u32 bps; + __u32 pps; +}; + +struct gnet_stats_rate_est64 { + __u64 bps; + __u64 pps; +}; + +struct gnet_estimator { + signed char interval; + unsigned char ewma_log; +}; + +struct net_rate_estimator { + struct gnet_stats_basic_packed *bstats; + spinlock_t *stats_lock; + seqcount_t *running; + struct gnet_stats_basic_cpu *cpu_bstats; + u8 ewma_log; + u8 intvl_log; + seqcount_t seq; + u64 last_packets; + u64 last_bytes; + u64 avpps; + u64 avbps; + long unsigned int next_jiffies; + struct timer_list timer; + struct callback_head rcu; +}; + +struct rtgenmsg { + unsigned char rtgen_family; +}; + +enum rtnetlink_groups { + RTNLGRP_NONE = 0, + RTNLGRP_LINK = 1, + RTNLGRP_NOTIFY = 2, + RTNLGRP_NEIGH = 3, + RTNLGRP_TC = 4, + RTNLGRP_IPV4_IFADDR = 5, + RTNLGRP_IPV4_MROUTE = 6, + RTNLGRP_IPV4_ROUTE = 7, + RTNLGRP_IPV4_RULE = 8, + RTNLGRP_IPV6_IFADDR = 9, + RTNLGRP_IPV6_MROUTE = 10, + RTNLGRP_IPV6_ROUTE = 11, + RTNLGRP_IPV6_IFINFO = 12, + RTNLGRP_DECnet_IFADDR = 13, + RTNLGRP_NOP2 = 14, + RTNLGRP_DECnet_ROUTE = 15, + RTNLGRP_DECnet_RULE = 16, + RTNLGRP_NOP4 = 17, + RTNLGRP_IPV6_PREFIX = 18, + RTNLGRP_IPV6_RULE = 19, + RTNLGRP_ND_USEROPT = 20, + RTNLGRP_PHONET_IFADDR = 21, + RTNLGRP_PHONET_ROUTE = 22, + RTNLGRP_DCB = 23, + RTNLGRP_IPV4_NETCONF = 24, + RTNLGRP_IPV6_NETCONF = 25, + RTNLGRP_MDB = 26, + RTNLGRP_MPLS_ROUTE = 27, + RTNLGRP_NSID = 28, + RTNLGRP_MPLS_NETCONF = 29, + RTNLGRP_IPV4_MROUTE_R = 30, + RTNLGRP_IPV6_MROUTE_R = 31, + RTNLGRP_NEXTHOP = 32, + RTNLGRP_BRVLAN = 33, + __RTNLGRP_MAX = 34, +}; + +enum { + NETNSA_NONE = 0, + NETNSA_NSID = 1, + NETNSA_PID = 2, + NETNSA_FD = 3, + NETNSA_TARGET_NSID = 4, + NETNSA_CURRENT_NSID = 5, + __NETNSA_MAX = 6, +}; + +struct pcpu_gen_cookie { + local_t nesting; + u64 last; +}; + +struct gen_cookie { + struct pcpu_gen_cookie *local; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + atomic64_t forward_last; + atomic64_t reverse_last; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +enum rtnl_link_flags { + RTNL_FLAG_DOIT_UNLOCKED = 1, +}; + +struct net_fill_args { + u32 portid; + u32 seq; + int flags; + int cmd; + int nsid; + bool add_ref; + int ref_nsid; +}; + +struct rtnl_net_dump_cb { + struct net *tgt_net; + struct net *ref_net; + struct sk_buff *skb; + struct net_fill_args fillargs; + int idx; + int s_idx; +}; + +typedef u16 u_int16_t; + +typedef u32 u_int32_t; + +typedef u64 u_int64_t; + +struct flow_dissector_key_control { + u16 thoff; + u16 addr_type; + u32 flags; +}; + +enum flow_dissect_ret { + FLOW_DISSECT_RET_OUT_GOOD = 0, + FLOW_DISSECT_RET_OUT_BAD = 1, + FLOW_DISSECT_RET_PROTO_AGAIN = 2, + FLOW_DISSECT_RET_IPPROTO_AGAIN = 3, + FLOW_DISSECT_RET_CONTINUE = 4, +}; + +struct flow_dissector_key_basic { + __be16 n_proto; + u8 ip_proto; + u8 padding; +}; + +struct flow_dissector_key_tags { + 
u32 flow_label; +}; + +struct flow_dissector_key_vlan { + union { + struct { + u16 vlan_id: 12; + u16 vlan_dei: 1; + u16 vlan_priority: 3; + }; + __be16 vlan_tci; + }; + __be16 vlan_tpid; +}; + +struct flow_dissector_mpls_lse { + u32 mpls_ttl: 8; + u32 mpls_bos: 1; + u32 mpls_tc: 3; + u32 mpls_label: 20; +}; + +struct flow_dissector_key_mpls { + struct flow_dissector_mpls_lse ls[7]; + u8 used_lses; +}; + +struct flow_dissector_key_enc_opts { + u8 data[255]; + u8 len; + __be16 dst_opt_type; +}; + +struct flow_dissector_key_keyid { + __be32 keyid; +}; + +struct flow_dissector_key_ipv4_addrs { + __be32 src; + __be32 dst; +}; + +struct flow_dissector_key_ipv6_addrs { + struct in6_addr src; + struct in6_addr dst; +}; + +struct flow_dissector_key_tipc { + __be32 key; +}; + +struct flow_dissector_key_addrs { + union { + struct flow_dissector_key_ipv4_addrs v4addrs; + struct flow_dissector_key_ipv6_addrs v6addrs; + struct flow_dissector_key_tipc tipckey; + }; +}; + +struct flow_dissector_key_arp { + __u32 sip; + __u32 tip; + __u8 op; + unsigned char sha[6]; + unsigned char tha[6]; +}; + +struct flow_dissector_key_ports { + union { + __be32 ports; + struct { + __be16 src; + __be16 dst; + }; + }; +}; + +struct flow_dissector_key_icmp { + struct { + u8 type; + u8 code; + }; + u16 id; +}; + +struct flow_dissector_key_eth_addrs { + unsigned char dst[6]; + unsigned char src[6]; +}; + +struct flow_dissector_key_tcp { + __be16 flags; +}; + +struct flow_dissector_key_ip { + __u8 tos; + __u8 ttl; +}; + +struct flow_dissector_key_meta { + int ingress_ifindex; + u16 ingress_iftype; +}; + +struct flow_dissector_key_ct { + u16 ct_state; + u16 ct_zone; + u32 ct_mark; + u32 ct_labels[4]; +}; + +struct flow_dissector_key_hash { + u32 hash; +}; + +struct flow_dissector_key { + enum flow_dissector_key_id key_id; + size_t offset; +}; + +struct flow_keys_basic { + struct flow_dissector_key_control control; + struct flow_dissector_key_basic basic; +}; + +struct flow_keys { + struct flow_dissector_key_control control; + struct flow_dissector_key_basic basic; + struct flow_dissector_key_tags tags; + struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_vlan cvlan; + struct flow_dissector_key_keyid keyid; + struct flow_dissector_key_ports ports; + struct flow_dissector_key_icmp icmp; + struct flow_dissector_key_addrs addrs; + int: 32; +}; + +struct flow_keys_digest { + u8 data[16]; +}; + +enum ip_conntrack_info { + IP_CT_ESTABLISHED = 0, + IP_CT_RELATED = 1, + IP_CT_NEW = 2, + IP_CT_IS_REPLY = 3, + IP_CT_ESTABLISHED_REPLY = 3, + IP_CT_RELATED_REPLY = 4, + IP_CT_NUMBER = 5, + IP_CT_UNTRACKED = 7, +}; + +struct xt_table_info; + +struct xt_table { + struct list_head list; + unsigned int valid_hooks; + struct xt_table_info *private; + struct module *me; + u_int8_t af; + int priority; + int (*table_init)(struct net *); + const char name[32]; +}; + +union nf_inet_addr { + __u32 all[4]; + __be32 ip; + __be32 ip6[4]; + struct in_addr in; + struct in6_addr in6; +}; + +struct ip_ct_tcp_state { + u_int32_t td_end; + u_int32_t td_maxend; + u_int32_t td_maxwin; + u_int32_t td_maxack; + u_int8_t td_scale; + u_int8_t flags; +}; + +struct ip_ct_tcp { + struct ip_ct_tcp_state seen[2]; + u_int8_t state; + u_int8_t last_dir; + u_int8_t retrans; + u_int8_t last_index; + u_int32_t last_seq; + u_int32_t last_ack; + u_int32_t last_end; + u_int16_t last_win; + u_int8_t last_wscale; + u_int8_t last_flags; +}; + +union nf_conntrack_man_proto { + __be16 all; + struct { + __be16 port; + } tcp; + struct { + __be16 port; + } udp; + struct { 
+ __be16 id; + } icmp; + struct { + __be16 port; + } dccp; + struct { + __be16 port; + } sctp; + struct { + __be16 key; + } gre; +}; + +struct nf_ct_dccp { + u_int8_t role[2]; + u_int8_t state; + u_int8_t last_pkt; + u_int8_t last_dir; + u_int64_t handshake_seq; +}; + +struct ip_ct_sctp { + enum sctp_conntrack state; + __be32 vtag[2]; + u8 last_dir; + u8 flags; +}; + +struct nf_ct_event; + +struct nf_ct_event_notifier { + int (*fcn)(unsigned int, struct nf_ct_event *); +}; + +struct nf_exp_event; + +struct nf_exp_event_notifier { + int (*fcn)(unsigned int, struct nf_exp_event *); +}; + +enum bpf_ret_code { + BPF_OK = 0, + BPF_DROP = 2, + BPF_REDIRECT = 7, + BPF_LWT_REROUTE = 128, +}; + +enum { + BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 1, + BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 2, + BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 4, +}; + +struct ip_tunnel_parm { + char name[16]; + int link; + __be16 i_flags; + __be16 o_flags; + __be32 i_key; + __be32 o_key; + struct iphdr iph; +}; + +struct ip_tunnel_key { + __be64 tun_id; + union { + struct { + __be32 src; + __be32 dst; + } ipv4; + struct { + struct in6_addr src; + struct in6_addr dst; + } ipv6; + } u; + __be16 tun_flags; + u8 tos; + u8 ttl; + __be32 label; + __be16 tp_src; + __be16 tp_dst; +}; + +struct dst_cache_pcpu; + +struct dst_cache { + struct dst_cache_pcpu *cache; + long unsigned int reset_ts; +}; + +struct ip_tunnel_info { + struct ip_tunnel_key key; + struct dst_cache dst_cache; + u8 options_len; + u8 mode; +}; + +struct lwtunnel_state { + __u16 type; + __u16 flags; + __u16 headroom; + atomic_t refcnt; + int (*orig_output)(struct net *, struct sock *, struct sk_buff *); + int (*orig_input)(struct sk_buff *); + struct callback_head rcu; + __u8 data[0]; +}; + +union tcp_word_hdr { + struct tcphdr hdr; + __be32 words[5]; +}; + +struct arphdr { + __be16 ar_hrd; + __be16 ar_pro; + unsigned char ar_hln; + unsigned char ar_pln; + __be16 ar_op; +}; + +struct fib_info; + +struct fib_nh { + struct fib_nh_common nh_common; + struct hlist_node nh_hash; + struct fib_info *nh_parent; + __u32 nh_tclassid; + __be32 nh_saddr; + int nh_saddr_genid; +}; + +struct fib_info { + struct hlist_node fib_hash; + struct hlist_node fib_lhash; + struct list_head nh_list; + struct net *fib_net; + int fib_treeref; + refcount_t fib_clntref; + unsigned int fib_flags; + unsigned char fib_dead; + unsigned char fib_protocol; + unsigned char fib_scope; + unsigned char fib_type; + __be32 fib_prefsrc; + u32 fib_tb_id; + u32 fib_priority; + struct dst_metrics *fib_metrics; + int fib_nhs; + bool fib_nh_is_v6; + bool nh_updated; + struct nexthop *nh; + struct callback_head rcu; + struct fib_nh fib_nh[0]; +}; + +struct nh_info; + +struct nh_group; + +struct nexthop { + struct rb_node rb_node; + struct list_head fi_list; + struct list_head f6i_list; + struct list_head fdb_list; + struct list_head grp_list; + struct net *net; + u32 id; + u8 protocol; + u8 nh_flags; + bool is_group; + refcount_t refcnt; + struct callback_head rcu; + union { + struct nh_info *nh_info; + struct nh_group *nh_grp; + }; +}; + +struct nh_info { + struct hlist_node dev_hash; + struct nexthop *nh_parent; + u8 family; + bool reject_nh; + bool fdb_nh; + union { + struct fib_nh_common fib_nhc; + struct fib_nh fib_nh; + struct fib6_nh fib6_nh; + }; +}; + +struct nh_grp_entry { + struct nexthop *nh; + u8 weight; + atomic_t upper_bound; + struct list_head nh_list; + struct nexthop *nh_parent; +}; + +struct nh_group { + struct nh_group *spare; + u16 num_nh; + bool mpath; + bool fdb_nh; + bool has_v4; + struct 
nh_grp_entry nh_entries[0]; +}; + +enum metadata_type { + METADATA_IP_TUNNEL = 0, + METADATA_HW_PORT_MUX = 1, +}; + +struct hw_port_info { + struct net_device *lower_dev; + u32 port_id; +}; + +struct metadata_dst { + struct dst_entry dst; + enum metadata_type type; + union { + struct ip_tunnel_info tun_info; + struct hw_port_info port_info; + } u; +}; + +struct gre_base_hdr { + __be16 flags; + __be16 protocol; +}; + +struct gre_full_hdr { + struct gre_base_hdr fixed_header; + __be16 csum; + __be16 reserved1; + __be32 key; + __be32 seq; +}; + +struct pptp_gre_header { + struct gre_base_hdr gre_hd; + __be16 payload_len; + __be16 call_id; + __be32 seq; + __be32 ack; +}; + +struct tipc_basic_hdr { + __be32 w[4]; +}; + +struct icmphdr { + __u8 type; + __u8 code; + __sum16 checksum; + union { + struct { + __be16 id; + __be16 sequence; + } echo; + __be32 gateway; + struct { + __be16 __unused; + __be16 mtu; + } frag; + __u8 reserved[4]; + } un; +}; + +enum l2tp_debug_flags { + L2TP_MSG_DEBUG = 1, + L2TP_MSG_CONTROL = 2, + L2TP_MSG_SEQ = 4, + L2TP_MSG_DATA = 8, +}; + +struct pppoe_tag { + __be16 tag_type; + __be16 tag_len; + char tag_data[0]; +}; + +struct pppoe_hdr { + __u8 type: 4; + __u8 ver: 4; + __u8 code; + __be16 sid; + __be16 length; + struct pppoe_tag tag[0]; +}; + +struct mpls_label { + __be32 entry; +}; + +enum batadv_packettype { + BATADV_IV_OGM = 0, + BATADV_BCAST = 1, + BATADV_CODED = 2, + BATADV_ELP = 3, + BATADV_OGM2 = 4, + BATADV_UNICAST = 64, + BATADV_UNICAST_FRAG = 65, + BATADV_UNICAST_4ADDR = 66, + BATADV_ICMP = 67, + BATADV_UNICAST_TVLV = 68, +}; + +struct batadv_unicast_packet { + __u8 packet_type; + __u8 version; + __u8 ttl; + __u8 ttvn; + __u8 dest[6]; +}; + +struct nf_conntrack_zone { + u16 id; + u8 flags; + u8 dir; +}; + +struct nf_conntrack_man { + union nf_inet_addr u3; + union nf_conntrack_man_proto u; + u_int16_t l3num; +}; + +struct nf_conntrack_tuple { + struct nf_conntrack_man src; + struct { + union nf_inet_addr u3; + union { + __be16 all; + struct { + __be16 port; + } tcp; + struct { + __be16 port; + } udp; + struct { + u_int8_t type; + u_int8_t code; + } icmp; + struct { + __be16 port; + } dccp; + struct { + __be16 port; + } sctp; + struct { + __be16 key; + } gre; + } u; + u_int8_t protonum; + u_int8_t dir; + } dst; +}; + +struct nf_conntrack_tuple_hash { + struct hlist_nulls_node hnnode; + struct nf_conntrack_tuple tuple; +}; + +struct nf_ct_udp { + long unsigned int stream_ts; +}; + +struct nf_ct_gre { + unsigned int stream_timeout; + unsigned int timeout; +}; + +union nf_conntrack_proto { + struct nf_ct_dccp dccp; + struct ip_ct_sctp sctp; + struct ip_ct_tcp tcp; + struct nf_ct_udp udp; + struct nf_ct_gre gre; + unsigned int tmpl_padto; +}; + +struct nf_ct_ext; + +struct nf_conn { + struct nf_conntrack ct_general; + spinlock_t lock; + u32 timeout; + struct nf_conntrack_zone zone; + struct nf_conntrack_tuple_hash tuplehash[2]; + long unsigned int status; + u16 cpu; + possible_net_t ct_net; + struct hlist_node nat_bysource; + struct { } __nfct_init_offset; + struct nf_conn *master; + u_int32_t mark; + u_int32_t secmark; + struct nf_ct_ext *ext; + union nf_conntrack_proto proto; +}; + +struct xt_table_info { + unsigned int size; + unsigned int number; + unsigned int initial_entries; + unsigned int hook_entry[5]; + unsigned int underflow[5]; + unsigned int stacksize; + void ***jumpstack; + unsigned char entries[0]; +}; + +struct nf_conntrack_tuple_mask { + struct { + union nf_inet_addr u3; + union nf_conntrack_man_proto u; + } src; +}; + +struct nf_ct_ext { + u8 
offset[9]; + u8 len; + char data[0]; +}; + +struct nf_conntrack_helper; + +struct nf_conntrack_expect { + struct hlist_node lnode; + struct hlist_node hnode; + struct nf_conntrack_tuple tuple; + struct nf_conntrack_tuple_mask mask; + void (*expectfn)(struct nf_conn *, struct nf_conntrack_expect *); + struct nf_conntrack_helper *helper; + struct nf_conn *master; + struct timer_list timeout; + refcount_t use; + unsigned int flags; + unsigned int class; + union nf_inet_addr saved_addr; + union nf_conntrack_man_proto saved_proto; + enum ip_conntrack_dir dir; + struct callback_head rcu; +}; + +enum nf_ct_ext_id { + NF_CT_EXT_HELPER = 0, + NF_CT_EXT_NAT = 1, + NF_CT_EXT_SEQADJ = 2, + NF_CT_EXT_ACCT = 3, + NF_CT_EXT_ECACHE = 4, + NF_CT_EXT_TSTAMP = 5, + NF_CT_EXT_TIMEOUT = 6, + NF_CT_EXT_LABELS = 7, + NF_CT_EXT_SYNPROXY = 8, + NF_CT_EXT_NUM = 9, +}; + +struct nf_ct_event { + struct nf_conn *ct; + u32 portid; + int report; +}; + +struct nf_exp_event { + struct nf_conntrack_expect *exp; + u32 portid; + int report; +}; + +struct nf_conn_labels { + long unsigned int bits[2]; +}; + +struct _flow_keys_digest_data { + __be16 n_proto; + u8 ip_proto; + u8 padding; + __be32 ports; + __be32 src; + __be32 dst; +}; + +struct rps_sock_flow_table { + u32 mask; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + u32 ents[0]; +}; + +enum { + IF_OPER_UNKNOWN = 0, + IF_OPER_NOTPRESENT = 1, + IF_OPER_DOWN = 2, + IF_OPER_LOWERLAYERDOWN = 3, + IF_OPER_TESTING = 4, + IF_OPER_DORMANT = 5, + IF_OPER_UP = 6, +}; + +struct ipv4_devconf { + void *sysctl; + int data[32]; + long unsigned int state[1]; +}; + +enum nf_dev_hooks { + NF_NETDEV_INGRESS = 0, + NF_NETDEV_NUMHOOKS = 1, +}; + +struct ifbond { + __s32 bond_mode; + __s32 num_slaves; + __s32 miimon; +}; + +typedef struct ifbond ifbond; + +struct ifslave { + __s32 slave_id; + char slave_name[16]; + __s8 link; + __s8 state; + __u32 link_failure_count; +}; + +typedef struct ifslave ifslave; + +struct netdev_boot_setup { + char name[16]; + struct ifmap map; +}; + +enum { + NAPIF_STATE_SCHED = 1, + NAPIF_STATE_MISSED = 2, + NAPIF_STATE_DISABLE = 4, + NAPIF_STATE_NPSVC = 8, + NAPIF_STATE_LISTED = 16, + NAPIF_STATE_NO_BUSY_POLL = 32, + NAPIF_STATE_IN_BUSY_POLL = 64, +}; + +enum gro_result { + GRO_MERGED = 0, + GRO_MERGED_FREE = 1, + GRO_HELD = 2, + GRO_NORMAL = 3, + GRO_DROP = 4, + GRO_CONSUMED = 5, +}; + +typedef enum gro_result gro_result_t; + +struct bpf_xdp_link { + struct bpf_link link; + struct net_device *dev; + int flags; +}; + +struct netdev_net_notifier { + struct list_head list; + struct notifier_block *nb; +}; + +struct netpoll; + +struct netpoll_info { + refcount_t refcnt; + struct semaphore dev_lock; + struct sk_buff_head txq; + struct delayed_work tx_work; + struct netpoll *netpoll; + struct callback_head rcu; +}; + +struct udp_tunnel_info { + short unsigned int type; + sa_family_t sa_family; + __be16 port; + u8 hw_priv; +}; + +struct in_ifaddr; + +struct ip_mc_list; + +struct in_device { + struct net_device *dev; + refcount_t refcnt; + int dead; + struct in_ifaddr *ifa_list; + struct ip_mc_list *mc_list; + struct ip_mc_list **mc_hash; + int mc_count; + spinlock_t mc_tomb_lock; + struct ip_mc_list *mc_tomb; + long unsigned int mr_v1_seen; + long unsigned int mr_v2_seen; + long unsigned int mr_maxdelay; + long unsigned int mr_qi; + long unsigned int mr_qri; + unsigned char mr_qrv; + unsigned char mr_gq_running; + unsigned char mr_ifc_count; + struct timer_list mr_gq_timer; + struct timer_list mr_ifc_timer; + struct 
neigh_parms *arp_parms; + struct ipv4_devconf cnf; + struct callback_head callback_head; +}; + +struct offload_callbacks { + struct sk_buff * (*gso_segment)(struct sk_buff *, netdev_features_t); + struct sk_buff * (*gro_receive)(struct list_head *, struct sk_buff *); + int (*gro_complete)(struct sk_buff *, int); +}; + +struct packet_offload { + __be16 type; + u16 priority; + struct offload_callbacks callbacks; + struct list_head list; +}; + +struct netdev_notifier_info_ext { + struct netdev_notifier_info info; + union { + u32 mtu; + } ext; +}; + +struct netdev_notifier_change_info { + struct netdev_notifier_info info; + unsigned int flags_changed; +}; + +struct netdev_notifier_changeupper_info { + struct netdev_notifier_info info; + struct net_device *upper_dev; + bool master; + bool linking; + void *upper_info; +}; + +struct netdev_notifier_changelowerstate_info { + struct netdev_notifier_info info; + void *lower_state_info; +}; + +struct netdev_notifier_pre_changeaddr_info { + struct netdev_notifier_info info; + const unsigned char *dev_addr; +}; + +typedef int (*bpf_op_t)(struct net_device *, struct netdev_bpf *); + +enum { + NESTED_SYNC_IMM_BIT = 0, + NESTED_SYNC_TODO_BIT = 1, +}; + +struct netdev_nested_priv { + unsigned char flags; + void *data; +}; + +struct netdev_bonding_info { + ifslave slave; + ifbond master; +}; + +struct netdev_notifier_bonding_info { + struct netdev_notifier_info info; + struct netdev_bonding_info bonding_info; +}; + +union inet_addr { + __u32 all[4]; + __be32 ip; + __be32 ip6[4]; + struct in_addr in; + struct in6_addr in6; +}; + +struct netpoll { + struct net_device *dev; + char dev_name[16]; + const char *name; + union inet_addr local_ip; + union inet_addr remote_ip; + bool ipv6; + u16 local_port; + u16 remote_port; + u8 remote_mac[6]; +}; + +enum qdisc_state_t { + __QDISC_STATE_SCHED = 0, + __QDISC_STATE_DEACTIVATED = 1, +}; + +struct tcf_walker { + int stop; + int skip; + int count; + bool nonempty; + long unsigned int cookie; + int (*fn)(struct tcf_proto *, void *, struct tcf_walker *); +}; + +enum { + IPV4_DEVCONF_FORWARDING = 1, + IPV4_DEVCONF_MC_FORWARDING = 2, + IPV4_DEVCONF_PROXY_ARP = 3, + IPV4_DEVCONF_ACCEPT_REDIRECTS = 4, + IPV4_DEVCONF_SECURE_REDIRECTS = 5, + IPV4_DEVCONF_SEND_REDIRECTS = 6, + IPV4_DEVCONF_SHARED_MEDIA = 7, + IPV4_DEVCONF_RP_FILTER = 8, + IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE = 9, + IPV4_DEVCONF_BOOTP_RELAY = 10, + IPV4_DEVCONF_LOG_MARTIANS = 11, + IPV4_DEVCONF_TAG = 12, + IPV4_DEVCONF_ARPFILTER = 13, + IPV4_DEVCONF_MEDIUM_ID = 14, + IPV4_DEVCONF_NOXFRM = 15, + IPV4_DEVCONF_NOPOLICY = 16, + IPV4_DEVCONF_FORCE_IGMP_VERSION = 17, + IPV4_DEVCONF_ARP_ANNOUNCE = 18, + IPV4_DEVCONF_ARP_IGNORE = 19, + IPV4_DEVCONF_PROMOTE_SECONDARIES = 20, + IPV4_DEVCONF_ARP_ACCEPT = 21, + IPV4_DEVCONF_ARP_NOTIFY = 22, + IPV4_DEVCONF_ACCEPT_LOCAL = 23, + IPV4_DEVCONF_SRC_VMARK = 24, + IPV4_DEVCONF_PROXY_ARP_PVLAN = 25, + IPV4_DEVCONF_ROUTE_LOCALNET = 26, + IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL = 27, + IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL = 28, + IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN = 29, + IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST = 30, + IPV4_DEVCONF_DROP_GRATUITOUS_ARP = 31, + IPV4_DEVCONF_BC_FORWARDING = 32, + __IPV4_DEVCONF_MAX = 33, +}; + +struct in_ifaddr { + struct hlist_node hash; + struct in_ifaddr *ifa_next; + struct in_device *ifa_dev; + struct callback_head callback_head; + __be32 ifa_local; + __be32 ifa_address; + __be32 ifa_mask; + __u32 ifa_rt_priority; + __be32 ifa_broadcast; + unsigned char ifa_scope; + unsigned 
char ifa_prefixlen; + __u32 ifa_flags; + char ifa_label[16]; + __u32 ifa_valid_lft; + __u32 ifa_preferred_lft; + long unsigned int ifa_cstamp; + long unsigned int ifa_tstamp; +}; + +struct udp_tunnel_nic_shared { + struct udp_tunnel_nic *udp_tunnel_nic_info; + struct list_head devices; +}; + +struct dev_kfree_skb_cb { + enum skb_free_reason reason; +}; + +struct netdev_adjacent { + struct net_device *dev; + bool master; + bool ignore; + u16 ref_nr; + void *private; + struct list_head list; + struct callback_head rcu; +}; + +struct netdev_hw_addr { + struct list_head list; + unsigned char addr[32]; + unsigned char type; + bool global_use; + int sync_cnt; + int refcount; + int synced; + struct callback_head callback_head; +}; + +enum { + NDA_UNSPEC = 0, + NDA_DST = 1, + NDA_LLADDR = 2, + NDA_CACHEINFO = 3, + NDA_PROBES = 4, + NDA_VLAN = 5, + NDA_PORT = 6, + NDA_VNI = 7, + NDA_IFINDEX = 8, + NDA_MASTER = 9, + NDA_LINK_NETNSID = 10, + NDA_SRC_VNI = 11, + NDA_PROTOCOL = 12, + NDA_NH_ID = 13, + NDA_FDB_EXT_ATTRS = 14, + __NDA_MAX = 15, +}; + +struct nda_cacheinfo { + __u32 ndm_confirmed; + __u32 ndm_used; + __u32 ndm_updated; + __u32 ndm_refcnt; +}; + +struct ndt_stats { + __u64 ndts_allocs; + __u64 ndts_destroys; + __u64 ndts_hash_grows; + __u64 ndts_res_failed; + __u64 ndts_lookups; + __u64 ndts_hits; + __u64 ndts_rcv_probes_mcast; + __u64 ndts_rcv_probes_ucast; + __u64 ndts_periodic_gc_runs; + __u64 ndts_forced_gc_runs; + __u64 ndts_table_fulls; +}; + +enum { + NDTPA_UNSPEC = 0, + NDTPA_IFINDEX = 1, + NDTPA_REFCNT = 2, + NDTPA_REACHABLE_TIME = 3, + NDTPA_BASE_REACHABLE_TIME = 4, + NDTPA_RETRANS_TIME = 5, + NDTPA_GC_STALETIME = 6, + NDTPA_DELAY_PROBE_TIME = 7, + NDTPA_QUEUE_LEN = 8, + NDTPA_APP_PROBES = 9, + NDTPA_UCAST_PROBES = 10, + NDTPA_MCAST_PROBES = 11, + NDTPA_ANYCAST_DELAY = 12, + NDTPA_PROXY_DELAY = 13, + NDTPA_PROXY_QLEN = 14, + NDTPA_LOCKTIME = 15, + NDTPA_QUEUE_LENBYTES = 16, + NDTPA_MCAST_REPROBES = 17, + NDTPA_PAD = 18, + __NDTPA_MAX = 19, +}; + +struct ndtmsg { + __u8 ndtm_family; + __u8 ndtm_pad1; + __u16 ndtm_pad2; +}; + +struct ndt_config { + __u16 ndtc_key_len; + __u16 ndtc_entry_size; + __u32 ndtc_entries; + __u32 ndtc_last_flush; + __u32 ndtc_last_rand; + __u32 ndtc_hash_rnd; + __u32 ndtc_hash_mask; + __u32 ndtc_hash_chain_gc; + __u32 ndtc_proxy_qlen; +}; + +enum { + NDTA_UNSPEC = 0, + NDTA_NAME = 1, + NDTA_THRESH1 = 2, + NDTA_THRESH2 = 3, + NDTA_THRESH3 = 4, + NDTA_CONFIG = 5, + NDTA_PARMS = 6, + NDTA_STATS = 7, + NDTA_GC_INTERVAL = 8, + NDTA_PAD = 9, + __NDTA_MAX = 10, +}; + +enum { + RTN_UNSPEC = 0, + RTN_UNICAST = 1, + RTN_LOCAL = 2, + RTN_BROADCAST = 3, + RTN_ANYCAST = 4, + RTN_MULTICAST = 5, + RTN_BLACKHOLE = 6, + RTN_UNREACHABLE = 7, + RTN_PROHIBIT = 8, + RTN_THROW = 9, + RTN_NAT = 10, + RTN_XRESOLVE = 11, + __RTN_MAX = 12, +}; + +enum { + NEIGH_ARP_TABLE = 0, + NEIGH_ND_TABLE = 1, + NEIGH_DN_TABLE = 2, + NEIGH_NR_TABLES = 3, + NEIGH_LINK_TABLE = 3, +}; + +struct neigh_seq_state { + struct seq_net_private p; + struct neigh_table *tbl; + struct neigh_hash_table *nht; + void * (*neigh_sub_iter)(struct neigh_seq_state *, struct neighbour *, loff_t *); + unsigned int bucket; + unsigned int flags; +}; + +struct neighbour_cb { + long unsigned int sched_next; + unsigned int flags; +}; + +enum netevent_notif_type { + NETEVENT_NEIGH_UPDATE = 1, + NETEVENT_REDIRECT = 2, + NETEVENT_DELAY_PROBE_TIME_UPDATE = 3, + NETEVENT_IPV4_MPATH_HASH_UPDATE = 4, + NETEVENT_IPV6_MPATH_HASH_UPDATE = 5, + NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE = 6, +}; + +struct neigh_dump_filter { + int 
master_idx; + int dev_idx; +}; + +struct neigh_sysctl_table { + struct ctl_table_header *sysctl_header; + struct ctl_table neigh_vars[21]; +}; + +struct netlink_dump_control { + int (*start)(struct netlink_callback *); + int (*dump)(struct sk_buff *, struct netlink_callback *); + int (*done)(struct netlink_callback *); + void *data; + struct module *module; + u32 min_dump_alloc; +}; + +struct rtnl_link_stats { + __u32 rx_packets; + __u32 tx_packets; + __u32 rx_bytes; + __u32 tx_bytes; + __u32 rx_errors; + __u32 tx_errors; + __u32 rx_dropped; + __u32 tx_dropped; + __u32 multicast; + __u32 collisions; + __u32 rx_length_errors; + __u32 rx_over_errors; + __u32 rx_crc_errors; + __u32 rx_frame_errors; + __u32 rx_fifo_errors; + __u32 rx_missed_errors; + __u32 tx_aborted_errors; + __u32 tx_carrier_errors; + __u32 tx_fifo_errors; + __u32 tx_heartbeat_errors; + __u32 tx_window_errors; + __u32 rx_compressed; + __u32 tx_compressed; + __u32 rx_nohandler; +}; + +struct rtnl_link_ifmap { + __u64 mem_start; + __u64 mem_end; + __u64 base_addr; + __u16 irq; + __u8 dma; + __u8 port; +}; + +enum { + IFLA_UNSPEC = 0, + IFLA_ADDRESS = 1, + IFLA_BROADCAST = 2, + IFLA_IFNAME = 3, + IFLA_MTU = 4, + IFLA_LINK = 5, + IFLA_QDISC = 6, + IFLA_STATS = 7, + IFLA_COST = 8, + IFLA_PRIORITY = 9, + IFLA_MASTER = 10, + IFLA_WIRELESS = 11, + IFLA_PROTINFO = 12, + IFLA_TXQLEN = 13, + IFLA_MAP = 14, + IFLA_WEIGHT = 15, + IFLA_OPERSTATE = 16, + IFLA_LINKMODE = 17, + IFLA_LINKINFO = 18, + IFLA_NET_NS_PID = 19, + IFLA_IFALIAS = 20, + IFLA_NUM_VF = 21, + IFLA_VFINFO_LIST = 22, + IFLA_STATS64 = 23, + IFLA_VF_PORTS = 24, + IFLA_PORT_SELF = 25, + IFLA_AF_SPEC = 26, + IFLA_GROUP = 27, + IFLA_NET_NS_FD = 28, + IFLA_EXT_MASK = 29, + IFLA_PROMISCUITY = 30, + IFLA_NUM_TX_QUEUES = 31, + IFLA_NUM_RX_QUEUES = 32, + IFLA_CARRIER = 33, + IFLA_PHYS_PORT_ID = 34, + IFLA_CARRIER_CHANGES = 35, + IFLA_PHYS_SWITCH_ID = 36, + IFLA_LINK_NETNSID = 37, + IFLA_PHYS_PORT_NAME = 38, + IFLA_PROTO_DOWN = 39, + IFLA_GSO_MAX_SEGS = 40, + IFLA_GSO_MAX_SIZE = 41, + IFLA_PAD = 42, + IFLA_XDP = 43, + IFLA_EVENT = 44, + IFLA_NEW_NETNSID = 45, + IFLA_IF_NETNSID = 46, + IFLA_TARGET_NETNSID = 46, + IFLA_CARRIER_UP_COUNT = 47, + IFLA_CARRIER_DOWN_COUNT = 48, + IFLA_NEW_IFINDEX = 49, + IFLA_MIN_MTU = 50, + IFLA_MAX_MTU = 51, + IFLA_PROP_LIST = 52, + IFLA_ALT_IFNAME = 53, + IFLA_PERM_ADDRESS = 54, + IFLA_PROTO_DOWN_REASON = 55, + __IFLA_MAX = 56, +}; + +enum { + IFLA_PROTO_DOWN_REASON_UNSPEC = 0, + IFLA_PROTO_DOWN_REASON_MASK = 1, + IFLA_PROTO_DOWN_REASON_VALUE = 2, + __IFLA_PROTO_DOWN_REASON_CNT = 3, + IFLA_PROTO_DOWN_REASON_MAX = 2, +}; + +enum { + IFLA_BRPORT_UNSPEC = 0, + IFLA_BRPORT_STATE = 1, + IFLA_BRPORT_PRIORITY = 2, + IFLA_BRPORT_COST = 3, + IFLA_BRPORT_MODE = 4, + IFLA_BRPORT_GUARD = 5, + IFLA_BRPORT_PROTECT = 6, + IFLA_BRPORT_FAST_LEAVE = 7, + IFLA_BRPORT_LEARNING = 8, + IFLA_BRPORT_UNICAST_FLOOD = 9, + IFLA_BRPORT_PROXYARP = 10, + IFLA_BRPORT_LEARNING_SYNC = 11, + IFLA_BRPORT_PROXYARP_WIFI = 12, + IFLA_BRPORT_ROOT_ID = 13, + IFLA_BRPORT_BRIDGE_ID = 14, + IFLA_BRPORT_DESIGNATED_PORT = 15, + IFLA_BRPORT_DESIGNATED_COST = 16, + IFLA_BRPORT_ID = 17, + IFLA_BRPORT_NO = 18, + IFLA_BRPORT_TOPOLOGY_CHANGE_ACK = 19, + IFLA_BRPORT_CONFIG_PENDING = 20, + IFLA_BRPORT_MESSAGE_AGE_TIMER = 21, + IFLA_BRPORT_FORWARD_DELAY_TIMER = 22, + IFLA_BRPORT_HOLD_TIMER = 23, + IFLA_BRPORT_FLUSH = 24, + IFLA_BRPORT_MULTICAST_ROUTER = 25, + IFLA_BRPORT_PAD = 26, + IFLA_BRPORT_MCAST_FLOOD = 27, + IFLA_BRPORT_MCAST_TO_UCAST = 28, + IFLA_BRPORT_VLAN_TUNNEL = 29, + IFLA_BRPORT_BCAST_FLOOD = 
30, + IFLA_BRPORT_GROUP_FWD_MASK = 31, + IFLA_BRPORT_NEIGH_SUPPRESS = 32, + IFLA_BRPORT_ISOLATED = 33, + IFLA_BRPORT_BACKUP_PORT = 34, + IFLA_BRPORT_MRP_RING_OPEN = 35, + IFLA_BRPORT_MRP_IN_OPEN = 36, + __IFLA_BRPORT_MAX = 37, +}; + +enum { + IFLA_INFO_UNSPEC = 0, + IFLA_INFO_KIND = 1, + IFLA_INFO_DATA = 2, + IFLA_INFO_XSTATS = 3, + IFLA_INFO_SLAVE_KIND = 4, + IFLA_INFO_SLAVE_DATA = 5, + __IFLA_INFO_MAX = 6, +}; + +enum { + IFLA_VF_INFO_UNSPEC = 0, + IFLA_VF_INFO = 1, + __IFLA_VF_INFO_MAX = 2, +}; + +enum { + IFLA_VF_UNSPEC = 0, + IFLA_VF_MAC = 1, + IFLA_VF_VLAN = 2, + IFLA_VF_TX_RATE = 3, + IFLA_VF_SPOOFCHK = 4, + IFLA_VF_LINK_STATE = 5, + IFLA_VF_RATE = 6, + IFLA_VF_RSS_QUERY_EN = 7, + IFLA_VF_STATS = 8, + IFLA_VF_TRUST = 9, + IFLA_VF_IB_NODE_GUID = 10, + IFLA_VF_IB_PORT_GUID = 11, + IFLA_VF_VLAN_LIST = 12, + IFLA_VF_BROADCAST = 13, + __IFLA_VF_MAX = 14, +}; + +struct ifla_vf_mac { + __u32 vf; + __u8 mac[32]; +}; + +struct ifla_vf_broadcast { + __u8 broadcast[32]; +}; + +struct ifla_vf_vlan { + __u32 vf; + __u32 vlan; + __u32 qos; +}; + +enum { + IFLA_VF_VLAN_INFO_UNSPEC = 0, + IFLA_VF_VLAN_INFO = 1, + __IFLA_VF_VLAN_INFO_MAX = 2, +}; + +struct ifla_vf_vlan_info { + __u32 vf; + __u32 vlan; + __u32 qos; + __be16 vlan_proto; +}; + +struct ifla_vf_tx_rate { + __u32 vf; + __u32 rate; +}; + +struct ifla_vf_rate { + __u32 vf; + __u32 min_tx_rate; + __u32 max_tx_rate; +}; + +struct ifla_vf_spoofchk { + __u32 vf; + __u32 setting; +}; + +struct ifla_vf_link_state { + __u32 vf; + __u32 link_state; +}; + +struct ifla_vf_rss_query_en { + __u32 vf; + __u32 setting; +}; + +enum { + IFLA_VF_STATS_RX_PACKETS = 0, + IFLA_VF_STATS_TX_PACKETS = 1, + IFLA_VF_STATS_RX_BYTES = 2, + IFLA_VF_STATS_TX_BYTES = 3, + IFLA_VF_STATS_BROADCAST = 4, + IFLA_VF_STATS_MULTICAST = 5, + IFLA_VF_STATS_PAD = 6, + IFLA_VF_STATS_RX_DROPPED = 7, + IFLA_VF_STATS_TX_DROPPED = 8, + __IFLA_VF_STATS_MAX = 9, +}; + +struct ifla_vf_trust { + __u32 vf; + __u32 setting; +}; + +enum { + IFLA_VF_PORT_UNSPEC = 0, + IFLA_VF_PORT = 1, + __IFLA_VF_PORT_MAX = 2, +}; + +enum { + IFLA_PORT_UNSPEC = 0, + IFLA_PORT_VF = 1, + IFLA_PORT_PROFILE = 2, + IFLA_PORT_VSI_TYPE = 3, + IFLA_PORT_INSTANCE_UUID = 4, + IFLA_PORT_HOST_UUID = 5, + IFLA_PORT_REQUEST = 6, + IFLA_PORT_RESPONSE = 7, + __IFLA_PORT_MAX = 8, +}; + +struct if_stats_msg { + __u8 family; + __u8 pad1; + __u16 pad2; + __u32 ifindex; + __u32 filter_mask; +}; + +enum { + IFLA_STATS_UNSPEC = 0, + IFLA_STATS_LINK_64 = 1, + IFLA_STATS_LINK_XSTATS = 2, + IFLA_STATS_LINK_XSTATS_SLAVE = 3, + IFLA_STATS_LINK_OFFLOAD_XSTATS = 4, + IFLA_STATS_AF_SPEC = 5, + __IFLA_STATS_MAX = 6, +}; + +enum { + IFLA_OFFLOAD_XSTATS_UNSPEC = 0, + IFLA_OFFLOAD_XSTATS_CPU_HIT = 1, + __IFLA_OFFLOAD_XSTATS_MAX = 2, +}; + +enum { + XDP_ATTACHED_NONE = 0, + XDP_ATTACHED_DRV = 1, + XDP_ATTACHED_SKB = 2, + XDP_ATTACHED_HW = 3, + XDP_ATTACHED_MULTI = 4, +}; + +enum { + IFLA_XDP_UNSPEC = 0, + IFLA_XDP_FD = 1, + IFLA_XDP_ATTACHED = 2, + IFLA_XDP_FLAGS = 3, + IFLA_XDP_PROG_ID = 4, + IFLA_XDP_DRV_PROG_ID = 5, + IFLA_XDP_SKB_PROG_ID = 6, + IFLA_XDP_HW_PROG_ID = 7, + IFLA_XDP_EXPECTED_FD = 8, + __IFLA_XDP_MAX = 9, +}; + +enum { + IFLA_EVENT_NONE = 0, + IFLA_EVENT_REBOOT = 1, + IFLA_EVENT_FEATURES = 2, + IFLA_EVENT_BONDING_FAILOVER = 3, + IFLA_EVENT_NOTIFY_PEERS = 4, + IFLA_EVENT_IGMP_RESEND = 5, + IFLA_EVENT_BONDING_OPTIONS = 6, +}; + +enum { + IFLA_BRIDGE_FLAGS = 0, + IFLA_BRIDGE_MODE = 1, + IFLA_BRIDGE_VLAN_INFO = 2, + IFLA_BRIDGE_VLAN_TUNNEL_INFO = 3, + IFLA_BRIDGE_MRP = 4, + __IFLA_BRIDGE_MAX = 5, +}; + +enum { + BR_MCAST_DIR_RX = 
0, + BR_MCAST_DIR_TX = 1, + BR_MCAST_DIR_SIZE = 2, +}; + +enum rtattr_type_t { + RTA_UNSPEC = 0, + RTA_DST = 1, + RTA_SRC = 2, + RTA_IIF = 3, + RTA_OIF = 4, + RTA_GATEWAY = 5, + RTA_PRIORITY = 6, + RTA_PREFSRC = 7, + RTA_METRICS = 8, + RTA_MULTIPATH = 9, + RTA_PROTOINFO = 10, + RTA_FLOW = 11, + RTA_CACHEINFO = 12, + RTA_SESSION = 13, + RTA_MP_ALGO = 14, + RTA_TABLE = 15, + RTA_MARK = 16, + RTA_MFC_STATS = 17, + RTA_VIA = 18, + RTA_NEWDST = 19, + RTA_PREF = 20, + RTA_ENCAP_TYPE = 21, + RTA_ENCAP = 22, + RTA_EXPIRES = 23, + RTA_PAD = 24, + RTA_UID = 25, + RTA_TTL_PROPAGATE = 26, + RTA_IP_PROTO = 27, + RTA_SPORT = 28, + RTA_DPORT = 29, + RTA_NH_ID = 30, + __RTA_MAX = 31, +}; + +struct rta_cacheinfo { + __u32 rta_clntref; + __u32 rta_lastuse; + __s32 rta_expires; + __u32 rta_error; + __u32 rta_used; + __u32 rta_id; + __u32 rta_ts; + __u32 rta_tsage; +}; + +struct ifinfomsg { + unsigned char ifi_family; + unsigned char __ifi_pad; + short unsigned int ifi_type; + int ifi_index; + unsigned int ifi_flags; + unsigned int ifi_change; +}; + +typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, struct netlink_ext_ack *); + +typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *); + +struct rtnl_af_ops { + struct list_head list; + int family; + int (*fill_link_af)(struct sk_buff *, const struct net_device *, u32); + size_t (*get_link_af_size)(const struct net_device *, u32); + int (*validate_link_af)(const struct net_device *, const struct nlattr *); + int (*set_link_af)(struct net_device *, const struct nlattr *); + int (*fill_stats_af)(struct sk_buff *, const struct net_device *); + size_t (*get_stats_af_size)(const struct net_device *); +}; + +struct rtnl_link { + rtnl_doit_func doit; + rtnl_dumpit_func dumpit; + struct module *owner; + unsigned int flags; + struct callback_head rcu; +}; + +enum { + IF_LINK_MODE_DEFAULT = 0, + IF_LINK_MODE_DORMANT = 1, + IF_LINK_MODE_TESTING = 2, +}; + +enum lw_bits { + LW_URGENT = 0, +}; + +struct seg6_pernet_data { + struct mutex lock; + struct in6_addr *tun_src; +}; + +enum { + BPF_F_RECOMPUTE_CSUM = 1, + BPF_F_INVALIDATE_HASH = 2, +}; + +enum { + BPF_F_HDR_FIELD_MASK = 15, +}; + +enum { + BPF_F_PSEUDO_HDR = 16, + BPF_F_MARK_MANGLED_0 = 32, + BPF_F_MARK_ENFORCE = 64, +}; + +enum { + BPF_F_INGRESS = 1, +}; + +enum { + BPF_F_TUNINFO_IPV6 = 1, +}; + +enum { + BPF_F_ZERO_CSUM_TX = 2, + BPF_F_DONT_FRAGMENT = 4, + BPF_F_SEQ_NUMBER = 8, +}; + +enum { + BPF_CSUM_LEVEL_QUERY = 0, + BPF_CSUM_LEVEL_INC = 1, + BPF_CSUM_LEVEL_DEC = 2, + BPF_CSUM_LEVEL_RESET = 3, +}; + +enum { + BPF_F_ADJ_ROOM_FIXED_GSO = 1, + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 2, + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 4, + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 8, + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 16, + BPF_F_ADJ_ROOM_NO_CSUM_RESET = 32, +}; + +enum { + BPF_ADJ_ROOM_ENCAP_L2_MASK = 255, + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56, +}; + +enum { + BPF_SK_LOOKUP_F_REPLACE = 1, + BPF_SK_LOOKUP_F_NO_REUSEPORT = 2, +}; + +enum bpf_adj_room_mode { + BPF_ADJ_ROOM_NET = 0, + BPF_ADJ_ROOM_MAC = 1, +}; + +enum bpf_hdr_start_off { + BPF_HDR_START_MAC = 0, + BPF_HDR_START_NET = 1, +}; + +enum bpf_lwt_encap_mode { + BPF_LWT_ENCAP_SEG6 = 0, + BPF_LWT_ENCAP_SEG6_INLINE = 1, + BPF_LWT_ENCAP_IP = 2, +}; + +struct bpf_tunnel_key { + __u32 tunnel_id; + union { + __u32 remote_ipv4; + __u32 remote_ipv6[4]; + }; + __u8 tunnel_tos; + __u8 tunnel_ttl; + __u16 tunnel_ext; + __u32 tunnel_label; +}; + +struct bpf_xfrm_state { + __u32 reqid; + __u32 spi; + __u16 family; + __u16 ext; + union { + __u32 remote_ipv4; + __u32 
remote_ipv6[4]; + }; +}; + +struct bpf_tcp_sock { + __u32 snd_cwnd; + __u32 srtt_us; + __u32 rtt_min; + __u32 snd_ssthresh; + __u32 rcv_nxt; + __u32 snd_nxt; + __u32 snd_una; + __u32 mss_cache; + __u32 ecn_flags; + __u32 rate_delivered; + __u32 rate_interval_us; + __u32 packets_out; + __u32 retrans_out; + __u32 total_retrans; + __u32 segs_in; + __u32 data_segs_in; + __u32 segs_out; + __u32 data_segs_out; + __u32 lost_out; + __u32 sacked_out; + __u64 bytes_received; + __u64 bytes_acked; + __u32 dsack_dups; + __u32 delivered; + __u32 delivered_ce; + __u32 icsk_retransmits; +}; + +struct bpf_sock_tuple { + union { + struct { + __be32 saddr; + __be32 daddr; + __be16 sport; + __be16 dport; + } ipv4; + struct { + __be32 saddr[4]; + __be32 daddr[4]; + __be16 sport; + __be16 dport; + } ipv6; + }; +}; + +struct bpf_xdp_sock { + __u32 queue_id; +}; + +enum { + BPF_SOCK_OPS_RTO_CB_FLAG = 1, + BPF_SOCK_OPS_RETRANS_CB_FLAG = 2, + BPF_SOCK_OPS_STATE_CB_FLAG = 4, + BPF_SOCK_OPS_RTT_CB_FLAG = 8, + BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = 16, + BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = 32, + BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = 64, + BPF_SOCK_OPS_ALL_CB_FLAGS = 127, +}; + +enum { + BPF_SOCK_OPS_VOID = 0, + BPF_SOCK_OPS_TIMEOUT_INIT = 1, + BPF_SOCK_OPS_RWND_INIT = 2, + BPF_SOCK_OPS_TCP_CONNECT_CB = 3, + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 4, + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 5, + BPF_SOCK_OPS_NEEDS_ECN = 6, + BPF_SOCK_OPS_BASE_RTT = 7, + BPF_SOCK_OPS_RTO_CB = 8, + BPF_SOCK_OPS_RETRANS_CB = 9, + BPF_SOCK_OPS_STATE_CB = 10, + BPF_SOCK_OPS_TCP_LISTEN_CB = 11, + BPF_SOCK_OPS_RTT_CB = 12, + BPF_SOCK_OPS_PARSE_HDR_OPT_CB = 13, + BPF_SOCK_OPS_HDR_OPT_LEN_CB = 14, + BPF_SOCK_OPS_WRITE_HDR_OPT_CB = 15, +}; + +enum { + TCP_BPF_IW = 1001, + TCP_BPF_SNDCWND_CLAMP = 1002, + TCP_BPF_DELACK_MAX = 1003, + TCP_BPF_RTO_MIN = 1004, + TCP_BPF_SYN = 1005, + TCP_BPF_SYN_IP = 1006, + TCP_BPF_SYN_MAC = 1007, +}; + +enum { + BPF_LOAD_HDR_OPT_TCP_SYN = 1, +}; + +enum { + BPF_FIB_LOOKUP_DIRECT = 1, + BPF_FIB_LOOKUP_OUTPUT = 2, +}; + +enum { + BPF_FIB_LKUP_RET_SUCCESS = 0, + BPF_FIB_LKUP_RET_BLACKHOLE = 1, + BPF_FIB_LKUP_RET_UNREACHABLE = 2, + BPF_FIB_LKUP_RET_PROHIBIT = 3, + BPF_FIB_LKUP_RET_NOT_FWDED = 4, + BPF_FIB_LKUP_RET_FWD_DISABLED = 5, + BPF_FIB_LKUP_RET_UNSUPP_LWT = 6, + BPF_FIB_LKUP_RET_NO_NEIGH = 7, + BPF_FIB_LKUP_RET_FRAG_NEEDED = 8, +}; + +struct bpf_fib_lookup { + __u8 family; + __u8 l4_protocol; + __be16 sport; + __be16 dport; + __u16 tot_len; + __u32 ifindex; + union { + __u8 tos; + __be32 flowinfo; + __u32 rt_metric; + }; + union { + __be32 ipv4_src; + __u32 ipv6_src[4]; + }; + union { + __be32 ipv4_dst; + __u32 ipv6_dst[4]; + }; + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + __u8 smac[6]; + __u8 dmac[6]; +}; + +struct bpf_redir_neigh { + __u32 nh_family; + union { + __be32 ipv4_nh; + __u32 ipv6_nh[4]; + }; +}; + +enum rt_scope_t { + RT_SCOPE_UNIVERSE = 0, + RT_SCOPE_SITE = 200, + RT_SCOPE_LINK = 253, + RT_SCOPE_HOST = 254, + RT_SCOPE_NOWHERE = 255, +}; + +enum rt_class_t { + RT_TABLE_UNSPEC = 0, + RT_TABLE_COMPAT = 252, + RT_TABLE_DEFAULT = 253, + RT_TABLE_MAIN = 254, + RT_TABLE_LOCAL = 255, + RT_TABLE_MAX = 4294967295, +}; + +struct nl_info { + struct nlmsghdr *nlh; + struct net *nl_net; + u32 portid; + u8 skip_notify: 1; + u8 skip_notify_kernel: 1; +}; + +typedef int (*bpf_aux_classic_check_t)(struct sock_filter *, unsigned int); + +struct inet_timewait_sock { + struct sock_common __tw_common; + __u32 tw_mark; + volatile unsigned char tw_substate; + unsigned char tw_rcv_wscale; + __be16 tw_sport; + 
unsigned int tw_kill: 1; + unsigned int tw_transparent: 1; + unsigned int tw_flowlabel: 20; + unsigned int tw_pad: 2; + unsigned int tw_tos: 8; + u32 tw_txhash; + u32 tw_priority; + struct timer_list tw_timer; + struct inet_bind_bucket *tw_tb; +}; + +struct tcp_timewait_sock { + struct inet_timewait_sock tw_sk; + u32 tw_rcv_wnd; + u32 tw_ts_offset; + u32 tw_ts_recent; + u32 tw_last_oow_ack_time; + int tw_ts_recent_stamp; + u32 tw_tx_delay; + struct tcp_md5sig_key *tw_md5_key; +}; + +struct udp_sock { + struct inet_sock inet; + int pending; + unsigned int corkflag; + __u8 encap_type; + unsigned char no_check6_tx: 1; + unsigned char no_check6_rx: 1; + unsigned char encap_enabled: 1; + unsigned char gro_enabled: 1; + __u16 len; + __u16 gso_size; + __u16 pcslen; + __u16 pcrlen; + __u8 pcflag; + __u8 unused[3]; + int (*encap_rcv)(struct sock *, struct sk_buff *); + int (*encap_err_lookup)(struct sock *, struct sk_buff *); + void (*encap_destroy)(struct sock *); + struct sk_buff * (*gro_receive)(struct sock *, struct list_head *, struct sk_buff *); + int (*gro_complete)(struct sock *, struct sk_buff *, int); + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + struct sk_buff_head reader_queue; + int forward_deficit; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct udp6_sock { + struct udp_sock udp; + struct ipv6_pinfo inet6; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct tcp6_sock { + struct tcp_sock tcp; + struct ipv6_pinfo inet6; +}; + +struct fib6_result; + +struct fib6_config; + +struct ipv6_stub { + int (*ipv6_sock_mc_join)(struct sock *, int, const struct in6_addr *); + int (*ipv6_sock_mc_drop)(struct sock *, int, const struct in6_addr *); + struct dst_entry * (*ipv6_dst_lookup_flow)(struct net *, const struct sock *, struct flowi6 *, const struct in6_addr *); + int (*ipv6_route_input)(struct sk_buff *); + struct fib6_table * (*fib6_get_table)(struct net *, u32); + int (*fib6_lookup)(struct net *, int, struct flowi6 *, struct fib6_result *, int); + int (*fib6_table_lookup)(struct net *, struct fib6_table *, int, struct flowi6 *, struct fib6_result *, int); + void (*fib6_select_path)(const struct net *, struct fib6_result *, struct flowi6 *, int, bool, const struct sk_buff *, int); + u32 (*ip6_mtu_from_fib6)(const struct fib6_result *, const struct in6_addr *, const struct in6_addr *); + int (*fib6_nh_init)(struct net *, struct fib6_nh *, struct fib6_config *, gfp_t, struct netlink_ext_ack *); + void (*fib6_nh_release)(struct fib6_nh *); + void (*fib6_update_sernum)(struct net *, struct fib6_info *); + int (*ip6_del_rt)(struct net *, struct fib6_info *, bool); + void (*fib6_rt_update)(struct net *, struct fib6_info *, struct nl_info *); + void (*udpv6_encap_enable)(); + void (*ndisc_send_na)(struct net_device *, const struct in6_addr *, const struct in6_addr *, bool, bool, bool, bool); + void (*xfrm6_local_rxpmtu)(struct sk_buff *, u32); + int (*xfrm6_udp_encap_rcv)(struct sock *, struct sk_buff *); + int (*xfrm6_rcv_encap)(struct sk_buff *, int, __be32, int); + struct neigh_table *nd_tbl; + int (*ipv6_fragment)(struct net *, struct sock *, struct sk_buff *, int (*)(struct net *, struct sock *, struct sk_buff *)); +}; + +struct fib6_result { + struct fib6_nh *nh; + struct fib6_info *f6i; + u32 fib6_flags; + u8 fib6_type; + struct rt6_info *rt6; +}; + +struct fib6_config { + u32 fc_table; + u32 fc_metric; + int fc_dst_len; + int fc_src_len; + int fc_ifindex; + u32 fc_flags; + u32 fc_protocol; + u16 fc_type; + u16 
fc_delete_all_nh: 1; + u16 fc_ignore_dev_down: 1; + u16 __unused: 14; + u32 fc_nh_id; + struct in6_addr fc_dst; + struct in6_addr fc_src; + struct in6_addr fc_prefsrc; + struct in6_addr fc_gateway; + long unsigned int fc_expires; + struct nlattr *fc_mx; + int fc_mx_len; + int fc_mp_len; + struct nlattr *fc_mp; + struct nl_info fc_nlinfo; + struct nlattr *fc_encap; + u16 fc_encap_type; + bool fc_is_fdb; +}; + +struct ipv6_bpf_stub { + int (*inet6_bind)(struct sock *, struct sockaddr *, int, u32); + struct sock * (*udp6_lib_lookup)(struct net *, const struct in6_addr *, __be16, const struct in6_addr *, __be16, int, int, struct udp_table *, struct sk_buff *); +}; + +struct fib_result { + __be32 prefix; + unsigned char prefixlen; + unsigned char nh_sel; + unsigned char type; + unsigned char scope; + u32 tclassid; + struct fib_nh_common *nhc; + struct fib_info *fi; + struct fib_table *table; + struct hlist_head *fa_head; +}; + +enum { + INET_ECN_NOT_ECT = 0, + INET_ECN_ECT_1 = 1, + INET_ECN_ECT_0 = 2, + INET_ECN_CE = 3, + INET_ECN_MASK = 3, +}; + +struct tcp_skb_cb { + __u32 seq; + __u32 end_seq; + union { + __u32 tcp_tw_isn; + struct { + u16 tcp_gso_segs; + u16 tcp_gso_size; + }; + }; + __u8 tcp_flags; + __u8 sacked; + __u8 ip_dsfield; + __u8 txstamp_ack: 1; + __u8 eor: 1; + __u8 has_rxtstamp: 1; + __u8 unused: 5; + __u32 ack_seq; + union { + struct { + __u32 in_flight: 30; + __u32 is_app_limited: 1; + __u32 unused: 1; + __u32 delivered; + u64 first_tx_mstamp; + u64 delivered_mstamp; + } tx; + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + } header; + struct { + __u32 flags; + struct sock *sk_redir; + void *data_end; + } bpf; + }; +}; + +struct strp_stats { + long long unsigned int msgs; + long long unsigned int bytes; + unsigned int mem_fail; + unsigned int need_more_hdr; + unsigned int msg_too_big; + unsigned int msg_timeouts; + unsigned int bad_hdr_len; +}; + +struct strparser; + +struct strp_callbacks { + int (*parse_msg)(struct strparser *, struct sk_buff *); + void (*rcv_msg)(struct strparser *, struct sk_buff *); + int (*read_sock_done)(struct strparser *, int); + void (*abort_parser)(struct strparser *, int); + void (*lock)(struct strparser *); + void (*unlock)(struct strparser *); +}; + +struct strparser { + struct sock *sk; + u32 stopped: 1; + u32 paused: 1; + u32 aborted: 1; + u32 interrupted: 1; + u32 unrecov_intr: 1; + struct sk_buff **skb_nextp; + struct sk_buff *skb_head; + unsigned int need_bytes; + struct delayed_work msg_timer_work; + struct work_struct work; + struct strp_stats stats; + struct strp_callbacks cb; +}; + +struct strp_msg { + int full_len; + int offset; +}; + +struct xdp_umem { + void *addrs; + u64 size; + u32 headroom; + u32 chunk_size; + u32 chunks; + u32 npgs; + struct user_struct *user; + refcount_t users; + u8 flags; + bool zc; + struct page **pgs; + int id; + struct list_head xsk_dma_list; + struct work_struct work; +}; + +struct xdp_sock; + +struct xsk_map { + struct bpf_map map; + spinlock_t lock; + struct xdp_sock *xsk_map[0]; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct xsk_queue; + +struct xdp_sock { + struct sock sk; + long: 64; + struct xsk_queue *rx; + struct net_device *dev; + struct xdp_umem *umem; + struct list_head flush_node; + struct xsk_buff_pool *pool; + u16 queue_id; + bool zc; + enum { + XSK_READY = 0, + XSK_BOUND = 1, + XSK_UNBOUND = 2, + } state; + long: 64; + struct xsk_queue *tx; + struct list_head tx_list; + spinlock_t rx_lock; + u64 rx_dropped; + u64 
rx_queue_full; + struct list_head map_list; + spinlock_t map_list_lock; + struct mutex mutex; + struct xsk_queue *fq_tmp; + struct xsk_queue *cq_tmp; + long: 64; +}; + +struct ipv6_sr_hdr { + __u8 nexthdr; + __u8 hdrlen; + __u8 type; + __u8 segments_left; + __u8 first_segment; + __u8 flags; + __u16 tag; + struct in6_addr segments[0]; +}; + +enum { + SEG6_LOCAL_ACTION_UNSPEC = 0, + SEG6_LOCAL_ACTION_END = 1, + SEG6_LOCAL_ACTION_END_X = 2, + SEG6_LOCAL_ACTION_END_T = 3, + SEG6_LOCAL_ACTION_END_DX2 = 4, + SEG6_LOCAL_ACTION_END_DX6 = 5, + SEG6_LOCAL_ACTION_END_DX4 = 6, + SEG6_LOCAL_ACTION_END_DT6 = 7, + SEG6_LOCAL_ACTION_END_DT4 = 8, + SEG6_LOCAL_ACTION_END_B6 = 9, + SEG6_LOCAL_ACTION_END_B6_ENCAP = 10, + SEG6_LOCAL_ACTION_END_BM = 11, + SEG6_LOCAL_ACTION_END_S = 12, + SEG6_LOCAL_ACTION_END_AS = 13, + SEG6_LOCAL_ACTION_END_AM = 14, + SEG6_LOCAL_ACTION_END_BPF = 15, + __SEG6_LOCAL_ACTION_MAX = 16, +}; + +struct seg6_bpf_srh_state { + struct ipv6_sr_hdr *srh; + u16 hdrlen; + bool valid; +}; + +struct tls_crypto_info { + __u16 version; + __u16 cipher_type; +}; + +struct tls12_crypto_info_aes_gcm_128 { + struct tls_crypto_info info; + unsigned char iv[8]; + unsigned char key[16]; + unsigned char salt[4]; + unsigned char rec_seq[8]; +}; + +struct tls12_crypto_info_aes_gcm_256 { + struct tls_crypto_info info; + unsigned char iv[8]; + unsigned char key[32]; + unsigned char salt[4]; + unsigned char rec_seq[8]; +}; + +struct tls_sw_context_rx { + struct crypto_aead *aead_recv; + struct crypto_wait async_wait; + struct strparser strp; + struct sk_buff_head rx_list; + void (*saved_data_ready)(struct sock *); + struct sk_buff *recv_pkt; + u8 control; + u8 async_capable: 1; + u8 decrypted: 1; + atomic_t decrypt_pending; + spinlock_t decrypt_compl_lock; + bool async_notify; +}; + +struct cipher_context { + char *iv; + char *rec_seq; +}; + +union tls_crypto_context { + struct tls_crypto_info info; + union { + struct tls12_crypto_info_aes_gcm_128 aes_gcm_128; + struct tls12_crypto_info_aes_gcm_256 aes_gcm_256; + }; +}; + +struct tls_prot_info { + u16 version; + u16 cipher_type; + u16 prepend_size; + u16 tag_size; + u16 overhead_size; + u16 iv_size; + u16 salt_size; + u16 rec_seq_size; + u16 aad_size; + u16 tail_size; +}; + +struct tls_context { + struct tls_prot_info prot_info; + u8 tx_conf: 3; + u8 rx_conf: 3; + int (*push_pending_record)(struct sock *, int); + void (*sk_write_space)(struct sock *); + void *priv_ctx_tx; + void *priv_ctx_rx; + struct net_device *netdev; + struct cipher_context tx; + struct cipher_context rx; + struct scatterlist *partially_sent_record; + u16 partially_sent_offset; + bool in_tcp_sendpages; + bool pending_open_record_frags; + struct mutex tx_lock; + long unsigned int flags; + struct proto *sk_proto; + void (*sk_destruct)(struct sock *); + union tls_crypto_context crypto_send; + union tls_crypto_context crypto_recv; + struct list_head list; + refcount_t refcount; + struct callback_head rcu; +}; + +typedef u64 (*btf_bpf_skb_get_pay_offset)(struct sk_buff *); + +typedef u64 (*btf_bpf_skb_get_nlattr)(struct sk_buff *, u32, u32); + +typedef u64 (*btf_bpf_skb_get_nlattr_nest)(struct sk_buff *, u32, u32); + +typedef u64 (*btf_bpf_skb_load_helper_8)(const struct sk_buff *, const void *, int, int); + +typedef u64 (*btf_bpf_skb_load_helper_8_no_cache)(const struct sk_buff *, int); + +typedef u64 (*btf_bpf_skb_load_helper_16)(const struct sk_buff *, const void *, int, int); + +typedef u64 (*btf_bpf_skb_load_helper_16_no_cache)(const struct sk_buff *, int); + +typedef u64 
(*btf_bpf_skb_load_helper_32)(const struct sk_buff *, const void *, int, int); + +typedef u64 (*btf_bpf_skb_load_helper_32_no_cache)(const struct sk_buff *, int); + +struct bpf_scratchpad { + union { + __be32 diff[128]; + u8 buff[512]; + }; +}; + +typedef u64 (*btf_bpf_skb_store_bytes)(struct sk_buff *, u32, const void *, u32, u64); + +typedef u64 (*btf_bpf_skb_load_bytes)(const struct sk_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_flow_dissector_load_bytes)(const struct bpf_flow_dissector *, u32, void *, u32); + +typedef u64 (*btf_bpf_skb_load_bytes_relative)(const struct sk_buff *, u32, void *, u32, u32); + +typedef u64 (*btf_bpf_skb_pull_data)(struct sk_buff *, u32); + +typedef u64 (*btf_bpf_sk_fullsock)(struct sock *); + +typedef u64 (*btf_sk_skb_pull_data)(struct sk_buff *, u32); + +typedef u64 (*btf_bpf_l3_csum_replace)(struct sk_buff *, u32, u64, u64, u64); + +typedef u64 (*btf_bpf_l4_csum_replace)(struct sk_buff *, u32, u64, u64, u64); + +typedef u64 (*btf_bpf_csum_diff)(__be32 *, u32, __be32 *, u32, __wsum); + +typedef u64 (*btf_bpf_csum_update)(struct sk_buff *, __wsum); + +typedef u64 (*btf_bpf_csum_level)(struct sk_buff *, u64); + +enum { + BPF_F_NEIGH = 2, + BPF_F_PEER = 4, + BPF_F_NEXTHOP = 8, +}; + +typedef u64 (*btf_bpf_clone_redirect)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_bpf_redirect)(u32, u64); + +typedef u64 (*btf_bpf_redirect_peer)(u32, u64); + +typedef u64 (*btf_bpf_redirect_neigh)(u32, struct bpf_redir_neigh *, int, u64); + +typedef u64 (*btf_bpf_msg_apply_bytes)(struct sk_msg *, u32); + +typedef u64 (*btf_bpf_msg_cork_bytes)(struct sk_msg *, u32); + +typedef u64 (*btf_bpf_msg_pull_data)(struct sk_msg *, u32, u32, u64); + +typedef u64 (*btf_bpf_msg_push_data)(struct sk_msg *, u32, u32, u64); + +typedef u64 (*btf_bpf_msg_pop_data)(struct sk_msg *, u32, u32, u64); + +typedef u64 (*btf_bpf_get_cgroup_classid_curr)(); + +typedef u64 (*btf_bpf_skb_cgroup_classid)(const struct sk_buff *); + +typedef u64 (*btf_bpf_get_cgroup_classid)(const struct sk_buff *); + +typedef u64 (*btf_bpf_get_route_realm)(const struct sk_buff *); + +typedef u64 (*btf_bpf_get_hash_recalc)(struct sk_buff *); + +typedef u64 (*btf_bpf_set_hash_invalid)(struct sk_buff *); + +typedef u64 (*btf_bpf_set_hash)(struct sk_buff *, u32); + +typedef u64 (*btf_bpf_skb_vlan_push)(struct sk_buff *, __be16, u16); + +typedef u64 (*btf_bpf_skb_vlan_pop)(struct sk_buff *); + +typedef u64 (*btf_bpf_skb_change_proto)(struct sk_buff *, __be16, u64); + +typedef u64 (*btf_bpf_skb_change_type)(struct sk_buff *, u32); + +typedef u64 (*btf_sk_skb_adjust_room)(struct sk_buff *, s32, u32, u64); + +typedef u64 (*btf_bpf_skb_adjust_room)(struct sk_buff *, s32, u32, u64); + +typedef u64 (*btf_bpf_skb_change_tail)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_sk_skb_change_tail)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_bpf_skb_change_head)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_sk_skb_change_head)(struct sk_buff *, u32, u64); + +typedef u64 (*btf_bpf_xdp_adjust_head)(struct xdp_buff *, int); + +typedef u64 (*btf_bpf_xdp_adjust_tail)(struct xdp_buff *, int); + +typedef u64 (*btf_bpf_xdp_adjust_meta)(struct xdp_buff *, int); + +typedef u64 (*btf_bpf_xdp_redirect)(u32, u64); + +typedef u64 (*btf_bpf_xdp_redirect_map)(struct bpf_map *, u32, u64); + +typedef u64 (*btf_bpf_skb_event_output)(struct sk_buff *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_skb_get_tunnel_key)(struct sk_buff *, struct bpf_tunnel_key *, u32, u64); + +typedef u64 (*btf_bpf_skb_get_tunnel_opt)(struct 
sk_buff *, u8 *, u32); + +typedef u64 (*btf_bpf_skb_set_tunnel_key)(struct sk_buff *, const struct bpf_tunnel_key *, u32, u64); + +typedef u64 (*btf_bpf_skb_set_tunnel_opt)(struct sk_buff *, const u8 *, u32); + +typedef u64 (*btf_bpf_skb_under_cgroup)(struct sk_buff *, struct bpf_map *, u32); + +typedef u64 (*btf_bpf_skb_cgroup_id)(const struct sk_buff *); + +typedef u64 (*btf_bpf_skb_ancestor_cgroup_id)(const struct sk_buff *, int); + +typedef u64 (*btf_bpf_sk_cgroup_id)(struct sock *); + +typedef u64 (*btf_bpf_sk_ancestor_cgroup_id)(struct sock *, int); + +typedef u64 (*btf_bpf_xdp_event_output)(struct xdp_buff *, struct bpf_map *, u64, void *, u64); + +typedef u64 (*btf_bpf_get_socket_cookie)(struct sk_buff *); + +typedef u64 (*btf_bpf_get_socket_cookie_sock_addr)(struct bpf_sock_addr_kern *); + +typedef u64 (*btf_bpf_get_socket_cookie_sock)(struct sock *); + +typedef u64 (*btf_bpf_get_socket_cookie_sock_ops)(struct bpf_sock_ops_kern *); + +typedef u64 (*btf_bpf_get_netns_cookie_sock)(struct sock *); + +typedef u64 (*btf_bpf_get_netns_cookie_sock_addr)(struct bpf_sock_addr_kern *); + +typedef u64 (*btf_bpf_get_socket_uid)(struct sk_buff *); + +typedef u64 (*btf_bpf_sock_addr_setsockopt)(struct bpf_sock_addr_kern *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_addr_getsockopt)(struct bpf_sock_addr_kern *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_ops_setsockopt)(struct bpf_sock_ops_kern *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_ops_getsockopt)(struct bpf_sock_ops_kern *, int, int, char *, int); + +typedef u64 (*btf_bpf_sock_ops_cb_flags_set)(struct bpf_sock_ops_kern *, int); + +typedef u64 (*btf_bpf_bind)(struct bpf_sock_addr_kern *, struct sockaddr *, int); + +typedef u64 (*btf_bpf_skb_get_xfrm_state)(struct sk_buff *, u32, struct bpf_xfrm_state *, u32, u64); + +typedef u64 (*btf_bpf_xdp_fib_lookup)(struct xdp_buff *, struct bpf_fib_lookup *, int, u32); + +typedef u64 (*btf_bpf_skb_fib_lookup)(struct sk_buff *, struct bpf_fib_lookup *, int, u32); + +typedef u64 (*btf_bpf_lwt_in_push_encap)(struct sk_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_lwt_xmit_push_encap)(struct sk_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_lwt_seg6_store_bytes)(struct sk_buff *, u32, const void *, u32); + +typedef u64 (*btf_bpf_lwt_seg6_action)(struct sk_buff *, u32, void *, u32); + +typedef u64 (*btf_bpf_lwt_seg6_adjust_srh)(struct sk_buff *, u32, s32); + +typedef u64 (*btf_bpf_skc_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sk_lookup_tcp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sk_lookup_udp)(struct sk_buff *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sk_release)(struct sock *); + +typedef u64 (*btf_bpf_xdp_sk_lookup_udp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, u32, u64); + +typedef u64 (*btf_bpf_xdp_skc_lookup_tcp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, u32, u64); + +typedef u64 (*btf_bpf_xdp_sk_lookup_tcp)(struct xdp_buff *, struct bpf_sock_tuple *, u32, u32, u64); + +typedef u64 (*btf_bpf_sock_addr_skc_lookup_tcp)(struct bpf_sock_addr_kern *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sock_addr_sk_lookup_tcp)(struct bpf_sock_addr_kern *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_sock_addr_sk_lookup_udp)(struct bpf_sock_addr_kern *, struct bpf_sock_tuple *, u32, u64, u64); + +typedef u64 (*btf_bpf_tcp_sock)(struct sock *); + +typedef u64 (*btf_bpf_get_listener_sock)(struct 
sock *); + +typedef u64 (*btf_bpf_skb_ecn_set_ce)(struct sk_buff *); + +typedef u64 (*btf_bpf_tcp_check_syncookie)(struct sock *, void *, u32, struct tcphdr *, u32); + +typedef u64 (*btf_bpf_tcp_gen_syncookie)(struct sock *, void *, u32, struct tcphdr *, u32); + +typedef u64 (*btf_bpf_sk_assign)(struct sk_buff *, struct sock *, u64); + +typedef u64 (*btf_bpf_sock_ops_load_hdr_opt)(struct bpf_sock_ops_kern *, void *, u32, u64); + +typedef u64 (*btf_bpf_sock_ops_store_hdr_opt)(struct bpf_sock_ops_kern *, const void *, u32, u64); + +typedef u64 (*btf_bpf_sock_ops_reserve_hdr_opt)(struct bpf_sock_ops_kern *, u32, u64); + +typedef u64 (*btf_sk_select_reuseport)(struct sk_reuseport_kern *, struct bpf_map *, void *, u32); + +typedef u64 (*btf_sk_reuseport_load_bytes)(const struct sk_reuseport_kern *, u32, void *, u32); + +typedef u64 (*btf_sk_reuseport_load_bytes_relative)(const struct sk_reuseport_kern *, u32, void *, u32, u32); + +typedef u64 (*btf_bpf_sk_lookup_assign)(struct bpf_sk_lookup_kern *, struct sock *, u64); + +typedef u64 (*btf_bpf_skc_to_tcp6_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_tcp_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_tcp_timewait_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_tcp_request_sock)(struct sock *); + +typedef u64 (*btf_bpf_skc_to_udp6_sock)(struct sock *); + +struct bpf_dtab_netdev___2; + +enum { + INET_DIAG_REQ_NONE = 0, + INET_DIAG_REQ_BYTECODE = 1, + INET_DIAG_REQ_SK_BPF_STORAGES = 2, + INET_DIAG_REQ_PROTOCOL = 3, + __INET_DIAG_REQ_MAX = 4, +}; + +struct sock_diag_req { + __u8 sdiag_family; + __u8 sdiag_protocol; +}; + +struct sock_diag_handler { + __u8 family; + int (*dump)(struct sk_buff *, struct nlmsghdr *); + int (*get_info)(struct sk_buff *, struct sock *); + int (*destroy)(struct sk_buff *, struct nlmsghdr *); +}; + +struct broadcast_sk { + struct sock *sk; + struct work_struct work; +}; + +typedef int gifconf_func_t(struct net_device *, char *, int, int); + +struct hwtstamp_config { + int flags; + int tx_type; + int rx_filter; +}; + +enum hwtstamp_tx_types { + HWTSTAMP_TX_OFF = 0, + HWTSTAMP_TX_ON = 1, + HWTSTAMP_TX_ONESTEP_SYNC = 2, + HWTSTAMP_TX_ONESTEP_P2P = 3, + __HWTSTAMP_TX_CNT = 4, +}; + +enum hwtstamp_rx_filters { + HWTSTAMP_FILTER_NONE = 0, + HWTSTAMP_FILTER_ALL = 1, + HWTSTAMP_FILTER_SOME = 2, + HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 3, + HWTSTAMP_FILTER_PTP_V1_L4_SYNC = 4, + HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ = 5, + HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 6, + HWTSTAMP_FILTER_PTP_V2_L4_SYNC = 7, + HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ = 8, + HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 9, + HWTSTAMP_FILTER_PTP_V2_L2_SYNC = 10, + HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ = 11, + HWTSTAMP_FILTER_PTP_V2_EVENT = 12, + HWTSTAMP_FILTER_PTP_V2_SYNC = 13, + HWTSTAMP_FILTER_PTP_V2_DELAY_REQ = 14, + HWTSTAMP_FILTER_NTP_ALL = 15, + __HWTSTAMP_FILTER_CNT = 16, +}; + +struct tso_t { + int next_frag_idx; + int size; + void *data; + u16 ip_id; + u8 tlen; + bool ipv6; + u32 tcp_seq; +}; + +struct fib_notifier_info { + int family; + struct netlink_ext_ack *extack; +}; + +enum fib_event_type { + FIB_EVENT_ENTRY_REPLACE = 0, + FIB_EVENT_ENTRY_APPEND = 1, + FIB_EVENT_ENTRY_ADD = 2, + FIB_EVENT_ENTRY_DEL = 3, + FIB_EVENT_RULE_ADD = 4, + FIB_EVENT_RULE_DEL = 5, + FIB_EVENT_NH_ADD = 6, + FIB_EVENT_NH_DEL = 7, + FIB_EVENT_VIF_ADD = 8, + FIB_EVENT_VIF_DEL = 9, +}; + +struct fib_notifier_net { + struct list_head fib_notifier_ops; + struct atomic_notifier_head fib_chain; +}; + +struct xdp_attachment_info { + struct bpf_prog *prog; + u32 flags; +}; + +struct 
xdp_buff_xsk; + +struct xsk_buff_pool { + struct device *dev; + struct net_device *netdev; + struct list_head xsk_tx_list; + spinlock_t xsk_tx_list_lock; + refcount_t users; + struct xdp_umem *umem; + struct work_struct work; + struct list_head free_list; + u32 heads_cnt; + u16 queue_id; + long: 16; + long: 64; + long: 64; + long: 64; + struct xsk_queue *fq; + struct xsk_queue *cq; + dma_addr_t *dma_pages; + struct xdp_buff_xsk *heads; + u64 chunk_mask; + u64 addrs_cnt; + u32 free_list_cnt; + u32 dma_pages_cnt; + u32 free_heads_cnt; + u32 headroom; + u32 chunk_size; + u32 frame_len; + u8 cached_need_wakeup; + bool uses_need_wakeup; + bool dma_need_sync; + bool unaligned; + void *addrs; + spinlock_t cq_lock; + struct xdp_buff_xsk *free_heads[0]; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct xdp_buff_xsk { + struct xdp_buff xdp; + dma_addr_t dma; + dma_addr_t frame_dma; + struct xsk_buff_pool *pool; + bool unaligned; + u64 orig_addr; + struct list_head free_list_node; +}; + +struct flow_match_meta { + struct flow_dissector_key_meta *key; + struct flow_dissector_key_meta *mask; +}; + +struct flow_match_basic { + struct flow_dissector_key_basic *key; + struct flow_dissector_key_basic *mask; +}; + +struct flow_match_control { + struct flow_dissector_key_control *key; + struct flow_dissector_key_control *mask; +}; + +struct flow_match_eth_addrs { + struct flow_dissector_key_eth_addrs *key; + struct flow_dissector_key_eth_addrs *mask; +}; + +struct flow_match_vlan { + struct flow_dissector_key_vlan *key; + struct flow_dissector_key_vlan *mask; +}; + +struct flow_match_ipv4_addrs { + struct flow_dissector_key_ipv4_addrs *key; + struct flow_dissector_key_ipv4_addrs *mask; +}; + +struct flow_match_ipv6_addrs { + struct flow_dissector_key_ipv6_addrs *key; + struct flow_dissector_key_ipv6_addrs *mask; +}; + +struct flow_match_ip { + struct flow_dissector_key_ip *key; + struct flow_dissector_key_ip *mask; +}; + +struct flow_match_ports { + struct flow_dissector_key_ports *key; + struct flow_dissector_key_ports *mask; +}; + +struct flow_match_icmp { + struct flow_dissector_key_icmp *key; + struct flow_dissector_key_icmp *mask; +}; + +struct flow_match_tcp { + struct flow_dissector_key_tcp *key; + struct flow_dissector_key_tcp *mask; +}; + +struct flow_match_mpls { + struct flow_dissector_key_mpls *key; + struct flow_dissector_key_mpls *mask; +}; + +struct flow_match_enc_keyid { + struct flow_dissector_key_keyid *key; + struct flow_dissector_key_keyid *mask; +}; + +struct flow_match_enc_opts { + struct flow_dissector_key_enc_opts *key; + struct flow_dissector_key_enc_opts *mask; +}; + +struct flow_match_ct { + struct flow_dissector_key_ct *key; + struct flow_dissector_key_ct *mask; +}; + +enum flow_block_command { + FLOW_BLOCK_BIND = 0, + FLOW_BLOCK_UNBIND = 1, +}; + +enum flow_block_binder_type { + FLOW_BLOCK_BINDER_TYPE_UNSPEC = 0, + FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS = 1, + FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS = 2, + FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP = 3, + FLOW_BLOCK_BINDER_TYPE_RED_MARK = 4, +}; + +struct flow_block_offload { + enum flow_block_command command; + enum flow_block_binder_type binder_type; + bool block_shared; + bool unlocked_driver_cb; + struct net *net; + struct flow_block *block; + struct list_head cb_list; + struct list_head *driver_block_list; + struct netlink_ext_ack *extack; + struct Qdisc *sch; +}; + +struct flow_block_cb; + +struct flow_block_indr { + struct list_head list; + struct net_device *dev; + struct Qdisc *sch; + enum flow_block_binder_type 
binder_type; + void *data; + void *cb_priv; + void (*cleanup)(struct flow_block_cb *); +}; + +struct flow_block_cb { + struct list_head driver_list; + struct list_head list; + flow_setup_cb_t *cb; + void *cb_ident; + void *cb_priv; + void (*release)(void *); + struct flow_block_indr indr; + unsigned int refcnt; +}; + +typedef int flow_indr_block_bind_cb_t(struct net_device *, struct Qdisc *, void *, enum tc_setup_type, void *, void *, void (*)(struct flow_block_cb *)); + +struct flow_indr_dev { + struct list_head list; + flow_indr_block_bind_cb_t *cb; + void *cb_priv; + refcount_t refcnt; + struct callback_head rcu; +}; + +struct rx_queue_attribute { + struct attribute attr; + ssize_t (*show)(struct netdev_rx_queue *, char *); + ssize_t (*store)(struct netdev_rx_queue *, const char *, size_t); +}; + +struct netdev_queue_attribute { + struct attribute attr; + ssize_t (*show)(struct netdev_queue *, char *); + ssize_t (*store)(struct netdev_queue *, const char *, size_t); +}; + +enum __sk_action { + __SK_DROP = 0, + __SK_PASS = 1, + __SK_REDIRECT = 2, + __SK_NONE = 3, +}; + +struct sk_psock_progs { + struct bpf_prog *msg_parser; + struct bpf_prog *skb_parser; + struct bpf_prog *skb_verdict; +}; + +enum sk_psock_state_bits { + SK_PSOCK_TX_ENABLED = 0, +}; + +struct sk_psock_link { + struct list_head list; + struct bpf_map *map; + void *link_raw; +}; + +struct sk_psock_parser { + struct strparser strp; + bool enabled; + void (*saved_data_ready)(struct sock *); +}; + +struct sk_psock_work_state { + struct sk_buff *skb; + u32 len; + u32 off; +}; + +struct sk_psock { + struct sock *sk; + struct sock *sk_redir; + u32 apply_bytes; + u32 cork_bytes; + u32 eval; + struct sk_msg *cork; + struct sk_psock_progs progs; + struct sk_psock_parser parser; + struct sk_buff_head ingress_skb; + struct list_head ingress_msg; + long unsigned int state; + struct list_head link; + spinlock_t link_lock; + refcount_t refcnt; + void (*saved_unhash)(struct sock *); + void (*saved_close)(struct sock *, long int); + void (*saved_write_space)(struct sock *); + struct proto *sk_proto; + struct sk_psock_work_state work_state; + struct work_struct work; + union { + struct callback_head rcu; + struct work_struct gc; + }; +}; + +struct inet6_ifaddr { + struct in6_addr addr; + __u32 prefix_len; + __u32 rt_priority; + __u32 valid_lft; + __u32 prefered_lft; + refcount_t refcnt; + spinlock_t lock; + int state; + __u32 flags; + __u8 dad_probes; + __u8 stable_privacy_retry; + __u16 scope; + __u64 dad_nonce; + long unsigned int cstamp; + long unsigned int tstamp; + struct delayed_work dad_work; + struct inet6_dev *idev; + struct fib6_info *rt; + struct hlist_node addr_lst; + struct list_head if_list; + struct list_head tmp_list; + struct inet6_ifaddr *ifpub; + int regen_count; + bool tokenized; + struct callback_head rcu; + struct in6_addr peer_addr; +}; + +struct fib_rule_uid_range { + __u32 start; + __u32 end; +}; + +enum { + FRA_UNSPEC = 0, + FRA_DST = 1, + FRA_SRC = 2, + FRA_IIFNAME = 3, + FRA_GOTO = 4, + FRA_UNUSED2 = 5, + FRA_PRIORITY = 6, + FRA_UNUSED3 = 7, + FRA_UNUSED4 = 8, + FRA_UNUSED5 = 9, + FRA_FWMARK = 10, + FRA_FLOW = 11, + FRA_TUN_ID = 12, + FRA_SUPPRESS_IFGROUP = 13, + FRA_SUPPRESS_PREFIXLEN = 14, + FRA_TABLE = 15, + FRA_FWMASK = 16, + FRA_OIFNAME = 17, + FRA_PAD = 18, + FRA_L3MDEV = 19, + FRA_UID_RANGE = 20, + FRA_PROTOCOL = 21, + FRA_IP_PROTO = 22, + FRA_SPORT_RANGE = 23, + FRA_DPORT_RANGE = 24, + __FRA_MAX = 25, +}; + +enum { + FR_ACT_UNSPEC = 0, + FR_ACT_TO_TBL = 1, + FR_ACT_GOTO = 2, + FR_ACT_NOP = 3, + 
FR_ACT_RES3 = 4, + FR_ACT_RES4 = 5, + FR_ACT_BLACKHOLE = 6, + FR_ACT_UNREACHABLE = 7, + FR_ACT_PROHIBIT = 8, + __FR_ACT_MAX = 9, +}; + +struct fib_rule_notifier_info { + struct fib_notifier_info info; + struct fib_rule *rule; +}; + +struct trace_event_raw_kfree_skb { + struct trace_entry ent; + void *skbaddr; + void *location; + short unsigned int protocol; + char __data[0]; +}; + +struct trace_event_raw_consume_skb { + struct trace_entry ent; + void *skbaddr; + char __data[0]; +}; + +struct trace_event_raw_skb_copy_datagram_iovec { + struct trace_entry ent; + const void *skbaddr; + int len; + char __data[0]; +}; + +struct trace_event_data_offsets_kfree_skb {}; + +struct trace_event_data_offsets_consume_skb {}; + +struct trace_event_data_offsets_skb_copy_datagram_iovec {}; + +typedef void (*btf_trace_kfree_skb)(void *, struct sk_buff *, void *); + +typedef void (*btf_trace_consume_skb)(void *, struct sk_buff *); + +typedef void (*btf_trace_skb_copy_datagram_iovec)(void *, const struct sk_buff *, int); + +struct trace_event_raw_net_dev_start_xmit { + struct trace_entry ent; + u32 __data_loc_name; + u16 queue_mapping; + const void *skbaddr; + bool vlan_tagged; + u16 vlan_proto; + u16 vlan_tci; + u16 protocol; + u8 ip_summed; + unsigned int len; + unsigned int data_len; + int network_offset; + bool transport_offset_valid; + int transport_offset; + u8 tx_flags; + u16 gso_size; + u16 gso_segs; + u16 gso_type; + char __data[0]; +}; + +struct trace_event_raw_net_dev_xmit { + struct trace_entry ent; + void *skbaddr; + unsigned int len; + int rc; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_net_dev_xmit_timeout { + struct trace_entry ent; + u32 __data_loc_name; + u32 __data_loc_driver; + int queue_index; + char __data[0]; +}; + +struct trace_event_raw_net_dev_template { + struct trace_entry ent; + void *skbaddr; + unsigned int len; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_raw_net_dev_rx_verbose_template { + struct trace_entry ent; + u32 __data_loc_name; + unsigned int napi_id; + u16 queue_mapping; + const void *skbaddr; + bool vlan_tagged; + u16 vlan_proto; + u16 vlan_tci; + u16 protocol; + u8 ip_summed; + u32 hash; + bool l4_hash; + unsigned int len; + unsigned int data_len; + unsigned int truesize; + bool mac_header_valid; + int mac_header; + unsigned char nr_frags; + u16 gso_size; + u16 gso_type; + char __data[0]; +}; + +struct trace_event_raw_net_dev_rx_exit_template { + struct trace_entry ent; + int ret; + char __data[0]; +}; + +struct trace_event_data_offsets_net_dev_start_xmit { + u32 name; +}; + +struct trace_event_data_offsets_net_dev_xmit { + u32 name; +}; + +struct trace_event_data_offsets_net_dev_xmit_timeout { + u32 name; + u32 driver; +}; + +struct trace_event_data_offsets_net_dev_template { + u32 name; +}; + +struct trace_event_data_offsets_net_dev_rx_verbose_template { + u32 name; +}; + +struct trace_event_data_offsets_net_dev_rx_exit_template {}; + +typedef void (*btf_trace_net_dev_start_xmit)(void *, const struct sk_buff *, const struct net_device *); + +typedef void (*btf_trace_net_dev_xmit)(void *, struct sk_buff *, int, struct net_device *, unsigned int); + +typedef void (*btf_trace_net_dev_xmit_timeout)(void *, struct net_device *, int); + +typedef void (*btf_trace_net_dev_queue)(void *, struct sk_buff *); + +typedef void (*btf_trace_netif_receive_skb)(void *, struct sk_buff *); + +typedef void (*btf_trace_netif_rx)(void *, struct sk_buff *); + +typedef void (*btf_trace_napi_gro_frags_entry)(void *, const struct sk_buff *); + 
+typedef void (*btf_trace_napi_gro_receive_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_netif_receive_skb_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_netif_receive_skb_list_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_netif_rx_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_netif_rx_ni_entry)(void *, const struct sk_buff *); + +typedef void (*btf_trace_napi_gro_frags_exit)(void *, int); + +typedef void (*btf_trace_napi_gro_receive_exit)(void *, int); + +typedef void (*btf_trace_netif_receive_skb_exit)(void *, int); + +typedef void (*btf_trace_netif_rx_exit)(void *, int); + +typedef void (*btf_trace_netif_rx_ni_exit)(void *, int); + +typedef void (*btf_trace_netif_receive_skb_list_exit)(void *, int); + +struct trace_event_raw_napi_poll { + struct trace_entry ent; + struct napi_struct *napi; + u32 __data_loc_dev_name; + int work; + int budget; + char __data[0]; +}; + +struct trace_event_data_offsets_napi_poll { + u32 dev_name; +}; + +typedef void (*btf_trace_napi_poll)(void *, struct napi_struct *, int, int); + +enum tcp_ca_state { + TCP_CA_Open = 0, + TCP_CA_Disorder = 1, + TCP_CA_CWR = 2, + TCP_CA_Recovery = 3, + TCP_CA_Loss = 4, +}; + +struct trace_event_raw_sock_rcvqueue_full { + struct trace_entry ent; + int rmem_alloc; + unsigned int truesize; + int sk_rcvbuf; + char __data[0]; +}; + +struct trace_event_raw_sock_exceed_buf_limit { + struct trace_entry ent; + char name[32]; + long int *sysctl_mem; + long int allocated; + int sysctl_rmem; + int rmem_alloc; + int sysctl_wmem; + int wmem_alloc; + int wmem_queued; + int kind; + char __data[0]; +}; + +struct trace_event_raw_inet_sock_set_state { + struct trace_entry ent; + const void *skaddr; + int oldstate; + int newstate; + __u16 sport; + __u16 dport; + __u16 family; + __u16 protocol; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + char __data[0]; +}; + +struct trace_event_data_offsets_sock_rcvqueue_full {}; + +struct trace_event_data_offsets_sock_exceed_buf_limit {}; + +struct trace_event_data_offsets_inet_sock_set_state {}; + +typedef void (*btf_trace_sock_rcvqueue_full)(void *, struct sock *, struct sk_buff *); + +typedef void (*btf_trace_sock_exceed_buf_limit)(void *, struct sock *, struct proto *, long int, int); + +typedef void (*btf_trace_inet_sock_set_state)(void *, const struct sock *, const int, const int); + +struct trace_event_raw_udp_fail_queue_rcv_skb { + struct trace_entry ent; + int rc; + __u16 lport; + char __data[0]; +}; + +struct trace_event_data_offsets_udp_fail_queue_rcv_skb {}; + +typedef void (*btf_trace_udp_fail_queue_rcv_skb)(void *, int, struct sock *); + +struct trace_event_raw_tcp_event_sk_skb { + struct trace_entry ent; + const void *skbaddr; + const void *skaddr; + int state; + __u16 sport; + __u16 dport; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + char __data[0]; +}; + +struct trace_event_raw_tcp_event_sk { + struct trace_entry ent; + const void *skaddr; + __u16 sport; + __u16 dport; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + __u64 sock_cookie; + char __data[0]; +}; + +struct trace_event_raw_tcp_retransmit_synack { + struct trace_entry ent; + const void *skaddr; + const void *req; + __u16 sport; + __u16 dport; + __u8 saddr[4]; + __u8 daddr[4]; + __u8 saddr_v6[16]; + __u8 daddr_v6[16]; + char __data[0]; +}; + +struct trace_event_raw_tcp_probe { + struct trace_entry ent; + __u8 saddr[28]; + __u8 daddr[28]; + __u16 sport; + 
__u16 dport; + __u32 mark; + __u16 data_len; + __u32 snd_nxt; + __u32 snd_una; + __u32 snd_cwnd; + __u32 ssthresh; + __u32 snd_wnd; + __u32 srtt; + __u32 rcv_wnd; + __u64 sock_cookie; + char __data[0]; +}; + +struct trace_event_data_offsets_tcp_event_sk_skb {}; + +struct trace_event_data_offsets_tcp_event_sk {}; + +struct trace_event_data_offsets_tcp_retransmit_synack {}; + +struct trace_event_data_offsets_tcp_probe {}; + +typedef void (*btf_trace_tcp_retransmit_skb)(void *, const struct sock *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_send_reset)(void *, const struct sock *, const struct sk_buff *); + +typedef void (*btf_trace_tcp_receive_reset)(void *, struct sock *); + +typedef void (*btf_trace_tcp_destroy_sock)(void *, struct sock *); + +typedef void (*btf_trace_tcp_rcv_space_adjust)(void *, struct sock *); + +typedef void (*btf_trace_tcp_retransmit_synack)(void *, const struct sock *, const struct request_sock *); + +typedef void (*btf_trace_tcp_probe)(void *, struct sock *, struct sk_buff *); + +struct trace_event_raw_fib_table_lookup { + struct trace_entry ent; + u32 tb_id; + int err; + int oif; + int iif; + u8 proto; + __u8 tos; + __u8 scope; + __u8 flags; + __u8 src[4]; + __u8 dst[4]; + __u8 gw4[4]; + __u8 gw6[16]; + u16 sport; + u16 dport; + u32 __data_loc_name; + char __data[0]; +}; + +struct trace_event_data_offsets_fib_table_lookup { + u32 name; +}; + +typedef void (*btf_trace_fib_table_lookup)(void *, u32, const struct flowi4 *, const struct fib_nh_common *, int); + +struct trace_event_raw_qdisc_dequeue { + struct trace_entry ent; + struct Qdisc *qdisc; + const struct netdev_queue *txq; + int packets; + void *skbaddr; + int ifindex; + u32 handle; + u32 parent; + long unsigned int txq_state; + char __data[0]; +}; + +struct trace_event_raw_qdisc_reset { + struct trace_entry ent; + u32 __data_loc_dev; + u32 __data_loc_kind; + u32 parent; + u32 handle; + char __data[0]; +}; + +struct trace_event_raw_qdisc_destroy { + struct trace_entry ent; + u32 __data_loc_dev; + u32 __data_loc_kind; + u32 parent; + u32 handle; + char __data[0]; +}; + +struct trace_event_raw_qdisc_create { + struct trace_entry ent; + u32 __data_loc_dev; + u32 __data_loc_kind; + u32 parent; + char __data[0]; +}; + +struct trace_event_data_offsets_qdisc_dequeue {}; + +struct trace_event_data_offsets_qdisc_reset { + u32 dev; + u32 kind; +}; + +struct trace_event_data_offsets_qdisc_destroy { + u32 dev; + u32 kind; +}; + +struct trace_event_data_offsets_qdisc_create { + u32 dev; + u32 kind; +}; + +typedef void (*btf_trace_qdisc_dequeue)(void *, struct Qdisc *, const struct netdev_queue *, int, struct sk_buff *); + +typedef void (*btf_trace_qdisc_reset)(void *, struct Qdisc *); + +typedef void (*btf_trace_qdisc_destroy)(void *, struct Qdisc *); + +typedef void (*btf_trace_qdisc_create)(void *, const struct Qdisc_ops *, struct net_device *, u32); + +struct bridge_stp_xstats { + __u64 transition_blk; + __u64 transition_fwd; + __u64 rx_bpdu; + __u64 tx_bpdu; + __u64 rx_tcn; + __u64 tx_tcn; +}; + +struct br_mcast_stats { + __u64 igmp_v1queries[2]; + __u64 igmp_v2queries[2]; + __u64 igmp_v3queries[2]; + __u64 igmp_leaves[2]; + __u64 igmp_v1reports[2]; + __u64 igmp_v2reports[2]; + __u64 igmp_v3reports[2]; + __u64 igmp_parse_errors; + __u64 mld_v1queries[2]; + __u64 mld_v2queries[2]; + __u64 mld_leaves[2]; + __u64 mld_v1reports[2]; + __u64 mld_v2reports[2]; + __u64 mld_parse_errors; + __u64 mcast_bytes[2]; + __u64 mcast_packets[2]; +}; + +struct br_ip { + union { + __be32 ip4; + struct in6_addr ip6; + } src; 
+ union { + __be32 ip4; + struct in6_addr ip6; + } dst; + __be16 proto; + __u16 vid; +}; + +struct bridge_id { + unsigned char prio[2]; + unsigned char addr[6]; +}; + +typedef struct bridge_id bridge_id; + +struct mac_addr { + unsigned char addr[6]; +}; + +typedef struct mac_addr mac_addr; + +typedef __u16 port_id; + +struct bridge_mcast_own_query { + struct timer_list timer; + u32 startup_sent; +}; + +struct bridge_mcast_other_query { + struct timer_list timer; + long unsigned int delay_time; +}; + +struct net_bridge_port; + +struct bridge_mcast_querier { + struct br_ip addr; + struct net_bridge_port *port; +}; + +struct net_bridge; + +struct net_bridge_vlan_group; + +struct bridge_mcast_stats; + +struct net_bridge_port { + struct net_bridge *br; + struct net_device *dev; + struct list_head list; + long unsigned int flags; + struct net_bridge_vlan_group *vlgrp; + struct net_bridge_port *backup_port; + u8 priority; + u8 state; + u16 port_no; + unsigned char topology_change_ack; + unsigned char config_pending; + port_id port_id; + port_id designated_port; + bridge_id designated_root; + bridge_id designated_bridge; + u32 path_cost; + u32 designated_cost; + long unsigned int designated_age; + struct timer_list forward_delay_timer; + struct timer_list hold_timer; + struct timer_list message_age_timer; + struct kobject kobj; + struct callback_head rcu; + struct bridge_mcast_own_query ip4_own_query; + struct bridge_mcast_own_query ip6_own_query; + unsigned char multicast_router; + struct bridge_mcast_stats *mcast_stats; + struct timer_list multicast_router_timer; + struct hlist_head mglist; + struct hlist_node rlist; + char sysfs_name[16]; + struct netpoll *np; + int offload_fwd_mark; + u16 group_fwd_mask; + u16 backup_redirected_cnt; + struct bridge_stp_xstats stp_xstats; +}; + +struct bridge_mcast_stats { + struct br_mcast_stats mstats; + struct u64_stats_sync syncp; +}; + +struct net_bridge { + spinlock_t lock; + spinlock_t hash_lock; + struct list_head port_list; + struct net_device *dev; + struct pcpu_sw_netstats *stats; + long unsigned int options; + __be16 vlan_proto; + u16 default_pvid; + struct net_bridge_vlan_group *vlgrp; + struct rhashtable fdb_hash_tbl; + union { + struct rtable fake_rtable; + struct rt6_info fake_rt6_info; + }; + u16 group_fwd_mask; + u16 group_fwd_mask_required; + bridge_id designated_root; + bridge_id bridge_id; + unsigned char topology_change; + unsigned char topology_change_detected; + u16 root_port; + long unsigned int max_age; + long unsigned int hello_time; + long unsigned int forward_delay; + long unsigned int ageing_time; + long unsigned int bridge_max_age; + long unsigned int bridge_hello_time; + long unsigned int bridge_forward_delay; + long unsigned int bridge_ageing_time; + u32 root_path_cost; + u8 group_addr[6]; + enum { + BR_NO_STP = 0, + BR_KERNEL_STP = 1, + BR_USER_STP = 2, + } stp_enabled; + u32 hash_max; + u32 multicast_last_member_count; + u32 multicast_startup_query_count; + u8 multicast_igmp_version; + u8 multicast_router; + u8 multicast_mld_version; + spinlock_t multicast_lock; + long unsigned int multicast_last_member_interval; + long unsigned int multicast_membership_interval; + long unsigned int multicast_querier_interval; + long unsigned int multicast_query_interval; + long unsigned int multicast_query_response_interval; + long unsigned int multicast_startup_query_interval; + struct rhashtable mdb_hash_tbl; + struct rhashtable sg_port_tbl; + struct hlist_head mcast_gc_list; + struct hlist_head mdb_list; + struct hlist_head router_list; + 
struct timer_list multicast_router_timer; + struct bridge_mcast_other_query ip4_other_query; + struct bridge_mcast_own_query ip4_own_query; + struct bridge_mcast_querier ip4_querier; + struct bridge_mcast_stats *mcast_stats; + struct bridge_mcast_other_query ip6_other_query; + struct bridge_mcast_own_query ip6_own_query; + struct bridge_mcast_querier ip6_querier; + struct work_struct mcast_gc_work; + struct timer_list hello_timer; + struct timer_list tcn_timer; + struct timer_list topology_change_timer; + struct delayed_work gc_work; + struct kobject *ifobj; + u32 auto_cnt; + int offload_fwd_mark; + struct hlist_head fdb_list; + struct list_head mrp_list; +}; + +struct net_bridge_vlan_group { + struct rhashtable vlan_hash; + struct rhashtable tunnel_hash; + struct list_head vlan_list; + u16 num_vlans; + u16 pvid; + u8 pvid_state; +}; + +struct net_bridge_fdb_key { + mac_addr addr; + u16 vlan_id; +}; + +struct net_bridge_fdb_entry { + struct rhash_head rhnode; + struct net_bridge_port *dst; + struct net_bridge_fdb_key key; + struct hlist_node fdb_node; + long unsigned int flags; + long: 64; + long: 64; + long unsigned int updated; + long unsigned int used; + struct callback_head rcu; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct trace_event_raw_br_fdb_add { + struct trace_entry ent; + u8 ndm_flags; + u32 __data_loc_dev; + unsigned char addr[6]; + u16 vid; + u16 nlh_flags; + char __data[0]; +}; + +struct trace_event_raw_br_fdb_external_learn_add { + struct trace_entry ent; + u32 __data_loc_br_dev; + u32 __data_loc_dev; + unsigned char addr[6]; + u16 vid; + char __data[0]; +}; + +struct trace_event_raw_fdb_delete { + struct trace_entry ent; + u32 __data_loc_br_dev; + u32 __data_loc_dev; + unsigned char addr[6]; + u16 vid; + char __data[0]; +}; + +struct trace_event_raw_br_fdb_update { + struct trace_entry ent; + u32 __data_loc_br_dev; + u32 __data_loc_dev; + unsigned char addr[6]; + u16 vid; + long unsigned int flags; + char __data[0]; +}; + +struct trace_event_data_offsets_br_fdb_add { + u32 dev; +}; + +struct trace_event_data_offsets_br_fdb_external_learn_add { + u32 br_dev; + u32 dev; +}; + +struct trace_event_data_offsets_fdb_delete { + u32 br_dev; + u32 dev; +}; + +struct trace_event_data_offsets_br_fdb_update { + u32 br_dev; + u32 dev; +}; + +typedef void (*btf_trace_br_fdb_add)(void *, struct ndmsg *, struct net_device *, const unsigned char *, u16, u16); + +typedef void (*btf_trace_br_fdb_external_learn_add)(void *, struct net_bridge *, struct net_bridge_port *, const unsigned char *, u16); + +typedef void (*btf_trace_fdb_delete)(void *, struct net_bridge *, struct net_bridge_fdb_entry *); + +typedef void (*btf_trace_br_fdb_update)(void *, struct net_bridge *, struct net_bridge_port *, const unsigned char *, u16, long unsigned int); + +struct trace_event_raw_page_pool_release { + struct trace_entry ent; + const struct page_pool *pool; + s32 inflight; + u32 hold; + u32 release; + u64 cnt; + char __data[0]; +}; + +struct trace_event_raw_page_pool_state_release { + struct trace_entry ent; + const struct page_pool *pool; + const struct page *page; + u32 release; + long unsigned int pfn; + char __data[0]; +}; + +struct trace_event_raw_page_pool_state_hold { + struct trace_entry ent; + const struct page_pool *pool; + const struct page *page; + u32 hold; + long unsigned int pfn; + char __data[0]; +}; + +struct trace_event_raw_page_pool_update_nid { + struct trace_entry ent; + const struct page_pool *pool; + int pool_nid; + int new_nid; + char __data[0]; +}; + +struct 
trace_event_data_offsets_page_pool_release {}; + +struct trace_event_data_offsets_page_pool_state_release {}; + +struct trace_event_data_offsets_page_pool_state_hold {}; + +struct trace_event_data_offsets_page_pool_update_nid {}; + +typedef void (*btf_trace_page_pool_release)(void *, const struct page_pool *, s32, u32, u32); + +typedef void (*btf_trace_page_pool_state_release)(void *, const struct page_pool *, const struct page *, u32); + +typedef void (*btf_trace_page_pool_state_hold)(void *, const struct page_pool *, const struct page *, u32); + +typedef void (*btf_trace_page_pool_update_nid)(void *, const struct page_pool *, int); + +struct trace_event_raw_neigh_create { + struct trace_entry ent; + u32 family; + u32 __data_loc_dev; + int entries; + u8 created; + u8 gc_exempt; + u8 primary_key4[4]; + u8 primary_key6[16]; + char __data[0]; +}; + +struct trace_event_raw_neigh_update { + struct trace_entry ent; + u32 family; + u32 __data_loc_dev; + u8 lladdr[32]; + u8 lladdr_len; + u8 flags; + u8 nud_state; + u8 type; + u8 dead; + int refcnt; + __u8 primary_key4[4]; + __u8 primary_key6[16]; + long unsigned int confirmed; + long unsigned int updated; + long unsigned int used; + u8 new_lladdr[32]; + u8 new_state; + u32 update_flags; + u32 pid; + char __data[0]; +}; + +struct trace_event_raw_neigh__update { + struct trace_entry ent; + u32 family; + u32 __data_loc_dev; + u8 lladdr[32]; + u8 lladdr_len; + u8 flags; + u8 nud_state; + u8 type; + u8 dead; + int refcnt; + __u8 primary_key4[4]; + __u8 primary_key6[16]; + long unsigned int confirmed; + long unsigned int updated; + long unsigned int used; + u32 err; + char __data[0]; +}; + +struct trace_event_data_offsets_neigh_create { + u32 dev; +}; + +struct trace_event_data_offsets_neigh_update { + u32 dev; +}; + +struct trace_event_data_offsets_neigh__update { + u32 dev; +}; + +typedef void (*btf_trace_neigh_create)(void *, struct neigh_table *, struct net_device *, const void *, const struct neighbour *, bool); + +typedef void (*btf_trace_neigh_update)(void *, struct neighbour *, const u8 *, u8, u32, u32); + +typedef void (*btf_trace_neigh_update_done)(void *, struct neighbour *, int); + +typedef void (*btf_trace_neigh_timer_handler)(void *, struct neighbour *, int); + +typedef void (*btf_trace_neigh_event_send_done)(void *, struct neighbour *, int); + +typedef void (*btf_trace_neigh_event_send_dead)(void *, struct neighbour *, int); + +typedef void (*btf_trace_neigh_cleanup_and_release)(void *, struct neighbour *, int); + +struct clock_identity { + u8 id[8]; +}; + +struct port_identity { + struct clock_identity clock_identity; + __be16 port_number; +}; + +struct ptp_header { + u8 tsmt; + u8 ver; + __be16 message_length; + u8 domain_number; + u8 reserved1; + u8 flag_field[2]; + __be64 correction; + __be32 reserved2; + struct port_identity source_port_identity; + __be16 sequence_id; + u8 control; + u8 log_message_interval; +} __attribute__((packed)); + +struct update_classid_context { + u32 classid; + unsigned int batch; +}; + +enum lwtunnel_encap_types { + LWTUNNEL_ENCAP_NONE = 0, + LWTUNNEL_ENCAP_MPLS = 1, + LWTUNNEL_ENCAP_IP = 2, + LWTUNNEL_ENCAP_ILA = 3, + LWTUNNEL_ENCAP_IP6 = 4, + LWTUNNEL_ENCAP_SEG6 = 5, + LWTUNNEL_ENCAP_BPF = 6, + LWTUNNEL_ENCAP_SEG6_LOCAL = 7, + LWTUNNEL_ENCAP_RPL = 8, + __LWTUNNEL_ENCAP_MAX = 9, +}; + +struct rtnexthop { + short unsigned int rtnh_len; + unsigned char rtnh_flags; + unsigned char rtnh_hops; + int rtnh_ifindex; +}; + +struct lwtunnel_encap_ops { + int (*build_state)(struct net *, struct nlattr *, unsigned 
int, const void *, struct lwtunnel_state **, struct netlink_ext_ack *); + void (*destroy_state)(struct lwtunnel_state *); + int (*output)(struct net *, struct sock *, struct sk_buff *); + int (*input)(struct sk_buff *); + int (*fill_encap)(struct sk_buff *, struct lwtunnel_state *); + int (*get_encap_size)(struct lwtunnel_state *); + int (*cmp_encap)(struct lwtunnel_state *, struct lwtunnel_state *); + int (*xmit)(struct sk_buff *); + struct module *owner; +}; + +enum { + LWT_BPF_PROG_UNSPEC = 0, + LWT_BPF_PROG_FD = 1, + LWT_BPF_PROG_NAME = 2, + __LWT_BPF_PROG_MAX = 3, +}; + +enum { + LWT_BPF_UNSPEC = 0, + LWT_BPF_IN = 1, + LWT_BPF_OUT = 2, + LWT_BPF_XMIT = 3, + LWT_BPF_XMIT_HEADROOM = 4, + __LWT_BPF_MAX = 5, +}; + +enum { + LWTUNNEL_XMIT_DONE = 0, + LWTUNNEL_XMIT_CONTINUE = 1, +}; + +struct bpf_lwt_prog { + struct bpf_prog *prog; + char *name; +}; + +struct bpf_lwt { + struct bpf_lwt_prog in; + struct bpf_lwt_prog out; + struct bpf_lwt_prog xmit; + int family; +}; + +struct bpf_stab { + struct bpf_map map; + struct sock **sks; + struct sk_psock_progs progs; + raw_spinlock_t lock; + long: 32; + long: 64; + long: 64; + long: 64; +}; + +typedef u64 (*btf_bpf_sock_map_update)(struct bpf_sock_ops_kern *, struct bpf_map *, void *, u64); + +typedef u64 (*btf_bpf_sk_redirect_map)(struct sk_buff *, struct bpf_map *, u32, u64); + +typedef u64 (*btf_bpf_msg_redirect_map)(struct sk_msg *, struct bpf_map *, u32, u64); + +struct sock_map_seq_info { + struct bpf_map *map; + struct sock *sk; + u32 index; +}; + +struct bpf_iter__sockmap { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_map *map; + }; + union { + void *key; + }; + union { + struct sock *sk; + }; +}; + +struct bpf_shtab_elem { + struct callback_head rcu; + u32 hash; + struct sock *sk; + struct hlist_node node; + u8 key[0]; +}; + +struct bpf_shtab_bucket { + struct hlist_head head; + raw_spinlock_t lock; +}; + +struct bpf_shtab { + struct bpf_map map; + struct bpf_shtab_bucket *buckets; + u32 buckets_num; + u32 elem_size; + struct sk_psock_progs progs; + atomic_t count; + long: 32; + long: 64; + long: 64; +}; + +typedef u64 (*btf_bpf_sock_hash_update)(struct bpf_sock_ops_kern *, struct bpf_map *, void *, u64); + +typedef u64 (*btf_bpf_sk_redirect_hash)(struct sk_buff *, struct bpf_map *, void *, u64); + +typedef u64 (*btf_bpf_msg_redirect_hash)(struct sk_msg *, struct bpf_map *, void *, u64); + +struct sock_hash_seq_info { + struct bpf_map *map; + struct bpf_shtab *htab; + u32 bucket_id; +}; + +struct dst_cache_pcpu { + long unsigned int refresh_ts; + struct dst_entry *dst; + u32 cookie; + union { + struct in_addr in_saddr; + struct in6_addr in6_saddr; + }; +}; + +enum devlink_command { + DEVLINK_CMD_UNSPEC = 0, + DEVLINK_CMD_GET = 1, + DEVLINK_CMD_SET = 2, + DEVLINK_CMD_NEW = 3, + DEVLINK_CMD_DEL = 4, + DEVLINK_CMD_PORT_GET = 5, + DEVLINK_CMD_PORT_SET = 6, + DEVLINK_CMD_PORT_NEW = 7, + DEVLINK_CMD_PORT_DEL = 8, + DEVLINK_CMD_PORT_SPLIT = 9, + DEVLINK_CMD_PORT_UNSPLIT = 10, + DEVLINK_CMD_SB_GET = 11, + DEVLINK_CMD_SB_SET = 12, + DEVLINK_CMD_SB_NEW = 13, + DEVLINK_CMD_SB_DEL = 14, + DEVLINK_CMD_SB_POOL_GET = 15, + DEVLINK_CMD_SB_POOL_SET = 16, + DEVLINK_CMD_SB_POOL_NEW = 17, + DEVLINK_CMD_SB_POOL_DEL = 18, + DEVLINK_CMD_SB_PORT_POOL_GET = 19, + DEVLINK_CMD_SB_PORT_POOL_SET = 20, + DEVLINK_CMD_SB_PORT_POOL_NEW = 21, + DEVLINK_CMD_SB_PORT_POOL_DEL = 22, + DEVLINK_CMD_SB_TC_POOL_BIND_GET = 23, + DEVLINK_CMD_SB_TC_POOL_BIND_SET = 24, + DEVLINK_CMD_SB_TC_POOL_BIND_NEW = 25, + DEVLINK_CMD_SB_TC_POOL_BIND_DEL = 26, + 
DEVLINK_CMD_SB_OCC_SNAPSHOT = 27, + DEVLINK_CMD_SB_OCC_MAX_CLEAR = 28, + DEVLINK_CMD_ESWITCH_GET = 29, + DEVLINK_CMD_ESWITCH_SET = 30, + DEVLINK_CMD_DPIPE_TABLE_GET = 31, + DEVLINK_CMD_DPIPE_ENTRIES_GET = 32, + DEVLINK_CMD_DPIPE_HEADERS_GET = 33, + DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET = 34, + DEVLINK_CMD_RESOURCE_SET = 35, + DEVLINK_CMD_RESOURCE_DUMP = 36, + DEVLINK_CMD_RELOAD = 37, + DEVLINK_CMD_PARAM_GET = 38, + DEVLINK_CMD_PARAM_SET = 39, + DEVLINK_CMD_PARAM_NEW = 40, + DEVLINK_CMD_PARAM_DEL = 41, + DEVLINK_CMD_REGION_GET = 42, + DEVLINK_CMD_REGION_SET = 43, + DEVLINK_CMD_REGION_NEW = 44, + DEVLINK_CMD_REGION_DEL = 45, + DEVLINK_CMD_REGION_READ = 46, + DEVLINK_CMD_PORT_PARAM_GET = 47, + DEVLINK_CMD_PORT_PARAM_SET = 48, + DEVLINK_CMD_PORT_PARAM_NEW = 49, + DEVLINK_CMD_PORT_PARAM_DEL = 50, + DEVLINK_CMD_INFO_GET = 51, + DEVLINK_CMD_HEALTH_REPORTER_GET = 52, + DEVLINK_CMD_HEALTH_REPORTER_SET = 53, + DEVLINK_CMD_HEALTH_REPORTER_RECOVER = 54, + DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE = 55, + DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET = 56, + DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR = 57, + DEVLINK_CMD_FLASH_UPDATE = 58, + DEVLINK_CMD_FLASH_UPDATE_END = 59, + DEVLINK_CMD_FLASH_UPDATE_STATUS = 60, + DEVLINK_CMD_TRAP_GET = 61, + DEVLINK_CMD_TRAP_SET = 62, + DEVLINK_CMD_TRAP_NEW = 63, + DEVLINK_CMD_TRAP_DEL = 64, + DEVLINK_CMD_TRAP_GROUP_GET = 65, + DEVLINK_CMD_TRAP_GROUP_SET = 66, + DEVLINK_CMD_TRAP_GROUP_NEW = 67, + DEVLINK_CMD_TRAP_GROUP_DEL = 68, + DEVLINK_CMD_TRAP_POLICER_GET = 69, + DEVLINK_CMD_TRAP_POLICER_SET = 70, + DEVLINK_CMD_TRAP_POLICER_NEW = 71, + DEVLINK_CMD_TRAP_POLICER_DEL = 72, + DEVLINK_CMD_HEALTH_REPORTER_TEST = 73, + __DEVLINK_CMD_MAX = 74, + DEVLINK_CMD_MAX = 73, +}; + +enum devlink_eswitch_mode { + DEVLINK_ESWITCH_MODE_LEGACY = 0, + DEVLINK_ESWITCH_MODE_SWITCHDEV = 1, +}; + +enum { + DEVLINK_ATTR_STATS_RX_PACKETS = 0, + DEVLINK_ATTR_STATS_RX_BYTES = 1, + DEVLINK_ATTR_STATS_RX_DROPPED = 2, + __DEVLINK_ATTR_STATS_MAX = 3, + DEVLINK_ATTR_STATS_MAX = 2, +}; + +enum { + DEVLINK_FLASH_OVERWRITE_SETTINGS_BIT = 0, + DEVLINK_FLASH_OVERWRITE_IDENTIFIERS_BIT = 1, + __DEVLINK_FLASH_OVERWRITE_MAX_BIT = 2, + DEVLINK_FLASH_OVERWRITE_MAX_BIT = 1, +}; + +enum { + DEVLINK_ATTR_TRAP_METADATA_TYPE_IN_PORT = 0, + DEVLINK_ATTR_TRAP_METADATA_TYPE_FA_COOKIE = 1, +}; + +enum devlink_attr { + DEVLINK_ATTR_UNSPEC = 0, + DEVLINK_ATTR_BUS_NAME = 1, + DEVLINK_ATTR_DEV_NAME = 2, + DEVLINK_ATTR_PORT_INDEX = 3, + DEVLINK_ATTR_PORT_TYPE = 4, + DEVLINK_ATTR_PORT_DESIRED_TYPE = 5, + DEVLINK_ATTR_PORT_NETDEV_IFINDEX = 6, + DEVLINK_ATTR_PORT_NETDEV_NAME = 7, + DEVLINK_ATTR_PORT_IBDEV_NAME = 8, + DEVLINK_ATTR_PORT_SPLIT_COUNT = 9, + DEVLINK_ATTR_PORT_SPLIT_GROUP = 10, + DEVLINK_ATTR_SB_INDEX = 11, + DEVLINK_ATTR_SB_SIZE = 12, + DEVLINK_ATTR_SB_INGRESS_POOL_COUNT = 13, + DEVLINK_ATTR_SB_EGRESS_POOL_COUNT = 14, + DEVLINK_ATTR_SB_INGRESS_TC_COUNT = 15, + DEVLINK_ATTR_SB_EGRESS_TC_COUNT = 16, + DEVLINK_ATTR_SB_POOL_INDEX = 17, + DEVLINK_ATTR_SB_POOL_TYPE = 18, + DEVLINK_ATTR_SB_POOL_SIZE = 19, + DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE = 20, + DEVLINK_ATTR_SB_THRESHOLD = 21, + DEVLINK_ATTR_SB_TC_INDEX = 22, + DEVLINK_ATTR_SB_OCC_CUR = 23, + DEVLINK_ATTR_SB_OCC_MAX = 24, + DEVLINK_ATTR_ESWITCH_MODE = 25, + DEVLINK_ATTR_ESWITCH_INLINE_MODE = 26, + DEVLINK_ATTR_DPIPE_TABLES = 27, + DEVLINK_ATTR_DPIPE_TABLE = 28, + DEVLINK_ATTR_DPIPE_TABLE_NAME = 29, + DEVLINK_ATTR_DPIPE_TABLE_SIZE = 30, + DEVLINK_ATTR_DPIPE_TABLE_MATCHES = 31, + DEVLINK_ATTR_DPIPE_TABLE_ACTIONS = 32, + DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED = 33, + 
DEVLINK_ATTR_DPIPE_ENTRIES = 34, + DEVLINK_ATTR_DPIPE_ENTRY = 35, + DEVLINK_ATTR_DPIPE_ENTRY_INDEX = 36, + DEVLINK_ATTR_DPIPE_ENTRY_MATCH_VALUES = 37, + DEVLINK_ATTR_DPIPE_ENTRY_ACTION_VALUES = 38, + DEVLINK_ATTR_DPIPE_ENTRY_COUNTER = 39, + DEVLINK_ATTR_DPIPE_MATCH = 40, + DEVLINK_ATTR_DPIPE_MATCH_VALUE = 41, + DEVLINK_ATTR_DPIPE_MATCH_TYPE = 42, + DEVLINK_ATTR_DPIPE_ACTION = 43, + DEVLINK_ATTR_DPIPE_ACTION_VALUE = 44, + DEVLINK_ATTR_DPIPE_ACTION_TYPE = 45, + DEVLINK_ATTR_DPIPE_VALUE = 46, + DEVLINK_ATTR_DPIPE_VALUE_MASK = 47, + DEVLINK_ATTR_DPIPE_VALUE_MAPPING = 48, + DEVLINK_ATTR_DPIPE_HEADERS = 49, + DEVLINK_ATTR_DPIPE_HEADER = 50, + DEVLINK_ATTR_DPIPE_HEADER_NAME = 51, + DEVLINK_ATTR_DPIPE_HEADER_ID = 52, + DEVLINK_ATTR_DPIPE_HEADER_FIELDS = 53, + DEVLINK_ATTR_DPIPE_HEADER_GLOBAL = 54, + DEVLINK_ATTR_DPIPE_HEADER_INDEX = 55, + DEVLINK_ATTR_DPIPE_FIELD = 56, + DEVLINK_ATTR_DPIPE_FIELD_NAME = 57, + DEVLINK_ATTR_DPIPE_FIELD_ID = 58, + DEVLINK_ATTR_DPIPE_FIELD_BITWIDTH = 59, + DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE = 60, + DEVLINK_ATTR_PAD = 61, + DEVLINK_ATTR_ESWITCH_ENCAP_MODE = 62, + DEVLINK_ATTR_RESOURCE_LIST = 63, + DEVLINK_ATTR_RESOURCE = 64, + DEVLINK_ATTR_RESOURCE_NAME = 65, + DEVLINK_ATTR_RESOURCE_ID = 66, + DEVLINK_ATTR_RESOURCE_SIZE = 67, + DEVLINK_ATTR_RESOURCE_SIZE_NEW = 68, + DEVLINK_ATTR_RESOURCE_SIZE_VALID = 69, + DEVLINK_ATTR_RESOURCE_SIZE_MIN = 70, + DEVLINK_ATTR_RESOURCE_SIZE_MAX = 71, + DEVLINK_ATTR_RESOURCE_SIZE_GRAN = 72, + DEVLINK_ATTR_RESOURCE_UNIT = 73, + DEVLINK_ATTR_RESOURCE_OCC = 74, + DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID = 75, + DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS = 76, + DEVLINK_ATTR_PORT_FLAVOUR = 77, + DEVLINK_ATTR_PORT_NUMBER = 78, + DEVLINK_ATTR_PORT_SPLIT_SUBPORT_NUMBER = 79, + DEVLINK_ATTR_PARAM = 80, + DEVLINK_ATTR_PARAM_NAME = 81, + DEVLINK_ATTR_PARAM_GENERIC = 82, + DEVLINK_ATTR_PARAM_TYPE = 83, + DEVLINK_ATTR_PARAM_VALUES_LIST = 84, + DEVLINK_ATTR_PARAM_VALUE = 85, + DEVLINK_ATTR_PARAM_VALUE_DATA = 86, + DEVLINK_ATTR_PARAM_VALUE_CMODE = 87, + DEVLINK_ATTR_REGION_NAME = 88, + DEVLINK_ATTR_REGION_SIZE = 89, + DEVLINK_ATTR_REGION_SNAPSHOTS = 90, + DEVLINK_ATTR_REGION_SNAPSHOT = 91, + DEVLINK_ATTR_REGION_SNAPSHOT_ID = 92, + DEVLINK_ATTR_REGION_CHUNKS = 93, + DEVLINK_ATTR_REGION_CHUNK = 94, + DEVLINK_ATTR_REGION_CHUNK_DATA = 95, + DEVLINK_ATTR_REGION_CHUNK_ADDR = 96, + DEVLINK_ATTR_REGION_CHUNK_LEN = 97, + DEVLINK_ATTR_INFO_DRIVER_NAME = 98, + DEVLINK_ATTR_INFO_SERIAL_NUMBER = 99, + DEVLINK_ATTR_INFO_VERSION_FIXED = 100, + DEVLINK_ATTR_INFO_VERSION_RUNNING = 101, + DEVLINK_ATTR_INFO_VERSION_STORED = 102, + DEVLINK_ATTR_INFO_VERSION_NAME = 103, + DEVLINK_ATTR_INFO_VERSION_VALUE = 104, + DEVLINK_ATTR_SB_POOL_CELL_SIZE = 105, + DEVLINK_ATTR_FMSG = 106, + DEVLINK_ATTR_FMSG_OBJ_NEST_START = 107, + DEVLINK_ATTR_FMSG_PAIR_NEST_START = 108, + DEVLINK_ATTR_FMSG_ARR_NEST_START = 109, + DEVLINK_ATTR_FMSG_NEST_END = 110, + DEVLINK_ATTR_FMSG_OBJ_NAME = 111, + DEVLINK_ATTR_FMSG_OBJ_VALUE_TYPE = 112, + DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA = 113, + DEVLINK_ATTR_HEALTH_REPORTER = 114, + DEVLINK_ATTR_HEALTH_REPORTER_NAME = 115, + DEVLINK_ATTR_HEALTH_REPORTER_STATE = 116, + DEVLINK_ATTR_HEALTH_REPORTER_ERR_COUNT = 117, + DEVLINK_ATTR_HEALTH_REPORTER_RECOVER_COUNT = 118, + DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS = 119, + DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD = 120, + DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER = 121, + DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME = 122, + DEVLINK_ATTR_FLASH_UPDATE_COMPONENT = 123, + DEVLINK_ATTR_FLASH_UPDATE_STATUS_MSG = 124, + 
DEVLINK_ATTR_FLASH_UPDATE_STATUS_DONE = 125, + DEVLINK_ATTR_FLASH_UPDATE_STATUS_TOTAL = 126, + DEVLINK_ATTR_PORT_PCI_PF_NUMBER = 127, + DEVLINK_ATTR_PORT_PCI_VF_NUMBER = 128, + DEVLINK_ATTR_STATS = 129, + DEVLINK_ATTR_TRAP_NAME = 130, + DEVLINK_ATTR_TRAP_ACTION = 131, + DEVLINK_ATTR_TRAP_TYPE = 132, + DEVLINK_ATTR_TRAP_GENERIC = 133, + DEVLINK_ATTR_TRAP_METADATA = 134, + DEVLINK_ATTR_TRAP_GROUP_NAME = 135, + DEVLINK_ATTR_RELOAD_FAILED = 136, + DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS_NS = 137, + DEVLINK_ATTR_NETNS_FD = 138, + DEVLINK_ATTR_NETNS_PID = 139, + DEVLINK_ATTR_NETNS_ID = 140, + DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP = 141, + DEVLINK_ATTR_TRAP_POLICER_ID = 142, + DEVLINK_ATTR_TRAP_POLICER_RATE = 143, + DEVLINK_ATTR_TRAP_POLICER_BURST = 144, + DEVLINK_ATTR_PORT_FUNCTION = 145, + DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER = 146, + DEVLINK_ATTR_PORT_LANES = 147, + DEVLINK_ATTR_PORT_SPLITTABLE = 148, + DEVLINK_ATTR_PORT_EXTERNAL = 149, + DEVLINK_ATTR_PORT_CONTROLLER_NUMBER = 150, + DEVLINK_ATTR_FLASH_UPDATE_STATUS_TIMEOUT = 151, + DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK = 152, + DEVLINK_ATTR_RELOAD_ACTION = 153, + DEVLINK_ATTR_RELOAD_ACTIONS_PERFORMED = 154, + DEVLINK_ATTR_RELOAD_LIMITS = 155, + DEVLINK_ATTR_DEV_STATS = 156, + DEVLINK_ATTR_RELOAD_STATS = 157, + DEVLINK_ATTR_RELOAD_STATS_ENTRY = 158, + DEVLINK_ATTR_RELOAD_STATS_LIMIT = 159, + DEVLINK_ATTR_RELOAD_STATS_VALUE = 160, + DEVLINK_ATTR_REMOTE_RELOAD_STATS = 161, + DEVLINK_ATTR_RELOAD_ACTION_INFO = 162, + DEVLINK_ATTR_RELOAD_ACTION_STATS = 163, + __DEVLINK_ATTR_MAX = 164, + DEVLINK_ATTR_MAX = 163, +}; + +enum devlink_dpipe_match_type { + DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0, +}; + +enum devlink_dpipe_action_type { + DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY = 0, +}; + +enum devlink_dpipe_field_ethernet_id { + DEVLINK_DPIPE_FIELD_ETHERNET_DST_MAC = 0, +}; + +enum devlink_dpipe_field_ipv4_id { + DEVLINK_DPIPE_FIELD_IPV4_DST_IP = 0, +}; + +enum devlink_dpipe_field_ipv6_id { + DEVLINK_DPIPE_FIELD_IPV6_DST_IP = 0, +}; + +enum devlink_dpipe_header_id { + DEVLINK_DPIPE_HEADER_ETHERNET = 0, + DEVLINK_DPIPE_HEADER_IPV4 = 1, + DEVLINK_DPIPE_HEADER_IPV6 = 2, +}; + +enum devlink_resource_unit { + DEVLINK_RESOURCE_UNIT_ENTRY = 0, +}; + +enum devlink_port_function_attr { + DEVLINK_PORT_FUNCTION_ATTR_UNSPEC = 0, + DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR = 1, + __DEVLINK_PORT_FUNCTION_ATTR_MAX = 2, + DEVLINK_PORT_FUNCTION_ATTR_MAX = 1, +}; + +struct devlink_dpipe_match { + enum devlink_dpipe_match_type type; + unsigned int header_index; + struct devlink_dpipe_header *header; + unsigned int field_id; +}; + +struct devlink_dpipe_action { + enum devlink_dpipe_action_type type; + unsigned int header_index; + struct devlink_dpipe_header *header; + unsigned int field_id; +}; + +struct devlink_dpipe_value { + union { + struct devlink_dpipe_action *action; + struct devlink_dpipe_match *match; + }; + unsigned int mapping_value; + bool mapping_valid; + unsigned int value_size; + void *value; + void *mask; +}; + +struct devlink_dpipe_entry { + u64 index; + struct devlink_dpipe_value *match_values; + unsigned int match_values_count; + struct devlink_dpipe_value *action_values; + unsigned int action_values_count; + u64 counter; + bool counter_valid; +}; + +struct devlink_dpipe_dump_ctx { + struct genl_info *info; + enum devlink_command cmd; + struct sk_buff *skb; + struct nlattr *nest; + void *hdr; +}; + +struct devlink_dpipe_table_ops; + +struct devlink_dpipe_table { + void *priv; + struct list_head list; + const char *name; + bool counters_enabled; + bool 
counter_control_extern; + bool resource_valid; + u64 resource_id; + u64 resource_units; + struct devlink_dpipe_table_ops *table_ops; + struct callback_head rcu; +}; + +struct devlink_dpipe_table_ops { + int (*actions_dump)(void *, struct sk_buff *); + int (*matches_dump)(void *, struct sk_buff *); + int (*entries_dump)(void *, bool, struct devlink_dpipe_dump_ctx *); + int (*counters_set_update)(void *, bool); + u64 (*size_get)(void *); +}; + +struct devlink_resource_size_params { + u64 size_min; + u64 size_max; + u64 size_granularity; + enum devlink_resource_unit unit; +}; + +typedef u64 devlink_resource_occ_get_t(void *); + +struct devlink_resource { + const char *name; + u64 id; + u64 size; + u64 size_new; + bool size_valid; + struct devlink_resource *parent; + struct devlink_resource_size_params size_params; + struct list_head list; + struct list_head resource_list; + devlink_resource_occ_get_t *occ_get; + void *occ_get_priv; +}; + +enum devlink_param_type { + DEVLINK_PARAM_TYPE_U8 = 0, + DEVLINK_PARAM_TYPE_U16 = 1, + DEVLINK_PARAM_TYPE_U32 = 2, + DEVLINK_PARAM_TYPE_STRING = 3, + DEVLINK_PARAM_TYPE_BOOL = 4, +}; + +struct devlink_flash_notify { + const char *status_msg; + const char *component; + long unsigned int done; + long unsigned int total; + long unsigned int timeout; +}; + +struct devlink_param { + u32 id; + const char *name; + bool generic; + enum devlink_param_type type; + long unsigned int supported_cmodes; + int (*get)(struct devlink *, u32, struct devlink_param_gset_ctx *); + int (*set)(struct devlink *, u32, struct devlink_param_gset_ctx *); + int (*validate)(struct devlink *, u32, union devlink_param_value, struct netlink_ext_ack *); +}; + +struct devlink_param_item { + struct list_head list; + const struct devlink_param *param; + union devlink_param_value driverinit_value; + bool driverinit_value_valid; + bool published; +}; + +enum devlink_param_generic_id { + DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET = 0, + DEVLINK_PARAM_GENERIC_ID_MAX_MACS = 1, + DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV = 2, + DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT = 3, + DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI = 4, + DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX = 5, + DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN = 6, + DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY = 7, + DEVLINK_PARAM_GENERIC_ID_RESET_DEV_ON_DRV_PROBE = 8, + DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE = 9, + DEVLINK_PARAM_GENERIC_ID_ENABLE_REMOTE_DEV_RESET = 10, + __DEVLINK_PARAM_GENERIC_ID_MAX = 11, + DEVLINK_PARAM_GENERIC_ID_MAX = 10, +}; + +struct devlink_region_ops { + const char *name; + void (*destructor)(const void *); + int (*snapshot)(struct devlink *, const struct devlink_region_ops *, struct netlink_ext_ack *, u8 **); + void *priv; +}; + +struct devlink_port_region_ops { + const char *name; + void (*destructor)(const void *); + int (*snapshot)(struct devlink_port *, const struct devlink_port_region_ops *, struct netlink_ext_ack *, u8 **); + void *priv; +}; + +enum devlink_health_reporter_state { + DEVLINK_HEALTH_REPORTER_STATE_HEALTHY = 0, + DEVLINK_HEALTH_REPORTER_STATE_ERROR = 1, +}; + +struct devlink_health_reporter; + +struct devlink_fmsg; + +struct devlink_health_reporter_ops { + char *name; + int (*recover)(struct devlink_health_reporter *, void *, struct netlink_ext_ack *); + int (*dump)(struct devlink_health_reporter *, struct devlink_fmsg *, void *, struct netlink_ext_ack *); + int (*diagnose)(struct devlink_health_reporter *, struct devlink_fmsg *, struct netlink_ext_ack *); + int (*test)(struct devlink_health_reporter *, struct 
netlink_ext_ack *); +}; + +struct devlink_health_reporter { + struct list_head list; + void *priv; + const struct devlink_health_reporter_ops *ops; + struct devlink *devlink; + struct devlink_port *devlink_port; + struct devlink_fmsg *dump_fmsg; + struct mutex dump_lock; + u64 graceful_period; + bool auto_recover; + bool auto_dump; + u8 health_state; + u64 dump_ts; + u64 dump_real_ts; + u64 error_count; + u64 recovery_count; + u64 last_recovery_ts; + refcount_t refcount; +}; + +struct devlink_fmsg { + struct list_head item_list; + bool putting_binary; +}; + +struct devlink_trap_metadata { + const char *trap_name; + const char *trap_group_name; + struct net_device *input_dev; + const struct flow_action_cookie *fa_cookie; + enum devlink_trap_type trap_type; +}; + +enum devlink_trap_generic_id { + DEVLINK_TRAP_GENERIC_ID_SMAC_MC = 0, + DEVLINK_TRAP_GENERIC_ID_VLAN_TAG_MISMATCH = 1, + DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER = 2, + DEVLINK_TRAP_GENERIC_ID_INGRESS_STP_FILTER = 3, + DEVLINK_TRAP_GENERIC_ID_EMPTY_TX_LIST = 4, + DEVLINK_TRAP_GENERIC_ID_PORT_LOOPBACK_FILTER = 5, + DEVLINK_TRAP_GENERIC_ID_BLACKHOLE_ROUTE = 6, + DEVLINK_TRAP_GENERIC_ID_TTL_ERROR = 7, + DEVLINK_TRAP_GENERIC_ID_TAIL_DROP = 8, + DEVLINK_TRAP_GENERIC_ID_NON_IP_PACKET = 9, + DEVLINK_TRAP_GENERIC_ID_UC_DIP_MC_DMAC = 10, + DEVLINK_TRAP_GENERIC_ID_DIP_LB = 11, + DEVLINK_TRAP_GENERIC_ID_SIP_MC = 12, + DEVLINK_TRAP_GENERIC_ID_SIP_LB = 13, + DEVLINK_TRAP_GENERIC_ID_CORRUPTED_IP_HDR = 14, + DEVLINK_TRAP_GENERIC_ID_IPV4_SIP_BC = 15, + DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_RESERVED_SCOPE = 16, + DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE = 17, + DEVLINK_TRAP_GENERIC_ID_MTU_ERROR = 18, + DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH = 19, + DEVLINK_TRAP_GENERIC_ID_RPF = 20, + DEVLINK_TRAP_GENERIC_ID_REJECT_ROUTE = 21, + DEVLINK_TRAP_GENERIC_ID_IPV4_LPM_UNICAST_MISS = 22, + DEVLINK_TRAP_GENERIC_ID_IPV6_LPM_UNICAST_MISS = 23, + DEVLINK_TRAP_GENERIC_ID_NON_ROUTABLE = 24, + DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR = 25, + DEVLINK_TRAP_GENERIC_ID_OVERLAY_SMAC_MC = 26, + DEVLINK_TRAP_GENERIC_ID_INGRESS_FLOW_ACTION_DROP = 27, + DEVLINK_TRAP_GENERIC_ID_EGRESS_FLOW_ACTION_DROP = 28, + DEVLINK_TRAP_GENERIC_ID_STP = 29, + DEVLINK_TRAP_GENERIC_ID_LACP = 30, + DEVLINK_TRAP_GENERIC_ID_LLDP = 31, + DEVLINK_TRAP_GENERIC_ID_IGMP_QUERY = 32, + DEVLINK_TRAP_GENERIC_ID_IGMP_V1_REPORT = 33, + DEVLINK_TRAP_GENERIC_ID_IGMP_V2_REPORT = 34, + DEVLINK_TRAP_GENERIC_ID_IGMP_V3_REPORT = 35, + DEVLINK_TRAP_GENERIC_ID_IGMP_V2_LEAVE = 36, + DEVLINK_TRAP_GENERIC_ID_MLD_QUERY = 37, + DEVLINK_TRAP_GENERIC_ID_MLD_V1_REPORT = 38, + DEVLINK_TRAP_GENERIC_ID_MLD_V2_REPORT = 39, + DEVLINK_TRAP_GENERIC_ID_MLD_V1_DONE = 40, + DEVLINK_TRAP_GENERIC_ID_IPV4_DHCP = 41, + DEVLINK_TRAP_GENERIC_ID_IPV6_DHCP = 42, + DEVLINK_TRAP_GENERIC_ID_ARP_REQUEST = 43, + DEVLINK_TRAP_GENERIC_ID_ARP_RESPONSE = 44, + DEVLINK_TRAP_GENERIC_ID_ARP_OVERLAY = 45, + DEVLINK_TRAP_GENERIC_ID_IPV6_NEIGH_SOLICIT = 46, + DEVLINK_TRAP_GENERIC_ID_IPV6_NEIGH_ADVERT = 47, + DEVLINK_TRAP_GENERIC_ID_IPV4_BFD = 48, + DEVLINK_TRAP_GENERIC_ID_IPV6_BFD = 49, + DEVLINK_TRAP_GENERIC_ID_IPV4_OSPF = 50, + DEVLINK_TRAP_GENERIC_ID_IPV6_OSPF = 51, + DEVLINK_TRAP_GENERIC_ID_IPV4_BGP = 52, + DEVLINK_TRAP_GENERIC_ID_IPV6_BGP = 53, + DEVLINK_TRAP_GENERIC_ID_IPV4_VRRP = 54, + DEVLINK_TRAP_GENERIC_ID_IPV6_VRRP = 55, + DEVLINK_TRAP_GENERIC_ID_IPV4_PIM = 56, + DEVLINK_TRAP_GENERIC_ID_IPV6_PIM = 57, + DEVLINK_TRAP_GENERIC_ID_UC_LB = 58, + DEVLINK_TRAP_GENERIC_ID_LOCAL_ROUTE = 59, + DEVLINK_TRAP_GENERIC_ID_EXTERNAL_ROUTE = 60, 
+ DEVLINK_TRAP_GENERIC_ID_IPV6_UC_DIP_LINK_LOCAL_SCOPE = 61, + DEVLINK_TRAP_GENERIC_ID_IPV6_DIP_ALL_NODES = 62, + DEVLINK_TRAP_GENERIC_ID_IPV6_DIP_ALL_ROUTERS = 63, + DEVLINK_TRAP_GENERIC_ID_IPV6_ROUTER_SOLICIT = 64, + DEVLINK_TRAP_GENERIC_ID_IPV6_ROUTER_ADVERT = 65, + DEVLINK_TRAP_GENERIC_ID_IPV6_REDIRECT = 66, + DEVLINK_TRAP_GENERIC_ID_IPV4_ROUTER_ALERT = 67, + DEVLINK_TRAP_GENERIC_ID_IPV6_ROUTER_ALERT = 68, + DEVLINK_TRAP_GENERIC_ID_PTP_EVENT = 69, + DEVLINK_TRAP_GENERIC_ID_PTP_GENERAL = 70, + DEVLINK_TRAP_GENERIC_ID_FLOW_ACTION_SAMPLE = 71, + DEVLINK_TRAP_GENERIC_ID_FLOW_ACTION_TRAP = 72, + DEVLINK_TRAP_GENERIC_ID_EARLY_DROP = 73, + DEVLINK_TRAP_GENERIC_ID_VXLAN_PARSING = 74, + DEVLINK_TRAP_GENERIC_ID_LLC_SNAP_PARSING = 75, + DEVLINK_TRAP_GENERIC_ID_VLAN_PARSING = 76, + DEVLINK_TRAP_GENERIC_ID_PPPOE_PPP_PARSING = 77, + DEVLINK_TRAP_GENERIC_ID_MPLS_PARSING = 78, + DEVLINK_TRAP_GENERIC_ID_ARP_PARSING = 79, + DEVLINK_TRAP_GENERIC_ID_IP_1_PARSING = 80, + DEVLINK_TRAP_GENERIC_ID_IP_N_PARSING = 81, + DEVLINK_TRAP_GENERIC_ID_GRE_PARSING = 82, + DEVLINK_TRAP_GENERIC_ID_UDP_PARSING = 83, + DEVLINK_TRAP_GENERIC_ID_TCP_PARSING = 84, + DEVLINK_TRAP_GENERIC_ID_IPSEC_PARSING = 85, + DEVLINK_TRAP_GENERIC_ID_SCTP_PARSING = 86, + DEVLINK_TRAP_GENERIC_ID_DCCP_PARSING = 87, + DEVLINK_TRAP_GENERIC_ID_GTP_PARSING = 88, + DEVLINK_TRAP_GENERIC_ID_ESP_PARSING = 89, + __DEVLINK_TRAP_GENERIC_ID_MAX = 90, + DEVLINK_TRAP_GENERIC_ID_MAX = 89, +}; + +enum devlink_trap_group_generic_id { + DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS = 0, + DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS = 1, + DEVLINK_TRAP_GROUP_GENERIC_ID_L3_EXCEPTIONS = 2, + DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS = 3, + DEVLINK_TRAP_GROUP_GENERIC_ID_TUNNEL_DROPS = 4, + DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_DROPS = 5, + DEVLINK_TRAP_GROUP_GENERIC_ID_STP = 6, + DEVLINK_TRAP_GROUP_GENERIC_ID_LACP = 7, + DEVLINK_TRAP_GROUP_GENERIC_ID_LLDP = 8, + DEVLINK_TRAP_GROUP_GENERIC_ID_MC_SNOOPING = 9, + DEVLINK_TRAP_GROUP_GENERIC_ID_DHCP = 10, + DEVLINK_TRAP_GROUP_GENERIC_ID_NEIGH_DISCOVERY = 11, + DEVLINK_TRAP_GROUP_GENERIC_ID_BFD = 12, + DEVLINK_TRAP_GROUP_GENERIC_ID_OSPF = 13, + DEVLINK_TRAP_GROUP_GENERIC_ID_BGP = 14, + DEVLINK_TRAP_GROUP_GENERIC_ID_VRRP = 15, + DEVLINK_TRAP_GROUP_GENERIC_ID_PIM = 16, + DEVLINK_TRAP_GROUP_GENERIC_ID_UC_LB = 17, + DEVLINK_TRAP_GROUP_GENERIC_ID_LOCAL_DELIVERY = 18, + DEVLINK_TRAP_GROUP_GENERIC_ID_EXTERNAL_DELIVERY = 19, + DEVLINK_TRAP_GROUP_GENERIC_ID_IPV6 = 20, + DEVLINK_TRAP_GROUP_GENERIC_ID_PTP_EVENT = 21, + DEVLINK_TRAP_GROUP_GENERIC_ID_PTP_GENERAL = 22, + DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_SAMPLE = 23, + DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_TRAP = 24, + DEVLINK_TRAP_GROUP_GENERIC_ID_PARSER_ERROR_DROPS = 25, + __DEVLINK_TRAP_GROUP_GENERIC_ID_MAX = 26, + DEVLINK_TRAP_GROUP_GENERIC_ID_MAX = 25, +}; + +struct devlink_info_req { + struct sk_buff *msg; +}; + +struct trace_event_raw_devlink_hwmsg { + struct trace_entry ent; + u32 __data_loc_bus_name; + u32 __data_loc_dev_name; + u32 __data_loc_driver_name; + bool incoming; + long unsigned int type; + u32 __data_loc_buf; + size_t len; + char __data[0]; +}; + +struct trace_event_raw_devlink_hwerr { + struct trace_entry ent; + u32 __data_loc_bus_name; + u32 __data_loc_dev_name; + u32 __data_loc_driver_name; + int err; + u32 __data_loc_msg; + char __data[0]; +}; + +struct trace_event_raw_devlink_health_report { + struct trace_entry ent; + u32 __data_loc_bus_name; + u32 __data_loc_dev_name; + u32 __data_loc_driver_name; + u32 __data_loc_reporter_name; + u32 __data_loc_msg; + char __data[0]; +}; + 
+struct trace_event_raw_devlink_health_recover_aborted { + struct trace_entry ent; + u32 __data_loc_bus_name; + u32 __data_loc_dev_name; + u32 __data_loc_driver_name; + u32 __data_loc_reporter_name; + bool health_state; + u64 time_since_last_recover; + char __data[0]; +}; + +struct trace_event_raw_devlink_health_reporter_state_update { + struct trace_entry ent; + u32 __data_loc_bus_name; + u32 __data_loc_dev_name; + u32 __data_loc_driver_name; + u32 __data_loc_reporter_name; + u8 new_state; + char __data[0]; +}; + +struct trace_event_raw_devlink_trap_report { + struct trace_entry ent; + u32 __data_loc_bus_name; + u32 __data_loc_dev_name; + u32 __data_loc_driver_name; + u32 __data_loc_trap_name; + u32 __data_loc_trap_group_name; + u32 __data_loc_input_dev_name; + char __data[0]; +}; + +struct trace_event_data_offsets_devlink_hwmsg { + u32 bus_name; + u32 dev_name; + u32 driver_name; + u32 buf; +}; + +struct trace_event_data_offsets_devlink_hwerr { + u32 bus_name; + u32 dev_name; + u32 driver_name; + u32 msg; +}; + +struct trace_event_data_offsets_devlink_health_report { + u32 bus_name; + u32 dev_name; + u32 driver_name; + u32 reporter_name; + u32 msg; +}; + +struct trace_event_data_offsets_devlink_health_recover_aborted { + u32 bus_name; + u32 dev_name; + u32 driver_name; + u32 reporter_name; +}; + +struct trace_event_data_offsets_devlink_health_reporter_state_update { + u32 bus_name; + u32 dev_name; + u32 driver_name; + u32 reporter_name; +}; + +struct trace_event_data_offsets_devlink_trap_report { + u32 bus_name; + u32 dev_name; + u32 driver_name; + u32 trap_name; + u32 trap_group_name; + u32 input_dev_name; +}; + +typedef void (*btf_trace_devlink_hwmsg)(void *, const struct devlink *, bool, long unsigned int, const u8 *, size_t); + +typedef void (*btf_trace_devlink_hwerr)(void *, const struct devlink *, int, const char *); + +typedef void (*btf_trace_devlink_health_report)(void *, const struct devlink *, const char *, const char *); + +typedef void (*btf_trace_devlink_health_recover_aborted)(void *, const struct devlink *, const char *, bool, u64); + +typedef void (*btf_trace_devlink_health_reporter_state_update)(void *, const struct devlink *, const char *, bool); + +typedef void (*btf_trace_devlink_trap_report)(void *, const struct devlink *, struct sk_buff *, const struct devlink_trap_metadata *); + +struct devlink_sb { + struct list_head list; + unsigned int index; + u32 size; + u16 ingress_pools_count; + u16 egress_pools_count; + u16 ingress_tc_count; + u16 egress_tc_count; +}; + +struct devlink_region { + struct devlink *devlink; + struct devlink_port *port; + struct list_head list; + union { + const struct devlink_region_ops *ops; + const struct devlink_port_region_ops *port_ops; + }; + struct list_head snapshot_list; + u32 max_snapshots; + u32 cur_snapshots; + u64 size; +}; + +struct devlink_snapshot { + struct list_head list; + struct devlink_region *region; + u8 *data; + u32 id; +}; + +enum devlink_multicast_groups { + DEVLINK_MCGRP_CONFIG = 0, +}; + +struct devlink_reload_combination { + enum devlink_reload_action action; + enum devlink_reload_limit limit; +}; + +struct devlink_fmsg_item { + struct list_head list; + int attrtype; + u8 nla_type; + u16 len; + int value[0]; +}; + +struct devlink_stats { + u64 rx_bytes; + u64 rx_packets; + struct u64_stats_sync syncp; +}; + +struct devlink_trap_policer_item { + const struct devlink_trap_policer *policer; + u64 rate; + u64 burst; + struct list_head list; +}; + +struct devlink_trap_group_item { + const struct devlink_trap_group 
*group; + struct devlink_trap_policer_item *policer_item; + struct list_head list; + struct devlink_stats *stats; +}; + +struct devlink_trap_item { + const struct devlink_trap *trap; + struct devlink_trap_group_item *group_item; + struct list_head list; + enum devlink_trap_action action; + struct devlink_stats *stats; + void *priv; +}; + +struct gro_cell; + +struct gro_cells { + struct gro_cell *cells; +}; + +struct gro_cell { + struct sk_buff_head napi_skbs; + struct napi_struct napi; +}; + +enum { + SK_DIAG_BPF_STORAGE_REQ_NONE = 0, + SK_DIAG_BPF_STORAGE_REQ_MAP_FD = 1, + __SK_DIAG_BPF_STORAGE_REQ_MAX = 2, +}; + +enum { + SK_DIAG_BPF_STORAGE_REP_NONE = 0, + SK_DIAG_BPF_STORAGE = 1, + __SK_DIAG_BPF_STORAGE_REP_MAX = 2, +}; + +enum { + SK_DIAG_BPF_STORAGE_NONE = 0, + SK_DIAG_BPF_STORAGE_PAD = 1, + SK_DIAG_BPF_STORAGE_MAP_ID = 2, + SK_DIAG_BPF_STORAGE_MAP_VALUE = 3, + __SK_DIAG_BPF_STORAGE_MAX = 4, +}; + +typedef u64 (*btf_bpf_sk_storage_get)(struct bpf_map *, struct sock *, void *, u64); + +typedef u64 (*btf_bpf_sk_storage_delete)(struct bpf_map *, struct sock *); + +struct bpf_sk_storage_diag { + u32 nr_maps; + struct bpf_map *maps[0]; +}; + +struct bpf_iter_seq_sk_storage_map_info { + struct bpf_map *map; + unsigned int bucket_id; + unsigned int skip_elems; +}; + +struct bpf_iter__bpf_sk_storage_map { + union { + struct bpf_iter_meta *meta; + }; + union { + struct bpf_map *map; + }; + union { + struct sock *sk; + }; + union { + void *value; + }; +}; + +struct compat_cmsghdr { + compat_size_t cmsg_len; + compat_int_t cmsg_level; + compat_int_t cmsg_type; +}; + +typedef struct sk_buff * (*gro_receive_t)(struct list_head *, struct sk_buff *); + +struct nvmem_cell___2; + +struct fch_hdr { + __u8 daddr[6]; + __u8 saddr[6]; +}; + +struct fcllc { + __u8 dsap; + __u8 ssap; + __u8 llc; + __u8 protid[3]; + __be16 ethertype; +}; + +struct fddi_8022_1_hdr { + __u8 dsap; + __u8 ssap; + __u8 ctrl; +}; + +struct fddi_8022_2_hdr { + __u8 dsap; + __u8 ssap; + __u8 ctrl_1; + __u8 ctrl_2; +}; + +struct fddi_snap_hdr { + __u8 dsap; + __u8 ssap; + __u8 ctrl; + __u8 oui[3]; + __be16 ethertype; +}; + +struct fddihdr { + __u8 fc; + __u8 daddr[6]; + __u8 saddr[6]; + union { + struct fddi_8022_1_hdr llc_8022_1; + struct fddi_8022_2_hdr llc_8022_2; + struct fddi_snap_hdr llc_snap; + } hdr; +} __attribute__((packed)); + +struct hippi_fp_hdr { + __be32 fixed; + __be32 d2_size; +}; + +struct hippi_le_hdr { + __u8 message_type: 4; + __u8 double_wide: 1; + __u8 fc: 3; + __u8 dest_switch_addr[3]; + __u8 src_addr_type: 4; + __u8 dest_addr_type: 4; + __u8 src_switch_addr[3]; + __u16 reserved; + __u8 daddr[6]; + __u16 locally_administered; + __u8 saddr[6]; +}; + +struct hippi_snap_hdr { + __u8 dsap; + __u8 ssap; + __u8 ctrl; + __u8 oui[3]; + __be16 ethertype; +}; + +struct hippi_hdr { + struct hippi_fp_hdr fp; + struct hippi_le_hdr le; + struct hippi_snap_hdr snap; +}; + +struct hippi_cb { + __u32 ifield; +}; + +enum macvlan_mode { + MACVLAN_MODE_PRIVATE = 1, + MACVLAN_MODE_VEPA = 2, + MACVLAN_MODE_BRIDGE = 4, + MACVLAN_MODE_PASSTHRU = 8, + MACVLAN_MODE_SOURCE = 16, +}; + +struct tc_ratespec { + unsigned char cell_log; + __u8 linklayer; + short unsigned int overhead; + short int cell_align; + short unsigned int mpu; + __u32 rate; +}; + +struct tc_prio_qopt { + int bands; + __u8 priomap[16]; +}; + +enum { + TCA_UNSPEC = 0, + TCA_KIND = 1, + TCA_OPTIONS = 2, + TCA_STATS = 3, + TCA_XSTATS = 4, + TCA_RATE = 5, + TCA_FCNT = 6, + TCA_STATS2 = 7, + TCA_STAB = 8, + TCA_PAD = 9, + TCA_DUMP_INVISIBLE = 10, + TCA_CHAIN = 11, + 
TCA_HW_OFFLOAD = 12, + TCA_INGRESS_BLOCK = 13, + TCA_EGRESS_BLOCK = 14, + TCA_DUMP_FLAGS = 15, + __TCA_MAX = 16, +}; + +struct vlan_pcpu_stats { + u64 rx_packets; + u64 rx_bytes; + u64 rx_multicast; + u64 tx_packets; + u64 tx_bytes; + struct u64_stats_sync syncp; + u32 rx_errors; + u32 tx_dropped; +}; + +struct netpoll___2; + +struct skb_array { + struct ptr_ring ring; +}; + +struct macvlan_port; + +struct macvlan_dev { + struct net_device *dev; + struct list_head list; + struct hlist_node hlist; + struct macvlan_port *port; + struct net_device *lowerdev; + void *accel_priv; + struct vlan_pcpu_stats *pcpu_stats; + long unsigned int mc_filter[4]; + netdev_features_t set_features; + enum macvlan_mode mode; + u16 flags; + unsigned int macaddr_count; + struct netpoll___2 *netpoll; +}; + +struct psched_ratecfg { + u64 rate_bytes_ps; + u32 mult; + u16 overhead; + u8 linklayer; + u8 shift; +}; + +struct mini_Qdisc_pair { + struct mini_Qdisc miniq1; + struct mini_Qdisc miniq2; + struct mini_Qdisc **p_miniq; +}; + +struct pfifo_fast_priv { + struct skb_array q[3]; +}; + +struct tc_qopt_offload_stats { + struct gnet_stats_basic_packed *bstats; + struct gnet_stats_queue *qstats; +}; + +enum tc_mq_command { + TC_MQ_CREATE = 0, + TC_MQ_DESTROY = 1, + TC_MQ_STATS = 2, + TC_MQ_GRAFT = 3, +}; + +struct tc_mq_opt_offload_graft_params { + long unsigned int queue; + u32 child_handle; +}; + +struct tc_mq_qopt_offload { + enum tc_mq_command command; + u32 handle; + union { + struct tc_qopt_offload_stats stats; + struct tc_mq_opt_offload_graft_params graft_params; + }; +}; + +struct mq_sched { + struct Qdisc **qdiscs; +}; + +enum tc_link_layer { + TC_LINKLAYER_UNAWARE = 0, + TC_LINKLAYER_ETHERNET = 1, + TC_LINKLAYER_ATM = 2, +}; + +enum { + TCA_STAB_UNSPEC = 0, + TCA_STAB_BASE = 1, + TCA_STAB_DATA = 2, + __TCA_STAB_MAX = 3, +}; + +struct qdisc_rate_table { + struct tc_ratespec rate; + u32 data[256]; + struct qdisc_rate_table *next; + int refcnt; +}; + +struct Qdisc_class_common { + u32 classid; + struct hlist_node hnode; +}; + +struct Qdisc_class_hash { + struct hlist_head *hash; + unsigned int hashsize; + unsigned int hashmask; + unsigned int hashelems; +}; + +struct qdisc_watchdog { + u64 last_expires; + struct hrtimer timer; + struct Qdisc *qdisc; +}; + +enum tc_root_command { + TC_ROOT_GRAFT = 0, +}; + +struct tc_root_qopt_offload { + enum tc_root_command command; + u32 handle; + bool ingress; +}; + +struct check_loop_arg { + struct qdisc_walker w; + struct Qdisc *p; + int depth; +}; + +struct tcf_bind_args { + struct tcf_walker w; + long unsigned int base; + long unsigned int cl; + u32 classid; +}; + +struct tc_bind_class_args { + struct qdisc_walker w; + long unsigned int new_cl; + u32 portid; + u32 clid; +}; + +struct qdisc_dump_args { + struct qdisc_walker w; + struct sk_buff *skb; + struct netlink_callback *cb; +}; + +enum net_xmit_qdisc_t { + __NET_XMIT_STOLEN = 65536, + __NET_XMIT_BYPASS = 131072, +}; + +struct tc_skb_ext { + __u32 chain; + __u16 mru; +}; + +enum { + TCA_ACT_UNSPEC = 0, + TCA_ACT_KIND = 1, + TCA_ACT_OPTIONS = 2, + TCA_ACT_INDEX = 3, + TCA_ACT_STATS = 4, + TCA_ACT_PAD = 5, + TCA_ACT_COOKIE = 6, + TCA_ACT_FLAGS = 7, + TCA_ACT_HW_STATS = 8, + TCA_ACT_USED_HW_STATS = 9, + __TCA_ACT_MAX = 10, +}; + +enum tca_id { + TCA_ID_UNSPEC = 0, + TCA_ID_POLICE = 1, + TCA_ID_GACT = 5, + TCA_ID_IPT = 6, + TCA_ID_PEDIT = 7, + TCA_ID_MIRRED = 8, + TCA_ID_NAT = 9, + TCA_ID_XT = 10, + TCA_ID_SKBEDIT = 11, + TCA_ID_VLAN = 12, + TCA_ID_BPF = 13, + TCA_ID_CONNMARK = 14, + TCA_ID_SKBMOD = 15, + TCA_ID_CSUM = 
16, + TCA_ID_TUNNEL_KEY = 17, + TCA_ID_SIMP = 22, + TCA_ID_IFE = 25, + TCA_ID_SAMPLE = 26, + TCA_ID_CTINFO = 27, + TCA_ID_MPLS = 28, + TCA_ID_CT = 29, + TCA_ID_GATE = 30, + __TCA_ID_MAX = 255, +}; + +struct tcf_t { + __u64 install; + __u64 lastuse; + __u64 expires; + __u64 firstuse; +}; + +struct psample_group { + struct list_head list; + struct net *net; + u32 group_num; + u32 refcount; + u32 seq; + struct callback_head rcu; +}; + +struct action_gate_entry { + u8 gate_state; + u32 interval; + s32 ipv; + s32 maxoctets; +}; + +enum qdisc_class_ops_flags { + QDISC_CLASS_OPS_DOIT_UNLOCKED = 1, +}; + +enum tcf_proto_ops_flags { + TCF_PROTO_OPS_DOIT_UNLOCKED = 1, +}; + +typedef void tcf_chain_head_change_t(struct tcf_proto *, void *); + +struct tcf_idrinfo { + struct mutex lock; + struct idr action_idr; + struct net *net; +}; + +struct tc_action_ops; + +struct tc_cookie; + +struct tc_action { + const struct tc_action_ops *ops; + __u32 type; + struct tcf_idrinfo *idrinfo; + u32 tcfa_index; + refcount_t tcfa_refcnt; + atomic_t tcfa_bindcnt; + int tcfa_action; + struct tcf_t tcfa_tm; + struct gnet_stats_basic_packed tcfa_bstats; + struct gnet_stats_basic_packed tcfa_bstats_hw; + struct gnet_stats_queue tcfa_qstats; + struct net_rate_estimator *tcfa_rate_est; + spinlock_t tcfa_lock; + struct gnet_stats_basic_cpu *cpu_bstats; + struct gnet_stats_basic_cpu *cpu_bstats_hw; + struct gnet_stats_queue *cpu_qstats; + struct tc_cookie *act_cookie; + struct tcf_chain *goto_chain; + u32 tcfa_flags; + u8 hw_stats; + u8 used_hw_stats; + bool used_hw_stats_valid; +}; + +typedef void (*tc_action_priv_destructor)(void *); + +struct tc_action_ops { + struct list_head head; + char kind[16]; + enum tca_id id; + size_t size; + struct module *owner; + int (*act)(struct sk_buff *, const struct tc_action *, struct tcf_result *); + int (*dump)(struct sk_buff *, struct tc_action *, int, int); + void (*cleanup)(struct tc_action *); + int (*lookup)(struct net *, struct tc_action **, u32); + int (*init)(struct net *, struct nlattr *, struct nlattr *, struct tc_action **, int, int, bool, struct tcf_proto *, u32, struct netlink_ext_ack *); + int (*walk)(struct net *, struct sk_buff *, struct netlink_callback *, int, const struct tc_action_ops *, struct netlink_ext_ack *); + void (*stats_update)(struct tc_action *, u64, u64, u64, u64, bool); + size_t (*get_fill_size)(const struct tc_action *); + struct net_device * (*get_dev)(const struct tc_action *, tc_action_priv_destructor *); + struct psample_group * (*get_psample_group)(const struct tc_action *, tc_action_priv_destructor *); +}; + +struct tc_cookie { + u8 *data; + u32 len; + struct callback_head rcu; +}; + +struct tcf_block_ext_info { + enum flow_block_binder_type binder_type; + tcf_chain_head_change_t *chain_head_change; + void *chain_head_change_priv; + u32 block_index; +}; + +struct tcf_qevent { + struct tcf_block *block; + struct tcf_block_ext_info info; + struct tcf_proto *filter_chain; +}; + +struct tcf_exts { + __u32 type; + int nr_actions; + struct tc_action **actions; + struct net *net; + int action; + int police; +}; + +enum pedit_header_type { + TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK = 0, + TCA_PEDIT_KEY_EX_HDR_TYPE_ETH = 1, + TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 = 2, + TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 = 3, + TCA_PEDIT_KEY_EX_HDR_TYPE_TCP = 4, + TCA_PEDIT_KEY_EX_HDR_TYPE_UDP = 5, + __PEDIT_HDR_TYPE_MAX = 6, +}; + +enum pedit_cmd { + TCA_PEDIT_KEY_EX_CMD_SET = 0, + TCA_PEDIT_KEY_EX_CMD_ADD = 1, + __PEDIT_CMD_MAX = 2, +}; + +struct tc_pedit_key { + __u32 mask; + __u32 val; + 
__u32 off; + __u32 at; + __u32 offmask; + __u32 shift; +}; + +struct tcf_pedit_key_ex { + enum pedit_header_type htype; + enum pedit_cmd cmd; +}; + +struct tcf_pedit { + struct tc_action common; + unsigned char tcfp_nkeys; + unsigned char tcfp_flags; + struct tc_pedit_key *tcfp_keys; + struct tcf_pedit_key_ex *tcfp_keys_ex; +}; + +struct tcf_mirred { + struct tc_action common; + int tcfm_eaction; + bool tcfm_mac_header_xmit; + struct net_device *tcfm_dev; + struct list_head tcfm_list; +}; + +struct tcf_vlan_params { + int tcfv_action; + unsigned char tcfv_push_dst[6]; + unsigned char tcfv_push_src[6]; + u16 tcfv_push_vid; + __be16 tcfv_push_proto; + u8 tcfv_push_prio; + struct callback_head rcu; +}; + +struct tcf_vlan { + struct tc_action common; + struct tcf_vlan_params *vlan_p; +}; + +struct tcf_tunnel_key_params { + struct callback_head rcu; + int tcft_action; + struct metadata_dst *tcft_enc_metadata; +}; + +struct tcf_tunnel_key { + struct tc_action common; + struct tcf_tunnel_key_params *params; +}; + +struct tcf_csum_params { + u32 update_flags; + struct callback_head rcu; +}; + +struct tcf_csum { + struct tc_action common; + struct tcf_csum_params *params; +}; + +struct tcf_gact { + struct tc_action common; + u16 tcfg_ptype; + u16 tcfg_pval; + int tcfg_paction; + atomic_t packets; +}; + +struct tcf_police_params { + int tcfp_result; + u32 tcfp_ewma_rate; + s64 tcfp_burst; + u32 tcfp_mtu; + s64 tcfp_mtu_ptoks; + struct psched_ratecfg rate; + bool rate_present; + struct psched_ratecfg peak; + bool peak_present; + struct callback_head rcu; +}; + +struct tcf_police { + struct tc_action common; + struct tcf_police_params *params; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + spinlock_t tcfp_lock; + s64 tcfp_toks; + s64 tcfp_ptoks; + s64 tcfp_t_c; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct tcf_sample { + struct tc_action common; + u32 rate; + bool truncate; + u32 trunc_size; + struct psample_group *psample_group; + u32 psample_group_num; + struct list_head tcfm_list; +}; + +struct tcf_skbedit_params { + u32 flags; + u32 priority; + u32 mark; + u32 mask; + u16 queue_mapping; + u16 ptype; + struct callback_head rcu; +}; + +struct tcf_skbedit { + struct tc_action common; + struct tcf_skbedit_params *params; +}; + +struct nf_nat_range2 { + unsigned int flags; + union nf_inet_addr min_addr; + union nf_inet_addr max_addr; + union nf_conntrack_man_proto min_proto; + union nf_conntrack_man_proto max_proto; + union nf_conntrack_man_proto base_proto; +}; + +struct tcf_ct_flow_table; + +struct tcf_ct_params { + struct nf_conn *tmpl; + u16 zone; + u32 mark; + u32 mark_mask; + u32 labels[4]; + u32 labels_mask[4]; + struct nf_nat_range2 range; + bool ipv4_range; + u16 ct_action; + struct callback_head rcu; + struct tcf_ct_flow_table *ct_ft; + struct nf_flowtable *nf_ft; +}; + +struct tcf_ct { + struct tc_action common; + struct tcf_ct_params *params; +}; + +struct tcf_mpls_params { + int tcfm_action; + u32 tcfm_label; + u8 tcfm_tc; + u8 tcfm_ttl; + u8 tcfm_bos; + __be16 tcfm_proto; + struct callback_head rcu; +}; + +struct tcf_mpls { + struct tc_action common; + struct tcf_mpls_params *mpls_p; +}; + +struct tcfg_gate_entry { + int index; + u8 gate_state; + u32 interval; + s32 ipv; + s32 maxoctets; + struct list_head list; +}; + +struct tcf_gate_params { + s32 tcfg_priority; + u64 tcfg_basetime; + u64 tcfg_cycletime; + u64 tcfg_cycletime_ext; + u32 tcfg_flags; + s32 tcfg_clockid; + size_t num_entries; + struct list_head entries; +}; + +struct 
tcf_gate { + struct tc_action common; + struct tcf_gate_params param; + u8 current_gate_status; + ktime_t current_close_time; + u32 current_entry_octets; + s32 current_max_octets; + struct tcfg_gate_entry *next_entry; + struct hrtimer hitimer; + enum tk_offsets tk_offset; +}; + +struct tcf_filter_chain_list_item { + struct list_head list; + tcf_chain_head_change_t *chain_head_change; + void *chain_head_change_priv; +}; + +struct tcf_net { + spinlock_t idr_lock; + struct idr idr; +}; + +struct tcf_block_owner_item { + struct list_head list; + struct Qdisc *q; + enum flow_block_binder_type binder_type; +}; + +struct tcf_chain_info { + struct tcf_proto **pprev; + struct tcf_proto *next; +}; + +struct tcf_dump_args { + struct tcf_walker w; + struct sk_buff *skb; + struct netlink_callback *cb; + struct tcf_block *block; + struct Qdisc *q; + u32 parent; + bool terse_dump; +}; + +struct tcamsg { + unsigned char tca_family; + unsigned char tca__pad1; + short unsigned int tca__pad2; +}; + +enum { + TCA_ROOT_UNSPEC = 0, + TCA_ROOT_TAB = 1, + TCA_ROOT_FLAGS = 2, + TCA_ROOT_COUNT = 3, + TCA_ROOT_TIME_DELTA = 4, + __TCA_ROOT_MAX = 5, +}; + +struct tc_action_net { + struct tcf_idrinfo *idrinfo; + const struct tc_action_ops *ops; +}; + +struct tc_act_bpf { + __u32 index; + __u32 capab; + int action; + int refcnt; + int bindcnt; +}; + +enum { + TCA_ACT_BPF_UNSPEC = 0, + TCA_ACT_BPF_TM = 1, + TCA_ACT_BPF_PARMS = 2, + TCA_ACT_BPF_OPS_LEN = 3, + TCA_ACT_BPF_OPS = 4, + TCA_ACT_BPF_FD = 5, + TCA_ACT_BPF_NAME = 6, + TCA_ACT_BPF_PAD = 7, + TCA_ACT_BPF_TAG = 8, + TCA_ACT_BPF_ID = 9, + __TCA_ACT_BPF_MAX = 10, +}; + +struct tcf_bpf { + struct tc_action common; + struct bpf_prog *filter; + union { + u32 bpf_fd; + u16 bpf_num_ops; + }; + struct sock_filter *bpf_ops; + const char *bpf_name; +}; + +struct tcf_bpf_cfg { + struct bpf_prog *filter; + struct sock_filter *bpf_ops; + const char *bpf_name; + u16 bpf_num_ops; + bool is_ebpf; +}; + +struct tc_fifo_qopt { + __u32 limit; +}; + +enum tc_fifo_command { + TC_FIFO_REPLACE = 0, + TC_FIFO_DESTROY = 1, + TC_FIFO_STATS = 2, +}; + +struct tc_fifo_qopt_offload { + enum tc_fifo_command command; + u32 handle; + u32 parent; + union { + struct tc_qopt_offload_stats stats; + }; +}; + +enum { + TCA_CGROUP_UNSPEC = 0, + TCA_CGROUP_ACT = 1, + TCA_CGROUP_POLICE = 2, + TCA_CGROUP_EMATCHES = 3, + __TCA_CGROUP_MAX = 4, +}; + +struct tcf_ematch_tree_hdr { + __u16 nmatches; + __u16 progid; +}; + +struct tcf_pkt_info { + unsigned char *ptr; + int nexthdr; +}; + +struct tcf_ematch_ops; + +struct tcf_ematch { + struct tcf_ematch_ops *ops; + long unsigned int data; + unsigned int datalen; + u16 matchid; + u16 flags; + struct net *net; +}; + +struct tcf_ematch_ops { + int kind; + int datalen; + int (*change)(struct net *, void *, int, struct tcf_ematch *); + int (*match)(struct sk_buff *, struct tcf_ematch *, struct tcf_pkt_info *); + void (*destroy)(struct tcf_ematch *); + int (*dump)(struct sk_buff *, struct tcf_ematch *); + struct module *owner; + struct list_head link; +}; + +struct tcf_ematch_tree { + struct tcf_ematch_tree_hdr hdr; + struct tcf_ematch *matches; +}; + +struct cls_cgroup_head { + u32 handle; + struct tcf_exts exts; + struct tcf_ematch_tree ematches; + struct tcf_proto *tp; + struct rcu_work rwork; +}; + +enum { + TCA_BPF_UNSPEC = 0, + TCA_BPF_ACT = 1, + TCA_BPF_POLICE = 2, + TCA_BPF_CLASSID = 3, + TCA_BPF_OPS_LEN = 4, + TCA_BPF_OPS = 5, + TCA_BPF_FD = 6, + TCA_BPF_NAME = 7, + TCA_BPF_FLAGS = 8, + TCA_BPF_FLAGS_GEN = 9, + TCA_BPF_TAG = 10, + TCA_BPF_ID = 11, + 
__TCA_BPF_MAX = 12, +}; + +enum tc_clsbpf_command { + TC_CLSBPF_OFFLOAD = 0, + TC_CLSBPF_STATS = 1, +}; + +struct tc_cls_bpf_offload { + struct flow_cls_common_offload common; + enum tc_clsbpf_command command; + struct tcf_exts *exts; + struct bpf_prog *prog; + struct bpf_prog *oldprog; + const char *name; + bool exts_integrated; +}; + +struct cls_bpf_head { + struct list_head plist; + struct idr handle_idr; + struct callback_head rcu; +}; + +struct cls_bpf_prog { + struct bpf_prog *filter; + struct list_head link; + struct tcf_result res; + bool exts_integrated; + u32 gen_flags; + unsigned int in_hw_count; + struct tcf_exts exts; + u32 handle; + u16 bpf_num_ops; + struct sock_filter *bpf_ops; + const char *bpf_name; + struct tcf_proto *tp; + struct rcu_work rwork; +}; + +enum { + TCA_EMATCH_TREE_UNSPEC = 0, + TCA_EMATCH_TREE_HDR = 1, + TCA_EMATCH_TREE_LIST = 2, + __TCA_EMATCH_TREE_MAX = 3, +}; + +struct tcf_ematch_hdr { + __u16 matchid; + __u16 kind; + __u16 flags; + __u16 pad; +}; + +struct sockaddr_nl { + __kernel_sa_family_t nl_family; + short unsigned int nl_pad; + __u32 nl_pid; + __u32 nl_groups; +}; + +struct nlmsgerr { + int error; + struct nlmsghdr msg; +}; + +enum nlmsgerr_attrs { + NLMSGERR_ATTR_UNUSED = 0, + NLMSGERR_ATTR_MSG = 1, + NLMSGERR_ATTR_OFFS = 2, + NLMSGERR_ATTR_COOKIE = 3, + NLMSGERR_ATTR_POLICY = 4, + __NLMSGERR_ATTR_MAX = 5, + NLMSGERR_ATTR_MAX = 4, +}; + +struct nl_pktinfo { + __u32 group; +}; + +enum { + NETLINK_UNCONNECTED = 0, + NETLINK_CONNECTED = 1, +}; + +enum netlink_skb_flags { + NETLINK_SKB_DST = 8, +}; + +struct netlink_notify { + struct net *net; + u32 portid; + int protocol; +}; + +struct netlink_tap { + struct net_device *dev; + struct module *module; + struct list_head list; +}; + +struct netlink_sock { + struct sock sk; + u32 portid; + u32 dst_portid; + u32 dst_group; + u32 flags; + u32 subscriptions; + u32 ngroups; + long unsigned int *groups; + long unsigned int state; + size_t max_recvmsg_len; + wait_queue_head_t wait; + bool bound; + bool cb_running; + int dump_done_errno; + struct netlink_callback cb; + struct mutex *cb_mutex; + struct mutex cb_def_mutex; + void (*netlink_rcv)(struct sk_buff *); + int (*netlink_bind)(struct net *, int); + void (*netlink_unbind)(struct net *, int); + struct module *module; + struct rhash_head node; + struct callback_head rcu; + struct work_struct work; +}; + +struct listeners; + +struct netlink_table { + struct rhashtable hash; + struct hlist_head mc_list; + struct listeners *listeners; + unsigned int flags; + unsigned int groups; + struct mutex *cb_mutex; + struct module *module; + int (*bind)(struct net *, int); + void (*unbind)(struct net *, int); + bool (*compare)(struct net *, struct sock *); + int registered; +}; + +struct listeners { + struct callback_head rcu; + long unsigned int masks[0]; +}; + +struct netlink_tap_net { + struct list_head netlink_tap_all; + struct mutex netlink_tap_lock; +}; + +struct netlink_compare_arg { + possible_net_t pnet; + u32 portid; +}; + +struct netlink_broadcast_data { + struct sock *exclude_sk; + struct net *net; + u32 portid; + u32 group; + int failure; + int delivery_failure; + int congested; + int delivered; + gfp_t allocation; + struct sk_buff *skb; + struct sk_buff *skb2; + int (*tx_filter)(struct sock *, struct sk_buff *, void *); + void *tx_data; +}; + +struct netlink_set_err_data { + struct sock *exclude_sk; + u32 portid; + u32 group; + int code; +}; + +struct nl_seq_iter { + struct seq_net_private p; + struct rhashtable_iter hti; + int link; +}; + +struct 
bpf_iter__netlink { + union { + struct bpf_iter_meta *meta; + }; + union { + struct netlink_sock *sk; + }; +}; + +enum { + CTRL_CMD_UNSPEC = 0, + CTRL_CMD_NEWFAMILY = 1, + CTRL_CMD_DELFAMILY = 2, + CTRL_CMD_GETFAMILY = 3, + CTRL_CMD_NEWOPS = 4, + CTRL_CMD_DELOPS = 5, + CTRL_CMD_GETOPS = 6, + CTRL_CMD_NEWMCAST_GRP = 7, + CTRL_CMD_DELMCAST_GRP = 8, + CTRL_CMD_GETMCAST_GRP = 9, + CTRL_CMD_GETPOLICY = 10, + __CTRL_CMD_MAX = 11, +}; + +enum { + CTRL_ATTR_UNSPEC = 0, + CTRL_ATTR_FAMILY_ID = 1, + CTRL_ATTR_FAMILY_NAME = 2, + CTRL_ATTR_VERSION = 3, + CTRL_ATTR_HDRSIZE = 4, + CTRL_ATTR_MAXATTR = 5, + CTRL_ATTR_OPS = 6, + CTRL_ATTR_MCAST_GROUPS = 7, + CTRL_ATTR_POLICY = 8, + CTRL_ATTR_OP_POLICY = 9, + CTRL_ATTR_OP = 10, + __CTRL_ATTR_MAX = 11, +}; + +enum { + CTRL_ATTR_OP_UNSPEC = 0, + CTRL_ATTR_OP_ID = 1, + CTRL_ATTR_OP_FLAGS = 2, + __CTRL_ATTR_OP_MAX = 3, +}; + +enum { + CTRL_ATTR_MCAST_GRP_UNSPEC = 0, + CTRL_ATTR_MCAST_GRP_NAME = 1, + CTRL_ATTR_MCAST_GRP_ID = 2, + __CTRL_ATTR_MCAST_GRP_MAX = 3, +}; + +enum { + CTRL_ATTR_POLICY_UNSPEC = 0, + CTRL_ATTR_POLICY_DO = 1, + CTRL_ATTR_POLICY_DUMP = 2, + __CTRL_ATTR_POLICY_DUMP_MAX = 3, + CTRL_ATTR_POLICY_DUMP_MAX = 2, +}; + +struct genl_start_context { + const struct genl_family *family; + struct nlmsghdr *nlh; + struct netlink_ext_ack *extack; + const struct genl_ops *ops; + int hdrlen; +}; + +struct netlink_policy_dump_state; + +struct ctrl_dump_policy_ctx { + struct netlink_policy_dump_state *state; + const struct genl_family *rt; + unsigned int opidx; + u32 op; + u16 fam_id; + u8 policies: 1; + u8 single_op: 1; +}; + +enum netlink_attribute_type { + NL_ATTR_TYPE_INVALID = 0, + NL_ATTR_TYPE_FLAG = 1, + NL_ATTR_TYPE_U8 = 2, + NL_ATTR_TYPE_U16 = 3, + NL_ATTR_TYPE_U32 = 4, + NL_ATTR_TYPE_U64 = 5, + NL_ATTR_TYPE_S8 = 6, + NL_ATTR_TYPE_S16 = 7, + NL_ATTR_TYPE_S32 = 8, + NL_ATTR_TYPE_S64 = 9, + NL_ATTR_TYPE_BINARY = 10, + NL_ATTR_TYPE_STRING = 11, + NL_ATTR_TYPE_NUL_STRING = 12, + NL_ATTR_TYPE_NESTED = 13, + NL_ATTR_TYPE_NESTED_ARRAY = 14, + NL_ATTR_TYPE_BITFIELD32 = 15, +}; + +enum netlink_policy_type_attr { + NL_POLICY_TYPE_ATTR_UNSPEC = 0, + NL_POLICY_TYPE_ATTR_TYPE = 1, + NL_POLICY_TYPE_ATTR_MIN_VALUE_S = 2, + NL_POLICY_TYPE_ATTR_MAX_VALUE_S = 3, + NL_POLICY_TYPE_ATTR_MIN_VALUE_U = 4, + NL_POLICY_TYPE_ATTR_MAX_VALUE_U = 5, + NL_POLICY_TYPE_ATTR_MIN_LENGTH = 6, + NL_POLICY_TYPE_ATTR_MAX_LENGTH = 7, + NL_POLICY_TYPE_ATTR_POLICY_IDX = 8, + NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE = 9, + NL_POLICY_TYPE_ATTR_BITFIELD32_MASK = 10, + NL_POLICY_TYPE_ATTR_PAD = 11, + NL_POLICY_TYPE_ATTR_MASK = 12, + __NL_POLICY_TYPE_ATTR_MAX = 13, + NL_POLICY_TYPE_ATTR_MAX = 12, +}; + +struct netlink_policy_dump_state___2 { + unsigned int policy_idx; + unsigned int attr_idx; + unsigned int n_alloc; + struct { + const struct nla_policy *policy; + unsigned int maxtype; + } policies[0]; +}; + +struct trace_event_raw_bpf_test_finish { + struct trace_entry ent; + int err; + char __data[0]; +}; + +struct trace_event_data_offsets_bpf_test_finish {}; + +typedef void (*btf_trace_bpf_test_finish)(void *, int *); + +struct bpf_fentry_test_t { + struct bpf_fentry_test_t *a; +}; + +struct bpf_raw_tp_test_run_info { + struct bpf_prog *prog; + void *ctx; + u32 retval; +}; + +struct ethtool_cmd { + __u32 cmd; + __u32 supported; + __u32 advertising; + __u16 speed; + __u8 duplex; + __u8 port; + __u8 phy_address; + __u8 transceiver; + __u8 autoneg; + __u8 mdio_support; + __u32 maxtxpkt; + __u32 maxrxpkt; + __u16 speed_hi; + __u8 eth_tp_mdix; + __u8 eth_tp_mdix_ctrl; + __u32 lp_advertising; + __u32 
reserved[2]; +}; + +struct ethtool_value { + __u32 cmd; + __u32 data; +}; + +enum tunable_id { + ETHTOOL_ID_UNSPEC = 0, + ETHTOOL_RX_COPYBREAK = 1, + ETHTOOL_TX_COPYBREAK = 2, + ETHTOOL_PFC_PREVENTION_TOUT = 3, + __ETHTOOL_TUNABLE_COUNT = 4, +}; + +enum tunable_type_id { + ETHTOOL_TUNABLE_UNSPEC = 0, + ETHTOOL_TUNABLE_U8 = 1, + ETHTOOL_TUNABLE_U16 = 2, + ETHTOOL_TUNABLE_U32 = 3, + ETHTOOL_TUNABLE_U64 = 4, + ETHTOOL_TUNABLE_STRING = 5, + ETHTOOL_TUNABLE_S8 = 6, + ETHTOOL_TUNABLE_S16 = 7, + ETHTOOL_TUNABLE_S32 = 8, + ETHTOOL_TUNABLE_S64 = 9, +}; + +enum phy_tunable_id { + ETHTOOL_PHY_ID_UNSPEC = 0, + ETHTOOL_PHY_DOWNSHIFT = 1, + ETHTOOL_PHY_FAST_LINK_DOWN = 2, + ETHTOOL_PHY_EDPD = 3, + __ETHTOOL_PHY_TUNABLE_COUNT = 4, +}; + +struct ethtool_gstrings { + __u32 cmd; + __u32 string_set; + __u32 len; + __u8 data[0]; +}; + +struct ethtool_sset_info { + __u32 cmd; + __u32 reserved; + __u64 sset_mask; + __u32 data[0]; +}; + +struct ethtool_perm_addr { + __u32 cmd; + __u32 size; + __u8 data[0]; +}; + +enum ethtool_flags { + ETH_FLAG_TXVLAN = 128, + ETH_FLAG_RXVLAN = 256, + ETH_FLAG_LRO = 32768, + ETH_FLAG_NTUPLE = 134217728, + ETH_FLAG_RXHASH = 268435456, +}; + +struct ethtool_rxfh { + __u32 cmd; + __u32 rss_context; + __u32 indir_size; + __u32 key_size; + __u8 hfunc; + __u8 rsvd8[3]; + __u32 rsvd32; + __u32 rss_config[0]; +}; + +struct ethtool_get_features_block { + __u32 available; + __u32 requested; + __u32 active; + __u32 never_changed; +}; + +struct ethtool_gfeatures { + __u32 cmd; + __u32 size; + struct ethtool_get_features_block features[0]; +}; + +struct ethtool_set_features_block { + __u32 valid; + __u32 requested; +}; + +struct ethtool_sfeatures { + __u32 cmd; + __u32 size; + struct ethtool_set_features_block features[0]; +}; + +enum ethtool_sfeatures_retval_bits { + ETHTOOL_F_UNSUPPORTED__BIT = 0, + ETHTOOL_F_WISH__BIT = 1, + ETHTOOL_F_COMPAT__BIT = 2, +}; + +struct ethtool_per_queue_op { + __u32 cmd; + __u32 sub_command; + __u32 queue_mask[128]; + char data[0]; +}; + +enum { + ETH_RSS_HASH_TOP_BIT = 0, + ETH_RSS_HASH_XOR_BIT = 1, + ETH_RSS_HASH_CRC32_BIT = 2, + ETH_RSS_HASH_FUNCS_COUNT = 3, +}; + +struct ethtool_rx_flow_rule { + struct flow_rule *rule; + long unsigned int priv[0]; +}; + +struct ethtool_rx_flow_spec_input { + const struct ethtool_rx_flow_spec *fs; + u32 rss_ctx; +}; + +struct ethtool_link_usettings { + struct ethtool_link_settings base; + struct { + __u32 supported[3]; + __u32 advertising[3]; + __u32 lp_advertising[3]; + } link_modes; +}; + +struct ethtool_rx_flow_key { + struct flow_dissector_key_basic basic; + union { + struct flow_dissector_key_ipv4_addrs ipv4; + struct flow_dissector_key_ipv6_addrs ipv6; + }; + struct flow_dissector_key_ports tp; + struct flow_dissector_key_ip ip; + struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_eth_addrs eth_addrs; + long: 48; +}; + +struct ethtool_rx_flow_match { + struct flow_dissector dissector; + int: 32; + struct ethtool_rx_flow_key key; + struct ethtool_rx_flow_key mask; +}; + +enum { + ETHTOOL_UDP_TUNNEL_TYPE_VXLAN = 0, + ETHTOOL_UDP_TUNNEL_TYPE_GENEVE = 1, + ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE = 2, + __ETHTOOL_UDP_TUNNEL_TYPE_CNT = 3, +}; + +enum { + ETHTOOL_MSG_USER_NONE = 0, + ETHTOOL_MSG_STRSET_GET = 1, + ETHTOOL_MSG_LINKINFO_GET = 2, + ETHTOOL_MSG_LINKINFO_SET = 3, + ETHTOOL_MSG_LINKMODES_GET = 4, + ETHTOOL_MSG_LINKMODES_SET = 5, + ETHTOOL_MSG_LINKSTATE_GET = 6, + ETHTOOL_MSG_DEBUG_GET = 7, + ETHTOOL_MSG_DEBUG_SET = 8, + ETHTOOL_MSG_WOL_GET = 9, + ETHTOOL_MSG_WOL_SET = 10, + ETHTOOL_MSG_FEATURES_GET = 11, + 
ETHTOOL_MSG_FEATURES_SET = 12, + ETHTOOL_MSG_PRIVFLAGS_GET = 13, + ETHTOOL_MSG_PRIVFLAGS_SET = 14, + ETHTOOL_MSG_RINGS_GET = 15, + ETHTOOL_MSG_RINGS_SET = 16, + ETHTOOL_MSG_CHANNELS_GET = 17, + ETHTOOL_MSG_CHANNELS_SET = 18, + ETHTOOL_MSG_COALESCE_GET = 19, + ETHTOOL_MSG_COALESCE_SET = 20, + ETHTOOL_MSG_PAUSE_GET = 21, + ETHTOOL_MSG_PAUSE_SET = 22, + ETHTOOL_MSG_EEE_GET = 23, + ETHTOOL_MSG_EEE_SET = 24, + ETHTOOL_MSG_TSINFO_GET = 25, + ETHTOOL_MSG_CABLE_TEST_ACT = 26, + ETHTOOL_MSG_CABLE_TEST_TDR_ACT = 27, + ETHTOOL_MSG_TUNNEL_INFO_GET = 28, + __ETHTOOL_MSG_USER_CNT = 29, + ETHTOOL_MSG_USER_MAX = 28, +}; + +enum { + ETHTOOL_A_HEADER_UNSPEC = 0, + ETHTOOL_A_HEADER_DEV_INDEX = 1, + ETHTOOL_A_HEADER_DEV_NAME = 2, + ETHTOOL_A_HEADER_FLAGS = 3, + __ETHTOOL_A_HEADER_CNT = 4, + ETHTOOL_A_HEADER_MAX = 3, +}; + +enum { + ETHTOOL_A_STRSET_UNSPEC = 0, + ETHTOOL_A_STRSET_HEADER = 1, + ETHTOOL_A_STRSET_STRINGSETS = 2, + ETHTOOL_A_STRSET_COUNTS_ONLY = 3, + __ETHTOOL_A_STRSET_CNT = 4, + ETHTOOL_A_STRSET_MAX = 3, +}; + +enum { + ETHTOOL_A_LINKINFO_UNSPEC = 0, + ETHTOOL_A_LINKINFO_HEADER = 1, + ETHTOOL_A_LINKINFO_PORT = 2, + ETHTOOL_A_LINKINFO_PHYADDR = 3, + ETHTOOL_A_LINKINFO_TP_MDIX = 4, + ETHTOOL_A_LINKINFO_TP_MDIX_CTRL = 5, + ETHTOOL_A_LINKINFO_TRANSCEIVER = 6, + __ETHTOOL_A_LINKINFO_CNT = 7, + ETHTOOL_A_LINKINFO_MAX = 6, +}; + +enum { + ETHTOOL_A_LINKMODES_UNSPEC = 0, + ETHTOOL_A_LINKMODES_HEADER = 1, + ETHTOOL_A_LINKMODES_AUTONEG = 2, + ETHTOOL_A_LINKMODES_OURS = 3, + ETHTOOL_A_LINKMODES_PEER = 4, + ETHTOOL_A_LINKMODES_SPEED = 5, + ETHTOOL_A_LINKMODES_DUPLEX = 6, + ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG = 7, + ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE = 8, + __ETHTOOL_A_LINKMODES_CNT = 9, + ETHTOOL_A_LINKMODES_MAX = 8, +}; + +enum { + ETHTOOL_A_LINKSTATE_UNSPEC = 0, + ETHTOOL_A_LINKSTATE_HEADER = 1, + ETHTOOL_A_LINKSTATE_LINK = 2, + ETHTOOL_A_LINKSTATE_SQI = 3, + ETHTOOL_A_LINKSTATE_SQI_MAX = 4, + ETHTOOL_A_LINKSTATE_EXT_STATE = 5, + ETHTOOL_A_LINKSTATE_EXT_SUBSTATE = 6, + __ETHTOOL_A_LINKSTATE_CNT = 7, + ETHTOOL_A_LINKSTATE_MAX = 6, +}; + +enum { + ETHTOOL_A_DEBUG_UNSPEC = 0, + ETHTOOL_A_DEBUG_HEADER = 1, + ETHTOOL_A_DEBUG_MSGMASK = 2, + __ETHTOOL_A_DEBUG_CNT = 3, + ETHTOOL_A_DEBUG_MAX = 2, +}; + +enum { + ETHTOOL_A_WOL_UNSPEC = 0, + ETHTOOL_A_WOL_HEADER = 1, + ETHTOOL_A_WOL_MODES = 2, + ETHTOOL_A_WOL_SOPASS = 3, + __ETHTOOL_A_WOL_CNT = 4, + ETHTOOL_A_WOL_MAX = 3, +}; + +enum { + ETHTOOL_A_FEATURES_UNSPEC = 0, + ETHTOOL_A_FEATURES_HEADER = 1, + ETHTOOL_A_FEATURES_HW = 2, + ETHTOOL_A_FEATURES_WANTED = 3, + ETHTOOL_A_FEATURES_ACTIVE = 4, + ETHTOOL_A_FEATURES_NOCHANGE = 5, + __ETHTOOL_A_FEATURES_CNT = 6, + ETHTOOL_A_FEATURES_MAX = 5, +}; + +enum { + ETHTOOL_A_PRIVFLAGS_UNSPEC = 0, + ETHTOOL_A_PRIVFLAGS_HEADER = 1, + ETHTOOL_A_PRIVFLAGS_FLAGS = 2, + __ETHTOOL_A_PRIVFLAGS_CNT = 3, + ETHTOOL_A_PRIVFLAGS_MAX = 2, +}; + +enum { + ETHTOOL_A_RINGS_UNSPEC = 0, + ETHTOOL_A_RINGS_HEADER = 1, + ETHTOOL_A_RINGS_RX_MAX = 2, + ETHTOOL_A_RINGS_RX_MINI_MAX = 3, + ETHTOOL_A_RINGS_RX_JUMBO_MAX = 4, + ETHTOOL_A_RINGS_TX_MAX = 5, + ETHTOOL_A_RINGS_RX = 6, + ETHTOOL_A_RINGS_RX_MINI = 7, + ETHTOOL_A_RINGS_RX_JUMBO = 8, + ETHTOOL_A_RINGS_TX = 9, + __ETHTOOL_A_RINGS_CNT = 10, + ETHTOOL_A_RINGS_MAX = 9, +}; + +enum { + ETHTOOL_A_CHANNELS_UNSPEC = 0, + ETHTOOL_A_CHANNELS_HEADER = 1, + ETHTOOL_A_CHANNELS_RX_MAX = 2, + ETHTOOL_A_CHANNELS_TX_MAX = 3, + ETHTOOL_A_CHANNELS_OTHER_MAX = 4, + ETHTOOL_A_CHANNELS_COMBINED_MAX = 5, + ETHTOOL_A_CHANNELS_RX_COUNT = 6, + ETHTOOL_A_CHANNELS_TX_COUNT = 7, + ETHTOOL_A_CHANNELS_OTHER_COUNT = 8, + 
ETHTOOL_A_CHANNELS_COMBINED_COUNT = 9, + __ETHTOOL_A_CHANNELS_CNT = 10, + ETHTOOL_A_CHANNELS_MAX = 9, +}; + +enum { + ETHTOOL_A_COALESCE_UNSPEC = 0, + ETHTOOL_A_COALESCE_HEADER = 1, + ETHTOOL_A_COALESCE_RX_USECS = 2, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES = 3, + ETHTOOL_A_COALESCE_RX_USECS_IRQ = 4, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ = 5, + ETHTOOL_A_COALESCE_TX_USECS = 6, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES = 7, + ETHTOOL_A_COALESCE_TX_USECS_IRQ = 8, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ = 9, + ETHTOOL_A_COALESCE_STATS_BLOCK_USECS = 10, + ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX = 11, + ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX = 12, + ETHTOOL_A_COALESCE_PKT_RATE_LOW = 13, + ETHTOOL_A_COALESCE_RX_USECS_LOW = 14, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW = 15, + ETHTOOL_A_COALESCE_TX_USECS_LOW = 16, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW = 17, + ETHTOOL_A_COALESCE_PKT_RATE_HIGH = 18, + ETHTOOL_A_COALESCE_RX_USECS_HIGH = 19, + ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH = 20, + ETHTOOL_A_COALESCE_TX_USECS_HIGH = 21, + ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH = 22, + ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 23, + __ETHTOOL_A_COALESCE_CNT = 24, + ETHTOOL_A_COALESCE_MAX = 23, +}; + +enum { + ETHTOOL_A_PAUSE_UNSPEC = 0, + ETHTOOL_A_PAUSE_HEADER = 1, + ETHTOOL_A_PAUSE_AUTONEG = 2, + ETHTOOL_A_PAUSE_RX = 3, + ETHTOOL_A_PAUSE_TX = 4, + ETHTOOL_A_PAUSE_STATS = 5, + __ETHTOOL_A_PAUSE_CNT = 6, + ETHTOOL_A_PAUSE_MAX = 5, +}; + +enum { + ETHTOOL_A_EEE_UNSPEC = 0, + ETHTOOL_A_EEE_HEADER = 1, + ETHTOOL_A_EEE_MODES_OURS = 2, + ETHTOOL_A_EEE_MODES_PEER = 3, + ETHTOOL_A_EEE_ACTIVE = 4, + ETHTOOL_A_EEE_ENABLED = 5, + ETHTOOL_A_EEE_TX_LPI_ENABLED = 6, + ETHTOOL_A_EEE_TX_LPI_TIMER = 7, + __ETHTOOL_A_EEE_CNT = 8, + ETHTOOL_A_EEE_MAX = 7, +}; + +enum { + ETHTOOL_A_TSINFO_UNSPEC = 0, + ETHTOOL_A_TSINFO_HEADER = 1, + ETHTOOL_A_TSINFO_TIMESTAMPING = 2, + ETHTOOL_A_TSINFO_TX_TYPES = 3, + ETHTOOL_A_TSINFO_RX_FILTERS = 4, + ETHTOOL_A_TSINFO_PHC_INDEX = 5, + __ETHTOOL_A_TSINFO_CNT = 6, + ETHTOOL_A_TSINFO_MAX = 5, +}; + +enum { + ETHTOOL_A_CABLE_TEST_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_HEADER = 1, + __ETHTOOL_A_CABLE_TEST_CNT = 2, + ETHTOOL_A_CABLE_TEST_MAX = 1, +}; + +enum { + ETHTOOL_A_CABLE_TEST_TDR_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_TDR_HEADER = 1, + ETHTOOL_A_CABLE_TEST_TDR_CFG = 2, + __ETHTOOL_A_CABLE_TEST_TDR_CNT = 3, + ETHTOOL_A_CABLE_TEST_TDR_MAX = 2, +}; + +enum { + ETHTOOL_A_TUNNEL_INFO_UNSPEC = 0, + ETHTOOL_A_TUNNEL_INFO_HEADER = 1, + ETHTOOL_A_TUNNEL_INFO_UDP_PORTS = 2, + __ETHTOOL_A_TUNNEL_INFO_CNT = 3, + ETHTOOL_A_TUNNEL_INFO_MAX = 2, +}; + +enum ethtool_multicast_groups { + ETHNL_MCGRP_MONITOR = 0, +}; + +struct ethnl_req_info { + struct net_device *dev; + u32 flags; +}; + +struct ethnl_reply_data { + struct net_device *dev; +}; + +struct ethnl_request_ops { + u8 request_cmd; + u8 reply_cmd; + u16 hdr_attr; + unsigned int req_info_size; + unsigned int reply_data_size; + bool allow_nodev_do; + int (*parse_request)(struct ethnl_req_info *, struct nlattr **, struct netlink_ext_ack *); + int (*prepare_data)(const struct ethnl_req_info *, struct ethnl_reply_data *, struct genl_info *); + int (*reply_size)(const struct ethnl_req_info *, const struct ethnl_reply_data *); + int (*fill_reply)(struct sk_buff *, const struct ethnl_req_info *, const struct ethnl_reply_data *); + void (*cleanup_data)(struct ethnl_reply_data *); +}; + +struct ethnl_dump_ctx { + const struct ethnl_request_ops *ops; + struct ethnl_req_info *req_info; + struct ethnl_reply_data *reply_data; + int pos_hash; + int pos_idx; +}; + +typedef void 
(*ethnl_notify_handler_t)(struct net_device *, unsigned int, const void *); + +enum { + ETHTOOL_A_BITSET_BIT_UNSPEC = 0, + ETHTOOL_A_BITSET_BIT_INDEX = 1, + ETHTOOL_A_BITSET_BIT_NAME = 2, + ETHTOOL_A_BITSET_BIT_VALUE = 3, + __ETHTOOL_A_BITSET_BIT_CNT = 4, + ETHTOOL_A_BITSET_BIT_MAX = 3, +}; + +enum { + ETHTOOL_A_BITSET_BITS_UNSPEC = 0, + ETHTOOL_A_BITSET_BITS_BIT = 1, + __ETHTOOL_A_BITSET_BITS_CNT = 2, + ETHTOOL_A_BITSET_BITS_MAX = 1, +}; + +enum { + ETHTOOL_A_BITSET_UNSPEC = 0, + ETHTOOL_A_BITSET_NOMASK = 1, + ETHTOOL_A_BITSET_SIZE = 2, + ETHTOOL_A_BITSET_BITS = 3, + ETHTOOL_A_BITSET_VALUE = 4, + ETHTOOL_A_BITSET_MASK = 5, + __ETHTOOL_A_BITSET_CNT = 6, + ETHTOOL_A_BITSET_MAX = 5, +}; + +typedef const char (* const ethnl_string_array_t)[32]; + +enum { + ETHTOOL_A_STRING_UNSPEC = 0, + ETHTOOL_A_STRING_INDEX = 1, + ETHTOOL_A_STRING_VALUE = 2, + __ETHTOOL_A_STRING_CNT = 3, + ETHTOOL_A_STRING_MAX = 2, +}; + +enum { + ETHTOOL_A_STRINGS_UNSPEC = 0, + ETHTOOL_A_STRINGS_STRING = 1, + __ETHTOOL_A_STRINGS_CNT = 2, + ETHTOOL_A_STRINGS_MAX = 1, +}; + +enum { + ETHTOOL_A_STRINGSET_UNSPEC = 0, + ETHTOOL_A_STRINGSET_ID = 1, + ETHTOOL_A_STRINGSET_COUNT = 2, + ETHTOOL_A_STRINGSET_STRINGS = 3, + __ETHTOOL_A_STRINGSET_CNT = 4, + ETHTOOL_A_STRINGSET_MAX = 3, +}; + +enum { + ETHTOOL_A_STRINGSETS_UNSPEC = 0, + ETHTOOL_A_STRINGSETS_STRINGSET = 1, + __ETHTOOL_A_STRINGSETS_CNT = 2, + ETHTOOL_A_STRINGSETS_MAX = 1, +}; + +struct strset_info { + bool per_dev; + bool free_strings; + unsigned int count; + const char (*strings)[32]; +}; + +struct strset_req_info { + struct ethnl_req_info base; + u32 req_ids; + bool counts_only; +}; + +struct strset_reply_data { + struct ethnl_reply_data base; + struct strset_info sets[16]; +}; + +struct linkinfo_reply_data { + struct ethnl_reply_data base; + struct ethtool_link_ksettings ksettings; + struct ethtool_link_settings *lsettings; +}; + +struct linkmodes_reply_data { + struct ethnl_reply_data base; + struct ethtool_link_ksettings ksettings; + struct ethtool_link_settings *lsettings; + bool peer_empty; +}; + +struct link_mode_info { + int speed; + u8 duplex; +}; + +struct linkstate_reply_data { + struct ethnl_reply_data base; + int link; + int sqi; + int sqi_max; + bool link_ext_state_provided; + struct ethtool_link_ext_state_info ethtool_link_ext_state_info; +}; + +struct debug_reply_data { + struct ethnl_reply_data base; + u32 msg_mask; +}; + +struct wol_reply_data { + struct ethnl_reply_data base; + struct ethtool_wolinfo wol; + bool show_sopass; +}; + +struct features_reply_data { + struct ethnl_reply_data base; + u32 hw[2]; + u32 wanted[2]; + u32 active[2]; + u32 nochange[2]; + u32 all[2]; +}; + +struct privflags_reply_data { + struct ethnl_reply_data base; + const char (*priv_flag_names)[32]; + unsigned int n_priv_flags; + u32 priv_flags; +}; + +struct rings_reply_data { + struct ethnl_reply_data base; + struct ethtool_ringparam ringparam; +}; + +struct channels_reply_data { + struct ethnl_reply_data base; + struct ethtool_channels channels; +}; + +struct coalesce_reply_data { + struct ethnl_reply_data base; + struct ethtool_coalesce coalesce; + u32 supported_params; +}; + +enum { + ETHTOOL_A_PAUSE_STAT_UNSPEC = 0, + ETHTOOL_A_PAUSE_STAT_PAD = 1, + ETHTOOL_A_PAUSE_STAT_TX_FRAMES = 2, + ETHTOOL_A_PAUSE_STAT_RX_FRAMES = 3, + __ETHTOOL_A_PAUSE_STAT_CNT = 4, + ETHTOOL_A_PAUSE_STAT_MAX = 3, +}; + +struct pause_reply_data { + struct ethnl_reply_data base; + struct ethtool_pauseparam pauseparam; + struct ethtool_pause_stats pausestat; +}; + +struct eee_reply_data { + struct 
ethnl_reply_data base; + struct ethtool_eee eee; +}; + +struct tsinfo_reply_data { + struct ethnl_reply_data base; + struct ethtool_ts_info ts_info; +}; + +enum { + ETHTOOL_A_CABLE_PAIR_A = 0, + ETHTOOL_A_CABLE_PAIR_B = 1, + ETHTOOL_A_CABLE_PAIR_C = 2, + ETHTOOL_A_CABLE_PAIR_D = 3, +}; + +enum { + ETHTOOL_A_CABLE_RESULT_UNSPEC = 0, + ETHTOOL_A_CABLE_RESULT_PAIR = 1, + ETHTOOL_A_CABLE_RESULT_CODE = 2, + __ETHTOOL_A_CABLE_RESULT_CNT = 3, + ETHTOOL_A_CABLE_RESULT_MAX = 2, +}; + +enum { + ETHTOOL_A_CABLE_FAULT_LENGTH_UNSPEC = 0, + ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR = 1, + ETHTOOL_A_CABLE_FAULT_LENGTH_CM = 2, + __ETHTOOL_A_CABLE_FAULT_LENGTH_CNT = 3, + ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 2, +}; + +enum { + ETHTOOL_A_CABLE_TEST_NTF_STATUS_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_NTF_STATUS_STARTED = 1, + ETHTOOL_A_CABLE_TEST_NTF_STATUS_COMPLETED = 2, +}; + +enum { + ETHTOOL_A_CABLE_NEST_UNSPEC = 0, + ETHTOOL_A_CABLE_NEST_RESULT = 1, + ETHTOOL_A_CABLE_NEST_FAULT_LENGTH = 2, + __ETHTOOL_A_CABLE_NEST_CNT = 3, + ETHTOOL_A_CABLE_NEST_MAX = 2, +}; + +enum { + ETHTOOL_A_CABLE_TEST_NTF_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_NTF_HEADER = 1, + ETHTOOL_A_CABLE_TEST_NTF_STATUS = 2, + ETHTOOL_A_CABLE_TEST_NTF_NEST = 3, + __ETHTOOL_A_CABLE_TEST_NTF_CNT = 4, + ETHTOOL_A_CABLE_TEST_NTF_MAX = 3, +}; + +enum { + ETHTOOL_A_CABLE_TEST_TDR_CFG_UNSPEC = 0, + ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST = 1, + ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST = 2, + ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP = 3, + ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR = 4, + __ETHTOOL_A_CABLE_TEST_TDR_CFG_CNT = 5, + ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX = 4, +}; + +enum { + ETHTOOL_A_CABLE_AMPLITUDE_UNSPEC = 0, + ETHTOOL_A_CABLE_AMPLITUDE_PAIR = 1, + ETHTOOL_A_CABLE_AMPLITUDE_mV = 2, + __ETHTOOL_A_CABLE_AMPLITUDE_CNT = 3, + ETHTOOL_A_CABLE_AMPLITUDE_MAX = 2, +}; + +enum { + ETHTOOL_A_CABLE_PULSE_UNSPEC = 0, + ETHTOOL_A_CABLE_PULSE_mV = 1, + __ETHTOOL_A_CABLE_PULSE_CNT = 2, + ETHTOOL_A_CABLE_PULSE_MAX = 1, +}; + +enum { + ETHTOOL_A_CABLE_STEP_UNSPEC = 0, + ETHTOOL_A_CABLE_STEP_FIRST_DISTANCE = 1, + ETHTOOL_A_CABLE_STEP_LAST_DISTANCE = 2, + ETHTOOL_A_CABLE_STEP_STEP_DISTANCE = 3, + __ETHTOOL_A_CABLE_STEP_CNT = 4, + ETHTOOL_A_CABLE_STEP_MAX = 3, +}; + +enum { + ETHTOOL_A_CABLE_TDR_NEST_UNSPEC = 0, + ETHTOOL_A_CABLE_TDR_NEST_STEP = 1, + ETHTOOL_A_CABLE_TDR_NEST_AMPLITUDE = 2, + ETHTOOL_A_CABLE_TDR_NEST_PULSE = 3, + __ETHTOOL_A_CABLE_TDR_NEST_CNT = 4, + ETHTOOL_A_CABLE_TDR_NEST_MAX = 3, +}; + +enum { + ETHTOOL_A_TUNNEL_UDP_ENTRY_UNSPEC = 0, + ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT = 1, + ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE = 2, + __ETHTOOL_A_TUNNEL_UDP_ENTRY_CNT = 3, + ETHTOOL_A_TUNNEL_UDP_ENTRY_MAX = 2, +}; + +enum { + ETHTOOL_A_TUNNEL_UDP_TABLE_UNSPEC = 0, + ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE = 1, + ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES = 2, + ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY = 3, + __ETHTOOL_A_TUNNEL_UDP_TABLE_CNT = 4, + ETHTOOL_A_TUNNEL_UDP_TABLE_MAX = 3, +}; + +enum { + ETHTOOL_A_TUNNEL_UDP_UNSPEC = 0, + ETHTOOL_A_TUNNEL_UDP_TABLE = 1, + __ETHTOOL_A_TUNNEL_UDP_CNT = 2, + ETHTOOL_A_TUNNEL_UDP_MAX = 1, +}; + +enum udp_parsable_tunnel_type { + UDP_TUNNEL_TYPE_VXLAN = 1, + UDP_TUNNEL_TYPE_GENEVE = 2, + UDP_TUNNEL_TYPE_VXLAN_GPE = 4, +}; + +enum udp_tunnel_nic_info_flags { + UDP_TUNNEL_NIC_INFO_MAY_SLEEP = 1, + UDP_TUNNEL_NIC_INFO_OPEN_ONLY = 2, + UDP_TUNNEL_NIC_INFO_IPV4_ONLY = 4, + UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN = 8, +}; + +struct udp_tunnel_nic_ops { + void (*get_port)(struct net_device *, unsigned int, unsigned int, struct udp_tunnel_info *); + void (*set_port_priv)(struct net_device *, unsigned int, 
unsigned int, u8); + void (*add_port)(struct net_device *, struct udp_tunnel_info *); + void (*del_port)(struct net_device *, struct udp_tunnel_info *); + void (*reset_ntf)(struct net_device *); + size_t (*dump_size)(struct net_device *, unsigned int); + int (*dump_write)(struct net_device *, unsigned int, struct sk_buff *); +}; + +struct ethnl_tunnel_info_dump_ctx { + struct ethnl_req_info req_info; + int pos_hash; + int pos_idx; +}; + +struct nf_hook_entries_rcu_head { + struct callback_head head; + void *allocation; +}; + +struct nf_conn___2; + +enum nf_nat_manip_type; + +struct nf_nat_hook { + int (*parse_nat_setup)(struct nf_conn___2 *, enum nf_nat_manip_type, const struct nlattr *); + void (*decode_session)(struct sk_buff *, struct flowi *); + unsigned int (*manip_pkt)(struct sk_buff *, struct nf_conn___2 *, enum nf_nat_manip_type, enum ip_conntrack_dir); +}; + +struct nf_conntrack_tuple___2; + +struct nf_ct_hook { + int (*update)(struct net *, struct sk_buff *); + void (*destroy)(struct nf_conntrack *); + bool (*get_tuple_skb)(struct nf_conntrack_tuple___2 *, const struct sk_buff *); +}; + +struct nfnl_ct_hook { + struct nf_conn___2 * (*get_ct)(const struct sk_buff *, enum ip_conntrack_info *); + size_t (*build_size)(const struct nf_conn___2 *); + int (*build)(struct sk_buff *, struct nf_conn___2 *, enum ip_conntrack_info, u_int16_t, u_int16_t); + int (*parse)(const struct nlattr *, struct nf_conn___2 *); + int (*attach_expect)(const struct nlattr *, struct nf_conn___2 *, u32, u32); + void (*seq_adjust)(struct sk_buff *, struct nf_conn___2 *, enum ip_conntrack_info, s32); +}; + +struct nf_ipv6_ops { + void (*route_input)(struct sk_buff *); + int (*fragment)(struct net *, struct sock *, struct sk_buff *, int (*)(struct net *, struct sock *, struct sk_buff *)); + int (*reroute)(struct sk_buff *, const struct nf_queue_entry *); +}; + +struct nf_queue_entry { + struct list_head list; + struct sk_buff *skb; + unsigned int id; + unsigned int hook_index; + struct net_device *physin; + struct net_device *physout; + struct nf_hook_state state; + u16 size; +}; + +struct nf_loginfo { + u_int8_t type; + union { + struct { + u_int32_t copy_len; + u_int16_t group; + u_int16_t qthreshold; + u_int16_t flags; + } ulog; + struct { + u_int8_t level; + u_int8_t logflags; + } log; + } u; +}; + +struct nf_log_buf { + unsigned int count; + char buf[1020]; +}; + +struct nf_bridge_info { + enum { + BRNF_PROTO_UNCHANGED = 0, + BRNF_PROTO_8021Q = 1, + BRNF_PROTO_PPPOE = 2, + } orig_proto: 8; + u8 pkt_otherhost: 1; + u8 in_prerouting: 1; + u8 bridged_dnat: 1; + __u16 frag_max_size; + struct net_device *physindev; + struct net_device *physoutdev; + union { + __be32 ipv4_daddr; + struct in6_addr ipv6_daddr; + char neigh_header[8]; + }; +}; + +struct ip_rt_info { + __be32 daddr; + __be32 saddr; + u_int8_t tos; + u_int32_t mark; +}; + +struct ip6_rt_info { + struct in6_addr daddr; + struct in6_addr saddr; + u_int32_t mark; +}; + +struct nf_sockopt_ops { + struct list_head list; + u_int8_t pf; + int set_optmin; + int set_optmax; + int (*set)(struct sock *, int, sockptr_t, unsigned int); + int get_optmin; + int get_optmax; + int (*get)(struct sock *, int, void *, int *); + struct module *owner; +}; + +struct ip_mreqn { + struct in_addr imr_multiaddr; + struct in_addr imr_address; + int imr_ifindex; +}; + +struct rtmsg { + unsigned char rtm_family; + unsigned char rtm_dst_len; + unsigned char rtm_src_len; + unsigned char rtm_tos; + unsigned char rtm_table; + unsigned char rtm_protocol; + unsigned char rtm_scope; + 
unsigned char rtm_type; + unsigned int rtm_flags; +}; + +struct rtvia { + __kernel_sa_family_t rtvia_family; + __u8 rtvia_addr[0]; +}; + +struct ip_sf_list; + +struct ip_mc_list { + struct in_device *interface; + __be32 multiaddr; + unsigned int sfmode; + struct ip_sf_list *sources; + struct ip_sf_list *tomb; + long unsigned int sfcount[2]; + union { + struct ip_mc_list *next; + struct ip_mc_list *next_rcu; + }; + struct ip_mc_list *next_hash; + struct timer_list timer; + int users; + refcount_t refcnt; + spinlock_t lock; + char tm_running; + char reporter; + char unsolicit_count; + char loaded; + unsigned char gsquery; + unsigned char crcount; + struct callback_head rcu; +}; + +struct ip_sf_socklist { + unsigned int sl_max; + unsigned int sl_count; + struct callback_head rcu; + __be32 sl_addr[0]; +}; + +struct ip_mc_socklist { + struct ip_mc_socklist *next_rcu; + struct ip_mreqn multi; + unsigned int sfmode; + struct ip_sf_socklist *sflist; + struct callback_head rcu; +}; + +struct ip_sf_list { + struct ip_sf_list *sf_next; + long unsigned int sf_count[2]; + __be32 sf_inaddr; + unsigned char sf_gsresp; + unsigned char sf_oldin; + unsigned char sf_crcount; +}; + +struct ipv4_addr_key { + __be32 addr; + int vif; +}; + +struct inetpeer_addr { + union { + struct ipv4_addr_key a4; + struct in6_addr a6; + u32 key[4]; + }; + __u16 family; +}; + +struct inet_peer { + struct rb_node rb_node; + struct inetpeer_addr daddr; + u32 metrics[17]; + u32 rate_tokens; + u32 n_redirects; + long unsigned int rate_last; + union { + struct { + atomic_t rid; + }; + struct callback_head rcu; + }; + __u32 dtime; + refcount_t refcnt; +}; + +struct fib_rt_info { + struct fib_info *fi; + u32 tb_id; + __be32 dst; + int dst_len; + u8 tos; + u8 type; + u8 offload: 1; + u8 trap: 1; + u8 unused: 6; +}; + +struct uncached_list { + spinlock_t lock; + struct list_head head; +}; + +struct ip_rt_acct { + __u32 o_bytes; + __u32 o_packets; + __u32 i_bytes; + __u32 i_packets; +}; + +struct rt_cache_stat { + unsigned int in_slow_tot; + unsigned int in_slow_mc; + unsigned int in_no_route; + unsigned int in_brd; + unsigned int in_martian_dst; + unsigned int in_martian_src; + unsigned int out_slow_tot; + unsigned int out_slow_mc; +}; + +struct fib_alias { + struct hlist_node fa_list; + struct fib_info *fa_info; + u8 fa_tos; + u8 fa_type; + u8 fa_state; + u8 fa_slen; + u32 tb_id; + s16 fa_default; + u8 offload: 1; + u8 trap: 1; + u8 unused: 6; + struct callback_head rcu; +}; + +struct fib_prop { + int error; + u8 scope; +}; + +struct net_offload { + struct offload_callbacks callbacks; + unsigned int flags; +}; + +struct raw_hashinfo { + rwlock_t lock; + struct hlist_head ht[256]; +}; + +enum ip_defrag_users { + IP_DEFRAG_LOCAL_DELIVER = 0, + IP_DEFRAG_CALL_RA_CHAIN = 1, + IP_DEFRAG_CONNTRACK_IN = 2, + __IP_DEFRAG_CONNTRACK_IN_END = 65537, + IP_DEFRAG_CONNTRACK_OUT = 65538, + __IP_DEFRAG_CONNTRACK_OUT_END = 131073, + IP_DEFRAG_CONNTRACK_BRIDGE_IN = 131074, + __IP_DEFRAG_CONNTRACK_BRIDGE_IN = 196609, + IP_DEFRAG_VS_IN = 196610, + IP_DEFRAG_VS_OUT = 196611, + IP_DEFRAG_VS_FWD = 196612, + IP_DEFRAG_AF_PACKET = 196613, + IP_DEFRAG_MACVLAN = 196614, +}; + +enum { + INET_FRAG_FIRST_IN = 1, + INET_FRAG_LAST_IN = 2, + INET_FRAG_COMPLETE = 4, + INET_FRAG_HASH_DEAD = 8, +}; + +struct ipq { + struct inet_frag_queue q; + u8 ecn; + u16 max_df_size; + int iif; + unsigned int rid; + struct inet_peer *peer; +}; + +struct ip_options_data { + struct ip_options_rcu opt; + char data[40]; +}; + +struct ipcm_cookie { + struct sockcm_cookie sockc; + __be32 
addr; + int oif; + struct ip_options_rcu *opt; + __u8 ttl; + __s16 tos; + char priority; + __u16 gso_size; +}; + +struct ip_fraglist_iter { + struct sk_buff *frag; + struct iphdr *iph; + int offset; + unsigned int hlen; +}; + +struct ip_frag_state { + bool DF; + unsigned int hlen; + unsigned int ll_rs; + unsigned int mtu; + unsigned int left; + int offset; + int ptr; + __be16 not_last_frag; +}; + +struct ip_reply_arg { + struct kvec iov[1]; + int flags; + __wsum csum; + int csumoffset; + int bound_dev_if; + u8 tos; + kuid_t uid; +}; + +struct ip_mreq_source { + __be32 imr_multiaddr; + __be32 imr_interface; + __be32 imr_sourceaddr; +}; + +struct ip_msfilter { + __be32 imsf_multiaddr; + __be32 imsf_interface; + __u32 imsf_fmode; + __u32 imsf_numsrc; + __be32 imsf_slist[1]; +}; + +struct group_req { + __u32 gr_interface; + struct __kernel_sockaddr_storage gr_group; +}; + +struct group_source_req { + __u32 gsr_interface; + struct __kernel_sockaddr_storage gsr_group; + struct __kernel_sockaddr_storage gsr_source; +}; + +struct group_filter { + __u32 gf_interface; + struct __kernel_sockaddr_storage gf_group; + __u32 gf_fmode; + __u32 gf_numsrc; + struct __kernel_sockaddr_storage gf_slist[1]; +}; + +struct in_pktinfo { + int ipi_ifindex; + struct in_addr ipi_spec_dst; + struct in_addr ipi_addr; +}; + +struct compat_group_req { + __u32 gr_interface; + struct __kernel_sockaddr_storage gr_group; +} __attribute__((packed)); + +struct compat_group_source_req { + __u32 gsr_interface; + struct __kernel_sockaddr_storage gsr_group; + struct __kernel_sockaddr_storage gsr_source; +} __attribute__((packed)); + +struct compat_group_filter { + __u32 gf_interface; + struct __kernel_sockaddr_storage gf_group; + __u32 gf_fmode; + __u32 gf_numsrc; + struct __kernel_sockaddr_storage gf_slist[1]; +} __attribute__((packed)); + +enum { + BPFILTER_IPT_SO_SET_REPLACE = 64, + BPFILTER_IPT_SO_SET_ADD_COUNTERS = 65, + BPFILTER_IPT_SET_MAX = 66, +}; + +enum { + BPFILTER_IPT_SO_GET_INFO = 64, + BPFILTER_IPT_SO_GET_ENTRIES = 65, + BPFILTER_IPT_SO_GET_REVISION_MATCH = 66, + BPFILTER_IPT_SO_GET_REVISION_TARGET = 67, + BPFILTER_IPT_GET_MAX = 68, +}; + +struct tcpvegas_info { + __u32 tcpv_enabled; + __u32 tcpv_rttcnt; + __u32 tcpv_rtt; + __u32 tcpv_minrtt; +}; + +struct tcp_dctcp_info { + __u16 dctcp_enabled; + __u16 dctcp_ce_state; + __u32 dctcp_alpha; + __u32 dctcp_ab_ecn; + __u32 dctcp_ab_tot; +}; + +struct tcp_bbr_info { + __u32 bbr_bw_lo; + __u32 bbr_bw_hi; + __u32 bbr_min_rtt; + __u32 bbr_pacing_gain; + __u32 bbr_cwnd_gain; +}; + +union tcp_cc_info { + struct tcpvegas_info vegas; + struct tcp_dctcp_info dctcp; + struct tcp_bbr_info bbr; +}; + +enum { + BPF_TCP_ESTABLISHED = 1, + BPF_TCP_SYN_SENT = 2, + BPF_TCP_SYN_RECV = 3, + BPF_TCP_FIN_WAIT1 = 4, + BPF_TCP_FIN_WAIT2 = 5, + BPF_TCP_TIME_WAIT = 6, + BPF_TCP_CLOSE = 7, + BPF_TCP_CLOSE_WAIT = 8, + BPF_TCP_LAST_ACK = 9, + BPF_TCP_LISTEN = 10, + BPF_TCP_CLOSING = 11, + BPF_TCP_NEW_SYN_RECV = 12, + BPF_TCP_MAX_STATES = 13, +}; + +enum inet_csk_ack_state_t { + ICSK_ACK_SCHED = 1, + ICSK_ACK_TIMER = 2, + ICSK_ACK_PUSHED = 4, + ICSK_ACK_PUSHED2 = 8, + ICSK_ACK_NOW = 16, +}; + +enum { + TCP_FLAG_CWR = 32768, + TCP_FLAG_ECE = 16384, + TCP_FLAG_URG = 8192, + TCP_FLAG_ACK = 4096, + TCP_FLAG_PSH = 2048, + TCP_FLAG_RST = 1024, + TCP_FLAG_SYN = 512, + TCP_FLAG_FIN = 256, + TCP_RESERVED_BITS = 15, + TCP_DATA_OFFSET = 240, +}; + +struct tcp_repair_opt { + __u32 opt_code; + __u32 opt_val; +}; + +struct tcp_repair_window { + __u32 snd_wl1; + __u32 snd_wnd; + __u32 max_window; + __u32 rcv_wnd; 
+ __u32 rcv_wup; +}; + +enum { + TCP_NO_QUEUE = 0, + TCP_RECV_QUEUE = 1, + TCP_SEND_QUEUE = 2, + TCP_QUEUES_NR = 3, +}; + +struct tcp_info { + __u8 tcpi_state; + __u8 tcpi_ca_state; + __u8 tcpi_retransmits; + __u8 tcpi_probes; + __u8 tcpi_backoff; + __u8 tcpi_options; + __u8 tcpi_snd_wscale: 4; + __u8 tcpi_rcv_wscale: 4; + __u8 tcpi_delivery_rate_app_limited: 1; + __u8 tcpi_fastopen_client_fail: 2; + __u32 tcpi_rto; + __u32 tcpi_ato; + __u32 tcpi_snd_mss; + __u32 tcpi_rcv_mss; + __u32 tcpi_unacked; + __u32 tcpi_sacked; + __u32 tcpi_lost; + __u32 tcpi_retrans; + __u32 tcpi_fackets; + __u32 tcpi_last_data_sent; + __u32 tcpi_last_ack_sent; + __u32 tcpi_last_data_recv; + __u32 tcpi_last_ack_recv; + __u32 tcpi_pmtu; + __u32 tcpi_rcv_ssthresh; + __u32 tcpi_rtt; + __u32 tcpi_rttvar; + __u32 tcpi_snd_ssthresh; + __u32 tcpi_snd_cwnd; + __u32 tcpi_advmss; + __u32 tcpi_reordering; + __u32 tcpi_rcv_rtt; + __u32 tcpi_rcv_space; + __u32 tcpi_total_retrans; + __u64 tcpi_pacing_rate; + __u64 tcpi_max_pacing_rate; + __u64 tcpi_bytes_acked; + __u64 tcpi_bytes_received; + __u32 tcpi_segs_out; + __u32 tcpi_segs_in; + __u32 tcpi_notsent_bytes; + __u32 tcpi_min_rtt; + __u32 tcpi_data_segs_in; + __u32 tcpi_data_segs_out; + __u64 tcpi_delivery_rate; + __u64 tcpi_busy_time; + __u64 tcpi_rwnd_limited; + __u64 tcpi_sndbuf_limited; + __u32 tcpi_delivered; + __u32 tcpi_delivered_ce; + __u64 tcpi_bytes_sent; + __u64 tcpi_bytes_retrans; + __u32 tcpi_dsack_dups; + __u32 tcpi_reord_seen; + __u32 tcpi_rcv_ooopack; + __u32 tcpi_snd_wnd; +}; + +enum { + TCP_NLA_PAD = 0, + TCP_NLA_BUSY = 1, + TCP_NLA_RWND_LIMITED = 2, + TCP_NLA_SNDBUF_LIMITED = 3, + TCP_NLA_DATA_SEGS_OUT = 4, + TCP_NLA_TOTAL_RETRANS = 5, + TCP_NLA_PACING_RATE = 6, + TCP_NLA_DELIVERY_RATE = 7, + TCP_NLA_SND_CWND = 8, + TCP_NLA_REORDERING = 9, + TCP_NLA_MIN_RTT = 10, + TCP_NLA_RECUR_RETRANS = 11, + TCP_NLA_DELIVERY_RATE_APP_LMT = 12, + TCP_NLA_SNDQ_SIZE = 13, + TCP_NLA_CA_STATE = 14, + TCP_NLA_SND_SSTHRESH = 15, + TCP_NLA_DELIVERED = 16, + TCP_NLA_DELIVERED_CE = 17, + TCP_NLA_BYTES_SENT = 18, + TCP_NLA_BYTES_RETRANS = 19, + TCP_NLA_DSACK_DUPS = 20, + TCP_NLA_REORD_SEEN = 21, + TCP_NLA_SRTT = 22, + TCP_NLA_TIMEOUT_REHASH = 23, + TCP_NLA_BYTES_NOTSENT = 24, + TCP_NLA_EDT = 25, +}; + +struct tcp_zerocopy_receive { + __u64 address; + __u32 length; + __u32 recv_skip_hint; + __u32 inq; + __s32 err; +}; + +struct tcp_md5sig_pool { + struct ahash_request *md5_req; + void *scratch; +}; + +enum tcp_chrono { + TCP_CHRONO_UNSPEC = 0, + TCP_CHRONO_BUSY = 1, + TCP_CHRONO_RWND_LIMITED = 2, + TCP_CHRONO_SNDBUF_LIMITED = 3, + __TCP_CHRONO_MAX = 4, +}; + +struct tcp_splice_state { + struct pipe_inode_info *pipe; + size_t len; + unsigned int flags; +}; + +enum tcp_fastopen_client_fail { + TFO_STATUS_UNSPEC = 0, + TFO_COOKIE_UNAVAILABLE = 1, + TFO_DATA_NOT_ACKED = 2, + TFO_SYN_RETRANSMITTED = 3, +}; + +struct tcp_sack_block_wire { + __be32 start_seq; + __be32 end_seq; +}; + +struct static_key_false_deferred { + struct static_key_false key; + long unsigned int timeout; + struct delayed_work work; +}; + +struct mptcp_ext { + union { + u64 data_ack; + u32 data_ack32; + }; + u64 data_seq; + u32 subflow_seq; + u16 data_len; + u8 use_map: 1; + u8 dsn64: 1; + u8 data_fin: 1; + u8 use_ack: 1; + u8 ack64: 1; + u8 mpc_map: 1; + u8 __unused: 2; +}; + +enum tcp_queue { + TCP_FRAG_IN_WRITE_QUEUE = 0, + TCP_FRAG_IN_RTX_QUEUE = 1, +}; + +enum tcp_ca_ack_event_flags { + CA_ACK_SLOWPATH = 1, + CA_ACK_WIN_UPDATE = 2, + CA_ACK_ECE = 4, +}; + +struct tcp_sacktag_state { + u64 first_sackt; + u64 
last_sackt; + u32 reord; + u32 sack_delivered; + int flag; + unsigned int mss_now; + struct rate_sample *rate; +}; + +enum pkt_hash_types { + PKT_HASH_TYPE_NONE = 0, + PKT_HASH_TYPE_L2 = 1, + PKT_HASH_TYPE_L3 = 2, + PKT_HASH_TYPE_L4 = 3, +}; + +enum { + BPF_WRITE_HDR_TCP_CURRENT_MSS = 1, + BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2, +}; + +enum tsq_flags { + TSQF_THROTTLED = 1, + TSQF_QUEUED = 2, + TCPF_TSQ_DEFERRED = 4, + TCPF_WRITE_TIMER_DEFERRED = 8, + TCPF_DELACK_TIMER_DEFERRED = 16, + TCPF_MTU_REDUCED_DEFERRED = 32, +}; + +struct mptcp_out_options { + u16 suboptions; + u64 sndr_key; + u64 rcvr_key; + union { + struct in_addr addr; + struct in6_addr addr6; + }; + u8 addr_id; + u64 ahmac; + u8 rm_id; + u8 join_id; + u8 backup; + u32 nonce; + u64 thmac; + u32 token; + u8 hmac[20]; + struct mptcp_ext ext_copy; +}; + +struct tcp_out_options { + u16 options; + u16 mss; + u8 ws; + u8 num_sack_blocks; + u8 hash_size; + u8 bpf_opt_len; + __u8 *hash_location; + __u32 tsval; + __u32 tsecr; + struct tcp_fastopen_cookie *fastopen_cookie; + struct mptcp_out_options mptcp; +}; + +struct tsq_tasklet { + struct tasklet_struct tasklet; + struct list_head head; +}; + +struct tcp_md5sig { + struct __kernel_sockaddr_storage tcpm_addr; + __u8 tcpm_flags; + __u8 tcpm_prefixlen; + __u16 tcpm_keylen; + int tcpm_ifindex; + __u8 tcpm_key[80]; +}; + +struct icmp_err { + int errno; + unsigned int fatal: 1; +}; + +enum tcp_tw_status { + TCP_TW_SUCCESS = 0, + TCP_TW_RST = 1, + TCP_TW_ACK = 2, + TCP_TW_SYN = 3, +}; + +struct tcp4_pseudohdr { + __be32 saddr; + __be32 daddr; + __u8 pad; + __u8 protocol; + __be16 len; +}; + +enum tcp_seq_states { + TCP_SEQ_STATE_LISTENING = 0, + TCP_SEQ_STATE_ESTABLISHED = 1, +}; + +struct tcp_seq_afinfo { + sa_family_t family; +}; + +struct tcp_iter_state { + struct seq_net_private p; + enum tcp_seq_states state; + struct sock *syn_wait_sk; + struct tcp_seq_afinfo *bpf_seq_afinfo; + int bucket; + int offset; + int sbucket; + int num; + loff_t last_pos; +}; + +struct bpf_iter__tcp { + union { + struct bpf_iter_meta *meta; + }; + union { + struct sock_common *sk_common; + }; + uid_t uid; +}; + +enum tcp_metric_index { + TCP_METRIC_RTT = 0, + TCP_METRIC_RTTVAR = 1, + TCP_METRIC_SSTHRESH = 2, + TCP_METRIC_CWND = 3, + TCP_METRIC_REORDERING = 4, + TCP_METRIC_RTT_US = 5, + TCP_METRIC_RTTVAR_US = 6, + __TCP_METRIC_MAX = 7, +}; + +enum { + TCP_METRICS_ATTR_UNSPEC = 0, + TCP_METRICS_ATTR_ADDR_IPV4 = 1, + TCP_METRICS_ATTR_ADDR_IPV6 = 2, + TCP_METRICS_ATTR_AGE = 3, + TCP_METRICS_ATTR_TW_TSVAL = 4, + TCP_METRICS_ATTR_TW_TS_STAMP = 5, + TCP_METRICS_ATTR_VALS = 6, + TCP_METRICS_ATTR_FOPEN_MSS = 7, + TCP_METRICS_ATTR_FOPEN_SYN_DROPS = 8, + TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS = 9, + TCP_METRICS_ATTR_FOPEN_COOKIE = 10, + TCP_METRICS_ATTR_SADDR_IPV4 = 11, + TCP_METRICS_ATTR_SADDR_IPV6 = 12, + TCP_METRICS_ATTR_PAD = 13, + __TCP_METRICS_ATTR_MAX = 14, +}; + +enum { + TCP_METRICS_CMD_UNSPEC = 0, + TCP_METRICS_CMD_GET = 1, + TCP_METRICS_CMD_DEL = 2, + __TCP_METRICS_CMD_MAX = 3, +}; + +struct tcp_fastopen_metrics { + u16 mss; + u16 syn_loss: 10; + u16 try_exp: 2; + long unsigned int last_syn_loss; + struct tcp_fastopen_cookie cookie; +}; + +struct tcp_metrics_block { + struct tcp_metrics_block *tcpm_next; + possible_net_t tcpm_net; + struct inetpeer_addr tcpm_saddr; + struct inetpeer_addr tcpm_daddr; + long unsigned int tcpm_stamp; + u32 tcpm_lock; + u32 tcpm_vals[5]; + struct tcp_fastopen_metrics tcpm_fastopen; + struct callback_head callback_head; +}; + +struct tcpm_hash_bucket { + struct tcp_metrics_block *chain; 
+}; + +struct icmp_filter { + __u32 data; +}; + +struct raw_iter_state { + struct seq_net_private p; + int bucket; +}; + +struct raw_sock { + struct inet_sock inet; + struct icmp_filter filter; + u32 ipmr_table; +}; + +struct raw_frag_vec { + struct msghdr *msg; + union { + struct icmphdr icmph; + char c[1]; + } hdr; + int hlen; +}; + +struct ip_tunnel_encap { + u16 type; + u16 flags; + __be16 sport; + __be16 dport; +}; + +struct ip_tunnel_encap_ops { + size_t (*encap_hlen)(struct ip_tunnel_encap *); + int (*build_header)(struct sk_buff *, struct ip_tunnel_encap *, u8 *, struct flowi4 *); + int (*err_handler)(struct sk_buff *, u32); +}; + +struct udp_skb_cb { + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + } header; + __u16 cscov; + __u8 partial_cov; +}; + +struct udp_dev_scratch { + u32 _tsize_state; + u16 len; + bool is_linear; + bool csum_unnecessary; +}; + +struct udp_seq_afinfo { + sa_family_t family; + struct udp_table *udp_table; +}; + +struct udp_iter_state { + struct seq_net_private p; + int bucket; + struct udp_seq_afinfo *bpf_seq_afinfo; +}; + +struct bpf_iter__udp { + union { + struct bpf_iter_meta *meta; + }; + union { + struct udp_sock *udp_sk; + }; + uid_t uid; + int: 32; + int bucket; +}; + +struct inet_protosw { + struct list_head list; + short unsigned int type; + short unsigned int protocol; + struct proto *prot; + const struct proto_ops *ops; + unsigned char flags; +}; + +typedef struct sk_buff * (*gro_receive_sk_t)(struct sock *, struct list_head *, struct sk_buff *); + +typedef struct sock * (*udp_lookup_t)(struct sk_buff *, __be16, __be16); + +struct arpreq { + struct sockaddr arp_pa; + struct sockaddr arp_ha; + int arp_flags; + struct sockaddr arp_netmask; + char arp_dev[16]; +}; + +typedef struct { + char ax25_call[7]; +} ax25_address; + +enum { + AX25_VALUES_IPDEFMODE = 0, + AX25_VALUES_AXDEFMODE = 1, + AX25_VALUES_BACKOFF = 2, + AX25_VALUES_CONMODE = 3, + AX25_VALUES_WINDOW = 4, + AX25_VALUES_EWINDOW = 5, + AX25_VALUES_T1 = 6, + AX25_VALUES_T2 = 7, + AX25_VALUES_T3 = 8, + AX25_VALUES_IDLE = 9, + AX25_VALUES_N2 = 10, + AX25_VALUES_PACLEN = 11, + AX25_VALUES_PROTOCOL = 12, + AX25_VALUES_DS_TIMEOUT = 13, + AX25_MAX_VALUES = 14, +}; + +enum ip_conntrack_status { + IPS_EXPECTED_BIT = 0, + IPS_EXPECTED = 1, + IPS_SEEN_REPLY_BIT = 1, + IPS_SEEN_REPLY = 2, + IPS_ASSURED_BIT = 2, + IPS_ASSURED = 4, + IPS_CONFIRMED_BIT = 3, + IPS_CONFIRMED = 8, + IPS_SRC_NAT_BIT = 4, + IPS_SRC_NAT = 16, + IPS_DST_NAT_BIT = 5, + IPS_DST_NAT = 32, + IPS_NAT_MASK = 48, + IPS_SEQ_ADJUST_BIT = 6, + IPS_SEQ_ADJUST = 64, + IPS_SRC_NAT_DONE_BIT = 7, + IPS_SRC_NAT_DONE = 128, + IPS_DST_NAT_DONE_BIT = 8, + IPS_DST_NAT_DONE = 256, + IPS_NAT_DONE_MASK = 384, + IPS_DYING_BIT = 9, + IPS_DYING = 512, + IPS_FIXED_TIMEOUT_BIT = 10, + IPS_FIXED_TIMEOUT = 1024, + IPS_TEMPLATE_BIT = 11, + IPS_TEMPLATE = 2048, + IPS_UNTRACKED_BIT = 12, + IPS_UNTRACKED = 4096, + IPS_NAT_CLASH_BIT = 12, + IPS_NAT_CLASH = 4096, + IPS_HELPER_BIT = 13, + IPS_HELPER = 8192, + IPS_OFFLOAD_BIT = 14, + IPS_OFFLOAD = 16384, + IPS_HW_OFFLOAD_BIT = 15, + IPS_HW_OFFLOAD = 32768, + IPS_UNCHANGEABLE_MASK = 56313, + __IPS_MAX_BIT = 16, +}; + +enum { + XFRM_LOOKUP_ICMP = 1, + XFRM_LOOKUP_QUEUE = 2, + XFRM_LOOKUP_KEEP_DST_REF = 4, +}; + +struct icmp_ext_hdr { + __u8 reserved1: 4; + __u8 version: 4; + __u8 reserved2; + __sum16 checksum; +}; + +struct icmp_extobj_hdr { + __be16 length; + __u8 class_num; + __u8 class_type; +}; + +struct icmp_bxm { + struct sk_buff *skb; + int offset; + int data_len; + struct { + struct icmphdr 
icmph; + __be32 times[3]; + } data; + int head_len; + struct ip_options_data replyopts; +}; + +struct icmp_control { + bool (*handler)(struct sk_buff *); + short int error; +}; + +struct ifaddrmsg { + __u8 ifa_family; + __u8 ifa_prefixlen; + __u8 ifa_flags; + __u8 ifa_scope; + __u32 ifa_index; +}; + +enum { + IFA_UNSPEC = 0, + IFA_ADDRESS = 1, + IFA_LOCAL = 2, + IFA_LABEL = 3, + IFA_BROADCAST = 4, + IFA_ANYCAST = 5, + IFA_CACHEINFO = 6, + IFA_MULTICAST = 7, + IFA_FLAGS = 8, + IFA_RT_PRIORITY = 9, + IFA_TARGET_NETNSID = 10, + __IFA_MAX = 11, +}; + +struct ifa_cacheinfo { + __u32 ifa_prefered; + __u32 ifa_valid; + __u32 cstamp; + __u32 tstamp; +}; + +enum { + IFLA_INET_UNSPEC = 0, + IFLA_INET_CONF = 1, + __IFLA_INET_MAX = 2, +}; + +struct in_validator_info { + __be32 ivi_addr; + struct in_device *ivi_dev; + struct netlink_ext_ack *extack; +}; + +struct netconfmsg { + __u8 ncm_family; +}; + +enum { + NETCONFA_UNSPEC = 0, + NETCONFA_IFINDEX = 1, + NETCONFA_FORWARDING = 2, + NETCONFA_RP_FILTER = 3, + NETCONFA_MC_FORWARDING = 4, + NETCONFA_PROXY_NEIGH = 5, + NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN = 6, + NETCONFA_INPUT = 7, + NETCONFA_BC_FORWARDING = 8, + __NETCONFA_MAX = 9, +}; + +struct inet_fill_args { + u32 portid; + u32 seq; + int event; + unsigned int flags; + int netnsid; + int ifindex; +}; + +struct devinet_sysctl_table { + struct ctl_table_header *sysctl_header; + struct ctl_table devinet_vars[33]; +}; + +struct rtentry { + long unsigned int rt_pad1; + struct sockaddr rt_dst; + struct sockaddr rt_gateway; + struct sockaddr rt_genmask; + short unsigned int rt_flags; + short int rt_pad2; + long unsigned int rt_pad3; + void *rt_pad4; + short int rt_metric; + char *rt_dev; + long unsigned int rt_mtu; + long unsigned int rt_window; + short unsigned int rt_irtt; +}; + +struct pingv6_ops { + int (*ipv6_recv_error)(struct sock *, struct msghdr *, int, int *); + void (*ip6_datagram_recv_common_ctl)(struct sock *, struct msghdr *, struct sk_buff *); + void (*ip6_datagram_recv_specific_ctl)(struct sock *, struct msghdr *, struct sk_buff *); + int (*icmpv6_err_convert)(u8, u8, int *); + void (*ipv6_icmp_error)(struct sock *, struct sk_buff *, int, __be16, u32, u8 *); + int (*ipv6_chk_addr)(struct net *, const struct in6_addr *, const struct net_device *, int); +}; + +struct compat_rtentry { + u32 rt_pad1; + struct sockaddr rt_dst; + struct sockaddr rt_gateway; + struct sockaddr rt_genmask; + short unsigned int rt_flags; + short int rt_pad2; + u32 rt_pad3; + unsigned char rt_tos; + unsigned char rt_class; + short int rt_pad4; + short int rt_metric; + compat_uptr_t rt_dev; + u32 rt_mtu; + u32 rt_window; + short unsigned int rt_irtt; +}; + +struct igmphdr { + __u8 type; + __u8 code; + __sum16 csum; + __be32 group; +}; + +struct igmpv3_grec { + __u8 grec_type; + __u8 grec_auxwords; + __be16 grec_nsrcs; + __be32 grec_mca; + __be32 grec_src[0]; +}; + +struct igmpv3_report { + __u8 type; + __u8 resv1; + __sum16 csum; + __be16 resv2; + __be16 ngrec; + struct igmpv3_grec grec[0]; +}; + +struct igmpv3_query { + __u8 type; + __u8 code; + __sum16 csum; + __be32 group; + __u8 qrv: 3; + __u8 suppress: 1; + __u8 resv: 4; + __u8 qqic; + __be16 nsrcs; + __be32 srcs[0]; +}; + +struct igmp_mc_iter_state { + struct seq_net_private p; + struct net_device *dev; + struct in_device *in_dev; +}; + +struct igmp_mcf_iter_state { + struct seq_net_private p; + struct net_device *dev; + struct in_device *idev; + struct ip_mc_list *im; +}; + +struct fib_config { + u8 fc_dst_len; + u8 fc_tos; + u8 fc_protocol; + u8 fc_scope; + u8 
fc_type; + u8 fc_gw_family; + u32 fc_table; + __be32 fc_dst; + union { + __be32 fc_gw4; + struct in6_addr fc_gw6; + }; + int fc_oif; + u32 fc_flags; + u32 fc_priority; + __be32 fc_prefsrc; + u32 fc_nh_id; + struct nlattr *fc_mx; + struct rtnexthop *fc_mp; + int fc_mx_len; + int fc_mp_len; + u32 fc_flow; + u32 fc_nlflags; + struct nl_info fc_nlinfo; + struct nlattr *fc_encap; + u16 fc_encap_type; +}; + +struct fib_result_nl { + __be32 fl_addr; + u32 fl_mark; + unsigned char fl_tos; + unsigned char fl_scope; + unsigned char tb_id_in; + unsigned char tb_id; + unsigned char prefixlen; + unsigned char nh_sel; + unsigned char type; + unsigned char scope; + int err; +}; + +struct fib_dump_filter { + u32 table_id; + bool filter_set; + bool dump_routes; + bool dump_exceptions; + unsigned char protocol; + unsigned char rt_type; + unsigned int flags; + struct net_device *dev; +}; + +struct fib_nh_notifier_info { + struct fib_notifier_info info; + struct fib_nh *fib_nh; +}; + +struct fib_entry_notifier_info { + struct fib_notifier_info info; + u32 dst; + int dst_len; + struct fib_info *fi; + u8 tos; + u8 type; + u32 tb_id; +}; + +typedef unsigned int t_key; + +struct key_vector { + t_key key; + unsigned char pos; + unsigned char bits; + unsigned char slen; + union { + struct hlist_head leaf; + struct key_vector *tnode[0]; + }; +}; + +struct tnode { + struct callback_head rcu; + t_key empty_children; + t_key full_children; + struct key_vector *parent; + struct key_vector kv[1]; +}; + +struct trie_use_stats { + unsigned int gets; + unsigned int backtrack; + unsigned int semantic_match_passed; + unsigned int semantic_match_miss; + unsigned int null_node_hit; + unsigned int resize_node_skipped; +}; + +struct trie_stat { + unsigned int totdepth; + unsigned int maxdepth; + unsigned int tnodes; + unsigned int leaves; + unsigned int nullpointers; + unsigned int prefixes; + unsigned int nodesizes[32]; +}; + +struct trie { + struct key_vector kv[1]; + struct trie_use_stats *stats; +}; + +struct fib_trie_iter { + struct seq_net_private p; + struct fib_table *tb; + struct key_vector *tnode; + unsigned int index; + unsigned int depth; +}; + +struct fib_route_iter { + struct seq_net_private p; + struct fib_table *main_tb; + struct key_vector *tnode; + loff_t pos; + t_key key; +}; + +struct ipfrag_skb_cb { + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + }; + struct sk_buff *next_frag; + int frag_run_len; +}; + +struct icmpv6_echo { + __be16 identifier; + __be16 sequence; +}; + +struct icmpv6_nd_advt { + __u32 reserved: 5; + __u32 override: 1; + __u32 solicited: 1; + __u32 router: 1; + __u32 reserved2: 24; +}; + +struct icmpv6_nd_ra { + __u8 hop_limit; + __u8 reserved: 3; + __u8 router_pref: 2; + __u8 home_agent: 1; + __u8 other: 1; + __u8 managed: 1; + __be16 rt_lifetime; +}; + +struct icmp6hdr { + __u8 icmp6_type; + __u8 icmp6_code; + __sum16 icmp6_cksum; + union { + __be32 un_data32[1]; + __be16 un_data16[2]; + __u8 un_data8[4]; + struct icmpv6_echo u_echo; + struct icmpv6_nd_advt u_nd_advt; + struct icmpv6_nd_ra u_nd_ra; + } icmp6_dataun; +}; + +struct ping_iter_state { + struct seq_net_private p; + int bucket; + sa_family_t family; +}; + +struct pingfakehdr { + struct icmphdr icmph; + struct msghdr *msg; + sa_family_t family; + __wsum wcheck; +}; + +struct ping_table { + struct hlist_nulls_head hash[64]; + rwlock_t lock; +}; + +enum lwtunnel_ip_t { + LWTUNNEL_IP_UNSPEC = 0, + LWTUNNEL_IP_ID = 1, + LWTUNNEL_IP_DST = 2, + LWTUNNEL_IP_SRC = 3, + LWTUNNEL_IP_TTL = 4, + LWTUNNEL_IP_TOS = 5, + 
LWTUNNEL_IP_FLAGS = 6, + LWTUNNEL_IP_PAD = 7, + LWTUNNEL_IP_OPTS = 8, + __LWTUNNEL_IP_MAX = 9, +}; + +enum lwtunnel_ip6_t { + LWTUNNEL_IP6_UNSPEC = 0, + LWTUNNEL_IP6_ID = 1, + LWTUNNEL_IP6_DST = 2, + LWTUNNEL_IP6_SRC = 3, + LWTUNNEL_IP6_HOPLIMIT = 4, + LWTUNNEL_IP6_TC = 5, + LWTUNNEL_IP6_FLAGS = 6, + LWTUNNEL_IP6_PAD = 7, + LWTUNNEL_IP6_OPTS = 8, + __LWTUNNEL_IP6_MAX = 9, +}; + +enum { + LWTUNNEL_IP_OPTS_UNSPEC = 0, + LWTUNNEL_IP_OPTS_GENEVE = 1, + LWTUNNEL_IP_OPTS_VXLAN = 2, + LWTUNNEL_IP_OPTS_ERSPAN = 3, + __LWTUNNEL_IP_OPTS_MAX = 4, +}; + +enum { + LWTUNNEL_IP_OPT_GENEVE_UNSPEC = 0, + LWTUNNEL_IP_OPT_GENEVE_CLASS = 1, + LWTUNNEL_IP_OPT_GENEVE_TYPE = 2, + LWTUNNEL_IP_OPT_GENEVE_DATA = 3, + __LWTUNNEL_IP_OPT_GENEVE_MAX = 4, +}; + +enum { + LWTUNNEL_IP_OPT_VXLAN_UNSPEC = 0, + LWTUNNEL_IP_OPT_VXLAN_GBP = 1, + __LWTUNNEL_IP_OPT_VXLAN_MAX = 2, +}; + +enum { + LWTUNNEL_IP_OPT_ERSPAN_UNSPEC = 0, + LWTUNNEL_IP_OPT_ERSPAN_VER = 1, + LWTUNNEL_IP_OPT_ERSPAN_INDEX = 2, + LWTUNNEL_IP_OPT_ERSPAN_DIR = 3, + LWTUNNEL_IP_OPT_ERSPAN_HWID = 4, + __LWTUNNEL_IP_OPT_ERSPAN_MAX = 5, +}; + +struct ip6_tnl_encap_ops { + size_t (*encap_hlen)(struct ip_tunnel_encap *); + int (*build_header)(struct sk_buff *, struct ip_tunnel_encap *, u8 *, struct flowi6 *); + int (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32); +}; + +struct geneve_opt { + __be16 opt_class; + u8 type; + u8 length: 5; + u8 r3: 1; + u8 r2: 1; + u8 r1: 1; + u8 opt_data[0]; +}; + +struct vxlan_metadata { + u32 gbp; +}; + +struct erspan_md2 { + __be32 timestamp; + __be16 sgt; + __u8 hwid_upper: 2; + __u8 ft: 5; + __u8 p: 1; + __u8 o: 1; + __u8 gra: 2; + __u8 dir: 1; + __u8 hwid: 4; +}; + +struct erspan_metadata { + int version; + union { + __be32 index; + struct erspan_md2 md2; + } u; +}; + +struct nhmsg { + unsigned char nh_family; + unsigned char nh_scope; + unsigned char nh_protocol; + unsigned char resvd; + unsigned int nh_flags; +}; + +struct nexthop_grp { + __u32 id; + __u8 weight; + __u8 resvd1; + __u16 resvd2; +}; + +enum { + NEXTHOP_GRP_TYPE_MPATH = 0, + __NEXTHOP_GRP_TYPE_MAX = 1, +}; + +enum { + NHA_UNSPEC = 0, + NHA_ID = 1, + NHA_GROUP = 2, + NHA_GROUP_TYPE = 3, + NHA_BLACKHOLE = 4, + NHA_OIF = 5, + NHA_GATEWAY = 6, + NHA_ENCAP_TYPE = 7, + NHA_ENCAP = 8, + NHA_GROUPS = 9, + NHA_MASTER = 10, + NHA_FDB = 11, + __NHA_MAX = 12, +}; + +struct nh_config { + u32 nh_id; + u8 nh_family; + u8 nh_protocol; + u8 nh_blackhole; + u8 nh_fdb; + u32 nh_flags; + int nh_ifindex; + struct net_device *dev; + union { + __be32 ipv4; + struct in6_addr ipv6; + } gw; + struct nlattr *nh_grp; + u16 nh_grp_type; + struct nlattr *nh_encap; + u16 nh_encap_type; + u32 nlflags; + struct nl_info nlinfo; +}; + +enum nexthop_event_type { + NEXTHOP_EVENT_DEL = 0, +}; + +struct bpfilter_umh_ops { + struct umd_info info; + struct mutex lock; + int (*sockopt)(struct sock *, int, sockptr_t, unsigned int, bool); + int (*start)(); +}; + +struct inet6_protocol { + void (*early_demux)(struct sk_buff *); + void (*early_demux_handler)(struct sk_buff *); + int (*handler)(struct sk_buff *); + int (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32); + unsigned int flags; +}; + +struct snmp_mib { + const char *name; + int entry; +}; + +struct fib4_rule { + struct fib_rule common; + u8 dst_len; + u8 src_len; + u8 tos; + __be32 src; + __be32 srcmask; + __be32 dst; + __be32 dstmask; + u32 tclassid; +}; + +enum { + PIM_TYPE_HELLO = 0, + PIM_TYPE_REGISTER = 1, + PIM_TYPE_REGISTER_STOP = 2, + PIM_TYPE_JOIN_PRUNE = 3, + PIM_TYPE_BOOTSTRAP 
= 4, + PIM_TYPE_ASSERT = 5, + PIM_TYPE_GRAFT = 6, + PIM_TYPE_GRAFT_ACK = 7, + PIM_TYPE_CANDIDATE_RP_ADV = 8, +}; + +struct pimreghdr { + __u8 type; + __u8 reserved; + __be16 csum; + __be32 flags; +}; + +typedef short unsigned int vifi_t; + +struct vifctl { + vifi_t vifc_vifi; + unsigned char vifc_flags; + unsigned char vifc_threshold; + unsigned int vifc_rate_limit; + union { + struct in_addr vifc_lcl_addr; + int vifc_lcl_ifindex; + }; + struct in_addr vifc_rmt_addr; +}; + +struct mfcctl { + struct in_addr mfcc_origin; + struct in_addr mfcc_mcastgrp; + vifi_t mfcc_parent; + unsigned char mfcc_ttls[32]; + unsigned int mfcc_pkt_cnt; + unsigned int mfcc_byte_cnt; + unsigned int mfcc_wrong_if; + int mfcc_expire; +}; + +struct sioc_sg_req { + struct in_addr src; + struct in_addr grp; + long unsigned int pktcnt; + long unsigned int bytecnt; + long unsigned int wrong_if; +}; + +struct sioc_vif_req { + vifi_t vifi; + long unsigned int icount; + long unsigned int ocount; + long unsigned int ibytes; + long unsigned int obytes; +}; + +struct igmpmsg { + __u32 unused1; + __u32 unused2; + unsigned char im_msgtype; + unsigned char im_mbz; + unsigned char im_vif; + unsigned char im_vif_hi; + struct in_addr im_src; + struct in_addr im_dst; +}; + +enum { + IPMRA_TABLE_UNSPEC = 0, + IPMRA_TABLE_ID = 1, + IPMRA_TABLE_CACHE_RES_QUEUE_LEN = 2, + IPMRA_TABLE_MROUTE_REG_VIF_NUM = 3, + IPMRA_TABLE_MROUTE_DO_ASSERT = 4, + IPMRA_TABLE_MROUTE_DO_PIM = 5, + IPMRA_TABLE_VIFS = 6, + IPMRA_TABLE_MROUTE_DO_WRVIFWHOLE = 7, + __IPMRA_TABLE_MAX = 8, +}; + +enum { + IPMRA_VIF_UNSPEC = 0, + IPMRA_VIF = 1, + __IPMRA_VIF_MAX = 2, +}; + +enum { + IPMRA_VIFA_UNSPEC = 0, + IPMRA_VIFA_IFINDEX = 1, + IPMRA_VIFA_VIF_ID = 2, + IPMRA_VIFA_FLAGS = 3, + IPMRA_VIFA_BYTES_IN = 4, + IPMRA_VIFA_BYTES_OUT = 5, + IPMRA_VIFA_PACKETS_IN = 6, + IPMRA_VIFA_PACKETS_OUT = 7, + IPMRA_VIFA_LOCAL_ADDR = 8, + IPMRA_VIFA_REMOTE_ADDR = 9, + IPMRA_VIFA_PAD = 10, + __IPMRA_VIFA_MAX = 11, +}; + +enum { + IPMRA_CREPORT_UNSPEC = 0, + IPMRA_CREPORT_MSGTYPE = 1, + IPMRA_CREPORT_VIF_ID = 2, + IPMRA_CREPORT_SRC_ADDR = 3, + IPMRA_CREPORT_DST_ADDR = 4, + IPMRA_CREPORT_PKT = 5, + IPMRA_CREPORT_TABLE = 6, + __IPMRA_CREPORT_MAX = 7, +}; + +struct vif_device { + struct net_device *dev; + long unsigned int bytes_in; + long unsigned int bytes_out; + long unsigned int pkt_in; + long unsigned int pkt_out; + long unsigned int rate_limit; + unsigned char threshold; + short unsigned int flags; + int link; + struct netdev_phys_item_id dev_parent_id; + __be32 local; + __be32 remote; +}; + +struct vif_entry_notifier_info { + struct fib_notifier_info info; + struct net_device *dev; + short unsigned int vif_index; + short unsigned int vif_flags; + u32 tb_id; +}; + +enum { + MFC_STATIC = 1, + MFC_OFFLOAD = 2, +}; + +struct mr_mfc { + struct rhlist_head mnode; + short unsigned int mfc_parent; + int mfc_flags; + union { + struct { + long unsigned int expires; + struct sk_buff_head unresolved; + } unres; + struct { + long unsigned int last_assert; + int minvif; + int maxvif; + long unsigned int bytes; + long unsigned int pkt; + long unsigned int wrong_if; + long unsigned int lastuse; + unsigned char ttls[32]; + refcount_t refcount; + } res; + } mfc_un; + struct list_head list; + struct callback_head rcu; + void (*free)(struct callback_head *); +}; + +struct mfc_entry_notifier_info { + struct fib_notifier_info info; + struct mr_mfc *mfc; + u32 tb_id; +}; + +struct mr_table_ops { + const struct rhashtable_params *rht_params; + void *cmparg_any; +}; + +struct mr_table { + struct list_head 
list; + possible_net_t net; + struct mr_table_ops ops; + u32 id; + struct sock *mroute_sk; + struct timer_list ipmr_expire_timer; + struct list_head mfc_unres_queue; + struct vif_device vif_table[32]; + struct rhltable mfc_hash; + struct list_head mfc_cache_list; + int maxvif; + atomic_t cache_resolve_queue_len; + bool mroute_do_assert; + bool mroute_do_pim; + bool mroute_do_wrvifwhole; + int mroute_reg_vif_num; +}; + +struct mr_vif_iter { + struct seq_net_private p; + struct mr_table *mrt; + int ct; +}; + +struct mr_mfc_iter { + struct seq_net_private p; + struct mr_table *mrt; + struct list_head *cache; + spinlock_t *lock; +}; + +struct mfc_cache_cmp_arg { + __be32 mfc_mcastgrp; + __be32 mfc_origin; +}; + +struct mfc_cache { + struct mr_mfc _c; + union { + struct { + __be32 mfc_mcastgrp; + __be32 mfc_origin; + }; + struct mfc_cache_cmp_arg cmparg; + }; +}; + +struct ipmr_result { + struct mr_table *mrt; +}; + +struct compat_sioc_sg_req { + struct in_addr src; + struct in_addr grp; + compat_ulong_t pktcnt; + compat_ulong_t bytecnt; + compat_ulong_t wrong_if; +}; + +struct compat_sioc_vif_req { + vifi_t vifi; + compat_ulong_t icount; + compat_ulong_t ocount; + compat_ulong_t ibytes; + compat_ulong_t obytes; +}; + +struct rta_mfc_stats { + __u64 mfcs_packets; + __u64 mfcs_bytes; + __u64 mfcs_wrong_if; +}; + +enum rpc_display_format_t { + RPC_DISPLAY_ADDR = 0, + RPC_DISPLAY_PORT = 1, + RPC_DISPLAY_PROTO = 2, + RPC_DISPLAY_HEX_ADDR = 3, + RPC_DISPLAY_HEX_PORT = 4, + RPC_DISPLAY_NETID = 5, + RPC_DISPLAY_MAX = 6, +}; + +struct ic_device { + struct ic_device *next; + struct net_device *dev; + short unsigned int flags; + short int able; + __be32 xid; +}; + +struct bootp_pkt { + struct iphdr iph; + struct udphdr udph; + u8 op; + u8 htype; + u8 hlen; + u8 hops; + __be32 xid; + __be16 secs; + __be16 flags; + __be32 client_ip; + __be32 your_ip; + __be32 server_ip; + __be32 relay_ip; + u8 hw_addr[16]; + u8 serv_name[64]; + u8 boot_file[128]; + u8 exten[312]; +}; + +struct bictcp { + u32 cnt; + u32 last_max_cwnd; + u32 last_cwnd; + u32 last_time; + u32 bic_origin_point; + u32 bic_K; + u32 delay_min; + u32 epoch_start; + u32 ack_cnt; + u32 tcp_cwnd; + u16 unused; + u8 sample_cnt; + u8 found; + u32 round_start; + u32 end_seq; + u32 last_ack; + u32 curr_rtt; +}; + +struct tls_rec { + struct list_head list; + int tx_ready; + int tx_flags; + struct sk_msg msg_plaintext; + struct sk_msg msg_encrypted; + struct scatterlist sg_aead_in[2]; + struct scatterlist sg_aead_out[2]; + char content_type; + struct scatterlist sg_content_type; + char aad_space[13]; + u8 iv_data[16]; + long: 24; + long: 64; + long: 64; + struct aead_request aead_req; + u8 aead_req_ctx[0]; +}; + +struct tx_work { + struct delayed_work work; + struct sock *sk; +}; + +struct tls_sw_context_tx { + struct crypto_aead *aead_send; + struct crypto_wait async_wait; + struct tx_work tx_work; + struct tls_rec *open_rec; + struct list_head tx_list; + atomic_t encrypt_pending; + spinlock_t encrypt_compl_lock; + int async_notify; + u8 async_capable: 1; + long unsigned int tx_bitmask; +}; + +enum { + TCP_BPF_IPV4 = 0, + TCP_BPF_IPV6 = 1, + TCP_BPF_NUM_PROTS = 2, +}; + +enum { + TCP_BPF_BASE = 0, + TCP_BPF_TX = 1, + TCP_BPF_NUM_CFGS = 2, +}; + +enum { + UDP_BPF_IPV4 = 0, + UDP_BPF_IPV6 = 1, + UDP_BPF_NUM_PROTS = 2, +}; + +struct cipso_v4_map_cache_bkt { + spinlock_t lock; + u32 size; + struct list_head list; +}; + +struct cipso_v4_map_cache_entry { + u32 hash; + unsigned char *key; + size_t key_len; + struct netlbl_lsm_cache *lsm_data; + u32 activity; + 
struct list_head list; +}; + +struct xfrm_policy_afinfo { + struct dst_ops *dst_ops; + struct dst_entry * (*dst_lookup)(struct net *, int, int, const xfrm_address_t *, const xfrm_address_t *, u32); + int (*get_saddr)(struct net *, int, xfrm_address_t *, xfrm_address_t *, u32); + int (*fill_dst)(struct xfrm_dst *, struct net_device *, const struct flowi *); + struct dst_entry * (*blackhole_route)(struct net *, struct dst_entry *); +}; + +struct xfrm_state_afinfo { + u8 family; + u8 proto; + const struct xfrm_type_offload *type_offload_esp; + const struct xfrm_type *type_esp; + const struct xfrm_type *type_ipip; + const struct xfrm_type *type_ipip6; + const struct xfrm_type *type_comp; + const struct xfrm_type *type_ah; + const struct xfrm_type *type_routing; + const struct xfrm_type *type_dstopts; + int (*output)(struct net *, struct sock *, struct sk_buff *); + int (*transport_finish)(struct sk_buff *, int); + void (*local_error)(struct sk_buff *, u32); +}; + +struct ip_tunnel; + +struct ip6_tnl; + +struct xfrm_tunnel_skb_cb { + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + } header; + union { + struct ip_tunnel *ip4; + struct ip6_tnl *ip6; + } tunnel; +}; + +struct xfrm_mode_skb_cb { + struct xfrm_tunnel_skb_cb header; + __be16 id; + __be16 frag_off; + u8 ihl; + u8 tos; + u8 ttl; + u8 protocol; + u8 optlen; + u8 flow_lbl[3]; +}; + +struct xfrm_spi_skb_cb { + struct xfrm_tunnel_skb_cb header; + unsigned int daddroff; + unsigned int family; + __be32 seq; +}; + +struct xfrm_input_afinfo { + u8 family; + bool is_ipip; + int (*callback)(struct sk_buff *, u8, int); +}; + +struct xfrm4_protocol { + int (*handler)(struct sk_buff *); + int (*input_handler)(struct sk_buff *, int, __be32, int); + int (*cb_handler)(struct sk_buff *, int); + int (*err_handler)(struct sk_buff *, u32); + struct xfrm4_protocol *next; + int priority; +}; + +typedef u64 (*btf_bpf_tcp_send_ack)(struct tcp_sock *, u32); + +struct seqcount_mutex { + seqcount_t seqcount; +}; + +typedef struct seqcount_mutex seqcount_mutex_t; + +enum { + XFRM_STATE_VOID = 0, + XFRM_STATE_ACQ = 1, + XFRM_STATE_VALID = 2, + XFRM_STATE_ERROR = 3, + XFRM_STATE_EXPIRED = 4, + XFRM_STATE_DEAD = 5, +}; + +struct xfrm_if; + +struct xfrm_if_cb { + struct xfrm_if * (*decode_session)(struct sk_buff *, short unsigned int); +}; + +struct xfrm_if_parms { + int link; + u32 if_id; +}; + +struct xfrm_if { + struct xfrm_if *next; + struct net_device *dev; + struct net *net; + struct xfrm_if_parms p; + struct gro_cells gro_cells; +}; + +struct xfrm_policy_walk { + struct xfrm_policy_walk_entry walk; + u8 type; + u32 seq; +}; + +struct xfrmk_spdinfo { + u32 incnt; + u32 outcnt; + u32 fwdcnt; + u32 inscnt; + u32 outscnt; + u32 fwdscnt; + u32 spdhcnt; + u32 spdhmcnt; +}; + +struct ip6_mh { + __u8 ip6mh_proto; + __u8 ip6mh_hdrlen; + __u8 ip6mh_type; + __u8 ip6mh_reserved; + __u16 ip6mh_cksum; + __u8 data[0]; +}; + +struct xfrm_flo { + struct dst_entry *dst_orig; + u8 flags; +}; + +struct xfrm_pol_inexact_node { + struct rb_node node; + union { + xfrm_address_t addr; + struct callback_head rcu; + }; + u8 prefixlen; + struct rb_root root; + struct hlist_head hhead; +}; + +struct xfrm_pol_inexact_key { + possible_net_t net; + u32 if_id; + u16 family; + u8 dir; + u8 type; +}; + +struct xfrm_pol_inexact_bin { + struct xfrm_pol_inexact_key k; + struct rhash_head head; + struct hlist_head hhead; + seqcount_spinlock_t count; + struct rb_root root_d; + struct rb_root root_s; + struct list_head inexact_bins; + struct callback_head rcu; +}; + +enum 
xfrm_pol_inexact_candidate_type { + XFRM_POL_CAND_BOTH = 0, + XFRM_POL_CAND_SADDR = 1, + XFRM_POL_CAND_DADDR = 2, + XFRM_POL_CAND_ANY = 3, + XFRM_POL_CAND_MAX = 4, +}; + +struct xfrm_pol_inexact_candidates { + struct hlist_head *res[4]; +}; + +enum xfrm_ae_ftype_t { + XFRM_AE_UNSPEC = 0, + XFRM_AE_RTHR = 1, + XFRM_AE_RVAL = 2, + XFRM_AE_LVAL = 4, + XFRM_AE_ETHR = 8, + XFRM_AE_CR = 16, + XFRM_AE_CE = 32, + XFRM_AE_CU = 64, + __XFRM_AE_MAX = 65, +}; + +enum xfrm_nlgroups { + XFRMNLGRP_NONE = 0, + XFRMNLGRP_ACQUIRE = 1, + XFRMNLGRP_EXPIRE = 2, + XFRMNLGRP_SA = 3, + XFRMNLGRP_POLICY = 4, + XFRMNLGRP_AEVENTS = 5, + XFRMNLGRP_REPORT = 6, + XFRMNLGRP_MIGRATE = 7, + XFRMNLGRP_MAPPING = 8, + __XFRMNLGRP_MAX = 9, +}; + +enum { + XFRM_MODE_FLAG_TUNNEL = 1, +}; + +struct km_event { + union { + u32 hard; + u32 proto; + u32 byid; + u32 aevent; + u32 type; + } data; + u32 seq; + u32 portid; + u32 event; + struct net *net; +}; + +struct xfrm_kmaddress { + xfrm_address_t local; + xfrm_address_t remote; + u32 reserved; + u16 family; +}; + +struct xfrm_migrate { + xfrm_address_t old_daddr; + xfrm_address_t old_saddr; + xfrm_address_t new_daddr; + xfrm_address_t new_saddr; + u8 proto; + u8 mode; + u16 reserved; + u32 reqid; + u16 old_family; + u16 new_family; +}; + +struct xfrm_mgr { + struct list_head list; + int (*notify)(struct xfrm_state *, const struct km_event *); + int (*acquire)(struct xfrm_state *, struct xfrm_tmpl *, struct xfrm_policy *); + struct xfrm_policy * (*compile_policy)(struct sock *, int, u8 *, int, int *); + int (*new_mapping)(struct xfrm_state *, xfrm_address_t *, __be16); + int (*notify_policy)(struct xfrm_policy *, int, const struct km_event *); + int (*report)(struct net *, u8, struct xfrm_selector *, xfrm_address_t *); + int (*migrate)(const struct xfrm_selector *, u8, u8, const struct xfrm_migrate *, int, const struct xfrm_kmaddress *, const struct xfrm_encap_tmpl *); + bool (*is_alive)(const struct km_event *); +}; + +struct xfrmk_sadinfo { + u32 sadhcnt; + u32 sadhmcnt; + u32 sadcnt; +}; + +struct xfrm_translator { + int (*alloc_compat)(struct sk_buff *, const struct nlmsghdr *); + struct nlmsghdr * (*rcv_msg_compat)(const struct nlmsghdr *, int, const struct nla_policy *, struct netlink_ext_ack *); + int (*xlate_user_policy_sockptr)(u8 **, int); + struct module *owner; +}; + +struct ip_beet_phdr { + __u8 nexthdr; + __u8 hdrlen; + __u8 padlen; + __u8 reserved; +}; + +struct ip_tunnel_6rd_parm { + struct in6_addr prefix; + __be32 relay_prefix; + u16 prefixlen; + u16 relay_prefixlen; +}; + +struct ip_tunnel_prl_entry; + +struct ip_tunnel { + struct ip_tunnel *next; + struct hlist_node hash_node; + struct net_device *dev; + struct net *net; + long unsigned int err_time; + int err_count; + u32 i_seqno; + u32 o_seqno; + int tun_hlen; + u32 index; + u8 erspan_ver; + u8 dir; + u16 hwid; + struct dst_cache dst_cache; + struct ip_tunnel_parm parms; + int mlink; + int encap_hlen; + int hlen; + struct ip_tunnel_encap encap; + struct ip_tunnel_6rd_parm ip6rd; + struct ip_tunnel_prl_entry *prl; + unsigned int prl_count; + unsigned int ip_tnl_net_id; + struct gro_cells gro_cells; + __u32 fwmark; + bool collect_md; + bool ignore_df; +}; + +struct __ip6_tnl_parm { + char name[16]; + int link; + __u8 proto; + __u8 encap_limit; + __u8 hop_limit; + bool collect_md; + __be32 flowinfo; + __u32 flags; + struct in6_addr laddr; + struct in6_addr raddr; + __be16 i_flags; + __be16 o_flags; + __be32 i_key; + __be32 o_key; + __u32 fwmark; + __u32 index; + __u8 erspan_ver; + __u8 dir; + __u16 hwid; +}; + 
+struct ip6_tnl { + struct ip6_tnl *next; + struct net_device *dev; + struct net *net; + struct __ip6_tnl_parm parms; + struct flowi fl; + struct dst_cache dst_cache; + struct gro_cells gro_cells; + int err_count; + long unsigned int err_time; + __u32 i_seqno; + __u32 o_seqno; + int hlen; + int tun_hlen; + int encap_hlen; + struct ip_tunnel_encap encap; + int mlink; +}; + +struct xfrm_skb_cb { + struct xfrm_tunnel_skb_cb header; + union { + struct { + __u32 low; + __u32 hi; + } output; + struct { + __be32 low; + __be32 hi; + } input; + } seq; +}; + +struct ip_tunnel_prl_entry { + struct ip_tunnel_prl_entry *next; + __be32 addr; + u16 flags; + struct callback_head callback_head; +}; + +struct xfrm_trans_tasklet { + struct tasklet_struct tasklet; + struct sk_buff_head queue; +}; + +struct xfrm_trans_cb { + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + } header; + int (*finish)(struct net *, struct sock *, struct sk_buff *); + struct net *net; +}; + +struct xfrm_user_offload { + int ifindex; + __u8 flags; +}; + +struct espintcp_msg { + struct sk_buff *skb; + struct sk_msg skmsg; + int offset; + int len; +}; + +struct espintcp_ctx { + struct strparser strp; + struct sk_buff_head ike_queue; + struct sk_buff_head out_queue; + struct espintcp_msg partial; + void (*saved_data_ready)(struct sock *); + void (*saved_write_space)(struct sock *); + void (*saved_destruct)(struct sock *); + struct work_struct work; + bool tx_running; +}; + +struct unix_stream_read_state { + int (*recv_actor)(struct sk_buff *, int, int, struct unix_stream_read_state *); + struct socket *socket; + struct msghdr *msg; + struct pipe_inode_info *pipe; + size_t size; + int flags; + unsigned int splice_flags; +}; + +struct ipv6_params { + __s32 disable_ipv6; + __s32 autoconf; +}; + +enum flowlabel_reflect { + FLOWLABEL_REFLECT_ESTABLISHED = 1, + FLOWLABEL_REFLECT_TCP_RESET = 2, + FLOWLABEL_REFLECT_ICMPV6_ECHO_REPLIES = 4, +}; + +struct in6_rtmsg { + struct in6_addr rtmsg_dst; + struct in6_addr rtmsg_src; + struct in6_addr rtmsg_gateway; + __u32 rtmsg_type; + __u16 rtmsg_dst_len; + __u16 rtmsg_src_len; + __u32 rtmsg_metric; + long unsigned int rtmsg_info; + __u32 rtmsg_flags; + int rtmsg_ifindex; +}; + +struct compat_in6_rtmsg { + struct in6_addr rtmsg_dst; + struct in6_addr rtmsg_src; + struct in6_addr rtmsg_gateway; + u32 rtmsg_type; + u16 rtmsg_dst_len; + u16 rtmsg_src_len; + u32 rtmsg_metric; + u32 rtmsg_info; + u32 rtmsg_flags; + s32 rtmsg_ifindex; +}; + +struct ac6_iter_state { + struct seq_net_private p; + struct net_device *dev; + struct inet6_dev *idev; +}; + +struct ip6_fraglist_iter { + struct ipv6hdr *tmp_hdr; + struct sk_buff *frag; + int offset; + unsigned int hlen; + __be32 frag_id; + u8 nexthdr; +}; + +struct ip6_frag_state { + u8 *prevhdr; + unsigned int hlen; + unsigned int mtu; + unsigned int left; + int offset; + int ptr; + int hroom; + int troom; + __be32 frag_id; + u8 nexthdr; +}; + +struct ip6_ra_chain { + struct ip6_ra_chain *next; + struct sock *sk; + int sel; + void (*destructor)(struct sock *); +}; + +struct ipcm6_cookie { + struct sockcm_cookie sockc; + __s16 hlimit; + __s16 tclass; + __s8 dontfrag; + struct ipv6_txoptions *opt; + __u16 gso_size; +}; + +enum { + IFLA_INET6_UNSPEC = 0, + IFLA_INET6_FLAGS = 1, + IFLA_INET6_CONF = 2, + IFLA_INET6_STATS = 3, + IFLA_INET6_MCAST = 4, + IFLA_INET6_CACHEINFO = 5, + IFLA_INET6_ICMP6STATS = 6, + IFLA_INET6_TOKEN = 7, + IFLA_INET6_ADDR_GEN_MODE = 8, + __IFLA_INET6_MAX = 9, +}; + +enum in6_addr_gen_mode { + IN6_ADDR_GEN_MODE_EUI64 = 0, + 
IN6_ADDR_GEN_MODE_NONE = 1, + IN6_ADDR_GEN_MODE_STABLE_PRIVACY = 2, + IN6_ADDR_GEN_MODE_RANDOM = 3, +}; + +struct ifla_cacheinfo { + __u32 max_reasm_len; + __u32 tstamp; + __u32 reachable_time; + __u32 retrans_time; +}; + +struct wpan_phy; + +struct wpan_dev_header_ops; + +struct wpan_dev { + struct wpan_phy *wpan_phy; + int iftype; + struct list_head list; + struct net_device *netdev; + const struct wpan_dev_header_ops *header_ops; + struct net_device *lowpan_dev; + u32 identifier; + __le16 pan_id; + __le16 short_addr; + __le64 extended_addr; + atomic_t bsn; + atomic_t dsn; + u8 min_be; + u8 max_be; + u8 csma_retries; + s8 frame_retries; + bool lbt; + bool promiscuous_mode; + bool ackreq; +}; + +struct prefixmsg { + unsigned char prefix_family; + unsigned char prefix_pad1; + short unsigned int prefix_pad2; + int prefix_ifindex; + unsigned char prefix_type; + unsigned char prefix_len; + unsigned char prefix_flags; + unsigned char prefix_pad3; +}; + +enum { + PREFIX_UNSPEC = 0, + PREFIX_ADDRESS = 1, + PREFIX_CACHEINFO = 2, + __PREFIX_MAX = 3, +}; + +struct prefix_cacheinfo { + __u32 preferred_time; + __u32 valid_time; +}; + +struct in6_ifreq { + struct in6_addr ifr6_addr; + __u32 ifr6_prefixlen; + int ifr6_ifindex; +}; + +enum { + DEVCONF_FORWARDING = 0, + DEVCONF_HOPLIMIT = 1, + DEVCONF_MTU6 = 2, + DEVCONF_ACCEPT_RA = 3, + DEVCONF_ACCEPT_REDIRECTS = 4, + DEVCONF_AUTOCONF = 5, + DEVCONF_DAD_TRANSMITS = 6, + DEVCONF_RTR_SOLICITS = 7, + DEVCONF_RTR_SOLICIT_INTERVAL = 8, + DEVCONF_RTR_SOLICIT_DELAY = 9, + DEVCONF_USE_TEMPADDR = 10, + DEVCONF_TEMP_VALID_LFT = 11, + DEVCONF_TEMP_PREFERED_LFT = 12, + DEVCONF_REGEN_MAX_RETRY = 13, + DEVCONF_MAX_DESYNC_FACTOR = 14, + DEVCONF_MAX_ADDRESSES = 15, + DEVCONF_FORCE_MLD_VERSION = 16, + DEVCONF_ACCEPT_RA_DEFRTR = 17, + DEVCONF_ACCEPT_RA_PINFO = 18, + DEVCONF_ACCEPT_RA_RTR_PREF = 19, + DEVCONF_RTR_PROBE_INTERVAL = 20, + DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN = 21, + DEVCONF_PROXY_NDP = 22, + DEVCONF_OPTIMISTIC_DAD = 23, + DEVCONF_ACCEPT_SOURCE_ROUTE = 24, + DEVCONF_MC_FORWARDING = 25, + DEVCONF_DISABLE_IPV6 = 26, + DEVCONF_ACCEPT_DAD = 27, + DEVCONF_FORCE_TLLAO = 28, + DEVCONF_NDISC_NOTIFY = 29, + DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL = 30, + DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL = 31, + DEVCONF_SUPPRESS_FRAG_NDISC = 32, + DEVCONF_ACCEPT_RA_FROM_LOCAL = 33, + DEVCONF_USE_OPTIMISTIC = 34, + DEVCONF_ACCEPT_RA_MTU = 35, + DEVCONF_STABLE_SECRET = 36, + DEVCONF_USE_OIF_ADDRS_ONLY = 37, + DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT = 38, + DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN = 39, + DEVCONF_DROP_UNICAST_IN_L2_MULTICAST = 40, + DEVCONF_DROP_UNSOLICITED_NA = 41, + DEVCONF_KEEP_ADDR_ON_DOWN = 42, + DEVCONF_RTR_SOLICIT_MAX_INTERVAL = 43, + DEVCONF_SEG6_ENABLED = 44, + DEVCONF_SEG6_REQUIRE_HMAC = 45, + DEVCONF_ENHANCED_DAD = 46, + DEVCONF_ADDR_GEN_MODE = 47, + DEVCONF_DISABLE_POLICY = 48, + DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN = 49, + DEVCONF_NDISC_TCLASS = 50, + DEVCONF_RPL_SEG_ENABLED = 51, + DEVCONF_MAX = 52, +}; + +enum { + INET6_IFADDR_STATE_PREDAD = 0, + INET6_IFADDR_STATE_DAD = 1, + INET6_IFADDR_STATE_POSTDAD = 2, + INET6_IFADDR_STATE_ERRDAD = 3, + INET6_IFADDR_STATE_DEAD = 4, +}; + +enum nl802154_cca_modes { + __NL802154_CCA_INVALID = 0, + NL802154_CCA_ENERGY = 1, + NL802154_CCA_CARRIER = 2, + NL802154_CCA_ENERGY_CARRIER = 3, + NL802154_CCA_ALOHA = 4, + NL802154_CCA_UWB_SHR = 5, + NL802154_CCA_UWB_MULTIPLEXED = 6, + __NL802154_CCA_ATTR_AFTER_LAST = 7, + NL802154_CCA_ATTR_MAX = 6, +}; + +enum nl802154_cca_opts { + NL802154_CCA_OPT_ENERGY_CARRIER_AND = 0, + 
NL802154_CCA_OPT_ENERGY_CARRIER_OR = 1, + __NL802154_CCA_OPT_ATTR_AFTER_LAST = 2, + NL802154_CCA_OPT_ATTR_MAX = 1, +}; + +enum nl802154_supported_bool_states { + NL802154_SUPPORTED_BOOL_FALSE = 0, + NL802154_SUPPORTED_BOOL_TRUE = 1, + __NL802154_SUPPORTED_BOOL_INVALD = 2, + NL802154_SUPPORTED_BOOL_BOTH = 3, + __NL802154_SUPPORTED_BOOL_AFTER_LAST = 4, + NL802154_SUPPORTED_BOOL_MAX = 3, +}; + +struct wpan_phy_supported { + u32 channels[32]; + u32 cca_modes; + u32 cca_opts; + u32 iftypes; + enum nl802154_supported_bool_states lbt; + u8 min_minbe; + u8 max_minbe; + u8 min_maxbe; + u8 max_maxbe; + u8 min_csma_backoffs; + u8 max_csma_backoffs; + s8 min_frame_retries; + s8 max_frame_retries; + size_t tx_powers_size; + size_t cca_ed_levels_size; + const s32 *tx_powers; + const s32 *cca_ed_levels; +}; + +struct wpan_phy_cca { + enum nl802154_cca_modes mode; + enum nl802154_cca_opts opt; +}; + +struct wpan_phy { + const void *privid; + u32 flags; + u8 current_channel; + u8 current_page; + struct wpan_phy_supported supported; + s32 transmit_power; + struct wpan_phy_cca cca; + __le64 perm_extended_addr; + s32 cca_ed_level; + u8 symbol_duration; + u16 lifs_period; + u16 sifs_period; + struct device dev; + possible_net_t _net; + long: 64; + long: 64; + char priv[0]; +}; + +struct ieee802154_addr { + u8 mode; + __le16 pan_id; + union { + __le16 short_addr; + __le64 extended_addr; + }; +}; + +struct wpan_dev_header_ops { + int (*create)(struct sk_buff *, struct net_device *, const struct ieee802154_addr *, const struct ieee802154_addr *, unsigned int); +}; + +union fwnet_hwaddr { + u8 u[16]; + struct { + __be64 uniq_id; + u8 max_rec; + u8 sspd; + __be16 fifo_hi; + __be32 fifo_lo; + } uc; +}; + +struct in6_validator_info { + struct in6_addr i6vi_addr; + struct inet6_dev *i6vi_dev; + struct netlink_ext_ack *extack; +}; + +struct ifa6_config { + const struct in6_addr *pfx; + unsigned int plen; + const struct in6_addr *peer_pfx; + u32 rt_priority; + u32 ifa_flags; + u32 preferred_lft; + u32 valid_lft; + u16 scope; +}; + +enum cleanup_prefix_rt_t { + CLEANUP_PREFIX_RT_NOP = 0, + CLEANUP_PREFIX_RT_DEL = 1, + CLEANUP_PREFIX_RT_EXPIRE = 2, +}; + +enum { + IPV6_SADDR_RULE_INIT = 0, + IPV6_SADDR_RULE_LOCAL = 1, + IPV6_SADDR_RULE_SCOPE = 2, + IPV6_SADDR_RULE_PREFERRED = 3, + IPV6_SADDR_RULE_OIF = 4, + IPV6_SADDR_RULE_LABEL = 5, + IPV6_SADDR_RULE_PRIVACY = 6, + IPV6_SADDR_RULE_ORCHID = 7, + IPV6_SADDR_RULE_PREFIX = 8, + IPV6_SADDR_RULE_MAX = 9, +}; + +struct ipv6_saddr_score { + int rule; + int addr_type; + struct inet6_ifaddr *ifa; + long unsigned int scorebits[1]; + int scopedist; + int matchlen; +}; + +struct ipv6_saddr_dst { + const struct in6_addr *addr; + int ifindex; + int scope; + int label; + unsigned int prefs; +}; + +struct if6_iter_state { + struct seq_net_private p; + int bucket; + int offset; +}; + +enum addr_type_t { + UNICAST_ADDR = 0, + MULTICAST_ADDR = 1, + ANYCAST_ADDR = 2, +}; + +struct inet6_fill_args { + u32 portid; + u32 seq; + int event; + unsigned int flags; + int netnsid; + int ifindex; + enum addr_type_t type; +}; + +enum { + DAD_PROCESS = 0, + DAD_BEGIN = 1, + DAD_ABORT = 2, +}; + +struct ifaddrlblmsg { + __u8 ifal_family; + __u8 __ifal_reserved; + __u8 ifal_prefixlen; + __u8 ifal_flags; + __u32 ifal_index; + __u32 ifal_seq; +}; + +enum { + IFAL_ADDRESS = 1, + IFAL_LABEL = 2, + __IFAL_MAX = 3, +}; + +struct ip6addrlbl_entry { + struct in6_addr prefix; + int prefixlen; + int ifindex; + int addrtype; + u32 label; + struct hlist_node list; + struct callback_head rcu; +}; + +struct 
ip6addrlbl_init_table { + const struct in6_addr *prefix; + int prefixlen; + u32 label; +}; + +struct rd_msg { + struct icmp6hdr icmph; + struct in6_addr target; + struct in6_addr dest; + __u8 opt[0]; +}; + +struct fib6_gc_args { + int timeout; + int more; +}; + +struct rt6_exception { + struct hlist_node hlist; + struct rt6_info *rt6i; + long unsigned int stamp; + struct callback_head rcu; +}; + +struct route_info { + __u8 type; + __u8 length; + __u8 prefix_len; + __u8 reserved_l: 3; + __u8 route_pref: 2; + __u8 reserved_h: 3; + __be32 lifetime; + __u8 prefix[0]; +}; + +struct rt6_rtnl_dump_arg { + struct sk_buff *skb; + struct netlink_callback *cb; + struct net *net; + struct fib_dump_filter filter; +}; + +struct netevent_redirect { + struct dst_entry *old; + struct dst_entry *new; + struct neighbour *neigh; + const void *daddr; +}; + +struct trace_event_raw_fib6_table_lookup { + struct trace_entry ent; + u32 tb_id; + int err; + int oif; + int iif; + __u8 tos; + __u8 scope; + __u8 flags; + __u8 src[16]; + __u8 dst[16]; + u16 sport; + u16 dport; + u8 proto; + u8 rt_type; + u32 __data_loc_name; + __u8 gw[16]; + char __data[0]; +}; + +struct trace_event_data_offsets_fib6_table_lookup { + u32 name; +}; + +typedef void (*btf_trace_fib6_table_lookup)(void *, const struct net *, const struct fib6_result *, struct fib6_table *, const struct flowi6 *); + +enum rt6_nud_state { + RT6_NUD_FAIL_HARD = 4294967293, + RT6_NUD_FAIL_PROBE = 4294967294, + RT6_NUD_FAIL_DO_RR = 4294967295, + RT6_NUD_SUCCEED = 1, +}; + +struct fib6_nh_dm_arg { + struct net *net; + const struct in6_addr *saddr; + int oif; + int flags; + struct fib6_nh *nh; +}; + +struct __rt6_probe_work { + struct work_struct work; + struct in6_addr target; + struct net_device *dev; +}; + +struct fib6_nh_frl_arg { + u32 flags; + int oif; + int strict; + int *mpri; + bool *do_rr; + struct fib6_nh *nh; +}; + +struct fib6_nh_excptn_arg { + struct rt6_info *rt; + int plen; +}; + +struct fib6_nh_match_arg { + const struct net_device *dev; + const struct in6_addr *gw; + struct fib6_nh *match; +}; + +struct fib6_nh_age_excptn_arg { + struct fib6_gc_args *gc_args; + long unsigned int now; +}; + +struct fib6_nh_rd_arg { + struct fib6_result *res; + struct flowi6 *fl6; + const struct in6_addr *gw; + struct rt6_info **ret; +}; + +struct ip6rd_flowi { + struct flowi6 fl6; + struct in6_addr gateway; +}; + +struct fib6_nh_del_cached_rt_arg { + struct fib6_config *cfg; + struct fib6_info *f6i; +}; + +struct arg_dev_net_ip { + struct net_device *dev; + struct net *net; + struct in6_addr *addr; +}; + +struct arg_netdev_event { + const struct net_device *dev; + union { + unsigned char nh_flags; + long unsigned int event; + }; +}; + +struct rt6_mtu_change_arg { + struct net_device *dev; + unsigned int mtu; + struct fib6_info *f6i; +}; + +struct rt6_nh { + struct fib6_info *fib6_info; + struct fib6_config r_cfg; + struct list_head next; +}; + +struct fib6_nh_exception_dump_walker { + struct rt6_rtnl_dump_arg *dump; + struct fib6_info *rt; + unsigned int flags; + unsigned int skip; + unsigned int count; +}; + +enum fib6_walk_state { + FWS_S = 0, + FWS_L = 1, + FWS_R = 2, + FWS_C = 3, + FWS_U = 4, +}; + +struct fib6_walker { + struct list_head lh; + struct fib6_node *root; + struct fib6_node *node; + struct fib6_info *leaf; + enum fib6_walk_state state; + unsigned int skip; + unsigned int count; + unsigned int skip_in_node; + int (*func)(struct fib6_walker *); + void *args; +}; + +struct fib6_entry_notifier_info { + struct fib_notifier_info info; + struct fib6_info 
*rt; + unsigned int nsiblings; +}; + +struct ipv6_route_iter { + struct seq_net_private p; + struct fib6_walker w; + loff_t skip; + struct fib6_table *tbl; + int sernum; +}; + +struct bpf_iter__ipv6_route { + union { + struct bpf_iter_meta *meta; + }; + union { + struct fib6_info *rt; + }; +}; + +struct fib6_cleaner { + struct fib6_walker w; + struct net *net; + int (*func)(struct fib6_info *, void *); + int sernum; + void *arg; + bool skip_notify; +}; + +enum { + FIB6_NO_SERNUM_CHANGE = 0, +}; + +struct fib6_dump_arg { + struct net *net; + struct notifier_block *nb; + struct netlink_ext_ack *extack; +}; + +struct fib6_nh_pcpu_arg { + struct fib6_info *from; + const struct fib6_table *table; +}; + +struct lookup_args { + int offset; + const struct in6_addr *addr; +}; + +struct ipv6_mreq { + struct in6_addr ipv6mr_multiaddr; + int ipv6mr_ifindex; +}; + +struct in6_flowlabel_req { + struct in6_addr flr_dst; + __be32 flr_label; + __u8 flr_action; + __u8 flr_share; + __u16 flr_flags; + __u16 flr_expires; + __u16 flr_linger; + __u32 __flr_pad; +}; + +struct ip6_mtuinfo { + struct sockaddr_in6 ip6m_addr; + __u32 ip6m_mtu; +}; + +struct nduseroptmsg { + unsigned char nduseropt_family; + unsigned char nduseropt_pad1; + short unsigned int nduseropt_opts_len; + int nduseropt_ifindex; + __u8 nduseropt_icmp_type; + __u8 nduseropt_icmp_code; + short unsigned int nduseropt_pad2; + unsigned int nduseropt_pad3; +}; + +enum { + NDUSEROPT_UNSPEC = 0, + NDUSEROPT_SRCADDR = 1, + __NDUSEROPT_MAX = 2, +}; + +struct nd_msg { + struct icmp6hdr icmph; + struct in6_addr target; + __u8 opt[0]; +}; + +struct rs_msg { + struct icmp6hdr icmph; + __u8 opt[0]; +}; + +struct ra_msg { + struct icmp6hdr icmph; + __be32 reachable_time; + __be32 retrans_timer; +}; + +struct icmp6_filter { + __u32 data[8]; +}; + +struct raw6_sock { + struct inet_sock inet; + __u32 checksum; + __u32 offset; + struct icmp6_filter filter; + __u32 ip6mr_table; + struct ipv6_pinfo inet6; +}; + +typedef int mh_filter_t(struct sock *, struct sk_buff *); + +struct raw6_frag_vec { + struct msghdr *msg; + int hlen; + char c[4]; +}; + +typedef void ip6_icmp_send_t(struct sk_buff *, u8, u8, __u32, const struct in6_addr *); + +struct ipv6_destopt_hao { + __u8 type; + __u8 length; + struct in6_addr addr; +} __attribute__((packed)); + +struct icmpv6_msg { + struct sk_buff *skb; + int offset; + uint8_t type; +}; + +struct icmp6_err { + int err; + int fatal; +}; + +struct mld_msg { + struct icmp6hdr mld_hdr; + struct in6_addr mld_mca; +}; + +struct mld2_grec { + __u8 grec_type; + __u8 grec_auxwords; + __be16 grec_nsrcs; + struct in6_addr grec_mca; + struct in6_addr grec_src[0]; +}; + +struct mld2_report { + struct icmp6hdr mld2r_hdr; + struct mld2_grec mld2r_grec[0]; +}; + +struct mld2_query { + struct icmp6hdr mld2q_hdr; + struct in6_addr mld2q_mca; + __u8 mld2q_qrv: 3; + __u8 mld2q_suppress: 1; + __u8 mld2q_resv2: 4; + __u8 mld2q_qqic; + __be16 mld2q_nsrcs; + struct in6_addr mld2q_srcs[0]; +}; + +struct igmp6_mc_iter_state { + struct seq_net_private p; + struct net_device *dev; + struct inet6_dev *idev; +}; + +struct igmp6_mcf_iter_state { + struct seq_net_private p; + struct net_device *dev; + struct inet6_dev *idev; + struct ifmcaddr6 *im; +}; + +enum ip6_defrag_users { + IP6_DEFRAG_LOCAL_DELIVER = 0, + IP6_DEFRAG_CONNTRACK_IN = 1, + __IP6_DEFRAG_CONNTRACK_IN = 65536, + IP6_DEFRAG_CONNTRACK_OUT = 65537, + __IP6_DEFRAG_CONNTRACK_OUT = 131072, + IP6_DEFRAG_CONNTRACK_BRIDGE_IN = 131073, + __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = 196608, +}; + +struct frag_queue { + 
struct inet_frag_queue q; + int iif; + __u16 nhoffset; + u8 ecn; +}; + +struct tcp6_pseudohdr { + struct in6_addr saddr; + struct in6_addr daddr; + __be32 len; + __be32 protocol; +}; + +struct rt0_hdr { + struct ipv6_rt_hdr rt_hdr; + __u32 reserved; + struct in6_addr addr[0]; +}; + +struct ipv6_rpl_sr_hdr { + __u8 nexthdr; + __u8 hdrlen; + __u8 type; + __u8 segments_left; + __u32 cmpre: 4; + __u32 cmpri: 4; + __u32 reserved: 4; + __u32 pad: 4; + __u32 reserved1: 16; + union { + struct in6_addr addr[0]; + __u8 data[0]; + } segments; +}; + +struct tlvtype_proc { + int type; + bool (*func)(struct sk_buff *, int); +}; + +struct ip6fl_iter_state { + struct seq_net_private p; + struct pid_namespace *pid_ns; + int bucket; +}; + +struct sr6_tlv { + __u8 type; + __u8 len; + __u8 data[0]; +}; + +enum { + SEG6_ATTR_UNSPEC = 0, + SEG6_ATTR_DST = 1, + SEG6_ATTR_DSTLEN = 2, + SEG6_ATTR_HMACKEYID = 3, + SEG6_ATTR_SECRET = 4, + SEG6_ATTR_SECRETLEN = 5, + SEG6_ATTR_ALGID = 6, + SEG6_ATTR_HMACINFO = 7, + __SEG6_ATTR_MAX = 8, +}; + +enum { + SEG6_CMD_UNSPEC = 0, + SEG6_CMD_SETHMAC = 1, + SEG6_CMD_DUMPHMAC = 2, + SEG6_CMD_SET_TUNSRC = 3, + SEG6_CMD_GET_TUNSRC = 4, + __SEG6_CMD_MAX = 5, +}; + +typedef short unsigned int mifi_t; + +typedef __u32 if_mask; + +struct if_set { + if_mask ifs_bits[8]; +}; + +struct mif6ctl { + mifi_t mif6c_mifi; + unsigned char mif6c_flags; + unsigned char vifc_threshold; + __u16 mif6c_pifi; + unsigned int vifc_rate_limit; +}; + +struct mf6cctl { + struct sockaddr_in6 mf6cc_origin; + struct sockaddr_in6 mf6cc_mcastgrp; + mifi_t mf6cc_parent; + struct if_set mf6cc_ifset; +}; + +struct sioc_sg_req6 { + struct sockaddr_in6 src; + struct sockaddr_in6 grp; + long unsigned int pktcnt; + long unsigned int bytecnt; + long unsigned int wrong_if; +}; + +struct sioc_mif_req6 { + mifi_t mifi; + long unsigned int icount; + long unsigned int ocount; + long unsigned int ibytes; + long unsigned int obytes; +}; + +struct mrt6msg { + __u8 im6_mbz; + __u8 im6_msgtype; + __u16 im6_mif; + __u32 im6_pad; + struct in6_addr im6_src; + struct in6_addr im6_dst; +}; + +enum { + IP6MRA_CREPORT_UNSPEC = 0, + IP6MRA_CREPORT_MSGTYPE = 1, + IP6MRA_CREPORT_MIF_ID = 2, + IP6MRA_CREPORT_SRC_ADDR = 3, + IP6MRA_CREPORT_DST_ADDR = 4, + IP6MRA_CREPORT_PKT = 5, + __IP6MRA_CREPORT_MAX = 6, +}; + +struct mfc6_cache_cmp_arg { + struct in6_addr mf6c_mcastgrp; + struct in6_addr mf6c_origin; +}; + +struct mfc6_cache { + struct mr_mfc _c; + union { + struct { + struct in6_addr mf6c_mcastgrp; + struct in6_addr mf6c_origin; + }; + struct mfc6_cache_cmp_arg cmparg; + }; +}; + +struct ip6mr_result { + struct mr_table *mrt; +}; + +struct compat_sioc_sg_req6 { + struct sockaddr_in6 src; + struct sockaddr_in6 grp; + compat_ulong_t pktcnt; + compat_ulong_t bytecnt; + compat_ulong_t wrong_if; +}; + +struct compat_sioc_mif_req6 { + mifi_t mifi; + compat_ulong_t icount; + compat_ulong_t ocount; + compat_ulong_t ibytes; + compat_ulong_t obytes; +}; + +struct xfrm6_protocol { + int (*handler)(struct sk_buff *); + int (*input_handler)(struct sk_buff *, int, __be32, int); + int (*cb_handler)(struct sk_buff *, int); + int (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32); + struct xfrm6_protocol *next; + int priority; +}; + +struct br_input_skb_cb { + struct net_device *brdev; + u16 frag_max_size; + u8 igmp; + u8 mrouters_only: 1; + u8 proxyarp_replied: 1; + u8 src_port_isolated: 1; + u8 vlan_filtered: 1; + u8 br_netfilter_broute: 1; + int offload_fwd_mark; +}; + +struct nf_bridge_frag_data; + +typedef struct 
rt6_info * (*pol_lookup_t)(struct net *, struct fib6_table *, struct flowi6 *, const struct sk_buff *, int); + +struct fib6_rule { + struct fib_rule common; + struct rt6key src; + struct rt6key dst; + u8 tclass; +}; + +struct calipso_doi; + +struct netlbl_calipso_ops { + int (*doi_add)(struct calipso_doi *, struct netlbl_audit *); + void (*doi_free)(struct calipso_doi *); + int (*doi_remove)(u32, struct netlbl_audit *); + struct calipso_doi * (*doi_getdef)(u32); + void (*doi_putdef)(struct calipso_doi *); + int (*doi_walk)(u32 *, int (*)(struct calipso_doi *, void *), void *); + int (*sock_getattr)(struct sock *, struct netlbl_lsm_secattr *); + int (*sock_setattr)(struct sock *, const struct calipso_doi *, const struct netlbl_lsm_secattr *); + void (*sock_delattr)(struct sock *); + int (*req_setattr)(struct request_sock *, const struct calipso_doi *, const struct netlbl_lsm_secattr *); + void (*req_delattr)(struct request_sock *); + int (*opt_getattr)(const unsigned char *, struct netlbl_lsm_secattr *); + unsigned char * (*skbuff_optptr)(const struct sk_buff *); + int (*skbuff_setattr)(struct sk_buff *, const struct calipso_doi *, const struct netlbl_lsm_secattr *); + int (*skbuff_delattr)(struct sk_buff *); + void (*cache_invalidate)(); + int (*cache_add)(const unsigned char *, const struct netlbl_lsm_secattr *); +}; + +struct calipso_doi { + u32 doi; + u32 type; + refcount_t refcount; + struct list_head list; + struct callback_head rcu; +}; + +struct calipso_map_cache_bkt { + spinlock_t lock; + u32 size; + struct list_head list; +}; + +struct calipso_map_cache_entry { + u32 hash; + unsigned char *key; + size_t key_len; + struct netlbl_lsm_cache *lsm_data; + u32 activity; + struct list_head list; +}; + +enum { + SEG6_IPTUNNEL_UNSPEC = 0, + SEG6_IPTUNNEL_SRH = 1, + __SEG6_IPTUNNEL_MAX = 2, +}; + +struct seg6_iptunnel_encap { + int mode; + struct ipv6_sr_hdr srh[0]; +}; + +enum { + SEG6_IPTUN_MODE_INLINE = 0, + SEG6_IPTUN_MODE_ENCAP = 1, + SEG6_IPTUN_MODE_L2ENCAP = 2, +}; + +struct seg6_lwt { + struct dst_cache cache; + struct seg6_iptunnel_encap tuninfo[0]; +}; + +enum { + SEG6_LOCAL_UNSPEC = 0, + SEG6_LOCAL_ACTION = 1, + SEG6_LOCAL_SRH = 2, + SEG6_LOCAL_TABLE = 3, + SEG6_LOCAL_NH4 = 4, + SEG6_LOCAL_NH6 = 5, + SEG6_LOCAL_IIF = 6, + SEG6_LOCAL_OIF = 7, + SEG6_LOCAL_BPF = 8, + __SEG6_LOCAL_MAX = 9, +}; + +enum { + SEG6_LOCAL_BPF_PROG_UNSPEC = 0, + SEG6_LOCAL_BPF_PROG = 1, + SEG6_LOCAL_BPF_PROG_NAME = 2, + __SEG6_LOCAL_BPF_PROG_MAX = 3, +}; + +struct seg6_local_lwt; + +struct seg6_action_desc { + int action; + long unsigned int attrs; + int (*input)(struct sk_buff *, struct seg6_local_lwt *); + int static_headroom; +}; + +struct seg6_local_lwt { + int action; + struct ipv6_sr_hdr *srh; + int table; + struct in_addr nh4; + struct in6_addr nh6; + int iif; + int oif; + struct bpf_lwt_prog bpf; + int headroom; + struct seg6_action_desc *desc; +}; + +struct seg6_action_param { + int (*parse)(struct nlattr **, struct seg6_local_lwt *); + int (*put)(struct sk_buff *, struct seg6_local_lwt *); + int (*cmp)(struct seg6_local_lwt *, struct seg6_local_lwt *); +}; + +enum { + RPL_IPTUNNEL_UNSPEC = 0, + RPL_IPTUNNEL_SRH = 1, + __RPL_IPTUNNEL_MAX = 2, +}; + +struct rpl_iptunnel_encap { + struct ipv6_rpl_sr_hdr srh[0]; +}; + +struct rpl_lwt { + struct dst_cache cache; + struct rpl_iptunnel_encap tuninfo; +}; + +enum { + IP6_FH_F_FRAG = 1, + IP6_FH_F_AUTH = 2, + IP6_FH_F_SKIP_RH = 4, +}; + +struct _strp_msg { + struct strp_msg strp; + int accum_len; +}; + +struct vlan_group { + unsigned int nr_vlan_devs; + 
struct hlist_node hlist; + struct net_device **vlan_devices_arrays[16]; +}; + +struct vlan_info { + struct net_device *real_dev; + struct vlan_group grp; + struct list_head vid_list; + unsigned int nr_vids; + struct callback_head rcu; +}; + +enum vlan_flags { + VLAN_FLAG_REORDER_HDR = 1, + VLAN_FLAG_GVRP = 2, + VLAN_FLAG_LOOSE_BINDING = 4, + VLAN_FLAG_MVRP = 8, + VLAN_FLAG_BRIDGE_BINDING = 16, +}; + +struct vlan_priority_tci_mapping { + u32 priority; + u16 vlan_qos; + struct vlan_priority_tci_mapping *next; +}; + +struct vlan_dev_priv { + unsigned int nr_ingress_mappings; + u32 ingress_priority_map[8]; + unsigned int nr_egress_mappings; + struct vlan_priority_tci_mapping *egress_priority_map[16]; + __be16 vlan_proto; + u16 vlan_id; + u16 flags; + struct net_device *real_dev; + unsigned char real_dev_addr[6]; + struct proc_dir_entry *dent; + struct vlan_pcpu_stats *vlan_pcpu_stats; + struct netpoll *netpoll; +}; + +enum vlan_protos { + VLAN_PROTO_8021Q = 0, + VLAN_PROTO_8021AD = 1, + VLAN_PROTO_NUM = 2, +}; + +struct vlan_vid_info { + struct list_head list; + __be16 proto; + u16 vid; + int refcount; +}; + +enum nl80211_iftype { + NL80211_IFTYPE_UNSPECIFIED = 0, + NL80211_IFTYPE_ADHOC = 1, + NL80211_IFTYPE_STATION = 2, + NL80211_IFTYPE_AP = 3, + NL80211_IFTYPE_AP_VLAN = 4, + NL80211_IFTYPE_WDS = 5, + NL80211_IFTYPE_MONITOR = 6, + NL80211_IFTYPE_MESH_POINT = 7, + NL80211_IFTYPE_P2P_CLIENT = 8, + NL80211_IFTYPE_P2P_GO = 9, + NL80211_IFTYPE_P2P_DEVICE = 10, + NL80211_IFTYPE_OCB = 11, + NL80211_IFTYPE_NAN = 12, + NUM_NL80211_IFTYPES = 13, + NL80211_IFTYPE_MAX = 12, +}; + +struct cfg80211_conn; + +struct cfg80211_cached_keys; + +enum ieee80211_bss_type { + IEEE80211_BSS_TYPE_ESS = 0, + IEEE80211_BSS_TYPE_PBSS = 1, + IEEE80211_BSS_TYPE_IBSS = 2, + IEEE80211_BSS_TYPE_MBSS = 3, + IEEE80211_BSS_TYPE_ANY = 4, +}; + +struct cfg80211_internal_bss; + +enum nl80211_chan_width { + NL80211_CHAN_WIDTH_20_NOHT = 0, + NL80211_CHAN_WIDTH_20 = 1, + NL80211_CHAN_WIDTH_40 = 2, + NL80211_CHAN_WIDTH_80 = 3, + NL80211_CHAN_WIDTH_80P80 = 4, + NL80211_CHAN_WIDTH_160 = 5, + NL80211_CHAN_WIDTH_5 = 6, + NL80211_CHAN_WIDTH_10 = 7, + NL80211_CHAN_WIDTH_1 = 8, + NL80211_CHAN_WIDTH_2 = 9, + NL80211_CHAN_WIDTH_4 = 10, + NL80211_CHAN_WIDTH_8 = 11, + NL80211_CHAN_WIDTH_16 = 12, +}; + +enum ieee80211_edmg_bw_config { + IEEE80211_EDMG_BW_CONFIG_4 = 4, + IEEE80211_EDMG_BW_CONFIG_5 = 5, + IEEE80211_EDMG_BW_CONFIG_6 = 6, + IEEE80211_EDMG_BW_CONFIG_7 = 7, + IEEE80211_EDMG_BW_CONFIG_8 = 8, + IEEE80211_EDMG_BW_CONFIG_9 = 9, + IEEE80211_EDMG_BW_CONFIG_10 = 10, + IEEE80211_EDMG_BW_CONFIG_11 = 11, + IEEE80211_EDMG_BW_CONFIG_12 = 12, + IEEE80211_EDMG_BW_CONFIG_13 = 13, + IEEE80211_EDMG_BW_CONFIG_14 = 14, + IEEE80211_EDMG_BW_CONFIG_15 = 15, +}; + +struct ieee80211_edmg { + u8 channels; + enum ieee80211_edmg_bw_config bw_config; +}; + +struct ieee80211_channel; + +struct cfg80211_chan_def { + struct ieee80211_channel *chan; + enum nl80211_chan_width width; + u32 center_freq1; + u32 center_freq2; + struct ieee80211_edmg edmg; + u16 freq1_offset; +}; + +struct ieee80211_mcs_info { + u8 rx_mask[10]; + __le16 rx_highest; + u8 tx_params; + u8 reserved[3]; +}; + +struct ieee80211_ht_cap { + __le16 cap_info; + u8 ampdu_params_info; + struct ieee80211_mcs_info mcs; + __le16 extended_ht_cap_info; + __le32 tx_BF_cap_info; + u8 antenna_selection_info; +} __attribute__((packed)); + +struct key_params; + +struct cfg80211_ibss_params { + const u8 *ssid; + const u8 *bssid; + struct cfg80211_chan_def chandef; + const u8 *ie; + u8 ssid_len; + u8 ie_len; + u16 
beacon_interval; + u32 basic_rates; + bool channel_fixed; + bool privacy; + bool control_port; + bool control_port_over_nl80211; + bool userspace_handles_dfs; + int: 24; + int mcast_rate[5]; + struct ieee80211_ht_cap ht_capa; + struct ieee80211_ht_cap ht_capa_mask; + struct key_params *wep_keys; + int wep_tx_key; + int: 32; +} __attribute__((packed)); + +enum nl80211_auth_type { + NL80211_AUTHTYPE_OPEN_SYSTEM = 0, + NL80211_AUTHTYPE_SHARED_KEY = 1, + NL80211_AUTHTYPE_FT = 2, + NL80211_AUTHTYPE_NETWORK_EAP = 3, + NL80211_AUTHTYPE_SAE = 4, + NL80211_AUTHTYPE_FILS_SK = 5, + NL80211_AUTHTYPE_FILS_SK_PFS = 6, + NL80211_AUTHTYPE_FILS_PK = 7, + __NL80211_AUTHTYPE_NUM = 8, + NL80211_AUTHTYPE_MAX = 7, + NL80211_AUTHTYPE_AUTOMATIC = 8, +}; + +enum nl80211_mfp { + NL80211_MFP_NO = 0, + NL80211_MFP_REQUIRED = 1, + NL80211_MFP_OPTIONAL = 2, +}; + +struct cfg80211_crypto_settings { + u32 wpa_versions; + u32 cipher_group; + int n_ciphers_pairwise; + u32 ciphers_pairwise[5]; + int n_akm_suites; + u32 akm_suites[2]; + bool control_port; + __be16 control_port_ethertype; + bool control_port_no_encrypt; + bool control_port_over_nl80211; + bool control_port_no_preauth; + struct key_params *wep_keys; + int wep_tx_key; + const u8 *psk; + const u8 *sae_pwd; + u8 sae_pwd_len; +}; + +struct ieee80211_vht_mcs_info { + __le16 rx_mcs_map; + __le16 rx_highest; + __le16 tx_mcs_map; + __le16 tx_highest; +}; + +struct ieee80211_vht_cap { + __le32 vht_cap_info; + struct ieee80211_vht_mcs_info supp_mcs; +}; + +enum nl80211_bss_select_attr { + __NL80211_BSS_SELECT_ATTR_INVALID = 0, + NL80211_BSS_SELECT_ATTR_RSSI = 1, + NL80211_BSS_SELECT_ATTR_BAND_PREF = 2, + NL80211_BSS_SELECT_ATTR_RSSI_ADJUST = 3, + __NL80211_BSS_SELECT_ATTR_AFTER_LAST = 4, + NL80211_BSS_SELECT_ATTR_MAX = 3, +}; + +enum nl80211_band { + NL80211_BAND_2GHZ = 0, + NL80211_BAND_5GHZ = 1, + NL80211_BAND_60GHZ = 2, + NL80211_BAND_6GHZ = 3, + NL80211_BAND_S1GHZ = 4, + NUM_NL80211_BANDS = 5, +}; + +struct cfg80211_bss_select_adjust { + enum nl80211_band band; + s8 delta; +}; + +struct cfg80211_bss_selection { + enum nl80211_bss_select_attr behaviour; + union { + enum nl80211_band band_pref; + struct cfg80211_bss_select_adjust adjust; + } param; +}; + +struct cfg80211_connect_params { + struct ieee80211_channel *channel; + struct ieee80211_channel *channel_hint; + const u8 *bssid; + const u8 *bssid_hint; + const u8 *ssid; + size_t ssid_len; + enum nl80211_auth_type auth_type; + int: 32; + const u8 *ie; + size_t ie_len; + bool privacy; + int: 24; + enum nl80211_mfp mfp; + struct cfg80211_crypto_settings crypto; + const u8 *key; + u8 key_len; + u8 key_idx; + short: 16; + u32 flags; + int bg_scan_period; + struct ieee80211_ht_cap ht_capa; + struct ieee80211_ht_cap ht_capa_mask; + struct ieee80211_vht_cap vht_capa; + struct ieee80211_vht_cap vht_capa_mask; + bool pbss; + int: 24; + struct cfg80211_bss_selection bss_select; + const u8 *prev_bssid; + const u8 *fils_erp_username; + size_t fils_erp_username_len; + const u8 *fils_erp_realm; + size_t fils_erp_realm_len; + u16 fils_erp_next_seq_num; + long: 48; + const u8 *fils_erp_rrk; + size_t fils_erp_rrk_len; + bool want_1x; + int: 24; + struct ieee80211_edmg edmg; + int: 32; +} __attribute__((packed)); + +struct cfg80211_cqm_config; + +struct wiphy; + +struct wireless_dev { + struct wiphy *wiphy; + enum nl80211_iftype iftype; + struct list_head list; + struct net_device *netdev; + u32 identifier; + struct list_head mgmt_registrations; + spinlock_t mgmt_registrations_lock; + u8 mgmt_registrations_need_update: 1; + struct 
mutex mtx; + bool use_4addr; + bool is_running; + u8 address[6]; + u8 ssid[32]; + u8 ssid_len; + u8 mesh_id_len; + u8 mesh_id_up_len; + struct cfg80211_conn *conn; + struct cfg80211_cached_keys *connect_keys; + enum ieee80211_bss_type conn_bss_type; + u32 conn_owner_nlportid; + struct work_struct disconnect_wk; + u8 disconnect_bssid[6]; + struct list_head event_list; + spinlock_t event_lock; + struct cfg80211_internal_bss *current_bss; + struct cfg80211_chan_def preset_chandef; + struct cfg80211_chan_def chandef; + bool ibss_fixed; + bool ibss_dfs_possible; + bool ps; + int ps_timeout; + int beacon_interval; + u32 ap_unexpected_nlportid; + u32 owner_nlportid; + bool nl_owner_dead; + bool cac_started; + long unsigned int cac_start_time; + unsigned int cac_time_ms; + struct { + struct cfg80211_ibss_params ibss; + struct cfg80211_connect_params connect; + struct cfg80211_cached_keys *keys; + const u8 *ie; + size_t ie_len; + u8 bssid[6]; + u8 prev_bssid[6]; + u8 ssid[32]; + s8 default_key; + s8 default_mgmt_key; + bool prev_bssid_valid; + } wext; + struct cfg80211_cqm_config *cqm_config; + struct list_head pmsr_list; + spinlock_t pmsr_lock; + struct work_struct pmsr_free_wk; + long unsigned int unprot_beacon_reported; +}; + +struct iw_encode_ext { + __u32 ext_flags; + __u8 tx_seq[8]; + __u8 rx_seq[8]; + struct sockaddr addr; + __u16 alg; + __u16 key_len; + __u8 key[0]; +}; + +struct iwreq { + union { + char ifrn_name[16]; + } ifr_ifrn; + union iwreq_data u; +}; + +struct iw_event { + __u16 len; + __u16 cmd; + union iwreq_data u; +}; + +struct compat_iw_point { + compat_caddr_t pointer; + __u16 length; + __u16 flags; +}; + +struct __compat_iw_event { + __u16 len; + __u16 cmd; + compat_caddr_t pointer; +}; + +enum nl80211_reg_initiator { + NL80211_REGDOM_SET_BY_CORE = 0, + NL80211_REGDOM_SET_BY_USER = 1, + NL80211_REGDOM_SET_BY_DRIVER = 2, + NL80211_REGDOM_SET_BY_COUNTRY_IE = 3, +}; + +enum nl80211_dfs_regions { + NL80211_DFS_UNSET = 0, + NL80211_DFS_FCC = 1, + NL80211_DFS_ETSI = 2, + NL80211_DFS_JP = 3, +}; + +enum nl80211_user_reg_hint_type { + NL80211_USER_REG_HINT_USER = 0, + NL80211_USER_REG_HINT_CELL_BASE = 1, + NL80211_USER_REG_HINT_INDOOR = 2, +}; + +enum nl80211_mntr_flags { + __NL80211_MNTR_FLAG_INVALID = 0, + NL80211_MNTR_FLAG_FCSFAIL = 1, + NL80211_MNTR_FLAG_PLCPFAIL = 2, + NL80211_MNTR_FLAG_CONTROL = 3, + NL80211_MNTR_FLAG_OTHER_BSS = 4, + NL80211_MNTR_FLAG_COOK_FRAMES = 5, + NL80211_MNTR_FLAG_ACTIVE = 6, + __NL80211_MNTR_FLAG_AFTER_LAST = 7, + NL80211_MNTR_FLAG_MAX = 6, +}; + +enum nl80211_key_mode { + NL80211_KEY_RX_TX = 0, + NL80211_KEY_NO_TX = 1, + NL80211_KEY_SET_TX = 2, +}; + +enum nl80211_bss_scan_width { + NL80211_BSS_CHAN_WIDTH_20 = 0, + NL80211_BSS_CHAN_WIDTH_10 = 1, + NL80211_BSS_CHAN_WIDTH_5 = 2, + NL80211_BSS_CHAN_WIDTH_1 = 3, + NL80211_BSS_CHAN_WIDTH_2 = 4, +}; + +struct nl80211_wowlan_tcp_data_seq { + __u32 start; + __u32 offset; + __u32 len; +}; + +struct nl80211_wowlan_tcp_data_token { + __u32 offset; + __u32 len; + __u8 token_stream[0]; +}; + +struct nl80211_wowlan_tcp_data_token_feature { + __u32 min_len; + __u32 max_len; + __u32 bufsize; +}; + +enum nl80211_ext_feature_index { + NL80211_EXT_FEATURE_VHT_IBSS = 0, + NL80211_EXT_FEATURE_RRM = 1, + NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER = 2, + NL80211_EXT_FEATURE_SCAN_START_TIME = 3, + NL80211_EXT_FEATURE_BSS_PARENT_TSF = 4, + NL80211_EXT_FEATURE_SET_SCAN_DWELL = 5, + NL80211_EXT_FEATURE_BEACON_RATE_LEGACY = 6, + NL80211_EXT_FEATURE_BEACON_RATE_HT = 7, + NL80211_EXT_FEATURE_BEACON_RATE_VHT = 8, + 
NL80211_EXT_FEATURE_FILS_STA = 9, + NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA = 10, + NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED = 11, + NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI = 12, + NL80211_EXT_FEATURE_CQM_RSSI_LIST = 13, + NL80211_EXT_FEATURE_FILS_SK_OFFLOAD = 14, + NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK = 15, + NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X = 16, + NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME = 17, + NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP = 18, + NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE = 19, + NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 20, + NL80211_EXT_FEATURE_MFP_OPTIONAL = 21, + NL80211_EXT_FEATURE_LOW_SPAN_SCAN = 22, + NL80211_EXT_FEATURE_LOW_POWER_SCAN = 23, + NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN = 24, + NL80211_EXT_FEATURE_DFS_OFFLOAD = 25, + NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211 = 26, + NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT = 27, + NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT = 27, + NL80211_EXT_FEATURE_TXQS = 28, + NL80211_EXT_FEATURE_SCAN_RANDOM_SN = 29, + NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT = 30, + NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 = 31, + NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER = 32, + NL80211_EXT_FEATURE_AIRTIME_FAIRNESS = 33, + NL80211_EXT_FEATURE_AP_PMKSA_CACHING = 34, + NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD = 35, + NL80211_EXT_FEATURE_EXT_KEY_ID = 36, + NL80211_EXT_FEATURE_STA_TX_PWR = 37, + NL80211_EXT_FEATURE_SAE_OFFLOAD = 38, + NL80211_EXT_FEATURE_VLAN_OFFLOAD = 39, + NL80211_EXT_FEATURE_AQL = 40, + NL80211_EXT_FEATURE_BEACON_PROTECTION = 41, + NL80211_EXT_FEATURE_CONTROL_PORT_NO_PREAUTH = 42, + NL80211_EXT_FEATURE_PROTECTED_TWT = 43, + NL80211_EXT_FEATURE_DEL_IBSS_STA = 44, + NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS = 45, + NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT = 46, + NL80211_EXT_FEATURE_SCAN_FREQ_KHZ = 47, + NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211_TX_STATUS = 48, + NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION = 49, + NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK = 50, + NL80211_EXT_FEATURE_SAE_OFFLOAD_AP = 51, + NL80211_EXT_FEATURE_FILS_DISCOVERY = 52, + NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP = 53, + NUM_NL80211_EXT_FEATURES = 54, + MAX_NL80211_EXT_FEATURES = 53, +}; + +enum nl80211_dfs_state { + NL80211_DFS_USABLE = 0, + NL80211_DFS_UNAVAILABLE = 1, + NL80211_DFS_AVAILABLE = 2, +}; + +struct nl80211_vendor_cmd_info { + __u32 vendor_id; + __u32 subcmd; +}; + +struct ieee80211_he_cap_elem { + u8 mac_cap_info[6]; + u8 phy_cap_info[11]; +}; + +struct ieee80211_he_mcs_nss_supp { + __le16 rx_mcs_80; + __le16 tx_mcs_80; + __le16 rx_mcs_160; + __le16 tx_mcs_160; + __le16 rx_mcs_80p80; + __le16 tx_mcs_80p80; +}; + +struct ieee80211_he_6ghz_capa { + __le16 capa; +}; + +enum environment_cap { + ENVIRON_ANY = 0, + ENVIRON_INDOOR = 1, + ENVIRON_OUTDOOR = 2, +}; + +struct regulatory_request { + struct callback_head callback_head; + int wiphy_idx; + enum nl80211_reg_initiator initiator; + enum nl80211_user_reg_hint_type user_reg_hint_type; + char alpha2[3]; + enum nl80211_dfs_regions dfs_region; + bool intersect; + bool processed; + enum environment_cap country_ie_env; + struct list_head list; +}; + +struct ieee80211_freq_range { + u32 start_freq_khz; + u32 end_freq_khz; + u32 max_bandwidth_khz; +}; + +struct ieee80211_power_rule { + u32 max_antenna_gain; + u32 max_eirp; +}; + +struct ieee80211_wmm_ac { + u16 cw_min; + u16 cw_max; + u16 cot; + u8 aifsn; +}; + +struct ieee80211_wmm_rule { + struct ieee80211_wmm_ac client[4]; + struct ieee80211_wmm_ac ap[4]; +}; + +struct 
ieee80211_reg_rule { + struct ieee80211_freq_range freq_range; + struct ieee80211_power_rule power_rule; + struct ieee80211_wmm_rule wmm_rule; + u32 flags; + u32 dfs_cac_ms; + bool has_wmm; +}; + +struct ieee80211_regdomain { + struct callback_head callback_head; + u32 n_reg_rules; + char alpha2[3]; + enum nl80211_dfs_regions dfs_region; + struct ieee80211_reg_rule reg_rules[0]; +}; + +struct ieee80211_channel { + enum nl80211_band band; + u32 center_freq; + u16 freq_offset; + u16 hw_value; + u32 flags; + int max_antenna_gain; + int max_power; + int max_reg_power; + bool beacon_found; + u32 orig_flags; + int orig_mag; + int orig_mpwr; + enum nl80211_dfs_state dfs_state; + long unsigned int dfs_state_entered; + unsigned int dfs_cac_ms; +}; + +struct ieee80211_rate { + u32 flags; + u16 bitrate; + u16 hw_value; + u16 hw_value_short; +}; + +struct ieee80211_sta_ht_cap { + u16 cap; + bool ht_supported; + u8 ampdu_factor; + u8 ampdu_density; + struct ieee80211_mcs_info mcs; + char: 8; +} __attribute__((packed)); + +struct ieee80211_sta_vht_cap { + bool vht_supported; + u32 cap; + struct ieee80211_vht_mcs_info vht_mcs; +}; + +struct ieee80211_sta_he_cap { + bool has_he; + struct ieee80211_he_cap_elem he_cap_elem; + struct ieee80211_he_mcs_nss_supp he_mcs_nss_supp; + u8 ppe_thres[25]; +} __attribute__((packed)); + +struct ieee80211_sband_iftype_data { + u16 types_mask; + struct ieee80211_sta_he_cap he_cap; + struct ieee80211_he_6ghz_capa he_6ghz_capa; + char: 8; +} __attribute__((packed)); + +struct ieee80211_sta_s1g_cap { + bool s1g; + u8 cap[10]; + u8 nss_mcs[5]; +}; + +struct ieee80211_supported_band { + struct ieee80211_channel *channels; + struct ieee80211_rate *bitrates; + enum nl80211_band band; + int n_channels; + int n_bitrates; + struct ieee80211_sta_ht_cap ht_cap; + struct ieee80211_sta_vht_cap vht_cap; + struct ieee80211_sta_s1g_cap s1g_cap; + struct ieee80211_edmg edmg_cap; + u16 n_iftype_data; + const struct ieee80211_sband_iftype_data *iftype_data; +}; + +struct key_params { + const u8 *key; + const u8 *seq; + int key_len; + int seq_len; + u16 vlan_id; + u32 cipher; + enum nl80211_key_mode mode; +}; + +struct mac_address { + u8 addr[6]; +}; + +struct cfg80211_ssid { + u8 ssid[32]; + u8 ssid_len; +}; + +enum cfg80211_signal_type { + CFG80211_SIGNAL_TYPE_NONE = 0, + CFG80211_SIGNAL_TYPE_MBM = 1, + CFG80211_SIGNAL_TYPE_UNSPEC = 2, +}; + +struct ieee80211_txrx_stypes; + +struct ieee80211_iface_combination; + +struct wiphy_iftype_akm_suites; + +struct wiphy_wowlan_support; + +struct cfg80211_wowlan; + +struct wiphy_iftype_ext_capab; + +struct wiphy_coalesce_support; + +struct wiphy_vendor_command; + +struct cfg80211_pmsr_capabilities; + +struct wiphy { + u8 perm_addr[6]; + u8 addr_mask[6]; + struct mac_address *addresses; + const struct ieee80211_txrx_stypes *mgmt_stypes; + const struct ieee80211_iface_combination *iface_combinations; + int n_iface_combinations; + u16 software_iftypes; + u16 n_addresses; + u16 interface_modes; + u16 max_acl_mac_addrs; + u32 flags; + u32 regulatory_flags; + u32 features; + u8 ext_features[7]; + u32 ap_sme_capa; + enum cfg80211_signal_type signal_type; + int bss_priv_size; + u8 max_scan_ssids; + u8 max_sched_scan_reqs; + u8 max_sched_scan_ssids; + u8 max_match_sets; + u16 max_scan_ie_len; + u16 max_sched_scan_ie_len; + u32 max_sched_scan_plans; + u32 max_sched_scan_plan_interval; + u32 max_sched_scan_plan_iterations; + int n_cipher_suites; + const u32 *cipher_suites; + int n_akm_suites; + const u32 *akm_suites; + const struct wiphy_iftype_akm_suites 
*iftype_akm_suites; + unsigned int num_iftype_akm_suites; + u8 retry_short; + u8 retry_long; + u32 frag_threshold; + u32 rts_threshold; + u8 coverage_class; + char fw_version[32]; + u32 hw_version; + const struct wiphy_wowlan_support *wowlan; + struct cfg80211_wowlan *wowlan_config; + u16 max_remain_on_channel_duration; + u8 max_num_pmkids; + u32 available_antennas_tx; + u32 available_antennas_rx; + u32 probe_resp_offload; + const u8 *extended_capabilities; + const u8 *extended_capabilities_mask; + u8 extended_capabilities_len; + const struct wiphy_iftype_ext_capab *iftype_ext_capab; + unsigned int num_iftype_ext_capab; + const void *privid; + struct ieee80211_supported_band *bands[5]; + void (*reg_notifier)(struct wiphy *, struct regulatory_request *); + const struct ieee80211_regdomain *regd; + struct device dev; + bool registered; + struct dentry *debugfsdir; + const struct ieee80211_ht_cap *ht_capa_mod_mask; + const struct ieee80211_vht_cap *vht_capa_mod_mask; + struct list_head wdev_list; + possible_net_t _net; + const struct iw_handler_def *wext; + const struct wiphy_coalesce_support *coalesce; + const struct wiphy_vendor_command *vendor_commands; + const struct nl80211_vendor_cmd_info *vendor_events; + int n_vendor_commands; + int n_vendor_events; + u16 max_ap_assoc_sta; + u8 max_num_csa_counters; + u32 bss_select_support; + u8 nan_supported_bands; + u32 txq_limit; + u32 txq_memory_limit; + u32 txq_quantum; + long unsigned int tx_queue_len; + u8 support_mbssid: 1; + u8 support_only_he_mbssid: 1; + const struct cfg80211_pmsr_capabilities *pmsr_capa; + struct { + u64 peer; + u64 vif; + u8 max_retry; + } tid_config_support; + u8 max_data_retry_count; + long: 56; + long: 64; + char priv[0]; +}; + +struct cfg80211_match_set { + struct cfg80211_ssid ssid; + u8 bssid[6]; + s32 rssi_thold; + s32 per_band_rssi_thold[5]; +}; + +struct cfg80211_sched_scan_plan { + u32 interval; + u32 iterations; +}; + +struct cfg80211_sched_scan_request { + u64 reqid; + struct cfg80211_ssid *ssids; + int n_ssids; + u32 n_channels; + enum nl80211_bss_scan_width scan_width; + const u8 *ie; + size_t ie_len; + u32 flags; + struct cfg80211_match_set *match_sets; + int n_match_sets; + s32 min_rssi_thold; + u32 delay; + struct cfg80211_sched_scan_plan *scan_plans; + int n_scan_plans; + u8 mac_addr[6]; + u8 mac_addr_mask[6]; + bool relative_rssi_set; + s8 relative_rssi; + struct cfg80211_bss_select_adjust rssi_adjust; + struct wiphy *wiphy; + struct net_device *dev; + long unsigned int scan_start; + bool report_results; + struct callback_head callback_head; + u32 owner_nlportid; + bool nl_owner_dead; + struct list_head list; + struct ieee80211_channel *channels[0]; +}; + +struct cfg80211_pkt_pattern { + const u8 *mask; + const u8 *pattern; + int pattern_len; + int pkt_offset; +}; + +struct cfg80211_wowlan_tcp { + struct socket *sock; + __be32 src; + __be32 dst; + u16 src_port; + u16 dst_port; + u8 dst_mac[6]; + int payload_len; + const u8 *payload; + struct nl80211_wowlan_tcp_data_seq payload_seq; + u32 data_interval; + u32 wake_len; + const u8 *wake_data; + const u8 *wake_mask; + u32 tokens_size; + struct nl80211_wowlan_tcp_data_token payload_tok; +}; + +struct cfg80211_wowlan { + bool any; + bool disconnect; + bool magic_pkt; + bool gtk_rekey_failure; + bool eap_identity_req; + bool four_way_handshake; + bool rfkill_release; + struct cfg80211_pkt_pattern *patterns; + struct cfg80211_wowlan_tcp *tcp; + int n_patterns; + struct cfg80211_sched_scan_request *nd_config; +}; + +struct ieee80211_iface_limit { + u16 max; + 
u16 types; +}; + +struct ieee80211_iface_combination { + const struct ieee80211_iface_limit *limits; + u32 num_different_channels; + u16 max_interfaces; + u8 n_limits; + bool beacon_int_infra_match; + u8 radar_detect_widths; + u8 radar_detect_regions; + u32 beacon_int_min_gcd; +}; + +struct ieee80211_txrx_stypes { + u16 tx; + u16 rx; +}; + +struct wiphy_wowlan_tcp_support { + const struct nl80211_wowlan_tcp_data_token_feature *tok; + u32 data_payload_max; + u32 data_interval_max; + u32 wake_payload_max; + bool seq; +}; + +struct wiphy_wowlan_support { + u32 flags; + int n_patterns; + int pattern_max_len; + int pattern_min_len; + int max_pkt_offset; + int max_nd_match_sets; + const struct wiphy_wowlan_tcp_support *tcp; +}; + +struct wiphy_coalesce_support { + int n_rules; + int max_delay; + int n_patterns; + int pattern_max_len; + int pattern_min_len; + int max_pkt_offset; +}; + +struct wiphy_vendor_command { + struct nl80211_vendor_cmd_info info; + u32 flags; + int (*doit)(struct wiphy *, struct wireless_dev *, const void *, int); + int (*dumpit)(struct wiphy *, struct wireless_dev *, struct sk_buff *, const void *, int, long unsigned int *); + const struct nla_policy *policy; + unsigned int maxattr; +}; + +struct wiphy_iftype_ext_capab { + enum nl80211_iftype iftype; + const u8 *extended_capabilities; + const u8 *extended_capabilities_mask; + u8 extended_capabilities_len; +}; + +struct cfg80211_pmsr_capabilities { + unsigned int max_peers; + u8 report_ap_tsf: 1; + u8 randomize_mac_addr: 1; + struct { + u32 preambles; + u32 bandwidths; + s8 max_bursts_exponent; + u8 max_ftms_per_burst; + u8 supported: 1; + u8 asap: 1; + u8 non_asap: 1; + u8 request_lci: 1; + u8 request_civicloc: 1; + u8 trigger_based: 1; + u8 non_trigger_based: 1; + } ftm; +}; + +struct wiphy_iftype_akm_suites { + u16 iftypes_mask; + const u32 *akm_suites; + int n_akm_suites; +}; + +struct iw_ioctl_description { + __u8 header_type; + __u8 token_type; + __u16 token_size; + __u16 min_tokens; + __u16 max_tokens; + __u32 flags; +}; + +typedef int (*wext_ioctl_func)(struct net_device *, struct iwreq *, unsigned int, struct iw_request_info *, iw_handler); + +struct iw_thrspy { + struct sockaddr addr; + struct iw_quality qual; + struct iw_quality low; + struct iw_quality high; +}; + +struct netlbl_af4list { + __be32 addr; + __be32 mask; + u32 valid; + struct list_head list; +}; + +struct netlbl_af6list { + struct in6_addr addr; + struct in6_addr mask; + u32 valid; + struct list_head list; +}; + +struct netlbl_domaddr_map { + struct list_head list4; + struct list_head list6; +}; + +struct netlbl_dommap_def { + u32 type; + union { + struct netlbl_domaddr_map *addrsel; + struct cipso_v4_doi *cipso; + struct calipso_doi *calipso; + }; +}; + +struct netlbl_domaddr4_map { + struct netlbl_dommap_def def; + struct netlbl_af4list list; +}; + +struct netlbl_domaddr6_map { + struct netlbl_dommap_def def; + struct netlbl_af6list list; +}; + +struct netlbl_dom_map { + char *domain; + u16 family; + struct netlbl_dommap_def def; + u32 valid; + struct list_head list; + struct callback_head rcu; +}; + +struct netlbl_domhsh_tbl { + struct list_head *tbl; + u32 size; +}; + +enum { + NLBL_MGMT_C_UNSPEC = 0, + NLBL_MGMT_C_ADD = 1, + NLBL_MGMT_C_REMOVE = 2, + NLBL_MGMT_C_LISTALL = 3, + NLBL_MGMT_C_ADDDEF = 4, + NLBL_MGMT_C_REMOVEDEF = 5, + NLBL_MGMT_C_LISTDEF = 6, + NLBL_MGMT_C_PROTOCOLS = 7, + NLBL_MGMT_C_VERSION = 8, + NLBL_MGMT_C_S0_SET = 9, + NLBL_MGMT_C_S0_GET = 10, + __NLBL_MGMT_C_MAX = 11, +}; + +enum { + NLBL_MGMT_A_UNSPEC = 0, + 
NLBL_MGMT_A_DOMAIN = 1, + NLBL_MGMT_A_PROTOCOL = 2, + NLBL_MGMT_A_VERSION = 3, + NLBL_MGMT_A_CV4DOI = 4, + NLBL_MGMT_A_IPV6ADDR = 5, + NLBL_MGMT_A_IPV6MASK = 6, + NLBL_MGMT_A_IPV4ADDR = 7, + NLBL_MGMT_A_IPV4MASK = 8, + NLBL_MGMT_A_ADDRSELECTOR = 9, + NLBL_MGMT_A_SELECTORLIST = 10, + NLBL_MGMT_A_FAMILY = 11, + NLBL_MGMT_A_CLPDOI = 12, + NLBL_MGMT_A_S0 = 13, + __NLBL_MGMT_A_MAX = 14, +}; + +struct netlbl_domhsh_walk_arg { + struct netlink_callback *nl_cb; + struct sk_buff *skb; + u32 seq; +}; + +enum { + NLBL_UNLABEL_C_UNSPEC = 0, + NLBL_UNLABEL_C_ACCEPT = 1, + NLBL_UNLABEL_C_LIST = 2, + NLBL_UNLABEL_C_STATICADD = 3, + NLBL_UNLABEL_C_STATICREMOVE = 4, + NLBL_UNLABEL_C_STATICLIST = 5, + NLBL_UNLABEL_C_STATICADDDEF = 6, + NLBL_UNLABEL_C_STATICREMOVEDEF = 7, + NLBL_UNLABEL_C_STATICLISTDEF = 8, + __NLBL_UNLABEL_C_MAX = 9, +}; + +enum { + NLBL_UNLABEL_A_UNSPEC = 0, + NLBL_UNLABEL_A_ACPTFLG = 1, + NLBL_UNLABEL_A_IPV6ADDR = 2, + NLBL_UNLABEL_A_IPV6MASK = 3, + NLBL_UNLABEL_A_IPV4ADDR = 4, + NLBL_UNLABEL_A_IPV4MASK = 5, + NLBL_UNLABEL_A_IFACE = 6, + NLBL_UNLABEL_A_SECCTX = 7, + __NLBL_UNLABEL_A_MAX = 8, +}; + +struct netlbl_unlhsh_tbl { + struct list_head *tbl; + u32 size; +}; + +struct netlbl_unlhsh_addr4 { + u32 secid; + struct netlbl_af4list list; + struct callback_head rcu; +}; + +struct netlbl_unlhsh_addr6 { + u32 secid; + struct netlbl_af6list list; + struct callback_head rcu; +}; + +struct netlbl_unlhsh_iface { + int ifindex; + struct list_head addr4_list; + struct list_head addr6_list; + u32 valid; + struct list_head list; + struct callback_head rcu; +}; + +struct netlbl_unlhsh_walk_arg { + struct netlink_callback *nl_cb; + struct sk_buff *skb; + u32 seq; +}; + +enum { + NLBL_CIPSOV4_C_UNSPEC = 0, + NLBL_CIPSOV4_C_ADD = 1, + NLBL_CIPSOV4_C_REMOVE = 2, + NLBL_CIPSOV4_C_LIST = 3, + NLBL_CIPSOV4_C_LISTALL = 4, + __NLBL_CIPSOV4_C_MAX = 5, +}; + +enum { + NLBL_CIPSOV4_A_UNSPEC = 0, + NLBL_CIPSOV4_A_DOI = 1, + NLBL_CIPSOV4_A_MTYPE = 2, + NLBL_CIPSOV4_A_TAG = 3, + NLBL_CIPSOV4_A_TAGLST = 4, + NLBL_CIPSOV4_A_MLSLVLLOC = 5, + NLBL_CIPSOV4_A_MLSLVLREM = 6, + NLBL_CIPSOV4_A_MLSLVL = 7, + NLBL_CIPSOV4_A_MLSLVLLST = 8, + NLBL_CIPSOV4_A_MLSCATLOC = 9, + NLBL_CIPSOV4_A_MLSCATREM = 10, + NLBL_CIPSOV4_A_MLSCAT = 11, + NLBL_CIPSOV4_A_MLSCATLST = 12, + __NLBL_CIPSOV4_A_MAX = 13, +}; + +struct netlbl_cipsov4_doiwalk_arg { + struct netlink_callback *nl_cb; + struct sk_buff *skb; + u32 seq; +}; + +struct netlbl_domhsh_walk_arg___2 { + struct netlbl_audit *audit_info; + u32 doi; +}; + +enum { + NLBL_CALIPSO_C_UNSPEC = 0, + NLBL_CALIPSO_C_ADD = 1, + NLBL_CALIPSO_C_REMOVE = 2, + NLBL_CALIPSO_C_LIST = 3, + NLBL_CALIPSO_C_LISTALL = 4, + __NLBL_CALIPSO_C_MAX = 5, +}; + +enum { + NLBL_CALIPSO_A_UNSPEC = 0, + NLBL_CALIPSO_A_DOI = 1, + NLBL_CALIPSO_A_MTYPE = 2, + __NLBL_CALIPSO_A_MAX = 3, +}; + +struct netlbl_calipso_doiwalk_arg { + struct netlink_callback *nl_cb; + struct sk_buff *skb; + u32 seq; +}; + +struct dcbmsg { + __u8 dcb_family; + __u8 cmd; + __u16 dcb_pad; +}; + +enum dcbnl_commands { + DCB_CMD_UNDEFINED = 0, + DCB_CMD_GSTATE = 1, + DCB_CMD_SSTATE = 2, + DCB_CMD_PGTX_GCFG = 3, + DCB_CMD_PGTX_SCFG = 4, + DCB_CMD_PGRX_GCFG = 5, + DCB_CMD_PGRX_SCFG = 6, + DCB_CMD_PFC_GCFG = 7, + DCB_CMD_PFC_SCFG = 8, + DCB_CMD_SET_ALL = 9, + DCB_CMD_GPERM_HWADDR = 10, + DCB_CMD_GCAP = 11, + DCB_CMD_GNUMTCS = 12, + DCB_CMD_SNUMTCS = 13, + DCB_CMD_PFC_GSTATE = 14, + DCB_CMD_PFC_SSTATE = 15, + DCB_CMD_BCN_GCFG = 16, + DCB_CMD_BCN_SCFG = 17, + DCB_CMD_GAPP = 18, + DCB_CMD_SAPP = 19, + DCB_CMD_IEEE_SET = 20, + DCB_CMD_IEEE_GET = 21, + 
DCB_CMD_GDCBX = 22, + DCB_CMD_SDCBX = 23, + DCB_CMD_GFEATCFG = 24, + DCB_CMD_SFEATCFG = 25, + DCB_CMD_CEE_GET = 26, + DCB_CMD_IEEE_DEL = 27, + __DCB_CMD_ENUM_MAX = 28, + DCB_CMD_MAX = 27, +}; + +enum dcbnl_attrs { + DCB_ATTR_UNDEFINED = 0, + DCB_ATTR_IFNAME = 1, + DCB_ATTR_STATE = 2, + DCB_ATTR_PFC_STATE = 3, + DCB_ATTR_PFC_CFG = 4, + DCB_ATTR_NUM_TC = 5, + DCB_ATTR_PG_CFG = 6, + DCB_ATTR_SET_ALL = 7, + DCB_ATTR_PERM_HWADDR = 8, + DCB_ATTR_CAP = 9, + DCB_ATTR_NUMTCS = 10, + DCB_ATTR_BCN = 11, + DCB_ATTR_APP = 12, + DCB_ATTR_IEEE = 13, + DCB_ATTR_DCBX = 14, + DCB_ATTR_FEATCFG = 15, + DCB_ATTR_CEE = 16, + __DCB_ATTR_ENUM_MAX = 17, + DCB_ATTR_MAX = 16, +}; + +enum ieee_attrs { + DCB_ATTR_IEEE_UNSPEC = 0, + DCB_ATTR_IEEE_ETS = 1, + DCB_ATTR_IEEE_PFC = 2, + DCB_ATTR_IEEE_APP_TABLE = 3, + DCB_ATTR_IEEE_PEER_ETS = 4, + DCB_ATTR_IEEE_PEER_PFC = 5, + DCB_ATTR_IEEE_PEER_APP = 6, + DCB_ATTR_IEEE_MAXRATE = 7, + DCB_ATTR_IEEE_QCN = 8, + DCB_ATTR_IEEE_QCN_STATS = 9, + DCB_ATTR_DCB_BUFFER = 10, + __DCB_ATTR_IEEE_MAX = 11, +}; + +enum ieee_attrs_app { + DCB_ATTR_IEEE_APP_UNSPEC = 0, + DCB_ATTR_IEEE_APP = 1, + __DCB_ATTR_IEEE_APP_MAX = 2, +}; + +enum cee_attrs { + DCB_ATTR_CEE_UNSPEC = 0, + DCB_ATTR_CEE_PEER_PG = 1, + DCB_ATTR_CEE_PEER_PFC = 2, + DCB_ATTR_CEE_PEER_APP_TABLE = 3, + DCB_ATTR_CEE_TX_PG = 4, + DCB_ATTR_CEE_RX_PG = 5, + DCB_ATTR_CEE_PFC = 6, + DCB_ATTR_CEE_APP_TABLE = 7, + DCB_ATTR_CEE_FEAT = 8, + __DCB_ATTR_CEE_MAX = 9, +}; + +enum peer_app_attr { + DCB_ATTR_CEE_PEER_APP_UNSPEC = 0, + DCB_ATTR_CEE_PEER_APP_INFO = 1, + DCB_ATTR_CEE_PEER_APP = 2, + __DCB_ATTR_CEE_PEER_APP_MAX = 3, +}; + +enum dcbnl_pfc_up_attrs { + DCB_PFC_UP_ATTR_UNDEFINED = 0, + DCB_PFC_UP_ATTR_0 = 1, + DCB_PFC_UP_ATTR_1 = 2, + DCB_PFC_UP_ATTR_2 = 3, + DCB_PFC_UP_ATTR_3 = 4, + DCB_PFC_UP_ATTR_4 = 5, + DCB_PFC_UP_ATTR_5 = 6, + DCB_PFC_UP_ATTR_6 = 7, + DCB_PFC_UP_ATTR_7 = 8, + DCB_PFC_UP_ATTR_ALL = 9, + __DCB_PFC_UP_ATTR_ENUM_MAX = 10, + DCB_PFC_UP_ATTR_MAX = 9, +}; + +enum dcbnl_pg_attrs { + DCB_PG_ATTR_UNDEFINED = 0, + DCB_PG_ATTR_TC_0 = 1, + DCB_PG_ATTR_TC_1 = 2, + DCB_PG_ATTR_TC_2 = 3, + DCB_PG_ATTR_TC_3 = 4, + DCB_PG_ATTR_TC_4 = 5, + DCB_PG_ATTR_TC_5 = 6, + DCB_PG_ATTR_TC_6 = 7, + DCB_PG_ATTR_TC_7 = 8, + DCB_PG_ATTR_TC_MAX = 9, + DCB_PG_ATTR_TC_ALL = 10, + DCB_PG_ATTR_BW_ID_0 = 11, + DCB_PG_ATTR_BW_ID_1 = 12, + DCB_PG_ATTR_BW_ID_2 = 13, + DCB_PG_ATTR_BW_ID_3 = 14, + DCB_PG_ATTR_BW_ID_4 = 15, + DCB_PG_ATTR_BW_ID_5 = 16, + DCB_PG_ATTR_BW_ID_6 = 17, + DCB_PG_ATTR_BW_ID_7 = 18, + DCB_PG_ATTR_BW_ID_MAX = 19, + DCB_PG_ATTR_BW_ID_ALL = 20, + __DCB_PG_ATTR_ENUM_MAX = 21, + DCB_PG_ATTR_MAX = 20, +}; + +enum dcbnl_tc_attrs { + DCB_TC_ATTR_PARAM_UNDEFINED = 0, + DCB_TC_ATTR_PARAM_PGID = 1, + DCB_TC_ATTR_PARAM_UP_MAPPING = 2, + DCB_TC_ATTR_PARAM_STRICT_PRIO = 3, + DCB_TC_ATTR_PARAM_BW_PCT = 4, + DCB_TC_ATTR_PARAM_ALL = 5, + __DCB_TC_ATTR_PARAM_ENUM_MAX = 6, + DCB_TC_ATTR_PARAM_MAX = 5, +}; + +enum dcbnl_cap_attrs { + DCB_CAP_ATTR_UNDEFINED = 0, + DCB_CAP_ATTR_ALL = 1, + DCB_CAP_ATTR_PG = 2, + DCB_CAP_ATTR_PFC = 3, + DCB_CAP_ATTR_UP2TC = 4, + DCB_CAP_ATTR_PG_TCS = 5, + DCB_CAP_ATTR_PFC_TCS = 6, + DCB_CAP_ATTR_GSP = 7, + DCB_CAP_ATTR_BCN = 8, + DCB_CAP_ATTR_DCBX = 9, + __DCB_CAP_ATTR_ENUM_MAX = 10, + DCB_CAP_ATTR_MAX = 9, +}; + +enum dcbnl_numtcs_attrs { + DCB_NUMTCS_ATTR_UNDEFINED = 0, + DCB_NUMTCS_ATTR_ALL = 1, + DCB_NUMTCS_ATTR_PG = 2, + DCB_NUMTCS_ATTR_PFC = 3, + __DCB_NUMTCS_ATTR_ENUM_MAX = 4, + DCB_NUMTCS_ATTR_MAX = 3, +}; + +enum dcbnl_bcn_attrs { + DCB_BCN_ATTR_UNDEFINED = 0, + DCB_BCN_ATTR_RP_0 = 1, + DCB_BCN_ATTR_RP_1 = 2, + 
DCB_BCN_ATTR_RP_2 = 3, + DCB_BCN_ATTR_RP_3 = 4, + DCB_BCN_ATTR_RP_4 = 5, + DCB_BCN_ATTR_RP_5 = 6, + DCB_BCN_ATTR_RP_6 = 7, + DCB_BCN_ATTR_RP_7 = 8, + DCB_BCN_ATTR_RP_ALL = 9, + DCB_BCN_ATTR_BCNA_0 = 10, + DCB_BCN_ATTR_BCNA_1 = 11, + DCB_BCN_ATTR_ALPHA = 12, + DCB_BCN_ATTR_BETA = 13, + DCB_BCN_ATTR_GD = 14, + DCB_BCN_ATTR_GI = 15, + DCB_BCN_ATTR_TMAX = 16, + DCB_BCN_ATTR_TD = 17, + DCB_BCN_ATTR_RMIN = 18, + DCB_BCN_ATTR_W = 19, + DCB_BCN_ATTR_RD = 20, + DCB_BCN_ATTR_RU = 21, + DCB_BCN_ATTR_WRTT = 22, + DCB_BCN_ATTR_RI = 23, + DCB_BCN_ATTR_C = 24, + DCB_BCN_ATTR_ALL = 25, + __DCB_BCN_ATTR_ENUM_MAX = 26, + DCB_BCN_ATTR_MAX = 25, +}; + +enum dcb_general_attr_values { + DCB_ATTR_VALUE_UNDEFINED = 255, +}; + +enum dcbnl_app_attrs { + DCB_APP_ATTR_UNDEFINED = 0, + DCB_APP_ATTR_IDTYPE = 1, + DCB_APP_ATTR_ID = 2, + DCB_APP_ATTR_PRIORITY = 3, + __DCB_APP_ATTR_ENUM_MAX = 4, + DCB_APP_ATTR_MAX = 3, +}; + +enum dcbnl_featcfg_attrs { + DCB_FEATCFG_ATTR_UNDEFINED = 0, + DCB_FEATCFG_ATTR_ALL = 1, + DCB_FEATCFG_ATTR_PG = 2, + DCB_FEATCFG_ATTR_PFC = 3, + DCB_FEATCFG_ATTR_APP = 4, + __DCB_FEATCFG_ATTR_ENUM_MAX = 5, + DCB_FEATCFG_ATTR_MAX = 4, +}; + +struct dcb_app_type { + int ifindex; + struct dcb_app app; + struct list_head list; + u8 dcbx; +}; + +struct dcb_ieee_app_prio_map { + u64 map[8]; +}; + +struct dcb_ieee_app_dscp_map { + u8 map[64]; +}; + +enum dcbevent_notif_type { + DCB_APP_EVENT = 1, +}; + +struct reply_func { + int type; + int (*cb)(struct net_device *, struct nlmsghdr *, u32, struct nlattr **, struct sk_buff *); +}; + +enum switchdev_attr_id { + SWITCHDEV_ATTR_ID_UNDEFINED = 0, + SWITCHDEV_ATTR_ID_PORT_STP_STATE = 1, + SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS = 2, + SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS = 3, + SWITCHDEV_ATTR_ID_PORT_MROUTER = 4, + SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME = 5, + SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING = 6, + SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED = 7, + SWITCHDEV_ATTR_ID_BRIDGE_MROUTER = 8, + SWITCHDEV_ATTR_ID_MRP_PORT_STATE = 9, + SWITCHDEV_ATTR_ID_MRP_PORT_ROLE = 10, +}; + +struct switchdev_attr { + struct net_device *orig_dev; + enum switchdev_attr_id id; + u32 flags; + void *complete_priv; + void (*complete)(struct net_device *, int, void *); + union { + u8 stp_state; + long unsigned int brport_flags; + bool mrouter; + clock_t ageing_time; + bool vlan_filtering; + bool mc_disabled; + u8 mrp_port_state; + u8 mrp_port_role; + } u; +}; + +enum switchdev_notifier_type { + SWITCHDEV_FDB_ADD_TO_BRIDGE = 1, + SWITCHDEV_FDB_DEL_TO_BRIDGE = 2, + SWITCHDEV_FDB_ADD_TO_DEVICE = 3, + SWITCHDEV_FDB_DEL_TO_DEVICE = 4, + SWITCHDEV_FDB_OFFLOADED = 5, + SWITCHDEV_FDB_FLUSH_TO_BRIDGE = 6, + SWITCHDEV_PORT_OBJ_ADD = 7, + SWITCHDEV_PORT_OBJ_DEL = 8, + SWITCHDEV_PORT_ATTR_SET = 9, + SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE = 10, + SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE = 11, + SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE = 12, + SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE = 13, + SWITCHDEV_VXLAN_FDB_OFFLOADED = 14, +}; + +struct switchdev_notifier_info { + struct net_device *dev; + struct netlink_ext_ack *extack; +}; + +struct switchdev_notifier_port_obj_info { + struct switchdev_notifier_info info; + const struct switchdev_obj *obj; + struct switchdev_trans *trans; + bool handled; +}; + +struct switchdev_notifier_port_attr_info { + struct switchdev_notifier_info info; + const struct switchdev_attr *attr; + struct switchdev_trans *trans; + bool handled; +}; + +typedef void switchdev_deferred_func_t(struct net_device *, const void *); + +struct switchdev_deferred_item { + struct list_head list; + struct net_device 
*dev; + switchdev_deferred_func_t *func; + long unsigned int data[0]; +}; + +enum l3mdev_type { + L3MDEV_TYPE_UNSPEC = 0, + L3MDEV_TYPE_VRF = 1, + __L3MDEV_TYPE_MAX = 2, +}; + +typedef int (*lookup_by_table_id_t)(struct net *, u32); + +struct l3mdev_handler { + lookup_by_table_id_t dev_lookup; +}; + +struct ncsi_dev { + int state; + int link_up; + struct net_device *dev; + void (*handler)(struct ncsi_dev *); +}; + +enum { + NCSI_CAP_BASE = 0, + NCSI_CAP_GENERIC = 0, + NCSI_CAP_BC = 1, + NCSI_CAP_MC = 2, + NCSI_CAP_BUFFER = 3, + NCSI_CAP_AEN = 4, + NCSI_CAP_VLAN = 5, + NCSI_CAP_MAX = 6, +}; + +enum { + NCSI_MODE_BASE = 0, + NCSI_MODE_ENABLE = 0, + NCSI_MODE_TX_ENABLE = 1, + NCSI_MODE_LINK = 2, + NCSI_MODE_VLAN = 3, + NCSI_MODE_BC = 4, + NCSI_MODE_MC = 5, + NCSI_MODE_AEN = 6, + NCSI_MODE_FC = 7, + NCSI_MODE_MAX = 8, +}; + +struct ncsi_channel_version { + u32 version; + u32 alpha2; + u8 fw_name[12]; + u32 fw_version; + u16 pci_ids[4]; + u32 mf_id; +}; + +struct ncsi_channel_cap { + u32 index; + u32 cap; +}; + +struct ncsi_channel_mode { + u32 index; + u32 enable; + u32 size; + u32 data[8]; +}; + +struct ncsi_channel_mac_filter { + u8 n_uc; + u8 n_mc; + u8 n_mixed; + u64 bitmap; + unsigned char *addrs; +}; + +struct ncsi_channel_vlan_filter { + u8 n_vids; + u64 bitmap; + u16 *vids; +}; + +struct ncsi_channel_stats { + u32 hnc_cnt_hi; + u32 hnc_cnt_lo; + u32 hnc_rx_bytes; + u32 hnc_tx_bytes; + u32 hnc_rx_uc_pkts; + u32 hnc_rx_mc_pkts; + u32 hnc_rx_bc_pkts; + u32 hnc_tx_uc_pkts; + u32 hnc_tx_mc_pkts; + u32 hnc_tx_bc_pkts; + u32 hnc_fcs_err; + u32 hnc_align_err; + u32 hnc_false_carrier; + u32 hnc_runt_pkts; + u32 hnc_jabber_pkts; + u32 hnc_rx_pause_xon; + u32 hnc_rx_pause_xoff; + u32 hnc_tx_pause_xon; + u32 hnc_tx_pause_xoff; + u32 hnc_tx_s_collision; + u32 hnc_tx_m_collision; + u32 hnc_l_collision; + u32 hnc_e_collision; + u32 hnc_rx_ctl_frames; + u32 hnc_rx_64_frames; + u32 hnc_rx_127_frames; + u32 hnc_rx_255_frames; + u32 hnc_rx_511_frames; + u32 hnc_rx_1023_frames; + u32 hnc_rx_1522_frames; + u32 hnc_rx_9022_frames; + u32 hnc_tx_64_frames; + u32 hnc_tx_127_frames; + u32 hnc_tx_255_frames; + u32 hnc_tx_511_frames; + u32 hnc_tx_1023_frames; + u32 hnc_tx_1522_frames; + u32 hnc_tx_9022_frames; + u32 hnc_rx_valid_bytes; + u32 hnc_rx_runt_pkts; + u32 hnc_rx_jabber_pkts; + u32 ncsi_rx_cmds; + u32 ncsi_dropped_cmds; + u32 ncsi_cmd_type_errs; + u32 ncsi_cmd_csum_errs; + u32 ncsi_rx_pkts; + u32 ncsi_tx_pkts; + u32 ncsi_tx_aen_pkts; + u32 pt_tx_pkts; + u32 pt_tx_dropped; + u32 pt_tx_channel_err; + u32 pt_tx_us_err; + u32 pt_rx_pkts; + u32 pt_rx_dropped; + u32 pt_rx_channel_err; + u32 pt_rx_us_err; + u32 pt_rx_os_err; +}; + +struct ncsi_package; + +struct ncsi_channel { + unsigned char id; + int state; + bool reconfigure_needed; + spinlock_t lock; + struct ncsi_package *package; + struct ncsi_channel_version version; + struct ncsi_channel_cap caps[6]; + struct ncsi_channel_mode modes[8]; + struct ncsi_channel_mac_filter mac_filter; + struct ncsi_channel_vlan_filter vlan_filter; + struct ncsi_channel_stats stats; + struct { + struct timer_list timer; + bool enabled; + unsigned int state; + } monitor; + struct list_head node; + struct list_head link; +}; + +struct ncsi_dev_priv; + +struct ncsi_package { + unsigned char id; + unsigned char uuid[16]; + struct ncsi_dev_priv *ndp; + spinlock_t lock; + unsigned int channel_num; + struct list_head channels; + struct list_head node; + bool multi_channel; + u32 channel_whitelist; + struct ncsi_channel *preferred_channel; +}; + +struct ncsi_request { + unsigned 
char id; + bool used; + unsigned int flags; + struct ncsi_dev_priv *ndp; + struct sk_buff *cmd; + struct sk_buff *rsp; + struct timer_list timer; + bool enabled; + u32 snd_seq; + u32 snd_portid; + struct nlmsghdr nlhdr; +}; + +struct ncsi_dev_priv { + struct ncsi_dev ndev; + unsigned int flags; + unsigned int gma_flag; + spinlock_t lock; + unsigned int package_probe_id; + unsigned int package_num; + struct list_head packages; + struct ncsi_channel *hot_channel; + struct ncsi_request requests[256]; + unsigned int request_id; + unsigned int pending_req_num; + struct ncsi_package *active_package; + struct ncsi_channel *active_channel; + struct list_head channel_queue; + struct work_struct work; + struct packet_type ptype; + struct list_head node; + struct list_head vlan_vids; + bool multi_package; + bool mlx_multi_host; + u32 package_whitelist; +}; + +struct ncsi_cmd_arg { + struct ncsi_dev_priv *ndp; + unsigned char type; + unsigned char id; + unsigned char package; + unsigned char channel; + short unsigned int payload; + unsigned int req_flags; + union { + unsigned char bytes[16]; + short unsigned int words[8]; + unsigned int dwords[4]; + }; + unsigned char *data; + struct genl_info *info; +}; + +struct ncsi_pkt_hdr { + unsigned char mc_id; + unsigned char revision; + unsigned char reserved; + unsigned char id; + unsigned char type; + unsigned char channel; + __be16 length; + __be32 reserved1[2]; +}; + +struct ncsi_cmd_pkt_hdr { + struct ncsi_pkt_hdr common; +}; + +struct ncsi_cmd_pkt { + struct ncsi_cmd_pkt_hdr cmd; + __be32 checksum; + unsigned char pad[26]; +}; + +struct ncsi_cmd_sp_pkt { + struct ncsi_cmd_pkt_hdr cmd; + unsigned char reserved[3]; + unsigned char hw_arbitration; + __be32 checksum; + unsigned char pad[22]; +}; + +struct ncsi_cmd_dc_pkt { + struct ncsi_cmd_pkt_hdr cmd; + unsigned char reserved[3]; + unsigned char ald; + __be32 checksum; + unsigned char pad[22]; +}; + +struct ncsi_cmd_rc_pkt { + struct ncsi_cmd_pkt_hdr cmd; + __be32 reserved; + __be32 checksum; + unsigned char pad[22]; +}; + +struct ncsi_cmd_ae_pkt { + struct ncsi_cmd_pkt_hdr cmd; + unsigned char reserved[3]; + unsigned char mc_id; + __be32 mode; + __be32 checksum; + unsigned char pad[18]; +}; + +struct ncsi_cmd_sl_pkt { + struct ncsi_cmd_pkt_hdr cmd; + __be32 mode; + __be32 oem_mode; + __be32 checksum; + unsigned char pad[18]; +}; + +struct ncsi_cmd_svf_pkt { + struct ncsi_cmd_pkt_hdr cmd; + __be16 reserved; + __be16 vlan; + __be16 reserved1; + unsigned char index; + unsigned char enable; + __be32 checksum; + unsigned char pad[18]; +}; + +struct ncsi_cmd_ev_pkt { + struct ncsi_cmd_pkt_hdr cmd; + unsigned char reserved[3]; + unsigned char mode; + __be32 checksum; + unsigned char pad[22]; +}; + +struct ncsi_cmd_sma_pkt { + struct ncsi_cmd_pkt_hdr cmd; + unsigned char mac[6]; + unsigned char index; + unsigned char at_e; + __be32 checksum; + unsigned char pad[18]; +}; + +struct ncsi_cmd_ebf_pkt { + struct ncsi_cmd_pkt_hdr cmd; + __be32 mode; + __be32 checksum; + unsigned char pad[22]; +}; + +struct ncsi_cmd_egmf_pkt { + struct ncsi_cmd_pkt_hdr cmd; + __be32 mode; + __be32 checksum; + unsigned char pad[22]; +}; + +struct ncsi_cmd_snfc_pkt { + struct ncsi_cmd_pkt_hdr cmd; + unsigned char reserved[3]; + unsigned char mode; + __be32 checksum; + unsigned char pad[22]; +}; + +struct ncsi_cmd_oem_pkt { + struct ncsi_cmd_pkt_hdr cmd; + __be32 mfr_id; + unsigned char data[0]; +}; + +struct ncsi_cmd_handler { + unsigned char type; + int payload; + int (*handler)(struct sk_buff *, struct ncsi_cmd_arg *); +}; + +enum { + 
NCSI_CAP_GENERIC_HWA = 1, + NCSI_CAP_GENERIC_HDS = 2, + NCSI_CAP_GENERIC_FC = 4, + NCSI_CAP_GENERIC_FC1 = 8, + NCSI_CAP_GENERIC_MC = 16, + NCSI_CAP_GENERIC_HWA_UNKNOWN = 0, + NCSI_CAP_GENERIC_HWA_SUPPORT = 32, + NCSI_CAP_GENERIC_HWA_NOT_SUPPORT = 64, + NCSI_CAP_GENERIC_HWA_RESERVED = 96, + NCSI_CAP_GENERIC_HWA_MASK = 96, + NCSI_CAP_GENERIC_MASK = 127, + NCSI_CAP_BC_ARP = 1, + NCSI_CAP_BC_DHCPC = 2, + NCSI_CAP_BC_DHCPS = 4, + NCSI_CAP_BC_NETBIOS = 8, + NCSI_CAP_BC_MASK = 15, + NCSI_CAP_MC_IPV6_NEIGHBOR = 1, + NCSI_CAP_MC_IPV6_ROUTER = 2, + NCSI_CAP_MC_DHCPV6_RELAY = 4, + NCSI_CAP_MC_DHCPV6_WELL_KNOWN = 8, + NCSI_CAP_MC_IPV6_MLD = 16, + NCSI_CAP_MC_IPV6_NEIGHBOR_S = 32, + NCSI_CAP_MC_MASK = 63, + NCSI_CAP_AEN_LSC = 1, + NCSI_CAP_AEN_CR = 2, + NCSI_CAP_AEN_HDS = 4, + NCSI_CAP_AEN_MASK = 7, + NCSI_CAP_VLAN_ONLY = 1, + NCSI_CAP_VLAN_NO = 2, + NCSI_CAP_VLAN_ANY = 4, + NCSI_CAP_VLAN_MASK = 7, +}; + +struct ncsi_rsp_pkt_hdr { + struct ncsi_pkt_hdr common; + __be16 code; + __be16 reason; +}; + +struct ncsi_rsp_pkt { + struct ncsi_rsp_pkt_hdr rsp; + __be32 checksum; + unsigned char pad[22]; +}; + +struct ncsi_rsp_oem_pkt { + struct ncsi_rsp_pkt_hdr rsp; + __be32 mfr_id; + unsigned char data[0]; +}; + +struct ncsi_rsp_oem_mlx_pkt { + unsigned char cmd_rev; + unsigned char cmd; + unsigned char param; + unsigned char optional; + unsigned char data[0]; +}; + +struct ncsi_rsp_oem_bcm_pkt { + unsigned char ver; + unsigned char type; + __be16 len; + unsigned char data[0]; +}; + +struct ncsi_rsp_gls_pkt { + struct ncsi_rsp_pkt_hdr rsp; + __be32 status; + __be32 other; + __be32 oem_status; + __be32 checksum; + unsigned char pad[10]; +}; + +struct ncsi_rsp_gvi_pkt { + struct ncsi_rsp_pkt_hdr rsp; + __be32 ncsi_version; + unsigned char reserved[3]; + unsigned char alpha2; + unsigned char fw_name[12]; + __be32 fw_version; + __be16 pci_ids[4]; + __be32 mf_id; + __be32 checksum; +}; + +struct ncsi_rsp_gc_pkt { + struct ncsi_rsp_pkt_hdr rsp; + __be32 cap; + __be32 bc_cap; + __be32 mc_cap; + __be32 buf_cap; + __be32 aen_cap; + unsigned char vlan_cnt; + unsigned char mixed_cnt; + unsigned char mc_cnt; + unsigned char uc_cnt; + unsigned char reserved[2]; + unsigned char vlan_mode; + unsigned char channel_cnt; + __be32 checksum; +}; + +struct ncsi_rsp_gp_pkt { + struct ncsi_rsp_pkt_hdr rsp; + unsigned char mac_cnt; + unsigned char reserved[2]; + unsigned char mac_enable; + unsigned char vlan_cnt; + unsigned char reserved1; + __be16 vlan_enable; + __be32 link_mode; + __be32 bc_mode; + __be32 valid_modes; + unsigned char vlan_mode; + unsigned char fc_mode; + unsigned char reserved2[2]; + __be32 aen_mode; + unsigned char mac[6]; + __be16 vlan; + __be32 checksum; +}; + +struct ncsi_rsp_gcps_pkt { + struct ncsi_rsp_pkt_hdr rsp; + __be32 cnt_hi; + __be32 cnt_lo; + __be32 rx_bytes; + __be32 tx_bytes; + __be32 rx_uc_pkts; + __be32 rx_mc_pkts; + __be32 rx_bc_pkts; + __be32 tx_uc_pkts; + __be32 tx_mc_pkts; + __be32 tx_bc_pkts; + __be32 fcs_err; + __be32 align_err; + __be32 false_carrier; + __be32 runt_pkts; + __be32 jabber_pkts; + __be32 rx_pause_xon; + __be32 rx_pause_xoff; + __be32 tx_pause_xon; + __be32 tx_pause_xoff; + __be32 tx_s_collision; + __be32 tx_m_collision; + __be32 l_collision; + __be32 e_collision; + __be32 rx_ctl_frames; + __be32 rx_64_frames; + __be32 rx_127_frames; + __be32 rx_255_frames; + __be32 rx_511_frames; + __be32 rx_1023_frames; + __be32 rx_1522_frames; + __be32 rx_9022_frames; + __be32 tx_64_frames; + __be32 tx_127_frames; + __be32 tx_255_frames; + __be32 tx_511_frames; + __be32 tx_1023_frames; + 
__be32 tx_1522_frames; + __be32 tx_9022_frames; + __be32 rx_valid_bytes; + __be32 rx_runt_pkts; + __be32 rx_jabber_pkts; + __be32 checksum; +}; + +struct ncsi_rsp_gns_pkt { + struct ncsi_rsp_pkt_hdr rsp; + __be32 rx_cmds; + __be32 dropped_cmds; + __be32 cmd_type_errs; + __be32 cmd_csum_errs; + __be32 rx_pkts; + __be32 tx_pkts; + __be32 tx_aen_pkts; + __be32 checksum; +}; + +struct ncsi_rsp_gnpts_pkt { + struct ncsi_rsp_pkt_hdr rsp; + __be32 tx_pkts; + __be32 tx_dropped; + __be32 tx_channel_err; + __be32 tx_us_err; + __be32 rx_pkts; + __be32 rx_dropped; + __be32 rx_channel_err; + __be32 rx_us_err; + __be32 rx_os_err; + __be32 checksum; +}; + +struct ncsi_rsp_gps_pkt { + struct ncsi_rsp_pkt_hdr rsp; + __be32 status; + __be32 checksum; +}; + +struct ncsi_rsp_gpuuid_pkt { + struct ncsi_rsp_pkt_hdr rsp; + unsigned char uuid[16]; + __be32 checksum; +}; + +struct ncsi_rsp_oem_handler { + unsigned int mfr_id; + int (*handler)(struct ncsi_request *); +}; + +struct ncsi_rsp_handler { + unsigned char type; + int payload; + int (*handler)(struct ncsi_request *); +}; + +struct ncsi_aen_pkt_hdr { + struct ncsi_pkt_hdr common; + unsigned char reserved2[3]; + unsigned char type; +}; + +struct ncsi_aen_lsc_pkt { + struct ncsi_aen_pkt_hdr aen; + __be32 status; + __be32 oem_status; + __be32 checksum; + unsigned char pad[14]; +}; + +struct ncsi_aen_hncdsc_pkt { + struct ncsi_aen_pkt_hdr aen; + __be32 status; + __be32 checksum; + unsigned char pad[18]; +}; + +struct ncsi_aen_handler { + unsigned char type; + int payload; + int (*handler)(struct ncsi_dev_priv *, struct ncsi_aen_pkt_hdr *); +}; + +enum { + ncsi_dev_state_registered = 0, + ncsi_dev_state_functional = 256, + ncsi_dev_state_probe = 512, + ncsi_dev_state_config = 768, + ncsi_dev_state_suspend = 1024, +}; + +enum { + MLX_MC_RBT_SUPPORT = 1, + MLX_MC_RBT_AVL = 8, +}; + +enum { + ncsi_dev_state_major = 65280, + ncsi_dev_state_minor = 255, + ncsi_dev_state_probe_deselect = 513, + ncsi_dev_state_probe_package = 514, + ncsi_dev_state_probe_channel = 515, + ncsi_dev_state_probe_mlx_gma = 516, + ncsi_dev_state_probe_mlx_smaf = 517, + ncsi_dev_state_probe_cis = 518, + ncsi_dev_state_probe_gvi = 519, + ncsi_dev_state_probe_gc = 520, + ncsi_dev_state_probe_gls = 521, + ncsi_dev_state_probe_dp = 522, + ncsi_dev_state_config_sp = 769, + ncsi_dev_state_config_cis = 770, + ncsi_dev_state_config_oem_gma = 771, + ncsi_dev_state_config_clear_vids = 772, + ncsi_dev_state_config_svf = 773, + ncsi_dev_state_config_ev = 774, + ncsi_dev_state_config_sma = 775, + ncsi_dev_state_config_ebf = 776, + ncsi_dev_state_config_dgmf = 777, + ncsi_dev_state_config_ecnt = 778, + ncsi_dev_state_config_ec = 779, + ncsi_dev_state_config_ae = 780, + ncsi_dev_state_config_gls = 781, + ncsi_dev_state_config_done = 782, + ncsi_dev_state_suspend_select = 1025, + ncsi_dev_state_suspend_gls = 1026, + ncsi_dev_state_suspend_dcnt = 1027, + ncsi_dev_state_suspend_dc = 1028, + ncsi_dev_state_suspend_deselect = 1029, + ncsi_dev_state_suspend_done = 1030, +}; + +struct vlan_vid { + struct list_head list; + __be16 proto; + u16 vid; +}; + +struct ncsi_oem_gma_handler { + unsigned int mfr_id; + int (*handler)(struct ncsi_cmd_arg *); +}; + +enum ncsi_nl_commands { + NCSI_CMD_UNSPEC = 0, + NCSI_CMD_PKG_INFO = 1, + NCSI_CMD_SET_INTERFACE = 2, + NCSI_CMD_CLEAR_INTERFACE = 3, + NCSI_CMD_SEND_CMD = 4, + NCSI_CMD_SET_PACKAGE_MASK = 5, + NCSI_CMD_SET_CHANNEL_MASK = 6, + __NCSI_CMD_AFTER_LAST = 7, + NCSI_CMD_MAX = 6, +}; + +enum ncsi_nl_attrs { + NCSI_ATTR_UNSPEC = 0, + NCSI_ATTR_IFINDEX = 1, + 
NCSI_ATTR_PACKAGE_LIST = 2, + NCSI_ATTR_PACKAGE_ID = 3, + NCSI_ATTR_CHANNEL_ID = 4, + NCSI_ATTR_DATA = 5, + NCSI_ATTR_MULTI_FLAG = 6, + NCSI_ATTR_PACKAGE_MASK = 7, + NCSI_ATTR_CHANNEL_MASK = 8, + __NCSI_ATTR_AFTER_LAST = 9, + NCSI_ATTR_MAX = 8, +}; + +enum ncsi_nl_pkg_attrs { + NCSI_PKG_ATTR_UNSPEC = 0, + NCSI_PKG_ATTR = 1, + NCSI_PKG_ATTR_ID = 2, + NCSI_PKG_ATTR_FORCED = 3, + NCSI_PKG_ATTR_CHANNEL_LIST = 4, + __NCSI_PKG_ATTR_AFTER_LAST = 5, + NCSI_PKG_ATTR_MAX = 4, +}; + +enum ncsi_nl_channel_attrs { + NCSI_CHANNEL_ATTR_UNSPEC = 0, + NCSI_CHANNEL_ATTR = 1, + NCSI_CHANNEL_ATTR_ID = 2, + NCSI_CHANNEL_ATTR_VERSION_MAJOR = 3, + NCSI_CHANNEL_ATTR_VERSION_MINOR = 4, + NCSI_CHANNEL_ATTR_VERSION_STR = 5, + NCSI_CHANNEL_ATTR_LINK_STATE = 6, + NCSI_CHANNEL_ATTR_ACTIVE = 7, + NCSI_CHANNEL_ATTR_FORCED = 8, + NCSI_CHANNEL_ATTR_VLAN_LIST = 9, + NCSI_CHANNEL_ATTR_VLAN_ID = 10, + __NCSI_CHANNEL_ATTR_AFTER_LAST = 11, + NCSI_CHANNEL_ATTR_MAX = 10, +}; + +struct sockaddr_xdp { + __u16 sxdp_family; + __u16 sxdp_flags; + __u32 sxdp_ifindex; + __u32 sxdp_queue_id; + __u32 sxdp_shared_umem_fd; +}; + +struct xdp_ring_offset { + __u64 producer; + __u64 consumer; + __u64 desc; + __u64 flags; +}; + +struct xdp_mmap_offsets { + struct xdp_ring_offset rx; + struct xdp_ring_offset tx; + struct xdp_ring_offset fr; + struct xdp_ring_offset cr; +}; + +struct xdp_umem_reg { + __u64 addr; + __u64 len; + __u32 chunk_size; + __u32 headroom; + __u32 flags; +}; + +struct xdp_statistics { + __u64 rx_dropped; + __u64 rx_invalid_descs; + __u64 tx_invalid_descs; + __u64 rx_ring_full; + __u64 rx_fill_ring_empty_descs; + __u64 tx_ring_empty_descs; +}; + +struct xdp_options { + __u32 flags; +}; + +struct xdp_desc { + __u64 addr; + __u32 len; + __u32 options; +}; + +struct xdp_ring; + +struct xsk_queue { + u32 ring_mask; + u32 nentries; + u32 cached_prod; + u32 cached_cons; + struct xdp_ring *ring; + u64 invalid_descs; + u64 queue_empty_descs; +}; + +struct xdp_ring_offset_v1 { + __u64 producer; + __u64 consumer; + __u64 desc; +}; + +struct xdp_mmap_offsets_v1 { + struct xdp_ring_offset_v1 rx; + struct xdp_ring_offset_v1 tx; + struct xdp_ring_offset_v1 fr; + struct xdp_ring_offset_v1 cr; +}; + +struct xsk_map_node { + struct list_head node; + struct xsk_map *map; + struct xdp_sock **map_entry; +}; + +struct xdp_ring { + u32 producer; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + u32 pad; + long: 32; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + u32 consumer; + u32 flags; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; + long: 64; +}; + +struct xdp_rxtx_ring { + struct xdp_ring ptrs; + struct xdp_desc desc[0]; +}; + +struct xdp_umem_ring { + struct xdp_ring ptrs; + u64 desc[0]; +}; + +struct xsk_dma_map { + dma_addr_t *dma_pages; + struct device *dev; + struct net_device *netdev; + refcount_t users; + struct list_head list; + u32 dma_pages_cnt; + bool dma_need_sync; +}; + +struct xdp_diag_req { + __u8 sdiag_family; + __u8 sdiag_protocol; + __u16 pad; + __u32 xdiag_ino; + __u32 xdiag_show; + __u32 xdiag_cookie[2]; +}; + +struct xdp_diag_msg { + __u8 xdiag_family; + __u8 xdiag_type; + __u16 pad; + __u32 xdiag_ino; + __u32 xdiag_cookie[2]; +}; + +enum { + XDP_DIAG_NONE = 0, + XDP_DIAG_INFO = 1, + XDP_DIAG_UID = 2, + XDP_DIAG_RX_RING = 3, + XDP_DIAG_TX_RING = 4, + XDP_DIAG_UMEM = 5, + XDP_DIAG_UMEM_FILL_RING = 6, + XDP_DIAG_UMEM_COMPLETION_RING = 7, + XDP_DIAG_MEMINFO = 8, + XDP_DIAG_STATS = 9, + __XDP_DIAG_MAX = 10, +}; + 
+struct xdp_diag_info { + __u32 ifindex; + __u32 queue_id; +}; + +struct xdp_diag_ring { + __u32 entries; +}; + +struct xdp_diag_umem { + __u64 size; + __u32 id; + __u32 num_pages; + __u32 chunk_size; + __u32 headroom; + __u32 ifindex; + __u32 queue_id; + __u32 flags; + __u32 refs; +}; + +struct xdp_diag_stats { + __u64 n_rx_dropped; + __u64 n_rx_invalid; + __u64 n_rx_full; + __u64 n_fill_ring_empty; + __u64 n_tx_invalid; + __u64 n_tx_ring_empty; +}; + +struct mptcp_mib { + long unsigned int mibs[23]; +}; + +struct mptcp_options_received { + u64 sndr_key; + u64 rcvr_key; + u64 data_ack; + u64 data_seq; + u32 subflow_seq; + u16 data_len; + u16 mp_capable: 1; + u16 mp_join: 1; + u16 dss: 1; + u16 add_addr: 1; + u16 rm_addr: 1; + u16 family: 4; + u16 echo: 1; + u16 backup: 1; + u32 token; + u32 nonce; + u64 thmac; + u8 hmac[20]; + u8 join_id; + u8 use_map: 1; + u8 dsn64: 1; + u8 data_fin: 1; + u8 use_ack: 1; + u8 ack64: 1; + u8 mpc_map: 1; + u8 __unused: 2; + u8 addr_id; + u8 rm_id; + union { + struct in_addr addr; + struct in6_addr addr6; + }; + u64 ahmac; + u16 port; +}; + +struct mptcp_addr_info { + sa_family_t family; + __be16 port; + u8 id; + u8 flags; + int ifindex; + union { + struct in_addr addr; + struct in6_addr addr6; + }; +}; + +enum mptcp_pm_status { + MPTCP_PM_ADD_ADDR_RECEIVED = 0, + MPTCP_PM_RM_ADDR_RECEIVED = 1, + MPTCP_PM_ESTABLISHED = 2, + MPTCP_PM_SUBFLOW_ESTABLISHED = 3, +}; + +struct mptcp_pm_data { + struct mptcp_addr_info local; + struct mptcp_addr_info remote; + struct list_head anno_list; + spinlock_t lock; + bool add_addr_signal; + bool rm_addr_signal; + bool server_side; + bool work_pending; + bool accept_addr; + bool accept_subflow; + bool add_addr_echo; + u8 add_addr_signaled; + u8 add_addr_accepted; + u8 local_addr_used; + u8 subflows; + u8 add_addr_signal_max; + u8 add_addr_accept_max; + u8 local_addr_max; + u8 subflows_max; + u8 status; + u8 rm_id; +}; + +struct mptcp_data_frag { + struct list_head list; + u64 data_seq; + int data_len; + int offset; + int overhead; + struct page *page; +}; + +struct mptcp_sock { + struct inet_connection_sock sk; + u64 local_key; + u64 remote_key; + u64 write_seq; + u64 ack_seq; + u64 rcv_data_fin_seq; + struct sock *last_snd; + int snd_burst; + atomic64_t snd_una; + long unsigned int timer_ival; + u32 token; + long unsigned int flags; + bool can_ack; + bool fully_established; + bool rcv_data_fin; + bool snd_data_fin_enable; + bool use_64bit_ack; + spinlock_t join_list_lock; + struct work_struct work; + struct sk_buff *ooo_last_skb; + struct rb_root out_of_order_queue; + struct list_head conn_list; + struct list_head rtx_queue; + struct list_head join_list; + struct skb_ext *cached_ext; + struct socket *subflow; + struct sock *first; + struct mptcp_pm_data pm; + struct { + u32 space; + u32 copied; + u64 time; + u64 rtt_us; + } rcvq_space; +}; + +struct mptcp_subflow_request_sock { + struct tcp_request_sock sk; + u16 mp_capable: 1; + u16 mp_join: 1; + u16 backup: 1; + u8 local_id; + u8 remote_id; + u64 local_key; + u64 idsn; + u32 token; + u32 ssn_offset; + u64 thmac; + u32 local_nonce; + u32 remote_nonce; + struct mptcp_sock *msk; + struct hlist_nulls_node token_node; +}; + +enum mptcp_data_avail { + MPTCP_SUBFLOW_NODATA = 0, + MPTCP_SUBFLOW_DATA_AVAIL = 1, + MPTCP_SUBFLOW_OOO_DATA = 2, +}; + +struct mptcp_subflow_context { + struct list_head node; + u64 local_key; + u64 remote_key; + u64 idsn; + u64 map_seq; + u32 snd_isn; + u32 token; + u32 rel_write_seq; + u32 map_subflow_seq; + u32 ssn_offset; + u32 map_data_len; + u32 
request_mptcp: 1; + u32 request_join: 1; + u32 request_bkup: 1; + u32 mp_capable: 1; + u32 mp_join: 1; + u32 fully_established: 1; + u32 pm_notified: 1; + u32 conn_finished: 1; + u32 map_valid: 1; + u32 mpc_map: 1; + u32 backup: 1; + u32 rx_eof: 1; + u32 can_ack: 1; + enum mptcp_data_avail data_avail; + u32 remote_nonce; + u64 thmac; + u32 local_nonce; + u32 remote_token; + u8 hmac[20]; + u8 local_id; + u8 remote_id; + struct sock *tcp_sock; + struct sock *conn; + const struct inet_connection_sock_af_ops *icsk_af_ops; + void (*tcp_data_ready)(struct sock *); + void (*tcp_state_change)(struct sock *); + void (*tcp_write_space)(struct sock *); + struct callback_head rcu; +}; + +enum linux_mptcp_mib_field { + MPTCP_MIB_NUM = 0, + MPTCP_MIB_MPCAPABLEPASSIVE = 1, + MPTCP_MIB_MPCAPABLEPASSIVEACK = 2, + MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK = 3, + MPTCP_MIB_MPCAPABLEACTIVEFALLBACK = 4, + MPTCP_MIB_RETRANSSEGS = 5, + MPTCP_MIB_JOINNOTOKEN = 6, + MPTCP_MIB_JOINSYNRX = 7, + MPTCP_MIB_JOINSYNACKRX = 8, + MPTCP_MIB_JOINSYNACKMAC = 9, + MPTCP_MIB_JOINACKRX = 10, + MPTCP_MIB_JOINACKMAC = 11, + MPTCP_MIB_DSSNOMATCH = 12, + MPTCP_MIB_INFINITEMAPRX = 13, + MPTCP_MIB_OFOQUEUETAIL = 14, + MPTCP_MIB_OFOQUEUE = 15, + MPTCP_MIB_OFOMERGE = 16, + MPTCP_MIB_NODSSWINDOW = 17, + MPTCP_MIB_DUPDATA = 18, + MPTCP_MIB_ADDADDR = 19, + MPTCP_MIB_ECHOADD = 20, + MPTCP_MIB_RMADDR = 21, + MPTCP_MIB_RMSUBFLOW = 22, + __MPTCP_MIB_MAX = 23, +}; + +struct mptcp_skb_cb { + u64 map_seq; + u64 end_seq; + u32 offset; +}; + +struct subflow_send_info { + struct sock *ssk; + u64 ratio; +}; + +enum mapping_status { + MAPPING_OK = 0, + MAPPING_INVALID = 1, + MAPPING_EMPTY = 2, + MAPPING_DATA_FIN = 3, + MAPPING_DUMMY = 4, +}; + +struct token_bucket { + spinlock_t lock; + int chain_len; + struct hlist_nulls_head req_chain; + struct hlist_nulls_head msk_chain; +}; + +struct mptcp_pernet { + struct ctl_table_header *ctl_table_hdr; + int mptcp_enabled; +}; + +enum { + INET_ULP_INFO_UNSPEC = 0, + INET_ULP_INFO_NAME = 1, + INET_ULP_INFO_TLS = 2, + INET_ULP_INFO_MPTCP = 3, + __INET_ULP_INFO_MAX = 4, +}; + +enum { + MPTCP_SUBFLOW_ATTR_UNSPEC = 0, + MPTCP_SUBFLOW_ATTR_TOKEN_REM = 1, + MPTCP_SUBFLOW_ATTR_TOKEN_LOC = 2, + MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ = 3, + MPTCP_SUBFLOW_ATTR_MAP_SEQ = 4, + MPTCP_SUBFLOW_ATTR_MAP_SFSEQ = 5, + MPTCP_SUBFLOW_ATTR_SSN_OFFSET = 6, + MPTCP_SUBFLOW_ATTR_MAP_DATALEN = 7, + MPTCP_SUBFLOW_ATTR_FLAGS = 8, + MPTCP_SUBFLOW_ATTR_ID_REM = 9, + MPTCP_SUBFLOW_ATTR_ID_LOC = 10, + MPTCP_SUBFLOW_ATTR_PAD = 11, + __MPTCP_SUBFLOW_ATTR_MAX = 12, +}; + +enum { + MPTCP_PM_ATTR_UNSPEC = 0, + MPTCP_PM_ATTR_ADDR = 1, + MPTCP_PM_ATTR_RCV_ADD_ADDRS = 2, + MPTCP_PM_ATTR_SUBFLOWS = 3, + __MPTCP_PM_ATTR_MAX = 4, +}; + +enum { + MPTCP_PM_ADDR_ATTR_UNSPEC = 0, + MPTCP_PM_ADDR_ATTR_FAMILY = 1, + MPTCP_PM_ADDR_ATTR_ID = 2, + MPTCP_PM_ADDR_ATTR_ADDR4 = 3, + MPTCP_PM_ADDR_ATTR_ADDR6 = 4, + MPTCP_PM_ADDR_ATTR_PORT = 5, + MPTCP_PM_ADDR_ATTR_FLAGS = 6, + MPTCP_PM_ADDR_ATTR_IF_IDX = 7, + __MPTCP_PM_ADDR_ATTR_MAX = 8, +}; + +enum { + MPTCP_PM_CMD_UNSPEC = 0, + MPTCP_PM_CMD_ADD_ADDR = 1, + MPTCP_PM_CMD_DEL_ADDR = 2, + MPTCP_PM_CMD_GET_ADDR = 3, + MPTCP_PM_CMD_FLUSH_ADDRS = 4, + MPTCP_PM_CMD_SET_LIMITS = 5, + MPTCP_PM_CMD_GET_LIMITS = 6, + __MPTCP_PM_CMD_AFTER_LAST = 7, +}; + +struct mptcp_pm_addr_entry { + struct list_head list; + struct mptcp_addr_info addr; + struct callback_head rcu; +}; + +struct mptcp_pm_add_entry { + struct list_head list; + struct mptcp_addr_info addr; + struct timer_list add_timer; + struct mptcp_sock *sock; + u8 retrans_times; +}; + 
+struct pm_nl_pernet { + spinlock_t lock; + struct list_head local_addr_list; + unsigned int addrs; + unsigned int add_addr_signal_max; + unsigned int add_addr_accept_max; + unsigned int local_addr_max; + unsigned int subflows_max; + unsigned int next_id; +}; + +struct join_entry { + u32 token; + u32 remote_nonce; + u32 local_nonce; + u8 join_id; + u8 local_id; + u8 backup; + u8 valid; +}; + +typedef struct { + u32 version; + u32 length; + u64 memory_protection_attribute; +} efi_properties_table_t; + +enum efi_secureboot_mode { + efi_secureboot_mode_unset = 0, + efi_secureboot_mode_unknown = 1, + efi_secureboot_mode_disabled = 2, + efi_secureboot_mode_enabled = 3, +}; + +typedef union { + struct { + u32 revision; + efi_handle_t parent_handle; + efi_system_table_t *system_table; + efi_handle_t device_handle; + void *file_path; + void *reserved; + u32 load_options_size; + void *load_options; + void *image_base; + __u64 image_size; + unsigned int image_code_type; + unsigned int image_data_type; + efi_status_t (*unload)(efi_handle_t); + }; + struct { + u32 revision; + u32 parent_handle; + u32 system_table; + u32 device_handle; + u32 file_path; + u32 reserved; + u32 load_options_size; + u32 load_options; + u32 image_base; + __u64 image_size; + u32 image_code_type; + u32 image_data_type; + u32 unload; + } mixed_mode; +} efi_loaded_image_t; + +struct efi_boot_memmap { + efi_memory_desc_t **map; + long unsigned int *map_size; + long unsigned int *desc_size; + u32 *desc_ver; + long unsigned int *key_ptr; + long unsigned int *buff_size; +}; + +struct exit_boot_struct { + efi_memory_desc_t *runtime_map; + int *runtime_entry_count; + void *new_fdt_addr; +}; + +typedef struct { + u64 size; + u64 file_size; + u64 phys_size; + efi_time_t create_time; + efi_time_t last_access_time; + efi_time_t modification_time; + __u64 attribute; + efi_char16_t filename[0]; +} efi_file_info_t; + +struct efi_file_protocol; + +typedef struct efi_file_protocol efi_file_protocol_t; + +struct efi_file_protocol { + u64 revision; + efi_status_t (*open)(efi_file_protocol_t *, efi_file_protocol_t **, efi_char16_t *, u64, u64); + efi_status_t (*close)(efi_file_protocol_t *); + efi_status_t (*delete)(efi_file_protocol_t *); + efi_status_t (*read)(efi_file_protocol_t *, long unsigned int *, void *); + efi_status_t (*write)(efi_file_protocol_t *, long unsigned int, void *); + efi_status_t (*get_position)(efi_file_protocol_t *, u64 *); + efi_status_t (*set_position)(efi_file_protocol_t *, u64); + efi_status_t (*get_info)(efi_file_protocol_t *, efi_guid_t *, long unsigned int *, void *); + efi_status_t (*set_info)(efi_file_protocol_t *, efi_guid_t *, long unsigned int, void *); + efi_status_t (*flush)(efi_file_protocol_t *); +}; + +struct efi_simple_file_system_protocol; + +typedef struct efi_simple_file_system_protocol efi_simple_file_system_protocol_t; + +struct efi_simple_file_system_protocol { + u64 revision; + int (*open_volume)(efi_simple_file_system_protocol_t *, efi_file_protocol_t **); +}; + +struct finfo { + efi_file_info_t info; + efi_char16_t filename[256]; +}; + +typedef struct { + u32 red_mask; + u32 green_mask; + u32 blue_mask; + u32 reserved_mask; +} efi_pixel_bitmask_t; + +typedef struct { + u32 version; + u32 horizontal_resolution; + u32 vertical_resolution; + int pixel_format; + efi_pixel_bitmask_t pixel_information; + u32 pixels_per_scan_line; +} efi_graphics_output_mode_info_t; + +union efi_graphics_output_protocol_mode { + struct { + u32 max_mode; + u32 mode; + efi_graphics_output_mode_info_t *info; + long 
unsigned int size_of_info; + efi_physical_addr_t frame_buffer_base; + long unsigned int frame_buffer_size; + }; + struct { + u32 max_mode; + u32 mode; + u32 info; + u32 size_of_info; + u64 frame_buffer_base; + u32 frame_buffer_size; + } mixed_mode; +}; + +typedef union efi_graphics_output_protocol_mode efi_graphics_output_protocol_mode_t; + +union efi_graphics_output_protocol; + +typedef union efi_graphics_output_protocol efi_graphics_output_protocol_t; + +union efi_graphics_output_protocol { + struct { + efi_status_t (*query_mode)(efi_graphics_output_protocol_t *, u32, long unsigned int *, efi_graphics_output_mode_info_t **); + efi_status_t (*set_mode)(efi_graphics_output_protocol_t *, u32); + void *blt; + efi_graphics_output_protocol_mode_t *mode; + }; + struct { + u32 query_mode; + u32 set_mode; + u32 blt; + u32 mode; + } mixed_mode; +}; + +enum efi_cmdline_option { + EFI_CMDLINE_NONE = 0, + EFI_CMDLINE_MODE_NUM = 1, + EFI_CMDLINE_RES = 2, + EFI_CMDLINE_AUTO = 3, + EFI_CMDLINE_LIST = 4, +}; + +union efi_rng_protocol; + +typedef union efi_rng_protocol efi_rng_protocol_t; + +union efi_rng_protocol { + struct { + efi_status_t (*get_info)(efi_rng_protocol_t *, long unsigned int *, efi_guid_t *); + efi_status_t (*get_rng)(efi_rng_protocol_t *, efi_guid_t *, long unsigned int, u8 *); + }; + struct { + u32 get_info; + u32 get_rng; + } mixed_mode; +}; + +typedef u32 efi_tcg2_event_log_format; + +union efi_tcg2_protocol { + struct { + void *get_capability; + efi_status_t (*get_event_log)(efi_handle_t, efi_tcg2_event_log_format, efi_physical_addr_t *, efi_physical_addr_t *, efi_bool_t *); + void *hash_log_extend_event; + void *submit_command; + void *get_active_pcr_banks; + void *set_active_pcr_banks; + void *get_result_of_set_active_pcr_banks; + }; + struct { + u32 get_capability; + u32 get_event_log; + u32 hash_log_extend_event; + u32 submit_command; + u32 get_active_pcr_banks; + u32 set_active_pcr_banks; + u32 get_result_of_set_active_pcr_banks; + } mixed_mode; +}; + +typedef union efi_tcg2_protocol efi_tcg2_protocol_t; + +struct efi_vendor_dev_path { + struct efi_generic_dev_path header; + efi_guid_t vendorguid; + u8 vendordata[0]; +}; + +union efi_load_file_protocol; + +typedef union efi_load_file_protocol efi_load_file_protocol_t; + +union efi_load_file_protocol { + struct { + efi_status_t (*load_file)(efi_load_file_protocol_t *, efi_device_path_protocol_t *, bool, long unsigned int *, void *); + }; + struct { + u32 load_file; + } mixed_mode; +}; + +typedef union efi_load_file_protocol efi_load_file2_protocol_t; + +typedef struct { + u32 attributes; + u16 file_path_list_length; + u8 variable_data[0]; +} __attribute__((packed)) efi_load_option_t; + +typedef struct { + u32 attributes; + u16 file_path_list_length; + const efi_char16_t *description; + const efi_device_path_protocol_t *file_path_list; + size_t optional_data_size; + const void *optional_data; +} efi_load_option_unpacked_t; + +typedef efi_status_t (*efi_exit_boot_map_processing)(struct efi_boot_memmap *, void *); + +typedef enum { + EfiPciIoWidthUint8 = 0, + EfiPciIoWidthUint16 = 1, + EfiPciIoWidthUint32 = 2, + EfiPciIoWidthUint64 = 3, + EfiPciIoWidthFifoUint8 = 4, + EfiPciIoWidthFifoUint16 = 5, + EfiPciIoWidthFifoUint32 = 6, + EfiPciIoWidthFifoUint64 = 7, + EfiPciIoWidthFillUint8 = 8, + EfiPciIoWidthFillUint16 = 9, + EfiPciIoWidthFillUint32 = 10, + EfiPciIoWidthFillUint64 = 11, + EfiPciIoWidthMaximum = 12, +} EFI_PCI_IO_PROTOCOL_WIDTH; + +typedef struct { + u32 read; + u32 write; +} efi_pci_io_protocol_access_32_t; + +typedef 
struct { + void *read; + void *write; +} efi_pci_io_protocol_access_t; + +union efi_pci_io_protocol; + +typedef union efi_pci_io_protocol efi_pci_io_protocol_t; + +typedef efi_status_t (*efi_pci_io_protocol_cfg_t)(efi_pci_io_protocol_t *, EFI_PCI_IO_PROTOCOL_WIDTH, u32, long unsigned int, void *); + +typedef struct { + efi_pci_io_protocol_cfg_t read; + efi_pci_io_protocol_cfg_t write; +} efi_pci_io_protocol_config_access_t; + +union efi_pci_io_protocol { + struct { + void *poll_mem; + void *poll_io; + efi_pci_io_protocol_access_t mem; + efi_pci_io_protocol_access_t io; + efi_pci_io_protocol_config_access_t pci; + void *copy_mem; + void *map; + void *unmap; + void *allocate_buffer; + void *free_buffer; + void *flush; + efi_status_t (*get_location)(efi_pci_io_protocol_t *, long unsigned int *, long unsigned int *, long unsigned int *, long unsigned int *); + void *attributes; + void *get_bar_attributes; + void *set_bar_attributes; + uint64_t romsize; + void *romimage; + }; + struct { + u32 poll_mem; + u32 poll_io; + efi_pci_io_protocol_access_32_t mem; + efi_pci_io_protocol_access_32_t io; + efi_pci_io_protocol_access_32_t pci; + u32 copy_mem; + u32 map; + u32 unmap; + u32 allocate_buffer; + u32 free_buffer; + u32 flush; + u32 get_location; + u32 attributes; + u32 get_bar_attributes; + u32 set_bar_attributes; + u64 romsize; + u32 romimage; + } mixed_mode; +}; + +#ifndef BPF_NO_PRESERVE_ACCESS_INDEX +#pragma clang attribute pop +#endif + +#endif /* __VMLINUX_H__ */ diff --git a/pkg/plugin/lib/common/libbpf/_src/bpf.c b/pkg/plugin/lib/common/libbpf/_src/bpf.c new file mode 100644 index 000000000..a9d292c10 --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/bpf.c @@ -0,0 +1,1296 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * common eBPF ELF operations. + * + * Copyright (C) 2013-2015 Alexei Starovoitov + * Copyright (C) 2015 Wang Nan + * Copyright (C) 2015 Huawei Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License (not later!) + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "bpf.h" +#include "libbpf.h" +#include "libbpf_internal.h" + +/* + * When building perf, unistd.h is overridden. __NR_bpf is + * required to be defined explicitly. + */ +#ifndef __NR_bpf +# if defined(__i386__) +# define __NR_bpf 357 +# elif defined(__x86_64__) +# define __NR_bpf 321 +# elif defined(__aarch64__) +# define __NR_bpf 280 +# elif defined(__sparc__) +# define __NR_bpf 349 +# elif defined(__s390__) +# define __NR_bpf 351 +# elif defined(__arc__) +# define __NR_bpf 280 +# elif defined(__mips__) && defined(_ABIO32) +# define __NR_bpf 4355 +# elif defined(__mips__) && defined(_ABIN32) +# define __NR_bpf 6319 +# elif defined(__mips__) && defined(_ABI64) +# define __NR_bpf 5315 +# else +# error __NR_bpf not defined. libbpf does not support your arch. 
+# endif +#endif + +static inline __u64 ptr_to_u64(const void *ptr) +{ + return (__u64) (unsigned long) ptr; +} + +static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, + unsigned int size) +{ + return syscall(__NR_bpf, cmd, attr, size); +} + +static inline int sys_bpf_fd(enum bpf_cmd cmd, union bpf_attr *attr, + unsigned int size) +{ + int fd; + + fd = sys_bpf(cmd, attr, size); + return ensure_good_fd(fd); +} + +#define PROG_LOAD_ATTEMPTS 5 + +static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts) +{ + int fd; + + do { + fd = sys_bpf_fd(BPF_PROG_LOAD, attr, size); + } while (fd < 0 && errno == EAGAIN && --attempts > 0); + + return fd; +} + +/* Probe whether kernel switched from memlock-based (RLIMIT_MEMLOCK) to + * memcg-based memory accounting for BPF maps and progs. This was done in [0]. + * We use the support for bpf_ktime_get_coarse_ns() helper, which was added in + * the same 5.11 Linux release ([1]), to detect memcg-based accounting for BPF. + * + * [0] https://lore.kernel.org/bpf/20201201215900.3569844-1-guro@fb.com/ + * [1] d05512618056 ("bpf: Add bpf_ktime_get_coarse_ns helper") + */ +int probe_memcg_account(void) +{ + const size_t prog_load_attr_sz = offsetofend(union bpf_attr, attach_btf_obj_fd); + struct bpf_insn insns[] = { + BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns), + BPF_EXIT_INSN(), + }; + size_t insn_cnt = ARRAY_SIZE(insns); + union bpf_attr attr; + int prog_fd; + + /* attempt loading freplace trying to use custom BTF */ + memset(&attr, 0, prog_load_attr_sz); + attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; + attr.insns = ptr_to_u64(insns); + attr.insn_cnt = insn_cnt; + attr.license = ptr_to_u64("GPL"); + + prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, prog_load_attr_sz); + if (prog_fd >= 0) { + close(prog_fd); + return 1; + } + return 0; +} + +static bool memlock_bumped; +static rlim_t memlock_rlim = RLIM_INFINITY; + +int libbpf_set_memlock_rlim(size_t memlock_bytes) +{ + if (memlock_bumped) + return libbpf_err(-EBUSY); + + memlock_rlim = memlock_bytes; + return 0; +} + +int bump_rlimit_memlock(void) +{ + struct rlimit rlim; + + /* this the default in libbpf 1.0, but for now user has to opt-in explicitly */ + if (!(libbpf_mode & LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK)) + return 0; + + /* if kernel supports memcg-based accounting, skip bumping RLIMIT_MEMLOCK */ + if (memlock_bumped || kernel_supports(NULL, FEAT_MEMCG_ACCOUNT)) + return 0; + + memlock_bumped = true; + + /* zero memlock_rlim_max disables auto-bumping RLIMIT_MEMLOCK */ + if (memlock_rlim == 0) + return 0; + + rlim.rlim_cur = rlim.rlim_max = memlock_rlim; + if (setrlimit(RLIMIT_MEMLOCK, &rlim)) + return -errno; + + return 0; +} + +int bpf_map_create(enum bpf_map_type map_type, + const char *map_name, + __u32 key_size, + __u32 value_size, + __u32 max_entries, + const struct bpf_map_create_opts *opts) +{ + const size_t attr_sz = offsetofend(union bpf_attr, map_extra); + union bpf_attr attr; + int fd; + + bump_rlimit_memlock(); + + memset(&attr, 0, attr_sz); + + if (!OPTS_VALID(opts, bpf_map_create_opts)) + return libbpf_err(-EINVAL); + + attr.map_type = map_type; + if (map_name) + libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name)); + attr.key_size = key_size; + attr.value_size = value_size; + attr.max_entries = max_entries; + + attr.btf_fd = OPTS_GET(opts, btf_fd, 0); + attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0); + attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0); + attr.btf_vmlinux_value_type_id = OPTS_GET(opts, 
btf_vmlinux_value_type_id, 0); + + attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0); + attr.map_flags = OPTS_GET(opts, map_flags, 0); + attr.map_extra = OPTS_GET(opts, map_extra, 0); + attr.numa_node = OPTS_GET(opts, numa_node, 0); + attr.map_ifindex = OPTS_GET(opts, map_ifindex, 0); + + fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, attr_sz); + return libbpf_err_errno(fd); +} + +int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr) +{ + LIBBPF_OPTS(bpf_map_create_opts, p); + + p.map_flags = create_attr->map_flags; + p.numa_node = create_attr->numa_node; + p.btf_fd = create_attr->btf_fd; + p.btf_key_type_id = create_attr->btf_key_type_id; + p.btf_value_type_id = create_attr->btf_value_type_id; + p.map_ifindex = create_attr->map_ifindex; + if (create_attr->map_type == BPF_MAP_TYPE_STRUCT_OPS) + p.btf_vmlinux_value_type_id = create_attr->btf_vmlinux_value_type_id; + else + p.inner_map_fd = create_attr->inner_map_fd; + + return bpf_map_create(create_attr->map_type, create_attr->name, + create_attr->key_size, create_attr->value_size, + create_attr->max_entries, &p); +} + +int bpf_create_map_node(enum bpf_map_type map_type, const char *name, + int key_size, int value_size, int max_entries, + __u32 map_flags, int node) +{ + LIBBPF_OPTS(bpf_map_create_opts, opts); + + opts.map_flags = map_flags; + if (node >= 0) { + opts.numa_node = node; + opts.map_flags |= BPF_F_NUMA_NODE; + } + + return bpf_map_create(map_type, name, key_size, value_size, max_entries, &opts); +} + +int bpf_create_map(enum bpf_map_type map_type, int key_size, + int value_size, int max_entries, __u32 map_flags) +{ + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = map_flags); + + return bpf_map_create(map_type, NULL, key_size, value_size, max_entries, &opts); +} + +int bpf_create_map_name(enum bpf_map_type map_type, const char *name, + int key_size, int value_size, int max_entries, + __u32 map_flags) +{ + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = map_flags); + + return bpf_map_create(map_type, name, key_size, value_size, max_entries, &opts); +} + +int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name, + int key_size, int inner_map_fd, int max_entries, + __u32 map_flags, int node) +{ + LIBBPF_OPTS(bpf_map_create_opts, opts); + + opts.inner_map_fd = inner_map_fd; + opts.map_flags = map_flags; + if (node >= 0) { + opts.map_flags |= BPF_F_NUMA_NODE; + opts.numa_node = node; + } + + return bpf_map_create(map_type, name, key_size, 4, max_entries, &opts); +} + +int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name, + int key_size, int inner_map_fd, int max_entries, + __u32 map_flags) +{ + LIBBPF_OPTS(bpf_map_create_opts, opts, + .inner_map_fd = inner_map_fd, + .map_flags = map_flags, + ); + + return bpf_map_create(map_type, name, key_size, 4, max_entries, &opts); +} + +static void * +alloc_zero_tailing_info(const void *orecord, __u32 cnt, + __u32 actual_rec_size, __u32 expected_rec_size) +{ + __u64 info_len = (__u64)actual_rec_size * cnt; + void *info, *nrecord; + int i; + + info = malloc(info_len); + if (!info) + return NULL; + + /* zero out bytes kernel does not understand */ + nrecord = info; + for (i = 0; i < cnt; i++) { + memcpy(nrecord, orecord, expected_rec_size); + memset(nrecord + expected_rec_size, 0, + actual_rec_size - expected_rec_size); + orecord += actual_rec_size; + nrecord += actual_rec_size; + } + + return info; +} + +DEFAULT_VERSION(bpf_prog_load_v0_6_0, bpf_prog_load, LIBBPF_0.6.0) +int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type, + const char 
*prog_name, const char *license, + const struct bpf_insn *insns, size_t insn_cnt, + const struct bpf_prog_load_opts *opts) +{ + void *finfo = NULL, *linfo = NULL; + const char *func_info, *line_info; + __u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd; + __u32 func_info_rec_size, line_info_rec_size; + int fd, attempts; + union bpf_attr attr; + char *log_buf; + + bump_rlimit_memlock(); + + if (!OPTS_VALID(opts, bpf_prog_load_opts)) + return libbpf_err(-EINVAL); + + attempts = OPTS_GET(opts, attempts, 0); + if (attempts < 0) + return libbpf_err(-EINVAL); + if (attempts == 0) + attempts = PROG_LOAD_ATTEMPTS; + + memset(&attr, 0, sizeof(attr)); + + attr.prog_type = prog_type; + attr.expected_attach_type = OPTS_GET(opts, expected_attach_type, 0); + + attr.prog_btf_fd = OPTS_GET(opts, prog_btf_fd, 0); + attr.prog_flags = OPTS_GET(opts, prog_flags, 0); + attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0); + attr.kern_version = OPTS_GET(opts, kern_version, 0); + + if (prog_name) + libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name)); + attr.license = ptr_to_u64(license); + + if (insn_cnt > UINT_MAX) + return libbpf_err(-E2BIG); + + attr.insns = ptr_to_u64(insns); + attr.insn_cnt = (__u32)insn_cnt; + + attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0); + attach_btf_obj_fd = OPTS_GET(opts, attach_btf_obj_fd, 0); + + if (attach_prog_fd && attach_btf_obj_fd) + return libbpf_err(-EINVAL); + + attr.attach_btf_id = OPTS_GET(opts, attach_btf_id, 0); + if (attach_prog_fd) + attr.attach_prog_fd = attach_prog_fd; + else + attr.attach_btf_obj_fd = attach_btf_obj_fd; + + log_buf = OPTS_GET(opts, log_buf, NULL); + log_size = OPTS_GET(opts, log_size, 0); + log_level = OPTS_GET(opts, log_level, 0); + + if (!!log_buf != !!log_size) + return libbpf_err(-EINVAL); + if (log_level > (4 | 2 | 1)) + return libbpf_err(-EINVAL); + if (log_level && !log_buf) + return libbpf_err(-EINVAL); + + func_info_rec_size = OPTS_GET(opts, func_info_rec_size, 0); + func_info = OPTS_GET(opts, func_info, NULL); + attr.func_info_rec_size = func_info_rec_size; + attr.func_info = ptr_to_u64(func_info); + attr.func_info_cnt = OPTS_GET(opts, func_info_cnt, 0); + + line_info_rec_size = OPTS_GET(opts, line_info_rec_size, 0); + line_info = OPTS_GET(opts, line_info, NULL); + attr.line_info_rec_size = line_info_rec_size; + attr.line_info = ptr_to_u64(line_info); + attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0); + + attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL)); + + if (log_level) { + attr.log_buf = ptr_to_u64(log_buf); + attr.log_size = log_size; + attr.log_level = log_level; + } + + fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts); + if (fd >= 0) + return fd; + + /* After bpf_prog_load, the kernel may modify certain attributes + * to give user space a hint how to deal with loading failure. + * Check to see whether we can make some changes and load again. 
+ */ + while (errno == E2BIG && (!finfo || !linfo)) { + if (!finfo && attr.func_info_cnt && + attr.func_info_rec_size < func_info_rec_size) { + /* try with corrected func info records */ + finfo = alloc_zero_tailing_info(func_info, + attr.func_info_cnt, + func_info_rec_size, + attr.func_info_rec_size); + if (!finfo) { + errno = E2BIG; + goto done; + } + + attr.func_info = ptr_to_u64(finfo); + attr.func_info_rec_size = func_info_rec_size; + } else if (!linfo && attr.line_info_cnt && + attr.line_info_rec_size < line_info_rec_size) { + linfo = alloc_zero_tailing_info(line_info, + attr.line_info_cnt, + line_info_rec_size, + attr.line_info_rec_size); + if (!linfo) { + errno = E2BIG; + goto done; + } + + attr.line_info = ptr_to_u64(linfo); + attr.line_info_rec_size = line_info_rec_size; + } else { + break; + } + + fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts); + if (fd >= 0) + goto done; + } + + if (log_level == 0 && log_buf) { + /* log_level == 0 with non-NULL log_buf requires retrying on error + * with log_level == 1 and log_buf/log_buf_size set, to get details of + * failure + */ + attr.log_buf = ptr_to_u64(log_buf); + attr.log_size = log_size; + attr.log_level = 1; + + fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts); + } +done: + /* free() doesn't affect errno, so we don't need to restore it */ + free(finfo); + free(linfo); + return libbpf_err_errno(fd); +} + +__attribute__((alias("bpf_load_program_xattr2"))) +int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, + char *log_buf, size_t log_buf_sz); + +static int bpf_load_program_xattr2(const struct bpf_load_program_attr *load_attr, + char *log_buf, size_t log_buf_sz) +{ + LIBBPF_OPTS(bpf_prog_load_opts, p); + + if (!load_attr || !log_buf != !log_buf_sz) + return libbpf_err(-EINVAL); + + p.expected_attach_type = load_attr->expected_attach_type; + switch (load_attr->prog_type) { + case BPF_PROG_TYPE_STRUCT_OPS: + case BPF_PROG_TYPE_LSM: + p.attach_btf_id = load_attr->attach_btf_id; + break; + case BPF_PROG_TYPE_TRACING: + case BPF_PROG_TYPE_EXT: + p.attach_btf_id = load_attr->attach_btf_id; + p.attach_prog_fd = load_attr->attach_prog_fd; + break; + default: + p.prog_ifindex = load_attr->prog_ifindex; + p.kern_version = load_attr->kern_version; + } + p.log_level = load_attr->log_level; + p.log_buf = log_buf; + p.log_size = log_buf_sz; + p.prog_btf_fd = load_attr->prog_btf_fd; + p.func_info_rec_size = load_attr->func_info_rec_size; + p.func_info_cnt = load_attr->func_info_cnt; + p.func_info = load_attr->func_info; + p.line_info_rec_size = load_attr->line_info_rec_size; + p.line_info_cnt = load_attr->line_info_cnt; + p.line_info = load_attr->line_info; + p.prog_flags = load_attr->prog_flags; + + return bpf_prog_load(load_attr->prog_type, load_attr->name, load_attr->license, + load_attr->insns, load_attr->insns_cnt, &p); +} + +int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, + size_t insns_cnt, const char *license, + __u32 kern_version, char *log_buf, + size_t log_buf_sz) +{ + struct bpf_load_program_attr load_attr; + + memset(&load_attr, 0, sizeof(struct bpf_load_program_attr)); + load_attr.prog_type = type; + load_attr.expected_attach_type = 0; + load_attr.name = NULL; + load_attr.insns = insns; + load_attr.insns_cnt = insns_cnt; + load_attr.license = license; + load_attr.kern_version = kern_version; + + return bpf_load_program_xattr2(&load_attr, log_buf, log_buf_sz); +} + +int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns, + size_t insns_cnt, __u32 
prog_flags, const char *license, + __u32 kern_version, char *log_buf, size_t log_buf_sz, + int log_level) +{ + union bpf_attr attr; + int fd; + + bump_rlimit_memlock(); + + memset(&attr, 0, sizeof(attr)); + attr.prog_type = type; + attr.insn_cnt = (__u32)insns_cnt; + attr.insns = ptr_to_u64(insns); + attr.license = ptr_to_u64(license); + attr.log_buf = ptr_to_u64(log_buf); + attr.log_size = log_buf_sz; + attr.log_level = log_level; + log_buf[0] = 0; + attr.kern_version = kern_version; + attr.prog_flags = prog_flags; + + fd = sys_bpf_prog_load(&attr, sizeof(attr), PROG_LOAD_ATTEMPTS); + return libbpf_err_errno(fd); +} + +int bpf_map_update_elem(int fd, const void *key, const void *value, + __u64 flags) +{ + union bpf_attr attr; + int ret; + + memset(&attr, 0, sizeof(attr)); + attr.map_fd = fd; + attr.key = ptr_to_u64(key); + attr.value = ptr_to_u64(value); + attr.flags = flags; + + ret = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)); + return libbpf_err_errno(ret); +} + +int bpf_map_lookup_elem(int fd, const void *key, void *value) +{ + union bpf_attr attr; + int ret; + + memset(&attr, 0, sizeof(attr)); + attr.map_fd = fd; + attr.key = ptr_to_u64(key); + attr.value = ptr_to_u64(value); + + ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)); + return libbpf_err_errno(ret); +} + +int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags) +{ + union bpf_attr attr; + int ret; + + memset(&attr, 0, sizeof(attr)); + attr.map_fd = fd; + attr.key = ptr_to_u64(key); + attr.value = ptr_to_u64(value); + attr.flags = flags; + + ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)); + return libbpf_err_errno(ret); +} + +int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value) +{ + union bpf_attr attr; + int ret; + + memset(&attr, 0, sizeof(attr)); + attr.map_fd = fd; + attr.key = ptr_to_u64(key); + attr.value = ptr_to_u64(value); + + ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr)); + return libbpf_err_errno(ret); +} + +int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key, void *value, __u64 flags) +{ + union bpf_attr attr; + int ret; + + memset(&attr, 0, sizeof(attr)); + attr.map_fd = fd; + attr.key = ptr_to_u64(key); + attr.value = ptr_to_u64(value); + attr.flags = flags; + + ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr)); + return libbpf_err_errno(ret); +} + +int bpf_map_delete_elem(int fd, const void *key) +{ + union bpf_attr attr; + int ret; + + memset(&attr, 0, sizeof(attr)); + attr.map_fd = fd; + attr.key = ptr_to_u64(key); + + ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr)); + return libbpf_err_errno(ret); +} + +int bpf_map_get_next_key(int fd, const void *key, void *next_key) +{ + union bpf_attr attr; + int ret; + + memset(&attr, 0, sizeof(attr)); + attr.map_fd = fd; + attr.key = ptr_to_u64(key); + attr.next_key = ptr_to_u64(next_key); + + ret = sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr)); + return libbpf_err_errno(ret); +} + +int bpf_map_freeze(int fd) +{ + union bpf_attr attr; + int ret; + + memset(&attr, 0, sizeof(attr)); + attr.map_fd = fd; + + ret = sys_bpf(BPF_MAP_FREEZE, &attr, sizeof(attr)); + return libbpf_err_errno(ret); +} + +static int bpf_map_batch_common(int cmd, int fd, void *in_batch, + void *out_batch, void *keys, void *values, + __u32 *count, + const struct bpf_map_batch_opts *opts) +{ + union bpf_attr attr; + int ret; + + if (!OPTS_VALID(opts, bpf_map_batch_opts)) + return libbpf_err(-EINVAL); + + memset(&attr, 0, sizeof(attr)); + attr.batch.map_fd = fd; + 
attr.batch.in_batch = ptr_to_u64(in_batch); + attr.batch.out_batch = ptr_to_u64(out_batch); + attr.batch.keys = ptr_to_u64(keys); + attr.batch.values = ptr_to_u64(values); + attr.batch.count = *count; + attr.batch.elem_flags = OPTS_GET(opts, elem_flags, 0); + attr.batch.flags = OPTS_GET(opts, flags, 0); + + ret = sys_bpf(cmd, &attr, sizeof(attr)); + *count = attr.batch.count; + + return libbpf_err_errno(ret); +} + +int bpf_map_delete_batch(int fd, const void *keys, __u32 *count, + const struct bpf_map_batch_opts *opts) +{ + return bpf_map_batch_common(BPF_MAP_DELETE_BATCH, fd, NULL, + NULL, (void *)keys, NULL, count, opts); +} + +int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, void *keys, + void *values, __u32 *count, + const struct bpf_map_batch_opts *opts) +{ + return bpf_map_batch_common(BPF_MAP_LOOKUP_BATCH, fd, in_batch, + out_batch, keys, values, count, opts); +} + +int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, void *out_batch, + void *keys, void *values, __u32 *count, + const struct bpf_map_batch_opts *opts) +{ + return bpf_map_batch_common(BPF_MAP_LOOKUP_AND_DELETE_BATCH, + fd, in_batch, out_batch, keys, values, + count, opts); +} + +int bpf_map_update_batch(int fd, const void *keys, const void *values, __u32 *count, + const struct bpf_map_batch_opts *opts) +{ + return bpf_map_batch_common(BPF_MAP_UPDATE_BATCH, fd, NULL, NULL, + (void *)keys, (void *)values, count, opts); +} + +int bpf_obj_pin(int fd, const char *pathname) +{ + union bpf_attr attr; + int ret; + + memset(&attr, 0, sizeof(attr)); + attr.pathname = ptr_to_u64((void *)pathname); + attr.bpf_fd = fd; + + ret = sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr)); + return libbpf_err_errno(ret); +} + +int bpf_obj_get(const char *pathname) +{ + union bpf_attr attr; + int fd; + + memset(&attr, 0, sizeof(attr)); + attr.pathname = ptr_to_u64((void *)pathname); + + fd = sys_bpf_fd(BPF_OBJ_GET, &attr, sizeof(attr)); + return libbpf_err_errno(fd); +} + +int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type, + unsigned int flags) +{ + DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, opts, + .flags = flags, + ); + + return bpf_prog_attach_opts(prog_fd, target_fd, type, &opts); +} + +int bpf_prog_attach_opts(int prog_fd, int target_fd, + enum bpf_attach_type type, + const struct bpf_prog_attach_opts *opts) +{ + union bpf_attr attr; + int ret; + + if (!OPTS_VALID(opts, bpf_prog_attach_opts)) + return libbpf_err(-EINVAL); + + memset(&attr, 0, sizeof(attr)); + attr.target_fd = target_fd; + attr.attach_bpf_fd = prog_fd; + attr.attach_type = type; + attr.attach_flags = OPTS_GET(opts, flags, 0); + attr.replace_bpf_fd = OPTS_GET(opts, replace_prog_fd, 0); + + ret = sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr)); + return libbpf_err_errno(ret); +} + +__attribute__((alias("bpf_prog_attach_opts"))) +int bpf_prog_attach_xattr(int prog_fd, int target_fd, + enum bpf_attach_type type, + const struct bpf_prog_attach_opts *opts); + +int bpf_prog_detach(int target_fd, enum bpf_attach_type type) +{ + union bpf_attr attr; + int ret; + + memset(&attr, 0, sizeof(attr)); + attr.target_fd = target_fd; + attr.attach_type = type; + + ret = sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr)); + return libbpf_err_errno(ret); +} + +int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type) +{ + union bpf_attr attr; + int ret; + + memset(&attr, 0, sizeof(attr)); + attr.target_fd = target_fd; + attr.attach_bpf_fd = prog_fd; + attr.attach_type = type; + + ret = sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr)); + return 
libbpf_err_errno(ret); +} + +int bpf_link_create(int prog_fd, int target_fd, + enum bpf_attach_type attach_type, + const struct bpf_link_create_opts *opts) +{ + __u32 target_btf_id, iter_info_len; + union bpf_attr attr; + int fd, err; + + if (!OPTS_VALID(opts, bpf_link_create_opts)) + return libbpf_err(-EINVAL); + + iter_info_len = OPTS_GET(opts, iter_info_len, 0); + target_btf_id = OPTS_GET(opts, target_btf_id, 0); + + /* validate we don't have unexpected combinations of non-zero fields */ + if (iter_info_len || target_btf_id) { + if (iter_info_len && target_btf_id) + return libbpf_err(-EINVAL); + if (!OPTS_ZEROED(opts, target_btf_id)) + return libbpf_err(-EINVAL); + } + + memset(&attr, 0, sizeof(attr)); + attr.link_create.prog_fd = prog_fd; + attr.link_create.target_fd = target_fd; + attr.link_create.attach_type = attach_type; + attr.link_create.flags = OPTS_GET(opts, flags, 0); + + if (target_btf_id) { + attr.link_create.target_btf_id = target_btf_id; + goto proceed; + } + + switch (attach_type) { + case BPF_TRACE_ITER: + attr.link_create.iter_info = ptr_to_u64(OPTS_GET(opts, iter_info, (void *)0)); + attr.link_create.iter_info_len = iter_info_len; + break; + case BPF_PERF_EVENT: + attr.link_create.perf_event.bpf_cookie = OPTS_GET(opts, perf_event.bpf_cookie, 0); + if (!OPTS_ZEROED(opts, perf_event)) + return libbpf_err(-EINVAL); + break; + case BPF_TRACE_KPROBE_MULTI: + attr.link_create.kprobe_multi.flags = OPTS_GET(opts, kprobe_multi.flags, 0); + attr.link_create.kprobe_multi.cnt = OPTS_GET(opts, kprobe_multi.cnt, 0); + attr.link_create.kprobe_multi.syms = ptr_to_u64(OPTS_GET(opts, kprobe_multi.syms, 0)); + attr.link_create.kprobe_multi.addrs = ptr_to_u64(OPTS_GET(opts, kprobe_multi.addrs, 0)); + attr.link_create.kprobe_multi.cookies = ptr_to_u64(OPTS_GET(opts, kprobe_multi.cookies, 0)); + if (!OPTS_ZEROED(opts, kprobe_multi)) + return libbpf_err(-EINVAL); + break; + default: + if (!OPTS_ZEROED(opts, flags)) + return libbpf_err(-EINVAL); + break; + } +proceed: + fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, sizeof(attr)); + if (fd >= 0) + return fd; + /* we'll get EINVAL if LINK_CREATE doesn't support attaching fentry + * and other similar programs + */ + err = -errno; + if (err != -EINVAL) + return libbpf_err(err); + + /* if user used features not supported by + * BPF_RAW_TRACEPOINT_OPEN command, then just give up immediately + */ + if (attr.link_create.target_fd || attr.link_create.target_btf_id) + return libbpf_err(err); + if (!OPTS_ZEROED(opts, sz)) + return libbpf_err(err); + + /* otherwise, for few select kinds of programs that can be + * attached using BPF_RAW_TRACEPOINT_OPEN command, try that as + * a fallback for older kernels + */ + switch (attach_type) { + case BPF_TRACE_RAW_TP: + case BPF_LSM_MAC: + case BPF_TRACE_FENTRY: + case BPF_TRACE_FEXIT: + case BPF_MODIFY_RETURN: + return bpf_raw_tracepoint_open(NULL, prog_fd); + default: + return libbpf_err(err); + } +} + +int bpf_link_detach(int link_fd) +{ + union bpf_attr attr; + int ret; + + memset(&attr, 0, sizeof(attr)); + attr.link_detach.link_fd = link_fd; + + ret = sys_bpf(BPF_LINK_DETACH, &attr, sizeof(attr)); + return libbpf_err_errno(ret); +} + +int bpf_link_update(int link_fd, int new_prog_fd, + const struct bpf_link_update_opts *opts) +{ + union bpf_attr attr; + int ret; + + if (!OPTS_VALID(opts, bpf_link_update_opts)) + return libbpf_err(-EINVAL); + + memset(&attr, 0, sizeof(attr)); + attr.link_update.link_fd = link_fd; + attr.link_update.new_prog_fd = new_prog_fd; + attr.link_update.flags = OPTS_GET(opts, flags, 0); + 
attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0); + + ret = sys_bpf(BPF_LINK_UPDATE, &attr, sizeof(attr)); + return libbpf_err_errno(ret); +} + +int bpf_iter_create(int link_fd) +{ + union bpf_attr attr; + int fd; + + memset(&attr, 0, sizeof(attr)); + attr.iter_create.link_fd = link_fd; + + fd = sys_bpf_fd(BPF_ITER_CREATE, &attr, sizeof(attr)); + return libbpf_err_errno(fd); +} + +int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags, + __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt) +{ + union bpf_attr attr; + int ret; + + memset(&attr, 0, sizeof(attr)); + attr.query.target_fd = target_fd; + attr.query.attach_type = type; + attr.query.query_flags = query_flags; + attr.query.prog_cnt = *prog_cnt; + attr.query.prog_ids = ptr_to_u64(prog_ids); + + ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr)); + + if (attach_flags) + *attach_flags = attr.query.attach_flags; + *prog_cnt = attr.query.prog_cnt; + + return libbpf_err_errno(ret); +} + +int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size, + void *data_out, __u32 *size_out, __u32 *retval, + __u32 *duration) +{ + union bpf_attr attr; + int ret; + + memset(&attr, 0, sizeof(attr)); + attr.test.prog_fd = prog_fd; + attr.test.data_in = ptr_to_u64(data); + attr.test.data_out = ptr_to_u64(data_out); + attr.test.data_size_in = size; + attr.test.repeat = repeat; + + ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr)); + + if (size_out) + *size_out = attr.test.data_size_out; + if (retval) + *retval = attr.test.retval; + if (duration) + *duration = attr.test.duration; + + return libbpf_err_errno(ret); +} + +int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr) +{ + union bpf_attr attr; + int ret; + + if (!test_attr->data_out && test_attr->data_size_out > 0) + return libbpf_err(-EINVAL); + + memset(&attr, 0, sizeof(attr)); + attr.test.prog_fd = test_attr->prog_fd; + attr.test.data_in = ptr_to_u64(test_attr->data_in); + attr.test.data_out = ptr_to_u64(test_attr->data_out); + attr.test.data_size_in = test_attr->data_size_in; + attr.test.data_size_out = test_attr->data_size_out; + attr.test.ctx_in = ptr_to_u64(test_attr->ctx_in); + attr.test.ctx_out = ptr_to_u64(test_attr->ctx_out); + attr.test.ctx_size_in = test_attr->ctx_size_in; + attr.test.ctx_size_out = test_attr->ctx_size_out; + attr.test.repeat = test_attr->repeat; + + ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr)); + + test_attr->data_size_out = attr.test.data_size_out; + test_attr->ctx_size_out = attr.test.ctx_size_out; + test_attr->retval = attr.test.retval; + test_attr->duration = attr.test.duration; + + return libbpf_err_errno(ret); +} + +int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts) +{ + union bpf_attr attr; + int ret; + + if (!OPTS_VALID(opts, bpf_test_run_opts)) + return libbpf_err(-EINVAL); + + memset(&attr, 0, sizeof(attr)); + attr.test.prog_fd = prog_fd; + attr.test.batch_size = OPTS_GET(opts, batch_size, 0); + attr.test.cpu = OPTS_GET(opts, cpu, 0); + attr.test.flags = OPTS_GET(opts, flags, 0); + attr.test.repeat = OPTS_GET(opts, repeat, 0); + attr.test.duration = OPTS_GET(opts, duration, 0); + attr.test.ctx_size_in = OPTS_GET(opts, ctx_size_in, 0); + attr.test.ctx_size_out = OPTS_GET(opts, ctx_size_out, 0); + attr.test.data_size_in = OPTS_GET(opts, data_size_in, 0); + attr.test.data_size_out = OPTS_GET(opts, data_size_out, 0); + attr.test.ctx_in = ptr_to_u64(OPTS_GET(opts, ctx_in, NULL)); + attr.test.ctx_out = ptr_to_u64(OPTS_GET(opts, ctx_out, NULL)); + attr.test.data_in = 
ptr_to_u64(OPTS_GET(opts, data_in, NULL)); + attr.test.data_out = ptr_to_u64(OPTS_GET(opts, data_out, NULL)); + + ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr)); + + OPTS_SET(opts, data_size_out, attr.test.data_size_out); + OPTS_SET(opts, ctx_size_out, attr.test.ctx_size_out); + OPTS_SET(opts, duration, attr.test.duration); + OPTS_SET(opts, retval, attr.test.retval); + + return libbpf_err_errno(ret); +} + +static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd) +{ + union bpf_attr attr; + int err; + + memset(&attr, 0, sizeof(attr)); + attr.start_id = start_id; + + err = sys_bpf(cmd, &attr, sizeof(attr)); + if (!err) + *next_id = attr.next_id; + + return libbpf_err_errno(err); +} + +int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id) +{ + return bpf_obj_get_next_id(start_id, next_id, BPF_PROG_GET_NEXT_ID); +} + +int bpf_map_get_next_id(__u32 start_id, __u32 *next_id) +{ + return bpf_obj_get_next_id(start_id, next_id, BPF_MAP_GET_NEXT_ID); +} + +int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id) +{ + return bpf_obj_get_next_id(start_id, next_id, BPF_BTF_GET_NEXT_ID); +} + +int bpf_link_get_next_id(__u32 start_id, __u32 *next_id) +{ + return bpf_obj_get_next_id(start_id, next_id, BPF_LINK_GET_NEXT_ID); +} + +int bpf_prog_get_fd_by_id(__u32 id) +{ + union bpf_attr attr; + int fd; + + memset(&attr, 0, sizeof(attr)); + attr.prog_id = id; + + fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr)); + return libbpf_err_errno(fd); +} + +int bpf_map_get_fd_by_id(__u32 id) +{ + union bpf_attr attr; + int fd; + + memset(&attr, 0, sizeof(attr)); + attr.map_id = id; + + fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr)); + return libbpf_err_errno(fd); +} + +int bpf_btf_get_fd_by_id(__u32 id) +{ + union bpf_attr attr; + int fd; + + memset(&attr, 0, sizeof(attr)); + attr.btf_id = id; + + fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr)); + return libbpf_err_errno(fd); +} + +int bpf_link_get_fd_by_id(__u32 id) +{ + union bpf_attr attr; + int fd; + + memset(&attr, 0, sizeof(attr)); + attr.link_id = id; + + fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr)); + return libbpf_err_errno(fd); +} + +int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len) +{ + union bpf_attr attr; + int err; + + memset(&attr, 0, sizeof(attr)); + attr.info.bpf_fd = bpf_fd; + attr.info.info_len = *info_len; + attr.info.info = ptr_to_u64(info); + + err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)); + + if (!err) + *info_len = attr.info.info_len; + + return libbpf_err_errno(err); +} + +int bpf_raw_tracepoint_open(const char *name, int prog_fd) +{ + union bpf_attr attr; + int fd; + + memset(&attr, 0, sizeof(attr)); + attr.raw_tracepoint.name = ptr_to_u64(name); + attr.raw_tracepoint.prog_fd = prog_fd; + + fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr)); + return libbpf_err_errno(fd); +} + +int bpf_btf_load(const void *btf_data, size_t btf_size, const struct bpf_btf_load_opts *opts) +{ + const size_t attr_sz = offsetofend(union bpf_attr, btf_log_level); + union bpf_attr attr; + char *log_buf; + size_t log_size; + __u32 log_level; + int fd; + + bump_rlimit_memlock(); + + memset(&attr, 0, attr_sz); + + if (!OPTS_VALID(opts, bpf_btf_load_opts)) + return libbpf_err(-EINVAL); + + log_buf = OPTS_GET(opts, log_buf, NULL); + log_size = OPTS_GET(opts, log_size, 0); + log_level = OPTS_GET(opts, log_level, 0); + + if (log_size > UINT_MAX) + return libbpf_err(-EINVAL); + if (log_size && !log_buf) + return libbpf_err(-EINVAL); + + attr.btf = 
ptr_to_u64(btf_data); + attr.btf_size = btf_size; + /* log_level == 0 and log_buf != NULL means "try loading without + * log_buf, but retry with log_buf and log_level=1 on error", which is + * consistent across low-level and high-level BTF and program loading + * APIs within libbpf and provides a sensible behavior in practice + */ + if (log_level) { + attr.btf_log_buf = ptr_to_u64(log_buf); + attr.btf_log_size = (__u32)log_size; + attr.btf_log_level = log_level; + } + + fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz); + if (fd < 0 && log_buf && log_level == 0) { + attr.btf_log_buf = ptr_to_u64(log_buf); + attr.btf_log_size = (__u32)log_size; + attr.btf_log_level = 1; + fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz); + } + return libbpf_err_errno(fd); +} + +int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size, bool do_log) +{ + LIBBPF_OPTS(bpf_btf_load_opts, opts); + int fd; + +retry: + if (do_log && log_buf && log_buf_size) { + opts.log_buf = log_buf; + opts.log_size = log_buf_size; + opts.log_level = 1; + } + + fd = bpf_btf_load(btf, btf_size, &opts); + if (fd < 0 && !do_log && log_buf && log_buf_size) { + do_log = true; + goto retry; + } + + return libbpf_err_errno(fd); +} + +int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len, + __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset, + __u64 *probe_addr) +{ + union bpf_attr attr = {}; + int err; + + attr.task_fd_query.pid = pid; + attr.task_fd_query.fd = fd; + attr.task_fd_query.flags = flags; + attr.task_fd_query.buf = ptr_to_u64(buf); + attr.task_fd_query.buf_len = *buf_len; + + err = sys_bpf(BPF_TASK_FD_QUERY, &attr, sizeof(attr)); + + *buf_len = attr.task_fd_query.buf_len; + *prog_id = attr.task_fd_query.prog_id; + *fd_type = attr.task_fd_query.fd_type; + *probe_offset = attr.task_fd_query.probe_offset; + *probe_addr = attr.task_fd_query.probe_addr; + + return libbpf_err_errno(err); +} + +int bpf_enable_stats(enum bpf_stats_type type) +{ + union bpf_attr attr; + int fd; + + memset(&attr, 0, sizeof(attr)); + attr.enable_stats.type = type; + + fd = sys_bpf_fd(BPF_ENABLE_STATS, &attr, sizeof(attr)); + return libbpf_err_errno(fd); +} + +int bpf_prog_bind_map(int prog_fd, int map_fd, + const struct bpf_prog_bind_opts *opts) +{ + union bpf_attr attr; + int ret; + + if (!OPTS_VALID(opts, bpf_prog_bind_opts)) + return libbpf_err(-EINVAL); + + memset(&attr, 0, sizeof(attr)); + attr.prog_bind_map.prog_fd = prog_fd; + attr.prog_bind_map.map_fd = map_fd; + attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0); + + ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, sizeof(attr)); + return libbpf_err_errno(ret); +} diff --git a/pkg/plugin/lib/common/libbpf/_src/bpf.h b/pkg/plugin/lib/common/libbpf/_src/bpf.h new file mode 100644 index 000000000..f4b4afb6d --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/bpf.h @@ -0,0 +1,533 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ + +/* + * common eBPF ELF operations. + * + * Copyright (C) 2013-2015 Alexei Starovoitov + * Copyright (C) 2015 Wang Nan + * Copyright (C) 2015 Huawei Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License (not later!) + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see + */ +#ifndef __LIBBPF_BPF_H +#define __LIBBPF_BPF_H + +#include +#include +#include +#include + +#include "libbpf_common.h" +#include "libbpf_legacy.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int libbpf_set_memlock_rlim(size_t memlock_bytes); + +struct bpf_map_create_opts { + size_t sz; /* size of this struct for forward/backward compatibility */ + + __u32 btf_fd; + __u32 btf_key_type_id; + __u32 btf_value_type_id; + __u32 btf_vmlinux_value_type_id; + + __u32 inner_map_fd; + __u32 map_flags; + __u64 map_extra; + + __u32 numa_node; + __u32 map_ifindex; +}; +#define bpf_map_create_opts__last_field map_ifindex + +LIBBPF_API int bpf_map_create(enum bpf_map_type map_type, + const char *map_name, + __u32 key_size, + __u32 value_size, + __u32 max_entries, + const struct bpf_map_create_opts *opts); + +struct bpf_create_map_attr { + const char *name; + enum bpf_map_type map_type; + __u32 map_flags; + __u32 key_size; + __u32 value_size; + __u32 max_entries; + __u32 numa_node; + __u32 btf_fd; + __u32 btf_key_type_id; + __u32 btf_value_type_id; + __u32 map_ifindex; + union { + __u32 inner_map_fd; + __u32 btf_vmlinux_value_type_id; + }; +}; + +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead") +LIBBPF_API int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr); +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead") +LIBBPF_API int bpf_create_map_node(enum bpf_map_type map_type, const char *name, + int key_size, int value_size, + int max_entries, __u32 map_flags, int node); +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead") +LIBBPF_API int bpf_create_map_name(enum bpf_map_type map_type, const char *name, + int key_size, int value_size, + int max_entries, __u32 map_flags); +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead") +LIBBPF_API int bpf_create_map(enum bpf_map_type map_type, int key_size, + int value_size, int max_entries, __u32 map_flags); +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead") +LIBBPF_API int bpf_create_map_in_map_node(enum bpf_map_type map_type, + const char *name, int key_size, + int inner_map_fd, int max_entries, + __u32 map_flags, int node); +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead") +LIBBPF_API int bpf_create_map_in_map(enum bpf_map_type map_type, + const char *name, int key_size, + int inner_map_fd, int max_entries, + __u32 map_flags); + +struct bpf_prog_load_opts { + size_t sz; /* size of this struct for forward/backward compatibility */ + + /* libbpf can retry BPF_PROG_LOAD command if bpf() syscall returns + * -EAGAIN. This field determines how many attempts libbpf has to + * make. If not specified, libbpf will use default value of 5. 
+ */ + int attempts; + + enum bpf_attach_type expected_attach_type; + __u32 prog_btf_fd; + __u32 prog_flags; + __u32 prog_ifindex; + __u32 kern_version; + + __u32 attach_btf_id; + __u32 attach_prog_fd; + __u32 attach_btf_obj_fd; + + const int *fd_array; + + /* .BTF.ext func info data */ + const void *func_info; + __u32 func_info_cnt; + __u32 func_info_rec_size; + + /* .BTF.ext line info data */ + const void *line_info; + __u32 line_info_cnt; + __u32 line_info_rec_size; + + /* verifier log options */ + __u32 log_level; + __u32 log_size; + char *log_buf; +}; +#define bpf_prog_load_opts__last_field log_buf + +LIBBPF_API int bpf_prog_load(enum bpf_prog_type prog_type, + const char *prog_name, const char *license, + const struct bpf_insn *insns, size_t insn_cnt, + const struct bpf_prog_load_opts *opts); +/* this "specialization" should go away in libbpf 1.0 */ +LIBBPF_API int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type, + const char *prog_name, const char *license, + const struct bpf_insn *insns, size_t insn_cnt, + const struct bpf_prog_load_opts *opts); + +/* This is an elaborate way to not conflict with deprecated bpf_prog_load() + * API, defined in libbpf.h. Once we hit libbpf 1.0, all this will be gone. + * With this approach, if someone is calling bpf_prog_load() with + * 4 arguments, they will use the deprecated API, which keeps backwards + * compatibility (both source code and binary). If bpf_prog_load() is called + * with 6 arguments, though, it gets redirected to __bpf_prog_load. + * So looking forward to libbpf 1.0 when this hack will be gone and + * __bpf_prog_load() will be called just bpf_prog_load(). + */ +#ifndef bpf_prog_load +#define bpf_prog_load(...) ___libbpf_overload(___bpf_prog_load, __VA_ARGS__) +#define ___bpf_prog_load4(file, type, pobj, prog_fd) \ + bpf_prog_load_deprecated(file, type, pobj, prog_fd) +#define ___bpf_prog_load6(prog_type, prog_name, license, insns, insn_cnt, opts) \ + bpf_prog_load(prog_type, prog_name, license, insns, insn_cnt, opts) +#endif /* bpf_prog_load */ + +struct bpf_load_program_attr { + enum bpf_prog_type prog_type; + enum bpf_attach_type expected_attach_type; + const char *name; + const struct bpf_insn *insns; + size_t insns_cnt; + const char *license; + union { + __u32 kern_version; + __u32 attach_prog_fd; + }; + union { + __u32 prog_ifindex; + __u32 attach_btf_id; + }; + __u32 prog_btf_fd; + __u32 func_info_rec_size; + const void *func_info; + __u32 func_info_cnt; + __u32 line_info_rec_size; + const void *line_info; + __u32 line_info_cnt; + __u32 log_level; + __u32 prog_flags; +}; + +/* Flags to direct loading requirements */ +#define MAPS_RELAX_COMPAT 0x01 + +/* Recommended log buffer size */ +#define BPF_LOG_BUF_SIZE (UINT32_MAX >> 8) /* verifier maximum in kernels <= 5.1 */ + +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_load() instead") +LIBBPF_API int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, + char *log_buf, size_t log_buf_sz); +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_load() instead") +LIBBPF_API int bpf_load_program(enum bpf_prog_type type, + const struct bpf_insn *insns, size_t insns_cnt, + const char *license, __u32 kern_version, + char *log_buf, size_t log_buf_sz); +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_load() instead") +LIBBPF_API int bpf_verify_program(enum bpf_prog_type type, + const struct bpf_insn *insns, + size_t insns_cnt, __u32 prog_flags, + const char *license, __u32 kern_version, + char *log_buf, size_t log_buf_sz, + int log_level); + +struct bpf_btf_load_opts { + size_t sz; /* 
size of this struct for forward/backward compatibility */ + + /* kernel log options */ + char *log_buf; + __u32 log_level; + __u32 log_size; +}; +#define bpf_btf_load_opts__last_field log_size + +LIBBPF_API int bpf_btf_load(const void *btf_data, size_t btf_size, + const struct bpf_btf_load_opts *opts); + +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_btf_load() instead") +LIBBPF_API int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, + __u32 log_buf_size, bool do_log); + +LIBBPF_API int bpf_map_update_elem(int fd, const void *key, const void *value, + __u64 flags); + +LIBBPF_API int bpf_map_lookup_elem(int fd, const void *key, void *value); +LIBBPF_API int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, + __u64 flags); +LIBBPF_API int bpf_map_lookup_and_delete_elem(int fd, const void *key, + void *value); +LIBBPF_API int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key, + void *value, __u64 flags); +LIBBPF_API int bpf_map_delete_elem(int fd, const void *key); +LIBBPF_API int bpf_map_get_next_key(int fd, const void *key, void *next_key); +LIBBPF_API int bpf_map_freeze(int fd); + +struct bpf_map_batch_opts { + size_t sz; /* size of this struct for forward/backward compatibility */ + __u64 elem_flags; + __u64 flags; +}; +#define bpf_map_batch_opts__last_field flags + + +/** + * @brief **bpf_map_delete_batch()** allows for batch deletion of multiple + * elements in a BPF map. + * + * @param fd BPF map file descriptor + * @param keys pointer to an array of *count* keys + * @param count input and output parameter; on input **count** represents the + * number of elements in the map to delete in batch; + * on output if a non-EFAULT error is returned, **count** represents the number of deleted + * elements if the output **count** value is not equal to the input **count** value + * If EFAULT is returned, **count** should not be trusted to be correct. + * @param opts options for configuring the way the batch deletion works + * @return 0, on success; negative error code, otherwise (errno is also set to + * the error code) + */ +LIBBPF_API int bpf_map_delete_batch(int fd, const void *keys, + __u32 *count, + const struct bpf_map_batch_opts *opts); + +/** + * @brief **bpf_map_lookup_batch()** allows for batch lookup of BPF map elements. + * + * The parameter *in_batch* is the address of the first element in the batch to read. + * *out_batch* is an output parameter that should be passed as *in_batch* to subsequent + * calls to **bpf_map_lookup_batch()**. NULL can be passed for *in_batch* to indicate + * that the batched lookup starts from the beginning of the map. + * + * The *keys* and *values* are output parameters which must point to memory large enough to + * hold *count* items based on the key and value size of the map *map_fd*. The *keys* + * buffer must be of *key_size* * *count*. The *values* buffer must be of + * *value_size* * *count*. + * + * @param fd BPF map file descriptor + * @param in_batch address of the first element in batch to read, can pass NULL to + * indicate that the batched lookup starts from the beginning of the map. + * @param out_batch output parameter that should be passed to next call as *in_batch* + * @param keys pointer to an array large enough for *count* keys + * @param values pointer to an array large enough for *count* values + * @param count input and output parameter; on input it's the number of elements + * in the map to read in batch; on output it's the number of elements that were + * successfully read. 
+ * If a non-EFAULT error is returned, count will be set as the number of elements + * that were read before the error occurred. + * If EFAULT is returned, **count** should not be trusted to be correct. + * @param opts options for configuring the way the batch lookup works + * @return 0, on success; negative error code, otherwise (errno is also set to + * the error code) + */ +LIBBPF_API int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, + void *keys, void *values, __u32 *count, + const struct bpf_map_batch_opts *opts); + +/** + * @brief **bpf_map_lookup_and_delete_batch()** allows for batch lookup and deletion + * of BPF map elements where each element is deleted after being retrieved. + * + * @param fd BPF map file descriptor + * @param in_batch address of the first element in batch to read, can pass NULL to + * get address of the first element in *out_batch* + * @param out_batch output parameter that should be passed to next call as *in_batch* + * @param keys pointer to an array of *count* keys + * @param values pointer to an array large enough for *count* values + * @param count input and output parameter; on input it's the number of elements + * in the map to read and delete in batch; on output it represents the number of + * elements that were successfully read and deleted + * If a non-**EFAULT** error code is returned and if the output **count** value + * is not equal to the input **count** value, up to **count** elements may + * have been deleted. + * if **EFAULT** is returned up to *count* elements may have been deleted without + * being returned via the *keys* and *values* output parameters. + * @param opts options for configuring the way the batch lookup and delete works + * @return 0, on success; negative error code, otherwise (errno is also set to + * the error code) + */ +LIBBPF_API int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, + void *out_batch, void *keys, + void *values, __u32 *count, + const struct bpf_map_batch_opts *opts); + +/** + * @brief **bpf_map_update_batch()** updates multiple elements in a map + * by specifying keys and their corresponding values. + * + * The *keys* and *values* parameters must point to memory large enough + * to hold *count* items based on the key and value size of the map. + * + * The *opts* parameter can be used to control how *bpf_map_update_batch()* + * should handle keys that either do or do not already exist in the map. + * In particular the *flags* parameter of *bpf_map_batch_opts* can be + * one of the following: + * + * Note that *count* is an input and output parameter, where on output it + * represents how many elements were successfully updated. Also note that if + * **EFAULT** then *count* should not be trusted to be correct. + * + * **BPF_ANY** + * Create new elements or update existing. + * + * **BPF_NOEXIST** + * Create new elements only if they do not exist. + * + * **BPF_EXIST** + * Update existing elements. + * + * **BPF_F_LOCK** + * Update spin_lock-ed map elements. This must be + * specified if the map value contains a spinlock. + * + * @param fd BPF map file descriptor + * @param keys pointer to an array of *count* keys + * @param values pointer to an array of *count* values + * @param count input and output parameter; on input it's the number of elements + * in the map to update in batch; on output if a non-EFAULT error is returned, + * **count** represents the number of updated elements if the output **count** + * value is not equal to the input **count** value. 
+ * If EFAULT is returned, **count** should not be trusted to be correct. + * @param opts options for configuring the way the batch update works + * @return 0, on success; negative error code, otherwise (errno is also set to + * the error code) + */ +LIBBPF_API int bpf_map_update_batch(int fd, const void *keys, const void *values, + __u32 *count, + const struct bpf_map_batch_opts *opts); + +LIBBPF_API int bpf_obj_pin(int fd, const char *pathname); +LIBBPF_API int bpf_obj_get(const char *pathname); + +struct bpf_prog_attach_opts { + size_t sz; /* size of this struct for forward/backward compatibility */ + unsigned int flags; + int replace_prog_fd; +}; +#define bpf_prog_attach_opts__last_field replace_prog_fd + +LIBBPF_API int bpf_prog_attach(int prog_fd, int attachable_fd, + enum bpf_attach_type type, unsigned int flags); +LIBBPF_API int bpf_prog_attach_opts(int prog_fd, int attachable_fd, + enum bpf_attach_type type, + const struct bpf_prog_attach_opts *opts); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_prog_attach_opts() instead") +LIBBPF_API int bpf_prog_attach_xattr(int prog_fd, int attachable_fd, + enum bpf_attach_type type, + const struct bpf_prog_attach_opts *opts); +LIBBPF_API int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type); +LIBBPF_API int bpf_prog_detach2(int prog_fd, int attachable_fd, + enum bpf_attach_type type); + +union bpf_iter_link_info; /* defined in up-to-date linux/bpf.h */ +struct bpf_link_create_opts { + size_t sz; /* size of this struct for forward/backward compatibility */ + __u32 flags; + union bpf_iter_link_info *iter_info; + __u32 iter_info_len; + __u32 target_btf_id; + union { + struct { + __u64 bpf_cookie; + } perf_event; + struct { + __u32 flags; + __u32 cnt; + const char **syms; + const unsigned long *addrs; + const __u64 *cookies; + } kprobe_multi; + }; + size_t :0; +}; +#define bpf_link_create_opts__last_field kprobe_multi.cookies + +LIBBPF_API int bpf_link_create(int prog_fd, int target_fd, + enum bpf_attach_type attach_type, + const struct bpf_link_create_opts *opts); + +LIBBPF_API int bpf_link_detach(int link_fd); + +struct bpf_link_update_opts { + size_t sz; /* size of this struct for forward/backward compatibility */ + __u32 flags; /* extra flags */ + __u32 old_prog_fd; /* expected old program FD */ +}; +#define bpf_link_update_opts__last_field old_prog_fd + +LIBBPF_API int bpf_link_update(int link_fd, int new_prog_fd, + const struct bpf_link_update_opts *opts); + +LIBBPF_API int bpf_iter_create(int link_fd); + +struct bpf_prog_test_run_attr { + int prog_fd; + int repeat; + const void *data_in; + __u32 data_size_in; + void *data_out; /* optional */ + __u32 data_size_out; /* in: max length of data_out + * out: length of data_out */ + __u32 retval; /* out: return code of the BPF program */ + __u32 duration; /* out: average per repetition in ns */ + const void *ctx_in; /* optional */ + __u32 ctx_size_in; + void *ctx_out; /* optional */ + __u32 ctx_size_out; /* in: max length of ctx_out + * out: length of cxt_out */ +}; + +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_test_run_opts() instead") +LIBBPF_API int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr); + +/* + * bpf_prog_test_run does not check that data_out is large enough. Consider + * using bpf_prog_test_run_opts instead. 
+ */ +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_test_run_opts() instead") +LIBBPF_API int bpf_prog_test_run(int prog_fd, int repeat, void *data, + __u32 size, void *data_out, __u32 *size_out, + __u32 *retval, __u32 *duration); +LIBBPF_API int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id); +LIBBPF_API int bpf_map_get_next_id(__u32 start_id, __u32 *next_id); +LIBBPF_API int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id); +LIBBPF_API int bpf_link_get_next_id(__u32 start_id, __u32 *next_id); +LIBBPF_API int bpf_prog_get_fd_by_id(__u32 id); +LIBBPF_API int bpf_map_get_fd_by_id(__u32 id); +LIBBPF_API int bpf_btf_get_fd_by_id(__u32 id); +LIBBPF_API int bpf_link_get_fd_by_id(__u32 id); +LIBBPF_API int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len); +LIBBPF_API int bpf_prog_query(int target_fd, enum bpf_attach_type type, + __u32 query_flags, __u32 *attach_flags, + __u32 *prog_ids, __u32 *prog_cnt); +LIBBPF_API int bpf_raw_tracepoint_open(const char *name, int prog_fd); +LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, + __u32 *buf_len, __u32 *prog_id, __u32 *fd_type, + __u64 *probe_offset, __u64 *probe_addr); + +enum bpf_stats_type; /* defined in up-to-date linux/bpf.h */ +LIBBPF_API int bpf_enable_stats(enum bpf_stats_type type); + +struct bpf_prog_bind_opts { + size_t sz; /* size of this struct for forward/backward compatibility */ + __u32 flags; +}; +#define bpf_prog_bind_opts__last_field flags + +LIBBPF_API int bpf_prog_bind_map(int prog_fd, int map_fd, + const struct bpf_prog_bind_opts *opts); + +struct bpf_test_run_opts { + size_t sz; /* size of this struct for forward/backward compatibility */ + const void *data_in; /* optional */ + void *data_out; /* optional */ + __u32 data_size_in; + __u32 data_size_out; /* in: max length of data_out + * out: length of data_out + */ + const void *ctx_in; /* optional */ + void *ctx_out; /* optional */ + __u32 ctx_size_in; + __u32 ctx_size_out; /* in: max length of ctx_out + * out: length of cxt_out + */ + __u32 retval; /* out: return code of the BPF program */ + int repeat; + __u32 duration; /* out: average per repetition in ns */ + __u32 flags; + __u32 cpu; + __u32 batch_size; +}; +#define bpf_test_run_opts__last_field batch_size + +LIBBPF_API int bpf_prog_test_run_opts(int prog_fd, + struct bpf_test_run_opts *opts); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* __LIBBPF_BPF_H */ diff --git a/pkg/plugin/lib/common/libbpf/_src/bpf_core_read.h b/pkg/plugin/lib/common/libbpf/_src/bpf_core_read.h new file mode 100644 index 000000000..e4aa9996a --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/bpf_core_read.h @@ -0,0 +1,444 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ +#ifndef __BPF_CORE_READ_H__ +#define __BPF_CORE_READ_H__ + +/* + * enum bpf_field_info_kind is passed as a second argument into + * __builtin_preserve_field_info() built-in to get a specific aspect of + * a field, captured as a first argument. __builtin_preserve_field_info(field, + * info_kind) returns __u32 integer and produces BTF field relocation, which + * is understood and processed by libbpf during BPF object loading. See + * selftests/bpf for examples. 
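+ *
+ * As a purely illustrative sketch (the struct and field names are only
+ * placeholders), given a pointer t to a kernel struct task_struct, the
+ * relocated byte offset and size of its pid field can be queried with:
+ *
+ *	__u32 off = __builtin_preserve_field_info(t->pid, BPF_FIELD_BYTE_OFFSET);
+ *	__u32 sz  = __builtin_preserve_field_info(t->pid, BPF_FIELD_BYTE_SIZE);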
+ */ +enum bpf_field_info_kind { + BPF_FIELD_BYTE_OFFSET = 0, /* field byte offset */ + BPF_FIELD_BYTE_SIZE = 1, + BPF_FIELD_EXISTS = 2, /* field existence in target kernel */ + BPF_FIELD_SIGNED = 3, + BPF_FIELD_LSHIFT_U64 = 4, + BPF_FIELD_RSHIFT_U64 = 5, +}; + +/* second argument to __builtin_btf_type_id() built-in */ +enum bpf_type_id_kind { + BPF_TYPE_ID_LOCAL = 0, /* BTF type ID in local program */ + BPF_TYPE_ID_TARGET = 1, /* BTF type ID in target kernel */ +}; + +/* second argument to __builtin_preserve_type_info() built-in */ +enum bpf_type_info_kind { + BPF_TYPE_EXISTS = 0, /* type existence in target kernel */ + BPF_TYPE_SIZE = 1, /* type size in target kernel */ +}; + +/* second argument to __builtin_preserve_enum_value() built-in */ +enum bpf_enum_value_kind { + BPF_ENUMVAL_EXISTS = 0, /* enum value existence in kernel */ + BPF_ENUMVAL_VALUE = 1, /* enum value value relocation */ +}; + +#define __CORE_RELO(src, field, info) \ + __builtin_preserve_field_info((src)->field, BPF_FIELD_##info) + +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \ + bpf_probe_read_kernel( \ + (void *)dst, \ + __CORE_RELO(src, fld, BYTE_SIZE), \ + (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET)) +#else +/* semantics of LSHIFT_64 assumes loading values into low-ordered bytes, so + * for big-endian we need to adjust destination pointer accordingly, based on + * field byte size + */ +#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \ + bpf_probe_read_kernel( \ + (void *)dst + (8 - __CORE_RELO(src, fld, BYTE_SIZE)), \ + __CORE_RELO(src, fld, BYTE_SIZE), \ + (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET)) +#endif + +/* + * Extract bitfield, identified by s->field, and return its value as u64. + * All this is done in relocatable manner, so bitfield changes such as + * signedness, bit size, offset changes, this will be handled automatically. + * This version of macro is using bpf_probe_read_kernel() to read underlying + * integer storage. Macro functions as an expression and its return type is + * bpf_probe_read_kernel()'s return value: 0, on success, <0 on error. + */ +#define BPF_CORE_READ_BITFIELD_PROBED(s, field) ({ \ + unsigned long long val = 0; \ + \ + __CORE_BITFIELD_PROBE_READ(&val, s, field); \ + val <<= __CORE_RELO(s, field, LSHIFT_U64); \ + if (__CORE_RELO(s, field, SIGNED)) \ + val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64); \ + else \ + val = val >> __CORE_RELO(s, field, RSHIFT_U64); \ + val; \ +}) + +/* + * Extract bitfield, identified by s->field, and return its value as u64. + * This version of macro is using direct memory reads and should be used from + * BPF program types that support such functionality (e.g., typed raw + * tracepoints). + */ +#define BPF_CORE_READ_BITFIELD(s, field) ({ \ + const void *p = (const void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \ + unsigned long long val; \ + \ + /* This is a so-called barrier_var() operation that makes specified \ + * variable "a black box" for optimizing compiler. \ + * It forces compiler to perform BYTE_OFFSET relocation on p and use \ + * its calculated value in the switch below, instead of applying \ + * the same relocation 4 times for each individual memory load. 
\ + */ \ + asm volatile("" : "=r"(p) : "0"(p)); \ + \ + switch (__CORE_RELO(s, field, BYTE_SIZE)) { \ + case 1: val = *(const unsigned char *)p; break; \ + case 2: val = *(const unsigned short *)p; break; \ + case 4: val = *(const unsigned int *)p; break; \ + case 8: val = *(const unsigned long long *)p; break; \ + } \ + val <<= __CORE_RELO(s, field, LSHIFT_U64); \ + if (__CORE_RELO(s, field, SIGNED)) \ + val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64); \ + else \ + val = val >> __CORE_RELO(s, field, RSHIFT_U64); \ + val; \ +}) + +/* + * Convenience macro to check that field actually exists in target kernel's. + * Returns: + * 1, if matching field is present in target kernel; + * 0, if no matching field found. + */ +#define bpf_core_field_exists(field) \ + __builtin_preserve_field_info(field, BPF_FIELD_EXISTS) + +/* + * Convenience macro to get the byte size of a field. Works for integers, + * struct/unions, pointers, arrays, and enums. + */ +#define bpf_core_field_size(field) \ + __builtin_preserve_field_info(field, BPF_FIELD_BYTE_SIZE) + +/* + * Convenience macro to get BTF type ID of a specified type, using a local BTF + * information. Return 32-bit unsigned integer with type ID from program's own + * BTF. Always succeeds. + */ +#define bpf_core_type_id_local(type) \ + __builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_LOCAL) + +/* + * Convenience macro to get BTF type ID of a target kernel's type that matches + * specified local type. + * Returns: + * - valid 32-bit unsigned type ID in kernel BTF; + * - 0, if no matching type was found in a target kernel BTF. + */ +#define bpf_core_type_id_kernel(type) \ + __builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_TARGET) + +/* + * Convenience macro to check that provided named type + * (struct/union/enum/typedef) exists in a target kernel. + * Returns: + * 1, if such type is present in target kernel's BTF; + * 0, if no matching type is found. + */ +#define bpf_core_type_exists(type) \ + __builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_EXISTS) + +/* + * Convenience macro to get the byte size of a provided named type + * (struct/union/enum/typedef) in a target kernel. + * Returns: + * >= 0 size (in bytes), if type is present in target kernel's BTF; + * 0, if no matching type is found. + */ +#define bpf_core_type_size(type) \ + __builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_SIZE) + +/* + * Convenience macro to check that provided enumerator value is defined in + * a target kernel. + * Returns: + * 1, if specified enum type and its enumerator value are present in target + * kernel's BTF; + * 0, if no matching enum and/or enum value within that enum is found. + */ +#define bpf_core_enum_value_exists(enum_type, enum_value) \ + __builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_EXISTS) + +/* + * Convenience macro to get the integer value of an enumerator value in + * a target kernel. + * Returns: + * 64-bit value, if specified enum type and its enumerator value are + * present in target kernel's BTF; + * 0, if no matching enum and/or enum value within that enum is found. + */ +#define bpf_core_enum_value(enum_type, enum_value) \ + __builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_VALUE) + +/* + * bpf_core_read() abstracts away bpf_probe_read_kernel() call and captures + * offset relocation for source address using __builtin_preserve_access_index() + * built-in, provided by Clang. 
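+ *
+ * As a small illustrative sketch (names are placeholders), reading the
+ * pid field of a struct task_struct pointer t in a relocatable way could
+ * look like:
+ *
+ *	pid_t pid;
+ *	bpf_core_read(&pid, sizeof(pid), &t->pid);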
+ * + * __builtin_preserve_access_index() takes as an argument an expression of + * taking an address of a field within struct/union. It makes compiler emit + * a relocation, which records BTF type ID describing root struct/union and an + * accessor string which describes exact embedded field that was used to take + * an address. See detailed description of this relocation format and + * semantics in comments to struct bpf_field_reloc in libbpf_internal.h. + * + * This relocation allows libbpf to adjust BPF instruction to use correct + * actual field offset, based on target kernel BTF type that matches original + * (local) BTF, used to record relocation. + */ +#define bpf_core_read(dst, sz, src) \ + bpf_probe_read_kernel(dst, sz, (const void *)__builtin_preserve_access_index(src)) + +/* NOTE: see comments for BPF_CORE_READ_USER() about the proper types use. */ +#define bpf_core_read_user(dst, sz, src) \ + bpf_probe_read_user(dst, sz, (const void *)__builtin_preserve_access_index(src)) +/* + * bpf_core_read_str() is a thin wrapper around bpf_probe_read_str() + * additionally emitting BPF CO-RE field relocation for specified source + * argument. + */ +#define bpf_core_read_str(dst, sz, src) \ + bpf_probe_read_kernel_str(dst, sz, (const void *)__builtin_preserve_access_index(src)) + +/* NOTE: see comments for BPF_CORE_READ_USER() about the proper types use. */ +#define bpf_core_read_user_str(dst, sz, src) \ + bpf_probe_read_user_str(dst, sz, (const void *)__builtin_preserve_access_index(src)) + +#define ___concat(a, b) a ## b +#define ___apply(fn, n) ___concat(fn, n) +#define ___nth(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, __11, N, ...) N + +/* + * return number of provided arguments; used for switch-based variadic macro + * definitions (see ___last, ___arrow, etc below) + */ +#define ___narg(...) ___nth(_, ##__VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) +/* + * return 0 if no arguments are passed, N - otherwise; used for + * recursively-defined macros to specify termination (0) case, and generic + * (N) case (e.g., ___read_ptrs, ___core_read) + */ +#define ___empty(...) ___nth(_, ##__VA_ARGS__, N, N, N, N, N, N, N, N, N, N, 0) + +#define ___last1(x) x +#define ___last2(a, x) x +#define ___last3(a, b, x) x +#define ___last4(a, b, c, x) x +#define ___last5(a, b, c, d, x) x +#define ___last6(a, b, c, d, e, x) x +#define ___last7(a, b, c, d, e, f, x) x +#define ___last8(a, b, c, d, e, f, g, x) x +#define ___last9(a, b, c, d, e, f, g, h, x) x +#define ___last10(a, b, c, d, e, f, g, h, i, x) x +#define ___last(...) ___apply(___last, ___narg(__VA_ARGS__))(__VA_ARGS__) + +#define ___nolast2(a, _) a +#define ___nolast3(a, b, _) a, b +#define ___nolast4(a, b, c, _) a, b, c +#define ___nolast5(a, b, c, d, _) a, b, c, d +#define ___nolast6(a, b, c, d, e, _) a, b, c, d, e +#define ___nolast7(a, b, c, d, e, f, _) a, b, c, d, e, f +#define ___nolast8(a, b, c, d, e, f, g, _) a, b, c, d, e, f, g +#define ___nolast9(a, b, c, d, e, f, g, h, _) a, b, c, d, e, f, g, h +#define ___nolast10(a, b, c, d, e, f, g, h, i, _) a, b, c, d, e, f, g, h, i +#define ___nolast(...) 
___apply(___nolast, ___narg(__VA_ARGS__))(__VA_ARGS__) + +#define ___arrow1(a) a +#define ___arrow2(a, b) a->b +#define ___arrow3(a, b, c) a->b->c +#define ___arrow4(a, b, c, d) a->b->c->d +#define ___arrow5(a, b, c, d, e) a->b->c->d->e +#define ___arrow6(a, b, c, d, e, f) a->b->c->d->e->f +#define ___arrow7(a, b, c, d, e, f, g) a->b->c->d->e->f->g +#define ___arrow8(a, b, c, d, e, f, g, h) a->b->c->d->e->f->g->h +#define ___arrow9(a, b, c, d, e, f, g, h, i) a->b->c->d->e->f->g->h->i +#define ___arrow10(a, b, c, d, e, f, g, h, i, j) a->b->c->d->e->f->g->h->i->j +#define ___arrow(...) ___apply(___arrow, ___narg(__VA_ARGS__))(__VA_ARGS__) + +#define ___type(...) typeof(___arrow(__VA_ARGS__)) + +#define ___read(read_fn, dst, src_type, src, accessor) \ + read_fn((void *)(dst), sizeof(*(dst)), &((src_type)(src))->accessor) + +/* "recursively" read a sequence of inner pointers using local __t var */ +#define ___rd_first(fn, src, a) ___read(fn, &__t, ___type(src), src, a); +#define ___rd_last(fn, ...) \ + ___read(fn, &__t, ___type(___nolast(__VA_ARGS__)), __t, ___last(__VA_ARGS__)); +#define ___rd_p1(fn, ...) const void *__t; ___rd_first(fn, __VA_ARGS__) +#define ___rd_p2(fn, ...) ___rd_p1(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__) +#define ___rd_p3(fn, ...) ___rd_p2(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__) +#define ___rd_p4(fn, ...) ___rd_p3(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__) +#define ___rd_p5(fn, ...) ___rd_p4(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__) +#define ___rd_p6(fn, ...) ___rd_p5(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__) +#define ___rd_p7(fn, ...) ___rd_p6(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__) +#define ___rd_p8(fn, ...) ___rd_p7(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__) +#define ___rd_p9(fn, ...) ___rd_p8(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__) +#define ___read_ptrs(fn, src, ...) \ + ___apply(___rd_p, ___narg(__VA_ARGS__))(fn, src, __VA_ARGS__) + +#define ___core_read0(fn, fn_ptr, dst, src, a) \ + ___read(fn, dst, ___type(src), src, a); +#define ___core_readN(fn, fn_ptr, dst, src, ...) \ + ___read_ptrs(fn_ptr, src, ___nolast(__VA_ARGS__)) \ + ___read(fn, dst, ___type(src, ___nolast(__VA_ARGS__)), __t, \ + ___last(__VA_ARGS__)); +#define ___core_read(fn, fn_ptr, dst, src, a, ...) \ + ___apply(___core_read, ___empty(__VA_ARGS__))(fn, fn_ptr, dst, \ + src, a, ##__VA_ARGS__) + +/* + * BPF_CORE_READ_INTO() is a more performance-conscious variant of + * BPF_CORE_READ(), in which final field is read into user-provided storage. + * See BPF_CORE_READ() below for more details on general usage. + */ +#define BPF_CORE_READ_INTO(dst, src, a, ...) ({ \ + ___core_read(bpf_core_read, bpf_core_read, \ + dst, (src), a, ##__VA_ARGS__) \ +}) + +/* + * Variant of BPF_CORE_READ_INTO() for reading from user-space memory. + * + * NOTE: see comments for BPF_CORE_READ_USER() about the proper types use. + */ +#define BPF_CORE_READ_USER_INTO(dst, src, a, ...) ({ \ + ___core_read(bpf_core_read_user, bpf_core_read_user, \ + dst, (src), a, ##__VA_ARGS__) \ +}) + +/* Non-CO-RE variant of BPF_CORE_READ_INTO() */ +#define BPF_PROBE_READ_INTO(dst, src, a, ...) ({ \ + ___core_read(bpf_probe_read, bpf_probe_read, \ + dst, (src), a, ##__VA_ARGS__) \ +}) + +/* Non-CO-RE variant of BPF_CORE_READ_USER_INTO(). + * + * As no CO-RE relocations are emitted, source types can be arbitrary and are + * not restricted to kernel types only. + */ +#define BPF_PROBE_READ_USER_INTO(dst, src, a, ...) 
({ \ + ___core_read(bpf_probe_read_user, bpf_probe_read_user, \ + dst, (src), a, ##__VA_ARGS__) \ +}) + +/* + * BPF_CORE_READ_STR_INTO() does same "pointer chasing" as + * BPF_CORE_READ() for intermediate pointers, but then executes (and returns + * corresponding error code) bpf_core_read_str() for final string read. + */ +#define BPF_CORE_READ_STR_INTO(dst, src, a, ...) ({ \ + ___core_read(bpf_core_read_str, bpf_core_read, \ + dst, (src), a, ##__VA_ARGS__) \ +}) + +/* + * Variant of BPF_CORE_READ_STR_INTO() for reading from user-space memory. + * + * NOTE: see comments for BPF_CORE_READ_USER() about the proper types use. + */ +#define BPF_CORE_READ_USER_STR_INTO(dst, src, a, ...) ({ \ + ___core_read(bpf_core_read_user_str, bpf_core_read_user, \ + dst, (src), a, ##__VA_ARGS__) \ +}) + +/* Non-CO-RE variant of BPF_CORE_READ_STR_INTO() */ +#define BPF_PROBE_READ_STR_INTO(dst, src, a, ...) ({ \ + ___core_read(bpf_probe_read_str, bpf_probe_read, \ + dst, (src), a, ##__VA_ARGS__) \ +}) + +/* + * Non-CO-RE variant of BPF_CORE_READ_USER_STR_INTO(). + * + * As no CO-RE relocations are emitted, source types can be arbitrary and are + * not restricted to kernel types only. + */ +#define BPF_PROBE_READ_USER_STR_INTO(dst, src, a, ...) ({ \ + ___core_read(bpf_probe_read_user_str, bpf_probe_read_user, \ + dst, (src), a, ##__VA_ARGS__) \ +}) + +/* + * BPF_CORE_READ() is used to simplify BPF CO-RE relocatable read, especially + * when there are few pointer chasing steps. + * E.g., what in non-BPF world (or in BPF w/ BCC) would be something like: + * int x = s->a.b.c->d.e->f->g; + * can be succinctly achieved using BPF_CORE_READ as: + * int x = BPF_CORE_READ(s, a.b.c, d.e, f, g); + * + * BPF_CORE_READ will decompose above statement into 4 bpf_core_read (BPF + * CO-RE relocatable bpf_probe_read_kernel() wrapper) calls, logically + * equivalent to: + * 1. const void *__t = s->a.b.c; + * 2. __t = __t->d.e; + * 3. __t = __t->f; + * 4. return __t->g; + * + * Equivalence is logical, because there is a heavy type casting/preservation + * involved, as well as all the reads are happening through + * bpf_probe_read_kernel() calls using __builtin_preserve_access_index() to + * emit CO-RE relocations. + * + * N.B. Only up to 9 "field accessors" are supported, which should be more + * than enough for any practical purpose. + */ +#define BPF_CORE_READ(src, a, ...) ({ \ + ___type((src), a, ##__VA_ARGS__) __r; \ + BPF_CORE_READ_INTO(&__r, (src), a, ##__VA_ARGS__); \ + __r; \ +}) + +/* + * Variant of BPF_CORE_READ() for reading from user-space memory. + * + * NOTE: all the source types involved are still *kernel types* and need to + * exist in kernel (or kernel module) BTF, otherwise CO-RE relocation will + * fail. Custom user types are not relocatable with CO-RE. + * The typical situation in which BPF_CORE_READ_USER() might be used is to + * read kernel UAPI types from the user-space memory passed in as a syscall + * input argument. + */ +#define BPF_CORE_READ_USER(src, a, ...) ({ \ + ___type((src), a, ##__VA_ARGS__) __r; \ + BPF_CORE_READ_USER_INTO(&__r, (src), a, ##__VA_ARGS__); \ + __r; \ +}) + +/* Non-CO-RE variant of BPF_CORE_READ() */ +#define BPF_PROBE_READ(src, a, ...) ({ \ + ___type((src), a, ##__VA_ARGS__) __r; \ + BPF_PROBE_READ_INTO(&__r, (src), a, ##__VA_ARGS__); \ + __r; \ +}) + +/* + * Non-CO-RE variant of BPF_CORE_READ_USER(). + * + * As no CO-RE relocations are emitted, source types can be arbitrary and are + * not restricted to kernel types only. + */ +#define BPF_PROBE_READ_USER(src, a, ...) 
({ \ + ___type((src), a, ##__VA_ARGS__) __r; \ + BPF_PROBE_READ_USER_INTO(&__r, (src), a, ##__VA_ARGS__); \ + __r; \ +}) + +#endif + diff --git a/pkg/plugin/lib/common/libbpf/_src/bpf_endian.h b/pkg/plugin/lib/common/libbpf/_src/bpf_endian.h new file mode 100644 index 000000000..ec9db4fec --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/bpf_endian.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ +#ifndef __BPF_ENDIAN__ +#define __BPF_ENDIAN__ + +/* + * Isolate byte #n and put it into byte #m, for __u##b type. + * E.g., moving byte #6 (nnnnnnnn) into byte #1 (mmmmmmmm) for __u64: + * 1) xxxxxxxx nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx + * 2) nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx 00000000 + * 3) 00000000 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn + * 4) 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn 00000000 + */ +#define ___bpf_mvb(x, b, n, m) ((__u##b)(x) << (b-(n+1)*8) >> (b-8) << (m*8)) + +#define ___bpf_swab16(x) ((__u16)( \ + ___bpf_mvb(x, 16, 0, 1) | \ + ___bpf_mvb(x, 16, 1, 0))) + +#define ___bpf_swab32(x) ((__u32)( \ + ___bpf_mvb(x, 32, 0, 3) | \ + ___bpf_mvb(x, 32, 1, 2) | \ + ___bpf_mvb(x, 32, 2, 1) | \ + ___bpf_mvb(x, 32, 3, 0))) + +#define ___bpf_swab64(x) ((__u64)( \ + ___bpf_mvb(x, 64, 0, 7) | \ + ___bpf_mvb(x, 64, 1, 6) | \ + ___bpf_mvb(x, 64, 2, 5) | \ + ___bpf_mvb(x, 64, 3, 4) | \ + ___bpf_mvb(x, 64, 4, 3) | \ + ___bpf_mvb(x, 64, 5, 2) | \ + ___bpf_mvb(x, 64, 6, 1) | \ + ___bpf_mvb(x, 64, 7, 0))) + +/* LLVM's BPF target selects the endianness of the CPU + * it compiles on, or the user specifies (bpfel/bpfeb), + * respectively. The used __BYTE_ORDER__ is defined by + * the compiler, we cannot rely on __BYTE_ORDER from + * libc headers, since it doesn't reflect the actual + * requested byte order. + * + * Note, LLVM's BPF target has different __builtin_bswapX() + * semantics. It does map to BPF_ALU | BPF_END | BPF_TO_BE + * in bpfel and bpfeb case, which means below, that we map + * to cpu_to_be16(). We could use it unconditionally in BPF + * case, but better not rely on it, so that this header here + * can be used from application and BPF program side, which + * use different targets. + */ +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +# define __bpf_ntohs(x) __builtin_bswap16(x) +# define __bpf_htons(x) __builtin_bswap16(x) +# define __bpf_constant_ntohs(x) ___bpf_swab16(x) +# define __bpf_constant_htons(x) ___bpf_swab16(x) +# define __bpf_ntohl(x) __builtin_bswap32(x) +# define __bpf_htonl(x) __builtin_bswap32(x) +# define __bpf_constant_ntohl(x) ___bpf_swab32(x) +# define __bpf_constant_htonl(x) ___bpf_swab32(x) +# define __bpf_be64_to_cpu(x) __builtin_bswap64(x) +# define __bpf_cpu_to_be64(x) __builtin_bswap64(x) +# define __bpf_constant_be64_to_cpu(x) ___bpf_swab64(x) +# define __bpf_constant_cpu_to_be64(x) ___bpf_swab64(x) +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +# define __bpf_ntohs(x) (x) +# define __bpf_htons(x) (x) +# define __bpf_constant_ntohs(x) (x) +# define __bpf_constant_htons(x) (x) +# define __bpf_ntohl(x) (x) +# define __bpf_htonl(x) (x) +# define __bpf_constant_ntohl(x) (x) +# define __bpf_constant_htonl(x) (x) +# define __bpf_be64_to_cpu(x) (x) +# define __bpf_cpu_to_be64(x) (x) +# define __bpf_constant_be64_to_cpu(x) (x) +# define __bpf_constant_cpu_to_be64(x) (x) +#else +# error "Fix your compiler's __BYTE_ORDER__?!" +#endif + +#define bpf_htons(x) \ + (__builtin_constant_p(x) ? 
\ + __bpf_constant_htons(x) : __bpf_htons(x)) +#define bpf_ntohs(x) \ + (__builtin_constant_p(x) ? \ + __bpf_constant_ntohs(x) : __bpf_ntohs(x)) +#define bpf_htonl(x) \ + (__builtin_constant_p(x) ? \ + __bpf_constant_htonl(x) : __bpf_htonl(x)) +#define bpf_ntohl(x) \ + (__builtin_constant_p(x) ? \ + __bpf_constant_ntohl(x) : __bpf_ntohl(x)) +#define bpf_cpu_to_be64(x) \ + (__builtin_constant_p(x) ? \ + __bpf_constant_cpu_to_be64(x) : __bpf_cpu_to_be64(x)) +#define bpf_be64_to_cpu(x) \ + (__builtin_constant_p(x) ? \ + __bpf_constant_be64_to_cpu(x) : __bpf_be64_to_cpu(x)) + +#endif /* __BPF_ENDIAN__ */ diff --git a/pkg/plugin/lib/common/libbpf/_src/bpf_gen_internal.h b/pkg/plugin/lib/common/libbpf/_src/bpf_gen_internal.h new file mode 100644 index 000000000..223308931 --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/bpf_gen_internal.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ +/* Copyright (c) 2021 Facebook */ +#ifndef __BPF_GEN_INTERNAL_H +#define __BPF_GEN_INTERNAL_H + +#include "bpf.h" + +struct ksym_relo_desc { + const char *name; + int kind; + int insn_idx; + bool is_weak; + bool is_typeless; +}; + +struct ksym_desc { + const char *name; + int ref; + int kind; + union { + /* used for kfunc */ + int off; + /* used for typeless ksym */ + bool typeless; + }; + int insn; +}; + +struct bpf_gen { + struct gen_loader_opts *opts; + void *data_start; + void *data_cur; + void *insn_start; + void *insn_cur; + ssize_t cleanup_label; + __u32 nr_progs; + __u32 nr_maps; + int log_level; + int error; + struct ksym_relo_desc *relos; + int relo_cnt; + struct bpf_core_relo *core_relos; + int core_relo_cnt; + char attach_target[128]; + int attach_kind; + struct ksym_desc *ksyms; + __u32 nr_ksyms; + int fd_array; + int nr_fd_array; +}; + +void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps); +int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps); +void bpf_gen__free(struct bpf_gen *gen); +void bpf_gen__load_btf(struct bpf_gen *gen, const void *raw_data, __u32 raw_size); +void bpf_gen__map_create(struct bpf_gen *gen, + enum bpf_map_type map_type, const char *map_name, + __u32 key_size, __u32 value_size, __u32 max_entries, + struct bpf_map_create_opts *map_attr, int map_idx); +void bpf_gen__prog_load(struct bpf_gen *gen, + enum bpf_prog_type prog_type, const char *prog_name, + const char *license, struct bpf_insn *insns, size_t insn_cnt, + struct bpf_prog_load_opts *load_attr, int prog_idx); +void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *value, __u32 value_size); +void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx); +void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *name, enum bpf_attach_type type); +void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak, + bool is_typeless, int kind, int insn_idx); +void bpf_gen__record_relo_core(struct bpf_gen *gen, const struct bpf_core_relo *core_relo); +void bpf_gen__populate_outer_map(struct bpf_gen *gen, int outer_map_idx, int key, int inner_map_idx); + +#endif diff --git a/pkg/plugin/lib/common/libbpf/_src/bpf_helper_defs.h b/pkg/plugin/lib/common/libbpf/_src/bpf_helper_defs.h new file mode 100644 index 000000000..5d690a54c --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/bpf_helper_defs.h @@ -0,0 +1,4361 @@ +/* This is auto-generated file. See bpf_doc.py for details. 
*/ + +/* Forward declarations of BPF structs */ +struct bpf_fib_lookup; +struct bpf_sk_lookup; +struct bpf_perf_event_data; +struct bpf_perf_event_value; +struct bpf_pidns_info; +struct bpf_redir_neigh; +struct bpf_sock; +struct bpf_sock_addr; +struct bpf_sock_ops; +struct bpf_sock_tuple; +struct bpf_spin_lock; +struct bpf_sysctl; +struct bpf_tcp_sock; +struct bpf_tunnel_key; +struct bpf_xfrm_state; +struct linux_binprm; +struct pt_regs; +struct sk_reuseport_md; +struct sockaddr; +struct tcphdr; +struct seq_file; +struct tcp6_sock; +struct tcp_sock; +struct tcp_timewait_sock; +struct tcp_request_sock; +struct udp6_sock; +struct unix_sock; +struct task_struct; +struct __sk_buff; +struct sk_msg_md; +struct xdp_md; +struct path; +struct btf_ptr; +struct inode; +struct socket; +struct file; +struct bpf_timer; + +/* + * bpf_map_lookup_elem + * + * Perform a lookup in *map* for an entry associated to *key*. + * + * Returns + * Map value associated to *key*, or **NULL** if no entry was + * found. + */ +static void *(*bpf_map_lookup_elem)(void *map, const void *key) = (void *) 1; + +/* + * bpf_map_update_elem + * + * Add or update the value of the entry associated to *key* in + * *map* with *value*. *flags* is one of: + * + * **BPF_NOEXIST** + * The entry for *key* must not exist in the map. + * **BPF_EXIST** + * The entry for *key* must already exist in the map. + * **BPF_ANY** + * No condition on the existence of the entry for *key*. + * + * Flag value **BPF_NOEXIST** cannot be used for maps of types + * **BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY** (all + * elements always exist), the helper would return an error. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_map_update_elem)(void *map, const void *key, const void *value, __u64 flags) = (void *) 2; + +/* + * bpf_map_delete_elem + * + * Delete entry with *key* from *map*. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_map_delete_elem)(void *map, const void *key) = (void *) 3; + +/* + * bpf_probe_read + * + * For tracing programs, safely attempt to read *size* bytes from + * kernel space address *unsafe_ptr* and store the data in *dst*. + * + * Generally, use **bpf_probe_read_user**\ () or + * **bpf_probe_read_kernel**\ () instead. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_probe_read)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 4; + +/* + * bpf_ktime_get_ns + * + * Return the time elapsed since system boot, in nanoseconds. + * Does not include time the system was suspended. + * See: **clock_gettime**\ (**CLOCK_MONOTONIC**) + * + * Returns + * Current *ktime*. + */ +static __u64 (*bpf_ktime_get_ns)(void) = (void *) 5; + +/* + * bpf_trace_printk + * + * This helper is a "printk()-like" facility for debugging. It + * prints a message defined by format *fmt* (of size *fmt_size*) + * to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if + * available. It can take up to three additional **u64** + * arguments (as an eBPF helpers, the total number of arguments is + * limited to five). + * + * Each time the helper is called, it appends a line to the trace. + * Lines are discarded while *\/sys/kernel/debug/tracing/trace* is + * open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this. 
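+ *
+ * 	A minimal illustrative call (format string and value are only
+ * 	placeholders) looks like:
+ *
+ * 	::
+ *
+ * 		char fmt[] = "value: %d\n";
+ * 		int val = 42;
+ * 		bpf_trace_printk(fmt, sizeof(fmt), val);
+ *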
+ * The format of the trace is customizable, and the exact output + * one will get depends on the options set in + * *\/sys/kernel/debug/tracing/trace_options* (see also the + * *README* file under the same directory). However, it usually + * defaults to something like: + * + * :: + * + * telnet-470 [001] .N.. 419421.045894: 0x00000001: + * + * In the above: + * + * * ``telnet`` is the name of the current task. + * * ``470`` is the PID of the current task. + * * ``001`` is the CPU number on which the task is + * running. + * * In ``.N..``, each character refers to a set of + * options (whether irqs are enabled, scheduling + * options, whether hard/softirqs are running, level of + * preempt_disabled respectively). **N** means that + * **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED** + * are set. + * * ``419421.045894`` is a timestamp. + * * ``0x00000001`` is a fake value used by BPF for the + * instruction pointer register. + * * ```` is the message formatted with + * *fmt*. + * + * The conversion specifiers supported by *fmt* are similar, but + * more limited than for printk(). They are **%d**, **%i**, + * **%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**, **%lld**, + * **%lli**, **%llu**, **%llx**, **%p**, **%s**. No modifier (size + * of field, padding with zeroes, etc.) is available, and the + * helper will return **-EINVAL** (but print nothing) if it + * encounters an unknown specifier. + * + * Also, note that **bpf_trace_printk**\ () is slow, and should + * only be used for debugging purposes. For this reason, a notice + * block (spanning several lines) is printed to kernel logs and + * states that the helper should not be used "for production use" + * the first time this helper is used (or more precisely, when + * **trace_printk**\ () buffers are allocated). For passing values + * to user space, perf events should be preferred. + * + * Returns + * The number of bytes written to the buffer, or a negative error + * in case of failure. + */ +static long (*bpf_trace_printk)(const char *fmt, __u32 fmt_size, ...) = (void *) 6; + +/* + * bpf_get_prandom_u32 + * + * Get a pseudo-random number. + * + * From a security point of view, this helper uses its own + * pseudo-random internal state, and cannot be used to infer the + * seed of other random functions in the kernel. However, it is + * essential to note that the generator used by the helper is not + * cryptographically secure. + * + * Returns + * A random 32-bit unsigned value. + */ +static __u32 (*bpf_get_prandom_u32)(void) = (void *) 7; + +/* + * bpf_get_smp_processor_id + * + * Get the SMP (symmetric multiprocessing) processor id. Note that + * all programs run with migration disabled, which means that the + * SMP processor id is stable during all the execution of the + * program. + * + * Returns + * The SMP id of the processor running the program. + */ +static __u32 (*bpf_get_smp_processor_id)(void) = (void *) 8; + +/* + * bpf_skb_store_bytes + * + * Store *len* bytes from address *from* into the packet + * associated to *skb*, at *offset*. *flags* are a combination of + * **BPF_F_RECOMPUTE_CSUM** (automatically recompute the + * checksum for the packet after storing the bytes) and + * **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\ + * **->swhash** and *skb*\ **->l4hash** to 0). + * + * A call to this helper is susceptible to change the underlying + * packet buffer. 
Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_store_bytes)(struct __sk_buff *skb, __u32 offset, const void *from, __u32 len, __u64 flags) = (void *) 9; + +/* + * bpf_l3_csum_replace + * + * Recompute the layer 3 (e.g. IP) checksum for the packet + * associated to *skb*. Computation is incremental, so the helper + * must know the former value of the header field that was + * modified (*from*), the new value of this field (*to*), and the + * number of bytes (2 or 4) for this field, stored in *size*. + * Alternatively, it is possible to store the difference between + * the previous and the new values of the header field in *to*, by + * setting *from* and *size* to 0. For both methods, *offset* + * indicates the location of the IP checksum within the packet. + * + * This helper works in combination with **bpf_csum_diff**\ (), + * which does not update the checksum in-place, but offers more + * flexibility and can handle sizes larger than 2 or 4 for the + * checksum to update. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_l3_csum_replace)(struct __sk_buff *skb, __u32 offset, __u64 from, __u64 to, __u64 size) = (void *) 10; + +/* + * bpf_l4_csum_replace + * + * Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the + * packet associated to *skb*. Computation is incremental, so the + * helper must know the former value of the header field that was + * modified (*from*), the new value of this field (*to*), and the + * number of bytes (2 or 4) for this field, stored on the lowest + * four bits of *flags*. Alternatively, it is possible to store + * the difference between the previous and the new values of the + * header field in *to*, by setting *from* and the four lowest + * bits of *flags* to 0. For both methods, *offset* indicates the + * location of the IP checksum within the packet. In addition to + * the size of the field, *flags* can be added (bitwise OR) actual + * flags. With **BPF_F_MARK_MANGLED_0**, a null checksum is left + * untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and + * for updates resulting in a null checksum the value is set to + * **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates + * the checksum is to be computed against a pseudo-header. + * + * This helper works in combination with **bpf_csum_diff**\ (), + * which does not update the checksum in-place, but offers more + * flexibility and can handle sizes larger than 2 or 4 for the + * checksum to update. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. 
+ */ +static long (*bpf_l4_csum_replace)(struct __sk_buff *skb, __u32 offset, __u64 from, __u64 to, __u64 flags) = (void *) 11; + +/* + * bpf_tail_call + * + * This special helper is used to trigger a "tail call", or in + * other words, to jump into another eBPF program. The same stack + * frame is used (but values on stack and in registers for the + * caller are not accessible to the callee). This mechanism allows + * for program chaining, either for raising the maximum number of + * available eBPF instructions, or to execute given programs in + * conditional blocks. For security reasons, there is an upper + * limit to the number of successive tail calls that can be + * performed. + * + * Upon call of this helper, the program attempts to jump into a + * program referenced at index *index* in *prog_array_map*, a + * special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes + * *ctx*, a pointer to the context. + * + * If the call succeeds, the kernel immediately runs the first + * instruction of the new program. This is not a function call, + * and it never returns to the previous program. If the call + * fails, then the helper has no effect, and the caller continues + * to run its subsequent instructions. A call can fail if the + * destination program for the jump does not exist (i.e. *index* + * is superior to the number of entries in *prog_array_map*), or + * if the maximum number of tail calls has been reached for this + * chain of programs. This limit is defined in the kernel by the + * macro **MAX_TAIL_CALL_CNT** (not accessible to user space), + * which is currently set to 33. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_tail_call)(void *ctx, void *prog_array_map, __u32 index) = (void *) 12; + +/* + * bpf_clone_redirect + * + * Clone and redirect the packet associated to *skb* to another + * net device of index *ifindex*. Both ingress and egress + * interfaces can be used for redirection. The **BPF_F_INGRESS** + * value in *flags* is used to make the distinction (ingress path + * is selected if the flag is present, egress path otherwise). + * This is the only flag supported for now. + * + * In comparison with **bpf_redirect**\ () helper, + * **bpf_clone_redirect**\ () has the associated cost of + * duplicating the packet buffer, but this can be executed out of + * the eBPF program. Conversely, **bpf_redirect**\ () is more + * efficient, but it is handled through an action code where the + * redirection happens only after the eBPF program has returned. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_clone_redirect)(struct __sk_buff *skb, __u32 ifindex, __u64 flags) = (void *) 13; + +/* + * bpf_get_current_pid_tgid + * + * Get the current pid and tgid. + * + * Returns + * A 64-bit integer containing the current tgid and pid, and + * created as such: + * *current_task*\ **->tgid << 32 \|** + * *current_task*\ **->pid**. + */ +static __u64 (*bpf_get_current_pid_tgid)(void) = (void *) 14; + +/* + * bpf_get_current_uid_gid + * + * Get the current uid and gid. + * + * Returns + * A 64-bit integer containing the current GID and UID, and + * created as such: *current_gid* **<< 32 \|** *current_uid*. 
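+ *
+ * 	As an illustration (variable names are placeholders), the two
+ * 	halves can be split back out with:
+ *
+ * 	::
+ *
+ * 		__u64 ugid = bpf_get_current_uid_gid();
+ * 		__u32 uid = (__u32)ugid;	// lower 32 bits
+ * 		__u32 gid = ugid >> 32;		// upper 32 bits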
+ */ +static __u64 (*bpf_get_current_uid_gid)(void) = (void *) 15; + +/* + * bpf_get_current_comm + * + * Copy the **comm** attribute of the current task into *buf* of + * *size_of_buf*. The **comm** attribute contains the name of + * the executable (excluding the path) for the current task. The + * *size_of_buf* must be strictly positive. On success, the + * helper makes sure that the *buf* is NUL-terminated. On failure, + * it is filled with zeroes. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_get_current_comm)(void *buf, __u32 size_of_buf) = (void *) 16; + +/* + * bpf_get_cgroup_classid + * + * Retrieve the classid for the current task, i.e. for the net_cls + * cgroup to which *skb* belongs. + * + * This helper can be used on TC egress path, but not on ingress. + * + * The net_cls cgroup provides an interface to tag network packets + * based on a user-provided identifier for all traffic coming from + * the tasks belonging to the related cgroup. See also the related + * kernel documentation, available from the Linux sources in file + * *Documentation/admin-guide/cgroup-v1/net_cls.rst*. + * + * The Linux kernel has two versions for cgroups: there are + * cgroups v1 and cgroups v2. Both are available to users, who can + * use a mixture of them, but note that the net_cls cgroup is for + * cgroup v1 only. This makes it incompatible with BPF programs + * run on cgroups, which is a cgroup-v2-only feature (a socket can + * only hold data for one version of cgroups at a time). + * + * This helper is only available is the kernel was compiled with + * the **CONFIG_CGROUP_NET_CLASSID** configuration option set to + * "**y**" or to "**m**". + * + * Returns + * The classid, or 0 for the default unconfigured classid. + */ +static __u32 (*bpf_get_cgroup_classid)(struct __sk_buff *skb) = (void *) 17; + +/* + * bpf_skb_vlan_push + * + * Push a *vlan_tci* (VLAN tag control information) of protocol + * *vlan_proto* to the packet associated to *skb*, then update + * the checksum. Note that if *vlan_proto* is different from + * **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to + * be **ETH_P_8021Q**. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_vlan_push)(struct __sk_buff *skb, __be16 vlan_proto, __u16 vlan_tci) = (void *) 18; + +/* + * bpf_skb_vlan_pop + * + * Pop a VLAN header from the packet associated to *skb*. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_vlan_pop)(struct __sk_buff *skb) = (void *) 19; + +/* + * bpf_skb_get_tunnel_key + * + * Get tunnel metadata. This helper takes a pointer *key* to an + * empty **struct bpf_tunnel_key** of **size**, that will be + * filled with tunnel metadata for the packet associated to *skb*. + * The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which + * indicates that the tunnel is based on IPv6 protocol instead of + * IPv4. 
+ * + * The **struct bpf_tunnel_key** is an object that generalizes the + * principal parameters used by various tunneling protocols into a + * single struct. This way, it can be used to easily make a + * decision based on the contents of the encapsulation header, + * "summarized" in this struct. In particular, it holds the IP + * address of the remote end (IPv4 or IPv6, depending on the case) + * in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also, + * this struct exposes the *key*\ **->tunnel_id**, which is + * generally mapped to a VNI (Virtual Network Identifier), making + * it programmable together with the **bpf_skb_set_tunnel_key**\ + * () helper. + * + * Let's imagine that the following code is part of a program + * attached to the TC ingress interface, on one end of a GRE + * tunnel, and is supposed to filter out all messages coming from + * remote ends with IPv4 address other than 10.0.0.1: + * + * :: + * + * int ret; + * struct bpf_tunnel_key key = {}; + * + * ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); + * if (ret < 0) + * return TC_ACT_SHOT; // drop packet + * + * if (key.remote_ipv4 != 0x0a000001) + * return TC_ACT_SHOT; // drop packet + * + * return TC_ACT_OK; // accept packet + * + * This interface can also be used with all encapsulation devices + * that can operate in "collect metadata" mode: instead of having + * one network device per specific configuration, the "collect + * metadata" mode only requires a single device where the + * configuration can be extracted from this helper. + * + * This can be used together with various tunnels such as VXLan, + * Geneve, GRE or IP in IP (IPIP). + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_get_tunnel_key)(struct __sk_buff *skb, struct bpf_tunnel_key *key, __u32 size, __u64 flags) = (void *) 20; + +/* + * bpf_skb_set_tunnel_key + * + * Populate tunnel metadata for packet associated to *skb.* The + * tunnel metadata is set to the contents of *key*, of *size*. The + * *flags* can be set to a combination of the following values: + * + * **BPF_F_TUNINFO_IPV6** + * Indicate that the tunnel is based on IPv6 protocol + * instead of IPv4. + * **BPF_F_ZERO_CSUM_TX** + * For IPv4 packets, add a flag to tunnel metadata + * indicating that checksum computation should be skipped + * and checksum set to zeroes. + * **BPF_F_DONT_FRAGMENT** + * Add a flag to tunnel metadata indicating that the + * packet should not be fragmented. + * **BPF_F_SEQ_NUMBER** + * Add a flag to tunnel metadata indicating that a + * sequence number should be added to tunnel header before + * sending the packet. This flag was added for GRE + * encapsulation, but might be used with other protocols + * as well in the future. + * + * Here is a typical usage on the transmit path: + * + * :: + * + * struct bpf_tunnel_key key; + * populate key ... + * bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0); + * bpf_clone_redirect(skb, vxlan_dev_ifindex, 0); + * + * See also the description of the **bpf_skb_get_tunnel_key**\ () + * helper for additional information. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_set_tunnel_key)(struct __sk_buff *skb, struct bpf_tunnel_key *key, __u32 size, __u64 flags) = (void *) 21; + +/* + * bpf_perf_event_read + * + * Read the value of a perf event counter. This helper relies on a + * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. 
The nature of + * the perf event counter is selected when *map* is updated with + * perf event file descriptors. The *map* is an array whose size + * is the number of available CPUs, and each cell contains a value + * relative to one CPU. The value to retrieve is indicated by + * *flags*, that contains the index of the CPU to look up, masked + * with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to + * **BPF_F_CURRENT_CPU** to indicate that the value for the + * current CPU should be retrieved. + * + * Note that before Linux 4.13, only hardware perf event can be + * retrieved. + * + * Also, be aware that the newer helper + * **bpf_perf_event_read_value**\ () is recommended over + * **bpf_perf_event_read**\ () in general. The latter has some ABI + * quirks where error and counter value are used as a return code + * (which is wrong to do since ranges may overlap). This issue is + * fixed with **bpf_perf_event_read_value**\ (), which at the same + * time provides more features over the **bpf_perf_event_read**\ + * () interface. Please refer to the description of + * **bpf_perf_event_read_value**\ () for details. + * + * Returns + * The value of the perf event counter read from the map, or a + * negative error code in case of failure. + */ +static __u64 (*bpf_perf_event_read)(void *map, __u64 flags) = (void *) 22; + +/* + * bpf_redirect + * + * Redirect the packet to another net device of index *ifindex*. + * This helper is somewhat similar to **bpf_clone_redirect**\ + * (), except that the packet is not cloned, which provides + * increased performance. + * + * Except for XDP, both ingress and egress interfaces can be used + * for redirection. The **BPF_F_INGRESS** value in *flags* is used + * to make the distinction (ingress path is selected if the flag + * is present, egress path otherwise). Currently, XDP only + * supports redirection to the egress interface, and accepts no + * flag at all. + * + * The same effect can also be attained with the more generic + * **bpf_redirect_map**\ (), which uses a BPF map to store the + * redirect target instead of providing it directly to the helper. + * + * Returns + * For XDP, the helper returns **XDP_REDIRECT** on success or + * **XDP_ABORTED** on error. For other program types, the values + * are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on + * error. + */ +static long (*bpf_redirect)(__u32 ifindex, __u64 flags) = (void *) 23; + +/* + * bpf_get_route_realm + * + * Retrieve the realm or the route, that is to say the + * **tclassid** field of the destination for the *skb*. The + * identifier retrieved is a user-provided tag, similar to the + * one used with the net_cls cgroup (see description for + * **bpf_get_cgroup_classid**\ () helper), but here this tag is + * held by a route (a destination entry), not by a task. + * + * Retrieving this identifier works with the clsact TC egress hook + * (see also **tc-bpf(8)**), or alternatively on conventional + * classful egress qdiscs, but not on TC ingress path. In case of + * clsact TC egress hook, this has the advantage that, internally, + * the destination entry has not been dropped yet in the transmit + * path. Therefore, the destination entry does not need to be + * artificially held via **netif_keep_dst**\ () for a classful + * qdisc until the *skb* is freed. + * + * This helper is available only if the kernel was compiled with + * **CONFIG_IP_ROUTE_CLASSID** configuration option. + * + * Returns + * The realm of the route for the packet associated to *skb*, or 0 + * if none was found. 
+ */ +static __u32 (*bpf_get_route_realm)(struct __sk_buff *skb) = (void *) 24; + +/* + * bpf_perf_event_output + * + * Write raw *data* blob into a special BPF perf event held by + * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf + * event must have the following attributes: **PERF_SAMPLE_RAW** + * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and + * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. + * + * The *flags* are used to indicate the index in *map* for which + * the value must be put, masked with **BPF_F_INDEX_MASK**. + * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** + * to indicate that the index of the current CPU core should be + * used. + * + * The value to write, of *size*, is passed through eBPF stack and + * pointed by *data*. + * + * The context of the program *ctx* needs also be passed to the + * helper. + * + * On user space, a program willing to read the values needs to + * call **perf_event_open**\ () on the perf event (either for + * one or for all CPUs) and to store the file descriptor into the + * *map*. This must be done before the eBPF program can send data + * into it. An example is available in file + * *samples/bpf/trace_output_user.c* in the Linux kernel source + * tree (the eBPF program counterpart is in + * *samples/bpf/trace_output_kern.c*). + * + * **bpf_perf_event_output**\ () achieves better performance + * than **bpf_trace_printk**\ () for sharing data with user + * space, and is much better suitable for streaming data from eBPF + * programs. + * + * Note that this helper is not restricted to tracing use cases + * and can be used with programs attached to TC or XDP as well, + * where it allows for passing data to user space listeners. Data + * can be: + * + * * Only custom structs, + * * Only the packet payload, or + * * A combination of both. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_perf_event_output)(void *ctx, void *map, __u64 flags, void *data, __u64 size) = (void *) 25; + +/* + * bpf_skb_load_bytes + * + * This helper was provided as an easy way to load data from a + * packet. It can be used to load *len* bytes from *offset* from + * the packet associated to *skb*, into the buffer pointed by + * *to*. + * + * Since Linux 4.7, usage of this helper has mostly been replaced + * by "direct packet access", enabling packet data to be + * manipulated with *skb*\ **->data** and *skb*\ **->data_end** + * pointing respectively to the first byte of packet data and to + * the byte after the last byte of packet data. However, it + * remains useful if one wishes to read large quantities of data + * at once from a packet into the eBPF stack. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_load_bytes)(const void *skb, __u32 offset, void *to, __u32 len) = (void *) 26; + +/* + * bpf_get_stackid + * + * Walk a user or a kernel stack and return its id. To achieve + * this, the helper needs *ctx*, which is a pointer to the context + * on which the tracing program is executed, and a pointer to a + * *map* of type **BPF_MAP_TYPE_STACK_TRACE**. + * + * The last argument, *flags*, holds the number of stack frames to + * skip (from 0 to 255), masked with + * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set + * a combination of the following flags: + * + * **BPF_F_USER_STACK** + * Collect a user space stack instead of a kernel stack. + * **BPF_F_FAST_STACK_CMP** + * Compare stacks by hash only. 
+ * **BPF_F_REUSE_STACKID** + * If two different stacks hash into the same *stackid*, + * discard the old one. + * + * The stack id retrieved is a 32 bit long integer handle which + * can be further combined with other data (including other stack + * ids) and used as a key into maps. This can be useful for + * generating a variety of graphs (such as flame graphs or off-cpu + * graphs). + * + * For walking a stack, this helper is an improvement over + * **bpf_probe_read**\ (), which can be used with unrolled loops + * but is not efficient and consumes a lot of eBPF instructions. + * Instead, **bpf_get_stackid**\ () can collect up to + * **PERF_MAX_STACK_DEPTH** both kernel and user frames. Note that + * this limit can be controlled with the **sysctl** program, and + * that it should be manually increased in order to profile long + * user stacks (such as stacks for Java programs). To do so, use: + * + * :: + * + * # sysctl kernel.perf_event_max_stack= + * + * Returns + * The positive or null stack id on success, or a negative error + * in case of failure. + */ +static long (*bpf_get_stackid)(void *ctx, void *map, __u64 flags) = (void *) 27; + +/* + * bpf_csum_diff + * + * Compute a checksum difference, from the raw buffer pointed by + * *from*, of length *from_size* (that must be a multiple of 4), + * towards the raw buffer pointed by *to*, of size *to_size* + * (same remark). An optional *seed* can be added to the value + * (this can be cascaded, the seed may come from a previous call + * to the helper). + * + * This is flexible enough to be used in several ways: + * + * * With *from_size* == 0, *to_size* > 0 and *seed* set to + * checksum, it can be used when pushing new data. + * * With *from_size* > 0, *to_size* == 0 and *seed* set to + * checksum, it can be used when removing data from a packet. + * * With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it + * can be used to compute a diff. Note that *from_size* and + * *to_size* do not need to be equal. + * + * This helper can be used in combination with + * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to + * which one can feed in the difference computed with + * **bpf_csum_diff**\ (). + * + * Returns + * The checksum result, or a negative error code in case of + * failure. + */ +static __s64 (*bpf_csum_diff)(__be32 *from, __u32 from_size, __be32 *to, __u32 to_size, __wsum seed) = (void *) 28; + +/* + * bpf_skb_get_tunnel_opt + * + * Retrieve tunnel options metadata for the packet associated to + * *skb*, and store the raw tunnel option data to the buffer *opt* + * of *size*. + * + * This helper can be used with encapsulation devices that can + * operate in "collect metadata" mode (please refer to the related + * note in the description of **bpf_skb_get_tunnel_key**\ () for + * more details). A particular example where this can be used is + * in combination with the Geneve encapsulation protocol, where it + * allows for pushing (with **bpf_skb_get_tunnel_opt**\ () helper) + * and retrieving arbitrary TLVs (Type-Length-Value headers) from + * the eBPF program. This allows for full customization of these + * headers. + * + * Returns + * The size of the option data retrieved. + */ +static long (*bpf_skb_get_tunnel_opt)(struct __sk_buff *skb, void *opt, __u32 size) = (void *) 29; + +/* + * bpf_skb_set_tunnel_opt + * + * Set tunnel options metadata for the packet associated to *skb* + * to the option data contained in the raw buffer *opt* of *size*. 
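+ *
+ * 	A minimal illustrative call (the option buffer contents are just a
+ * 	placeholder for real tunnel option TLV bytes) could be:
+ *
+ * 	::
+ *
+ * 		__u8 opt[8] = { 0 };
+ * 		bpf_skb_set_tunnel_opt(skb, opt, sizeof(opt));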
+ * + * See also the description of the **bpf_skb_get_tunnel_opt**\ () + * helper for additional information. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_set_tunnel_opt)(struct __sk_buff *skb, void *opt, __u32 size) = (void *) 30; + +/* + * bpf_skb_change_proto + * + * Change the protocol of the *skb* to *proto*. Currently + * supported are transition from IPv4 to IPv6, and from IPv6 to + * IPv4. The helper takes care of the groundwork for the + * transition, including resizing the socket buffer. The eBPF + * program is expected to fill the new headers, if any, via + * **skb_store_bytes**\ () and to recompute the checksums with + * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ + * (). The main case for this helper is to perform NAT64 + * operations out of an eBPF program. + * + * Internally, the GSO type is marked as dodgy so that headers are + * checked and segments are recalculated by the GSO/GRO engine. + * The size for GSO target is adapted as well. + * + * All values for *flags* are reserved for future usage, and must + * be left at zero. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_change_proto)(struct __sk_buff *skb, __be16 proto, __u64 flags) = (void *) 31; + +/* + * bpf_skb_change_type + * + * Change the packet type for the packet associated to *skb*. This + * comes down to setting *skb*\ **->pkt_type** to *type*, except + * the eBPF program does not have a write access to *skb*\ + * **->pkt_type** beside this helper. Using a helper here allows + * for graceful handling of errors. + * + * The major use case is to change incoming *skb*s to + * **PACKET_HOST** in a programmatic way instead of having to + * recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for + * example. + * + * Note that *type* only allows certain values. At this time, they + * are: + * + * **PACKET_HOST** + * Packet is for us. + * **PACKET_BROADCAST** + * Send packet to all. + * **PACKET_MULTICAST** + * Send packet to group. + * **PACKET_OTHERHOST** + * Send packet to someone else. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_change_type)(struct __sk_buff *skb, __u32 type) = (void *) 32; + +/* + * bpf_skb_under_cgroup + * + * Check whether *skb* is a descendant of the cgroup2 held by + * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. + * + * Returns + * The return value depends on the result of the test, and can be: + * + * * 0, if the *skb* failed the cgroup2 descendant test. + * * 1, if the *skb* succeeded the cgroup2 descendant test. + * * A negative error code, if an error occurred. + */ +static long (*bpf_skb_under_cgroup)(struct __sk_buff *skb, void *map, __u32 index) = (void *) 33; + +/* + * bpf_get_hash_recalc + * + * Retrieve the hash of the packet, *skb*\ **->hash**. If it is + * not set, in particular if the hash was cleared due to mangling, + * recompute this hash. Later accesses to the hash can be done + * directly with *skb*\ **->hash**. 
+ *
+ * Calling **bpf_set_hash_invalid**\ (), changing a packet
+ * prototype with **bpf_skb_change_proto**\ (), or calling
+ * **bpf_skb_store_bytes**\ () with the
+ * **BPF_F_INVALIDATE_HASH** are actions susceptible to clear
+ * the hash and to trigger a new computation for the next call to
+ * **bpf_get_hash_recalc**\ ().
+ *
+ * Returns
+ * The 32-bit hash.
+ */
+static __u32 (*bpf_get_hash_recalc)(struct __sk_buff *skb) = (void *) 34;
+
+/*
+ * bpf_get_current_task
+ *
+ * Get the current task.
+ *
+ * Returns
+ * A pointer to the current task struct.
+ */
+static __u64 (*bpf_get_current_task)(void) = (void *) 35;
+
+/*
+ * bpf_probe_write_user
+ *
+ * Attempt in a safe way to write *len* bytes from the buffer
+ * *src* to *dst* in memory. It only works for threads that are in
+ * user context, and *dst* must be a valid user space address.
+ *
+ * This helper should not be used to implement any kind of
+ * security mechanism because of TOC-TOU attacks, but rather to
+ * debug, divert, and manipulate execution of semi-cooperative
+ * processes.
+ *
+ * Keep in mind that this feature is meant for experiments, and it
+ * has a risk of crashing the system and running programs.
+ * Therefore, when an eBPF program using this helper is attached,
+ * a warning including PID and process name is printed to kernel
+ * logs.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_probe_write_user)(void *dst, const void *src, __u32 len) = (void *) 36;
+
+/*
+ * bpf_current_task_under_cgroup
+ *
+ * Check whether the probe is being run in the context of a given
+ * subset of the cgroup2 hierarchy. The cgroup2 to test is held by
+ * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
+ *
+ * Returns
+ * The return value depends on the result of the test, and can be:
+ *
+ * * 1, if current task belongs to the cgroup2.
+ * * 0, if current task does not belong to the cgroup2.
+ * * A negative error code, if an error occurred.
+ */
+static long (*bpf_current_task_under_cgroup)(void *map, __u32 index) = (void *) 37;
+
+/*
+ * bpf_skb_change_tail
+ *
+ * Resize (trim or grow) the packet associated to *skb* to the
+ * new *len*. The *flags* are reserved for future usage, and must
+ * be left at zero.
+ *
+ * The basic idea is that the helper performs the needed work to
+ * change the size of the packet, then the eBPF program rewrites
+ * the rest via helpers like **bpf_skb_store_bytes**\ (),
+ * **bpf_l3_csum_replace**\ (), **bpf_l4_csum_replace**\ ()
+ * and others. This helper is a slow path utility intended for
+ * replies with control messages. And because it is targeted for
+ * slow path, the helper itself can afford to be slow: it
+ * implicitly linearizes, unclones and drops offloads from the
+ * *skb*.
+ *
+ * A call to this helper is susceptible to change the underlying
+ * packet buffer. Therefore, at load time, all checks on pointers
+ * previously done by the verifier are invalidated and must be
+ * performed again, if the helper is used in combination with
+ * direct packet access.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_skb_change_tail)(struct __sk_buff *skb, __u32 len, __u64 flags) = (void *) 38;
+
+/*
+ * bpf_skb_pull_data
+ *
+ * Pull in non-linear data in case the *skb* is non-linear and not
+ * all of *len* are part of the linear section. Make *len* bytes
+ * from *skb* readable and writable. If a zero value is passed for
+ * *len*, then the whole length of the *skb* is pulled.
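+ *
+ * For example, a TC program that needs the first *n* bytes to be
+ * directly readable could do the following (sketch only; *n*,
+ * *data* and *data_end* are assumed to be declared by the caller):
+ *
+ * ::
+ *
+ *         if (bpf_skb_pull_data(skb, n))
+ *                 return TC_ACT_OK;
+ *         data     = (void *)(long)skb->data;
+ *         data_end = (void *)(long)skb->data_end;
+ *         if (data + n > data_end)
+ *                 return TC_ACT_OK;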
+ * + * This helper is only needed for reading and writing with direct + * packet access. + * + * For direct packet access, testing that offsets to access + * are within packet boundaries (test on *skb*\ **->data_end**) is + * susceptible to fail if offsets are invalid, or if the requested + * data is in non-linear parts of the *skb*. On failure the + * program can just bail out, or in the case of a non-linear + * buffer, use a helper to make the data available. The + * **bpf_skb_load_bytes**\ () helper is a first solution to access + * the data. Another one consists in using **bpf_skb_pull_data** + * to pull in once the non-linear parts, then retesting and + * eventually access the data. + * + * At the same time, this also makes sure the *skb* is uncloned, + * which is a necessary condition for direct write. As this needs + * to be an invariant for the write part only, the verifier + * detects writes and adds a prologue that is calling + * **bpf_skb_pull_data()** to effectively unclone the *skb* from + * the very beginning in case it is indeed cloned. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_pull_data)(struct __sk_buff *skb, __u32 len) = (void *) 39; + +/* + * bpf_csum_update + * + * Add the checksum *csum* into *skb*\ **->csum** in case the + * driver has supplied a checksum for the entire packet into that + * field. Return an error otherwise. This helper is intended to be + * used in combination with **bpf_csum_diff**\ (), in particular + * when the checksum needs to be updated after data has been + * written into the packet through direct packet access. + * + * Returns + * The checksum on success, or a negative error code in case of + * failure. + */ +static __s64 (*bpf_csum_update)(struct __sk_buff *skb, __wsum csum) = (void *) 40; + +/* + * bpf_set_hash_invalid + * + * Invalidate the current *skb*\ **->hash**. It can be used after + * mangling on headers through direct packet access, in order to + * indicate that the hash is outdated and to trigger a + * recalculation the next time the kernel tries to access this + * hash or when the **bpf_get_hash_recalc**\ () helper is called. + * + * Returns + * void. + */ +static void (*bpf_set_hash_invalid)(struct __sk_buff *skb) = (void *) 41; + +/* + * bpf_get_numa_node_id + * + * Return the id of the current NUMA node. The primary use case + * for this helper is the selection of sockets for the local NUMA + * node, when the program is attached to sockets using the + * **SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**), + * but the helper is also available to other eBPF program types, + * similarly to **bpf_get_smp_processor_id**\ (). + * + * Returns + * The id of current NUMA node. + */ +static long (*bpf_get_numa_node_id)(void) = (void *) 42; + +/* + * bpf_skb_change_head + * + * Grows headroom of packet associated to *skb* and adjusts the + * offset of the MAC header accordingly, adding *len* bytes of + * space. It automatically extends and reallocates memory as + * required. + * + * This helper can be used on a layer 3 *skb* to push a MAC header + * for redirection into a layer 2 device. + * + * All values for *flags* are reserved for future usage, and must + * be left at zero. 
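+ *
+ * A sketch that prepends room for an Ethernet header in front of
+ * an IP packet and writes a stub header into it (the *eth* value
+ * is an assumption, and **bpf_htons** comes from *bpf_endian.h*):
+ *
+ * ::
+ *
+ *         struct ethhdr eth = { .h_proto = bpf_htons(ETH_P_IP) };
+ *
+ *         if (bpf_skb_change_head(skb, sizeof(eth), 0))
+ *                 return TC_ACT_SHOT;
+ *         bpf_skb_store_bytes(skb, 0, &eth, sizeof(eth), 0);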
+ * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_change_head)(struct __sk_buff *skb, __u32 len, __u64 flags) = (void *) 43; + +/* + * bpf_xdp_adjust_head + * + * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that + * it is possible to use a negative value for *delta*. This helper + * can be used to prepare the packet for pushing or popping + * headers. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_xdp_adjust_head)(struct xdp_md *xdp_md, int delta) = (void *) 44; + +/* + * bpf_probe_read_str + * + * Copy a NUL terminated string from an unsafe kernel address + * *unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for + * more details. + * + * Generally, use **bpf_probe_read_user_str**\ () or + * **bpf_probe_read_kernel_str**\ () instead. + * + * Returns + * On success, the strictly positive length of the string, + * including the trailing NUL character. On error, a negative + * value. + */ +static long (*bpf_probe_read_str)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 45; + +/* + * bpf_get_socket_cookie + * + * If the **struct sk_buff** pointed by *skb* has a known socket, + * retrieve the cookie (generated by the kernel) of this socket. + * If no cookie has been set yet, generate a new cookie. Once + * generated, the socket cookie remains stable for the life of the + * socket. This helper can be useful for monitoring per socket + * networking traffic statistics as it provides a global socket + * identifier that can be assumed unique. + * + * Returns + * A 8-byte long unique number on success, or 0 if the socket + * field is missing inside *skb*. + */ +static __u64 (*bpf_get_socket_cookie)(void *ctx) = (void *) 46; + +/* + * bpf_get_socket_uid + * + * Get the owner UID of the socked associated to *skb*. + * + * Returns + * The owner UID of the socket associated to *skb*. If the socket + * is **NULL**, or if it is not a full socket (i.e. if it is a + * time-wait or a request socket instead), **overflowuid** value + * is returned (note that **overflowuid** might also be the actual + * UID value for the socket). + */ +static __u32 (*bpf_get_socket_uid)(struct __sk_buff *skb) = (void *) 47; + +/* + * bpf_set_hash + * + * Set the full hash for *skb* (set the field *skb*\ **->hash**) + * to value *hash*. + * + * Returns + * 0 + */ +static long (*bpf_set_hash)(struct __sk_buff *skb, __u32 hash) = (void *) 48; + +/* + * bpf_setsockopt + * + * Emulate a call to **setsockopt()** on the socket associated to + * *bpf_socket*, which must be a full socket. The *level* at + * which the option resides and the name *optname* of the option + * must be specified, see **setsockopt(2)** for more information. + * The option value of length *optlen* is pointed by *optval*. + * + * *bpf_socket* should be one of the following: + * + * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. 
+ * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT** + * and **BPF_CGROUP_INET6_CONNECT**. + * + * This helper actually implements a subset of **setsockopt()**. + * It supports the following *level*\ s: + * + * * **SOL_SOCKET**, which supports the following *optname*\ s: + * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**, + * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**, + * **SO_BINDTODEVICE**, **SO_KEEPALIVE**. + * * **IPPROTO_TCP**, which supports the following *optname*\ s: + * **TCP_CONGESTION**, **TCP_BPF_IW**, + * **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**, + * **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**, + * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**. + * * **IPPROTO_IP**, which supports *optname* **IP_TOS**. + * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_setsockopt)(void *bpf_socket, int level, int optname, void *optval, int optlen) = (void *) 49; + +/* + * bpf_skb_adjust_room + * + * Grow or shrink the room for data in the packet associated to + * *skb* by *len_diff*, and according to the selected *mode*. + * + * By default, the helper will reset any offloaded checksum + * indicator of the skb to CHECKSUM_NONE. This can be avoided + * by the following flag: + * + * * **BPF_F_ADJ_ROOM_NO_CSUM_RESET**: Do not reset offloaded + * checksum data of the skb to CHECKSUM_NONE. + * + * There are two supported modes at this time: + * + * * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer + * (room space is added or removed below the layer 2 header). + * + * * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer + * (room space is added or removed below the layer 3 header). + * + * The following flags are supported at this time: + * + * * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size. + * Adjusting mss in this way is not allowed for datagrams. + * + * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**, + * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**: + * Any new space is reserved to hold a tunnel header. + * Configure skb offsets and other fields accordingly. + * + * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**, + * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**: + * Use with ENCAP_L3 flags to further specify the tunnel type. + * + * * **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*): + * Use with ENCAP_L3/L4 flags to further specify the tunnel + * type; *len* is the length of the inner MAC header. + * + * * **BPF_F_ADJ_ROOM_ENCAP_L2_ETH**: + * Use with BPF_F_ADJ_ROOM_ENCAP_L2 flag to further specify the + * L2 type as Ethernet. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_adjust_room)(struct __sk_buff *skb, __s32 len_diff, __u32 mode, __u64 flags) = (void *) 50; + +/* + * bpf_redirect_map + * + * Redirect the packet to the endpoint referenced by *map* at + * index *key*. Depending on its type, this *map* can contain + * references to net devices (for forwarding packets through other + * ports), or to CPUs (for redirecting XDP frames to another CPU; + * but this is only implemented for native XDP (with driver + * support) as of this writing). + * + * The lower two bits of *flags* are used as the return code if + * the map lookup fails. 
This is so that the return value can be + * one of the XDP program return codes up to **XDP_TX**, as chosen + * by the caller. The higher bits of *flags* can be set to + * BPF_F_BROADCAST or BPF_F_EXCLUDE_INGRESS as defined below. + * + * With BPF_F_BROADCAST the packet will be broadcasted to all the + * interfaces in the map, with BPF_F_EXCLUDE_INGRESS the ingress + * interface will be excluded when do broadcasting. + * + * See also **bpf_redirect**\ (), which only supports redirecting + * to an ifindex, but doesn't require a map to do so. + * + * Returns + * **XDP_REDIRECT** on success, or the value of the two lower bits + * of the *flags* argument on error. + */ +static long (*bpf_redirect_map)(void *map, __u32 key, __u64 flags) = (void *) 51; + +/* + * bpf_sk_redirect_map + * + * Redirect the packet to the socket referenced by *map* (of type + * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and + * egress interfaces can be used for redirection. The + * **BPF_F_INGRESS** value in *flags* is used to make the + * distinction (ingress path is selected if the flag is present, + * egress path otherwise). This is the only flag supported for now. + * + * Returns + * **SK_PASS** on success, or **SK_DROP** on error. + */ +static long (*bpf_sk_redirect_map)(struct __sk_buff *skb, void *map, __u32 key, __u64 flags) = (void *) 52; + +/* + * bpf_sock_map_update + * + * Add an entry to, or update a *map* referencing sockets. The + * *skops* is used as a new value for the entry associated to + * *key*. *flags* is one of: + * + * **BPF_NOEXIST** + * The entry for *key* must not exist in the map. + * **BPF_EXIST** + * The entry for *key* must already exist in the map. + * **BPF_ANY** + * No condition on the existence of the entry for *key*. + * + * If the *map* has eBPF programs (parser and verdict), those will + * be inherited by the socket being added. If the socket is + * already attached to eBPF programs, this results in an error. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_sock_map_update)(struct bpf_sock_ops *skops, void *map, void *key, __u64 flags) = (void *) 53; + +/* + * bpf_xdp_adjust_meta + * + * Adjust the address pointed by *xdp_md*\ **->data_meta** by + * *delta* (which can be positive or negative). Note that this + * operation modifies the address stored in *xdp_md*\ **->data**, + * so the latter must be loaded only after the helper has been + * called. + * + * The use of *xdp_md*\ **->data_meta** is optional and programs + * are not required to use it. The rationale is that when the + * packet is processed with XDP (e.g. as DoS filter), it is + * possible to push further meta data along with it before passing + * to the stack, and to give the guarantee that an ingress eBPF + * program attached as a TC classifier on the same device can pick + * this up for further post-processing. Since TC works with socket + * buffers, it remains possible to set from XDP the **mark** or + * **priority** pointers, or other pointers for the socket buffer. + * Having this scratch space generic and programmable allows for + * more flexibility as the user is free to store whatever meta + * data they need. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. 
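+ *
+ * A sketch of the usual pattern: reserve 4 bytes of metadata in an
+ * XDP program, re-load the pointers, bounds-check, then write a
+ * value for a later TC program to consume:
+ *
+ * ::
+ *
+ *         __u32 mark = 1;
+ *         void *data, *data_meta;
+ *
+ *         if (bpf_xdp_adjust_meta(xdp_md, -(int)sizeof(mark)))
+ *                 return XDP_PASS;
+ *         data      = (void *)(long)xdp_md->data;
+ *         data_meta = (void *)(long)xdp_md->data_meta;
+ *         if (data_meta + sizeof(mark) > data)
+ *                 return XDP_PASS;
+ *         *(__u32 *)data_meta = mark;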
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_xdp_adjust_meta)(struct xdp_md *xdp_md, int delta) = (void *) 54;
+
+/*
+ * bpf_perf_event_read_value
+ *
+ * Read the value of a perf event counter, and store it into *buf*
+ * of size *buf_size*. This helper relies on a *map* of type
+ * **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event
+ * counter is selected when *map* is updated with perf event file
+ * descriptors. The *map* is an array whose size is the number of
+ * available CPUs, and each cell contains a value relative to one
+ * CPU. The value to retrieve is indicated by *flags*, that
+ * contains the index of the CPU to look up, masked with
+ * **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
+ * **BPF_F_CURRENT_CPU** to indicate that the value for the
+ * current CPU should be retrieved.
+ *
+ * This helper behaves in a way close to
+ * **bpf_perf_event_read**\ () helper, save that instead of
+ * just returning the value observed, it fills the *buf*
+ * structure. This allows for additional data to be retrieved: in
+ * particular, the enabled and running times (in *buf*\
+ * **->enabled** and *buf*\ **->running**, respectively) are
+ * copied. In general, **bpf_perf_event_read_value**\ () is
+ * recommended over **bpf_perf_event_read**\ (), which has some
+ * ABI issues and provides fewer functionalities.
+ *
+ * These values are interesting, because hardware PMU (Performance
+ * Monitoring Unit) counters are limited resources. When there are
+ * more PMU based perf events opened than available counters,
+ * kernel will multiplex these events so each event gets certain
+ * percentage (but not all) of the PMU time. In case that
+ * multiplexing happens, the number of samples or counter value
+ * will not reflect the case compared to when no multiplexing
+ * occurs. This makes comparison between different runs difficult.
+ * Typically, the counter value should be normalized before
+ * comparing to other experiments. The usual normalization is done
+ * as follows.
+ *
+ * ::
+ *
+ *         normalized_counter = counter * t_enabled / t_running
+ *
+ * Where t_enabled is the time enabled for event and t_running is
+ * the time running for event since last normalization. The
+ * enabled and running times are accumulated since the perf event
+ * open. To achieve scaling factor between two invocations of an
+ * eBPF program, users can use CPU id as the key (which is
+ * typical for perf array usage model) to remember the previous
+ * value and do the calculation inside the eBPF program.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_perf_event_read_value)(void *map, __u64 flags, struct bpf_perf_event_value *buf, __u32 buf_size) = (void *) 55;
+
+/*
+ * bpf_perf_prog_read_value
+ *
+ * For an eBPF program attached to a perf event, retrieve the
+ * value of the event counter associated to *ctx* and store it in
+ * the structure pointed by *buf* and of size *buf_size*. Enabled
+ * and running times are also stored in the structure (see
+ * description of helper **bpf_perf_event_read_value**\ () for
+ * more details).
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_perf_prog_read_value)(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, __u32 buf_size) = (void *) 56;
+
+/*
+ * bpf_getsockopt
+ *
+ * Emulate a call to **getsockopt()** on the socket associated to
+ * *bpf_socket*, which must be a full socket.
The *level* at + * which the option resides and the name *optname* of the option + * must be specified, see **getsockopt(2)** for more information. + * The retrieved value is stored in the structure pointed by + * *opval* and of length *optlen*. + * + * *bpf_socket* should be one of the following: + * + * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. + * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT** + * and **BPF_CGROUP_INET6_CONNECT**. + * + * This helper actually implements a subset of **getsockopt()**. + * It supports the following *level*\ s: + * + * * **IPPROTO_TCP**, which supports *optname* + * **TCP_CONGESTION**. + * * **IPPROTO_IP**, which supports *optname* **IP_TOS**. + * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_getsockopt)(void *bpf_socket, int level, int optname, void *optval, int optlen) = (void *) 57; + +/* + * bpf_override_return + * + * Used for error injection, this helper uses kprobes to override + * the return value of the probed function, and to set it to *rc*. + * The first argument is the context *regs* on which the kprobe + * works. + * + * This helper works by setting the PC (program counter) + * to an override function which is run in place of the original + * probed function. This means the probed function is not run at + * all. The replacement function just returns with the required + * value. + * + * This helper has security implications, and thus is subject to + * restrictions. It is only available if the kernel was compiled + * with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration + * option, and in this case it only works on functions tagged with + * **ALLOW_ERROR_INJECTION** in the kernel code. + * + * Also, the helper is only available for the architectures having + * the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing, + * x86 architecture is the only one to support this feature. + * + * Returns + * 0 + */ +static long (*bpf_override_return)(struct pt_regs *regs, __u64 rc) = (void *) 58; + +/* + * bpf_sock_ops_cb_flags_set + * + * Attempt to set the value of the **bpf_sock_ops_cb_flags** field + * for the full TCP socket associated to *bpf_sock_ops* to + * *argval*. + * + * The primary use of this field is to determine if there should + * be calls to eBPF programs of type + * **BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP + * code. A program of the same type can change its value, per + * connection and as necessary, when the connection is + * established. This field is directly accessible for reading, but + * this helper must be used for updates in order to return an + * error if an eBPF program tries to set a callback that is not + * supported in the current kernel. + * + * *argval* is a flag array which can combine these flags: + * + * * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out) + * * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission) + * * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change) + * * **BPF_SOCK_OPS_RTT_CB_FLAG** (every RTT) + * + * Therefore, this function can be used to clear a callback flag by + * setting the appropriate bit to zero. e.g. to disable the RTO + * callback: + * + * **bpf_sock_ops_cb_flags_set(bpf_sock,** + * **bpf_sock->bpf_sock_ops_cb_flags & ~BPF_SOCK_OPS_RTO_CB_FLAG)** + * + * Here are some examples of where one could call such eBPF + * program: + * + * * When RTO fires. + * * When a packet is retransmitted. + * * When the connection terminates. 
+ * * When a packet is sent. + * * When a packet is received. + * + * Returns + * Code **-EINVAL** if the socket is not a full TCP socket; + * otherwise, a positive number containing the bits that could not + * be set is returned (which comes down to 0 if all bits were set + * as required). + */ +static long (*bpf_sock_ops_cb_flags_set)(struct bpf_sock_ops *bpf_sock, int argval) = (void *) 59; + +/* + * bpf_msg_redirect_map + * + * This helper is used in programs implementing policies at the + * socket level. If the message *msg* is allowed to pass (i.e. if + * the verdict eBPF program returns **SK_PASS**), redirect it to + * the socket referenced by *map* (of type + * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and + * egress interfaces can be used for redirection. The + * **BPF_F_INGRESS** value in *flags* is used to make the + * distinction (ingress path is selected if the flag is present, + * egress path otherwise). This is the only flag supported for now. + * + * Returns + * **SK_PASS** on success, or **SK_DROP** on error. + */ +static long (*bpf_msg_redirect_map)(struct sk_msg_md *msg, void *map, __u32 key, __u64 flags) = (void *) 60; + +/* + * bpf_msg_apply_bytes + * + * For socket policies, apply the verdict of the eBPF program to + * the next *bytes* (number of bytes) of message *msg*. + * + * For example, this helper can be used in the following cases: + * + * * A single **sendmsg**\ () or **sendfile**\ () system call + * contains multiple logical messages that the eBPF program is + * supposed to read and for which it should apply a verdict. + * * An eBPF program only cares to read the first *bytes* of a + * *msg*. If the message has a large payload, then setting up + * and calling the eBPF program repeatedly for all bytes, even + * though the verdict is already known, would create unnecessary + * overhead. + * + * When called from within an eBPF program, the helper sets a + * counter internal to the BPF infrastructure, that is used to + * apply the last verdict to the next *bytes*. If *bytes* is + * smaller than the current data being processed from a + * **sendmsg**\ () or **sendfile**\ () system call, the first + * *bytes* will be sent and the eBPF program will be re-run with + * the pointer for start of data pointing to byte number *bytes* + * **+ 1**. If *bytes* is larger than the current data being + * processed, then the eBPF verdict will be applied to multiple + * **sendmsg**\ () or **sendfile**\ () calls until *bytes* are + * consumed. + * + * Note that if a socket closes with the internal counter holding + * a non-zero value, this is not a problem because data is not + * being buffered for *bytes* and is sent as it is received. + * + * Returns + * 0 + */ +static long (*bpf_msg_apply_bytes)(struct sk_msg_md *msg, __u32 bytes) = (void *) 61; + +/* + * bpf_msg_cork_bytes + * + * For socket policies, prevent the execution of the verdict eBPF + * program for message *msg* until *bytes* (byte number) have been + * accumulated. + * + * This can be used when one needs a specific number of bytes + * before a verdict can be assigned, even if the data spans + * multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme + * case would be a user calling **sendmsg**\ () repeatedly with + * 1-byte long message segments. Obviously, this is bad for + * performance, but it is still valid. If the eBPF program needs + * *bytes* bytes to validate a header, this helper can be used to + * prevent the eBPF program to be called again until *bytes* have + * been accumulated. 
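+ *
+ * A rough sketch for a verdict program that waits for a full
+ * application header (*struct app_hdr* is a hypothetical type, and
+ * the visible-bytes check is only an approximation of the
+ * accumulated length):
+ *
+ * ::
+ *
+ *         if (msg->data + sizeof(struct app_hdr) > msg->data_end) {
+ *                 bpf_msg_cork_bytes(msg, sizeof(struct app_hdr));
+ *                 return SK_PASS;
+ *         }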
+ * + * Returns + * 0 + */ +static long (*bpf_msg_cork_bytes)(struct sk_msg_md *msg, __u32 bytes) = (void *) 62; + +/* + * bpf_msg_pull_data + * + * For socket policies, pull in non-linear data from user space + * for *msg* and set pointers *msg*\ **->data** and *msg*\ + * **->data_end** to *start* and *end* bytes offsets into *msg*, + * respectively. + * + * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a + * *msg* it can only parse data that the (**data**, **data_end**) + * pointers have already consumed. For **sendmsg**\ () hooks this + * is likely the first scatterlist element. But for calls relying + * on the **sendpage** handler (e.g. **sendfile**\ ()) this will + * be the range (**0**, **0**) because the data is shared with + * user space and by default the objective is to avoid allowing + * user space to modify data while (or after) eBPF verdict is + * being decided. This helper can be used to pull in data and to + * set the start and end pointer to given values. Data will be + * copied if necessary (i.e. if data was not linear and if start + * and end pointers do not point to the same chunk). + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * All values for *flags* are reserved for future usage, and must + * be left at zero. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_msg_pull_data)(struct sk_msg_md *msg, __u32 start, __u32 end, __u64 flags) = (void *) 63; + +/* + * bpf_bind + * + * Bind the socket associated to *ctx* to the address pointed by + * *addr*, of length *addr_len*. This allows for making outgoing + * connection from the desired IP address, which can be useful for + * example when all processes inside a cgroup should use one + * single IP address on a host that has multiple IP configured. + * + * This helper works for IPv4 and IPv6, TCP and UDP sockets. The + * domain (*addr*\ **->sa_family**) must be **AF_INET** (or + * **AF_INET6**). It's advised to pass zero port (**sin_port** + * or **sin6_port**) which triggers IP_BIND_ADDRESS_NO_PORT-like + * behavior and lets the kernel efficiently pick up an unused + * port as long as 4-tuple is unique. Passing non-zero port might + * lead to degraded performance. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_bind)(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) = (void *) 64; + +/* + * bpf_xdp_adjust_tail + * + * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is + * possible to both shrink and grow the packet tail. + * Shrink done via *delta* being a negative integer. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_xdp_adjust_tail)(struct xdp_md *xdp_md, int delta) = (void *) 65; + +/* + * bpf_skb_get_xfrm_state + * + * Retrieve the XFRM state (IP transform framework, see also + * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*. 
+ * + * The retrieved value is stored in the **struct bpf_xfrm_state** + * pointed by *xfrm_state* and of length *size*. + * + * All values for *flags* are reserved for future usage, and must + * be left at zero. + * + * This helper is available only if the kernel was compiled with + * **CONFIG_XFRM** configuration option. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_get_xfrm_state)(struct __sk_buff *skb, __u32 index, struct bpf_xfrm_state *xfrm_state, __u32 size, __u64 flags) = (void *) 66; + +/* + * bpf_get_stack + * + * Return a user or a kernel stack in bpf program provided buffer. + * To achieve this, the helper needs *ctx*, which is a pointer + * to the context on which the tracing program is executed. + * To store the stacktrace, the bpf program provides *buf* with + * a nonnegative *size*. + * + * The last argument, *flags*, holds the number of stack frames to + * skip (from 0 to 255), masked with + * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set + * the following flags: + * + * **BPF_F_USER_STACK** + * Collect a user space stack instead of a kernel stack. + * **BPF_F_USER_BUILD_ID** + * Collect buildid+offset instead of ips for user stack, + * only valid if **BPF_F_USER_STACK** is also specified. + * + * **bpf_get_stack**\ () can collect up to + * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject + * to sufficient large buffer size. Note that + * this limit can be controlled with the **sysctl** program, and + * that it should be manually increased in order to profile long + * user stacks (such as stacks for Java programs). To do so, use: + * + * :: + * + * # sysctl kernel.perf_event_max_stack= + * + * Returns + * The non-negative copied *buf* length equal to or less than + * *size* on success, or a negative error in case of failure. + */ +static long (*bpf_get_stack)(void *ctx, void *buf, __u32 size, __u64 flags) = (void *) 67; + +/* + * bpf_skb_load_bytes_relative + * + * This helper is similar to **bpf_skb_load_bytes**\ () in that + * it provides an easy way to load *len* bytes from *offset* + * from the packet associated to *skb*, into the buffer pointed + * by *to*. The difference to **bpf_skb_load_bytes**\ () is that + * a fifth argument *start_header* exists in order to select a + * base offset to start from. *start_header* can be one of: + * + * **BPF_HDR_START_MAC** + * Base offset to load data from is *skb*'s mac header. + * **BPF_HDR_START_NET** + * Base offset to load data from is *skb*'s network header. + * + * In general, "direct packet access" is the preferred method to + * access packet data, however, this helper is in particular useful + * in socket filters where *skb*\ **->data** does not always point + * to the start of the mac header and where "direct packet access" + * is not available. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_load_bytes_relative)(const void *skb, __u32 offset, void *to, __u32 len, __u32 start_header) = (void *) 68; + +/* + * bpf_fib_lookup + * + * Do FIB lookup in kernel tables using parameters in *params*. + * If lookup is successful and result shows packet is to be + * forwarded, the neighbor tables are searched for the nexthop. 
+ * If successful (ie., FIB lookup shows forwarding and nexthop + * is resolved), the nexthop address is returned in ipv4_dst + * or ipv6_dst based on family, smac is set to mac address of + * egress device, dmac is set to nexthop mac address, rt_metric + * is set to metric from route (IPv4/IPv6 only), and ifindex + * is set to the device index of the nexthop from the FIB lookup. + * + * *plen* argument is the size of the passed in struct. + * *flags* argument can be a combination of one or more of the + * following values: + * + * **BPF_FIB_LOOKUP_DIRECT** + * Do a direct table lookup vs full lookup using FIB + * rules. + * **BPF_FIB_LOOKUP_OUTPUT** + * Perform lookup from an egress perspective (default is + * ingress). + * + * *ctx* is either **struct xdp_md** for XDP programs or + * **struct sk_buff** tc cls_act programs. + * + * Returns + * * < 0 if any input argument is invalid + * * 0 on success (packet is forwarded, nexthop neighbor exists) + * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the + * packet is not forwarded or needs assist from full stack + * + * If lookup fails with BPF_FIB_LKUP_RET_FRAG_NEEDED, then the MTU + * was exceeded and output params->mtu_result contains the MTU. + */ +static long (*bpf_fib_lookup)(void *ctx, struct bpf_fib_lookup *params, int plen, __u32 flags) = (void *) 69; + +/* + * bpf_sock_hash_update + * + * Add an entry to, or update a sockhash *map* referencing sockets. + * The *skops* is used as a new value for the entry associated to + * *key*. *flags* is one of: + * + * **BPF_NOEXIST** + * The entry for *key* must not exist in the map. + * **BPF_EXIST** + * The entry for *key* must already exist in the map. + * **BPF_ANY** + * No condition on the existence of the entry for *key*. + * + * If the *map* has eBPF programs (parser and verdict), those will + * be inherited by the socket being added. If the socket is + * already attached to eBPF programs, this results in an error. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_sock_hash_update)(struct bpf_sock_ops *skops, void *map, void *key, __u64 flags) = (void *) 70; + +/* + * bpf_msg_redirect_hash + * + * This helper is used in programs implementing policies at the + * socket level. If the message *msg* is allowed to pass (i.e. if + * the verdict eBPF program returns **SK_PASS**), redirect it to + * the socket referenced by *map* (of type + * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and + * egress interfaces can be used for redirection. The + * **BPF_F_INGRESS** value in *flags* is used to make the + * distinction (ingress path is selected if the flag is present, + * egress path otherwise). This is the only flag supported for now. + * + * Returns + * **SK_PASS** on success, or **SK_DROP** on error. + */ +static long (*bpf_msg_redirect_hash)(struct sk_msg_md *msg, void *map, void *key, __u64 flags) = (void *) 71; + +/* + * bpf_sk_redirect_hash + * + * This helper is used in programs implementing policies at the + * skb socket level. If the sk_buff *skb* is allowed to pass (i.e. + * if the verdict eBPF program returns **SK_PASS**), redirect it + * to the socket referenced by *map* (of type + * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and + * egress interfaces can be used for redirection. The + * **BPF_F_INGRESS** value in *flags* is used to make the + * distinction (ingress path is selected if the flag is present, + * egress otherwise). This is the only flag supported for now. 
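+ *
+ * A minimal sketch (the *sock_hash* map and *struct sock_key* are
+ * assumptions; the key would be filled from the *skb* before the
+ * call):
+ *
+ * ::
+ *
+ *         struct sock_key key = {};
+ *
+ *         return bpf_sk_redirect_hash(skb, &sock_hash, &key, BPF_F_INGRESS);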
+ * + * Returns + * **SK_PASS** on success, or **SK_DROP** on error. + */ +static long (*bpf_sk_redirect_hash)(struct __sk_buff *skb, void *map, void *key, __u64 flags) = (void *) 72; + +/* + * bpf_lwt_push_encap + * + * Encapsulate the packet associated to *skb* within a Layer 3 + * protocol header. This header is provided in the buffer at + * address *hdr*, with *len* its size in bytes. *type* indicates + * the protocol of the header and can be one of: + * + * **BPF_LWT_ENCAP_SEG6** + * IPv6 encapsulation with Segment Routing Header + * (**struct ipv6_sr_hdr**). *hdr* only contains the SRH, + * the IPv6 header is computed by the kernel. + * **BPF_LWT_ENCAP_SEG6_INLINE** + * Only works if *skb* contains an IPv6 packet. Insert a + * Segment Routing Header (**struct ipv6_sr_hdr**) inside + * the IPv6 header. + * **BPF_LWT_ENCAP_IP** + * IP encapsulation (GRE/GUE/IPIP/etc). The outer header + * must be IPv4 or IPv6, followed by zero or more + * additional headers, up to **LWT_BPF_MAX_HEADROOM** + * total bytes in all prepended headers. Please note that + * if **skb_is_gso**\ (*skb*) is true, no more than two + * headers can be prepended, and the inner header, if + * present, should be either GRE or UDP/GUE. + * + * **BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs + * of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can + * be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and + * **BPF_PROG_TYPE_LWT_XMIT**. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_lwt_push_encap)(struct __sk_buff *skb, __u32 type, void *hdr, __u32 len) = (void *) 73; + +/* + * bpf_lwt_seg6_store_bytes + * + * Store *len* bytes from address *from* into the packet + * associated to *skb*, at *offset*. Only the flags, tag and TLVs + * inside the outermost IPv6 Segment Routing Header can be + * modified through this helper. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_lwt_seg6_store_bytes)(struct __sk_buff *skb, __u32 offset, const void *from, __u32 len) = (void *) 74; + +/* + * bpf_lwt_seg6_adjust_srh + * + * Adjust the size allocated to TLVs in the outermost IPv6 + * Segment Routing Header contained in the packet associated to + * *skb*, at position *offset* by *delta* bytes. Only offsets + * after the segments are accepted. *delta* can be as well + * positive (growing) as negative (shrinking). + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. 
+ */ +static long (*bpf_lwt_seg6_adjust_srh)(struct __sk_buff *skb, __u32 offset, __s32 delta) = (void *) 75; + +/* + * bpf_lwt_seg6_action + * + * Apply an IPv6 Segment Routing action of type *action* to the + * packet associated to *skb*. Each action takes a parameter + * contained at address *param*, and of length *param_len* bytes. + * *action* can be one of: + * + * **SEG6_LOCAL_ACTION_END_X** + * End.X action: Endpoint with Layer-3 cross-connect. + * Type of *param*: **struct in6_addr**. + * **SEG6_LOCAL_ACTION_END_T** + * End.T action: Endpoint with specific IPv6 table lookup. + * Type of *param*: **int**. + * **SEG6_LOCAL_ACTION_END_B6** + * End.B6 action: Endpoint bound to an SRv6 policy. + * Type of *param*: **struct ipv6_sr_hdr**. + * **SEG6_LOCAL_ACTION_END_B6_ENCAP** + * End.B6.Encap action: Endpoint bound to an SRv6 + * encapsulation policy. + * Type of *param*: **struct ipv6_sr_hdr**. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_lwt_seg6_action)(struct __sk_buff *skb, __u32 action, void *param, __u32 param_len) = (void *) 76; + +/* + * bpf_rc_repeat + * + * This helper is used in programs implementing IR decoding, to + * report a successfully decoded repeat key message. This delays + * the generation of a key up event for previously generated + * key down event. + * + * Some IR protocols like NEC have a special IR message for + * repeating last button, for when a button is held down. + * + * The *ctx* should point to the lirc sample as passed into + * the program. + * + * This helper is only available is the kernel was compiled with + * the **CONFIG_BPF_LIRC_MODE2** configuration option set to + * "**y**". + * + * Returns + * 0 + */ +static long (*bpf_rc_repeat)(void *ctx) = (void *) 77; + +/* + * bpf_rc_keydown + * + * This helper is used in programs implementing IR decoding, to + * report a successfully decoded key press with *scancode*, + * *toggle* value in the given *protocol*. The scancode will be + * translated to a keycode using the rc keymap, and reported as + * an input key down event. After a period a key up event is + * generated. This period can be extended by calling either + * **bpf_rc_keydown**\ () again with the same values, or calling + * **bpf_rc_repeat**\ (). + * + * Some protocols include a toggle bit, in case the button was + * released and pressed again between consecutive scancodes. + * + * The *ctx* should point to the lirc sample as passed into + * the program. + * + * The *protocol* is the decoded protocol number (see + * **enum rc_proto** for some predefined values). + * + * This helper is only available is the kernel was compiled with + * the **CONFIG_BPF_LIRC_MODE2** configuration option set to + * "**y**". + * + * Returns + * 0 + */ +static long (*bpf_rc_keydown)(void *ctx, __u32 protocol, __u64 scancode, __u32 toggle) = (void *) 78; + +/* + * bpf_skb_cgroup_id + * + * Return the cgroup v2 id of the socket associated with the *skb*. + * This is roughly similar to the **bpf_get_cgroup_classid**\ () + * helper for cgroup v1 by providing a tag resp. identifier that + * can be matched on or used for map lookups e.g. to implement + * policy. 
The cgroup v2 id of a given path in the hierarchy is + * exposed in user space through the f_handle API in order to get + * to the same 64-bit id. + * + * This helper can be used on TC egress path, but not on ingress, + * and is available only if the kernel was compiled with the + * **CONFIG_SOCK_CGROUP_DATA** configuration option. + * + * Returns + * The id is returned or 0 in case the id could not be retrieved. + */ +static __u64 (*bpf_skb_cgroup_id)(struct __sk_buff *skb) = (void *) 79; + +/* + * bpf_get_current_cgroup_id + * + * Get the current cgroup id based on the cgroup within which + * the current task is running. + * + * Returns + * A 64-bit integer containing the current cgroup id based + * on the cgroup within which the current task is running. + */ +static __u64 (*bpf_get_current_cgroup_id)(void) = (void *) 80; + +/* + * bpf_get_local_storage + * + * Get the pointer to the local storage area. + * The type and the size of the local storage is defined + * by the *map* argument. + * The *flags* meaning is specific for each map type, + * and has to be 0 for cgroup local storage. + * + * Depending on the BPF program type, a local storage area + * can be shared between multiple instances of the BPF program, + * running simultaneously. + * + * A user should care about the synchronization by himself. + * For example, by using the **BPF_ATOMIC** instructions to alter + * the shared data. + * + * Returns + * A pointer to the local storage area. + */ +static void *(*bpf_get_local_storage)(void *map, __u64 flags) = (void *) 81; + +/* + * bpf_sk_select_reuseport + * + * Select a **SO_REUSEPORT** socket from a + * **BPF_MAP_TYPE_REUSEPORT_SOCKARRAY** *map*. + * It checks the selected socket is matching the incoming + * request in the socket buffer. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_sk_select_reuseport)(struct sk_reuseport_md *reuse, void *map, void *key, __u64 flags) = (void *) 82; + +/* + * bpf_skb_ancestor_cgroup_id + * + * Return id of cgroup v2 that is ancestor of cgroup associated + * with the *skb* at the *ancestor_level*. The root cgroup is at + * *ancestor_level* zero and each step down the hierarchy + * increments the level. If *ancestor_level* == level of cgroup + * associated with *skb*, then return value will be same as that + * of **bpf_skb_cgroup_id**\ (). + * + * The helper is useful to implement policies based on cgroups + * that are upper in hierarchy than immediate cgroup associated + * with *skb*. + * + * The format of returned id and helper limitations are same as in + * **bpf_skb_cgroup_id**\ (). + * + * Returns + * The id is returned or 0 in case the id could not be retrieved. + */ +static __u64 (*bpf_skb_ancestor_cgroup_id)(struct __sk_buff *skb, int ancestor_level) = (void *) 83; + +/* + * bpf_sk_lookup_tcp + * + * Look for TCP socket matching *tuple*, optionally in a child + * network namespace *netns*. The return value must be checked, + * and if non-**NULL**, released via **bpf_sk_release**\ (). + * + * The *ctx* should point to the context of the program, such as + * the skb or socket (depending on the hook in use). This is used + * to determine the base network namespace for the lookup. + * + * *tuple_size* must be one of: + * + * **sizeof**\ (*tuple*\ **->ipv4**) + * Look for an IPv4 socket. + * **sizeof**\ (*tuple*\ **->ipv6**) + * Look for an IPv6 socket. 
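+ *
+ * A sketch of an IPv4 lookup in the current netns (the tuple would
+ * be filled from the parsed packet headers before the call):
+ *
+ * ::
+ *
+ *         struct bpf_sock_tuple tuple = {};
+ *         struct bpf_sock *sk;
+ *
+ *         sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
+ *                                BPF_F_CURRENT_NETNS, 0);
+ *         if (sk)
+ *                 bpf_sk_release(sk);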
+ * + * If the *netns* is a negative signed 32-bit integer, then the + * socket lookup table in the netns associated with the *ctx* + * will be used. For the TC hooks, this is the netns of the device + * in the skb. For socket hooks, this is the netns of the socket. + * If *netns* is any other signed 32-bit value greater than or + * equal to zero then it specifies the ID of the netns relative to + * the netns associated with the *ctx*. *netns* values beyond the + * range of 32-bit integers are reserved for future use. + * + * All values for *flags* are reserved for future usage, and must + * be left at zero. + * + * This helper is available only if the kernel was compiled with + * **CONFIG_NET** configuration option. + * + * Returns + * Pointer to **struct bpf_sock**, or **NULL** in case of failure. + * For sockets with reuseport option, the **struct bpf_sock** + * result is from *reuse*\ **->socks**\ [] using the hash of the + * tuple. + */ +static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx, struct bpf_sock_tuple *tuple, __u32 tuple_size, __u64 netns, __u64 flags) = (void *) 84; + +/* + * bpf_sk_lookup_udp + * + * Look for UDP socket matching *tuple*, optionally in a child + * network namespace *netns*. The return value must be checked, + * and if non-**NULL**, released via **bpf_sk_release**\ (). + * + * The *ctx* should point to the context of the program, such as + * the skb or socket (depending on the hook in use). This is used + * to determine the base network namespace for the lookup. + * + * *tuple_size* must be one of: + * + * **sizeof**\ (*tuple*\ **->ipv4**) + * Look for an IPv4 socket. + * **sizeof**\ (*tuple*\ **->ipv6**) + * Look for an IPv6 socket. + * + * If the *netns* is a negative signed 32-bit integer, then the + * socket lookup table in the netns associated with the *ctx* + * will be used. For the TC hooks, this is the netns of the device + * in the skb. For socket hooks, this is the netns of the socket. + * If *netns* is any other signed 32-bit value greater than or + * equal to zero then it specifies the ID of the netns relative to + * the netns associated with the *ctx*. *netns* values beyond the + * range of 32-bit integers are reserved for future use. + * + * All values for *flags* are reserved for future usage, and must + * be left at zero. + * + * This helper is available only if the kernel was compiled with + * **CONFIG_NET** configuration option. + * + * Returns + * Pointer to **struct bpf_sock**, or **NULL** in case of failure. + * For sockets with reuseport option, the **struct bpf_sock** + * result is from *reuse*\ **->socks**\ [] using the hash of the + * tuple. + */ +static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx, struct bpf_sock_tuple *tuple, __u32 tuple_size, __u64 netns, __u64 flags) = (void *) 85; + +/* + * bpf_sk_release + * + * Release the reference held by *sock*. *sock* must be a + * non-**NULL** pointer that was returned from + * **bpf_sk_lookup_xxx**\ (). + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_sk_release)(void *sock) = (void *) 86; + +/* + * bpf_map_push_elem + * + * Push an element *value* in *map*. *flags* is one of: + * + * **BPF_EXIST** + * If the queue/stack is full, the oldest element is + * removed to make room for this. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_map_push_elem)(void *map, const void *value, __u64 flags) = (void *) 87; + +/* + * bpf_map_pop_elem + * + * Pop an element from *map*. 
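+ *
+ * A sketch with a **BPF_MAP_TYPE_QUEUE** map (the *queue_map* name
+ * is an assumption); after the pop, *out* holds the oldest queued
+ * value:
+ *
+ * ::
+ *
+ *         __u32 in = 42, out = 0;
+ *
+ *         bpf_map_push_elem(&queue_map, &in, 0);
+ *         bpf_map_pop_elem(&queue_map, &out);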
+ * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_map_pop_elem)(void *map, void *value) = (void *) 88; + +/* + * bpf_map_peek_elem + * + * Get an element from *map* without removing it. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_map_peek_elem)(void *map, void *value) = (void *) 89; + +/* + * bpf_msg_push_data + * + * For socket policies, insert *len* bytes into *msg* at offset + * *start*. + * + * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a + * *msg* it may want to insert metadata or options into the *msg*. + * This can later be read and used by any of the lower layer BPF + * hooks. + * + * This helper may fail if under memory pressure (a malloc + * fails) in these cases BPF programs will get an appropriate + * error and BPF programs will need to handle them. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_msg_push_data)(struct sk_msg_md *msg, __u32 start, __u32 len, __u64 flags) = (void *) 90; + +/* + * bpf_msg_pop_data + * + * Will remove *len* bytes from a *msg* starting at byte *start*. + * This may result in **ENOMEM** errors under certain situations if + * an allocation and copy are required due to a full ring buffer. + * However, the helper will try to avoid doing the allocation + * if possible. Other errors can occur if input parameters are + * invalid either due to *start* byte not being valid part of *msg* + * payload and/or *pop* value being to large. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_msg_pop_data)(struct sk_msg_md *msg, __u32 start, __u32 len, __u64 flags) = (void *) 91; + +/* + * bpf_rc_pointer_rel + * + * This helper is used in programs implementing IR decoding, to + * report a successfully decoded pointer movement. + * + * The *ctx* should point to the lirc sample as passed into + * the program. + * + * This helper is only available is the kernel was compiled with + * the **CONFIG_BPF_LIRC_MODE2** configuration option set to + * "**y**". + * + * Returns + * 0 + */ +static long (*bpf_rc_pointer_rel)(void *ctx, __s32 rel_x, __s32 rel_y) = (void *) 92; + +/* + * bpf_spin_lock + * + * Acquire a spinlock represented by the pointer *lock*, which is + * stored as part of a value of a map. Taking the lock allows to + * safely update the rest of the fields in that value. The + * spinlock can (and must) later be released with a call to + * **bpf_spin_unlock**\ (\ *lock*\ ). + * + * Spinlocks in BPF programs come with a number of restrictions + * and constraints: + * + * * **bpf_spin_lock** objects are only allowed inside maps of + * types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this + * list could be extended in the future). + * * BTF description of the map is mandatory. + * * The BPF program can take ONE lock at a time, since taking two + * or more could cause dead locks. + * * Only one **struct bpf_spin_lock** is allowed per map element. + * * When the lock is taken, calls (either BPF to BPF or helpers) + * are not allowed. + * * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not + * allowed inside a spinlock-ed region. + * * The BPF program MUST call **bpf_spin_unlock**\ () to release + * the lock, on all execution paths, before it returns. + * * The BPF program can access **struct bpf_spin_lock** only via + * the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ () + * helpers. 
Loading or storing data into the **struct + * bpf_spin_lock** *lock*\ **;** field of a map is not allowed. + * * To use the **bpf_spin_lock**\ () helper, the BTF description + * of the map value must be a struct and have **struct + * bpf_spin_lock** *anyname*\ **;** field at the top level. + * Nested lock inside another struct is not allowed. + * * The **struct bpf_spin_lock** *lock* field in a map value must + * be aligned on a multiple of 4 bytes in that value. + * * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy + * the **bpf_spin_lock** field to user space. + * * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from + * a BPF program, do not update the **bpf_spin_lock** field. + * * **bpf_spin_lock** cannot be on the stack or inside a + * networking packet (it can only be inside of a map values). + * * **bpf_spin_lock** is available to root only. + * * Tracing programs and socket filter programs cannot use + * **bpf_spin_lock**\ () due to insufficient preemption checks + * (but this may change in the future). + * * **bpf_spin_lock** is not allowed in inner maps of map-in-map. + * + * Returns + * 0 + */ +static long (*bpf_spin_lock)(struct bpf_spin_lock *lock) = (void *) 93; + +/* + * bpf_spin_unlock + * + * Release the *lock* previously locked by a call to + * **bpf_spin_lock**\ (\ *lock*\ ). + * + * Returns + * 0 + */ +static long (*bpf_spin_unlock)(struct bpf_spin_lock *lock) = (void *) 94; + +/* + * bpf_sk_fullsock + * + * This helper gets a **struct bpf_sock** pointer such + * that all the fields in this **bpf_sock** can be accessed. + * + * Returns + * A **struct bpf_sock** pointer on success, or **NULL** in + * case of failure. + */ +static struct bpf_sock *(*bpf_sk_fullsock)(struct bpf_sock *sk) = (void *) 95; + +/* + * bpf_tcp_sock + * + * This helper gets a **struct bpf_tcp_sock** pointer from a + * **struct bpf_sock** pointer. + * + * Returns + * A **struct bpf_tcp_sock** pointer on success, or **NULL** in + * case of failure. + */ +static struct bpf_tcp_sock *(*bpf_tcp_sock)(struct bpf_sock *sk) = (void *) 96; + +/* + * bpf_skb_ecn_set_ce + * + * Set ECN (Explicit Congestion Notification) field of IP header + * to **CE** (Congestion Encountered) if current value is **ECT** + * (ECN Capable Transport). Otherwise, do nothing. Works with IPv6 + * and IPv4. + * + * Returns + * 1 if the **CE** flag is set (either by the current helper call + * or because it was already present), 0 if it is not set. + */ +static long (*bpf_skb_ecn_set_ce)(struct __sk_buff *skb) = (void *) 97; + +/* + * bpf_get_listener_sock + * + * Return a **struct bpf_sock** pointer in **TCP_LISTEN** state. + * **bpf_sk_release**\ () is unnecessary and not allowed. + * + * Returns + * A **struct bpf_sock** pointer on success, or **NULL** in + * case of failure. + */ +static struct bpf_sock *(*bpf_get_listener_sock)(struct bpf_sock *sk) = (void *) 98; + +/* + * bpf_skc_lookup_tcp + * + * Look for TCP socket matching *tuple*, optionally in a child + * network namespace *netns*. The return value must be checked, + * and if non-**NULL**, released via **bpf_sk_release**\ (). + * + * This function is identical to **bpf_sk_lookup_tcp**\ (), except + * that it also returns timewait or request sockets. Use + * **bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the + * full structure. + * + * This helper is available only if the kernel was compiled with + * **CONFIG_NET** configuration option. + * + * Returns + * Pointer to **struct bpf_sock**, or **NULL** in case of failure. 
+ * For sockets with reuseport option, the **struct bpf_sock** + * result is from *reuse*\ **->socks**\ [] using the hash of the + * tuple. + */ +static struct bpf_sock *(*bpf_skc_lookup_tcp)(void *ctx, struct bpf_sock_tuple *tuple, __u32 tuple_size, __u64 netns, __u64 flags) = (void *) 99; + +/* + * bpf_tcp_check_syncookie + * + * Check whether *iph* and *th* contain a valid SYN cookie ACK for + * the listening socket in *sk*. + * + * *iph* points to the start of the IPv4 or IPv6 header, while + * *iph_len* contains **sizeof**\ (**struct iphdr**) or + * **sizeof**\ (**struct ip6hdr**). + * + * *th* points to the start of the TCP header, while *th_len* + * contains **sizeof**\ (**struct tcphdr**). + * + * Returns + * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative + * error otherwise. + */ +static long (*bpf_tcp_check_syncookie)(void *sk, void *iph, __u32 iph_len, struct tcphdr *th, __u32 th_len) = (void *) 100; + +/* + * bpf_sysctl_get_name + * + * Get name of sysctl in /proc/sys/ and copy it into provided by + * program buffer *buf* of size *buf_len*. + * + * The buffer is always NUL terminated, unless it's zero-sized. + * + * If *flags* is zero, full name (e.g. "net/ipv4/tcp_mem") is + * copied. Use **BPF_F_SYSCTL_BASE_NAME** flag to copy base name + * only (e.g. "tcp_mem"). + * + * Returns + * Number of character copied (not including the trailing NUL). + * + * **-E2BIG** if the buffer wasn't big enough (*buf* will contain + * truncated name in this case). + */ +static long (*bpf_sysctl_get_name)(struct bpf_sysctl *ctx, char *buf, unsigned long buf_len, __u64 flags) = (void *) 101; + +/* + * bpf_sysctl_get_current_value + * + * Get current value of sysctl as it is presented in /proc/sys + * (incl. newline, etc), and copy it as a string into provided + * by program buffer *buf* of size *buf_len*. + * + * The whole value is copied, no matter what file position user + * space issued e.g. sys_read at. + * + * The buffer is always NUL terminated, unless it's zero-sized. + * + * Returns + * Number of character copied (not including the trailing NUL). + * + * **-E2BIG** if the buffer wasn't big enough (*buf* will contain + * truncated name in this case). + * + * **-EINVAL** if current value was unavailable, e.g. because + * sysctl is uninitialized and read returns -EIO for it. + */ +static long (*bpf_sysctl_get_current_value)(struct bpf_sysctl *ctx, char *buf, unsigned long buf_len) = (void *) 102; + +/* + * bpf_sysctl_get_new_value + * + * Get new value being written by user space to sysctl (before + * the actual write happens) and copy it as a string into + * provided by program buffer *buf* of size *buf_len*. + * + * User space may write new value at file position > 0. + * + * The buffer is always NUL terminated, unless it's zero-sized. + * + * Returns + * Number of character copied (not including the trailing NUL). + * + * **-E2BIG** if the buffer wasn't big enough (*buf* will contain + * truncated name in this case). + * + * **-EINVAL** if sysctl is being read. + */ +static long (*bpf_sysctl_get_new_value)(struct bpf_sysctl *ctx, char *buf, unsigned long buf_len) = (void *) 103; + +/* + * bpf_sysctl_set_new_value + * + * Override new value being written by user space to sysctl with + * value provided by program in buffer *buf* of size *buf_len*. + * + * *buf* should contain a string in same form as provided by user + * space on sysctl write. + * + * User space may write new value at file position > 0. 
To override + * the whole sysctl value file position should be set to zero. + * + * Returns + * 0 on success. + * + * **-E2BIG** if the *buf_len* is too big. + * + * **-EINVAL** if sysctl is being read. + */ +static long (*bpf_sysctl_set_new_value)(struct bpf_sysctl *ctx, const char *buf, unsigned long buf_len) = (void *) 104; + +/* + * bpf_strtol + * + * Convert the initial part of the string from buffer *buf* of + * size *buf_len* to a long integer according to the given base + * and save the result in *res*. + * + * The string may begin with an arbitrary amount of white space + * (as determined by **isspace**\ (3)) followed by a single + * optional '**-**' sign. + * + * Five least significant bits of *flags* encode base, other bits + * are currently unused. + * + * Base must be either 8, 10, 16 or 0 to detect it automatically + * similar to user space **strtol**\ (3). + * + * Returns + * Number of characters consumed on success. Must be positive but + * no more than *buf_len*. + * + * **-EINVAL** if no valid digits were found or unsupported base + * was provided. + * + * **-ERANGE** if resulting value was out of range. + */ +static long (*bpf_strtol)(const char *buf, unsigned long buf_len, __u64 flags, long *res) = (void *) 105; + +/* + * bpf_strtoul + * + * Convert the initial part of the string from buffer *buf* of + * size *buf_len* to an unsigned long integer according to the + * given base and save the result in *res*. + * + * The string may begin with an arbitrary amount of white space + * (as determined by **isspace**\ (3)). + * + * Five least significant bits of *flags* encode base, other bits + * are currently unused. + * + * Base must be either 8, 10, 16 or 0 to detect it automatically + * similar to user space **strtoul**\ (3). + * + * Returns + * Number of characters consumed on success. Must be positive but + * no more than *buf_len*. + * + * **-EINVAL** if no valid digits were found or unsupported base + * was provided. + * + * **-ERANGE** if resulting value was out of range. + */ +static long (*bpf_strtoul)(const char *buf, unsigned long buf_len, __u64 flags, unsigned long *res) = (void *) 106; + +/* + * bpf_sk_storage_get + * + * Get a bpf-local-storage from a *sk*. + * + * Logically, it could be thought of getting the value from + * a *map* with *sk* as the **key**. From this + * perspective, the usage is not much different from + * **bpf_map_lookup_elem**\ (*map*, **&**\ *sk*) except this + * helper enforces the key must be a full socket and the map must + * be a **BPF_MAP_TYPE_SK_STORAGE** also. + * + * Underneath, the value is stored locally at *sk* instead of + * the *map*. The *map* is used as the bpf-local-storage + * "type". The bpf-local-storage "type" (i.e. the *map*) is + * searched against all bpf-local-storages residing at *sk*. + * + * *sk* is a kernel **struct sock** pointer for LSM program. + * *sk* is a **struct bpf_sock** pointer for other program types. + * + * An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be + * used such that a new bpf-local-storage will be + * created if one does not exist. *value* can be used + * together with **BPF_SK_STORAGE_GET_F_CREATE** to specify + * the initial value of a bpf-local-storage. If *value* is + * **NULL**, the new bpf-local-storage will be zero initialized. + * + * Returns + * A bpf-local-storage pointer is returned on success. + * + * **NULL** if not found or there was an error in adding + * a new bpf-local-storage. 
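+ *
+ * A minimal usage sketch (not part of the upstream helper
+ * documentation), assuming the usual libbpf BTF map macros; the map
+ * name *sk_pkt_cnt* and the sockops attach point are illustrative
+ * only:
+ *
+ * ::
+ *
+ *	struct {
+ *		__uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ *		__uint(map_flags, BPF_F_NO_PREALLOC);
+ *		__type(key, int);
+ *		__type(value, __u64);
+ *	} sk_pkt_cnt SEC(".maps");
+ *
+ *	SEC("sockops")
+ *	int count_per_socket(struct bpf_sock_ops *skops)
+ *	{
+ *		struct bpf_sock *sk = skops->sk;
+ *		__u64 *cnt;
+ *
+ *		if (!sk)
+ *			return 1;
+ *		// Create zero-initialized storage on first use.
+ *		cnt = bpf_sk_storage_get(&sk_pkt_cnt, sk, NULL,
+ *					 BPF_SK_STORAGE_GET_F_CREATE);
+ *		if (cnt)
+ *			__sync_fetch_and_add(cnt, 1);
+ *		return 1;
+ *	}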
+ */ +static void *(*bpf_sk_storage_get)(void *map, void *sk, void *value, __u64 flags) = (void *) 107; + +/* + * bpf_sk_storage_delete + * + * Delete a bpf-local-storage from a *sk*. + * + * Returns + * 0 on success. + * + * **-ENOENT** if the bpf-local-storage cannot be found. + * **-EINVAL** if sk is not a fullsock (e.g. a request_sock). + */ +static long (*bpf_sk_storage_delete)(void *map, void *sk) = (void *) 108; + +/* + * bpf_send_signal + * + * Send signal *sig* to the process of the current task. + * The signal may be delivered to any of this process's threads. + * + * Returns + * 0 on success or successfully queued. + * + * **-EBUSY** if work queue under nmi is full. + * + * **-EINVAL** if *sig* is invalid. + * + * **-EPERM** if no permission to send the *sig*. + * + * **-EAGAIN** if bpf program can try again. + */ +static long (*bpf_send_signal)(__u32 sig) = (void *) 109; + +/* + * bpf_tcp_gen_syncookie + * + * Try to issue a SYN cookie for the packet with corresponding + * IP/TCP headers, *iph* and *th*, on the listening socket in *sk*. + * + * *iph* points to the start of the IPv4 or IPv6 header, while + * *iph_len* contains **sizeof**\ (**struct iphdr**) or + * **sizeof**\ (**struct ip6hdr**). + * + * *th* points to the start of the TCP header, while *th_len* + * contains the length of the TCP header. + * + * Returns + * On success, lower 32 bits hold the generated SYN cookie in + * followed by 16 bits which hold the MSS value for that cookie, + * and the top 16 bits are unused. + * + * On failure, the returned value is one of the following: + * + * **-EINVAL** SYN cookie cannot be issued due to error + * + * **-ENOENT** SYN cookie should not be issued (no SYN flood) + * + * **-EOPNOTSUPP** kernel configuration does not enable SYN cookies + * + * **-EPROTONOSUPPORT** IP packet version is not 4 or 6 + */ +static __s64 (*bpf_tcp_gen_syncookie)(void *sk, void *iph, __u32 iph_len, struct tcphdr *th, __u32 th_len) = (void *) 110; + +/* + * bpf_skb_output + * + * Write raw *data* blob into a special BPF perf event held by + * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf + * event must have the following attributes: **PERF_SAMPLE_RAW** + * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and + * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. + * + * The *flags* are used to indicate the index in *map* for which + * the value must be put, masked with **BPF_F_INDEX_MASK**. + * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** + * to indicate that the index of the current CPU core should be + * used. + * + * The value to write, of *size*, is passed through eBPF stack and + * pointed by *data*. + * + * *ctx* is a pointer to in-kernel struct sk_buff. + * + * This helper is similar to **bpf_perf_event_output**\ () but + * restricted to raw_tracepoint bpf programs. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_output)(void *ctx, void *map, __u64 flags, void *data, __u64 size) = (void *) 111; + +/* + * bpf_probe_read_user + * + * Safely attempt to read *size* bytes from user space address + * *unsafe_ptr* and store the data in *dst*. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_probe_read_user)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 112; + +/* + * bpf_probe_read_kernel + * + * Safely attempt to read *size* bytes from kernel space address + * *unsafe_ptr* and store the data in *dst*. 
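+ *
+ * A minimal kprobe sketch (not part of the upstream helper
+ * documentation); the probed function and the PT_REGS_PARM2()
+ * accessor from **bpf_tracing.h** are illustrative only:
+ *
+ * ::
+ *
+ *	SEC("kprobe/do_unlinkat")
+ *	int trace_unlink(struct pt_regs *ctx)
+ *	{
+ *		struct filename *name = (struct filename *)PT_REGS_PARM2(ctx);
+ *		const char *name_ptr;
+ *		char buf[64] = {};
+ *
+ *		// Kernel pointers cannot be dereferenced directly here,
+ *		// so copy the field and then the string explicitly.
+ *		bpf_probe_read_kernel(&name_ptr, sizeof(name_ptr), &name->name);
+ *		bpf_probe_read_kernel_str(buf, sizeof(buf), name_ptr);
+ *		return 0;
+ *	}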
+ * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_probe_read_kernel)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 113; + +/* + * bpf_probe_read_user_str + * + * Copy a NUL terminated string from an unsafe user address + * *unsafe_ptr* to *dst*. The *size* should include the + * terminating NUL byte. In case the string length is smaller than + * *size*, the target is not padded with further NUL bytes. If the + * string length is larger than *size*, just *size*-1 bytes are + * copied and the last byte is set to NUL. + * + * On success, returns the number of bytes that were written, + * including the terminal NUL. This makes this helper useful in + * tracing programs for reading strings, and more importantly to + * get its length at runtime. See the following snippet: + * + * :: + * + * SEC("kprobe/sys_open") + * void bpf_sys_open(struct pt_regs *ctx) + * { + * char buf[PATHLEN]; // PATHLEN is defined to 256 + * int res = bpf_probe_read_user_str(buf, sizeof(buf), + * ctx->di); + * + * // Consume buf, for example push it to + * // userspace via bpf_perf_event_output(); we + * // can use res (the string length) as event + * // size, after checking its boundaries. + * } + * + * In comparison, using **bpf_probe_read_user**\ () helper here + * instead to read the string would require to estimate the length + * at compile time, and would often result in copying more memory + * than necessary. + * + * Another useful use case is when parsing individual process + * arguments or individual environment variables navigating + * *current*\ **->mm->arg_start** and *current*\ + * **->mm->env_start**: using this helper and the return value, + * one can quickly iterate at the right offset of the memory area. + * + * Returns + * On success, the strictly positive length of the output string, + * including the trailing NUL character. On error, a negative + * value. + */ +static long (*bpf_probe_read_user_str)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 114; + +/* + * bpf_probe_read_kernel_str + * + * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr* + * to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply. + * + * Returns + * On success, the strictly positive length of the string, including + * the trailing NUL character. On error, a negative value. + */ +static long (*bpf_probe_read_kernel_str)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 115; + +/* + * bpf_tcp_send_ack + * + * Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**. + * *rcv_nxt* is the ack_seq to be sent out. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_tcp_send_ack)(void *tp, __u32 rcv_nxt) = (void *) 116; + +/* + * bpf_send_signal_thread + * + * Send signal *sig* to the thread corresponding to the current task. + * + * Returns + * 0 on success or successfully queued. + * + * **-EBUSY** if work queue under nmi is full. + * + * **-EINVAL** if *sig* is invalid. + * + * **-EPERM** if no permission to send the *sig*. + * + * **-EAGAIN** if bpf program can try again. 
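+ *
+ * A minimal usage sketch (not part of the upstream helper
+ * documentation); the tracepoint, the policy check, and the use of
+ * signal number 10 (SIGUSR1 on most architectures) are illustrative
+ * only:
+ *
+ * ::
+ *
+ *	SEC("tracepoint/syscalls/sys_enter_unlinkat")
+ *	int notify_unlink(void *ctx)
+ *	{
+ *		__u64 uid_gid = bpf_get_current_uid_gid();
+ *
+ *		// Signal only the calling thread, not the whole process.
+ *		if ((__u32)uid_gid != 0)
+ *			bpf_send_signal_thread(10);
+ *		return 0;
+ *	}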
+ */ +static long (*bpf_send_signal_thread)(__u32 sig) = (void *) 117; + +/* + * bpf_jiffies64 + * + * Obtain the 64bit jiffies + * + * Returns + * The 64 bit jiffies + */ +static __u64 (*bpf_jiffies64)(void) = (void *) 118; + +/* + * bpf_read_branch_records + * + * For an eBPF program attached to a perf event, retrieve the + * branch records (**struct perf_branch_entry**) associated to *ctx* + * and store it in the buffer pointed by *buf* up to size + * *size* bytes. + * + * Returns + * On success, number of bytes written to *buf*. On error, a + * negative value. + * + * The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to + * instead return the number of bytes required to store all the + * branch entries. If this flag is set, *buf* may be NULL. + * + * **-EINVAL** if arguments invalid or **size** not a multiple + * of **sizeof**\ (**struct perf_branch_entry**\ ). + * + * **-ENOENT** if architecture does not support branch records. + */ +static long (*bpf_read_branch_records)(struct bpf_perf_event_data *ctx, void *buf, __u32 size, __u64 flags) = (void *) 119; + +/* + * bpf_get_ns_current_pid_tgid + * + * Returns 0 on success, values for *pid* and *tgid* as seen from the current + * *namespace* will be returned in *nsdata*. + * + * Returns + * 0 on success, or one of the following in case of failure: + * + * **-EINVAL** if dev and inum supplied don't match dev_t and inode number + * with nsfs of current task, or if dev conversion to dev_t lost high bits. + * + * **-ENOENT** if pidns does not exists for the current task. + */ +static long (*bpf_get_ns_current_pid_tgid)(__u64 dev, __u64 ino, struct bpf_pidns_info *nsdata, __u32 size) = (void *) 120; + +/* + * bpf_xdp_output + * + * Write raw *data* blob into a special BPF perf event held by + * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf + * event must have the following attributes: **PERF_SAMPLE_RAW** + * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and + * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. + * + * The *flags* are used to indicate the index in *map* for which + * the value must be put, masked with **BPF_F_INDEX_MASK**. + * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** + * to indicate that the index of the current CPU core should be + * used. + * + * The value to write, of *size*, is passed through eBPF stack and + * pointed by *data*. + * + * *ctx* is a pointer to in-kernel struct xdp_buff. + * + * This helper is similar to **bpf_perf_eventoutput**\ () but + * restricted to raw_tracepoint bpf programs. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_xdp_output)(void *ctx, void *map, __u64 flags, void *data, __u64 size) = (void *) 121; + +/* + * bpf_get_netns_cookie + * + * Retrieve the cookie (generated by the kernel) of the network + * namespace the input *ctx* is associated with. The network + * namespace cookie remains stable for its lifetime and provides + * a global identifier that can be assumed unique. If *ctx* is + * NULL, then the helper returns the cookie for the initial + * network namespace. The cookie itself is very similar to that + * of **bpf_get_socket_cookie**\ () helper, but for network + * namespaces instead of sockets. + * + * Returns + * A 8-byte long opaque number. + */ +static __u64 (*bpf_get_netns_cookie)(void *ctx) = (void *) 122; + +/* + * bpf_get_current_ancestor_cgroup_id + * + * Return id of cgroup v2 that is ancestor of the cgroup associated + * with the current task at the *ancestor_level*. 
The root cgroup + * is at *ancestor_level* zero and each step down the hierarchy + * increments the level. If *ancestor_level* == level of cgroup + * associated with the current task, then return value will be the + * same as that of **bpf_get_current_cgroup_id**\ (). + * + * The helper is useful to implement policies based on cgroups + * that are upper in hierarchy than immediate cgroup associated + * with the current task. + * + * The format of returned id and helper limitations are same as in + * **bpf_get_current_cgroup_id**\ (). + * + * Returns + * The id is returned or 0 in case the id could not be retrieved. + */ +static __u64 (*bpf_get_current_ancestor_cgroup_id)(int ancestor_level) = (void *) 123; + +/* + * bpf_sk_assign + * + * Helper is overloaded depending on BPF program type. This + * description applies to **BPF_PROG_TYPE_SCHED_CLS** and + * **BPF_PROG_TYPE_SCHED_ACT** programs. + * + * Assign the *sk* to the *skb*. When combined with appropriate + * routing configuration to receive the packet towards the socket, + * will cause *skb* to be delivered to the specified socket. + * Subsequent redirection of *skb* via **bpf_redirect**\ (), + * **bpf_clone_redirect**\ () or other methods outside of BPF may + * interfere with successful delivery to the socket. + * + * This operation is only valid from TC ingress path. + * + * The *flags* argument must be zero. + * + * Returns + * 0 on success, or a negative error in case of failure: + * + * **-EINVAL** if specified *flags* are not supported. + * + * **-ENOENT** if the socket is unavailable for assignment. + * + * **-ENETUNREACH** if the socket is unreachable (wrong netns). + * + * **-EOPNOTSUPP** if the operation is not supported, for example + * a call from outside of TC ingress. + * + * **-ESOCKTNOSUPPORT** if the socket type is not supported + * (reuseport). + */ +static long (*bpf_sk_assign)(void *ctx, void *sk, __u64 flags) = (void *) 124; + +/* + * bpf_ktime_get_boot_ns + * + * Return the time elapsed since system boot, in nanoseconds. + * Does include the time the system was suspended. + * See: **clock_gettime**\ (**CLOCK_BOOTTIME**) + * + * Returns + * Current *ktime*. + */ +static __u64 (*bpf_ktime_get_boot_ns)(void) = (void *) 125; + +/* + * bpf_seq_printf + * + * **bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print + * out the format string. + * The *m* represents the seq_file. The *fmt* and *fmt_size* are for + * the format string itself. The *data* and *data_len* are format string + * arguments. The *data* are a **u64** array and corresponding format string + * values are stored in the array. For strings and pointers where pointees + * are accessed, only the pointer values are stored in the *data* array. + * The *data_len* is the size of *data* in bytes - must be a multiple of 8. + * + * Formats **%s**, **%p{i,I}{4,6}** requires to read kernel memory. + * Reading kernel memory may fail due to either invalid address or + * valid address but requiring a major memory fault. If reading kernel memory + * fails, the string for **%s** will be an empty string, and the ip + * address for **%p{i,I}{4,6}** will be 0. Not returning error to + * bpf program is consistent with what **bpf_trace_printk**\ () does for now. + * + * Returns + * 0 on success, or a negative error in case of failure: + * + * **-EBUSY** if per-CPU memory copy buffer is busy, can try again + * by returning 1 from bpf program. + * + * **-EINVAL** if arguments are invalid, or if *fmt* is invalid/unsupported. 
+ * + * **-E2BIG** if *fmt* contains too many format specifiers. + * + * **-EOVERFLOW** if an overflow happened: The same object will be tried again. + */ +static long (*bpf_seq_printf)(struct seq_file *m, const char *fmt, __u32 fmt_size, const void *data, __u32 data_len) = (void *) 126; + +/* + * bpf_seq_write + * + * **bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data. + * The *m* represents the seq_file. The *data* and *len* represent the + * data to write in bytes. + * + * Returns + * 0 on success, or a negative error in case of failure: + * + * **-EOVERFLOW** if an overflow happened: The same object will be tried again. + */ +static long (*bpf_seq_write)(struct seq_file *m, const void *data, __u32 len) = (void *) 127; + +/* + * bpf_sk_cgroup_id + * + * Return the cgroup v2 id of the socket *sk*. + * + * *sk* must be a non-**NULL** pointer to a socket, e.g. one + * returned from **bpf_sk_lookup_xxx**\ (), + * **bpf_sk_fullsock**\ (), etc. The format of returned id is + * same as in **bpf_skb_cgroup_id**\ (). + * + * This helper is available only if the kernel was compiled with + * the **CONFIG_SOCK_CGROUP_DATA** configuration option. + * + * Returns + * The id is returned or 0 in case the id could not be retrieved. + */ +static __u64 (*bpf_sk_cgroup_id)(void *sk) = (void *) 128; + +/* + * bpf_sk_ancestor_cgroup_id + * + * Return id of cgroup v2 that is ancestor of cgroup associated + * with the *sk* at the *ancestor_level*. The root cgroup is at + * *ancestor_level* zero and each step down the hierarchy + * increments the level. If *ancestor_level* == level of cgroup + * associated with *sk*, then return value will be same as that + * of **bpf_sk_cgroup_id**\ (). + * + * The helper is useful to implement policies based on cgroups + * that are upper in hierarchy than immediate cgroup associated + * with *sk*. + * + * The format of returned id and helper limitations are same as in + * **bpf_sk_cgroup_id**\ (). + * + * Returns + * The id is returned or 0 in case the id could not be retrieved. + */ +static __u64 (*bpf_sk_ancestor_cgroup_id)(void *sk, int ancestor_level) = (void *) 129; + +/* + * bpf_ringbuf_output + * + * Copy *size* bytes from *data* into a ring buffer *ringbuf*. + * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification + * of new data availability is sent. + * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification + * of new data availability is sent unconditionally. + * If **0** is specified in *flags*, an adaptive notification + * of new data availability is sent. + * + * An adaptive notification is a notification sent whenever the user-space + * process has caught up and consumed all available payloads. In case the user-space + * process is still processing a previous payload, then no notification is needed + * as it will process the newly added payload automatically. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_ringbuf_output)(void *ringbuf, void *data, __u64 size, __u64 flags) = (void *) 130; + +/* + * bpf_ringbuf_reserve + * + * Reserve *size* bytes of payload in a ring buffer *ringbuf*. + * *flags* must be 0. + * + * Returns + * Valid pointer with *size* bytes of memory available; NULL, + * otherwise. + */ +static void *(*bpf_ringbuf_reserve)(void *ringbuf, __u64 size, __u64 flags) = (void *) 131; + +/* + * bpf_ringbuf_submit + * + * Submit reserved ring buffer sample, pointed to by *data*. 
+ * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification + * of new data availability is sent. + * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification + * of new data availability is sent unconditionally. + * If **0** is specified in *flags*, an adaptive notification + * of new data availability is sent. + * + * See 'bpf_ringbuf_output()' for the definition of adaptive notification. + * + * Returns + * Nothing. Always succeeds. + */ +static void (*bpf_ringbuf_submit)(void *data, __u64 flags) = (void *) 132; + +/* + * bpf_ringbuf_discard + * + * Discard reserved ring buffer sample, pointed to by *data*. + * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification + * of new data availability is sent. + * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification + * of new data availability is sent unconditionally. + * If **0** is specified in *flags*, an adaptive notification + * of new data availability is sent. + * + * See 'bpf_ringbuf_output()' for the definition of adaptive notification. + * + * Returns + * Nothing. Always succeeds. + */ +static void (*bpf_ringbuf_discard)(void *data, __u64 flags) = (void *) 133; + +/* + * bpf_ringbuf_query + * + * Query various characteristics of provided ring buffer. What + * exactly is queries is determined by *flags*: + * + * * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed. + * * **BPF_RB_RING_SIZE**: The size of ring buffer. + * * **BPF_RB_CONS_POS**: Consumer position (can wrap around). + * * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around). + * + * Data returned is just a momentary snapshot of actual values + * and could be inaccurate, so this facility should be used to + * power heuristics and for reporting, not to make 100% correct + * calculation. + * + * Returns + * Requested value, or 0, if *flags* are not recognized. + */ +static __u64 (*bpf_ringbuf_query)(void *ringbuf, __u64 flags) = (void *) 134; + +/* + * bpf_csum_level + * + * Change the skbs checksum level by one layer up or down, or + * reset it entirely to none in order to have the stack perform + * checksum validation. The level is applicable to the following + * protocols: TCP, UDP, GRE, SCTP, FCOE. For example, a decap of + * | ETH | IP | UDP | GUE | IP | TCP | into | ETH | IP | TCP | + * through **bpf_skb_adjust_room**\ () helper with passing in + * **BPF_F_ADJ_ROOM_NO_CSUM_RESET** flag would require one call + * to **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_DEC** since + * the UDP header is removed. Similarly, an encap of the latter + * into the former could be accompanied by a helper call to + * **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_INC** if the + * skb is still intended to be processed in higher layers of the + * stack instead of just egressing at tc. + * + * There are three supported level settings at this time: + * + * * **BPF_CSUM_LEVEL_INC**: Increases skb->csum_level for skbs + * with CHECKSUM_UNNECESSARY. + * * **BPF_CSUM_LEVEL_DEC**: Decreases skb->csum_level for skbs + * with CHECKSUM_UNNECESSARY. + * * **BPF_CSUM_LEVEL_RESET**: Resets skb->csum_level to 0 and + * sets CHECKSUM_NONE to force checksum validation by the stack. + * * **BPF_CSUM_LEVEL_QUERY**: No-op, returns the current + * skb->csum_level. + * + * Returns + * 0 on success, or a negative error in case of failure. In the + * case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level + * is returned or the error code -EACCES in case the skb is not + * subject to CHECKSUM_UNNECESSARY. 
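+ *
+ * A minimal tc sketch of the decap case described above (not part
+ * of the upstream helper documentation); the 28-byte shrink and the
+ * TC_ACT_* return codes from <linux/pkt_cls.h> are illustrative
+ * only:
+ *
+ * ::
+ *
+ *	SEC("tc")
+ *	int decap_outer_udp(struct __sk_buff *skb)
+ *	{
+ *		// Strip the outer encapsulation without resetting the
+ *		// checksum state...
+ *		if (bpf_skb_adjust_room(skb, -28, BPF_ADJ_ROOM_MAC,
+ *					BPF_F_ADJ_ROOM_NO_CSUM_RESET))
+ *			return TC_ACT_SHOT;
+ *		// ...then account for the removed UDP layer explicitly.
+ *		bpf_csum_level(skb, BPF_CSUM_LEVEL_DEC);
+ *		return TC_ACT_OK;
+ *	}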
+ */ +static long (*bpf_csum_level)(struct __sk_buff *skb, __u64 level) = (void *) 135; + +/* + * bpf_skc_to_tcp6_sock + * + * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer. + * + * Returns + * *sk* if casting is valid, or **NULL** otherwise. + */ +static struct tcp6_sock *(*bpf_skc_to_tcp6_sock)(void *sk) = (void *) 136; + +/* + * bpf_skc_to_tcp_sock + * + * Dynamically cast a *sk* pointer to a *tcp_sock* pointer. + * + * Returns + * *sk* if casting is valid, or **NULL** otherwise. + */ +static struct tcp_sock *(*bpf_skc_to_tcp_sock)(void *sk) = (void *) 137; + +/* + * bpf_skc_to_tcp_timewait_sock + * + * Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer. + * + * Returns + * *sk* if casting is valid, or **NULL** otherwise. + */ +static struct tcp_timewait_sock *(*bpf_skc_to_tcp_timewait_sock)(void *sk) = (void *) 138; + +/* + * bpf_skc_to_tcp_request_sock + * + * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer. + * + * Returns + * *sk* if casting is valid, or **NULL** otherwise. + */ +static struct tcp_request_sock *(*bpf_skc_to_tcp_request_sock)(void *sk) = (void *) 139; + +/* + * bpf_skc_to_udp6_sock + * + * Dynamically cast a *sk* pointer to a *udp6_sock* pointer. + * + * Returns + * *sk* if casting is valid, or **NULL** otherwise. + */ +static struct udp6_sock *(*bpf_skc_to_udp6_sock)(void *sk) = (void *) 140; + +/* + * bpf_get_task_stack + * + * Return a user or a kernel stack in bpf program provided buffer. + * To achieve this, the helper needs *task*, which is a valid + * pointer to **struct task_struct**. To store the stacktrace, the + * bpf program provides *buf* with a nonnegative *size*. + * + * The last argument, *flags*, holds the number of stack frames to + * skip (from 0 to 255), masked with + * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set + * the following flags: + * + * **BPF_F_USER_STACK** + * Collect a user space stack instead of a kernel stack. + * **BPF_F_USER_BUILD_ID** + * Collect buildid+offset instead of ips for user stack, + * only valid if **BPF_F_USER_STACK** is also specified. + * + * **bpf_get_task_stack**\ () can collect up to + * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject + * to sufficient large buffer size. Note that + * this limit can be controlled with the **sysctl** program, and + * that it should be manually increased in order to profile long + * user stacks (such as stacks for Java programs). To do so, use: + * + * :: + * + * # sysctl kernel.perf_event_max_stack= + * + * Returns + * The non-negative copied *buf* length equal to or less than + * *size* on success, or a negative error in case of failure. + */ +static long (*bpf_get_task_stack)(struct task_struct *task, void *buf, __u32 size, __u64 flags) = (void *) 141; + +/* + * bpf_load_hdr_opt + * + * Load header option. Support reading a particular TCP header + * option for bpf program (**BPF_PROG_TYPE_SOCK_OPS**). + * + * If *flags* is 0, it will search the option from the + * *skops*\ **->skb_data**. The comment in **struct bpf_sock_ops** + * has details on what skb_data contains under different + * *skops*\ **->op**. + * + * The first byte of the *searchby_res* specifies the + * kind that it wants to search. + * + * If the searching kind is an experimental kind + * (i.e. 253 or 254 according to RFC6994). It also + * needs to specify the "magic" which is either + * 2 bytes or 4 bytes. 
It then also needs to + * specify the size of the magic by using + * the 2nd byte which is "kind-length" of a TCP + * header option and the "kind-length" also + * includes the first 2 bytes "kind" and "kind-length" + * itself as a normal TCP header option also does. + * + * For example, to search experimental kind 254 with + * 2 byte magic 0xeB9F, the searchby_res should be + * [ 254, 4, 0xeB, 0x9F, 0, 0, .... 0 ]. + * + * To search for the standard window scale option (3), + * the *searchby_res* should be [ 3, 0, 0, .... 0 ]. + * Note, kind-length must be 0 for regular option. + * + * Searching for No-Op (0) and End-of-Option-List (1) are + * not supported. + * + * *len* must be at least 2 bytes which is the minimal size + * of a header option. + * + * Supported flags: + * + * * **BPF_LOAD_HDR_OPT_TCP_SYN** to search from the + * saved_syn packet or the just-received syn packet. + * + * + * Returns + * > 0 when found, the header option is copied to *searchby_res*. + * The return value is the total length copied. On failure, a + * negative error code is returned: + * + * **-EINVAL** if a parameter is invalid. + * + * **-ENOMSG** if the option is not found. + * + * **-ENOENT** if no syn packet is available when + * **BPF_LOAD_HDR_OPT_TCP_SYN** is used. + * + * **-ENOSPC** if there is not enough space. Only *len* number of + * bytes are copied. + * + * **-EFAULT** on failure to parse the header options in the + * packet. + * + * **-EPERM** if the helper cannot be used under the current + * *skops*\ **->op**. + */ +static long (*bpf_load_hdr_opt)(struct bpf_sock_ops *skops, void *searchby_res, __u32 len, __u64 flags) = (void *) 142; + +/* + * bpf_store_hdr_opt + * + * Store header option. The data will be copied + * from buffer *from* with length *len* to the TCP header. + * + * The buffer *from* should have the whole option that + * includes the kind, kind-length, and the actual + * option data. The *len* must be at least kind-length + * long. The kind-length does not have to be 4 byte + * aligned. The kernel will take care of the padding + * and setting the 4 bytes aligned value to th->doff. + * + * This helper will check for duplicated option + * by searching the same option in the outgoing skb. + * + * This helper can only be called during + * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**. + * + * + * Returns + * 0 on success, or negative error in case of failure: + * + * **-EINVAL** If param is invalid. + * + * **-ENOSPC** if there is not enough space in the header. + * Nothing has been written + * + * **-EEXIST** if the option already exists. + * + * **-EFAULT** on failrue to parse the existing header options. + * + * **-EPERM** if the helper cannot be used under the current + * *skops*\ **->op**. + */ +static long (*bpf_store_hdr_opt)(struct bpf_sock_ops *skops, const void *from, __u32 len, __u64 flags) = (void *) 143; + +/* + * bpf_reserve_hdr_opt + * + * Reserve *len* bytes for the bpf header option. The + * space will be used by **bpf_store_hdr_opt**\ () later in + * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**. + * + * If **bpf_reserve_hdr_opt**\ () is called multiple times, + * the total number of bytes will be reserved. + * + * This helper can only be called during + * **BPF_SOCK_OPS_HDR_OPT_LEN_CB**. + * + * + * Returns + * 0 on success, or negative error in case of failure: + * + * **-EINVAL** if a parameter is invalid. + * + * **-ENOSPC** if there is not enough space in the header. + * + * **-EPERM** if the helper cannot be used under the current + * *skops*\ **->op**. 
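+ *
+ * A minimal sockops sketch of the reserve-then-write pattern used
+ * together with **bpf_store_hdr_opt**\ () (not part of the upstream
+ * helper documentation); the experimental option kind/payload are
+ * illustrative, and the program is assumed to have enabled
+ * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG** via
+ * **bpf_sock_ops_cb_flags_set**\ () beforehand:
+ *
+ * ::
+ *
+ *	SEC("sockops")
+ *	int add_tcp_opt(struct bpf_sock_ops *skops)
+ *	{
+ *		// kind = 254 (experimental), kind-length = 4, 2-byte magic
+ *		__u8 opt[4] = { 254, 4, 0xeb, 0x9f };
+ *
+ *		switch (skops->op) {
+ *		case BPF_SOCK_OPS_HDR_OPT_LEN_CB:
+ *			bpf_reserve_hdr_opt(skops, sizeof(opt), 0);
+ *			break;
+ *		case BPF_SOCK_OPS_WRITE_HDR_OPT_CB:
+ *			bpf_store_hdr_opt(skops, opt, sizeof(opt), 0);
+ *			break;
+ *		}
+ *		return 1;
+ *	}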
+ */ +static long (*bpf_reserve_hdr_opt)(struct bpf_sock_ops *skops, __u32 len, __u64 flags) = (void *) 144; + +/* + * bpf_inode_storage_get + * + * Get a bpf_local_storage from an *inode*. + * + * Logically, it could be thought of as getting the value from + * a *map* with *inode* as the **key**. From this + * perspective, the usage is not much different from + * **bpf_map_lookup_elem**\ (*map*, **&**\ *inode*) except this + * helper enforces the key must be an inode and the map must also + * be a **BPF_MAP_TYPE_INODE_STORAGE**. + * + * Underneath, the value is stored locally at *inode* instead of + * the *map*. The *map* is used as the bpf-local-storage + * "type". The bpf-local-storage "type" (i.e. the *map*) is + * searched against all bpf_local_storage residing at *inode*. + * + * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be + * used such that a new bpf_local_storage will be + * created if one does not exist. *value* can be used + * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify + * the initial value of a bpf_local_storage. If *value* is + * **NULL**, the new bpf_local_storage will be zero initialized. + * + * Returns + * A bpf_local_storage pointer is returned on success. + * + * **NULL** if not found or there was an error in adding + * a new bpf_local_storage. + */ +static void *(*bpf_inode_storage_get)(void *map, void *inode, void *value, __u64 flags) = (void *) 145; + +/* + * bpf_inode_storage_delete + * + * Delete a bpf_local_storage from an *inode*. + * + * Returns + * 0 on success. + * + * **-ENOENT** if the bpf_local_storage cannot be found. + */ +static int (*bpf_inode_storage_delete)(void *map, void *inode) = (void *) 146; + +/* + * bpf_d_path + * + * Return full path for given **struct path** object, which + * needs to be the kernel BTF *path* object. The path is + * returned in the provided buffer *buf* of size *sz* and + * is zero terminated. + * + * + * Returns + * On success, the strictly positive length of the string, + * including the trailing NUL character. On error, a negative + * value. + */ +static long (*bpf_d_path)(struct path *path, char *buf, __u32 sz) = (void *) 147; + +/* + * bpf_copy_from_user + * + * Read *size* bytes from user space address *user_ptr* and store + * the data in *dst*. This is a wrapper of **copy_from_user**\ (). + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_copy_from_user)(void *dst, __u32 size, const void *user_ptr) = (void *) 148; + +/* + * bpf_snprintf_btf + * + * Use BTF to store a string representation of *ptr*->ptr in *str*, + * using *ptr*->type_id. This value should specify the type + * that *ptr*->ptr points to. LLVM __builtin_btf_type_id(type, 1) + * can be used to look up vmlinux BTF type ids. Traversing the + * data structure using BTF, the type information and values are + * stored in the first *str_size* - 1 bytes of *str*. Safe copy of + * the pointer data is carried out to avoid kernel crashes during + * operation. Smaller types can use string space on the stack; + * larger programs can use map data to store the string + * representation. + * + * The string can be subsequently shared with userspace via + * bpf_perf_event_output() or ring buffer interfaces. + * bpf_trace_printk() is to be avoided as it places too small + * a limit on string size to be useful. 
+ * + * *flags* is a combination of + * + * **BTF_F_COMPACT** + * no formatting around type information + * **BTF_F_NONAME** + * no struct/union member names/types + * **BTF_F_PTR_RAW** + * show raw (unobfuscated) pointer values; + * equivalent to printk specifier %px. + * **BTF_F_ZERO** + * show zero-valued struct/union members; they + * are not displayed by default + * + * + * Returns + * The number of bytes that were written (or would have been + * written if output had to be truncated due to string size), + * or a negative error in cases of failure. + */ +static long (*bpf_snprintf_btf)(char *str, __u32 str_size, struct btf_ptr *ptr, __u32 btf_ptr_size, __u64 flags) = (void *) 149; + +/* + * bpf_seq_printf_btf + * + * Use BTF to write to seq_write a string representation of + * *ptr*->ptr, using *ptr*->type_id as per bpf_snprintf_btf(). + * *flags* are identical to those used for bpf_snprintf_btf. + * + * Returns + * 0 on success or a negative error in case of failure. + */ +static long (*bpf_seq_printf_btf)(struct seq_file *m, struct btf_ptr *ptr, __u32 ptr_size, __u64 flags) = (void *) 150; + +/* + * bpf_skb_cgroup_classid + * + * See **bpf_get_cgroup_classid**\ () for the main description. + * This helper differs from **bpf_get_cgroup_classid**\ () in that + * the cgroup v1 net_cls class is retrieved only from the *skb*'s + * associated socket instead of the current process. + * + * Returns + * The id is returned or 0 in case the id could not be retrieved. + */ +static __u64 (*bpf_skb_cgroup_classid)(struct __sk_buff *skb) = (void *) 151; + +/* + * bpf_redirect_neigh + * + * Redirect the packet to another net device of index *ifindex* + * and fill in L2 addresses from neighboring subsystem. This helper + * is somewhat similar to **bpf_redirect**\ (), except that it + * populates L2 addresses as well, meaning, internally, the helper + * relies on the neighbor lookup for the L2 address of the nexthop. + * + * The helper will perform a FIB lookup based on the skb's + * networking header to get the address of the next hop, unless + * this is supplied by the caller in the *params* argument. The + * *plen* argument indicates the len of *params* and should be set + * to 0 if *params* is NULL. + * + * The *flags* argument is reserved and must be 0. The helper is + * currently only supported for tc BPF program types, and enabled + * for IPv4 and IPv6 protocols. + * + * Returns + * The helper returns **TC_ACT_REDIRECT** on success or + * **TC_ACT_SHOT** on error. + */ +static long (*bpf_redirect_neigh)(__u32 ifindex, struct bpf_redir_neigh *params, int plen, __u64 flags) = (void *) 152; + +/* + * bpf_per_cpu_ptr + * + * Take a pointer to a percpu ksym, *percpu_ptr*, and return a + * pointer to the percpu kernel variable on *cpu*. A ksym is an + * extern variable decorated with '__ksym'. For ksym, there is a + * global var (either static or global) defined of the same name + * in the kernel. The ksym is percpu if the global var is percpu. + * The returned pointer points to the global percpu var on *cpu*. + * + * bpf_per_cpu_ptr() has the same semantic as per_cpu_ptr() in the + * kernel, except that bpf_per_cpu_ptr() may return NULL. This + * happens if *cpu* is larger than nr_cpu_ids. The caller of + * bpf_per_cpu_ptr() must check the returned value. + * + * Returns + * A pointer pointing to the kernel percpu variable on *cpu*, or + * NULL, if *cpu* is invalid. 
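+ *
+ * A minimal usage sketch (not part of the upstream helper
+ * documentation), assuming the libbpf '__ksym' convention and a
+ * kernel that exposes the per-CPU *bpf_prog_active* variable in
+ * BTF; the attach point is illustrative only:
+ *
+ * ::
+ *
+ *	extern const int bpf_prog_active __ksym;
+ *
+ *	SEC("raw_tp/sched_switch")
+ *	int read_percpu(void *ctx)
+ *	{
+ *		__u32 cpu = bpf_get_smp_processor_id();
+ *		const int *active;
+ *
+ *		// May return NULL if 'cpu' is not a valid CPU id.
+ *		active = bpf_per_cpu_ptr(&bpf_prog_active, cpu);
+ *		if (active)
+ *			bpf_printk("cpu %u active %d", cpu, *active);
+ *		return 0;
+ *	}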
+ */ +static void *(*bpf_per_cpu_ptr)(const void *percpu_ptr, __u32 cpu) = (void *) 153; + +/* + * bpf_this_cpu_ptr + * + * Take a pointer to a percpu ksym, *percpu_ptr*, and return a + * pointer to the percpu kernel variable on this cpu. See the + * description of 'ksym' in **bpf_per_cpu_ptr**\ (). + * + * bpf_this_cpu_ptr() has the same semantic as this_cpu_ptr() in + * the kernel. Different from **bpf_per_cpu_ptr**\ (), it would + * never return NULL. + * + * Returns + * A pointer pointing to the kernel percpu variable on this cpu. + */ +static void *(*bpf_this_cpu_ptr)(const void *percpu_ptr) = (void *) 154; + +/* + * bpf_redirect_peer + * + * Redirect the packet to another net device of index *ifindex*. + * This helper is somewhat similar to **bpf_redirect**\ (), except + * that the redirection happens to the *ifindex*' peer device and + * the netns switch takes place from ingress to ingress without + * going through the CPU's backlog queue. + * + * The *flags* argument is reserved and must be 0. The helper is + * currently only supported for tc BPF program types at the ingress + * hook and for veth device types. The peer device must reside in a + * different network namespace. + * + * Returns + * The helper returns **TC_ACT_REDIRECT** on success or + * **TC_ACT_SHOT** on error. + */ +static long (*bpf_redirect_peer)(__u32 ifindex, __u64 flags) = (void *) 155; + +/* + * bpf_task_storage_get + * + * Get a bpf_local_storage from the *task*. + * + * Logically, it could be thought of as getting the value from + * a *map* with *task* as the **key**. From this + * perspective, the usage is not much different from + * **bpf_map_lookup_elem**\ (*map*, **&**\ *task*) except this + * helper enforces the key must be an task_struct and the map must also + * be a **BPF_MAP_TYPE_TASK_STORAGE**. + * + * Underneath, the value is stored locally at *task* instead of + * the *map*. The *map* is used as the bpf-local-storage + * "type". The bpf-local-storage "type" (i.e. the *map*) is + * searched against all bpf_local_storage residing at *task*. + * + * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be + * used such that a new bpf_local_storage will be + * created if one does not exist. *value* can be used + * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify + * the initial value of a bpf_local_storage. If *value* is + * **NULL**, the new bpf_local_storage will be zero initialized. + * + * Returns + * A bpf_local_storage pointer is returned on success. + * + * **NULL** if not found or there was an error in adding + * a new bpf_local_storage. + */ +static void *(*bpf_task_storage_get)(void *map, struct task_struct *task, void *value, __u64 flags) = (void *) 156; + +/* + * bpf_task_storage_delete + * + * Delete a bpf_local_storage from a *task*. + * + * Returns + * 0 on success. + * + * **-ENOENT** if the bpf_local_storage cannot be found. + */ +static long (*bpf_task_storage_delete)(void *map, struct task_struct *task) = (void *) 157; + +/* + * bpf_get_current_task_btf + * + * Return a BTF pointer to the "current" task. + * This pointer can also be used in helpers that accept an + * *ARG_PTR_TO_BTF_ID* of type *task_struct*. + * + * Returns + * Pointer to the current task. + */ +static struct task_struct *(*bpf_get_current_task_btf)(void) = (void *) 158; + +/* + * bpf_bprm_opts_set + * + * Set or clear certain options on *bprm*: + * + * **BPF_F_BPRM_SECUREEXEC** Set the secureexec bit + * which sets the **AT_SECURE** auxv for glibc. 
The bit is cleared if the flag is not specified.
+ *
+ * Returns
+ * **-EINVAL** if invalid *flags* are passed, zero otherwise.
+ */
+static long (*bpf_bprm_opts_set)(struct linux_binprm *bprm, __u64 flags) = (void *) 159;
+
+/*
+ * bpf_ktime_get_coarse_ns
+ *
+ * Return a coarse-grained version of the time elapsed since
+ * system boot, in nanoseconds. Does not include time the system
+ * was suspended.
+ *
+ * See: **clock_gettime**\ (**CLOCK_MONOTONIC_COARSE**)
+ *
+ * Returns
+ * Current *ktime*.
+ */
+static __u64 (*bpf_ktime_get_coarse_ns)(void) = (void *) 160;
+
+/*
+ * bpf_ima_inode_hash
+ *
+ * Returns the stored IMA hash of the *inode* (if it's available).
+ * If the hash is larger than *size*, then only *size*
+ * bytes will be copied to *dst*.
+ *
+ * Returns
+ * The **hash_algo** is returned on success,
+ * **-EOPNOTSUPP** if IMA is disabled or **-EINVAL** if
+ * invalid arguments are passed.
+ */
+static long (*bpf_ima_inode_hash)(struct inode *inode, void *dst, __u32 size) = (void *) 161;
+
+/*
+ * bpf_sock_from_file
+ *
+ * If the given file represents a socket, returns the associated
+ * socket.
+ *
+ * Returns
+ * A pointer to a struct socket on success or NULL if the file is
+ * not a socket.
+ */
+static struct socket *(*bpf_sock_from_file)(struct file *file) = (void *) 162;
+
+/*
+ * bpf_check_mtu
+ *
+ * Check packet size against exceeding the MTU of a net device (based
+ * on *ifindex*). This helper will likely be used in combination
+ * with helpers that adjust/change the packet size.
+ *
+ * The argument *len_diff* can be used for querying with a planned
+ * size change. This allows checking the MTU prior to changing the
+ * packet ctx. Providing a *len_diff* adjustment that is larger than
+ * the actual packet size (resulting in a negative packet size) will
+ * in principle not exceed the MTU, which is why it is not considered
+ * a failure. Other BPF helpers are needed to perform the planned
+ * size change, so the responsibility for catching a negative packet
+ * size belongs to those helpers.
+ *
+ * Specifying *ifindex* zero means the MTU check is performed
+ * against the current net device. This is practical if this isn't
+ * used prior to redirect.
+ *
+ * On input *mtu_len* must be a valid pointer, else the verifier will
+ * reject the BPF program. If the value *mtu_len* is initialized to
+ * zero then the ctx packet size is used. When the value *mtu_len* is
+ * provided as input, it specifies the L3 length that the MTU check
+ * is done against. Remember that XDP and TC lengths operate at L2,
+ * but this value is L3, as it correlates to the MTU and IP-header
+ * tot_len values, which are L3 (similar behavior as bpf_fib_lookup).
+ *
+ * The Linux kernel route table can configure MTUs on a more
+ * specific per route level, which is not provided by this helper.
+ * For route level MTU checks use the **bpf_fib_lookup**\ ()
+ * helper.
+ *
+ * *ctx* is either **struct xdp_md** for XDP programs or
+ * **struct sk_buff** for tc cls_act programs.
+ *
+ * The *flags* argument can be a combination of one or more of the
+ * following values:
+ *
+ * **BPF_MTU_CHK_SEGS**
+ * This flag only works for *ctx* **struct sk_buff**.
+ * If the packet context contains extra packet segment buffers
+ * (often known as a GSO skb), then the MTU check is harder to
+ * perform at this point, because in the transmit path it is
+ * possible for the skb packet to get re-segmented
+ * (depending on net device features).
This could still be
+ * an MTU violation, so this flag enables performing the MTU
+ * check against segments, with a different violation
+ * return code to tell it apart. The check cannot use len_diff.
+ *
+ * On return the *mtu_len* pointer contains the MTU value of the net
+ * device. Remember that the net device configured MTU is the L3 size,
+ * which is what is returned here, while XDP and TC lengths operate at
+ * L2. The helper takes this into account for you, but keep it in mind
+ * when using the MTU value in your BPF code.
+ *
+ *
+ * Returns
+ * * 0 on success, and populates the MTU value in the *mtu_len* pointer.
+ *
+ * * < 0 if any input argument is invalid (*mtu_len* not updated)
+ *
+ * MTU violations return positive values, but also populate the MTU
+ * value in the *mtu_len* pointer, as this can be needed for
+ * implementing PMTU handling:
+ *
+ * * **BPF_MTU_CHK_RET_FRAG_NEEDED**
+ * * **BPF_MTU_CHK_RET_SEGS_TOOBIG**
+ */
+static long (*bpf_check_mtu)(void *ctx, __u32 ifindex, __u32 *mtu_len, __s32 len_diff, __u64 flags) = (void *) 163;
+
+/*
+ * bpf_for_each_map_elem
+ *
+ * For each element in **map**, call **callback_fn** function with
+ * **map**, **callback_ctx** and other map-specific parameters.
+ * The **callback_fn** should be a static function and
+ * the **callback_ctx** should be a pointer to the stack.
+ * The **flags** is used to control certain aspects of the helper.
+ * Currently, the **flags** must be 0.
+ *
+ * The following are a list of supported map types and their
+ * respective expected callback signatures:
+ *
+ * BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_PERCPU_HASH,
+ * BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH,
+ * BPF_MAP_TYPE_ARRAY, BPF_MAP_TYPE_PERCPU_ARRAY
+ *
+ * long (\*callback_fn)(struct bpf_map \*map, const void \*key, void \*value, void \*ctx);
+ *
+ * For per_cpu maps, the map_value is the value on the cpu where the
+ * bpf_prog is running.
+ *
+ * If **callback_fn** returns 0, the helper will continue to the next
+ * element. If the return value is 1, the helper will skip the rest of
+ * the elements and return. Other return values are not used now.
+ *
+ *
+ * Returns
+ * The number of traversed map elements for success, **-EINVAL** for
+ * invalid **flags**.
+ */
+static long (*bpf_for_each_map_elem)(void *map, void *callback_fn, void *callback_ctx, __u64 flags) = (void *) 164;
+
+/*
+ * bpf_snprintf
+ *
+ * Outputs a string into the **str** buffer of size **str_size**
+ * based on a format string stored in a read-only map pointed by
+ * **fmt**.
+ *
+ * Each format specifier in **fmt** corresponds to one u64 element
+ * in the **data** array. For strings and pointers where pointees
+ * are accessed, only the pointer values are stored in the *data*
+ * array. The *data_len* is the size of *data* in bytes - must be
+ * a multiple of 8.
+ *
+ * Formats **%s** and **%p{i,I}{4,6}** require reading kernel
+ * memory. Reading kernel memory may fail due to either an invalid
+ * address or a valid address that requires a major memory fault. If
+ * reading kernel memory fails, the string for **%s** will be an
+ * empty string, and the ip address for **%p{i,I}{4,6}** will be 0.
+ * Not returning an error to the bpf program is consistent with what
+ * **bpf_trace_printk**\ () does for now.
+ *
+ *
+ * Returns
+ * The strictly positive length of the formatted string, including
+ * the trailing zero character. If the return value is greater than
+ * **str_size**, **str** contains a truncated string, guaranteed to
+ * be zero-terminated except when **str_size** is 0.
+ * + * Or **-EBUSY** if the per-CPU memory copy buffer is busy. + */ +static long (*bpf_snprintf)(char *str, __u32 str_size, const char *fmt, __u64 *data, __u32 data_len) = (void *) 165; + +/* + * bpf_sys_bpf + * + * Execute bpf syscall with given arguments. + * + * Returns + * A syscall result. + */ +static long (*bpf_sys_bpf)(__u32 cmd, void *attr, __u32 attr_size) = (void *) 166; + +/* + * bpf_btf_find_by_name_kind + * + * Find BTF type with given name and kind in vmlinux BTF or in module's BTFs. + * + * Returns + * Returns btf_id and btf_obj_fd in lower and upper 32 bits. + */ +static long (*bpf_btf_find_by_name_kind)(char *name, int name_sz, __u32 kind, int flags) = (void *) 167; + +/* + * bpf_sys_close + * + * Execute close syscall for given FD. + * + * Returns + * A syscall result. + */ +static long (*bpf_sys_close)(__u32 fd) = (void *) 168; + +/* + * bpf_timer_init + * + * Initialize the timer. + * First 4 bits of *flags* specify clockid. + * Only CLOCK_MONOTONIC, CLOCK_REALTIME, CLOCK_BOOTTIME are allowed. + * All other bits of *flags* are reserved. + * The verifier will reject the program if *timer* is not from + * the same *map*. + * + * Returns + * 0 on success. + * **-EBUSY** if *timer* is already initialized. + * **-EINVAL** if invalid *flags* are passed. + * **-EPERM** if *timer* is in a map that doesn't have any user references. + * The user space should either hold a file descriptor to a map with timers + * or pin such map in bpffs. When map is unpinned or file descriptor is + * closed all timers in the map will be cancelled and freed. + */ +static long (*bpf_timer_init)(struct bpf_timer *timer, void *map, __u64 flags) = (void *) 169; + +/* + * bpf_timer_set_callback + * + * Configure the timer to call *callback_fn* static function. + * + * Returns + * 0 on success. + * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier. + * **-EPERM** if *timer* is in a map that doesn't have any user references. + * The user space should either hold a file descriptor to a map with timers + * or pin such map in bpffs. When map is unpinned or file descriptor is + * closed all timers in the map will be cancelled and freed. + */ +static long (*bpf_timer_set_callback)(struct bpf_timer *timer, void *callback_fn) = (void *) 170; + +/* + * bpf_timer_start + * + * Set timer expiration N nanoseconds from the current time. The + * configured callback will be invoked in soft irq context on some cpu + * and will not repeat unless another bpf_timer_start() is made. + * In such case the next invocation can migrate to a different cpu. + * Since struct bpf_timer is a field inside map element the map + * owns the timer. The bpf_timer_set_callback() will increment refcnt + * of BPF program to make sure that callback_fn code stays valid. + * When user space reference to a map reaches zero all timers + * in a map are cancelled and corresponding program's refcnts are + * decremented. This is done to make sure that Ctrl-C of a user + * process doesn't leave any timers running. If map is pinned in + * bpffs the callback_fn can re-arm itself indefinitely. + * bpf_map_update/delete_elem() helpers and user space sys_bpf commands + * cancel and free the timer in the given map element. + * The map can contain timers that invoke callback_fn-s from different + * programs. The same callback_fn can serve different timers from + * different maps if key/value layout matches across maps. + * Every bpf_timer_set_callback() can have different callback_fn. + * + * + * Returns + * 0 on success. 
+ * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier + * or invalid *flags* are passed. + */ +static long (*bpf_timer_start)(struct bpf_timer *timer, __u64 nsecs, __u64 flags) = (void *) 171; + +/* + * bpf_timer_cancel + * + * Cancel the timer and wait for callback_fn to finish if it was running. + * + * Returns + * 0 if the timer was not active. + * 1 if the timer was active. + * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier. + * **-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its + * own timer which would have led to a deadlock otherwise. + */ +static long (*bpf_timer_cancel)(struct bpf_timer *timer) = (void *) 172; + +/* + * bpf_get_func_ip + * + * Get address of the traced function (for tracing and kprobe programs). + * + * Returns + * Address of the traced function. + */ +static __u64 (*bpf_get_func_ip)(void *ctx) = (void *) 173; + +/* + * bpf_get_attach_cookie + * + * Get bpf_cookie value provided (optionally) during the program + * attachment. It might be different for each individual + * attachment, even if BPF program itself is the same. + * Expects BPF program context *ctx* as a first argument. + * + * Supported for the following program types: + * - kprobe/uprobe; + * - tracepoint; + * - perf_event. + * + * Returns + * Value specified by user at BPF link creation/attachment time + * or 0, if it was not specified. + */ +static __u64 (*bpf_get_attach_cookie)(void *ctx) = (void *) 174; + +/* + * bpf_task_pt_regs + * + * Get the struct pt_regs associated with **task**. + * + * Returns + * A pointer to struct pt_regs. + */ +static long (*bpf_task_pt_regs)(struct task_struct *task) = (void *) 175; + +/* + * bpf_get_branch_snapshot + * + * Get branch trace from hardware engines like Intel LBR. The + * hardware engine is stopped shortly after the helper is + * called. Therefore, the user need to filter branch entries + * based on the actual use case. To capture branch trace + * before the trigger point of the BPF program, the helper + * should be called at the beginning of the BPF program. + * + * The data is stored as struct perf_branch_entry into output + * buffer *entries*. *size* is the size of *entries* in bytes. + * *flags* is reserved for now and must be zero. + * + * + * Returns + * On success, number of bytes written to *buf*. On error, a + * negative value. + * + * **-EINVAL** if *flags* is not zero. + * + * **-ENOENT** if architecture does not support branch records. + */ +static long (*bpf_get_branch_snapshot)(void *entries, __u32 size, __u64 flags) = (void *) 176; + +/* + * bpf_trace_vprintk + * + * Behaves like **bpf_trace_printk**\ () helper, but takes an array of u64 + * to format and can handle more format args as a result. + * + * Arguments are to be used as in **bpf_seq_printf**\ () helper. + * + * Returns + * The number of bytes written to the buffer, or a negative error + * in case of failure. + */ +static long (*bpf_trace_vprintk)(const char *fmt, __u32 fmt_size, const void *data, __u32 data_len) = (void *) 177; + +/* + * bpf_skc_to_unix_sock + * + * Dynamically cast a *sk* pointer to a *unix_sock* pointer. + * + * Returns + * *sk* if casting is valid, or **NULL** otherwise. + */ +static struct unix_sock *(*bpf_skc_to_unix_sock)(void *sk) = (void *) 178; + +/* + * bpf_kallsyms_lookup_name + * + * Get the address of a kernel symbol, returned in *res*. *res* is + * set to 0 if the symbol is not found. + * + * Returns + * On success, zero. On error, a negative value. 
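/*
 * Illustrative sketch (an assumption, not part of the vendored header):
 * reading the attach cookie and the traced-function address from a kprobe,
 * per the bpf_get_attach_cookie()/bpf_get_func_ip() documentation above.
 * The probe target `tcp_connect` is only an example; assumes SEC() and
 * bpf_printk() from bpf_helpers.h.
 */
SEC("kprobe/tcp_connect")
int log_cookie(struct pt_regs *ctx)
{
	__u64 cookie = bpf_get_attach_cookie(ctx); /* 0 if no cookie was set at attach time */
	__u64 ip = bpf_get_func_ip(ctx);           /* address of the probed function */

	bpf_printk("cookie=%llu ip=%llx", cookie, ip);
	return 0;
}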
+ * + * **-EINVAL** if *flags* is not zero. + * + * **-EINVAL** if string *name* is not the same size as *name_sz*. + * + * **-ENOENT** if symbol is not found. + * + * **-EPERM** if caller does not have permission to obtain kernel address. + */ +static long (*bpf_kallsyms_lookup_name)(const char *name, int name_sz, int flags, __u64 *res) = (void *) 179; + +/* + * bpf_find_vma + * + * Find vma of *task* that contains *addr*, call *callback_fn* + * function with *task*, *vma*, and *callback_ctx*. + * The *callback_fn* should be a static function and + * the *callback_ctx* should be a pointer to the stack. + * The *flags* is used to control certain aspects of the helper. + * Currently, the *flags* must be 0. + * + * The expected callback signature is + * + * long (\*callback_fn)(struct task_struct \*task, struct vm_area_struct \*vma, void \*callback_ctx); + * + * + * Returns + * 0 on success. + * **-ENOENT** if *task->mm* is NULL, or no vma contains *addr*. + * **-EBUSY** if failed to try lock mmap_lock. + * **-EINVAL** for invalid **flags**. + */ +static long (*bpf_find_vma)(struct task_struct *task, __u64 addr, void *callback_fn, void *callback_ctx, __u64 flags) = (void *) 180; + +/* + * bpf_loop + * + * For **nr_loops**, call **callback_fn** function + * with **callback_ctx** as the context parameter. + * The **callback_fn** should be a static function and + * the **callback_ctx** should be a pointer to the stack. + * The **flags** is used to control certain aspects of the helper. + * Currently, the **flags** must be 0. Currently, nr_loops is + * limited to 1 << 23 (~8 million) loops. + * + * long (\*callback_fn)(u32 index, void \*ctx); + * + * where **index** is the current index in the loop. The index + * is zero-indexed. + * + * If **callback_fn** returns 0, the helper will continue to the next + * loop. If return value is 1, the helper will skip the rest of + * the loops and return. Other return values are not used now, + * and will be rejected by the verifier. + * + * + * Returns + * The number of loops performed, **-EINVAL** for invalid **flags**, + * **-E2BIG** if **nr_loops** exceeds the maximum number of loops. + */ +static long (*bpf_loop)(__u32 nr_loops, void *callback_fn, void *callback_ctx, __u64 flags) = (void *) 181; + +/* + * bpf_strncmp + * + * Do strncmp() between **s1** and **s2**. **s1** doesn't need + * to be null-terminated and **s1_sz** is the maximum storage + * size of **s1**. **s2** must be a read-only string. + * + * Returns + * An integer less than, equal to, or greater than zero + * if the first **s1_sz** bytes of **s1** is found to be + * less than, to match, or be greater than **s2**. + */ +static long (*bpf_strncmp)(const char *s1, __u32 s1_sz, const char *s2) = (void *) 182; + +/* + * bpf_get_func_arg + * + * Get **n**-th argument (zero based) of the traced function (for tracing programs) + * returned in **value**. + * + * + * Returns + * 0 on success. + * **-EINVAL** if n >= arguments count of traced function. + */ +static long (*bpf_get_func_arg)(void *ctx, __u32 n, __u64 *value) = (void *) 183; + +/* + * bpf_get_func_ret + * + * Get return value of the traced function (for tracing programs) + * in **value**. + * + * + * Returns + * 0 on success. + * **-EOPNOTSUPP** for tracing programs other than BPF_TRACE_FEXIT or BPF_MODIFY_RETURN. + */ +static long (*bpf_get_func_ret)(void *ctx, __u64 *value) = (void *) 184; + +/* + * bpf_get_func_arg_cnt + * + * Get number of arguments of the traced function (for tracing programs). 
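/*
 * Illustrative sketch (an assumption, not part of the vendored header):
 * running a bounded loop with bpf_loop() and a static callback, following the
 * signature documented above. All names are invented for this example.
 */
struct loop_ctx {
	__u64 sum;
};

static long add_index(__u32 index, void *ctx)
{
	struct loop_ctx *lc = ctx;

	lc->sum += index;
	return 0; /* 0 = next iteration, 1 = stop; other values are rejected */
}

static __always_inline __u64 sum_first_n(__u32 n)
{
	struct loop_ctx lc = {};

	/* returns the number of loops performed, or -EINVAL/-E2BIG on error */
	bpf_loop(n, add_index, &lc, 0);
	return lc.sum;
}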
+ * + * + * Returns + * The number of arguments of the traced function. + */ +static long (*bpf_get_func_arg_cnt)(void *ctx) = (void *) 185; + +/* + * bpf_get_retval + * + * Get the syscall's return value that will be returned to userspace. + * + * This helper is currently supported by cgroup programs only. + * + * Returns + * The syscall's return value. + */ +static int (*bpf_get_retval)(void) = (void *) 186; + +/* + * bpf_set_retval + * + * Set the syscall's return value that will be returned to userspace. + * + * This helper is currently supported by cgroup programs only. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static int (*bpf_set_retval)(int retval) = (void *) 187; + +/* + * bpf_xdp_get_buff_len + * + * Get the total size of a given xdp buff (linear and paged area) + * + * Returns + * The total size of a given xdp buffer. + */ +static __u64 (*bpf_xdp_get_buff_len)(struct xdp_md *xdp_md) = (void *) 188; + +/* + * bpf_xdp_load_bytes + * + * This helper is provided as an easy way to load data from a + * xdp buffer. It can be used to load *len* bytes from *offset* from + * the frame associated to *xdp_md*, into the buffer pointed by + * *buf*. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_xdp_load_bytes)(struct xdp_md *xdp_md, __u32 offset, void *buf, __u32 len) = (void *) 189; + +/* + * bpf_xdp_store_bytes + * + * Store *len* bytes from buffer *buf* into the frame + * associated to *xdp_md*, at *offset*. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_xdp_store_bytes)(struct xdp_md *xdp_md, __u32 offset, void *buf, __u32 len) = (void *) 190; + +/* + * bpf_copy_from_user_task + * + * Read *size* bytes from user space address *user_ptr* in *tsk*'s + * address space, and stores the data in *dst*. *flags* is not + * used yet and is provided for future extensibility. This helper + * can only be used by sleepable programs. + * + * Returns + * 0 on success, or a negative error in case of failure. On error + * *dst* buffer is zeroed out. + */ +static long (*bpf_copy_from_user_task)(void *dst, __u32 size, const void *user_ptr, struct task_struct *tsk, __u64 flags) = (void *) 191; + +/* + * bpf_skb_set_tstamp + * + * Change the __sk_buff->tstamp_type to *tstamp_type* + * and set *tstamp* to the __sk_buff->tstamp together. + * + * If there is no need to change the __sk_buff->tstamp_type, + * the tstamp value can be directly written to __sk_buff->tstamp + * instead. + * + * BPF_SKB_TSTAMP_DELIVERY_MONO is the only tstamp that + * will be kept during bpf_redirect_*(). A non zero + * *tstamp* must be used with the BPF_SKB_TSTAMP_DELIVERY_MONO + * *tstamp_type*. + * + * A BPF_SKB_TSTAMP_UNSPEC *tstamp_type* can only be used + * with a zero *tstamp*. + * + * Only IPv4 and IPv6 skb->protocol are supported. + * + * This function is most useful when it needs to set a + * mono delivery time to __sk_buff->tstamp and then + * bpf_redirect_*() to the egress of an iface. For example, + * changing the (rcv) timestamp in __sk_buff->tstamp at + * ingress to a mono delivery time and then bpf_redirect_*() + * to sch_fq@phy-dev. + * + * Returns + * 0 on success. + * **-EINVAL** for invalid input + * **-EOPNOTSUPP** for unsupported protocol + */ +static long (*bpf_skb_set_tstamp)(struct __sk_buff *skb, __u64 tstamp, __u32 tstamp_type) = (void *) 192; + +/* + * bpf_ima_file_hash + * + * Returns a calculated IMA hash of the *file*. 
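/*
 * Illustrative sketch (an assumption, not part of the vendored header):
 * copying the Ethernet header out of an XDP frame with bpf_xdp_load_bytes(),
 * which also works when the requested bytes span the linear and paged areas.
 * Assumes vmlinux.h (struct ethhdr, struct xdp_md, XDP_PASS) and SEC() from
 * bpf_helpers.h; the program name is invented.
 */
SEC("xdp")
int xdp_peek_eth(struct xdp_md *xdp)
{
	struct ethhdr eth;

	if (bpf_xdp_load_bytes(xdp, 0, &eth, sizeof(eth)) < 0)
		return XDP_PASS; /* frame shorter than an Ethernet header */

	/* eth.h_proto etc. can be inspected here */
	return XDP_PASS;
}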
+ * If the hash is larger than *size*, then only *size* + * bytes will be copied to *dst* + * + * Returns + * The **hash_algo** is returned on success, + * **-EOPNOTSUP** if the hash calculation failed or **-EINVAL** if + * invalid arguments are passed. + */ +static long (*bpf_ima_file_hash)(struct file *file, void *dst, __u32 size) = (void *) 193; + +/* + * bpf_kptr_xchg + * + * Exchange kptr at pointer *map_value* with *ptr*, and return the + * old value. *ptr* can be NULL, otherwise it must be a referenced + * pointer which will be released when this helper is called. + * + * Returns + * The old value of kptr (which can be NULL). The returned pointer + * if not NULL, is a reference which must be released using its + * corresponding release function, or moved into a BPF map before + * program exit. + */ +static void *(*bpf_kptr_xchg)(void *map_value, void *ptr) = (void *) 194; + + diff --git a/pkg/plugin/lib/common/libbpf/_src/bpf_helpers.h b/pkg/plugin/lib/common/libbpf/_src/bpf_helpers.h new file mode 100644 index 000000000..5de3eb267 --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/bpf_helpers.h @@ -0,0 +1,269 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ +#ifndef __BPF_HELPERS__ +#define __BPF_HELPERS__ + +/* + * Note that bpf programs need to include either + * vmlinux.h (auto-generated from BTF) or linux/types.h + * in advance since bpf_helper_defs.h uses such types + * as __u64. + */ +#include "bpf_helper_defs.h" + +#define __uint(name, val) int (*name)[val] +#define __type(name, val) typeof(val) *name +#define __array(name, val) typeof(val) *name[] + +/* + * Helper macro to place programs, maps, license in + * different sections in elf_bpf file. Section names + * are interpreted by libbpf depending on the context (BPF programs, BPF maps, + * extern variables, etc). + * To allow use of SEC() with externs (e.g., for extern .maps declarations), + * make sure __attribute__((unused)) doesn't trigger compilation warning. + */ +#define SEC(name) \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wignored-attributes\"") \ + __attribute__((section(name), used)) \ + _Pragma("GCC diagnostic pop") \ + +/* Avoid 'linux/stddef.h' definition of '__always_inline'. */ +#undef __always_inline +#define __always_inline inline __attribute__((always_inline)) + +#ifndef __noinline +#define __noinline __attribute__((noinline)) +#endif +#ifndef __weak +#define __weak __attribute__((weak)) +#endif + +/* + * Use __hidden attribute to mark a non-static BPF subprogram effectively + * static for BPF verifier's verification algorithm purposes, allowing more + * extensive and permissive BPF verification process, taking into account + * subprogram's caller context. + */ +#define __hidden __attribute__((visibility("hidden"))) + +/* When utilizing vmlinux.h with BPF CO-RE, user BPF programs can't include + * any system-level headers (such as stddef.h, linux/version.h, etc), and + * commonly-used macros like NULL and KERNEL_VERSION aren't available through + * vmlinux.h. This just adds unnecessary hurdles and forces users to re-define + * them on their own. So as a convenience, provide such definitions here. + */ +#ifndef NULL +#define NULL ((void *)0) +#endif + +#ifndef KERNEL_VERSION +#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + ((c) > 255 ? 
255 : (c))) +#endif + +/* + * Helper macros to manipulate data structures + */ +#ifndef offsetof +#define offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER) +#endif +#ifndef container_of +#define container_of(ptr, type, member) \ + ({ \ + void *__mptr = (void *)(ptr); \ + ((type *)(__mptr - offsetof(type, member))); \ + }) +#endif + +/* + * Helper macro to throw a compilation error if __bpf_unreachable() gets + * built into the resulting code. This works given BPF back end does not + * implement __builtin_trap(). This is useful to assert that certain paths + * of the program code are never used and hence eliminated by the compiler. + * + * For example, consider a switch statement that covers known cases used by + * the program. __bpf_unreachable() can then reside in the default case. If + * the program gets extended such that a case is not covered in the switch + * statement, then it will throw a build error due to the default case not + * being compiled out. + */ +#ifndef __bpf_unreachable +# define __bpf_unreachable() __builtin_trap() +#endif + +/* + * Helper function to perform a tail call with a constant/immediate map slot. + */ +#if __clang_major__ >= 8 && defined(__bpf__) +static __always_inline void +bpf_tail_call_static(void *ctx, const void *map, const __u32 slot) +{ + if (!__builtin_constant_p(slot)) + __bpf_unreachable(); + + /* + * Provide a hard guarantee that LLVM won't optimize setting r2 (map + * pointer) and r3 (constant map index) from _different paths_ ending + * up at the _same_ call insn as otherwise we won't be able to use the + * jmpq/nopl retpoline-free patching by the x86-64 JIT in the kernel + * given they mismatch. See also d2e4c1e6c294 ("bpf: Constant map key + * tracking for prog array pokes") for details on verifier tracking. + * + * Note on clobber list: we need to stay in-line with BPF calling + * convention, so even if we don't end up using r0, r4, r5, we need + * to mark them as clobber so that LLVM doesn't end up using them + * before / after the call. + */ + asm volatile("r1 = %[ctx]\n\t" + "r2 = %[map]\n\t" + "r3 = %[slot]\n\t" + "call 12" + :: [ctx]"r"(ctx), [map]"r"(map), [slot]"i"(slot) + : "r0", "r1", "r2", "r3", "r4", "r5"); +} +#endif + +/* + * Helper structure used by eBPF C program + * to describe BPF map attributes to libbpf loader + */ +struct bpf_map_def { + unsigned int type; + unsigned int key_size; + unsigned int value_size; + unsigned int max_entries; + unsigned int map_flags; +} __attribute__((deprecated("use BTF-defined maps in .maps section"))); + +enum libbpf_pin_type { + LIBBPF_PIN_NONE, + /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */ + LIBBPF_PIN_BY_NAME, +}; + +enum libbpf_tristate { + TRI_NO = 0, + TRI_YES = 1, + TRI_MODULE = 2, +}; + +#define __kconfig __attribute__((section(".kconfig"))) +#define __ksym __attribute__((section(".ksyms"))) +#if __has_attribute(btf_type_tag) +#define __kptr __attribute__((btf_type_tag("kptr"))) +#define __kptr_ref __attribute__((btf_type_tag("kptr_ref"))) +#else +#define __kptr +#define __kptr_ref +#endif + +#ifndef ___bpf_concat +#define ___bpf_concat(a, b) a ## b +#endif +#ifndef ___bpf_apply +#define ___bpf_apply(fn, n) ___bpf_concat(fn, n) +#endif +#ifndef ___bpf_nth +#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N +#endif +#ifndef ___bpf_narg +#define ___bpf_narg(...) 
\ + ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) +#endif + +#define ___bpf_fill0(arr, p, x) do {} while (0) +#define ___bpf_fill1(arr, p, x) arr[p] = x +#define ___bpf_fill2(arr, p, x, args...) arr[p] = x; ___bpf_fill1(arr, p + 1, args) +#define ___bpf_fill3(arr, p, x, args...) arr[p] = x; ___bpf_fill2(arr, p + 1, args) +#define ___bpf_fill4(arr, p, x, args...) arr[p] = x; ___bpf_fill3(arr, p + 1, args) +#define ___bpf_fill5(arr, p, x, args...) arr[p] = x; ___bpf_fill4(arr, p + 1, args) +#define ___bpf_fill6(arr, p, x, args...) arr[p] = x; ___bpf_fill5(arr, p + 1, args) +#define ___bpf_fill7(arr, p, x, args...) arr[p] = x; ___bpf_fill6(arr, p + 1, args) +#define ___bpf_fill8(arr, p, x, args...) arr[p] = x; ___bpf_fill7(arr, p + 1, args) +#define ___bpf_fill9(arr, p, x, args...) arr[p] = x; ___bpf_fill8(arr, p + 1, args) +#define ___bpf_fill10(arr, p, x, args...) arr[p] = x; ___bpf_fill9(arr, p + 1, args) +#define ___bpf_fill11(arr, p, x, args...) arr[p] = x; ___bpf_fill10(arr, p + 1, args) +#define ___bpf_fill12(arr, p, x, args...) arr[p] = x; ___bpf_fill11(arr, p + 1, args) +#define ___bpf_fill(arr, args...) \ + ___bpf_apply(___bpf_fill, ___bpf_narg(args))(arr, 0, args) + +/* + * BPF_SEQ_PRINTF to wrap bpf_seq_printf to-be-printed values + * in a structure. + */ +#define BPF_SEQ_PRINTF(seq, fmt, args...) \ +({ \ + static const char ___fmt[] = fmt; \ + unsigned long long ___param[___bpf_narg(args)]; \ + \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ + ___bpf_fill(___param, args); \ + _Pragma("GCC diagnostic pop") \ + \ + bpf_seq_printf(seq, ___fmt, sizeof(___fmt), \ + ___param, sizeof(___param)); \ +}) + +/* + * BPF_SNPRINTF wraps the bpf_snprintf helper with variadic arguments instead of + * an array of u64. + */ +#define BPF_SNPRINTF(out, out_size, fmt, args...) \ +({ \ + static const char ___fmt[] = fmt; \ + unsigned long long ___param[___bpf_narg(args)]; \ + \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ + ___bpf_fill(___param, args); \ + _Pragma("GCC diagnostic pop") \ + \ + bpf_snprintf(out, out_size, ___fmt, \ + ___param, sizeof(___param)); \ +}) + +#ifdef BPF_NO_GLOBAL_DATA +#define BPF_PRINTK_FMT_MOD +#else +#define BPF_PRINTK_FMT_MOD static const +#endif + +#define __bpf_printk(fmt, ...) \ +({ \ + BPF_PRINTK_FMT_MOD char ____fmt[] = fmt; \ + bpf_trace_printk(____fmt, sizeof(____fmt), \ + ##__VA_ARGS__); \ +}) + +/* + * __bpf_vprintk wraps the bpf_trace_vprintk helper with variadic arguments + * instead of an array of u64. + */ +#define __bpf_vprintk(fmt, args...) \ +({ \ + static const char ___fmt[] = fmt; \ + unsigned long long ___param[___bpf_narg(args)]; \ + \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ + ___bpf_fill(___param, args); \ + _Pragma("GCC diagnostic pop") \ + \ + bpf_trace_vprintk(___fmt, sizeof(___fmt), \ + ___param, sizeof(___param)); \ +}) + +/* Use __bpf_printk when bpf_printk call has 3 or fewer fmt args + * Otherwise use __bpf_vprintk + */ +#define ___bpf_pick_printk(...) \ + ___bpf_nth(_, ##__VA_ARGS__, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \ + __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \ + __bpf_vprintk, __bpf_vprintk, __bpf_printk /*3*/, __bpf_printk /*2*/,\ + __bpf_printk /*1*/, __bpf_printk /*0*/) + +/* Helper macro to print out debug messages */ +#define bpf_printk(fmt, args...) 
___bpf_pick_printk(args)(fmt, ##args) + +#endif diff --git a/pkg/plugin/lib/common/libbpf/_src/bpf_prog_linfo.c b/pkg/plugin/lib/common/libbpf/_src/bpf_prog_linfo.c new file mode 100644 index 000000000..5c503096e --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/bpf_prog_linfo.c @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +/* Copyright (c) 2018 Facebook */ + +#include +#include +#include +#include +#include "libbpf.h" +#include "libbpf_internal.h" + +struct bpf_prog_linfo { + void *raw_linfo; + void *raw_jited_linfo; + __u32 *nr_jited_linfo_per_func; + __u32 *jited_linfo_func_idx; + __u32 nr_linfo; + __u32 nr_jited_func; + __u32 rec_size; + __u32 jited_rec_size; +}; + +static int dissect_jited_func(struct bpf_prog_linfo *prog_linfo, + const __u64 *ksym_func, const __u32 *ksym_len) +{ + __u32 nr_jited_func, nr_linfo; + const void *raw_jited_linfo; + const __u64 *jited_linfo; + __u64 last_jited_linfo; + /* + * Index to raw_jited_linfo: + * i: Index for searching the next ksym_func + * prev_i: Index to the last found ksym_func + */ + __u32 i, prev_i; + __u32 f; /* Index to ksym_func */ + + raw_jited_linfo = prog_linfo->raw_jited_linfo; + jited_linfo = raw_jited_linfo; + if (ksym_func[0] != *jited_linfo) + goto errout; + + prog_linfo->jited_linfo_func_idx[0] = 0; + nr_jited_func = prog_linfo->nr_jited_func; + nr_linfo = prog_linfo->nr_linfo; + + for (prev_i = 0, i = 1, f = 1; + i < nr_linfo && f < nr_jited_func; + i++) { + raw_jited_linfo += prog_linfo->jited_rec_size; + last_jited_linfo = *jited_linfo; + jited_linfo = raw_jited_linfo; + + if (ksym_func[f] == *jited_linfo) { + prog_linfo->jited_linfo_func_idx[f] = i; + + /* Sanity check */ + if (last_jited_linfo - ksym_func[f - 1] + 1 > + ksym_len[f - 1]) + goto errout; + + prog_linfo->nr_jited_linfo_per_func[f - 1] = + i - prev_i; + prev_i = i; + + /* + * The ksym_func[f] is found in jited_linfo. + * Look for the next one. + */ + f++; + } else if (*jited_linfo <= last_jited_linfo) { + /* Ensure the addr is increasing _within_ a func */ + goto errout; + } + } + + if (f != nr_jited_func) + goto errout; + + prog_linfo->nr_jited_linfo_per_func[nr_jited_func - 1] = + nr_linfo - prev_i; + + return 0; + +errout: + return -EINVAL; +} + +void bpf_prog_linfo__free(struct bpf_prog_linfo *prog_linfo) +{ + if (!prog_linfo) + return; + + free(prog_linfo->raw_linfo); + free(prog_linfo->raw_jited_linfo); + free(prog_linfo->nr_jited_linfo_per_func); + free(prog_linfo->jited_linfo_func_idx); + free(prog_linfo); +} + +struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info) +{ + struct bpf_prog_linfo *prog_linfo; + __u32 nr_linfo, nr_jited_func; + __u64 data_sz; + + nr_linfo = info->nr_line_info; + + if (!nr_linfo) + return errno = EINVAL, NULL; + + /* + * The min size that bpf_prog_linfo has to access for + * searching purpose. 
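/*
 * Illustrative sketch (an assumption, not part of the vendored header): how
 * the bpf_helpers.h macros above are typically combined in a BPF program —
 * __uint()/__type() for a BTF-defined map, SEC() for section placement, and
 * bpf_printk() for debug output. Map, program and section names are invented;
 * assumes vmlinux.h for BPF_MAP_TYPE_HASH.
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1024);
	__type(key, __u32);
	__type(value, __u64);
} example_counts SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_openat")
int count_openat(void *ctx)
{
	__u32 key = 0;
	__u64 one = 1, *val;

	val = bpf_map_lookup_elem(&example_counts, &key);
	if (val)
		__sync_fetch_and_add(val, 1);
	else
		bpf_map_update_elem(&example_counts, &key, &one, 0 /* BPF_ANY */);

	bpf_printk("openat seen");
	return 0;
}

char LICENSE[] SEC("license") = "GPL";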
+ */ + if (info->line_info_rec_size < + offsetof(struct bpf_line_info, file_name_off)) + return errno = EINVAL, NULL; + + prog_linfo = calloc(1, sizeof(*prog_linfo)); + if (!prog_linfo) + return errno = ENOMEM, NULL; + + /* Copy xlated line_info */ + prog_linfo->nr_linfo = nr_linfo; + prog_linfo->rec_size = info->line_info_rec_size; + data_sz = (__u64)nr_linfo * prog_linfo->rec_size; + prog_linfo->raw_linfo = malloc(data_sz); + if (!prog_linfo->raw_linfo) + goto err_free; + memcpy(prog_linfo->raw_linfo, (void *)(long)info->line_info, data_sz); + + nr_jited_func = info->nr_jited_ksyms; + if (!nr_jited_func || + !info->jited_line_info || + info->nr_jited_line_info != nr_linfo || + info->jited_line_info_rec_size < sizeof(__u64) || + info->nr_jited_func_lens != nr_jited_func || + !info->jited_ksyms || + !info->jited_func_lens) + /* Not enough info to provide jited_line_info */ + return prog_linfo; + + /* Copy jited_line_info */ + prog_linfo->nr_jited_func = nr_jited_func; + prog_linfo->jited_rec_size = info->jited_line_info_rec_size; + data_sz = (__u64)nr_linfo * prog_linfo->jited_rec_size; + prog_linfo->raw_jited_linfo = malloc(data_sz); + if (!prog_linfo->raw_jited_linfo) + goto err_free; + memcpy(prog_linfo->raw_jited_linfo, + (void *)(long)info->jited_line_info, data_sz); + + /* Number of jited_line_info per jited func */ + prog_linfo->nr_jited_linfo_per_func = malloc(nr_jited_func * + sizeof(__u32)); + if (!prog_linfo->nr_jited_linfo_per_func) + goto err_free; + + /* + * For each jited func, + * the start idx to the "linfo" and "jited_linfo" array, + */ + prog_linfo->jited_linfo_func_idx = malloc(nr_jited_func * + sizeof(__u32)); + if (!prog_linfo->jited_linfo_func_idx) + goto err_free; + + if (dissect_jited_func(prog_linfo, + (__u64 *)(long)info->jited_ksyms, + (__u32 *)(long)info->jited_func_lens)) + goto err_free; + + return prog_linfo; + +err_free: + bpf_prog_linfo__free(prog_linfo); + return errno = EINVAL, NULL; +} + +const struct bpf_line_info * +bpf_prog_linfo__lfind_addr_func(const struct bpf_prog_linfo *prog_linfo, + __u64 addr, __u32 func_idx, __u32 nr_skip) +{ + __u32 jited_rec_size, rec_size, nr_linfo, start, i; + const void *raw_jited_linfo, *raw_linfo; + const __u64 *jited_linfo; + + if (func_idx >= prog_linfo->nr_jited_func) + return errno = ENOENT, NULL; + + nr_linfo = prog_linfo->nr_jited_linfo_per_func[func_idx]; + if (nr_skip >= nr_linfo) + return errno = ENOENT, NULL; + + start = prog_linfo->jited_linfo_func_idx[func_idx] + nr_skip; + jited_rec_size = prog_linfo->jited_rec_size; + raw_jited_linfo = prog_linfo->raw_jited_linfo + + (start * jited_rec_size); + jited_linfo = raw_jited_linfo; + if (addr < *jited_linfo) + return errno = ENOENT, NULL; + + nr_linfo -= nr_skip; + rec_size = prog_linfo->rec_size; + raw_linfo = prog_linfo->raw_linfo + (start * rec_size); + for (i = 0; i < nr_linfo; i++) { + if (addr < *jited_linfo) + break; + + raw_linfo += rec_size; + raw_jited_linfo += jited_rec_size; + jited_linfo = raw_jited_linfo; + } + + return raw_linfo - rec_size; +} + +const struct bpf_line_info * +bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo, + __u32 insn_off, __u32 nr_skip) +{ + const struct bpf_line_info *linfo; + __u32 rec_size, nr_linfo, i; + const void *raw_linfo; + + nr_linfo = prog_linfo->nr_linfo; + if (nr_skip >= nr_linfo) + return errno = ENOENT, NULL; + + rec_size = prog_linfo->rec_size; + raw_linfo = prog_linfo->raw_linfo + (nr_skip * rec_size); + linfo = raw_linfo; + if (insn_off < linfo->insn_off) + return errno = ENOENT, NULL; + + 
nr_linfo -= nr_skip; + for (i = 0; i < nr_linfo; i++) { + if (insn_off < linfo->insn_off) + break; + + raw_linfo += rec_size; + linfo = raw_linfo; + } + + return raw_linfo - rec_size; +} diff --git a/pkg/plugin/lib/common/libbpf/_src/bpf_tracing.h b/pkg/plugin/lib/common/libbpf/_src/bpf_tracing.h new file mode 100644 index 000000000..01ce121c3 --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/bpf_tracing.h @@ -0,0 +1,531 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ +#ifndef __BPF_TRACING_H__ +#define __BPF_TRACING_H__ + +/* Scan the ARCH passed in from ARCH env variable (see Makefile) */ +#if defined(__TARGET_ARCH_x86) + #define bpf_target_x86 + #define bpf_target_defined +#elif defined(__TARGET_ARCH_s390) + #define bpf_target_s390 + #define bpf_target_defined +#elif defined(__TARGET_ARCH_arm) + #define bpf_target_arm + #define bpf_target_defined +#elif defined(__TARGET_ARCH_arm64) + #define bpf_target_arm64 + #define bpf_target_defined +#elif defined(__TARGET_ARCH_mips) + #define bpf_target_mips + #define bpf_target_defined +#elif defined(__TARGET_ARCH_powerpc) + #define bpf_target_powerpc + #define bpf_target_defined +#elif defined(__TARGET_ARCH_sparc) + #define bpf_target_sparc + #define bpf_target_defined +#elif defined(__TARGET_ARCH_riscv) + #define bpf_target_riscv + #define bpf_target_defined +#elif defined(__TARGET_ARCH_arc) + #define bpf_target_arc + #define bpf_target_defined +#else + +/* Fall back to what the compiler says */ +#if defined(__x86_64__) + #define bpf_target_x86 + #define bpf_target_defined +#elif defined(__s390__) + #define bpf_target_s390 + #define bpf_target_defined +#elif defined(__arm__) + #define bpf_target_arm + #define bpf_target_defined +#elif defined(__aarch64__) + #define bpf_target_arm64 + #define bpf_target_defined +#elif defined(__mips__) + #define bpf_target_mips + #define bpf_target_defined +#elif defined(__powerpc__) + #define bpf_target_powerpc + #define bpf_target_defined +#elif defined(__sparc__) + #define bpf_target_sparc + #define bpf_target_defined +#elif defined(__riscv) && __riscv_xlen == 64 + #define bpf_target_riscv + #define bpf_target_defined +#elif defined(__arc__) + #define bpf_target_arc + #define bpf_target_defined +#endif /* no compiler target */ + +#endif + +#ifndef __BPF_TARGET_MISSING +#define __BPF_TARGET_MISSING "GCC error \"Must specify a BPF target arch via __TARGET_ARCH_xxx\"" +#endif + +#if defined(bpf_target_x86) + +#if defined(__KERNEL__) || defined(__VMLINUX_H__) + +#define __PT_PARM1_REG di +#define __PT_PARM2_REG si +#define __PT_PARM3_REG dx +#define __PT_PARM4_REG cx +#define __PT_PARM5_REG r8 +#define __PT_RET_REG sp +#define __PT_FP_REG bp +#define __PT_RC_REG ax +#define __PT_SP_REG sp +#define __PT_IP_REG ip +/* syscall uses r10 for PARM4 */ +#define PT_REGS_PARM4_SYSCALL(x) ((x)->r10) +#define PT_REGS_PARM4_CORE_SYSCALL(x) BPF_CORE_READ(x, r10) + +#else + +#ifdef __i386__ + +#define __PT_PARM1_REG eax +#define __PT_PARM2_REG edx +#define __PT_PARM3_REG ecx +/* i386 kernel is built with -mregparm=3 */ +#define __PT_PARM4_REG __unsupported__ +#define __PT_PARM5_REG __unsupported__ +#define __PT_RET_REG esp +#define __PT_FP_REG ebp +#define __PT_RC_REG eax +#define __PT_SP_REG esp +#define __PT_IP_REG eip + +#else /* __i386__ */ + +#define __PT_PARM1_REG rdi +#define __PT_PARM2_REG rsi +#define __PT_PARM3_REG rdx +#define __PT_PARM4_REG rcx +#define __PT_PARM5_REG r8 +#define __PT_RET_REG rsp +#define __PT_FP_REG rbp +#define __PT_RC_REG rax +#define __PT_SP_REG rsp +#define __PT_IP_REG rip +/* 
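/*
 * Illustrative userspace sketch (an assumption, not part of the vendored
 * sources): mapping an xlated instruction offset back to a source line with
 * the bpf_prog_linfo API defined above. Filling in struct bpf_prog_info
 * (e.g. via bpf_obj_get_info_by_fd()) is elided; assumes <stdio.h> and the
 * BPF_LINE_INFO_LINE_NUM() macro from the bpf UAPI header.
 */
void print_line_for_insn(const struct bpf_prog_info *info, __u32 insn_off)
{
	struct bpf_prog_linfo *linfo = bpf_prog_linfo__new(info);
	const struct bpf_line_info *li;

	if (!linfo)
		return;
	li = bpf_prog_linfo__lfind(linfo, insn_off, 0);
	if (li)
		printf("insn %u -> line %u\n", insn_off,
		       BPF_LINE_INFO_LINE_NUM(li->line_col));
	bpf_prog_linfo__free(linfo);
}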
syscall uses r10 for PARM4 */ +#define PT_REGS_PARM4_SYSCALL(x) ((x)->r10) +#define PT_REGS_PARM4_CORE_SYSCALL(x) BPF_CORE_READ(x, r10) + +#endif /* __i386__ */ + +#endif /* __KERNEL__ || __VMLINUX_H__ */ + +#elif defined(bpf_target_s390) + +struct pt_regs___s390 { + unsigned long orig_gpr2; +}; + +/* s390 provides user_pt_regs instead of struct pt_regs to userspace */ +#define __PT_REGS_CAST(x) ((const user_pt_regs *)(x)) +#define __PT_PARM1_REG gprs[2] +#define __PT_PARM2_REG gprs[3] +#define __PT_PARM3_REG gprs[4] +#define __PT_PARM4_REG gprs[5] +#define __PT_PARM5_REG gprs[6] +#define __PT_RET_REG grps[14] +#define __PT_FP_REG gprs[11] /* Works only with CONFIG_FRAME_POINTER */ +#define __PT_RC_REG gprs[2] +#define __PT_SP_REG gprs[15] +#define __PT_IP_REG psw.addr +#define PT_REGS_PARM1_SYSCALL(x) ({ _Pragma("GCC error \"use PT_REGS_PARM1_CORE_SYSCALL() instead\""); 0l; }) +#define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ((const struct pt_regs___s390 *)(x), orig_gpr2) + +#elif defined(bpf_target_arm) + +#define __PT_PARM1_REG uregs[0] +#define __PT_PARM2_REG uregs[1] +#define __PT_PARM3_REG uregs[2] +#define __PT_PARM4_REG uregs[3] +#define __PT_PARM5_REG uregs[4] +#define __PT_RET_REG uregs[14] +#define __PT_FP_REG uregs[11] /* Works only with CONFIG_FRAME_POINTER */ +#define __PT_RC_REG uregs[0] +#define __PT_SP_REG uregs[13] +#define __PT_IP_REG uregs[12] + +#elif defined(bpf_target_arm64) + +struct pt_regs___arm64 { + unsigned long orig_x0; +}; + +/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */ +#define __PT_REGS_CAST(x) ((const struct user_pt_regs *)(x)) +#define __PT_PARM1_REG regs[0] +#define __PT_PARM2_REG regs[1] +#define __PT_PARM3_REG regs[2] +#define __PT_PARM4_REG regs[3] +#define __PT_PARM5_REG regs[4] +#define __PT_RET_REG regs[30] +#define __PT_FP_REG regs[29] /* Works only with CONFIG_FRAME_POINTER */ +#define __PT_RC_REG regs[0] +#define __PT_SP_REG sp +#define __PT_IP_REG pc +#define PT_REGS_PARM1_SYSCALL(x) ({ _Pragma("GCC error \"use PT_REGS_PARM1_CORE_SYSCALL() instead\""); 0l; }) +#define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ((const struct pt_regs___arm64 *)(x), orig_x0) + +#elif defined(bpf_target_mips) + +#define __PT_PARM1_REG regs[4] +#define __PT_PARM2_REG regs[5] +#define __PT_PARM3_REG regs[6] +#define __PT_PARM4_REG regs[7] +#define __PT_PARM5_REG regs[8] +#define __PT_RET_REG regs[31] +#define __PT_FP_REG regs[30] /* Works only with CONFIG_FRAME_POINTER */ +#define __PT_RC_REG regs[2] +#define __PT_SP_REG regs[29] +#define __PT_IP_REG cp0_epc + +#elif defined(bpf_target_powerpc) + +#define __PT_PARM1_REG gpr[3] +#define __PT_PARM2_REG gpr[4] +#define __PT_PARM3_REG gpr[5] +#define __PT_PARM4_REG gpr[6] +#define __PT_PARM5_REG gpr[7] +#define __PT_RET_REG regs[31] +#define __PT_FP_REG __unsupported__ +#define __PT_RC_REG gpr[3] +#define __PT_SP_REG sp +#define __PT_IP_REG nip +/* powerpc does not select ARCH_HAS_SYSCALL_WRAPPER. */ +#define PT_REGS_SYSCALL_REGS(ctx) ctx + +#elif defined(bpf_target_sparc) + +#define __PT_PARM1_REG u_regs[UREG_I0] +#define __PT_PARM2_REG u_regs[UREG_I1] +#define __PT_PARM3_REG u_regs[UREG_I2] +#define __PT_PARM4_REG u_regs[UREG_I3] +#define __PT_PARM5_REG u_regs[UREG_I4] +#define __PT_RET_REG u_regs[UREG_I7] +#define __PT_FP_REG __unsupported__ +#define __PT_RC_REG u_regs[UREG_I0] +#define __PT_SP_REG u_regs[UREG_FP] +/* Should this also be a bpf_target check for the sparc case? 
*/ +#if defined(__arch64__) +#define __PT_IP_REG tpc +#else +#define __PT_IP_REG pc +#endif + +#elif defined(bpf_target_riscv) + +#define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x)) +#define __PT_PARM1_REG a0 +#define __PT_PARM2_REG a1 +#define __PT_PARM3_REG a2 +#define __PT_PARM4_REG a3 +#define __PT_PARM5_REG a4 +#define __PT_RET_REG ra +#define __PT_FP_REG s0 +#define __PT_RC_REG a5 +#define __PT_SP_REG sp +#define __PT_IP_REG pc +/* riscv does not select ARCH_HAS_SYSCALL_WRAPPER. */ +#define PT_REGS_SYSCALL_REGS(ctx) ctx + +#elif defined(bpf_target_arc) + +/* arc provides struct user_pt_regs instead of struct pt_regs to userspace */ +#define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x)) +#define __PT_PARM1_REG scratch.r0 +#define __PT_PARM2_REG scratch.r1 +#define __PT_PARM3_REG scratch.r2 +#define __PT_PARM4_REG scratch.r3 +#define __PT_PARM5_REG scratch.r4 +#define __PT_RET_REG scratch.blink +#define __PT_FP_REG __unsupported__ +#define __PT_RC_REG scratch.r0 +#define __PT_SP_REG scratch.sp +#define __PT_IP_REG scratch.ret +/* arc does not select ARCH_HAS_SYSCALL_WRAPPER. */ +#define PT_REGS_SYSCALL_REGS(ctx) ctx + +#endif + +#if defined(bpf_target_defined) + +struct pt_regs; + +/* allow some architecutres to override `struct pt_regs` */ +#ifndef __PT_REGS_CAST +#define __PT_REGS_CAST(x) (x) +#endif + +#define PT_REGS_PARM1(x) (__PT_REGS_CAST(x)->__PT_PARM1_REG) +#define PT_REGS_PARM2(x) (__PT_REGS_CAST(x)->__PT_PARM2_REG) +#define PT_REGS_PARM3(x) (__PT_REGS_CAST(x)->__PT_PARM3_REG) +#define PT_REGS_PARM4(x) (__PT_REGS_CAST(x)->__PT_PARM4_REG) +#define PT_REGS_PARM5(x) (__PT_REGS_CAST(x)->__PT_PARM5_REG) +#define PT_REGS_RET(x) (__PT_REGS_CAST(x)->__PT_RET_REG) +#define PT_REGS_FP(x) (__PT_REGS_CAST(x)->__PT_FP_REG) +#define PT_REGS_RC(x) (__PT_REGS_CAST(x)->__PT_RC_REG) +#define PT_REGS_SP(x) (__PT_REGS_CAST(x)->__PT_SP_REG) +#define PT_REGS_IP(x) (__PT_REGS_CAST(x)->__PT_IP_REG) + +#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM1_REG) +#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM2_REG) +#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM3_REG) +#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM4_REG) +#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM5_REG) +#define PT_REGS_RET_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_RET_REG) +#define PT_REGS_FP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_FP_REG) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_RC_REG) +#define PT_REGS_SP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_SP_REG) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_IP_REG) + +#if defined(bpf_target_powerpc) + +#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; }) +#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP + +#elif defined(bpf_target_sparc) + +#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); }) +#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP + +#else + +#define BPF_KPROBE_READ_RET_IP(ip, ctx) \ + ({ bpf_probe_read_kernel(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); }) +#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) \ + ({ bpf_probe_read_kernel(&(ip), sizeof(ip), (void *)(PT_REGS_FP(ctx) + sizeof(ip))); }) + +#endif + +#ifndef PT_REGS_PARM1_SYSCALL +#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1(x) +#endif +#define PT_REGS_PARM2_SYSCALL(x) PT_REGS_PARM2(x) +#define PT_REGS_PARM3_SYSCALL(x) PT_REGS_PARM3(x) +#ifndef PT_REGS_PARM4_SYSCALL 
+#define PT_REGS_PARM4_SYSCALL(x) PT_REGS_PARM4(x) +#endif +#define PT_REGS_PARM5_SYSCALL(x) PT_REGS_PARM5(x) + +#ifndef PT_REGS_PARM1_CORE_SYSCALL +#define PT_REGS_PARM1_CORE_SYSCALL(x) PT_REGS_PARM1_CORE(x) +#endif +#define PT_REGS_PARM2_CORE_SYSCALL(x) PT_REGS_PARM2_CORE(x) +#define PT_REGS_PARM3_CORE_SYSCALL(x) PT_REGS_PARM3_CORE(x) +#ifndef PT_REGS_PARM4_CORE_SYSCALL +#define PT_REGS_PARM4_CORE_SYSCALL(x) PT_REGS_PARM4_CORE(x) +#endif +#define PT_REGS_PARM5_CORE_SYSCALL(x) PT_REGS_PARM5_CORE(x) + +#else /* defined(bpf_target_defined) */ + +#define PT_REGS_PARM1(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM2(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM3(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM4(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM5(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_RET(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_FP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_RC(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_SP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_IP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) + +#define PT_REGS_PARM1_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM2_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM3_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM4_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM5_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_RET_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_FP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_RC_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_SP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_IP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) + +#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) + +#define PT_REGS_PARM1_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM2_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM3_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM4_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM5_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) + +#define PT_REGS_PARM1_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM2_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM3_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM4_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM5_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) + +#endif /* defined(bpf_target_defined) */ + +/* + * When invoked from a syscall handler kprobe, returns a pointer to a + * struct pt_regs containing syscall arguments and suitable for passing to + * PT_REGS_PARMn_SYSCALL() and PT_REGS_PARMn_CORE_SYSCALL(). + */ +#ifndef PT_REGS_SYSCALL_REGS +/* By default, assume that the arch selects ARCH_HAS_SYSCALL_WRAPPER. 
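/*
 * Illustrative sketch (an assumption, not part of the vendored header):
 * using the PT_REGS_*() accessors above directly in hand-written
 * kprobe/kretprobe programs, instead of the BPF_KPROBE()/BPF_KRETPROBE()
 * convenience macros defined further below. The probed function
 * `do_unlinkat` is only an example; assumes SEC() and bpf_printk() from
 * bpf_helpers.h.
 */
SEC("kprobe/do_unlinkat")
int manual_kprobe(struct pt_regs *ctx)
{
	/* first argument of the probed function */
	long dfd = (long)PT_REGS_PARM1(ctx);

	bpf_printk("do_unlinkat dfd=%ld", dfd);
	return 0;
}

SEC("kretprobe/do_unlinkat")
int manual_kretprobe(struct pt_regs *ctx)
{
	/* return value of the probed function */
	long ret = (long)PT_REGS_RC(ctx);

	bpf_printk("do_unlinkat ret=%ld", ret);
	return 0;
}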
*/ +#define PT_REGS_SYSCALL_REGS(ctx) ((struct pt_regs *)PT_REGS_PARM1(ctx)) +#endif + +#ifndef ___bpf_concat +#define ___bpf_concat(a, b) a ## b +#endif +#ifndef ___bpf_apply +#define ___bpf_apply(fn, n) ___bpf_concat(fn, n) +#endif +#ifndef ___bpf_nth +#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N +#endif +#ifndef ___bpf_narg +#define ___bpf_narg(...) ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) +#endif + +#define ___bpf_ctx_cast0() ctx +#define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), (void *)ctx[0] +#define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), (void *)ctx[1] +#define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), (void *)ctx[2] +#define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), (void *)ctx[3] +#define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), (void *)ctx[4] +#define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), (void *)ctx[5] +#define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), (void *)ctx[6] +#define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), (void *)ctx[7] +#define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), (void *)ctx[8] +#define ___bpf_ctx_cast10(x, args...) ___bpf_ctx_cast9(args), (void *)ctx[9] +#define ___bpf_ctx_cast11(x, args...) ___bpf_ctx_cast10(args), (void *)ctx[10] +#define ___bpf_ctx_cast12(x, args...) ___bpf_ctx_cast11(args), (void *)ctx[11] +#define ___bpf_ctx_cast(args...) ___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args) + +/* + * BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and + * similar kinds of BPF programs, that accept input arguments as a single + * pointer to untyped u64 array, where each u64 can actually be a typed + * pointer or integer of different size. Instead of requring user to write + * manual casts and work with array elements by index, BPF_PROG macro + * allows user to declare a list of named and typed input arguments in the + * same syntax as for normal C function. All the casting is hidden and + * performed transparently, while user code can just assume working with + * function arguments of specified type and name. + * + * Original raw context argument is preserved as well as 'ctx' argument. + * This is useful when using BPF helpers that expect original context + * as one of the parameters (e.g., for bpf_perf_event_output()). + */ +#define BPF_PROG(name, args...) \ +name(unsigned long long *ctx); \ +static __attribute__((always_inline)) typeof(name(0)) \ +____##name(unsigned long long *ctx, ##args); \ +typeof(name(0)) name(unsigned long long *ctx) \ +{ \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ + return ____##name(___bpf_ctx_cast(args)); \ + _Pragma("GCC diagnostic pop") \ +} \ +static __attribute__((always_inline)) typeof(name(0)) \ +____##name(unsigned long long *ctx, ##args) + +struct pt_regs; + +#define ___bpf_kprobe_args0() ctx +#define ___bpf_kprobe_args1(x) ___bpf_kprobe_args0(), (void *)PT_REGS_PARM1(ctx) +#define ___bpf_kprobe_args2(x, args...) ___bpf_kprobe_args1(args), (void *)PT_REGS_PARM2(ctx) +#define ___bpf_kprobe_args3(x, args...) ___bpf_kprobe_args2(args), (void *)PT_REGS_PARM3(ctx) +#define ___bpf_kprobe_args4(x, args...) ___bpf_kprobe_args3(args), (void *)PT_REGS_PARM4(ctx) +#define ___bpf_kprobe_args5(x, args...) ___bpf_kprobe_args4(args), (void *)PT_REGS_PARM5(ctx) +#define ___bpf_kprobe_args(args...) 
___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args) + +/* + * BPF_KPROBE serves the same purpose for kprobes as BPF_PROG for + * tp_btf/fentry/fexit BPF programs. It hides the underlying platform-specific + * low-level way of getting kprobe input arguments from struct pt_regs, and + * provides a familiar typed and named function arguments syntax and + * semantics of accessing kprobe input paremeters. + * + * Original struct pt_regs* context is preserved as 'ctx' argument. This might + * be necessary when using BPF helpers like bpf_perf_event_output(). + */ +#define BPF_KPROBE(name, args...) \ +name(struct pt_regs *ctx); \ +static __attribute__((always_inline)) typeof(name(0)) \ +____##name(struct pt_regs *ctx, ##args); \ +typeof(name(0)) name(struct pt_regs *ctx) \ +{ \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ + return ____##name(___bpf_kprobe_args(args)); \ + _Pragma("GCC diagnostic pop") \ +} \ +static __attribute__((always_inline)) typeof(name(0)) \ +____##name(struct pt_regs *ctx, ##args) + +#define ___bpf_kretprobe_args0() ctx +#define ___bpf_kretprobe_args1(x) ___bpf_kretprobe_args0(), (void *)PT_REGS_RC(ctx) +#define ___bpf_kretprobe_args(args...) ___bpf_apply(___bpf_kretprobe_args, ___bpf_narg(args))(args) + +/* + * BPF_KRETPROBE is similar to BPF_KPROBE, except, it only provides optional + * return value (in addition to `struct pt_regs *ctx`), but no input + * arguments, because they will be clobbered by the time probed function + * returns. + */ +#define BPF_KRETPROBE(name, args...) \ +name(struct pt_regs *ctx); \ +static __attribute__((always_inline)) typeof(name(0)) \ +____##name(struct pt_regs *ctx, ##args); \ +typeof(name(0)) name(struct pt_regs *ctx) \ +{ \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ + return ____##name(___bpf_kretprobe_args(args)); \ + _Pragma("GCC diagnostic pop") \ +} \ +static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args) + +#define ___bpf_syscall_args0() ctx +#define ___bpf_syscall_args1(x) ___bpf_syscall_args0(), (void *)PT_REGS_PARM1_CORE_SYSCALL(regs) +#define ___bpf_syscall_args2(x, args...) ___bpf_syscall_args1(args), (void *)PT_REGS_PARM2_CORE_SYSCALL(regs) +#define ___bpf_syscall_args3(x, args...) ___bpf_syscall_args2(args), (void *)PT_REGS_PARM3_CORE_SYSCALL(regs) +#define ___bpf_syscall_args4(x, args...) ___bpf_syscall_args3(args), (void *)PT_REGS_PARM4_CORE_SYSCALL(regs) +#define ___bpf_syscall_args5(x, args...) ___bpf_syscall_args4(args), (void *)PT_REGS_PARM5_CORE_SYSCALL(regs) +#define ___bpf_syscall_args(args...) ___bpf_apply(___bpf_syscall_args, ___bpf_narg(args))(args) + +/* + * BPF_KPROBE_SYSCALL is a variant of BPF_KPROBE, which is intended for + * tracing syscall functions, like __x64_sys_close. It hides the underlying + * platform-specific low-level way of getting syscall input arguments from + * struct pt_regs, and provides a familiar typed and named function arguments + * syntax and semantics of accessing syscall input parameters. + * + * Original struct pt_regs* context is preserved as 'ctx' argument. This might + * be necessary when using BPF helpers like bpf_perf_event_output(). + * + * This macro relies on BPF CO-RE support. + */ +#define BPF_KPROBE_SYSCALL(name, args...) 
\ +name(struct pt_regs *ctx); \ +static __attribute__((always_inline)) typeof(name(0)) \ +____##name(struct pt_regs *ctx, ##args); \ +typeof(name(0)) name(struct pt_regs *ctx) \ +{ \ + struct pt_regs *regs = PT_REGS_SYSCALL_REGS(ctx); \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ + return ____##name(___bpf_syscall_args(args)); \ + _Pragma("GCC diagnostic pop") \ +} \ +static __attribute__((always_inline)) typeof(name(0)) \ +____##name(struct pt_regs *ctx, ##args) + +#endif diff --git a/pkg/plugin/lib/common/libbpf/_src/btf.c b/pkg/plugin/lib/common/libbpf/_src/btf.c new file mode 100644 index 000000000..bb1e06eb1 --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/btf.c @@ -0,0 +1,4909 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +/* Copyright (c) 2018 Facebook */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "btf.h" +#include "bpf.h" +#include "libbpf.h" +#include "libbpf_internal.h" +#include "hashmap.h" +#include "strset.h" + +#define BTF_MAX_NR_TYPES 0x7fffffffU +#define BTF_MAX_STR_OFFSET 0x7fffffffU + +static struct btf_type btf_void; + +struct btf { + /* raw BTF data in native endianness */ + void *raw_data; + /* raw BTF data in non-native endianness */ + void *raw_data_swapped; + __u32 raw_size; + /* whether target endianness differs from the native one */ + bool swapped_endian; + + /* + * When BTF is loaded from an ELF or raw memory it is stored + * in a contiguous memory block. The hdr, type_data, and, strs_data + * point inside that memory region to their respective parts of BTF + * representation: + * + * +--------------------------------+ + * | Header | Types | Strings | + * +--------------------------------+ + * ^ ^ ^ + * | | | + * hdr | | + * types_data-+ | + * strs_data------------+ + * + * If BTF data is later modified, e.g., due to types added or + * removed, BTF deduplication performed, etc, this contiguous + * representation is broken up into three independently allocated + * memory regions to be able to modify them independently. + * raw_data is nulled out at that point, but can be later allocated + * and cached again if user calls btf__raw_data(), at which point + * raw_data will contain a contiguous copy of header, types, and + * strings: + * + * +----------+ +---------+ +-----------+ + * | Header | | Types | | Strings | + * +----------+ +---------+ +-----------+ + * ^ ^ ^ + * | | | + * hdr | | + * types_data----+ | + * strset__data(strs_set)-----+ + * + * +----------+---------+-----------+ + * | Header | Types | Strings | + * raw_data----->+----------+---------+-----------+ + */ + struct btf_header *hdr; + + void *types_data; + size_t types_data_cap; /* used size stored in hdr->type_len */ + + /* type ID to `struct btf_type *` lookup index + * type_offs[0] corresponds to the first non-VOID type: + * - for base BTF it's type [1]; + * - for split BTF it's the first non-base BTF type. + */ + __u32 *type_offs; + size_t type_offs_cap; + /* number of types in this BTF instance: + * - doesn't include special [0] void type; + * - for split BTF counts number of types added on top of base BTF. + */ + __u32 nr_types; + /* if not NULL, points to the base BTF on top of which the current + * split BTF is based + */ + struct btf *base_btf; + /* BTF type ID of the first type in this BTF instance: + * - for base BTF it's equal to 1; + * - for split BTF it's equal to biggest type ID of base BTF plus 1. 
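/*
 * Illustrative sketch (an assumption, not part of the vendored header): the
 * convenience macros above in use — BPF_PROG() for fentry, BPF_KPROBE() to
 * unpack pt_regs into typed arguments, and BPF_KPROBE_SYSCALL() to resolve
 * syscall arguments via CO-RE. Targets and argument types are examples only;
 * assumes vmlinux.h (struct filename), SEC(), bpf_printk() and
 * bpf_probe_read_user_str() declared earlier in this vendored set.
 */
SEC("fentry/do_unlinkat")
int BPF_PROG(fentry_unlinkat, int dfd, struct filename *name)
{
	return 0;
}

SEC("kprobe/do_unlinkat")
int BPF_KPROBE(trace_unlinkat, int dfd, struct filename *name)
{
	bpf_printk("do_unlinkat dfd=%d", dfd);
	return 0;
}

SEC("kprobe/__x64_sys_openat")
int BPF_KPROBE_SYSCALL(trace_openat, int dfd, const char *pathname)
{
	char buf[64];

	/* pathname is a user-space pointer, so read it with the _user helper */
	bpf_probe_read_user_str(buf, sizeof(buf), pathname);
	bpf_printk("openat: %s", buf);
	return 0;
}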
+ */ + int start_id; + /* logical string offset of this BTF instance: + * - for base BTF it's equal to 0; + * - for split BTF it's equal to total size of base BTF's string section size. + */ + int start_str_off; + + /* only one of strs_data or strs_set can be non-NULL, depending on + * whether BTF is in a modifiable state (strs_set is used) or not + * (strs_data points inside raw_data) + */ + void *strs_data; + /* a set of unique strings */ + struct strset *strs_set; + /* whether strings are already deduplicated */ + bool strs_deduped; + + /* BTF object FD, if loaded into kernel */ + int fd; + + /* Pointer size (in bytes) for a target architecture of this BTF */ + int ptr_sz; +}; + +static inline __u64 ptr_to_u64(const void *ptr) +{ + return (__u64) (unsigned long) ptr; +} + +/* Ensure given dynamically allocated memory region pointed to by *data* with + * capacity of *cap_cnt* elements each taking *elem_sz* bytes has enough + * memory to accomodate *add_cnt* new elements, assuming *cur_cnt* elements + * are already used. At most *max_cnt* elements can be ever allocated. + * If necessary, memory is reallocated and all existing data is copied over, + * new pointer to the memory region is stored at *data, new memory region + * capacity (in number of elements) is stored in *cap. + * On success, memory pointer to the beginning of unused memory is returned. + * On error, NULL is returned. + */ +void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz, + size_t cur_cnt, size_t max_cnt, size_t add_cnt) +{ + size_t new_cnt; + void *new_data; + + if (cur_cnt + add_cnt <= *cap_cnt) + return *data + cur_cnt * elem_sz; + + /* requested more than the set limit */ + if (cur_cnt + add_cnt > max_cnt) + return NULL; + + new_cnt = *cap_cnt; + new_cnt += new_cnt / 4; /* expand by 25% */ + if (new_cnt < 16) /* but at least 16 elements */ + new_cnt = 16; + if (new_cnt > max_cnt) /* but not exceeding a set limit */ + new_cnt = max_cnt; + if (new_cnt < cur_cnt + add_cnt) /* also ensure we have enough memory */ + new_cnt = cur_cnt + add_cnt; + + new_data = libbpf_reallocarray(*data, new_cnt, elem_sz); + if (!new_data) + return NULL; + + /* zero out newly allocated portion of memory */ + memset(new_data + (*cap_cnt) * elem_sz, 0, (new_cnt - *cap_cnt) * elem_sz); + + *data = new_data; + *cap_cnt = new_cnt; + return new_data + cur_cnt * elem_sz; +} + +/* Ensure given dynamically allocated memory region has enough allocated space + * to accommodate *need_cnt* elements of size *elem_sz* bytes each + */ +int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt) +{ + void *p; + + if (need_cnt <= *cap_cnt) + return 0; + + p = libbpf_add_mem(data, cap_cnt, elem_sz, *cap_cnt, SIZE_MAX, need_cnt - *cap_cnt); + if (!p) + return -ENOMEM; + + return 0; +} + +static void *btf_add_type_offs_mem(struct btf *btf, size_t add_cnt) +{ + return libbpf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32), + btf->nr_types, BTF_MAX_NR_TYPES, add_cnt); +} + +static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off) +{ + __u32 *p; + + p = btf_add_type_offs_mem(btf, 1); + if (!p) + return -ENOMEM; + + *p = type_off; + return 0; +} + +static void btf_bswap_hdr(struct btf_header *h) +{ + h->magic = bswap_16(h->magic); + h->hdr_len = bswap_32(h->hdr_len); + h->type_off = bswap_32(h->type_off); + h->type_len = bswap_32(h->type_len); + h->str_off = bswap_32(h->str_off); + h->str_len = bswap_32(h->str_len); +} + +static int btf_parse_hdr(struct btf *btf) +{ + struct btf_header *hdr = 
btf->hdr; + __u32 meta_left; + + if (btf->raw_size < sizeof(struct btf_header)) { + pr_debug("BTF header not found\n"); + return -EINVAL; + } + + if (hdr->magic == bswap_16(BTF_MAGIC)) { + btf->swapped_endian = true; + if (bswap_32(hdr->hdr_len) != sizeof(struct btf_header)) { + pr_warn("Can't load BTF with non-native endianness due to unsupported header length %u\n", + bswap_32(hdr->hdr_len)); + return -ENOTSUP; + } + btf_bswap_hdr(hdr); + } else if (hdr->magic != BTF_MAGIC) { + pr_debug("Invalid BTF magic: %x\n", hdr->magic); + return -EINVAL; + } + + if (btf->raw_size < hdr->hdr_len) { + pr_debug("BTF header len %u larger than data size %u\n", + hdr->hdr_len, btf->raw_size); + return -EINVAL; + } + + meta_left = btf->raw_size - hdr->hdr_len; + if (meta_left < (long long)hdr->str_off + hdr->str_len) { + pr_debug("Invalid BTF total size: %u\n", btf->raw_size); + return -EINVAL; + } + + if ((long long)hdr->type_off + hdr->type_len > hdr->str_off) { + pr_debug("Invalid BTF data sections layout: type data at %u + %u, strings data at %u + %u\n", + hdr->type_off, hdr->type_len, hdr->str_off, hdr->str_len); + return -EINVAL; + } + + if (hdr->type_off % 4) { + pr_debug("BTF type section is not aligned to 4 bytes\n"); + return -EINVAL; + } + + return 0; +} + +static int btf_parse_str_sec(struct btf *btf) +{ + const struct btf_header *hdr = btf->hdr; + const char *start = btf->strs_data; + const char *end = start + btf->hdr->str_len; + + if (btf->base_btf && hdr->str_len == 0) + return 0; + if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET || end[-1]) { + pr_debug("Invalid BTF string section\n"); + return -EINVAL; + } + if (!btf->base_btf && start[0]) { + pr_debug("Invalid BTF string section\n"); + return -EINVAL; + } + return 0; +} + +static int btf_type_size(const struct btf_type *t) +{ + const int base_size = sizeof(struct btf_type); + __u16 vlen = btf_vlen(t); + + switch (btf_kind(t)) { + case BTF_KIND_FWD: + case BTF_KIND_CONST: + case BTF_KIND_VOLATILE: + case BTF_KIND_RESTRICT: + case BTF_KIND_PTR: + case BTF_KIND_TYPEDEF: + case BTF_KIND_FUNC: + case BTF_KIND_FLOAT: + case BTF_KIND_TYPE_TAG: + return base_size; + case BTF_KIND_INT: + return base_size + sizeof(__u32); + case BTF_KIND_ENUM: + return base_size + vlen * sizeof(struct btf_enum); + case BTF_KIND_ARRAY: + return base_size + sizeof(struct btf_array); + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + return base_size + vlen * sizeof(struct btf_member); + case BTF_KIND_FUNC_PROTO: + return base_size + vlen * sizeof(struct btf_param); + case BTF_KIND_VAR: + return base_size + sizeof(struct btf_var); + case BTF_KIND_DATASEC: + return base_size + vlen * sizeof(struct btf_var_secinfo); + case BTF_KIND_DECL_TAG: + return base_size + sizeof(struct btf_decl_tag); + default: + pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t)); + return -EINVAL; + } +} + +static void btf_bswap_type_base(struct btf_type *t) +{ + t->name_off = bswap_32(t->name_off); + t->info = bswap_32(t->info); + t->type = bswap_32(t->type); +} + +static int btf_bswap_type_rest(struct btf_type *t) +{ + struct btf_var_secinfo *v; + struct btf_member *m; + struct btf_array *a; + struct btf_param *p; + struct btf_enum *e; + __u16 vlen = btf_vlen(t); + int i; + + switch (btf_kind(t)) { + case BTF_KIND_FWD: + case BTF_KIND_CONST: + case BTF_KIND_VOLATILE: + case BTF_KIND_RESTRICT: + case BTF_KIND_PTR: + case BTF_KIND_TYPEDEF: + case BTF_KIND_FUNC: + case BTF_KIND_FLOAT: + case BTF_KIND_TYPE_TAG: + return 0; + case BTF_KIND_INT: + *(__u32 *)(t + 1) = bswap_32(*(__u32 *)(t 
+ 1)); + return 0; + case BTF_KIND_ENUM: + for (i = 0, e = btf_enum(t); i < vlen; i++, e++) { + e->name_off = bswap_32(e->name_off); + e->val = bswap_32(e->val); + } + return 0; + case BTF_KIND_ARRAY: + a = btf_array(t); + a->type = bswap_32(a->type); + a->index_type = bswap_32(a->index_type); + a->nelems = bswap_32(a->nelems); + return 0; + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + for (i = 0, m = btf_members(t); i < vlen; i++, m++) { + m->name_off = bswap_32(m->name_off); + m->type = bswap_32(m->type); + m->offset = bswap_32(m->offset); + } + return 0; + case BTF_KIND_FUNC_PROTO: + for (i = 0, p = btf_params(t); i < vlen; i++, p++) { + p->name_off = bswap_32(p->name_off); + p->type = bswap_32(p->type); + } + return 0; + case BTF_KIND_VAR: + btf_var(t)->linkage = bswap_32(btf_var(t)->linkage); + return 0; + case BTF_KIND_DATASEC: + for (i = 0, v = btf_var_secinfos(t); i < vlen; i++, v++) { + v->type = bswap_32(v->type); + v->offset = bswap_32(v->offset); + v->size = bswap_32(v->size); + } + return 0; + case BTF_KIND_DECL_TAG: + btf_decl_tag(t)->component_idx = bswap_32(btf_decl_tag(t)->component_idx); + return 0; + default: + pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t)); + return -EINVAL; + } +} + +static int btf_parse_type_sec(struct btf *btf) +{ + struct btf_header *hdr = btf->hdr; + void *next_type = btf->types_data; + void *end_type = next_type + hdr->type_len; + int err, type_size; + + while (next_type + sizeof(struct btf_type) <= end_type) { + if (btf->swapped_endian) + btf_bswap_type_base(next_type); + + type_size = btf_type_size(next_type); + if (type_size < 0) + return type_size; + if (next_type + type_size > end_type) { + pr_warn("BTF type [%d] is malformed\n", btf->start_id + btf->nr_types); + return -EINVAL; + } + + if (btf->swapped_endian && btf_bswap_type_rest(next_type)) + return -EINVAL; + + err = btf_add_type_idx_entry(btf, next_type - btf->types_data); + if (err) + return err; + + next_type += type_size; + btf->nr_types++; + } + + if (next_type != end_type) { + pr_warn("BTF types data is malformed\n"); + return -EINVAL; + } + + return 0; +} + +__u32 btf__get_nr_types(const struct btf *btf) +{ + return btf->start_id + btf->nr_types - 1; +} + +__u32 btf__type_cnt(const struct btf *btf) +{ + return btf->start_id + btf->nr_types; +} + +const struct btf *btf__base_btf(const struct btf *btf) +{ + return btf->base_btf; +} + +/* internal helper returning non-const pointer to a type */ +struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id) +{ + if (type_id == 0) + return &btf_void; + if (type_id < btf->start_id) + return btf_type_by_id(btf->base_btf, type_id); + return btf->types_data + btf->type_offs[type_id - btf->start_id]; +} + +const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id) +{ + if (type_id >= btf->start_id + btf->nr_types) + return errno = EINVAL, NULL; + return btf_type_by_id((struct btf *)btf, type_id); +} + +static int determine_ptr_size(const struct btf *btf) +{ + const struct btf_type *t; + const char *name; + int i, n; + + if (btf->base_btf && btf->base_btf->ptr_sz > 0) + return btf->base_btf->ptr_sz; + + n = btf__type_cnt(btf); + for (i = 1; i < n; i++) { + t = btf__type_by_id(btf, i); + if (!btf_is_int(t)) + continue; + + name = btf__name_by_offset(btf, t->name_off); + if (!name) + continue; + + if (strcmp(name, "long int") == 0 || + strcmp(name, "long unsigned int") == 0) { + if (t->size != 4 && t->size != 8) + continue; + return t->size; + } + } + + return -1; +} + +static size_t btf_ptr_sz(const struct btf 
*btf) +{ + if (!btf->ptr_sz) + ((struct btf *)btf)->ptr_sz = determine_ptr_size(btf); + return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz; +} + +/* Return pointer size this BTF instance assumes. The size is heuristically + * determined by looking for 'long' or 'unsigned long' integer type and + * recording its size in bytes. If BTF type information doesn't have any such + * type, this function returns 0. In the latter case, native architecture's + * pointer size is assumed, so will be either 4 or 8, depending on + * architecture that libbpf was compiled for. It's possible to override + * guessed value by using btf__set_pointer_size() API. + */ +size_t btf__pointer_size(const struct btf *btf) +{ + if (!btf->ptr_sz) + ((struct btf *)btf)->ptr_sz = determine_ptr_size(btf); + + if (btf->ptr_sz < 0) + /* not enough BTF type info to guess */ + return 0; + + return btf->ptr_sz; +} + +/* Override or set pointer size in bytes. Only values of 4 and 8 are + * supported. + */ +int btf__set_pointer_size(struct btf *btf, size_t ptr_sz) +{ + if (ptr_sz != 4 && ptr_sz != 8) + return libbpf_err(-EINVAL); + btf->ptr_sz = ptr_sz; + return 0; +} + +static bool is_host_big_endian(void) +{ +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + return false; +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + return true; +#else +# error "Unrecognized __BYTE_ORDER__" +#endif +} + +enum btf_endianness btf__endianness(const struct btf *btf) +{ + if (is_host_big_endian()) + return btf->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN; + else + return btf->swapped_endian ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN; +} + +int btf__set_endianness(struct btf *btf, enum btf_endianness endian) +{ + if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN) + return libbpf_err(-EINVAL); + + btf->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN); + if (!btf->swapped_endian) { + free(btf->raw_data_swapped); + btf->raw_data_swapped = NULL; + } + return 0; +} + +static bool btf_type_is_void(const struct btf_type *t) +{ + return t == &btf_void || btf_is_fwd(t); +} + +static bool btf_type_is_void_or_null(const struct btf_type *t) +{ + return !t || btf_type_is_void(t); +} + +#define MAX_RESOLVE_DEPTH 32 + +__s64 btf__resolve_size(const struct btf *btf, __u32 type_id) +{ + const struct btf_array *array; + const struct btf_type *t; + __u32 nelems = 1; + __s64 size = -1; + int i; + + t = btf__type_by_id(btf, type_id); + for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); i++) { + switch (btf_kind(t)) { + case BTF_KIND_INT: + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + case BTF_KIND_ENUM: + case BTF_KIND_DATASEC: + case BTF_KIND_FLOAT: + size = t->size; + goto done; + case BTF_KIND_PTR: + size = btf_ptr_sz(btf); + goto done; + case BTF_KIND_TYPEDEF: + case BTF_KIND_VOLATILE: + case BTF_KIND_CONST: + case BTF_KIND_RESTRICT: + case BTF_KIND_VAR: + case BTF_KIND_DECL_TAG: + case BTF_KIND_TYPE_TAG: + type_id = t->type; + break; + case BTF_KIND_ARRAY: + array = btf_array(t); + if (nelems && array->nelems > UINT32_MAX / nelems) + return libbpf_err(-E2BIG); + nelems *= array->nelems; + type_id = array->type; + break; + default: + return libbpf_err(-EINVAL); + } + + t = btf__type_by_id(btf, type_id); + } + +done: + if (size < 0) + return libbpf_err(-EINVAL); + if (nelems && size > UINT32_MAX / nelems) + return libbpf_err(-E2BIG); + + return nelems * size; +} + +int btf__align_of(const struct btf *btf, __u32 id) +{ + const struct btf_type *t = btf__type_by_id(btf, id); + __u16 kind = btf_kind(t); + + switch (kind) 
{ + case BTF_KIND_INT: + case BTF_KIND_ENUM: + case BTF_KIND_FLOAT: + return min(btf_ptr_sz(btf), (size_t)t->size); + case BTF_KIND_PTR: + return btf_ptr_sz(btf); + case BTF_KIND_TYPEDEF: + case BTF_KIND_VOLATILE: + case BTF_KIND_CONST: + case BTF_KIND_RESTRICT: + case BTF_KIND_TYPE_TAG: + return btf__align_of(btf, t->type); + case BTF_KIND_ARRAY: + return btf__align_of(btf, btf_array(t)->type); + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: { + const struct btf_member *m = btf_members(t); + __u16 vlen = btf_vlen(t); + int i, max_align = 1, align; + + for (i = 0; i < vlen; i++, m++) { + align = btf__align_of(btf, m->type); + if (align <= 0) + return libbpf_err(align); + max_align = max(max_align, align); + } + + return max_align; + } + default: + pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t)); + return errno = EINVAL, 0; + } +} + +int btf__resolve_type(const struct btf *btf, __u32 type_id) +{ + const struct btf_type *t; + int depth = 0; + + t = btf__type_by_id(btf, type_id); + while (depth < MAX_RESOLVE_DEPTH && + !btf_type_is_void_or_null(t) && + (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) { + type_id = t->type; + t = btf__type_by_id(btf, type_id); + depth++; + } + + if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t)) + return libbpf_err(-EINVAL); + + return type_id; +} + +__s32 btf__find_by_name(const struct btf *btf, const char *type_name) +{ + __u32 i, nr_types = btf__type_cnt(btf); + + if (!strcmp(type_name, "void")) + return 0; + + for (i = 1; i < nr_types; i++) { + const struct btf_type *t = btf__type_by_id(btf, i); + const char *name = btf__name_by_offset(btf, t->name_off); + + if (name && !strcmp(type_name, name)) + return i; + } + + return libbpf_err(-ENOENT); +} + +static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id, + const char *type_name, __u32 kind) +{ + __u32 i, nr_types = btf__type_cnt(btf); + + if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void")) + return 0; + + for (i = start_id; i < nr_types; i++) { + const struct btf_type *t = btf__type_by_id(btf, i); + const char *name; + + if (btf_kind(t) != kind) + continue; + name = btf__name_by_offset(btf, t->name_off); + if (name && !strcmp(type_name, name)) + return i; + } + + return libbpf_err(-ENOENT); +} + +__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name, + __u32 kind) +{ + return btf_find_by_name_kind(btf, btf->start_id, type_name, kind); +} + +__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name, + __u32 kind) +{ + return btf_find_by_name_kind(btf, 1, type_name, kind); +} + +static bool btf_is_modifiable(const struct btf *btf) +{ + return (void *)btf->hdr != btf->raw_data; +} + +void btf__free(struct btf *btf) +{ + if (IS_ERR_OR_NULL(btf)) + return; + + if (btf->fd >= 0) + close(btf->fd); + + if (btf_is_modifiable(btf)) { + /* if BTF was modified after loading, it will have a split + * in-memory representation for header, types, and strings + * sections, so we need to free all of them individually. It + * might still have a cached contiguous raw data present, + * which will be unconditionally freed below. 
+ */ + free(btf->hdr); + free(btf->types_data); + strset__free(btf->strs_set); + } + free(btf->raw_data); + free(btf->raw_data_swapped); + free(btf->type_offs); + free(btf); +} + +static struct btf *btf_new_empty(struct btf *base_btf) +{ + struct btf *btf; + + btf = calloc(1, sizeof(*btf)); + if (!btf) + return ERR_PTR(-ENOMEM); + + btf->nr_types = 0; + btf->start_id = 1; + btf->start_str_off = 0; + btf->fd = -1; + btf->ptr_sz = sizeof(void *); + btf->swapped_endian = false; + + if (base_btf) { + btf->base_btf = base_btf; + btf->start_id = btf__type_cnt(base_btf); + btf->start_str_off = base_btf->hdr->str_len; + } + + /* +1 for empty string at offset 0 */ + btf->raw_size = sizeof(struct btf_header) + (base_btf ? 0 : 1); + btf->raw_data = calloc(1, btf->raw_size); + if (!btf->raw_data) { + free(btf); + return ERR_PTR(-ENOMEM); + } + + btf->hdr = btf->raw_data; + btf->hdr->hdr_len = sizeof(struct btf_header); + btf->hdr->magic = BTF_MAGIC; + btf->hdr->version = BTF_VERSION; + + btf->types_data = btf->raw_data + btf->hdr->hdr_len; + btf->strs_data = btf->raw_data + btf->hdr->hdr_len; + btf->hdr->str_len = base_btf ? 0 : 1; /* empty string at offset 0 */ + + return btf; +} + +struct btf *btf__new_empty(void) +{ + return libbpf_ptr(btf_new_empty(NULL)); +} + +struct btf *btf__new_empty_split(struct btf *base_btf) +{ + return libbpf_ptr(btf_new_empty(base_btf)); +} + +static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf) +{ + struct btf *btf; + int err; + + btf = calloc(1, sizeof(struct btf)); + if (!btf) + return ERR_PTR(-ENOMEM); + + btf->nr_types = 0; + btf->start_id = 1; + btf->start_str_off = 0; + btf->fd = -1; + + if (base_btf) { + btf->base_btf = base_btf; + btf->start_id = btf__type_cnt(base_btf); + btf->start_str_off = base_btf->hdr->str_len; + } + + btf->raw_data = malloc(size); + if (!btf->raw_data) { + err = -ENOMEM; + goto done; + } + memcpy(btf->raw_data, data, size); + btf->raw_size = size; + + btf->hdr = btf->raw_data; + err = btf_parse_hdr(btf); + if (err) + goto done; + + btf->strs_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->str_off; + btf->types_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->type_off; + + err = btf_parse_str_sec(btf); + err = err ?: btf_parse_type_sec(btf); + if (err) + goto done; + +done: + if (err) { + btf__free(btf); + return ERR_PTR(err); + } + + return btf; +} + +struct btf *btf__new(const void *data, __u32 size) +{ + return libbpf_ptr(btf_new(data, size, NULL)); +} + +static struct btf *btf_parse_elf(const char *path, struct btf *base_btf, + struct btf_ext **btf_ext) +{ + Elf_Data *btf_data = NULL, *btf_ext_data = NULL; + int err = 0, fd = -1, idx = 0; + struct btf *btf = NULL; + Elf_Scn *scn = NULL; + Elf *elf = NULL; + GElf_Ehdr ehdr; + size_t shstrndx; + + if (elf_version(EV_CURRENT) == EV_NONE) { + pr_warn("failed to init libelf for %s\n", path); + return ERR_PTR(-LIBBPF_ERRNO__LIBELF); + } + + fd = open(path, O_RDONLY | O_CLOEXEC); + if (fd < 0) { + err = -errno; + pr_warn("failed to open %s: %s\n", path, strerror(errno)); + return ERR_PTR(err); + } + + err = -LIBBPF_ERRNO__FORMAT; + + elf = elf_begin(fd, ELF_C_READ, NULL); + if (!elf) { + pr_warn("failed to open %s as ELF file\n", path); + goto done; + } + if (!gelf_getehdr(elf, &ehdr)) { + pr_warn("failed to get EHDR from %s\n", path); + goto done; + } + + if (elf_getshdrstrndx(elf, &shstrndx)) { + pr_warn("failed to get section names section index for %s\n", + path); + goto done; + } + + if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) { + pr_warn("failed 
to get e_shstrndx from %s\n", path); + goto done; + } + + while ((scn = elf_nextscn(elf, scn)) != NULL) { + GElf_Shdr sh; + char *name; + + idx++; + if (gelf_getshdr(scn, &sh) != &sh) { + pr_warn("failed to get section(%d) header from %s\n", + idx, path); + goto done; + } + name = elf_strptr(elf, shstrndx, sh.sh_name); + if (!name) { + pr_warn("failed to get section(%d) name from %s\n", + idx, path); + goto done; + } + if (strcmp(name, BTF_ELF_SEC) == 0) { + btf_data = elf_getdata(scn, 0); + if (!btf_data) { + pr_warn("failed to get section(%d, %s) data from %s\n", + idx, name, path); + goto done; + } + continue; + } else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) { + btf_ext_data = elf_getdata(scn, 0); + if (!btf_ext_data) { + pr_warn("failed to get section(%d, %s) data from %s\n", + idx, name, path); + goto done; + } + continue; + } + } + + err = 0; + + if (!btf_data) { + err = -ENOENT; + goto done; + } + btf = btf_new(btf_data->d_buf, btf_data->d_size, base_btf); + err = libbpf_get_error(btf); + if (err) + goto done; + + switch (gelf_getclass(elf)) { + case ELFCLASS32: + btf__set_pointer_size(btf, 4); + break; + case ELFCLASS64: + btf__set_pointer_size(btf, 8); + break; + default: + pr_warn("failed to get ELF class (bitness) for %s\n", path); + break; + } + + if (btf_ext && btf_ext_data) { + *btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size); + err = libbpf_get_error(*btf_ext); + if (err) + goto done; + } else if (btf_ext) { + *btf_ext = NULL; + } +done: + if (elf) + elf_end(elf); + close(fd); + + if (!err) + return btf; + + if (btf_ext) + btf_ext__free(*btf_ext); + btf__free(btf); + + return ERR_PTR(err); +} + +struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext) +{ + return libbpf_ptr(btf_parse_elf(path, NULL, btf_ext)); +} + +struct btf *btf__parse_elf_split(const char *path, struct btf *base_btf) +{ + return libbpf_ptr(btf_parse_elf(path, base_btf, NULL)); +} + +static struct btf *btf_parse_raw(const char *path, struct btf *base_btf) +{ + struct btf *btf = NULL; + void *data = NULL; + FILE *f = NULL; + __u16 magic; + int err = 0; + long sz; + + f = fopen(path, "rb"); + if (!f) { + err = -errno; + goto err_out; + } + + /* check BTF magic */ + if (fread(&magic, 1, sizeof(magic), f) < sizeof(magic)) { + err = -EIO; + goto err_out; + } + if (magic != BTF_MAGIC && magic != bswap_16(BTF_MAGIC)) { + /* definitely not a raw BTF */ + err = -EPROTO; + goto err_out; + } + + /* get file size */ + if (fseek(f, 0, SEEK_END)) { + err = -errno; + goto err_out; + } + sz = ftell(f); + if (sz < 0) { + err = -errno; + goto err_out; + } + /* rewind to the start */ + if (fseek(f, 0, SEEK_SET)) { + err = -errno; + goto err_out; + } + + /* pre-alloc memory and read all of BTF data */ + data = malloc(sz); + if (!data) { + err = -ENOMEM; + goto err_out; + } + if (fread(data, 1, sz, f) < sz) { + err = -EIO; + goto err_out; + } + + /* finally parse BTF data */ + btf = btf_new(data, sz, base_btf); + +err_out: + free(data); + if (f) + fclose(f); + return err ? 
ERR_PTR(err) : btf; +} + +struct btf *btf__parse_raw(const char *path) +{ + return libbpf_ptr(btf_parse_raw(path, NULL)); +} + +struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf) +{ + return libbpf_ptr(btf_parse_raw(path, base_btf)); +} + +static struct btf *btf_parse(const char *path, struct btf *base_btf, struct btf_ext **btf_ext) +{ + struct btf *btf; + int err; + + if (btf_ext) + *btf_ext = NULL; + + btf = btf_parse_raw(path, base_btf); + err = libbpf_get_error(btf); + if (!err) + return btf; + if (err != -EPROTO) + return ERR_PTR(err); + return btf_parse_elf(path, base_btf, btf_ext); +} + +struct btf *btf__parse(const char *path, struct btf_ext **btf_ext) +{ + return libbpf_ptr(btf_parse(path, NULL, btf_ext)); +} + +struct btf *btf__parse_split(const char *path, struct btf *base_btf) +{ + return libbpf_ptr(btf_parse(path, base_btf, NULL)); +} + +static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian); + +int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level) +{ + LIBBPF_OPTS(bpf_btf_load_opts, opts); + __u32 buf_sz = 0, raw_size; + char *buf = NULL, *tmp; + void *raw_data; + int err = 0; + + if (btf->fd >= 0) + return libbpf_err(-EEXIST); + if (log_sz && !log_buf) + return libbpf_err(-EINVAL); + + /* cache native raw data representation */ + raw_data = btf_get_raw_data(btf, &raw_size, false); + if (!raw_data) { + err = -ENOMEM; + goto done; + } + btf->raw_size = raw_size; + btf->raw_data = raw_data; + +retry_load: + /* if log_level is 0, we won't provide log_buf/log_size to the kernel, + * initially. Only if BTF loading fails, we bump log_level to 1 and + * retry, using either auto-allocated or custom log_buf. This way + * non-NULL custom log_buf provides a buffer just in case, but hopes + * for successful load and no need for log_buf. + */ + if (log_level) { + /* if caller didn't provide custom log_buf, we'll keep + * allocating our own progressively bigger buffers for BTF + * verification log + */ + if (!log_buf) { + buf_sz = max((__u32)BPF_LOG_BUF_SIZE, buf_sz * 2); + tmp = realloc(buf, buf_sz); + if (!tmp) { + err = -ENOMEM; + goto done; + } + buf = tmp; + buf[0] = '\0'; + } + + opts.log_buf = log_buf ? log_buf : buf; + opts.log_size = log_buf ? log_sz : buf_sz; + opts.log_level = log_level; + } + + btf->fd = bpf_btf_load(raw_data, raw_size, &opts); + if (btf->fd < 0) { + /* time to turn on verbose mode and try again */ + if (log_level == 0) { + log_level = 1; + goto retry_load; + } + /* only retry if caller didn't provide custom log_buf, but + * make sure we can never overflow buf_sz + */ + if (!log_buf && errno == ENOSPC && buf_sz <= UINT_MAX / 2) + goto retry_load; + + err = -errno; + pr_warn("BTF loading error: %d\n", err); + /* don't print out contents of custom log_buf */ + if (!log_buf && buf[0]) + pr_warn("-- BEGIN BTF LOAD LOG ---\n%s\n-- END BTF LOAD LOG --\n", buf); + } + +done: + free(buf); + return libbpf_err(err); +} + +int btf__load_into_kernel(struct btf *btf) +{ + return btf_load_into_kernel(btf, NULL, 0, 0); +} + +int btf__load(struct btf *) __attribute__((alias("btf__load_into_kernel"))); + +int btf__fd(const struct btf *btf) +{ + return btf->fd; +} + +void btf__set_fd(struct btf *btf, int fd) +{ + btf->fd = fd; +} + +static const void *btf_strs_data(const struct btf *btf) +{ + return btf->strs_data ? 
btf->strs_data : strset__data(btf->strs_set); +} + +static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian) +{ + struct btf_header *hdr = btf->hdr; + struct btf_type *t; + void *data, *p; + __u32 data_sz; + int i; + + data = swap_endian ? btf->raw_data_swapped : btf->raw_data; + if (data) { + *size = btf->raw_size; + return data; + } + + data_sz = hdr->hdr_len + hdr->type_len + hdr->str_len; + data = calloc(1, data_sz); + if (!data) + return NULL; + p = data; + + memcpy(p, hdr, hdr->hdr_len); + if (swap_endian) + btf_bswap_hdr(p); + p += hdr->hdr_len; + + memcpy(p, btf->types_data, hdr->type_len); + if (swap_endian) { + for (i = 0; i < btf->nr_types; i++) { + t = p + btf->type_offs[i]; + /* btf_bswap_type_rest() relies on native t->info, so + * we swap base type info after we swapped all the + * additional information + */ + if (btf_bswap_type_rest(t)) + goto err_out; + btf_bswap_type_base(t); + } + } + p += hdr->type_len; + + memcpy(p, btf_strs_data(btf), hdr->str_len); + p += hdr->str_len; + + *size = data_sz; + return data; +err_out: + free(data); + return NULL; +} + +const void *btf__raw_data(const struct btf *btf_ro, __u32 *size) +{ + struct btf *btf = (struct btf *)btf_ro; + __u32 data_sz; + void *data; + + data = btf_get_raw_data(btf, &data_sz, btf->swapped_endian); + if (!data) + return errno = ENOMEM, NULL; + + btf->raw_size = data_sz; + if (btf->swapped_endian) + btf->raw_data_swapped = data; + else + btf->raw_data = data; + *size = data_sz; + return data; +} + +__attribute__((alias("btf__raw_data"))) +const void *btf__get_raw_data(const struct btf *btf, __u32 *size); + +const char *btf__str_by_offset(const struct btf *btf, __u32 offset) +{ + if (offset < btf->start_str_off) + return btf__str_by_offset(btf->base_btf, offset); + else if (offset - btf->start_str_off < btf->hdr->str_len) + return btf_strs_data(btf) + (offset - btf->start_str_off); + else + return errno = EINVAL, NULL; +} + +const char *btf__name_by_offset(const struct btf *btf, __u32 offset) +{ + return btf__str_by_offset(btf, offset); +} + +struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf) +{ + struct bpf_btf_info btf_info; + __u32 len = sizeof(btf_info); + __u32 last_size; + struct btf *btf; + void *ptr; + int err; + + /* we won't know btf_size until we call bpf_obj_get_info_by_fd(). so + * let's start with a sane default - 4KiB here - and resize it only if + * bpf_obj_get_info_by_fd() needs a bigger buffer. + */ + last_size = 4096; + ptr = malloc(last_size); + if (!ptr) + return ERR_PTR(-ENOMEM); + + memset(&btf_info, 0, sizeof(btf_info)); + btf_info.btf = ptr_to_u64(ptr); + btf_info.btf_size = last_size; + err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len); + + if (!err && btf_info.btf_size > last_size) { + void *temp_ptr; + + last_size = btf_info.btf_size; + temp_ptr = realloc(ptr, last_size); + if (!temp_ptr) { + btf = ERR_PTR(-ENOMEM); + goto exit_free; + } + ptr = temp_ptr; + + len = sizeof(btf_info); + memset(&btf_info, 0, sizeof(btf_info)); + btf_info.btf = ptr_to_u64(ptr); + btf_info.btf_size = last_size; + + err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len); + } + + if (err || btf_info.btf_size > last_size) { + btf = err ? 
ERR_PTR(-errno) : ERR_PTR(-E2BIG); + goto exit_free; + } + + btf = btf_new(ptr, btf_info.btf_size, base_btf); + +exit_free: + free(ptr); + return btf; +} + +struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf) +{ + struct btf *btf; + int btf_fd; + + btf_fd = bpf_btf_get_fd_by_id(id); + if (btf_fd < 0) + return libbpf_err_ptr(-errno); + + btf = btf_get_from_fd(btf_fd, base_btf); + close(btf_fd); + + return libbpf_ptr(btf); +} + +struct btf *btf__load_from_kernel_by_id(__u32 id) +{ + return btf__load_from_kernel_by_id_split(id, NULL); +} + +int btf__get_from_id(__u32 id, struct btf **btf) +{ + struct btf *res; + int err; + + *btf = NULL; + res = btf__load_from_kernel_by_id(id); + err = libbpf_get_error(res); + + if (err) + return libbpf_err(err); + + *btf = res; + return 0; +} + +int btf__get_map_kv_tids(const struct btf *btf, const char *map_name, + __u32 expected_key_size, __u32 expected_value_size, + __u32 *key_type_id, __u32 *value_type_id) +{ + const struct btf_type *container_type; + const struct btf_member *key, *value; + const size_t max_name = 256; + char container_name[max_name]; + __s64 key_size, value_size; + __s32 container_id; + + if (snprintf(container_name, max_name, "____btf_map_%s", map_name) == max_name) { + pr_warn("map:%s length of '____btf_map_%s' is too long\n", + map_name, map_name); + return libbpf_err(-EINVAL); + } + + container_id = btf__find_by_name(btf, container_name); + if (container_id < 0) { + pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n", + map_name, container_name); + return libbpf_err(container_id); + } + + container_type = btf__type_by_id(btf, container_id); + if (!container_type) { + pr_warn("map:%s cannot find BTF type for container_id:%u\n", + map_name, container_id); + return libbpf_err(-EINVAL); + } + + if (!btf_is_struct(container_type) || btf_vlen(container_type) < 2) { + pr_warn("map:%s container_name:%s is an invalid container struct\n", + map_name, container_name); + return libbpf_err(-EINVAL); + } + + key = btf_members(container_type); + value = key + 1; + + key_size = btf__resolve_size(btf, key->type); + if (key_size < 0) { + pr_warn("map:%s invalid BTF key_type_size\n", map_name); + return libbpf_err(key_size); + } + + if (expected_key_size != key_size) { + pr_warn("map:%s btf_key_type_size:%u != map_def_key_size:%u\n", + map_name, (__u32)key_size, expected_key_size); + return libbpf_err(-EINVAL); + } + + value_size = btf__resolve_size(btf, value->type); + if (value_size < 0) { + pr_warn("map:%s invalid BTF value_type_size\n", map_name); + return libbpf_err(value_size); + } + + if (expected_value_size != value_size) { + pr_warn("map:%s btf_value_type_size:%u != map_def_value_size:%u\n", + map_name, (__u32)value_size, expected_value_size); + return libbpf_err(-EINVAL); + } + + *key_type_id = key->type; + *value_type_id = value->type; + + return 0; +} + +static void btf_invalidate_raw_data(struct btf *btf) +{ + if (btf->raw_data) { + free(btf->raw_data); + btf->raw_data = NULL; + } + if (btf->raw_data_swapped) { + free(btf->raw_data_swapped); + btf->raw_data_swapped = NULL; + } +} + +/* Ensure BTF is ready to be modified (by splitting into a three memory + * regions for header, types, and strings). Also invalidate cached + * raw_data, if any. 
+ */ +static int btf_ensure_modifiable(struct btf *btf) +{ + void *hdr, *types; + struct strset *set = NULL; + int err = -ENOMEM; + + if (btf_is_modifiable(btf)) { + /* any BTF modification invalidates raw_data */ + btf_invalidate_raw_data(btf); + return 0; + } + + /* split raw data into three memory regions */ + hdr = malloc(btf->hdr->hdr_len); + types = malloc(btf->hdr->type_len); + if (!hdr || !types) + goto err_out; + + memcpy(hdr, btf->hdr, btf->hdr->hdr_len); + memcpy(types, btf->types_data, btf->hdr->type_len); + + /* build lookup index for all strings */ + set = strset__new(BTF_MAX_STR_OFFSET, btf->strs_data, btf->hdr->str_len); + if (IS_ERR(set)) { + err = PTR_ERR(set); + goto err_out; + } + + /* only when everything was successful, update internal state */ + btf->hdr = hdr; + btf->types_data = types; + btf->types_data_cap = btf->hdr->type_len; + btf->strs_data = NULL; + btf->strs_set = set; + /* if BTF was created from scratch, all strings are guaranteed to be + * unique and deduplicated + */ + if (btf->hdr->str_len == 0) + btf->strs_deduped = true; + if (!btf->base_btf && btf->hdr->str_len == 1) + btf->strs_deduped = true; + + /* invalidate raw_data representation */ + btf_invalidate_raw_data(btf); + + return 0; + +err_out: + strset__free(set); + free(hdr); + free(types); + return err; +} + +/* Find an offset in BTF string section that corresponds to a given string *s*. + * Returns: + * - >0 offset into string section, if string is found; + * - -ENOENT, if string is not in the string section; + * - <0, on any other error. + */ +int btf__find_str(struct btf *btf, const char *s) +{ + int off; + + if (btf->base_btf) { + off = btf__find_str(btf->base_btf, s); + if (off != -ENOENT) + return off; + } + + /* BTF needs to be in a modifiable state to build string lookup index */ + if (btf_ensure_modifiable(btf)) + return libbpf_err(-ENOMEM); + + off = strset__find_str(btf->strs_set, s); + if (off < 0) + return libbpf_err(off); + + return btf->start_str_off + off; +} + +/* Add a string s to the BTF string section. + * Returns: + * - > 0 offset into string section, on success; + * - < 0, on error. 
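+ *
+ * Illustrative sketch (not part of the upstream comment; *btf* and the
+ * variable names are hypothetical): additions are deduplicated, so the
+ * same string always maps to the same offset:
+ *
+ *   int off1 = btf__add_str(btf, "my_str");
+ *   int off2 = btf__add_str(btf, "my_str");    - same offset as off1 (dedup)
+ *   int off3 = btf__find_str(btf, "absent");   - negative error if not present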
+ */ +int btf__add_str(struct btf *btf, const char *s) +{ + int off; + + if (btf->base_btf) { + off = btf__find_str(btf->base_btf, s); + if (off != -ENOENT) + return off; + } + + if (btf_ensure_modifiable(btf)) + return libbpf_err(-ENOMEM); + + off = strset__add_str(btf->strs_set, s); + if (off < 0) + return libbpf_err(off); + + btf->hdr->str_len = strset__data_size(btf->strs_set); + + return btf->start_str_off + off; +} + +static void *btf_add_type_mem(struct btf *btf, size_t add_sz) +{ + return libbpf_add_mem(&btf->types_data, &btf->types_data_cap, 1, + btf->hdr->type_len, UINT_MAX, add_sz); +} + +static void btf_type_inc_vlen(struct btf_type *t) +{ + t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, btf_kflag(t)); +} + +static int btf_commit_type(struct btf *btf, int data_sz) +{ + int err; + + err = btf_add_type_idx_entry(btf, btf->hdr->type_len); + if (err) + return libbpf_err(err); + + btf->hdr->type_len += data_sz; + btf->hdr->str_off += data_sz; + btf->nr_types++; + return btf->start_id + btf->nr_types - 1; +} + +struct btf_pipe { + const struct btf *src; + struct btf *dst; + struct hashmap *str_off_map; /* map string offsets from src to dst */ +}; + +static int btf_rewrite_str(__u32 *str_off, void *ctx) +{ + struct btf_pipe *p = ctx; + void *mapped_off; + int off, err; + + if (!*str_off) /* nothing to do for empty strings */ + return 0; + + if (p->str_off_map && + hashmap__find(p->str_off_map, (void *)(long)*str_off, &mapped_off)) { + *str_off = (__u32)(long)mapped_off; + return 0; + } + + off = btf__add_str(p->dst, btf__str_by_offset(p->src, *str_off)); + if (off < 0) + return off; + + /* Remember string mapping from src to dst. It avoids + * performing expensive string comparisons. + */ + if (p->str_off_map) { + err = hashmap__append(p->str_off_map, (void *)(long)*str_off, (void *)(long)off); + if (err) + return err; + } + + *str_off = off; + return 0; +} + +int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type) +{ + struct btf_pipe p = { .src = src_btf, .dst = btf }; + struct btf_type *t; + int sz, err; + + sz = btf_type_size(src_type); + if (sz < 0) + return libbpf_err(sz); + + /* deconstruct BTF, if necessary, and invalidate raw_data */ + if (btf_ensure_modifiable(btf)) + return libbpf_err(-ENOMEM); + + t = btf_add_type_mem(btf, sz); + if (!t) + return libbpf_err(-ENOMEM); + + memcpy(t, src_type, sz); + + err = btf_type_visit_str_offs(t, btf_rewrite_str, &p); + if (err) + return libbpf_err(err); + + return btf_commit_type(btf, sz); +} + +static int btf_rewrite_type_ids(__u32 *type_id, void *ctx) +{ + struct btf *btf = ctx; + + if (!*type_id) /* nothing to do for VOID references */ + return 0; + + /* we haven't updated btf's type count yet, so + * btf->start_id + btf->nr_types - 1 is the type ID offset we should + * add to all newly added BTF types + */ + *type_id += btf->start_id + btf->nr_types - 1; + return 0; +} + +static size_t btf_dedup_identity_hash_fn(const void *key, void *ctx); +static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx); + +int btf__add_btf(struct btf *btf, const struct btf *src_btf) +{ + struct btf_pipe p = { .src = src_btf, .dst = btf }; + int data_sz, sz, cnt, i, err, old_strs_len; + __u32 *off; + void *t; + + /* appending split BTF isn't supported yet */ + if (src_btf->base_btf) + return libbpf_err(-ENOTSUP); + + /* deconstruct BTF, if necessary, and invalidate raw_data */ + if (btf_ensure_modifiable(btf)) + return libbpf_err(-ENOMEM); + + /* remember original strings section size if we 
have to roll back + * partial strings section changes + */ + old_strs_len = btf->hdr->str_len; + + data_sz = src_btf->hdr->type_len; + cnt = btf__type_cnt(src_btf) - 1; + + /* pre-allocate enough memory for new types */ + t = btf_add_type_mem(btf, data_sz); + if (!t) + return libbpf_err(-ENOMEM); + + /* pre-allocate enough memory for type offset index for new types */ + off = btf_add_type_offs_mem(btf, cnt); + if (!off) + return libbpf_err(-ENOMEM); + + /* Map the string offsets from src_btf to the offsets from btf to improve performance */ + p.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL); + if (IS_ERR(p.str_off_map)) + return libbpf_err(-ENOMEM); + + /* bulk copy types data for all types from src_btf */ + memcpy(t, src_btf->types_data, data_sz); + + for (i = 0; i < cnt; i++) { + sz = btf_type_size(t); + if (sz < 0) { + /* unlikely, has to be corrupted src_btf */ + err = sz; + goto err_out; + } + + /* fill out type ID to type offset mapping for lookups by type ID */ + *off = t - btf->types_data; + + /* add, dedup, and remap strings referenced by this BTF type */ + err = btf_type_visit_str_offs(t, btf_rewrite_str, &p); + if (err) + goto err_out; + + /* remap all type IDs referenced from this BTF type */ + err = btf_type_visit_type_ids(t, btf_rewrite_type_ids, btf); + if (err) + goto err_out; + + /* go to next type data and type offset index entry */ + t += sz; + off++; + } + + /* Up until now any of the copied type data was effectively invisible, + * so if we exited early before this point due to error, BTF would be + * effectively unmodified. There would be extra internal memory + * pre-allocated, but it would not be available for querying. But now + * that we've copied and rewritten all the data successfully, we can + * update type count and various internal offsets and sizes to + * "commit" the changes and made them visible to the outside world. + */ + btf->hdr->type_len += data_sz; + btf->hdr->str_off += data_sz; + btf->nr_types += cnt; + + hashmap__free(p.str_off_map); + + /* return type ID of the first added BTF type */ + return btf->start_id + btf->nr_types - cnt; +err_out: + /* zero out preallocated memory as if it was just allocated with + * libbpf_add_mem() + */ + memset(btf->types_data + btf->hdr->type_len, 0, data_sz); + memset(btf->strs_data + old_strs_len, 0, btf->hdr->str_len - old_strs_len); + + /* and now restore original strings section size; types data size + * wasn't modified, so doesn't need restoring, see big comment above */ + btf->hdr->str_len = old_strs_len; + + hashmap__free(p.str_off_map); + + return libbpf_err(err); +} + +/* + * Append new BTF_KIND_INT type with: + * - *name* - non-empty, non-NULL type name; + * - *sz* - power-of-2 (1, 2, 4, ..) size of the type, in bytes; + * - encoding is a combination of BTF_INT_SIGNED, BTF_INT_CHAR, BTF_INT_BOOL. + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. 
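+ *
+ * Minimal usage sketch (illustrative only, not upstream documentation;
+ * variable names are hypothetical):
+ *
+ *   struct btf *btf = btf__new_empty();
+ *   int int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
+ *   - int_id is a positive type ID on success (1 for a freshly created BTF)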
+ */ +int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding) +{ + struct btf_type *t; + int sz, name_off; + + /* non-empty name */ + if (!name || !name[0]) + return libbpf_err(-EINVAL); + /* byte_sz must be power of 2 */ + if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 16) + return libbpf_err(-EINVAL); + if (encoding & ~(BTF_INT_SIGNED | BTF_INT_CHAR | BTF_INT_BOOL)) + return libbpf_err(-EINVAL); + + /* deconstruct BTF, if necessary, and invalidate raw_data */ + if (btf_ensure_modifiable(btf)) + return libbpf_err(-ENOMEM); + + sz = sizeof(struct btf_type) + sizeof(int); + t = btf_add_type_mem(btf, sz); + if (!t) + return libbpf_err(-ENOMEM); + + /* if something goes wrong later, we might end up with an extra string, + * but that shouldn't be a problem, because BTF can't be constructed + * completely anyway and will most probably be just discarded + */ + name_off = btf__add_str(btf, name); + if (name_off < 0) + return name_off; + + t->name_off = name_off; + t->info = btf_type_info(BTF_KIND_INT, 0, 0); + t->size = byte_sz; + /* set INT info, we don't allow setting legacy bit offset/size */ + *(__u32 *)(t + 1) = (encoding << 24) | (byte_sz * 8); + + return btf_commit_type(btf, sz); +} + +/* + * Append new BTF_KIND_FLOAT type with: + * - *name* - non-empty, non-NULL type name; + * - *sz* - size of the type, in bytes; + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_float(struct btf *btf, const char *name, size_t byte_sz) +{ + struct btf_type *t; + int sz, name_off; + + /* non-empty name */ + if (!name || !name[0]) + return libbpf_err(-EINVAL); + + /* byte_sz must be one of the explicitly allowed values */ + if (byte_sz != 2 && byte_sz != 4 && byte_sz != 8 && byte_sz != 12 && + byte_sz != 16) + return libbpf_err(-EINVAL); + + if (btf_ensure_modifiable(btf)) + return libbpf_err(-ENOMEM); + + sz = sizeof(struct btf_type); + t = btf_add_type_mem(btf, sz); + if (!t) + return libbpf_err(-ENOMEM); + + name_off = btf__add_str(btf, name); + if (name_off < 0) + return name_off; + + t->name_off = name_off; + t->info = btf_type_info(BTF_KIND_FLOAT, 0, 0); + t->size = byte_sz; + + return btf_commit_type(btf, sz); +} + +/* it's completely legal to append BTF types with type IDs pointing forward to + * types that haven't been appended yet, so we only make sure that id looks + * sane, we can't guarantee that ID will always be valid + */ +static int validate_type_id(int id) +{ + if (id < 0 || id > BTF_MAX_NR_TYPES) + return -EINVAL; + return 0; +} + +/* generic append function for PTR, TYPEDEF, CONST/VOLATILE/RESTRICT */ +static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id) +{ + struct btf_type *t; + int sz, name_off = 0; + + if (validate_type_id(ref_type_id)) + return libbpf_err(-EINVAL); + + if (btf_ensure_modifiable(btf)) + return libbpf_err(-ENOMEM); + + sz = sizeof(struct btf_type); + t = btf_add_type_mem(btf, sz); + if (!t) + return libbpf_err(-ENOMEM); + + if (name && name[0]) { + name_off = btf__add_str(btf, name); + if (name_off < 0) + return name_off; + } + + t->name_off = name_off; + t->info = btf_type_info(kind, 0, 0); + t->type = ref_type_id; + + return btf_commit_type(btf, sz); +} + +/* + * Append new BTF_KIND_PTR type with: + * - *ref_type_id* - referenced type ID, it might not exist yet; + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. 
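+ *
+ * Illustrative sketch (assumes *int_id* was returned by an earlier
+ * btf__add_int() call; referencing a not-yet-added type ID is also legal):
+ *
+ *   int ptr_id = btf__add_ptr(btf, int_id);    - describes "int *"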
+ */ +int btf__add_ptr(struct btf *btf, int ref_type_id) +{ + return btf_add_ref_kind(btf, BTF_KIND_PTR, NULL, ref_type_id); +} + +/* + * Append new BTF_KIND_ARRAY type with: + * - *index_type_id* - type ID of the type describing array index; + * - *elem_type_id* - type ID of the type describing array element; + * - *nr_elems* - the size of the array; + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_array(struct btf *btf, int index_type_id, int elem_type_id, __u32 nr_elems) +{ + struct btf_type *t; + struct btf_array *a; + int sz; + + if (validate_type_id(index_type_id) || validate_type_id(elem_type_id)) + return libbpf_err(-EINVAL); + + if (btf_ensure_modifiable(btf)) + return libbpf_err(-ENOMEM); + + sz = sizeof(struct btf_type) + sizeof(struct btf_array); + t = btf_add_type_mem(btf, sz); + if (!t) + return libbpf_err(-ENOMEM); + + t->name_off = 0; + t->info = btf_type_info(BTF_KIND_ARRAY, 0, 0); + t->size = 0; + + a = btf_array(t); + a->type = elem_type_id; + a->index_type = index_type_id; + a->nelems = nr_elems; + + return btf_commit_type(btf, sz); +} + +/* generic STRUCT/UNION append function */ +static int btf_add_composite(struct btf *btf, int kind, const char *name, __u32 bytes_sz) +{ + struct btf_type *t; + int sz, name_off = 0; + + if (btf_ensure_modifiable(btf)) + return libbpf_err(-ENOMEM); + + sz = sizeof(struct btf_type); + t = btf_add_type_mem(btf, sz); + if (!t) + return libbpf_err(-ENOMEM); + + if (name && name[0]) { + name_off = btf__add_str(btf, name); + if (name_off < 0) + return name_off; + } + + /* start out with vlen=0 and no kflag; this will be adjusted when + * adding each member + */ + t->name_off = name_off; + t->info = btf_type_info(kind, 0, 0); + t->size = bytes_sz; + + return btf_commit_type(btf, sz); +} + +/* + * Append new BTF_KIND_STRUCT type with: + * - *name* - name of the struct, can be NULL or empty for anonymous structs; + * - *byte_sz* - size of the struct, in bytes; + * + * Struct initially has no fields in it. Fields can be added by + * btf__add_field() right after btf__add_struct() succeeds. + * + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_struct(struct btf *btf, const char *name, __u32 byte_sz) +{ + return btf_add_composite(btf, BTF_KIND_STRUCT, name, byte_sz); +} + +/* + * Append new BTF_KIND_UNION type with: + * - *name* - name of the union, can be NULL or empty for anonymous union; + * - *byte_sz* - size of the union, in bytes; + * + * Union initially has no fields in it. Fields can be added by + * btf__add_field() right after btf__add_union() succeeds. All fields + * should have *bit_offset* of 0. + * + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_union(struct btf *btf, const char *name, __u32 byte_sz) +{ + return btf_add_composite(btf, BTF_KIND_UNION, name, byte_sz); +} + +static struct btf_type *btf_last_type(struct btf *btf) +{ + return btf_type_by_id(btf, btf__type_cnt(btf) - 1); +} + +/* + * Append new field for the current STRUCT/UNION type with: + * - *name* - name of the field, can be NULL or empty for anonymous field; + * - *type_id* - type ID for the type describing field type; + * - *bit_offset* - bit offset of the start of the field within struct/union; + * - *bit_size* - bit size of a bitfield, 0 for non-bitfield fields; + * Returns: + * - 0, on success; + * - <0, on error. 
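+ *
+ * Illustrative sketch building "struct pair { int a; int b; }" (assumes
+ * *int_id* names a 4-byte int type added earlier; names are hypothetical):
+ *
+ *   int sid = btf__add_struct(btf, "pair", 8);
+ *   btf__add_field(btf, "a", int_id, 0, 0);    - bit offset 0, not a bitfield
+ *   btf__add_field(btf, "b", int_id, 32, 0);   - bit offset 32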
+ */ +int btf__add_field(struct btf *btf, const char *name, int type_id, + __u32 bit_offset, __u32 bit_size) +{ + struct btf_type *t; + struct btf_member *m; + bool is_bitfield; + int sz, name_off = 0; + + /* last type should be union/struct */ + if (btf->nr_types == 0) + return libbpf_err(-EINVAL); + t = btf_last_type(btf); + if (!btf_is_composite(t)) + return libbpf_err(-EINVAL); + + if (validate_type_id(type_id)) + return libbpf_err(-EINVAL); + /* best-effort bit field offset/size enforcement */ + is_bitfield = bit_size || (bit_offset % 8 != 0); + if (is_bitfield && (bit_size == 0 || bit_size > 255 || bit_offset > 0xffffff)) + return libbpf_err(-EINVAL); + + /* only offset 0 is allowed for unions */ + if (btf_is_union(t) && bit_offset) + return libbpf_err(-EINVAL); + + /* decompose and invalidate raw data */ + if (btf_ensure_modifiable(btf)) + return libbpf_err(-ENOMEM); + + sz = sizeof(struct btf_member); + m = btf_add_type_mem(btf, sz); + if (!m) + return libbpf_err(-ENOMEM); + + if (name && name[0]) { + name_off = btf__add_str(btf, name); + if (name_off < 0) + return name_off; + } + + m->name_off = name_off; + m->type = type_id; + m->offset = bit_offset | (bit_size << 24); + + /* btf_add_type_mem can invalidate t pointer */ + t = btf_last_type(btf); + /* update parent type's vlen and kflag */ + t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, is_bitfield || btf_kflag(t)); + + btf->hdr->type_len += sz; + btf->hdr->str_off += sz; + return 0; +} + +/* + * Append new BTF_KIND_ENUM type with: + * - *name* - name of the enum, can be NULL or empty for anonymous enums; + * - *byte_sz* - size of the enum, in bytes. + * + * Enum initially has no enum values in it (and corresponds to enum forward + * declaration). Enumerator values can be added by btf__add_enum_value() + * immediately after btf__add_enum() succeeds. + * + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz) +{ + struct btf_type *t; + int sz, name_off = 0; + + /* byte_sz must be power of 2 */ + if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 8) + return libbpf_err(-EINVAL); + + if (btf_ensure_modifiable(btf)) + return libbpf_err(-ENOMEM); + + sz = sizeof(struct btf_type); + t = btf_add_type_mem(btf, sz); + if (!t) + return libbpf_err(-ENOMEM); + + if (name && name[0]) { + name_off = btf__add_str(btf, name); + if (name_off < 0) + return name_off; + } + + /* start out with vlen=0; it will be adjusted when adding enum values */ + t->name_off = name_off; + t->info = btf_type_info(BTF_KIND_ENUM, 0, 0); + t->size = byte_sz; + + return btf_commit_type(btf, sz); +} + +/* + * Append new enum value for the current ENUM type with: + * - *name* - name of the enumerator value, can't be NULL or empty; + * - *value* - integer value corresponding to enum value *name*; + * Returns: + * - 0, on success; + * - <0, on error. 
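+ *
+ * Illustrative sketch (hypothetical enum, not from the upstream comment):
+ *
+ *   int eid = btf__add_enum(btf, "state", 4);
+ *   btf__add_enum_value(btf, "STATE_OFF", 0);
+ *   btf__add_enum_value(btf, "STATE_ON", 1);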
+ */ +int btf__add_enum_value(struct btf *btf, const char *name, __s64 value) +{ + struct btf_type *t; + struct btf_enum *v; + int sz, name_off; + + /* last type should be BTF_KIND_ENUM */ + if (btf->nr_types == 0) + return libbpf_err(-EINVAL); + t = btf_last_type(btf); + if (!btf_is_enum(t)) + return libbpf_err(-EINVAL); + + /* non-empty name */ + if (!name || !name[0]) + return libbpf_err(-EINVAL); + if (value < INT_MIN || value > UINT_MAX) + return libbpf_err(-E2BIG); + + /* decompose and invalidate raw data */ + if (btf_ensure_modifiable(btf)) + return libbpf_err(-ENOMEM); + + sz = sizeof(struct btf_enum); + v = btf_add_type_mem(btf, sz); + if (!v) + return libbpf_err(-ENOMEM); + + name_off = btf__add_str(btf, name); + if (name_off < 0) + return name_off; + + v->name_off = name_off; + v->val = value; + + /* update parent type's vlen */ + t = btf_last_type(btf); + btf_type_inc_vlen(t); + + btf->hdr->type_len += sz; + btf->hdr->str_off += sz; + return 0; +} + +/* + * Append new BTF_KIND_FWD type with: + * - *name*, non-empty/non-NULL name; + * - *fwd_kind*, kind of forward declaration, one of BTF_FWD_STRUCT, + * BTF_FWD_UNION, or BTF_FWD_ENUM; + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind) +{ + if (!name || !name[0]) + return libbpf_err(-EINVAL); + + switch (fwd_kind) { + case BTF_FWD_STRUCT: + case BTF_FWD_UNION: { + struct btf_type *t; + int id; + + id = btf_add_ref_kind(btf, BTF_KIND_FWD, name, 0); + if (id <= 0) + return id; + t = btf_type_by_id(btf, id); + t->info = btf_type_info(BTF_KIND_FWD, 0, fwd_kind == BTF_FWD_UNION); + return id; + } + case BTF_FWD_ENUM: + /* enum forward in BTF currently is just an enum with no enum + * values; we also assume a standard 4-byte size for it + */ + return btf__add_enum(btf, name, sizeof(int)); + default: + return libbpf_err(-EINVAL); + } +} + +/* + * Append new BTF_KING_TYPEDEF type with: + * - *name*, non-empty/non-NULL name; + * - *ref_type_id* - referenced type ID, it might not exist yet; + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id) +{ + if (!name || !name[0]) + return libbpf_err(-EINVAL); + + return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id); +} + +/* + * Append new BTF_KIND_VOLATILE type with: + * - *ref_type_id* - referenced type ID, it might not exist yet; + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_volatile(struct btf *btf, int ref_type_id) +{ + return btf_add_ref_kind(btf, BTF_KIND_VOLATILE, NULL, ref_type_id); +} + +/* + * Append new BTF_KIND_CONST type with: + * - *ref_type_id* - referenced type ID, it might not exist yet; + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_const(struct btf *btf, int ref_type_id) +{ + return btf_add_ref_kind(btf, BTF_KIND_CONST, NULL, ref_type_id); +} + +/* + * Append new BTF_KIND_RESTRICT type with: + * - *ref_type_id* - referenced type ID, it might not exist yet; + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. 
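+ *
+ * Illustrative sketch chaining type modifiers (assumes *int_id* was added
+ * earlier; variable names are hypothetical):
+ *
+ *   int cid = btf__add_const(btf, int_id);     - "const int"
+ *   int pid = btf__add_ptr(btf, cid);          - "const int *"
+ *   int rid = btf__add_restrict(btf, pid);     - "const int *restrict"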
+ */ +int btf__add_restrict(struct btf *btf, int ref_type_id) +{ + return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id); +} + +/* + * Append new BTF_KIND_TYPE_TAG type with: + * - *value*, non-empty/non-NULL tag value; + * - *ref_type_id* - referenced type ID, it might not exist yet; + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id) +{ + if (!value|| !value[0]) + return libbpf_err(-EINVAL); + + return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id); +} + +/* + * Append new BTF_KIND_FUNC type with: + * - *name*, non-empty/non-NULL name; + * - *proto_type_id* - FUNC_PROTO's type ID, it might not exist yet; + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_func(struct btf *btf, const char *name, + enum btf_func_linkage linkage, int proto_type_id) +{ + int id; + + if (!name || !name[0]) + return libbpf_err(-EINVAL); + if (linkage != BTF_FUNC_STATIC && linkage != BTF_FUNC_GLOBAL && + linkage != BTF_FUNC_EXTERN) + return libbpf_err(-EINVAL); + + id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id); + if (id > 0) { + struct btf_type *t = btf_type_by_id(btf, id); + + t->info = btf_type_info(BTF_KIND_FUNC, linkage, 0); + } + return libbpf_err(id); +} + +/* + * Append new BTF_KIND_FUNC_PROTO with: + * - *ret_type_id* - type ID for return result of a function. + * + * Function prototype initially has no arguments, but they can be added by + * btf__add_func_param() one by one, immediately after + * btf__add_func_proto() succeeded. + * + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_func_proto(struct btf *btf, int ret_type_id) +{ + struct btf_type *t; + int sz; + + if (validate_type_id(ret_type_id)) + return libbpf_err(-EINVAL); + + if (btf_ensure_modifiable(btf)) + return libbpf_err(-ENOMEM); + + sz = sizeof(struct btf_type); + t = btf_add_type_mem(btf, sz); + if (!t) + return libbpf_err(-ENOMEM); + + /* start out with vlen=0; this will be adjusted when adding enum + * values, if necessary + */ + t->name_off = 0; + t->info = btf_type_info(BTF_KIND_FUNC_PROTO, 0, 0); + t->type = ret_type_id; + + return btf_commit_type(btf, sz); +} + +/* + * Append new function parameter for current FUNC_PROTO type with: + * - *name* - parameter name, can be NULL or empty; + * - *type_id* - type ID describing the type of the parameter. + * Returns: + * - 0, on success; + * - <0, on error. 
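+ *
+ * Illustrative sketch for "int sum(int a, int b)" (assumes *int_id* is a
+ * previously added 4-byte signed int; names are hypothetical):
+ *
+ *   int proto_id = btf__add_func_proto(btf, int_id);
+ *   btf__add_func_param(btf, "a", int_id);
+ *   btf__add_func_param(btf, "b", int_id);
+ *   int func_id = btf__add_func(btf, "sum", BTF_FUNC_GLOBAL, proto_id);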
+ */ +int btf__add_func_param(struct btf *btf, const char *name, int type_id) +{ + struct btf_type *t; + struct btf_param *p; + int sz, name_off = 0; + + if (validate_type_id(type_id)) + return libbpf_err(-EINVAL); + + /* last type should be BTF_KIND_FUNC_PROTO */ + if (btf->nr_types == 0) + return libbpf_err(-EINVAL); + t = btf_last_type(btf); + if (!btf_is_func_proto(t)) + return libbpf_err(-EINVAL); + + /* decompose and invalidate raw data */ + if (btf_ensure_modifiable(btf)) + return libbpf_err(-ENOMEM); + + sz = sizeof(struct btf_param); + p = btf_add_type_mem(btf, sz); + if (!p) + return libbpf_err(-ENOMEM); + + if (name && name[0]) { + name_off = btf__add_str(btf, name); + if (name_off < 0) + return name_off; + } + + p->name_off = name_off; + p->type = type_id; + + /* update parent type's vlen */ + t = btf_last_type(btf); + btf_type_inc_vlen(t); + + btf->hdr->type_len += sz; + btf->hdr->str_off += sz; + return 0; +} + +/* + * Append new BTF_KIND_VAR type with: + * - *name* - non-empty/non-NULL name; + * - *linkage* - variable linkage, one of BTF_VAR_STATIC, + * BTF_VAR_GLOBAL_ALLOCATED, or BTF_VAR_GLOBAL_EXTERN; + * - *type_id* - type ID of the type describing the type of the variable. + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id) +{ + struct btf_type *t; + struct btf_var *v; + int sz, name_off; + + /* non-empty name */ + if (!name || !name[0]) + return libbpf_err(-EINVAL); + if (linkage != BTF_VAR_STATIC && linkage != BTF_VAR_GLOBAL_ALLOCATED && + linkage != BTF_VAR_GLOBAL_EXTERN) + return libbpf_err(-EINVAL); + if (validate_type_id(type_id)) + return libbpf_err(-EINVAL); + + /* deconstruct BTF, if necessary, and invalidate raw_data */ + if (btf_ensure_modifiable(btf)) + return libbpf_err(-ENOMEM); + + sz = sizeof(struct btf_type) + sizeof(struct btf_var); + t = btf_add_type_mem(btf, sz); + if (!t) + return libbpf_err(-ENOMEM); + + name_off = btf__add_str(btf, name); + if (name_off < 0) + return name_off; + + t->name_off = name_off; + t->info = btf_type_info(BTF_KIND_VAR, 0, 0); + t->type = type_id; + + v = btf_var(t); + v->linkage = linkage; + + return btf_commit_type(btf, sz); +} + +/* + * Append new BTF_KIND_DATASEC type with: + * - *name* - non-empty/non-NULL name; + * - *byte_sz* - data section size, in bytes. + * + * Data section is initially empty. Variables info can be added with + * btf__add_datasec_var_info() calls, after btf__add_datasec() succeeds. + * + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz) +{ + struct btf_type *t; + int sz, name_off; + + /* non-empty name */ + if (!name || !name[0]) + return libbpf_err(-EINVAL); + + if (btf_ensure_modifiable(btf)) + return libbpf_err(-ENOMEM); + + sz = sizeof(struct btf_type); + t = btf_add_type_mem(btf, sz); + if (!t) + return libbpf_err(-ENOMEM); + + name_off = btf__add_str(btf, name); + if (name_off < 0) + return name_off; + + /* start with vlen=0, which will be update as var_secinfos are added */ + t->name_off = name_off; + t->info = btf_type_info(BTF_KIND_DATASEC, 0, 0); + t->size = byte_sz; + + return btf_commit_type(btf, sz); +} + +/* + * Append new data section variable information entry for current DATASEC type: + * - *var_type_id* - type ID, describing type of the variable; + * - *offset* - variable offset within data section, in bytes; + * - *byte_sz* - variable size, in bytes. 
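+ *
+ * Illustrative sketch (hypothetical names; assumes *int_id* was added
+ * earlier): describe a 4-byte global variable at offset 0 of ".data":
+ *
+ *   int vid = btf__add_var(btf, "my_var", BTF_VAR_GLOBAL_ALLOCATED, int_id);
+ *   int did = btf__add_datasec(btf, ".data", 4);
+ *   btf__add_datasec_var_info(btf, vid, 0, 4);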
+ * + * Returns: + * - 0, on success; + * - <0, on error. + */ +int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __u32 byte_sz) +{ + struct btf_type *t; + struct btf_var_secinfo *v; + int sz; + + /* last type should be BTF_KIND_DATASEC */ + if (btf->nr_types == 0) + return libbpf_err(-EINVAL); + t = btf_last_type(btf); + if (!btf_is_datasec(t)) + return libbpf_err(-EINVAL); + + if (validate_type_id(var_type_id)) + return libbpf_err(-EINVAL); + + /* decompose and invalidate raw data */ + if (btf_ensure_modifiable(btf)) + return libbpf_err(-ENOMEM); + + sz = sizeof(struct btf_var_secinfo); + v = btf_add_type_mem(btf, sz); + if (!v) + return libbpf_err(-ENOMEM); + + v->type = var_type_id; + v->offset = offset; + v->size = byte_sz; + + /* update parent type's vlen */ + t = btf_last_type(btf); + btf_type_inc_vlen(t); + + btf->hdr->type_len += sz; + btf->hdr->str_off += sz; + return 0; +} + +/* + * Append new BTF_KIND_DECL_TAG type with: + * - *value* - non-empty/non-NULL string; + * - *ref_type_id* - referenced type ID, it might not exist yet; + * - *component_idx* - -1 for tagging reference type, otherwise struct/union + * member or function argument index; + * Returns: + * - >0, type ID of newly added BTF type; + * - <0, on error. + */ +int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id, + int component_idx) +{ + struct btf_type *t; + int sz, value_off; + + if (!value || !value[0] || component_idx < -1) + return libbpf_err(-EINVAL); + + if (validate_type_id(ref_type_id)) + return libbpf_err(-EINVAL); + + if (btf_ensure_modifiable(btf)) + return libbpf_err(-ENOMEM); + + sz = sizeof(struct btf_type) + sizeof(struct btf_decl_tag); + t = btf_add_type_mem(btf, sz); + if (!t) + return libbpf_err(-ENOMEM); + + value_off = btf__add_str(btf, value); + if (value_off < 0) + return value_off; + + t->name_off = value_off; + t->info = btf_type_info(BTF_KIND_DECL_TAG, 0, false); + t->type = ref_type_id; + btf_decl_tag(t)->component_idx = component_idx; + + return btf_commit_type(btf, sz); +} + +struct btf_ext_sec_setup_param { + __u32 off; + __u32 len; + __u32 min_rec_size; + struct btf_ext_info *ext_info; + const char *desc; +}; + +static int btf_ext_setup_info(struct btf_ext *btf_ext, + struct btf_ext_sec_setup_param *ext_sec) +{ + const struct btf_ext_info_sec *sinfo; + struct btf_ext_info *ext_info; + __u32 info_left, record_size; + size_t sec_cnt = 0; + /* The start of the info sec (including the __u32 record_size). 
*/ + void *info; + + if (ext_sec->len == 0) + return 0; + + if (ext_sec->off & 0x03) { + pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n", + ext_sec->desc); + return -EINVAL; + } + + info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off; + info_left = ext_sec->len; + + if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) { + pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n", + ext_sec->desc, ext_sec->off, ext_sec->len); + return -EINVAL; + } + + /* At least a record size */ + if (info_left < sizeof(__u32)) { + pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc); + return -EINVAL; + } + + /* The record size needs to meet the minimum standard */ + record_size = *(__u32 *)info; + if (record_size < ext_sec->min_rec_size || + record_size & 0x03) { + pr_debug("%s section in .BTF.ext has invalid record size %u\n", + ext_sec->desc, record_size); + return -EINVAL; + } + + sinfo = info + sizeof(__u32); + info_left -= sizeof(__u32); + + /* If no records, return failure now so .BTF.ext won't be used. */ + if (!info_left) { + pr_debug("%s section in .BTF.ext has no records", ext_sec->desc); + return -EINVAL; + } + + while (info_left) { + unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec); + __u64 total_record_size; + __u32 num_records; + + if (info_left < sec_hdrlen) { + pr_debug("%s section header is not found in .BTF.ext\n", + ext_sec->desc); + return -EINVAL; + } + + num_records = sinfo->num_info; + if (num_records == 0) { + pr_debug("%s section has incorrect num_records in .BTF.ext\n", + ext_sec->desc); + return -EINVAL; + } + + total_record_size = sec_hdrlen + (__u64)num_records * record_size; + if (info_left < total_record_size) { + pr_debug("%s section has incorrect num_records in .BTF.ext\n", + ext_sec->desc); + return -EINVAL; + } + + info_left -= total_record_size; + sinfo = (void *)sinfo + total_record_size; + sec_cnt++; + } + + ext_info = ext_sec->ext_info; + ext_info->len = ext_sec->len - sizeof(__u32); + ext_info->rec_size = record_size; + ext_info->info = info + sizeof(__u32); + ext_info->sec_cnt = sec_cnt; + + return 0; +} + +static int btf_ext_setup_func_info(struct btf_ext *btf_ext) +{ + struct btf_ext_sec_setup_param param = { + .off = btf_ext->hdr->func_info_off, + .len = btf_ext->hdr->func_info_len, + .min_rec_size = sizeof(struct bpf_func_info_min), + .ext_info = &btf_ext->func_info, + .desc = "func_info" + }; + + return btf_ext_setup_info(btf_ext, ¶m); +} + +static int btf_ext_setup_line_info(struct btf_ext *btf_ext) +{ + struct btf_ext_sec_setup_param param = { + .off = btf_ext->hdr->line_info_off, + .len = btf_ext->hdr->line_info_len, + .min_rec_size = sizeof(struct bpf_line_info_min), + .ext_info = &btf_ext->line_info, + .desc = "line_info", + }; + + return btf_ext_setup_info(btf_ext, ¶m); +} + +static int btf_ext_setup_core_relos(struct btf_ext *btf_ext) +{ + struct btf_ext_sec_setup_param param = { + .off = btf_ext->hdr->core_relo_off, + .len = btf_ext->hdr->core_relo_len, + .min_rec_size = sizeof(struct bpf_core_relo), + .ext_info = &btf_ext->core_relo_info, + .desc = "core_relo", + }; + + return btf_ext_setup_info(btf_ext, ¶m); +} + +static int btf_ext_parse_hdr(__u8 *data, __u32 data_size) +{ + const struct btf_ext_header *hdr = (struct btf_ext_header *)data; + + if (data_size < offsetofend(struct btf_ext_header, hdr_len) || + data_size < hdr->hdr_len) { + pr_debug("BTF.ext header not found"); + return -EINVAL; + } + + if (hdr->magic == bswap_16(BTF_MAGIC)) { + pr_warn("BTF.ext in non-native 
endianness is not supported\n"); + return -ENOTSUP; + } else if (hdr->magic != BTF_MAGIC) { + pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic); + return -EINVAL; + } + + if (hdr->version != BTF_VERSION) { + pr_debug("Unsupported BTF.ext version:%u\n", hdr->version); + return -ENOTSUP; + } + + if (hdr->flags) { + pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags); + return -ENOTSUP; + } + + if (data_size == hdr->hdr_len) { + pr_debug("BTF.ext has no data\n"); + return -EINVAL; + } + + return 0; +} + +void btf_ext__free(struct btf_ext *btf_ext) +{ + if (IS_ERR_OR_NULL(btf_ext)) + return; + free(btf_ext->func_info.sec_idxs); + free(btf_ext->line_info.sec_idxs); + free(btf_ext->core_relo_info.sec_idxs); + free(btf_ext->data); + free(btf_ext); +} + +struct btf_ext *btf_ext__new(const __u8 *data, __u32 size) +{ + struct btf_ext *btf_ext; + int err; + + btf_ext = calloc(1, sizeof(struct btf_ext)); + if (!btf_ext) + return libbpf_err_ptr(-ENOMEM); + + btf_ext->data_size = size; + btf_ext->data = malloc(size); + if (!btf_ext->data) { + err = -ENOMEM; + goto done; + } + memcpy(btf_ext->data, data, size); + + err = btf_ext_parse_hdr(btf_ext->data, size); + if (err) + goto done; + + if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, line_info_len)) { + err = -EINVAL; + goto done; + } + + err = btf_ext_setup_func_info(btf_ext); + if (err) + goto done; + + err = btf_ext_setup_line_info(btf_ext); + if (err) + goto done; + + if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len)) + goto done; /* skip core relos parsing */ + + err = btf_ext_setup_core_relos(btf_ext); + if (err) + goto done; + +done: + if (err) { + btf_ext__free(btf_ext); + return libbpf_err_ptr(err); + } + + return btf_ext; +} + +const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size) +{ + *size = btf_ext->data_size; + return btf_ext->data; +} + +static int btf_ext_reloc_info(const struct btf *btf, + const struct btf_ext_info *ext_info, + const char *sec_name, __u32 insns_cnt, + void **info, __u32 *cnt) +{ + __u32 sec_hdrlen = sizeof(struct btf_ext_info_sec); + __u32 i, record_size, existing_len, records_len; + struct btf_ext_info_sec *sinfo; + const char *info_sec_name; + __u64 remain_len; + void *data; + + record_size = ext_info->rec_size; + sinfo = ext_info->info; + remain_len = ext_info->len; + while (remain_len > 0) { + records_len = sinfo->num_info * record_size; + info_sec_name = btf__name_by_offset(btf, sinfo->sec_name_off); + if (strcmp(info_sec_name, sec_name)) { + remain_len -= sec_hdrlen + records_len; + sinfo = (void *)sinfo + sec_hdrlen + records_len; + continue; + } + + existing_len = (*cnt) * record_size; + data = realloc(*info, existing_len + records_len); + if (!data) + return libbpf_err(-ENOMEM); + + memcpy(data + existing_len, sinfo->data, records_len); + /* adjust insn_off only, the rest data will be passed + * to the kernel. 
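+ * As a worked example of the conversion below: with insns_cnt == 16, a
+ * record whose insn_off is byte offset 24 within its section becomes
+ * instruction index 24 / sizeof(struct bpf_insn) + 16 == 24 / 8 + 16 == 19.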
+ */ + for (i = 0; i < sinfo->num_info; i++) { + __u32 *insn_off; + + insn_off = data + existing_len + (i * record_size); + *insn_off = *insn_off / sizeof(struct bpf_insn) + insns_cnt; + } + *info = data; + *cnt += sinfo->num_info; + return 0; + } + + return libbpf_err(-ENOENT); +} + +int btf_ext__reloc_func_info(const struct btf *btf, + const struct btf_ext *btf_ext, + const char *sec_name, __u32 insns_cnt, + void **func_info, __u32 *cnt) +{ + return btf_ext_reloc_info(btf, &btf_ext->func_info, sec_name, + insns_cnt, func_info, cnt); +} + +int btf_ext__reloc_line_info(const struct btf *btf, + const struct btf_ext *btf_ext, + const char *sec_name, __u32 insns_cnt, + void **line_info, __u32 *cnt) +{ + return btf_ext_reloc_info(btf, &btf_ext->line_info, sec_name, + insns_cnt, line_info, cnt); +} + +__u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext) +{ + return btf_ext->func_info.rec_size; +} + +__u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext) +{ + return btf_ext->line_info.rec_size; +} + +struct btf_dedup; + +static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts); +static void btf_dedup_free(struct btf_dedup *d); +static int btf_dedup_prep(struct btf_dedup *d); +static int btf_dedup_strings(struct btf_dedup *d); +static int btf_dedup_prim_types(struct btf_dedup *d); +static int btf_dedup_struct_types(struct btf_dedup *d); +static int btf_dedup_ref_types(struct btf_dedup *d); +static int btf_dedup_compact_types(struct btf_dedup *d); +static int btf_dedup_remap_types(struct btf_dedup *d); + +/* + * Deduplicate BTF types and strings. + * + * BTF dedup algorithm takes as an input `struct btf` representing `.BTF` ELF + * section with all BTF type descriptors and string data. It overwrites that + * memory in-place with deduplicated types and strings without any loss of + * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section + * is provided, all the strings referenced from .BTF.ext section are honored + * and updated to point to the right offsets after deduplication. + * + * If function returns with error, type/string data might be garbled and should + * be discarded. + * + * More verbose and detailed description of both problem btf_dedup is solving, + * as well as solution could be found at: + * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html + * + * Problem description and justification + * ===================================== + * + * BTF type information is typically emitted either as a result of conversion + * from DWARF to BTF or directly by compiler. In both cases, each compilation + * unit contains information about a subset of all the types that are used + * in an application. These subsets are frequently overlapping and contain a lot + * of duplicated information when later concatenated together into a single + * binary. This algorithm ensures that each unique type is represented by single + * BTF type descriptor, greatly reducing resulting size of BTF data. + * + * Compilation unit isolation and subsequent duplication of data is not the only + * problem. The same type hierarchy (e.g., struct and all the type that struct + * references) in different compilation units can be represented in BTF to + * various degrees of completeness (or, rather, incompleteness) due to + * struct/union forward declarations. + * + * Let's take a look at an example, that we'll use to better understand the + * problem (and solution). 
+ * problem (and solution).
Suppose we have two compilation units, each using + * same `struct S`, but each of them having incomplete type information about + * struct's fields: + * + * // CU #1: + * struct S; + * struct A { + * int a; + * struct A* self; + * struct S* parent; + * }; + * struct B; + * struct S { + * struct A* a_ptr; + * struct B* b_ptr; + * }; + * + * // CU #2: + * struct S; + * struct A; + * struct B { + * int b; + * struct B* self; + * struct S* parent; + * }; + * struct S { + * struct A* a_ptr; + * struct B* b_ptr; + * }; + * + * In case of CU #1, BTF data will know only that `struct B` exist (but no + * more), but will know the complete type information about `struct A`. While + * for CU #2, it will know full type information about `struct B`, but will + * only know about forward declaration of `struct A` (in BTF terms, it will + * have `BTF_KIND_FWD` type descriptor with name `B`). + * + * This compilation unit isolation means that it's possible that there is no + * single CU with complete type information describing structs `S`, `A`, and + * `B`. Also, we might get tons of duplicated and redundant type information. + * + * Additional complication we need to keep in mind comes from the fact that + * types, in general, can form graphs containing cycles, not just DAGs. + * + * While algorithm does deduplication, it also merges and resolves type + * information (unless disabled throught `struct btf_opts`), whenever possible. + * E.g., in the example above with two compilation units having partial type + * information for structs `A` and `B`, the output of algorithm will emit + * a single copy of each BTF type that describes structs `A`, `B`, and `S` + * (as well as type information for `int` and pointers), as if they were defined + * in a single compilation unit as: + * + * struct A { + * int a; + * struct A* self; + * struct S* parent; + * }; + * struct B { + * int b; + * struct B* self; + * struct S* parent; + * }; + * struct S { + * struct A* a_ptr; + * struct B* b_ptr; + * }; + * + * Algorithm summary + * ================= + * + * Algorithm completes its work in 6 separate passes: + * + * 1. Strings deduplication. + * 2. Primitive types deduplication (int, enum, fwd). + * 3. Struct/union types deduplication. + * 4. Reference types deduplication (pointers, typedefs, arrays, funcs, func + * protos, and const/volatile/restrict modifiers). + * 5. Types compaction. + * 6. Types remapping. + * + * Algorithm determines canonical type descriptor, which is a single + * representative type for each truly unique type. This canonical type is the + * one that will go into final deduplicated BTF type information. For + * struct/unions, it is also the type that algorithm will merge additional type + * information into (while resolving FWDs), as it discovers it from data in + * other CUs. Each input BTF type eventually gets either mapped to itself, if + * that type is canonical, or to some other type, if that type is equivalent + * and was chosen as canonical representative. This mapping is stored in + * `btf_dedup->map` array. This map is also used to record STRUCT/UNION that + * FWD type got resolved to. + * + * To facilitate fast discovery of canonical types, we also maintain canonical + * index (`btf_dedup->dedup_table`), which maps type descriptor's signature hash + * (i.e., hashed kind, name, size, fields, etc) into a list of canonical types + * that match that signature. 
With sufficiently good choice of type signature + * hashing function, we can limit number of canonical types for each unique type + * signature to a very small number, allowing to find canonical type for any + * duplicated type very quickly. + * + * Struct/union deduplication is the most critical part and algorithm for + * deduplicating structs/unions is described in greater details in comments for + * `btf_dedup_is_equiv` function. + */ + +DEFAULT_VERSION(btf__dedup_v0_6_0, btf__dedup, LIBBPF_0.6.0) +int btf__dedup_v0_6_0(struct btf *btf, const struct btf_dedup_opts *opts) +{ + struct btf_dedup *d; + int err; + + if (!OPTS_VALID(opts, btf_dedup_opts)) + return libbpf_err(-EINVAL); + + d = btf_dedup_new(btf, opts); + if (IS_ERR(d)) { + pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d)); + return libbpf_err(-EINVAL); + } + + if (btf_ensure_modifiable(btf)) { + err = -ENOMEM; + goto done; + } + + err = btf_dedup_prep(d); + if (err) { + pr_debug("btf_dedup_prep failed:%d\n", err); + goto done; + } + err = btf_dedup_strings(d); + if (err < 0) { + pr_debug("btf_dedup_strings failed:%d\n", err); + goto done; + } + err = btf_dedup_prim_types(d); + if (err < 0) { + pr_debug("btf_dedup_prim_types failed:%d\n", err); + goto done; + } + err = btf_dedup_struct_types(d); + if (err < 0) { + pr_debug("btf_dedup_struct_types failed:%d\n", err); + goto done; + } + err = btf_dedup_ref_types(d); + if (err < 0) { + pr_debug("btf_dedup_ref_types failed:%d\n", err); + goto done; + } + err = btf_dedup_compact_types(d); + if (err < 0) { + pr_debug("btf_dedup_compact_types failed:%d\n", err); + goto done; + } + err = btf_dedup_remap_types(d); + if (err < 0) { + pr_debug("btf_dedup_remap_types failed:%d\n", err); + goto done; + } + +done: + btf_dedup_free(d); + return libbpf_err(err); +} + +COMPAT_VERSION(btf__dedup_deprecated, btf__dedup, LIBBPF_0.0.2) +int btf__dedup_deprecated(struct btf *btf, struct btf_ext *btf_ext, const void *unused_opts) +{ + LIBBPF_OPTS(btf_dedup_opts, opts, .btf_ext = btf_ext); + + if (unused_opts) { + pr_warn("please use new version of btf__dedup() that supports options\n"); + return libbpf_err(-ENOTSUP); + } + + return btf__dedup(btf, &opts); +} + +#define BTF_UNPROCESSED_ID ((__u32)-1) +#define BTF_IN_PROGRESS_ID ((__u32)-2) + +struct btf_dedup { + /* .BTF section to be deduped in-place */ + struct btf *btf; + /* + * Optional .BTF.ext section. When provided, any strings referenced + * from it will be taken into account when deduping strings + */ + struct btf_ext *btf_ext; + /* + * This is a map from any type's signature hash to a list of possible + * canonical representative type candidates. Hash collisions are + * ignored, so even types of various kinds can share same list of + * candidates, which is fine because we rely on subsequent + * btf_xxx_equal() checks to authoritatively verify type equality. + */ + struct hashmap *dedup_table; + /* Canonical types map */ + __u32 *map; + /* Hypothetical mapping, used during type graph equivalence checks */ + __u32 *hypot_map; + __u32 *hypot_list; + size_t hypot_cnt; + size_t hypot_cap; + /* Whether hypothetical mapping, if successful, would need to adjust + * already canonicalized types (due to a new forward declaration to + * concrete type resolution). In such case, during split BTF dedup + * candidate type would still be considered as different, because base + * BTF is considered to be immutable. 
+ */ + bool hypot_adjust_canon; + /* Various option modifying behavior of algorithm */ + struct btf_dedup_opts opts; + /* temporary strings deduplication state */ + struct strset *strs_set; +}; + +static long hash_combine(long h, long value) +{ + return h * 31 + value; +} + +#define for_each_dedup_cand(d, node, hash) \ + hashmap__for_each_key_entry(d->dedup_table, node, (void *)hash) + +static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id) +{ + return hashmap__append(d->dedup_table, + (void *)hash, (void *)(long)type_id); +} + +static int btf_dedup_hypot_map_add(struct btf_dedup *d, + __u32 from_id, __u32 to_id) +{ + if (d->hypot_cnt == d->hypot_cap) { + __u32 *new_list; + + d->hypot_cap += max((size_t)16, d->hypot_cap / 2); + new_list = libbpf_reallocarray(d->hypot_list, d->hypot_cap, sizeof(__u32)); + if (!new_list) + return -ENOMEM; + d->hypot_list = new_list; + } + d->hypot_list[d->hypot_cnt++] = from_id; + d->hypot_map[from_id] = to_id; + return 0; +} + +static void btf_dedup_clear_hypot_map(struct btf_dedup *d) +{ + int i; + + for (i = 0; i < d->hypot_cnt; i++) + d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID; + d->hypot_cnt = 0; + d->hypot_adjust_canon = false; +} + +static void btf_dedup_free(struct btf_dedup *d) +{ + hashmap__free(d->dedup_table); + d->dedup_table = NULL; + + free(d->map); + d->map = NULL; + + free(d->hypot_map); + d->hypot_map = NULL; + + free(d->hypot_list); + d->hypot_list = NULL; + + free(d); +} + +static size_t btf_dedup_identity_hash_fn(const void *key, void *ctx) +{ + return (size_t)key; +} + +static size_t btf_dedup_collision_hash_fn(const void *key, void *ctx) +{ + return 0; +} + +static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx) +{ + return k1 == k2; +} + +static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts) +{ + struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup)); + hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn; + int i, err = 0, type_cnt; + + if (!d) + return ERR_PTR(-ENOMEM); + + if (OPTS_GET(opts, force_collisions, false)) + hash_fn = btf_dedup_collision_hash_fn; + + d->btf = btf; + d->btf_ext = OPTS_GET(opts, btf_ext, NULL); + + d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL); + if (IS_ERR(d->dedup_table)) { + err = PTR_ERR(d->dedup_table); + d->dedup_table = NULL; + goto done; + } + + type_cnt = btf__type_cnt(btf); + d->map = malloc(sizeof(__u32) * type_cnt); + if (!d->map) { + err = -ENOMEM; + goto done; + } + /* special BTF "void" type is made canonical immediately */ + d->map[0] = 0; + for (i = 1; i < type_cnt; i++) { + struct btf_type *t = btf_type_by_id(d->btf, i); + + /* VAR and DATASEC are never deduped and are self-canonical */ + if (btf_is_var(t) || btf_is_datasec(t)) + d->map[i] = i; + else + d->map[i] = BTF_UNPROCESSED_ID; + } + + d->hypot_map = malloc(sizeof(__u32) * type_cnt); + if (!d->hypot_map) { + err = -ENOMEM; + goto done; + } + for (i = 0; i < type_cnt; i++) + d->hypot_map[i] = BTF_UNPROCESSED_ID; + +done: + if (err) { + btf_dedup_free(d); + return ERR_PTR(err); + } + + return d; +} + +/* + * Iterate over all possible places in .BTF and .BTF.ext that can reference + * string and pass pointer to it to a provided callback `fn`. 
+ */ +static int btf_for_each_str_off(struct btf_dedup *d, str_off_visit_fn fn, void *ctx) +{ + int i, r; + + for (i = 0; i < d->btf->nr_types; i++) { + struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i); + + r = btf_type_visit_str_offs(t, fn, ctx); + if (r) + return r; + } + + if (!d->btf_ext) + return 0; + + r = btf_ext_visit_str_offs(d->btf_ext, fn, ctx); + if (r) + return r; + + return 0; +} + +static int strs_dedup_remap_str_off(__u32 *str_off_ptr, void *ctx) +{ + struct btf_dedup *d = ctx; + __u32 str_off = *str_off_ptr; + const char *s; + int off, err; + + /* don't touch empty string or string in main BTF */ + if (str_off == 0 || str_off < d->btf->start_str_off) + return 0; + + s = btf__str_by_offset(d->btf, str_off); + if (d->btf->base_btf) { + err = btf__find_str(d->btf->base_btf, s); + if (err >= 0) { + *str_off_ptr = err; + return 0; + } + if (err != -ENOENT) + return err; + } + + off = strset__add_str(d->strs_set, s); + if (off < 0) + return off; + + *str_off_ptr = d->btf->start_str_off + off; + return 0; +} + +/* + * Dedup string and filter out those that are not referenced from either .BTF + * or .BTF.ext (if provided) sections. + * + * This is done by building index of all strings in BTF's string section, + * then iterating over all entities that can reference strings (e.g., type + * names, struct field names, .BTF.ext line info, etc) and marking corresponding + * strings as used. After that all used strings are deduped and compacted into + * sequential blob of memory and new offsets are calculated. Then all the string + * references are iterated again and rewritten using new offsets. + */ +static int btf_dedup_strings(struct btf_dedup *d) +{ + int err; + + if (d->btf->strs_deduped) + return 0; + + d->strs_set = strset__new(BTF_MAX_STR_OFFSET, NULL, 0); + if (IS_ERR(d->strs_set)) { + err = PTR_ERR(d->strs_set); + goto err_out; + } + + if (!d->btf->base_btf) { + /* insert empty string; we won't be looking it up during strings + * dedup, but it's good to have it for generic BTF string lookups + */ + err = strset__add_str(d->strs_set, ""); + if (err < 0) + goto err_out; + } + + /* remap string offsets */ + err = btf_for_each_str_off(d, strs_dedup_remap_str_off, d); + if (err) + goto err_out; + + /* replace BTF string data and hash with deduped ones */ + strset__free(d->btf->strs_set); + d->btf->hdr->str_len = strset__data_size(d->strs_set); + d->btf->strs_set = d->strs_set; + d->strs_set = NULL; + d->btf->strs_deduped = true; + return 0; + +err_out: + strset__free(d->strs_set); + d->strs_set = NULL; + + return err; +} + +static long btf_hash_common(struct btf_type *t) +{ + long h; + + h = hash_combine(0, t->name_off); + h = hash_combine(h, t->info); + h = hash_combine(h, t->size); + return h; +} + +static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2) +{ + return t1->name_off == t2->name_off && + t1->info == t2->info && + t1->size == t2->size; +} + +/* Calculate type signature hash of INT or TAG. */ +static long btf_hash_int_decl_tag(struct btf_type *t) +{ + __u32 info = *(__u32 *)(t + 1); + long h; + + h = btf_hash_common(t); + h = hash_combine(h, info); + return h; +} + +/* Check structural equality of two INTs or TAGs. */ +static bool btf_equal_int_tag(struct btf_type *t1, struct btf_type *t2) +{ + __u32 info1, info2; + + if (!btf_equal_common(t1, t2)) + return false; + info1 = *(__u32 *)(t1 + 1); + info2 = *(__u32 *)(t2 + 1); + return info1 == info2; +} + +/* Calculate type signature hash of ENUM. 
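+ * Note that vlen and members are deliberately left out of the hash (see
+ * below), so an enum forward declaration (vlen == 0) and the corresponding
+ * fully defined enum hash identically; that is what lets primitive type
+ * deduplication resolve the enum fwd to the full enum.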
*/ +static long btf_hash_enum(struct btf_type *t) +{ + long h; + + /* don't hash vlen and enum members to support enum fwd resolving */ + h = hash_combine(0, t->name_off); + h = hash_combine(h, t->info & ~0xffff); + h = hash_combine(h, t->size); + return h; +} + +/* Check structural equality of two ENUMs. */ +static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2) +{ + const struct btf_enum *m1, *m2; + __u16 vlen; + int i; + + if (!btf_equal_common(t1, t2)) + return false; + + vlen = btf_vlen(t1); + m1 = btf_enum(t1); + m2 = btf_enum(t2); + for (i = 0; i < vlen; i++) { + if (m1->name_off != m2->name_off || m1->val != m2->val) + return false; + m1++; + m2++; + } + return true; +} + +static inline bool btf_is_enum_fwd(struct btf_type *t) +{ + return btf_is_enum(t) && btf_vlen(t) == 0; +} + +static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2) +{ + if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2)) + return btf_equal_enum(t1, t2); + /* ignore vlen when comparing */ + return t1->name_off == t2->name_off && + (t1->info & ~0xffff) == (t2->info & ~0xffff) && + t1->size == t2->size; +} + +/* + * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs, + * as referenced type IDs equivalence is established separately during type + * graph equivalence check algorithm. + */ +static long btf_hash_struct(struct btf_type *t) +{ + const struct btf_member *member = btf_members(t); + __u32 vlen = btf_vlen(t); + long h = btf_hash_common(t); + int i; + + for (i = 0; i < vlen; i++) { + h = hash_combine(h, member->name_off); + h = hash_combine(h, member->offset); + /* no hashing of referenced type ID, it can be unresolved yet */ + member++; + } + return h; +} + +/* + * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced + * type IDs. This check is performed during type graph equivalence check and + * referenced types equivalence is checked separately. + */ +static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2) +{ + const struct btf_member *m1, *m2; + __u16 vlen; + int i; + + if (!btf_equal_common(t1, t2)) + return false; + + vlen = btf_vlen(t1); + m1 = btf_members(t1); + m2 = btf_members(t2); + for (i = 0; i < vlen; i++) { + if (m1->name_off != m2->name_off || m1->offset != m2->offset) + return false; + m1++; + m2++; + } + return true; +} + +/* + * Calculate type signature hash of ARRAY, including referenced type IDs, + * under assumption that they were already resolved to canonical type IDs and + * are not going to change. + */ +static long btf_hash_array(struct btf_type *t) +{ + const struct btf_array *info = btf_array(t); + long h = btf_hash_common(t); + + h = hash_combine(h, info->type); + h = hash_combine(h, info->index_type); + h = hash_combine(h, info->nelems); + return h; +} + +/* + * Check exact equality of two ARRAYs, taking into account referenced + * type IDs, under assumption that they were already resolved to canonical + * type IDs and are not going to change. + * This function is called during reference types deduplication to compare + * ARRAY to potential canonical representative. 
+ */ +static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2) +{ + const struct btf_array *info1, *info2; + + if (!btf_equal_common(t1, t2)) + return false; + + info1 = btf_array(t1); + info2 = btf_array(t2); + return info1->type == info2->type && + info1->index_type == info2->index_type && + info1->nelems == info2->nelems; +} + +/* + * Check structural compatibility of two ARRAYs, ignoring referenced type + * IDs. This check is performed during type graph equivalence check and + * referenced types equivalence is checked separately. + */ +static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2) +{ + if (!btf_equal_common(t1, t2)) + return false; + + return btf_array(t1)->nelems == btf_array(t2)->nelems; +} + +/* + * Calculate type signature hash of FUNC_PROTO, including referenced type IDs, + * under assumption that they were already resolved to canonical type IDs and + * are not going to change. + */ +static long btf_hash_fnproto(struct btf_type *t) +{ + const struct btf_param *member = btf_params(t); + __u16 vlen = btf_vlen(t); + long h = btf_hash_common(t); + int i; + + for (i = 0; i < vlen; i++) { + h = hash_combine(h, member->name_off); + h = hash_combine(h, member->type); + member++; + } + return h; +} + +/* + * Check exact equality of two FUNC_PROTOs, taking into account referenced + * type IDs, under assumption that they were already resolved to canonical + * type IDs and are not going to change. + * This function is called during reference types deduplication to compare + * FUNC_PROTO to potential canonical representative. + */ +static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2) +{ + const struct btf_param *m1, *m2; + __u16 vlen; + int i; + + if (!btf_equal_common(t1, t2)) + return false; + + vlen = btf_vlen(t1); + m1 = btf_params(t1); + m2 = btf_params(t2); + for (i = 0; i < vlen; i++) { + if (m1->name_off != m2->name_off || m1->type != m2->type) + return false; + m1++; + m2++; + } + return true; +} + +/* + * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type + * IDs. This check is performed during type graph equivalence check and + * referenced types equivalence is checked separately. + */ +static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2) +{ + const struct btf_param *m1, *m2; + __u16 vlen; + int i; + + /* skip return type ID */ + if (t1->name_off != t2->name_off || t1->info != t2->info) + return false; + + vlen = btf_vlen(t1); + m1 = btf_params(t1); + m2 = btf_params(t2); + for (i = 0; i < vlen; i++) { + if (m1->name_off != m2->name_off) + return false; + m1++; + m2++; + } + return true; +} + +/* Prepare split BTF for deduplication by calculating hashes of base BTF's + * types and initializing the rest of the state (canonical type mapping) for + * the fixed base BTF part. 
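+ *
+ * E.g., when deduplicating split BTF built on top of vmlinux BTF (such as
+ * kernel module BTF, see btf__load_module_btf()), all base vmlinux types
+ * occupy IDs [1, start_id) and are recorded here as their own canonicals
+ * before any split BTF types are processed.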
+ */ +static int btf_dedup_prep(struct btf_dedup *d) +{ + struct btf_type *t; + int type_id; + long h; + + if (!d->btf->base_btf) + return 0; + + for (type_id = 1; type_id < d->btf->start_id; type_id++) { + t = btf_type_by_id(d->btf, type_id); + + /* all base BTF types are self-canonical by definition */ + d->map[type_id] = type_id; + + switch (btf_kind(t)) { + case BTF_KIND_VAR: + case BTF_KIND_DATASEC: + /* VAR and DATASEC are never hash/deduplicated */ + continue; + case BTF_KIND_CONST: + case BTF_KIND_VOLATILE: + case BTF_KIND_RESTRICT: + case BTF_KIND_PTR: + case BTF_KIND_FWD: + case BTF_KIND_TYPEDEF: + case BTF_KIND_FUNC: + case BTF_KIND_FLOAT: + case BTF_KIND_TYPE_TAG: + h = btf_hash_common(t); + break; + case BTF_KIND_INT: + case BTF_KIND_DECL_TAG: + h = btf_hash_int_decl_tag(t); + break; + case BTF_KIND_ENUM: + h = btf_hash_enum(t); + break; + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + h = btf_hash_struct(t); + break; + case BTF_KIND_ARRAY: + h = btf_hash_array(t); + break; + case BTF_KIND_FUNC_PROTO: + h = btf_hash_fnproto(t); + break; + default: + pr_debug("unknown kind %d for type [%d]\n", btf_kind(t), type_id); + return -EINVAL; + } + if (btf_dedup_table_add(d, h, type_id)) + return -ENOMEM; + } + + return 0; +} + +/* + * Deduplicate primitive types, that can't reference other types, by calculating + * their type signature hash and comparing them with any possible canonical + * candidate. If no canonical candidate matches, type itself is marked as + * canonical and is added into `btf_dedup->dedup_table` as another candidate. + */ +static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) +{ + struct btf_type *t = btf_type_by_id(d->btf, type_id); + struct hashmap_entry *hash_entry; + struct btf_type *cand; + /* if we don't find equivalent type, then we are canonical */ + __u32 new_id = type_id; + __u32 cand_id; + long h; + + switch (btf_kind(t)) { + case BTF_KIND_CONST: + case BTF_KIND_VOLATILE: + case BTF_KIND_RESTRICT: + case BTF_KIND_PTR: + case BTF_KIND_TYPEDEF: + case BTF_KIND_ARRAY: + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + case BTF_KIND_FUNC: + case BTF_KIND_FUNC_PROTO: + case BTF_KIND_VAR: + case BTF_KIND_DATASEC: + case BTF_KIND_DECL_TAG: + case BTF_KIND_TYPE_TAG: + return 0; + + case BTF_KIND_INT: + h = btf_hash_int_decl_tag(t); + for_each_dedup_cand(d, hash_entry, h) { + cand_id = (__u32)(long)hash_entry->value; + cand = btf_type_by_id(d->btf, cand_id); + if (btf_equal_int_tag(t, cand)) { + new_id = cand_id; + break; + } + } + break; + + case BTF_KIND_ENUM: + h = btf_hash_enum(t); + for_each_dedup_cand(d, hash_entry, h) { + cand_id = (__u32)(long)hash_entry->value; + cand = btf_type_by_id(d->btf, cand_id); + if (btf_equal_enum(t, cand)) { + new_id = cand_id; + break; + } + if (btf_compat_enum(t, cand)) { + if (btf_is_enum_fwd(t)) { + /* resolve fwd to full enum */ + new_id = cand_id; + break; + } + /* resolve canonical enum fwd to full enum */ + d->map[cand_id] = type_id; + } + } + break; + + case BTF_KIND_FWD: + case BTF_KIND_FLOAT: + h = btf_hash_common(t); + for_each_dedup_cand(d, hash_entry, h) { + cand_id = (__u32)(long)hash_entry->value; + cand = btf_type_by_id(d->btf, cand_id); + if (btf_equal_common(t, cand)) { + new_id = cand_id; + break; + } + } + break; + + default: + return -EINVAL; + } + + d->map[type_id] = new_id; + if (type_id == new_id && btf_dedup_table_add(d, h, type_id)) + return -ENOMEM; + + return 0; +} + +static int btf_dedup_prim_types(struct btf_dedup *d) +{ + int i, err; + + for (i = 0; i < d->btf->nr_types; i++) { + err = 
btf_dedup_prim_type(d, d->btf->start_id + i); + if (err) + return err; + } + return 0; +} + +/* + * Check whether type is already mapped into canonical one (could be to itself). + */ +static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id) +{ + return d->map[type_id] <= BTF_MAX_NR_TYPES; +} + +/* + * Resolve type ID into its canonical type ID, if any; otherwise return original + * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow + * STRUCT/UNION link and resolve it into canonical type ID as well. + */ +static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id) +{ + while (is_type_mapped(d, type_id) && d->map[type_id] != type_id) + type_id = d->map[type_id]; + return type_id; +} + +/* + * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original + * type ID. + */ +static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id) +{ + __u32 orig_type_id = type_id; + + if (!btf_is_fwd(btf__type_by_id(d->btf, type_id))) + return type_id; + + while (is_type_mapped(d, type_id) && d->map[type_id] != type_id) + type_id = d->map[type_id]; + + if (!btf_is_fwd(btf__type_by_id(d->btf, type_id))) + return type_id; + + return orig_type_id; +} + + +static inline __u16 btf_fwd_kind(struct btf_type *t) +{ + return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT; +} + +/* Check if given two types are identical ARRAY definitions */ +static int btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2) +{ + struct btf_type *t1, *t2; + + t1 = btf_type_by_id(d->btf, id1); + t2 = btf_type_by_id(d->btf, id2); + if (!btf_is_array(t1) || !btf_is_array(t2)) + return 0; + + return btf_equal_array(t1, t2); +} + +/* Check if given two types are identical STRUCT/UNION definitions */ +static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id2) +{ + const struct btf_member *m1, *m2; + struct btf_type *t1, *t2; + int n, i; + + t1 = btf_type_by_id(d->btf, id1); + t2 = btf_type_by_id(d->btf, id2); + + if (!btf_is_composite(t1) || btf_kind(t1) != btf_kind(t2)) + return false; + + if (!btf_shallow_equal_struct(t1, t2)) + return false; + + m1 = btf_members(t1); + m2 = btf_members(t2); + for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) { + if (m1->type != m2->type) + return false; + } + return true; +} + +/* + * Check equivalence of BTF type graph formed by candidate struct/union (we'll + * call it "candidate graph" in this description for brevity) to a type graph + * formed by (potential) canonical struct/union ("canonical graph" for brevity + * here, though keep in mind that not all types in canonical graph are + * necessarily canonical representatives themselves, some of them might be + * duplicates or its uniqueness might not have been established yet). + * Returns: + * - >0, if type graphs are equivalent; + * - 0, if not equivalent; + * - <0, on error. + * + * Algorithm performs side-by-side DFS traversal of both type graphs and checks + * equivalence of BTF types at each step. If at any point BTF types in candidate + * and canonical graphs are not compatible structurally, whole graphs are + * incompatible. If types are structurally equivalent (i.e., all information + * except referenced type IDs is exactly the same), a mapping from `canon_id` to + * a `cand_id` is recored in hypothetical mapping (`btf_dedup->hypot_map`). + * If a type references other types, then those referenced types are checked + * for equivalence recursively. 
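+ *
+ * For instance, with the CU #1/CU #2 example from the btf__dedup() comment
+ * above, checking CU #2's `struct S` (candidate) against CU #1's `struct S`
+ * (canonical) recurses through the `a_ptr`/`b_ptr` members into `struct A`
+ * and `struct B`, where one graph has a FWD and the other the concrete
+ * struct; each visited (canonical, candidate) pair is recorded in the
+ * hypothetical map along the way.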
+ * + * During DFS traversal, if we find that for current `canon_id` type we + * already have some mapping in hypothetical map, we check for two possible + * situations: + * - `canon_id` is mapped to exactly the same type as `cand_id`. This will + * happen when type graphs have cycles. In this case we assume those two + * types are equivalent. + * - `canon_id` is mapped to different type. This is contradiction in our + * hypothetical mapping, because same graph in canonical graph corresponds + * to two different types in candidate graph, which for equivalent type + * graphs shouldn't happen. This condition terminates equivalence check + * with negative result. + * + * If type graphs traversal exhausts types to check and find no contradiction, + * then type graphs are equivalent. + * + * When checking types for equivalence, there is one special case: FWD types. + * If FWD type resolution is allowed and one of the types (either from canonical + * or candidate graph) is FWD and other is STRUCT/UNION (depending on FWD's kind + * flag) and their names match, hypothetical mapping is updated to point from + * FWD to STRUCT/UNION. If graphs will be determined as equivalent successfully, + * this mapping will be used to record FWD -> STRUCT/UNION mapping permanently. + * + * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution, + * if there are two exactly named (or anonymous) structs/unions that are + * compatible structurally, one of which has FWD field, while other is concrete + * STRUCT/UNION, but according to C sources they are different structs/unions + * that are referencing different types with the same name. This is extremely + * unlikely to happen, but btf_dedup API allows to disable FWD resolution if + * this logic is causing problems. + * + * Doing FWD resolution means that both candidate and/or canonical graphs can + * consists of portions of the graph that come from multiple compilation units. + * This is due to the fact that types within single compilation unit are always + * deduplicated and FWDs are already resolved, if referenced struct/union + * definiton is available. So, if we had unresolved FWD and found corresponding + * STRUCT/UNION, they will be from different compilation units. This + * consequently means that when we "link" FWD to corresponding STRUCT/UNION, + * type graph will likely have at least two different BTF types that describe + * same type (e.g., most probably there will be two different BTF types for the + * same 'int' primitive type) and could even have "overlapping" parts of type + * graph that describe same subset of types. + * + * This in turn means that our assumption that each type in canonical graph + * must correspond to exactly one type in candidate graph might not hold + * anymore and will make it harder to detect contradictions using hypothetical + * map. To handle this problem, we allow to follow FWD -> STRUCT/UNION + * resolution only in canonical graph. FWDs in candidate graphs are never + * resolved. To see why it's OK, let's check all possible situations w.r.t. FWDs + * that can occur: + * - Both types in canonical and candidate graphs are FWDs. If they are + * structurally equivalent, then they can either be both resolved to the + * same STRUCT/UNION or not resolved at all. In both cases they are + * equivalent and there is no need to resolve FWD on candidate side. + * - Both types in canonical and candidate graphs are concrete STRUCT/UNION, + * so nothing to resolve as well, algorithm will check equivalence anyway. 
+ * - Type in canonical graph is FWD, while type in candidate is concrete + * STRUCT/UNION. In this case candidate graph comes from single compilation + * unit, so there is exactly one BTF type for each unique C type. After + * resolving FWD into STRUCT/UNION, there might be more than one BTF type + * in canonical graph mapping to single BTF type in candidate graph, but + * because hypothetical mapping maps from canonical to candidate types, it's + * alright, and we still maintain the property of having single `canon_id` + * mapping to single `cand_id` (there could be two different `canon_id` + * mapped to the same `cand_id`, but it's not contradictory). + * - Type in canonical graph is concrete STRUCT/UNION, while type in candidate + * graph is FWD. In this case we are just going to check compatibility of + * STRUCT/UNION and corresponding FWD, and if they are compatible, we'll + * assume that whatever STRUCT/UNION FWD resolves to must be equivalent to + * a concrete STRUCT/UNION from canonical graph. If the rest of type graphs + * turn out equivalent, we'll re-resolve FWD to concrete STRUCT/UNION from + * canonical graph. + */ +static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id, + __u32 canon_id) +{ + struct btf_type *cand_type; + struct btf_type *canon_type; + __u32 hypot_type_id; + __u16 cand_kind; + __u16 canon_kind; + int i, eq; + + /* if both resolve to the same canonical, they must be equivalent */ + if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id)) + return 1; + + canon_id = resolve_fwd_id(d, canon_id); + + hypot_type_id = d->hypot_map[canon_id]; + if (hypot_type_id <= BTF_MAX_NR_TYPES) { + if (hypot_type_id == cand_id) + return 1; + /* In some cases compiler will generate different DWARF types + * for *identical* array type definitions and use them for + * different fields within the *same* struct. This breaks type + * equivalence check, which makes an assumption that candidate + * types sub-graph has a consistent and deduped-by-compiler + * types within a single CU. So work around that by explicitly + * allowing identical array types here. + */ + if (btf_dedup_identical_arrays(d, hypot_type_id, cand_id)) + return 1; + /* It turns out that similar situation can happen with + * struct/union sometimes, sigh... Handle the case where + * structs/unions are exactly the same, down to the referenced + * type IDs. Anything more complicated (e.g., if referenced + * types are different, but equivalent) is *way more* + * complicated and requires a many-to-many equivalence mapping. 
+ */ + if (btf_dedup_identical_structs(d, hypot_type_id, cand_id)) + return 1; + return 0; + } + + if (btf_dedup_hypot_map_add(d, canon_id, cand_id)) + return -ENOMEM; + + cand_type = btf_type_by_id(d->btf, cand_id); + canon_type = btf_type_by_id(d->btf, canon_id); + cand_kind = btf_kind(cand_type); + canon_kind = btf_kind(canon_type); + + if (cand_type->name_off != canon_type->name_off) + return 0; + + /* FWD <--> STRUCT/UNION equivalence check, if enabled */ + if ((cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD) + && cand_kind != canon_kind) { + __u16 real_kind; + __u16 fwd_kind; + + if (cand_kind == BTF_KIND_FWD) { + real_kind = canon_kind; + fwd_kind = btf_fwd_kind(cand_type); + } else { + real_kind = cand_kind; + fwd_kind = btf_fwd_kind(canon_type); + /* we'd need to resolve base FWD to STRUCT/UNION */ + if (fwd_kind == real_kind && canon_id < d->btf->start_id) + d->hypot_adjust_canon = true; + } + return fwd_kind == real_kind; + } + + if (cand_kind != canon_kind) + return 0; + + switch (cand_kind) { + case BTF_KIND_INT: + return btf_equal_int_tag(cand_type, canon_type); + + case BTF_KIND_ENUM: + return btf_compat_enum(cand_type, canon_type); + + case BTF_KIND_FWD: + case BTF_KIND_FLOAT: + return btf_equal_common(cand_type, canon_type); + + case BTF_KIND_CONST: + case BTF_KIND_VOLATILE: + case BTF_KIND_RESTRICT: + case BTF_KIND_PTR: + case BTF_KIND_TYPEDEF: + case BTF_KIND_FUNC: + case BTF_KIND_TYPE_TAG: + if (cand_type->info != canon_type->info) + return 0; + return btf_dedup_is_equiv(d, cand_type->type, canon_type->type); + + case BTF_KIND_ARRAY: { + const struct btf_array *cand_arr, *canon_arr; + + if (!btf_compat_array(cand_type, canon_type)) + return 0; + cand_arr = btf_array(cand_type); + canon_arr = btf_array(canon_type); + eq = btf_dedup_is_equiv(d, cand_arr->index_type, canon_arr->index_type); + if (eq <= 0) + return eq; + return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type); + } + + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: { + const struct btf_member *cand_m, *canon_m; + __u16 vlen; + + if (!btf_shallow_equal_struct(cand_type, canon_type)) + return 0; + vlen = btf_vlen(cand_type); + cand_m = btf_members(cand_type); + canon_m = btf_members(canon_type); + for (i = 0; i < vlen; i++) { + eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type); + if (eq <= 0) + return eq; + cand_m++; + canon_m++; + } + + return 1; + } + + case BTF_KIND_FUNC_PROTO: { + const struct btf_param *cand_p, *canon_p; + __u16 vlen; + + if (!btf_compat_fnproto(cand_type, canon_type)) + return 0; + eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type); + if (eq <= 0) + return eq; + vlen = btf_vlen(cand_type); + cand_p = btf_params(cand_type); + canon_p = btf_params(canon_type); + for (i = 0; i < vlen; i++) { + eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type); + if (eq <= 0) + return eq; + cand_p++; + canon_p++; + } + return 1; + } + + default: + return -EINVAL; + } + return 0; +} + +/* + * Use hypothetical mapping, produced by successful type graph equivalence + * check, to augment existing struct/union canonical mapping, where possible. + * + * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record + * FWD -> STRUCT/UNION correspondence as well. FWD resolution is bidirectional: + * it doesn't matter if FWD type was part of canonical graph or candidate one, + * we are recording the mapping anyway. 
As opposed to carefulness required + * for struct/union correspondence mapping (described below), for FWD resolution + * it's not important, as by the time that FWD type (reference type) will be + * deduplicated all structs/unions will be deduped already anyway. + * + * Recording STRUCT/UNION mapping is purely a performance optimization and is + * not required for correctness. It needs to be done carefully to ensure that + * struct/union from candidate's type graph is not mapped into corresponding + * struct/union from canonical type graph that itself hasn't been resolved into + * canonical representative. The only guarantee we have is that canonical + * struct/union was determined as canonical and that won't change. But any + * types referenced through that struct/union fields could have been not yet + * resolved, so in case like that it's too early to establish any kind of + * correspondence between structs/unions. + * + * No canonical correspondence is derived for primitive types (they are already + * deduplicated completely already anyway) or reference types (they rely on + * stability of struct/union canonical relationship for equivalence checks). + */ +static void btf_dedup_merge_hypot_map(struct btf_dedup *d) +{ + __u32 canon_type_id, targ_type_id; + __u16 t_kind, c_kind; + __u32 t_id, c_id; + int i; + + for (i = 0; i < d->hypot_cnt; i++) { + canon_type_id = d->hypot_list[i]; + targ_type_id = d->hypot_map[canon_type_id]; + t_id = resolve_type_id(d, targ_type_id); + c_id = resolve_type_id(d, canon_type_id); + t_kind = btf_kind(btf__type_by_id(d->btf, t_id)); + c_kind = btf_kind(btf__type_by_id(d->btf, c_id)); + /* + * Resolve FWD into STRUCT/UNION. + * It's ok to resolve FWD into STRUCT/UNION that's not yet + * mapped to canonical representative (as opposed to + * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because + * eventually that struct is going to be mapped and all resolved + * FWDs will automatically resolve to correct canonical + * representative. This will happen before ref type deduping, + * which critically depends on stability of these mapping. This + * stability is not a requirement for STRUCT/UNION equivalence + * checks, though. + */ + + /* if it's the split BTF case, we still need to point base FWD + * to STRUCT/UNION in a split BTF, because FWDs from split BTF + * will be resolved against base FWD. If we don't point base + * canonical FWD to the resolved STRUCT/UNION, then all the + * FWDs in split BTF won't be correctly resolved to a proper + * STRUCT/UNION. + */ + if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD) + d->map[c_id] = t_id; + + /* if graph equivalence determined that we'd need to adjust + * base canonical types, then we need to only point base FWDs + * to STRUCTs/UNIONs and do no more modifications. For all + * other purposes the type graphs were not equivalent. + */ + if (d->hypot_adjust_canon) + continue; + + if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD) + d->map[t_id] = c_id; + + if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) && + c_kind != BTF_KIND_FWD && + is_type_mapped(d, c_id) && + !is_type_mapped(d, t_id)) { + /* + * as a perf optimization, we can map struct/union + * that's part of type graph we just verified for + * equivalence. We can do that for struct/union that has + * canonical representative only, though. + */ + d->map[t_id] = c_id; + } + } +} + +/* + * Deduplicate struct/union types. 
+ * + * For each struct/union type its type signature hash is calculated, taking + * into account type's name, size, number, order and names of fields, but + * ignoring type ID's referenced from fields, because they might not be deduped + * completely until after reference types deduplication phase. This type hash + * is used to iterate over all potential canonical types, sharing same hash. + * For each canonical candidate we check whether type graphs that they form + * (through referenced types in fields and so on) are equivalent using algorithm + * implemented in `btf_dedup_is_equiv`. If such equivalence is found and + * BTF_KIND_FWD resolution is allowed, then hypothetical mapping + * (btf_dedup->hypot_map) produced by aforementioned type graph equivalence + * algorithm is used to record FWD -> STRUCT/UNION mapping. It's also used to + * potentially map other structs/unions to their canonical representatives, + * if such relationship hasn't yet been established. This speeds up algorithm + * by eliminating some of the duplicate work. + * + * If no matching canonical representative was found, struct/union is marked + * as canonical for itself and is added into btf_dedup->dedup_table hash map + * for further look ups. + */ +static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id) +{ + struct btf_type *cand_type, *t; + struct hashmap_entry *hash_entry; + /* if we don't find equivalent type, then we are canonical */ + __u32 new_id = type_id; + __u16 kind; + long h; + + /* already deduped or is in process of deduping (loop detected) */ + if (d->map[type_id] <= BTF_MAX_NR_TYPES) + return 0; + + t = btf_type_by_id(d->btf, type_id); + kind = btf_kind(t); + + if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION) + return 0; + + h = btf_hash_struct(t); + for_each_dedup_cand(d, hash_entry, h) { + __u32 cand_id = (__u32)(long)hash_entry->value; + int eq; + + /* + * Even though btf_dedup_is_equiv() checks for + * btf_shallow_equal_struct() internally when checking two + * structs (unions) for equivalence, we need to guard here + * from picking matching FWD type as a dedup candidate. + * This can happen due to hash collision. In such case just + * relying on btf_dedup_is_equiv() would lead to potentially + * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because + * FWD and compatible STRUCT/UNION are considered equivalent. + */ + cand_type = btf_type_by_id(d->btf, cand_id); + if (!btf_shallow_equal_struct(t, cand_type)) + continue; + + btf_dedup_clear_hypot_map(d); + eq = btf_dedup_is_equiv(d, type_id, cand_id); + if (eq < 0) + return eq; + if (!eq) + continue; + btf_dedup_merge_hypot_map(d); + if (d->hypot_adjust_canon) /* not really equivalent */ + continue; + new_id = cand_id; + break; + } + + d->map[type_id] = new_id; + if (type_id == new_id && btf_dedup_table_add(d, h, type_id)) + return -ENOMEM; + + return 0; +} + +static int btf_dedup_struct_types(struct btf_dedup *d) +{ + int i, err; + + for (i = 0; i < d->btf->nr_types; i++) { + err = btf_dedup_struct_type(d, d->btf->start_id + i); + if (err) + return err; + } + return 0; +} + +/* + * Deduplicate reference type. + * + * Once all primitive and struct/union types got deduplicated, we can easily + * deduplicate all other (reference) BTF types. This is done in two steps: + * + * 1. Resolve all referenced type IDs into their canonical type IDs. This + * resolution can be done either immediately for primitive or struct/union types + * (because they were deduped in previous two phases) or recursively for + * reference types. 
Recursion will always terminate at either primitive or + * struct/union type, at which point we can "unwind" chain of reference types + * one by one. There is no danger of encountering cycles because in C type + * system the only way to form type cycle is through struct/union, so any chain + * of reference types, even those taking part in a type cycle, will inevitably + * reach struct/union at some point. + * + * 2. Once all referenced type IDs are resolved into canonical ones, BTF type + * becomes "stable", in the sense that no further deduplication will cause + * any changes to it. With that, it's now possible to calculate type's signature + * hash (this time taking into account referenced type IDs) and loop over all + * potential canonical representatives. If no match was found, current type + * will become canonical representative of itself and will be added into + * btf_dedup->dedup_table as another possible canonical representative. + */ +static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id) +{ + struct hashmap_entry *hash_entry; + __u32 new_id = type_id, cand_id; + struct btf_type *t, *cand; + /* if we don't find equivalent type, then we are representative type */ + int ref_type_id; + long h; + + if (d->map[type_id] == BTF_IN_PROGRESS_ID) + return -ELOOP; + if (d->map[type_id] <= BTF_MAX_NR_TYPES) + return resolve_type_id(d, type_id); + + t = btf_type_by_id(d->btf, type_id); + d->map[type_id] = BTF_IN_PROGRESS_ID; + + switch (btf_kind(t)) { + case BTF_KIND_CONST: + case BTF_KIND_VOLATILE: + case BTF_KIND_RESTRICT: + case BTF_KIND_PTR: + case BTF_KIND_TYPEDEF: + case BTF_KIND_FUNC: + case BTF_KIND_TYPE_TAG: + ref_type_id = btf_dedup_ref_type(d, t->type); + if (ref_type_id < 0) + return ref_type_id; + t->type = ref_type_id; + + h = btf_hash_common(t); + for_each_dedup_cand(d, hash_entry, h) { + cand_id = (__u32)(long)hash_entry->value; + cand = btf_type_by_id(d->btf, cand_id); + if (btf_equal_common(t, cand)) { + new_id = cand_id; + break; + } + } + break; + + case BTF_KIND_DECL_TAG: + ref_type_id = btf_dedup_ref_type(d, t->type); + if (ref_type_id < 0) + return ref_type_id; + t->type = ref_type_id; + + h = btf_hash_int_decl_tag(t); + for_each_dedup_cand(d, hash_entry, h) { + cand_id = (__u32)(long)hash_entry->value; + cand = btf_type_by_id(d->btf, cand_id); + if (btf_equal_int_tag(t, cand)) { + new_id = cand_id; + break; + } + } + break; + + case BTF_KIND_ARRAY: { + struct btf_array *info = btf_array(t); + + ref_type_id = btf_dedup_ref_type(d, info->type); + if (ref_type_id < 0) + return ref_type_id; + info->type = ref_type_id; + + ref_type_id = btf_dedup_ref_type(d, info->index_type); + if (ref_type_id < 0) + return ref_type_id; + info->index_type = ref_type_id; + + h = btf_hash_array(t); + for_each_dedup_cand(d, hash_entry, h) { + cand_id = (__u32)(long)hash_entry->value; + cand = btf_type_by_id(d->btf, cand_id); + if (btf_equal_array(t, cand)) { + new_id = cand_id; + break; + } + } + break; + } + + case BTF_KIND_FUNC_PROTO: { + struct btf_param *param; + __u16 vlen; + int i; + + ref_type_id = btf_dedup_ref_type(d, t->type); + if (ref_type_id < 0) + return ref_type_id; + t->type = ref_type_id; + + vlen = btf_vlen(t); + param = btf_params(t); + for (i = 0; i < vlen; i++) { + ref_type_id = btf_dedup_ref_type(d, param->type); + if (ref_type_id < 0) + return ref_type_id; + param->type = ref_type_id; + param++; + } + + h = btf_hash_fnproto(t); + for_each_dedup_cand(d, hash_entry, h) { + cand_id = (__u32)(long)hash_entry->value; + cand = btf_type_by_id(d->btf, cand_id); + if 
(btf_equal_fnproto(t, cand)) { + new_id = cand_id; + break; + } + } + break; + } + + default: + return -EINVAL; + } + + d->map[type_id] = new_id; + if (type_id == new_id && btf_dedup_table_add(d, h, type_id)) + return -ENOMEM; + + return new_id; +} + +static int btf_dedup_ref_types(struct btf_dedup *d) +{ + int i, err; + + for (i = 0; i < d->btf->nr_types; i++) { + err = btf_dedup_ref_type(d, d->btf->start_id + i); + if (err < 0) + return err; + } + /* we won't need d->dedup_table anymore */ + hashmap__free(d->dedup_table); + d->dedup_table = NULL; + return 0; +} + +/* + * Compact types. + * + * After we established for each type its corresponding canonical representative + * type, we now can eliminate types that are not canonical and leave only + * canonical ones layed out sequentially in memory by copying them over + * duplicates. During compaction btf_dedup->hypot_map array is reused to store + * a map from original type ID to a new compacted type ID, which will be used + * during next phase to "fix up" type IDs, referenced from struct/union and + * reference types. + */ +static int btf_dedup_compact_types(struct btf_dedup *d) +{ + __u32 *new_offs; + __u32 next_type_id = d->btf->start_id; + const struct btf_type *t; + void *p; + int i, id, len; + + /* we are going to reuse hypot_map to store compaction remapping */ + d->hypot_map[0] = 0; + /* base BTF types are not renumbered */ + for (id = 1; id < d->btf->start_id; id++) + d->hypot_map[id] = id; + for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++) + d->hypot_map[id] = BTF_UNPROCESSED_ID; + + p = d->btf->types_data; + + for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++) { + if (d->map[id] != id) + continue; + + t = btf__type_by_id(d->btf, id); + len = btf_type_size(t); + if (len < 0) + return len; + + memmove(p, t, len); + d->hypot_map[id] = next_type_id; + d->btf->type_offs[next_type_id - d->btf->start_id] = p - d->btf->types_data; + p += len; + next_type_id++; + } + + /* shrink struct btf's internal types index and update btf_header */ + d->btf->nr_types = next_type_id - d->btf->start_id; + d->btf->type_offs_cap = d->btf->nr_types; + d->btf->hdr->type_len = p - d->btf->types_data; + new_offs = libbpf_reallocarray(d->btf->type_offs, d->btf->type_offs_cap, + sizeof(*new_offs)); + if (d->btf->type_offs_cap && !new_offs) + return -ENOMEM; + d->btf->type_offs = new_offs; + d->btf->hdr->str_off = d->btf->hdr->type_len; + d->btf->raw_size = d->btf->hdr->hdr_len + d->btf->hdr->type_len + d->btf->hdr->str_len; + return 0; +} + +/* + * Figure out final (deduplicated and compacted) type ID for provided original + * `type_id` by first resolving it into corresponding canonical type ID and + * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map, + * which is populated during compaction phase. + */ +static int btf_dedup_remap_type_id(__u32 *type_id, void *ctx) +{ + struct btf_dedup *d = ctx; + __u32 resolved_type_id, new_type_id; + + resolved_type_id = resolve_type_id(d, *type_id); + new_type_id = d->hypot_map[resolved_type_id]; + if (new_type_id > BTF_MAX_NR_TYPES) + return -EINVAL; + + *type_id = new_type_id; + return 0; +} + +/* + * Remap referenced type IDs into deduped type IDs. + * + * After BTF types are deduplicated and compacted, their final type IDs may + * differ from original ones. The map from original to a corresponding + * deduped type ID is stored in btf_dedup->hypot_map and is populated during + * compaction phase. 
During remapping phase we are rewriting all type IDs + * referenced from any BTF type (e.g., struct fields, func proto args, etc) to + * their final deduped type IDs. + */ +static int btf_dedup_remap_types(struct btf_dedup *d) +{ + int i, r; + + for (i = 0; i < d->btf->nr_types; i++) { + struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i); + + r = btf_type_visit_type_ids(t, btf_dedup_remap_type_id, d); + if (r) + return r; + } + + if (!d->btf_ext) + return 0; + + r = btf_ext_visit_type_ids(d->btf_ext, btf_dedup_remap_type_id, d); + if (r) + return r; + + return 0; +} + +/* + * Probe few well-known locations for vmlinux kernel image and try to load BTF + * data out of it to use for target BTF. + */ +struct btf *btf__load_vmlinux_btf(void) +{ + struct { + const char *path_fmt; + bool raw_btf; + } locations[] = { + /* try canonical vmlinux BTF through sysfs first */ + { "/sys/kernel/btf/vmlinux", true /* raw BTF */ }, + /* fall back to trying to find vmlinux ELF on disk otherwise */ + { "/boot/vmlinux-%1$s" }, + { "/lib/modules/%1$s/vmlinux-%1$s" }, + { "/lib/modules/%1$s/build/vmlinux" }, + { "/usr/lib/modules/%1$s/kernel/vmlinux" }, + { "/usr/lib/debug/boot/vmlinux-%1$s" }, + { "/usr/lib/debug/boot/vmlinux-%1$s.debug" }, + { "/usr/lib/debug/lib/modules/%1$s/vmlinux" }, + }; + char path[PATH_MAX + 1]; + struct utsname buf; + struct btf *btf; + int i, err; + + uname(&buf); + + for (i = 0; i < ARRAY_SIZE(locations); i++) { + snprintf(path, PATH_MAX, locations[i].path_fmt, buf.release); + + if (access(path, R_OK)) + continue; + + if (locations[i].raw_btf) + btf = btf__parse_raw(path); + else + btf = btf__parse_elf(path, NULL); + err = libbpf_get_error(btf); + pr_debug("loading kernel BTF '%s': %d\n", path, err); + if (err) + continue; + + return btf; + } + + pr_warn("failed to find valid kernel BTF\n"); + return libbpf_err_ptr(-ESRCH); +} + +struct btf *libbpf_find_kernel_btf(void) __attribute__((alias("btf__load_vmlinux_btf"))); + +struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_btf) +{ + char path[80]; + + snprintf(path, sizeof(path), "/sys/kernel/btf/%s", module_name); + return btf__parse_split(path, vmlinux_btf); +} + +int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx) +{ + int i, n, err; + + switch (btf_kind(t)) { + case BTF_KIND_INT: + case BTF_KIND_FLOAT: + case BTF_KIND_ENUM: + return 0; + + case BTF_KIND_FWD: + case BTF_KIND_CONST: + case BTF_KIND_VOLATILE: + case BTF_KIND_RESTRICT: + case BTF_KIND_PTR: + case BTF_KIND_TYPEDEF: + case BTF_KIND_FUNC: + case BTF_KIND_VAR: + case BTF_KIND_DECL_TAG: + case BTF_KIND_TYPE_TAG: + return visit(&t->type, ctx); + + case BTF_KIND_ARRAY: { + struct btf_array *a = btf_array(t); + + err = visit(&a->type, ctx); + err = err ?: visit(&a->index_type, ctx); + return err; + } + + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: { + struct btf_member *m = btf_members(t); + + for (i = 0, n = btf_vlen(t); i < n; i++, m++) { + err = visit(&m->type, ctx); + if (err) + return err; + } + return 0; + } + + case BTF_KIND_FUNC_PROTO: { + struct btf_param *m = btf_params(t); + + err = visit(&t->type, ctx); + if (err) + return err; + for (i = 0, n = btf_vlen(t); i < n; i++, m++) { + err = visit(&m->type, ctx); + if (err) + return err; + } + return 0; + } + + case BTF_KIND_DATASEC: { + struct btf_var_secinfo *m = btf_var_secinfos(t); + + for (i = 0, n = btf_vlen(t); i < n; i++, m++) { + err = visit(&m->type, ctx); + if (err) + return err; + } + return 0; + } + + default: + return -EINVAL; + } +} + 
+int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx) +{ + int i, n, err; + + err = visit(&t->name_off, ctx); + if (err) + return err; + + switch (btf_kind(t)) { + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: { + struct btf_member *m = btf_members(t); + + for (i = 0, n = btf_vlen(t); i < n; i++, m++) { + err = visit(&m->name_off, ctx); + if (err) + return err; + } + break; + } + case BTF_KIND_ENUM: { + struct btf_enum *m = btf_enum(t); + + for (i = 0, n = btf_vlen(t); i < n; i++, m++) { + err = visit(&m->name_off, ctx); + if (err) + return err; + } + break; + } + case BTF_KIND_FUNC_PROTO: { + struct btf_param *m = btf_params(t); + + for (i = 0, n = btf_vlen(t); i < n; i++, m++) { + err = visit(&m->name_off, ctx); + if (err) + return err; + } + break; + } + default: + break; + } + + return 0; +} + +int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx) +{ + const struct btf_ext_info *seg; + struct btf_ext_info_sec *sec; + int i, err; + + seg = &btf_ext->func_info; + for_each_btf_ext_sec(seg, sec) { + struct bpf_func_info_min *rec; + + for_each_btf_ext_rec(seg, sec, i, rec) { + err = visit(&rec->type_id, ctx); + if (err < 0) + return err; + } + } + + seg = &btf_ext->core_relo_info; + for_each_btf_ext_sec(seg, sec) { + struct bpf_core_relo *rec; + + for_each_btf_ext_rec(seg, sec, i, rec) { + err = visit(&rec->type_id, ctx); + if (err < 0) + return err; + } + } + + return 0; +} + +int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx) +{ + const struct btf_ext_info *seg; + struct btf_ext_info_sec *sec; + int i, err; + + seg = &btf_ext->func_info; + for_each_btf_ext_sec(seg, sec) { + err = visit(&sec->sec_name_off, ctx); + if (err) + return err; + } + + seg = &btf_ext->line_info; + for_each_btf_ext_sec(seg, sec) { + struct bpf_line_info_min *rec; + + err = visit(&sec->sec_name_off, ctx); + if (err) + return err; + + for_each_btf_ext_rec(seg, sec, i, rec) { + err = visit(&rec->file_name_off, ctx); + if (err) + return err; + err = visit(&rec->line_off, ctx); + if (err) + return err; + } + } + + seg = &btf_ext->core_relo_info; + for_each_btf_ext_sec(seg, sec) { + struct bpf_core_relo *rec; + + err = visit(&sec->sec_name_off, ctx); + if (err) + return err; + + for_each_btf_ext_rec(seg, sec, i, rec) { + err = visit(&rec->access_str_off, ctx); + if (err) + return err; + } + } + + return 0; +} diff --git a/pkg/plugin/lib/common/libbpf/_src/btf.h b/pkg/plugin/lib/common/libbpf/_src/btf.h new file mode 100644 index 000000000..951ac7475 --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/btf.h @@ -0,0 +1,605 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ +/* Copyright (c) 2018 Facebook */ +/*! 
\file */ + +#ifndef __LIBBPF_BTF_H +#define __LIBBPF_BTF_H + +#include +#include +#include +#include + +#include "libbpf_common.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define BTF_ELF_SEC ".BTF" +#define BTF_EXT_ELF_SEC ".BTF.ext" +#define MAPS_ELF_SEC ".maps" + +struct btf; +struct btf_ext; +struct btf_type; + +struct bpf_object; + +enum btf_endianness { + BTF_LITTLE_ENDIAN = 0, + BTF_BIG_ENDIAN = 1, +}; + +/** + * @brief **btf__free()** frees all data of a BTF object + * @param btf BTF object to free + */ +LIBBPF_API void btf__free(struct btf *btf); + +/** + * @brief **btf__new()** creates a new instance of a BTF object from the raw + * bytes of an ELF's BTF section + * @param data raw bytes + * @param size number of bytes passed in `data` + * @return new BTF object instance which has to be eventually freed with + * **btf__free()** + * + * On error, error-code-encoded-as-pointer is returned, not a NULL. To extract + * error code from such a pointer `libbpf_get_error()` should be used. If + * `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is + * returned on error instead. In both cases thread-local `errno` variable is + * always set to error code as well. + */ +LIBBPF_API struct btf *btf__new(const void *data, __u32 size); + +/** + * @brief **btf__new_split()** create a new instance of a BTF object from the + * provided raw data bytes. It takes another BTF instance, **base_btf**, which + * serves as a base BTF, which is extended by types in a newly created BTF + * instance + * @param data raw bytes + * @param size length of raw bytes + * @param base_btf the base BTF object + * @return new BTF object instance which has to be eventually freed with + * **btf__free()** + * + * If *base_btf* is NULL, `btf__new_split()` is equivalent to `btf__new()` and + * creates non-split BTF. + * + * On error, error-code-encoded-as-pointer is returned, not a NULL. To extract + * error code from such a pointer `libbpf_get_error()` should be used. If + * `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is + * returned on error instead. In both cases thread-local `errno` variable is + * always set to error code as well. + */ +LIBBPF_API struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf); + +/** + * @brief **btf__new_empty()** creates an empty BTF object. Use + * `btf__add_*()` to populate such BTF object. + * @return new BTF object instance which has to be eventually freed with + * **btf__free()** + * + * On error, error-code-encoded-as-pointer is returned, not a NULL. To extract + * error code from such a pointer `libbpf_get_error()` should be used. If + * `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is + * returned on error instead. In both cases thread-local `errno` variable is + * always set to error code as well. + */ +LIBBPF_API struct btf *btf__new_empty(void); + +/** + * @brief **btf__new_empty_split()** creates an unpopulated BTF object from an + * ELF BTF section except with a base BTF on top of which split BTF should be + * based + * @return new BTF object instance which has to be eventually freed with + * **btf__free()** + * + * If *base_btf* is NULL, `btf__new_empty_split()` is equivalent to + * `btf__new_empty()` and creates non-split BTF. + * + * On error, error-code-encoded-as-pointer is returned, not a NULL. To extract + * error code from such a pointer `libbpf_get_error()` should be used. 
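A short sketch of the error-pointer convention described above, which all of these constructors share; the wrapper function name is a placeholder:

static struct btf *new_btf_or_warn(void)
{
	struct btf *btf = btf__new_empty();
	long err = libbpf_get_error(btf);

	if (err) {
		/* err is a negative errno value, e.g. -ENOMEM */
		fprintf(stderr, "btf__new_empty() failed: %ld\n", err);
		return NULL;
	}
	return btf;
}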
If + * `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is + * returned on error instead. In both cases thread-local `errno` variable is + * always set to error code as well. + */ +LIBBPF_API struct btf *btf__new_empty_split(struct btf *base_btf); + +LIBBPF_API struct btf *btf__parse(const char *path, struct btf_ext **btf_ext); +LIBBPF_API struct btf *btf__parse_split(const char *path, struct btf *base_btf); +LIBBPF_API struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext); +LIBBPF_API struct btf *btf__parse_elf_split(const char *path, struct btf *base_btf); +LIBBPF_API struct btf *btf__parse_raw(const char *path); +LIBBPF_API struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf); + +LIBBPF_API struct btf *btf__load_vmlinux_btf(void); +LIBBPF_API struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_btf); +LIBBPF_API struct btf *libbpf_find_kernel_btf(void); + +LIBBPF_API struct btf *btf__load_from_kernel_by_id(__u32 id); +LIBBPF_API struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf); +LIBBPF_DEPRECATED_SINCE(0, 6, "use btf__load_from_kernel_by_id instead") +LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf); + +LIBBPF_DEPRECATED_SINCE(0, 6, "intended for internal libbpf use only") +LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf); +LIBBPF_DEPRECATED_SINCE(0, 6, "use btf__load_into_kernel instead") +LIBBPF_API int btf__load(struct btf *btf); +LIBBPF_API int btf__load_into_kernel(struct btf *btf); +LIBBPF_API __s32 btf__find_by_name(const struct btf *btf, + const char *type_name); +LIBBPF_API __s32 btf__find_by_name_kind(const struct btf *btf, + const char *type_name, __u32 kind); +LIBBPF_DEPRECATED_SINCE(0, 7, "use btf__type_cnt() instead; note that btf__get_nr_types() == btf__type_cnt() - 1") +LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf); +LIBBPF_API __u32 btf__type_cnt(const struct btf *btf); +LIBBPF_API const struct btf *btf__base_btf(const struct btf *btf); +LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf, + __u32 id); +LIBBPF_API size_t btf__pointer_size(const struct btf *btf); +LIBBPF_API int btf__set_pointer_size(struct btf *btf, size_t ptr_sz); +LIBBPF_API enum btf_endianness btf__endianness(const struct btf *btf); +LIBBPF_API int btf__set_endianness(struct btf *btf, enum btf_endianness endian); +LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id); +LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id); +LIBBPF_API int btf__align_of(const struct btf *btf, __u32 id); +LIBBPF_API int btf__fd(const struct btf *btf); +LIBBPF_API void btf__set_fd(struct btf *btf, int fd); +LIBBPF_API const void *btf__raw_data(const struct btf *btf, __u32 *size); +LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset); +LIBBPF_API const char *btf__str_by_offset(const struct btf *btf, __u32 offset); +LIBBPF_DEPRECATED_SINCE(0, 7, "this API is not necessary when BTF-defined maps are used") +LIBBPF_API int btf__get_map_kv_tids(const struct btf *btf, const char *map_name, + __u32 expected_key_size, + __u32 expected_value_size, + __u32 *key_type_id, __u32 *value_type_id); + +LIBBPF_API struct btf_ext *btf_ext__new(const __u8 *data, __u32 size); +LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext); +LIBBPF_API const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size); +LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_func_info was never meant as a public API 
and has wrong assumptions embedded in it; it will be removed in the future libbpf versions") +int btf_ext__reloc_func_info(const struct btf *btf, + const struct btf_ext *btf_ext, + const char *sec_name, __u32 insns_cnt, + void **func_info, __u32 *cnt); +LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_line_info was never meant as a public API and has wrong assumptions embedded in it; it will be removed in the future libbpf versions") +int btf_ext__reloc_line_info(const struct btf *btf, + const struct btf_ext *btf_ext, + const char *sec_name, __u32 insns_cnt, + void **line_info, __u32 *cnt); +LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_func_info is deprecated; write custom func_info parsing to fetch rec_size") +__u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext); +LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_line_info is deprecated; write custom line_info parsing to fetch rec_size") +__u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext); + +LIBBPF_API int btf__find_str(struct btf *btf, const char *s); +LIBBPF_API int btf__add_str(struct btf *btf, const char *s); +LIBBPF_API int btf__add_type(struct btf *btf, const struct btf *src_btf, + const struct btf_type *src_type); +/** + * @brief **btf__add_btf()** appends all the BTF types from *src_btf* into *btf* + * @param btf BTF object which all the BTF types and strings are added to + * @param src_btf BTF object which all BTF types and referenced strings are copied from + * @return BTF type ID of the first appended BTF type, or negative error code + * + * **btf__add_btf()** can be used to simply and efficiently append the entire + * contents of one BTF object to another one. All the BTF type data is copied + * over, all referenced type IDs are adjusted by adding a necessary ID offset. + * Only strings referenced from BTF types are copied over and deduplicated, so + * if there were some unused strings in *src_btf*, those won't be copied over, + * which is consistent with the general string deduplication semantics of BTF + * writing APIs. + * + * If any error is encountered during this process, the contents of *btf* is + * left intact, which means that **btf__add_btf()** follows the transactional + * semantics and the operation as a whole is all-or-nothing. + * + * *src_btf* has to be non-split BTF, as of now copying types from split BTF + * is not supported and will result in -ENOTSUP error code returned. 
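A usage sketch of the transactional append semantics described above; merge_btf is a hypothetical helper name:

static int merge_btf(struct btf *dst, const struct btf *src)
{
	int first_id = btf__add_btf(dst, src);

	if (first_id < 0)
		return first_id;	/* dst is left untouched on failure */

	/* appended types now occupy IDs [first_id .. btf__type_cnt(dst) - 1] */
	return 0;
}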
+ */ +LIBBPF_API int btf__add_btf(struct btf *btf, const struct btf *src_btf); + +LIBBPF_API int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding); +LIBBPF_API int btf__add_float(struct btf *btf, const char *name, size_t byte_sz); +LIBBPF_API int btf__add_ptr(struct btf *btf, int ref_type_id); +LIBBPF_API int btf__add_array(struct btf *btf, + int index_type_id, int elem_type_id, __u32 nr_elems); +/* struct/union construction APIs */ +LIBBPF_API int btf__add_struct(struct btf *btf, const char *name, __u32 sz); +LIBBPF_API int btf__add_union(struct btf *btf, const char *name, __u32 sz); +LIBBPF_API int btf__add_field(struct btf *btf, const char *name, int field_type_id, + __u32 bit_offset, __u32 bit_size); + +/* enum construction APIs */ +LIBBPF_API int btf__add_enum(struct btf *btf, const char *name, __u32 bytes_sz); +LIBBPF_API int btf__add_enum_value(struct btf *btf, const char *name, __s64 value); + +enum btf_fwd_kind { + BTF_FWD_STRUCT = 0, + BTF_FWD_UNION = 1, + BTF_FWD_ENUM = 2, +}; + +LIBBPF_API int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind); +LIBBPF_API int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id); +LIBBPF_API int btf__add_volatile(struct btf *btf, int ref_type_id); +LIBBPF_API int btf__add_const(struct btf *btf, int ref_type_id); +LIBBPF_API int btf__add_restrict(struct btf *btf, int ref_type_id); +LIBBPF_API int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id); + +/* func and func_proto construction APIs */ +LIBBPF_API int btf__add_func(struct btf *btf, const char *name, + enum btf_func_linkage linkage, int proto_type_id); +LIBBPF_API int btf__add_func_proto(struct btf *btf, int ret_type_id); +LIBBPF_API int btf__add_func_param(struct btf *btf, const char *name, int type_id); + +/* var & datasec construction APIs */ +LIBBPF_API int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id); +LIBBPF_API int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz); +LIBBPF_API int btf__add_datasec_var_info(struct btf *btf, int var_type_id, + __u32 offset, __u32 byte_sz); + +/* tag construction API */ +LIBBPF_API int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id, + int component_idx); + +struct btf_dedup_opts { + size_t sz; + /* optional .BTF.ext info to dedup along the main BTF info */ + struct btf_ext *btf_ext; + /* force hash collisions (used for testing) */ + bool force_collisions; + size_t :0; +}; +#define btf_dedup_opts__last_field force_collisions + +LIBBPF_API int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts); + +LIBBPF_API int btf__dedup_v0_6_0(struct btf *btf, const struct btf_dedup_opts *opts); + +LIBBPF_DEPRECATED_SINCE(0, 7, "use btf__dedup() instead") +LIBBPF_API int btf__dedup_deprecated(struct btf *btf, struct btf_ext *btf_ext, const void *opts); +#define btf__dedup(...) 
___libbpf_overload(___btf_dedup, __VA_ARGS__) +#define ___btf_dedup3(btf, btf_ext, opts) btf__dedup_deprecated(btf, btf_ext, opts) +#define ___btf_dedup2(btf, opts) btf__dedup(btf, opts) + +struct btf_dump; + +struct btf_dump_opts { + union { + size_t sz; + void *ctx; /* DEPRECATED: will be gone in v1.0 */ + }; +}; + +typedef void (*btf_dump_printf_fn_t)(void *ctx, const char *fmt, va_list args); + +LIBBPF_API struct btf_dump *btf_dump__new(const struct btf *btf, + btf_dump_printf_fn_t printf_fn, + void *ctx, + const struct btf_dump_opts *opts); + +LIBBPF_API struct btf_dump *btf_dump__new_v0_6_0(const struct btf *btf, + btf_dump_printf_fn_t printf_fn, + void *ctx, + const struct btf_dump_opts *opts); + +LIBBPF_API struct btf_dump *btf_dump__new_deprecated(const struct btf *btf, + const struct btf_ext *btf_ext, + const struct btf_dump_opts *opts, + btf_dump_printf_fn_t printf_fn); + +/* Choose either btf_dump__new() or btf_dump__new_deprecated() based on the + * type of 4th argument. If it's btf_dump's print callback, use deprecated + * API; otherwise, choose the new btf_dump__new(). ___libbpf_override() + * doesn't work here because both variants have 4 input arguments. + * + * (void *) casts are necessary to avoid compilation warnings about type + * mismatches, because even though __builtin_choose_expr() only ever evaluates + * one side the other side still has to satisfy type constraints (this is + * compiler implementation limitation which might be lifted eventually, + * according to the documentation). So passing struct btf_ext in place of + * btf_dump_printf_fn_t would be generating compilation warning. Casting to + * void * avoids this issue. + * + * Also, two type compatibility checks for a function and function pointer are + * required because passing function reference into btf_dump__new() as + * btf_dump__new(..., my_callback, ...) and as btf_dump__new(..., + * &my_callback, ...) (not explicit ampersand in the latter case) actually + * differs as far as __builtin_types_compatible_p() is concerned. Thus two + * checks are combined to detect callback argument. + * + * The rest works just like in case of ___libbpf_override() usage with symbol + * versioning. + * + * C++ compilers don't support __builtin_types_compatible_p(), so at least + * don't screw up compilation for them and let C++ users pick btf_dump__new + * vs btf_dump__new_deprecated explicitly. 
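For reference, a sketch of the non-deprecated call shape this machinery resolves to when the 4th argument is an options pointer (or NULL) rather than the print callback; print_to_stdout and new_dump are placeholder names:

static void print_to_stdout(void *ctx, const char *fmt, va_list args)
{
	vprintf(fmt, args);
}

static struct btf_dump *new_dump(const struct btf *btf)
{
	/* new argument order: callback second, opts last (NULL opts is fine) */
	return btf_dump__new(btf, print_to_stdout, NULL, NULL);
}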
+ */ +#ifndef __cplusplus +#define btf_dump__new(a1, a2, a3, a4) __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(a4), btf_dump_printf_fn_t) || \ + __builtin_types_compatible_p(typeof(a4), void(void *, const char *, va_list)), \ + btf_dump__new_deprecated((void *)a1, (void *)a2, (void *)a3, (void *)a4), \ + btf_dump__new((void *)a1, (void *)a2, (void *)a3, (void *)a4)) +#endif + +LIBBPF_API void btf_dump__free(struct btf_dump *d); + +LIBBPF_API int btf_dump__dump_type(struct btf_dump *d, __u32 id); + +struct btf_dump_emit_type_decl_opts { + /* size of this struct, for forward/backward compatiblity */ + size_t sz; + /* optional field name for type declaration, e.g.: + * - struct my_struct + * - void (*)(int) + * - char (*)[123] + */ + const char *field_name; + /* extra indentation level (in number of tabs) to emit for multi-line + * type declarations (e.g., anonymous struct); applies for lines + * starting from the second one (first line is assumed to have + * necessary indentation already + */ + int indent_level; + /* strip all the const/volatile/restrict mods */ + bool strip_mods; + size_t :0; +}; +#define btf_dump_emit_type_decl_opts__last_field strip_mods + +LIBBPF_API int +btf_dump__emit_type_decl(struct btf_dump *d, __u32 id, + const struct btf_dump_emit_type_decl_opts *opts); + + +struct btf_dump_type_data_opts { + /* size of this struct, for forward/backward compatibility */ + size_t sz; + const char *indent_str; + int indent_level; + /* below match "show" flags for bpf_show_snprintf() */ + bool compact; /* no newlines/indentation */ + bool skip_names; /* skip member/type names */ + bool emit_zeroes; /* show 0-valued fields */ + size_t :0; +}; +#define btf_dump_type_data_opts__last_field emit_zeroes + +LIBBPF_API int +btf_dump__dump_type_data(struct btf_dump *d, __u32 id, + const void *data, size_t data_sz, + const struct btf_dump_type_data_opts *opts); + +/* + * A set of helpers for easier BTF types handling. + * + * The inline functions below rely on constants from the kernel headers which + * may not be available for applications including this header file. To avoid + * compilation errors, we define all the constants here that were added after + * the initial introduction of the BTF_KIND* constants. 
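The option structs above follow libbpf's sized-struct convention and are normally built with DECLARE_LIBBPF_OPTS from libbpf_common.h (which btf.h includes). A sketch rendering a single field declaration, with d, type_id and the field name as placeholders:

	DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
			    .field_name = "cfg",
			    .indent_level = 1,
			    .strip_mods = true);

	err = btf_dump__emit_type_decl(d, type_id, &opts);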
+ */ +#ifndef BTF_KIND_FUNC +#define BTF_KIND_FUNC 12 /* Function */ +#define BTF_KIND_FUNC_PROTO 13 /* Function Proto */ +#endif +#ifndef BTF_KIND_VAR +#define BTF_KIND_VAR 14 /* Variable */ +#define BTF_KIND_DATASEC 15 /* Section */ +#endif +#ifndef BTF_KIND_FLOAT +#define BTF_KIND_FLOAT 16 /* Floating point */ +#endif +/* The kernel header switched to enums, so these two were never #defined */ +#define BTF_KIND_DECL_TAG 17 /* Decl Tag */ +#define BTF_KIND_TYPE_TAG 18 /* Type Tag */ + +static inline __u16 btf_kind(const struct btf_type *t) +{ + return BTF_INFO_KIND(t->info); +} + +static inline __u16 btf_vlen(const struct btf_type *t) +{ + return BTF_INFO_VLEN(t->info); +} + +static inline bool btf_kflag(const struct btf_type *t) +{ + return BTF_INFO_KFLAG(t->info); +} + +static inline bool btf_is_void(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_UNKN; +} + +static inline bool btf_is_int(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_INT; +} + +static inline bool btf_is_ptr(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_PTR; +} + +static inline bool btf_is_array(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_ARRAY; +} + +static inline bool btf_is_struct(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_STRUCT; +} + +static inline bool btf_is_union(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_UNION; +} + +static inline bool btf_is_composite(const struct btf_type *t) +{ + __u16 kind = btf_kind(t); + + return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION; +} + +static inline bool btf_is_enum(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_ENUM; +} + +static inline bool btf_is_fwd(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_FWD; +} + +static inline bool btf_is_typedef(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_TYPEDEF; +} + +static inline bool btf_is_volatile(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_VOLATILE; +} + +static inline bool btf_is_const(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_CONST; +} + +static inline bool btf_is_restrict(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_RESTRICT; +} + +static inline bool btf_is_mod(const struct btf_type *t) +{ + __u16 kind = btf_kind(t); + + return kind == BTF_KIND_VOLATILE || + kind == BTF_KIND_CONST || + kind == BTF_KIND_RESTRICT || + kind == BTF_KIND_TYPE_TAG; +} + +static inline bool btf_is_func(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_FUNC; +} + +static inline bool btf_is_func_proto(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_FUNC_PROTO; +} + +static inline bool btf_is_var(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_VAR; +} + +static inline bool btf_is_datasec(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_DATASEC; +} + +static inline bool btf_is_float(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_FLOAT; +} + +static inline bool btf_is_decl_tag(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_DECL_TAG; +} + +static inline bool btf_is_type_tag(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_TYPE_TAG; +} + +static inline __u8 btf_int_encoding(const struct btf_type *t) +{ + return BTF_INT_ENCODING(*(__u32 *)(t + 1)); +} + +static inline __u8 btf_int_offset(const struct btf_type *t) +{ + return BTF_INT_OFFSET(*(__u32 *)(t + 1)); +} + +static inline __u8 btf_int_bits(const struct btf_type *t) +{ + return BTF_INT_BITS(*(__u32 *)(t + 1)); 
+} + +static inline struct btf_array *btf_array(const struct btf_type *t) +{ + return (struct btf_array *)(t + 1); +} + +static inline struct btf_enum *btf_enum(const struct btf_type *t) +{ + return (struct btf_enum *)(t + 1); +} + +static inline struct btf_member *btf_members(const struct btf_type *t) +{ + return (struct btf_member *)(t + 1); +} + +/* Get bit offset of a member with specified index. */ +static inline __u32 btf_member_bit_offset(const struct btf_type *t, + __u32 member_idx) +{ + const struct btf_member *m = btf_members(t) + member_idx; + bool kflag = btf_kflag(t); + + return kflag ? BTF_MEMBER_BIT_OFFSET(m->offset) : m->offset; +} +/* + * Get bitfield size of a member, assuming t is BTF_KIND_STRUCT or + * BTF_KIND_UNION. If member is not a bitfield, zero is returned. + */ +static inline __u32 btf_member_bitfield_size(const struct btf_type *t, + __u32 member_idx) +{ + const struct btf_member *m = btf_members(t) + member_idx; + bool kflag = btf_kflag(t); + + return kflag ? BTF_MEMBER_BITFIELD_SIZE(m->offset) : 0; +} + +static inline struct btf_param *btf_params(const struct btf_type *t) +{ + return (struct btf_param *)(t + 1); +} + +static inline struct btf_var *btf_var(const struct btf_type *t) +{ + return (struct btf_var *)(t + 1); +} + +static inline struct btf_var_secinfo * +btf_var_secinfos(const struct btf_type *t) +{ + return (struct btf_var_secinfo *)(t + 1); +} + +struct btf_decl_tag; +static inline struct btf_decl_tag *btf_decl_tag(const struct btf_type *t) +{ + return (struct btf_decl_tag *)(t + 1); +} + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* __LIBBPF_BTF_H */ diff --git a/pkg/plugin/lib/common/libbpf/_src/btf_dump.c b/pkg/plugin/lib/common/libbpf/_src/btf_dump.c new file mode 100644 index 000000000..6b1bc1f43 --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/btf_dump.c @@ -0,0 +1,2343 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * BTF-to-C type converter. + * + * Copyright (c) 2019 Facebook + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "btf.h" +#include "hashmap.h" +#include "libbpf.h" +#include "libbpf_internal.h" + +static const char PREFIXES[] = "\t\t\t\t\t\t\t\t\t\t\t\t\t"; +static const size_t PREFIX_CNT = sizeof(PREFIXES) - 1; + +static const char *pfx(int lvl) +{ + return lvl >= PREFIX_CNT ? PREFIXES : &PREFIXES[PREFIX_CNT - lvl]; +} + +enum btf_dump_type_order_state { + NOT_ORDERED, + ORDERING, + ORDERED, +}; + +enum btf_dump_type_emit_state { + NOT_EMITTED, + EMITTING, + EMITTED, +}; + +/* per-type auxiliary state */ +struct btf_dump_type_aux_state { + /* topological sorting state */ + enum btf_dump_type_order_state order_state: 2; + /* emitting state used to determine the need for forward declaration */ + enum btf_dump_type_emit_state emit_state: 2; + /* whether forward declaration was already emitted */ + __u8 fwd_emitted: 1; + /* whether unique non-duplicate name was already assigned */ + __u8 name_resolved: 1; + /* whether type is referenced from any other type */ + __u8 referenced: 1; +}; + +/* indent string length; one indent string is added for each indent level */ +#define BTF_DATA_INDENT_STR_LEN 32 + +/* + * Common internal data for BTF type data dump operations. 
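A sketch of how the member-access helpers above are typically combined to walk a struct or union type; walk_members and its output format are illustrative:

static void walk_members(const struct btf *btf, const struct btf_type *t)
{
	const struct btf_member *m = btf_members(t);
	int i, n = btf_vlen(t);

	for (i = 0; i < n; i++, m++)
		printf("%s: bit offset %u, bitfield size %u\n",
		       btf__name_by_offset(btf, m->name_off),
		       btf_member_bit_offset(t, i),
		       btf_member_bitfield_size(t, i));
}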
+ */ +struct btf_dump_data { + const void *data_end; /* end of valid data to show */ + bool compact; + bool skip_names; + bool emit_zeroes; + __u8 indent_lvl; /* base indent level */ + char indent_str[BTF_DATA_INDENT_STR_LEN]; + /* below are used during iteration */ + int depth; + bool is_array_member; + bool is_array_terminated; + bool is_array_char; +}; + +struct btf_dump { + const struct btf *btf; + btf_dump_printf_fn_t printf_fn; + void *cb_ctx; + int ptr_sz; + bool strip_mods; + bool skip_anon_defs; + int last_id; + + /* per-type auxiliary state */ + struct btf_dump_type_aux_state *type_states; + size_t type_states_cap; + /* per-type optional cached unique name, must be freed, if present */ + const char **cached_names; + size_t cached_names_cap; + + /* topo-sorted list of dependent type definitions */ + __u32 *emit_queue; + int emit_queue_cap; + int emit_queue_cnt; + + /* + * stack of type declarations (e.g., chain of modifiers, arrays, + * funcs, etc) + */ + __u32 *decl_stack; + int decl_stack_cap; + int decl_stack_cnt; + + /* maps struct/union/enum name to a number of name occurrences */ + struct hashmap *type_names; + /* + * maps typedef identifiers and enum value names to a number of such + * name occurrences + */ + struct hashmap *ident_names; + /* + * data for typed display; allocated if needed. + */ + struct btf_dump_data *typed_dump; +}; + +static size_t str_hash_fn(const void *key, void *ctx) +{ + return str_hash(key); +} + +static bool str_equal_fn(const void *a, const void *b, void *ctx) +{ + return strcmp(a, b) == 0; +} + +static const char *btf_name_of(const struct btf_dump *d, __u32 name_off) +{ + return btf__name_by_offset(d->btf, name_off); +} + +static void btf_dump_printf(const struct btf_dump *d, const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + d->printf_fn(d->cb_ctx, fmt, args); + va_end(args); +} + +static int btf_dump_mark_referenced(struct btf_dump *d); +static int btf_dump_resize(struct btf_dump *d); + +DEFAULT_VERSION(btf_dump__new_v0_6_0, btf_dump__new, LIBBPF_0.6.0) +struct btf_dump *btf_dump__new_v0_6_0(const struct btf *btf, + btf_dump_printf_fn_t printf_fn, + void *ctx, + const struct btf_dump_opts *opts) +{ + struct btf_dump *d; + int err; + + if (!printf_fn) + return libbpf_err_ptr(-EINVAL); + + d = calloc(1, sizeof(struct btf_dump)); + if (!d) + return libbpf_err_ptr(-ENOMEM); + + d->btf = btf; + d->printf_fn = printf_fn; + d->cb_ctx = ctx; + d->ptr_sz = btf__pointer_size(btf) ? : sizeof(void *); + + d->type_names = hashmap__new(str_hash_fn, str_equal_fn, NULL); + if (IS_ERR(d->type_names)) { + err = PTR_ERR(d->type_names); + d->type_names = NULL; + goto err; + } + d->ident_names = hashmap__new(str_hash_fn, str_equal_fn, NULL); + if (IS_ERR(d->ident_names)) { + err = PTR_ERR(d->ident_names); + d->ident_names = NULL; + goto err; + } + + err = btf_dump_resize(d); + if (err) + goto err; + + return d; +err: + btf_dump__free(d); + return libbpf_err_ptr(err); +} + +COMPAT_VERSION(btf_dump__new_deprecated, btf_dump__new, LIBBPF_0.0.4) +struct btf_dump *btf_dump__new_deprecated(const struct btf *btf, + const struct btf_ext *btf_ext, + const struct btf_dump_opts *opts, + btf_dump_printf_fn_t printf_fn) +{ + if (!printf_fn) + return libbpf_err_ptr(-EINVAL); + return btf_dump__new_v0_6_0(btf, printf_fn, opts ? 
opts->ctx : NULL, opts); +} + +static int btf_dump_resize(struct btf_dump *d) +{ + int err, last_id = btf__type_cnt(d->btf) - 1; + + if (last_id <= d->last_id) + return 0; + + if (libbpf_ensure_mem((void **)&d->type_states, &d->type_states_cap, + sizeof(*d->type_states), last_id + 1)) + return -ENOMEM; + if (libbpf_ensure_mem((void **)&d->cached_names, &d->cached_names_cap, + sizeof(*d->cached_names), last_id + 1)) + return -ENOMEM; + + if (d->last_id == 0) { + /* VOID is special */ + d->type_states[0].order_state = ORDERED; + d->type_states[0].emit_state = EMITTED; + } + + /* eagerly determine referenced types for anon enums */ + err = btf_dump_mark_referenced(d); + if (err) + return err; + + d->last_id = last_id; + return 0; +} + +void btf_dump__free(struct btf_dump *d) +{ + int i; + + if (IS_ERR_OR_NULL(d)) + return; + + free(d->type_states); + if (d->cached_names) { + /* any set cached name is owned by us and should be freed */ + for (i = 0; i <= d->last_id; i++) { + if (d->cached_names[i]) + free((void *)d->cached_names[i]); + } + } + free(d->cached_names); + free(d->emit_queue); + free(d->decl_stack); + hashmap__free(d->type_names); + hashmap__free(d->ident_names); + + free(d); +} + +static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr); +static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id); + +/* + * Dump BTF type in a compilable C syntax, including all the necessary + * dependent types, necessary for compilation. If some of the dependent types + * were already emitted as part of previous btf_dump__dump_type() invocation + * for another type, they won't be emitted again. This API allows callers to + * filter out BTF types according to user-defined criterias and emitted only + * minimal subset of types, necessary to compile everything. Full struct/union + * definitions will still be emitted, even if the only usage is through + * pointer and could be satisfied with just a forward declaration. + * + * Dumping is done in two high-level passes: + * 1. Topologically sort type definitions to satisfy C rules of compilation. + * 2. Emit type definitions in C syntax. + * + * Returns 0 on success; <0, otherwise. + */ +int btf_dump__dump_type(struct btf_dump *d, __u32 id) +{ + int err, i; + + if (id >= btf__type_cnt(d->btf)) + return libbpf_err(-EINVAL); + + err = btf_dump_resize(d); + if (err) + return libbpf_err(err); + + d->emit_queue_cnt = 0; + err = btf_dump_order_type(d, id, false); + if (err < 0) + return libbpf_err(err); + + for (i = 0; i < d->emit_queue_cnt; i++) + btf_dump_emit_type(d, d->emit_queue[i], 0 /*top-level*/); + + return 0; +} + +/* + * Mark all types that are referenced from any other type. This is used to + * determine top-level anonymous enums that need to be emitted as an + * independent type declarations. + * Anonymous enums come in two flavors: either embedded in a struct's field + * definition, in which case they have to be declared inline as part of field + * type declaration; or as a top-level anonymous enum, typically used for + * declaring global constants. It's impossible to distinguish between two + * without knowning whether given enum type was referenced from other type: + * top-level anonymous enum won't be referenced by anything, while embedded + * one will. 
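btf_dump__dump_type() is usually driven in a plain loop over all type IDs, which is essentially how a C header is regenerated from kernel BTF. A sketch, assuming d and btf have already been set up:

	int id, err, n = btf__type_cnt(btf);

	for (id = 1; id < n; id++) {
		err = btf_dump__dump_type(d, id);
		if (err)
			return err;
	}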
+ */ +static int btf_dump_mark_referenced(struct btf_dump *d) +{ + int i, j, n = btf__type_cnt(d->btf); + const struct btf_type *t; + __u16 vlen; + + for (i = d->last_id + 1; i < n; i++) { + t = btf__type_by_id(d->btf, i); + vlen = btf_vlen(t); + + switch (btf_kind(t)) { + case BTF_KIND_INT: + case BTF_KIND_ENUM: + case BTF_KIND_FWD: + case BTF_KIND_FLOAT: + break; + + case BTF_KIND_VOLATILE: + case BTF_KIND_CONST: + case BTF_KIND_RESTRICT: + case BTF_KIND_PTR: + case BTF_KIND_TYPEDEF: + case BTF_KIND_FUNC: + case BTF_KIND_VAR: + case BTF_KIND_DECL_TAG: + case BTF_KIND_TYPE_TAG: + d->type_states[t->type].referenced = 1; + break; + + case BTF_KIND_ARRAY: { + const struct btf_array *a = btf_array(t); + + d->type_states[a->index_type].referenced = 1; + d->type_states[a->type].referenced = 1; + break; + } + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: { + const struct btf_member *m = btf_members(t); + + for (j = 0; j < vlen; j++, m++) + d->type_states[m->type].referenced = 1; + break; + } + case BTF_KIND_FUNC_PROTO: { + const struct btf_param *p = btf_params(t); + + for (j = 0; j < vlen; j++, p++) + d->type_states[p->type].referenced = 1; + break; + } + case BTF_KIND_DATASEC: { + const struct btf_var_secinfo *v = btf_var_secinfos(t); + + for (j = 0; j < vlen; j++, v++) + d->type_states[v->type].referenced = 1; + break; + } + default: + return -EINVAL; + } + } + return 0; +} + +static int btf_dump_add_emit_queue_id(struct btf_dump *d, __u32 id) +{ + __u32 *new_queue; + size_t new_cap; + + if (d->emit_queue_cnt >= d->emit_queue_cap) { + new_cap = max(16, d->emit_queue_cap * 3 / 2); + new_queue = libbpf_reallocarray(d->emit_queue, new_cap, sizeof(new_queue[0])); + if (!new_queue) + return -ENOMEM; + d->emit_queue = new_queue; + d->emit_queue_cap = new_cap; + } + + d->emit_queue[d->emit_queue_cnt++] = id; + return 0; +} + +/* + * Determine order of emitting dependent types and specified type to satisfy + * C compilation rules. This is done through topological sorting with an + * additional complication which comes from C rules. The main idea for C is + * that if some type is "embedded" into a struct/union, it's size needs to be + * known at the time of definition of containing type. E.g., for: + * + * struct A {}; + * struct B { struct A x; } + * + * struct A *HAS* to be defined before struct B, because it's "embedded", + * i.e., it is part of struct B layout. But in the following case: + * + * struct A; + * struct B { struct A *x; } + * struct A {}; + * + * it's enough to just have a forward declaration of struct A at the time of + * struct B definition, as struct B has a pointer to struct A, so the size of + * field x is known without knowing struct A size: it's sizeof(void *). + * + * Unfortunately, there are some trickier cases we need to handle, e.g.: + * + * struct A {}; // if this was forward-declaration: compilation error + * struct B { + * struct { // anonymous struct + * struct A y; + * } *x; + * }; + * + * In this case, struct B's field x is a pointer, so it's size is known + * regardless of the size of (anonymous) struct it points to. But because this + * struct is anonymous and thus defined inline inside struct B, *and* it + * embeds struct A, compiler requires full definition of struct A to be known + * before struct B can be defined. This creates a transitive dependency + * between struct A and struct B. If struct A was forward-declared before + * struct B definition and fully defined after struct B definition, that would + * trigger compilation error. 
+ * + * All this means that while we are doing topological sorting on BTF type + * graph, we need to determine relationships between different types (graph + * nodes): + * - weak link (relationship) between X and Y, if Y *CAN* be + * forward-declared at the point of X definition; + * - strong link, if Y *HAS* to be fully-defined before X can be defined. + * + * The rule is as follows. Given a chain of BTF types from X to Y, if there is + * BTF_KIND_PTR type in the chain and at least one non-anonymous type + * Z (excluding X, including Y), then link is weak. Otherwise, it's strong. + * Weak/strong relationship is determined recursively during DFS traversal and + * is returned as a result from btf_dump_order_type(). + * + * btf_dump_order_type() is trying to avoid unnecessary forward declarations, + * but it is not guaranteeing that no extraneous forward declarations will be + * emitted. + * + * To avoid extra work, algorithm marks some of BTF types as ORDERED, when + * it's done with them, but not for all (e.g., VOLATILE, CONST, RESTRICT, + * ARRAY, FUNC_PROTO), as weak/strong semantics for those depends on the + * entire graph path, so depending where from one came to that BTF type, it + * might cause weak or strong ordering. For types like STRUCT/UNION/INT/ENUM, + * once they are processed, there is no need to do it again, so they are + * marked as ORDERED. We can mark PTR as ORDERED as well, as it semi-forces + * weak link, unless subsequent referenced STRUCT/UNION/ENUM is anonymous. But + * in any case, once those are processed, no need to do it again, as the + * result won't change. + * + * Returns: + * - 1, if type is part of strong link (so there is strong topological + * ordering requirements); + * - 0, if type is part of weak link (so can be satisfied through forward + * declaration); + * - <0, on error (e.g., unsatisfiable type loop detected). + */ +static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr) +{ + /* + * Order state is used to detect strong link cycles, but only for BTF + * kinds that are or could be an independent definition (i.e., + * stand-alone fwd decl, enum, typedef, struct, union). Ptrs, arrays, + * func_protos, modifiers are just means to get to these definitions. + * Int/void don't need definitions, they are assumed to be always + * properly defined. We also ignore datasec, var, and funcs for now. + * So for all non-defining kinds, we never even set ordering state, + * for defining kinds we set ORDERING and subsequently ORDERED if it + * forms a strong link. 
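The ORDERING/ORDERED handling is an instance of the standard three-state DFS used for cycle detection; btf_dump_order_type() additionally downgrades cycles that pass through a pointer to weak links instead of failing. A generic sketch of the underlying pattern, detached from BTF specifics (node, adj and adj_cnt are made-up names):

enum mark { UNVISITED, IN_PROGRESS, DONE };

/* returns 0 on success, -ELOOP if a cycle is reachable from node */
static int visit(int node, enum mark *marks, int **adj, const int *adj_cnt)
{
	int i, err;

	if (marks[node] == DONE)
		return 0;
	if (marks[node] == IN_PROGRESS)
		return -ELOOP;		/* back edge, i.e. a cycle */

	marks[node] = IN_PROGRESS;
	for (i = 0; i < adj_cnt[node]; i++) {
		err = visit(adj[node][i], marks, adj, adj_cnt);
		if (err)
			return err;
	}
	marks[node] = DONE;
	return 0;
}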
+ */ + struct btf_dump_type_aux_state *tstate = &d->type_states[id]; + const struct btf_type *t; + __u16 vlen; + int err, i; + + /* return true, letting typedefs know that it's ok to be emitted */ + if (tstate->order_state == ORDERED) + return 1; + + t = btf__type_by_id(d->btf, id); + + if (tstate->order_state == ORDERING) { + /* type loop, but resolvable through fwd declaration */ + if (btf_is_composite(t) && through_ptr && t->name_off != 0) + return 0; + pr_warn("unsatisfiable type cycle, id:[%u]\n", id); + return -ELOOP; + } + + switch (btf_kind(t)) { + case BTF_KIND_INT: + case BTF_KIND_FLOAT: + tstate->order_state = ORDERED; + return 0; + + case BTF_KIND_PTR: + err = btf_dump_order_type(d, t->type, true); + tstate->order_state = ORDERED; + return err; + + case BTF_KIND_ARRAY: + return btf_dump_order_type(d, btf_array(t)->type, false); + + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: { + const struct btf_member *m = btf_members(t); + /* + * struct/union is part of strong link, only if it's embedded + * (so no ptr in a path) or it's anonymous (so has to be + * defined inline, even if declared through ptr) + */ + if (through_ptr && t->name_off != 0) + return 0; + + tstate->order_state = ORDERING; + + vlen = btf_vlen(t); + for (i = 0; i < vlen; i++, m++) { + err = btf_dump_order_type(d, m->type, false); + if (err < 0) + return err; + } + + if (t->name_off != 0) { + err = btf_dump_add_emit_queue_id(d, id); + if (err < 0) + return err; + } + + tstate->order_state = ORDERED; + return 1; + } + case BTF_KIND_ENUM: + case BTF_KIND_FWD: + /* + * non-anonymous or non-referenced enums are top-level + * declarations and should be emitted. Same logic can be + * applied to FWDs, it won't hurt anyways. + */ + if (t->name_off != 0 || !tstate->referenced) { + err = btf_dump_add_emit_queue_id(d, id); + if (err) + return err; + } + tstate->order_state = ORDERED; + return 1; + + case BTF_KIND_TYPEDEF: { + int is_strong; + + is_strong = btf_dump_order_type(d, t->type, through_ptr); + if (is_strong < 0) + return is_strong; + + /* typedef is similar to struct/union w.r.t. 
fwd-decls */ + if (through_ptr && !is_strong) + return 0; + + /* typedef is always a named definition */ + err = btf_dump_add_emit_queue_id(d, id); + if (err) + return err; + + d->type_states[id].order_state = ORDERED; + return 1; + } + case BTF_KIND_VOLATILE: + case BTF_KIND_CONST: + case BTF_KIND_RESTRICT: + case BTF_KIND_TYPE_TAG: + return btf_dump_order_type(d, t->type, through_ptr); + + case BTF_KIND_FUNC_PROTO: { + const struct btf_param *p = btf_params(t); + bool is_strong; + + err = btf_dump_order_type(d, t->type, through_ptr); + if (err < 0) + return err; + is_strong = err > 0; + + vlen = btf_vlen(t); + for (i = 0; i < vlen; i++, p++) { + err = btf_dump_order_type(d, p->type, through_ptr); + if (err < 0) + return err; + if (err > 0) + is_strong = true; + } + return is_strong; + } + case BTF_KIND_FUNC: + case BTF_KIND_VAR: + case BTF_KIND_DATASEC: + case BTF_KIND_DECL_TAG: + d->type_states[id].order_state = ORDERED; + return 0; + + default: + return -EINVAL; + } +} + +static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id, + const struct btf_type *t); + +static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id, + const struct btf_type *t); +static void btf_dump_emit_struct_def(struct btf_dump *d, __u32 id, + const struct btf_type *t, int lvl); + +static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id, + const struct btf_type *t); +static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id, + const struct btf_type *t, int lvl); + +static void btf_dump_emit_fwd_def(struct btf_dump *d, __u32 id, + const struct btf_type *t); + +static void btf_dump_emit_typedef_def(struct btf_dump *d, __u32 id, + const struct btf_type *t, int lvl); + +/* a local view into a shared stack */ +struct id_stack { + const __u32 *ids; + int cnt; +}; + +static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id, + const char *fname, int lvl); +static void btf_dump_emit_type_chain(struct btf_dump *d, + struct id_stack *decl_stack, + const char *fname, int lvl); + +static const char *btf_dump_type_name(struct btf_dump *d, __u32 id); +static const char *btf_dump_ident_name(struct btf_dump *d, __u32 id); +static size_t btf_dump_name_dups(struct btf_dump *d, struct hashmap *name_map, + const char *orig_name); + +static bool btf_dump_is_blacklisted(struct btf_dump *d, __u32 id) +{ + const struct btf_type *t = btf__type_by_id(d->btf, id); + + /* __builtin_va_list is a compiler built-in, which causes compilation + * errors, when compiling w/ different compiler, then used to compile + * original code (e.g., GCC to compile kernel, Clang to use generated + * C header from BTF). As it is built-in, it should be already defined + * properly internally in compiler. + */ + if (t->name_off == 0) + return false; + return strcmp(btf_name_of(d, t->name_off), "__builtin_va_list") == 0; +} + +/* + * Emit C-syntax definitions of types from chains of BTF types. + * + * High-level handling of determining necessary forward declarations are handled + * by btf_dump_emit_type() itself, but all nitty-gritty details of emitting type + * declarations/definitions in C syntax are handled by a combo of + * btf_dump_emit_type_decl()/btf_dump_emit_type_chain() w/ delegation to + * corresponding btf_dump_emit_*_{def,fwd}() functions. + * + * We also keep track of "containing struct/union type ID" to determine when + * we reference it from inside and thus can avoid emitting unnecessary forward + * declaration. 
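For two structs that reference each other only through pointers, the emit pass described above produces output of roughly this shape (names are illustrative):

struct bar;

struct foo {
	struct bar *next;
};

struct bar {
	struct foo *prev;
};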
+ * + * This algorithm is designed in such a way, that even if some error occurs + * (either technical, e.g., out of memory, or logical, i.e., malformed BTF + * that doesn't comply to C rules completely), algorithm will try to proceed + * and produce as much meaningful output as possible. + */ +static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id) +{ + struct btf_dump_type_aux_state *tstate = &d->type_states[id]; + bool top_level_def = cont_id == 0; + const struct btf_type *t; + __u16 kind; + + if (tstate->emit_state == EMITTED) + return; + + t = btf__type_by_id(d->btf, id); + kind = btf_kind(t); + + if (tstate->emit_state == EMITTING) { + if (tstate->fwd_emitted) + return; + + switch (kind) { + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + /* + * if we are referencing a struct/union that we are + * part of - then no need for fwd declaration + */ + if (id == cont_id) + return; + if (t->name_off == 0) { + pr_warn("anonymous struct/union loop, id:[%u]\n", + id); + return; + } + btf_dump_emit_struct_fwd(d, id, t); + btf_dump_printf(d, ";\n\n"); + tstate->fwd_emitted = 1; + break; + case BTF_KIND_TYPEDEF: + /* + * for typedef fwd_emitted means typedef definition + * was emitted, but it can be used only for "weak" + * references through pointer only, not for embedding + */ + if (!btf_dump_is_blacklisted(d, id)) { + btf_dump_emit_typedef_def(d, id, t, 0); + btf_dump_printf(d, ";\n\n"); + } + tstate->fwd_emitted = 1; + break; + default: + break; + } + + return; + } + + switch (kind) { + case BTF_KIND_INT: + /* Emit type alias definitions if necessary */ + btf_dump_emit_missing_aliases(d, id, t); + + tstate->emit_state = EMITTED; + break; + case BTF_KIND_ENUM: + if (top_level_def) { + btf_dump_emit_enum_def(d, id, t, 0); + btf_dump_printf(d, ";\n\n"); + } + tstate->emit_state = EMITTED; + break; + case BTF_KIND_PTR: + case BTF_KIND_VOLATILE: + case BTF_KIND_CONST: + case BTF_KIND_RESTRICT: + case BTF_KIND_TYPE_TAG: + btf_dump_emit_type(d, t->type, cont_id); + break; + case BTF_KIND_ARRAY: + btf_dump_emit_type(d, btf_array(t)->type, cont_id); + break; + case BTF_KIND_FWD: + btf_dump_emit_fwd_def(d, id, t); + btf_dump_printf(d, ";\n\n"); + tstate->emit_state = EMITTED; + break; + case BTF_KIND_TYPEDEF: + tstate->emit_state = EMITTING; + btf_dump_emit_type(d, t->type, id); + /* + * typedef can server as both definition and forward + * declaration; at this stage someone depends on + * typedef as a forward declaration (refers to it + * through pointer), so unless we already did it, + * emit typedef as a forward declaration + */ + if (!tstate->fwd_emitted && !btf_dump_is_blacklisted(d, id)) { + btf_dump_emit_typedef_def(d, id, t, 0); + btf_dump_printf(d, ";\n\n"); + } + tstate->emit_state = EMITTED; + break; + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + tstate->emit_state = EMITTING; + /* if it's a top-level struct/union definition or struct/union + * is anonymous, then in C we'll be emitting all fields and + * their types (as opposed to just `struct X`), so we need to + * make sure that all types, referenced from struct/union + * members have necessary forward-declarations, where + * applicable + */ + if (top_level_def || t->name_off == 0) { + const struct btf_member *m = btf_members(t); + __u16 vlen = btf_vlen(t); + int i, new_cont_id; + + new_cont_id = t->name_off == 0 ? 
cont_id : id; + for (i = 0; i < vlen; i++, m++) + btf_dump_emit_type(d, m->type, new_cont_id); + } else if (!tstate->fwd_emitted && id != cont_id) { + btf_dump_emit_struct_fwd(d, id, t); + btf_dump_printf(d, ";\n\n"); + tstate->fwd_emitted = 1; + } + + if (top_level_def) { + btf_dump_emit_struct_def(d, id, t, 0); + btf_dump_printf(d, ";\n\n"); + tstate->emit_state = EMITTED; + } else { + tstate->emit_state = NOT_EMITTED; + } + break; + case BTF_KIND_FUNC_PROTO: { + const struct btf_param *p = btf_params(t); + __u16 n = btf_vlen(t); + int i; + + btf_dump_emit_type(d, t->type, cont_id); + for (i = 0; i < n; i++, p++) + btf_dump_emit_type(d, p->type, cont_id); + + break; + } + default: + break; + } +} + +static bool btf_is_struct_packed(const struct btf *btf, __u32 id, + const struct btf_type *t) +{ + const struct btf_member *m; + int align, i, bit_sz; + __u16 vlen; + + align = btf__align_of(btf, id); + /* size of a non-packed struct has to be a multiple of its alignment*/ + if (align && t->size % align) + return true; + + m = btf_members(t); + vlen = btf_vlen(t); + /* all non-bitfield fields have to be naturally aligned */ + for (i = 0; i < vlen; i++, m++) { + align = btf__align_of(btf, m->type); + bit_sz = btf_member_bitfield_size(t, i); + if (align && bit_sz == 0 && m->offset % (8 * align) != 0) + return true; + } + + /* + * if original struct was marked as packed, but its layout is + * naturally aligned, we'll detect that it's not packed + */ + return false; +} + +static int chip_away_bits(int total, int at_most) +{ + return total % at_most ? : at_most; +} + +static void btf_dump_emit_bit_padding(const struct btf_dump *d, + int cur_off, int m_off, int m_bit_sz, + int align, int lvl) +{ + int off_diff = m_off - cur_off; + int ptr_bits = d->ptr_sz * 8; + + if (off_diff <= 0) + /* no gap */ + return; + if (m_bit_sz == 0 && off_diff < align * 8) + /* natural padding will take care of a gap */ + return; + + while (off_diff > 0) { + const char *pad_type; + int pad_bits; + + if (ptr_bits > 32 && off_diff > 32) { + pad_type = "long"; + pad_bits = chip_away_bits(off_diff, ptr_bits); + } else if (off_diff > 16) { + pad_type = "int"; + pad_bits = chip_away_bits(off_diff, 32); + } else if (off_diff > 8) { + pad_type = "short"; + pad_bits = chip_away_bits(off_diff, 16); + } else { + pad_type = "char"; + pad_bits = chip_away_bits(off_diff, 8); + } + btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type, pad_bits); + off_diff -= pad_bits; + } +} + +static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id, + const struct btf_type *t) +{ + btf_dump_printf(d, "%s%s%s", + btf_is_struct(t) ? "struct" : "union", + t->name_off ? " " : "", + btf_dump_type_name(d, id)); +} + +static void btf_dump_emit_struct_def(struct btf_dump *d, + __u32 id, + const struct btf_type *t, + int lvl) +{ + const struct btf_member *m = btf_members(t); + bool is_struct = btf_is_struct(t); + int align, i, packed, off = 0; + __u16 vlen = btf_vlen(t); + + packed = is_struct ? btf_is_struct_packed(d->btf, id, t) : 0; + + btf_dump_printf(d, "%s%s%s {", + is_struct ? "struct" : "union", + t->name_off ? " " : "", + btf_dump_type_name(d, id)); + + for (i = 0; i < vlen; i++, m++) { + const char *fname; + int m_off, m_sz; + + fname = btf_name_of(d, m->name_off); + m_sz = btf_member_bitfield_size(t, i); + m_off = btf_member_bit_offset(t, i); + align = packed ? 
1 : btf__align_of(d->btf, m->type); + + btf_dump_emit_bit_padding(d, off, m_off, m_sz, align, lvl + 1); + btf_dump_printf(d, "\n%s", pfx(lvl + 1)); + btf_dump_emit_type_decl(d, m->type, fname, lvl + 1); + + if (m_sz) { + btf_dump_printf(d, ": %d", m_sz); + off = m_off + m_sz; + } else { + m_sz = max((__s64)0, btf__resolve_size(d->btf, m->type)); + off = m_off + m_sz * 8; + } + btf_dump_printf(d, ";"); + } + + /* pad at the end, if necessary */ + if (is_struct) { + align = packed ? 1 : btf__align_of(d->btf, id); + btf_dump_emit_bit_padding(d, off, t->size * 8, 0, align, + lvl + 1); + } + + if (vlen) + btf_dump_printf(d, "\n"); + btf_dump_printf(d, "%s}", pfx(lvl)); + if (packed) + btf_dump_printf(d, " __attribute__((packed))"); +} + +static const char *missing_base_types[][2] = { + /* + * GCC emits typedefs to its internal __PolyX_t types when compiling Arm + * SIMD intrinsics. Alias them to standard base types. + */ + { "__Poly8_t", "unsigned char" }, + { "__Poly16_t", "unsigned short" }, + { "__Poly64_t", "unsigned long long" }, + { "__Poly128_t", "unsigned __int128" }, +}; + +static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id, + const struct btf_type *t) +{ + const char *name = btf_dump_type_name(d, id); + int i; + + for (i = 0; i < ARRAY_SIZE(missing_base_types); i++) { + if (strcmp(name, missing_base_types[i][0]) == 0) { + btf_dump_printf(d, "typedef %s %s;\n\n", + missing_base_types[i][1], name); + break; + } + } +} + +static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id, + const struct btf_type *t) +{ + btf_dump_printf(d, "enum %s", btf_dump_type_name(d, id)); +} + +static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id, + const struct btf_type *t, + int lvl) +{ + const struct btf_enum *v = btf_enum(t); + __u16 vlen = btf_vlen(t); + const char *name; + size_t dup_cnt; + int i; + + btf_dump_printf(d, "enum%s%s", + t->name_off ? " " : "", + btf_dump_type_name(d, id)); + + if (vlen) { + btf_dump_printf(d, " {"); + for (i = 0; i < vlen; i++, v++) { + name = btf_name_of(d, v->name_off); + /* enumerators share namespace with typedef idents */ + dup_cnt = btf_dump_name_dups(d, d->ident_names, name); + if (dup_cnt > 1) { + btf_dump_printf(d, "\n%s%s___%zu = %u,", + pfx(lvl + 1), name, dup_cnt, + (__u32)v->val); + } else { + btf_dump_printf(d, "\n%s%s = %u,", + pfx(lvl + 1), name, + (__u32)v->val); + } + } + btf_dump_printf(d, "\n%s}", pfx(lvl)); + } +} + +static void btf_dump_emit_fwd_def(struct btf_dump *d, __u32 id, + const struct btf_type *t) +{ + const char *name = btf_dump_type_name(d, id); + + if (btf_kflag(t)) + btf_dump_printf(d, "union %s", name); + else + btf_dump_printf(d, "struct %s", name); +} + +static void btf_dump_emit_typedef_def(struct btf_dump *d, __u32 id, + const struct btf_type *t, int lvl) +{ + const char *name = btf_dump_ident_name(d, id); + + /* + * Old GCC versions are emitting invalid typedef for __gnuc_va_list + * pointing to VOID. This generates warnings from btf_dump() and + * results in uncompilable header file, so we are fixing it up here + * with valid typedef into __builtin_va_list. 
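Two illustrations of output shapes produced by the struct and enum emitters earlier in this file, with made-up type names: an explicit unnamed bit-field pad for a hole that natural alignment would not recreate, and the ___N suffix appended to a duplicated enumerator name:

struct with_hole {
	long: 64;		/* 8-byte hole preserved as explicit padding */
	int x;
};

enum {
	READY = 1,
};

enum {
	READY___2 = 1,		/* second occurrence of "READY" gets a ___2 suffix */
};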
+ */ + if (t->type == 0 && strcmp(name, "__gnuc_va_list") == 0) { + btf_dump_printf(d, "typedef __builtin_va_list __gnuc_va_list"); + return; + } + + btf_dump_printf(d, "typedef "); + btf_dump_emit_type_decl(d, t->type, name, lvl); +} + +static int btf_dump_push_decl_stack_id(struct btf_dump *d, __u32 id) +{ + __u32 *new_stack; + size_t new_cap; + + if (d->decl_stack_cnt >= d->decl_stack_cap) { + new_cap = max(16, d->decl_stack_cap * 3 / 2); + new_stack = libbpf_reallocarray(d->decl_stack, new_cap, sizeof(new_stack[0])); + if (!new_stack) + return -ENOMEM; + d->decl_stack = new_stack; + d->decl_stack_cap = new_cap; + } + + d->decl_stack[d->decl_stack_cnt++] = id; + + return 0; +} + +/* + * Emit type declaration (e.g., field type declaration in a struct or argument + * declaration in function prototype) in correct C syntax. + * + * For most types it's trivial, but there are few quirky type declaration + * cases worth mentioning: + * - function prototypes (especially nesting of function prototypes); + * - arrays; + * - const/volatile/restrict for pointers vs other types. + * + * For a good discussion of *PARSING* C syntax (as a human), see + * Peter van der Linden's "Expert C Programming: Deep C Secrets", + * Ch.3 "Unscrambling Declarations in C". + * + * It won't help with BTF to C conversion much, though, as it's an opposite + * problem. So we came up with this algorithm in reverse to van der Linden's + * parsing algorithm. It goes from structured BTF representation of type + * declaration to a valid compilable C syntax. + * + * For instance, consider this C typedef: + * typedef const int * const * arr[10] arr_t; + * It will be represented in BTF with this chain of BTF types: + * [typedef] -> [array] -> [ptr] -> [const] -> [ptr] -> [const] -> [int] + * + * Notice how [const] modifier always goes before type it modifies in BTF type + * graph, but in C syntax, const/volatile/restrict modifiers are written to + * the right of pointers, but to the left of other types. There are also other + * quirks, like function pointers, arrays of them, functions returning other + * functions, etc. + * + * We handle that by pushing all the types to a stack, until we hit "terminal" + * type (int/enum/struct/union/fwd). Then depending on the kind of a type on + * top of a stack, modifiers are handled differently. Array/function pointers + * have also wildly different syntax and how nesting of them are done. See + * code for authoritative definition. + * + * To avoid allocating new stack for each independent chain of BTF types, we + * share one bigger stack, with each chain working only on its own local view + * of a stack frame. Some care is required to "pop" stack frames after + * processing type declaration chain. 
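+ *
+ * As a quick illustration (a sketch of the expected result, not a verbatim
+ * transcript of dumper output): for the arr_t example above the stack ends
+ * up holding
+ *   [array(10)] [ptr] [const] [ptr] [const] [int]
+ * and unwinding it in btf_dump_emit_type_chain() prints the declaration
+ *   const int * const *arr_t[10]
+ * (the "typedef " keyword and trailing ';' are emitted separately by the
+ * typedef handling code).
+ *
+ * External callers reach this machinery through btf_dump__emit_type_decl()
+ * below. A minimal usage sketch, assuming the caller already has a
+ * struct btf_dump *d and a valid type_id ("my_field" is a placeholder):
+ *
+ *   LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
+ *       .field_name = "my_field",
+ *       .indent_level = 1,
+ *   );
+ *   err = btf_dump__emit_type_decl(d, type_id, &opts);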
+ */ +int btf_dump__emit_type_decl(struct btf_dump *d, __u32 id, + const struct btf_dump_emit_type_decl_opts *opts) +{ + const char *fname; + int lvl, err; + + if (!OPTS_VALID(opts, btf_dump_emit_type_decl_opts)) + return libbpf_err(-EINVAL); + + err = btf_dump_resize(d); + if (err) + return libbpf_err(err); + + fname = OPTS_GET(opts, field_name, ""); + lvl = OPTS_GET(opts, indent_level, 0); + d->strip_mods = OPTS_GET(opts, strip_mods, false); + btf_dump_emit_type_decl(d, id, fname, lvl); + d->strip_mods = false; + return 0; +} + +static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id, + const char *fname, int lvl) +{ + struct id_stack decl_stack; + const struct btf_type *t; + int err, stack_start; + + stack_start = d->decl_stack_cnt; + for (;;) { + t = btf__type_by_id(d->btf, id); + if (d->strip_mods && btf_is_mod(t)) + goto skip_mod; + + err = btf_dump_push_decl_stack_id(d, id); + if (err < 0) { + /* + * if we don't have enough memory for entire type decl + * chain, restore stack, emit warning, and try to + * proceed nevertheless + */ + pr_warn("not enough memory for decl stack:%d", err); + d->decl_stack_cnt = stack_start; + return; + } +skip_mod: + /* VOID */ + if (id == 0) + break; + + switch (btf_kind(t)) { + case BTF_KIND_PTR: + case BTF_KIND_VOLATILE: + case BTF_KIND_CONST: + case BTF_KIND_RESTRICT: + case BTF_KIND_FUNC_PROTO: + case BTF_KIND_TYPE_TAG: + id = t->type; + break; + case BTF_KIND_ARRAY: + id = btf_array(t)->type; + break; + case BTF_KIND_INT: + case BTF_KIND_ENUM: + case BTF_KIND_FWD: + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + case BTF_KIND_TYPEDEF: + case BTF_KIND_FLOAT: + goto done; + default: + pr_warn("unexpected type in decl chain, kind:%u, id:[%u]\n", + btf_kind(t), id); + goto done; + } + } +done: + /* + * We might be inside a chain of declarations (e.g., array of function + * pointers returning anonymous (so inlined) structs, having another + * array field). Each of those needs its own "stack frame" to handle + * emitting of declarations. Those stack frames are non-overlapping + * portions of shared btf_dump->decl_stack. To make it a bit nicer to + * handle this set of nested stacks, we create a view corresponding to + * our own "stack frame" and work with it as an independent stack. + * We'll need to clean up after emit_type_chain() returns, though. + */ + decl_stack.ids = d->decl_stack + stack_start; + decl_stack.cnt = d->decl_stack_cnt - stack_start; + btf_dump_emit_type_chain(d, &decl_stack, fname, lvl); + /* + * emit_type_chain() guarantees that it will pop its entire decl_stack + * frame before returning. But it works with a read-only view into + * decl_stack, so it doesn't actually pop anything from the + * perspective of shared btf_dump->decl_stack, per se. We need to + * reset decl_stack state to how it was before us to avoid it growing + * all the time. 
+ */ + d->decl_stack_cnt = stack_start; +} + +static void btf_dump_emit_mods(struct btf_dump *d, struct id_stack *decl_stack) +{ + const struct btf_type *t; + __u32 id; + + while (decl_stack->cnt) { + id = decl_stack->ids[decl_stack->cnt - 1]; + t = btf__type_by_id(d->btf, id); + + switch (btf_kind(t)) { + case BTF_KIND_VOLATILE: + btf_dump_printf(d, "volatile "); + break; + case BTF_KIND_CONST: + btf_dump_printf(d, "const "); + break; + case BTF_KIND_RESTRICT: + btf_dump_printf(d, "restrict "); + break; + default: + return; + } + decl_stack->cnt--; + } +} + +static void btf_dump_drop_mods(struct btf_dump *d, struct id_stack *decl_stack) +{ + const struct btf_type *t; + __u32 id; + + while (decl_stack->cnt) { + id = decl_stack->ids[decl_stack->cnt - 1]; + t = btf__type_by_id(d->btf, id); + if (!btf_is_mod(t)) + return; + decl_stack->cnt--; + } +} + +static void btf_dump_emit_name(const struct btf_dump *d, + const char *name, bool last_was_ptr) +{ + bool separate = name[0] && !last_was_ptr; + + btf_dump_printf(d, "%s%s", separate ? " " : "", name); +} + +static void btf_dump_emit_type_chain(struct btf_dump *d, + struct id_stack *decls, + const char *fname, int lvl) +{ + /* + * last_was_ptr is used to determine if we need to separate pointer + * asterisk (*) from previous part of type signature with space, so + * that we get `int ***`, instead of `int * * *`. We default to true + * for cases where we have single pointer in a chain. E.g., in ptr -> + * func_proto case. func_proto will start a new emit_type_chain call + * with just ptr, which should be emitted as (*) or (*), so we + * don't want to prepend space for that last pointer. + */ + bool last_was_ptr = true; + const struct btf_type *t; + const char *name; + __u16 kind; + __u32 id; + + while (decls->cnt) { + id = decls->ids[--decls->cnt]; + if (id == 0) { + /* VOID is a special snowflake */ + btf_dump_emit_mods(d, decls); + btf_dump_printf(d, "void"); + last_was_ptr = false; + continue; + } + + t = btf__type_by_id(d->btf, id); + kind = btf_kind(t); + + switch (kind) { + case BTF_KIND_INT: + case BTF_KIND_FLOAT: + btf_dump_emit_mods(d, decls); + name = btf_name_of(d, t->name_off); + btf_dump_printf(d, "%s", name); + break; + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + btf_dump_emit_mods(d, decls); + /* inline anonymous struct/union */ + if (t->name_off == 0 && !d->skip_anon_defs) + btf_dump_emit_struct_def(d, id, t, lvl); + else + btf_dump_emit_struct_fwd(d, id, t); + break; + case BTF_KIND_ENUM: + btf_dump_emit_mods(d, decls); + /* inline anonymous enum */ + if (t->name_off == 0 && !d->skip_anon_defs) + btf_dump_emit_enum_def(d, id, t, lvl); + else + btf_dump_emit_enum_fwd(d, id, t); + break; + case BTF_KIND_FWD: + btf_dump_emit_mods(d, decls); + btf_dump_emit_fwd_def(d, id, t); + break; + case BTF_KIND_TYPEDEF: + btf_dump_emit_mods(d, decls); + btf_dump_printf(d, "%s", btf_dump_ident_name(d, id)); + break; + case BTF_KIND_PTR: + btf_dump_printf(d, "%s", last_was_ptr ? 
"*" : " *"); + break; + case BTF_KIND_VOLATILE: + btf_dump_printf(d, " volatile"); + break; + case BTF_KIND_CONST: + btf_dump_printf(d, " const"); + break; + case BTF_KIND_RESTRICT: + btf_dump_printf(d, " restrict"); + break; + case BTF_KIND_TYPE_TAG: + btf_dump_emit_mods(d, decls); + name = btf_name_of(d, t->name_off); + btf_dump_printf(d, " __attribute__((btf_type_tag(\"%s\")))", name); + break; + case BTF_KIND_ARRAY: { + const struct btf_array *a = btf_array(t); + const struct btf_type *next_t; + __u32 next_id; + bool multidim; + /* + * GCC has a bug + * (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=8354) + * which causes it to emit extra const/volatile + * modifiers for an array, if array's element type has + * const/volatile modifiers. Clang doesn't do that. + * In general, it doesn't seem very meaningful to have + * a const/volatile modifier for array, so we are + * going to silently skip them here. + */ + btf_dump_drop_mods(d, decls); + + if (decls->cnt == 0) { + btf_dump_emit_name(d, fname, last_was_ptr); + btf_dump_printf(d, "[%u]", a->nelems); + return; + } + + next_id = decls->ids[decls->cnt - 1]; + next_t = btf__type_by_id(d->btf, next_id); + multidim = btf_is_array(next_t); + /* we need space if we have named non-pointer */ + if (fname[0] && !last_was_ptr) + btf_dump_printf(d, " "); + /* no parentheses for multi-dimensional array */ + if (!multidim) + btf_dump_printf(d, "("); + btf_dump_emit_type_chain(d, decls, fname, lvl); + if (!multidim) + btf_dump_printf(d, ")"); + btf_dump_printf(d, "[%u]", a->nelems); + return; + } + case BTF_KIND_FUNC_PROTO: { + const struct btf_param *p = btf_params(t); + __u16 vlen = btf_vlen(t); + int i; + + /* + * GCC emits extra volatile qualifier for + * __attribute__((noreturn)) function pointers. Clang + * doesn't do it. It's a GCC quirk for backwards + * compatibility with code written for GCC <2.5. So, + * similarly to extra qualifiers for array, just drop + * them, instead of handling them. + */ + btf_dump_drop_mods(d, decls); + if (decls->cnt) { + btf_dump_printf(d, " ("); + btf_dump_emit_type_chain(d, decls, fname, lvl); + btf_dump_printf(d, ")"); + } else { + btf_dump_emit_name(d, fname, last_was_ptr); + } + btf_dump_printf(d, "("); + /* + * Clang for BPF target generates func_proto with no + * args as a func_proto with a single void arg (e.g., + * `int (*f)(void)` vs just `int (*f)()`). We are + * going to pretend there are no args for such case. + */ + if (vlen == 1 && p->type == 0) { + btf_dump_printf(d, ")"); + return; + } + + for (i = 0; i < vlen; i++, p++) { + if (i > 0) + btf_dump_printf(d, ", "); + + /* last arg of type void is vararg */ + if (i == vlen - 1 && p->type == 0) { + btf_dump_printf(d, "..."); + break; + } + + name = btf_name_of(d, p->name_off); + btf_dump_emit_type_decl(d, p->type, name, lvl); + } + + btf_dump_printf(d, ")"); + return; + } + default: + pr_warn("unexpected type in decl chain, kind:%u, id:[%u]\n", + kind, id); + return; + } + + last_was_ptr = kind == BTF_KIND_PTR; + } + + btf_dump_emit_name(d, fname, last_was_ptr); +} + +/* show type name as (type_name) */ +static void btf_dump_emit_type_cast(struct btf_dump *d, __u32 id, + bool top_level) +{ + const struct btf_type *t; + + /* for array members, we don't bother emitting type name for each + * member to avoid the redundancy of + * .name = (char[4])[(char)'f',(char)'o',(char)'o',] + */ + if (d->typed_dump->is_array_member) + return; + + /* avoid type name specification for variable/section; it will be done + * for the associated variable value(s). 
+ */ + t = btf__type_by_id(d->btf, id); + if (btf_is_var(t) || btf_is_datasec(t)) + return; + + if (top_level) + btf_dump_printf(d, "("); + + d->skip_anon_defs = true; + d->strip_mods = true; + btf_dump_emit_type_decl(d, id, "", 0); + d->strip_mods = false; + d->skip_anon_defs = false; + + if (top_level) + btf_dump_printf(d, ")"); +} + +/* return number of duplicates (occurrences) of a given name */ +static size_t btf_dump_name_dups(struct btf_dump *d, struct hashmap *name_map, + const char *orig_name) +{ + size_t dup_cnt = 0; + + hashmap__find(name_map, orig_name, (void **)&dup_cnt); + dup_cnt++; + hashmap__set(name_map, orig_name, (void *)dup_cnt, NULL, NULL); + + return dup_cnt; +} + +static const char *btf_dump_resolve_name(struct btf_dump *d, __u32 id, + struct hashmap *name_map) +{ + struct btf_dump_type_aux_state *s = &d->type_states[id]; + const struct btf_type *t = btf__type_by_id(d->btf, id); + const char *orig_name = btf_name_of(d, t->name_off); + const char **cached_name = &d->cached_names[id]; + size_t dup_cnt; + + if (t->name_off == 0) + return ""; + + if (s->name_resolved) + return *cached_name ? *cached_name : orig_name; + + if (btf_is_fwd(t) || (btf_is_enum(t) && btf_vlen(t) == 0)) { + s->name_resolved = 1; + return orig_name; + } + + dup_cnt = btf_dump_name_dups(d, name_map, orig_name); + if (dup_cnt > 1) { + const size_t max_len = 256; + char new_name[max_len]; + + snprintf(new_name, max_len, "%s___%zu", orig_name, dup_cnt); + *cached_name = strdup(new_name); + } + + s->name_resolved = 1; + return *cached_name ? *cached_name : orig_name; +} + +static const char *btf_dump_type_name(struct btf_dump *d, __u32 id) +{ + return btf_dump_resolve_name(d, id, d->type_names); +} + +static const char *btf_dump_ident_name(struct btf_dump *d, __u32 id) +{ + return btf_dump_resolve_name(d, id, d->ident_names); +} + +static int btf_dump_dump_type_data(struct btf_dump *d, + const char *fname, + const struct btf_type *t, + __u32 id, + const void *data, + __u8 bits_offset, + __u8 bit_sz); + +static const char *btf_dump_data_newline(struct btf_dump *d) +{ + return d->typed_dump->compact || d->typed_dump->depth == 0 ? "" : "\n"; +} + +static const char *btf_dump_data_delim(struct btf_dump *d) +{ + return d->typed_dump->depth == 0 ? "" : ","; +} + +static void btf_dump_data_pfx(struct btf_dump *d) +{ + int i, lvl = d->typed_dump->indent_lvl + d->typed_dump->depth; + + if (d->typed_dump->compact) + return; + + for (i = 0; i < lvl; i++) + btf_dump_printf(d, "%s", d->typed_dump->indent_str); +} + +/* A macro is used here as btf_type_value[s]() appends format specifiers + * to the format specifier passed in; these do the work of appending + * delimiters etc while the caller simply has to specify the type values + * in the format specifier + value(s). + */ +#define btf_dump_type_values(d, fmt, ...) 
\ + btf_dump_printf(d, fmt "%s%s", \ + ##__VA_ARGS__, \ + btf_dump_data_delim(d), \ + btf_dump_data_newline(d)) + +static int btf_dump_unsupported_data(struct btf_dump *d, + const struct btf_type *t, + __u32 id) +{ + btf_dump_printf(d, "", btf_kind(t)); + return -ENOTSUP; +} + +static int btf_dump_get_bitfield_value(struct btf_dump *d, + const struct btf_type *t, + const void *data, + __u8 bits_offset, + __u8 bit_sz, + __u64 *value) +{ + __u16 left_shift_bits, right_shift_bits; + const __u8 *bytes = data; + __u8 nr_copy_bits; + __u64 num = 0; + int i; + + /* Maximum supported bitfield size is 64 bits */ + if (t->size > 8) { + pr_warn("unexpected bitfield size %d\n", t->size); + return -EINVAL; + } + + /* Bitfield value retrieval is done in two steps; first relevant bytes are + * stored in num, then we left/right shift num to eliminate irrelevant bits. + */ +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + for (i = t->size - 1; i >= 0; i--) + num = num * 256 + bytes[i]; + nr_copy_bits = bit_sz + bits_offset; +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + for (i = 0; i < t->size; i++) + num = num * 256 + bytes[i]; + nr_copy_bits = t->size * 8 - bits_offset; +#else +# error "Unrecognized __BYTE_ORDER__" +#endif + left_shift_bits = 64 - nr_copy_bits; + right_shift_bits = 64 - bit_sz; + + *value = (num << left_shift_bits) >> right_shift_bits; + + return 0; +} + +static int btf_dump_bitfield_check_zero(struct btf_dump *d, + const struct btf_type *t, + const void *data, + __u8 bits_offset, + __u8 bit_sz) +{ + __u64 check_num; + int err; + + err = btf_dump_get_bitfield_value(d, t, data, bits_offset, bit_sz, &check_num); + if (err) + return err; + if (check_num == 0) + return -ENODATA; + return 0; +} + +static int btf_dump_bitfield_data(struct btf_dump *d, + const struct btf_type *t, + const void *data, + __u8 bits_offset, + __u8 bit_sz) +{ + __u64 print_num; + int err; + + err = btf_dump_get_bitfield_value(d, t, data, bits_offset, bit_sz, &print_num); + if (err) + return err; + + btf_dump_type_values(d, "0x%llx", (unsigned long long)print_num); + + return 0; +} + +/* ints, floats and ptrs */ +static int btf_dump_base_type_check_zero(struct btf_dump *d, + const struct btf_type *t, + __u32 id, + const void *data) +{ + static __u8 bytecmp[16] = {}; + int nr_bytes; + + /* For pointer types, pointer size is not defined on a per-type basis. + * On dump creation however, we store the pointer size. + */ + if (btf_kind(t) == BTF_KIND_PTR) + nr_bytes = d->ptr_sz; + else + nr_bytes = t->size; + + if (nr_bytes < 1 || nr_bytes > 16) { + pr_warn("unexpected size %d for id [%u]\n", nr_bytes, id); + return -EINVAL; + } + + if (memcmp(data, bytecmp, nr_bytes) == 0) + return -ENODATA; + return 0; +} + +static bool ptr_is_aligned(const struct btf *btf, __u32 type_id, + const void *data) +{ + int alignment = btf__align_of(btf, type_id); + + if (alignment == 0) + return false; + + return ((uintptr_t)data) % alignment == 0; +} + +static int btf_dump_int_data(struct btf_dump *d, + const struct btf_type *t, + __u32 type_id, + const void *data, + __u8 bits_offset) +{ + __u8 encoding = btf_int_encoding(t); + bool sign = encoding & BTF_INT_SIGNED; + char buf[16] __attribute__((aligned(16))); + int sz = t->size; + + if (sz == 0 || sz > sizeof(buf)) { + pr_warn("unexpected size %d for id [%u]\n", sz, type_id); + return -EINVAL; + } + + /* handle packed int data - accesses of integers not aligned on + * int boundaries can cause problems on some platforms. 
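+ * For example, a __u64 member of a struct declared with
+ * __attribute__((packed)) may start at an odd address; copying it into the
+ * 16-byte-aligned local buf first keeps the direct loads in the switch below
+ * safe on strict-alignment architectures.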
+ */ + if (!ptr_is_aligned(d->btf, type_id, data)) { + memcpy(buf, data, sz); + data = buf; + } + + switch (sz) { + case 16: { + const __u64 *ints = data; + __u64 lsi, msi; + + /* avoid use of __int128 as some 32-bit platforms do not + * support it. + */ +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + lsi = ints[0]; + msi = ints[1]; +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + lsi = ints[1]; + msi = ints[0]; +#else +# error "Unrecognized __BYTE_ORDER__" +#endif + if (msi == 0) + btf_dump_type_values(d, "0x%llx", (unsigned long long)lsi); + else + btf_dump_type_values(d, "0x%llx%016llx", (unsigned long long)msi, + (unsigned long long)lsi); + break; + } + case 8: + if (sign) + btf_dump_type_values(d, "%lld", *(long long *)data); + else + btf_dump_type_values(d, "%llu", *(unsigned long long *)data); + break; + case 4: + if (sign) + btf_dump_type_values(d, "%d", *(__s32 *)data); + else + btf_dump_type_values(d, "%u", *(__u32 *)data); + break; + case 2: + if (sign) + btf_dump_type_values(d, "%d", *(__s16 *)data); + else + btf_dump_type_values(d, "%u", *(__u16 *)data); + break; + case 1: + if (d->typed_dump->is_array_char) { + /* check for null terminator */ + if (d->typed_dump->is_array_terminated) + break; + if (*(char *)data == '\0') { + d->typed_dump->is_array_terminated = true; + break; + } + if (isprint(*(char *)data)) { + btf_dump_type_values(d, "'%c'", *(char *)data); + break; + } + } + if (sign) + btf_dump_type_values(d, "%d", *(__s8 *)data); + else + btf_dump_type_values(d, "%u", *(__u8 *)data); + break; + default: + pr_warn("unexpected sz %d for id [%u]\n", sz, type_id); + return -EINVAL; + } + return 0; +} + +union float_data { + long double ld; + double d; + float f; +}; + +static int btf_dump_float_data(struct btf_dump *d, + const struct btf_type *t, + __u32 type_id, + const void *data) +{ + const union float_data *flp = data; + union float_data fl; + int sz = t->size; + + /* handle unaligned data; copy to local union */ + if (!ptr_is_aligned(d->btf, type_id, data)) { + memcpy(&fl, data, sz); + flp = &fl; + } + + switch (sz) { + case 16: + btf_dump_type_values(d, "%Lf", flp->ld); + break; + case 8: + btf_dump_type_values(d, "%lf", flp->d); + break; + case 4: + btf_dump_type_values(d, "%f", flp->f); + break; + default: + pr_warn("unexpected size %d for id [%u]\n", sz, type_id); + return -EINVAL; + } + return 0; +} + +static int btf_dump_var_data(struct btf_dump *d, + const struct btf_type *v, + __u32 id, + const void *data) +{ + enum btf_func_linkage linkage = btf_var(v)->linkage; + const struct btf_type *t; + const char *l; + __u32 type_id; + + switch (linkage) { + case BTF_FUNC_STATIC: + l = "static "; + break; + case BTF_FUNC_EXTERN: + l = "extern "; + break; + case BTF_FUNC_GLOBAL: + default: + l = ""; + break; + } + + /* format of output here is [linkage] [type] [varname] = (type)value, + * for example "static int cpu_profile_flip = (int)1" + */ + btf_dump_printf(d, "%s", l); + type_id = v->type; + t = btf__type_by_id(d->btf, type_id); + btf_dump_emit_type_cast(d, type_id, false); + btf_dump_printf(d, " %s = ", btf_name_of(d, v->name_off)); + return btf_dump_dump_type_data(d, NULL, t, type_id, data, 0, 0); +} + +static int btf_dump_array_data(struct btf_dump *d, + const struct btf_type *t, + __u32 id, + const void *data) +{ + const struct btf_array *array = btf_array(t); + const struct btf_type *elem_type; + __u32 i, elem_type_id; + __s64 elem_size; + bool is_array_member; + + elem_type_id = array->type; + elem_type = skip_mods_and_typedefs(d->btf, elem_type_id, NULL); + 
elem_size = btf__resolve_size(d->btf, elem_type_id); + if (elem_size <= 0) { + pr_warn("unexpected elem size %zd for array type [%u]\n", + (ssize_t)elem_size, id); + return -EINVAL; + } + + if (btf_is_int(elem_type)) { + /* + * BTF_INT_CHAR encoding never seems to be set for + * char arrays, so if size is 1 and element is + * printable as a char, we'll do that. + */ + if (elem_size == 1) + d->typed_dump->is_array_char = true; + } + + /* note that we increment depth before calling btf_dump_print() below; + * this is intentional. btf_dump_data_newline() will not print a + * newline for depth 0 (since this leaves us with trailing newlines + * at the end of typed display), so depth is incremented first. + * For similar reasons, we decrement depth before showing the closing + * parenthesis. + */ + d->typed_dump->depth++; + btf_dump_printf(d, "[%s", btf_dump_data_newline(d)); + + /* may be a multidimensional array, so store current "is array member" + * status so we can restore it correctly later. + */ + is_array_member = d->typed_dump->is_array_member; + d->typed_dump->is_array_member = true; + for (i = 0; i < array->nelems; i++, data += elem_size) { + if (d->typed_dump->is_array_terminated) + break; + btf_dump_dump_type_data(d, NULL, elem_type, elem_type_id, data, 0, 0); + } + d->typed_dump->is_array_member = is_array_member; + d->typed_dump->depth--; + btf_dump_data_pfx(d); + btf_dump_type_values(d, "]"); + + return 0; +} + +static int btf_dump_struct_data(struct btf_dump *d, + const struct btf_type *t, + __u32 id, + const void *data) +{ + const struct btf_member *m = btf_members(t); + __u16 n = btf_vlen(t); + int i, err; + + /* note that we increment depth before calling btf_dump_print() below; + * this is intentional. btf_dump_data_newline() will not print a + * newline for depth 0 (since this leaves us with trailing newlines + * at the end of typed display), so depth is incremented first. + * For similar reasons, we decrement depth before showing the closing + * parenthesis. 
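+ *
+ * The non-compact output produced here has roughly the following shape
+ * (a sketch; exact indentation follows indent_str and indent_lvl):
+ *
+ *   (struct foo){
+ *       .a = (int)1,
+ *       .b = (struct bar){
+ *           .x = (int)2,
+ *       },
+ *   }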
+ */ + d->typed_dump->depth++; + btf_dump_printf(d, "{%s", btf_dump_data_newline(d)); + + for (i = 0; i < n; i++, m++) { + const struct btf_type *mtype; + const char *mname; + __u32 moffset; + __u8 bit_sz; + + mtype = btf__type_by_id(d->btf, m->type); + mname = btf_name_of(d, m->name_off); + moffset = btf_member_bit_offset(t, i); + + bit_sz = btf_member_bitfield_size(t, i); + err = btf_dump_dump_type_data(d, mname, mtype, m->type, data + moffset / 8, + moffset % 8, bit_sz); + if (err < 0) + return err; + } + d->typed_dump->depth--; + btf_dump_data_pfx(d); + btf_dump_type_values(d, "}"); + return err; +} + +union ptr_data { + unsigned int p; + unsigned long long lp; +}; + +static int btf_dump_ptr_data(struct btf_dump *d, + const struct btf_type *t, + __u32 id, + const void *data) +{ + if (ptr_is_aligned(d->btf, id, data) && d->ptr_sz == sizeof(void *)) { + btf_dump_type_values(d, "%p", *(void **)data); + } else { + union ptr_data pt; + + memcpy(&pt, data, d->ptr_sz); + if (d->ptr_sz == 4) + btf_dump_type_values(d, "0x%x", pt.p); + else + btf_dump_type_values(d, "0x%llx", pt.lp); + } + return 0; +} + +static int btf_dump_get_enum_value(struct btf_dump *d, + const struct btf_type *t, + const void *data, + __u32 id, + __s64 *value) +{ + /* handle unaligned enum value */ + if (!ptr_is_aligned(d->btf, id, data)) { + __u64 val; + int err; + + err = btf_dump_get_bitfield_value(d, t, data, 0, 0, &val); + if (err) + return err; + *value = (__s64)val; + return 0; + } + + switch (t->size) { + case 8: + *value = *(__s64 *)data; + return 0; + case 4: + *value = *(__s32 *)data; + return 0; + case 2: + *value = *(__s16 *)data; + return 0; + case 1: + *value = *(__s8 *)data; + return 0; + default: + pr_warn("unexpected size %d for enum, id:[%u]\n", t->size, id); + return -EINVAL; + } +} + +static int btf_dump_enum_data(struct btf_dump *d, + const struct btf_type *t, + __u32 id, + const void *data) +{ + const struct btf_enum *e; + __s64 value; + int i, err; + + err = btf_dump_get_enum_value(d, t, data, id, &value); + if (err) + return err; + + for (i = 0, e = btf_enum(t); i < btf_vlen(t); i++, e++) { + if (value != e->val) + continue; + btf_dump_type_values(d, "%s", btf_name_of(d, e->name_off)); + return 0; + } + + btf_dump_type_values(d, "%d", value); + return 0; +} + +static int btf_dump_datasec_data(struct btf_dump *d, + const struct btf_type *t, + __u32 id, + const void *data) +{ + const struct btf_var_secinfo *vsi; + const struct btf_type *var; + __u32 i; + int err; + + btf_dump_type_values(d, "SEC(\"%s\") ", btf_name_of(d, t->name_off)); + + for (i = 0, vsi = btf_var_secinfos(t); i < btf_vlen(t); i++, vsi++) { + var = btf__type_by_id(d->btf, vsi->type); + err = btf_dump_dump_type_data(d, NULL, var, vsi->type, data + vsi->offset, 0, 0); + if (err < 0) + return err; + btf_dump_printf(d, ";"); + } + return 0; +} + +/* return size of type, or if base type overflows, return -E2BIG. */ +static int btf_dump_type_data_check_overflow(struct btf_dump *d, + const struct btf_type *t, + __u32 id, + const void *data, + __u8 bits_offset) +{ + __s64 size = btf__resolve_size(d->btf, id); + + if (size < 0 || size >= INT_MAX) { + pr_warn("unexpected size [%zu] for id [%u]\n", + (size_t)size, id); + return -EINVAL; + } + + /* Only do overflow checking for base types; we do not want to + * avoid showing part of a struct, union or array, even if we + * do not have enough data to show the full object. 
By + * restricting overflow checking to base types we can ensure + * that partial display succeeds, while avoiding overflowing + * and using bogus data for display. + */ + t = skip_mods_and_typedefs(d->btf, id, NULL); + if (!t) { + pr_warn("unexpected error skipping mods/typedefs for id [%u]\n", + id); + return -EINVAL; + } + + switch (btf_kind(t)) { + case BTF_KIND_INT: + case BTF_KIND_FLOAT: + case BTF_KIND_PTR: + case BTF_KIND_ENUM: + if (data + bits_offset / 8 + size > d->typed_dump->data_end) + return -E2BIG; + break; + default: + break; + } + return (int)size; +} + +static int btf_dump_type_data_check_zero(struct btf_dump *d, + const struct btf_type *t, + __u32 id, + const void *data, + __u8 bits_offset, + __u8 bit_sz) +{ + __s64 value; + int i, err; + + /* toplevel exceptions; we show zero values if + * - we ask for them (emit_zeros) + * - if we are at top-level so we see "struct empty { }" + * - or if we are an array member and the array is non-empty and + * not a char array; we don't want to be in a situation where we + * have an integer array 0, 1, 0, 1 and only show non-zero values. + * If the array contains zeroes only, or is a char array starting + * with a '\0', the array-level check_zero() will prevent showing it; + * we are concerned with determining zero value at the array member + * level here. + */ + if (d->typed_dump->emit_zeroes || d->typed_dump->depth == 0 || + (d->typed_dump->is_array_member && + !d->typed_dump->is_array_char)) + return 0; + + t = skip_mods_and_typedefs(d->btf, id, NULL); + + switch (btf_kind(t)) { + case BTF_KIND_INT: + if (bit_sz) + return btf_dump_bitfield_check_zero(d, t, data, bits_offset, bit_sz); + return btf_dump_base_type_check_zero(d, t, id, data); + case BTF_KIND_FLOAT: + case BTF_KIND_PTR: + return btf_dump_base_type_check_zero(d, t, id, data); + case BTF_KIND_ARRAY: { + const struct btf_array *array = btf_array(t); + const struct btf_type *elem_type; + __u32 elem_type_id, elem_size; + bool ischar; + + elem_type_id = array->type; + elem_size = btf__resolve_size(d->btf, elem_type_id); + elem_type = skip_mods_and_typedefs(d->btf, elem_type_id, NULL); + + ischar = btf_is_int(elem_type) && elem_size == 1; + + /* check all elements; if _any_ element is nonzero, all + * of array is displayed. We make an exception however + * for char arrays where the first element is 0; these + * are considered zeroed also, even if later elements are + * non-zero because the string is terminated. + */ + for (i = 0; i < array->nelems; i++) { + if (i == 0 && ischar && *(char *)data == 0) + return -ENODATA; + err = btf_dump_type_data_check_zero(d, elem_type, + elem_type_id, + data + + (i * elem_size), + bits_offset, 0); + if (err != -ENODATA) + return err; + } + return -ENODATA; + } + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: { + const struct btf_member *m = btf_members(t); + __u16 n = btf_vlen(t); + + /* if any struct/union member is non-zero, the struct/union + * is considered non-zero and dumped. + */ + for (i = 0; i < n; i++, m++) { + const struct btf_type *mtype; + __u32 moffset; + + mtype = btf__type_by_id(d->btf, m->type); + moffset = btf_member_bit_offset(t, i); + + /* btf_int_bits() does not store member bitfield size; + * bitfield size needs to be stored here so int display + * of member can retrieve it. 
+ */ + bit_sz = btf_member_bitfield_size(t, i); + err = btf_dump_type_data_check_zero(d, mtype, m->type, data + moffset / 8, + moffset % 8, bit_sz); + if (err != ENODATA) + return err; + } + return -ENODATA; + } + case BTF_KIND_ENUM: + err = btf_dump_get_enum_value(d, t, data, id, &value); + if (err) + return err; + if (value == 0) + return -ENODATA; + return 0; + default: + return 0; + } +} + +/* returns size of data dumped, or error. */ +static int btf_dump_dump_type_data(struct btf_dump *d, + const char *fname, + const struct btf_type *t, + __u32 id, + const void *data, + __u8 bits_offset, + __u8 bit_sz) +{ + int size, err = 0; + + size = btf_dump_type_data_check_overflow(d, t, id, data, bits_offset); + if (size < 0) + return size; + err = btf_dump_type_data_check_zero(d, t, id, data, bits_offset, bit_sz); + if (err) { + /* zeroed data is expected and not an error, so simply skip + * dumping such data. Record other errors however. + */ + if (err == -ENODATA) + return size; + return err; + } + btf_dump_data_pfx(d); + + if (!d->typed_dump->skip_names) { + if (fname && strlen(fname) > 0) + btf_dump_printf(d, ".%s = ", fname); + btf_dump_emit_type_cast(d, id, true); + } + + t = skip_mods_and_typedefs(d->btf, id, NULL); + + switch (btf_kind(t)) { + case BTF_KIND_UNKN: + case BTF_KIND_FWD: + case BTF_KIND_FUNC: + case BTF_KIND_FUNC_PROTO: + case BTF_KIND_DECL_TAG: + err = btf_dump_unsupported_data(d, t, id); + break; + case BTF_KIND_INT: + if (bit_sz) + err = btf_dump_bitfield_data(d, t, data, bits_offset, bit_sz); + else + err = btf_dump_int_data(d, t, id, data, bits_offset); + break; + case BTF_KIND_FLOAT: + err = btf_dump_float_data(d, t, id, data); + break; + case BTF_KIND_PTR: + err = btf_dump_ptr_data(d, t, id, data); + break; + case BTF_KIND_ARRAY: + err = btf_dump_array_data(d, t, id, data); + break; + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + err = btf_dump_struct_data(d, t, id, data); + break; + case BTF_KIND_ENUM: + /* handle bitfield and int enum values */ + if (bit_sz) { + __u64 print_num; + __s64 enum_val; + + err = btf_dump_get_bitfield_value(d, t, data, bits_offset, bit_sz, + &print_num); + if (err) + break; + enum_val = (__s64)print_num; + err = btf_dump_enum_data(d, t, id, &enum_val); + } else + err = btf_dump_enum_data(d, t, id, data); + break; + case BTF_KIND_VAR: + err = btf_dump_var_data(d, t, id, data); + break; + case BTF_KIND_DATASEC: + err = btf_dump_datasec_data(d, t, id, data); + break; + default: + pr_warn("unexpected kind [%u] for id [%u]\n", + BTF_INFO_KIND(t->info), id); + return -EINVAL; + } + if (err < 0) + return err; + return size; +} + +int btf_dump__dump_type_data(struct btf_dump *d, __u32 id, + const void *data, size_t data_sz, + const struct btf_dump_type_data_opts *opts) +{ + struct btf_dump_data typed_dump = {}; + const struct btf_type *t; + int ret; + + if (!OPTS_VALID(opts, btf_dump_type_data_opts)) + return libbpf_err(-EINVAL); + + t = btf__type_by_id(d->btf, id); + if (!t) + return libbpf_err(-ENOENT); + + d->typed_dump = &typed_dump; + d->typed_dump->data_end = data + data_sz; + d->typed_dump->indent_lvl = OPTS_GET(opts, indent_level, 0); + + /* default indent string is a tab */ + if (!opts->indent_str) + d->typed_dump->indent_str[0] = '\t'; + else + libbpf_strlcpy(d->typed_dump->indent_str, opts->indent_str, + sizeof(d->typed_dump->indent_str)); + + d->typed_dump->compact = OPTS_GET(opts, compact, false); + d->typed_dump->skip_names = OPTS_GET(opts, skip_names, false); + d->typed_dump->emit_zeroes = OPTS_GET(opts, emit_zeroes, false); + + 
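/* options are in place; recursively render the value itself, starting at
+ * depth 0 and with no field name. A minimal caller-side sketch (my_dump,
+ * type_id and obj are placeholders, not defined in this file):
+ *
+ *   LIBBPF_OPTS(btf_dump_type_data_opts, dopts, .compact = true);
+ *   err = btf_dump__dump_type_data(my_dump, type_id, &obj, sizeof(obj), &dopts);
+ */
+ 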
ret = btf_dump_dump_type_data(d, NULL, t, id, data, 0, 0); + + d->typed_dump = NULL; + + return libbpf_err(ret); +} diff --git a/pkg/plugin/lib/common/libbpf/_src/gen_loader.c b/pkg/plugin/lib/common/libbpf/_src/gen_loader.c new file mode 100644 index 000000000..927745b08 --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/gen_loader.c @@ -0,0 +1,1121 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +/* Copyright (c) 2021 Facebook */ +#include +#include +#include +#include +#include +#include +#include "btf.h" +#include "bpf.h" +#include "libbpf.h" +#include "libbpf_internal.h" +#include "hashmap.h" +#include "bpf_gen_internal.h" +#include "skel_internal.h" +#include + +#define MAX_USED_MAPS 64 +#define MAX_USED_PROGS 32 +#define MAX_KFUNC_DESCS 256 +#define MAX_FD_ARRAY_SZ (MAX_USED_MAPS + MAX_KFUNC_DESCS) + +/* The following structure describes the stack layout of the loader program. + * In addition R6 contains the pointer to context. + * R7 contains the result of the last sys_bpf command (typically error or FD). + * R9 contains the result of the last sys_close command. + * + * Naming convention: + * ctx - bpf program context + * stack - bpf program stack + * blob - bpf_attr-s, strings, insns, map data. + * All the bytes that loader prog will use for read/write. + */ +struct loader_stack { + __u32 btf_fd; + __u32 inner_map_fd; + __u32 prog_fd[MAX_USED_PROGS]; +}; + +#define stack_off(field) \ + (__s16)(-sizeof(struct loader_stack) + offsetof(struct loader_stack, field)) + +#define attr_field(attr, field) (attr + offsetof(union bpf_attr, field)) + +static int blob_fd_array_off(struct bpf_gen *gen, int index) +{ + return gen->fd_array + index * sizeof(int); +} + +static int realloc_insn_buf(struct bpf_gen *gen, __u32 size) +{ + size_t off = gen->insn_cur - gen->insn_start; + void *insn_start; + + if (gen->error) + return gen->error; + if (size > INT32_MAX || off + size > INT32_MAX) { + gen->error = -ERANGE; + return -ERANGE; + } + insn_start = realloc(gen->insn_start, off + size); + if (!insn_start) { + gen->error = -ENOMEM; + free(gen->insn_start); + gen->insn_start = NULL; + return -ENOMEM; + } + gen->insn_start = insn_start; + gen->insn_cur = insn_start + off; + return 0; +} + +static int realloc_data_buf(struct bpf_gen *gen, __u32 size) +{ + size_t off = gen->data_cur - gen->data_start; + void *data_start; + + if (gen->error) + return gen->error; + if (size > INT32_MAX || off + size > INT32_MAX) { + gen->error = -ERANGE; + return -ERANGE; + } + data_start = realloc(gen->data_start, off + size); + if (!data_start) { + gen->error = -ENOMEM; + free(gen->data_start); + gen->data_start = NULL; + return -ENOMEM; + } + gen->data_start = data_start; + gen->data_cur = data_start + off; + return 0; +} + +static void emit(struct bpf_gen *gen, struct bpf_insn insn) +{ + if (realloc_insn_buf(gen, sizeof(insn))) + return; + memcpy(gen->insn_cur, &insn, sizeof(insn)); + gen->insn_cur += sizeof(insn); +} + +static void emit2(struct bpf_gen *gen, struct bpf_insn insn1, struct bpf_insn insn2) +{ + emit(gen, insn1); + emit(gen, insn2); +} + +static int add_data(struct bpf_gen *gen, const void *data, __u32 size); +static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off); + +void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps) +{ + size_t stack_sz = sizeof(struct loader_stack), nr_progs_sz; + int i; + + gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int)); + gen->log_level = log_level; + /* save ctx pointer into R6 */ + emit(gen, 
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1)); + + /* bzero stack */ + emit(gen, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10)); + emit(gen, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -stack_sz)); + emit(gen, BPF_MOV64_IMM(BPF_REG_2, stack_sz)); + emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0)); + emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel)); + + /* amount of stack actually used, only used to calculate iterations, not stack offset */ + nr_progs_sz = offsetof(struct loader_stack, prog_fd[nr_progs]); + /* jump over cleanup code */ + emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, + /* size of cleanup code below (including map fd cleanup) */ + (nr_progs_sz / 4) * 3 + 2 + + /* 6 insns for emit_sys_close_blob, + * 6 insns for debug_regs in emit_sys_close_blob + */ + nr_maps * (6 + (gen->log_level ? 6 : 0)))); + + /* remember the label where all error branches will jump to */ + gen->cleanup_label = gen->insn_cur - gen->insn_start; + /* emit cleanup code: close all temp FDs */ + for (i = 0; i < nr_progs_sz; i += 4) { + emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -stack_sz + i)); + emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0, 1)); + emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close)); + } + for (i = 0; i < nr_maps; i++) + emit_sys_close_blob(gen, blob_fd_array_off(gen, i)); + /* R7 contains the error code from sys_bpf. Copy it into R0 and exit. */ + emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_7)); + emit(gen, BPF_EXIT_INSN()); +} + +static int add_data(struct bpf_gen *gen, const void *data, __u32 size) +{ + __u32 size8 = roundup(size, 8); + __u64 zero = 0; + void *prev; + + if (realloc_data_buf(gen, size8)) + return 0; + prev = gen->data_cur; + if (data) { + memcpy(gen->data_cur, data, size); + memcpy(gen->data_cur + size, &zero, size8 - size); + } else { + memset(gen->data_cur, 0, size8); + } + gen->data_cur += size8; + return prev - gen->data_start; +} + +/* Get index for map_fd/btf_fd slot in reserved fd_array, or in data relative + * to start of fd_array. Caller can decide if it is usable or not. 
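+ *
+ * Rough layout of the reserved fd_array blob, as used by the two helpers
+ * below:
+ *   slots [0, MAX_USED_MAPS)                              - map FDs
+ *   slots [MAX_USED_MAPS, MAX_USED_MAPS + MAX_KFUNC_DESCS) - kfunc module BTF FDs
+ * Once all MAX_KFUNC_DESCS slots are used, further BTF FDs are placed in
+ * freshly added data-blob slots outside the reserved area.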
+ */ +static int add_map_fd(struct bpf_gen *gen) +{ + if (gen->nr_maps == MAX_USED_MAPS) { + pr_warn("Total maps exceeds %d\n", MAX_USED_MAPS); + gen->error = -E2BIG; + return 0; + } + return gen->nr_maps++; +} + +static int add_kfunc_btf_fd(struct bpf_gen *gen) +{ + int cur; + + if (gen->nr_fd_array == MAX_KFUNC_DESCS) { + cur = add_data(gen, NULL, sizeof(int)); + return (cur - gen->fd_array) / sizeof(int); + } + return MAX_USED_MAPS + gen->nr_fd_array++; +} + +static int insn_bytes_to_bpf_size(__u32 sz) +{ + switch (sz) { + case 8: return BPF_DW; + case 4: return BPF_W; + case 2: return BPF_H; + case 1: return BPF_B; + default: return -1; + } +} + +/* *(u64 *)(blob + off) = (u64)(void *)(blob + data) */ +static void emit_rel_store(struct bpf_gen *gen, int off, int data) +{ + emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE, + 0, 0, 0, data)); + emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE, + 0, 0, 0, off)); + emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0)); +} + +static void move_blob2blob(struct bpf_gen *gen, int off, int size, int blob_off) +{ + emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE, + 0, 0, 0, blob_off)); + emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_2, 0)); + emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE, + 0, 0, 0, off)); + emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0)); +} + +static void move_blob2ctx(struct bpf_gen *gen, int ctx_off, int size, int blob_off) +{ + emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE, + 0, 0, 0, blob_off)); + emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_1, 0)); + emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off)); +} + +static void move_ctx2blob(struct bpf_gen *gen, int off, int size, int ctx_off, + bool check_non_zero) +{ + emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_6, ctx_off)); + if (check_non_zero) + /* If value in ctx is zero don't update the blob. 
+ * For example: when ctx->map.max_entries == 0, keep default max_entries from bpf.c + */ + emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3)); + emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE, + 0, 0, 0, off)); + emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0)); +} + +static void move_stack2blob(struct bpf_gen *gen, int off, int size, int stack_off) +{ + emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off)); + emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE, + 0, 0, 0, off)); + emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0)); +} + +static void move_stack2ctx(struct bpf_gen *gen, int ctx_off, int size, int stack_off) +{ + emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off)); + emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off)); +} + +static void emit_sys_bpf(struct bpf_gen *gen, int cmd, int attr, int attr_size) +{ + emit(gen, BPF_MOV64_IMM(BPF_REG_1, cmd)); + emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE, + 0, 0, 0, attr)); + emit(gen, BPF_MOV64_IMM(BPF_REG_3, attr_size)); + emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_bpf)); + /* remember the result in R7 */ + emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0)); +} + +static bool is_simm16(__s64 value) +{ + return value == (__s64)(__s16)value; +} + +static void emit_check_err(struct bpf_gen *gen) +{ + __s64 off = -(gen->insn_cur - gen->insn_start - gen->cleanup_label) / 8 - 1; + + /* R7 contains result of last sys_bpf command. + * if (R7 < 0) goto cleanup; + */ + if (is_simm16(off)) { + emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, off)); + } else { + gen->error = -ERANGE; + emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, -1)); + } +} + +/* reg1 and reg2 should not be R1 - R5. They can be R0, R6 - R10 */ +static void emit_debug(struct bpf_gen *gen, int reg1, int reg2, + const char *fmt, va_list args) +{ + char buf[1024]; + int addr, len, ret; + + if (!gen->log_level) + return; + ret = vsnprintf(buf, sizeof(buf), fmt, args); + if (ret < 1024 - 7 && reg1 >= 0 && reg2 < 0) + /* The special case to accommodate common debug_ret(): + * to avoid specifying BPF_REG_7 and adding " r=%%d" to + * prints explicitly. + */ + strcat(buf, " r=%d"); + len = strlen(buf) + 1; + addr = add_data(gen, buf, len); + + emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE, + 0, 0, 0, addr)); + emit(gen, BPF_MOV64_IMM(BPF_REG_2, len)); + if (reg1 >= 0) + emit(gen, BPF_MOV64_REG(BPF_REG_3, reg1)); + if (reg2 >= 0) + emit(gen, BPF_MOV64_REG(BPF_REG_4, reg2)); + emit(gen, BPF_EMIT_CALL(BPF_FUNC_trace_printk)); +} + +static void debug_regs(struct bpf_gen *gen, int reg1, int reg2, const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + emit_debug(gen, reg1, reg2, fmt, args); + va_end(args); +} + +static void debug_ret(struct bpf_gen *gen, const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + emit_debug(gen, BPF_REG_7, -1, fmt, args); + va_end(args); +} + +static void __emit_sys_close(struct bpf_gen *gen) +{ + emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0, + /* 2 is the number of the following insns + * * 6 is additional insns in debug_regs + */ + 2 + (gen->log_level ? 
6 : 0))); + emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_1)); + emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close)); + debug_regs(gen, BPF_REG_9, BPF_REG_0, "close(%%d) = %%d"); +} + +static void emit_sys_close_stack(struct bpf_gen *gen, int stack_off) +{ + emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, stack_off)); + __emit_sys_close(gen); +} + +static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off) +{ + emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE, + 0, 0, 0, blob_off)); + emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0)); + __emit_sys_close(gen); +} + +int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps) +{ + int i; + + if (nr_progs < gen->nr_progs || nr_maps != gen->nr_maps) { + pr_warn("nr_progs %d/%d nr_maps %d/%d mismatch\n", + nr_progs, gen->nr_progs, nr_maps, gen->nr_maps); + gen->error = -EFAULT; + return gen->error; + } + emit_sys_close_stack(gen, stack_off(btf_fd)); + for (i = 0; i < gen->nr_progs; i++) + move_stack2ctx(gen, + sizeof(struct bpf_loader_ctx) + + sizeof(struct bpf_map_desc) * gen->nr_maps + + sizeof(struct bpf_prog_desc) * i + + offsetof(struct bpf_prog_desc, prog_fd), 4, + stack_off(prog_fd[i])); + for (i = 0; i < gen->nr_maps; i++) + move_blob2ctx(gen, + sizeof(struct bpf_loader_ctx) + + sizeof(struct bpf_map_desc) * i + + offsetof(struct bpf_map_desc, map_fd), 4, + blob_fd_array_off(gen, i)); + emit(gen, BPF_MOV64_IMM(BPF_REG_0, 0)); + emit(gen, BPF_EXIT_INSN()); + pr_debug("gen: finish %d\n", gen->error); + if (!gen->error) { + struct gen_loader_opts *opts = gen->opts; + + opts->insns = gen->insn_start; + opts->insns_sz = gen->insn_cur - gen->insn_start; + opts->data = gen->data_start; + opts->data_sz = gen->data_cur - gen->data_start; + } + return gen->error; +} + +void bpf_gen__free(struct bpf_gen *gen) +{ + if (!gen) + return; + free(gen->data_start); + free(gen->insn_start); + free(gen); +} + +void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data, + __u32 btf_raw_size) +{ + int attr_size = offsetofend(union bpf_attr, btf_log_level); + int btf_data, btf_load_attr; + union bpf_attr attr; + + memset(&attr, 0, attr_size); + pr_debug("gen: load_btf: size %d\n", btf_raw_size); + btf_data = add_data(gen, btf_raw_data, btf_raw_size); + + attr.btf_size = btf_raw_size; + btf_load_attr = add_data(gen, &attr, attr_size); + + /* populate union bpf_attr with user provided log details */ + move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_level), 4, + offsetof(struct bpf_loader_ctx, log_level), false); + move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_size), 4, + offsetof(struct bpf_loader_ctx, log_size), false); + move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_buf), 8, + offsetof(struct bpf_loader_ctx, log_buf), false); + /* populate union bpf_attr with a pointer to the BTF data */ + emit_rel_store(gen, attr_field(btf_load_attr, btf), btf_data); + /* emit BTF_LOAD command */ + emit_sys_bpf(gen, BPF_BTF_LOAD, btf_load_attr, attr_size); + debug_ret(gen, "btf_load size %d", btf_raw_size); + emit_check_err(gen); + /* remember btf_fd in the stack, if successful */ + emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, stack_off(btf_fd))); +} + +void bpf_gen__map_create(struct bpf_gen *gen, + enum bpf_map_type map_type, + const char *map_name, + __u32 key_size, __u32 value_size, __u32 max_entries, + struct bpf_map_create_opts *map_attr, int map_idx) +{ + int attr_size = offsetofend(union bpf_attr, map_extra); + bool close_inner_map_fd = false; + int map_create_attr, idx; + union bpf_attr attr; + + 
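/* stage a BPF_MAP_CREATE attribute in the data blob from the parameters
+ * passed in by the caller; FD-valued fields (btf_fd, inner_map_fd) are not
+ * known at generation time and are patched in from the loader stack below.
+ */
+ 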
memset(&attr, 0, attr_size); + attr.map_type = map_type; + attr.key_size = key_size; + attr.value_size = value_size; + attr.map_flags = map_attr->map_flags; + attr.map_extra = map_attr->map_extra; + if (map_name) + libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name)); + attr.numa_node = map_attr->numa_node; + attr.map_ifindex = map_attr->map_ifindex; + attr.max_entries = max_entries; + attr.btf_key_type_id = map_attr->btf_key_type_id; + attr.btf_value_type_id = map_attr->btf_value_type_id; + + pr_debug("gen: map_create: %s idx %d type %d value_type_id %d\n", + attr.map_name, map_idx, map_type, attr.btf_value_type_id); + + map_create_attr = add_data(gen, &attr, attr_size); + if (attr.btf_value_type_id) + /* populate union bpf_attr with btf_fd saved in the stack earlier */ + move_stack2blob(gen, attr_field(map_create_attr, btf_fd), 4, + stack_off(btf_fd)); + switch (attr.map_type) { + case BPF_MAP_TYPE_ARRAY_OF_MAPS: + case BPF_MAP_TYPE_HASH_OF_MAPS: + move_stack2blob(gen, attr_field(map_create_attr, inner_map_fd), 4, + stack_off(inner_map_fd)); + close_inner_map_fd = true; + break; + default: + break; + } + /* conditionally update max_entries */ + if (map_idx >= 0) + move_ctx2blob(gen, attr_field(map_create_attr, max_entries), 4, + sizeof(struct bpf_loader_ctx) + + sizeof(struct bpf_map_desc) * map_idx + + offsetof(struct bpf_map_desc, max_entries), + true /* check that max_entries != 0 */); + /* emit MAP_CREATE command */ + emit_sys_bpf(gen, BPF_MAP_CREATE, map_create_attr, attr_size); + debug_ret(gen, "map_create %s idx %d type %d value_size %d value_btf_id %d", + attr.map_name, map_idx, map_type, value_size, + attr.btf_value_type_id); + emit_check_err(gen); + /* remember map_fd in the stack, if successful */ + if (map_idx < 0) { + /* This bpf_gen__map_create() function is called with map_idx >= 0 + * for all maps that libbpf loading logic tracks. + * It's called with -1 to create an inner map. 
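+ * The FD saved on the stack here is consumed as attr.inner_map_fd when the
+ * enclosing map-in-map is created, and is closed again at that point (see
+ * close_inner_map_fd above).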
+ */ + emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, + stack_off(inner_map_fd))); + } else if (map_idx != gen->nr_maps) { + gen->error = -EDOM; /* internal bug */ + return; + } else { + /* add_map_fd does gen->nr_maps++ */ + idx = add_map_fd(gen); + emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE, + 0, 0, 0, blob_fd_array_off(gen, idx))); + emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7, 0)); + } + if (close_inner_map_fd) + emit_sys_close_stack(gen, stack_off(inner_map_fd)); +} + +void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *attach_name, + enum bpf_attach_type type) +{ + const char *prefix; + int kind, ret; + + btf_get_kernel_prefix_kind(type, &prefix, &kind); + gen->attach_kind = kind; + ret = snprintf(gen->attach_target, sizeof(gen->attach_target), "%s%s", + prefix, attach_name); + if (ret == sizeof(gen->attach_target)) + gen->error = -ENOSPC; +} + +static void emit_find_attach_target(struct bpf_gen *gen) +{ + int name, len = strlen(gen->attach_target) + 1; + + pr_debug("gen: find_attach_tgt %s %d\n", gen->attach_target, gen->attach_kind); + name = add_data(gen, gen->attach_target, len); + + emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE, + 0, 0, 0, name)); + emit(gen, BPF_MOV64_IMM(BPF_REG_2, len)); + emit(gen, BPF_MOV64_IMM(BPF_REG_3, gen->attach_kind)); + emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0)); + emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind)); + emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0)); + debug_ret(gen, "find_by_name_kind(%s,%d)", + gen->attach_target, gen->attach_kind); + emit_check_err(gen); + /* if successful, btf_id is in lower 32-bit of R7 and + * btf_obj_fd is in upper 32-bit + */ +} + +void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak, + bool is_typeless, int kind, int insn_idx) +{ + struct ksym_relo_desc *relo; + + relo = libbpf_reallocarray(gen->relos, gen->relo_cnt + 1, sizeof(*relo)); + if (!relo) { + gen->error = -ENOMEM; + return; + } + gen->relos = relo; + relo += gen->relo_cnt; + relo->name = name; + relo->is_weak = is_weak; + relo->is_typeless = is_typeless; + relo->kind = kind; + relo->insn_idx = insn_idx; + gen->relo_cnt++; +} + +/* returns existing ksym_desc with ref incremented, or inserts a new one */ +static struct ksym_desc *get_ksym_desc(struct bpf_gen *gen, struct ksym_relo_desc *relo) +{ + struct ksym_desc *kdesc; + int i; + + for (i = 0; i < gen->nr_ksyms; i++) { + if (!strcmp(gen->ksyms[i].name, relo->name)) { + gen->ksyms[i].ref++; + return &gen->ksyms[i]; + } + } + kdesc = libbpf_reallocarray(gen->ksyms, gen->nr_ksyms + 1, sizeof(*kdesc)); + if (!kdesc) { + gen->error = -ENOMEM; + return NULL; + } + gen->ksyms = kdesc; + kdesc = &gen->ksyms[gen->nr_ksyms++]; + kdesc->name = relo->name; + kdesc->kind = relo->kind; + kdesc->ref = 1; + kdesc->off = 0; + kdesc->insn = 0; + return kdesc; +} + +/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7} + * Returns result in BPF_REG_7 + */ +static void emit_bpf_find_by_name_kind(struct bpf_gen *gen, struct ksym_relo_desc *relo) +{ + int name_off, len = strlen(relo->name) + 1; + + name_off = add_data(gen, relo->name, len); + emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE, + 0, 0, 0, name_off)); + emit(gen, BPF_MOV64_IMM(BPF_REG_2, len)); + emit(gen, BPF_MOV64_IMM(BPF_REG_3, relo->kind)); + emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0)); + emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind)); + emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0)); + debug_ret(gen, "find_by_name_kind(%s,%d)", 
relo->name, relo->kind); +} + +/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7} + * Returns result in BPF_REG_7 + * Returns u64 symbol addr in BPF_REG_9 + */ +static void emit_bpf_kallsyms_lookup_name(struct bpf_gen *gen, struct ksym_relo_desc *relo) +{ + int name_off, len = strlen(relo->name) + 1, res_off; + + name_off = add_data(gen, relo->name, len); + res_off = add_data(gen, NULL, 8); /* res is u64 */ + emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE, + 0, 0, 0, name_off)); + emit(gen, BPF_MOV64_IMM(BPF_REG_2, len)); + emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0)); + emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_4, BPF_PSEUDO_MAP_IDX_VALUE, + 0, 0, 0, res_off)); + emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_4)); + emit(gen, BPF_EMIT_CALL(BPF_FUNC_kallsyms_lookup_name)); + emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0)); + emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0)); + debug_ret(gen, "kallsyms_lookup_name(%s,%d)", relo->name, relo->kind); +} + +/* Expects: + * BPF_REG_8 - pointer to instruction + * + * We need to reuse BTF fd for same symbol otherwise each relocation takes a new + * index, while kernel limits total kfunc BTFs to 256. For duplicate symbols, + * this would mean a new BTF fd index for each entry. By pairing symbol name + * with index, we get the insn->imm, insn->off pairing that kernel uses for + * kfunc_tab, which becomes the effective limit even though all of them may + * share same index in fd_array (such that kfunc_btf_tab has 1 element). + */ +static void emit_relo_kfunc_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn) +{ + struct ksym_desc *kdesc; + int btf_fd_idx; + + kdesc = get_ksym_desc(gen, relo); + if (!kdesc) + return; + /* try to copy from existing bpf_insn */ + if (kdesc->ref > 1) { + move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4, + kdesc->insn + offsetof(struct bpf_insn, imm)); + move_blob2blob(gen, insn + offsetof(struct bpf_insn, off), 2, + kdesc->insn + offsetof(struct bpf_insn, off)); + goto log; + } + /* remember insn offset, so we can copy BTF ID and FD later */ + kdesc->insn = insn; + emit_bpf_find_by_name_kind(gen, relo); + if (!relo->is_weak) + emit_check_err(gen); + /* get index in fd_array to store BTF FD at */ + btf_fd_idx = add_kfunc_btf_fd(gen); + if (btf_fd_idx > INT16_MAX) { + pr_warn("BTF fd off %d for kfunc %s exceeds INT16_MAX, cannot process relocation\n", + btf_fd_idx, relo->name); + gen->error = -E2BIG; + return; + } + kdesc->off = btf_fd_idx; + /* jump to success case */ + emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3)); + /* set value for imm, off as 0 */ + emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0)); + emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0)); + /* skip success case for ret < 0 */ + emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 10)); + /* store btf_id into insn[insn_idx].imm */ + emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm))); + /* obtain fd in BPF_REG_9 */ + emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_7)); + emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32)); + /* jump to fd_array store if fd denotes module BTF */ + emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0, 2)); + /* set the default value for off */ + emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0)); + /* skip BTF fd store for vmlinux BTF */ + emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 4)); + /* load fd_array slot pointer */ + emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE, + 0, 0, 0, blob_fd_array_off(gen, 
btf_fd_idx))); + /* store BTF fd in slot */ + emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_9, 0)); + /* store index into insn[insn_idx].off */ + emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), btf_fd_idx)); +log: + if (!gen->log_level) + return; + emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8, + offsetof(struct bpf_insn, imm))); + emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8, + offsetof(struct bpf_insn, off))); + debug_regs(gen, BPF_REG_7, BPF_REG_9, " func (%s:count=%d): imm: %%d, off: %%d", + relo->name, kdesc->ref); + emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE, + 0, 0, 0, blob_fd_array_off(gen, kdesc->off))); + emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_0, 0)); + debug_regs(gen, BPF_REG_9, -1, " func (%s:count=%d): btf_fd", + relo->name, kdesc->ref); +} + +static void emit_ksym_relo_log(struct bpf_gen *gen, struct ksym_relo_desc *relo, + int ref) +{ + if (!gen->log_level) + return; + emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8, + offsetof(struct bpf_insn, imm))); + emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8, sizeof(struct bpf_insn) + + offsetof(struct bpf_insn, imm))); + debug_regs(gen, BPF_REG_7, BPF_REG_9, " var t=%d w=%d (%s:count=%d): imm[0]: %%d, imm[1]: %%d", + relo->is_typeless, relo->is_weak, relo->name, ref); + emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code))); + debug_regs(gen, BPF_REG_9, -1, " var t=%d w=%d (%s:count=%d): insn.reg", + relo->is_typeless, relo->is_weak, relo->name, ref); +} + +/* Expects: + * BPF_REG_8 - pointer to instruction + */ +static void emit_relo_ksym_typeless(struct bpf_gen *gen, + struct ksym_relo_desc *relo, int insn) +{ + struct ksym_desc *kdesc; + + kdesc = get_ksym_desc(gen, relo); + if (!kdesc) + return; + /* try to copy from existing ldimm64 insn */ + if (kdesc->ref > 1) { + move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4, + kdesc->insn + offsetof(struct bpf_insn, imm)); + move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4, + kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)); + goto log; + } + /* remember insn offset, so we can copy ksym addr later */ + kdesc->insn = insn; + /* skip typeless ksym_desc in fd closing loop in cleanup_relos */ + kdesc->typeless = true; + emit_bpf_kallsyms_lookup_name(gen, relo); + emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_7, -ENOENT, 1)); + emit_check_err(gen); + /* store lower half of addr into insn[insn_idx].imm */ + emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9, offsetof(struct bpf_insn, imm))); + /* store upper half of addr into insn[insn_idx + 1].imm */ + emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32)); + emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9, + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm))); +log: + emit_ksym_relo_log(gen, relo, kdesc->ref); +} + +static __u32 src_reg_mask(void) +{ +#if defined(__LITTLE_ENDIAN_BITFIELD) + return 0x0f; /* src_reg,dst_reg,... */ +#elif defined(__BIG_ENDIAN_BITFIELD) + return 0xf0; /* dst_reg,src_reg,... 
*/ +#else +#error "Unsupported bit endianness, cannot proceed" +#endif +} + +/* Expects: + * BPF_REG_8 - pointer to instruction + */ +static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn) +{ + struct ksym_desc *kdesc; + __u32 reg_mask; + + kdesc = get_ksym_desc(gen, relo); + if (!kdesc) + return; + /* try to copy from existing ldimm64 insn */ + if (kdesc->ref > 1) { + move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4, + kdesc->insn + offsetof(struct bpf_insn, imm)); + move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4, + kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)); + /* jump over src_reg adjustment if imm is not 0, reuse BPF_REG_0 from move_blob2blob */ + emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3)); + goto clear_src_reg; + } + /* remember insn offset, so we can copy BTF ID and FD later */ + kdesc->insn = insn; + emit_bpf_find_by_name_kind(gen, relo); + if (!relo->is_weak) + emit_check_err(gen); + /* jump to success case */ + emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3)); + /* set values for insn[insn_idx].imm, insn[insn_idx + 1].imm as 0 */ + emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0)); + emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 0)); + /* skip success case for ret < 0 */ + emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 4)); + /* store btf_id into insn[insn_idx].imm */ + emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm))); + /* store btf_obj_fd into insn[insn_idx + 1].imm */ + emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32)); + emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm))); + /* skip src_reg adjustment */ + emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3)); +clear_src_reg: + /* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */ + reg_mask = src_reg_mask(); + emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code))); + emit(gen, BPF_ALU32_IMM(BPF_AND, BPF_REG_9, reg_mask)); + emit(gen, BPF_STX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, offsetofend(struct bpf_insn, code))); + + emit_ksym_relo_log(gen, relo, kdesc->ref); +} + +void bpf_gen__record_relo_core(struct bpf_gen *gen, + const struct bpf_core_relo *core_relo) +{ + struct bpf_core_relo *relos; + + relos = libbpf_reallocarray(gen->core_relos, gen->core_relo_cnt + 1, sizeof(*relos)); + if (!relos) { + gen->error = -ENOMEM; + return; + } + gen->core_relos = relos; + relos += gen->core_relo_cnt; + memcpy(relos, core_relo, sizeof(*relos)); + gen->core_relo_cnt++; +} + +static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns) +{ + int insn; + + pr_debug("gen: emit_relo (%d): %s at %d\n", relo->kind, relo->name, relo->insn_idx); + insn = insns + sizeof(struct bpf_insn) * relo->insn_idx; + emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_8, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, insn)); + switch (relo->kind) { + case BTF_KIND_VAR: + if (relo->is_typeless) + emit_relo_ksym_typeless(gen, relo, insn); + else + emit_relo_ksym_btf(gen, relo, insn); + break; + case BTF_KIND_FUNC: + emit_relo_kfunc_btf(gen, relo, insn); + break; + default: + pr_warn("Unknown relocation kind '%d'\n", relo->kind); + gen->error = -EDOM; + return; + } +} + +static void emit_relos(struct bpf_gen *gen, int insns) +{ + int i; + + for (i = 0; i < gen->relo_cnt; i++) + emit_relo(gen, gen->relos + i, insns); +} + 
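
Editorial note, not part of the vendored gen_loader.c: the bpf_gen__* emit helpers above are invoked by libbpf itself once a "gen loader" is attached to a bpf_object, as in bpftool's light-skeleton flow. The sketch below shows roughly how a consumer drives that path; the object file path and function name are illustrative assumptions, and the exact flow should be checked against the libbpf version being vendored.

#include <errno.h>
#include <bpf/libbpf.h>

/* Produce loader instructions + data blob instead of loading into the kernel.
 * Assumes the public bpf_object__gen_loader()/gen_loader_opts API of this
 * libbpf generation; "minimal.bpf.o" is a placeholder object file.
 */
static int make_loader(const char *path)
{
	DECLARE_LIBBPF_OPTS(gen_loader_opts, gen);
	struct bpf_object *obj;
	int err;

	obj = bpf_object__open_file(path, NULL);
	err = libbpf_get_error(obj);
	if (err)
		return err;

	/* Switch the object into gen-loader mode. */
	err = bpf_object__gen_loader(obj, &gen);
	if (err)
		goto out;

	/* With a gen loader attached, load() records the syscall sequence as
	 * BPF instructions plus a data blob rather than talking to the kernel.
	 */
	err = bpf_object__load(obj);
	if (err)
		goto out;

	/* gen.insns/gen.insns_sz and gen.data/gen.data_sz now hold the loader
	 * program and its blob, ready to be embedded into a light skeleton.
	 */
out:
	bpf_object__close(obj);
	return err;
}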
+static void cleanup_core_relo(struct bpf_gen *gen) +{ + if (!gen->core_relo_cnt) + return; + free(gen->core_relos); + gen->core_relo_cnt = 0; + gen->core_relos = NULL; +} + +static void cleanup_relos(struct bpf_gen *gen, int insns) +{ + int i, insn; + + for (i = 0; i < gen->nr_ksyms; i++) { + /* only close fds for typed ksyms and kfuncs */ + if (gen->ksyms[i].kind == BTF_KIND_VAR && !gen->ksyms[i].typeless) { + /* close fd recorded in insn[insn_idx + 1].imm */ + insn = gen->ksyms[i].insn; + insn += sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm); + emit_sys_close_blob(gen, insn); + } else if (gen->ksyms[i].kind == BTF_KIND_FUNC) { + emit_sys_close_blob(gen, blob_fd_array_off(gen, gen->ksyms[i].off)); + if (gen->ksyms[i].off < MAX_FD_ARRAY_SZ) + gen->nr_fd_array--; + } + } + if (gen->nr_ksyms) { + free(gen->ksyms); + gen->nr_ksyms = 0; + gen->ksyms = NULL; + } + if (gen->relo_cnt) { + free(gen->relos); + gen->relo_cnt = 0; + gen->relos = NULL; + } + cleanup_core_relo(gen); +} + +void bpf_gen__prog_load(struct bpf_gen *gen, + enum bpf_prog_type prog_type, const char *prog_name, + const char *license, struct bpf_insn *insns, size_t insn_cnt, + struct bpf_prog_load_opts *load_attr, int prog_idx) +{ + int prog_load_attr, license_off, insns_off, func_info, line_info, core_relos; + int attr_size = offsetofend(union bpf_attr, core_relo_rec_size); + union bpf_attr attr; + + memset(&attr, 0, attr_size); + pr_debug("gen: prog_load: type %d insns_cnt %zd progi_idx %d\n", + prog_type, insn_cnt, prog_idx); + /* add license string to blob of bytes */ + license_off = add_data(gen, license, strlen(license) + 1); + /* add insns to blob of bytes */ + insns_off = add_data(gen, insns, insn_cnt * sizeof(struct bpf_insn)); + + attr.prog_type = prog_type; + attr.expected_attach_type = load_attr->expected_attach_type; + attr.attach_btf_id = load_attr->attach_btf_id; + attr.prog_ifindex = load_attr->prog_ifindex; + attr.kern_version = 0; + attr.insn_cnt = (__u32)insn_cnt; + attr.prog_flags = load_attr->prog_flags; + + attr.func_info_rec_size = load_attr->func_info_rec_size; + attr.func_info_cnt = load_attr->func_info_cnt; + func_info = add_data(gen, load_attr->func_info, + attr.func_info_cnt * attr.func_info_rec_size); + + attr.line_info_rec_size = load_attr->line_info_rec_size; + attr.line_info_cnt = load_attr->line_info_cnt; + line_info = add_data(gen, load_attr->line_info, + attr.line_info_cnt * attr.line_info_rec_size); + + attr.core_relo_rec_size = sizeof(struct bpf_core_relo); + attr.core_relo_cnt = gen->core_relo_cnt; + core_relos = add_data(gen, gen->core_relos, + attr.core_relo_cnt * attr.core_relo_rec_size); + + libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name)); + prog_load_attr = add_data(gen, &attr, attr_size); + + /* populate union bpf_attr with a pointer to license */ + emit_rel_store(gen, attr_field(prog_load_attr, license), license_off); + + /* populate union bpf_attr with a pointer to instructions */ + emit_rel_store(gen, attr_field(prog_load_attr, insns), insns_off); + + /* populate union bpf_attr with a pointer to func_info */ + emit_rel_store(gen, attr_field(prog_load_attr, func_info), func_info); + + /* populate union bpf_attr with a pointer to line_info */ + emit_rel_store(gen, attr_field(prog_load_attr, line_info), line_info); + + /* populate union bpf_attr with a pointer to core_relos */ + emit_rel_store(gen, attr_field(prog_load_attr, core_relos), core_relos); + + /* populate union bpf_attr fd_array with a pointer to data where map_fds are saved */ + 
emit_rel_store(gen, attr_field(prog_load_attr, fd_array), gen->fd_array); + + /* populate union bpf_attr with user provided log details */ + move_ctx2blob(gen, attr_field(prog_load_attr, log_level), 4, + offsetof(struct bpf_loader_ctx, log_level), false); + move_ctx2blob(gen, attr_field(prog_load_attr, log_size), 4, + offsetof(struct bpf_loader_ctx, log_size), false); + move_ctx2blob(gen, attr_field(prog_load_attr, log_buf), 8, + offsetof(struct bpf_loader_ctx, log_buf), false); + /* populate union bpf_attr with btf_fd saved in the stack earlier */ + move_stack2blob(gen, attr_field(prog_load_attr, prog_btf_fd), 4, + stack_off(btf_fd)); + if (gen->attach_kind) { + emit_find_attach_target(gen); + /* populate union bpf_attr with btf_id and btf_obj_fd found by helper */ + emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE, + 0, 0, 0, prog_load_attr)); + emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, + offsetof(union bpf_attr, attach_btf_id))); + emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32)); + emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, + offsetof(union bpf_attr, attach_btf_obj_fd))); + } + emit_relos(gen, insns_off); + /* emit PROG_LOAD command */ + emit_sys_bpf(gen, BPF_PROG_LOAD, prog_load_attr, attr_size); + debug_ret(gen, "prog_load %s insn_cnt %d", attr.prog_name, attr.insn_cnt); + /* successful or not, close btf module FDs used in extern ksyms and attach_btf_obj_fd */ + cleanup_relos(gen, insns_off); + if (gen->attach_kind) { + emit_sys_close_blob(gen, + attr_field(prog_load_attr, attach_btf_obj_fd)); + gen->attach_kind = 0; + } + emit_check_err(gen); + /* remember prog_fd in the stack, if successful */ + emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, + stack_off(prog_fd[gen->nr_progs]))); + gen->nr_progs++; +} + +void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue, + __u32 value_size) +{ + int attr_size = offsetofend(union bpf_attr, flags); + int map_update_attr, value, key; + union bpf_attr attr; + int zero = 0; + + memset(&attr, 0, attr_size); + pr_debug("gen: map_update_elem: idx %d\n", map_idx); + + value = add_data(gen, pvalue, value_size); + key = add_data(gen, &zero, sizeof(zero)); + + /* if (map_desc[map_idx].initial_value) { + * if (ctx->flags & BPF_SKEL_KERNEL) + * bpf_probe_read_kernel(value, value_size, initial_value); + * else + * bpf_copy_from_user(value, value_size, initial_value); + * } + */ + emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, + sizeof(struct bpf_loader_ctx) + + sizeof(struct bpf_map_desc) * map_idx + + offsetof(struct bpf_map_desc, initial_value))); + emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 8)); + emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE, + 0, 0, 0, value)); + emit(gen, BPF_MOV64_IMM(BPF_REG_2, value_size)); + emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, + offsetof(struct bpf_loader_ctx, flags))); + emit(gen, BPF_JMP_IMM(BPF_JSET, BPF_REG_0, BPF_SKEL_KERNEL, 2)); + emit(gen, BPF_EMIT_CALL(BPF_FUNC_copy_from_user)); + emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 1)); + emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel)); + + map_update_attr = add_data(gen, &attr, attr_size); + move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4, + blob_fd_array_off(gen, map_idx)); + emit_rel_store(gen, attr_field(map_update_attr, key), key); + emit_rel_store(gen, attr_field(map_update_attr, value), value); + /* emit MAP_UPDATE_ELEM command */ + emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size); + debug_ret(gen, "update_elem idx %d value_size %d", 
map_idx, value_size); + emit_check_err(gen); +} + +void bpf_gen__populate_outer_map(struct bpf_gen *gen, int outer_map_idx, int slot, + int inner_map_idx) +{ + int attr_size = offsetofend(union bpf_attr, flags); + int map_update_attr, key; + union bpf_attr attr; + + memset(&attr, 0, attr_size); + pr_debug("gen: populate_outer_map: outer %d key %d inner %d\n", + outer_map_idx, slot, inner_map_idx); + + key = add_data(gen, &slot, sizeof(slot)); + + map_update_attr = add_data(gen, &attr, attr_size); + move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4, + blob_fd_array_off(gen, outer_map_idx)); + emit_rel_store(gen, attr_field(map_update_attr, key), key); + emit_rel_store(gen, attr_field(map_update_attr, value), + blob_fd_array_off(gen, inner_map_idx)); + + /* emit MAP_UPDATE_ELEM command */ + emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size); + debug_ret(gen, "populate_outer_map outer %d key %d inner %d", + outer_map_idx, slot, inner_map_idx); + emit_check_err(gen); +} + +void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx) +{ + int attr_size = offsetofend(union bpf_attr, map_fd); + int map_freeze_attr; + union bpf_attr attr; + + memset(&attr, 0, attr_size); + pr_debug("gen: map_freeze: idx %d\n", map_idx); + map_freeze_attr = add_data(gen, &attr, attr_size); + move_blob2blob(gen, attr_field(map_freeze_attr, map_fd), 4, + blob_fd_array_off(gen, map_idx)); + /* emit MAP_FREEZE command */ + emit_sys_bpf(gen, BPF_MAP_FREEZE, map_freeze_attr, attr_size); + debug_ret(gen, "map_freeze"); + emit_check_err(gen); +} diff --git a/pkg/plugin/lib/common/libbpf/_src/hashmap.c b/pkg/plugin/lib/common/libbpf/_src/hashmap.c new file mode 100644 index 000000000..aeb09c288 --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/hashmap.c @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * Generic non-thread safe hash map implementation. 
+ * + * Copyright (c) 2019 Facebook + */ +#include +#include +#include +#include +#include +#include "hashmap.h" + +/* make sure libbpf doesn't use kernel-only integer typedefs */ +#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64 + +/* prevent accidental re-addition of reallocarray() */ +#pragma GCC poison reallocarray + +/* start with 4 buckets */ +#define HASHMAP_MIN_CAP_BITS 2 + +static void hashmap_add_entry(struct hashmap_entry **pprev, + struct hashmap_entry *entry) +{ + entry->next = *pprev; + *pprev = entry; +} + +static void hashmap_del_entry(struct hashmap_entry **pprev, + struct hashmap_entry *entry) +{ + *pprev = entry->next; + entry->next = NULL; +} + +void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn, + hashmap_equal_fn equal_fn, void *ctx) +{ + map->hash_fn = hash_fn; + map->equal_fn = equal_fn; + map->ctx = ctx; + + map->buckets = NULL; + map->cap = 0; + map->cap_bits = 0; + map->sz = 0; +} + +struct hashmap *hashmap__new(hashmap_hash_fn hash_fn, + hashmap_equal_fn equal_fn, + void *ctx) +{ + struct hashmap *map = malloc(sizeof(struct hashmap)); + + if (!map) + return ERR_PTR(-ENOMEM); + hashmap__init(map, hash_fn, equal_fn, ctx); + return map; +} + +void hashmap__clear(struct hashmap *map) +{ + struct hashmap_entry *cur, *tmp; + size_t bkt; + + hashmap__for_each_entry_safe(map, cur, tmp, bkt) { + free(cur); + } + free(map->buckets); + map->buckets = NULL; + map->cap = map->cap_bits = map->sz = 0; +} + +void hashmap__free(struct hashmap *map) +{ + if (IS_ERR_OR_NULL(map)) + return; + + hashmap__clear(map); + free(map); +} + +size_t hashmap__size(const struct hashmap *map) +{ + return map->sz; +} + +size_t hashmap__capacity(const struct hashmap *map) +{ + return map->cap; +} + +static bool hashmap_needs_to_grow(struct hashmap *map) +{ + /* grow if empty or more than 75% filled */ + return (map->cap == 0) || ((map->sz + 1) * 4 / 3 > map->cap); +} + +static int hashmap_grow(struct hashmap *map) +{ + struct hashmap_entry **new_buckets; + struct hashmap_entry *cur, *tmp; + size_t new_cap_bits, new_cap; + size_t h, bkt; + + new_cap_bits = map->cap_bits + 1; + if (new_cap_bits < HASHMAP_MIN_CAP_BITS) + new_cap_bits = HASHMAP_MIN_CAP_BITS; + + new_cap = 1UL << new_cap_bits; + new_buckets = calloc(new_cap, sizeof(new_buckets[0])); + if (!new_buckets) + return -ENOMEM; + + hashmap__for_each_entry_safe(map, cur, tmp, bkt) { + h = hash_bits(map->hash_fn(cur->key, map->ctx), new_cap_bits); + hashmap_add_entry(&new_buckets[h], cur); + } + + map->cap = new_cap; + map->cap_bits = new_cap_bits; + free(map->buckets); + map->buckets = new_buckets; + + return 0; +} + +static bool hashmap_find_entry(const struct hashmap *map, + const void *key, size_t hash, + struct hashmap_entry ***pprev, + struct hashmap_entry **entry) +{ + struct hashmap_entry *cur, **prev_ptr; + + if (!map->buckets) + return false; + + for (prev_ptr = &map->buckets[hash], cur = *prev_ptr; + cur; + prev_ptr = &cur->next, cur = cur->next) { + if (map->equal_fn(cur->key, key, map->ctx)) { + if (pprev) + *pprev = prev_ptr; + *entry = cur; + return true; + } + } + + return false; +} + +int hashmap__insert(struct hashmap *map, const void *key, void *value, + enum hashmap_insert_strategy strategy, + const void **old_key, void **old_value) +{ + struct hashmap_entry *entry; + size_t h; + int err; + + if (old_key) + *old_key = NULL; + if (old_value) + *old_value = NULL; + + h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits); + if (strategy != HASHMAP_APPEND && + hashmap_find_entry(map, key, h, NULL, &entry)) { + 
if (old_key) + *old_key = entry->key; + if (old_value) + *old_value = entry->value; + + if (strategy == HASHMAP_SET || strategy == HASHMAP_UPDATE) { + entry->key = key; + entry->value = value; + return 0; + } else if (strategy == HASHMAP_ADD) { + return -EEXIST; + } + } + + if (strategy == HASHMAP_UPDATE) + return -ENOENT; + + if (hashmap_needs_to_grow(map)) { + err = hashmap_grow(map); + if (err) + return err; + h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits); + } + + entry = malloc(sizeof(struct hashmap_entry)); + if (!entry) + return -ENOMEM; + + entry->key = key; + entry->value = value; + hashmap_add_entry(&map->buckets[h], entry); + map->sz++; + + return 0; +} + +bool hashmap__find(const struct hashmap *map, const void *key, void **value) +{ + struct hashmap_entry *entry; + size_t h; + + h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits); + if (!hashmap_find_entry(map, key, h, NULL, &entry)) + return false; + + if (value) + *value = entry->value; + return true; +} + +bool hashmap__delete(struct hashmap *map, const void *key, + const void **old_key, void **old_value) +{ + struct hashmap_entry **pprev, *entry; + size_t h; + + h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits); + if (!hashmap_find_entry(map, key, h, &pprev, &entry)) + return false; + + if (old_key) + *old_key = entry->key; + if (old_value) + *old_value = entry->value; + + hashmap_del_entry(pprev, entry); + free(entry); + map->sz--; + + return true; +} diff --git a/pkg/plugin/lib/common/libbpf/_src/hashmap.h b/pkg/plugin/lib/common/libbpf/_src/hashmap.h new file mode 100644 index 000000000..10a4c4cd1 --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/hashmap.h @@ -0,0 +1,195 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ + +/* + * Generic non-thread safe hash map implementation. 
+ * + * Copyright (c) 2019 Facebook + */ +#ifndef __LIBBPF_HASHMAP_H +#define __LIBBPF_HASHMAP_H + +#include +#include +#include + +static inline size_t hash_bits(size_t h, int bits) +{ + /* shuffle bits and return requested number of upper bits */ + if (bits == 0) + return 0; + +#if (__SIZEOF_SIZE_T__ == __SIZEOF_LONG_LONG__) + /* LP64 case */ + return (h * 11400714819323198485llu) >> (__SIZEOF_LONG_LONG__ * 8 - bits); +#elif (__SIZEOF_SIZE_T__ <= __SIZEOF_LONG__) + return (h * 2654435769lu) >> (__SIZEOF_LONG__ * 8 - bits); +#else +# error "Unsupported size_t size" +#endif +} + +/* generic C-string hashing function */ +static inline size_t str_hash(const char *s) +{ + size_t h = 0; + + while (*s) { + h = h * 31 + *s; + s++; + } + return h; +} + +typedef size_t (*hashmap_hash_fn)(const void *key, void *ctx); +typedef bool (*hashmap_equal_fn)(const void *key1, const void *key2, void *ctx); + +struct hashmap_entry { + const void *key; + void *value; + struct hashmap_entry *next; +}; + +struct hashmap { + hashmap_hash_fn hash_fn; + hashmap_equal_fn equal_fn; + void *ctx; + + struct hashmap_entry **buckets; + size_t cap; + size_t cap_bits; + size_t sz; +}; + +#define HASHMAP_INIT(hash_fn, equal_fn, ctx) { \ + .hash_fn = (hash_fn), \ + .equal_fn = (equal_fn), \ + .ctx = (ctx), \ + .buckets = NULL, \ + .cap = 0, \ + .cap_bits = 0, \ + .sz = 0, \ +} + +void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn, + hashmap_equal_fn equal_fn, void *ctx); +struct hashmap *hashmap__new(hashmap_hash_fn hash_fn, + hashmap_equal_fn equal_fn, + void *ctx); +void hashmap__clear(struct hashmap *map); +void hashmap__free(struct hashmap *map); + +size_t hashmap__size(const struct hashmap *map); +size_t hashmap__capacity(const struct hashmap *map); + +/* + * Hashmap insertion strategy: + * - HASHMAP_ADD - only add key/value if key doesn't exist yet; + * - HASHMAP_SET - add key/value pair if key doesn't exist yet; otherwise, + * update value; + * - HASHMAP_UPDATE - update value, if key already exists; otherwise, do + * nothing and return -ENOENT; + * - HASHMAP_APPEND - always add key/value pair, even if key already exists. + * This turns hashmap into a multimap by allowing multiple values to be + * associated with the same key. Most useful read API for such hashmap is + * hashmap__for_each_key_entry() iteration. If hashmap__find() is still + * used, it will return last inserted key/value entry (first in a bucket + * chain). + */ +enum hashmap_insert_strategy { + HASHMAP_ADD, + HASHMAP_SET, + HASHMAP_UPDATE, + HASHMAP_APPEND, +}; + +/* + * hashmap__insert() adds key/value entry w/ various semantics, depending on + * provided strategy value. If a given key/value pair replaced already + * existing key/value pair, both old key and old value will be returned + * through old_key and old_value to allow calling code do proper memory + * management. 
+ */ +int hashmap__insert(struct hashmap *map, const void *key, void *value, + enum hashmap_insert_strategy strategy, + const void **old_key, void **old_value); + +static inline int hashmap__add(struct hashmap *map, + const void *key, void *value) +{ + return hashmap__insert(map, key, value, HASHMAP_ADD, NULL, NULL); +} + +static inline int hashmap__set(struct hashmap *map, + const void *key, void *value, + const void **old_key, void **old_value) +{ + return hashmap__insert(map, key, value, HASHMAP_SET, + old_key, old_value); +} + +static inline int hashmap__update(struct hashmap *map, + const void *key, void *value, + const void **old_key, void **old_value) +{ + return hashmap__insert(map, key, value, HASHMAP_UPDATE, + old_key, old_value); +} + +static inline int hashmap__append(struct hashmap *map, + const void *key, void *value) +{ + return hashmap__insert(map, key, value, HASHMAP_APPEND, NULL, NULL); +} + +bool hashmap__delete(struct hashmap *map, const void *key, + const void **old_key, void **old_value); + +bool hashmap__find(const struct hashmap *map, const void *key, void **value); + +/* + * hashmap__for_each_entry - iterate over all entries in hashmap + * @map: hashmap to iterate + * @cur: struct hashmap_entry * used as a loop cursor + * @bkt: integer used as a bucket loop cursor + */ +#define hashmap__for_each_entry(map, cur, bkt) \ + for (bkt = 0; bkt < map->cap; bkt++) \ + for (cur = map->buckets[bkt]; cur; cur = cur->next) + +/* + * hashmap__for_each_entry_safe - iterate over all entries in hashmap, safe + * against removals + * @map: hashmap to iterate + * @cur: struct hashmap_entry * used as a loop cursor + * @tmp: struct hashmap_entry * used as a temporary next cursor storage + * @bkt: integer used as a bucket loop cursor + */ +#define hashmap__for_each_entry_safe(map, cur, tmp, bkt) \ + for (bkt = 0; bkt < map->cap; bkt++) \ + for (cur = map->buckets[bkt]; \ + cur && ({tmp = cur->next; true; }); \ + cur = tmp) + +/* + * hashmap__for_each_key_entry - iterate over entries associated with given key + * @map: hashmap to iterate + * @cur: struct hashmap_entry * used as a loop cursor + * @key: key to iterate entries for + */ +#define hashmap__for_each_key_entry(map, cur, _key) \ + for (cur = map->buckets \ + ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \ + : NULL; \ + cur; \ + cur = cur->next) \ + if (map->equal_fn(cur->key, (_key), map->ctx)) + +#define hashmap__for_each_key_entry_safe(map, cur, tmp, _key) \ + for (cur = map->buckets \ + ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \ + : NULL; \ + cur && ({ tmp = cur->next; true; }); \ + cur = tmp) \ + if (map->equal_fn(cur->key, (_key), map->ctx)) + +#endif /* __LIBBPF_HASHMAP_H */ diff --git a/pkg/plugin/lib/common/libbpf/_src/libbpf.c b/pkg/plugin/lib/common/libbpf/_src/libbpf.c new file mode 100644 index 000000000..73a5192de --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/libbpf.c @@ -0,0 +1,12903 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * Common eBPF ELF object loading operations. + * + * Copyright (C) 2013-2015 Alexei Starovoitov + * Copyright (C) 2015 Wang Nan + * Copyright (C) 2015 Huawei Inc. + * Copyright (C) 2017 Nicira, Inc. + * Copyright (C) 2019 Isovalent, Inc. 
+ */ + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "libbpf.h" +#include "bpf.h" +#include "btf.h" +#include "str_error.h" +#include "libbpf_internal.h" +#include "hashmap.h" +#include "bpf_gen_internal.h" + +#ifndef BPF_FS_MAGIC +#define BPF_FS_MAGIC 0xcafe4a11 +#endif + +#define BPF_INSN_SZ (sizeof(struct bpf_insn)) + +/* vsprintf() in __base_pr() uses nonliteral format string. It may break + * compilation if user enables corresponding warning. Disable it explicitly. + */ +#pragma GCC diagnostic ignored "-Wformat-nonliteral" + +#define __printf(a, b) __attribute__((format(printf, a, b))) + +static struct bpf_map *bpf_object__add_map(struct bpf_object *obj); +static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog); + +static int __base_pr(enum libbpf_print_level level, const char *format, + va_list args) +{ + if (level == LIBBPF_DEBUG) + return 0; + + return vfprintf(stderr, format, args); +} + +static libbpf_print_fn_t __libbpf_pr = __base_pr; + +libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn) +{ + libbpf_print_fn_t old_print_fn = __libbpf_pr; + + __libbpf_pr = fn; + return old_print_fn; +} + +__printf(2, 3) +void libbpf_print(enum libbpf_print_level level, const char *format, ...) +{ + va_list args; + + if (!__libbpf_pr) + return; + + va_start(args, format); + __libbpf_pr(level, format, args); + va_end(args); +} + +static void pr_perm_msg(int err) +{ + struct rlimit limit; + char buf[100]; + + if (err != -EPERM || geteuid() != 0) + return; + + err = getrlimit(RLIMIT_MEMLOCK, &limit); + if (err) + return; + + if (limit.rlim_cur == RLIM_INFINITY) + return; + + if (limit.rlim_cur < 1024) + snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur); + else if (limit.rlim_cur < 1024*1024) + snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024); + else + snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024)); + + pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n", + buf); +} + +#define STRERR_BUFSIZE 128 + +/* Copied from tools/perf/util/util.h */ +#ifndef zfree +# define zfree(ptr) ({ free(*ptr); *ptr = NULL; }) +#endif + +#ifndef zclose +# define zclose(fd) ({ \ + int ___err = 0; \ + if ((fd) >= 0) \ + ___err = close((fd)); \ + fd = -1; \ + ___err; }) +#endif + +static inline __u64 ptr_to_u64(const void *ptr) +{ + return (__u64) (unsigned long) ptr; +} + +/* this goes away in libbpf 1.0 */ +enum libbpf_strict_mode libbpf_mode = LIBBPF_STRICT_NONE; + +int libbpf_set_strict_mode(enum libbpf_strict_mode mode) +{ + libbpf_mode = mode; + return 0; +} + +__u32 libbpf_major_version(void) +{ + return LIBBPF_MAJOR_VERSION; +} + +__u32 libbpf_minor_version(void) +{ + return LIBBPF_MINOR_VERSION; +} + +const char *libbpf_version_string(void) +{ +#define __S(X) #X +#define _S(X) __S(X) + return "v" _S(LIBBPF_MAJOR_VERSION) "." 
_S(LIBBPF_MINOR_VERSION); +#undef _S +#undef __S +} + +enum reloc_type { + RELO_LD64, + RELO_CALL, + RELO_DATA, + RELO_EXTERN_VAR, + RELO_EXTERN_FUNC, + RELO_SUBPROG_ADDR, + RELO_CORE, +}; + +struct reloc_desc { + enum reloc_type type; + int insn_idx; + union { + const struct bpf_core_relo *core_relo; /* used when type == RELO_CORE */ + struct { + int map_idx; + int sym_off; + }; + }; +}; + +/* stored as sec_def->cookie for all libbpf-supported SEC()s */ +enum sec_def_flags { + SEC_NONE = 0, + /* expected_attach_type is optional, if kernel doesn't support that */ + SEC_EXP_ATTACH_OPT = 1, + /* legacy, only used by libbpf_get_type_names() and + * libbpf_attach_type_by_name(), not used by libbpf itself at all. + * This used to be associated with cgroup (and few other) BPF programs + * that were attachable through BPF_PROG_ATTACH command. Pretty + * meaningless nowadays, though. + */ + SEC_ATTACHABLE = 2, + SEC_ATTACHABLE_OPT = SEC_ATTACHABLE | SEC_EXP_ATTACH_OPT, + /* attachment target is specified through BTF ID in either kernel or + * other BPF program's BTF object */ + SEC_ATTACH_BTF = 4, + /* BPF program type allows sleeping/blocking in kernel */ + SEC_SLEEPABLE = 8, + /* allow non-strict prefix matching */ + SEC_SLOPPY_PFX = 16, + /* BPF program support non-linear XDP buffer */ + SEC_XDP_FRAGS = 32, + /* deprecated sec definitions not supposed to be used */ + SEC_DEPRECATED = 64, +}; + +struct bpf_sec_def { + char *sec; + enum bpf_prog_type prog_type; + enum bpf_attach_type expected_attach_type; + long cookie; + int handler_id; + + libbpf_prog_setup_fn_t prog_setup_fn; + libbpf_prog_prepare_load_fn_t prog_prepare_load_fn; + libbpf_prog_attach_fn_t prog_attach_fn; +}; + +/* + * bpf_prog should be a better name but it has been used in + * linux/filter.h. + */ +struct bpf_program { + const struct bpf_sec_def *sec_def; + char *sec_name; + size_t sec_idx; + /* this program's instruction offset (in number of instructions) + * within its containing ELF section + */ + size_t sec_insn_off; + /* number of original instructions in ELF section belonging to this + * program, not taking into account subprogram instructions possible + * appended later during relocation + */ + size_t sec_insn_cnt; + /* Offset (in number of instructions) of the start of instruction + * belonging to this BPF program within its containing main BPF + * program. For the entry-point (main) BPF program, this is always + * zero. For a sub-program, this gets reset before each of main BPF + * programs are processed and relocated and is used to determined + * whether sub-program was already appended to the main program, and + * if yes, at which instruction offset. + */ + size_t sub_insn_off; + + char *name; + /* name with / replaced by _; makes recursive pinning + * in bpf_object__pin_programs easier + */ + char *pin_name; + + /* instructions that belong to BPF program; insns[0] is located at + * sec_insn_off instruction within its ELF section in ELF file, so + * when mapping ELF file instruction index to the local instruction, + * one needs to subtract sec_insn_off; and vice versa. 
+ */ + struct bpf_insn *insns; + /* actual number of instruction in this BPF program's image; for + * entry-point BPF programs this includes the size of main program + * itself plus all the used sub-programs, appended at the end + */ + size_t insns_cnt; + + struct reloc_desc *reloc_desc; + int nr_reloc; + + /* BPF verifier log settings */ + char *log_buf; + size_t log_size; + __u32 log_level; + + struct { + int nr; + int *fds; + } instances; + bpf_program_prep_t preprocessor; + + struct bpf_object *obj; + void *priv; + bpf_program_clear_priv_t clear_priv; + + bool autoload; + bool mark_btf_static; + enum bpf_prog_type type; + enum bpf_attach_type expected_attach_type; + int prog_ifindex; + __u32 attach_btf_obj_fd; + __u32 attach_btf_id; + __u32 attach_prog_fd; + void *func_info; + __u32 func_info_rec_size; + __u32 func_info_cnt; + + void *line_info; + __u32 line_info_rec_size; + __u32 line_info_cnt; + __u32 prog_flags; +}; + +struct bpf_struct_ops { + const char *tname; + const struct btf_type *type; + struct bpf_program **progs; + __u32 *kern_func_off; + /* e.g. struct tcp_congestion_ops in bpf_prog's btf format */ + void *data; + /* e.g. struct bpf_struct_ops_tcp_congestion_ops in + * btf_vmlinux's format. + * struct bpf_struct_ops_tcp_congestion_ops { + * [... some other kernel fields ...] + * struct tcp_congestion_ops data; + * } + * kern_vdata-size == sizeof(struct bpf_struct_ops_tcp_congestion_ops) + * bpf_map__init_kern_struct_ops() will populate the "kern_vdata" + * from "data". + */ + void *kern_vdata; + __u32 type_id; +}; + +#define DATA_SEC ".data" +#define BSS_SEC ".bss" +#define RODATA_SEC ".rodata" +#define KCONFIG_SEC ".kconfig" +#define KSYMS_SEC ".ksyms" +#define STRUCT_OPS_SEC ".struct_ops" + +enum libbpf_map_type { + LIBBPF_MAP_UNSPEC, + LIBBPF_MAP_DATA, + LIBBPF_MAP_BSS, + LIBBPF_MAP_RODATA, + LIBBPF_MAP_KCONFIG, +}; + +struct bpf_map { + char *name; + /* real_name is defined for special internal maps (.rodata*, + * .data*, .bss, .kconfig) and preserves their original ELF section + * name. This is important to be be able to find corresponding BTF + * DATASEC information. + */ + char *real_name; + int fd; + int sec_idx; + size_t sec_offset; + int map_ifindex; + int inner_map_fd; + struct bpf_map_def def; + __u32 numa_node; + __u32 btf_var_idx; + __u32 btf_key_type_id; + __u32 btf_value_type_id; + __u32 btf_vmlinux_value_type_id; + void *priv; + bpf_map_clear_priv_t clear_priv; + enum libbpf_map_type libbpf_type; + void *mmaped; + struct bpf_struct_ops *st_ops; + struct bpf_map *inner_map; + void **init_slots; + int init_slots_sz; + char *pin_path; + bool pinned; + bool reused; + bool skipped; + __u64 map_extra; +}; + +enum extern_type { + EXT_UNKNOWN, + EXT_KCFG, + EXT_KSYM, +}; + +enum kcfg_type { + KCFG_UNKNOWN, + KCFG_CHAR, + KCFG_BOOL, + KCFG_INT, + KCFG_TRISTATE, + KCFG_CHAR_ARR, +}; + +struct extern_desc { + enum extern_type type; + int sym_idx; + int btf_id; + int sec_btf_id; + const char *name; + bool is_set; + bool is_weak; + union { + struct { + enum kcfg_type type; + int sz; + int align; + int data_off; + bool is_signed; + } kcfg; + struct { + unsigned long long addr; + + /* target btf_id of the corresponding kernel var. */ + int kernel_btf_obj_fd; + int kernel_btf_id; + + /* local btf_id of the ksym extern's type. 
*/ + __u32 type_id; + /* BTF fd index to be patched in for insn->off, this is + * 0 for vmlinux BTF, index in obj->fd_array for module + * BTF + */ + __s16 btf_fd_idx; + } ksym; + }; +}; + +static LIST_HEAD(bpf_objects_list); + +struct module_btf { + struct btf *btf; + char *name; + __u32 id; + int fd; + int fd_array_idx; +}; + +enum sec_type { + SEC_UNUSED = 0, + SEC_RELO, + SEC_BSS, + SEC_DATA, + SEC_RODATA, +}; + +struct elf_sec_desc { + enum sec_type sec_type; + Elf64_Shdr *shdr; + Elf_Data *data; +}; + +struct elf_state { + int fd; + const void *obj_buf; + size_t obj_buf_sz; + Elf *elf; + Elf64_Ehdr *ehdr; + Elf_Data *symbols; + Elf_Data *st_ops_data; + size_t shstrndx; /* section index for section name strings */ + size_t strtabidx; + struct elf_sec_desc *secs; + int sec_cnt; + int maps_shndx; + int btf_maps_shndx; + __u32 btf_maps_sec_btf_id; + int text_shndx; + int symbols_shndx; + int st_ops_shndx; +}; + +struct usdt_manager; + +struct bpf_object { + char name[BPF_OBJ_NAME_LEN]; + char license[64]; + __u32 kern_version; + + struct bpf_program *programs; + size_t nr_programs; + struct bpf_map *maps; + size_t nr_maps; + size_t maps_cap; + + char *kconfig; + struct extern_desc *externs; + int nr_extern; + int kconfig_map_idx; + + bool loaded; + bool has_subcalls; + bool has_rodata; + + struct bpf_gen *gen_loader; + + /* Information when doing ELF related work. Only valid if efile.elf is not NULL */ + struct elf_state efile; + /* + * All loaded bpf_object are linked in a list, which is + * hidden to caller. bpf_objects__ handlers deal with + * all objects. + */ + struct list_head list; + + struct btf *btf; + struct btf_ext *btf_ext; + + /* Parse and load BTF vmlinux if any of the programs in the object need + * it at load time. + */ + struct btf *btf_vmlinux; + /* Path to the custom BTF to be used for BPF CO-RE relocations as an + * override for vmlinux BTF. + */ + char *btf_custom_path; + /* vmlinux BTF override for CO-RE relocations */ + struct btf *btf_vmlinux_override; + /* Lazily initialized kernel module BTFs */ + struct module_btf *btf_modules; + bool btf_modules_loaded; + size_t btf_module_cnt; + size_t btf_module_cap; + + /* optional log settings passed to BPF_BTF_LOAD and BPF_PROG_LOAD commands */ + char *log_buf; + size_t log_size; + __u32 log_level; + + void *priv; + bpf_object_clear_priv_t clear_priv; + + int *fd_array; + size_t fd_array_cap; + size_t fd_array_cnt; + + struct usdt_manager *usdt_man; + + char path[]; +}; + +static const char *elf_sym_str(const struct bpf_object *obj, size_t off); +static const char *elf_sec_str(const struct bpf_object *obj, size_t off); +static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx); +static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name); +static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn); +static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn); +static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn); +static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx); +static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx); + +void bpf_program__unload(struct bpf_program *prog) +{ + int i; + + if (!prog) + return; + + /* + * If the object is opened but the program was never loaded, + * it is possible that prog->instances.nr == -1. 
+ */ + if (prog->instances.nr > 0) { + for (i = 0; i < prog->instances.nr; i++) + zclose(prog->instances.fds[i]); + } else if (prog->instances.nr != -1) { + pr_warn("Internal error: instances.nr is %d\n", + prog->instances.nr); + } + + prog->instances.nr = -1; + zfree(&prog->instances.fds); + + zfree(&prog->func_info); + zfree(&prog->line_info); +} + +static void bpf_program__exit(struct bpf_program *prog) +{ + if (!prog) + return; + + if (prog->clear_priv) + prog->clear_priv(prog, prog->priv); + + prog->priv = NULL; + prog->clear_priv = NULL; + + bpf_program__unload(prog); + zfree(&prog->name); + zfree(&prog->sec_name); + zfree(&prog->pin_name); + zfree(&prog->insns); + zfree(&prog->reloc_desc); + + prog->nr_reloc = 0; + prog->insns_cnt = 0; + prog->sec_idx = -1; +} + +static char *__bpf_program__pin_name(struct bpf_program *prog) +{ + char *name, *p; + + if (libbpf_mode & LIBBPF_STRICT_SEC_NAME) + name = strdup(prog->name); + else + name = strdup(prog->sec_name); + + if (!name) + return NULL; + + p = name; + + while ((p = strchr(p, '/'))) + *p = '_'; + + return name; +} + +static bool insn_is_subprog_call(const struct bpf_insn *insn) +{ + return BPF_CLASS(insn->code) == BPF_JMP && + BPF_OP(insn->code) == BPF_CALL && + BPF_SRC(insn->code) == BPF_K && + insn->src_reg == BPF_PSEUDO_CALL && + insn->dst_reg == 0 && + insn->off == 0; +} + +static bool is_call_insn(const struct bpf_insn *insn) +{ + return insn->code == (BPF_JMP | BPF_CALL); +} + +static bool insn_is_pseudo_func(struct bpf_insn *insn) +{ + return is_ldimm64_insn(insn) && insn->src_reg == BPF_PSEUDO_FUNC; +} + +static int +bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog, + const char *name, size_t sec_idx, const char *sec_name, + size_t sec_off, void *insn_data, size_t insn_data_sz) +{ + if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) { + pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n", + sec_name, name, sec_off, insn_data_sz); + return -EINVAL; + } + + memset(prog, 0, sizeof(*prog)); + prog->obj = obj; + + prog->sec_idx = sec_idx; + prog->sec_insn_off = sec_off / BPF_INSN_SZ; + prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ; + /* insns_cnt can later be increased by appending used subprograms */ + prog->insns_cnt = prog->sec_insn_cnt; + + prog->type = BPF_PROG_TYPE_UNSPEC; + + /* libbpf's convention for SEC("?abc...") is that it's just like + * SEC("abc...") but the corresponding bpf_program starts out with + * autoload set to false. + */ + if (sec_name[0] == '?') { + prog->autoload = false; + /* from now on forget there was ? 
in section name */ + sec_name++; + } else { + prog->autoload = true; + } + + prog->instances.fds = NULL; + prog->instances.nr = -1; + + /* inherit object's log_level */ + prog->log_level = obj->log_level; + + prog->sec_name = strdup(sec_name); + if (!prog->sec_name) + goto errout; + + prog->name = strdup(name); + if (!prog->name) + goto errout; + + prog->pin_name = __bpf_program__pin_name(prog); + if (!prog->pin_name) + goto errout; + + prog->insns = malloc(insn_data_sz); + if (!prog->insns) + goto errout; + memcpy(prog->insns, insn_data, insn_data_sz); + + return 0; +errout: + pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name); + bpf_program__exit(prog); + return -ENOMEM; +} + +static int +bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data, + const char *sec_name, int sec_idx) +{ + Elf_Data *symbols = obj->efile.symbols; + struct bpf_program *prog, *progs; + void *data = sec_data->d_buf; + size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms; + int nr_progs, err, i; + const char *name; + Elf64_Sym *sym; + + progs = obj->programs; + nr_progs = obj->nr_programs; + nr_syms = symbols->d_size / sizeof(Elf64_Sym); + sec_off = 0; + + for (i = 0; i < nr_syms; i++) { + sym = elf_sym_by_idx(obj, i); + + if (sym->st_shndx != sec_idx) + continue; + if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC) + continue; + + prog_sz = sym->st_size; + sec_off = sym->st_value; + + name = elf_sym_str(obj, sym->st_name); + if (!name) { + pr_warn("sec '%s': failed to get symbol name for offset %zu\n", + sec_name, sec_off); + return -LIBBPF_ERRNO__FORMAT; + } + + if (sec_off + prog_sz > sec_sz) { + pr_warn("sec '%s': program at offset %zu crosses section boundary\n", + sec_name, sec_off); + return -LIBBPF_ERRNO__FORMAT; + } + + if (sec_idx != obj->efile.text_shndx && ELF64_ST_BIND(sym->st_info) == STB_LOCAL) { + pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name); + return -ENOTSUP; + } + + pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n", + sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz); + + progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs)); + if (!progs) { + /* + * In this case the original obj->programs + * is still valid, so don't need special treat for + * bpf_close_object(). + */ + pr_warn("sec '%s': failed to alloc memory for new program '%s'\n", + sec_name, name); + return -ENOMEM; + } + obj->programs = progs; + + prog = &progs[nr_progs]; + + err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name, + sec_off, data + sec_off, prog_sz); + if (err) + return err; + + /* if function is a global/weak symbol, but has restricted + * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC + * as static to enable more permissive BPF verification mode + * with more outside context available to BPF verifier + */ + if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL + && (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN + || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL)) + prog->mark_btf_static = true; + + nr_progs++; + obj->nr_programs = nr_progs; + } + + return 0; +} + +__u32 get_kernel_version(void) +{ + /* On Ubuntu LINUX_VERSION_CODE doesn't correspond to info.release, + * but Ubuntu provides /proc/version_signature file, as described at + * https://ubuntu.com/kernel, with an example contents below, which we + * can use to get a proper LINUX_VERSION_CODE. 
+ * + * Ubuntu 5.4.0-12.15-generic 5.4.8 + * + * In the above, 5.4.8 is what kernel is actually expecting, while + * uname() call will return 5.4.0 in info.release. + */ + const char *ubuntu_kver_file = "/proc/version_signature"; + __u32 major, minor, patch; + struct utsname info; + + if (access(ubuntu_kver_file, R_OK) == 0) { + FILE *f; + + f = fopen(ubuntu_kver_file, "r"); + if (f) { + if (fscanf(f, "%*s %*s %d.%d.%d\n", &major, &minor, &patch) == 3) { + fclose(f); + return KERNEL_VERSION(major, minor, patch); + } + fclose(f); + } + /* something went wrong, fall back to uname() approach */ + } + + uname(&info); + if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3) + return 0; + return KERNEL_VERSION(major, minor, patch); +} + +static const struct btf_member * +find_member_by_offset(const struct btf_type *t, __u32 bit_offset) +{ + struct btf_member *m; + int i; + + for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) { + if (btf_member_bit_offset(t, i) == bit_offset) + return m; + } + + return NULL; +} + +static const struct btf_member * +find_member_by_name(const struct btf *btf, const struct btf_type *t, + const char *name) +{ + struct btf_member *m; + int i; + + for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) { + if (!strcmp(btf__name_by_offset(btf, m->name_off), name)) + return m; + } + + return NULL; +} + +#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_" +static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix, + const char *name, __u32 kind); + +static int +find_struct_ops_kern_types(const struct btf *btf, const char *tname, + const struct btf_type **type, __u32 *type_id, + const struct btf_type **vtype, __u32 *vtype_id, + const struct btf_member **data_member) +{ + const struct btf_type *kern_type, *kern_vtype; + const struct btf_member *kern_data_member; + __s32 kern_vtype_id, kern_type_id; + __u32 i; + + kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT); + if (kern_type_id < 0) { + pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n", + tname); + return kern_type_id; + } + kern_type = btf__type_by_id(btf, kern_type_id); + + /* Find the corresponding "map_value" type that will be used + * in map_update(BPF_MAP_TYPE_STRUCT_OPS). For example, + * find "struct bpf_struct_ops_tcp_congestion_ops" from the + * btf_vmlinux. + */ + kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX, + tname, BTF_KIND_STRUCT); + if (kern_vtype_id < 0) { + pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n", + STRUCT_OPS_VALUE_PREFIX, tname); + return kern_vtype_id; + } + kern_vtype = btf__type_by_id(btf, kern_vtype_id); + + /* Find "struct tcp_congestion_ops" from + * struct bpf_struct_ops_tcp_congestion_ops { + * [ ... 
] + * struct tcp_congestion_ops data; + * } + */ + kern_data_member = btf_members(kern_vtype); + for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) { + if (kern_data_member->type == kern_type_id) + break; + } + if (i == btf_vlen(kern_vtype)) { + pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n", + tname, STRUCT_OPS_VALUE_PREFIX, tname); + return -EINVAL; + } + + *type = kern_type; + *type_id = kern_type_id; + *vtype = kern_vtype; + *vtype_id = kern_vtype_id; + *data_member = kern_data_member; + + return 0; +} + +static bool bpf_map__is_struct_ops(const struct bpf_map *map) +{ + return map->def.type == BPF_MAP_TYPE_STRUCT_OPS; +} + +/* Init the map's fields that depend on kern_btf */ +static int bpf_map__init_kern_struct_ops(struct bpf_map *map, + const struct btf *btf, + const struct btf *kern_btf) +{ + const struct btf_member *member, *kern_member, *kern_data_member; + const struct btf_type *type, *kern_type, *kern_vtype; + __u32 i, kern_type_id, kern_vtype_id, kern_data_off; + struct bpf_struct_ops *st_ops; + void *data, *kern_data; + const char *tname; + int err; + + st_ops = map->st_ops; + type = st_ops->type; + tname = st_ops->tname; + err = find_struct_ops_kern_types(kern_btf, tname, + &kern_type, &kern_type_id, + &kern_vtype, &kern_vtype_id, + &kern_data_member); + if (err) + return err; + + pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n", + map->name, st_ops->type_id, kern_type_id, kern_vtype_id); + + map->def.value_size = kern_vtype->size; + map->btf_vmlinux_value_type_id = kern_vtype_id; + + st_ops->kern_vdata = calloc(1, kern_vtype->size); + if (!st_ops->kern_vdata) + return -ENOMEM; + + data = st_ops->data; + kern_data_off = kern_data_member->offset / 8; + kern_data = st_ops->kern_vdata + kern_data_off; + + member = btf_members(type); + for (i = 0; i < btf_vlen(type); i++, member++) { + const struct btf_type *mtype, *kern_mtype; + __u32 mtype_id, kern_mtype_id; + void *mdata, *kern_mdata; + __s64 msize, kern_msize; + __u32 moff, kern_moff; + __u32 kern_member_idx; + const char *mname; + + mname = btf__name_by_offset(btf, member->name_off); + kern_member = find_member_by_name(kern_btf, kern_type, mname); + if (!kern_member) { + pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n", + map->name, mname); + return -ENOTSUP; + } + + kern_member_idx = kern_member - btf_members(kern_type); + if (btf_member_bitfield_size(type, i) || + btf_member_bitfield_size(kern_type, kern_member_idx)) { + pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n", + map->name, mname); + return -ENOTSUP; + } + + moff = member->offset / 8; + kern_moff = kern_member->offset / 8; + + mdata = data + moff; + kern_mdata = kern_data + kern_moff; + + mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id); + kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type, + &kern_mtype_id); + if (BTF_INFO_KIND(mtype->info) != + BTF_INFO_KIND(kern_mtype->info)) { + pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n", + map->name, mname, BTF_INFO_KIND(mtype->info), + BTF_INFO_KIND(kern_mtype->info)); + return -ENOTSUP; + } + + if (btf_is_ptr(mtype)) { + struct bpf_program *prog; + + prog = st_ops->progs[i]; + if (!prog) + continue; + + kern_mtype = skip_mods_and_typedefs(kern_btf, + kern_mtype->type, + &kern_mtype_id); + + /* mtype->type must be a func_proto which was + * guaranteed in bpf_object__collect_st_ops_relos(), + * so only check kern_mtype for func_proto here. 
+ */ + if (!btf_is_func_proto(kern_mtype)) { + pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n", + map->name, mname); + return -ENOTSUP; + } + + prog->attach_btf_id = kern_type_id; + prog->expected_attach_type = kern_member_idx; + + st_ops->kern_func_off[i] = kern_data_off + kern_moff; + + pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n", + map->name, mname, prog->name, moff, + kern_moff); + + continue; + } + + msize = btf__resolve_size(btf, mtype_id); + kern_msize = btf__resolve_size(kern_btf, kern_mtype_id); + if (msize < 0 || kern_msize < 0 || msize != kern_msize) { + pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n", + map->name, mname, (ssize_t)msize, + (ssize_t)kern_msize); + return -ENOTSUP; + } + + pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n", + map->name, mname, (unsigned int)msize, + moff, kern_moff); + memcpy(kern_mdata, mdata, msize); + } + + return 0; +} + +static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj) +{ + struct bpf_map *map; + size_t i; + int err; + + for (i = 0; i < obj->nr_maps; i++) { + map = &obj->maps[i]; + + if (!bpf_map__is_struct_ops(map)) + continue; + + err = bpf_map__init_kern_struct_ops(map, obj->btf, + obj->btf_vmlinux); + if (err) + return err; + } + + return 0; +} + +static int bpf_object__init_struct_ops_maps(struct bpf_object *obj) +{ + const struct btf_type *type, *datasec; + const struct btf_var_secinfo *vsi; + struct bpf_struct_ops *st_ops; + const char *tname, *var_name; + __s32 type_id, datasec_id; + const struct btf *btf; + struct bpf_map *map; + __u32 i; + + if (obj->efile.st_ops_shndx == -1) + return 0; + + btf = obj->btf; + datasec_id = btf__find_by_name_kind(btf, STRUCT_OPS_SEC, + BTF_KIND_DATASEC); + if (datasec_id < 0) { + pr_warn("struct_ops init: DATASEC %s not found\n", + STRUCT_OPS_SEC); + return -EINVAL; + } + + datasec = btf__type_by_id(btf, datasec_id); + vsi = btf_var_secinfos(datasec); + for (i = 0; i < btf_vlen(datasec); i++, vsi++) { + type = btf__type_by_id(obj->btf, vsi->type); + var_name = btf__name_by_offset(obj->btf, type->name_off); + + type_id = btf__resolve_type(obj->btf, vsi->type); + if (type_id < 0) { + pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n", + vsi->type, STRUCT_OPS_SEC); + return -EINVAL; + } + + type = btf__type_by_id(obj->btf, type_id); + tname = btf__name_by_offset(obj->btf, type->name_off); + if (!tname[0]) { + pr_warn("struct_ops init: anonymous type is not supported\n"); + return -ENOTSUP; + } + if (!btf_is_struct(type)) { + pr_warn("struct_ops init: %s is not a struct\n", tname); + return -EINVAL; + } + + map = bpf_object__add_map(obj); + if (IS_ERR(map)) + return PTR_ERR(map); + + map->sec_idx = obj->efile.st_ops_shndx; + map->sec_offset = vsi->offset; + map->name = strdup(var_name); + if (!map->name) + return -ENOMEM; + + map->def.type = BPF_MAP_TYPE_STRUCT_OPS; + map->def.key_size = sizeof(int); + map->def.value_size = type->size; + map->def.max_entries = 1; + + map->st_ops = calloc(1, sizeof(*map->st_ops)); + if (!map->st_ops) + return -ENOMEM; + st_ops = map->st_ops; + st_ops->data = malloc(type->size); + st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs)); + st_ops->kern_func_off = malloc(btf_vlen(type) * + sizeof(*st_ops->kern_func_off)); + if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off) + return -ENOMEM; + + if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) { + 
pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n", + var_name, STRUCT_OPS_SEC); + return -EINVAL; + } + + memcpy(st_ops->data, + obj->efile.st_ops_data->d_buf + vsi->offset, + type->size); + st_ops->tname = tname; + st_ops->type = type; + st_ops->type_id = type_id; + + pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n", + tname, type_id, var_name, vsi->offset); + } + + return 0; +} + +static struct bpf_object *bpf_object__new(const char *path, + const void *obj_buf, + size_t obj_buf_sz, + const char *obj_name) +{ + bool strict = (libbpf_mode & LIBBPF_STRICT_NO_OBJECT_LIST); + struct bpf_object *obj; + char *end; + + obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1); + if (!obj) { + pr_warn("alloc memory failed for %s\n", path); + return ERR_PTR(-ENOMEM); + } + + strcpy(obj->path, path); + if (obj_name) { + libbpf_strlcpy(obj->name, obj_name, sizeof(obj->name)); + } else { + /* Using basename() GNU version which doesn't modify arg. */ + libbpf_strlcpy(obj->name, basename((void *)path), sizeof(obj->name)); + end = strchr(obj->name, '.'); + if (end) + *end = 0; + } + + obj->efile.fd = -1; + /* + * Caller of this function should also call + * bpf_object__elf_finish() after data collection to return + * obj_buf to user. If not, we should duplicate the buffer to + * avoid user freeing them before elf finish. + */ + obj->efile.obj_buf = obj_buf; + obj->efile.obj_buf_sz = obj_buf_sz; + obj->efile.maps_shndx = -1; + obj->efile.btf_maps_shndx = -1; + obj->efile.st_ops_shndx = -1; + obj->kconfig_map_idx = -1; + + obj->kern_version = get_kernel_version(); + obj->loaded = false; + + INIT_LIST_HEAD(&obj->list); + if (!strict) + list_add(&obj->list, &bpf_objects_list); + return obj; +} + +static void bpf_object__elf_finish(struct bpf_object *obj) +{ + if (!obj->efile.elf) + return; + + elf_end(obj->efile.elf); + obj->efile.elf = NULL; + obj->efile.symbols = NULL; + obj->efile.st_ops_data = NULL; + + zfree(&obj->efile.secs); + obj->efile.sec_cnt = 0; + zclose(obj->efile.fd); + obj->efile.obj_buf = NULL; + obj->efile.obj_buf_sz = 0; +} + +static int bpf_object__elf_init(struct bpf_object *obj) +{ + Elf64_Ehdr *ehdr; + int err = 0; + Elf *elf; + + if (obj->efile.elf) { + pr_warn("elf: init internal error\n"); + return -LIBBPF_ERRNO__LIBELF; + } + + if (obj->efile.obj_buf_sz > 0) { + /* + * obj_buf should have been validated by + * bpf_object__open_buffer(). 
+ */ + elf = elf_memory((char *)obj->efile.obj_buf, obj->efile.obj_buf_sz); + } else { + obj->efile.fd = open(obj->path, O_RDONLY | O_CLOEXEC); + if (obj->efile.fd < 0) { + char errmsg[STRERR_BUFSIZE], *cp; + + err = -errno; + cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); + pr_warn("elf: failed to open %s: %s\n", obj->path, cp); + return err; + } + + elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL); + } + + if (!elf) { + pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1)); + err = -LIBBPF_ERRNO__LIBELF; + goto errout; + } + + obj->efile.elf = elf; + + if (elf_kind(elf) != ELF_K_ELF) { + err = -LIBBPF_ERRNO__FORMAT; + pr_warn("elf: '%s' is not a proper ELF object\n", obj->path); + goto errout; + } + + if (gelf_getclass(elf) != ELFCLASS64) { + err = -LIBBPF_ERRNO__FORMAT; + pr_warn("elf: '%s' is not a 64-bit ELF object\n", obj->path); + goto errout; + } + + obj->efile.ehdr = ehdr = elf64_getehdr(elf); + if (!obj->efile.ehdr) { + pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1)); + err = -LIBBPF_ERRNO__FORMAT; + goto errout; + } + + if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) { + pr_warn("elf: failed to get section names section index for %s: %s\n", + obj->path, elf_errmsg(-1)); + err = -LIBBPF_ERRNO__FORMAT; + goto errout; + } + + /* Elf is corrupted/truncated, avoid calling elf_strptr. */ + if (!elf_rawdata(elf_getscn(elf, obj->efile.shstrndx), NULL)) { + pr_warn("elf: failed to get section names strings from %s: %s\n", + obj->path, elf_errmsg(-1)); + err = -LIBBPF_ERRNO__FORMAT; + goto errout; + } + + /* Old LLVM set e_machine to EM_NONE */ + if (ehdr->e_type != ET_REL || (ehdr->e_machine && ehdr->e_machine != EM_BPF)) { + pr_warn("elf: %s is not a valid eBPF object file\n", obj->path); + err = -LIBBPF_ERRNO__FORMAT; + goto errout; + } + + return 0; +errout: + bpf_object__elf_finish(obj); + return err; +} + +static int bpf_object__check_endianness(struct bpf_object *obj) +{ +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2LSB) + return 0; +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2MSB) + return 0; +#else +# error "Unrecognized __BYTE_ORDER__" +#endif + pr_warn("elf: endianness mismatch in %s.\n", obj->path); + return -LIBBPF_ERRNO__ENDIAN; +} + +static int +bpf_object__init_license(struct bpf_object *obj, void *data, size_t size) +{ + /* libbpf_strlcpy() only copies first N - 1 bytes, so size + 1 won't + * go over allowed ELF data section buffer + */ + libbpf_strlcpy(obj->license, data, min(size + 1, sizeof(obj->license))); + pr_debug("license of %s is %s\n", obj->path, obj->license); + return 0; +} + +static int +bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size) +{ + __u32 kver; + + if (size != sizeof(kver)) { + pr_warn("invalid kver section in %s\n", obj->path); + return -LIBBPF_ERRNO__FORMAT; + } + memcpy(&kver, data, sizeof(kver)); + obj->kern_version = kver; + pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version); + return 0; +} + +static bool bpf_map_type__is_map_in_map(enum bpf_map_type type) +{ + if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS || + type == BPF_MAP_TYPE_HASH_OF_MAPS) + return true; + return false; +} + +static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32 *size) +{ + Elf_Data *data; + Elf_Scn *scn; + + if (!name) + return -EINVAL; + + scn = elf_sec_by_name(obj, name); + data = elf_sec_data(obj, scn); + if (data) { + *size = 
data->d_size; + return 0; /* found it */ + } + + return -ENOENT; +} + +static int find_elf_var_offset(const struct bpf_object *obj, const char *name, __u32 *off) +{ + Elf_Data *symbols = obj->efile.symbols; + const char *sname; + size_t si; + + if (!name || !off) + return -EINVAL; + + for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) { + Elf64_Sym *sym = elf_sym_by_idx(obj, si); + + if (ELF64_ST_TYPE(sym->st_info) != STT_OBJECT) + continue; + + if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL && + ELF64_ST_BIND(sym->st_info) != STB_WEAK) + continue; + + sname = elf_sym_str(obj, sym->st_name); + if (!sname) { + pr_warn("failed to get sym name string for var %s\n", name); + return -EIO; + } + if (strcmp(name, sname) == 0) { + *off = sym->st_value; + return 0; + } + } + + return -ENOENT; +} + +static struct bpf_map *bpf_object__add_map(struct bpf_object *obj) +{ + struct bpf_map *new_maps; + size_t new_cap; + int i; + + if (obj->nr_maps < obj->maps_cap) + return &obj->maps[obj->nr_maps++]; + + new_cap = max((size_t)4, obj->maps_cap * 3 / 2); + new_maps = libbpf_reallocarray(obj->maps, new_cap, sizeof(*obj->maps)); + if (!new_maps) { + pr_warn("alloc maps for object failed\n"); + return ERR_PTR(-ENOMEM); + } + + obj->maps_cap = new_cap; + obj->maps = new_maps; + + /* zero out new maps */ + memset(obj->maps + obj->nr_maps, 0, + (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps)); + /* + * fill all fd with -1 so won't close incorrect fd (fd=0 is stdin) + * when failure (zclose won't close negative fd)). + */ + for (i = obj->nr_maps; i < obj->maps_cap; i++) { + obj->maps[i].fd = -1; + obj->maps[i].inner_map_fd = -1; + } + + return &obj->maps[obj->nr_maps++]; +} + +static size_t bpf_map_mmap_sz(const struct bpf_map *map) +{ + long page_sz = sysconf(_SC_PAGE_SIZE); + size_t map_sz; + + map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries; + map_sz = roundup(map_sz, page_sz); + return map_sz; +} + +static char *internal_map_name(struct bpf_object *obj, const char *real_name) +{ + char map_name[BPF_OBJ_NAME_LEN], *p; + int pfx_len, sfx_len = max((size_t)7, strlen(real_name)); + + /* This is one of the more confusing parts of libbpf for various + * reasons, some of which are historical. The original idea for naming + * internal names was to include as much of BPF object name prefix as + * possible, so that it can be distinguished from similar internal + * maps of a different BPF object. + * As an example, let's say we have bpf_object named 'my_object_name' + * and internal map corresponding to '.rodata' ELF section. The final + * map name advertised to user and to the kernel will be + * 'my_objec.rodata', taking first 8 characters of object name and + * entire 7 characters of '.rodata'. + * Somewhat confusingly, if internal map ELF section name is shorter + * than 7 characters, e.g., '.bss', we still reserve 7 characters + * for the suffix, even though we only have 4 actual characters, and + * resulting map will be called 'my_objec.bss', not even using all 15 + * characters allowed by the kernel. Oh well, at least the truncated + * object name is somewhat consistent in this case. But if the map + * name is '.kconfig', we'll still have entirety of '.kconfig' added + * (8 chars) and thus will be left with only first 7 characters of the + * object name ('my_obje'). Happy guessing, user, that the final map + * name will be "my_obje.kconfig". 
+ * Now, with libbpf starting to support arbitrarily named .rodata.* + * and .data.* data sections, it's possible that ELF section name is + * longer than allowed 15 chars, so we now need to be careful to take + * only up to 15 first characters of ELF name, taking no BPF object + * name characters at all. So '.rodata.abracadabra' will result in + * '.rodata.abracad' kernel and user-visible name. + * We need to keep this convoluted logic intact for .data, .bss and + * .rodata maps, but for new custom .data.custom and .rodata.custom + * maps we use their ELF names as is, not prepending bpf_object name + * in front. We still need to truncate them to 15 characters for the + * kernel. Full name can be recovered for such maps by using DATASEC + * BTF type associated with such map's value type, though. + */ + if (sfx_len >= BPF_OBJ_NAME_LEN) + sfx_len = BPF_OBJ_NAME_LEN - 1; + + /* if there are two or more dots in map name, it's a custom dot map */ + if (strchr(real_name + 1, '.') != NULL) + pfx_len = 0; + else + pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, strlen(obj->name)); + + snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name, + sfx_len, real_name); + + /* sanitise map name to characters allowed by kernel */ + for (p = map_name; *p && p < map_name + sizeof(map_name); p++) + if (!isalnum(*p) && *p != '_' && *p != '.') + *p = '_'; + + return strdup(map_name); +} + +static int +bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map); + +static int +bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type, + const char *real_name, int sec_idx, void *data, size_t data_sz) +{ + struct bpf_map_def *def; + struct bpf_map *map; + int err; + + map = bpf_object__add_map(obj); + if (IS_ERR(map)) + return PTR_ERR(map); + + map->libbpf_type = type; + map->sec_idx = sec_idx; + map->sec_offset = 0; + map->real_name = strdup(real_name); + map->name = internal_map_name(obj, real_name); + if (!map->real_name || !map->name) { + zfree(&map->real_name); + zfree(&map->name); + return -ENOMEM; + } + + def = &map->def; + def->type = BPF_MAP_TYPE_ARRAY; + def->key_size = sizeof(int); + def->value_size = data_sz; + def->max_entries = 1; + def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG + ? BPF_F_RDONLY_PROG : 0; + def->map_flags |= BPF_F_MMAPABLE; + + pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n", + map->name, map->sec_idx, map->sec_offset, def->map_flags); + + map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + if (map->mmaped == MAP_FAILED) { + err = -errno; + map->mmaped = NULL; + pr_warn("failed to alloc map '%s' content buffer: %d\n", + map->name, err); + zfree(&map->real_name); + zfree(&map->name); + return err; + } + + /* failures are fine because of maps like .rodata.str1.1 */ + (void) bpf_map_find_btf_info(obj, map); + + if (data) + memcpy(map->mmaped, data, data_sz); + + pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name); + return 0; +} + +static int bpf_object__init_global_data_maps(struct bpf_object *obj) +{ + struct elf_sec_desc *sec_desc; + const char *sec_name; + int err = 0, sec_idx; + + /* + * Populate obj->maps with libbpf internal maps. 
+ */ + for (sec_idx = 1; sec_idx < obj->efile.sec_cnt; sec_idx++) { + sec_desc = &obj->efile.secs[sec_idx]; + + switch (sec_desc->sec_type) { + case SEC_DATA: + sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx)); + err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA, + sec_name, sec_idx, + sec_desc->data->d_buf, + sec_desc->data->d_size); + break; + case SEC_RODATA: + obj->has_rodata = true; + sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx)); + err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA, + sec_name, sec_idx, + sec_desc->data->d_buf, + sec_desc->data->d_size); + break; + case SEC_BSS: + sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx)); + err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS, + sec_name, sec_idx, + NULL, + sec_desc->data->d_size); + break; + default: + /* skip */ + break; + } + if (err) + return err; + } + return 0; +} + + +static struct extern_desc *find_extern_by_name(const struct bpf_object *obj, + const void *name) +{ + int i; + + for (i = 0; i < obj->nr_extern; i++) { + if (strcmp(obj->externs[i].name, name) == 0) + return &obj->externs[i]; + } + return NULL; +} + +static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val, + char value) +{ + switch (ext->kcfg.type) { + case KCFG_BOOL: + if (value == 'm') { + pr_warn("extern (kcfg) %s=%c should be tristate or char\n", + ext->name, value); + return -EINVAL; + } + *(bool *)ext_val = value == 'y' ? true : false; + break; + case KCFG_TRISTATE: + if (value == 'y') + *(enum libbpf_tristate *)ext_val = TRI_YES; + else if (value == 'm') + *(enum libbpf_tristate *)ext_val = TRI_MODULE; + else /* value == 'n' */ + *(enum libbpf_tristate *)ext_val = TRI_NO; + break; + case KCFG_CHAR: + *(char *)ext_val = value; + break; + case KCFG_UNKNOWN: + case KCFG_INT: + case KCFG_CHAR_ARR: + default: + pr_warn("extern (kcfg) %s=%c should be bool, tristate, or char\n", + ext->name, value); + return -EINVAL; + } + ext->is_set = true; + return 0; +} + +static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val, + const char *value) +{ + size_t len; + + if (ext->kcfg.type != KCFG_CHAR_ARR) { + pr_warn("extern (kcfg) %s=%s should be char array\n", ext->name, value); + return -EINVAL; + } + + len = strlen(value); + if (value[len - 1] != '"') { + pr_warn("extern (kcfg) '%s': invalid string config '%s'\n", + ext->name, value); + return -EINVAL; + } + + /* strip quotes */ + len -= 2; + if (len >= ext->kcfg.sz) { + pr_warn("extern (kcfg) '%s': long string config %s of (%zu bytes) truncated to %d bytes\n", + ext->name, value, len, ext->kcfg.sz - 1); + len = ext->kcfg.sz - 1; + } + memcpy(ext_val, value + 1, len); + ext_val[len] = '\0'; + ext->is_set = true; + return 0; +} + +static int parse_u64(const char *value, __u64 *res) +{ + char *value_end; + int err; + + errno = 0; + *res = strtoull(value, &value_end, 0); + if (errno) { + err = -errno; + pr_warn("failed to parse '%s' as integer: %d\n", value, err); + return err; + } + if (*value_end) { + pr_warn("failed to parse '%s' as integer completely\n", value); + return -EINVAL; + } + return 0; +} + +static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v) +{ + int bit_sz = ext->kcfg.sz * 8; + + if (ext->kcfg.sz == 8) + return true; + + /* Validate that value stored in u64 fits in integer of `ext->sz` + * bytes size without any loss of information. 
If the target integer + * is signed, we rely on the following limits of integer type of + * Y bits and subsequent transformation: + * + * -2^(Y-1) <= X <= 2^(Y-1) - 1 + * 0 <= X + 2^(Y-1) <= 2^Y - 1 + * 0 <= X + 2^(Y-1) < 2^Y + * + * For unsigned target integer, check that all the (64 - Y) bits are + * zero. + */ + if (ext->kcfg.is_signed) + return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz); + else + return (v >> bit_sz) == 0; +} + +static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val, + __u64 value) +{ + if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) { + pr_warn("extern (kcfg) %s=%llu should be integer\n", + ext->name, (unsigned long long)value); + return -EINVAL; + } + if (!is_kcfg_value_in_range(ext, value)) { + pr_warn("extern (kcfg) %s=%llu value doesn't fit in %d bytes\n", + ext->name, (unsigned long long)value, ext->kcfg.sz); + return -ERANGE; + } + switch (ext->kcfg.sz) { + case 1: *(__u8 *)ext_val = value; break; + case 2: *(__u16 *)ext_val = value; break; + case 4: *(__u32 *)ext_val = value; break; + case 8: *(__u64 *)ext_val = value; break; + default: + return -EINVAL; + } + ext->is_set = true; + return 0; +} + +static int bpf_object__process_kconfig_line(struct bpf_object *obj, + char *buf, void *data) +{ + struct extern_desc *ext; + char *sep, *value; + int len, err = 0; + void *ext_val; + __u64 num; + + if (!str_has_pfx(buf, "CONFIG_")) + return 0; + + sep = strchr(buf, '='); + if (!sep) { + pr_warn("failed to parse '%s': no separator\n", buf); + return -EINVAL; + } + + /* Trim ending '\n' */ + len = strlen(buf); + if (buf[len - 1] == '\n') + buf[len - 1] = '\0'; + /* Split on '=' and ensure that a value is present. */ + *sep = '\0'; + if (!sep[1]) { + *sep = '='; + pr_warn("failed to parse '%s': no value\n", buf); + return -EINVAL; + } + + ext = find_extern_by_name(obj, buf); + if (!ext || ext->is_set) + return 0; + + ext_val = data + ext->kcfg.data_off; + value = sep + 1; + + switch (*value) { + case 'y': case 'n': case 'm': + err = set_kcfg_value_tri(ext, ext_val, *value); + break; + case '"': + err = set_kcfg_value_str(ext, ext_val, value); + break; + default: + /* assume integer */ + err = parse_u64(value, &num); + if (err) { + pr_warn("extern (kcfg) %s=%s should be integer\n", + ext->name, value); + return err; + } + err = set_kcfg_value_num(ext, ext_val, num); + break; + } + if (err) + return err; + pr_debug("extern (kcfg) %s=%s\n", ext->name, value); + return 0; +} + +static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data) +{ + char buf[PATH_MAX]; + struct utsname uts; + int len, err = 0; + gzFile file; + + uname(&uts); + len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release); + if (len < 0) + return -EINVAL; + else if (len >= PATH_MAX) + return -ENAMETOOLONG; + + /* gzopen also accepts uncompressed files. 
*/ + file = gzopen(buf, "r"); + if (!file) + file = gzopen("/proc/config.gz", "r"); + + if (!file) { + pr_warn("failed to open system Kconfig\n"); + return -ENOENT; + } + + while (gzgets(file, buf, sizeof(buf))) { + err = bpf_object__process_kconfig_line(obj, buf, data); + if (err) { + pr_warn("error parsing system Kconfig line '%s': %d\n", + buf, err); + goto out; + } + } + +out: + gzclose(file); + return err; +} + +static int bpf_object__read_kconfig_mem(struct bpf_object *obj, + const char *config, void *data) +{ + char buf[PATH_MAX]; + int err = 0; + FILE *file; + + file = fmemopen((void *)config, strlen(config), "r"); + if (!file) { + err = -errno; + pr_warn("failed to open in-memory Kconfig: %d\n", err); + return err; + } + + while (fgets(buf, sizeof(buf), file)) { + err = bpf_object__process_kconfig_line(obj, buf, data); + if (err) { + pr_warn("error parsing in-memory Kconfig line '%s': %d\n", + buf, err); + break; + } + } + + fclose(file); + return err; +} + +static int bpf_object__init_kconfig_map(struct bpf_object *obj) +{ + struct extern_desc *last_ext = NULL, *ext; + size_t map_sz; + int i, err; + + for (i = 0; i < obj->nr_extern; i++) { + ext = &obj->externs[i]; + if (ext->type == EXT_KCFG) + last_ext = ext; + } + + if (!last_ext) + return 0; + + map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz; + err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG, + ".kconfig", obj->efile.symbols_shndx, + NULL, map_sz); + if (err) + return err; + + obj->kconfig_map_idx = obj->nr_maps - 1; + + return 0; +} + +static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict) +{ + Elf_Data *symbols = obj->efile.symbols; + int i, map_def_sz = 0, nr_maps = 0, nr_syms; + Elf_Data *data = NULL; + Elf_Scn *scn; + + if (obj->efile.maps_shndx < 0) + return 0; + + if (libbpf_mode & LIBBPF_STRICT_MAP_DEFINITIONS) { + pr_warn("legacy map definitions in SEC(\"maps\") are not supported\n"); + return -EOPNOTSUPP; + } + + if (!symbols) + return -EINVAL; + + scn = elf_sec_by_idx(obj, obj->efile.maps_shndx); + data = elf_sec_data(obj, scn); + if (!scn || !data) { + pr_warn("elf: failed to get legacy map definitions for %s\n", + obj->path); + return -EINVAL; + } + + /* + * Count number of maps. Each map has a name. + * Array of maps is not supported: only the first element is + * considered. + * + * TODO: Detect array of map and report error. + */ + nr_syms = symbols->d_size / sizeof(Elf64_Sym); + for (i = 0; i < nr_syms; i++) { + Elf64_Sym *sym = elf_sym_by_idx(obj, i); + + if (sym->st_shndx != obj->efile.maps_shndx) + continue; + if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION) + continue; + nr_maps++; + } + /* Assume equally sized map definitions */ + pr_debug("elf: found %d legacy map definitions (%zd bytes) in %s\n", + nr_maps, data->d_size, obj->path); + + if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) { + pr_warn("elf: unable to determine legacy map definition size in %s\n", + obj->path); + return -EINVAL; + } + map_def_sz = data->d_size / nr_maps; + + /* Fill obj->maps using data in "maps" section. 
*/ + for (i = 0; i < nr_syms; i++) { + Elf64_Sym *sym = elf_sym_by_idx(obj, i); + const char *map_name; + struct bpf_map_def *def; + struct bpf_map *map; + + if (sym->st_shndx != obj->efile.maps_shndx) + continue; + if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION) + continue; + + map = bpf_object__add_map(obj); + if (IS_ERR(map)) + return PTR_ERR(map); + + map_name = elf_sym_str(obj, sym->st_name); + if (!map_name) { + pr_warn("failed to get map #%d name sym string for obj %s\n", + i, obj->path); + return -LIBBPF_ERRNO__FORMAT; + } + + pr_warn("map '%s' (legacy): legacy map definitions are deprecated, use BTF-defined maps instead\n", map_name); + + if (ELF64_ST_BIND(sym->st_info) == STB_LOCAL) { + pr_warn("map '%s' (legacy): static maps are not supported\n", map_name); + return -ENOTSUP; + } + + map->libbpf_type = LIBBPF_MAP_UNSPEC; + map->sec_idx = sym->st_shndx; + map->sec_offset = sym->st_value; + pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n", + map_name, map->sec_idx, map->sec_offset); + if (sym->st_value + map_def_sz > data->d_size) { + pr_warn("corrupted maps section in %s: last map \"%s\" too small\n", + obj->path, map_name); + return -EINVAL; + } + + map->name = strdup(map_name); + if (!map->name) { + pr_warn("map '%s': failed to alloc map name\n", map_name); + return -ENOMEM; + } + pr_debug("map %d is \"%s\"\n", i, map->name); + def = (struct bpf_map_def *)(data->d_buf + sym->st_value); + /* + * If the definition of the map in the object file fits in + * bpf_map_def, copy it. Any extra fields in our version + * of bpf_map_def will default to zero as a result of the + * calloc above. + */ + if (map_def_sz <= sizeof(struct bpf_map_def)) { + memcpy(&map->def, def, map_def_sz); + } else { + /* + * Here the map structure being read is bigger than what + * we expect, truncate if the excess bits are all zero. + * If they are not zero, reject this map as + * incompatible. + */ + char *b; + + for (b = ((char *)def) + sizeof(struct bpf_map_def); + b < ((char *)def) + map_def_sz; b++) { + if (*b != 0) { + pr_warn("maps section in %s: \"%s\" has unrecognized, non-zero options\n", + obj->path, map_name); + if (strict) + return -EINVAL; + } + } + memcpy(&map->def, def, sizeof(struct bpf_map_def)); + } + + /* btf info may not exist but fill it in if it does exist */ + (void) bpf_map_find_btf_info(obj, map); + } + return 0; +} + +const struct btf_type * +skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id) +{ + const struct btf_type *t = btf__type_by_id(btf, id); + + if (res_id) + *res_id = id; + + while (btf_is_mod(t) || btf_is_typedef(t)) { + if (res_id) + *res_id = t->type; + t = btf__type_by_id(btf, t->type); + } + + return t; +} + +static const struct btf_type * +resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id) +{ + const struct btf_type *t; + + t = skip_mods_and_typedefs(btf, id, NULL); + if (!btf_is_ptr(t)) + return NULL; + + t = skip_mods_and_typedefs(btf, t->type, res_id); + + return btf_is_func_proto(t) ? 
t : NULL; +} + +static const char *__btf_kind_str(__u16 kind) +{ + switch (kind) { + case BTF_KIND_UNKN: return "void"; + case BTF_KIND_INT: return "int"; + case BTF_KIND_PTR: return "ptr"; + case BTF_KIND_ARRAY: return "array"; + case BTF_KIND_STRUCT: return "struct"; + case BTF_KIND_UNION: return "union"; + case BTF_KIND_ENUM: return "enum"; + case BTF_KIND_FWD: return "fwd"; + case BTF_KIND_TYPEDEF: return "typedef"; + case BTF_KIND_VOLATILE: return "volatile"; + case BTF_KIND_CONST: return "const"; + case BTF_KIND_RESTRICT: return "restrict"; + case BTF_KIND_FUNC: return "func"; + case BTF_KIND_FUNC_PROTO: return "func_proto"; + case BTF_KIND_VAR: return "var"; + case BTF_KIND_DATASEC: return "datasec"; + case BTF_KIND_FLOAT: return "float"; + case BTF_KIND_DECL_TAG: return "decl_tag"; + case BTF_KIND_TYPE_TAG: return "type_tag"; + default: return "unknown"; + } +} + +const char *btf_kind_str(const struct btf_type *t) +{ + return __btf_kind_str(btf_kind(t)); +} + +/* + * Fetch integer attribute of BTF map definition. Such attributes are + * represented using a pointer to an array, in which dimensionality of array + * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY]; + * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF + * type definition, while using only sizeof(void *) space in ELF data section. + */ +static bool get_map_field_int(const char *map_name, const struct btf *btf, + const struct btf_member *m, __u32 *res) +{ + const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL); + const char *name = btf__name_by_offset(btf, m->name_off); + const struct btf_array *arr_info; + const struct btf_type *arr_t; + + if (!btf_is_ptr(t)) { + pr_warn("map '%s': attr '%s': expected PTR, got %s.\n", + map_name, name, btf_kind_str(t)); + return false; + } + + arr_t = btf__type_by_id(btf, t->type); + if (!arr_t) { + pr_warn("map '%s': attr '%s': type [%u] not found.\n", + map_name, name, t->type); + return false; + } + if (!btf_is_array(arr_t)) { + pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n", + map_name, name, btf_kind_str(arr_t)); + return false; + } + arr_info = btf_array(arr_t); + *res = arr_info->nelems; + return true; +} + +static int build_map_pin_path(struct bpf_map *map, const char *path) +{ + char buf[PATH_MAX]; + int len; + + if (!path) + path = "/sys/fs/bpf"; + + len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map)); + if (len < 0) + return -EINVAL; + else if (len >= PATH_MAX) + return -ENAMETOOLONG; + + return bpf_map__set_pin_path(map, buf); +} + +int parse_btf_map_def(const char *map_name, struct btf *btf, + const struct btf_type *def_t, bool strict, + struct btf_map_def *map_def, struct btf_map_def *inner_def) +{ + const struct btf_type *t; + const struct btf_member *m; + bool is_inner = inner_def == NULL; + int vlen, i; + + vlen = btf_vlen(def_t); + m = btf_members(def_t); + for (i = 0; i < vlen; i++, m++) { + const char *name = btf__name_by_offset(btf, m->name_off); + + if (!name) { + pr_warn("map '%s': invalid field #%d.\n", map_name, i); + return -EINVAL; + } + if (strcmp(name, "type") == 0) { + if (!get_map_field_int(map_name, btf, m, &map_def->map_type)) + return -EINVAL; + map_def->parts |= MAP_DEF_MAP_TYPE; + } else if (strcmp(name, "max_entries") == 0) { + if (!get_map_field_int(map_name, btf, m, &map_def->max_entries)) + return -EINVAL; + map_def->parts |= MAP_DEF_MAX_ENTRIES; + } else if (strcmp(name, "map_flags") == 0) { + if (!get_map_field_int(map_name, btf, m, &map_def->map_flags)) + 
return -EINVAL; + map_def->parts |= MAP_DEF_MAP_FLAGS; + } else if (strcmp(name, "numa_node") == 0) { + if (!get_map_field_int(map_name, btf, m, &map_def->numa_node)) + return -EINVAL; + map_def->parts |= MAP_DEF_NUMA_NODE; + } else if (strcmp(name, "key_size") == 0) { + __u32 sz; + + if (!get_map_field_int(map_name, btf, m, &sz)) + return -EINVAL; + if (map_def->key_size && map_def->key_size != sz) { + pr_warn("map '%s': conflicting key size %u != %u.\n", + map_name, map_def->key_size, sz); + return -EINVAL; + } + map_def->key_size = sz; + map_def->parts |= MAP_DEF_KEY_SIZE; + } else if (strcmp(name, "key") == 0) { + __s64 sz; + + t = btf__type_by_id(btf, m->type); + if (!t) { + pr_warn("map '%s': key type [%d] not found.\n", + map_name, m->type); + return -EINVAL; + } + if (!btf_is_ptr(t)) { + pr_warn("map '%s': key spec is not PTR: %s.\n", + map_name, btf_kind_str(t)); + return -EINVAL; + } + sz = btf__resolve_size(btf, t->type); + if (sz < 0) { + pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n", + map_name, t->type, (ssize_t)sz); + return sz; + } + if (map_def->key_size && map_def->key_size != sz) { + pr_warn("map '%s': conflicting key size %u != %zd.\n", + map_name, map_def->key_size, (ssize_t)sz); + return -EINVAL; + } + map_def->key_size = sz; + map_def->key_type_id = t->type; + map_def->parts |= MAP_DEF_KEY_SIZE | MAP_DEF_KEY_TYPE; + } else if (strcmp(name, "value_size") == 0) { + __u32 sz; + + if (!get_map_field_int(map_name, btf, m, &sz)) + return -EINVAL; + if (map_def->value_size && map_def->value_size != sz) { + pr_warn("map '%s': conflicting value size %u != %u.\n", + map_name, map_def->value_size, sz); + return -EINVAL; + } + map_def->value_size = sz; + map_def->parts |= MAP_DEF_VALUE_SIZE; + } else if (strcmp(name, "value") == 0) { + __s64 sz; + + t = btf__type_by_id(btf, m->type); + if (!t) { + pr_warn("map '%s': value type [%d] not found.\n", + map_name, m->type); + return -EINVAL; + } + if (!btf_is_ptr(t)) { + pr_warn("map '%s': value spec is not PTR: %s.\n", + map_name, btf_kind_str(t)); + return -EINVAL; + } + sz = btf__resolve_size(btf, t->type); + if (sz < 0) { + pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n", + map_name, t->type, (ssize_t)sz); + return sz; + } + if (map_def->value_size && map_def->value_size != sz) { + pr_warn("map '%s': conflicting value size %u != %zd.\n", + map_name, map_def->value_size, (ssize_t)sz); + return -EINVAL; + } + map_def->value_size = sz; + map_def->value_type_id = t->type; + map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE; + } + else if (strcmp(name, "values") == 0) { + bool is_map_in_map = bpf_map_type__is_map_in_map(map_def->map_type); + bool is_prog_array = map_def->map_type == BPF_MAP_TYPE_PROG_ARRAY; + const char *desc = is_map_in_map ? 
"map-in-map inner" : "prog-array value"; + char inner_map_name[128]; + int err; + + if (is_inner) { + pr_warn("map '%s': multi-level inner maps not supported.\n", + map_name); + return -ENOTSUP; + } + if (i != vlen - 1) { + pr_warn("map '%s': '%s' member should be last.\n", + map_name, name); + return -EINVAL; + } + if (!is_map_in_map && !is_prog_array) { + pr_warn("map '%s': should be map-in-map or prog-array.\n", + map_name); + return -ENOTSUP; + } + if (map_def->value_size && map_def->value_size != 4) { + pr_warn("map '%s': conflicting value size %u != 4.\n", + map_name, map_def->value_size); + return -EINVAL; + } + map_def->value_size = 4; + t = btf__type_by_id(btf, m->type); + if (!t) { + pr_warn("map '%s': %s type [%d] not found.\n", + map_name, desc, m->type); + return -EINVAL; + } + if (!btf_is_array(t) || btf_array(t)->nelems) { + pr_warn("map '%s': %s spec is not a zero-sized array.\n", + map_name, desc); + return -EINVAL; + } + t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL); + if (!btf_is_ptr(t)) { + pr_warn("map '%s': %s def is of unexpected kind %s.\n", + map_name, desc, btf_kind_str(t)); + return -EINVAL; + } + t = skip_mods_and_typedefs(btf, t->type, NULL); + if (is_prog_array) { + if (!btf_is_func_proto(t)) { + pr_warn("map '%s': prog-array value def is of unexpected kind %s.\n", + map_name, btf_kind_str(t)); + return -EINVAL; + } + continue; + } + if (!btf_is_struct(t)) { + pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n", + map_name, btf_kind_str(t)); + return -EINVAL; + } + + snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", map_name); + err = parse_btf_map_def(inner_map_name, btf, t, strict, inner_def, NULL); + if (err) + return err; + + map_def->parts |= MAP_DEF_INNER_MAP; + } else if (strcmp(name, "pinning") == 0) { + __u32 val; + + if (is_inner) { + pr_warn("map '%s': inner def can't be pinned.\n", map_name); + return -EINVAL; + } + if (!get_map_field_int(map_name, btf, m, &val)) + return -EINVAL; + if (val != LIBBPF_PIN_NONE && val != LIBBPF_PIN_BY_NAME) { + pr_warn("map '%s': invalid pinning value %u.\n", + map_name, val); + return -EINVAL; + } + map_def->pinning = val; + map_def->parts |= MAP_DEF_PINNING; + } else if (strcmp(name, "map_extra") == 0) { + __u32 map_extra; + + if (!get_map_field_int(map_name, btf, m, &map_extra)) + return -EINVAL; + map_def->map_extra = map_extra; + map_def->parts |= MAP_DEF_MAP_EXTRA; + } else { + if (strict) { + pr_warn("map '%s': unknown field '%s'.\n", map_name, name); + return -ENOTSUP; + } + pr_debug("map '%s': ignoring unknown field '%s'.\n", map_name, name); + } + } + + if (map_def->map_type == BPF_MAP_TYPE_UNSPEC) { + pr_warn("map '%s': map type isn't specified.\n", map_name); + return -EINVAL; + } + + return 0; +} + +static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def) +{ + map->def.type = def->map_type; + map->def.key_size = def->key_size; + map->def.value_size = def->value_size; + map->def.max_entries = def->max_entries; + map->def.map_flags = def->map_flags; + map->map_extra = def->map_extra; + + map->numa_node = def->numa_node; + map->btf_key_type_id = def->key_type_id; + map->btf_value_type_id = def->value_type_id; + + if (def->parts & MAP_DEF_MAP_TYPE) + pr_debug("map '%s': found type = %u.\n", map->name, def->map_type); + + if (def->parts & MAP_DEF_KEY_TYPE) + pr_debug("map '%s': found key [%u], sz = %u.\n", + map->name, def->key_type_id, def->key_size); + else if (def->parts & MAP_DEF_KEY_SIZE) + pr_debug("map '%s': found key_size = %u.\n", 
map->name, def->key_size); + + if (def->parts & MAP_DEF_VALUE_TYPE) + pr_debug("map '%s': found value [%u], sz = %u.\n", + map->name, def->value_type_id, def->value_size); + else if (def->parts & MAP_DEF_VALUE_SIZE) + pr_debug("map '%s': found value_size = %u.\n", map->name, def->value_size); + + if (def->parts & MAP_DEF_MAX_ENTRIES) + pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries); + if (def->parts & MAP_DEF_MAP_FLAGS) + pr_debug("map '%s': found map_flags = 0x%x.\n", map->name, def->map_flags); + if (def->parts & MAP_DEF_MAP_EXTRA) + pr_debug("map '%s': found map_extra = 0x%llx.\n", map->name, + (unsigned long long)def->map_extra); + if (def->parts & MAP_DEF_PINNING) + pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning); + if (def->parts & MAP_DEF_NUMA_NODE) + pr_debug("map '%s': found numa_node = %u.\n", map->name, def->numa_node); + + if (def->parts & MAP_DEF_INNER_MAP) + pr_debug("map '%s': found inner map definition.\n", map->name); +} + +static const char *btf_var_linkage_str(__u32 linkage) +{ + switch (linkage) { + case BTF_VAR_STATIC: return "static"; + case BTF_VAR_GLOBAL_ALLOCATED: return "global"; + case BTF_VAR_GLOBAL_EXTERN: return "extern"; + default: return "unknown"; + } +} + +static int bpf_object__init_user_btf_map(struct bpf_object *obj, + const struct btf_type *sec, + int var_idx, int sec_idx, + const Elf_Data *data, bool strict, + const char *pin_root_path) +{ + struct btf_map_def map_def = {}, inner_def = {}; + const struct btf_type *var, *def; + const struct btf_var_secinfo *vi; + const struct btf_var *var_extra; + const char *map_name; + struct bpf_map *map; + int err; + + vi = btf_var_secinfos(sec) + var_idx; + var = btf__type_by_id(obj->btf, vi->type); + var_extra = btf_var(var); + map_name = btf__name_by_offset(obj->btf, var->name_off); + + if (map_name == NULL || map_name[0] == '\0') { + pr_warn("map #%d: empty name.\n", var_idx); + return -EINVAL; + } + if ((__u64)vi->offset + vi->size > data->d_size) { + pr_warn("map '%s' BTF data is corrupted.\n", map_name); + return -EINVAL; + } + if (!btf_is_var(var)) { + pr_warn("map '%s': unexpected var kind %s.\n", + map_name, btf_kind_str(var)); + return -EINVAL; + } + if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED) { + pr_warn("map '%s': unsupported map linkage %s.\n", + map_name, btf_var_linkage_str(var_extra->linkage)); + return -EOPNOTSUPP; + } + + def = skip_mods_and_typedefs(obj->btf, var->type, NULL); + if (!btf_is_struct(def)) { + pr_warn("map '%s': unexpected def kind %s.\n", + map_name, btf_kind_str(var)); + return -EINVAL; + } + if (def->size > vi->size) { + pr_warn("map '%s': invalid def size.\n", map_name); + return -EINVAL; + } + + map = bpf_object__add_map(obj); + if (IS_ERR(map)) + return PTR_ERR(map); + map->name = strdup(map_name); + if (!map->name) { + pr_warn("map '%s': failed to alloc map name.\n", map_name); + return -ENOMEM; + } + map->libbpf_type = LIBBPF_MAP_UNSPEC; + map->def.type = BPF_MAP_TYPE_UNSPEC; + map->sec_idx = sec_idx; + map->sec_offset = vi->offset; + map->btf_var_idx = var_idx; + pr_debug("map '%s': at sec_idx %d, offset %zu.\n", + map_name, map->sec_idx, map->sec_offset); + + err = parse_btf_map_def(map->name, obj->btf, def, strict, &map_def, &inner_def); + if (err) + return err; + + fill_map_from_def(map, &map_def); + + if (map_def.pinning == LIBBPF_PIN_BY_NAME) { + err = build_map_pin_path(map, pin_root_path); + if (err) { + pr_warn("map '%s': couldn't build pin path.\n", map->name); + return err; + } + } + + if (map_def.parts & 
MAP_DEF_INNER_MAP) { + map->inner_map = calloc(1, sizeof(*map->inner_map)); + if (!map->inner_map) + return -ENOMEM; + map->inner_map->fd = -1; + map->inner_map->sec_idx = sec_idx; + map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1); + if (!map->inner_map->name) + return -ENOMEM; + sprintf(map->inner_map->name, "%s.inner", map_name); + + fill_map_from_def(map->inner_map, &inner_def); + } + + err = bpf_map_find_btf_info(obj, map); + if (err) + return err; + + return 0; +} + +static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict, + const char *pin_root_path) +{ + const struct btf_type *sec = NULL; + int nr_types, i, vlen, err; + const struct btf_type *t; + const char *name; + Elf_Data *data; + Elf_Scn *scn; + + if (obj->efile.btf_maps_shndx < 0) + return 0; + + scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx); + data = elf_sec_data(obj, scn); + if (!scn || !data) { + pr_warn("elf: failed to get %s map definitions for %s\n", + MAPS_ELF_SEC, obj->path); + return -EINVAL; + } + + nr_types = btf__type_cnt(obj->btf); + for (i = 1; i < nr_types; i++) { + t = btf__type_by_id(obj->btf, i); + if (!btf_is_datasec(t)) + continue; + name = btf__name_by_offset(obj->btf, t->name_off); + if (strcmp(name, MAPS_ELF_SEC) == 0) { + sec = t; + obj->efile.btf_maps_sec_btf_id = i; + break; + } + } + + if (!sec) { + pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC); + return -ENOENT; + } + + vlen = btf_vlen(sec); + for (i = 0; i < vlen; i++) { + err = bpf_object__init_user_btf_map(obj, sec, i, + obj->efile.btf_maps_shndx, + data, strict, + pin_root_path); + if (err) + return err; + } + + return 0; +} + +static int bpf_object__init_maps(struct bpf_object *obj, + const struct bpf_object_open_opts *opts) +{ + const char *pin_root_path; + bool strict; + int err; + + strict = !OPTS_GET(opts, relaxed_maps, false); + pin_root_path = OPTS_GET(opts, pin_root_path, NULL); + + err = bpf_object__init_user_maps(obj, strict); + err = err ?: bpf_object__init_user_btf_maps(obj, strict, pin_root_path); + err = err ?: bpf_object__init_global_data_maps(obj); + err = err ?: bpf_object__init_kconfig_map(obj); + err = err ?: bpf_object__init_struct_ops_maps(obj); + + return err; +} + +static bool section_have_execinstr(struct bpf_object *obj, int idx) +{ + Elf64_Shdr *sh; + + sh = elf_sec_hdr(obj, elf_sec_by_idx(obj, idx)); + if (!sh) + return false; + + return sh->sh_flags & SHF_EXECINSTR; +} + +static bool btf_needs_sanitization(struct bpf_object *obj) +{ + bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC); + bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC); + bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT); + bool has_func = kernel_supports(obj, FEAT_BTF_FUNC); + bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG); + bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG); + + return !has_func || !has_datasec || !has_func_global || !has_float || + !has_decl_tag || !has_type_tag; +} + +static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf) +{ + bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC); + bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC); + bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT); + bool has_func = kernel_supports(obj, FEAT_BTF_FUNC); + bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG); + bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG); + struct btf_type *t; + int i, j, vlen; + + for (i = 1; i < btf__type_cnt(btf); i++) { + t = (struct btf_type 
*)btf__type_by_id(btf, i); + + if ((!has_datasec && btf_is_var(t)) || (!has_decl_tag && btf_is_decl_tag(t))) { + /* replace VAR/DECL_TAG with INT */ + t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0); + /* + * using size = 1 is the safest choice, 4 will be too + * big and cause kernel BTF validation failure if + * original variable took less than 4 bytes + */ + t->size = 1; + *(int *)(t + 1) = BTF_INT_ENC(0, 0, 8); + } else if (!has_datasec && btf_is_datasec(t)) { + /* replace DATASEC with STRUCT */ + const struct btf_var_secinfo *v = btf_var_secinfos(t); + struct btf_member *m = btf_members(t); + struct btf_type *vt; + char *name; + + name = (char *)btf__name_by_offset(btf, t->name_off); + while (*name) { + if (*name == '.') + *name = '_'; + name++; + } + + vlen = btf_vlen(t); + t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen); + for (j = 0; j < vlen; j++, v++, m++) { + /* order of field assignments is important */ + m->offset = v->offset * 8; + m->type = v->type; + /* preserve variable name as member name */ + vt = (void *)btf__type_by_id(btf, v->type); + m->name_off = vt->name_off; + } + } else if (!has_func && btf_is_func_proto(t)) { + /* replace FUNC_PROTO with ENUM */ + vlen = btf_vlen(t); + t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen); + t->size = sizeof(__u32); /* kernel enforced */ + } else if (!has_func && btf_is_func(t)) { + /* replace FUNC with TYPEDEF */ + t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0); + } else if (!has_func_global && btf_is_func(t)) { + /* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */ + t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0); + } else if (!has_float && btf_is_float(t)) { + /* replace FLOAT with an equally-sized empty STRUCT; + * since C compilers do not accept e.g. "float" as a + * valid struct name, make it anonymous + */ + t->name_off = 0; + t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0); + } else if (!has_type_tag && btf_is_type_tag(t)) { + /* replace TYPE_TAG with a CONST */ + t->name_off = 0; + t->info = BTF_INFO_ENC(BTF_KIND_CONST, 0, 0); + } + } +} + +static bool libbpf_needs_btf(const struct bpf_object *obj) +{ + return obj->efile.btf_maps_shndx >= 0 || + obj->efile.st_ops_shndx >= 0 || + obj->nr_extern > 0; +} + +static bool kernel_needs_btf(const struct bpf_object *obj) +{ + return obj->efile.st_ops_shndx >= 0; +} + +static int bpf_object__init_btf(struct bpf_object *obj, + Elf_Data *btf_data, + Elf_Data *btf_ext_data) +{ + int err = -ENOENT; + + if (btf_data) { + obj->btf = btf__new(btf_data->d_buf, btf_data->d_size); + err = libbpf_get_error(obj->btf); + if (err) { + obj->btf = NULL; + pr_warn("Error loading ELF section %s: %d.\n", BTF_ELF_SEC, err); + goto out; + } + /* enforce 8-byte pointers for BPF-targeted BTFs */ + btf__set_pointer_size(obj->btf, 8); + } + if (btf_ext_data) { + struct btf_ext_info *ext_segs[3]; + int seg_num, sec_num; + + if (!obj->btf) { + pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n", + BTF_EXT_ELF_SEC, BTF_ELF_SEC); + goto out; + } + obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size); + err = libbpf_get_error(obj->btf_ext); + if (err) { + pr_warn("Error loading ELF section %s: %d. 
Ignored and continue.\n", + BTF_EXT_ELF_SEC, err); + obj->btf_ext = NULL; + goto out; + } + + /* setup .BTF.ext to ELF section mapping */ + ext_segs[0] = &obj->btf_ext->func_info; + ext_segs[1] = &obj->btf_ext->line_info; + ext_segs[2] = &obj->btf_ext->core_relo_info; + for (seg_num = 0; seg_num < ARRAY_SIZE(ext_segs); seg_num++) { + struct btf_ext_info *seg = ext_segs[seg_num]; + const struct btf_ext_info_sec *sec; + const char *sec_name; + Elf_Scn *scn; + + if (seg->sec_cnt == 0) + continue; + + seg->sec_idxs = calloc(seg->sec_cnt, sizeof(*seg->sec_idxs)); + if (!seg->sec_idxs) { + err = -ENOMEM; + goto out; + } + + sec_num = 0; + for_each_btf_ext_sec(seg, sec) { + /* preventively increment index to avoid doing + * this before every continue below + */ + sec_num++; + + sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off); + if (str_is_empty(sec_name)) + continue; + scn = elf_sec_by_name(obj, sec_name); + if (!scn) + continue; + + seg->sec_idxs[sec_num - 1] = elf_ndxscn(scn); + } + } + } +out: + if (err && libbpf_needs_btf(obj)) { + pr_warn("BTF is required, but is missing or corrupted.\n"); + return err; + } + return 0; +} + +static int compare_vsi_off(const void *_a, const void *_b) +{ + const struct btf_var_secinfo *a = _a; + const struct btf_var_secinfo *b = _b; + + return a->offset - b->offset; +} + +static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf, + struct btf_type *t) +{ + __u32 size = 0, off = 0, i, vars = btf_vlen(t); + const char *name = btf__name_by_offset(btf, t->name_off); + const struct btf_type *t_var; + struct btf_var_secinfo *vsi; + const struct btf_var *var; + int ret; + + if (!name) { + pr_debug("No name found in string section for DATASEC kind.\n"); + return -ENOENT; + } + + /* .extern datasec size and var offsets were set correctly during + * extern collection step, so just skip straight to sorting variables + */ + if (t->size) + goto sort_vars; + + ret = find_elf_sec_sz(obj, name, &size); + if (ret || !size) { + pr_debug("Invalid size for section %s: %u bytes\n", name, size); + return -ENOENT; + } + + t->size = size; + + for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) { + t_var = btf__type_by_id(btf, vsi->type); + if (!t_var || !btf_is_var(t_var)) { + pr_debug("Non-VAR type seen in section %s\n", name); + return -EINVAL; + } + + var = btf_var(t_var); + if (var->linkage == BTF_VAR_STATIC) + continue; + + name = btf__name_by_offset(btf, t_var->name_off); + if (!name) { + pr_debug("No name found in string section for VAR kind\n"); + return -ENOENT; + } + + ret = find_elf_var_offset(obj, name, &off); + if (ret) { + pr_debug("No offset found in symbol table for VAR %s\n", + name); + return -ENOENT; + } + + vsi->offset = off; + } + +sort_vars: + qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off); + return 0; +} + +static int btf_finalize_data(struct bpf_object *obj, struct btf *btf) +{ + int err = 0; + __u32 i, n = btf__type_cnt(btf); + + for (i = 1; i < n; i++) { + struct btf_type *t = btf_type_by_id(btf, i); + + /* Loader needs to fix up some of the things compiler + * couldn't get its hands on while emitting BTF. This + * is section size and global variable offset. We use + * the info from the ELF itself for this purpose. 
+ */ + if (btf_is_datasec(t)) { + err = btf_fixup_datasec(obj, btf, t); + if (err) + break; + } + } + + return libbpf_err(err); +} + +int btf__finalize_data(struct bpf_object *obj, struct btf *btf) +{ + return btf_finalize_data(obj, btf); +} + +static int bpf_object__finalize_btf(struct bpf_object *obj) +{ + int err; + + if (!obj->btf) + return 0; + + err = btf_finalize_data(obj, obj->btf); + if (err) { + pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err); + return err; + } + + return 0; +} + +static bool prog_needs_vmlinux_btf(struct bpf_program *prog) +{ + if (prog->type == BPF_PROG_TYPE_STRUCT_OPS || + prog->type == BPF_PROG_TYPE_LSM) + return true; + + /* BPF_PROG_TYPE_TRACING programs which do not attach to other programs + * also need vmlinux BTF + */ + if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd) + return true; + + return false; +} + +static bool obj_needs_vmlinux_btf(const struct bpf_object *obj) +{ + struct bpf_program *prog; + int i; + + /* CO-RE relocations need kernel BTF, only when btf_custom_path + * is not specified + */ + if (obj->btf_ext && obj->btf_ext->core_relo_info.len && !obj->btf_custom_path) + return true; + + /* Support for typed ksyms needs kernel BTF */ + for (i = 0; i < obj->nr_extern; i++) { + const struct extern_desc *ext; + + ext = &obj->externs[i]; + if (ext->type == EXT_KSYM && ext->ksym.type_id) + return true; + } + + bpf_object__for_each_program(prog, obj) { + if (!prog->autoload) + continue; + if (prog_needs_vmlinux_btf(prog)) + return true; + } + + return false; +} + +static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force) +{ + int err; + + /* btf_vmlinux could be loaded earlier */ + if (obj->btf_vmlinux || obj->gen_loader) + return 0; + + if (!force && !obj_needs_vmlinux_btf(obj)) + return 0; + + obj->btf_vmlinux = btf__load_vmlinux_btf(); + err = libbpf_get_error(obj->btf_vmlinux); + if (err) { + pr_warn("Error loading vmlinux BTF: %d\n", err); + obj->btf_vmlinux = NULL; + return err; + } + return 0; +} + +static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj) +{ + struct btf *kern_btf = obj->btf; + bool btf_mandatory, sanitize; + int i, err = 0; + + if (!obj->btf) + return 0; + + if (!kernel_supports(obj, FEAT_BTF)) { + if (kernel_needs_btf(obj)) { + err = -EOPNOTSUPP; + goto report; + } + pr_debug("Kernel doesn't support BTF, skipping uploading it.\n"); + return 0; + } + + /* Even though some subprogs are global/weak, user might prefer more + * permissive BPF verification process that BPF verifier performs for + * static functions, taking into account more context from the caller + * functions. In such case, they need to mark such subprogs with + * __attribute__((visibility("hidden"))) and libbpf will adjust + * corresponding FUNC BTF type to be marked as static and trigger more + * involved BPF verification process. 
+ */ + for (i = 0; i < obj->nr_programs; i++) { + struct bpf_program *prog = &obj->programs[i]; + struct btf_type *t; + const char *name; + int j, n; + + if (!prog->mark_btf_static || !prog_is_subprog(obj, prog)) + continue; + + n = btf__type_cnt(obj->btf); + for (j = 1; j < n; j++) { + t = btf_type_by_id(obj->btf, j); + if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL) + continue; + + name = btf__str_by_offset(obj->btf, t->name_off); + if (strcmp(name, prog->name) != 0) + continue; + + t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_STATIC, 0); + break; + } + } + + sanitize = btf_needs_sanitization(obj); + if (sanitize) { + const void *raw_data; + __u32 sz; + + /* clone BTF to sanitize a copy and leave the original intact */ + raw_data = btf__raw_data(obj->btf, &sz); + kern_btf = btf__new(raw_data, sz); + err = libbpf_get_error(kern_btf); + if (err) + return err; + + /* enforce 8-byte pointers for BPF-targeted BTFs */ + btf__set_pointer_size(obj->btf, 8); + bpf_object__sanitize_btf(obj, kern_btf); + } + + if (obj->gen_loader) { + __u32 raw_size = 0; + const void *raw_data = btf__raw_data(kern_btf, &raw_size); + + if (!raw_data) + return -ENOMEM; + bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size); + /* Pretend to have valid FD to pass various fd >= 0 checks. + * This fd == 0 will not be used with any syscall and will be reset to -1 eventually. + */ + btf__set_fd(kern_btf, 0); + } else { + /* currently BPF_BTF_LOAD only supports log_level 1 */ + err = btf_load_into_kernel(kern_btf, obj->log_buf, obj->log_size, + obj->log_level ? 1 : 0); + } + if (sanitize) { + if (!err) { + /* move fd to libbpf's BTF */ + btf__set_fd(obj->btf, btf__fd(kern_btf)); + btf__set_fd(kern_btf, -1); + } + btf__free(kern_btf); + } +report: + if (err) { + btf_mandatory = kernel_needs_btf(obj); + pr_warn("Error loading .BTF into kernel: %d. %s\n", err, + btf_mandatory ? "BTF is mandatory, can't proceed." 
+ : "BTF is optional, ignoring."); + if (!btf_mandatory) + err = 0; + } + return err; +} + +static const char *elf_sym_str(const struct bpf_object *obj, size_t off) +{ + const char *name; + + name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off); + if (!name) { + pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n", + off, obj->path, elf_errmsg(-1)); + return NULL; + } + + return name; +} + +static const char *elf_sec_str(const struct bpf_object *obj, size_t off) +{ + const char *name; + + name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off); + if (!name) { + pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n", + off, obj->path, elf_errmsg(-1)); + return NULL; + } + + return name; +} + +static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx) +{ + Elf_Scn *scn; + + scn = elf_getscn(obj->efile.elf, idx); + if (!scn) { + pr_warn("elf: failed to get section(%zu) from %s: %s\n", + idx, obj->path, elf_errmsg(-1)); + return NULL; + } + return scn; +} + +static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name) +{ + Elf_Scn *scn = NULL; + Elf *elf = obj->efile.elf; + const char *sec_name; + + while ((scn = elf_nextscn(elf, scn)) != NULL) { + sec_name = elf_sec_name(obj, scn); + if (!sec_name) + return NULL; + + if (strcmp(sec_name, name) != 0) + continue; + + return scn; + } + return NULL; +} + +static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn) +{ + Elf64_Shdr *shdr; + + if (!scn) + return NULL; + + shdr = elf64_getshdr(scn); + if (!shdr) { + pr_warn("elf: failed to get section(%zu) header from %s: %s\n", + elf_ndxscn(scn), obj->path, elf_errmsg(-1)); + return NULL; + } + + return shdr; +} + +static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn) +{ + const char *name; + Elf64_Shdr *sh; + + if (!scn) + return NULL; + + sh = elf_sec_hdr(obj, scn); + if (!sh) + return NULL; + + name = elf_sec_str(obj, sh->sh_name); + if (!name) { + pr_warn("elf: failed to get section(%zu) name from %s: %s\n", + elf_ndxscn(scn), obj->path, elf_errmsg(-1)); + return NULL; + } + + return name; +} + +static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn) +{ + Elf_Data *data; + + if (!scn) + return NULL; + + data = elf_getdata(scn, 0); + if (!data) { + pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n", + elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "", + obj->path, elf_errmsg(-1)); + return NULL; + } + + return data; +} + +static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx) +{ + if (idx >= obj->efile.symbols->d_size / sizeof(Elf64_Sym)) + return NULL; + + return (Elf64_Sym *)obj->efile.symbols->d_buf + idx; +} + +static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx) +{ + if (idx >= data->d_size / sizeof(Elf64_Rel)) + return NULL; + + return (Elf64_Rel *)data->d_buf + idx; +} + +static bool is_sec_name_dwarf(const char *name) +{ + /* approximation, but the actual list is too long */ + return str_has_pfx(name, ".debug_"); +} + +static bool ignore_elf_section(Elf64_Shdr *hdr, const char *name) +{ + /* no special handling of .strtab */ + if (hdr->sh_type == SHT_STRTAB) + return true; + + /* ignore .llvm_addrsig section as well */ + if (hdr->sh_type == SHT_LLVM_ADDRSIG) + return true; + + /* no subprograms will lead to an empty .text section, ignore it */ + if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 && + strcmp(name, ".text") == 0) + return true; + + /* DWARF sections */ + if (is_sec_name_dwarf(name)) + return 
true; + + if (str_has_pfx(name, ".rel")) { + name += sizeof(".rel") - 1; + /* DWARF section relocations */ + if (is_sec_name_dwarf(name)) + return true; + + /* .BTF and .BTF.ext don't need relocations */ + if (strcmp(name, BTF_ELF_SEC) == 0 || + strcmp(name, BTF_EXT_ELF_SEC) == 0) + return true; + } + + return false; +} + +static int cmp_progs(const void *_a, const void *_b) +{ + const struct bpf_program *a = _a; + const struct bpf_program *b = _b; + + if (a->sec_idx != b->sec_idx) + return a->sec_idx < b->sec_idx ? -1 : 1; + + /* sec_insn_off can't be the same within the section */ + return a->sec_insn_off < b->sec_insn_off ? -1 : 1; +} + +static int bpf_object__elf_collect(struct bpf_object *obj) +{ + struct elf_sec_desc *sec_desc; + Elf *elf = obj->efile.elf; + Elf_Data *btf_ext_data = NULL; + Elf_Data *btf_data = NULL; + int idx = 0, err = 0; + const char *name; + Elf_Data *data; + Elf_Scn *scn; + Elf64_Shdr *sh; + + /* ELF section indices are 0-based, but sec #0 is special "invalid" + * section. e_shnum does include sec #0, so e_shnum is the necessary + * size of an array to keep all the sections. + */ + obj->efile.sec_cnt = obj->efile.ehdr->e_shnum; + obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs)); + if (!obj->efile.secs) + return -ENOMEM; + + /* a bunch of ELF parsing functionality depends on processing symbols, + * so do the first pass and find the symbol table + */ + scn = NULL; + while ((scn = elf_nextscn(elf, scn)) != NULL) { + sh = elf_sec_hdr(obj, scn); + if (!sh) + return -LIBBPF_ERRNO__FORMAT; + + if (sh->sh_type == SHT_SYMTAB) { + if (obj->efile.symbols) { + pr_warn("elf: multiple symbol tables in %s\n", obj->path); + return -LIBBPF_ERRNO__FORMAT; + } + + data = elf_sec_data(obj, scn); + if (!data) + return -LIBBPF_ERRNO__FORMAT; + + idx = elf_ndxscn(scn); + + obj->efile.symbols = data; + obj->efile.symbols_shndx = idx; + obj->efile.strtabidx = sh->sh_link; + } + } + + if (!obj->efile.symbols) { + pr_warn("elf: couldn't find symbol table in %s, stripped object file?\n", + obj->path); + return -ENOENT; + } + + scn = NULL; + while ((scn = elf_nextscn(elf, scn)) != NULL) { + idx = elf_ndxscn(scn); + sec_desc = &obj->efile.secs[idx]; + + sh = elf_sec_hdr(obj, scn); + if (!sh) + return -LIBBPF_ERRNO__FORMAT; + + name = elf_sec_str(obj, sh->sh_name); + if (!name) + return -LIBBPF_ERRNO__FORMAT; + + if (ignore_elf_section(sh, name)) + continue; + + data = elf_sec_data(obj, scn); + if (!data) + return -LIBBPF_ERRNO__FORMAT; + + pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n", + idx, name, (unsigned long)data->d_size, + (int)sh->sh_link, (unsigned long)sh->sh_flags, + (int)sh->sh_type); + + if (strcmp(name, "license") == 0) { + err = bpf_object__init_license(obj, data->d_buf, data->d_size); + if (err) + return err; + } else if (strcmp(name, "version") == 0) { + err = bpf_object__init_kversion(obj, data->d_buf, data->d_size); + if (err) + return err; + } else if (strcmp(name, "maps") == 0) { + obj->efile.maps_shndx = idx; + } else if (strcmp(name, MAPS_ELF_SEC) == 0) { + obj->efile.btf_maps_shndx = idx; + } else if (strcmp(name, BTF_ELF_SEC) == 0) { + if (sh->sh_type != SHT_PROGBITS) + return -LIBBPF_ERRNO__FORMAT; + btf_data = data; + } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) { + if (sh->sh_type != SHT_PROGBITS) + return -LIBBPF_ERRNO__FORMAT; + btf_ext_data = data; + } else if (sh->sh_type == SHT_SYMTAB) { + /* already processed during the first pass above */ + } else if (sh->sh_type == SHT_PROGBITS && data->d_size > 0) { + if 
(sh->sh_flags & SHF_EXECINSTR) { + if (strcmp(name, ".text") == 0) + obj->efile.text_shndx = idx; + err = bpf_object__add_programs(obj, data, name, idx); + if (err) + return err; + } else if (strcmp(name, DATA_SEC) == 0 || + str_has_pfx(name, DATA_SEC ".")) { + sec_desc->sec_type = SEC_DATA; + sec_desc->shdr = sh; + sec_desc->data = data; + } else if (strcmp(name, RODATA_SEC) == 0 || + str_has_pfx(name, RODATA_SEC ".")) { + sec_desc->sec_type = SEC_RODATA; + sec_desc->shdr = sh; + sec_desc->data = data; + } else if (strcmp(name, STRUCT_OPS_SEC) == 0) { + obj->efile.st_ops_data = data; + obj->efile.st_ops_shndx = idx; + } else { + pr_info("elf: skipping unrecognized data section(%d) %s\n", + idx, name); + } + } else if (sh->sh_type == SHT_REL) { + int targ_sec_idx = sh->sh_info; /* points to other section */ + + if (sh->sh_entsize != sizeof(Elf64_Rel) || + targ_sec_idx >= obj->efile.sec_cnt) + return -LIBBPF_ERRNO__FORMAT; + + /* Only do relo for section with exec instructions */ + if (!section_have_execinstr(obj, targ_sec_idx) && + strcmp(name, ".rel" STRUCT_OPS_SEC) && + strcmp(name, ".rel" MAPS_ELF_SEC)) { + pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n", + idx, name, targ_sec_idx, + elf_sec_name(obj, elf_sec_by_idx(obj, targ_sec_idx)) ?: ""); + continue; + } + + sec_desc->sec_type = SEC_RELO; + sec_desc->shdr = sh; + sec_desc->data = data; + } else if (sh->sh_type == SHT_NOBITS && strcmp(name, BSS_SEC) == 0) { + sec_desc->sec_type = SEC_BSS; + sec_desc->shdr = sh; + sec_desc->data = data; + } else { + pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name, + (size_t)sh->sh_size); + } + } + + if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) { + pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path); + return -LIBBPF_ERRNO__FORMAT; + } + + /* sort BPF programs by section name and in-section instruction offset + * for faster search */ + if (obj->nr_programs) + qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs); + + return bpf_object__init_btf(obj, btf_data, btf_ext_data); +} + +static bool sym_is_extern(const Elf64_Sym *sym) +{ + int bind = ELF64_ST_BIND(sym->st_info); + /* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */ + return sym->st_shndx == SHN_UNDEF && + (bind == STB_GLOBAL || bind == STB_WEAK) && + ELF64_ST_TYPE(sym->st_info) == STT_NOTYPE; +} + +static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx) +{ + int bind = ELF64_ST_BIND(sym->st_info); + int type = ELF64_ST_TYPE(sym->st_info); + + /* in .text section */ + if (sym->st_shndx != text_shndx) + return false; + + /* local function */ + if (bind == STB_LOCAL && type == STT_SECTION) + return true; + + /* global function */ + return bind == STB_GLOBAL && type == STT_FUNC; +} + +static int find_extern_btf_id(const struct btf *btf, const char *ext_name) +{ + const struct btf_type *t; + const char *tname; + int i, n; + + if (!btf) + return -ESRCH; + + n = btf__type_cnt(btf); + for (i = 1; i < n; i++) { + t = btf__type_by_id(btf, i); + + if (!btf_is_var(t) && !btf_is_func(t)) + continue; + + tname = btf__name_by_offset(btf, t->name_off); + if (strcmp(tname, ext_name)) + continue; + + if (btf_is_var(t) && + btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN) + return -EINVAL; + + if (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_EXTERN) + return -EINVAL; + + return i; + } + + return -ENOENT; +} + +static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id) { + const struct btf_var_secinfo *vs; + const struct btf_type *t; + 
int i, j, n; + + if (!btf) + return -ESRCH; + + n = btf__type_cnt(btf); + for (i = 1; i < n; i++) { + t = btf__type_by_id(btf, i); + + if (!btf_is_datasec(t)) + continue; + + vs = btf_var_secinfos(t); + for (j = 0; j < btf_vlen(t); j++, vs++) { + if (vs->type == ext_btf_id) + return i; + } + } + + return -ENOENT; +} + +static enum kcfg_type find_kcfg_type(const struct btf *btf, int id, + bool *is_signed) +{ + const struct btf_type *t; + const char *name; + + t = skip_mods_and_typedefs(btf, id, NULL); + name = btf__name_by_offset(btf, t->name_off); + + if (is_signed) + *is_signed = false; + switch (btf_kind(t)) { + case BTF_KIND_INT: { + int enc = btf_int_encoding(t); + + if (enc & BTF_INT_BOOL) + return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN; + if (is_signed) + *is_signed = enc & BTF_INT_SIGNED; + if (t->size == 1) + return KCFG_CHAR; + if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1))) + return KCFG_UNKNOWN; + return KCFG_INT; + } + case BTF_KIND_ENUM: + if (t->size != 4) + return KCFG_UNKNOWN; + if (strcmp(name, "libbpf_tristate")) + return KCFG_UNKNOWN; + return KCFG_TRISTATE; + case BTF_KIND_ARRAY: + if (btf_array(t)->nelems == 0) + return KCFG_UNKNOWN; + if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR) + return KCFG_UNKNOWN; + return KCFG_CHAR_ARR; + default: + return KCFG_UNKNOWN; + } +} + +static int cmp_externs(const void *_a, const void *_b) +{ + const struct extern_desc *a = _a; + const struct extern_desc *b = _b; + + if (a->type != b->type) + return a->type < b->type ? -1 : 1; + + if (a->type == EXT_KCFG) { + /* descending order by alignment requirements */ + if (a->kcfg.align != b->kcfg.align) + return a->kcfg.align > b->kcfg.align ? -1 : 1; + /* ascending order by size, within same alignment class */ + if (a->kcfg.sz != b->kcfg.sz) + return a->kcfg.sz < b->kcfg.sz ? -1 : 1; + } + + /* resolve ties by name */ + return strcmp(a->name, b->name); +} + +static int find_int_btf_id(const struct btf *btf) +{ + const struct btf_type *t; + int i, n; + + n = btf__type_cnt(btf); + for (i = 1; i < n; i++) { + t = btf__type_by_id(btf, i); + + if (btf_is_int(t) && btf_int_bits(t) == 32) + return i; + } + + return 0; +} + +static int add_dummy_ksym_var(struct btf *btf) +{ + int i, int_btf_id, sec_btf_id, dummy_var_btf_id; + const struct btf_var_secinfo *vs; + const struct btf_type *sec; + + if (!btf) + return 0; + + sec_btf_id = btf__find_by_name_kind(btf, KSYMS_SEC, + BTF_KIND_DATASEC); + if (sec_btf_id < 0) + return 0; + + sec = btf__type_by_id(btf, sec_btf_id); + vs = btf_var_secinfos(sec); + for (i = 0; i < btf_vlen(sec); i++, vs++) { + const struct btf_type *vt; + + vt = btf__type_by_id(btf, vs->type); + if (btf_is_func(vt)) + break; + } + + /* No func in ksyms sec. No need to add dummy var. 
*/ + if (i == btf_vlen(sec)) + return 0; + + int_btf_id = find_int_btf_id(btf); + dummy_var_btf_id = btf__add_var(btf, + "dummy_ksym", + BTF_VAR_GLOBAL_ALLOCATED, + int_btf_id); + if (dummy_var_btf_id < 0) + pr_warn("cannot create a dummy_ksym var\n"); + + return dummy_var_btf_id; +} + +static int bpf_object__collect_externs(struct bpf_object *obj) +{ + struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL; + const struct btf_type *t; + struct extern_desc *ext; + int i, n, off, dummy_var_btf_id; + const char *ext_name, *sec_name; + Elf_Scn *scn; + Elf64_Shdr *sh; + + if (!obj->efile.symbols) + return 0; + + scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx); + sh = elf_sec_hdr(obj, scn); + if (!sh || sh->sh_entsize != sizeof(Elf64_Sym)) + return -LIBBPF_ERRNO__FORMAT; + + dummy_var_btf_id = add_dummy_ksym_var(obj->btf); + if (dummy_var_btf_id < 0) + return dummy_var_btf_id; + + n = sh->sh_size / sh->sh_entsize; + pr_debug("looking for externs among %d symbols...\n", n); + + for (i = 0; i < n; i++) { + Elf64_Sym *sym = elf_sym_by_idx(obj, i); + + if (!sym) + return -LIBBPF_ERRNO__FORMAT; + if (!sym_is_extern(sym)) + continue; + ext_name = elf_sym_str(obj, sym->st_name); + if (!ext_name || !ext_name[0]) + continue; + + ext = obj->externs; + ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext)); + if (!ext) + return -ENOMEM; + obj->externs = ext; + ext = &ext[obj->nr_extern]; + memset(ext, 0, sizeof(*ext)); + obj->nr_extern++; + + ext->btf_id = find_extern_btf_id(obj->btf, ext_name); + if (ext->btf_id <= 0) { + pr_warn("failed to find BTF for extern '%s': %d\n", + ext_name, ext->btf_id); + return ext->btf_id; + } + t = btf__type_by_id(obj->btf, ext->btf_id); + ext->name = btf__name_by_offset(obj->btf, t->name_off); + ext->sym_idx = i; + ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK; + + ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id); + if (ext->sec_btf_id <= 0) { + pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n", + ext_name, ext->btf_id, ext->sec_btf_id); + return ext->sec_btf_id; + } + sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id); + sec_name = btf__name_by_offset(obj->btf, sec->name_off); + + if (strcmp(sec_name, KCONFIG_SEC) == 0) { + if (btf_is_func(t)) { + pr_warn("extern function %s is unsupported under %s section\n", + ext->name, KCONFIG_SEC); + return -ENOTSUP; + } + kcfg_sec = sec; + ext->type = EXT_KCFG; + ext->kcfg.sz = btf__resolve_size(obj->btf, t->type); + if (ext->kcfg.sz <= 0) { + pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n", + ext_name, ext->kcfg.sz); + return ext->kcfg.sz; + } + ext->kcfg.align = btf__align_of(obj->btf, t->type); + if (ext->kcfg.align <= 0) { + pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n", + ext_name, ext->kcfg.align); + return -EINVAL; + } + ext->kcfg.type = find_kcfg_type(obj->btf, t->type, + &ext->kcfg.is_signed); + if (ext->kcfg.type == KCFG_UNKNOWN) { + pr_warn("extern (kcfg) '%s' type is unsupported\n", ext_name); + return -ENOTSUP; + } + } else if (strcmp(sec_name, KSYMS_SEC) == 0) { + ksym_sec = sec; + ext->type = EXT_KSYM; + skip_mods_and_typedefs(obj->btf, t->type, + &ext->ksym.type_id); + } else { + pr_warn("unrecognized extern section '%s'\n", sec_name); + return -ENOTSUP; + } + } + pr_debug("collected %d externs total\n", obj->nr_extern); + + if (!obj->nr_extern) + return 0; + + /* sort externs by type, for kcfg ones also by (align, size, name) */ + qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs); + + /* for .ksyms section, 
we need to turn all externs into allocated + * variables in BTF to pass kernel verification; we do this by + * pretending that each extern is a 8-byte variable + */ + if (ksym_sec) { + /* find existing 4-byte integer type in BTF to use for fake + * extern variables in DATASEC + */ + int int_btf_id = find_int_btf_id(obj->btf); + /* For extern function, a dummy_var added earlier + * will be used to replace the vs->type and + * its name string will be used to refill + * the missing param's name. + */ + const struct btf_type *dummy_var; + + dummy_var = btf__type_by_id(obj->btf, dummy_var_btf_id); + for (i = 0; i < obj->nr_extern; i++) { + ext = &obj->externs[i]; + if (ext->type != EXT_KSYM) + continue; + pr_debug("extern (ksym) #%d: symbol %d, name %s\n", + i, ext->sym_idx, ext->name); + } + + sec = ksym_sec; + n = btf_vlen(sec); + for (i = 0, off = 0; i < n; i++, off += sizeof(int)) { + struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i; + struct btf_type *vt; + + vt = (void *)btf__type_by_id(obj->btf, vs->type); + ext_name = btf__name_by_offset(obj->btf, vt->name_off); + ext = find_extern_by_name(obj, ext_name); + if (!ext) { + pr_warn("failed to find extern definition for BTF %s '%s'\n", + btf_kind_str(vt), ext_name); + return -ESRCH; + } + if (btf_is_func(vt)) { + const struct btf_type *func_proto; + struct btf_param *param; + int j; + + func_proto = btf__type_by_id(obj->btf, + vt->type); + param = btf_params(func_proto); + /* Reuse the dummy_var string if the + * func proto does not have param name. + */ + for (j = 0; j < btf_vlen(func_proto); j++) + if (param[j].type && !param[j].name_off) + param[j].name_off = + dummy_var->name_off; + vs->type = dummy_var_btf_id; + vt->info &= ~0xffff; + vt->info |= BTF_FUNC_GLOBAL; + } else { + btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED; + vt->type = int_btf_id; + } + vs->offset = off; + vs->size = sizeof(int); + } + sec->size = off; + } + + if (kcfg_sec) { + sec = kcfg_sec; + /* for kcfg externs calculate their offsets within a .kconfig map */ + off = 0; + for (i = 0; i < obj->nr_extern; i++) { + ext = &obj->externs[i]; + if (ext->type != EXT_KCFG) + continue; + + ext->kcfg.data_off = roundup(off, ext->kcfg.align); + off = ext->kcfg.data_off + ext->kcfg.sz; + pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n", + i, ext->sym_idx, ext->kcfg.data_off, ext->name); + } + sec->size = off; + n = btf_vlen(sec); + for (i = 0; i < n; i++) { + struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i; + + t = btf__type_by_id(obj->btf, vs->type); + ext_name = btf__name_by_offset(obj->btf, t->name_off); + ext = find_extern_by_name(obj, ext_name); + if (!ext) { + pr_warn("failed to find extern definition for BTF var '%s'\n", + ext_name); + return -ESRCH; + } + btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED; + vs->offset = ext->kcfg.data_off; + } + } + return 0; +} + +struct bpf_program * +bpf_object__find_program_by_title(const struct bpf_object *obj, + const char *title) +{ + struct bpf_program *pos; + + bpf_object__for_each_program(pos, obj) { + if (pos->sec_name && !strcmp(pos->sec_name, title)) + return pos; + } + return errno = ENOENT, NULL; +} + +static bool prog_is_subprog(const struct bpf_object *obj, + const struct bpf_program *prog) +{ + /* For legacy reasons, libbpf supports an entry-point BPF programs + * without SEC() attribute, i.e., those in the .text section. 
But if + * there are 2 or more such programs in the .text section, they all + * must be subprograms called from entry-point BPF programs in + * designated SEC()'tions, otherwise there is no way to distinguish + * which of those programs should be loaded vs which are a subprogram. + * Similarly, if there is a function/program in .text and at least one + * other BPF program with custom SEC() attribute, then we just assume + * .text programs are subprograms (even if they are not called from + * other programs), because libbpf never explicitly supported mixing + * SEC()-designated BPF programs and .text entry-point BPF programs. + * + * In libbpf 1.0 strict mode, we always consider .text + * programs to be subprograms. + */ + + if (libbpf_mode & LIBBPF_STRICT_SEC_NAME) + return prog->sec_idx == obj->efile.text_shndx; + + return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1; +} + +struct bpf_program * +bpf_object__find_program_by_name(const struct bpf_object *obj, + const char *name) +{ + struct bpf_program *prog; + + bpf_object__for_each_program(prog, obj) { + if (prog_is_subprog(obj, prog)) + continue; + if (!strcmp(prog->name, name)) + return prog; + } + return errno = ENOENT, NULL; +} + +static bool bpf_object__shndx_is_data(const struct bpf_object *obj, + int shndx) +{ + switch (obj->efile.secs[shndx].sec_type) { + case SEC_BSS: + case SEC_DATA: + case SEC_RODATA: + return true; + default: + return false; + } +} + +static bool bpf_object__shndx_is_maps(const struct bpf_object *obj, + int shndx) +{ + return shndx == obj->efile.maps_shndx || + shndx == obj->efile.btf_maps_shndx; +} + +static enum libbpf_map_type +bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx) +{ + if (shndx == obj->efile.symbols_shndx) + return LIBBPF_MAP_KCONFIG; + + switch (obj->efile.secs[shndx].sec_type) { + case SEC_BSS: + return LIBBPF_MAP_BSS; + case SEC_DATA: + return LIBBPF_MAP_DATA; + case SEC_RODATA: + return LIBBPF_MAP_RODATA; + default: + return LIBBPF_MAP_UNSPEC; + } +} + +static int bpf_program__record_reloc(struct bpf_program *prog, + struct reloc_desc *reloc_desc, + __u32 insn_idx, const char *sym_name, + const Elf64_Sym *sym, const Elf64_Rel *rel) +{ + struct bpf_insn *insn = &prog->insns[insn_idx]; + size_t map_idx, nr_maps = prog->obj->nr_maps; + struct bpf_object *obj = prog->obj; + __u32 shdr_idx = sym->st_shndx; + enum libbpf_map_type type; + const char *sym_sec_name; + struct bpf_map *map; + + if (!is_call_insn(insn) && !is_ldimm64_insn(insn)) { + pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n", + prog->name, sym_name, insn_idx, insn->code); + return -LIBBPF_ERRNO__RELOC; + } + + if (sym_is_extern(sym)) { + int sym_idx = ELF64_R_SYM(rel->r_info); + int i, n = obj->nr_extern; + struct extern_desc *ext; + + for (i = 0; i < n; i++) { + ext = &obj->externs[i]; + if (ext->sym_idx == sym_idx) + break; + } + if (i >= n) { + pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n", + prog->name, sym_name, sym_idx); + return -LIBBPF_ERRNO__RELOC; + } + pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n", + prog->name, i, ext->name, ext->sym_idx, insn_idx); + if (insn->code == (BPF_JMP | BPF_CALL)) + reloc_desc->type = RELO_EXTERN_FUNC; + else + reloc_desc->type = RELO_EXTERN_VAR; + reloc_desc->insn_idx = insn_idx; + reloc_desc->sym_off = i; /* sym_off stores extern index */ + return 0; + } + + /* sub-program call relocation */ + if (is_call_insn(insn)) { + if (insn->src_reg != BPF_PSEUDO_CALL) { + 
pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name); + return -LIBBPF_ERRNO__RELOC; + } + /* text_shndx can be 0, if no default "main" program exists */ + if (!shdr_idx || shdr_idx != obj->efile.text_shndx) { + sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx)); + pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n", + prog->name, sym_name, sym_sec_name); + return -LIBBPF_ERRNO__RELOC; + } + if (sym->st_value % BPF_INSN_SZ) { + pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n", + prog->name, sym_name, (size_t)sym->st_value); + return -LIBBPF_ERRNO__RELOC; + } + reloc_desc->type = RELO_CALL; + reloc_desc->insn_idx = insn_idx; + reloc_desc->sym_off = sym->st_value; + return 0; + } + + if (!shdr_idx || shdr_idx >= SHN_LORESERVE) { + pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n", + prog->name, sym_name, shdr_idx); + return -LIBBPF_ERRNO__RELOC; + } + + /* loading subprog addresses */ + if (sym_is_subprog(sym, obj->efile.text_shndx)) { + /* global_func: sym->st_value = offset in the section, insn->imm = 0. + * local_func: sym->st_value = 0, insn->imm = offset in the section. + */ + if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) { + pr_warn("prog '%s': bad subprog addr relo against '%s' at offset %zu+%d\n", + prog->name, sym_name, (size_t)sym->st_value, insn->imm); + return -LIBBPF_ERRNO__RELOC; + } + + reloc_desc->type = RELO_SUBPROG_ADDR; + reloc_desc->insn_idx = insn_idx; + reloc_desc->sym_off = sym->st_value; + return 0; + } + + type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx); + sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx)); + + /* generic map reference relocation */ + if (type == LIBBPF_MAP_UNSPEC) { + if (!bpf_object__shndx_is_maps(obj, shdr_idx)) { + pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n", + prog->name, sym_name, sym_sec_name); + return -LIBBPF_ERRNO__RELOC; + } + for (map_idx = 0; map_idx < nr_maps; map_idx++) { + map = &obj->maps[map_idx]; + if (map->libbpf_type != type || + map->sec_idx != sym->st_shndx || + map->sec_offset != sym->st_value) + continue; + pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n", + prog->name, map_idx, map->name, map->sec_idx, + map->sec_offset, insn_idx); + break; + } + if (map_idx >= nr_maps) { + pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n", + prog->name, sym_sec_name, (size_t)sym->st_value); + return -LIBBPF_ERRNO__RELOC; + } + reloc_desc->type = RELO_LD64; + reloc_desc->insn_idx = insn_idx; + reloc_desc->map_idx = map_idx; + reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */ + return 0; + } + + /* global data map relocation */ + if (!bpf_object__shndx_is_data(obj, shdr_idx)) { + pr_warn("prog '%s': bad data relo against section '%s'\n", + prog->name, sym_sec_name); + return -LIBBPF_ERRNO__RELOC; + } + for (map_idx = 0; map_idx < nr_maps; map_idx++) { + map = &obj->maps[map_idx]; + if (map->libbpf_type != type || map->sec_idx != sym->st_shndx) + continue; + pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n", + prog->name, map_idx, map->name, map->sec_idx, + map->sec_offset, insn_idx); + break; + } + if (map_idx >= nr_maps) { + pr_warn("prog '%s': data relo failed to find map for section '%s'\n", + prog->name, sym_sec_name); + return -LIBBPF_ERRNO__RELOC; + } + + reloc_desc->type = RELO_DATA; + reloc_desc->insn_idx = insn_idx; + reloc_desc->map_idx = map_idx; + reloc_desc->sym_off = 
sym->st_value; + return 0; +} + +static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx) +{ + return insn_idx >= prog->sec_insn_off && + insn_idx < prog->sec_insn_off + prog->sec_insn_cnt; +} + +static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj, + size_t sec_idx, size_t insn_idx) +{ + int l = 0, r = obj->nr_programs - 1, m; + struct bpf_program *prog; + + while (l < r) { + m = l + (r - l + 1) / 2; + prog = &obj->programs[m]; + + if (prog->sec_idx < sec_idx || + (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx)) + l = m; + else + r = m - 1; + } + /* matching program could be at index l, but it still might be the + * wrong one, so we need to double check conditions for the last time + */ + prog = &obj->programs[l]; + if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx)) + return prog; + return NULL; +} + +static int +bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data) +{ + const char *relo_sec_name, *sec_name; + size_t sec_idx = shdr->sh_info, sym_idx; + struct bpf_program *prog; + struct reloc_desc *relos; + int err, i, nrels; + const char *sym_name; + __u32 insn_idx; + Elf_Scn *scn; + Elf_Data *scn_data; + Elf64_Sym *sym; + Elf64_Rel *rel; + + if (sec_idx >= obj->efile.sec_cnt) + return -EINVAL; + + scn = elf_sec_by_idx(obj, sec_idx); + scn_data = elf_sec_data(obj, scn); + + relo_sec_name = elf_sec_str(obj, shdr->sh_name); + sec_name = elf_sec_name(obj, scn); + if (!relo_sec_name || !sec_name) + return -EINVAL; + + pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n", + relo_sec_name, sec_idx, sec_name); + nrels = shdr->sh_size / shdr->sh_entsize; + + for (i = 0; i < nrels; i++) { + rel = elf_rel_by_idx(data, i); + if (!rel) { + pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i); + return -LIBBPF_ERRNO__FORMAT; + } + + sym_idx = ELF64_R_SYM(rel->r_info); + sym = elf_sym_by_idx(obj, sym_idx); + if (!sym) { + pr_warn("sec '%s': symbol #%zu not found for relo #%d\n", + relo_sec_name, sym_idx, i); + return -LIBBPF_ERRNO__FORMAT; + } + + if (sym->st_shndx >= obj->efile.sec_cnt) { + pr_warn("sec '%s': corrupted symbol #%zu pointing to invalid section #%zu for relo #%d\n", + relo_sec_name, sym_idx, (size_t)sym->st_shndx, i); + return -LIBBPF_ERRNO__FORMAT; + } + + if (rel->r_offset % BPF_INSN_SZ || rel->r_offset >= scn_data->d_size) { + pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n", + relo_sec_name, (size_t)rel->r_offset, i); + return -LIBBPF_ERRNO__FORMAT; + } + + insn_idx = rel->r_offset / BPF_INSN_SZ; + /* relocations against static functions are recorded as + * relocations against the section that contains a function; + * in such case, symbol will be STT_SECTION and sym.st_name + * will point to empty string (0), so fetch section name + * instead + */ + if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION && sym->st_name == 0) + sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym->st_shndx)); + else + sym_name = elf_sym_str(obj, sym->st_name); + sym_name = sym_name ?: "<?"; + + pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n", + relo_sec_name, i, insn_idx, sym_name); + + prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx); + if (!prog) { + pr_debug("sec '%s': relo #%d: couldn't find program in section '%s' for insn #%u, probably overridden weak function, skipping...\n", + relo_sec_name, i, sec_name, insn_idx); + continue; + } + + relos = libbpf_reallocarray(prog->reloc_desc, + prog->nr_reloc + 1, sizeof(*relos)); + if (!relos) + return -ENOMEM; + prog->reloc_desc = relos; + + /* adjust insn_idx to local BPF program frame of reference */ + insn_idx -= prog->sec_insn_off; + err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc], + insn_idx, sym_name, sym, rel); + if (err) + return err; + + prog->nr_reloc++; + } + return 0; +} + +static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map) +{ + struct bpf_map_def *def = 
&map->def; + __u32 key_type_id = 0, value_type_id = 0; + int ret; + + if (!obj->btf) + return -ENOENT; + + /* if it's BTF-defined map, we don't need to search for type IDs. + * For struct_ops map, it does not need btf_key_type_id and + * btf_value_type_id. + */ + if (map->sec_idx == obj->efile.btf_maps_shndx || + bpf_map__is_struct_ops(map)) + return 0; + + if (!bpf_map__is_internal(map)) { + pr_warn("Use of BPF_ANNOTATE_KV_PAIR is deprecated, use BTF-defined maps in .maps section instead\n"); +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size, + def->value_size, &key_type_id, + &value_type_id); +#pragma GCC diagnostic pop + } else { + /* + * LLVM annotates global data differently in BTF, that is, + * only as '.data', '.bss' or '.rodata'. + */ + ret = btf__find_by_name(obj->btf, map->real_name); + } + if (ret < 0) + return ret; + + map->btf_key_type_id = key_type_id; + map->btf_value_type_id = bpf_map__is_internal(map) ? + ret : value_type_id; + return 0; +} + +static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info) +{ + char file[PATH_MAX], buff[4096]; + FILE *fp; + __u32 val; + int err; + + snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd); + memset(info, 0, sizeof(*info)); + + fp = fopen(file, "r"); + if (!fp) { + err = -errno; + pr_warn("failed to open %s: %d. No procfs support?\n", file, + err); + return err; + } + + while (fgets(buff, sizeof(buff), fp)) { + if (sscanf(buff, "map_type:\t%u", &val) == 1) + info->type = val; + else if (sscanf(buff, "key_size:\t%u", &val) == 1) + info->key_size = val; + else if (sscanf(buff, "value_size:\t%u", &val) == 1) + info->value_size = val; + else if (sscanf(buff, "max_entries:\t%u", &val) == 1) + info->max_entries = val; + else if (sscanf(buff, "map_flags:\t%i", &val) == 1) + info->map_flags = val; + } + + fclose(fp); + + return 0; +} + +int bpf_map__reuse_fd(struct bpf_map *map, int fd) +{ + struct bpf_map_info info = {}; + __u32 len = sizeof(info); + int new_fd, err; + char *new_name; + + err = bpf_obj_get_info_by_fd(fd, &info, &len); + if (err && errno == EINVAL) + err = bpf_get_map_info_from_fdinfo(fd, &info); + if (err) + return libbpf_err(err); + + new_name = strdup(info.name); + if (!new_name) + return libbpf_err(-errno); + + new_fd = open("/", O_RDONLY | O_CLOEXEC); + if (new_fd < 0) { + err = -errno; + goto err_free_new_name; + } + + new_fd = dup3(fd, new_fd, O_CLOEXEC); + if (new_fd < 0) { + err = -errno; + goto err_close_new_fd; + } + + err = zclose(map->fd); + if (err) { + err = -errno; + goto err_close_new_fd; + } + free(map->name); + + map->fd = new_fd; + map->name = new_name; + map->def.type = info.type; + map->def.key_size = info.key_size; + map->def.value_size = info.value_size; + map->def.max_entries = info.max_entries; + map->def.map_flags = info.map_flags; + map->btf_key_type_id = info.btf_key_type_id; + map->btf_value_type_id = info.btf_value_type_id; + map->reused = true; + map->map_extra = info.map_extra; + + return 0; + +err_close_new_fd: + close(new_fd); +err_free_new_name: + free(new_name); + return libbpf_err(err); +} + +__u32 bpf_map__max_entries(const struct bpf_map *map) +{ + return map->def.max_entries; +} + +struct bpf_map *bpf_map__inner_map(struct bpf_map *map) +{ + if (!bpf_map_type__is_map_in_map(map->def.type)) + return errno = EINVAL, NULL; + + return map->inner_map; +} + +int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries) +{ + if (map->fd >= 0) + return 
libbpf_err(-EBUSY); + map->def.max_entries = max_entries; + return 0; +} + +int bpf_map__resize(struct bpf_map *map, __u32 max_entries) +{ + if (!map || !max_entries) + return libbpf_err(-EINVAL); + + return bpf_map__set_max_entries(map, max_entries); +} + +static int +bpf_object__probe_loading(struct bpf_object *obj) +{ + char *cp, errmsg[STRERR_BUFSIZE]; + struct bpf_insn insns[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + int ret, insn_cnt = ARRAY_SIZE(insns); + + if (obj->gen_loader) + return 0; + + ret = bump_rlimit_memlock(); + if (ret) + pr_warn("Failed to bump RLIMIT_MEMLOCK (err = %d), you might need to do it explicitly!\n", ret); + + /* make sure basic loading works */ + ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL); + if (ret < 0) + ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL); + if (ret < 0) { + ret = errno; + cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg)); + pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF " + "program. Make sure your kernel supports BPF " + "(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is " + "set to big enough value.\n", __func__, cp, ret); + return -ret; + } + close(ret); + + return 0; +} + +static int probe_fd(int fd) +{ + if (fd >= 0) + close(fd); + return fd >= 0; +} + +static int probe_kern_prog_name(void) +{ + struct bpf_insn insns[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + int ret, insn_cnt = ARRAY_SIZE(insns); + + /* make sure loading with name works */ + ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "test", "GPL", insns, insn_cnt, NULL); + return probe_fd(ret); +} + +static int probe_kern_global_data(void) +{ + char *cp, errmsg[STRERR_BUFSIZE]; + struct bpf_insn insns[] = { + BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16), + BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + int ret, map, insn_cnt = ARRAY_SIZE(insns); + + map = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), 32, 1, NULL); + if (map < 0) { + ret = -errno; + cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg)); + pr_warn("Error in %s():%s(%d). 
Couldn't create simple array map.\n", + __func__, cp, -ret); + return ret; + } + + insns[0].imm = map; + + ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL); + close(map); + return probe_fd(ret); +} + +static int probe_kern_btf(void) +{ + static const char strs[] = "\0int"; + __u32 types[] = { + /* int */ + BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), + }; + + return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), + strs, sizeof(strs))); +} + +static int probe_kern_btf_func(void) +{ + static const char strs[] = "\0int\0x\0a"; + /* void x(int a) {} */ + __u32 types[] = { + /* int */ + BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* FUNC_PROTO */ /* [2] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0), + BTF_PARAM_ENC(7, 1), + /* FUNC x */ /* [3] */ + BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2), + }; + + return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), + strs, sizeof(strs))); +} + +static int probe_kern_btf_func_global(void) +{ + static const char strs[] = "\0int\0x\0a"; + /* static void x(int a) {} */ + __u32 types[] = { + /* int */ + BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* FUNC_PROTO */ /* [2] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0), + BTF_PARAM_ENC(7, 1), + /* FUNC x BTF_FUNC_GLOBAL */ /* [3] */ + BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2), + }; + + return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), + strs, sizeof(strs))); +} + +static int probe_kern_btf_datasec(void) +{ + static const char strs[] = "\0x\0.data"; + /* static int a; */ + __u32 types[] = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* VAR x */ /* [2] */ + BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1), + BTF_VAR_STATIC, + /* DATASEC val */ /* [3] */ + BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4), + BTF_VAR_SECINFO_ENC(2, 0, 4), + }; + + return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), + strs, sizeof(strs))); +} + +static int probe_kern_btf_float(void) +{ + static const char strs[] = "\0float"; + __u32 types[] = { + /* float */ + BTF_TYPE_FLOAT_ENC(1, 4), + }; + + return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), + strs, sizeof(strs))); +} + +static int probe_kern_btf_decl_tag(void) +{ + static const char strs[] = "\0tag"; + __u32 types[] = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* VAR x */ /* [2] */ + BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1), + BTF_VAR_STATIC, + /* attr */ + BTF_TYPE_DECL_TAG_ENC(1, 2, -1), + }; + + return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), + strs, sizeof(strs))); +} + +static int probe_kern_btf_type_tag(void) +{ + static const char strs[] = "\0tag"; + __u32 types[] = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* attr */ + BTF_TYPE_TYPE_TAG_ENC(1, 1), /* [2] */ + /* ptr */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), /* [3] */ + }; + + return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), + strs, sizeof(strs))); +} + +static int probe_kern_array_mmap(void) +{ + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE); + int fd; + + fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), sizeof(int), 1, &opts); + return probe_fd(fd); +} + +static int probe_kern_exp_attach_type(void) +{ + LIBBPF_OPTS(bpf_prog_load_opts, opts, .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE); + struct bpf_insn 
insns[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + int fd, insn_cnt = ARRAY_SIZE(insns); + + /* use any valid combination of program type and (optional) + * non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS) + * to see if kernel supports expected_attach_type field for + * BPF_PROG_LOAD command + */ + fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts); + return probe_fd(fd); +} + +static int probe_kern_probe_read_kernel(void) +{ + struct bpf_insn insns[] = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), /* r1 = r10 (fp) */ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */ + BPF_MOV64_IMM(BPF_REG_2, 8), /* r2 = 8 */ + BPF_MOV64_IMM(BPF_REG_3, 0), /* r3 = 0 */ + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel), + BPF_EXIT_INSN(), + }; + int fd, insn_cnt = ARRAY_SIZE(insns); + + fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL); + return probe_fd(fd); +} + +static int probe_prog_bind_map(void) +{ + char *cp, errmsg[STRERR_BUFSIZE]; + struct bpf_insn insns[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + int ret, map, prog, insn_cnt = ARRAY_SIZE(insns); + + map = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), 32, 1, NULL); + if (map < 0) { + ret = -errno; + cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg)); + pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n", + __func__, cp, -ret); + return ret; + } + + prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL); + if (prog < 0) { + close(map); + return 0; + } + + ret = bpf_prog_bind_map(prog, map, NULL); + + close(map); + close(prog); + + return ret >= 0; +} + +static int probe_module_btf(void) +{ + static const char strs[] = "\0int"; + __u32 types[] = { + /* int */ + BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), + }; + struct bpf_btf_info info; + __u32 len = sizeof(info); + char name[16]; + int fd, err; + + fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs)); + if (fd < 0) + return 0; /* BTF not supported at all */ + + memset(&info, 0, sizeof(info)); + info.name = ptr_to_u64(name); + info.name_len = sizeof(name); + + /* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer; + * kernel's module BTF support coincides with support for + * name/name_len fields in struct bpf_btf_info. 
+ */ + err = bpf_obj_get_info_by_fd(fd, &info, &len); + close(fd); + return !err; +} + +static int probe_perf_link(void) +{ + struct bpf_insn insns[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + int prog_fd, link_fd, err; + + prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", + insns, ARRAY_SIZE(insns), NULL); + if (prog_fd < 0) + return -errno; + + /* use invalid perf_event FD to get EBADF, if link is supported; + * otherwise EINVAL should be returned + */ + link_fd = bpf_link_create(prog_fd, -1, BPF_PERF_EVENT, NULL); + err = -errno; /* close() can clobber errno */ + + if (link_fd >= 0) + close(link_fd); + close(prog_fd); + + return link_fd < 0 && err == -EBADF; +} + +static int probe_kern_bpf_cookie(void) +{ + struct bpf_insn insns[] = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_attach_cookie), + BPF_EXIT_INSN(), + }; + int ret, insn_cnt = ARRAY_SIZE(insns); + + ret = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL", insns, insn_cnt, NULL); + return probe_fd(ret); +} + +enum kern_feature_result { + FEAT_UNKNOWN = 0, + FEAT_SUPPORTED = 1, + FEAT_MISSING = 2, +}; + +typedef int (*feature_probe_fn)(void); + +static struct kern_feature_desc { + const char *desc; + feature_probe_fn probe; + enum kern_feature_result res; +} feature_probes[__FEAT_CNT] = { + [FEAT_PROG_NAME] = { + "BPF program name", probe_kern_prog_name, + }, + [FEAT_GLOBAL_DATA] = { + "global variables", probe_kern_global_data, + }, + [FEAT_BTF] = { + "minimal BTF", probe_kern_btf, + }, + [FEAT_BTF_FUNC] = { + "BTF functions", probe_kern_btf_func, + }, + [FEAT_BTF_GLOBAL_FUNC] = { + "BTF global function", probe_kern_btf_func_global, + }, + [FEAT_BTF_DATASEC] = { + "BTF data section and variable", probe_kern_btf_datasec, + }, + [FEAT_ARRAY_MMAP] = { + "ARRAY map mmap()", probe_kern_array_mmap, + }, + [FEAT_EXP_ATTACH_TYPE] = { + "BPF_PROG_LOAD expected_attach_type attribute", + probe_kern_exp_attach_type, + }, + [FEAT_PROBE_READ_KERN] = { + "bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel, + }, + [FEAT_PROG_BIND_MAP] = { + "BPF_PROG_BIND_MAP support", probe_prog_bind_map, + }, + [FEAT_MODULE_BTF] = { + "module BTF support", probe_module_btf, + }, + [FEAT_BTF_FLOAT] = { + "BTF_KIND_FLOAT support", probe_kern_btf_float, + }, + [FEAT_PERF_LINK] = { + "BPF perf link support", probe_perf_link, + }, + [FEAT_BTF_DECL_TAG] = { + "BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag, + }, + [FEAT_BTF_TYPE_TAG] = { + "BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag, + }, + [FEAT_MEMCG_ACCOUNT] = { + "memcg-based memory accounting", probe_memcg_account, + }, + [FEAT_BPF_COOKIE] = { + "BPF cookie support", probe_kern_bpf_cookie, + }, +}; + +bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id) +{ + struct kern_feature_desc *feat = &feature_probes[feat_id]; + int ret; + + if (obj && obj->gen_loader) + /* To generate loader program assume the latest kernel + * to avoid doing extra prog_load, map_create syscalls. 
+ */ + return true; + + if (READ_ONCE(feat->res) == FEAT_UNKNOWN) { + ret = feat->probe(); + if (ret > 0) { + WRITE_ONCE(feat->res, FEAT_SUPPORTED); + } else if (ret == 0) { + WRITE_ONCE(feat->res, FEAT_MISSING); + } else { + pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret); + WRITE_ONCE(feat->res, FEAT_MISSING); + } + } + + return READ_ONCE(feat->res) == FEAT_SUPPORTED; +} + +static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd) +{ + struct bpf_map_info map_info = {}; + char msg[STRERR_BUFSIZE]; + __u32 map_info_len; + int err; + + map_info_len = sizeof(map_info); + + err = bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len); + if (err && errno == EINVAL) + err = bpf_get_map_info_from_fdinfo(map_fd, &map_info); + if (err) { + pr_warn("failed to get map info for map FD %d: %s\n", map_fd, + libbpf_strerror_r(errno, msg, sizeof(msg))); + return false; + } + + return (map_info.type == map->def.type && + map_info.key_size == map->def.key_size && + map_info.value_size == map->def.value_size && + map_info.max_entries == map->def.max_entries && + map_info.map_flags == map->def.map_flags && + map_info.map_extra == map->map_extra); +} + +static int +bpf_object__reuse_map(struct bpf_map *map) +{ + char *cp, errmsg[STRERR_BUFSIZE]; + int err, pin_fd; + + pin_fd = bpf_obj_get(map->pin_path); + if (pin_fd < 0) { + err = -errno; + if (err == -ENOENT) { + pr_debug("found no pinned map to reuse at '%s'\n", + map->pin_path); + return 0; + } + + cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); + pr_warn("couldn't retrieve pinned map '%s': %s\n", + map->pin_path, cp); + return err; + } + + if (!map_is_reuse_compat(map, pin_fd)) { + pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n", + map->pin_path); + close(pin_fd); + return -EINVAL; + } + + err = bpf_map__reuse_fd(map, pin_fd); + close(pin_fd); + if (err) { + return err; + } + map->pinned = true; + pr_debug("reused pinned map at '%s'\n", map->pin_path); + + return 0; +} + +static int +bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map) +{ + enum libbpf_map_type map_type = map->libbpf_type; + char *cp, errmsg[STRERR_BUFSIZE]; + int err, zero = 0; + + if (obj->gen_loader) { + bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps, + map->mmaped, map->def.value_size); + if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) + bpf_gen__map_freeze(obj->gen_loader, map - obj->maps); + return 0; + } + err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0); + if (err) { + err = -errno; + cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); + pr_warn("Error setting initial map(%s) contents: %s\n", + map->name, cp); + return err; + } + + /* Freeze .rodata and .kconfig map as read-only from syscall side. 
*/ + if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) { + err = bpf_map_freeze(map->fd); + if (err) { + err = -errno; + cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); + pr_warn("Error freezing map(%s) as read-only: %s\n", + map->name, cp); + return err; + } + } + return 0; +} + +static void bpf_map__destroy(struct bpf_map *map); + +static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner) +{ + LIBBPF_OPTS(bpf_map_create_opts, create_attr); + struct bpf_map_def *def = &map->def; + const char *map_name = NULL; + int err = 0; + + if (kernel_supports(obj, FEAT_PROG_NAME)) + map_name = map->name; + create_attr.map_ifindex = map->map_ifindex; + create_attr.map_flags = def->map_flags; + create_attr.numa_node = map->numa_node; + create_attr.map_extra = map->map_extra; + + if (bpf_map__is_struct_ops(map)) + create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; + + if (obj->btf && btf__fd(obj->btf) >= 0) { + create_attr.btf_fd = btf__fd(obj->btf); + create_attr.btf_key_type_id = map->btf_key_type_id; + create_attr.btf_value_type_id = map->btf_value_type_id; + } + + if (bpf_map_type__is_map_in_map(def->type)) { + if (map->inner_map) { + err = bpf_object__create_map(obj, map->inner_map, true); + if (err) { + pr_warn("map '%s': failed to create inner map: %d\n", + map->name, err); + return err; + } + map->inner_map_fd = bpf_map__fd(map->inner_map); + } + if (map->inner_map_fd >= 0) + create_attr.inner_map_fd = map->inner_map_fd; + } + + switch (def->type) { + case BPF_MAP_TYPE_PERF_EVENT_ARRAY: + case BPF_MAP_TYPE_CGROUP_ARRAY: + case BPF_MAP_TYPE_STACK_TRACE: + case BPF_MAP_TYPE_ARRAY_OF_MAPS: + case BPF_MAP_TYPE_HASH_OF_MAPS: + case BPF_MAP_TYPE_DEVMAP: + case BPF_MAP_TYPE_DEVMAP_HASH: + case BPF_MAP_TYPE_CPUMAP: + case BPF_MAP_TYPE_XSKMAP: + case BPF_MAP_TYPE_SOCKMAP: + case BPF_MAP_TYPE_SOCKHASH: + case BPF_MAP_TYPE_QUEUE: + case BPF_MAP_TYPE_STACK: + case BPF_MAP_TYPE_RINGBUF: + create_attr.btf_fd = 0; + create_attr.btf_key_type_id = 0; + create_attr.btf_value_type_id = 0; + map->btf_key_type_id = 0; + map->btf_value_type_id = 0; + default: + break; + } + + if (obj->gen_loader) { + bpf_gen__map_create(obj->gen_loader, def->type, map_name, + def->key_size, def->value_size, def->max_entries, + &create_attr, is_inner ? -1 : map - obj->maps); + /* Pretend to have valid FD to pass various fd >= 0 checks. + * This fd == 0 will not be used with any syscall and will be reset to -1 eventually. + */ + map->fd = 0; + } else { + map->fd = bpf_map_create(def->type, map_name, + def->key_size, def->value_size, + def->max_entries, &create_attr); + } + if (map->fd < 0 && (create_attr.btf_key_type_id || + create_attr.btf_value_type_id)) { + char *cp, errmsg[STRERR_BUFSIZE]; + + err = -errno; + cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); + pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n", + map->name, cp, err); + create_attr.btf_fd = 0; + create_attr.btf_key_type_id = 0; + create_attr.btf_value_type_id = 0; + map->btf_key_type_id = 0; + map->btf_value_type_id = 0; + map->fd = bpf_map_create(def->type, map_name, + def->key_size, def->value_size, + def->max_entries, &create_attr); + } + + err = map->fd < 0 ? 
-errno : 0; + + if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) { + if (obj->gen_loader) + map->inner_map->fd = -1; + bpf_map__destroy(map->inner_map); + zfree(&map->inner_map); + } + + return err; +} + +static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map) +{ + const struct bpf_map *targ_map; + unsigned int i; + int fd, err = 0; + + for (i = 0; i < map->init_slots_sz; i++) { + if (!map->init_slots[i]) + continue; + + targ_map = map->init_slots[i]; + fd = bpf_map__fd(targ_map); + + if (obj->gen_loader) { + bpf_gen__populate_outer_map(obj->gen_loader, + map - obj->maps, i, + targ_map - obj->maps); + } else { + err = bpf_map_update_elem(map->fd, &i, &fd, 0); + } + if (err) { + err = -errno; + pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n", + map->name, i, targ_map->name, fd, err); + return err; + } + pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n", + map->name, i, targ_map->name, fd); + } + + zfree(&map->init_slots); + map->init_slots_sz = 0; + + return 0; +} + +static int init_prog_array_slots(struct bpf_object *obj, struct bpf_map *map) +{ + const struct bpf_program *targ_prog; + unsigned int i; + int fd, err; + + if (obj->gen_loader) + return -ENOTSUP; + + for (i = 0; i < map->init_slots_sz; i++) { + if (!map->init_slots[i]) + continue; + + targ_prog = map->init_slots[i]; + fd = bpf_program__fd(targ_prog); + + err = bpf_map_update_elem(map->fd, &i, &fd, 0); + if (err) { + err = -errno; + pr_warn("map '%s': failed to initialize slot [%d] to prog '%s' fd=%d: %d\n", + map->name, i, targ_prog->name, fd, err); + return err; + } + pr_debug("map '%s': slot [%d] set to prog '%s' fd=%d\n", + map->name, i, targ_prog->name, fd); + } + + zfree(&map->init_slots); + map->init_slots_sz = 0; + + return 0; +} + +static int bpf_object_init_prog_arrays(struct bpf_object *obj) +{ + struct bpf_map *map; + int i, err; + + for (i = 0; i < obj->nr_maps; i++) { + map = &obj->maps[i]; + + if (!map->init_slots_sz || map->def.type != BPF_MAP_TYPE_PROG_ARRAY) + continue; + + err = init_prog_array_slots(obj, map); + if (err < 0) { + zclose(map->fd); + return err; + } + } + return 0; +} + +static int map_set_def_max_entries(struct bpf_map *map) +{ + if (map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !map->def.max_entries) { + int nr_cpus; + + nr_cpus = libbpf_num_possible_cpus(); + if (nr_cpus < 0) { + pr_warn("map '%s': failed to determine number of system CPUs: %d\n", + map->name, nr_cpus); + return nr_cpus; + } + pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus); + map->def.max_entries = nr_cpus; + } + + return 0; +} + +static int +bpf_object__create_maps(struct bpf_object *obj) +{ + struct bpf_map *map; + char *cp, errmsg[STRERR_BUFSIZE]; + unsigned int i, j; + int err; + bool retried; + + for (i = 0; i < obj->nr_maps; i++) { + map = &obj->maps[i]; + + /* To support old kernels, we skip creating global data maps + * (.rodata, .data, .kconfig, etc); later on, during program + * loading, if we detect that at least one of the to-be-loaded + * programs is referencing any global data map, we'll error + * out with program name and relocation index logged. + * This approach allows to accommodate Clang emitting + * unnecessary .rodata.str1.1 sections for string literals, + * but also it allows to have CO-RE applications that use + * global variables in some of BPF programs, but not others. 
+ * If those global variable-using programs are not loaded at + * runtime due to bpf_program__set_autoload(prog, false), + * bpf_object loading will succeed just fine even on old + * kernels. + */ + if (bpf_map__is_internal(map) && + !kernel_supports(obj, FEAT_GLOBAL_DATA)) { + map->skipped = true; + continue; + } + + err = map_set_def_max_entries(map); + if (err) + goto err_out; + + retried = false; +retry: + if (map->pin_path) { + err = bpf_object__reuse_map(map); + if (err) { + pr_warn("map '%s': error reusing pinned map\n", + map->name); + goto err_out; + } + if (retried && map->fd < 0) { + pr_warn("map '%s': cannot find pinned map\n", + map->name); + err = -ENOENT; + goto err_out; + } + } + + if (map->fd >= 0) { + pr_debug("map '%s': skipping creation (preset fd=%d)\n", + map->name, map->fd); + } else { + err = bpf_object__create_map(obj, map, false); + if (err) + goto err_out; + + pr_debug("map '%s': created successfully, fd=%d\n", + map->name, map->fd); + + if (bpf_map__is_internal(map)) { + err = bpf_object__populate_internal_map(obj, map); + if (err < 0) { + zclose(map->fd); + goto err_out; + } + } + + if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) { + err = init_map_in_map_slots(obj, map); + if (err < 0) { + zclose(map->fd); + goto err_out; + } + } + } + + if (map->pin_path && !map->pinned) { + err = bpf_map__pin(map, NULL); + if (err) { + zclose(map->fd); + if (!retried && err == -EEXIST) { + retried = true; + goto retry; + } + pr_warn("map '%s': failed to auto-pin at '%s': %d\n", + map->name, map->pin_path, err); + goto err_out; + } + } + } + + return 0; + +err_out: + cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); + pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err); + pr_perm_msg(err); + for (j = 0; j < i; j++) + zclose(obj->maps[j].fd); + return err; +} + +static bool bpf_core_is_flavor_sep(const char *s) +{ + /* check X___Y name pattern, where X and Y are not underscores */ + return s[0] != '_' && /* X */ + s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */ + s[4] != '_'; /* Y */ +} + +/* Given 'some_struct_name___with_flavor' return the length of a name prefix + * before last triple underscore. Struct name part after last triple + * underscore is ignored by BPF CO-RE relocation during relocation matching. 
+ */ +size_t bpf_core_essential_name_len(const char *name) +{ + size_t n = strlen(name); + int i; + + for (i = n - 5; i >= 0; i--) { + if (bpf_core_is_flavor_sep(name + i)) + return i + 1; + } + return n; +} + +void bpf_core_free_cands(struct bpf_core_cand_list *cands) +{ + if (!cands) + return; + + free(cands->cands); + free(cands); +} + +int bpf_core_add_cands(struct bpf_core_cand *local_cand, + size_t local_essent_len, + const struct btf *targ_btf, + const char *targ_btf_name, + int targ_start_id, + struct bpf_core_cand_list *cands) +{ + struct bpf_core_cand *new_cands, *cand; + const struct btf_type *t, *local_t; + const char *targ_name, *local_name; + size_t targ_essent_len; + int n, i; + + local_t = btf__type_by_id(local_cand->btf, local_cand->id); + local_name = btf__str_by_offset(local_cand->btf, local_t->name_off); + + n = btf__type_cnt(targ_btf); + for (i = targ_start_id; i < n; i++) { + t = btf__type_by_id(targ_btf, i); + if (btf_kind(t) != btf_kind(local_t)) + continue; + + targ_name = btf__name_by_offset(targ_btf, t->name_off); + if (str_is_empty(targ_name)) + continue; + + targ_essent_len = bpf_core_essential_name_len(targ_name); + if (targ_essent_len != local_essent_len) + continue; + + if (strncmp(local_name, targ_name, local_essent_len) != 0) + continue; + + pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n", + local_cand->id, btf_kind_str(local_t), + local_name, i, btf_kind_str(t), targ_name, + targ_btf_name); + new_cands = libbpf_reallocarray(cands->cands, cands->len + 1, + sizeof(*cands->cands)); + if (!new_cands) + return -ENOMEM; + + cand = &new_cands[cands->len]; + cand->btf = targ_btf; + cand->id = i; + + cands->cands = new_cands; + cands->len++; + } + return 0; +} + +static int load_module_btfs(struct bpf_object *obj) +{ + struct bpf_btf_info info; + struct module_btf *mod_btf; + struct btf *btf; + char name[64]; + __u32 id = 0, len; + int err, fd; + + if (obj->btf_modules_loaded) + return 0; + + if (obj->gen_loader) + return 0; + + /* don't do this again, even if we find no module BTFs */ + obj->btf_modules_loaded = true; + + /* kernel too old to support module BTFs */ + if (!kernel_supports(obj, FEAT_MODULE_BTF)) + return 0; + + while (true) { + err = bpf_btf_get_next_id(id, &id); + if (err && errno == ENOENT) + return 0; + if (err) { + err = -errno; + pr_warn("failed to iterate BTF objects: %d\n", err); + return err; + } + + fd = bpf_btf_get_fd_by_id(id); + if (fd < 0) { + if (errno == ENOENT) + continue; /* expected race: BTF was unloaded */ + err = -errno; + pr_warn("failed to get BTF object #%d FD: %d\n", id, err); + return err; + } + + len = sizeof(info); + memset(&info, 0, sizeof(info)); + info.name = ptr_to_u64(name); + info.name_len = sizeof(name); + + err = bpf_obj_get_info_by_fd(fd, &info, &len); + if (err) { + err = -errno; + pr_warn("failed to get BTF object #%d info: %d\n", id, err); + goto err_out; + } + + /* ignore non-module BTFs */ + if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) { + close(fd); + continue; + } + + btf = btf_get_from_fd(fd, obj->btf_vmlinux); + err = libbpf_get_error(btf); + if (err) { + pr_warn("failed to load module [%s]'s BTF object #%d: %d\n", + name, id, err); + goto err_out; + } + + err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap, + sizeof(*obj->btf_modules), obj->btf_module_cnt + 1); + if (err) + goto err_out; + + mod_btf = &obj->btf_modules[obj->btf_module_cnt++]; + + mod_btf->btf = btf; + mod_btf->id = id; + mod_btf->fd = fd; + mod_btf->name = strdup(name); 
+ if (!mod_btf->name) { + err = -ENOMEM; + goto err_out; + } + continue; + +err_out: + close(fd); + return err; + } + + return 0; +} + +static struct bpf_core_cand_list * +bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id) +{ + struct bpf_core_cand local_cand = {}; + struct bpf_core_cand_list *cands; + const struct btf *main_btf; + const struct btf_type *local_t; + const char *local_name; + size_t local_essent_len; + int err, i; + + local_cand.btf = local_btf; + local_cand.id = local_type_id; + local_t = btf__type_by_id(local_btf, local_type_id); + if (!local_t) + return ERR_PTR(-EINVAL); + + local_name = btf__name_by_offset(local_btf, local_t->name_off); + if (str_is_empty(local_name)) + return ERR_PTR(-EINVAL); + local_essent_len = bpf_core_essential_name_len(local_name); + + cands = calloc(1, sizeof(*cands)); + if (!cands) + return ERR_PTR(-ENOMEM); + + /* Attempt to find target candidates in vmlinux BTF first */ + main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux; + err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands); + if (err) + goto err_out; + + /* if vmlinux BTF has any candidate, don't got for module BTFs */ + if (cands->len) + return cands; + + /* if vmlinux BTF was overridden, don't attempt to load module BTFs */ + if (obj->btf_vmlinux_override) + return cands; + + /* now look through module BTFs, trying to still find candidates */ + err = load_module_btfs(obj); + if (err) + goto err_out; + + for (i = 0; i < obj->btf_module_cnt; i++) { + err = bpf_core_add_cands(&local_cand, local_essent_len, + obj->btf_modules[i].btf, + obj->btf_modules[i].name, + btf__type_cnt(obj->btf_vmlinux), + cands); + if (err) + goto err_out; + } + + return cands; +err_out: + bpf_core_free_cands(cands); + return ERR_PTR(err); +} + +/* Check local and target types for compatibility. This check is used for + * type-based CO-RE relocations and follow slightly different rules than + * field-based relocations. This function assumes that root types were already + * checked for name match. Beyond that initial root-level name check, names + * are completely ignored. Compatibility rules are as follows: + * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but + * kind should match for local and target types (i.e., STRUCT is not + * compatible with UNION); + * - for ENUMs, the size is ignored; + * - for INT, size and signedness are ignored; + * - for ARRAY, dimensionality is ignored, element types are checked for + * compatibility recursively; + * - CONST/VOLATILE/RESTRICT modifiers are ignored; + * - TYPEDEFs/PTRs are compatible if types they pointing to are compatible; + * - FUNC_PROTOs are compatible if they have compatible signature: same + * number of input args and compatible return and argument types. + * These rules are not set in stone and probably will be adjusted as we get + * more experience with using BPF CO-RE relocations. 
+ */ +int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id, + const struct btf *targ_btf, __u32 targ_id) +{ + const struct btf_type *local_type, *targ_type; + int depth = 32; /* max recursion depth */ + + /* caller made sure that names match (ignoring flavor suffix) */ + local_type = btf__type_by_id(local_btf, local_id); + targ_type = btf__type_by_id(targ_btf, targ_id); + if (btf_kind(local_type) != btf_kind(targ_type)) + return 0; + +recur: + depth--; + if (depth < 0) + return -EINVAL; + + local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id); + targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id); + if (!local_type || !targ_type) + return -EINVAL; + + if (btf_kind(local_type) != btf_kind(targ_type)) + return 0; + + switch (btf_kind(local_type)) { + case BTF_KIND_UNKN: + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + case BTF_KIND_ENUM: + case BTF_KIND_FWD: + return 1; + case BTF_KIND_INT: + /* just reject deprecated bitfield-like integers; all other + * integers are by default compatible between each other + */ + return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0; + case BTF_KIND_PTR: + local_id = local_type->type; + targ_id = targ_type->type; + goto recur; + case BTF_KIND_ARRAY: + local_id = btf_array(local_type)->type; + targ_id = btf_array(targ_type)->type; + goto recur; + case BTF_KIND_FUNC_PROTO: { + struct btf_param *local_p = btf_params(local_type); + struct btf_param *targ_p = btf_params(targ_type); + __u16 local_vlen = btf_vlen(local_type); + __u16 targ_vlen = btf_vlen(targ_type); + int i, err; + + if (local_vlen != targ_vlen) + return 0; + + for (i = 0; i < local_vlen; i++, local_p++, targ_p++) { + skip_mods_and_typedefs(local_btf, local_p->type, &local_id); + skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id); + err = bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id); + if (err <= 0) + return err; + } + + /* tail recurse for return type check */ + skip_mods_and_typedefs(local_btf, local_type->type, &local_id); + skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id); + goto recur; + } + default: + pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n", + btf_kind_str(local_type), local_id, targ_id); + return 0; + } +} + +static size_t bpf_core_hash_fn(const void *key, void *ctx) +{ + return (size_t)key; +} + +static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx) +{ + return k1 == k2; +} + +static void *u32_as_hash_key(__u32 x) +{ + return (void *)(uintptr_t)x; +} + +static int record_relo_core(struct bpf_program *prog, + const struct bpf_core_relo *core_relo, int insn_idx) +{ + struct reloc_desc *relos, *relo; + + relos = libbpf_reallocarray(prog->reloc_desc, + prog->nr_reloc + 1, sizeof(*relos)); + if (!relos) + return -ENOMEM; + relo = &relos[prog->nr_reloc]; + relo->type = RELO_CORE; + relo->insn_idx = insn_idx; + relo->core_relo = core_relo; + prog->reloc_desc = relos; + prog->nr_reloc++; + return 0; +} + +static const struct bpf_core_relo *find_relo_core(struct bpf_program *prog, int insn_idx) +{ + struct reloc_desc *relo; + int i; + + for (i = 0; i < prog->nr_reloc; i++) { + relo = &prog->reloc_desc[i]; + if (relo->type != RELO_CORE || relo->insn_idx != insn_idx) + continue; + + return relo->core_relo; + } + + return NULL; +} + +static int bpf_core_resolve_relo(struct bpf_program *prog, + const struct bpf_core_relo *relo, + int relo_idx, + const struct btf *local_btf, + struct hashmap *cand_cache, + struct bpf_core_relo_res *targ_res) +{ + struct 
bpf_core_spec specs_scratch[3] = {}; + const void *type_key = u32_as_hash_key(relo->type_id); + struct bpf_core_cand_list *cands = NULL; + const char *prog_name = prog->name; + const struct btf_type *local_type; + const char *local_name; + __u32 local_id = relo->type_id; + int err; + + local_type = btf__type_by_id(local_btf, local_id); + if (!local_type) + return -EINVAL; + + local_name = btf__name_by_offset(local_btf, local_type->name_off); + if (!local_name) + return -EINVAL; + + if (relo->kind != BPF_CORE_TYPE_ID_LOCAL && + !hashmap__find(cand_cache, type_key, (void **)&cands)) { + cands = bpf_core_find_cands(prog->obj, local_btf, local_id); + if (IS_ERR(cands)) { + pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n", + prog_name, relo_idx, local_id, btf_kind_str(local_type), + local_name, PTR_ERR(cands)); + return PTR_ERR(cands); + } + err = hashmap__set(cand_cache, type_key, cands, NULL, NULL); + if (err) { + bpf_core_free_cands(cands); + return err; + } + } + + return bpf_core_calc_relo_insn(prog_name, relo, relo_idx, local_btf, cands, specs_scratch, + targ_res); +} + +static int +bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path) +{ + const struct btf_ext_info_sec *sec; + struct bpf_core_relo_res targ_res; + const struct bpf_core_relo *rec; + const struct btf_ext_info *seg; + struct hashmap_entry *entry; + struct hashmap *cand_cache = NULL; + struct bpf_program *prog; + struct bpf_insn *insn; + const char *sec_name; + int i, err = 0, insn_idx, sec_idx, sec_num; + + if (obj->btf_ext->core_relo_info.len == 0) + return 0; + + if (targ_btf_path) { + obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL); + err = libbpf_get_error(obj->btf_vmlinux_override); + if (err) { + pr_warn("failed to parse target BTF: %d\n", err); + return err; + } + } + + cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL); + if (IS_ERR(cand_cache)) { + err = PTR_ERR(cand_cache); + goto out; + } + + seg = &obj->btf_ext->core_relo_info; + sec_num = 0; + for_each_btf_ext_sec(seg, sec) { + sec_idx = seg->sec_idxs[sec_num]; + sec_num++; + + sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off); + if (str_is_empty(sec_name)) { + err = -EINVAL; + goto out; + } + + pr_debug("sec '%s': found %d CO-RE relocations\n", sec_name, sec->num_info); + + for_each_btf_ext_rec(seg, sec, i, rec) { + if (rec->insn_off % BPF_INSN_SZ) + return -EINVAL; + insn_idx = rec->insn_off / BPF_INSN_SZ; + prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx); + if (!prog) { + /* When __weak subprog is "overridden" by another instance + * of the subprog from a different object file, linker still + * appends all the .BTF.ext info that used to belong to that + * eliminated subprogram. + * This is similar to what x86-64 linker does for relocations. + * So just ignore such relocations just like we ignore + * subprog instructions when discovering subprograms. 
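+ *
+ * Illustrative example (editor's note, file and function names are
+ * hypothetical): if a.bpf.o and b.bpf.o both define
+ *
+ *     __weak int shared_helper(void) { return 0; }
+ *
+ * the static linker keeps a single body, but the combined .BTF.ext
+ * still carries CO-RE records for the dropped copy, and those records
+ * point at instruction offsets that no longer map to any program, so
+ * they are skipped here.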
+ */ + pr_debug("sec '%s': skipping CO-RE relocation #%d for insn #%d belonging to eliminated weak subprogram\n", + sec_name, i, insn_idx); + continue; + } + /* no need to apply CO-RE relocation if the program is + * not going to be loaded + */ + if (!prog->autoload) + continue; + + /* adjust insn_idx from section frame of reference to the local + * program's frame of reference; (sub-)program code is not yet + * relocated, so it's enough to just subtract in-section offset + */ + insn_idx = insn_idx - prog->sec_insn_off; + if (insn_idx >= prog->insns_cnt) + return -EINVAL; + insn = &prog->insns[insn_idx]; + + err = record_relo_core(prog, rec, insn_idx); + if (err) { + pr_warn("prog '%s': relo #%d: failed to record relocation: %d\n", + prog->name, i, err); + goto out; + } + + if (prog->obj->gen_loader) + continue; + + err = bpf_core_resolve_relo(prog, rec, i, obj->btf, cand_cache, &targ_res); + if (err) { + pr_warn("prog '%s': relo #%d: failed to relocate: %d\n", + prog->name, i, err); + goto out; + } + + err = bpf_core_patch_insn(prog->name, insn, insn_idx, rec, i, &targ_res); + if (err) { + pr_warn("prog '%s': relo #%d: failed to patch insn #%u: %d\n", + prog->name, i, insn_idx, err); + goto out; + } + } + } + +out: + /* obj->btf_vmlinux and module BTFs are freed after object load */ + btf__free(obj->btf_vmlinux_override); + obj->btf_vmlinux_override = NULL; + + if (!IS_ERR_OR_NULL(cand_cache)) { + hashmap__for_each_entry(cand_cache, entry, i) { + bpf_core_free_cands(entry->value); + } + hashmap__free(cand_cache); + } + return err; +} + +/* Relocate data references within program code: + * - map references; + * - global variable references; + * - extern references. + */ +static int +bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog) +{ + int i; + + for (i = 0; i < prog->nr_reloc; i++) { + struct reloc_desc *relo = &prog->reloc_desc[i]; + struct bpf_insn *insn = &prog->insns[relo->insn_idx]; + struct extern_desc *ext; + + switch (relo->type) { + case RELO_LD64: + if (obj->gen_loader) { + insn[0].src_reg = BPF_PSEUDO_MAP_IDX; + insn[0].imm = relo->map_idx; + } else { + insn[0].src_reg = BPF_PSEUDO_MAP_FD; + insn[0].imm = obj->maps[relo->map_idx].fd; + } + break; + case RELO_DATA: + insn[1].imm = insn[0].imm + relo->sym_off; + if (obj->gen_loader) { + insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE; + insn[0].imm = relo->map_idx; + } else { + const struct bpf_map *map = &obj->maps[relo->map_idx]; + + if (map->skipped) { + pr_warn("prog '%s': relo #%d: kernel doesn't support global data\n", + prog->name, i); + return -ENOTSUP; + } + insn[0].src_reg = BPF_PSEUDO_MAP_VALUE; + insn[0].imm = obj->maps[relo->map_idx].fd; + } + break; + case RELO_EXTERN_VAR: + ext = &obj->externs[relo->sym_off]; + if (ext->type == EXT_KCFG) { + if (obj->gen_loader) { + insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE; + insn[0].imm = obj->kconfig_map_idx; + } else { + insn[0].src_reg = BPF_PSEUDO_MAP_VALUE; + insn[0].imm = obj->maps[obj->kconfig_map_idx].fd; + } + insn[1].imm = ext->kcfg.data_off; + } else /* EXT_KSYM */ { + if (ext->ksym.type_id && ext->is_set) { /* typed ksyms */ + insn[0].src_reg = BPF_PSEUDO_BTF_ID; + insn[0].imm = ext->ksym.kernel_btf_id; + insn[1].imm = ext->ksym.kernel_btf_obj_fd; + } else { /* typeless ksyms or unresolved typed ksyms */ + insn[0].imm = (__u32)ext->ksym.addr; + insn[1].imm = ext->ksym.addr >> 32; + } + } + break; + case RELO_EXTERN_FUNC: + ext = &obj->externs[relo->sym_off]; + insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL; + if (ext->is_set) { + insn[0].imm = 
ext->ksym.kernel_btf_id; + insn[0].off = ext->ksym.btf_fd_idx; + } else { /* unresolved weak kfunc */ + insn[0].imm = 0; + insn[0].off = 0; + } + break; + case RELO_SUBPROG_ADDR: + if (insn[0].src_reg != BPF_PSEUDO_FUNC) { + pr_warn("prog '%s': relo #%d: bad insn\n", + prog->name, i); + return -EINVAL; + } + /* handled already */ + break; + case RELO_CALL: + /* handled already */ + break; + case RELO_CORE: + /* will be handled by bpf_program_record_relos() */ + break; + default: + pr_warn("prog '%s': relo #%d: bad relo type %d\n", + prog->name, i, relo->type); + return -EINVAL; + } + } + + return 0; +} + +static int adjust_prog_btf_ext_info(const struct bpf_object *obj, + const struct bpf_program *prog, + const struct btf_ext_info *ext_info, + void **prog_info, __u32 *prog_rec_cnt, + __u32 *prog_rec_sz) +{ + void *copy_start = NULL, *copy_end = NULL; + void *rec, *rec_end, *new_prog_info; + const struct btf_ext_info_sec *sec; + size_t old_sz, new_sz; + int i, sec_num, sec_idx, off_adj; + + sec_num = 0; + for_each_btf_ext_sec(ext_info, sec) { + sec_idx = ext_info->sec_idxs[sec_num]; + sec_num++; + if (prog->sec_idx != sec_idx) + continue; + + for_each_btf_ext_rec(ext_info, sec, i, rec) { + __u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ; + + if (insn_off < prog->sec_insn_off) + continue; + if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt) + break; + + if (!copy_start) + copy_start = rec; + copy_end = rec + ext_info->rec_size; + } + + if (!copy_start) + return -ENOENT; + + /* append func/line info of a given (sub-)program to the main + * program func/line info + */ + old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size; + new_sz = old_sz + (copy_end - copy_start); + new_prog_info = realloc(*prog_info, new_sz); + if (!new_prog_info) + return -ENOMEM; + *prog_info = new_prog_info; + *prog_rec_cnt = new_sz / ext_info->rec_size; + memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start); + + /* Kernel instruction offsets are in units of 8-byte + * instructions, while .BTF.ext instruction offsets generated + * by Clang are in units of bytes. So convert Clang offsets + * into kernel offsets and adjust offset according to program + * relocated position. + */ + off_adj = prog->sub_insn_off - prog->sec_insn_off; + rec = new_prog_info + old_sz; + rec_end = new_prog_info + new_sz; + for (; rec < rec_end; rec += ext_info->rec_size) { + __u32 *insn_off = rec; + + *insn_off = *insn_off / BPF_INSN_SZ + off_adj; + } + *prog_rec_sz = ext_info->rec_size; + return 0; + } + + return -ENOENT; +} + +static int +reloc_prog_func_and_line_info(const struct bpf_object *obj, + struct bpf_program *main_prog, + const struct bpf_program *prog) +{ + int err; + + /* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't + * supprot func/line info + */ + if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC)) + return 0; + + /* only attempt func info relocation if main program's func_info + * relocation was successful + */ + if (main_prog != prog && !main_prog->func_info) + goto line_info; + + err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info, + &main_prog->func_info, + &main_prog->func_info_cnt, + &main_prog->func_info_rec_size); + if (err) { + if (err != -ENOENT) { + pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n", + prog->name, err); + return err; + } + if (main_prog->func_info) { + /* + * Some info has already been found but has problem + * in the last btf_ext reloc. Must have to error out. 
+ */ + pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name); + return err; + } + /* Have problem loading the very first info. Ignore the rest. */ + pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n", + prog->name); + } + +line_info: + /* don't relocate line info if main program's relocation failed */ + if (main_prog != prog && !main_prog->line_info) + return 0; + + err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info, + &main_prog->line_info, + &main_prog->line_info_cnt, + &main_prog->line_info_rec_size); + if (err) { + if (err != -ENOENT) { + pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n", + prog->name, err); + return err; + } + if (main_prog->line_info) { + /* + * Some info has already been found but has problem + * in the last btf_ext reloc. Must have to error out. + */ + pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name); + return err; + } + /* Have problem loading the very first info. Ignore the rest. */ + pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n", + prog->name); + } + return 0; +} + +static int cmp_relo_by_insn_idx(const void *key, const void *elem) +{ + size_t insn_idx = *(const size_t *)key; + const struct reloc_desc *relo = elem; + + if (insn_idx == relo->insn_idx) + return 0; + return insn_idx < relo->insn_idx ? -1 : 1; +} + +static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx) +{ + if (!prog->nr_reloc) + return NULL; + return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc, + sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx); +} + +static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_program *subprog) +{ + int new_cnt = main_prog->nr_reloc + subprog->nr_reloc; + struct reloc_desc *relos; + int i; + + if (main_prog == subprog) + return 0; + relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos)); + if (!relos) + return -ENOMEM; + if (subprog->nr_reloc) + memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc, + sizeof(*relos) * subprog->nr_reloc); + + for (i = main_prog->nr_reloc; i < new_cnt; i++) + relos[i].insn_idx += subprog->sub_insn_off; + /* After insn_idx adjustment the 'relos' array is still sorted + * by insn_idx and doesn't break bsearch. 
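+ *
+ * Worked example (editor's note, hypothetical numbers): main relos at
+ * insn_idx {3, 10}, subprog relos at {0, 4}, subprog appended at
+ * sub_insn_off == 20: the copied entries become {20, 24} and the merged
+ * array {3, 10, 20, 24} stays sorted, so find_prog_insn_relo()'s
+ * bsearch() keeps working on main_prog.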
+ */ + main_prog->reloc_desc = relos; + main_prog->nr_reloc = new_cnt; + return 0; +} + +static int +bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog, + struct bpf_program *prog) +{ + size_t sub_insn_idx, insn_idx, new_cnt; + struct bpf_program *subprog; + struct bpf_insn *insns, *insn; + struct reloc_desc *relo; + int err; + + err = reloc_prog_func_and_line_info(obj, main_prog, prog); + if (err) + return err; + + for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) { + insn = &main_prog->insns[prog->sub_insn_off + insn_idx]; + if (!insn_is_subprog_call(insn) && !insn_is_pseudo_func(insn)) + continue; + + relo = find_prog_insn_relo(prog, insn_idx); + if (relo && relo->type == RELO_EXTERN_FUNC) + /* kfunc relocations will be handled later + * in bpf_object__relocate_data() + */ + continue; + if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) { + pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n", + prog->name, insn_idx, relo->type); + return -LIBBPF_ERRNO__RELOC; + } + if (relo) { + /* sub-program instruction index is a combination of + * an offset of a symbol pointed to by relocation and + * call instruction's imm field; for global functions, + * call always has imm = -1, but for static functions + * relocation is against STT_SECTION and insn->imm + * points to a start of a static function + * + * for subprog addr relocation, the relo->sym_off + insn->imm is + * the byte offset in the corresponding section. + */ + if (relo->type == RELO_CALL) + sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1; + else + sub_insn_idx = (relo->sym_off + insn->imm) / BPF_INSN_SZ; + } else if (insn_is_pseudo_func(insn)) { + /* + * RELO_SUBPROG_ADDR relo is always emitted even if both + * functions are in the same section, so it shouldn't reach here. + */ + pr_warn("prog '%s': missing subprog addr relo for insn #%zu\n", + prog->name, insn_idx); + return -LIBBPF_ERRNO__RELOC; + } else { + /* if subprogram call is to a static function within + * the same ELF section, there won't be any relocation + * emitted, but it also means there is no additional + * offset necessary, insns->imm is relative to + * instruction's original position within the section + */ + sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1; + } + + /* we enforce that sub-programs should be in .text section */ + subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx); + if (!subprog) { + pr_warn("prog '%s': no .text section found yet sub-program call exists\n", + prog->name); + return -LIBBPF_ERRNO__RELOC; + } + + /* if it's the first call instruction calling into this + * subprogram (meaning this subprog hasn't been processed + * yet) within the context of current main program: + * - append it at the end of main program's instructions blog; + * - process is recursively, while current program is put on hold; + * - if that subprogram calls some other not yet processes + * subprogram, same thing will happen recursively until + * there are no more unprocesses subprograms left to append + * and relocate. 
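+ *
+ * Illustrative arithmetic (editor's note, hypothetical offsets): if the
+ * calling program sits at insns [0, 99] of main_prog, the subprog gets
+ * appended at sub_insn_off == 100, and the call instruction is at
+ * absolute index 25, then the imm computed below is 100 - 25 - 1 = 74;
+ * the kernel resolves the callee as call_idx + 1 + imm = 26 + 74 = 100.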
+ */ + if (subprog->sub_insn_off == 0) { + subprog->sub_insn_off = main_prog->insns_cnt; + + new_cnt = main_prog->insns_cnt + subprog->insns_cnt; + insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns)); + if (!insns) { + pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name); + return -ENOMEM; + } + main_prog->insns = insns; + main_prog->insns_cnt = new_cnt; + + memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns, + subprog->insns_cnt * sizeof(*insns)); + + pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n", + main_prog->name, subprog->insns_cnt, subprog->name); + + /* The subprog insns are now appended. Append its relos too. */ + err = append_subprog_relos(main_prog, subprog); + if (err) + return err; + err = bpf_object__reloc_code(obj, main_prog, subprog); + if (err) + return err; + } + + /* main_prog->insns memory could have been re-allocated, so + * calculate pointer again + */ + insn = &main_prog->insns[prog->sub_insn_off + insn_idx]; + /* calculate correct instruction position within current main + * prog; each main prog can have a different set of + * subprograms appended (potentially in different order as + * well), so position of any subprog can be different for + * different main programs */ + insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1; + + pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n", + prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off); + } + + return 0; +} + +/* + * Relocate sub-program calls. + * + * Algorithm operates as follows. Each entry-point BPF program (referred to as + * main prog) is processed separately. For each subprog (non-entry functions, + * that can be called from either entry progs or other subprogs) gets their + * sub_insn_off reset to zero. This serves as indicator that this subprogram + * hasn't been yet appended and relocated within current main prog. Once its + * relocated, sub_insn_off will point at the position within current main prog + * where given subprog was appended. This will further be used to relocate all + * the call instructions jumping into this subprog. + * + * We start with main program and process all call instructions. If the call + * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off + * is zero), subprog instructions are appended at the end of main program's + * instruction array. Then main program is "put on hold" while we recursively + * process newly appended subprogram. If that subprogram calls into another + * subprogram that hasn't been appended, new subprogram is appended again to + * the *main* prog's instructions (subprog's instructions are always left + * untouched, as they need to be in unmodified state for subsequent main progs + * and subprog instructions are always sent only as part of a main prog) and + * the process continues recursively. Once all the subprogs called from a main + * prog or any of its subprogs are appended (and relocated), all their + * positions within finalized instructions array are known, so it's easy to + * rewrite call instructions with correct relative offsets, corresponding to + * desired target subprog. + * + * Its important to realize that some subprogs might not be called from some + * main prog and any of its called/used subprogs. Those will keep their + * subprog->sub_insn_off as zero at all times and won't be appended to current + * main prog and won't be relocated within the context of current main prog. 
+ * They might still be used from other main progs later. + * + * Visually this process can be shown as below. Suppose we have two main + * programs mainA and mainB and BPF object contains three subprogs: subA, + * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and + * subC both call subB: + * + * +--------+ +-------+ + * | v v | + * +--+---+ +--+-+-+ +---+--+ + * | subA | | subB | | subC | + * +--+---+ +------+ +---+--+ + * ^ ^ + * | | + * +---+-------+ +------+----+ + * | mainA | | mainB | + * +-----------+ +-----------+ + * + * We'll start relocating mainA, will find subA, append it and start + * processing sub A recursively: + * + * +-----------+------+ + * | mainA | subA | + * +-----------+------+ + * + * At this point we notice that subB is used from subA, so we append it and + * relocate (there are no further subcalls from subB): + * + * +-----------+------+------+ + * | mainA | subA | subB | + * +-----------+------+------+ + * + * At this point, we relocate subA calls, then go one level up and finish with + * relocatin mainA calls. mainA is done. + * + * For mainB process is similar but results in different order. We start with + * mainB and skip subA and subB, as mainB never calls them (at least + * directly), but we see subC is needed, so we append and start processing it: + * + * +-----------+------+ + * | mainB | subC | + * +-----------+------+ + * Now we see subC needs subB, so we go back to it, append and relocate it: + * + * +-----------+------+------+ + * | mainB | subC | subB | + * +-----------+------+------+ + * + * At this point we unwind recursion, relocate calls in subC, then in mainB. + */ +static int +bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog) +{ + struct bpf_program *subprog; + int i, err; + + /* mark all subprogs as not relocated (yet) within the context of + * current main program + */ + for (i = 0; i < obj->nr_programs; i++) { + subprog = &obj->programs[i]; + if (!prog_is_subprog(obj, subprog)) + continue; + + subprog->sub_insn_off = 0; + } + + err = bpf_object__reloc_code(obj, prog, prog); + if (err) + return err; + + return 0; +} + +static void +bpf_object__free_relocs(struct bpf_object *obj) +{ + struct bpf_program *prog; + int i; + + /* free up relocation descriptors */ + for (i = 0; i < obj->nr_programs; i++) { + prog = &obj->programs[i]; + zfree(&prog->reloc_desc); + prog->nr_reloc = 0; + } +} + +static int cmp_relocs(const void *_a, const void *_b) +{ + const struct reloc_desc *a = _a; + const struct reloc_desc *b = _b; + + if (a->insn_idx != b->insn_idx) + return a->insn_idx < b->insn_idx ? -1 : 1; + + /* no two relocations should have the same insn_idx, but ... */ + if (a->type != b->type) + return a->type < b->type ? -1 : 1; + + return 0; +} + +static void bpf_object__sort_relos(struct bpf_object *obj) +{ + int i; + + for (i = 0; i < obj->nr_programs; i++) { + struct bpf_program *p = &obj->programs[i]; + + if (!p->nr_reloc) + continue; + + qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs); + } +} + +static int +bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path) +{ + struct bpf_program *prog; + size_t i, j; + int err; + + if (obj->btf_ext) { + err = bpf_object__relocate_core(obj, targ_btf_path); + if (err) { + pr_warn("failed to perform CO-RE relocations: %d\n", + err); + return err; + } + bpf_object__sort_relos(obj); + } + + /* Before relocating calls pre-process relocations and mark + * few ld_imm64 instructions that points to subprogs. 
+ * Otherwise bpf_object__reloc_code() later would have to consider + * all ld_imm64 insns as relocation candidates. That would + * reduce relocation speed, since amount of find_prog_insn_relo() + * would increase and most of them will fail to find a relo. + */ + for (i = 0; i < obj->nr_programs; i++) { + prog = &obj->programs[i]; + for (j = 0; j < prog->nr_reloc; j++) { + struct reloc_desc *relo = &prog->reloc_desc[j]; + struct bpf_insn *insn = &prog->insns[relo->insn_idx]; + + /* mark the insn, so it's recognized by insn_is_pseudo_func() */ + if (relo->type == RELO_SUBPROG_ADDR) + insn[0].src_reg = BPF_PSEUDO_FUNC; + } + } + + /* relocate subprogram calls and append used subprograms to main + * programs; each copy of subprogram code needs to be relocated + * differently for each main program, because its code location might + * have changed. + * Append subprog relos to main programs to allow data relos to be + * processed after text is completely relocated. + */ + for (i = 0; i < obj->nr_programs; i++) { + prog = &obj->programs[i]; + /* sub-program's sub-calls are relocated within the context of + * its main program only + */ + if (prog_is_subprog(obj, prog)) + continue; + if (!prog->autoload) + continue; + + err = bpf_object__relocate_calls(obj, prog); + if (err) { + pr_warn("prog '%s': failed to relocate calls: %d\n", + prog->name, err); + return err; + } + } + /* Process data relos for main programs */ + for (i = 0; i < obj->nr_programs; i++) { + prog = &obj->programs[i]; + if (prog_is_subprog(obj, prog)) + continue; + if (!prog->autoload) + continue; + err = bpf_object__relocate_data(obj, prog); + if (err) { + pr_warn("prog '%s': failed to relocate data references: %d\n", + prog->name, err); + return err; + } + } + + return 0; +} + +static int bpf_object__collect_st_ops_relos(struct bpf_object *obj, + Elf64_Shdr *shdr, Elf_Data *data); + +static int bpf_object__collect_map_relos(struct bpf_object *obj, + Elf64_Shdr *shdr, Elf_Data *data) +{ + const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *); + int i, j, nrels, new_sz; + const struct btf_var_secinfo *vi = NULL; + const struct btf_type *sec, *var, *def; + struct bpf_map *map = NULL, *targ_map = NULL; + struct bpf_program *targ_prog = NULL; + bool is_prog_array, is_map_in_map; + const struct btf_member *member; + const char *name, *mname, *type; + unsigned int moff; + Elf64_Sym *sym; + Elf64_Rel *rel; + void *tmp; + + if (!obj->efile.btf_maps_sec_btf_id || !obj->btf) + return -EINVAL; + sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id); + if (!sec) + return -EINVAL; + + nrels = shdr->sh_size / shdr->sh_entsize; + for (i = 0; i < nrels; i++) { + rel = elf_rel_by_idx(data, i); + if (!rel) { + pr_warn(".maps relo #%d: failed to get ELF relo\n", i); + return -LIBBPF_ERRNO__FORMAT; + } + + sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info)); + if (!sym) { + pr_warn(".maps relo #%d: symbol %zx not found\n", + i, (size_t)ELF64_R_SYM(rel->r_info)); + return -LIBBPF_ERRNO__FORMAT; + } + name = elf_sym_str(obj, sym->st_name) ?: ""; + + pr_debug(".maps relo #%d: for %zd value %zd rel->r_offset %zu name %d ('%s')\n", + i, (ssize_t)(rel->r_info >> 32), (size_t)sym->st_value, + (size_t)rel->r_offset, sym->st_name, name); + + for (j = 0; j < obj->nr_maps; j++) { + map = &obj->maps[j]; + if (map->sec_idx != obj->efile.btf_maps_shndx) + continue; + + vi = btf_var_secinfos(sec) + map->btf_var_idx; + if (vi->offset <= rel->r_offset && + rel->r_offset + bpf_ptr_sz <= vi->offset + vi->size) + break; + } + if (j == obj->nr_maps) { + 
pr_warn(".maps relo #%d: cannot find map '%s' at rel->r_offset %zu\n", + i, name, (size_t)rel->r_offset); + return -EINVAL; + } + + is_map_in_map = bpf_map_type__is_map_in_map(map->def.type); + is_prog_array = map->def.type == BPF_MAP_TYPE_PROG_ARRAY; + type = is_map_in_map ? "map" : "prog"; + if (is_map_in_map) { + if (sym->st_shndx != obj->efile.btf_maps_shndx) { + pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n", + i, name); + return -LIBBPF_ERRNO__RELOC; + } + if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS && + map->def.key_size != sizeof(int)) { + pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n", + i, map->name, sizeof(int)); + return -EINVAL; + } + targ_map = bpf_object__find_map_by_name(obj, name); + if (!targ_map) { + pr_warn(".maps relo #%d: '%s' isn't a valid map reference\n", + i, name); + return -ESRCH; + } + } else if (is_prog_array) { + targ_prog = bpf_object__find_program_by_name(obj, name); + if (!targ_prog) { + pr_warn(".maps relo #%d: '%s' isn't a valid program reference\n", + i, name); + return -ESRCH; + } + if (targ_prog->sec_idx != sym->st_shndx || + targ_prog->sec_insn_off * 8 != sym->st_value || + prog_is_subprog(obj, targ_prog)) { + pr_warn(".maps relo #%d: '%s' isn't an entry-point program\n", + i, name); + return -LIBBPF_ERRNO__RELOC; + } + } else { + return -EINVAL; + } + + var = btf__type_by_id(obj->btf, vi->type); + def = skip_mods_and_typedefs(obj->btf, var->type, NULL); + if (btf_vlen(def) == 0) + return -EINVAL; + member = btf_members(def) + btf_vlen(def) - 1; + mname = btf__name_by_offset(obj->btf, member->name_off); + if (strcmp(mname, "values")) + return -EINVAL; + + moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8; + if (rel->r_offset - vi->offset < moff) + return -EINVAL; + + moff = rel->r_offset - vi->offset - moff; + /* here we use BPF pointer size, which is always 64 bit, as we + * are parsing ELF that was built for BPF target + */ + if (moff % bpf_ptr_sz) + return -EINVAL; + moff /= bpf_ptr_sz; + if (moff >= map->init_slots_sz) { + new_sz = moff + 1; + tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz); + if (!tmp) + return -ENOMEM; + map->init_slots = tmp; + memset(map->init_slots + map->init_slots_sz, 0, + (new_sz - map->init_slots_sz) * host_ptr_sz); + map->init_slots_sz = new_sz; + } + map->init_slots[moff] = is_map_in_map ? 
(void *)targ_map : (void *)targ_prog; + + pr_debug(".maps relo #%d: map '%s' slot [%d] points to %s '%s'\n", + i, map->name, moff, type, name); + } + + return 0; +} + +static int bpf_object__collect_relos(struct bpf_object *obj) +{ + int i, err; + + for (i = 0; i < obj->efile.sec_cnt; i++) { + struct elf_sec_desc *sec_desc = &obj->efile.secs[i]; + Elf64_Shdr *shdr; + Elf_Data *data; + int idx; + + if (sec_desc->sec_type != SEC_RELO) + continue; + + shdr = sec_desc->shdr; + data = sec_desc->data; + idx = shdr->sh_info; + + if (shdr->sh_type != SHT_REL) { + pr_warn("internal error at %d\n", __LINE__); + return -LIBBPF_ERRNO__INTERNAL; + } + + if (idx == obj->efile.st_ops_shndx) + err = bpf_object__collect_st_ops_relos(obj, shdr, data); + else if (idx == obj->efile.btf_maps_shndx) + err = bpf_object__collect_map_relos(obj, shdr, data); + else + err = bpf_object__collect_prog_relos(obj, shdr, data); + if (err) + return err; + } + + bpf_object__sort_relos(obj); + return 0; +} + +static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id) +{ + if (BPF_CLASS(insn->code) == BPF_JMP && + BPF_OP(insn->code) == BPF_CALL && + BPF_SRC(insn->code) == BPF_K && + insn->src_reg == 0 && + insn->dst_reg == 0) { + *func_id = insn->imm; + return true; + } + return false; +} + +static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program *prog) +{ + struct bpf_insn *insn = prog->insns; + enum bpf_func_id func_id; + int i; + + if (obj->gen_loader) + return 0; + + for (i = 0; i < prog->insns_cnt; i++, insn++) { + if (!insn_is_helper_call(insn, &func_id)) + continue; + + /* on kernels that don't yet support + * bpf_probe_read_{kernel,user}[_str] helpers, fall back + * to bpf_probe_read() which works well for old kernels + */ + switch (func_id) { + case BPF_FUNC_probe_read_kernel: + case BPF_FUNC_probe_read_user: + if (!kernel_supports(obj, FEAT_PROBE_READ_KERN)) + insn->imm = BPF_FUNC_probe_read; + break; + case BPF_FUNC_probe_read_kernel_str: + case BPF_FUNC_probe_read_user_str: + if (!kernel_supports(obj, FEAT_PROBE_READ_KERN)) + insn->imm = BPF_FUNC_probe_read_str; + break; + default: + break; + } + } + return 0; +} + +static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name, + int *btf_obj_fd, int *btf_type_id); + +/* this is called as prog->sec_def->prog_prepare_load_fn for libbpf-supported sec_defs */ +static int libbpf_prepare_prog_load(struct bpf_program *prog, + struct bpf_prog_load_opts *opts, long cookie) +{ + enum sec_def_flags def = cookie; + + /* old kernels might not support specifying expected_attach_type */ + if ((def & SEC_EXP_ATTACH_OPT) && !kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE)) + opts->expected_attach_type = 0; + + if (def & SEC_SLEEPABLE) + opts->prog_flags |= BPF_F_SLEEPABLE; + + if (prog->type == BPF_PROG_TYPE_XDP && (def & SEC_XDP_FRAGS)) + opts->prog_flags |= BPF_F_XDP_HAS_FRAGS; + + if (def & SEC_DEPRECATED) + pr_warn("SEC(\"%s\") is deprecated, please see https://github.com/libbpf/libbpf/wiki/Libbpf-1.0-migration-guide#bpf-program-sec-annotation-deprecations for details\n", + prog->sec_name); + + if ((prog->type == BPF_PROG_TYPE_TRACING || + prog->type == BPF_PROG_TYPE_LSM || + prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) { + int btf_obj_fd = 0, btf_type_id = 0, err; + const char *attach_name; + + attach_name = strchr(prog->sec_name, '/') + 1; + err = libbpf_find_attach_btf_id(prog, attach_name, &btf_obj_fd, &btf_type_id); + if (err) + return err; + + /* cache resolved BTF FD and BTF type ID 
in the prog */ + prog->attach_btf_obj_fd = btf_obj_fd; + prog->attach_btf_id = btf_type_id; + + /* but by now libbpf common logic is not utilizing + * prog->atach_btf_obj_fd/prog->attach_btf_id anymore because + * this callback is called after opts were populated by + * libbpf, so this callback has to update opts explicitly here + */ + opts->attach_btf_obj_fd = btf_obj_fd; + opts->attach_btf_id = btf_type_id; + } + return 0; +} + +static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz); + +static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_program *prog, + struct bpf_insn *insns, int insns_cnt, + const char *license, __u32 kern_version, + int *prog_fd) +{ + LIBBPF_OPTS(bpf_prog_load_opts, load_attr); + const char *prog_name = NULL; + char *cp, errmsg[STRERR_BUFSIZE]; + size_t log_buf_size = 0; + char *log_buf = NULL, *tmp; + int btf_fd, ret, err; + bool own_log_buf = true; + __u32 log_level = prog->log_level; + + if (prog->type == BPF_PROG_TYPE_UNSPEC) { + /* + * The program type must be set. Most likely we couldn't find a proper + * section definition at load time, and thus we didn't infer the type. + */ + pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n", + prog->name, prog->sec_name); + return -EINVAL; + } + + if (!insns || !insns_cnt) + return -EINVAL; + + load_attr.expected_attach_type = prog->expected_attach_type; + if (kernel_supports(obj, FEAT_PROG_NAME)) + prog_name = prog->name; + load_attr.attach_prog_fd = prog->attach_prog_fd; + load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd; + load_attr.attach_btf_id = prog->attach_btf_id; + load_attr.kern_version = kern_version; + load_attr.prog_ifindex = prog->prog_ifindex; + + /* specify func_info/line_info only if kernel supports them */ + btf_fd = bpf_object__btf_fd(obj); + if (btf_fd >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) { + load_attr.prog_btf_fd = btf_fd; + load_attr.func_info = prog->func_info; + load_attr.func_info_rec_size = prog->func_info_rec_size; + load_attr.func_info_cnt = prog->func_info_cnt; + load_attr.line_info = prog->line_info; + load_attr.line_info_rec_size = prog->line_info_rec_size; + load_attr.line_info_cnt = prog->line_info_cnt; + } + load_attr.log_level = log_level; + load_attr.prog_flags = prog->prog_flags; + load_attr.fd_array = obj->fd_array; + + /* adjust load_attr if sec_def provides custom preload callback */ + if (prog->sec_def && prog->sec_def->prog_prepare_load_fn) { + err = prog->sec_def->prog_prepare_load_fn(prog, &load_attr, prog->sec_def->cookie); + if (err < 0) { + pr_warn("prog '%s': failed to prepare load attributes: %d\n", + prog->name, err); + return err; + } + } + + if (obj->gen_loader) { + bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name, + license, insns, insns_cnt, &load_attr, + prog - obj->programs); + *prog_fd = -1; + return 0; + } + +retry_load: + /* if log_level is zero, we don't request logs initiallly even if + * custom log_buf is specified; if the program load fails, then we'll + * bump log_level to 1 and use either custom log_buf or we'll allocate + * our own and retry the load to get details on what failed + */ + if (log_level) { + if (prog->log_buf) { + log_buf = prog->log_buf; + log_buf_size = prog->log_size; + own_log_buf = false; + } else if (obj->log_buf) { + log_buf = obj->log_buf; + log_buf_size = obj->log_size; + own_log_buf = false; + } else { + log_buf_size = max((size_t)BPF_LOG_BUF_SIZE, log_buf_size * 2); + tmp = realloc(log_buf, log_buf_size); + if (!tmp) { + ret = -ENOMEM; + 
goto out; + } + log_buf = tmp; + log_buf[0] = '\0'; + own_log_buf = true; + } + } + + load_attr.log_buf = log_buf; + load_attr.log_size = log_buf_size; + load_attr.log_level = log_level; + + ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr); + if (ret >= 0) { + if (log_level && own_log_buf) { + pr_debug("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n", + prog->name, log_buf); + } + + if (obj->has_rodata && kernel_supports(obj, FEAT_PROG_BIND_MAP)) { + struct bpf_map *map; + int i; + + for (i = 0; i < obj->nr_maps; i++) { + map = &prog->obj->maps[i]; + if (map->libbpf_type != LIBBPF_MAP_RODATA) + continue; + + if (bpf_prog_bind_map(ret, bpf_map__fd(map), NULL)) { + cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); + pr_warn("prog '%s': failed to bind map '%s': %s\n", + prog->name, map->real_name, cp); + /* Don't fail hard if can't bind rodata. */ + } + } + } + + *prog_fd = ret; + ret = 0; + goto out; + } + + if (log_level == 0) { + log_level = 1; + goto retry_load; + } + /* On ENOSPC, increase log buffer size and retry, unless custom + * log_buf is specified. + * Be careful to not overflow u32, though. Kernel's log buf size limit + * isn't part of UAPI so it can always be bumped to full 4GB. So don't + * multiply by 2 unless we are sure we'll fit within 32 bits. + * Currently, we'll get -EINVAL when we reach (UINT_MAX >> 2). + */ + if (own_log_buf && errno == ENOSPC && log_buf_size <= UINT_MAX / 2) + goto retry_load; + + ret = -errno; + + /* post-process verifier log to improve error descriptions */ + fixup_verifier_log(prog, log_buf, log_buf_size); + + cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); + pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, cp); + pr_perm_msg(ret); + + if (own_log_buf && log_buf && log_buf[0] != '\0') { + pr_warn("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n", + prog->name, log_buf); + } + +out: + if (own_log_buf) + free(log_buf); + return ret; +} + +static char *find_prev_line(char *buf, char *cur) +{ + char *p; + + if (cur == buf) /* end of a log buf */ + return NULL; + + p = cur - 1; + while (p - 1 >= buf && *(p - 1) != '\n') + p--; + + return p; +} + +static void patch_log(char *buf, size_t buf_sz, size_t log_sz, + char *orig, size_t orig_sz, const char *patch) +{ + /* size of the remaining log content to the right from the to-be-replaced part */ + size_t rem_sz = (buf + log_sz) - (orig + orig_sz); + size_t patch_sz = strlen(patch); + + if (patch_sz != orig_sz) { + /* If patch line(s) are longer than original piece of verifier log, + * shift log contents by (patch_sz - orig_sz) bytes to the right + * starting from after to-be-replaced part of the log. + * + * If patch line(s) are shorter than original piece of verifier log, + * shift log contents by (orig_sz - patch_sz) bytes to the left + * starting from after to-be-replaced part of the log + * + * We need to be careful about not overflowing available + * buf_sz capacity. If that's the case, we'll truncate the end + * of the original log, as necessary. 
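+ *
+ * Worked example (editor's note, hypothetical sizes): with buf_sz = 100,
+ * log_sz = 60, a to-be-replaced piece of orig_sz = 10 starting at offset
+ * 20, and patch_sz = 30, the log grows by 20 bytes; the free space of
+ * buf_sz - log_sz = 40 bytes absorbs that, so the tail after the
+ * replaced piece is just shifted right by 20 bytes. With buf_sz = 70
+ * instead, only 10 bytes are free, so the last 10 bytes of the original
+ * log are truncated to make the patch fit.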
+ */ + if (patch_sz > orig_sz) { + if (orig + patch_sz >= buf + buf_sz) { + /* patch is big enough to cover remaining space completely */ + patch_sz -= (orig + patch_sz) - (buf + buf_sz) + 1; + rem_sz = 0; + } else if (patch_sz - orig_sz > buf_sz - log_sz) { + /* patch causes part of remaining log to be truncated */ + rem_sz -= (patch_sz - orig_sz) - (buf_sz - log_sz); + } + } + /* shift remaining log to the right by calculated amount */ + memmove(orig + patch_sz, orig + orig_sz, rem_sz); + } + + memcpy(orig, patch, patch_sz); +} + +static void fixup_log_failed_core_relo(struct bpf_program *prog, + char *buf, size_t buf_sz, size_t log_sz, + char *line1, char *line2, char *line3) +{ + /* Expected log for failed and not properly guarded CO-RE relocation: + * line1 -> 123: (85) call unknown#195896080 + * line2 -> invalid func unknown#195896080 + * line3 -> + * + * "123" is the index of the instruction that was poisoned. We extract + * instruction index to find corresponding CO-RE relocation and + * replace this part of the log with more relevant information about + * failed CO-RE relocation. + */ + const struct bpf_core_relo *relo; + struct bpf_core_spec spec; + char patch[512], spec_buf[256]; + int insn_idx, err; + + if (sscanf(line1, "%d: (%*d) call unknown#195896080\n", &insn_idx) != 1) + return; + + relo = find_relo_core(prog, insn_idx); + if (!relo) + return; + + err = bpf_core_parse_spec(prog->name, prog->obj->btf, relo, &spec); + if (err) + return; + + bpf_core_format_spec(spec_buf, sizeof(spec_buf), &spec); + snprintf(patch, sizeof(patch), + "%d: \n" + "failed to resolve CO-RE relocation %s\n", + insn_idx, spec_buf); + + patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch); +} + +static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz) +{ + /* look for familiar error patterns in last N lines of the log */ + const size_t max_last_line_cnt = 10; + char *prev_line, *cur_line, *next_line; + size_t log_sz; + int i; + + if (!buf) + return; + + log_sz = strlen(buf) + 1; + next_line = buf + log_sz - 1; + + for (i = 0; i < max_last_line_cnt; i++, next_line = cur_line) { + cur_line = find_prev_line(buf, next_line); + if (!cur_line) + return; + + /* failed CO-RE relocation case */ + if (str_has_pfx(cur_line, "invalid func unknown#195896080\n")) { + prev_line = find_prev_line(buf, cur_line); + if (!prev_line) + continue; + + fixup_log_failed_core_relo(prog, buf, buf_sz, log_sz, + prev_line, cur_line, next_line); + return; + } + } +} + +static int bpf_program_record_relos(struct bpf_program *prog) +{ + struct bpf_object *obj = prog->obj; + int i; + + for (i = 0; i < prog->nr_reloc; i++) { + struct reloc_desc *relo = &prog->reloc_desc[i]; + struct extern_desc *ext = &obj->externs[relo->sym_off]; + + switch (relo->type) { + case RELO_EXTERN_VAR: + if (ext->type != EXT_KSYM) + continue; + bpf_gen__record_extern(obj->gen_loader, ext->name, + ext->is_weak, !ext->ksym.type_id, + BTF_KIND_VAR, relo->insn_idx); + break; + case RELO_EXTERN_FUNC: + bpf_gen__record_extern(obj->gen_loader, ext->name, + ext->is_weak, false, BTF_KIND_FUNC, + relo->insn_idx); + break; + case RELO_CORE: { + struct bpf_core_relo cr = { + .insn_off = relo->insn_idx * 8, + .type_id = relo->core_relo->type_id, + .access_str_off = relo->core_relo->access_str_off, + .kind = relo->core_relo->kind, + }; + + bpf_gen__record_relo_core(obj->gen_loader, &cr); + break; + } + default: + continue; + } + } + return 0; +} + +static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog, + const 
char *license, __u32 kern_ver) +{ + int err = 0, fd, i; + + if (obj->loaded) { + pr_warn("prog '%s': can't load after object was loaded\n", prog->name); + return libbpf_err(-EINVAL); + } + + if (prog->instances.nr < 0 || !prog->instances.fds) { + if (prog->preprocessor) { + pr_warn("Internal error: can't load program '%s'\n", + prog->name); + return libbpf_err(-LIBBPF_ERRNO__INTERNAL); + } + + prog->instances.fds = malloc(sizeof(int)); + if (!prog->instances.fds) { + pr_warn("Not enough memory for BPF fds\n"); + return libbpf_err(-ENOMEM); + } + prog->instances.nr = 1; + prog->instances.fds[0] = -1; + } + + if (!prog->preprocessor) { + if (prog->instances.nr != 1) { + pr_warn("prog '%s': inconsistent nr(%d) != 1\n", + prog->name, prog->instances.nr); + } + if (obj->gen_loader) + bpf_program_record_relos(prog); + err = bpf_object_load_prog_instance(obj, prog, + prog->insns, prog->insns_cnt, + license, kern_ver, &fd); + if (!err) + prog->instances.fds[0] = fd; + goto out; + } + + for (i = 0; i < prog->instances.nr; i++) { + struct bpf_prog_prep_result result; + bpf_program_prep_t preprocessor = prog->preprocessor; + + memset(&result, 0, sizeof(result)); + err = preprocessor(prog, i, prog->insns, + prog->insns_cnt, &result); + if (err) { + pr_warn("Preprocessing the %dth instance of program '%s' failed\n", + i, prog->name); + goto out; + } + + if (!result.new_insn_ptr || !result.new_insn_cnt) { + pr_debug("Skip loading the %dth instance of program '%s'\n", + i, prog->name); + prog->instances.fds[i] = -1; + if (result.pfd) + *result.pfd = -1; + continue; + } + + err = bpf_object_load_prog_instance(obj, prog, + result.new_insn_ptr, result.new_insn_cnt, + license, kern_ver, &fd); + if (err) { + pr_warn("Loading the %dth instance of program '%s' failed\n", + i, prog->name); + goto out; + } + + if (result.pfd) + *result.pfd = fd; + prog->instances.fds[i] = fd; + } +out: + if (err) + pr_warn("failed to load program '%s'\n", prog->name); + return libbpf_err(err); +} + +int bpf_program__load(struct bpf_program *prog, const char *license, __u32 kern_ver) +{ + return bpf_object_load_prog(prog->obj, prog, license, kern_ver); +} + +static int +bpf_object__load_progs(struct bpf_object *obj, int log_level) +{ + struct bpf_program *prog; + size_t i; + int err; + + for (i = 0; i < obj->nr_programs; i++) { + prog = &obj->programs[i]; + err = bpf_object__sanitize_prog(obj, prog); + if (err) + return err; + } + + for (i = 0; i < obj->nr_programs; i++) { + prog = &obj->programs[i]; + if (prog_is_subprog(obj, prog)) + continue; + if (!prog->autoload) { + pr_debug("prog '%s': skipped loading\n", prog->name); + continue; + } + prog->log_level |= log_level; + err = bpf_object_load_prog(obj, prog, obj->license, obj->kern_version); + if (err) + return err; + } + + bpf_object__free_relocs(obj); + return 0; +} + +static const struct bpf_sec_def *find_sec_def(const char *sec_name); + +static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts) +{ + struct bpf_program *prog; + int err; + + bpf_object__for_each_program(prog, obj) { + prog->sec_def = find_sec_def(prog->sec_name); + if (!prog->sec_def) { + /* couldn't guess, but user might manually specify */ + pr_debug("prog '%s': unrecognized ELF section name '%s'\n", + prog->name, prog->sec_name); + continue; + } + + prog->type = prog->sec_def->prog_type; + prog->expected_attach_type = prog->sec_def->expected_attach_type; + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + if 
(prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING || + prog->sec_def->prog_type == BPF_PROG_TYPE_EXT) + prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0); +#pragma GCC diagnostic pop + + /* sec_def can have custom callback which should be called + * after bpf_program is initialized to adjust its properties + */ + if (prog->sec_def->prog_setup_fn) { + err = prog->sec_def->prog_setup_fn(prog, prog->sec_def->cookie); + if (err < 0) { + pr_warn("prog '%s': failed to initialize: %d\n", + prog->name, err); + return err; + } + } + } + + return 0; +} + +static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz, + const struct bpf_object_open_opts *opts) +{ + const char *obj_name, *kconfig, *btf_tmp_path; + struct bpf_object *obj; + char tmp_name[64]; + int err; + char *log_buf; + size_t log_size; + __u32 log_level; + + if (elf_version(EV_CURRENT) == EV_NONE) { + pr_warn("failed to init libelf for %s\n", + path ? : "(mem buf)"); + return ERR_PTR(-LIBBPF_ERRNO__LIBELF); + } + + if (!OPTS_VALID(opts, bpf_object_open_opts)) + return ERR_PTR(-EINVAL); + + obj_name = OPTS_GET(opts, object_name, NULL); + if (obj_buf) { + if (!obj_name) { + snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx", + (unsigned long)obj_buf, + (unsigned long)obj_buf_sz); + obj_name = tmp_name; + } + path = obj_name; + pr_debug("loading object '%s' from buffer\n", obj_name); + } + + log_buf = OPTS_GET(opts, kernel_log_buf, NULL); + log_size = OPTS_GET(opts, kernel_log_size, 0); + log_level = OPTS_GET(opts, kernel_log_level, 0); + if (log_size > UINT_MAX) + return ERR_PTR(-EINVAL); + if (log_size && !log_buf) + return ERR_PTR(-EINVAL); + + obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name); + if (IS_ERR(obj)) + return obj; + + obj->log_buf = log_buf; + obj->log_size = log_size; + obj->log_level = log_level; + + btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL); + if (btf_tmp_path) { + if (strlen(btf_tmp_path) >= PATH_MAX) { + err = -ENAMETOOLONG; + goto out; + } + obj->btf_custom_path = strdup(btf_tmp_path); + if (!obj->btf_custom_path) { + err = -ENOMEM; + goto out; + } + } + + kconfig = OPTS_GET(opts, kconfig, NULL); + if (kconfig) { + obj->kconfig = strdup(kconfig); + if (!obj->kconfig) { + err = -ENOMEM; + goto out; + } + } + + err = bpf_object__elf_init(obj); + err = err ? : bpf_object__check_endianness(obj); + err = err ? : bpf_object__elf_collect(obj); + err = err ? : bpf_object__collect_externs(obj); + err = err ? : bpf_object__finalize_btf(obj); + err = err ? : bpf_object__init_maps(obj, opts); + err = err ? : bpf_object_init_progs(obj, opts); + err = err ? 
: bpf_object__collect_relos(obj); + if (err) + goto out; + + bpf_object__elf_finish(obj); + + return obj; +out: + bpf_object__close(obj); + return ERR_PTR(err); +} + +static struct bpf_object * +__bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags) +{ + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, + .relaxed_maps = flags & MAPS_RELAX_COMPAT, + ); + + /* param validation */ + if (!attr->file) + return NULL; + + pr_debug("loading %s\n", attr->file); + return bpf_object_open(attr->file, NULL, 0, &opts); +} + +struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr) +{ + return libbpf_ptr(__bpf_object__open_xattr(attr, 0)); +} + +struct bpf_object *bpf_object__open(const char *path) +{ + struct bpf_object_open_attr attr = { + .file = path, + .prog_type = BPF_PROG_TYPE_UNSPEC, + }; + + return libbpf_ptr(__bpf_object__open_xattr(&attr, 0)); +} + +struct bpf_object * +bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts) +{ + if (!path) + return libbpf_err_ptr(-EINVAL); + + pr_debug("loading %s\n", path); + + return libbpf_ptr(bpf_object_open(path, NULL, 0, opts)); +} + +struct bpf_object * +bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz, + const struct bpf_object_open_opts *opts) +{ + if (!obj_buf || obj_buf_sz == 0) + return libbpf_err_ptr(-EINVAL); + + return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, opts)); +} + +struct bpf_object * +bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz, + const char *name) +{ + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, + .object_name = name, + /* wrong default, but backwards-compatible */ + .relaxed_maps = true, + ); + + /* returning NULL is wrong, but backwards-compatible */ + if (!obj_buf || obj_buf_sz == 0) + return errno = EINVAL, NULL; + + return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, &opts)); +} + +static int bpf_object_unload(struct bpf_object *obj) +{ + size_t i; + + if (!obj) + return libbpf_err(-EINVAL); + + for (i = 0; i < obj->nr_maps; i++) { + zclose(obj->maps[i].fd); + if (obj->maps[i].st_ops) + zfree(&obj->maps[i].st_ops->kern_vdata); + } + + for (i = 0; i < obj->nr_programs; i++) + bpf_program__unload(&obj->programs[i]); + + return 0; +} + +int bpf_object__unload(struct bpf_object *obj) __attribute__((alias("bpf_object_unload"))); + +static int bpf_object__sanitize_maps(struct bpf_object *obj) +{ + struct bpf_map *m; + + bpf_object__for_each_map(m, obj) { + if (!bpf_map__is_internal(m)) + continue; + if (!kernel_supports(obj, FEAT_ARRAY_MMAP)) + m->def.map_flags ^= BPF_F_MMAPABLE; + } + + return 0; +} + +int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *ctx) +{ + char sym_type, sym_name[500]; + unsigned long long sym_addr; + int ret, err = 0; + FILE *f; + + f = fopen("/proc/kallsyms", "r"); + if (!f) { + err = -errno; + pr_warn("failed to open /proc/kallsyms: %d\n", err); + return err; + } + + while (true) { + ret = fscanf(f, "%llx %c %499s%*[^\n]\n", + &sym_addr, &sym_type, sym_name); + if (ret == EOF && feof(f)) + break; + if (ret != 3) { + pr_warn("failed to read kallsyms entry: %d\n", ret); + err = -EINVAL; + break; + } + + err = cb(sym_addr, sym_type, sym_name, ctx); + if (err) + break; + } + + fclose(f); + return err; +} + +static int kallsyms_cb(unsigned long long sym_addr, char sym_type, + const char *sym_name, void *ctx) +{ + struct bpf_object *obj = ctx; + const struct btf_type *t; + struct extern_desc *ext; + + ext = find_extern_by_name(obj, sym_name); + if (!ext || ext->type != EXT_KSYM) + return 0; + + t = 
btf__type_by_id(obj->btf, ext->btf_id); + if (!btf_is_var(t)) + return 0; + + if (ext->is_set && ext->ksym.addr != sym_addr) { + pr_warn("extern (ksym) '%s' resolution is ambiguous: 0x%llx or 0x%llx\n", + sym_name, ext->ksym.addr, sym_addr); + return -EINVAL; + } + if (!ext->is_set) { + ext->is_set = true; + ext->ksym.addr = sym_addr; + pr_debug("extern (ksym) %s=0x%llx\n", sym_name, sym_addr); + } + return 0; +} + +static int bpf_object__read_kallsyms_file(struct bpf_object *obj) +{ + return libbpf_kallsyms_parse(kallsyms_cb, obj); +} + +static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name, + __u16 kind, struct btf **res_btf, + struct module_btf **res_mod_btf) +{ + struct module_btf *mod_btf; + struct btf *btf; + int i, id, err; + + btf = obj->btf_vmlinux; + mod_btf = NULL; + id = btf__find_by_name_kind(btf, ksym_name, kind); + + if (id == -ENOENT) { + err = load_module_btfs(obj); + if (err) + return err; + + for (i = 0; i < obj->btf_module_cnt; i++) { + /* we assume module_btf's BTF FD is always >0 */ + mod_btf = &obj->btf_modules[i]; + btf = mod_btf->btf; + id = btf__find_by_name_kind_own(btf, ksym_name, kind); + if (id != -ENOENT) + break; + } + } + if (id <= 0) + return -ESRCH; + + *res_btf = btf; + *res_mod_btf = mod_btf; + return id; +} + +static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj, + struct extern_desc *ext) +{ + const struct btf_type *targ_var, *targ_type; + __u32 targ_type_id, local_type_id; + struct module_btf *mod_btf = NULL; + const char *targ_var_name; + struct btf *btf = NULL; + int id, err; + + id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &mod_btf); + if (id < 0) { + if (id == -ESRCH && ext->is_weak) + return 0; + pr_warn("extern (var ksym) '%s': not found in kernel BTF\n", + ext->name); + return id; + } + + /* find local type_id */ + local_type_id = ext->ksym.type_id; + + /* find target type_id */ + targ_var = btf__type_by_id(btf, id); + targ_var_name = btf__name_by_offset(btf, targ_var->name_off); + targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id); + + err = bpf_core_types_are_compat(obj->btf, local_type_id, + btf, targ_type_id); + if (err <= 0) { + const struct btf_type *local_type; + const char *targ_name, *local_name; + + local_type = btf__type_by_id(obj->btf, local_type_id); + local_name = btf__name_by_offset(obj->btf, local_type->name_off); + targ_name = btf__name_by_offset(btf, targ_type->name_off); + + pr_warn("extern (var ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n", + ext->name, local_type_id, + btf_kind_str(local_type), local_name, targ_type_id, + btf_kind_str(targ_type), targ_name); + return -EINVAL; + } + + ext->is_set = true; + ext->ksym.kernel_btf_obj_fd = mod_btf ? 
mod_btf->fd : 0; + ext->ksym.kernel_btf_id = id; + pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n", + ext->name, id, btf_kind_str(targ_var), targ_var_name); + + return 0; +} + +static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj, + struct extern_desc *ext) +{ + int local_func_proto_id, kfunc_proto_id, kfunc_id; + struct module_btf *mod_btf = NULL; + const struct btf_type *kern_func; + struct btf *kern_btf = NULL; + int ret; + + local_func_proto_id = ext->ksym.type_id; + + kfunc_id = find_ksym_btf_id(obj, ext->name, BTF_KIND_FUNC, &kern_btf, &mod_btf); + if (kfunc_id < 0) { + if (kfunc_id == -ESRCH && ext->is_weak) + return 0; + pr_warn("extern (func ksym) '%s': not found in kernel or module BTFs\n", + ext->name); + return kfunc_id; + } + + kern_func = btf__type_by_id(kern_btf, kfunc_id); + kfunc_proto_id = kern_func->type; + + ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id, + kern_btf, kfunc_proto_id); + if (ret <= 0) { + pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with kernel [%d]\n", + ext->name, local_func_proto_id, kfunc_proto_id); + return -EINVAL; + } + + /* set index for module BTF fd in fd_array, if unset */ + if (mod_btf && !mod_btf->fd_array_idx) { + /* insn->off is s16 */ + if (obj->fd_array_cnt == INT16_MAX) { + pr_warn("extern (func ksym) '%s': module BTF fd index %d too big to fit in bpf_insn offset\n", + ext->name, mod_btf->fd_array_idx); + return -E2BIG; + } + /* Cannot use index 0 for module BTF fd */ + if (!obj->fd_array_cnt) + obj->fd_array_cnt = 1; + + ret = libbpf_ensure_mem((void **)&obj->fd_array, &obj->fd_array_cap, sizeof(int), + obj->fd_array_cnt + 1); + if (ret) + return ret; + mod_btf->fd_array_idx = obj->fd_array_cnt; + /* we assume module BTF FD is always >0 */ + obj->fd_array[obj->fd_array_cnt++] = mod_btf->fd; + } + + ext->is_set = true; + ext->ksym.kernel_btf_id = kfunc_id; + ext->ksym.btf_fd_idx = mod_btf ? 
mod_btf->fd_array_idx : 0; + pr_debug("extern (func ksym) '%s': resolved to kernel [%d]\n", + ext->name, kfunc_id); + + return 0; +} + +static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj) +{ + const struct btf_type *t; + struct extern_desc *ext; + int i, err; + + for (i = 0; i < obj->nr_extern; i++) { + ext = &obj->externs[i]; + if (ext->type != EXT_KSYM || !ext->ksym.type_id) + continue; + + if (obj->gen_loader) { + ext->is_set = true; + ext->ksym.kernel_btf_obj_fd = 0; + ext->ksym.kernel_btf_id = 0; + continue; + } + t = btf__type_by_id(obj->btf, ext->btf_id); + if (btf_is_var(t)) + err = bpf_object__resolve_ksym_var_btf_id(obj, ext); + else + err = bpf_object__resolve_ksym_func_btf_id(obj, ext); + if (err) + return err; + } + return 0; +} + +static int bpf_object__resolve_externs(struct bpf_object *obj, + const char *extra_kconfig) +{ + bool need_config = false, need_kallsyms = false; + bool need_vmlinux_btf = false; + struct extern_desc *ext; + void *kcfg_data = NULL; + int err, i; + + if (obj->nr_extern == 0) + return 0; + + if (obj->kconfig_map_idx >= 0) + kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped; + + for (i = 0; i < obj->nr_extern; i++) { + ext = &obj->externs[i]; + + if (ext->type == EXT_KCFG && + strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) { + void *ext_val = kcfg_data + ext->kcfg.data_off; + __u32 kver = get_kernel_version(); + + if (!kver) { + pr_warn("failed to get kernel version\n"); + return -EINVAL; + } + err = set_kcfg_value_num(ext, ext_val, kver); + if (err) + return err; + pr_debug("extern (kcfg) %s=0x%x\n", ext->name, kver); + } else if (ext->type == EXT_KCFG && str_has_pfx(ext->name, "CONFIG_")) { + need_config = true; + } else if (ext->type == EXT_KSYM) { + if (ext->ksym.type_id) + need_vmlinux_btf = true; + else + need_kallsyms = true; + } else { + pr_warn("unrecognized extern '%s'\n", ext->name); + return -EINVAL; + } + } + if (need_config && extra_kconfig) { + err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data); + if (err) + return -EINVAL; + need_config = false; + for (i = 0; i < obj->nr_extern; i++) { + ext = &obj->externs[i]; + if (ext->type == EXT_KCFG && !ext->is_set) { + need_config = true; + break; + } + } + } + if (need_config) { + err = bpf_object__read_kconfig_file(obj, kcfg_data); + if (err) + return -EINVAL; + } + if (need_kallsyms) { + err = bpf_object__read_kallsyms_file(obj); + if (err) + return -EINVAL; + } + if (need_vmlinux_btf) { + err = bpf_object__resolve_ksyms_btf_id(obj); + if (err) + return -EINVAL; + } + for (i = 0; i < obj->nr_extern; i++) { + ext = &obj->externs[i]; + + if (!ext->is_set && !ext->is_weak) { + pr_warn("extern %s (strong) not resolved\n", ext->name); + return -ESRCH; + } else if (!ext->is_set) { + pr_debug("extern %s (weak) not resolved, defaulting to zero\n", + ext->name); + } + } + + return 0; +} + +static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path) +{ + int err, i; + + if (!obj) + return libbpf_err(-EINVAL); + + if (obj->loaded) { + pr_warn("object '%s': load can't be attempted twice\n", obj->name); + return libbpf_err(-EINVAL); + } + + if (obj->gen_loader) + bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps); + + err = bpf_object__probe_loading(obj); + err = err ? : bpf_object__load_vmlinux_btf(obj, false); + err = err ? : bpf_object__resolve_externs(obj, obj->kconfig); + err = err ? : bpf_object__sanitize_and_load_btf(obj); + err = err ? : bpf_object__sanitize_maps(obj); + err = err ? 
: bpf_object__init_kern_struct_ops_maps(obj); + err = err ? : bpf_object__create_maps(obj); + err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path); + err = err ? : bpf_object__load_progs(obj, extra_log_level); + err = err ? : bpf_object_init_prog_arrays(obj); + + if (obj->gen_loader) { + /* reset FDs */ + if (obj->btf) + btf__set_fd(obj->btf, -1); + for (i = 0; i < obj->nr_maps; i++) + obj->maps[i].fd = -1; + if (!err) + err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps); + } + + /* clean up fd_array */ + zfree(&obj->fd_array); + + /* clean up module BTFs */ + for (i = 0; i < obj->btf_module_cnt; i++) { + close(obj->btf_modules[i].fd); + btf__free(obj->btf_modules[i].btf); + free(obj->btf_modules[i].name); + } + free(obj->btf_modules); + + /* clean up vmlinux BTF */ + btf__free(obj->btf_vmlinux); + obj->btf_vmlinux = NULL; + + obj->loaded = true; /* doesn't matter if successfully or not */ + + if (err) + goto out; + + return 0; +out: + /* unpin any maps that were auto-pinned during load */ + for (i = 0; i < obj->nr_maps; i++) + if (obj->maps[i].pinned && !obj->maps[i].reused) + bpf_map__unpin(&obj->maps[i], NULL); + + bpf_object_unload(obj); + pr_warn("failed to load object '%s'\n", obj->path); + return libbpf_err(err); +} + +int bpf_object__load_xattr(struct bpf_object_load_attr *attr) +{ + return bpf_object_load(attr->obj, attr->log_level, attr->target_btf_path); +} + +int bpf_object__load(struct bpf_object *obj) +{ + return bpf_object_load(obj, 0, NULL); +} + +static int make_parent_dir(const char *path) +{ + char *cp, errmsg[STRERR_BUFSIZE]; + char *dname, *dir; + int err = 0; + + dname = strdup(path); + if (dname == NULL) + return -ENOMEM; + + dir = dirname(dname); + if (mkdir(dir, 0700) && errno != EEXIST) + err = -errno; + + free(dname); + if (err) { + cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); + pr_warn("failed to mkdir %s: %s\n", path, cp); + } + return err; +} + +static int check_path(const char *path) +{ + char *cp, errmsg[STRERR_BUFSIZE]; + struct statfs st_fs; + char *dname, *dir; + int err = 0; + + if (path == NULL) + return -EINVAL; + + dname = strdup(path); + if (dname == NULL) + return -ENOMEM; + + dir = dirname(dname); + if (statfs(dir, &st_fs)) { + cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); + pr_warn("failed to statfs %s: %s\n", dir, cp); + err = -errno; + } + free(dname); + + if (!err && st_fs.f_type != BPF_FS_MAGIC) { + pr_warn("specified path %s is not on BPF FS\n", path); + err = -EINVAL; + } + + return err; +} + +static int bpf_program_pin_instance(struct bpf_program *prog, const char *path, int instance) +{ + char *cp, errmsg[STRERR_BUFSIZE]; + int err; + + err = make_parent_dir(path); + if (err) + return libbpf_err(err); + + err = check_path(path); + if (err) + return libbpf_err(err); + + if (prog == NULL) { + pr_warn("invalid program pointer\n"); + return libbpf_err(-EINVAL); + } + + if (instance < 0 || instance >= prog->instances.nr) { + pr_warn("invalid prog instance %d of prog %s (max %d)\n", + instance, prog->name, prog->instances.nr); + return libbpf_err(-EINVAL); + } + + if (bpf_obj_pin(prog->instances.fds[instance], path)) { + err = -errno; + cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); + pr_warn("failed to pin program: %s\n", cp); + return libbpf_err(err); + } + pr_debug("pinned program '%s'\n", path); + + return 0; +} + +static int bpf_program_unpin_instance(struct bpf_program *prog, const char *path, int instance) +{ + int err; + + err = check_path(path); + if (err) + return 
libbpf_err(err); + + if (prog == NULL) { + pr_warn("invalid program pointer\n"); + return libbpf_err(-EINVAL); + } + + if (instance < 0 || instance >= prog->instances.nr) { + pr_warn("invalid prog instance %d of prog %s (max %d)\n", + instance, prog->name, prog->instances.nr); + return libbpf_err(-EINVAL); + } + + err = unlink(path); + if (err != 0) + return libbpf_err(-errno); + + pr_debug("unpinned program '%s'\n", path); + + return 0; +} + +__attribute__((alias("bpf_program_pin_instance"))) +int bpf_object__pin_instance(struct bpf_program *prog, const char *path, int instance); + +__attribute__((alias("bpf_program_unpin_instance"))) +int bpf_program__unpin_instance(struct bpf_program *prog, const char *path, int instance); + +int bpf_program__pin(struct bpf_program *prog, const char *path) +{ + int i, err; + + err = make_parent_dir(path); + if (err) + return libbpf_err(err); + + err = check_path(path); + if (err) + return libbpf_err(err); + + if (prog == NULL) { + pr_warn("invalid program pointer\n"); + return libbpf_err(-EINVAL); + } + + if (prog->instances.nr <= 0) { + pr_warn("no instances of prog %s to pin\n", prog->name); + return libbpf_err(-EINVAL); + } + + if (prog->instances.nr == 1) { + /* don't create subdirs when pinning single instance */ + return bpf_program_pin_instance(prog, path, 0); + } + + for (i = 0; i < prog->instances.nr; i++) { + char buf[PATH_MAX]; + int len; + + len = snprintf(buf, PATH_MAX, "%s/%d", path, i); + if (len < 0) { + err = -EINVAL; + goto err_unpin; + } else if (len >= PATH_MAX) { + err = -ENAMETOOLONG; + goto err_unpin; + } + + err = bpf_program_pin_instance(prog, buf, i); + if (err) + goto err_unpin; + } + + return 0; + +err_unpin: + for (i = i - 1; i >= 0; i--) { + char buf[PATH_MAX]; + int len; + + len = snprintf(buf, PATH_MAX, "%s/%d", path, i); + if (len < 0) + continue; + else if (len >= PATH_MAX) + continue; + + bpf_program_unpin_instance(prog, buf, i); + } + + rmdir(path); + + return libbpf_err(err); +} + +int bpf_program__unpin(struct bpf_program *prog, const char *path) +{ + int i, err; + + err = check_path(path); + if (err) + return libbpf_err(err); + + if (prog == NULL) { + pr_warn("invalid program pointer\n"); + return libbpf_err(-EINVAL); + } + + if (prog->instances.nr <= 0) { + pr_warn("no instances of prog %s to pin\n", prog->name); + return libbpf_err(-EINVAL); + } + + if (prog->instances.nr == 1) { + /* don't create subdirs when pinning single instance */ + return bpf_program_unpin_instance(prog, path, 0); + } + + for (i = 0; i < prog->instances.nr; i++) { + char buf[PATH_MAX]; + int len; + + len = snprintf(buf, PATH_MAX, "%s/%d", path, i); + if (len < 0) + return libbpf_err(-EINVAL); + else if (len >= PATH_MAX) + return libbpf_err(-ENAMETOOLONG); + + err = bpf_program_unpin_instance(prog, buf, i); + if (err) + return err; + } + + err = rmdir(path); + if (err) + return libbpf_err(-errno); + + return 0; +} + +int bpf_map__pin(struct bpf_map *map, const char *path) +{ + char *cp, errmsg[STRERR_BUFSIZE]; + int err; + + if (map == NULL) { + pr_warn("invalid map pointer\n"); + return libbpf_err(-EINVAL); + } + + if (map->pin_path) { + if (path && strcmp(path, map->pin_path)) { + pr_warn("map '%s' already has pin path '%s' different from '%s'\n", + bpf_map__name(map), map->pin_path, path); + return libbpf_err(-EINVAL); + } else if (map->pinned) { + pr_debug("map '%s' already pinned at '%s'; not re-pinning\n", + bpf_map__name(map), map->pin_path); + return 0; + } + } else { + if (!path) { + pr_warn("missing a path to pin map '%s' at\n", + 
bpf_map__name(map)); + return libbpf_err(-EINVAL); + } else if (map->pinned) { + pr_warn("map '%s' already pinned\n", bpf_map__name(map)); + return libbpf_err(-EEXIST); + } + + map->pin_path = strdup(path); + if (!map->pin_path) { + err = -errno; + goto out_err; + } + } + + err = make_parent_dir(map->pin_path); + if (err) + return libbpf_err(err); + + err = check_path(map->pin_path); + if (err) + return libbpf_err(err); + + if (bpf_obj_pin(map->fd, map->pin_path)) { + err = -errno; + goto out_err; + } + + map->pinned = true; + pr_debug("pinned map '%s'\n", map->pin_path); + + return 0; + +out_err: + cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); + pr_warn("failed to pin map: %s\n", cp); + return libbpf_err(err); +} + +int bpf_map__unpin(struct bpf_map *map, const char *path) +{ + int err; + + if (map == NULL) { + pr_warn("invalid map pointer\n"); + return libbpf_err(-EINVAL); + } + + if (map->pin_path) { + if (path && strcmp(path, map->pin_path)) { + pr_warn("map '%s' already has pin path '%s' different from '%s'\n", + bpf_map__name(map), map->pin_path, path); + return libbpf_err(-EINVAL); + } + path = map->pin_path; + } else if (!path) { + pr_warn("no path to unpin map '%s' from\n", + bpf_map__name(map)); + return libbpf_err(-EINVAL); + } + + err = check_path(path); + if (err) + return libbpf_err(err); + + err = unlink(path); + if (err != 0) + return libbpf_err(-errno); + + map->pinned = false; + pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path); + + return 0; +} + +int bpf_map__set_pin_path(struct bpf_map *map, const char *path) +{ + char *new = NULL; + + if (path) { + new = strdup(path); + if (!new) + return libbpf_err(-errno); + } + + free(map->pin_path); + map->pin_path = new; + return 0; +} + +__alias(bpf_map__pin_path) +const char *bpf_map__get_pin_path(const struct bpf_map *map); + +const char *bpf_map__pin_path(const struct bpf_map *map) +{ + return map->pin_path; +} + +bool bpf_map__is_pinned(const struct bpf_map *map) +{ + return map->pinned; +} + +static void sanitize_pin_path(char *s) +{ + /* bpffs disallows periods in path names */ + while (*s) { + if (*s == '.') + *s = '_'; + s++; + } +} + +int bpf_object__pin_maps(struct bpf_object *obj, const char *path) +{ + struct bpf_map *map; + int err; + + if (!obj) + return libbpf_err(-ENOENT); + + if (!obj->loaded) { + pr_warn("object not yet loaded; load it first\n"); + return libbpf_err(-ENOENT); + } + + bpf_object__for_each_map(map, obj) { + char *pin_path = NULL; + char buf[PATH_MAX]; + + if (map->skipped) + continue; + + if (path) { + int len; + + len = snprintf(buf, PATH_MAX, "%s/%s", path, + bpf_map__name(map)); + if (len < 0) { + err = -EINVAL; + goto err_unpin_maps; + } else if (len >= PATH_MAX) { + err = -ENAMETOOLONG; + goto err_unpin_maps; + } + sanitize_pin_path(buf); + pin_path = buf; + } else if (!map->pin_path) { + continue; + } + + err = bpf_map__pin(map, pin_path); + if (err) + goto err_unpin_maps; + } + + return 0; + +err_unpin_maps: + while ((map = bpf_object__prev_map(obj, map))) { + if (!map->pin_path) + continue; + + bpf_map__unpin(map, NULL); + } + + return libbpf_err(err); +} + +int bpf_object__unpin_maps(struct bpf_object *obj, const char *path) +{ + struct bpf_map *map; + int err; + + if (!obj) + return libbpf_err(-ENOENT); + + bpf_object__for_each_map(map, obj) { + char *pin_path = NULL; + char buf[PATH_MAX]; + + if (path) { + int len; + + len = snprintf(buf, PATH_MAX, "%s/%s", path, + bpf_map__name(map)); + if (len < 0) + return libbpf_err(-EINVAL); + else if (len >= PATH_MAX) + 
return libbpf_err(-ENAMETOOLONG); + sanitize_pin_path(buf); + pin_path = buf; + } else if (!map->pin_path) { + continue; + } + + err = bpf_map__unpin(map, pin_path); + if (err) + return libbpf_err(err); + } + + return 0; +} + +int bpf_object__pin_programs(struct bpf_object *obj, const char *path) +{ + struct bpf_program *prog; + int err; + + if (!obj) + return libbpf_err(-ENOENT); + + if (!obj->loaded) { + pr_warn("object not yet loaded; load it first\n"); + return libbpf_err(-ENOENT); + } + + bpf_object__for_each_program(prog, obj) { + char buf[PATH_MAX]; + int len; + + len = snprintf(buf, PATH_MAX, "%s/%s", path, + prog->pin_name); + if (len < 0) { + err = -EINVAL; + goto err_unpin_programs; + } else if (len >= PATH_MAX) { + err = -ENAMETOOLONG; + goto err_unpin_programs; + } + + err = bpf_program__pin(prog, buf); + if (err) + goto err_unpin_programs; + } + + return 0; + +err_unpin_programs: + while ((prog = bpf_object__prev_program(obj, prog))) { + char buf[PATH_MAX]; + int len; + + len = snprintf(buf, PATH_MAX, "%s/%s", path, + prog->pin_name); + if (len < 0) + continue; + else if (len >= PATH_MAX) + continue; + + bpf_program__unpin(prog, buf); + } + + return libbpf_err(err); +} + +int bpf_object__unpin_programs(struct bpf_object *obj, const char *path) +{ + struct bpf_program *prog; + int err; + + if (!obj) + return libbpf_err(-ENOENT); + + bpf_object__for_each_program(prog, obj) { + char buf[PATH_MAX]; + int len; + + len = snprintf(buf, PATH_MAX, "%s/%s", path, + prog->pin_name); + if (len < 0) + return libbpf_err(-EINVAL); + else if (len >= PATH_MAX) + return libbpf_err(-ENAMETOOLONG); + + err = bpf_program__unpin(prog, buf); + if (err) + return libbpf_err(err); + } + + return 0; +} + +int bpf_object__pin(struct bpf_object *obj, const char *path) +{ + int err; + + err = bpf_object__pin_maps(obj, path); + if (err) + return libbpf_err(err); + + err = bpf_object__pin_programs(obj, path); + if (err) { + bpf_object__unpin_maps(obj, path); + return libbpf_err(err); + } + + return 0; +} + +static void bpf_map__destroy(struct bpf_map *map) +{ + if (map->clear_priv) + map->clear_priv(map, map->priv); + map->priv = NULL; + map->clear_priv = NULL; + + if (map->inner_map) { + bpf_map__destroy(map->inner_map); + zfree(&map->inner_map); + } + + zfree(&map->init_slots); + map->init_slots_sz = 0; + + if (map->mmaped) { + munmap(map->mmaped, bpf_map_mmap_sz(map)); + map->mmaped = NULL; + } + + if (map->st_ops) { + zfree(&map->st_ops->data); + zfree(&map->st_ops->progs); + zfree(&map->st_ops->kern_func_off); + zfree(&map->st_ops); + } + + zfree(&map->name); + zfree(&map->real_name); + zfree(&map->pin_path); + + if (map->fd >= 0) + zclose(map->fd); +} + +void bpf_object__close(struct bpf_object *obj) +{ + size_t i; + + if (IS_ERR_OR_NULL(obj)) + return; + + if (obj->clear_priv) + obj->clear_priv(obj, obj->priv); + + usdt_manager_free(obj->usdt_man); + obj->usdt_man = NULL; + + bpf_gen__free(obj->gen_loader); + bpf_object__elf_finish(obj); + bpf_object_unload(obj); + btf__free(obj->btf); + btf_ext__free(obj->btf_ext); + + for (i = 0; i < obj->nr_maps; i++) + bpf_map__destroy(&obj->maps[i]); + + zfree(&obj->btf_custom_path); + zfree(&obj->kconfig); + zfree(&obj->externs); + obj->nr_extern = 0; + + zfree(&obj->maps); + obj->nr_maps = 0; + + if (obj->programs && obj->nr_programs) { + for (i = 0; i < obj->nr_programs; i++) + bpf_program__exit(&obj->programs[i]); + } + zfree(&obj->programs); + + list_del(&obj->list); + free(obj); +} + +struct bpf_object * +bpf_object__next(struct bpf_object *prev) +{ + 
struct bpf_object *next; + bool strict = (libbpf_mode & LIBBPF_STRICT_NO_OBJECT_LIST); + + if (strict) + return NULL; + + if (!prev) + next = list_first_entry(&bpf_objects_list, + struct bpf_object, + list); + else + next = list_next_entry(prev, list); + + /* Empty list is noticed here so don't need checking on entry. */ + if (&next->list == &bpf_objects_list) + return NULL; + + return next; +} + +const char *bpf_object__name(const struct bpf_object *obj) +{ + return obj ? obj->name : libbpf_err_ptr(-EINVAL); +} + +unsigned int bpf_object__kversion(const struct bpf_object *obj) +{ + return obj ? obj->kern_version : 0; +} + +struct btf *bpf_object__btf(const struct bpf_object *obj) +{ + return obj ? obj->btf : NULL; +} + +int bpf_object__btf_fd(const struct bpf_object *obj) +{ + return obj->btf ? btf__fd(obj->btf) : -1; +} + +int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version) +{ + if (obj->loaded) + return libbpf_err(-EINVAL); + + obj->kern_version = kern_version; + + return 0; +} + +int bpf_object__set_priv(struct bpf_object *obj, void *priv, + bpf_object_clear_priv_t clear_priv) +{ + if (obj->priv && obj->clear_priv) + obj->clear_priv(obj, obj->priv); + + obj->priv = priv; + obj->clear_priv = clear_priv; + return 0; +} + +void *bpf_object__priv(const struct bpf_object *obj) +{ + return obj ? obj->priv : libbpf_err_ptr(-EINVAL); +} + +int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts) +{ + struct bpf_gen *gen; + + if (!opts) + return -EFAULT; + if (!OPTS_VALID(opts, gen_loader_opts)) + return -EINVAL; + gen = calloc(sizeof(*gen), 1); + if (!gen) + return -ENOMEM; + gen->opts = opts; + obj->gen_loader = gen; + return 0; +} + +static struct bpf_program * +__bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj, + bool forward) +{ + size_t nr_programs = obj->nr_programs; + ssize_t idx; + + if (!nr_programs) + return NULL; + + if (!p) + /* Iter from the beginning */ + return forward ? &obj->programs[0] : + &obj->programs[nr_programs - 1]; + + if (p->obj != obj) { + pr_warn("error: program handler doesn't match object\n"); + return errno = EINVAL, NULL; + } + + idx = (p - obj->programs) + (forward ? 1 : -1); + if (idx >= obj->nr_programs || idx < 0) + return NULL; + return &obj->programs[idx]; +} + +struct bpf_program * +bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj) +{ + return bpf_object__next_program(obj, prev); +} + +struct bpf_program * +bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev) +{ + struct bpf_program *prog = prev; + + do { + prog = __bpf_program__iter(prog, obj, true); + } while (prog && prog_is_subprog(obj, prog)); + + return prog; +} + +struct bpf_program * +bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj) +{ + return bpf_object__prev_program(obj, next); +} + +struct bpf_program * +bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *next) +{ + struct bpf_program *prog = next; + + do { + prog = __bpf_program__iter(prog, obj, false); + } while (prog && prog_is_subprog(obj, prog)); + + return prog; +} + +int bpf_program__set_priv(struct bpf_program *prog, void *priv, + bpf_program_clear_priv_t clear_priv) +{ + if (prog->priv && prog->clear_priv) + prog->clear_priv(prog, prog->priv); + + prog->priv = priv; + prog->clear_priv = clear_priv; + return 0; +} + +void *bpf_program__priv(const struct bpf_program *prog) +{ + return prog ? 
prog->priv : libbpf_err_ptr(-EINVAL); +} + +void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex) +{ + prog->prog_ifindex = ifindex; +} + +const char *bpf_program__name(const struct bpf_program *prog) +{ + return prog->name; +} + +const char *bpf_program__section_name(const struct bpf_program *prog) +{ + return prog->sec_name; +} + +const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy) +{ + const char *title; + + title = prog->sec_name; + if (needs_copy) { + title = strdup(title); + if (!title) { + pr_warn("failed to strdup program title\n"); + return libbpf_err_ptr(-ENOMEM); + } + } + + return title; +} + +bool bpf_program__autoload(const struct bpf_program *prog) +{ + return prog->autoload; +} + +int bpf_program__set_autoload(struct bpf_program *prog, bool autoload) +{ + if (prog->obj->loaded) + return libbpf_err(-EINVAL); + + prog->autoload = autoload; + return 0; +} + +static int bpf_program_nth_fd(const struct bpf_program *prog, int n); + +int bpf_program__fd(const struct bpf_program *prog) +{ + return bpf_program_nth_fd(prog, 0); +} + +size_t bpf_program__size(const struct bpf_program *prog) +{ + return prog->insns_cnt * BPF_INSN_SZ; +} + +const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog) +{ + return prog->insns; +} + +size_t bpf_program__insn_cnt(const struct bpf_program *prog) +{ + return prog->insns_cnt; +} + +int bpf_program__set_prep(struct bpf_program *prog, int nr_instances, + bpf_program_prep_t prep) +{ + int *instances_fds; + + if (nr_instances <= 0 || !prep) + return libbpf_err(-EINVAL); + + if (prog->instances.nr > 0 || prog->instances.fds) { + pr_warn("Can't set pre-processor after loading\n"); + return libbpf_err(-EINVAL); + } + + instances_fds = malloc(sizeof(int) * nr_instances); + if (!instances_fds) { + pr_warn("alloc memory failed for fds\n"); + return libbpf_err(-ENOMEM); + } + + /* fill all fd with -1 */ + memset(instances_fds, -1, sizeof(int) * nr_instances); + + prog->instances.nr = nr_instances; + prog->instances.fds = instances_fds; + prog->preprocessor = prep; + return 0; +} + +__attribute__((alias("bpf_program_nth_fd"))) +int bpf_program__nth_fd(const struct bpf_program *prog, int n); + +static int bpf_program_nth_fd(const struct bpf_program *prog, int n) +{ + int fd; + + if (!prog) + return libbpf_err(-EINVAL); + + if (n >= prog->instances.nr || n < 0) { + pr_warn("Can't get the %dth fd from program %s: only %d instances\n", + n, prog->name, prog->instances.nr); + return libbpf_err(-EINVAL); + } + + fd = prog->instances.fds[n]; + if (fd < 0) { + pr_warn("%dth instance of program '%s' is invalid\n", + n, prog->name); + return libbpf_err(-ENOENT); + } + + return fd; +} + +__alias(bpf_program__type) +enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog); + +enum bpf_prog_type bpf_program__type(const struct bpf_program *prog) +{ + return prog->type; +} + +int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type) +{ + if (prog->obj->loaded) + return libbpf_err(-EBUSY); + + prog->type = type; + return 0; +} + +static bool bpf_program__is_type(const struct bpf_program *prog, + enum bpf_prog_type type) +{ + return prog ? 
(prog->type == type) : false; +} + +#define BPF_PROG_TYPE_FNS(NAME, TYPE) \ +int bpf_program__set_##NAME(struct bpf_program *prog) \ +{ \ + if (!prog) \ + return libbpf_err(-EINVAL); \ + return bpf_program__set_type(prog, TYPE); \ +} \ + \ +bool bpf_program__is_##NAME(const struct bpf_program *prog) \ +{ \ + return bpf_program__is_type(prog, TYPE); \ +} \ + +BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER); +BPF_PROG_TYPE_FNS(lsm, BPF_PROG_TYPE_LSM); +BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE); +BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS); +BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT); +BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT); +BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT); +BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP); +BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT); +BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING); +BPF_PROG_TYPE_FNS(struct_ops, BPF_PROG_TYPE_STRUCT_OPS); +BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT); +BPF_PROG_TYPE_FNS(sk_lookup, BPF_PROG_TYPE_SK_LOOKUP); + +__alias(bpf_program__expected_attach_type) +enum bpf_attach_type bpf_program__get_expected_attach_type(const struct bpf_program *prog); + +enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program *prog) +{ + return prog->expected_attach_type; +} + +int bpf_program__set_expected_attach_type(struct bpf_program *prog, + enum bpf_attach_type type) +{ + if (prog->obj->loaded) + return libbpf_err(-EBUSY); + + prog->expected_attach_type = type; + return 0; +} + +__u32 bpf_program__flags(const struct bpf_program *prog) +{ + return prog->prog_flags; +} + +int bpf_program__set_flags(struct bpf_program *prog, __u32 flags) +{ + if (prog->obj->loaded) + return libbpf_err(-EBUSY); + + prog->prog_flags = flags; + return 0; +} + +__u32 bpf_program__log_level(const struct bpf_program *prog) +{ + return prog->log_level; +} + +int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level) +{ + if (prog->obj->loaded) + return libbpf_err(-EBUSY); + + prog->log_level = log_level; + return 0; +} + +const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size) +{ + *log_size = prog->log_size; + return prog->log_buf; +} + +int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size) +{ + if (log_size && !log_buf) + return -EINVAL; + if (prog->log_size > UINT_MAX) + return -EINVAL; + if (prog->obj->loaded) + return -EBUSY; + + prog->log_buf = log_buf; + prog->log_size = log_size; + return 0; +} + +#define SEC_DEF(sec_pfx, ptype, atype, flags, ...) 
{ \ + .sec = (char *)sec_pfx, \ + .prog_type = BPF_PROG_TYPE_##ptype, \ + .expected_attach_type = atype, \ + .cookie = (long)(flags), \ + .prog_prepare_load_fn = libbpf_prepare_prog_load, \ + __VA_ARGS__ \ +} + +static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link); +static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link); +static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link); +static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link); +static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link); +static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link); +static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link); +static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link); +static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link); + +static const struct bpf_sec_def section_defs[] = { + SEC_DEF("socket", SOCKET_FILTER, 0, SEC_NONE | SEC_SLOPPY_PFX), + SEC_DEF("sk_reuseport/migrate", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("sk_reuseport", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("kprobe/", KPROBE, 0, SEC_NONE, attach_kprobe), + SEC_DEF("uprobe+", KPROBE, 0, SEC_NONE, attach_uprobe), + SEC_DEF("kretprobe/", KPROBE, 0, SEC_NONE, attach_kprobe), + SEC_DEF("uretprobe+", KPROBE, 0, SEC_NONE, attach_uprobe), + SEC_DEF("kprobe.multi/", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi), + SEC_DEF("kretprobe.multi/", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi), + SEC_DEF("usdt+", KPROBE, 0, SEC_NONE, attach_usdt), + SEC_DEF("tc", SCHED_CLS, 0, SEC_NONE), + SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE | SEC_SLOPPY_PFX | SEC_DEPRECATED), + SEC_DEF("action", SCHED_ACT, 0, SEC_NONE | SEC_SLOPPY_PFX), + SEC_DEF("tracepoint/", TRACEPOINT, 0, SEC_NONE, attach_tp), + SEC_DEF("tp/", TRACEPOINT, 0, SEC_NONE, attach_tp), + SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp), + SEC_DEF("raw_tp/", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp), + SEC_DEF("raw_tracepoint.w/", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp), + SEC_DEF("raw_tp.w/", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp), + SEC_DEF("tp_btf/", TRACING, BPF_TRACE_RAW_TP, SEC_ATTACH_BTF, attach_trace), + SEC_DEF("fentry/", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF, attach_trace), + SEC_DEF("fmod_ret/", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF, attach_trace), + SEC_DEF("fexit/", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF, attach_trace), + SEC_DEF("fentry.s/", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace), + SEC_DEF("fmod_ret.s/", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace), + SEC_DEF("fexit.s/", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace), + SEC_DEF("freplace/", EXT, 0, SEC_ATTACH_BTF, attach_trace), + SEC_DEF("lsm/", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF, attach_lsm), + SEC_DEF("lsm.s/", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm), + SEC_DEF("iter/", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF, attach_iter), + SEC_DEF("iter.s/", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_iter), + SEC_DEF("syscall", SYSCALL, 0, SEC_SLEEPABLE), + SEC_DEF("xdp.frags/devmap", 
XDP, BPF_XDP_DEVMAP, SEC_XDP_FRAGS), + SEC_DEF("xdp/devmap", XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE), + SEC_DEF("xdp_devmap/", XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE | SEC_DEPRECATED), + SEC_DEF("xdp.frags/cpumap", XDP, BPF_XDP_CPUMAP, SEC_XDP_FRAGS), + SEC_DEF("xdp/cpumap", XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE), + SEC_DEF("xdp_cpumap/", XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE | SEC_DEPRECATED), + SEC_DEF("xdp.frags", XDP, BPF_XDP, SEC_XDP_FRAGS), + SEC_DEF("xdp", XDP, BPF_XDP, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX), + SEC_DEF("perf_event", PERF_EVENT, 0, SEC_NONE | SEC_SLOPPY_PFX), + SEC_DEF("lwt_in", LWT_IN, 0, SEC_NONE | SEC_SLOPPY_PFX), + SEC_DEF("lwt_out", LWT_OUT, 0, SEC_NONE | SEC_SLOPPY_PFX), + SEC_DEF("lwt_xmit", LWT_XMIT, 0, SEC_NONE | SEC_SLOPPY_PFX), + SEC_DEF("lwt_seg6local", LWT_SEG6LOCAL, 0, SEC_NONE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup_skb/ingress", CGROUP_SKB, BPF_CGROUP_INET_INGRESS, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX), + SEC_DEF("cgroup_skb/egress", CGROUP_SKB, BPF_CGROUP_INET_EGRESS, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/skb", CGROUP_SKB, 0, SEC_NONE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/sock_create", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/sock_release", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_RELEASE, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/sock", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/post_bind4", CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/post_bind6", CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/dev", CGROUP_DEVICE, BPF_CGROUP_DEVICE, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX), + SEC_DEF("sockops", SOCK_OPS, BPF_CGROUP_SOCK_OPS, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX), + SEC_DEF("sk_skb/stream_parser", SK_SKB, BPF_SK_SKB_STREAM_PARSER, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX), + SEC_DEF("sk_skb/stream_verdict",SK_SKB, BPF_SK_SKB_STREAM_VERDICT, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX), + SEC_DEF("sk_skb", SK_SKB, 0, SEC_NONE | SEC_SLOPPY_PFX), + SEC_DEF("sk_msg", SK_MSG, BPF_SK_MSG_VERDICT, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX), + SEC_DEF("lirc_mode2", LIRC_MODE2, BPF_LIRC_MODE2, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX), + SEC_DEF("flow_dissector", FLOW_DISSECTOR, BPF_FLOW_DISSECTOR, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/bind4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/bind6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/connect4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/connect6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/sendmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/sendmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/recvmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/recvmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/getpeername4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETPEERNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/getpeername6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETPEERNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/getsockname4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETSOCKNAME, SEC_ATTACHABLE | 
SEC_SLOPPY_PFX), + SEC_DEF("cgroup/getsockname6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETSOCKNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/sysctl", CGROUP_SYSCTL, BPF_CGROUP_SYSCTL, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/getsockopt", CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("cgroup/setsockopt", CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT, SEC_ATTACHABLE | SEC_SLOPPY_PFX), + SEC_DEF("struct_ops+", STRUCT_OPS, 0, SEC_NONE), + SEC_DEF("sk_lookup", SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE | SEC_SLOPPY_PFX), +}; + +static size_t custom_sec_def_cnt; +static struct bpf_sec_def *custom_sec_defs; +static struct bpf_sec_def custom_fallback_def; +static bool has_custom_fallback_def; + +static int last_custom_sec_def_handler_id; + +int libbpf_register_prog_handler(const char *sec, + enum bpf_prog_type prog_type, + enum bpf_attach_type exp_attach_type, + const struct libbpf_prog_handler_opts *opts) +{ + struct bpf_sec_def *sec_def; + + if (!OPTS_VALID(opts, libbpf_prog_handler_opts)) + return libbpf_err(-EINVAL); + + if (last_custom_sec_def_handler_id == INT_MAX) /* prevent overflow */ + return libbpf_err(-E2BIG); + + if (sec) { + sec_def = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt + 1, + sizeof(*sec_def)); + if (!sec_def) + return libbpf_err(-ENOMEM); + + custom_sec_defs = sec_def; + sec_def = &custom_sec_defs[custom_sec_def_cnt]; + } else { + if (has_custom_fallback_def) + return libbpf_err(-EBUSY); + + sec_def = &custom_fallback_def; + } + + sec_def->sec = sec ? strdup(sec) : NULL; + if (sec && !sec_def->sec) + return libbpf_err(-ENOMEM); + + sec_def->prog_type = prog_type; + sec_def->expected_attach_type = exp_attach_type; + sec_def->cookie = OPTS_GET(opts, cookie, 0); + + sec_def->prog_setup_fn = OPTS_GET(opts, prog_setup_fn, NULL); + sec_def->prog_prepare_load_fn = OPTS_GET(opts, prog_prepare_load_fn, NULL); + sec_def->prog_attach_fn = OPTS_GET(opts, prog_attach_fn, NULL); + + sec_def->handler_id = ++last_custom_sec_def_handler_id; + + if (sec) + custom_sec_def_cnt++; + else + has_custom_fallback_def = true; + + return sec_def->handler_id; +} + +int libbpf_unregister_prog_handler(int handler_id) +{ + struct bpf_sec_def *sec_defs; + int i; + + if (handler_id <= 0) + return libbpf_err(-EINVAL); + + if (has_custom_fallback_def && custom_fallback_def.handler_id == handler_id) { + memset(&custom_fallback_def, 0, sizeof(custom_fallback_def)); + has_custom_fallback_def = false; + return 0; + } + + for (i = 0; i < custom_sec_def_cnt; i++) { + if (custom_sec_defs[i].handler_id == handler_id) + break; + } + + if (i == custom_sec_def_cnt) + return libbpf_err(-ENOENT); + + free(custom_sec_defs[i].sec); + for (i = i + 1; i < custom_sec_def_cnt; i++) + custom_sec_defs[i - 1] = custom_sec_defs[i]; + custom_sec_def_cnt--; + + /* try to shrink the array, but it's ok if we couldn't */ + sec_defs = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt, sizeof(*sec_defs)); + if (sec_defs) + custom_sec_defs = sec_defs; + + return 0; +} + +static bool sec_def_matches(const struct bpf_sec_def *sec_def, const char *sec_name, + bool allow_sloppy) +{ + size_t len = strlen(sec_def->sec); + + /* "type/" always has to have proper SEC("type/extras") form */ + if (sec_def->sec[len - 1] == '/') { + if (str_has_pfx(sec_name, sec_def->sec)) + return true; + return false; + } + + /* "type+" means it can be either exact SEC("type") or + * well-formed SEC("type/extras") with proper '/' separator + */ + if (sec_def->sec[len - 1] == '+') { + len--; + /* not 
even a prefix */ + if (strncmp(sec_name, sec_def->sec, len) != 0) + return false; + /* exact match or has '/' separator */ + if (sec_name[len] == '\0' || sec_name[len] == '/') + return true; + return false; + } + + /* SEC_SLOPPY_PFX definitions are allowed to be just prefix + * matches, unless strict section name mode + * (LIBBPF_STRICT_SEC_NAME) is enabled, in which case the + * match has to be exact. + */ + if (allow_sloppy && str_has_pfx(sec_name, sec_def->sec)) + return true; + + /* Definitions not marked SEC_SLOPPY_PFX (e.g., + * SEC("syscall")) are exact matches in both modes. + */ + return strcmp(sec_name, sec_def->sec) == 0; +} + +static const struct bpf_sec_def *find_sec_def(const char *sec_name) +{ + const struct bpf_sec_def *sec_def; + int i, n; + bool strict = libbpf_mode & LIBBPF_STRICT_SEC_NAME, allow_sloppy; + + n = custom_sec_def_cnt; + for (i = 0; i < n; i++) { + sec_def = &custom_sec_defs[i]; + if (sec_def_matches(sec_def, sec_name, false)) + return sec_def; + } + + n = ARRAY_SIZE(section_defs); + for (i = 0; i < n; i++) { + sec_def = §ion_defs[i]; + allow_sloppy = (sec_def->cookie & SEC_SLOPPY_PFX) && !strict; + if (sec_def_matches(sec_def, sec_name, allow_sloppy)) + return sec_def; + } + + if (has_custom_fallback_def) + return &custom_fallback_def; + + return NULL; +} + +#define MAX_TYPE_NAME_SIZE 32 + +static char *libbpf_get_type_names(bool attach_type) +{ + int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE; + char *buf; + + buf = malloc(len); + if (!buf) + return NULL; + + buf[0] = '\0'; + /* Forge string buf with all available names */ + for (i = 0; i < ARRAY_SIZE(section_defs); i++) { + const struct bpf_sec_def *sec_def = §ion_defs[i]; + + if (attach_type) { + if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load) + continue; + + if (!(sec_def->cookie & SEC_ATTACHABLE)) + continue; + } + + if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) { + free(buf); + return NULL; + } + strcat(buf, " "); + strcat(buf, section_defs[i].sec); + } + + return buf; +} + +int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, + enum bpf_attach_type *expected_attach_type) +{ + const struct bpf_sec_def *sec_def; + char *type_names; + + if (!name) + return libbpf_err(-EINVAL); + + sec_def = find_sec_def(name); + if (sec_def) { + *prog_type = sec_def->prog_type; + *expected_attach_type = sec_def->expected_attach_type; + return 0; + } + + pr_debug("failed to guess program type from ELF section '%s'\n", name); + type_names = libbpf_get_type_names(false); + if (type_names != NULL) { + pr_debug("supported section(type) names are:%s\n", type_names); + free(type_names); + } + + return libbpf_err(-ESRCH); +} + +static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj, + size_t offset) +{ + struct bpf_map *map; + size_t i; + + for (i = 0; i < obj->nr_maps; i++) { + map = &obj->maps[i]; + if (!bpf_map__is_struct_ops(map)) + continue; + if (map->sec_offset <= offset && + offset - map->sec_offset < map->def.value_size) + return map; + } + + return NULL; +} + +/* Collect the reloc from ELF and populate the st_ops->progs[] */ +static int bpf_object__collect_st_ops_relos(struct bpf_object *obj, + Elf64_Shdr *shdr, Elf_Data *data) +{ + const struct btf_member *member; + struct bpf_struct_ops *st_ops; + struct bpf_program *prog; + unsigned int shdr_idx; + const struct btf *btf; + struct bpf_map *map; + unsigned int moff, insn_idx; + const char *name; + __u32 member_idx; + Elf64_Sym *sym; + Elf64_Rel *rel; + int i, nrels; + + btf = 
obj->btf; + nrels = shdr->sh_size / shdr->sh_entsize; + for (i = 0; i < nrels; i++) { + rel = elf_rel_by_idx(data, i); + if (!rel) { + pr_warn("struct_ops reloc: failed to get %d reloc\n", i); + return -LIBBPF_ERRNO__FORMAT; + } + + sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info)); + if (!sym) { + pr_warn("struct_ops reloc: symbol %zx not found\n", + (size_t)ELF64_R_SYM(rel->r_info)); + return -LIBBPF_ERRNO__FORMAT; + } + + name = elf_sym_str(obj, sym->st_name) ?: ""; + map = find_struct_ops_map_by_offset(obj, rel->r_offset); + if (!map) { + pr_warn("struct_ops reloc: cannot find map at rel->r_offset %zu\n", + (size_t)rel->r_offset); + return -EINVAL; + } + + moff = rel->r_offset - map->sec_offset; + shdr_idx = sym->st_shndx; + st_ops = map->st_ops; + pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel->r_offset %zu map->sec_offset %zu name %d (\'%s\')\n", + map->name, + (long long)(rel->r_info >> 32), + (long long)sym->st_value, + shdr_idx, (size_t)rel->r_offset, + map->sec_offset, sym->st_name, name); + + if (shdr_idx >= SHN_LORESERVE) { + pr_warn("struct_ops reloc %s: rel->r_offset %zu shdr_idx %u unsupported non-static function\n", + map->name, (size_t)rel->r_offset, shdr_idx); + return -LIBBPF_ERRNO__RELOC; + } + if (sym->st_value % BPF_INSN_SZ) { + pr_warn("struct_ops reloc %s: invalid target program offset %llu\n", + map->name, (unsigned long long)sym->st_value); + return -LIBBPF_ERRNO__FORMAT; + } + insn_idx = sym->st_value / BPF_INSN_SZ; + + member = find_member_by_offset(st_ops->type, moff * 8); + if (!member) { + pr_warn("struct_ops reloc %s: cannot find member at moff %u\n", + map->name, moff); + return -EINVAL; + } + member_idx = member - btf_members(st_ops->type); + name = btf__name_by_offset(btf, member->name_off); + + if (!resolve_func_ptr(btf, member->type, NULL)) { + pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n", + map->name, name); + return -EINVAL; + } + + prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx); + if (!prog) { + pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n", + map->name, shdr_idx, name); + return -EINVAL; + } + + /* prevent the use of BPF prog with invalid type */ + if (prog->type != BPF_PROG_TYPE_STRUCT_OPS) { + pr_warn("struct_ops reloc %s: prog %s is not struct_ops BPF program\n", + map->name, prog->name); + return -EINVAL; + } + + /* if we haven't yet processed this BPF program, record proper + * attach_btf_id and member_idx + */ + if (!prog->attach_btf_id) { + prog->attach_btf_id = st_ops->type_id; + prog->expected_attach_type = member_idx; + } + + /* struct_ops BPF prog can be re-used between multiple + * .struct_ops as long as it's the same struct_ops struct + * definition and the same function pointer field + */ + if (prog->attach_btf_id != st_ops->type_id || + prog->expected_attach_type != member_idx) { + pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n", + map->name, prog->name, prog->sec_name, prog->type, + prog->attach_btf_id, prog->expected_attach_type, name); + return -EINVAL; + } + + st_ops->progs[member_idx] = prog; + } + + return 0; +} + +#define BTF_TRACE_PREFIX "btf_trace_" +#define BTF_LSM_PREFIX "bpf_lsm_" +#define BTF_ITER_PREFIX "bpf_iter_" +#define BTF_MAX_NAME_SIZE 128 + +void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type, + const char **prefix, int *kind) +{ + switch (attach_type) { + case BPF_TRACE_RAW_TP: + *prefix = BTF_TRACE_PREFIX; + *kind = 
BTF_KIND_TYPEDEF; + break; + case BPF_LSM_MAC: + *prefix = BTF_LSM_PREFIX; + *kind = BTF_KIND_FUNC; + break; + case BPF_TRACE_ITER: + *prefix = BTF_ITER_PREFIX; + *kind = BTF_KIND_FUNC; + break; + default: + *prefix = ""; + *kind = BTF_KIND_FUNC; + } +} + +static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix, + const char *name, __u32 kind) +{ + char btf_type_name[BTF_MAX_NAME_SIZE]; + int ret; + + ret = snprintf(btf_type_name, sizeof(btf_type_name), + "%s%s", prefix, name); + /* snprintf returns the number of characters written excluding the + * terminating null. So, if >= BTF_MAX_NAME_SIZE are written, it + * indicates truncation. + */ + if (ret < 0 || ret >= sizeof(btf_type_name)) + return -ENAMETOOLONG; + return btf__find_by_name_kind(btf, btf_type_name, kind); +} + +static inline int find_attach_btf_id(struct btf *btf, const char *name, + enum bpf_attach_type attach_type) +{ + const char *prefix; + int kind; + + btf_get_kernel_prefix_kind(attach_type, &prefix, &kind); + return find_btf_by_prefix_kind(btf, prefix, name, kind); +} + +int libbpf_find_vmlinux_btf_id(const char *name, + enum bpf_attach_type attach_type) +{ + struct btf *btf; + int err; + + btf = btf__load_vmlinux_btf(); + err = libbpf_get_error(btf); + if (err) { + pr_warn("vmlinux BTF is not found\n"); + return libbpf_err(err); + } + + err = find_attach_btf_id(btf, name, attach_type); + if (err <= 0) + pr_warn("%s is not found in vmlinux BTF\n", name); + + btf__free(btf); + return libbpf_err(err); +} + +static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd) +{ + struct bpf_prog_info info = {}; + __u32 info_len = sizeof(info); + struct btf *btf; + int err; + + err = bpf_obj_get_info_by_fd(attach_prog_fd, &info, &info_len); + if (err) { + pr_warn("failed bpf_obj_get_info_by_fd for FD %d: %d\n", + attach_prog_fd, err); + return err; + } + + err = -EINVAL; + if (!info.btf_id) { + pr_warn("The target program doesn't have BTF\n"); + goto out; + } + btf = btf__load_from_kernel_by_id(info.btf_id); + err = libbpf_get_error(btf); + if (err) { + pr_warn("Failed to get BTF %d of the program: %d\n", info.btf_id, err); + goto out; + } + err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC); + btf__free(btf); + if (err <= 0) { + pr_warn("%s is not found in prog's BTF\n", name); + goto out; + } +out: + return err; +} + +static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name, + enum bpf_attach_type attach_type, + int *btf_obj_fd, int *btf_type_id) +{ + int ret, i; + + ret = find_attach_btf_id(obj->btf_vmlinux, attach_name, attach_type); + if (ret > 0) { + *btf_obj_fd = 0; /* vmlinux BTF */ + *btf_type_id = ret; + return 0; + } + if (ret != -ENOENT) + return ret; + + ret = load_module_btfs(obj); + if (ret) + return ret; + + for (i = 0; i < obj->btf_module_cnt; i++) { + const struct module_btf *mod = &obj->btf_modules[i]; + + ret = find_attach_btf_id(mod->btf, attach_name, attach_type); + if (ret > 0) { + *btf_obj_fd = mod->fd; + *btf_type_id = ret; + return 0; + } + if (ret == -ENOENT) + continue; + + return ret; + } + + return -ESRCH; +} + +static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name, + int *btf_obj_fd, int *btf_type_id) +{ + enum bpf_attach_type attach_type = prog->expected_attach_type; + __u32 attach_prog_fd = prog->attach_prog_fd; + int err = 0; + + /* BPF program's BTF ID */ + if (attach_prog_fd) { + err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd); + if (err < 0) { + pr_warn("failed to find BPF program (FD %d) BTF 
ID for '%s': %d\n", + attach_prog_fd, attach_name, err); + return err; + } + *btf_obj_fd = 0; + *btf_type_id = err; + return 0; + } + + /* kernel/module BTF ID */ + if (prog->obj->gen_loader) { + bpf_gen__record_attach_target(prog->obj->gen_loader, attach_name, attach_type); + *btf_obj_fd = 0; + *btf_type_id = 1; + } else { + err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id); + } + if (err) { + pr_warn("failed to find kernel BTF type ID of '%s': %d\n", attach_name, err); + return err; + } + return 0; +} + +int libbpf_attach_type_by_name(const char *name, + enum bpf_attach_type *attach_type) +{ + char *type_names; + const struct bpf_sec_def *sec_def; + + if (!name) + return libbpf_err(-EINVAL); + + sec_def = find_sec_def(name); + if (!sec_def) { + pr_debug("failed to guess attach type based on ELF section name '%s'\n", name); + type_names = libbpf_get_type_names(true); + if (type_names != NULL) { + pr_debug("attachable section(type) names are:%s\n", type_names); + free(type_names); + } + + return libbpf_err(-EINVAL); + } + + if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load) + return libbpf_err(-EINVAL); + if (!(sec_def->cookie & SEC_ATTACHABLE)) + return libbpf_err(-EINVAL); + + *attach_type = sec_def->expected_attach_type; + return 0; +} + +int bpf_map__fd(const struct bpf_map *map) +{ + return map ? map->fd : libbpf_err(-EINVAL); +} + +const struct bpf_map_def *bpf_map__def(const struct bpf_map *map) +{ + return map ? &map->def : libbpf_err_ptr(-EINVAL); +} + +static bool map_uses_real_name(const struct bpf_map *map) +{ + /* Since libbpf started to support custom .data.* and .rodata.* maps, + * their user-visible name differs from kernel-visible name. Users see + * such map's corresponding ELF section name as a map name. + * This check distinguishes .data/.rodata from .data.* and .rodata.* + * maps to know which name has to be returned to the user. 
+ */ + if (map->libbpf_type == LIBBPF_MAP_DATA && strcmp(map->real_name, DATA_SEC) != 0) + return true; + if (map->libbpf_type == LIBBPF_MAP_RODATA && strcmp(map->real_name, RODATA_SEC) != 0) + return true; + return false; +} + +const char *bpf_map__name(const struct bpf_map *map) +{ + if (!map) + return NULL; + + if (map_uses_real_name(map)) + return map->real_name; + + return map->name; +} + +enum bpf_map_type bpf_map__type(const struct bpf_map *map) +{ + return map->def.type; +} + +int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type) +{ + if (map->fd >= 0) + return libbpf_err(-EBUSY); + map->def.type = type; + return 0; +} + +__u32 bpf_map__map_flags(const struct bpf_map *map) +{ + return map->def.map_flags; +} + +int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags) +{ + if (map->fd >= 0) + return libbpf_err(-EBUSY); + map->def.map_flags = flags; + return 0; +} + +__u64 bpf_map__map_extra(const struct bpf_map *map) +{ + return map->map_extra; +} + +int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra) +{ + if (map->fd >= 0) + return libbpf_err(-EBUSY); + map->map_extra = map_extra; + return 0; +} + +__u32 bpf_map__numa_node(const struct bpf_map *map) +{ + return map->numa_node; +} + +int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node) +{ + if (map->fd >= 0) + return libbpf_err(-EBUSY); + map->numa_node = numa_node; + return 0; +} + +__u32 bpf_map__key_size(const struct bpf_map *map) +{ + return map->def.key_size; +} + +int bpf_map__set_key_size(struct bpf_map *map, __u32 size) +{ + if (map->fd >= 0) + return libbpf_err(-EBUSY); + map->def.key_size = size; + return 0; +} + +__u32 bpf_map__value_size(const struct bpf_map *map) +{ + return map->def.value_size; +} + +int bpf_map__set_value_size(struct bpf_map *map, __u32 size) +{ + if (map->fd >= 0) + return libbpf_err(-EBUSY); + map->def.value_size = size; + return 0; +} + +__u32 bpf_map__btf_key_type_id(const struct bpf_map *map) +{ + return map ? map->btf_key_type_id : 0; +} + +__u32 bpf_map__btf_value_type_id(const struct bpf_map *map) +{ + return map ? map->btf_value_type_id : 0; +} + +int bpf_map__set_priv(struct bpf_map *map, void *priv, + bpf_map_clear_priv_t clear_priv) +{ + if (!map) + return libbpf_err(-EINVAL); + + if (map->priv) { + if (map->clear_priv) + map->clear_priv(map, map->priv); + } + + map->priv = priv; + map->clear_priv = clear_priv; + return 0; +} + +void *bpf_map__priv(const struct bpf_map *map) +{ + return map ? 
map->priv : libbpf_err_ptr(-EINVAL); +} + +int bpf_map__set_initial_value(struct bpf_map *map, + const void *data, size_t size) +{ + if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG || + size != map->def.value_size || map->fd >= 0) + return libbpf_err(-EINVAL); + + memcpy(map->mmaped, data, size); + return 0; +} + +const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize) +{ + if (!map->mmaped) + return NULL; + *psize = map->def.value_size; + return map->mmaped; +} + +bool bpf_map__is_offload_neutral(const struct bpf_map *map) +{ + return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; +} + +bool bpf_map__is_internal(const struct bpf_map *map) +{ + return map->libbpf_type != LIBBPF_MAP_UNSPEC; +} + +__u32 bpf_map__ifindex(const struct bpf_map *map) +{ + return map->map_ifindex; +} + +int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex) +{ + if (map->fd >= 0) + return libbpf_err(-EBUSY); + map->map_ifindex = ifindex; + return 0; +} + +int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd) +{ + if (!bpf_map_type__is_map_in_map(map->def.type)) { + pr_warn("error: unsupported map type\n"); + return libbpf_err(-EINVAL); + } + if (map->inner_map_fd != -1) { + pr_warn("error: inner_map_fd already specified\n"); + return libbpf_err(-EINVAL); + } + if (map->inner_map) { + bpf_map__destroy(map->inner_map); + zfree(&map->inner_map); + } + map->inner_map_fd = fd; + return 0; +} + +static struct bpf_map * +__bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i) +{ + ssize_t idx; + struct bpf_map *s, *e; + + if (!obj || !obj->maps) + return errno = EINVAL, NULL; + + s = obj->maps; + e = obj->maps + obj->nr_maps; + + if ((m < s) || (m >= e)) { + pr_warn("error in %s: map handler doesn't belong to object\n", + __func__); + return errno = EINVAL, NULL; + } + + idx = (m - obj->maps) + i; + if (idx >= obj->nr_maps || idx < 0) + return NULL; + return &obj->maps[idx]; +} + +struct bpf_map * +bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj) +{ + return bpf_object__next_map(obj, prev); +} + +struct bpf_map * +bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev) +{ + if (prev == NULL) + return obj->maps; + + return __bpf_map__iter(prev, obj, 1); +} + +struct bpf_map * +bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj) +{ + return bpf_object__prev_map(obj, next); +} + +struct bpf_map * +bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next) +{ + if (next == NULL) { + if (!obj->nr_maps) + return NULL; + return obj->maps + obj->nr_maps - 1; + } + + return __bpf_map__iter(next, obj, -1); +} + +struct bpf_map * +bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name) +{ + struct bpf_map *pos; + + bpf_object__for_each_map(pos, obj) { + /* if it's a special internal map name (which always starts + * with dot) then check if that special name matches the + * real map name (ELF section name) + */ + if (name[0] == '.') { + if (pos->real_name && strcmp(pos->real_name, name) == 0) + return pos; + continue; + } + /* otherwise map name has to be an exact match */ + if (map_uses_real_name(pos)) { + if (strcmp(pos->real_name, name) == 0) + return pos; + continue; + } + if (strcmp(pos->name, name) == 0) + return pos; + } + return errno = ENOENT, NULL; +} + +int +bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name) +{ + return bpf_map__fd(bpf_object__find_map_by_name(obj, name)); +} + +struct bpf_map * 
+bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset) +{ + return libbpf_err_ptr(-ENOTSUP); +} + +long libbpf_get_error(const void *ptr) +{ + if (!IS_ERR_OR_NULL(ptr)) + return 0; + + if (IS_ERR(ptr)) + errno = -PTR_ERR(ptr); + + /* If ptr == NULL, then errno should be already set by the failing + * API, because libbpf never returns NULL on success and it now always + * sets errno on error. So no extra errno handling for ptr == NULL + * case. + */ + return -errno; +} + +__attribute__((alias("bpf_prog_load_xattr2"))) +int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, + struct bpf_object **pobj, int *prog_fd); + +static int bpf_prog_load_xattr2(const struct bpf_prog_load_attr *attr, + struct bpf_object **pobj, int *prog_fd) +{ + struct bpf_object_open_attr open_attr = {}; + struct bpf_program *prog, *first_prog = NULL; + struct bpf_object *obj; + struct bpf_map *map; + int err; + + if (!attr) + return libbpf_err(-EINVAL); + if (!attr->file) + return libbpf_err(-EINVAL); + + open_attr.file = attr->file; + open_attr.prog_type = attr->prog_type; + + obj = __bpf_object__open_xattr(&open_attr, 0); + err = libbpf_get_error(obj); + if (err) + return libbpf_err(-ENOENT); + + bpf_object__for_each_program(prog, obj) { + enum bpf_attach_type attach_type = attr->expected_attach_type; + /* + * to preserve backwards compatibility, bpf_prog_load treats + * attr->prog_type, if specified, as an override to whatever + * bpf_object__open guessed + */ + if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) { + prog->type = attr->prog_type; + prog->expected_attach_type = attach_type; + } + if (bpf_program__type(prog) == BPF_PROG_TYPE_UNSPEC) { + /* + * we haven't guessed from section name and user + * didn't provide a fallback type, too bad... + */ + bpf_object__close(obj); + return libbpf_err(-EINVAL); + } + + prog->prog_ifindex = attr->ifindex; + prog->log_level = attr->log_level; + prog->prog_flags |= attr->prog_flags; + if (!first_prog) + first_prog = prog; + } + + bpf_object__for_each_map(map, obj) { + if (map->def.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) + map->map_ifindex = attr->ifindex; + } + + if (!first_prog) { + pr_warn("object file doesn't contain bpf program\n"); + bpf_object__close(obj); + return libbpf_err(-ENOENT); + } + + err = bpf_object__load(obj); + if (err) { + bpf_object__close(obj); + return libbpf_err(err); + } + + *pobj = obj; + *prog_fd = bpf_program__fd(first_prog); + return 0; +} + +COMPAT_VERSION(bpf_prog_load_deprecated, bpf_prog_load, LIBBPF_0.0.1) +int bpf_prog_load_deprecated(const char *file, enum bpf_prog_type type, + struct bpf_object **pobj, int *prog_fd) +{ + struct bpf_prog_load_attr attr; + + memset(&attr, 0, sizeof(struct bpf_prog_load_attr)); + attr.file = file; + attr.prog_type = type; + attr.expected_attach_type = 0; + + return bpf_prog_load_xattr2(&attr, pobj, prog_fd); +} + +/* Replace link's underlying BPF program with the new one */ +int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog) +{ + int ret; + + ret = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL); + return libbpf_err_errno(ret); +} + +/* Release "ownership" of underlying BPF resource (typically, BPF program + * attached to some BPF hook, e.g., tracepoint, kprobe, etc). Disconnected + * link, when destructed through bpf_link__destroy() call won't attempt to + * detach/unregisted that BPF resource. This is useful in situations where, + * say, attached BPF program has to outlive userspace program that attached it + * in the system. 
Depending on type of BPF program, though, there might be + * additional steps (like pinning BPF program in BPF FS) necessary to ensure + * exit of userspace program doesn't trigger automatic detachment and clean up + * inside the kernel. + */ +void bpf_link__disconnect(struct bpf_link *link) +{ + link->disconnected = true; +} + +int bpf_link__destroy(struct bpf_link *link) +{ + int err = 0; + + if (IS_ERR_OR_NULL(link)) + return 0; + + if (!link->disconnected && link->detach) + err = link->detach(link); + if (link->pin_path) + free(link->pin_path); + if (link->dealloc) + link->dealloc(link); + else + free(link); + + return libbpf_err(err); +} + +int bpf_link__fd(const struct bpf_link *link) +{ + return link->fd; +} + +const char *bpf_link__pin_path(const struct bpf_link *link) +{ + return link->pin_path; +} + +static int bpf_link__detach_fd(struct bpf_link *link) +{ + return libbpf_err_errno(close(link->fd)); +} + +struct bpf_link *bpf_link__open(const char *path) +{ + struct bpf_link *link; + int fd; + + fd = bpf_obj_get(path); + if (fd < 0) { + fd = -errno; + pr_warn("failed to open link at %s: %d\n", path, fd); + return libbpf_err_ptr(fd); + } + + link = calloc(1, sizeof(*link)); + if (!link) { + close(fd); + return libbpf_err_ptr(-ENOMEM); + } + link->detach = &bpf_link__detach_fd; + link->fd = fd; + + link->pin_path = strdup(path); + if (!link->pin_path) { + bpf_link__destroy(link); + return libbpf_err_ptr(-ENOMEM); + } + + return link; +} + +int bpf_link__detach(struct bpf_link *link) +{ + return bpf_link_detach(link->fd) ? -errno : 0; +} + +int bpf_link__pin(struct bpf_link *link, const char *path) +{ + int err; + + if (link->pin_path) + return libbpf_err(-EBUSY); + err = make_parent_dir(path); + if (err) + return libbpf_err(err); + err = check_path(path); + if (err) + return libbpf_err(err); + + link->pin_path = strdup(path); + if (!link->pin_path) + return libbpf_err(-ENOMEM); + + if (bpf_obj_pin(link->fd, link->pin_path)) { + err = -errno; + zfree(&link->pin_path); + return libbpf_err(err); + } + + pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path); + return 0; +} + +int bpf_link__unpin(struct bpf_link *link) +{ + int err; + + if (!link->pin_path) + return libbpf_err(-EINVAL); + + err = unlink(link->pin_path); + if (err != 0) + return -errno; + + pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path); + zfree(&link->pin_path); + return 0; +} + +struct bpf_link_perf { + struct bpf_link link; + int perf_event_fd; + /* legacy kprobe support: keep track of probe identifier and type */ + char *legacy_probe_name; + bool legacy_is_kprobe; + bool legacy_is_retprobe; +}; + +static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe); +static int remove_uprobe_event_legacy(const char *probe_name, bool retprobe); + +static int bpf_link_perf_detach(struct bpf_link *link) +{ + struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); + int err = 0; + + if (ioctl(perf_link->perf_event_fd, PERF_EVENT_IOC_DISABLE, 0) < 0) + err = -errno; + + if (perf_link->perf_event_fd != link->fd) + close(perf_link->perf_event_fd); + close(link->fd); + + /* legacy uprobe/kprobe needs to be removed after perf event fd closure */ + if (perf_link->legacy_probe_name) { + if (perf_link->legacy_is_kprobe) { + err = remove_kprobe_event_legacy(perf_link->legacy_probe_name, + perf_link->legacy_is_retprobe); + } else { + err = remove_uprobe_event_legacy(perf_link->legacy_probe_name, + perf_link->legacy_is_retprobe); + } + } + + return err; +} + +static 
void bpf_link_perf_dealloc(struct bpf_link *link) +{ + struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); + + free(perf_link->legacy_probe_name); + free(perf_link); +} + +struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd, + const struct bpf_perf_event_opts *opts) +{ + char errmsg[STRERR_BUFSIZE]; + struct bpf_link_perf *link; + int prog_fd, link_fd = -1, err; + + if (!OPTS_VALID(opts, bpf_perf_event_opts)) + return libbpf_err_ptr(-EINVAL); + + if (pfd < 0) { + pr_warn("prog '%s': invalid perf event FD %d\n", + prog->name, pfd); + return libbpf_err_ptr(-EINVAL); + } + prog_fd = bpf_program__fd(prog); + if (prog_fd < 0) { + pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n", + prog->name); + return libbpf_err_ptr(-EINVAL); + } + + link = calloc(1, sizeof(*link)); + if (!link) + return libbpf_err_ptr(-ENOMEM); + link->link.detach = &bpf_link_perf_detach; + link->link.dealloc = &bpf_link_perf_dealloc; + link->perf_event_fd = pfd; + + if (kernel_supports(prog->obj, FEAT_PERF_LINK)) { + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_opts, + .perf_event.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0)); + + link_fd = bpf_link_create(prog_fd, pfd, BPF_PERF_EVENT, &link_opts); + if (link_fd < 0) { + err = -errno; + pr_warn("prog '%s': failed to create BPF link for perf_event FD %d: %d (%s)\n", + prog->name, pfd, + err, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + goto err_out; + } + link->link.fd = link_fd; + } else { + if (OPTS_GET(opts, bpf_cookie, 0)) { + pr_warn("prog '%s': user context value is not supported\n", prog->name); + err = -EOPNOTSUPP; + goto err_out; + } + + if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) { + err = -errno; + pr_warn("prog '%s': failed to attach to perf_event FD %d: %s\n", + prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + if (err == -EPROTO) + pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n", + prog->name, pfd); + goto err_out; + } + link->link.fd = pfd; + } + if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) { + err = -errno; + pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n", + prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + goto err_out; + } + + return &link->link; +err_out: + if (link_fd >= 0) + close(link_fd); + free(link); + return libbpf_err_ptr(err); +} + +struct bpf_link *bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd) +{ + return bpf_program__attach_perf_event_opts(prog, pfd, NULL); +} + +/* + * this function is expected to parse integer in the range of [0, 2^31-1] from + * given file using scanf format string fmt. If actual parsed value is + * negative, the result might be indistinguishable from error + */ +static int parse_uint_from_file(const char *file, const char *fmt) +{ + char buf[STRERR_BUFSIZE]; + int err, ret; + FILE *f; + + f = fopen(file, "r"); + if (!f) { + err = -errno; + pr_debug("failed to open '%s': %s\n", file, + libbpf_strerror_r(err, buf, sizeof(buf))); + return err; + } + err = fscanf(f, fmt, &ret); + if (err != 1) { + err = err == EOF ? 
-EIO : -errno; + pr_debug("failed to parse '%s': %s\n", file, + libbpf_strerror_r(err, buf, sizeof(buf))); + fclose(f); + return err; + } + fclose(f); + return ret; +} + +static int determine_kprobe_perf_type(void) +{ + const char *file = "/sys/bus/event_source/devices/kprobe/type"; + + return parse_uint_from_file(file, "%d\n"); +} + +static int determine_uprobe_perf_type(void) +{ + const char *file = "/sys/bus/event_source/devices/uprobe/type"; + + return parse_uint_from_file(file, "%d\n"); +} + +static int determine_kprobe_retprobe_bit(void) +{ + const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe"; + + return parse_uint_from_file(file, "config:%d\n"); +} + +static int determine_uprobe_retprobe_bit(void) +{ + const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe"; + + return parse_uint_from_file(file, "config:%d\n"); +} + +#define PERF_UPROBE_REF_CTR_OFFSET_BITS 32 +#define PERF_UPROBE_REF_CTR_OFFSET_SHIFT 32 + +static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name, + uint64_t offset, int pid, size_t ref_ctr_off) +{ + struct perf_event_attr attr = {}; + char errmsg[STRERR_BUFSIZE]; + int type, pfd, err; + + if (ref_ctr_off >= (1ULL << PERF_UPROBE_REF_CTR_OFFSET_BITS)) + return -EINVAL; + + type = uprobe ? determine_uprobe_perf_type() + : determine_kprobe_perf_type(); + if (type < 0) { + pr_warn("failed to determine %s perf type: %s\n", + uprobe ? "uprobe" : "kprobe", + libbpf_strerror_r(type, errmsg, sizeof(errmsg))); + return type; + } + if (retprobe) { + int bit = uprobe ? determine_uprobe_retprobe_bit() + : determine_kprobe_retprobe_bit(); + + if (bit < 0) { + pr_warn("failed to determine %s retprobe bit: %s\n", + uprobe ? "uprobe" : "kprobe", + libbpf_strerror_r(bit, errmsg, sizeof(errmsg))); + return bit; + } + attr.config |= 1 << bit; + } + attr.size = sizeof(attr); + attr.type = type; + attr.config |= (__u64)ref_ctr_off << PERF_UPROBE_REF_CTR_OFFSET_SHIFT; + attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */ + attr.config2 = offset; /* kprobe_addr or probe_offset */ + + /* pid filter is meaningful only for uprobes */ + pfd = syscall(__NR_perf_event_open, &attr, + pid < 0 ? -1 : pid /* pid */, + pid == -1 ? 0 : -1 /* cpu */, + -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); + if (pfd < 0) { + err = -errno; + pr_warn("%s perf_event_open() failed: %s\n", + uprobe ? "uprobe" : "kprobe", + libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + return err; + } + return pfd; +} + +static int append_to_file(const char *file, const char *fmt, ...) +{ + int fd, n, err = 0; + va_list ap; + + fd = open(file, O_WRONLY | O_APPEND | O_CLOEXEC, 0); + if (fd < 0) + return -errno; + + va_start(ap, fmt); + n = vdprintf(fd, fmt, ap); + va_end(ap); + + if (n < 0) + err = -errno; + + close(fd); + return err; +} + +static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz, + const char *kfunc_name, size_t offset) +{ + static int index = 0; + + snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx_%d", getpid(), kfunc_name, offset, + __sync_fetch_and_add(&index, 1)); +} + +static int add_kprobe_event_legacy(const char *probe_name, bool retprobe, + const char *kfunc_name, size_t offset) +{ + const char *file = "/sys/kernel/debug/tracing/kprobe_events"; + + return append_to_file(file, "%c:%s/%s %s+0x%zx", + retprobe ? 'r' : 'p', + retprobe ? 
"kretprobes" : "kprobes", + probe_name, kfunc_name, offset); +} + +static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe) +{ + const char *file = "/sys/kernel/debug/tracing/kprobe_events"; + + return append_to_file(file, "-:%s/%s", retprobe ? "kretprobes" : "kprobes", probe_name); +} + +static int determine_kprobe_perf_type_legacy(const char *probe_name, bool retprobe) +{ + char file[256]; + + snprintf(file, sizeof(file), + "/sys/kernel/debug/tracing/events/%s/%s/id", + retprobe ? "kretprobes" : "kprobes", probe_name); + + return parse_uint_from_file(file, "%d\n"); +} + +static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe, + const char *kfunc_name, size_t offset, int pid) +{ + struct perf_event_attr attr = {}; + char errmsg[STRERR_BUFSIZE]; + int type, pfd, err; + + err = add_kprobe_event_legacy(probe_name, retprobe, kfunc_name, offset); + if (err < 0) { + pr_warn("failed to add legacy kprobe event for '%s+0x%zx': %s\n", + kfunc_name, offset, + libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + return err; + } + type = determine_kprobe_perf_type_legacy(probe_name, retprobe); + if (type < 0) { + pr_warn("failed to determine legacy kprobe event id for '%s+0x%zx': %s\n", + kfunc_name, offset, + libbpf_strerror_r(type, errmsg, sizeof(errmsg))); + return type; + } + attr.size = sizeof(attr); + attr.config = type; + attr.type = PERF_TYPE_TRACEPOINT; + + pfd = syscall(__NR_perf_event_open, &attr, + pid < 0 ? -1 : pid, /* pid */ + pid == -1 ? 0 : -1, /* cpu */ + -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); + if (pfd < 0) { + err = -errno; + pr_warn("legacy kprobe perf_event_open() failed: %s\n", + libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + return err; + } + return pfd; +} + +struct bpf_link * +bpf_program__attach_kprobe_opts(const struct bpf_program *prog, + const char *func_name, + const struct bpf_kprobe_opts *opts) +{ + DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts); + char errmsg[STRERR_BUFSIZE]; + char *legacy_probe = NULL; + struct bpf_link *link; + size_t offset; + bool retprobe, legacy; + int pfd, err; + + if (!OPTS_VALID(opts, bpf_kprobe_opts)) + return libbpf_err_ptr(-EINVAL); + + retprobe = OPTS_GET(opts, retprobe, false); + offset = OPTS_GET(opts, offset, 0); + pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0); + + legacy = determine_kprobe_perf_type() < 0; + if (!legacy) { + pfd = perf_event_open_probe(false /* uprobe */, retprobe, + func_name, offset, + -1 /* pid */, 0 /* ref_ctr_off */); + } else { + char probe_name[256]; + + gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), + func_name, offset); + + legacy_probe = strdup(probe_name); + if (!legacy_probe) + return libbpf_err_ptr(-ENOMEM); + + pfd = perf_event_kprobe_open_legacy(legacy_probe, retprobe, func_name, + offset, -1 /* pid */); + } + if (pfd < 0) { + err = -errno; + pr_warn("prog '%s': failed to create %s '%s+0x%zx' perf event: %s\n", + prog->name, retprobe ? "kretprobe" : "kprobe", + func_name, offset, + libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + goto err_out; + } + link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts); + err = libbpf_get_error(link); + if (err) { + close(pfd); + pr_warn("prog '%s': failed to attach to %s '%s+0x%zx': %s\n", + prog->name, retprobe ? 
"kretprobe" : "kprobe", + func_name, offset, + libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + goto err_out; + } + if (legacy) { + struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); + + perf_link->legacy_probe_name = legacy_probe; + perf_link->legacy_is_kprobe = true; + perf_link->legacy_is_retprobe = retprobe; + } + + return link; +err_out: + free(legacy_probe); + return libbpf_err_ptr(err); +} + +struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog, + bool retprobe, + const char *func_name) +{ + DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts, + .retprobe = retprobe, + ); + + return bpf_program__attach_kprobe_opts(prog, func_name, &opts); +} + +/* Adapted from perf/util/string.c */ +static bool glob_match(const char *str, const char *pat) +{ + while (*str && *pat && *pat != '*') { + if (*pat == '?') { /* Matches any single character */ + str++; + pat++; + continue; + } + if (*str != *pat) + return false; + str++; + pat++; + } + /* Check wild card */ + if (*pat == '*') { + while (*pat == '*') + pat++; + if (!*pat) /* Tail wild card matches all */ + return true; + while (*str) + if (glob_match(str++, pat)) + return true; + } + return !*str && !*pat; +} + +struct kprobe_multi_resolve { + const char *pattern; + unsigned long *addrs; + size_t cap; + size_t cnt; +}; + +static int +resolve_kprobe_multi_cb(unsigned long long sym_addr, char sym_type, + const char *sym_name, void *ctx) +{ + struct kprobe_multi_resolve *res = ctx; + int err; + + if (!glob_match(sym_name, res->pattern)) + return 0; + + err = libbpf_ensure_mem((void **) &res->addrs, &res->cap, sizeof(unsigned long), + res->cnt + 1); + if (err) + return err; + + res->addrs[res->cnt++] = (unsigned long) sym_addr; + return 0; +} + +struct bpf_link * +bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog, + const char *pattern, + const struct bpf_kprobe_multi_opts *opts) +{ + LIBBPF_OPTS(bpf_link_create_opts, lopts); + struct kprobe_multi_resolve res = { + .pattern = pattern, + }; + struct bpf_link *link = NULL; + char errmsg[STRERR_BUFSIZE]; + const unsigned long *addrs; + int err, link_fd, prog_fd; + const __u64 *cookies; + const char **syms; + bool retprobe; + size_t cnt; + + if (!OPTS_VALID(opts, bpf_kprobe_multi_opts)) + return libbpf_err_ptr(-EINVAL); + + syms = OPTS_GET(opts, syms, false); + addrs = OPTS_GET(opts, addrs, false); + cnt = OPTS_GET(opts, cnt, false); + cookies = OPTS_GET(opts, cookies, false); + + if (!pattern && !addrs && !syms) + return libbpf_err_ptr(-EINVAL); + if (pattern && (addrs || syms || cookies || cnt)) + return libbpf_err_ptr(-EINVAL); + if (!pattern && !cnt) + return libbpf_err_ptr(-EINVAL); + if (addrs && syms) + return libbpf_err_ptr(-EINVAL); + + if (pattern) { + err = libbpf_kallsyms_parse(resolve_kprobe_multi_cb, &res); + if (err) + goto error; + if (!res.cnt) { + err = -ENOENT; + goto error; + } + addrs = res.addrs; + cnt = res.cnt; + } + + retprobe = OPTS_GET(opts, retprobe, false); + + lopts.kprobe_multi.syms = syms; + lopts.kprobe_multi.addrs = addrs; + lopts.kprobe_multi.cookies = cookies; + lopts.kprobe_multi.cnt = cnt; + lopts.kprobe_multi.flags = retprobe ? 
BPF_F_KPROBE_MULTI_RETURN : 0; + + link = calloc(1, sizeof(*link)); + if (!link) { + err = -ENOMEM; + goto error; + } + link->detach = &bpf_link__detach_fd; + + prog_fd = bpf_program__fd(prog); + link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &lopts); + if (link_fd < 0) { + err = -errno; + pr_warn("prog '%s': failed to attach: %s\n", + prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + goto error; + } + link->fd = link_fd; + free(res.addrs); + return link; + +error: + free(link); + free(res.addrs); + return libbpf_err_ptr(err); +} + +static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link) +{ + DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts); + unsigned long offset = 0; + const char *func_name; + char *func; + int n; + + opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe/"); + if (opts.retprobe) + func_name = prog->sec_name + sizeof("kretprobe/") - 1; + else + func_name = prog->sec_name + sizeof("kprobe/") - 1; + + n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset); + if (n < 1) { + pr_warn("kprobe name is invalid: %s\n", func_name); + return -EINVAL; + } + if (opts.retprobe && offset != 0) { + free(func); + pr_warn("kretprobes do not support offset specification\n"); + return -EINVAL; + } + + opts.offset = offset; + *link = bpf_program__attach_kprobe_opts(prog, func, &opts); + free(func); + return libbpf_get_error(*link); +} + +static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link) +{ + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + const char *spec; + char *pattern; + int n; + + opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe.multi/"); + if (opts.retprobe) + spec = prog->sec_name + sizeof("kretprobe.multi/") - 1; + else + spec = prog->sec_name + sizeof("kprobe.multi/") - 1; + + n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern); + if (n < 1) { + pr_warn("kprobe multi pattern is invalid: %s\n", pattern); + return -EINVAL; + } + + *link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts); + free(pattern); + return libbpf_get_error(*link); +} + +static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz, + const char *binary_path, uint64_t offset) +{ + int i; + + snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), binary_path, (size_t)offset); + + /* sanitize binary_path in the probe name */ + for (i = 0; buf[i]; i++) { + if (!isalnum(buf[i])) + buf[i] = '_'; + } +} + +static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe, + const char *binary_path, size_t offset) +{ + const char *file = "/sys/kernel/debug/tracing/uprobe_events"; + + return append_to_file(file, "%c:%s/%s %s:0x%zx", + retprobe ? 'r' : 'p', + retprobe ? "uretprobes" : "uprobes", + probe_name, binary_path, offset); +} + +static inline int remove_uprobe_event_legacy(const char *probe_name, bool retprobe) +{ + const char *file = "/sys/kernel/debug/tracing/uprobe_events"; + + return append_to_file(file, "-:%s/%s", retprobe ? "uretprobes" : "uprobes", probe_name); +} + +static int determine_uprobe_perf_type_legacy(const char *probe_name, bool retprobe) +{ + char file[512]; + + snprintf(file, sizeof(file), + "/sys/kernel/debug/tracing/events/%s/%s/id", + retprobe ? 
"uretprobes" : "uprobes", probe_name); + + return parse_uint_from_file(file, "%d\n"); +} + +static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe, + const char *binary_path, size_t offset, int pid) +{ + struct perf_event_attr attr; + int type, pfd, err; + + err = add_uprobe_event_legacy(probe_name, retprobe, binary_path, offset); + if (err < 0) { + pr_warn("failed to add legacy uprobe event for %s:0x%zx: %d\n", + binary_path, (size_t)offset, err); + return err; + } + type = determine_uprobe_perf_type_legacy(probe_name, retprobe); + if (type < 0) { + pr_warn("failed to determine legacy uprobe event id for %s:0x%zx: %d\n", + binary_path, offset, err); + return type; + } + + memset(&attr, 0, sizeof(attr)); + attr.size = sizeof(attr); + attr.config = type; + attr.type = PERF_TYPE_TRACEPOINT; + + pfd = syscall(__NR_perf_event_open, &attr, + pid < 0 ? -1 : pid, /* pid */ + pid == -1 ? 0 : -1, /* cpu */ + -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); + if (pfd < 0) { + err = -errno; + pr_warn("legacy uprobe perf_event_open() failed: %d\n", err); + return err; + } + return pfd; +} + +/* uprobes deal in relative offsets; subtract the base address associated with + * the mapped binary. See Documentation/trace/uprobetracer.rst for more + * details. + */ +static long elf_find_relative_offset(const char *filename, Elf *elf, long addr) +{ + size_t n; + int i; + + if (elf_getphdrnum(elf, &n)) { + pr_warn("elf: failed to find program headers for '%s': %s\n", filename, + elf_errmsg(-1)); + return -ENOENT; + } + + for (i = 0; i < n; i++) { + int seg_start, seg_end, seg_offset; + GElf_Phdr phdr; + + if (!gelf_getphdr(elf, i, &phdr)) { + pr_warn("elf: failed to get program header %d from '%s': %s\n", i, filename, + elf_errmsg(-1)); + return -ENOENT; + } + if (phdr.p_type != PT_LOAD || !(phdr.p_flags & PF_X)) + continue; + + seg_start = phdr.p_vaddr; + seg_end = seg_start + phdr.p_memsz; + seg_offset = phdr.p_offset; + if (addr >= seg_start && addr < seg_end) + return addr - seg_start + seg_offset; + } + pr_warn("elf: failed to find prog header containing 0x%lx in '%s'\n", addr, filename); + return -ENOENT; +} + +/* Return next ELF section of sh_type after scn, or first of that type if scn is NULL. */ +static Elf_Scn *elf_find_next_scn_by_type(Elf *elf, int sh_type, Elf_Scn *scn) +{ + while ((scn = elf_nextscn(elf, scn)) != NULL) { + GElf_Shdr sh; + + if (!gelf_getshdr(scn, &sh)) + continue; + if (sh.sh_type == sh_type) + return scn; + } + return NULL; +} + +/* Find offset of function name in object specified by path. "name" matches + * symbol name or name@@LIB for library functions. 
+ */ +static long elf_find_func_offset(const char *binary_path, const char *name) +{ + int fd, i, sh_types[2] = { SHT_DYNSYM, SHT_SYMTAB }; + bool is_shared_lib, is_name_qualified; + char errmsg[STRERR_BUFSIZE]; + long ret = -ENOENT; + size_t name_len; + GElf_Ehdr ehdr; + Elf *elf; + + fd = open(binary_path, O_RDONLY | O_CLOEXEC); + if (fd < 0) { + ret = -errno; + pr_warn("failed to open %s: %s\n", binary_path, + libbpf_strerror_r(ret, errmsg, sizeof(errmsg))); + return ret; + } + elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); + if (!elf) { + pr_warn("elf: could not read elf from %s: %s\n", binary_path, elf_errmsg(-1)); + close(fd); + return -LIBBPF_ERRNO__FORMAT; + } + if (!gelf_getehdr(elf, &ehdr)) { + pr_warn("elf: failed to get ehdr from %s: %s\n", binary_path, elf_errmsg(-1)); + ret = -LIBBPF_ERRNO__FORMAT; + goto out; + } + /* for shared lib case, we do not need to calculate relative offset */ + is_shared_lib = ehdr.e_type == ET_DYN; + + name_len = strlen(name); + /* Does name specify "@@LIB"? */ + is_name_qualified = strstr(name, "@@") != NULL; + + /* Search SHT_DYNSYM, SHT_SYMTAB for symbol. This search order is used because if + * a binary is stripped, it may only have SHT_DYNSYM, and a fully-statically + * linked binary may not have SHT_DYMSYM, so absence of a section should not be + * reported as a warning/error. + */ + for (i = 0; i < ARRAY_SIZE(sh_types); i++) { + size_t nr_syms, strtabidx, idx; + Elf_Data *symbols = NULL; + Elf_Scn *scn = NULL; + int last_bind = -1; + const char *sname; + GElf_Shdr sh; + + scn = elf_find_next_scn_by_type(elf, sh_types[i], NULL); + if (!scn) { + pr_debug("elf: failed to find symbol table ELF sections in '%s'\n", + binary_path); + continue; + } + if (!gelf_getshdr(scn, &sh)) + continue; + strtabidx = sh.sh_link; + symbols = elf_getdata(scn, 0); + if (!symbols) { + pr_warn("elf: failed to get symbols for symtab section in '%s': %s\n", + binary_path, elf_errmsg(-1)); + ret = -LIBBPF_ERRNO__FORMAT; + goto out; + } + nr_syms = symbols->d_size / sh.sh_entsize; + + for (idx = 0; idx < nr_syms; idx++) { + int curr_bind; + GElf_Sym sym; + + if (!gelf_getsym(symbols, idx, &sym)) + continue; + + if (GELF_ST_TYPE(sym.st_info) != STT_FUNC) + continue; + + sname = elf_strptr(elf, strtabidx, sym.st_name); + if (!sname) + continue; + + curr_bind = GELF_ST_BIND(sym.st_info); + + /* User can specify func, func@@LIB or func@@LIB_VERSION. */ + if (strncmp(sname, name, name_len) != 0) + continue; + /* ...but we don't want a search for "foo" to match 'foo2" also, so any + * additional characters in sname should be of the form "@@LIB". + */ + if (!is_name_qualified && sname[name_len] != '\0' && sname[name_len] != '@') + continue; + + if (ret >= 0) { + /* handle multiple matches */ + if (last_bind != STB_WEAK && curr_bind != STB_WEAK) { + /* Only accept one non-weak bind. */ + pr_warn("elf: ambiguous match for '%s', '%s' in '%s'\n", + sname, name, binary_path); + ret = -LIBBPF_ERRNO__FORMAT; + goto out; + } else if (curr_bind == STB_WEAK) { + /* already have a non-weak bind, and + * this is a weak bind, so ignore. 
+ */ + continue; + } + } + ret = sym.st_value; + last_bind = curr_bind; + } + /* For binaries that are not shared libraries, we need relative offset */ + if (ret > 0 && !is_shared_lib) + ret = elf_find_relative_offset(binary_path, elf, ret); + if (ret > 0) + break; + } + + if (ret > 0) { + pr_debug("elf: symbol address match for '%s' in '%s': 0x%lx\n", name, binary_path, + ret); + } else { + if (ret == 0) { + pr_warn("elf: '%s' is 0 in symtab for '%s': %s\n", name, binary_path, + is_shared_lib ? "should not be 0 in a shared library" : + "try using shared library path instead"); + ret = -ENOENT; + } else { + pr_warn("elf: failed to find symbol '%s' in '%s'\n", name, binary_path); + } + } +out: + elf_end(elf); + close(fd); + return ret; +} + +static const char *arch_specific_lib_paths(void) +{ + /* + * Based on https://packages.debian.org/sid/libc6. + * + * Assume that the traced program is built for the same architecture + * as libbpf, which should cover the vast majority of cases. + */ +#if defined(__x86_64__) + return "/lib/x86_64-linux-gnu"; +#elif defined(__i386__) + return "/lib/i386-linux-gnu"; +#elif defined(__s390x__) + return "/lib/s390x-linux-gnu"; +#elif defined(__s390__) + return "/lib/s390-linux-gnu"; +#elif defined(__arm__) && defined(__SOFTFP__) + return "/lib/arm-linux-gnueabi"; +#elif defined(__arm__) && !defined(__SOFTFP__) + return "/lib/arm-linux-gnueabihf"; +#elif defined(__aarch64__) + return "/lib/aarch64-linux-gnu"; +#elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 64 + return "/lib/mips64el-linux-gnuabi64"; +#elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 32 + return "/lib/mipsel-linux-gnu"; +#elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + return "/lib/powerpc64le-linux-gnu"; +#elif defined(__sparc__) && defined(__arch64__) + return "/lib/sparc64-linux-gnu"; +#elif defined(__riscv) && __riscv_xlen == 64 + return "/lib/riscv64-linux-gnu"; +#else + return NULL; +#endif +} + +/* Get full path to program/shared library. */ +static int resolve_full_path(const char *file, char *result, size_t result_sz) +{ + const char *search_paths[3] = {}; + int i; + + if (str_has_sfx(file, ".so") || strstr(file, ".so.")) { + search_paths[0] = getenv("LD_LIBRARY_PATH"); + search_paths[1] = "/usr/lib64:/usr/lib"; + search_paths[2] = arch_specific_lib_paths(); + } else { + search_paths[0] = getenv("PATH"); + search_paths[1] = "/usr/bin:/usr/sbin"; + } + + for (i = 0; i < ARRAY_SIZE(search_paths); i++) { + const char *s; + + if (!search_paths[i]) + continue; + for (s = search_paths[i]; s != NULL; s = strchr(s, ':')) { + char *next_path; + int seg_len; + + if (s[0] == ':') + s++; + next_path = strchr(s, ':'); + seg_len = next_path ? 
next_path - s : strlen(s); + if (!seg_len) + continue; + snprintf(result, result_sz, "%.*s/%s", seg_len, s, file); + /* ensure it is an executable file/link */ + if (access(result, R_OK | X_OK) < 0) + continue; + pr_debug("resolved '%s' to '%s'\n", file, result); + return 0; + } + } + return -ENOENT; +} + +LIBBPF_API struct bpf_link * +bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid, + const char *binary_path, size_t func_offset, + const struct bpf_uprobe_opts *opts) +{ + DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts); + char errmsg[STRERR_BUFSIZE], *legacy_probe = NULL; + char full_binary_path[PATH_MAX]; + struct bpf_link *link; + size_t ref_ctr_off; + int pfd, err; + bool retprobe, legacy; + const char *func_name; + + if (!OPTS_VALID(opts, bpf_uprobe_opts)) + return libbpf_err_ptr(-EINVAL); + + retprobe = OPTS_GET(opts, retprobe, false); + ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0); + pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0); + + if (binary_path && !strchr(binary_path, '/')) { + err = resolve_full_path(binary_path, full_binary_path, + sizeof(full_binary_path)); + if (err) { + pr_warn("prog '%s': failed to resolve full path for '%s': %d\n", + prog->name, binary_path, err); + return libbpf_err_ptr(err); + } + binary_path = full_binary_path; + } + func_name = OPTS_GET(opts, func_name, NULL); + if (func_name) { + long sym_off; + + if (!binary_path) { + pr_warn("prog '%s': name-based attach requires binary_path\n", + prog->name); + return libbpf_err_ptr(-EINVAL); + } + sym_off = elf_find_func_offset(binary_path, func_name); + if (sym_off < 0) + return libbpf_err_ptr(sym_off); + func_offset += sym_off; + } + + legacy = determine_uprobe_perf_type() < 0; + if (!legacy) { + pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path, + func_offset, pid, ref_ctr_off); + } else { + char probe_name[PATH_MAX + 64]; + + if (ref_ctr_off) + return libbpf_err_ptr(-EINVAL); + + gen_uprobe_legacy_event_name(probe_name, sizeof(probe_name), + binary_path, func_offset); + + legacy_probe = strdup(probe_name); + if (!legacy_probe) + return libbpf_err_ptr(-ENOMEM); + + pfd = perf_event_uprobe_open_legacy(legacy_probe, retprobe, + binary_path, func_offset, pid); + } + if (pfd < 0) { + err = -errno; + pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n", + prog->name, retprobe ? "uretprobe" : "uprobe", + binary_path, func_offset, + libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + goto err_out; + } + + link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts); + err = libbpf_get_error(link); + if (err) { + close(pfd); + pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n", + prog->name, retprobe ? "uretprobe" : "uprobe", + binary_path, func_offset, + libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + goto err_out; + } + if (legacy) { + struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); + + perf_link->legacy_probe_name = legacy_probe; + perf_link->legacy_is_kprobe = false; + perf_link->legacy_is_retprobe = retprobe; + } + return link; +err_out: + free(legacy_probe); + return libbpf_err_ptr(err); + +} + +/* Format of u[ret]probe section definition supporting auto-attach: + * u[ret]probe/binary:function[+offset] + * + * binary can be an absolute/relative path or a filename; the latter is resolved to a + * full binary path via bpf_program__attach_uprobe_opts. 
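/*
 * Editor's sketch (not part of libbpf): attaching a uprobe by function name with
 * bpf_program__attach_uprobe_opts() as implemented above. "prog" is assumed to be
 * a loaded uprobe program; binary and symbol names are illustrative. Because the
 * path contains no '/', it is resolved via resolve_full_path(), and the symbol
 * offset is looked up through elf_find_func_offset().
 */
#include <bpf/libbpf.h>

static struct bpf_link *attach_malloc_uprobe(struct bpf_program *prog)
{
	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uopts,
		.func_name = "malloc",
		.retprobe = false);

	/* pid -1 = all processes; func_offset 0 is added to the resolved symbol offset.
	 * On failure the returned pointer encodes the error (check with libbpf_get_error()).
	 */
	return bpf_program__attach_uprobe_opts(prog, -1, "libc.so.6", 0, &uopts);
}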
+ * + * Specifying uprobe+ ensures we carry out strict matching; either "uprobe" must be + * specified (and auto-attach is not possible) or the above format is specified for + * auto-attach. + */ +static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link) +{ + DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts); + char *probe_type = NULL, *binary_path = NULL, *func_name = NULL; + int n, ret = -EINVAL; + long offset = 0; + + *link = NULL; + + n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[a-zA-Z0-9_.]+%li", + &probe_type, &binary_path, &func_name, &offset); + switch (n) { + case 1: + /* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */ + ret = 0; + break; + case 2: + pr_warn("prog '%s': section '%s' missing ':function[+offset]' specification\n", + prog->name, prog->sec_name); + break; + case 3: + case 4: + opts.retprobe = strcmp(probe_type, "uretprobe") == 0; + if (opts.retprobe && offset != 0) { + pr_warn("prog '%s': uretprobes do not support offset specification\n", + prog->name); + break; + } + opts.func_name = func_name; + *link = bpf_program__attach_uprobe_opts(prog, -1, binary_path, offset, &opts); + ret = libbpf_get_error(*link); + break; + default: + pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name, + prog->sec_name); + break; + } + free(probe_type); + free(binary_path); + free(func_name); + + return ret; +} + +struct bpf_link *bpf_program__attach_uprobe(const struct bpf_program *prog, + bool retprobe, pid_t pid, + const char *binary_path, + size_t func_offset) +{ + DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts, .retprobe = retprobe); + + return bpf_program__attach_uprobe_opts(prog, pid, binary_path, func_offset, &opts); +} + +struct bpf_link *bpf_program__attach_usdt(const struct bpf_program *prog, + pid_t pid, const char *binary_path, + const char *usdt_provider, const char *usdt_name, + const struct bpf_usdt_opts *opts) +{ + char resolved_path[512]; + struct bpf_object *obj = prog->obj; + struct bpf_link *link; + __u64 usdt_cookie; + int err; + + if (!OPTS_VALID(opts, bpf_uprobe_opts)) + return libbpf_err_ptr(-EINVAL); + + if (bpf_program__fd(prog) < 0) { + pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n", + prog->name); + return libbpf_err_ptr(-EINVAL); + } + + if (!strchr(binary_path, '/')) { + err = resolve_full_path(binary_path, resolved_path, sizeof(resolved_path)); + if (err) { + pr_warn("prog '%s': failed to resolve full path for '%s': %d\n", + prog->name, binary_path, err); + return libbpf_err_ptr(err); + } + binary_path = resolved_path; + } + + /* USDT manager is instantiated lazily on first USDT attach. It will + * be destroyed together with BPF object in bpf_object__close(). 
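/*
 * Editor's sketch (not part of libbpf): BPF-side section names that the
 * attach_uprobe() auto-attach parser above accepts. Binaries and symbols are
 * illustrative; offsets, if given, follow the function name with "+0x...".
 */
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

SEC("uprobe/libc.so.6:malloc")          /* file name only: resolved to a full path at attach time */
int handle_malloc(void *ctx) { return 0; }

SEC("uretprobe//usr/bin/bash:readline") /* absolute path + return probe */
int handle_readline_ret(void *ctx) { return 0; }

SEC("uprobe")                           /* valid section, but no auto-attach: attach manually with opts */
int handle_manual(void *ctx) { return 0; }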
+ */ + if (IS_ERR(obj->usdt_man)) + return libbpf_ptr(obj->usdt_man); + if (!obj->usdt_man) { + obj->usdt_man = usdt_manager_new(obj); + if (IS_ERR(obj->usdt_man)) + return libbpf_ptr(obj->usdt_man); + } + + usdt_cookie = OPTS_GET(opts, usdt_cookie, 0); + link = usdt_manager_attach_usdt(obj->usdt_man, prog, pid, binary_path, + usdt_provider, usdt_name, usdt_cookie); + err = libbpf_get_error(link); + if (err) + return libbpf_err_ptr(err); + return link; +} + +static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link) +{ + char *path = NULL, *provider = NULL, *name = NULL; + const char *sec_name; + int n, err; + + sec_name = bpf_program__section_name(prog); + if (strcmp(sec_name, "usdt") == 0) { + /* no auto-attach for just SEC("usdt") */ + *link = NULL; + return 0; + } + + n = sscanf(sec_name, "usdt/%m[^:]:%m[^:]:%m[^:]", &path, &provider, &name); + if (n != 3) { + pr_warn("invalid section '%s', expected SEC(\"usdt/::\")\n", + sec_name); + err = -EINVAL; + } else { + *link = bpf_program__attach_usdt(prog, -1 /* any process */, path, + provider, name, NULL); + err = libbpf_get_error(*link); + } + free(path); + free(provider); + free(name); + return err; +} + +static int determine_tracepoint_id(const char *tp_category, + const char *tp_name) +{ + char file[PATH_MAX]; + int ret; + + ret = snprintf(file, sizeof(file), + "/sys/kernel/debug/tracing/events/%s/%s/id", + tp_category, tp_name); + if (ret < 0) + return -errno; + if (ret >= sizeof(file)) { + pr_debug("tracepoint %s/%s path is too long\n", + tp_category, tp_name); + return -E2BIG; + } + return parse_uint_from_file(file, "%d\n"); +} + +static int perf_event_open_tracepoint(const char *tp_category, + const char *tp_name) +{ + struct perf_event_attr attr = {}; + char errmsg[STRERR_BUFSIZE]; + int tp_id, pfd, err; + + tp_id = determine_tracepoint_id(tp_category, tp_name); + if (tp_id < 0) { + pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n", + tp_category, tp_name, + libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg))); + return tp_id; + } + + attr.type = PERF_TYPE_TRACEPOINT; + attr.size = sizeof(attr); + attr.config = tp_id; + + pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */, + -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); + if (pfd < 0) { + err = -errno; + pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n", + tp_category, tp_name, + libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + return err; + } + return pfd; +} + +struct bpf_link *bpf_program__attach_tracepoint_opts(const struct bpf_program *prog, + const char *tp_category, + const char *tp_name, + const struct bpf_tracepoint_opts *opts) +{ + DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts); + char errmsg[STRERR_BUFSIZE]; + struct bpf_link *link; + int pfd, err; + + if (!OPTS_VALID(opts, bpf_tracepoint_opts)) + return libbpf_err_ptr(-EINVAL); + + pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0); + + pfd = perf_event_open_tracepoint(tp_category, tp_name); + if (pfd < 0) { + pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n", + prog->name, tp_category, tp_name, + libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); + return libbpf_err_ptr(pfd); + } + link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts); + err = libbpf_get_error(link); + if (err) { + close(pfd); + pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n", + prog->name, tp_category, tp_name, + libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + return libbpf_err_ptr(err); + } + return link; +} + +struct 
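/*
 * Editor's sketch (not part of libbpf): attaching a loaded program to a
 * tracepoint with a BPF cookie via bpf_program__attach_tracepoint_opts().
 * The category/name pair maps to
 * /sys/kernel/debug/tracing/events/<category>/<name>/id, as resolved by
 * determine_tracepoint_id() above; "prog" and the cookie value are assumptions.
 */
#include <bpf/libbpf.h>

static struct bpf_link *attach_execve_tp(struct bpf_program *prog)
{
	DECLARE_LIBBPF_OPTS(bpf_tracepoint_opts, tp_opts,
		.bpf_cookie = 1);

	return bpf_program__attach_tracepoint_opts(prog, "syscalls",
						   "sys_enter_execve", &tp_opts);
}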
bpf_link *bpf_program__attach_tracepoint(const struct bpf_program *prog, + const char *tp_category, + const char *tp_name) +{ + return bpf_program__attach_tracepoint_opts(prog, tp_category, tp_name, NULL); +} + +static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link) +{ + char *sec_name, *tp_cat, *tp_name; + + sec_name = strdup(prog->sec_name); + if (!sec_name) + return -ENOMEM; + + /* extract "tp//" or "tracepoint//" */ + if (str_has_pfx(prog->sec_name, "tp/")) + tp_cat = sec_name + sizeof("tp/") - 1; + else + tp_cat = sec_name + sizeof("tracepoint/") - 1; + tp_name = strchr(tp_cat, '/'); + if (!tp_name) { + free(sec_name); + return -EINVAL; + } + *tp_name = '\0'; + tp_name++; + + *link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name); + free(sec_name); + return libbpf_get_error(*link); +} + +struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog, + const char *tp_name) +{ + char errmsg[STRERR_BUFSIZE]; + struct bpf_link *link; + int prog_fd, pfd; + + prog_fd = bpf_program__fd(prog); + if (prog_fd < 0) { + pr_warn("prog '%s': can't attach before loaded\n", prog->name); + return libbpf_err_ptr(-EINVAL); + } + + link = calloc(1, sizeof(*link)); + if (!link) + return libbpf_err_ptr(-ENOMEM); + link->detach = &bpf_link__detach_fd; + + pfd = bpf_raw_tracepoint_open(tp_name, prog_fd); + if (pfd < 0) { + pfd = -errno; + free(link); + pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n", + prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); + return libbpf_err_ptr(pfd); + } + link->fd = pfd; + return link; +} + +static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link) +{ + static const char *const prefixes[] = { + "raw_tp/", + "raw_tracepoint/", + "raw_tp.w/", + "raw_tracepoint.w/", + }; + size_t i; + const char *tp_name = NULL; + + for (i = 0; i < ARRAY_SIZE(prefixes); i++) { + if (str_has_pfx(prog->sec_name, prefixes[i])) { + tp_name = prog->sec_name + strlen(prefixes[i]); + break; + } + } + if (!tp_name) { + pr_warn("prog '%s': invalid section name '%s'\n", + prog->name, prog->sec_name); + return -EINVAL; + } + + *link = bpf_program__attach_raw_tracepoint(prog, tp_name); + return libbpf_get_error(link); +} + +/* Common logic for all BPF program types that attach to a btf_id */ +static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *prog) +{ + char errmsg[STRERR_BUFSIZE]; + struct bpf_link *link; + int prog_fd, pfd; + + prog_fd = bpf_program__fd(prog); + if (prog_fd < 0) { + pr_warn("prog '%s': can't attach before loaded\n", prog->name); + return libbpf_err_ptr(-EINVAL); + } + + link = calloc(1, sizeof(*link)); + if (!link) + return libbpf_err_ptr(-ENOMEM); + link->detach = &bpf_link__detach_fd; + + /* libbpf is smart enough to redirect to BPF_RAW_TRACEPOINT_OPEN on old kernels */ + pfd = bpf_link_create(prog_fd, 0, bpf_program__expected_attach_type(prog), NULL); + if (pfd < 0) { + pfd = -errno; + free(link); + pr_warn("prog '%s': failed to attach: %s\n", + prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); + return libbpf_err_ptr(pfd); + } + link->fd = pfd; + return link; +} + +struct bpf_link *bpf_program__attach_trace(const struct bpf_program *prog) +{ + return bpf_program__attach_btf_id(prog); +} + +struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog) +{ + return bpf_program__attach_btf_id(prog); +} + +static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link) +{ + *link 
= bpf_program__attach_trace(prog); + return libbpf_get_error(*link); +} + +static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link) +{ + *link = bpf_program__attach_lsm(prog); + return libbpf_get_error(*link); +} + +static struct bpf_link * +bpf_program__attach_fd(const struct bpf_program *prog, int target_fd, int btf_id, + const char *target_name) +{ + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts, + .target_btf_id = btf_id); + enum bpf_attach_type attach_type; + char errmsg[STRERR_BUFSIZE]; + struct bpf_link *link; + int prog_fd, link_fd; + + prog_fd = bpf_program__fd(prog); + if (prog_fd < 0) { + pr_warn("prog '%s': can't attach before loaded\n", prog->name); + return libbpf_err_ptr(-EINVAL); + } + + link = calloc(1, sizeof(*link)); + if (!link) + return libbpf_err_ptr(-ENOMEM); + link->detach = &bpf_link__detach_fd; + + attach_type = bpf_program__expected_attach_type(prog); + link_fd = bpf_link_create(prog_fd, target_fd, attach_type, &opts); + if (link_fd < 0) { + link_fd = -errno; + free(link); + pr_warn("prog '%s': failed to attach to %s: %s\n", + prog->name, target_name, + libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg))); + return libbpf_err_ptr(link_fd); + } + link->fd = link_fd; + return link; +} + +struct bpf_link * +bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd) +{ + return bpf_program__attach_fd(prog, cgroup_fd, 0, "cgroup"); +} + +struct bpf_link * +bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd) +{ + return bpf_program__attach_fd(prog, netns_fd, 0, "netns"); +} + +struct bpf_link *bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex) +{ + /* target_fd/target_ifindex use the same field in LINK_CREATE */ + return bpf_program__attach_fd(prog, ifindex, 0, "xdp"); +} + +struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog, + int target_fd, + const char *attach_func_name) +{ + int btf_id; + + if (!!target_fd != !!attach_func_name) { + pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n", + prog->name); + return libbpf_err_ptr(-EINVAL); + } + + if (prog->type != BPF_PROG_TYPE_EXT) { + pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace", + prog->name); + return libbpf_err_ptr(-EINVAL); + } + + if (target_fd) { + btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd); + if (btf_id < 0) + return libbpf_err_ptr(btf_id); + + return bpf_program__attach_fd(prog, target_fd, btf_id, "freplace"); + } else { + /* no target, so use raw_tracepoint_open for compatibility + * with old kernels + */ + return bpf_program__attach_trace(prog); + } +} + +struct bpf_link * +bpf_program__attach_iter(const struct bpf_program *prog, + const struct bpf_iter_attach_opts *opts) +{ + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts); + char errmsg[STRERR_BUFSIZE]; + struct bpf_link *link; + int prog_fd, link_fd; + __u32 target_fd = 0; + + if (!OPTS_VALID(opts, bpf_iter_attach_opts)) + return libbpf_err_ptr(-EINVAL); + + link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0); + link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0); + + prog_fd = bpf_program__fd(prog); + if (prog_fd < 0) { + pr_warn("prog '%s': can't attach before loaded\n", prog->name); + return libbpf_err_ptr(-EINVAL); + } + + link = calloc(1, sizeof(*link)); + if (!link) + return libbpf_err_ptr(-ENOMEM); + link->detach = &bpf_link__detach_fd; + + link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER, + &link_create_opts); + if 
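/*
 * Editor's sketch (not part of libbpf): the FD/ifindex-based wrappers above
 * (cgroup, netns, xdp, freplace) all funnel into bpf_program__attach_fd().
 * Interface name and cgroup path are illustrative; both programs are assumed
 * to be loaded with matching program/attach types.
 */
#include <errno.h>
#include <fcntl.h>
#include <net/if.h>
#include <bpf/libbpf.h>

static int attach_xdp_and_cgroup(struct bpf_program *xdp_prog,
				 struct bpf_program *cg_prog)
{
	struct bpf_link *xdp_link, *cg_link;
	int ifindex, cg_fd;

	ifindex = if_nametoindex("eth0");
	if (!ifindex)
		return -errno;
	xdp_link = bpf_program__attach_xdp(xdp_prog, ifindex);
	if (libbpf_get_error(xdp_link))
		return (int)libbpf_get_error(xdp_link);

	cg_fd = open("/sys/fs/cgroup/my.slice", O_RDONLY | O_DIRECTORY);
	if (cg_fd < 0)
		return -errno;
	cg_link = bpf_program__attach_cgroup(cg_prog, cg_fd);
	if (libbpf_get_error(cg_link))
		return (int)libbpf_get_error(cg_link);

	return 0;
}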
(link_fd < 0) { + link_fd = -errno; + free(link); + pr_warn("prog '%s': failed to attach to iterator: %s\n", + prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg))); + return libbpf_err_ptr(link_fd); + } + link->fd = link_fd; + return link; +} + +static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link) +{ + *link = bpf_program__attach_iter(prog, NULL); + return libbpf_get_error(*link); +} + +struct bpf_link *bpf_program__attach(const struct bpf_program *prog) +{ + struct bpf_link *link = NULL; + int err; + + if (!prog->sec_def || !prog->sec_def->prog_attach_fn) + return libbpf_err_ptr(-EOPNOTSUPP); + + err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, &link); + if (err) + return libbpf_err_ptr(err); + + /* When calling bpf_program__attach() explicitly, auto-attach support + * is expected to work, so NULL returned link is considered an error. + * This is different for skeleton's attach, see comment in + * bpf_object__attach_skeleton(). + */ + if (!link) + return libbpf_err_ptr(-EOPNOTSUPP); + + return link; +} + +static int bpf_link__detach_struct_ops(struct bpf_link *link) +{ + __u32 zero = 0; + + if (bpf_map_delete_elem(link->fd, &zero)) + return -errno; + + return 0; +} + +struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map) +{ + struct bpf_struct_ops *st_ops; + struct bpf_link *link; + __u32 i, zero = 0; + int err; + + if (!bpf_map__is_struct_ops(map) || map->fd == -1) + return libbpf_err_ptr(-EINVAL); + + link = calloc(1, sizeof(*link)); + if (!link) + return libbpf_err_ptr(-EINVAL); + + st_ops = map->st_ops; + for (i = 0; i < btf_vlen(st_ops->type); i++) { + struct bpf_program *prog = st_ops->progs[i]; + void *kern_data; + int prog_fd; + + if (!prog) + continue; + + prog_fd = bpf_program__fd(prog); + kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i]; + *(unsigned long *)kern_data = prog_fd; + } + + err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0); + if (err) { + err = -errno; + free(link); + return libbpf_err_ptr(err); + } + + link->detach = bpf_link__detach_struct_ops; + link->fd = map->fd; + + return link; +} + +static enum bpf_perf_event_ret +perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size, + void **copy_mem, size_t *copy_size, + bpf_perf_event_print_t fn, void *private_data) +{ + struct perf_event_mmap_page *header = mmap_mem; + __u64 data_head = ring_buffer_read_head(header); + __u64 data_tail = header->data_tail; + void *base = ((__u8 *)header) + page_size; + int ret = LIBBPF_PERF_EVENT_CONT; + struct perf_event_header *ehdr; + size_t ehdr_size; + + while (data_head != data_tail) { + ehdr = base + (data_tail & (mmap_size - 1)); + ehdr_size = ehdr->size; + + if (((void *)ehdr) + ehdr_size > base + mmap_size) { + void *copy_start = ehdr; + size_t len_first = base + mmap_size - copy_start; + size_t len_secnd = ehdr_size - len_first; + + if (*copy_size < ehdr_size) { + free(*copy_mem); + *copy_mem = malloc(ehdr_size); + if (!*copy_mem) { + *copy_size = 0; + ret = LIBBPF_PERF_EVENT_ERROR; + break; + } + *copy_size = ehdr_size; + } + + memcpy(*copy_mem, copy_start, len_first); + memcpy(*copy_mem + len_first, base, len_secnd); + ehdr = *copy_mem; + } + + ret = fn(ehdr, private_data); + data_tail += ehdr_size; + if (ret != LIBBPF_PERF_EVENT_CONT) + break; + } + + ring_buffer_write_tail(header, data_tail); + return libbpf_err(ret); +} + +__attribute__((alias("perf_event_read_simple"))) +enum bpf_perf_event_ret +bpf_perf_event_read_simple(void *mmap_mem, size_t 
mmap_size, size_t page_size, + void **copy_mem, size_t *copy_size, + bpf_perf_event_print_t fn, void *private_data); + +struct perf_buffer; + +struct perf_buffer_params { + struct perf_event_attr *attr; + /* if event_cb is specified, it takes precendence */ + perf_buffer_event_fn event_cb; + /* sample_cb and lost_cb are higher-level common-case callbacks */ + perf_buffer_sample_fn sample_cb; + perf_buffer_lost_fn lost_cb; + void *ctx; + int cpu_cnt; + int *cpus; + int *map_keys; +}; + +struct perf_cpu_buf { + struct perf_buffer *pb; + void *base; /* mmap()'ed memory */ + void *buf; /* for reconstructing segmented data */ + size_t buf_size; + int fd; + int cpu; + int map_key; +}; + +struct perf_buffer { + perf_buffer_event_fn event_cb; + perf_buffer_sample_fn sample_cb; + perf_buffer_lost_fn lost_cb; + void *ctx; /* passed into callbacks */ + + size_t page_size; + size_t mmap_size; + struct perf_cpu_buf **cpu_bufs; + struct epoll_event *events; + int cpu_cnt; /* number of allocated CPU buffers */ + int epoll_fd; /* perf event FD */ + int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */ +}; + +static void perf_buffer__free_cpu_buf(struct perf_buffer *pb, + struct perf_cpu_buf *cpu_buf) +{ + if (!cpu_buf) + return; + if (cpu_buf->base && + munmap(cpu_buf->base, pb->mmap_size + pb->page_size)) + pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu); + if (cpu_buf->fd >= 0) { + ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0); + close(cpu_buf->fd); + } + free(cpu_buf->buf); + free(cpu_buf); +} + +void perf_buffer__free(struct perf_buffer *pb) +{ + int i; + + if (IS_ERR_OR_NULL(pb)) + return; + if (pb->cpu_bufs) { + for (i = 0; i < pb->cpu_cnt; i++) { + struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i]; + + if (!cpu_buf) + continue; + + bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key); + perf_buffer__free_cpu_buf(pb, cpu_buf); + } + free(pb->cpu_bufs); + } + if (pb->epoll_fd >= 0) + close(pb->epoll_fd); + free(pb->events); + free(pb); +} + +static struct perf_cpu_buf * +perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr, + int cpu, int map_key) +{ + struct perf_cpu_buf *cpu_buf; + char msg[STRERR_BUFSIZE]; + int err; + + cpu_buf = calloc(1, sizeof(*cpu_buf)); + if (!cpu_buf) + return ERR_PTR(-ENOMEM); + + cpu_buf->pb = pb; + cpu_buf->cpu = cpu; + cpu_buf->map_key = map_key; + + cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu, + -1, PERF_FLAG_FD_CLOEXEC); + if (cpu_buf->fd < 0) { + err = -errno; + pr_warn("failed to open perf buffer event on cpu #%d: %s\n", + cpu, libbpf_strerror_r(err, msg, sizeof(msg))); + goto error; + } + + cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size, + PROT_READ | PROT_WRITE, MAP_SHARED, + cpu_buf->fd, 0); + if (cpu_buf->base == MAP_FAILED) { + cpu_buf->base = NULL; + err = -errno; + pr_warn("failed to mmap perf buffer on cpu #%d: %s\n", + cpu, libbpf_strerror_r(err, msg, sizeof(msg))); + goto error; + } + + if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) { + err = -errno; + pr_warn("failed to enable perf buffer event on cpu #%d: %s\n", + cpu, libbpf_strerror_r(err, msg, sizeof(msg))); + goto error; + } + + return cpu_buf; + +error: + perf_buffer__free_cpu_buf(pb, cpu_buf); + return (struct perf_cpu_buf *)ERR_PTR(err); +} + +static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, + struct perf_buffer_params *p); + +DEFAULT_VERSION(perf_buffer__new_v0_6_0, perf_buffer__new, LIBBPF_0.6.0) +struct perf_buffer *perf_buffer__new_v0_6_0(int map_fd, size_t page_cnt, + perf_buffer_sample_fn 
sample_cb, + perf_buffer_lost_fn lost_cb, + void *ctx, + const struct perf_buffer_opts *opts) +{ + struct perf_buffer_params p = {}; + struct perf_event_attr attr = {}; + + if (!OPTS_VALID(opts, perf_buffer_opts)) + return libbpf_err_ptr(-EINVAL); + + attr.config = PERF_COUNT_SW_BPF_OUTPUT; + attr.type = PERF_TYPE_SOFTWARE; + attr.sample_type = PERF_SAMPLE_RAW; + attr.sample_period = 1; + attr.wakeup_events = 1; + + p.attr = &attr; + p.sample_cb = sample_cb; + p.lost_cb = lost_cb; + p.ctx = ctx; + + return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p)); +} + +COMPAT_VERSION(perf_buffer__new_deprecated, perf_buffer__new, LIBBPF_0.0.4) +struct perf_buffer *perf_buffer__new_deprecated(int map_fd, size_t page_cnt, + const struct perf_buffer_opts *opts) +{ + return perf_buffer__new_v0_6_0(map_fd, page_cnt, + opts ? opts->sample_cb : NULL, + opts ? opts->lost_cb : NULL, + opts ? opts->ctx : NULL, + NULL); +} + +DEFAULT_VERSION(perf_buffer__new_raw_v0_6_0, perf_buffer__new_raw, LIBBPF_0.6.0) +struct perf_buffer *perf_buffer__new_raw_v0_6_0(int map_fd, size_t page_cnt, + struct perf_event_attr *attr, + perf_buffer_event_fn event_cb, void *ctx, + const struct perf_buffer_raw_opts *opts) +{ + struct perf_buffer_params p = {}; + + if (!attr) + return libbpf_err_ptr(-EINVAL); + + if (!OPTS_VALID(opts, perf_buffer_raw_opts)) + return libbpf_err_ptr(-EINVAL); + + p.attr = attr; + p.event_cb = event_cb; + p.ctx = ctx; + p.cpu_cnt = OPTS_GET(opts, cpu_cnt, 0); + p.cpus = OPTS_GET(opts, cpus, NULL); + p.map_keys = OPTS_GET(opts, map_keys, NULL); + + return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p)); +} + +COMPAT_VERSION(perf_buffer__new_raw_deprecated, perf_buffer__new_raw, LIBBPF_0.0.4) +struct perf_buffer *perf_buffer__new_raw_deprecated(int map_fd, size_t page_cnt, + const struct perf_buffer_raw_opts *opts) +{ + LIBBPF_OPTS(perf_buffer_raw_opts, inner_opts, + .cpu_cnt = opts->cpu_cnt, + .cpus = opts->cpus, + .map_keys = opts->map_keys, + ); + + return perf_buffer__new_raw_v0_6_0(map_fd, page_cnt, opts->attr, + opts->event_cb, opts->ctx, &inner_opts); +} + +static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, + struct perf_buffer_params *p) +{ + const char *online_cpus_file = "/sys/devices/system/cpu/online"; + struct bpf_map_info map; + char msg[STRERR_BUFSIZE]; + struct perf_buffer *pb; + bool *online = NULL; + __u32 map_info_len; + int err, i, j, n; + + if (page_cnt == 0 || (page_cnt & (page_cnt - 1))) { + pr_warn("page count should be power of two, but is %zu\n", + page_cnt); + return ERR_PTR(-EINVAL); + } + + /* best-effort sanity checks */ + memset(&map, 0, sizeof(map)); + map_info_len = sizeof(map); + err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len); + if (err) { + err = -errno; + /* if BPF_OBJ_GET_INFO_BY_FD is supported, will return + * -EBADFD, -EFAULT, or -E2BIG on real error + */ + if (err != -EINVAL) { + pr_warn("failed to get map info for map FD %d: %s\n", + map_fd, libbpf_strerror_r(err, msg, sizeof(msg))); + return ERR_PTR(err); + } + pr_debug("failed to get map info for FD %d; API not supported? 
Ignoring...\n", + map_fd); + } else { + if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) { + pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n", + map.name); + return ERR_PTR(-EINVAL); + } + } + + pb = calloc(1, sizeof(*pb)); + if (!pb) + return ERR_PTR(-ENOMEM); + + pb->event_cb = p->event_cb; + pb->sample_cb = p->sample_cb; + pb->lost_cb = p->lost_cb; + pb->ctx = p->ctx; + + pb->page_size = getpagesize(); + pb->mmap_size = pb->page_size * page_cnt; + pb->map_fd = map_fd; + + pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC); + if (pb->epoll_fd < 0) { + err = -errno; + pr_warn("failed to create epoll instance: %s\n", + libbpf_strerror_r(err, msg, sizeof(msg))); + goto error; + } + + if (p->cpu_cnt > 0) { + pb->cpu_cnt = p->cpu_cnt; + } else { + pb->cpu_cnt = libbpf_num_possible_cpus(); + if (pb->cpu_cnt < 0) { + err = pb->cpu_cnt; + goto error; + } + if (map.max_entries && map.max_entries < pb->cpu_cnt) + pb->cpu_cnt = map.max_entries; + } + + pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events)); + if (!pb->events) { + err = -ENOMEM; + pr_warn("failed to allocate events: out of memory\n"); + goto error; + } + pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs)); + if (!pb->cpu_bufs) { + err = -ENOMEM; + pr_warn("failed to allocate buffers: out of memory\n"); + goto error; + } + + err = parse_cpu_mask_file(online_cpus_file, &online, &n); + if (err) { + pr_warn("failed to get online CPU mask: %d\n", err); + goto error; + } + + for (i = 0, j = 0; i < pb->cpu_cnt; i++) { + struct perf_cpu_buf *cpu_buf; + int cpu, map_key; + + cpu = p->cpu_cnt > 0 ? p->cpus[i] : i; + map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i; + + /* in case user didn't explicitly requested particular CPUs to + * be attached to, skip offline/not present CPUs + */ + if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu])) + continue; + + cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key); + if (IS_ERR(cpu_buf)) { + err = PTR_ERR(cpu_buf); + goto error; + } + + pb->cpu_bufs[j] = cpu_buf; + + err = bpf_map_update_elem(pb->map_fd, &map_key, + &cpu_buf->fd, 0); + if (err) { + err = -errno; + pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n", + cpu, map_key, cpu_buf->fd, + libbpf_strerror_r(err, msg, sizeof(msg))); + goto error; + } + + pb->events[j].events = EPOLLIN; + pb->events[j].data.ptr = cpu_buf; + if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd, + &pb->events[j]) < 0) { + err = -errno; + pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n", + cpu, cpu_buf->fd, + libbpf_strerror_r(err, msg, sizeof(msg))); + goto error; + } + j++; + } + pb->cpu_cnt = j; + free(online); + + return pb; + +error: + free(online); + if (pb) + perf_buffer__free(pb); + return ERR_PTR(err); +} + +struct perf_sample_raw { + struct perf_event_header header; + uint32_t size; + char data[]; +}; + +struct perf_sample_lost { + struct perf_event_header header; + uint64_t id; + uint64_t lost; + uint64_t sample_id; +}; + +static enum bpf_perf_event_ret +perf_buffer__process_record(struct perf_event_header *e, void *ctx) +{ + struct perf_cpu_buf *cpu_buf = ctx; + struct perf_buffer *pb = cpu_buf->pb; + void *data = e; + + /* user wants full control over parsing perf event */ + if (pb->event_cb) + return pb->event_cb(pb->ctx, cpu_buf->cpu, e); + + switch (e->type) { + case PERF_RECORD_SAMPLE: { + struct perf_sample_raw *s = data; + + if (pb->sample_cb) + pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size); + break; + } + case PERF_RECORD_LOST: { + struct perf_sample_lost *s = data; + + if (pb->lost_cb) + 
pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost); + break; + } + default: + pr_warn("unknown perf sample type %d\n", e->type); + return LIBBPF_PERF_EVENT_ERROR; + } + return LIBBPF_PERF_EVENT_CONT; +} + +static int perf_buffer__process_records(struct perf_buffer *pb, + struct perf_cpu_buf *cpu_buf) +{ + enum bpf_perf_event_ret ret; + + ret = perf_event_read_simple(cpu_buf->base, pb->mmap_size, + pb->page_size, &cpu_buf->buf, + &cpu_buf->buf_size, + perf_buffer__process_record, cpu_buf); + if (ret != LIBBPF_PERF_EVENT_CONT) + return ret; + return 0; +} + +int perf_buffer__epoll_fd(const struct perf_buffer *pb) +{ + return pb->epoll_fd; +} + +int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms) +{ + int i, cnt, err; + + cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms); + if (cnt < 0) + return -errno; + + for (i = 0; i < cnt; i++) { + struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr; + + err = perf_buffer__process_records(pb, cpu_buf); + if (err) { + pr_warn("error while processing records: %d\n", err); + return libbpf_err(err); + } + } + return cnt; +} + +/* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer + * manager. + */ +size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb) +{ + return pb->cpu_cnt; +} + +/* + * Return perf_event FD of a ring buffer in *buf_idx* slot of + * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using + * select()/poll()/epoll() Linux syscalls. + */ +int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx) +{ + struct perf_cpu_buf *cpu_buf; + + if (buf_idx >= pb->cpu_cnt) + return libbpf_err(-EINVAL); + + cpu_buf = pb->cpu_bufs[buf_idx]; + if (!cpu_buf) + return libbpf_err(-ENOENT); + + return cpu_buf->fd; +} + +/* + * Consume data from perf ring buffer corresponding to slot *buf_idx* in + * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to + * consume, do nothing and return success. + * Returns: + * - 0 on success; + * - <0 on failure. + */ +int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx) +{ + struct perf_cpu_buf *cpu_buf; + + if (buf_idx >= pb->cpu_cnt) + return libbpf_err(-EINVAL); + + cpu_buf = pb->cpu_bufs[buf_idx]; + if (!cpu_buf) + return libbpf_err(-ENOENT); + + return perf_buffer__process_records(pb, cpu_buf); +} + +int perf_buffer__consume(struct perf_buffer *pb) +{ + int i, err; + + for (i = 0; i < pb->cpu_cnt; i++) { + struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i]; + + if (!cpu_buf) + continue; + + err = perf_buffer__process_records(pb, cpu_buf); + if (err) { + pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err); + return libbpf_err(err); + } + } + return 0; +} + +struct bpf_prog_info_array_desc { + int array_offset; /* e.g. offset of jited_prog_insns */ + int count_offset; /* e.g. 
offset of jited_prog_len */ + int size_offset; /* > 0: offset of rec size, + * < 0: fix size of -size_offset + */ +}; + +static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = { + [BPF_PROG_INFO_JITED_INSNS] = { + offsetof(struct bpf_prog_info, jited_prog_insns), + offsetof(struct bpf_prog_info, jited_prog_len), + -1, + }, + [BPF_PROG_INFO_XLATED_INSNS] = { + offsetof(struct bpf_prog_info, xlated_prog_insns), + offsetof(struct bpf_prog_info, xlated_prog_len), + -1, + }, + [BPF_PROG_INFO_MAP_IDS] = { + offsetof(struct bpf_prog_info, map_ids), + offsetof(struct bpf_prog_info, nr_map_ids), + -(int)sizeof(__u32), + }, + [BPF_PROG_INFO_JITED_KSYMS] = { + offsetof(struct bpf_prog_info, jited_ksyms), + offsetof(struct bpf_prog_info, nr_jited_ksyms), + -(int)sizeof(__u64), + }, + [BPF_PROG_INFO_JITED_FUNC_LENS] = { + offsetof(struct bpf_prog_info, jited_func_lens), + offsetof(struct bpf_prog_info, nr_jited_func_lens), + -(int)sizeof(__u32), + }, + [BPF_PROG_INFO_FUNC_INFO] = { + offsetof(struct bpf_prog_info, func_info), + offsetof(struct bpf_prog_info, nr_func_info), + offsetof(struct bpf_prog_info, func_info_rec_size), + }, + [BPF_PROG_INFO_LINE_INFO] = { + offsetof(struct bpf_prog_info, line_info), + offsetof(struct bpf_prog_info, nr_line_info), + offsetof(struct bpf_prog_info, line_info_rec_size), + }, + [BPF_PROG_INFO_JITED_LINE_INFO] = { + offsetof(struct bpf_prog_info, jited_line_info), + offsetof(struct bpf_prog_info, nr_jited_line_info), + offsetof(struct bpf_prog_info, jited_line_info_rec_size), + }, + [BPF_PROG_INFO_PROG_TAGS] = { + offsetof(struct bpf_prog_info, prog_tags), + offsetof(struct bpf_prog_info, nr_prog_tags), + -(int)sizeof(__u8) * BPF_TAG_SIZE, + }, + +}; + +static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info, + int offset) +{ + __u32 *array = (__u32 *)info; + + if (offset >= 0) + return array[offset / sizeof(__u32)]; + return -(int)offset; +} + +static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info, + int offset) +{ + __u64 *array = (__u64 *)info; + + if (offset >= 0) + return array[offset / sizeof(__u64)]; + return -(int)offset; +} + +static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset, + __u32 val) +{ + __u32 *array = (__u32 *)info; + + if (offset >= 0) + array[offset / sizeof(__u32)] = val; +} + +static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset, + __u64 val) +{ + __u64 *array = (__u64 *)info; + + if (offset >= 0) + array[offset / sizeof(__u64)] = val; +} + +struct bpf_prog_info_linear * +bpf_program__get_prog_info_linear(int fd, __u64 arrays) +{ + struct bpf_prog_info_linear *info_linear; + struct bpf_prog_info info = {}; + __u32 info_len = sizeof(info); + __u32 data_len = 0; + int i, err; + void *ptr; + + if (arrays >> BPF_PROG_INFO_LAST_ARRAY) + return libbpf_err_ptr(-EINVAL); + + /* step 1: get array dimensions */ + err = bpf_obj_get_info_by_fd(fd, &info, &info_len); + if (err) { + pr_debug("can't get prog info: %s", strerror(errno)); + return libbpf_err_ptr(-EFAULT); + } + + /* step 2: calculate total size of all arrays */ + for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) { + bool include_array = (arrays & (1UL << i)) > 0; + struct bpf_prog_info_array_desc *desc; + __u32 count, size; + + desc = bpf_prog_info_array_desc + i; + + /* kernel is too old to support this field */ + if (info_len < desc->array_offset + sizeof(__u32) || + info_len < desc->count_offset + sizeof(__u32) || + (desc->size_offset > 0 && info_len < desc->size_offset)) + 
include_array = false; + + if (!include_array) { + arrays &= ~(1UL << i); /* clear the bit */ + continue; + } + + count = bpf_prog_info_read_offset_u32(&info, desc->count_offset); + size = bpf_prog_info_read_offset_u32(&info, desc->size_offset); + + data_len += count * size; + } + + /* step 3: allocate continuous memory */ + data_len = roundup(data_len, sizeof(__u64)); + info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len); + if (!info_linear) + return libbpf_err_ptr(-ENOMEM); + + /* step 4: fill data to info_linear->info */ + info_linear->arrays = arrays; + memset(&info_linear->info, 0, sizeof(info)); + ptr = info_linear->data; + + for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) { + struct bpf_prog_info_array_desc *desc; + __u32 count, size; + + if ((arrays & (1UL << i)) == 0) + continue; + + desc = bpf_prog_info_array_desc + i; + count = bpf_prog_info_read_offset_u32(&info, desc->count_offset); + size = bpf_prog_info_read_offset_u32(&info, desc->size_offset); + bpf_prog_info_set_offset_u32(&info_linear->info, + desc->count_offset, count); + bpf_prog_info_set_offset_u32(&info_linear->info, + desc->size_offset, size); + bpf_prog_info_set_offset_u64(&info_linear->info, + desc->array_offset, + ptr_to_u64(ptr)); + ptr += count * size; + } + + /* step 5: call syscall again to get required arrays */ + err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len); + if (err) { + pr_debug("can't get prog info: %s", strerror(errno)); + free(info_linear); + return libbpf_err_ptr(-EFAULT); + } + + /* step 6: verify the data */ + for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) { + struct bpf_prog_info_array_desc *desc; + __u32 v1, v2; + + if ((arrays & (1UL << i)) == 0) + continue; + + desc = bpf_prog_info_array_desc + i; + v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset); + v2 = bpf_prog_info_read_offset_u32(&info_linear->info, + desc->count_offset); + if (v1 != v2) + pr_warn("%s: mismatch in element count\n", __func__); + + v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset); + v2 = bpf_prog_info_read_offset_u32(&info_linear->info, + desc->size_offset); + if (v1 != v2) + pr_warn("%s: mismatch in rec size\n", __func__); + } + + /* step 7: update info_len and data_len */ + info_linear->info_len = sizeof(struct bpf_prog_info); + info_linear->data_len = data_len; + + return info_linear; +} + +void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear) +{ + int i; + + for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) { + struct bpf_prog_info_array_desc *desc; + __u64 addr, offs; + + if ((info_linear->arrays & (1UL << i)) == 0) + continue; + + desc = bpf_prog_info_array_desc + i; + addr = bpf_prog_info_read_offset_u64(&info_linear->info, + desc->array_offset); + offs = addr - ptr_to_u64(info_linear->data); + bpf_prog_info_set_offset_u64(&info_linear->info, + desc->array_offset, offs); + } +} + +void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear) +{ + int i; + + for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) { + struct bpf_prog_info_array_desc *desc; + __u64 addr, offs; + + if ((info_linear->arrays & (1UL << i)) == 0) + continue; + + desc = bpf_prog_info_array_desc + i; + offs = bpf_prog_info_read_offset_u64(&info_linear->info, + desc->array_offset); + addr = offs + ptr_to_u64(info_linear->data); + bpf_prog_info_set_offset_u64(&info_linear->info, + desc->array_offset, addr); + } +} + +int bpf_program__set_attach_target(struct 
bpf_program *prog, + int attach_prog_fd, + const char *attach_func_name) +{ + int btf_obj_fd = 0, btf_id = 0, err; + + if (!prog || attach_prog_fd < 0) + return libbpf_err(-EINVAL); + + if (prog->obj->loaded) + return libbpf_err(-EINVAL); + + if (attach_prog_fd && !attach_func_name) { + /* remember attach_prog_fd and let bpf_program__load() find + * BTF ID during the program load + */ + prog->attach_prog_fd = attach_prog_fd; + return 0; + } + + if (attach_prog_fd) { + btf_id = libbpf_find_prog_btf_id(attach_func_name, + attach_prog_fd); + if (btf_id < 0) + return libbpf_err(btf_id); + } else { + if (!attach_func_name) + return libbpf_err(-EINVAL); + + /* load btf_vmlinux, if not yet */ + err = bpf_object__load_vmlinux_btf(prog->obj, true); + if (err) + return libbpf_err(err); + err = find_kernel_btf_id(prog->obj, attach_func_name, + prog->expected_attach_type, + &btf_obj_fd, &btf_id); + if (err) + return libbpf_err(err); + } + + prog->attach_btf_id = btf_id; + prog->attach_btf_obj_fd = btf_obj_fd; + prog->attach_prog_fd = attach_prog_fd; + return 0; +} + +int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz) +{ + int err = 0, n, len, start, end = -1; + bool *tmp; + + *mask = NULL; + *mask_sz = 0; + + /* Each sub string separated by ',' has format \d+-\d+ or \d+ */ + while (*s) { + if (*s == ',' || *s == '\n') { + s++; + continue; + } + n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len); + if (n <= 0 || n > 2) { + pr_warn("Failed to get CPU range %s: %d\n", s, n); + err = -EINVAL; + goto cleanup; + } else if (n == 1) { + end = start; + } + if (start < 0 || start > end) { + pr_warn("Invalid CPU range [%d,%d] in %s\n", + start, end, s); + err = -EINVAL; + goto cleanup; + } + tmp = realloc(*mask, end + 1); + if (!tmp) { + err = -ENOMEM; + goto cleanup; + } + *mask = tmp; + memset(tmp + *mask_sz, 0, start - *mask_sz); + memset(tmp + start, 1, end - start + 1); + *mask_sz = end + 1; + s += len; + } + if (!*mask_sz) { + pr_warn("Empty CPU range\n"); + return -EINVAL; + } + return 0; +cleanup: + free(*mask); + *mask = NULL; + return err; +} + +int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz) +{ + int fd, err = 0, len; + char buf[128]; + + fd = open(fcpu, O_RDONLY | O_CLOEXEC); + if (fd < 0) { + err = -errno; + pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err); + return err; + } + len = read(fd, buf, sizeof(buf)); + close(fd); + if (len <= 0) { + err = len ? 
-errno : -EINVAL; + pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err); + return err; + } + if (len >= sizeof(buf)) { + pr_warn("CPU mask is too big in file %s\n", fcpu); + return -E2BIG; + } + buf[len] = '\0'; + + return parse_cpu_mask_str(buf, mask, mask_sz); +} + +int libbpf_num_possible_cpus(void) +{ + static const char *fcpu = "/sys/devices/system/cpu/possible"; + static int cpus; + int err, n, i, tmp_cpus; + bool *mask; + + tmp_cpus = READ_ONCE(cpus); + if (tmp_cpus > 0) + return tmp_cpus; + + err = parse_cpu_mask_file(fcpu, &mask, &n); + if (err) + return libbpf_err(err); + + tmp_cpus = 0; + for (i = 0; i < n; i++) { + if (mask[i]) + tmp_cpus++; + } + free(mask); + + WRITE_ONCE(cpus, tmp_cpus); + return tmp_cpus; +} + +static int populate_skeleton_maps(const struct bpf_object *obj, + struct bpf_map_skeleton *maps, + size_t map_cnt) +{ + int i; + + for (i = 0; i < map_cnt; i++) { + struct bpf_map **map = maps[i].map; + const char *name = maps[i].name; + void **mmaped = maps[i].mmaped; + + *map = bpf_object__find_map_by_name(obj, name); + if (!*map) { + pr_warn("failed to find skeleton map '%s'\n", name); + return -ESRCH; + } + + /* externs shouldn't be pre-setup from user code */ + if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG) + *mmaped = (*map)->mmaped; + } + return 0; +} + +static int populate_skeleton_progs(const struct bpf_object *obj, + struct bpf_prog_skeleton *progs, + size_t prog_cnt) +{ + int i; + + for (i = 0; i < prog_cnt; i++) { + struct bpf_program **prog = progs[i].prog; + const char *name = progs[i].name; + + *prog = bpf_object__find_program_by_name(obj, name); + if (!*prog) { + pr_warn("failed to find skeleton program '%s'\n", name); + return -ESRCH; + } + } + return 0; +} + +int bpf_object__open_skeleton(struct bpf_object_skeleton *s, + const struct bpf_object_open_opts *opts) +{ + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts, + .object_name = s->name, + ); + struct bpf_object *obj; + int err; + + /* Attempt to preserve opts->object_name, unless overriden by user + * explicitly. Overwriting object name for skeletons is discouraged, + * as it breaks global data maps, because they contain object name + * prefix as their own map name prefix. When skeleton is generated, + * bpftool is making an assumption that this name will stay the same. 
+ */ + if (opts) { + memcpy(&skel_opts, opts, sizeof(*opts)); + if (!opts->object_name) + skel_opts.object_name = s->name; + } + + obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts); + err = libbpf_get_error(obj); + if (err) { + pr_warn("failed to initialize skeleton BPF object '%s': %d\n", + s->name, err); + return libbpf_err(err); + } + + *s->obj = obj; + err = populate_skeleton_maps(obj, s->maps, s->map_cnt); + if (err) { + pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err); + return libbpf_err(err); + } + + err = populate_skeleton_progs(obj, s->progs, s->prog_cnt); + if (err) { + pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err); + return libbpf_err(err); + } + + return 0; +} + +int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s) +{ + int err, len, var_idx, i; + const char *var_name; + const struct bpf_map *map; + struct btf *btf; + __u32 map_type_id; + const struct btf_type *map_type, *var_type; + const struct bpf_var_skeleton *var_skel; + struct btf_var_secinfo *var; + + if (!s->obj) + return libbpf_err(-EINVAL); + + btf = bpf_object__btf(s->obj); + if (!btf) { + pr_warn("subskeletons require BTF at runtime (object %s)\n", + bpf_object__name(s->obj)); + return libbpf_err(-errno); + } + + err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt); + if (err) { + pr_warn("failed to populate subskeleton maps: %d\n", err); + return libbpf_err(err); + } + + err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt); + if (err) { + pr_warn("failed to populate subskeleton maps: %d\n", err); + return libbpf_err(err); + } + + for (var_idx = 0; var_idx < s->var_cnt; var_idx++) { + var_skel = &s->vars[var_idx]; + map = *var_skel->map; + map_type_id = bpf_map__btf_value_type_id(map); + map_type = btf__type_by_id(btf, map_type_id); + + if (!btf_is_datasec(map_type)) { + pr_warn("type for map '%1$s' is not a datasec: %2$s", + bpf_map__name(map), + __btf_kind_str(btf_kind(map_type))); + return libbpf_err(-EINVAL); + } + + len = btf_vlen(map_type); + var = btf_var_secinfos(map_type); + for (i = 0; i < len; i++, var++) { + var_type = btf__type_by_id(btf, var->type); + var_name = btf__name_by_offset(btf, var_type->name_off); + if (strcmp(var_name, var_skel->name) == 0) { + *var_skel->addr = map->mmaped + var->offset; + break; + } + } + } + return 0; +} + +void bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s) +{ + if (!s) + return; + free(s->maps); + free(s->progs); + free(s->vars); + free(s); +} + +int bpf_object__load_skeleton(struct bpf_object_skeleton *s) +{ + int i, err; + + err = bpf_object__load(*s->obj); + if (err) { + pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err); + return libbpf_err(err); + } + + for (i = 0; i < s->map_cnt; i++) { + struct bpf_map *map = *s->maps[i].map; + size_t mmap_sz = bpf_map_mmap_sz(map); + int prot, map_fd = bpf_map__fd(map); + void **mmaped = s->maps[i].mmaped; + + if (!mmaped) + continue; + + if (!(map->def.map_flags & BPF_F_MMAPABLE)) { + *mmaped = NULL; + continue; + } + + if (map->def.map_flags & BPF_F_RDONLY_PROG) + prot = PROT_READ; + else + prot = PROT_READ | PROT_WRITE; + + /* Remap anonymous mmap()-ed "map initialization image" as + * a BPF map-backed mmap()-ed memory, but preserving the same + * memory address. This will cause kernel to change process' + * page table to point to a different piece of kernel memory, + * but from userspace point of view memory address (and its + * contents, being identical at this point) will stay the + * same. 
This mapping will be released by bpf_object__close() + * as per normal clean up procedure, so we don't need to worry + * about it from skeleton's clean up perspective. + */ + *mmaped = mmap(map->mmaped, mmap_sz, prot, + MAP_SHARED | MAP_FIXED, map_fd, 0); + if (*mmaped == MAP_FAILED) { + err = -errno; + *mmaped = NULL; + pr_warn("failed to re-mmap() map '%s': %d\n", + bpf_map__name(map), err); + return libbpf_err(err); + } + } + + return 0; +} + +int bpf_object__attach_skeleton(struct bpf_object_skeleton *s) +{ + int i, err; + + for (i = 0; i < s->prog_cnt; i++) { + struct bpf_program *prog = *s->progs[i].prog; + struct bpf_link **link = s->progs[i].link; + + if (!prog->autoload) + continue; + + /* auto-attaching not supported for this program */ + if (!prog->sec_def || !prog->sec_def->prog_attach_fn) + continue; + + /* if user already set the link manually, don't attempt auto-attach */ + if (*link) + continue; + + err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, link); + if (err) { + pr_warn("prog '%s': failed to auto-attach: %d\n", + bpf_program__name(prog), err); + return libbpf_err(err); + } + + /* It's possible that for some SEC() definitions auto-attach + * is supported in some cases (e.g., if definition completely + * specifies target information), but is not in other cases. + * SEC("uprobe") is one such case. If user specified target + * binary and function name, such BPF program can be + * auto-attached. But if not, it shouldn't trigger skeleton's + * attach to fail. It should just be skipped. + * attach_fn signals such case with returning 0 (no error) and + * setting link to NULL. + */ + } + + return 0; +} + +void bpf_object__detach_skeleton(struct bpf_object_skeleton *s) +{ + int i; + + for (i = 0; i < s->prog_cnt; i++) { + struct bpf_link **link = s->progs[i].link; + + bpf_link__destroy(*link); + *link = NULL; + } +} + +void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s) +{ + if (!s) + return; + + if (s->progs) + bpf_object__detach_skeleton(s); + if (s->obj) + bpf_object__close(*s->obj); + free(s->maps); + free(s->progs); + free(s); +} diff --git a/pkg/plugin/lib/common/libbpf/_src/libbpf.h b/pkg/plugin/lib/common/libbpf/_src/libbpf.h new file mode 100644 index 000000000..cdbfee60e --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/libbpf.h @@ -0,0 +1,1611 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ + +/* + * Common eBPF ELF object loading operations. + * + * Copyright (C) 2013-2015 Alexei Starovoitov + * Copyright (C) 2015 Wang Nan + * Copyright (C) 2015 Huawei Inc. 
+ */ +#ifndef __LIBBPF_LIBBPF_H +#define __LIBBPF_LIBBPF_H + +#include <stdarg.h> +#include <stdio.h> +#include <stdint.h> +#include <stdbool.h> +#include <stddef.h> // for size_t +#include <linux/bpf.h> + +#include "libbpf_common.h" +#include "libbpf_legacy.h" + +#ifdef __cplusplus +extern "C" { +#endif + +LIBBPF_API __u32 libbpf_major_version(void); +LIBBPF_API __u32 libbpf_minor_version(void); +LIBBPF_API const char *libbpf_version_string(void); + +enum libbpf_errno { + __LIBBPF_ERRNO__START = 4000, + + /* Something wrong in libelf */ + LIBBPF_ERRNO__LIBELF = __LIBBPF_ERRNO__START, + LIBBPF_ERRNO__FORMAT, /* BPF object format invalid */ + LIBBPF_ERRNO__KVERSION, /* Incorrect or no 'version' section */ + LIBBPF_ERRNO__ENDIAN, /* Endian mismatch */ + LIBBPF_ERRNO__INTERNAL, /* Internal error in libbpf */ + LIBBPF_ERRNO__RELOC, /* Relocation failed */ + LIBBPF_ERRNO__LOAD, /* Load program failure for unknown reason */ + LIBBPF_ERRNO__VERIFY, /* Kernel verifier blocks program loading */ + LIBBPF_ERRNO__PROG2BIG, /* Program too big */ + LIBBPF_ERRNO__KVER, /* Incorrect kernel version */ + LIBBPF_ERRNO__PROGTYPE, /* Kernel doesn't support this program type */ + LIBBPF_ERRNO__WRNGPID, /* Wrong pid in netlink message */ + LIBBPF_ERRNO__INVSEQ, /* Invalid netlink sequence */ + LIBBPF_ERRNO__NLPARSE, /* netlink parsing error */ + __LIBBPF_ERRNO__END, +}; + +LIBBPF_API int libbpf_strerror(int err, char *buf, size_t size); + +enum libbpf_print_level { + LIBBPF_WARN, + LIBBPF_INFO, + LIBBPF_DEBUG, +}; + +typedef int (*libbpf_print_fn_t)(enum libbpf_print_level level, + const char *, va_list ap); + +LIBBPF_API libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn); + +/* Hide internal to user */ +struct bpf_object; + +struct bpf_object_open_attr { + const char *file; + enum bpf_prog_type prog_type; +}; + +struct bpf_object_open_opts { + /* size of this struct, for forward/backward compatiblity */ + size_t sz; + /* object name override, if provided: + * - for object open from file, this will override setting object + * name from file path's base name; + * - for object open from memory buffer, this will specify an object + * name and will override default "-" name; + */ + const char *object_name; + /* parse map definitions non-strictly, allowing extra attributes/data */ + bool relaxed_maps; + /* DEPRECATED: handle CO-RE relocations non-strictly, allowing failures. + * Value is ignored. Relocations always are processed non-strictly. + * Non-relocatable instructions are replaced with invalid ones to + * prevent accidental errors. + * */ + LIBBPF_DEPRECATED_SINCE(0, 6, "field has no effect") + bool relaxed_core_relocs; + /* maps that set the 'pinning' attribute in their definition will have + * their pin_path attribute set to a file in this directory, and be + * auto-pinned to that path on load; defaults to "/sys/fs/bpf". + */ + const char *pin_root_path; + + LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__set_attach_target() on each individual bpf_program") + __u32 attach_prog_fd; + /* Additional kernel config content that augments and overrides + * system Kconfig for CONFIG_xxx externs. + */ + const char *kconfig; + /* Path to the custom BTF to be used for BPF CO-RE relocations. + * This custom BTF completely replaces the use of vmlinux BTF + * for the purpose of CO-RE relocations. + * NOTE: any other BPF feature (e.g., fentry/fexit programs, + * struct_ops, etc) will need actual kernel BTF at /sys/kernel/btf/vmlinux. + */ + const char *btf_custom_path; + /* Pointer to a buffer for storing kernel logs for applicable BPF + * commands.
Valid kernel_log_size has to be specified as well and are + * passed-through to bpf() syscall. Keep in mind that kernel might + * fail operation with -ENOSPC error if provided buffer is too small + * to contain entire log output. + * See the comment below for kernel_log_level for interaction between + * log_buf and log_level settings. + * + * If specified, this log buffer will be passed for: + * - each BPF progral load (BPF_PROG_LOAD) attempt, unless overriden + * with bpf_program__set_log() on per-program level, to get + * BPF verifier log output. + * - during BPF object's BTF load into kernel (BPF_BTF_LOAD) to get + * BTF sanity checking log. + * + * Each BPF command (BPF_BTF_LOAD or BPF_PROG_LOAD) will overwrite + * previous contents, so if you need more fine-grained control, set + * per-program buffer with bpf_program__set_log_buf() to preserve each + * individual program's verification log. Keep using kernel_log_buf + * for BTF verification log, if necessary. + */ + char *kernel_log_buf; + size_t kernel_log_size; + /* + * Log level can be set independently from log buffer. Log_level=0 + * means that libbpf will attempt loading BTF or program without any + * logging requested, but will retry with either its own or custom log + * buffer, if provided, and log_level=1 on any error. + * And vice versa, setting log_level>0 will request BTF or prog + * loading with verbose log from the first attempt (and as such also + * for successfully loaded BTF or program), and the actual log buffer + * could be either libbpf's own auto-allocated log buffer, if + * kernel_log_buffer is NULL, or user-provided custom kernel_log_buf. + * If user didn't provide custom log buffer, libbpf will emit captured + * logs through its print callback. + */ + __u32 kernel_log_level; + + size_t :0; +}; +#define bpf_object_open_opts__last_field kernel_log_level + +LIBBPF_API struct bpf_object *bpf_object__open(const char *path); + +/** + * @brief **bpf_object__open_file()** creates a bpf_object by opening + * the BPF ELF object file pointed to by the passed path and loading it + * into memory. + * @param path BPF object file path + * @param opts options for how to load the bpf object, this parameter is + * optional and can be set to NULL + * @return pointer to the new bpf_object; or NULL is returned on error, + * error code is stored in errno + */ +LIBBPF_API struct bpf_object * +bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts); + +/** + * @brief **bpf_object__open_mem()** creates a bpf_object by reading + * the BPF objects raw bytes from a memory buffer containing a valid + * BPF ELF object file. 
+ * @param obj_buf pointer to the buffer containing ELF file bytes + * @param obj_buf_sz number of bytes in the buffer + * @param opts options for how to load the bpf object + * @return pointer to the new bpf_object; or NULL is returned on error, + * error code is stored in errno + */ +LIBBPF_API struct bpf_object * +bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz, + const struct bpf_object_open_opts *opts); + +/* deprecated bpf_object__open variants */ +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_object__open_mem() instead") +LIBBPF_API struct bpf_object * +bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz, + const char *name); +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__open_file() instead") +LIBBPF_API struct bpf_object * +bpf_object__open_xattr(struct bpf_object_open_attr *attr); + +enum libbpf_pin_type { + LIBBPF_PIN_NONE, + /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */ + LIBBPF_PIN_BY_NAME, +}; + +/* pin_maps and unpin_maps can both be called with a NULL path, in which case + * they will use the pin_path attribute of each map (and ignore all maps that + * don't have a pin_path set). + */ +LIBBPF_API int bpf_object__pin_maps(struct bpf_object *obj, const char *path); +LIBBPF_API int bpf_object__unpin_maps(struct bpf_object *obj, + const char *path); +LIBBPF_API int bpf_object__pin_programs(struct bpf_object *obj, + const char *path); +LIBBPF_API int bpf_object__unpin_programs(struct bpf_object *obj, + const char *path); +LIBBPF_API int bpf_object__pin(struct bpf_object *object, const char *path); +LIBBPF_API void bpf_object__close(struct bpf_object *object); + +struct bpf_object_load_attr { + struct bpf_object *obj; + int log_level; + const char *target_btf_path; +}; + +/* Load/unload object into/from kernel */ +LIBBPF_API int bpf_object__load(struct bpf_object *obj); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_object__load() instead") +LIBBPF_API int bpf_object__load_xattr(struct bpf_object_load_attr *attr); +LIBBPF_DEPRECATED_SINCE(0, 6, "bpf_object__unload() is deprecated, use bpf_object__close() instead") +LIBBPF_API int bpf_object__unload(struct bpf_object *obj); + +LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj); +LIBBPF_API unsigned int bpf_object__kversion(const struct bpf_object *obj); +LIBBPF_API int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version); + +struct btf; +LIBBPF_API struct btf *bpf_object__btf(const struct bpf_object *obj); +LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj); + +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__find_program_by_name() instead") +LIBBPF_API struct bpf_program * +bpf_object__find_program_by_title(const struct bpf_object *obj, + const char *title); +LIBBPF_API struct bpf_program * +bpf_object__find_program_by_name(const struct bpf_object *obj, + const char *name); + +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "track bpf_objects in application code instead") +struct bpf_object *bpf_object__next(struct bpf_object *prev); +#define bpf_object__for_each_safe(pos, tmp) \ + for ((pos) = bpf_object__next(NULL), \ + (tmp) = bpf_object__next(pos); \ + (pos) != NULL; \ + (pos) = (tmp), (tmp) = bpf_object__next(tmp)) + +typedef void (*bpf_object_clear_priv_t)(struct bpf_object *, void *); +LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated") +LIBBPF_API int bpf_object__set_priv(struct bpf_object *obj, void *priv, + bpf_object_clear_priv_t clear_priv); +LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated") +LIBBPF_API void 
*bpf_object__priv(const struct bpf_object *prog); + +LIBBPF_API int +libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, + enum bpf_attach_type *expected_attach_type); +LIBBPF_API int libbpf_attach_type_by_name(const char *name, + enum bpf_attach_type *attach_type); +LIBBPF_API int libbpf_find_vmlinux_btf_id(const char *name, + enum bpf_attach_type attach_type); + +/* Accessors of bpf_program */ +struct bpf_program; +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__next_program() instead") +struct bpf_program *bpf_program__next(struct bpf_program *prog, + const struct bpf_object *obj); +LIBBPF_API struct bpf_program * +bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prog); + +#define bpf_object__for_each_program(pos, obj) \ + for ((pos) = bpf_object__next_program((obj), NULL); \ + (pos) != NULL; \ + (pos) = bpf_object__next_program((obj), (pos))) + +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__prev_program() instead") +struct bpf_program *bpf_program__prev(struct bpf_program *prog, + const struct bpf_object *obj); +LIBBPF_API struct bpf_program * +bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *prog); + +typedef void (*bpf_program_clear_priv_t)(struct bpf_program *, void *); + +LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated") +LIBBPF_API int bpf_program__set_priv(struct bpf_program *prog, void *priv, + bpf_program_clear_priv_t clear_priv); +LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated") +LIBBPF_API void *bpf_program__priv(const struct bpf_program *prog); +LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog, + __u32 ifindex); + +LIBBPF_API const char *bpf_program__name(const struct bpf_program *prog); +LIBBPF_API const char *bpf_program__section_name(const struct bpf_program *prog); +LIBBPF_API LIBBPF_DEPRECATED("BPF program title is confusing term; please use bpf_program__section_name() instead") +const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy); +LIBBPF_API bool bpf_program__autoload(const struct bpf_program *prog); +LIBBPF_API int bpf_program__set_autoload(struct bpf_program *prog, bool autoload); + +/* returns program size in bytes */ +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__insn_cnt() instead") +LIBBPF_API size_t bpf_program__size(const struct bpf_program *prog); + +struct bpf_insn; + +/** + * @brief **bpf_program__insns()** gives read-only access to BPF program's + * underlying BPF instructions. + * @param prog BPF program for which to return instructions + * @return a pointer to an array of BPF instructions that belong to the + * specified BPF program + * + * Returned pointer is always valid and not NULL. Number of `struct bpf_insn` + * pointed to can be fetched using **bpf_program__insn_cnt()** API. + * + * Keep in mind, libbpf can modify and append/delete BPF program's + * instructions as it processes BPF object file and prepares everything for + * uploading into the kernel. So depending on the point in BPF object + * lifetime, **bpf_program__insns()** can return different sets of + * instructions. As an example, during BPF object load phase BPF program + * instructions will be CO-RE-relocated, BPF subprograms instructions will be + * appended, ldimm64 instructions will have FDs embedded, etc. So instructions + * returned before **bpf_object__load()** and after it might be quite + * different. 
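+ *
+ * A minimal usage sketch (illustrative only, not part of the upstream libbpf
+ * documentation); it assumes "prog" is a valid bpf_program from an opened
+ * object and uses only APIs declared in this header:
+ *
+ * const struct bpf_insn *insns = bpf_program__insns(prog);
+ * size_t i, cnt = bpf_program__insn_cnt(prog);
+ *
+ * for (i = 0; i < cnt; i++)
+ * printf("insn #%zu: opcode 0x%02x\n", i, insns[i].code);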
+ */ +LIBBPF_API const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog); +/** + * @brief **bpf_program__insn_cnt()** returns number of `struct bpf_insn`'s + * that form specified BPF program. + * @param prog BPF program for which to return number of BPF instructions + * + * See **bpf_program__insns()** documentation for notes on how libbpf can + * change instructions and their count during different phases of + * **bpf_object** lifetime. + */ +LIBBPF_API size_t bpf_program__insn_cnt(const struct bpf_program *prog); + +LIBBPF_DEPRECATED_SINCE(0, 6, "use bpf_object__load() instead") +LIBBPF_API int bpf_program__load(struct bpf_program *prog, const char *license, __u32 kern_version); +LIBBPF_API int bpf_program__fd(const struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 7, "multi-instance bpf_program support is deprecated") +LIBBPF_API int bpf_program__pin_instance(struct bpf_program *prog, + const char *path, + int instance); +LIBBPF_DEPRECATED_SINCE(0, 7, "multi-instance bpf_program support is deprecated") +LIBBPF_API int bpf_program__unpin_instance(struct bpf_program *prog, + const char *path, + int instance); + +/** + * @brief **bpf_program__pin()** pins the BPF program to a file + * in the BPF FS specified by a path. This increments the programs + * reference count, allowing it to stay loaded after the process + * which loaded it has exited. + * + * @param prog BPF program to pin, must already be loaded + * @param path file path in a BPF file system + * @return 0, on success; negative error code, otherwise + */ +LIBBPF_API int bpf_program__pin(struct bpf_program *prog, const char *path); + +/** + * @brief **bpf_program__unpin()** unpins the BPF program from a file + * in the BPFFS specified by a path. This decrements the programs + * reference count. + * + * The file pinning the BPF program can also be unlinked by a different + * process in which case this function will return an error. + * + * @param prog BPF program to unpin + * @param path file path to the pin in a BPF file system + * @return 0, on success; negative error code, otherwise + */ +LIBBPF_API int bpf_program__unpin(struct bpf_program *prog, const char *path); +LIBBPF_API void bpf_program__unload(struct bpf_program *prog); + +struct bpf_link; + +LIBBPF_API struct bpf_link *bpf_link__open(const char *path); +LIBBPF_API int bpf_link__fd(const struct bpf_link *link); +LIBBPF_API const char *bpf_link__pin_path(const struct bpf_link *link); +/** + * @brief **bpf_link__pin()** pins the BPF link to a file + * in the BPF FS specified by a path. This increments the links + * reference count, allowing it to stay loaded after the process + * which loaded it has exited. + * + * @param link BPF link to pin, must already be loaded + * @param path file path in a BPF file system + * @return 0, on success; negative error code, otherwise + */ + +LIBBPF_API int bpf_link__pin(struct bpf_link *link, const char *path); + +/** + * @brief **bpf_link__unpin()** unpins the BPF link from a file + * in the BPFFS specified by a path. This decrements the links + * reference count. + * + * The file pinning the BPF link can also be unlinked by a different + * process in which case this function will return an error. 
+ * + * @param prog BPF program to unpin + * @param path file path to the pin in a BPF file system + * @return 0, on success; negative error code, otherwise + */ +LIBBPF_API int bpf_link__unpin(struct bpf_link *link); +LIBBPF_API int bpf_link__update_program(struct bpf_link *link, + struct bpf_program *prog); +LIBBPF_API void bpf_link__disconnect(struct bpf_link *link); +LIBBPF_API int bpf_link__detach(struct bpf_link *link); +LIBBPF_API int bpf_link__destroy(struct bpf_link *link); + +/** + * @brief **bpf_program__attach()** is a generic function for attaching + * a BPF program based on auto-detection of program type, attach type, + * and extra paremeters, where applicable. + * + * @param prog BPF program to attach + * @return Reference to the newly created BPF link; or NULL is returned on error, + * error code is stored in errno + * + * This is supported for: + * - kprobe/kretprobe (depends on SEC() definition) + * - uprobe/uretprobe (depends on SEC() definition) + * - tracepoint + * - raw tracepoint + * - tracing programs (typed raw TP/fentry/fexit/fmod_ret) + */ +LIBBPF_API struct bpf_link * +bpf_program__attach(const struct bpf_program *prog); + +struct bpf_perf_event_opts { + /* size of this struct, for forward/backward compatiblity */ + size_t sz; + /* custom user-provided value fetchable through bpf_get_attach_cookie() */ + __u64 bpf_cookie; +}; +#define bpf_perf_event_opts__last_field bpf_cookie + +LIBBPF_API struct bpf_link * +bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd); + +LIBBPF_API struct bpf_link * +bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd, + const struct bpf_perf_event_opts *opts); + +struct bpf_kprobe_opts { + /* size of this struct, for forward/backward compatiblity */ + size_t sz; + /* custom user-provided value fetchable through bpf_get_attach_cookie() */ + __u64 bpf_cookie; + /* function's offset to install kprobe to */ + size_t offset; + /* kprobe is return probe */ + bool retprobe; + size_t :0; +}; +#define bpf_kprobe_opts__last_field retprobe + +LIBBPF_API struct bpf_link * +bpf_program__attach_kprobe(const struct bpf_program *prog, bool retprobe, + const char *func_name); +LIBBPF_API struct bpf_link * +bpf_program__attach_kprobe_opts(const struct bpf_program *prog, + const char *func_name, + const struct bpf_kprobe_opts *opts); + +struct bpf_kprobe_multi_opts { + /* size of this struct, for forward/backward compatibility */ + size_t sz; + /* array of function symbols to attach */ + const char **syms; + /* array of function addresses to attach */ + const unsigned long *addrs; + /* array of user-provided values fetchable through bpf_get_attach_cookie */ + const __u64 *cookies; + /* number of elements in syms/addrs/cookies arrays */ + size_t cnt; + /* create return kprobes */ + bool retprobe; + size_t :0; +}; + +#define bpf_kprobe_multi_opts__last_field retprobe + +LIBBPF_API struct bpf_link * +bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog, + const char *pattern, + const struct bpf_kprobe_multi_opts *opts); + +struct bpf_uprobe_opts { + /* size of this struct, for forward/backward compatiblity */ + size_t sz; + /* offset of kernel reference counted USDT semaphore, added in + * a6ca88b241d5 ("trace_uprobe: support reference counter in fd-based uprobe") + */ + size_t ref_ctr_offset; + /* custom user-provided value fetchable through bpf_get_attach_cookie() */ + __u64 bpf_cookie; + /* uprobe is return probe, invoked at function return time */ + bool retprobe; + /* Function name to attach 
to. Could be an unqualified ("abc") or library-qualified + * "abc@LIBXYZ" name. To specify function entry, func_name should be set while + * func_offset argument to bpf_prog__attach_uprobe_opts() should be 0. To trace an + * offset within a function, specify func_name and use func_offset argument to specify + * offset within the function. Shared library functions must specify the shared library + * binary_path. + */ + const char *func_name; + size_t :0; +}; +#define bpf_uprobe_opts__last_field func_name + +/** + * @brief **bpf_program__attach_uprobe()** attaches a BPF program + * to the userspace function which is found by binary path and + * offset. You can optionally specify a particular proccess to attach + * to. You can also optionally attach the program to the function + * exit instead of entry. + * + * @param prog BPF program to attach + * @param retprobe Attach to function exit + * @param pid Process ID to attach the uprobe to, 0 for self (own process), + * -1 for all processes + * @param binary_path Path to binary that contains the function symbol + * @param func_offset Offset within the binary of the function symbol + * @return Reference to the newly created BPF link; or NULL is returned on error, + * error code is stored in errno + */ +LIBBPF_API struct bpf_link * +bpf_program__attach_uprobe(const struct bpf_program *prog, bool retprobe, + pid_t pid, const char *binary_path, + size_t func_offset); + +/** + * @brief **bpf_program__attach_uprobe_opts()** is just like + * bpf_program__attach_uprobe() except with a options struct + * for various configurations. + * + * @param prog BPF program to attach + * @param pid Process ID to attach the uprobe to, 0 for self (own process), + * -1 for all processes + * @param binary_path Path to binary that contains the function symbol + * @param func_offset Offset within the binary of the function symbol + * @param opts Options for altering program attachment + * @return Reference to the newly created BPF link; or NULL is returned on error, + * error code is stored in errno + */ +LIBBPF_API struct bpf_link * +bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid, + const char *binary_path, size_t func_offset, + const struct bpf_uprobe_opts *opts); + +struct bpf_usdt_opts { + /* size of this struct, for forward/backward compatibility */ + size_t sz; + /* custom user-provided value accessible through usdt_cookie() */ + __u64 usdt_cookie; + size_t :0; +}; +#define bpf_usdt_opts__last_field usdt_cookie + +/** + * @brief **bpf_program__attach_usdt()** is just like + * bpf_program__attach_uprobe_opts() except it covers USDT (User-space + * Statically Defined Tracepoint) attachment, instead of attaching to + * user-space function entry or exit. 
+ * + * @param prog BPF program to attach + * @param pid Process ID to attach the uprobe to, 0 for self (own process), + * -1 for all processes + * @param binary_path Path to binary that contains provided USDT probe + * @param usdt_provider USDT provider name + * @param usdt_name USDT probe name + * @param opts Options for altering program attachment + * @return Reference to the newly created BPF link; or NULL is returned on error, + * error code is stored in errno + */ +LIBBPF_API struct bpf_link * +bpf_program__attach_usdt(const struct bpf_program *prog, + pid_t pid, const char *binary_path, + const char *usdt_provider, const char *usdt_name, + const struct bpf_usdt_opts *opts); + +struct bpf_tracepoint_opts { + /* size of this struct, for forward/backward compatiblity */ + size_t sz; + /* custom user-provided value fetchable through bpf_get_attach_cookie() */ + __u64 bpf_cookie; +}; +#define bpf_tracepoint_opts__last_field bpf_cookie + +LIBBPF_API struct bpf_link * +bpf_program__attach_tracepoint(const struct bpf_program *prog, + const char *tp_category, + const char *tp_name); +LIBBPF_API struct bpf_link * +bpf_program__attach_tracepoint_opts(const struct bpf_program *prog, + const char *tp_category, + const char *tp_name, + const struct bpf_tracepoint_opts *opts); + +LIBBPF_API struct bpf_link * +bpf_program__attach_raw_tracepoint(const struct bpf_program *prog, + const char *tp_name); +LIBBPF_API struct bpf_link * +bpf_program__attach_trace(const struct bpf_program *prog); +LIBBPF_API struct bpf_link * +bpf_program__attach_lsm(const struct bpf_program *prog); +LIBBPF_API struct bpf_link * +bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd); +LIBBPF_API struct bpf_link * +bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd); +LIBBPF_API struct bpf_link * +bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex); +LIBBPF_API struct bpf_link * +bpf_program__attach_freplace(const struct bpf_program *prog, + int target_fd, const char *attach_func_name); + +struct bpf_map; + +LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map); + +struct bpf_iter_attach_opts { + size_t sz; /* size of this struct for forward/backward compatibility */ + union bpf_iter_link_info *link_info; + __u32 link_info_len; +}; +#define bpf_iter_attach_opts__last_field link_info_len + +LIBBPF_API struct bpf_link * +bpf_program__attach_iter(const struct bpf_program *prog, + const struct bpf_iter_attach_opts *opts); + +/* + * Libbpf allows callers to adjust BPF programs before being loaded + * into kernel. One program in an object file can be transformed into + * multiple variants to be attached to different hooks. + * + * bpf_program_prep_t, bpf_program__set_prep and bpf_program__nth_fd + * form an API for this purpose. + * + * - bpf_program_prep_t: + * Defines a 'preprocessor', which is a caller defined function + * passed to libbpf through bpf_program__set_prep(), and will be + * called before program is loaded. The processor should adjust + * the program one time for each instance according to the instance id + * passed to it. + * + * - bpf_program__set_prep: + * Attaches a preprocessor to a BPF program. The number of instances + * that should be created is also passed through this function. + * + * - bpf_program__nth_fd: + * After the program is loaded, get resulting FD of a given instance + * of the BPF program. 
+ * + * If bpf_program__set_prep() is not used, the program would be loaded + * without adjustment during bpf_object__load(). The program has only + * one instance. In this case bpf_program__fd(prog) is equal to + * bpf_program__nth_fd(prog, 0). + */ +struct bpf_prog_prep_result { + /* + * If not NULL, load new instruction array. + * If set to NULL, don't load this instance. + */ + struct bpf_insn *new_insn_ptr; + int new_insn_cnt; + + /* If not NULL, result FD is written to it. */ + int *pfd; +}; + +/* + * Parameters of bpf_program_prep_t: + * - prog: The bpf_program being loaded. + * - n: Index of instance being generated. + * - insns: BPF instructions array. + * - insns_cnt:Number of instructions in insns. + * - res: Output parameter, result of transformation. + * + * Return value: + * - Zero: pre-processing success. + * - Non-zero: pre-processing error, stop loading. + */ +typedef int (*bpf_program_prep_t)(struct bpf_program *prog, int n, + struct bpf_insn *insns, int insns_cnt, + struct bpf_prog_prep_result *res); + +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__insns() for getting bpf_program instructions") +LIBBPF_API int bpf_program__set_prep(struct bpf_program *prog, int nr_instance, + bpf_program_prep_t prep); + +LIBBPF_DEPRECATED_SINCE(0, 7, "multi-instance bpf_program support is deprecated") +LIBBPF_API int bpf_program__nth_fd(const struct bpf_program *prog, int n); + +/* + * Adjust type of BPF program. Default is kprobe. + */ +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead") +LIBBPF_API int bpf_program__set_socket_filter(struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead") +LIBBPF_API int bpf_program__set_tracepoint(struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead") +LIBBPF_API int bpf_program__set_raw_tracepoint(struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead") +LIBBPF_API int bpf_program__set_kprobe(struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead") +LIBBPF_API int bpf_program__set_lsm(struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead") +LIBBPF_API int bpf_program__set_sched_cls(struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead") +LIBBPF_API int bpf_program__set_sched_act(struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead") +LIBBPF_API int bpf_program__set_xdp(struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead") +LIBBPF_API int bpf_program__set_perf_event(struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead") +LIBBPF_API int bpf_program__set_tracing(struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead") +LIBBPF_API int bpf_program__set_struct_ops(struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead") +LIBBPF_API int bpf_program__set_extension(struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead") +LIBBPF_API int bpf_program__set_sk_lookup(struct bpf_program *prog); + +LIBBPF_API enum bpf_prog_type bpf_program__type(const struct bpf_program *prog); + +/** + * @brief **bpf_program__set_type()** sets the program + * type of the passed BPF program. 
+ * @param prog BPF program to set the program type for + * @param type program type to set the BPF map to have + * @return error code; or 0 if no error. An error occurs + * if the object is already loaded. + * + * This must be called before the BPF object is loaded, + * otherwise it has no effect and an error is returned. + */ +LIBBPF_API int bpf_program__set_type(struct bpf_program *prog, + enum bpf_prog_type type); + +LIBBPF_API enum bpf_attach_type +bpf_program__expected_attach_type(const struct bpf_program *prog); + +/** + * @brief **bpf_program__set_expected_attach_type()** sets the + * attach type of the passed BPF program. This is used for + * auto-detection of attachment when programs are loaded. + * @param prog BPF program to set the attach type for + * @param type attach type to set the BPF map to have + * @return error code; or 0 if no error. An error occurs + * if the object is already loaded. + * + * This must be called before the BPF object is loaded, + * otherwise it has no effect and an error is returned. + */ +LIBBPF_API int +bpf_program__set_expected_attach_type(struct bpf_program *prog, + enum bpf_attach_type type); + +LIBBPF_API __u32 bpf_program__flags(const struct bpf_program *prog); +LIBBPF_API int bpf_program__set_flags(struct bpf_program *prog, __u32 flags); + +/* Per-program log level and log buffer getters/setters. + * See bpf_object_open_opts comments regarding log_level and log_buf + * interactions. + */ +LIBBPF_API __u32 bpf_program__log_level(const struct bpf_program *prog); +LIBBPF_API int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level); +LIBBPF_API const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size); +LIBBPF_API int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size); + +/** + * @brief **bpf_program__set_attach_target()** sets BTF-based attach target + * for supported BPF program types: + * - BTF-aware raw tracepoints (tp_btf); + * - fentry/fexit/fmod_ret; + * - lsm; + * - freplace. + * @param prog BPF program to set the attach type for + * @param type attach type to set the BPF map to have + * @return error code; or 0 if no error occurred. 
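+ *
+ * A minimal usage sketch (illustrative only, not part of the upstream libbpf
+ * documentation); "tcp_v4_connect" is just an example kernel function, and
+ * "obj"/"prog" are assumed to come from a previously opened BPF object that
+ * has not been loaded yet:
+ *
+ * err = bpf_program__set_attach_target(prog, 0, "tcp_v4_connect");
+ * if (err)
+ * goto cleanup;
+ * err = bpf_object__load(obj);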
+ */ +LIBBPF_API int +bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd, + const char *attach_func_name); + +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead") +LIBBPF_API bool bpf_program__is_socket_filter(const struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead") +LIBBPF_API bool bpf_program__is_tracepoint(const struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead") +LIBBPF_API bool bpf_program__is_raw_tracepoint(const struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead") +LIBBPF_API bool bpf_program__is_kprobe(const struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead") +LIBBPF_API bool bpf_program__is_lsm(const struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead") +LIBBPF_API bool bpf_program__is_sched_cls(const struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead") +LIBBPF_API bool bpf_program__is_sched_act(const struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead") +LIBBPF_API bool bpf_program__is_xdp(const struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead") +LIBBPF_API bool bpf_program__is_perf_event(const struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead") +LIBBPF_API bool bpf_program__is_tracing(const struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead") +LIBBPF_API bool bpf_program__is_struct_ops(const struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead") +LIBBPF_API bool bpf_program__is_extension(const struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead") +LIBBPF_API bool bpf_program__is_sk_lookup(const struct bpf_program *prog); + +/* + * No need for __attribute__((packed)), all members of 'bpf_map_def' + * are all aligned. In addition, using __attribute__((packed)) + * would trigger a -Wpacked warning message, and lead to an error + * if -Werror is set. + */ +struct bpf_map_def { + unsigned int type; + unsigned int key_size; + unsigned int value_size; + unsigned int max_entries; + unsigned int map_flags; +}; + +/** + * @brief **bpf_object__find_map_by_name()** returns BPF map of + * the given name, if it exists within the passed BPF object + * @param obj BPF object + * @param name name of the BPF map + * @return BPF map instance, if such map exists within the BPF object; + * or NULL otherwise. + */ +LIBBPF_API struct bpf_map * +bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name); + +LIBBPF_API int +bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name); + +/* + * Get bpf_map through the offset of corresponding struct bpf_map_def + * in the BPF object file. 
+ */ +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_object__find_map_by_name() instead") +struct bpf_map * +bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset); + +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__next_map() instead") +struct bpf_map *bpf_map__next(const struct bpf_map *map, const struct bpf_object *obj); +LIBBPF_API struct bpf_map * +bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *map); + +#define bpf_object__for_each_map(pos, obj) \ + for ((pos) = bpf_object__next_map((obj), NULL); \ + (pos) != NULL; \ + (pos) = bpf_object__next_map((obj), (pos))) +#define bpf_map__for_each bpf_object__for_each_map + +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__prev_map() instead") +struct bpf_map *bpf_map__prev(const struct bpf_map *map, const struct bpf_object *obj); +LIBBPF_API struct bpf_map * +bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *map); + +/** + * @brief **bpf_map__fd()** gets the file descriptor of the passed + * BPF map + * @param map the BPF map instance + * @return the file descriptor; or -EINVAL in case of an error + */ +LIBBPF_API int bpf_map__fd(const struct bpf_map *map); +LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd); +/* get map definition */ +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 8, "use appropriate getters or setters instead") +const struct bpf_map_def *bpf_map__def(const struct bpf_map *map); +/* get map name */ +LIBBPF_API const char *bpf_map__name(const struct bpf_map *map); +/* get/set map type */ +LIBBPF_API enum bpf_map_type bpf_map__type(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type); +/* get/set map size (max_entries) */ +LIBBPF_API __u32 bpf_map__max_entries(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_map__set_max_entries() instead") +LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries); +/* get/set map flags */ +LIBBPF_API __u32 bpf_map__map_flags(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags); +/* get/set map NUMA node */ +LIBBPF_API __u32 bpf_map__numa_node(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node); +/* get/set map key size */ +LIBBPF_API __u32 bpf_map__key_size(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_key_size(struct bpf_map *map, __u32 size); +/* get/set map value size */ +LIBBPF_API __u32 bpf_map__value_size(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_value_size(struct bpf_map *map, __u32 size); +/* get map key/value BTF type IDs */ +LIBBPF_API __u32 bpf_map__btf_key_type_id(const struct bpf_map *map); +LIBBPF_API __u32 bpf_map__btf_value_type_id(const struct bpf_map *map); +/* get/set map if_index */ +LIBBPF_API __u32 bpf_map__ifindex(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex); +/* get/set map map_extra flags */ +LIBBPF_API __u64 bpf_map__map_extra(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra); + +typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *); +LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated") +LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv, + bpf_map_clear_priv_t clear_priv); +LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is 
deprecated") +LIBBPF_API void *bpf_map__priv(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map, + const void *data, size_t size); +LIBBPF_API const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_map__type() instead") +LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map); + +/** + * @brief **bpf_map__is_internal()** tells the caller whether or not the + * passed map is a special map created by libbpf automatically for things like + * global variables, __ksym externs, Kconfig values, etc + * @param map the bpf_map + * @return true, if the map is an internal map; false, otherwise + */ +LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path); +LIBBPF_API const char *bpf_map__pin_path(const struct bpf_map *map); +LIBBPF_API bool bpf_map__is_pinned(const struct bpf_map *map); +LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path); +LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path); + +LIBBPF_API int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd); +LIBBPF_API struct bpf_map *bpf_map__inner_map(struct bpf_map *map); + +/** + * @brief **libbpf_get_error()** extracts the error code from the passed + * pointer + * @param ptr pointer returned from libbpf API function + * @return error code; or 0 if no error occured + * + * Many libbpf API functions which return pointers have logic to encode error + * codes as pointers, and do not return NULL. Meaning **libbpf_get_error()** + * should be used on the return value from these functions immediately after + * calling the API function, with no intervening calls that could clobber the + * `errno` variable. Consult the individual functions documentation to verify + * if this logic applies should be used. + * + * For these API functions, if `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` + * is enabled, NULL is returned on error instead. + * + * If ptr is NULL, then errno should be already set by the failing + * API, because libbpf never returns NULL on success and it now always + * sets errno on error. 
+ * + * Example usage: + * + * struct perf_buffer *pb; + * + * pb = perf_buffer__new(bpf_map__fd(obj->maps.events), PERF_BUFFER_PAGES, &opts); + * err = libbpf_get_error(pb); + * if (err) { + * pb = NULL; + * fprintf(stderr, "failed to open perf buffer: %d\n", err); + * goto cleanup; + * } + */ +LIBBPF_API long libbpf_get_error(const void *ptr); + +struct bpf_prog_load_attr { + const char *file; + enum bpf_prog_type prog_type; + enum bpf_attach_type expected_attach_type; + int ifindex; + int log_level; + int prog_flags; +}; + +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_object__open() and bpf_object__load() instead") +LIBBPF_API int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, + struct bpf_object **pobj, int *prog_fd); +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__open() and bpf_object__load() instead") +LIBBPF_API int bpf_prog_load_deprecated(const char *file, enum bpf_prog_type type, + struct bpf_object **pobj, int *prog_fd); + +/* XDP related API */ +struct xdp_link_info { + __u32 prog_id; + __u32 drv_prog_id; + __u32 hw_prog_id; + __u32 skb_prog_id; + __u8 attach_mode; +}; + +struct bpf_xdp_set_link_opts { + size_t sz; + int old_fd; + size_t :0; +}; +#define bpf_xdp_set_link_opts__last_field old_fd + +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_xdp_attach() instead") +LIBBPF_API int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_xdp_attach() instead") +LIBBPF_API int bpf_set_link_xdp_fd_opts(int ifindex, int fd, __u32 flags, + const struct bpf_xdp_set_link_opts *opts); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_xdp_query_id() instead") +LIBBPF_API int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_xdp_query() instead") +LIBBPF_API int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info, + size_t info_size, __u32 flags); + +struct bpf_xdp_attach_opts { + size_t sz; + int old_prog_fd; + size_t :0; +}; +#define bpf_xdp_attach_opts__last_field old_prog_fd + +struct bpf_xdp_query_opts { + size_t sz; + __u32 prog_id; /* output */ + __u32 drv_prog_id; /* output */ + __u32 hw_prog_id; /* output */ + __u32 skb_prog_id; /* output */ + __u8 attach_mode; /* output */ + size_t :0; +}; +#define bpf_xdp_query_opts__last_field attach_mode + +LIBBPF_API int bpf_xdp_attach(int ifindex, int prog_fd, __u32 flags, + const struct bpf_xdp_attach_opts *opts); +LIBBPF_API int bpf_xdp_detach(int ifindex, __u32 flags, + const struct bpf_xdp_attach_opts *opts); +LIBBPF_API int bpf_xdp_query(int ifindex, int flags, struct bpf_xdp_query_opts *opts); +LIBBPF_API int bpf_xdp_query_id(int ifindex, int flags, __u32 *prog_id); + +/* TC related API */ +enum bpf_tc_attach_point { + BPF_TC_INGRESS = 1 << 0, + BPF_TC_EGRESS = 1 << 1, + BPF_TC_CUSTOM = 1 << 2, +}; + +#define BPF_TC_PARENT(a, b) \ + ((((a) << 16) & 0xFFFF0000U) | ((b) & 0x0000FFFFU)) + +enum bpf_tc_flags { + BPF_TC_F_REPLACE = 1 << 0, +}; + +struct bpf_tc_hook { + size_t sz; + int ifindex; + enum bpf_tc_attach_point attach_point; + __u32 parent; + size_t :0; +}; +#define bpf_tc_hook__last_field parent + +struct bpf_tc_opts { + size_t sz; + int prog_fd; + __u32 flags; + __u32 prog_id; + __u32 handle; + __u32 priority; + size_t :0; +}; +#define bpf_tc_opts__last_field priority + +LIBBPF_API int bpf_tc_hook_create(struct bpf_tc_hook *hook); +LIBBPF_API int bpf_tc_hook_destroy(struct bpf_tc_hook *hook); +LIBBPF_API int bpf_tc_attach(const struct bpf_tc_hook *hook, + struct bpf_tc_opts *opts); +LIBBPF_API int bpf_tc_detach(const struct bpf_tc_hook 
*hook, + const struct bpf_tc_opts *opts); +LIBBPF_API int bpf_tc_query(const struct bpf_tc_hook *hook, + struct bpf_tc_opts *opts); + +/* Ring buffer APIs */ +struct ring_buffer; + +typedef int (*ring_buffer_sample_fn)(void *ctx, void *data, size_t size); + +struct ring_buffer_opts { + size_t sz; /* size of this struct, for forward/backward compatiblity */ +}; + +#define ring_buffer_opts__last_field sz + +LIBBPF_API struct ring_buffer * +ring_buffer__new(int map_fd, ring_buffer_sample_fn sample_cb, void *ctx, + const struct ring_buffer_opts *opts); +LIBBPF_API void ring_buffer__free(struct ring_buffer *rb); +LIBBPF_API int ring_buffer__add(struct ring_buffer *rb, int map_fd, + ring_buffer_sample_fn sample_cb, void *ctx); +LIBBPF_API int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms); +LIBBPF_API int ring_buffer__consume(struct ring_buffer *rb); +LIBBPF_API int ring_buffer__epoll_fd(const struct ring_buffer *rb); + +/* Perf buffer APIs */ +struct perf_buffer; + +typedef void (*perf_buffer_sample_fn)(void *ctx, int cpu, + void *data, __u32 size); +typedef void (*perf_buffer_lost_fn)(void *ctx, int cpu, __u64 cnt); + +/* common use perf buffer options */ +struct perf_buffer_opts { + union { + size_t sz; + struct { /* DEPRECATED: will be removed in v1.0 */ + /* if specified, sample_cb is called for each sample */ + perf_buffer_sample_fn sample_cb; + /* if specified, lost_cb is called for each batch of lost samples */ + perf_buffer_lost_fn lost_cb; + /* ctx is provided to sample_cb and lost_cb */ + void *ctx; + }; + }; +}; +#define perf_buffer_opts__last_field sz + +/** + * @brief **perf_buffer__new()** creates BPF perfbuf manager for a specified + * BPF_PERF_EVENT_ARRAY map + * @param map_fd FD of BPF_PERF_EVENT_ARRAY BPF map that will be used by BPF + * code to send data over to user-space + * @param page_cnt number of memory pages allocated for each per-CPU buffer + * @param sample_cb function called on each received data record + * @param lost_cb function called when record loss has occurred + * @param ctx user-provided extra context passed into *sample_cb* and *lost_cb* + * @return a new instance of struct perf_buffer on success, NULL on error with + * *errno* containing an error code + */ +LIBBPF_API struct perf_buffer * +perf_buffer__new(int map_fd, size_t page_cnt, + perf_buffer_sample_fn sample_cb, perf_buffer_lost_fn lost_cb, void *ctx, + const struct perf_buffer_opts *opts); + +LIBBPF_API struct perf_buffer * +perf_buffer__new_v0_6_0(int map_fd, size_t page_cnt, + perf_buffer_sample_fn sample_cb, perf_buffer_lost_fn lost_cb, void *ctx, + const struct perf_buffer_opts *opts); + +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use new variant of perf_buffer__new() instead") +struct perf_buffer *perf_buffer__new_deprecated(int map_fd, size_t page_cnt, + const struct perf_buffer_opts *opts); + +#define perf_buffer__new(...) 
___libbpf_overload(___perf_buffer_new, __VA_ARGS__) +#define ___perf_buffer_new6(map_fd, page_cnt, sample_cb, lost_cb, ctx, opts) \ + perf_buffer__new(map_fd, page_cnt, sample_cb, lost_cb, ctx, opts) +#define ___perf_buffer_new3(map_fd, page_cnt, opts) \ + perf_buffer__new_deprecated(map_fd, page_cnt, opts) + +enum bpf_perf_event_ret { + LIBBPF_PERF_EVENT_DONE = 0, + LIBBPF_PERF_EVENT_ERROR = -1, + LIBBPF_PERF_EVENT_CONT = -2, +}; + +struct perf_event_header; + +typedef enum bpf_perf_event_ret +(*perf_buffer_event_fn)(void *ctx, int cpu, struct perf_event_header *event); + +/* raw perf buffer options, giving most power and control */ +struct perf_buffer_raw_opts { + union { + struct { + size_t sz; + long :0; + long :0; + }; + struct { /* DEPRECATED: will be removed in v1.0 */ + /* perf event attrs passed directly into perf_event_open() */ + struct perf_event_attr *attr; + /* raw event callback */ + perf_buffer_event_fn event_cb; + /* ctx is provided to event_cb */ + void *ctx; + }; + }; + /* if cpu_cnt == 0, open all on all possible CPUs (up to the number of + * max_entries of given PERF_EVENT_ARRAY map) + */ + int cpu_cnt; + /* if cpu_cnt > 0, cpus is an array of CPUs to open ring buffers on */ + int *cpus; + /* if cpu_cnt > 0, map_keys specify map keys to set per-CPU FDs for */ + int *map_keys; +}; +#define perf_buffer_raw_opts__last_field map_keys + +LIBBPF_API struct perf_buffer * +perf_buffer__new_raw(int map_fd, size_t page_cnt, struct perf_event_attr *attr, + perf_buffer_event_fn event_cb, void *ctx, + const struct perf_buffer_raw_opts *opts); + +LIBBPF_API struct perf_buffer * +perf_buffer__new_raw_v0_6_0(int map_fd, size_t page_cnt, struct perf_event_attr *attr, + perf_buffer_event_fn event_cb, void *ctx, + const struct perf_buffer_raw_opts *opts); + +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use new variant of perf_buffer__new_raw() instead") +struct perf_buffer *perf_buffer__new_raw_deprecated(int map_fd, size_t page_cnt, + const struct perf_buffer_raw_opts *opts); + +#define perf_buffer__new_raw(...) 
___libbpf_overload(___perf_buffer_new_raw, __VA_ARGS__) +#define ___perf_buffer_new_raw6(map_fd, page_cnt, attr, event_cb, ctx, opts) \ + perf_buffer__new_raw(map_fd, page_cnt, attr, event_cb, ctx, opts) +#define ___perf_buffer_new_raw3(map_fd, page_cnt, opts) \ + perf_buffer__new_raw_deprecated(map_fd, page_cnt, opts) + +LIBBPF_API void perf_buffer__free(struct perf_buffer *pb); +LIBBPF_API int perf_buffer__epoll_fd(const struct perf_buffer *pb); +LIBBPF_API int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms); +LIBBPF_API int perf_buffer__consume(struct perf_buffer *pb); +LIBBPF_API int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx); +LIBBPF_API size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb); +LIBBPF_API int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx); + +typedef enum bpf_perf_event_ret + (*bpf_perf_event_print_t)(struct perf_event_header *hdr, + void *private_data); +LIBBPF_DEPRECATED_SINCE(0, 8, "use perf_buffer__poll() or perf_buffer__consume() instead") +LIBBPF_API enum bpf_perf_event_ret +bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size, + void **copy_mem, size_t *copy_size, + bpf_perf_event_print_t fn, void *private_data); + +struct bpf_prog_linfo; +struct bpf_prog_info; + +LIBBPF_API void bpf_prog_linfo__free(struct bpf_prog_linfo *prog_linfo); +LIBBPF_API struct bpf_prog_linfo * +bpf_prog_linfo__new(const struct bpf_prog_info *info); +LIBBPF_API const struct bpf_line_info * +bpf_prog_linfo__lfind_addr_func(const struct bpf_prog_linfo *prog_linfo, + __u64 addr, __u32 func_idx, __u32 nr_skip); +LIBBPF_API const struct bpf_line_info * +bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo, + __u32 insn_off, __u32 nr_skip); + +/* + * Probe for supported system features + * + * Note that running many of these probes in a short amount of time can cause + * the kernel to reach the maximal size of lockable memory allowed for the + * user, causing subsequent probes to fail. In this case, the caller may want + * to adjust that limit with setrlimit(). + */ +LIBBPF_DEPRECATED_SINCE(0, 8, "use libbpf_probe_bpf_prog_type() instead") +LIBBPF_API bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex); +LIBBPF_DEPRECATED_SINCE(0, 8, "use libbpf_probe_bpf_map_type() instead") +LIBBPF_API bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex); +LIBBPF_DEPRECATED_SINCE(0, 8, "use libbpf_probe_bpf_helper() instead") +LIBBPF_API bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type, __u32 ifindex); +LIBBPF_DEPRECATED_SINCE(0, 8, "implement your own or use bpftool for feature detection") +LIBBPF_API bool bpf_probe_large_insn_limit(__u32 ifindex); + +/** + * @brief **libbpf_probe_bpf_prog_type()** detects if host kernel supports + * BPF programs of a given type. + * @param prog_type BPF program type to detect kernel support for + * @param opts reserved for future extensibility, should be NULL + * @return 1, if given program type is supported; 0, if given program type is + * not supported; negative error code if feature detection failed or can't be + * performed + * + * Make sure the process has required set of CAP_* permissions (or runs as + * root) when performing feature checking. + */ +LIBBPF_API int libbpf_probe_bpf_prog_type(enum bpf_prog_type prog_type, const void *opts); +/** + * @brief **libbpf_probe_bpf_map_type()** detects if host kernel supports + * BPF maps of a given type. 
+ * @param map_type BPF map type to detect kernel support for + * @param opts reserved for future extensibility, should be NULL + * @return 1, if given map type is supported; 0, if given map type is + * not supported; negative error code if feature detection failed or can't be + * performed + * + * Make sure the process has required set of CAP_* permissions (or runs as + * root) when performing feature checking. + */ +LIBBPF_API int libbpf_probe_bpf_map_type(enum bpf_map_type map_type, const void *opts); +/** + * @brief **libbpf_probe_bpf_helper()** detects if host kernel supports the + * use of a given BPF helper from specified BPF program type. + * @param prog_type BPF program type used to check the support of BPF helper + * @param helper_id BPF helper ID (enum bpf_func_id) to check support for + * @param opts reserved for future extensibility, should be NULL + * @return 1, if given combination of program type and helper is supported; 0, + * if the combination is not supported; negative error code if feature + * detection for provided input arguments failed or can't be performed + * + * Make sure the process has required set of CAP_* permissions (or runs as + * root) when performing feature checking. + */ +LIBBPF_API int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type, + enum bpf_func_id helper_id, const void *opts); + +/* + * Get bpf_prog_info in continuous memory + * + * struct bpf_prog_info has multiple arrays. The user has option to choose + * arrays to fetch from kernel. The following APIs provide an uniform way to + * fetch these data. All arrays in bpf_prog_info are stored in a single + * continuous memory region. This makes it easy to store the info in a + * file. + * + * Before writing bpf_prog_info_linear to files, it is necessary to + * translate pointers in bpf_prog_info to offsets. Helper functions + * bpf_program__bpil_addr_to_offs() and bpf_program__bpil_offs_to_addr() + * are introduced to switch between pointers and offsets. 
+ * + * Examples: + * # To fetch map_ids and prog_tags: + * __u64 arrays = (1UL << BPF_PROG_INFO_MAP_IDS) | + * (1UL << BPF_PROG_INFO_PROG_TAGS); + * struct bpf_prog_info_linear *info_linear = + * bpf_program__get_prog_info_linear(fd, arrays); + * + * # To save data in file + * bpf_program__bpil_addr_to_offs(info_linear); + * write(f, info_linear, sizeof(*info_linear) + info_linear->data_len); + * + * # To read data from file + * read(f, info_linear, ); + * bpf_program__bpil_offs_to_addr(info_linear); + */ +enum bpf_prog_info_array { + BPF_PROG_INFO_FIRST_ARRAY = 0, + BPF_PROG_INFO_JITED_INSNS = 0, + BPF_PROG_INFO_XLATED_INSNS, + BPF_PROG_INFO_MAP_IDS, + BPF_PROG_INFO_JITED_KSYMS, + BPF_PROG_INFO_JITED_FUNC_LENS, + BPF_PROG_INFO_FUNC_INFO, + BPF_PROG_INFO_LINE_INFO, + BPF_PROG_INFO_JITED_LINE_INFO, + BPF_PROG_INFO_PROG_TAGS, + BPF_PROG_INFO_LAST_ARRAY, +}; + +struct bpf_prog_info_linear { + /* size of struct bpf_prog_info, when the tool is compiled */ + __u32 info_len; + /* total bytes allocated for data, round up to 8 bytes */ + __u32 data_len; + /* which arrays are included in data */ + __u64 arrays; + struct bpf_prog_info info; + __u8 data[]; +}; + +LIBBPF_DEPRECATED_SINCE(0, 6, "use a custom linear prog_info wrapper") +LIBBPF_API struct bpf_prog_info_linear * +bpf_program__get_prog_info_linear(int fd, __u64 arrays); + +LIBBPF_DEPRECATED_SINCE(0, 6, "use a custom linear prog_info wrapper") +LIBBPF_API void +bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear); + +LIBBPF_DEPRECATED_SINCE(0, 6, "use a custom linear prog_info wrapper") +LIBBPF_API void +bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear); + +/** + * @brief **libbpf_num_possible_cpus()** is a helper function to get the + * number of possible CPUs that the host kernel supports and expects. 
+ * @return number of possible CPUs; or error code on failure + * + * Example usage: + * + * int ncpus = libbpf_num_possible_cpus(); + * if (ncpus < 0) { + * // error handling + * } + * long values[ncpus]; + * bpf_map_lookup_elem(per_cpu_map_fd, key, values); + */ +LIBBPF_API int libbpf_num_possible_cpus(void); + +struct bpf_map_skeleton { + const char *name; + struct bpf_map **map; + void **mmaped; +}; + +struct bpf_prog_skeleton { + const char *name; + struct bpf_program **prog; + struct bpf_link **link; +}; + +struct bpf_object_skeleton { + size_t sz; /* size of this struct, for forward/backward compatibility */ + + const char *name; + const void *data; + size_t data_sz; + + struct bpf_object **obj; + + int map_cnt; + int map_skel_sz; /* sizeof(struct bpf_map_skeleton) */ + struct bpf_map_skeleton *maps; + + int prog_cnt; + int prog_skel_sz; /* sizeof(struct bpf_prog_skeleton) */ + struct bpf_prog_skeleton *progs; +}; + +LIBBPF_API int +bpf_object__open_skeleton(struct bpf_object_skeleton *s, + const struct bpf_object_open_opts *opts); +LIBBPF_API int bpf_object__load_skeleton(struct bpf_object_skeleton *s); +LIBBPF_API int bpf_object__attach_skeleton(struct bpf_object_skeleton *s); +LIBBPF_API void bpf_object__detach_skeleton(struct bpf_object_skeleton *s); +LIBBPF_API void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s); + +struct bpf_var_skeleton { + const char *name; + struct bpf_map **map; + void **addr; +}; + +struct bpf_object_subskeleton { + size_t sz; /* size of this struct, for forward/backward compatibility */ + + const struct bpf_object *obj; + + int map_cnt; + int map_skel_sz; /* sizeof(struct bpf_map_skeleton) */ + struct bpf_map_skeleton *maps; + + int prog_cnt; + int prog_skel_sz; /* sizeof(struct bpf_prog_skeleton) */ + struct bpf_prog_skeleton *progs; + + int var_cnt; + int var_skel_sz; /* sizeof(struct bpf_var_skeleton) */ + struct bpf_var_skeleton *vars; +}; + +LIBBPF_API int +bpf_object__open_subskeleton(struct bpf_object_subskeleton *s); +LIBBPF_API void +bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s); + +struct gen_loader_opts { + size_t sz; /* size of this struct, for forward/backward compatiblity */ + const char *data; + const char *insns; + __u32 data_sz; + __u32 insns_sz; +}; + +#define gen_loader_opts__last_field insns_sz +LIBBPF_API int bpf_object__gen_loader(struct bpf_object *obj, + struct gen_loader_opts *opts); + +enum libbpf_tristate { + TRI_NO = 0, + TRI_YES = 1, + TRI_MODULE = 2, +}; + +struct bpf_linker_opts { + /* size of this struct, for forward/backward compatiblity */ + size_t sz; +}; +#define bpf_linker_opts__last_field sz + +struct bpf_linker_file_opts { + /* size of this struct, for forward/backward compatiblity */ + size_t sz; +}; +#define bpf_linker_file_opts__last_field sz + +struct bpf_linker; + +LIBBPF_API struct bpf_linker *bpf_linker__new(const char *filename, struct bpf_linker_opts *opts); +LIBBPF_API int bpf_linker__add_file(struct bpf_linker *linker, + const char *filename, + const struct bpf_linker_file_opts *opts); +LIBBPF_API int bpf_linker__finalize(struct bpf_linker *linker); +LIBBPF_API void bpf_linker__free(struct bpf_linker *linker); + +/* + * Custom handling of BPF program's SEC() definitions + */ + +struct bpf_prog_load_opts; /* defined in bpf.h */ + +/* Called during bpf_object__open() for each recognized BPF program. Callback + * can use various bpf_program__set_*() setters to adjust whatever properties + * are necessary. 
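+ *
+ * A minimal, hypothetical sketch of such a callback (the name my_setup_fn
+ * and the choice of setter are illustrative only, not prescribed by libbpf);
+ * it requests verbose verifier logs for every program matched by the handler:
+ *
+ *	static int my_setup_fn(struct bpf_program *prog, long cookie)
+ *	{
+ *		return bpf_program__set_log_level(prog, 1);
+ *	}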
+ */ +typedef int (*libbpf_prog_setup_fn_t)(struct bpf_program *prog, long cookie); + +/* Called right before libbpf performs bpf_prog_load() to load BPF program + * into the kernel. Callback can adjust opts as necessary. + */ +typedef int (*libbpf_prog_prepare_load_fn_t)(struct bpf_program *prog, + struct bpf_prog_load_opts *opts, long cookie); + +/* Called during skeleton attach or through bpf_program__attach(). If + * auto-attach is not supported, callback should return 0 and set link to + * NULL (it's not considered an error during skeleton attach, but it will be + * an error for bpf_program__attach() calls). On error, error should be + * returned directly and link set to NULL. On success, return 0 and set link + * to a valid struct bpf_link. + */ +typedef int (*libbpf_prog_attach_fn_t)(const struct bpf_program *prog, long cookie, + struct bpf_link **link); + +struct libbpf_prog_handler_opts { + /* size of this struct, for forward/backward compatiblity */ + size_t sz; + /* User-provided value that is passed to prog_setup_fn, + * prog_prepare_load_fn, and prog_attach_fn callbacks. Allows user to + * register one set of callbacks for multiple SEC() definitions and + * still be able to distinguish them, if necessary. For example, + * libbpf itself is using this to pass necessary flags (e.g., + * sleepable flag) to a common internal SEC() handler. + */ + long cookie; + /* BPF program initialization callback (see libbpf_prog_setup_fn_t). + * Callback is optional, pass NULL if it's not necessary. + */ + libbpf_prog_setup_fn_t prog_setup_fn; + /* BPF program loading callback (see libbpf_prog_prepare_load_fn_t). + * Callback is optional, pass NULL if it's not necessary. + */ + libbpf_prog_prepare_load_fn_t prog_prepare_load_fn; + /* BPF program attach callback (see libbpf_prog_attach_fn_t). + * Callback is optional, pass NULL if it's not necessary. + */ + libbpf_prog_attach_fn_t prog_attach_fn; +}; +#define libbpf_prog_handler_opts__last_field prog_attach_fn + +/** + * @brief **libbpf_register_prog_handler()** registers a custom BPF program + * SEC() handler. + * @param sec section prefix for which custom handler is registered + * @param prog_type BPF program type associated with specified section + * @param exp_attach_type Expected BPF attach type associated with specified section + * @param opts optional cookie, callbacks, and other extra options + * @return Non-negative handler ID is returned on success. This handler ID has + * to be passed to *libbpf_unregister_prog_handler()* to unregister such + * custom handler. Negative error code is returned on error. + * + * *sec* defines which SEC() definitions are handled by this custom handler + * registration. *sec* can have few different forms: + * - if *sec* is just a plain string (e.g., "abc"), it will match only + * SEC("abc"). If BPF program specifies SEC("abc/whatever") it will result + * in an error; + * - if *sec* is of the form "abc/", proper SEC() form is + * SEC("abc/something"), where acceptable "something" should be checked by + * *prog_init_fn* callback, if there are additional restrictions; + * - if *sec* is of the form "abc+", it will successfully match both + * SEC("abc") and SEC("abc/whatever") forms; + * - if *sec* is NULL, custom handler is registered for any BPF program that + * doesn't match any of the registered (custom or libbpf's own) SEC() + * handlers. There could be only one such generic custom handler registered + * at any given time. 
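+ *
+ * A hypothetical usage sketch (the "mycustom+" prefix and cookie value are
+ * made up for illustration): register a handler so that both SEC("mycustom")
+ * and SEC("mycustom/whatever") programs are loaded as kprobes:
+ *
+ *	LIBBPF_OPTS(libbpf_prog_handler_opts, handler_opts, .cookie = 1);
+ *	int handler_id;
+ *
+ *	handler_id = libbpf_register_prog_handler("mycustom+",
+ *						   BPF_PROG_TYPE_KPROBE, 0,
+ *						   &handler_opts);
+ *	if (handler_id < 0)
+ *		return handler_id;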
+ * + * All custom handlers (except the one with *sec* == NULL) are processed + * before libbpf's own SEC() handlers. It is allowed to "override" libbpf's + * SEC() handlers by registering custom ones for the same section prefix + * (i.e., it's possible to have custom SEC("perf_event/LLC-load-misses") + * handler). + * + * Note, like much of global libbpf APIs (e.g., libbpf_set_print(), + * libbpf_set_strict_mode(), etc)) these APIs are not thread-safe. User needs + * to ensure synchronization if there is a risk of running this API from + * multiple threads simultaneously. + */ +LIBBPF_API int libbpf_register_prog_handler(const char *sec, + enum bpf_prog_type prog_type, + enum bpf_attach_type exp_attach_type, + const struct libbpf_prog_handler_opts *opts); +/** + * @brief *libbpf_unregister_prog_handler()* unregisters previously registered + * custom BPF program SEC() handler. + * @param handler_id handler ID returned by *libbpf_register_prog_handler()* + * after successful registration + * @return 0 on success, negative error code if handler isn't found + * + * Note, like much of global libbpf APIs (e.g., libbpf_set_print(), + * libbpf_set_strict_mode(), etc)) these APIs are not thread-safe. User needs + * to ensure synchronization if there is a risk of running this API from + * multiple threads simultaneously. + */ +LIBBPF_API int libbpf_unregister_prog_handler(int handler_id); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* __LIBBPF_LIBBPF_H */ diff --git a/pkg/plugin/lib/common/libbpf/_src/libbpf_common.h b/pkg/plugin/lib/common/libbpf/_src/libbpf_common.h new file mode 100644 index 000000000..000e37798 --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/libbpf_common.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ + +/* + * Common user-facing libbpf helpers. + * + * Copyright (c) 2019 Facebook + */ + +#ifndef __LIBBPF_LIBBPF_COMMON_H +#define __LIBBPF_LIBBPF_COMMON_H + +#include +#include "libbpf_version.h" + +#ifndef LIBBPF_API +#define LIBBPF_API __attribute__((visibility("default"))) +#endif + +#define LIBBPF_DEPRECATED(msg) __attribute__((deprecated(msg))) + +/* Mark a symbol as deprecated when libbpf version is >= {major}.{minor} */ +#define LIBBPF_DEPRECATED_SINCE(major, minor, msg) \ + __LIBBPF_MARK_DEPRECATED_ ## major ## _ ## minor \ + (LIBBPF_DEPRECATED("libbpf v" # major "." # minor "+: " msg)) + +#define __LIBBPF_CURRENT_VERSION_GEQ(major, minor) \ + (LIBBPF_MAJOR_VERSION > (major) || \ + (LIBBPF_MAJOR_VERSION == (major) && LIBBPF_MINOR_VERSION >= (minor))) + +/* Add checks for other versions below when planning deprecation of API symbols + * with the LIBBPF_DEPRECATED_SINCE macro. + */ +#if __LIBBPF_CURRENT_VERSION_GEQ(0, 6) +#define __LIBBPF_MARK_DEPRECATED_0_6(X) X +#else +#define __LIBBPF_MARK_DEPRECATED_0_6(X) +#endif +#if __LIBBPF_CURRENT_VERSION_GEQ(0, 7) +#define __LIBBPF_MARK_DEPRECATED_0_7(X) X +#else +#define __LIBBPF_MARK_DEPRECATED_0_7(X) +#endif +#if __LIBBPF_CURRENT_VERSION_GEQ(0, 8) +#define __LIBBPF_MARK_DEPRECATED_0_8(X) X +#else +#define __LIBBPF_MARK_DEPRECATED_0_8(X) +#endif + +/* This set of internal macros allows to do "function overloading" based on + * number of arguments provided by used in backwards-compatible way during the + * transition to libbpf 1.0 + * It's ugly but necessary evil that will be cleaned up when we get to 1.0. + * See bpf_prog_load() overload for example. 
+ */ +#define ___libbpf_cat(A, B) A ## B +#define ___libbpf_select(NAME, NUM) ___libbpf_cat(NAME, NUM) +#define ___libbpf_nth(_1, _2, _3, _4, _5, _6, N, ...) N +#define ___libbpf_cnt(...) ___libbpf_nth(__VA_ARGS__, 6, 5, 4, 3, 2, 1) +#define ___libbpf_overload(NAME, ...) ___libbpf_select(NAME, ___libbpf_cnt(__VA_ARGS__))(__VA_ARGS__) + +/* Helper macro to declare and initialize libbpf options struct + * + * This dance with uninitialized declaration, followed by memset to zero, + * followed by assignment using compound literal syntax is done to preserve + * ability to use a nice struct field initialization syntax and **hopefully** + * have all the padding bytes initialized to zero. It's not guaranteed though, + * when copying literal, that compiler won't copy garbage in literal's padding + * bytes, but that's the best way I've found and it seems to work in practice. + * + * Macro declares opts struct of given type and name, zero-initializes, + * including any extra padding, it with memset() and then assigns initial + * values provided by users in struct initializer-syntax as varargs. + */ +#define LIBBPF_OPTS(TYPE, NAME, ...) \ + struct TYPE NAME = ({ \ + memset(&NAME, 0, sizeof(struct TYPE)); \ + (struct TYPE) { \ + .sz = sizeof(struct TYPE), \ + __VA_ARGS__ \ + }; \ + }) + +#endif /* __LIBBPF_LIBBPF_COMMON_H */ diff --git a/pkg/plugin/lib/common/libbpf/_src/libbpf_errno.c b/pkg/plugin/lib/common/libbpf/_src/libbpf_errno.c new file mode 100644 index 000000000..96f67a772 --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/libbpf_errno.c @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * Copyright (C) 2013-2015 Alexei Starovoitov + * Copyright (C) 2015 Wang Nan + * Copyright (C) 2015 Huawei Inc. + * Copyright (C) 2017 Nicira, Inc. + */ + +#undef _GNU_SOURCE +#include +#include + +#include "libbpf.h" +#include "libbpf_internal.h" + +/* make sure libbpf doesn't use kernel-only integer typedefs */ +#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64 + +#define ERRNO_OFFSET(e) ((e) - __LIBBPF_ERRNO__START) +#define ERRCODE_OFFSET(c) ERRNO_OFFSET(LIBBPF_ERRNO__##c) +#define NR_ERRNO (__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START) + +static const char *libbpf_strerror_table[NR_ERRNO] = { + [ERRCODE_OFFSET(LIBELF)] = "Something wrong in libelf", + [ERRCODE_OFFSET(FORMAT)] = "BPF object format invalid", + [ERRCODE_OFFSET(KVERSION)] = "'version' section incorrect or lost", + [ERRCODE_OFFSET(ENDIAN)] = "Endian mismatch", + [ERRCODE_OFFSET(INTERNAL)] = "Internal error in libbpf", + [ERRCODE_OFFSET(RELOC)] = "Relocation failed", + [ERRCODE_OFFSET(VERIFY)] = "Kernel verifier blocks program loading", + [ERRCODE_OFFSET(PROG2BIG)] = "Program too big", + [ERRCODE_OFFSET(KVER)] = "Incorrect kernel version", + [ERRCODE_OFFSET(PROGTYPE)] = "Kernel doesn't support this program type", + [ERRCODE_OFFSET(WRNGPID)] = "Wrong pid in netlink message", + [ERRCODE_OFFSET(INVSEQ)] = "Invalid netlink sequence", + [ERRCODE_OFFSET(NLPARSE)] = "Incorrect netlink message parsing", +}; + +int libbpf_strerror(int err, char *buf, size_t size) +{ + if (!buf || !size) + return libbpf_err(-EINVAL); + + err = err > 0 ? 
err : -err; + + if (err < __LIBBPF_ERRNO__START) { + int ret; + + ret = strerror_r(err, buf, size); + buf[size - 1] = '\0'; + return libbpf_err_errno(ret); + } + + if (err < __LIBBPF_ERRNO__END) { + const char *msg; + + msg = libbpf_strerror_table[ERRNO_OFFSET(err)]; + snprintf(buf, size, "%s", msg); + buf[size - 1] = '\0'; + return 0; + } + + snprintf(buf, size, "Unknown libbpf error %d", err); + buf[size - 1] = '\0'; + return libbpf_err(-ENOENT); +} diff --git a/pkg/plugin/lib/common/libbpf/_src/libbpf_internal.h b/pkg/plugin/lib/common/libbpf/_src/libbpf_internal.h new file mode 100644 index 000000000..4abdbe2fe --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/libbpf_internal.h @@ -0,0 +1,583 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ + +/* + * Internal libbpf helpers. + * + * Copyright (c) 2019 Facebook + */ + +#ifndef __LIBBPF_LIBBPF_INTERNAL_H +#define __LIBBPF_LIBBPF_INTERNAL_H + +#include +#include +#include +#include +#include +#include +#include "libbpf_legacy.h" +#include "relo_core.h" + +/* make sure libbpf doesn't use kernel-only integer typedefs */ +#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64 + +/* prevent accidental re-addition of reallocarray() */ +#pragma GCC poison reallocarray + +#include "libbpf.h" +#include "btf.h" + +#ifndef EM_BPF +#define EM_BPF 247 +#endif + +#ifndef R_BPF_64_64 +#define R_BPF_64_64 1 +#endif +#ifndef R_BPF_64_ABS64 +#define R_BPF_64_ABS64 2 +#endif +#ifndef R_BPF_64_ABS32 +#define R_BPF_64_ABS32 3 +#endif +#ifndef R_BPF_64_32 +#define R_BPF_64_32 10 +#endif + +#ifndef SHT_LLVM_ADDRSIG +#define SHT_LLVM_ADDRSIG 0x6FFF4C03 +#endif + +/* if libelf is old and doesn't support mmap(), fall back to read() */ +#ifndef ELF_C_READ_MMAP +#define ELF_C_READ_MMAP ELF_C_READ +#endif + +/* Older libelf all end up in this expression, for both 32 and 64 bit */ +#ifndef ELF64_ST_VISIBILITY +#define ELF64_ST_VISIBILITY(o) ((o) & 0x03) +#endif + +#define BTF_INFO_ENC(kind, kind_flag, vlen) \ + ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN)) +#define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type) +#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \ + ((encoding) << 24 | (bits_offset) << 16 | (nr_bits)) +#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \ + BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \ + BTF_INT_ENC(encoding, bits_offset, bits) +#define BTF_MEMBER_ENC(name, type, bits_offset) (name), (type), (bits_offset) +#define BTF_PARAM_ENC(name, type) (name), (type) +#define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size) +#define BTF_TYPE_FLOAT_ENC(name, sz) \ + BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FLOAT, 0, 0), sz) +#define BTF_TYPE_DECL_TAG_ENC(value, type, component_idx) \ + BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 0), type), (component_idx) +#define BTF_TYPE_TYPE_TAG_ENC(value, type) \ + BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_TYPE_TAG, 0, 0), type) + +#ifndef likely +#define likely(x) __builtin_expect(!!(x), 1) +#endif +#ifndef unlikely +#define unlikely(x) __builtin_expect(!!(x), 0) +#endif +#ifndef min +# define min(x, y) ((x) < (y) ? (x) : (y)) +#endif +#ifndef max +# define max(x, y) ((x) < (y) ? 
(y) : (x)) +#endif +#ifndef offsetofend +# define offsetofend(TYPE, FIELD) \ + (offsetof(TYPE, FIELD) + sizeof(((TYPE *)0)->FIELD)) +#endif +#ifndef __alias +#define __alias(symbol) __attribute__((alias(#symbol))) +#endif + +/* Check whether a string `str` has prefix `pfx`, regardless if `pfx` is + * a string literal known at compilation time or char * pointer known only at + * runtime. + */ +#define str_has_pfx(str, pfx) \ + (strncmp(str, pfx, __builtin_constant_p(pfx) ? sizeof(pfx) - 1 : strlen(pfx)) == 0) + +/* suffix check */ +static inline bool str_has_sfx(const char *str, const char *sfx) +{ + size_t str_len = strlen(str); + size_t sfx_len = strlen(sfx); + + if (sfx_len <= str_len) + return strcmp(str + str_len - sfx_len, sfx); + return false; +} + +/* Symbol versioning is different between static and shared library. + * Properly versioned symbols are needed for shared library, but + * only the symbol of the new version is needed for static library. + * Starting with GNU C 10, use symver attribute instead of .symver assembler + * directive, which works better with GCC LTO builds. + */ +#if defined(SHARED) && defined(__GNUC__) && __GNUC__ >= 10 + +#define DEFAULT_VERSION(internal_name, api_name, version) \ + __attribute__((symver(#api_name "@@" #version))) +#define COMPAT_VERSION(internal_name, api_name, version) \ + __attribute__((symver(#api_name "@" #version))) + +#elif defined(SHARED) + +#define COMPAT_VERSION(internal_name, api_name, version) \ + asm(".symver " #internal_name "," #api_name "@" #version); +#define DEFAULT_VERSION(internal_name, api_name, version) \ + asm(".symver " #internal_name "," #api_name "@@" #version); + +#else /* !SHARED */ + +#define COMPAT_VERSION(internal_name, api_name, version) +#define DEFAULT_VERSION(internal_name, api_name, version) \ + extern typeof(internal_name) api_name \ + __attribute__((alias(#internal_name))); + +#endif + +extern void libbpf_print(enum libbpf_print_level level, + const char *format, ...) + __attribute__((format(printf, 2, 3))); + +#define __pr(level, fmt, ...) \ +do { \ + libbpf_print(level, "libbpf: " fmt, ##__VA_ARGS__); \ +} while (0) + +#define pr_warn(fmt, ...) __pr(LIBBPF_WARN, fmt, ##__VA_ARGS__) +#define pr_info(fmt, ...) __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__) +#define pr_debug(fmt, ...) __pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__) + +#ifndef __has_builtin +#define __has_builtin(x) 0 +#endif + +struct bpf_link { + int (*detach)(struct bpf_link *link); + void (*dealloc)(struct bpf_link *link); + char *pin_path; /* NULL, if not pinned */ + int fd; /* hook FD, -1 if not applicable */ + bool disconnected; +}; + +/* + * Re-implement glibc's reallocarray() for libbpf internal-only use. + * reallocarray(), unfortunately, is not available in all versions of glibc, + * so requires extra feature detection and using reallocarray() stub from + * and COMPAT_NEED_REALLOCARRAY. All this complicates + * build of libbpf unnecessarily and is just a maintenance burden. Instead, + * it's trivial to implement libbpf-specific internal version and use it + * throughout libbpf. 
+ */ +static inline void *libbpf_reallocarray(void *ptr, size_t nmemb, size_t size) +{ + size_t total; + +#if __has_builtin(__builtin_mul_overflow) + if (unlikely(__builtin_mul_overflow(nmemb, size, &total))) + return NULL; +#else + if (size == 0 || nmemb > ULONG_MAX / size) + return NULL; + total = nmemb * size; +#endif + return realloc(ptr, total); +} + +/* Copy up to sz - 1 bytes from zero-terminated src string and ensure that dst + * is zero-terminated string no matter what (unless sz == 0, in which case + * it's a no-op). It's conceptually close to FreeBSD's strlcpy(), but differs + * in what is returned. Given this is internal helper, it's trivial to extend + * this, when necessary. Use this instead of strncpy inside libbpf source code. + */ +static inline void libbpf_strlcpy(char *dst, const char *src, size_t sz) +{ + size_t i; + + if (sz == 0) + return; + + sz--; + for (i = 0; i < sz && src[i]; i++) + dst[i] = src[i]; + dst[i] = '\0'; +} + +__u32 get_kernel_version(void); + +struct btf; +struct btf_type; + +struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id); +const char *btf_kind_str(const struct btf_type *t); +const struct btf_type *skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id); + +static inline enum btf_func_linkage btf_func_linkage(const struct btf_type *t) +{ + return (enum btf_func_linkage)(int)btf_vlen(t); +} + +static inline __u32 btf_type_info(int kind, int vlen, int kflag) +{ + return (kflag << 31) | (kind << 24) | vlen; +} + +enum map_def_parts { + MAP_DEF_MAP_TYPE = 0x001, + MAP_DEF_KEY_TYPE = 0x002, + MAP_DEF_KEY_SIZE = 0x004, + MAP_DEF_VALUE_TYPE = 0x008, + MAP_DEF_VALUE_SIZE = 0x010, + MAP_DEF_MAX_ENTRIES = 0x020, + MAP_DEF_MAP_FLAGS = 0x040, + MAP_DEF_NUMA_NODE = 0x080, + MAP_DEF_PINNING = 0x100, + MAP_DEF_INNER_MAP = 0x200, + MAP_DEF_MAP_EXTRA = 0x400, + + MAP_DEF_ALL = 0x7ff, /* combination of all above */ +}; + +struct btf_map_def { + enum map_def_parts parts; + __u32 map_type; + __u32 key_type_id; + __u32 key_size; + __u32 value_type_id; + __u32 value_size; + __u32 max_entries; + __u32 map_flags; + __u32 numa_node; + __u32 pinning; + __u64 map_extra; +}; + +int parse_btf_map_def(const char *map_name, struct btf *btf, + const struct btf_type *def_t, bool strict, + struct btf_map_def *map_def, struct btf_map_def *inner_def); + +void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz, + size_t cur_cnt, size_t max_cnt, size_t add_cnt); +int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt); + +static inline bool libbpf_is_mem_zeroed(const char *p, ssize_t len) +{ + while (len > 0) { + if (*p) + return false; + p++; + len--; + } + return true; +} + +static inline bool libbpf_validate_opts(const char *opts, + size_t opts_sz, size_t user_sz, + const char *type_name) +{ + if (user_sz < sizeof(size_t)) { + pr_warn("%s size (%zu) is too small\n", type_name, user_sz); + return false; + } + if (!libbpf_is_mem_zeroed(opts + opts_sz, (ssize_t)user_sz - opts_sz)) { + pr_warn("%s has non-zero extra bytes\n", type_name); + return false; + } + return true; +} + +#define OPTS_VALID(opts, type) \ + (!(opts) || libbpf_validate_opts((const char *)opts, \ + offsetofend(struct type, \ + type##__last_field), \ + (opts)->sz, #type)) +#define OPTS_HAS(opts, field) \ + ((opts) && opts->sz >= offsetofend(typeof(*(opts)), field)) +#define OPTS_GET(opts, field, fallback_value) \ + (OPTS_HAS(opts, field) ? 
(opts)->field : fallback_value) +#define OPTS_SET(opts, field, value) \ + do { \ + if (OPTS_HAS(opts, field)) \ + (opts)->field = value; \ + } while (0) + +#define OPTS_ZEROED(opts, last_nonzero_field) \ +({ \ + ssize_t __off = offsetofend(typeof(*(opts)), last_nonzero_field); \ + !(opts) || libbpf_is_mem_zeroed((const void *)opts + __off, \ + (opts)->sz - __off); \ +}) + +enum kern_feature_id { + /* v4.14: kernel support for program & map names. */ + FEAT_PROG_NAME, + /* v5.2: kernel support for global data sections. */ + FEAT_GLOBAL_DATA, + /* BTF support */ + FEAT_BTF, + /* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */ + FEAT_BTF_FUNC, + /* BTF_KIND_VAR and BTF_KIND_DATASEC support */ + FEAT_BTF_DATASEC, + /* BTF_FUNC_GLOBAL is supported */ + FEAT_BTF_GLOBAL_FUNC, + /* BPF_F_MMAPABLE is supported for arrays */ + FEAT_ARRAY_MMAP, + /* kernel support for expected_attach_type in BPF_PROG_LOAD */ + FEAT_EXP_ATTACH_TYPE, + /* bpf_probe_read_{kernel,user}[_str] helpers */ + FEAT_PROBE_READ_KERN, + /* BPF_PROG_BIND_MAP is supported */ + FEAT_PROG_BIND_MAP, + /* Kernel support for module BTFs */ + FEAT_MODULE_BTF, + /* BTF_KIND_FLOAT support */ + FEAT_BTF_FLOAT, + /* BPF perf link support */ + FEAT_PERF_LINK, + /* BTF_KIND_DECL_TAG support */ + FEAT_BTF_DECL_TAG, + /* BTF_KIND_TYPE_TAG support */ + FEAT_BTF_TYPE_TAG, + /* memcg-based accounting for BPF maps and progs */ + FEAT_MEMCG_ACCOUNT, + /* BPF cookie (bpf_get_attach_cookie() BPF helper) support */ + FEAT_BPF_COOKIE, + __FEAT_CNT, +}; + +int probe_memcg_account(void); +bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id); +int bump_rlimit_memlock(void); + +int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz); +int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz); +int libbpf__load_raw_btf(const char *raw_types, size_t types_len, + const char *str_sec, size_t str_len); +int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level); + +struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf); +void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type, + const char **prefix, int *kind); + +struct btf_ext_info { + /* + * info points to the individual info section (e.g. func_info and + * line_info) from the .BTF.ext. It does not include the __u32 rec_size. + */ + void *info; + __u32 rec_size; + __u32 len; + /* optional (maintained internally by libbpf) mapping between .BTF.ext + * section and corresponding ELF section. 
This is used to join + * information like CO-RE relocation records with corresponding BPF + * programs defined in ELF sections + */ + __u32 *sec_idxs; + int sec_cnt; +}; + +#define for_each_btf_ext_sec(seg, sec) \ + for (sec = (seg)->info; \ + (void *)sec < (seg)->info + (seg)->len; \ + sec = (void *)sec + sizeof(struct btf_ext_info_sec) + \ + (seg)->rec_size * sec->num_info) + +#define for_each_btf_ext_rec(seg, sec, i, rec) \ + for (i = 0, rec = (void *)&(sec)->data; \ + i < (sec)->num_info; \ + i++, rec = (void *)rec + (seg)->rec_size) + +/* + * The .BTF.ext ELF section layout defined as + * struct btf_ext_header + * func_info subsection + * + * The func_info subsection layout: + * record size for struct bpf_func_info in the func_info subsection + * struct btf_sec_func_info for section #1 + * a list of bpf_func_info records for section #1 + * where struct bpf_func_info mimics one in include/uapi/linux/bpf.h + * but may not be identical + * struct btf_sec_func_info for section #2 + * a list of bpf_func_info records for section #2 + * ...... + * + * Note that the bpf_func_info record size in .BTF.ext may not + * be the same as the one defined in include/uapi/linux/bpf.h. + * The loader should ensure that record_size meets minimum + * requirement and pass the record as is to the kernel. The + * kernel will handle the func_info properly based on its contents. + */ +struct btf_ext_header { + __u16 magic; + __u8 version; + __u8 flags; + __u32 hdr_len; + + /* All offsets are in bytes relative to the end of this header */ + __u32 func_info_off; + __u32 func_info_len; + __u32 line_info_off; + __u32 line_info_len; + + /* optional part of .BTF.ext header */ + __u32 core_relo_off; + __u32 core_relo_len; +}; + +struct btf_ext { + union { + struct btf_ext_header *hdr; + void *data; + }; + struct btf_ext_info func_info; + struct btf_ext_info line_info; + struct btf_ext_info core_relo_info; + __u32 data_size; +}; + +struct btf_ext_info_sec { + __u32 sec_name_off; + __u32 num_info; + /* Followed by num_info * record_size number of bytes */ + __u8 data[]; +}; + +/* The minimum bpf_func_info checked by the loader */ +struct bpf_func_info_min { + __u32 insn_off; + __u32 type_id; +}; + +/* The minimum bpf_line_info checked by the loader */ +struct bpf_line_info_min { + __u32 insn_off; + __u32 file_name_off; + __u32 line_off; + __u32 line_col; +}; + + +typedef int (*type_id_visit_fn)(__u32 *type_id, void *ctx); +typedef int (*str_off_visit_fn)(__u32 *str_off, void *ctx); +int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx); +int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx); +int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx); +int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx); +__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name, + __u32 kind); + +extern enum libbpf_strict_mode libbpf_mode; + +typedef int (*kallsyms_cb_t)(unsigned long long sym_addr, char sym_type, + const char *sym_name, void *ctx); + +int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *arg); + +/* handle direct returned errors */ +static inline int libbpf_err(int ret) +{ + if (ret < 0) + errno = -ret; + return ret; +} + +/* handle errno-based (e.g., syscall or libc) errors according to libbpf's + * strict mode settings + */ +static inline int libbpf_err_errno(int ret) +{ + if (libbpf_mode & LIBBPF_STRICT_DIRECT_ERRS) + /* errno is already assumed to be set on error */ + return ret 
< 0 ? -errno : ret; + + /* legacy: on error return -1 directly and don't touch errno */ + return ret; +} + +/* handle error for pointer-returning APIs, err is assumed to be < 0 always */ +static inline void *libbpf_err_ptr(int err) +{ + /* set errno on error, this doesn't break anything */ + errno = -err; + + if (libbpf_mode & LIBBPF_STRICT_CLEAN_PTRS) + return NULL; + + /* legacy: encode err as ptr */ + return ERR_PTR(err); +} + +/* handle pointer-returning APIs' error handling */ +static inline void *libbpf_ptr(void *ret) +{ + /* set errno on error, this doesn't break anything */ + if (IS_ERR(ret)) + errno = -PTR_ERR(ret); + + if (libbpf_mode & LIBBPF_STRICT_CLEAN_PTRS) + return IS_ERR(ret) ? NULL : ret; + + /* legacy: pass-through original pointer */ + return ret; +} + +static inline bool str_is_empty(const char *s) +{ + return !s || !s[0]; +} + +static inline bool is_ldimm64_insn(struct bpf_insn *insn) +{ + return insn->code == (BPF_LD | BPF_IMM | BPF_DW); +} + +/* if fd is stdin, stdout, or stderr, dup to a fd greater than 2 + * Takes ownership of the fd passed in, and closes it if calling + * fcntl(fd, F_DUPFD_CLOEXEC, 3). + */ +static inline int ensure_good_fd(int fd) +{ + int old_fd = fd, saved_errno; + + if (fd < 0) + return fd; + if (fd < 3) { + fd = fcntl(fd, F_DUPFD_CLOEXEC, 3); + saved_errno = errno; + close(old_fd); + if (fd < 0) { + pr_warn("failed to dup FD %d to FD > 2: %d\n", old_fd, -saved_errno); + errno = saved_errno; + } + } + return fd; +} + +/* The following two functions are exposed to bpftool */ +int bpf_core_add_cands(struct bpf_core_cand *local_cand, + size_t local_essent_len, + const struct btf *targ_btf, + const char *targ_btf_name, + int targ_start_id, + struct bpf_core_cand_list *cands); +void bpf_core_free_cands(struct bpf_core_cand_list *cands); + +struct usdt_manager *usdt_manager_new(struct bpf_object *obj); +void usdt_manager_free(struct usdt_manager *man); +struct bpf_link * usdt_manager_attach_usdt(struct usdt_manager *man, + const struct bpf_program *prog, + pid_t pid, const char *path, + const char *usdt_provider, const char *usdt_name, + __u64 usdt_cookie); + +#endif /* __LIBBPF_LIBBPF_INTERNAL_H */ diff --git a/pkg/plugin/lib/common/libbpf/_src/libbpf_legacy.h b/pkg/plugin/lib/common/libbpf/_src/libbpf_legacy.h new file mode 100644 index 000000000..d7bcbd01f --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/libbpf_legacy.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ + +/* + * Libbpf legacy APIs (either discouraged or deprecated, as mentioned in [0]) + * + * [0] https://docs.google.com/document/d/1UyjTZuPFWiPFyKk1tV5an11_iaRuec6U-ZESZ54nNTY + * + * Copyright (C) 2021 Facebook + */ +#ifndef __LIBBPF_LEGACY_BPF_H +#define __LIBBPF_LEGACY_BPF_H + +#include +#include +#include +#include +#include "libbpf_common.h" + +#ifdef __cplusplus +extern "C" { +#endif + +enum libbpf_strict_mode { + /* Turn on all supported strict features of libbpf to simulate libbpf + * v1.0 behavior. + * This will be the default behavior in libbpf v1.0. + */ + LIBBPF_STRICT_ALL = 0xffffffff, + + /* + * Disable any libbpf 1.0 behaviors. This is the default before libbpf + * v1.0. It won't be supported anymore in v1.0, please update your + * code so that it handles LIBBPF_STRICT_ALL mode before libbpf v1.0. + */ + LIBBPF_STRICT_NONE = 0x00, + /* + * Return NULL pointers on error, not ERR_PTR(err). + * Additionally, libbpf also always sets errno to corresponding Exx + * (positive) error code. 
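+	 *
+	 * A hypothetical usage sketch (the object file name is made up): with
+	 * this mode enabled, pointer-returning APIs report failure as NULL
+	 * plus errno:
+	 *
+	 *	libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS);
+	 *	obj = bpf_object__open_file("prog.bpf.o", NULL);
+	 *	if (!obj)
+	 *		fprintf(stderr, "failed to open object: %d\n", -errno);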
+ */ + LIBBPF_STRICT_CLEAN_PTRS = 0x01, + /* + * Return actual error codes from low-level APIs directly, not just -1. + * Additionally, libbpf also always sets errno to corresponding Exx + * (positive) error code. + */ + LIBBPF_STRICT_DIRECT_ERRS = 0x02, + /* + * Enforce strict BPF program section (SEC()) names. + * E.g., while prefiously SEC("xdp_whatever") or SEC("perf_event_blah") were + * allowed, with LIBBPF_STRICT_SEC_PREFIX this will become + * unrecognized by libbpf and would have to be just SEC("xdp") and + * SEC("xdp") and SEC("perf_event"). + * + * Note, in this mode the program pin path will be based on the + * function name instead of section name. + * + * Additionally, routines in the .text section are always considered + * sub-programs. Legacy behavior allows for a single routine in .text + * to be a program. + */ + LIBBPF_STRICT_SEC_NAME = 0x04, + /* + * Disable the global 'bpf_objects_list'. Maintaining this list adds + * a race condition to bpf_object__open() and bpf_object__close(). + * Clients can maintain it on their own if it is valuable for them. + */ + LIBBPF_STRICT_NO_OBJECT_LIST = 0x08, + /* + * Automatically bump RLIMIT_MEMLOCK using setrlimit() before the + * first BPF program or map creation operation. This is done only if + * kernel is too old to support memcg-based memory accounting for BPF + * subsystem. By default, RLIMIT_MEMLOCK limit is set to RLIM_INFINITY, + * but it can be overriden with libbpf_set_memlock_rlim_max() API. + * Note that libbpf_set_memlock_rlim_max() needs to be called before + * the very first bpf_prog_load(), bpf_map_create() or bpf_object__load() + * operation. + */ + LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK = 0x10, + /* + * Error out on any SEC("maps") map definition, which are deprecated + * in favor of BTF-defined map definitions in SEC(".maps"). + */ + LIBBPF_STRICT_MAP_DEFINITIONS = 0x20, + + __LIBBPF_STRICT_LAST, +}; + +LIBBPF_API int libbpf_set_strict_mode(enum libbpf_strict_mode mode); + +#define DECLARE_LIBBPF_OPTS LIBBPF_OPTS + +/* "Discouraged" APIs which don't follow consistent libbpf naming patterns. + * They are normally a trivial aliases or wrappers for proper APIs and are + * left to minimize unnecessary disruption for users of libbpf. But they + * shouldn't be used going forward. + */ + +struct bpf_program; +struct bpf_map; +struct btf; +struct btf_ext; + +LIBBPF_API enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog); +LIBBPF_API enum bpf_attach_type bpf_program__get_expected_attach_type(const struct bpf_program *prog); +LIBBPF_API const char *bpf_map__get_pin_path(const struct bpf_map *map); +LIBBPF_API const void *btf__get_raw_data(const struct btf *btf, __u32 *size); +LIBBPF_API const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* __LIBBPF_LEGACY_BPF_H */ diff --git a/pkg/plugin/lib/common/libbpf/_src/libbpf_probes.c b/pkg/plugin/lib/common/libbpf/_src/libbpf_probes.c new file mode 100644 index 000000000..97b06cede --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/libbpf_probes.c @@ -0,0 +1,477 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +/* Copyright (c) 2019 Netronome Systems, Inc. 
*/ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "bpf.h" +#include "libbpf.h" +#include "libbpf_internal.h" + +static bool grep(const char *buffer, const char *pattern) +{ + return !!strstr(buffer, pattern); +} + +static int get_vendor_id(int ifindex) +{ + char ifname[IF_NAMESIZE], path[64], buf[8]; + ssize_t len; + int fd; + + if (!if_indextoname(ifindex, ifname)) + return -1; + + snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname); + + fd = open(path, O_RDONLY | O_CLOEXEC); + if (fd < 0) + return -1; + + len = read(fd, buf, sizeof(buf)); + close(fd); + if (len < 0) + return -1; + if (len >= (ssize_t)sizeof(buf)) + return -1; + buf[len] = '\0'; + + return strtol(buf, NULL, 0); +} + +static int probe_prog_load(enum bpf_prog_type prog_type, + const struct bpf_insn *insns, size_t insns_cnt, + char *log_buf, size_t log_buf_sz, + __u32 ifindex) +{ + LIBBPF_OPTS(bpf_prog_load_opts, opts, + .log_buf = log_buf, + .log_size = log_buf_sz, + .log_level = log_buf ? 1 : 0, + .prog_ifindex = ifindex, + ); + int fd, err, exp_err = 0; + const char *exp_msg = NULL; + char buf[4096]; + + switch (prog_type) { + case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: + opts.expected_attach_type = BPF_CGROUP_INET4_CONNECT; + break; + case BPF_PROG_TYPE_CGROUP_SOCKOPT: + opts.expected_attach_type = BPF_CGROUP_GETSOCKOPT; + break; + case BPF_PROG_TYPE_SK_LOOKUP: + opts.expected_attach_type = BPF_SK_LOOKUP; + break; + case BPF_PROG_TYPE_KPROBE: + opts.kern_version = get_kernel_version(); + break; + case BPF_PROG_TYPE_LIRC_MODE2: + opts.expected_attach_type = BPF_LIRC_MODE2; + break; + case BPF_PROG_TYPE_TRACING: + case BPF_PROG_TYPE_LSM: + opts.log_buf = buf; + opts.log_size = sizeof(buf); + opts.log_level = 1; + if (prog_type == BPF_PROG_TYPE_TRACING) + opts.expected_attach_type = BPF_TRACE_FENTRY; + else + opts.expected_attach_type = BPF_MODIFY_RETURN; + opts.attach_btf_id = 1; + + exp_err = -EINVAL; + exp_msg = "attach_btf_id 1 is not a function"; + break; + case BPF_PROG_TYPE_EXT: + opts.log_buf = buf; + opts.log_size = sizeof(buf); + opts.log_level = 1; + opts.attach_btf_id = 1; + + exp_err = -EINVAL; + exp_msg = "Cannot replace kernel functions"; + break; + case BPF_PROG_TYPE_SYSCALL: + opts.prog_flags = BPF_F_SLEEPABLE; + break; + case BPF_PROG_TYPE_STRUCT_OPS: + exp_err = -524; /* -ENOTSUPP */ + break; + case BPF_PROG_TYPE_UNSPEC: + case BPF_PROG_TYPE_SOCKET_FILTER: + case BPF_PROG_TYPE_SCHED_CLS: + case BPF_PROG_TYPE_SCHED_ACT: + case BPF_PROG_TYPE_TRACEPOINT: + case BPF_PROG_TYPE_XDP: + case BPF_PROG_TYPE_PERF_EVENT: + case BPF_PROG_TYPE_CGROUP_SKB: + case BPF_PROG_TYPE_CGROUP_SOCK: + case BPF_PROG_TYPE_LWT_IN: + case BPF_PROG_TYPE_LWT_OUT: + case BPF_PROG_TYPE_LWT_XMIT: + case BPF_PROG_TYPE_SOCK_OPS: + case BPF_PROG_TYPE_SK_SKB: + case BPF_PROG_TYPE_CGROUP_DEVICE: + case BPF_PROG_TYPE_SK_MSG: + case BPF_PROG_TYPE_RAW_TRACEPOINT: + case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: + case BPF_PROG_TYPE_LWT_SEG6LOCAL: + case BPF_PROG_TYPE_SK_REUSEPORT: + case BPF_PROG_TYPE_FLOW_DISSECTOR: + case BPF_PROG_TYPE_CGROUP_SYSCTL: + break; + default: + return -EOPNOTSUPP; + } + + fd = bpf_prog_load(prog_type, NULL, "GPL", insns, insns_cnt, &opts); + err = -errno; + if (fd >= 0) + close(fd); + if (exp_err) { + if (fd >= 0 || err != exp_err) + return 0; + if (exp_msg && !strstr(buf, exp_msg)) + return 0; + return 1; + } + return fd >= 0 ? 
1 : 0; +} + +int libbpf_probe_bpf_prog_type(enum bpf_prog_type prog_type, const void *opts) +{ + struct bpf_insn insns[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN() + }; + const size_t insn_cnt = ARRAY_SIZE(insns); + int ret; + + if (opts) + return libbpf_err(-EINVAL); + + ret = probe_prog_load(prog_type, insns, insn_cnt, NULL, 0, 0); + return libbpf_err(ret); +} + +bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex) +{ + struct bpf_insn insns[2] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN() + }; + + /* prefer libbpf_probe_bpf_prog_type() unless offload is requested */ + if (ifindex == 0) + return libbpf_probe_bpf_prog_type(prog_type, NULL) == 1; + + if (ifindex && prog_type == BPF_PROG_TYPE_SCHED_CLS) + /* nfp returns -EINVAL on exit(0) with TC offload */ + insns[0].imm = 2; + + errno = 0; + probe_prog_load(prog_type, insns, ARRAY_SIZE(insns), NULL, 0, ifindex); + + return errno != EINVAL && errno != EOPNOTSUPP; +} + +int libbpf__load_raw_btf(const char *raw_types, size_t types_len, + const char *str_sec, size_t str_len) +{ + struct btf_header hdr = { + .magic = BTF_MAGIC, + .version = BTF_VERSION, + .hdr_len = sizeof(struct btf_header), + .type_len = types_len, + .str_off = types_len, + .str_len = str_len, + }; + int btf_fd, btf_len; + __u8 *raw_btf; + + btf_len = hdr.hdr_len + hdr.type_len + hdr.str_len; + raw_btf = malloc(btf_len); + if (!raw_btf) + return -ENOMEM; + + memcpy(raw_btf, &hdr, sizeof(hdr)); + memcpy(raw_btf + hdr.hdr_len, raw_types, hdr.type_len); + memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len); + + btf_fd = bpf_btf_load(raw_btf, btf_len, NULL); + + free(raw_btf); + return btf_fd; +} + +static int load_local_storage_btf(void) +{ + const char strs[] = "\0bpf_spin_lock\0val\0cnt\0l"; + /* struct bpf_spin_lock { + * int val; + * }; + * struct val { + * int cnt; + * struct bpf_spin_lock l; + * }; + */ + __u32 types[] = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* struct bpf_spin_lock */ /* [2] */ + BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), + BTF_MEMBER_ENC(15, 1, 0), /* int val; */ + /* struct val */ /* [3] */ + BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8), + BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */ + BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */ + }; + + return libbpf__load_raw_btf((char *)types, sizeof(types), + strs, sizeof(strs)); +} + +static int probe_map_create(enum bpf_map_type map_type, __u32 ifindex) +{ + LIBBPF_OPTS(bpf_map_create_opts, opts); + int key_size, value_size, max_entries; + __u32 btf_key_type_id = 0, btf_value_type_id = 0; + int fd = -1, btf_fd = -1, fd_inner = -1, exp_err = 0, err; + + opts.map_ifindex = ifindex; + + key_size = sizeof(__u32); + value_size = sizeof(__u32); + max_entries = 1; + + switch (map_type) { + case BPF_MAP_TYPE_STACK_TRACE: + value_size = sizeof(__u64); + break; + case BPF_MAP_TYPE_LPM_TRIE: + key_size = sizeof(__u64); + value_size = sizeof(__u64); + opts.map_flags = BPF_F_NO_PREALLOC; + break; + case BPF_MAP_TYPE_CGROUP_STORAGE: + case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: + key_size = sizeof(struct bpf_cgroup_storage_key); + value_size = sizeof(__u64); + max_entries = 0; + break; + case BPF_MAP_TYPE_QUEUE: + case BPF_MAP_TYPE_STACK: + key_size = 0; + break; + case BPF_MAP_TYPE_SK_STORAGE: + case BPF_MAP_TYPE_INODE_STORAGE: + case BPF_MAP_TYPE_TASK_STORAGE: + btf_key_type_id = 1; + btf_value_type_id = 3; + value_size = 8; + max_entries = 0; + opts.map_flags = BPF_F_NO_PREALLOC; + btf_fd = 
load_local_storage_btf(); + if (btf_fd < 0) + return btf_fd; + break; + case BPF_MAP_TYPE_RINGBUF: + key_size = 0; + value_size = 0; + max_entries = 4096; + break; + case BPF_MAP_TYPE_STRUCT_OPS: + /* we'll get -ENOTSUPP for invalid BTF type ID for struct_ops */ + opts.btf_vmlinux_value_type_id = 1; + exp_err = -524; /* -ENOTSUPP */ + break; + case BPF_MAP_TYPE_BLOOM_FILTER: + key_size = 0; + max_entries = 1; + break; + case BPF_MAP_TYPE_HASH: + case BPF_MAP_TYPE_ARRAY: + case BPF_MAP_TYPE_PROG_ARRAY: + case BPF_MAP_TYPE_PERF_EVENT_ARRAY: + case BPF_MAP_TYPE_PERCPU_HASH: + case BPF_MAP_TYPE_PERCPU_ARRAY: + case BPF_MAP_TYPE_CGROUP_ARRAY: + case BPF_MAP_TYPE_LRU_HASH: + case BPF_MAP_TYPE_LRU_PERCPU_HASH: + case BPF_MAP_TYPE_ARRAY_OF_MAPS: + case BPF_MAP_TYPE_HASH_OF_MAPS: + case BPF_MAP_TYPE_DEVMAP: + case BPF_MAP_TYPE_DEVMAP_HASH: + case BPF_MAP_TYPE_SOCKMAP: + case BPF_MAP_TYPE_CPUMAP: + case BPF_MAP_TYPE_XSKMAP: + case BPF_MAP_TYPE_SOCKHASH: + case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: + break; + case BPF_MAP_TYPE_UNSPEC: + default: + return -EOPNOTSUPP; + } + + if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS || + map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { + /* TODO: probe for device, once libbpf has a function to create + * map-in-map for offload + */ + if (ifindex) + goto cleanup; + + fd_inner = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, + sizeof(__u32), sizeof(__u32), 1, NULL); + if (fd_inner < 0) + goto cleanup; + + opts.inner_map_fd = fd_inner; + } + + if (btf_fd >= 0) { + opts.btf_fd = btf_fd; + opts.btf_key_type_id = btf_key_type_id; + opts.btf_value_type_id = btf_value_type_id; + } + + fd = bpf_map_create(map_type, NULL, key_size, value_size, max_entries, &opts); + err = -errno; + +cleanup: + if (fd >= 0) + close(fd); + if (fd_inner >= 0) + close(fd_inner); + if (btf_fd >= 0) + close(btf_fd); + + if (exp_err) + return fd < 0 && err == exp_err ? 1 : 0; + else + return fd >= 0 ? 1 : 0; +} + +int libbpf_probe_bpf_map_type(enum bpf_map_type map_type, const void *opts) +{ + int ret; + + if (opts) + return libbpf_err(-EINVAL); + + ret = probe_map_create(map_type, 0); + return libbpf_err(ret); +} + +bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex) +{ + return probe_map_create(map_type, ifindex) == 1; +} + +int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type, enum bpf_func_id helper_id, + const void *opts) +{ + struct bpf_insn insns[] = { + BPF_EMIT_CALL((__u32)helper_id), + BPF_EXIT_INSN(), + }; + const size_t insn_cnt = ARRAY_SIZE(insns); + char buf[4096]; + int ret; + + if (opts) + return libbpf_err(-EINVAL); + + /* we can't successfully load all prog types to check for BPF helper + * support, so bail out with -EOPNOTSUPP error + */ + switch (prog_type) { + case BPF_PROG_TYPE_TRACING: + case BPF_PROG_TYPE_EXT: + case BPF_PROG_TYPE_LSM: + case BPF_PROG_TYPE_STRUCT_OPS: + return -EOPNOTSUPP; + default: + break; + } + + buf[0] = '\0'; + ret = probe_prog_load(prog_type, insns, insn_cnt, buf, sizeof(buf), 0); + if (ret < 0) + return libbpf_err(ret); + + /* If BPF verifier doesn't recognize BPF helper ID (enum bpf_func_id) + * at all, it will emit something like "invalid func unknown#181". + * If BPF verifier recognizes BPF helper but it's not supported for + * given BPF program type, it will emit "unknown func bpf_sys_bpf#166". + * In both cases, provided combination of BPF program type and BPF + * helper is not supported by the kernel. 
+ * In all other cases, probe_prog_load() above will either succeed (e.g., + * because BPF helper happens to accept no input arguments or it + * accepts one input argument and initial PTR_TO_CTX is fine for + * that), or we'll get some more specific BPF verifier error about + * some unsatisfied conditions. + */ + if (ret == 0 && (strstr(buf, "invalid func ") || strstr(buf, "unknown func "))) + return 0; + return 1; /* assume supported */ +} + +bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type, + __u32 ifindex) +{ + struct bpf_insn insns[2] = { + BPF_EMIT_CALL(id), + BPF_EXIT_INSN() + }; + char buf[4096] = {}; + bool res; + + probe_prog_load(prog_type, insns, ARRAY_SIZE(insns), buf, sizeof(buf), ifindex); + res = !grep(buf, "invalid func ") && !grep(buf, "unknown func "); + + if (ifindex) { + switch (get_vendor_id(ifindex)) { + case 0x19ee: /* Netronome specific */ + res = res && !grep(buf, "not supported by FW") && + !grep(buf, "unsupported function id"); + break; + default: + break; + } + } + + return res; +} + +/* + * Probe for availability of kernel commit (5.3): + * + * c04c0d2b968a ("bpf: increase complexity limit and maximum program size") + */ +bool bpf_probe_large_insn_limit(__u32 ifindex) +{ + struct bpf_insn insns[BPF_MAXINSNS + 1]; + int i; + + for (i = 0; i < BPF_MAXINSNS; i++) + insns[i] = BPF_MOV64_IMM(BPF_REG_0, 1); + insns[BPF_MAXINSNS] = BPF_EXIT_INSN(); + + errno = 0; + probe_prog_load(BPF_PROG_TYPE_SCHED_CLS, insns, ARRAY_SIZE(insns), NULL, 0, + ifindex); + + return errno != E2BIG && errno != EINVAL; +} diff --git a/pkg/plugin/lib/common/libbpf/_src/libbpf_version.h b/pkg/plugin/lib/common/libbpf/_src/libbpf_version.h new file mode 100644 index 000000000..61f203940 --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/libbpf_version.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ +/* Copyright (C) 2021 Facebook */ +#ifndef __LIBBPF_VERSION_H +#define __LIBBPF_VERSION_H + +#define LIBBPF_MAJOR_VERSION 0 +#define LIBBPF_MINOR_VERSION 8 + +#endif /* __LIBBPF_VERSION_H */ diff --git a/pkg/plugin/lib/common/libbpf/_src/linker.c b/pkg/plugin/lib/common/libbpf/_src/linker.c new file mode 100644 index 000000000..9aa016fb5 --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/linker.c @@ -0,0 +1,2903 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +/* + * BPF static linker + * + * Copyright (c) 2021 Facebook + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "libbpf.h" +#include "btf.h" +#include "libbpf_internal.h" +#include "strset.h" + +#define BTF_EXTERN_SEC ".extern" + +struct src_sec { + const char *sec_name; + /* positional (not necessarily ELF) index in an array of sections */ + int id; + /* positional (not necessarily ELF) index of a matching section in a final object file */ + int dst_id; + /* section data offset in a matching output section */ + int dst_off; + /* whether section is omitted from the final ELF file */ + bool skipped; + /* whether section is an ephemeral section, not mapped to an ELF section */ + bool ephemeral; + + /* ELF info */ + size_t sec_idx; + Elf_Scn *scn; + Elf64_Shdr *shdr; + Elf_Data *data; + + /* corresponding BTF DATASEC type ID */ + int sec_type_id; +}; + +struct src_obj { + const char *filename; + int fd; + Elf *elf; + /* Section header strings section index */ + size_t shstrs_sec_idx; + /* SYMTAB section index */ + size_t symtab_sec_idx; + + struct btf *btf; + struct btf_ext *btf_ext; + + /* List 
of sections (including ephemeral). Slot zero is unused. */ + struct src_sec *secs; + int sec_cnt; + + /* mapping of symbol indices from src to dst ELF */ + int *sym_map; + /* mapping from the src BTF type IDs to dst ones */ + int *btf_type_map; +}; + +/* single .BTF.ext data section */ +struct btf_ext_sec_data { + size_t rec_cnt; + __u32 rec_sz; + void *recs; +}; + +struct glob_sym { + /* ELF symbol index */ + int sym_idx; + /* associated section id for .ksyms, .kconfig, etc, but not .extern */ + int sec_id; + /* extern name offset in STRTAB */ + int name_off; + /* optional associated BTF type ID */ + int btf_id; + /* BTF type ID to which VAR/FUNC type is pointing to; used for + * rewriting types when extern VAR/FUNC is resolved to a concrete + * definition + */ + int underlying_btf_id; + /* sec_var index in the corresponding dst_sec, if exists */ + int var_idx; + + /* extern or resolved/global symbol */ + bool is_extern; + /* weak or strong symbol, never goes back from strong to weak */ + bool is_weak; +}; + +struct dst_sec { + char *sec_name; + /* positional (not necessarily ELF) index in an array of sections */ + int id; + + bool ephemeral; + + /* ELF info */ + size_t sec_idx; + Elf_Scn *scn; + Elf64_Shdr *shdr; + Elf_Data *data; + + /* final output section size */ + int sec_sz; + /* final output contents of the section */ + void *raw_data; + + /* corresponding STT_SECTION symbol index in SYMTAB */ + int sec_sym_idx; + + /* section's DATASEC variable info, emitted on BTF finalization */ + bool has_btf; + int sec_var_cnt; + struct btf_var_secinfo *sec_vars; + + /* section's .BTF.ext data */ + struct btf_ext_sec_data func_info; + struct btf_ext_sec_data line_info; + struct btf_ext_sec_data core_relo_info; +}; + +struct bpf_linker { + char *filename; + int fd; + Elf *elf; + Elf64_Ehdr *elf_hdr; + + /* Output sections metadata */ + struct dst_sec *secs; + int sec_cnt; + + struct strset *strtab_strs; /* STRTAB unique strings */ + size_t strtab_sec_idx; /* STRTAB section index */ + size_t symtab_sec_idx; /* SYMTAB section index */ + + struct btf *btf; + struct btf_ext *btf_ext; + + /* global (including extern) ELF symbols */ + int glob_sym_cnt; + struct glob_sym *glob_syms; +}; + +#define pr_warn_elf(fmt, ...) 
\ + libbpf_print(LIBBPF_WARN, "libbpf: " fmt ": %s\n", ##__VA_ARGS__, elf_errmsg(-1)) + +static int init_output_elf(struct bpf_linker *linker, const char *file); + +static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, + const struct bpf_linker_file_opts *opts, + struct src_obj *obj); +static int linker_sanity_check_elf(struct src_obj *obj); +static int linker_sanity_check_elf_symtab(struct src_obj *obj, struct src_sec *sec); +static int linker_sanity_check_elf_relos(struct src_obj *obj, struct src_sec *sec); +static int linker_sanity_check_btf(struct src_obj *obj); +static int linker_sanity_check_btf_ext(struct src_obj *obj); +static int linker_fixup_btf(struct src_obj *obj); +static int linker_append_sec_data(struct bpf_linker *linker, struct src_obj *obj); +static int linker_append_elf_syms(struct bpf_linker *linker, struct src_obj *obj); +static int linker_append_elf_sym(struct bpf_linker *linker, struct src_obj *obj, + Elf64_Sym *sym, const char *sym_name, int src_sym_idx); +static int linker_append_elf_relos(struct bpf_linker *linker, struct src_obj *obj); +static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj); +static int linker_append_btf_ext(struct bpf_linker *linker, struct src_obj *obj); + +static int finalize_btf(struct bpf_linker *linker); +static int finalize_btf_ext(struct bpf_linker *linker); + +void bpf_linker__free(struct bpf_linker *linker) +{ + int i; + + if (!linker) + return; + + free(linker->filename); + + if (linker->elf) + elf_end(linker->elf); + + if (linker->fd >= 0) + close(linker->fd); + + strset__free(linker->strtab_strs); + + btf__free(linker->btf); + btf_ext__free(linker->btf_ext); + + for (i = 1; i < linker->sec_cnt; i++) { + struct dst_sec *sec = &linker->secs[i]; + + free(sec->sec_name); + free(sec->raw_data); + free(sec->sec_vars); + + free(sec->func_info.recs); + free(sec->line_info.recs); + free(sec->core_relo_info.recs); + } + free(linker->secs); + + free(linker->glob_syms); + free(linker); +} + +struct bpf_linker *bpf_linker__new(const char *filename, struct bpf_linker_opts *opts) +{ + struct bpf_linker *linker; + int err; + + if (!OPTS_VALID(opts, bpf_linker_opts)) + return errno = EINVAL, NULL; + + if (elf_version(EV_CURRENT) == EV_NONE) { + pr_warn_elf("libelf initialization failed"); + return errno = EINVAL, NULL; + } + + linker = calloc(1, sizeof(*linker)); + if (!linker) + return errno = ENOMEM, NULL; + + linker->fd = -1; + + err = init_output_elf(linker, filename); + if (err) + goto err_out; + + return linker; + +err_out: + bpf_linker__free(linker); + return errno = -err, NULL; +} + +static struct dst_sec *add_dst_sec(struct bpf_linker *linker, const char *sec_name) +{ + struct dst_sec *secs = linker->secs, *sec; + size_t new_cnt = linker->sec_cnt ? 
linker->sec_cnt + 1 : 2; + + secs = libbpf_reallocarray(secs, new_cnt, sizeof(*secs)); + if (!secs) + return NULL; + + /* zero out newly allocated memory */ + memset(secs + linker->sec_cnt, 0, (new_cnt - linker->sec_cnt) * sizeof(*secs)); + + linker->secs = secs; + linker->sec_cnt = new_cnt; + + sec = &linker->secs[new_cnt - 1]; + sec->id = new_cnt - 1; + sec->sec_name = strdup(sec_name); + if (!sec->sec_name) + return NULL; + + return sec; +} + +static Elf64_Sym *add_new_sym(struct bpf_linker *linker, size_t *sym_idx) +{ + struct dst_sec *symtab = &linker->secs[linker->symtab_sec_idx]; + Elf64_Sym *syms, *sym; + size_t sym_cnt = symtab->sec_sz / sizeof(*sym); + + syms = libbpf_reallocarray(symtab->raw_data, sym_cnt + 1, sizeof(*sym)); + if (!syms) + return NULL; + + sym = &syms[sym_cnt]; + memset(sym, 0, sizeof(*sym)); + + symtab->raw_data = syms; + symtab->sec_sz += sizeof(*sym); + symtab->shdr->sh_size += sizeof(*sym); + symtab->data->d_size += sizeof(*sym); + + if (sym_idx) + *sym_idx = sym_cnt; + + return sym; +} + +static int init_output_elf(struct bpf_linker *linker, const char *file) +{ + int err, str_off; + Elf64_Sym *init_sym; + struct dst_sec *sec; + + linker->filename = strdup(file); + if (!linker->filename) + return -ENOMEM; + + linker->fd = open(file, O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC, 0644); + if (linker->fd < 0) { + err = -errno; + pr_warn("failed to create '%s': %d\n", file, err); + return err; + } + + linker->elf = elf_begin(linker->fd, ELF_C_WRITE, NULL); + if (!linker->elf) { + pr_warn_elf("failed to create ELF object"); + return -EINVAL; + } + + /* ELF header */ + linker->elf_hdr = elf64_newehdr(linker->elf); + if (!linker->elf_hdr) { + pr_warn_elf("failed to create ELF header"); + return -EINVAL; + } + + linker->elf_hdr->e_machine = EM_BPF; + linker->elf_hdr->e_type = ET_REL; +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + linker->elf_hdr->e_ident[EI_DATA] = ELFDATA2LSB; +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + linker->elf_hdr->e_ident[EI_DATA] = ELFDATA2MSB; +#else +#error "Unknown __BYTE_ORDER__" +#endif + + /* STRTAB */ + /* initialize strset with an empty string to conform to ELF */ + linker->strtab_strs = strset__new(INT_MAX, "", sizeof("")); + if (libbpf_get_error(linker->strtab_strs)) + return libbpf_get_error(linker->strtab_strs); + + sec = add_dst_sec(linker, ".strtab"); + if (!sec) + return -ENOMEM; + + sec->scn = elf_newscn(linker->elf); + if (!sec->scn) { + pr_warn_elf("failed to create STRTAB section"); + return -EINVAL; + } + + sec->shdr = elf64_getshdr(sec->scn); + if (!sec->shdr) + return -EINVAL; + + sec->data = elf_newdata(sec->scn); + if (!sec->data) { + pr_warn_elf("failed to create STRTAB data"); + return -EINVAL; + } + + str_off = strset__add_str(linker->strtab_strs, sec->sec_name); + if (str_off < 0) + return str_off; + + sec->sec_idx = elf_ndxscn(sec->scn); + linker->elf_hdr->e_shstrndx = sec->sec_idx; + linker->strtab_sec_idx = sec->sec_idx; + + sec->shdr->sh_name = str_off; + sec->shdr->sh_type = SHT_STRTAB; + sec->shdr->sh_flags = SHF_STRINGS; + sec->shdr->sh_offset = 0; + sec->shdr->sh_link = 0; + sec->shdr->sh_info = 0; + sec->shdr->sh_addralign = 1; + sec->shdr->sh_size = sec->sec_sz = 0; + sec->shdr->sh_entsize = 0; + + /* SYMTAB */ + sec = add_dst_sec(linker, ".symtab"); + if (!sec) + return -ENOMEM; + + sec->scn = elf_newscn(linker->elf); + if (!sec->scn) { + pr_warn_elf("failed to create SYMTAB section"); + return -EINVAL; + } + + sec->shdr = elf64_getshdr(sec->scn); + if (!sec->shdr) + return -EINVAL; + + sec->data = 
elf_newdata(sec->scn); + if (!sec->data) { + pr_warn_elf("failed to create SYMTAB data"); + return -EINVAL; + } + + str_off = strset__add_str(linker->strtab_strs, sec->sec_name); + if (str_off < 0) + return str_off; + + sec->sec_idx = elf_ndxscn(sec->scn); + linker->symtab_sec_idx = sec->sec_idx; + + sec->shdr->sh_name = str_off; + sec->shdr->sh_type = SHT_SYMTAB; + sec->shdr->sh_flags = 0; + sec->shdr->sh_offset = 0; + sec->shdr->sh_link = linker->strtab_sec_idx; + /* sh_info should be one greater than the index of the last local + * symbol (i.e., binding is STB_LOCAL). But why and who cares? + */ + sec->shdr->sh_info = 0; + sec->shdr->sh_addralign = 8; + sec->shdr->sh_entsize = sizeof(Elf64_Sym); + + /* .BTF */ + linker->btf = btf__new_empty(); + err = libbpf_get_error(linker->btf); + if (err) + return err; + + /* add the special all-zero symbol */ + init_sym = add_new_sym(linker, NULL); + if (!init_sym) + return -EINVAL; + + init_sym->st_name = 0; + init_sym->st_info = 0; + init_sym->st_other = 0; + init_sym->st_shndx = SHN_UNDEF; + init_sym->st_value = 0; + init_sym->st_size = 0; + + return 0; +} + +int bpf_linker__add_file(struct bpf_linker *linker, const char *filename, + const struct bpf_linker_file_opts *opts) +{ + struct src_obj obj = {}; + int err = 0; + + if (!OPTS_VALID(opts, bpf_linker_file_opts)) + return libbpf_err(-EINVAL); + + if (!linker->elf) + return libbpf_err(-EINVAL); + + err = err ?: linker_load_obj_file(linker, filename, opts, &obj); + err = err ?: linker_append_sec_data(linker, &obj); + err = err ?: linker_append_elf_syms(linker, &obj); + err = err ?: linker_append_elf_relos(linker, &obj); + err = err ?: linker_append_btf(linker, &obj); + err = err ?: linker_append_btf_ext(linker, &obj); + + /* free up src_obj resources */ + free(obj.btf_type_map); + btf__free(obj.btf); + btf_ext__free(obj.btf_ext); + free(obj.secs); + free(obj.sym_map); + if (obj.elf) + elf_end(obj.elf); + if (obj.fd >= 0) + close(obj.fd); + + return libbpf_err(err); +} + +static bool is_dwarf_sec_name(const char *name) +{ + /* approximation, but the actual list is too long */ + return strncmp(name, ".debug_", sizeof(".debug_") - 1) == 0; +} + +static bool is_ignored_sec(struct src_sec *sec) +{ + Elf64_Shdr *shdr = sec->shdr; + const char *name = sec->sec_name; + + /* no special handling of .strtab */ + if (shdr->sh_type == SHT_STRTAB) + return true; + + /* ignore .llvm_addrsig section as well */ + if (shdr->sh_type == SHT_LLVM_ADDRSIG) + return true; + + /* no subprograms will lead to an empty .text section, ignore it */ + if (shdr->sh_type == SHT_PROGBITS && shdr->sh_size == 0 && + strcmp(sec->sec_name, ".text") == 0) + return true; + + /* DWARF sections */ + if (is_dwarf_sec_name(sec->sec_name)) + return true; + + if (strncmp(name, ".rel", sizeof(".rel") - 1) == 0) { + name += sizeof(".rel") - 1; + /* DWARF section relocations */ + if (is_dwarf_sec_name(name)) + return true; + + /* .BTF and .BTF.ext don't need relocations */ + if (strcmp(name, BTF_ELF_SEC) == 0 || + strcmp(name, BTF_EXT_ELF_SEC) == 0) + return true; + } + + return false; +} + +static struct src_sec *add_src_sec(struct src_obj *obj, const char *sec_name) +{ + struct src_sec *secs = obj->secs, *sec; + size_t new_cnt = obj->sec_cnt ? 
obj->sec_cnt + 1 : 2; + + secs = libbpf_reallocarray(secs, new_cnt, sizeof(*secs)); + if (!secs) + return NULL; + + /* zero out newly allocated memory */ + memset(secs + obj->sec_cnt, 0, (new_cnt - obj->sec_cnt) * sizeof(*secs)); + + obj->secs = secs; + obj->sec_cnt = new_cnt; + + sec = &obj->secs[new_cnt - 1]; + sec->id = new_cnt - 1; + sec->sec_name = sec_name; + + return sec; +} + +static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, + const struct bpf_linker_file_opts *opts, + struct src_obj *obj) +{ +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + const int host_endianness = ELFDATA2LSB; +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + const int host_endianness = ELFDATA2MSB; +#else +#error "Unknown __BYTE_ORDER__" +#endif + int err = 0; + Elf_Scn *scn; + Elf_Data *data; + Elf64_Ehdr *ehdr; + Elf64_Shdr *shdr; + struct src_sec *sec; + + pr_debug("linker: adding object file '%s'...\n", filename); + + obj->filename = filename; + + obj->fd = open(filename, O_RDONLY | O_CLOEXEC); + if (obj->fd < 0) { + err = -errno; + pr_warn("failed to open file '%s': %d\n", filename, err); + return err; + } + obj->elf = elf_begin(obj->fd, ELF_C_READ_MMAP, NULL); + if (!obj->elf) { + err = -errno; + pr_warn_elf("failed to parse ELF file '%s'", filename); + return err; + } + + /* Sanity check ELF file high-level properties */ + ehdr = elf64_getehdr(obj->elf); + if (!ehdr) { + err = -errno; + pr_warn_elf("failed to get ELF header for %s", filename); + return err; + } + if (ehdr->e_ident[EI_DATA] != host_endianness) { + err = -EOPNOTSUPP; + pr_warn_elf("unsupported byte order of ELF file %s", filename); + return err; + } + if (ehdr->e_type != ET_REL + || ehdr->e_machine != EM_BPF + || ehdr->e_ident[EI_CLASS] != ELFCLASS64) { + err = -EOPNOTSUPP; + pr_warn_elf("unsupported kind of ELF file %s", filename); + return err; + } + + if (elf_getshdrstrndx(obj->elf, &obj->shstrs_sec_idx)) { + err = -errno; + pr_warn_elf("failed to get SHSTRTAB section index for %s", filename); + return err; + } + + scn = NULL; + while ((scn = elf_nextscn(obj->elf, scn)) != NULL) { + size_t sec_idx = elf_ndxscn(scn); + const char *sec_name; + + shdr = elf64_getshdr(scn); + if (!shdr) { + err = -errno; + pr_warn_elf("failed to get section #%zu header for %s", + sec_idx, filename); + return err; + } + + sec_name = elf_strptr(obj->elf, obj->shstrs_sec_idx, shdr->sh_name); + if (!sec_name) { + err = -errno; + pr_warn_elf("failed to get section #%zu name for %s", + sec_idx, filename); + return err; + } + + data = elf_getdata(scn, 0); + if (!data) { + err = -errno; + pr_warn_elf("failed to get section #%zu (%s) data from %s", + sec_idx, sec_name, filename); + return err; + } + + sec = add_src_sec(obj, sec_name); + if (!sec) + return -ENOMEM; + + sec->scn = scn; + sec->shdr = shdr; + sec->data = data; + sec->sec_idx = elf_ndxscn(scn); + + if (is_ignored_sec(sec)) { + sec->skipped = true; + continue; + } + + switch (shdr->sh_type) { + case SHT_SYMTAB: + if (obj->symtab_sec_idx) { + err = -EOPNOTSUPP; + pr_warn("multiple SYMTAB sections found, not supported\n"); + return err; + } + obj->symtab_sec_idx = sec_idx; + break; + case SHT_STRTAB: + /* we'll construct our own string table */ + break; + case SHT_PROGBITS: + if (strcmp(sec_name, BTF_ELF_SEC) == 0) { + obj->btf = btf__new(data->d_buf, shdr->sh_size); + err = libbpf_get_error(obj->btf); + if (err) { + pr_warn("failed to parse .BTF from %s: %d\n", filename, err); + return err; + } + sec->skipped = true; + continue; + } + if (strcmp(sec_name, BTF_EXT_ELF_SEC) == 
0) { + obj->btf_ext = btf_ext__new(data->d_buf, shdr->sh_size); + err = libbpf_get_error(obj->btf_ext); + if (err) { + pr_warn("failed to parse .BTF.ext from '%s': %d\n", filename, err); + return err; + } + sec->skipped = true; + continue; + } + + /* data & code */ + break; + case SHT_NOBITS: + /* BSS */ + break; + case SHT_REL: + /* relocations */ + break; + default: + pr_warn("unrecognized section #%zu (%s) in %s\n", + sec_idx, sec_name, filename); + err = -EINVAL; + return err; + } + } + + err = err ?: linker_sanity_check_elf(obj); + err = err ?: linker_sanity_check_btf(obj); + err = err ?: linker_sanity_check_btf_ext(obj); + err = err ?: linker_fixup_btf(obj); + + return err; +} + +static bool is_pow_of_2(size_t x) +{ + return x && (x & (x - 1)) == 0; +} + +static int linker_sanity_check_elf(struct src_obj *obj) +{ + struct src_sec *sec; + int i, err; + + if (!obj->symtab_sec_idx) { + pr_warn("ELF is missing SYMTAB section in %s\n", obj->filename); + return -EINVAL; + } + if (!obj->shstrs_sec_idx) { + pr_warn("ELF is missing section headers STRTAB section in %s\n", obj->filename); + return -EINVAL; + } + + for (i = 1; i < obj->sec_cnt; i++) { + sec = &obj->secs[i]; + + if (sec->sec_name[0] == '\0') { + pr_warn("ELF section #%zu has empty name in %s\n", sec->sec_idx, obj->filename); + return -EINVAL; + } + + if (sec->shdr->sh_addralign && !is_pow_of_2(sec->shdr->sh_addralign)) + return -EINVAL; + if (sec->shdr->sh_addralign != sec->data->d_align) + return -EINVAL; + + if (sec->shdr->sh_size != sec->data->d_size) + return -EINVAL; + + switch (sec->shdr->sh_type) { + case SHT_SYMTAB: + err = linker_sanity_check_elf_symtab(obj, sec); + if (err) + return err; + break; + case SHT_STRTAB: + break; + case SHT_PROGBITS: + if (sec->shdr->sh_flags & SHF_EXECINSTR) { + if (sec->shdr->sh_size % sizeof(struct bpf_insn) != 0) + return -EINVAL; + } + break; + case SHT_NOBITS: + break; + case SHT_REL: + err = linker_sanity_check_elf_relos(obj, sec); + if (err) + return err; + break; + case SHT_LLVM_ADDRSIG: + break; + default: + pr_warn("ELF section #%zu (%s) has unrecognized type %zu in %s\n", + sec->sec_idx, sec->sec_name, (size_t)sec->shdr->sh_type, obj->filename); + return -EINVAL; + } + } + + return 0; +} + +static int linker_sanity_check_elf_symtab(struct src_obj *obj, struct src_sec *sec) +{ + struct src_sec *link_sec; + Elf64_Sym *sym; + int i, n; + + if (sec->shdr->sh_entsize != sizeof(Elf64_Sym)) + return -EINVAL; + if (sec->shdr->sh_size % sec->shdr->sh_entsize != 0) + return -EINVAL; + + if (!sec->shdr->sh_link || sec->shdr->sh_link >= obj->sec_cnt) { + pr_warn("ELF SYMTAB section #%zu points to missing STRTAB section #%zu in %s\n", + sec->sec_idx, (size_t)sec->shdr->sh_link, obj->filename); + return -EINVAL; + } + link_sec = &obj->secs[sec->shdr->sh_link]; + if (link_sec->shdr->sh_type != SHT_STRTAB) { + pr_warn("ELF SYMTAB section #%zu points to invalid STRTAB section #%zu in %s\n", + sec->sec_idx, (size_t)sec->shdr->sh_link, obj->filename); + return -EINVAL; + } + + n = sec->shdr->sh_size / sec->shdr->sh_entsize; + sym = sec->data->d_buf; + for (i = 0; i < n; i++, sym++) { + int sym_type = ELF64_ST_TYPE(sym->st_info); + int sym_bind = ELF64_ST_BIND(sym->st_info); + int sym_vis = ELF64_ST_VISIBILITY(sym->st_other); + + if (i == 0) { + if (sym->st_name != 0 || sym->st_info != 0 + || sym->st_other != 0 || sym->st_shndx != 0 + || sym->st_value != 0 || sym->st_size != 0) { + pr_warn("ELF sym #0 is invalid in %s\n", obj->filename); + return -EINVAL; + } + continue; + } + if (sym_bind != 
STB_LOCAL && sym_bind != STB_GLOBAL && sym_bind != STB_WEAK) { + pr_warn("ELF sym #%d in section #%zu has unsupported symbol binding %d\n", + i, sec->sec_idx, sym_bind); + return -EINVAL; + } + if (sym_vis != STV_DEFAULT && sym_vis != STV_HIDDEN) { + pr_warn("ELF sym #%d in section #%zu has unsupported symbol visibility %d\n", + i, sec->sec_idx, sym_vis); + return -EINVAL; + } + if (sym->st_shndx == 0) { + if (sym_type != STT_NOTYPE || sym_bind == STB_LOCAL + || sym->st_value != 0 || sym->st_size != 0) { + pr_warn("ELF sym #%d is invalid extern symbol in %s\n", + i, obj->filename); + + return -EINVAL; + } + continue; + } + if (sym->st_shndx < SHN_LORESERVE && sym->st_shndx >= obj->sec_cnt) { + pr_warn("ELF sym #%d in section #%zu points to missing section #%zu in %s\n", + i, sec->sec_idx, (size_t)sym->st_shndx, obj->filename); + return -EINVAL; + } + if (sym_type == STT_SECTION) { + if (sym->st_value != 0) + return -EINVAL; + continue; + } + } + + return 0; +} + +static int linker_sanity_check_elf_relos(struct src_obj *obj, struct src_sec *sec) +{ + struct src_sec *link_sec, *sym_sec; + Elf64_Rel *relo; + int i, n; + + if (sec->shdr->sh_entsize != sizeof(Elf64_Rel)) + return -EINVAL; + if (sec->shdr->sh_size % sec->shdr->sh_entsize != 0) + return -EINVAL; + + /* SHT_REL's sh_link should point to SYMTAB */ + if (sec->shdr->sh_link != obj->symtab_sec_idx) { + pr_warn("ELF relo section #%zu points to invalid SYMTAB section #%zu in %s\n", + sec->sec_idx, (size_t)sec->shdr->sh_link, obj->filename); + return -EINVAL; + } + + /* SHT_REL's sh_info points to relocated section */ + if (!sec->shdr->sh_info || sec->shdr->sh_info >= obj->sec_cnt) { + pr_warn("ELF relo section #%zu points to missing section #%zu in %s\n", + sec->sec_idx, (size_t)sec->shdr->sh_info, obj->filename); + return -EINVAL; + } + link_sec = &obj->secs[sec->shdr->sh_info]; + + /* .rel -> pattern is followed */ + if (strncmp(sec->sec_name, ".rel", sizeof(".rel") - 1) != 0 + || strcmp(sec->sec_name + sizeof(".rel") - 1, link_sec->sec_name) != 0) { + pr_warn("ELF relo section #%zu name has invalid name in %s\n", + sec->sec_idx, obj->filename); + return -EINVAL; + } + + /* don't further validate relocations for ignored sections */ + if (link_sec->skipped) + return 0; + + /* relocatable section is data or instructions */ + if (link_sec->shdr->sh_type != SHT_PROGBITS && link_sec->shdr->sh_type != SHT_NOBITS) { + pr_warn("ELF relo section #%zu points to invalid section #%zu in %s\n", + sec->sec_idx, (size_t)sec->shdr->sh_info, obj->filename); + return -EINVAL; + } + + /* check sanity of each relocation */ + n = sec->shdr->sh_size / sec->shdr->sh_entsize; + relo = sec->data->d_buf; + sym_sec = &obj->secs[obj->symtab_sec_idx]; + for (i = 0; i < n; i++, relo++) { + size_t sym_idx = ELF64_R_SYM(relo->r_info); + size_t sym_type = ELF64_R_TYPE(relo->r_info); + + if (sym_type != R_BPF_64_64 && sym_type != R_BPF_64_32 && + sym_type != R_BPF_64_ABS64 && sym_type != R_BPF_64_ABS32) { + pr_warn("ELF relo #%d in section #%zu has unexpected type %zu in %s\n", + i, sec->sec_idx, sym_type, obj->filename); + return -EINVAL; + } + + if (!sym_idx || sym_idx * sizeof(Elf64_Sym) >= sym_sec->shdr->sh_size) { + pr_warn("ELF relo #%d in section #%zu points to invalid symbol #%zu in %s\n", + i, sec->sec_idx, sym_idx, obj->filename); + return -EINVAL; + } + + if (link_sec->shdr->sh_flags & SHF_EXECINSTR) { + if (relo->r_offset % sizeof(struct bpf_insn) != 0) { + pr_warn("ELF relo #%d in section #%zu points to missing symbol #%zu in %s\n", + i, sec->sec_idx, 
sym_idx, obj->filename); + return -EINVAL; + } + } + } + + return 0; +} + +static int check_btf_type_id(__u32 *type_id, void *ctx) +{ + struct btf *btf = ctx; + + if (*type_id >= btf__type_cnt(btf)) + return -EINVAL; + + return 0; +} + +static int check_btf_str_off(__u32 *str_off, void *ctx) +{ + struct btf *btf = ctx; + const char *s; + + s = btf__str_by_offset(btf, *str_off); + + if (!s) + return -EINVAL; + + return 0; +} + +static int linker_sanity_check_btf(struct src_obj *obj) +{ + struct btf_type *t; + int i, n, err = 0; + + if (!obj->btf) + return 0; + + n = btf__type_cnt(obj->btf); + for (i = 1; i < n; i++) { + t = btf_type_by_id(obj->btf, i); + + err = err ?: btf_type_visit_type_ids(t, check_btf_type_id, obj->btf); + err = err ?: btf_type_visit_str_offs(t, check_btf_str_off, obj->btf); + if (err) + return err; + } + + return 0; +} + +static int linker_sanity_check_btf_ext(struct src_obj *obj) +{ + int err = 0; + + if (!obj->btf_ext) + return 0; + + /* can't use .BTF.ext without .BTF */ + if (!obj->btf) + return -EINVAL; + + err = err ?: btf_ext_visit_type_ids(obj->btf_ext, check_btf_type_id, obj->btf); + err = err ?: btf_ext_visit_str_offs(obj->btf_ext, check_btf_str_off, obj->btf); + if (err) + return err; + + return 0; +} + +static int init_sec(struct bpf_linker *linker, struct dst_sec *dst_sec, struct src_sec *src_sec) +{ + Elf_Scn *scn; + Elf_Data *data; + Elf64_Shdr *shdr; + int name_off; + + dst_sec->sec_sz = 0; + dst_sec->sec_idx = 0; + dst_sec->ephemeral = src_sec->ephemeral; + + /* ephemeral sections are just thin section shells lacking most parts */ + if (src_sec->ephemeral) + return 0; + + scn = elf_newscn(linker->elf); + if (!scn) + return -ENOMEM; + data = elf_newdata(scn); + if (!data) + return -ENOMEM; + shdr = elf64_getshdr(scn); + if (!shdr) + return -ENOMEM; + + dst_sec->scn = scn; + dst_sec->shdr = shdr; + dst_sec->data = data; + dst_sec->sec_idx = elf_ndxscn(scn); + + name_off = strset__add_str(linker->strtab_strs, src_sec->sec_name); + if (name_off < 0) + return name_off; + + shdr->sh_name = name_off; + shdr->sh_type = src_sec->shdr->sh_type; + shdr->sh_flags = src_sec->shdr->sh_flags; + shdr->sh_size = 0; + /* sh_link and sh_info have different meaning for different types of + * sections, so we leave it up to the caller code to fill them in, if + * necessary + */ + shdr->sh_link = 0; + shdr->sh_info = 0; + shdr->sh_addralign = src_sec->shdr->sh_addralign; + shdr->sh_entsize = src_sec->shdr->sh_entsize; + + data->d_type = src_sec->data->d_type; + data->d_size = 0; + data->d_buf = NULL; + data->d_align = src_sec->data->d_align; + data->d_off = 0; + + return 0; +} + +static struct dst_sec *find_dst_sec_by_name(struct bpf_linker *linker, const char *sec_name) +{ + struct dst_sec *sec; + int i; + + for (i = 1; i < linker->sec_cnt; i++) { + sec = &linker->secs[i]; + + if (strcmp(sec->sec_name, sec_name) == 0) + return sec; + } + + return NULL; +} + +static bool secs_match(struct dst_sec *dst, struct src_sec *src) +{ + if (dst->ephemeral || src->ephemeral) + return true; + + if (dst->shdr->sh_type != src->shdr->sh_type) { + pr_warn("sec %s types mismatch\n", dst->sec_name); + return false; + } + if (dst->shdr->sh_flags != src->shdr->sh_flags) { + pr_warn("sec %s flags mismatch\n", dst->sec_name); + return false; + } + if (dst->shdr->sh_entsize != src->shdr->sh_entsize) { + pr_warn("sec %s entsize mismatch\n", dst->sec_name); + return false; + } + + return true; +} + +static bool sec_content_is_same(struct dst_sec *dst_sec, struct src_sec *src_sec) +{ + if 
(dst_sec->sec_sz != src_sec->shdr->sh_size) + return false; + if (memcmp(dst_sec->raw_data, src_sec->data->d_buf, dst_sec->sec_sz) != 0) + return false; + return true; +} + +static int extend_sec(struct bpf_linker *linker, struct dst_sec *dst, struct src_sec *src) +{ + void *tmp; + size_t dst_align, src_align; + size_t dst_align_sz, dst_final_sz; + int err; + + /* Ephemeral source section doesn't contribute anything to ELF + * section data. + */ + if (src->ephemeral) + return 0; + + /* Some sections (like .maps) can contain both externs (and thus be + * ephemeral) and non-externs (map definitions). So it's possible that + * it has to be "upgraded" from ephemeral to non-ephemeral when the + * first non-ephemeral entity appears. In such case, we add ELF + * section, data, etc. + */ + if (dst->ephemeral) { + err = init_sec(linker, dst, src); + if (err) + return err; + } + + dst_align = dst->shdr->sh_addralign; + src_align = src->shdr->sh_addralign; + if (dst_align == 0) + dst_align = 1; + if (dst_align < src_align) + dst_align = src_align; + + dst_align_sz = (dst->sec_sz + dst_align - 1) / dst_align * dst_align; + + /* no need to re-align final size */ + dst_final_sz = dst_align_sz + src->shdr->sh_size; + + if (src->shdr->sh_type != SHT_NOBITS) { + tmp = realloc(dst->raw_data, dst_final_sz); + if (!tmp) + return -ENOMEM; + dst->raw_data = tmp; + + /* pad dst section, if it's alignment forced size increase */ + memset(dst->raw_data + dst->sec_sz, 0, dst_align_sz - dst->sec_sz); + /* now copy src data at a properly aligned offset */ + memcpy(dst->raw_data + dst_align_sz, src->data->d_buf, src->shdr->sh_size); + } + + dst->sec_sz = dst_final_sz; + dst->shdr->sh_size = dst_final_sz; + dst->data->d_size = dst_final_sz; + + dst->shdr->sh_addralign = dst_align; + dst->data->d_align = dst_align; + + src->dst_off = dst_align_sz; + + return 0; +} + +static bool is_data_sec(struct src_sec *sec) +{ + if (!sec || sec->skipped) + return false; + /* ephemeral sections are data sections, e.g., .kconfig, .ksyms */ + if (sec->ephemeral) + return true; + return sec->shdr->sh_type == SHT_PROGBITS || sec->shdr->sh_type == SHT_NOBITS; +} + +static bool is_relo_sec(struct src_sec *sec) +{ + if (!sec || sec->skipped || sec->ephemeral) + return false; + return sec->shdr->sh_type == SHT_REL; +} + +static int linker_append_sec_data(struct bpf_linker *linker, struct src_obj *obj) +{ + int i, err; + + for (i = 1; i < obj->sec_cnt; i++) { + struct src_sec *src_sec; + struct dst_sec *dst_sec; + + src_sec = &obj->secs[i]; + if (!is_data_sec(src_sec)) + continue; + + dst_sec = find_dst_sec_by_name(linker, src_sec->sec_name); + if (!dst_sec) { + dst_sec = add_dst_sec(linker, src_sec->sec_name); + if (!dst_sec) + return -ENOMEM; + err = init_sec(linker, dst_sec, src_sec); + if (err) { + pr_warn("failed to init section '%s'\n", src_sec->sec_name); + return err; + } + } else { + if (!secs_match(dst_sec, src_sec)) { + pr_warn("ELF sections %s are incompatible\n", src_sec->sec_name); + return -1; + } + + /* "license" and "version" sections are deduped */ + if (strcmp(src_sec->sec_name, "license") == 0 + || strcmp(src_sec->sec_name, "version") == 0) { + if (!sec_content_is_same(dst_sec, src_sec)) { + pr_warn("non-identical contents of section '%s' are not supported\n", src_sec->sec_name); + return -EINVAL; + } + src_sec->skipped = true; + src_sec->dst_id = dst_sec->id; + continue; + } + } + + /* record mapped section index */ + src_sec->dst_id = dst_sec->id; + + err = extend_sec(linker, dst_sec, src_sec); + if (err) + return err; + 
} + + return 0; +} + +static int linker_append_elf_syms(struct bpf_linker *linker, struct src_obj *obj) +{ + struct src_sec *symtab = &obj->secs[obj->symtab_sec_idx]; + Elf64_Sym *sym = symtab->data->d_buf; + int i, n = symtab->shdr->sh_size / symtab->shdr->sh_entsize, err; + int str_sec_idx = symtab->shdr->sh_link; + const char *sym_name; + + obj->sym_map = calloc(n + 1, sizeof(*obj->sym_map)); + if (!obj->sym_map) + return -ENOMEM; + + for (i = 0; i < n; i++, sym++) { + /* We already validated all-zero symbol #0 and we already + * appended it preventively to the final SYMTAB, so skip it. + */ + if (i == 0) + continue; + + sym_name = elf_strptr(obj->elf, str_sec_idx, sym->st_name); + if (!sym_name) { + pr_warn("can't fetch symbol name for symbol #%d in '%s'\n", i, obj->filename); + return -EINVAL; + } + + err = linker_append_elf_sym(linker, obj, sym, sym_name, i); + if (err) + return err; + } + + return 0; +} + +static Elf64_Sym *get_sym_by_idx(struct bpf_linker *linker, size_t sym_idx) +{ + struct dst_sec *symtab = &linker->secs[linker->symtab_sec_idx]; + Elf64_Sym *syms = symtab->raw_data; + + return &syms[sym_idx]; +} + +static struct glob_sym *find_glob_sym(struct bpf_linker *linker, const char *sym_name) +{ + struct glob_sym *glob_sym; + const char *name; + int i; + + for (i = 0; i < linker->glob_sym_cnt; i++) { + glob_sym = &linker->glob_syms[i]; + name = strset__data(linker->strtab_strs) + glob_sym->name_off; + + if (strcmp(name, sym_name) == 0) + return glob_sym; + } + + return NULL; +} + +static struct glob_sym *add_glob_sym(struct bpf_linker *linker) +{ + struct glob_sym *syms, *sym; + + syms = libbpf_reallocarray(linker->glob_syms, linker->glob_sym_cnt + 1, + sizeof(*linker->glob_syms)); + if (!syms) + return NULL; + + sym = &syms[linker->glob_sym_cnt]; + memset(sym, 0, sizeof(*sym)); + sym->var_idx = -1; + + linker->glob_syms = syms; + linker->glob_sym_cnt++; + + return sym; +} + +static bool glob_sym_btf_matches(const char *sym_name, bool exact, + const struct btf *btf1, __u32 id1, + const struct btf *btf2, __u32 id2) +{ + const struct btf_type *t1, *t2; + bool is_static1, is_static2; + const char *n1, *n2; + int i, n; + +recur: + n1 = n2 = NULL; + t1 = skip_mods_and_typedefs(btf1, id1, &id1); + t2 = skip_mods_and_typedefs(btf2, id2, &id2); + + /* check if only one side is FWD, otherwise handle with common logic */ + if (!exact && btf_is_fwd(t1) != btf_is_fwd(t2)) { + n1 = btf__str_by_offset(btf1, t1->name_off); + n2 = btf__str_by_offset(btf2, t2->name_off); + if (strcmp(n1, n2) != 0) { + pr_warn("global '%s': incompatible forward declaration names '%s' and '%s'\n", + sym_name, n1, n2); + return false; + } + /* validate if FWD kind matches concrete kind */ + if (btf_is_fwd(t1)) { + if (btf_kflag(t1) && btf_is_union(t2)) + return true; + if (!btf_kflag(t1) && btf_is_struct(t2)) + return true; + pr_warn("global '%s': incompatible %s forward declaration and concrete kind %s\n", + sym_name, btf_kflag(t1) ? "union" : "struct", btf_kind_str(t2)); + } else { + if (btf_kflag(t2) && btf_is_union(t1)) + return true; + if (!btf_kflag(t2) && btf_is_struct(t1)) + return true; + pr_warn("global '%s': incompatible %s forward declaration and concrete kind %s\n", + sym_name, btf_kflag(t2) ? 
"union" : "struct", btf_kind_str(t1)); + } + return false; + } + + if (btf_kind(t1) != btf_kind(t2)) { + pr_warn("global '%s': incompatible BTF kinds %s and %s\n", + sym_name, btf_kind_str(t1), btf_kind_str(t2)); + return false; + } + + switch (btf_kind(t1)) { + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + case BTF_KIND_ENUM: + case BTF_KIND_FWD: + case BTF_KIND_FUNC: + case BTF_KIND_VAR: + n1 = btf__str_by_offset(btf1, t1->name_off); + n2 = btf__str_by_offset(btf2, t2->name_off); + if (strcmp(n1, n2) != 0) { + pr_warn("global '%s': incompatible %s names '%s' and '%s'\n", + sym_name, btf_kind_str(t1), n1, n2); + return false; + } + break; + default: + break; + } + + switch (btf_kind(t1)) { + case BTF_KIND_UNKN: /* void */ + case BTF_KIND_FWD: + return true; + case BTF_KIND_INT: + case BTF_KIND_FLOAT: + case BTF_KIND_ENUM: + /* ignore encoding for int and enum values for enum */ + if (t1->size != t2->size) { + pr_warn("global '%s': incompatible %s '%s' size %u and %u\n", + sym_name, btf_kind_str(t1), n1, t1->size, t2->size); + return false; + } + return true; + case BTF_KIND_PTR: + /* just validate overall shape of the referenced type, so no + * contents comparison for struct/union, and allowd fwd vs + * struct/union + */ + exact = false; + id1 = t1->type; + id2 = t2->type; + goto recur; + case BTF_KIND_ARRAY: + /* ignore index type and array size */ + id1 = btf_array(t1)->type; + id2 = btf_array(t2)->type; + goto recur; + case BTF_KIND_FUNC: + /* extern and global linkages are compatible */ + is_static1 = btf_func_linkage(t1) == BTF_FUNC_STATIC; + is_static2 = btf_func_linkage(t2) == BTF_FUNC_STATIC; + if (is_static1 != is_static2) { + pr_warn("global '%s': incompatible func '%s' linkage\n", sym_name, n1); + return false; + } + + id1 = t1->type; + id2 = t2->type; + goto recur; + case BTF_KIND_VAR: + /* extern and global linkages are compatible */ + is_static1 = btf_var(t1)->linkage == BTF_VAR_STATIC; + is_static2 = btf_var(t2)->linkage == BTF_VAR_STATIC; + if (is_static1 != is_static2) { + pr_warn("global '%s': incompatible var '%s' linkage\n", sym_name, n1); + return false; + } + + id1 = t1->type; + id2 = t2->type; + goto recur; + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: { + const struct btf_member *m1, *m2; + + if (!exact) + return true; + + if (btf_vlen(t1) != btf_vlen(t2)) { + pr_warn("global '%s': incompatible number of %s fields %u and %u\n", + sym_name, btf_kind_str(t1), btf_vlen(t1), btf_vlen(t2)); + return false; + } + + n = btf_vlen(t1); + m1 = btf_members(t1); + m2 = btf_members(t2); + for (i = 0; i < n; i++, m1++, m2++) { + n1 = btf__str_by_offset(btf1, m1->name_off); + n2 = btf__str_by_offset(btf2, m2->name_off); + if (strcmp(n1, n2) != 0) { + pr_warn("global '%s': incompatible field #%d names '%s' and '%s'\n", + sym_name, i, n1, n2); + return false; + } + if (m1->offset != m2->offset) { + pr_warn("global '%s': incompatible field #%d ('%s') offsets\n", + sym_name, i, n1); + return false; + } + if (!glob_sym_btf_matches(sym_name, exact, btf1, m1->type, btf2, m2->type)) + return false; + } + + return true; + } + case BTF_KIND_FUNC_PROTO: { + const struct btf_param *m1, *m2; + + if (btf_vlen(t1) != btf_vlen(t2)) { + pr_warn("global '%s': incompatible number of %s params %u and %u\n", + sym_name, btf_kind_str(t1), btf_vlen(t1), btf_vlen(t2)); + return false; + } + + n = btf_vlen(t1); + m1 = btf_params(t1); + m2 = btf_params(t2); + for (i = 0; i < n; i++, m1++, m2++) { + /* ignore func arg names */ + if (!glob_sym_btf_matches(sym_name, exact, btf1, m1->type, btf2, m2->type)) 
+ return false; + } + + /* now check return type as well */ + id1 = t1->type; + id2 = t2->type; + goto recur; + } + + /* skip_mods_and_typedefs() make this impossible */ + case BTF_KIND_TYPEDEF: + case BTF_KIND_VOLATILE: + case BTF_KIND_CONST: + case BTF_KIND_RESTRICT: + /* DATASECs are never compared with each other */ + case BTF_KIND_DATASEC: + default: + pr_warn("global '%s': unsupported BTF kind %s\n", + sym_name, btf_kind_str(t1)); + return false; + } +} + +static bool map_defs_match(const char *sym_name, + const struct btf *main_btf, + const struct btf_map_def *main_def, + const struct btf_map_def *main_inner_def, + const struct btf *extra_btf, + const struct btf_map_def *extra_def, + const struct btf_map_def *extra_inner_def) +{ + const char *reason; + + if (main_def->map_type != extra_def->map_type) { + reason = "type"; + goto mismatch; + } + + /* check key type/size match */ + if (main_def->key_size != extra_def->key_size) { + reason = "key_size"; + goto mismatch; + } + if (!!main_def->key_type_id != !!extra_def->key_type_id) { + reason = "key type"; + goto mismatch; + } + if ((main_def->parts & MAP_DEF_KEY_TYPE) + && !glob_sym_btf_matches(sym_name, true /*exact*/, + main_btf, main_def->key_type_id, + extra_btf, extra_def->key_type_id)) { + reason = "key type"; + goto mismatch; + } + + /* validate value type/size match */ + if (main_def->value_size != extra_def->value_size) { + reason = "value_size"; + goto mismatch; + } + if (!!main_def->value_type_id != !!extra_def->value_type_id) { + reason = "value type"; + goto mismatch; + } + if ((main_def->parts & MAP_DEF_VALUE_TYPE) + && !glob_sym_btf_matches(sym_name, true /*exact*/, + main_btf, main_def->value_type_id, + extra_btf, extra_def->value_type_id)) { + reason = "key type"; + goto mismatch; + } + + if (main_def->max_entries != extra_def->max_entries) { + reason = "max_entries"; + goto mismatch; + } + if (main_def->map_flags != extra_def->map_flags) { + reason = "map_flags"; + goto mismatch; + } + if (main_def->numa_node != extra_def->numa_node) { + reason = "numa_node"; + goto mismatch; + } + if (main_def->pinning != extra_def->pinning) { + reason = "pinning"; + goto mismatch; + } + + if ((main_def->parts & MAP_DEF_INNER_MAP) != (extra_def->parts & MAP_DEF_INNER_MAP)) { + reason = "inner map"; + goto mismatch; + } + + if (main_def->parts & MAP_DEF_INNER_MAP) { + char inner_map_name[128]; + + snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", sym_name); + + return map_defs_match(inner_map_name, + main_btf, main_inner_def, NULL, + extra_btf, extra_inner_def, NULL); + } + + return true; + +mismatch: + pr_warn("global '%s': map %s mismatch\n", sym_name, reason); + return false; +} + +static bool glob_map_defs_match(const char *sym_name, + struct bpf_linker *linker, struct glob_sym *glob_sym, + struct src_obj *obj, Elf64_Sym *sym, int btf_id) +{ + struct btf_map_def dst_def = {}, dst_inner_def = {}; + struct btf_map_def src_def = {}, src_inner_def = {}; + const struct btf_type *t; + int err; + + t = btf__type_by_id(obj->btf, btf_id); + if (!btf_is_var(t)) { + pr_warn("global '%s': invalid map definition type [%d]\n", sym_name, btf_id); + return false; + } + t = skip_mods_and_typedefs(obj->btf, t->type, NULL); + + err = parse_btf_map_def(sym_name, obj->btf, t, true /*strict*/, &src_def, &src_inner_def); + if (err) { + pr_warn("global '%s': invalid map definition\n", sym_name); + return false; + } + + /* re-parse existing map definition */ + t = btf__type_by_id(linker->btf, glob_sym->btf_id); + t = 
skip_mods_and_typedefs(linker->btf, t->type, NULL); + err = parse_btf_map_def(sym_name, linker->btf, t, true /*strict*/, &dst_def, &dst_inner_def); + if (err) { + /* this should not happen, because we already validated it */ + pr_warn("global '%s': invalid dst map definition\n", sym_name); + return false; + } + + /* Currently extern map definition has to be complete and match + * concrete map definition exactly. This restriction might be lifted + * in the future. + */ + return map_defs_match(sym_name, linker->btf, &dst_def, &dst_inner_def, + obj->btf, &src_def, &src_inner_def); +} + +static bool glob_syms_match(const char *sym_name, + struct bpf_linker *linker, struct glob_sym *glob_sym, + struct src_obj *obj, Elf64_Sym *sym, size_t sym_idx, int btf_id) +{ + const struct btf_type *src_t; + + /* if we are dealing with externs, BTF types describing both global + * and extern VARs/FUNCs should be completely present in all files + */ + if (!glob_sym->btf_id || !btf_id) { + pr_warn("BTF info is missing for global symbol '%s'\n", sym_name); + return false; + } + + src_t = btf__type_by_id(obj->btf, btf_id); + if (!btf_is_var(src_t) && !btf_is_func(src_t)) { + pr_warn("only extern variables and functions are supported, but got '%s' for '%s'\n", + btf_kind_str(src_t), sym_name); + return false; + } + + /* deal with .maps definitions specially */ + if (glob_sym->sec_id && strcmp(linker->secs[glob_sym->sec_id].sec_name, MAPS_ELF_SEC) == 0) + return glob_map_defs_match(sym_name, linker, glob_sym, obj, sym, btf_id); + + if (!glob_sym_btf_matches(sym_name, true /*exact*/, + linker->btf, glob_sym->btf_id, obj->btf, btf_id)) + return false; + + return true; +} + +static bool btf_is_non_static(const struct btf_type *t) +{ + return (btf_is_var(t) && btf_var(t)->linkage != BTF_VAR_STATIC) + || (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_STATIC); +} + +static int find_glob_sym_btf(struct src_obj *obj, Elf64_Sym *sym, const char *sym_name, + int *out_btf_sec_id, int *out_btf_id) +{ + int i, j, n, m, btf_id = 0; + const struct btf_type *t; + const struct btf_var_secinfo *vi; + const char *name; + + if (!obj->btf) { + pr_warn("failed to find BTF info for object '%s'\n", obj->filename); + return -EINVAL; + } + + n = btf__type_cnt(obj->btf); + for (i = 1; i < n; i++) { + t = btf__type_by_id(obj->btf, i); + + /* some global and extern FUNCs and VARs might not be associated with any + * DATASEC, so try to detect them in the same pass + */ + if (btf_is_non_static(t)) { + name = btf__str_by_offset(obj->btf, t->name_off); + if (strcmp(name, sym_name) != 0) + continue; + + /* remember and still try to find DATASEC */ + btf_id = i; + continue; + } + + if (!btf_is_datasec(t)) + continue; + + vi = btf_var_secinfos(t); + for (j = 0, m = btf_vlen(t); j < m; j++, vi++) { + t = btf__type_by_id(obj->btf, vi->type); + name = btf__str_by_offset(obj->btf, t->name_off); + + if (strcmp(name, sym_name) != 0) + continue; + if (btf_is_var(t) && btf_var(t)->linkage == BTF_VAR_STATIC) + continue; + if (btf_is_func(t) && btf_func_linkage(t) == BTF_FUNC_STATIC) + continue; + + if (btf_id && btf_id != vi->type) { + pr_warn("global/extern '%s' BTF is ambiguous: both types #%d and #%u match\n", + sym_name, btf_id, vi->type); + return -EINVAL; + } + + *out_btf_sec_id = i; + *out_btf_id = vi->type; + + return 0; + } + } + + /* free-floating extern or global FUNC */ + if (btf_id) { + *out_btf_sec_id = 0; + *out_btf_id = btf_id; + return 0; + } + + pr_warn("failed to find BTF info for global/extern symbol '%s'\n", sym_name); + return 
-ENOENT; +} + +static struct src_sec *find_src_sec_by_name(struct src_obj *obj, const char *sec_name) +{ + struct src_sec *sec; + int i; + + for (i = 1; i < obj->sec_cnt; i++) { + sec = &obj->secs[i]; + + if (strcmp(sec->sec_name, sec_name) == 0) + return sec; + } + + return NULL; +} + +static int complete_extern_btf_info(struct btf *dst_btf, int dst_id, + struct btf *src_btf, int src_id) +{ + struct btf_type *dst_t = btf_type_by_id(dst_btf, dst_id); + struct btf_type *src_t = btf_type_by_id(src_btf, src_id); + struct btf_param *src_p, *dst_p; + const char *s; + int i, n, off; + + /* We already made sure that source and destination types (FUNC or + * VAR) match in terms of types and argument names. + */ + if (btf_is_var(dst_t)) { + btf_var(dst_t)->linkage = BTF_VAR_GLOBAL_ALLOCATED; + return 0; + } + + dst_t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_GLOBAL, 0); + + /* now onto FUNC_PROTO types */ + src_t = btf_type_by_id(src_btf, src_t->type); + dst_t = btf_type_by_id(dst_btf, dst_t->type); + + /* Fill in all the argument names, which for extern FUNCs are missing. + * We'll end up with two copies of FUNCs/VARs for externs, but that + * will be taken care of by BTF dedup at the very end. + * It might be that BTF types for extern in one file has less/more BTF + * information (e.g., FWD instead of full STRUCT/UNION information), + * but that should be (in most cases, subject to BTF dedup rules) + * handled and resolved by BTF dedup algorithm as well, so we won't + * worry about it. Our only job is to make sure that argument names + * are populated on both sides, otherwise BTF dedup will pedantically + * consider them different. + */ + src_p = btf_params(src_t); + dst_p = btf_params(dst_t); + for (i = 0, n = btf_vlen(dst_t); i < n; i++, src_p++, dst_p++) { + if (!src_p->name_off) + continue; + + /* src_btf has more complete info, so add name to dst_btf */ + s = btf__str_by_offset(src_btf, src_p->name_off); + off = btf__add_str(dst_btf, s); + if (off < 0) + return off; + dst_p->name_off = off; + } + return 0; +} + +static void sym_update_bind(Elf64_Sym *sym, int sym_bind) +{ + sym->st_info = ELF64_ST_INFO(sym_bind, ELF64_ST_TYPE(sym->st_info)); +} + +static void sym_update_type(Elf64_Sym *sym, int sym_type) +{ + sym->st_info = ELF64_ST_INFO(ELF64_ST_BIND(sym->st_info), sym_type); +} + +static void sym_update_visibility(Elf64_Sym *sym, int sym_vis) +{ + /* libelf doesn't provide setters for ST_VISIBILITY, + * but it is stored in the lower 2 bits of st_other + */ + sym->st_other &= ~0x03; + sym->st_other |= sym_vis; +} + +static int linker_append_elf_sym(struct bpf_linker *linker, struct src_obj *obj, + Elf64_Sym *sym, const char *sym_name, int src_sym_idx) +{ + struct src_sec *src_sec = NULL; + struct dst_sec *dst_sec = NULL; + struct glob_sym *glob_sym = NULL; + int name_off, sym_type, sym_bind, sym_vis, err; + int btf_sec_id = 0, btf_id = 0; + size_t dst_sym_idx; + Elf64_Sym *dst_sym; + bool sym_is_extern; + + sym_type = ELF64_ST_TYPE(sym->st_info); + sym_bind = ELF64_ST_BIND(sym->st_info); + sym_vis = ELF64_ST_VISIBILITY(sym->st_other); + sym_is_extern = sym->st_shndx == SHN_UNDEF; + + if (sym_is_extern) { + if (!obj->btf) { + pr_warn("externs without BTF info are not supported\n"); + return -ENOTSUP; + } + } else if (sym->st_shndx < SHN_LORESERVE) { + src_sec = &obj->secs[sym->st_shndx]; + if (src_sec->skipped) + return 0; + dst_sec = &linker->secs[src_sec->dst_id]; + + /* allow only one STT_SECTION symbol per section */ + if (sym_type == STT_SECTION && dst_sec->sec_sym_idx) { + 
obj->sym_map[src_sym_idx] = dst_sec->sec_sym_idx; + return 0; + } + } + + if (sym_bind == STB_LOCAL) + goto add_sym; + + /* find matching BTF info */ + err = find_glob_sym_btf(obj, sym, sym_name, &btf_sec_id, &btf_id); + if (err) + return err; + + if (sym_is_extern && btf_sec_id) { + const char *sec_name = NULL; + const struct btf_type *t; + + t = btf__type_by_id(obj->btf, btf_sec_id); + sec_name = btf__str_by_offset(obj->btf, t->name_off); + + /* Clang puts unannotated extern vars into + * '.extern' BTF DATASEC. Treat them the same + * as unannotated extern funcs (which are + * currently not put into any DATASECs). + * Those don't have associated src_sec/dst_sec. + */ + if (strcmp(sec_name, BTF_EXTERN_SEC) != 0) { + src_sec = find_src_sec_by_name(obj, sec_name); + if (!src_sec) { + pr_warn("failed to find matching ELF sec '%s'\n", sec_name); + return -ENOENT; + } + dst_sec = &linker->secs[src_sec->dst_id]; + } + } + + glob_sym = find_glob_sym(linker, sym_name); + if (glob_sym) { + /* Preventively resolve to existing symbol. This is + * needed for further relocation symbol remapping in + * the next step of linking. + */ + obj->sym_map[src_sym_idx] = glob_sym->sym_idx; + + /* If both symbols are non-externs, at least one of + * them has to be STB_WEAK, otherwise they are in + * a conflict with each other. + */ + if (!sym_is_extern && !glob_sym->is_extern + && !glob_sym->is_weak && sym_bind != STB_WEAK) { + pr_warn("conflicting non-weak symbol #%d (%s) definition in '%s'\n", + src_sym_idx, sym_name, obj->filename); + return -EINVAL; + } + + if (!glob_syms_match(sym_name, linker, glob_sym, obj, sym, src_sym_idx, btf_id)) + return -EINVAL; + + dst_sym = get_sym_by_idx(linker, glob_sym->sym_idx); + + /* If new symbol is strong, then force dst_sym to be strong as + * well; this way a mix of weak and non-weak extern + * definitions will end up being strong. + */ + if (sym_bind == STB_GLOBAL) { + /* We still need to preserve type (NOTYPE or + * OBJECT/FUNC, depending on whether the symbol is + * extern or not) + */ + sym_update_bind(dst_sym, STB_GLOBAL); + glob_sym->is_weak = false; + } + + /* Non-default visibility is "contaminating", with stricter + * visibility overwriting more permissive ones, even if more + * permissive visibility comes from just an extern definition. + * Currently only STV_DEFAULT and STV_HIDDEN are allowed and + * ensured by ELF symbol sanity checks above. + */ + if (sym_vis > ELF64_ST_VISIBILITY(dst_sym->st_other)) + sym_update_visibility(dst_sym, sym_vis); + + /* If the new symbol is extern, then regardless if + * existing symbol is extern or resolved global, just + * keep the existing one untouched. + */ + if (sym_is_extern) + return 0; + + /* If existing symbol is a strong resolved symbol, bail out, + * because we lost resolution battle have nothing to + * contribute. We already checked abover that there is no + * strong-strong conflict. We also already tightened binding + * and visibility, so nothing else to contribute at that point. + */ + if (!glob_sym->is_extern && sym_bind == STB_WEAK) + return 0; + + /* At this point, new symbol is strong non-extern, + * so overwrite glob_sym with new symbol information. + * Preserve binding and visibility. 
+ */ + sym_update_type(dst_sym, sym_type); + dst_sym->st_shndx = dst_sec->sec_idx; + dst_sym->st_value = src_sec->dst_off + sym->st_value; + dst_sym->st_size = sym->st_size; + + /* see comment below about dst_sec->id vs dst_sec->sec_idx */ + glob_sym->sec_id = dst_sec->id; + glob_sym->is_extern = false; + + if (complete_extern_btf_info(linker->btf, glob_sym->btf_id, + obj->btf, btf_id)) + return -EINVAL; + + /* request updating VAR's/FUNC's underlying BTF type when appending BTF type */ + glob_sym->underlying_btf_id = 0; + + obj->sym_map[src_sym_idx] = glob_sym->sym_idx; + return 0; + } + +add_sym: + name_off = strset__add_str(linker->strtab_strs, sym_name); + if (name_off < 0) + return name_off; + + dst_sym = add_new_sym(linker, &dst_sym_idx); + if (!dst_sym) + return -ENOMEM; + + dst_sym->st_name = name_off; + dst_sym->st_info = sym->st_info; + dst_sym->st_other = sym->st_other; + dst_sym->st_shndx = dst_sec ? dst_sec->sec_idx : sym->st_shndx; + dst_sym->st_value = (src_sec ? src_sec->dst_off : 0) + sym->st_value; + dst_sym->st_size = sym->st_size; + + obj->sym_map[src_sym_idx] = dst_sym_idx; + + if (sym_type == STT_SECTION && dst_sym) { + dst_sec->sec_sym_idx = dst_sym_idx; + dst_sym->st_value = 0; + } + + if (sym_bind != STB_LOCAL) { + glob_sym = add_glob_sym(linker); + if (!glob_sym) + return -ENOMEM; + + glob_sym->sym_idx = dst_sym_idx; + /* we use dst_sec->id (and not dst_sec->sec_idx), because + * ephemeral sections (.kconfig, .ksyms, etc) don't have + * sec_idx (as they don't have corresponding ELF section), but + * still have id. .extern doesn't have even ephemeral section + * associated with it, so dst_sec->id == dst_sec->sec_idx == 0. + */ + glob_sym->sec_id = dst_sec ? dst_sec->id : 0; + glob_sym->name_off = name_off; + /* we will fill btf_id in during BTF merging step */ + glob_sym->btf_id = 0; + glob_sym->is_extern = sym_is_extern; + glob_sym->is_weak = sym_bind == STB_WEAK; + } + + return 0; +} + +static int linker_append_elf_relos(struct bpf_linker *linker, struct src_obj *obj) +{ + struct src_sec *src_symtab = &obj->secs[obj->symtab_sec_idx]; + struct dst_sec *dst_symtab; + int i, err; + + for (i = 1; i < obj->sec_cnt; i++) { + struct src_sec *src_sec, *src_linked_sec; + struct dst_sec *dst_sec, *dst_linked_sec; + Elf64_Rel *src_rel, *dst_rel; + int j, n; + + src_sec = &obj->secs[i]; + if (!is_relo_sec(src_sec)) + continue; + + /* shdr->sh_info points to relocatable section */ + src_linked_sec = &obj->secs[src_sec->shdr->sh_info]; + if (src_linked_sec->skipped) + continue; + + dst_sec = find_dst_sec_by_name(linker, src_sec->sec_name); + if (!dst_sec) { + dst_sec = add_dst_sec(linker, src_sec->sec_name); + if (!dst_sec) + return -ENOMEM; + err = init_sec(linker, dst_sec, src_sec); + if (err) { + pr_warn("failed to init section '%s'\n", src_sec->sec_name); + return err; + } + } else if (!secs_match(dst_sec, src_sec)) { + pr_warn("sections %s are not compatible\n", src_sec->sec_name); + return -1; + } + + /* add_dst_sec() above could have invalidated linker->secs */ + dst_symtab = &linker->secs[linker->symtab_sec_idx]; + + /* shdr->sh_link points to SYMTAB */ + dst_sec->shdr->sh_link = linker->symtab_sec_idx; + + /* shdr->sh_info points to relocated section */ + dst_linked_sec = &linker->secs[src_linked_sec->dst_id]; + dst_sec->shdr->sh_info = dst_linked_sec->sec_idx; + + src_sec->dst_id = dst_sec->id; + err = extend_sec(linker, dst_sec, src_sec); + if (err) + return err; + + src_rel = src_sec->data->d_buf; + dst_rel = dst_sec->raw_data + src_sec->dst_off; + n = 
src_sec->shdr->sh_size / src_sec->shdr->sh_entsize; + for (j = 0; j < n; j++, src_rel++, dst_rel++) { + size_t src_sym_idx = ELF64_R_SYM(src_rel->r_info); + size_t sym_type = ELF64_R_TYPE(src_rel->r_info); + Elf64_Sym *src_sym, *dst_sym; + size_t dst_sym_idx; + + src_sym_idx = ELF64_R_SYM(src_rel->r_info); + src_sym = src_symtab->data->d_buf + sizeof(*src_sym) * src_sym_idx; + + dst_sym_idx = obj->sym_map[src_sym_idx]; + dst_sym = dst_symtab->raw_data + sizeof(*dst_sym) * dst_sym_idx; + dst_rel->r_offset += src_linked_sec->dst_off; + sym_type = ELF64_R_TYPE(src_rel->r_info); + dst_rel->r_info = ELF64_R_INFO(dst_sym_idx, sym_type); + + if (ELF64_ST_TYPE(src_sym->st_info) == STT_SECTION) { + struct src_sec *sec = &obj->secs[src_sym->st_shndx]; + struct bpf_insn *insn; + + if (src_linked_sec->shdr->sh_flags & SHF_EXECINSTR) { + /* calls to the very first static function inside + * .text section at offset 0 will + * reference section symbol, not the + * function symbol. Fix that up, + * otherwise it won't be possible to + * relocate calls to two different + * static functions with the same name + * (rom two different object files) + */ + insn = dst_linked_sec->raw_data + dst_rel->r_offset; + if (insn->code == (BPF_JMP | BPF_CALL)) + insn->imm += sec->dst_off / sizeof(struct bpf_insn); + else + insn->imm += sec->dst_off; + } else { + pr_warn("relocation against STT_SECTION in non-exec section is not supported!\n"); + return -EINVAL; + } + } + + } + } + + return 0; +} + +static Elf64_Sym *find_sym_by_name(struct src_obj *obj, size_t sec_idx, + int sym_type, const char *sym_name) +{ + struct src_sec *symtab = &obj->secs[obj->symtab_sec_idx]; + Elf64_Sym *sym = symtab->data->d_buf; + int i, n = symtab->shdr->sh_size / symtab->shdr->sh_entsize; + int str_sec_idx = symtab->shdr->sh_link; + const char *name; + + for (i = 0; i < n; i++, sym++) { + if (sym->st_shndx != sec_idx) + continue; + if (ELF64_ST_TYPE(sym->st_info) != sym_type) + continue; + + name = elf_strptr(obj->elf, str_sec_idx, sym->st_name); + if (!name) + return NULL; + + if (strcmp(sym_name, name) != 0) + continue; + + return sym; + } + + return NULL; +} + +static int linker_fixup_btf(struct src_obj *obj) +{ + const char *sec_name; + struct src_sec *sec; + int i, j, n, m; + + if (!obj->btf) + return 0; + + n = btf__type_cnt(obj->btf); + for (i = 1; i < n; i++) { + struct btf_var_secinfo *vi; + struct btf_type *t; + + t = btf_type_by_id(obj->btf, i); + if (btf_kind(t) != BTF_KIND_DATASEC) + continue; + + sec_name = btf__str_by_offset(obj->btf, t->name_off); + sec = find_src_sec_by_name(obj, sec_name); + if (sec) { + /* record actual section size, unless ephemeral */ + if (sec->shdr) + t->size = sec->shdr->sh_size; + } else { + /* BTF can have some sections that are not represented + * in ELF, e.g., .kconfig, .ksyms, .extern, which are used + * for special extern variables. + * + * For all but one such special (ephemeral) + * sections, we pre-create "section shells" to be able + * to keep track of extra per-section metadata later + * (e.g., those BTF extern variables). + * + * .extern is even more special, though, because it + * contains extern variables that need to be resolved + * by static linker, not libbpf and kernel. When such + * externs are resolved, we are going to remove them + * from .extern BTF section and might end up not + * needing it at all. Each resolved extern should have + * matching non-extern VAR/FUNC in other sections. 
+ * + * We do support leaving some of the externs + * unresolved, though, to support cases of building + * libraries, which will later be linked against final + * BPF applications. So if at finalization we still + * see unresolved externs, we'll create .extern + * section on our own. + */ + if (strcmp(sec_name, BTF_EXTERN_SEC) == 0) + continue; + + sec = add_src_sec(obj, sec_name); + if (!sec) + return -ENOMEM; + + sec->ephemeral = true; + sec->sec_idx = 0; /* will match UNDEF shndx in ELF */ + } + + /* remember ELF section and its BTF type ID match */ + sec->sec_type_id = i; + + /* fix up variable offsets */ + vi = btf_var_secinfos(t); + for (j = 0, m = btf_vlen(t); j < m; j++, vi++) { + const struct btf_type *vt = btf__type_by_id(obj->btf, vi->type); + const char *var_name = btf__str_by_offset(obj->btf, vt->name_off); + int var_linkage = btf_var(vt)->linkage; + Elf64_Sym *sym; + + /* no need to patch up static or extern vars */ + if (var_linkage != BTF_VAR_GLOBAL_ALLOCATED) + continue; + + sym = find_sym_by_name(obj, sec->sec_idx, STT_OBJECT, var_name); + if (!sym) { + pr_warn("failed to find symbol for variable '%s' in section '%s'\n", var_name, sec_name); + return -ENOENT; + } + + vi->offset = sym->st_value; + } + } + + return 0; +} + +static int remap_type_id(__u32 *type_id, void *ctx) +{ + int *id_map = ctx; + int new_id = id_map[*type_id]; + + /* Error out if the type wasn't remapped. Ignore VOID which stays VOID. */ + if (new_id == 0 && *type_id != 0) { + pr_warn("failed to find new ID mapping for original BTF type ID %u\n", *type_id); + return -EINVAL; + } + + *type_id = id_map[*type_id]; + + return 0; +} + +static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj) +{ + const struct btf_type *t; + int i, j, n, start_id, id; + const char *name; + + if (!obj->btf) + return 0; + + start_id = btf__type_cnt(linker->btf); + n = btf__type_cnt(obj->btf); + + obj->btf_type_map = calloc(n + 1, sizeof(int)); + if (!obj->btf_type_map) + return -ENOMEM; + + for (i = 1; i < n; i++) { + struct glob_sym *glob_sym = NULL; + + t = btf__type_by_id(obj->btf, i); + + /* DATASECs are handled specially below */ + if (btf_kind(t) == BTF_KIND_DATASEC) + continue; + + if (btf_is_non_static(t)) { + /* there should be glob_sym already */ + name = btf__str_by_offset(obj->btf, t->name_off); + glob_sym = find_glob_sym(linker, name); + + /* VARs without corresponding glob_sym are those that + * belong to skipped/deduplicated sections (i.e., + * license and version), so just skip them + */ + if (!glob_sym) + continue; + + /* linker_append_elf_sym() might have requested + * updating underlying type ID, if extern was resolved + * to strong symbol or weak got upgraded to non-weak + */ + if (glob_sym->underlying_btf_id == 0) + glob_sym->underlying_btf_id = -t->type; + + /* globals from previous object files that match our + * VAR/FUNC already have a corresponding associated + * BTF type, so just make sure to use it + */ + if (glob_sym->btf_id) { + /* reuse existing BTF type for global var/func */ + obj->btf_type_map[i] = glob_sym->btf_id; + continue; + } + } + + id = btf__add_type(linker->btf, obj->btf, t); + if (id < 0) { + pr_warn("failed to append BTF type #%d from file '%s'\n", i, obj->filename); + return id; + } + + obj->btf_type_map[i] = id; + + /* record just appended BTF type for var/func */ + if (glob_sym) { + glob_sym->btf_id = id; + glob_sym->underlying_btf_id = -t->type; + } + } + + /* remap all the types except DATASECs */ + n = btf__type_cnt(linker->btf); + for (i = start_id; i < n; 
i++) { + struct btf_type *dst_t = btf_type_by_id(linker->btf, i); + + if (btf_type_visit_type_ids(dst_t, remap_type_id, obj->btf_type_map)) + return -EINVAL; + } + + /* Rewrite VAR/FUNC underlying types (i.e., FUNC's FUNC_PROTO and VAR's + * actual type), if necessary + */ + for (i = 0; i < linker->glob_sym_cnt; i++) { + struct glob_sym *glob_sym = &linker->glob_syms[i]; + struct btf_type *glob_t; + + if (glob_sym->underlying_btf_id >= 0) + continue; + + glob_sym->underlying_btf_id = obj->btf_type_map[-glob_sym->underlying_btf_id]; + + glob_t = btf_type_by_id(linker->btf, glob_sym->btf_id); + glob_t->type = glob_sym->underlying_btf_id; + } + + /* append DATASEC info */ + for (i = 1; i < obj->sec_cnt; i++) { + struct src_sec *src_sec; + struct dst_sec *dst_sec; + const struct btf_var_secinfo *src_var; + struct btf_var_secinfo *dst_var; + + src_sec = &obj->secs[i]; + if (!src_sec->sec_type_id || src_sec->skipped) + continue; + dst_sec = &linker->secs[src_sec->dst_id]; + + /* Mark section as having BTF regardless of the presence of + * variables. In some cases compiler might generate empty BTF + * with no variables information. E.g., when promoting local + * array/structure variable initial values and BPF object + * file otherwise has no read-only static variables in + * .rodata. We need to preserve such empty BTF and just set + * correct section size. + */ + dst_sec->has_btf = true; + + t = btf__type_by_id(obj->btf, src_sec->sec_type_id); + src_var = btf_var_secinfos(t); + n = btf_vlen(t); + for (j = 0; j < n; j++, src_var++) { + void *sec_vars = dst_sec->sec_vars; + int new_id = obj->btf_type_map[src_var->type]; + struct glob_sym *glob_sym = NULL; + + t = btf_type_by_id(linker->btf, new_id); + if (btf_is_non_static(t)) { + name = btf__str_by_offset(linker->btf, t->name_off); + glob_sym = find_glob_sym(linker, name); + if (glob_sym->sec_id != dst_sec->id) { + pr_warn("global '%s': section mismatch %d vs %d\n", + name, glob_sym->sec_id, dst_sec->id); + return -EINVAL; + } + } + + /* If there is already a member (VAR or FUNC) mapped + * to the same type, don't add a duplicate entry. + * This will happen when multiple object files define + * the same extern VARs/FUNCs. + */ + if (glob_sym && glob_sym->var_idx >= 0) { + __s64 sz; + + dst_var = &dst_sec->sec_vars[glob_sym->var_idx]; + /* Because underlying BTF type might have + * changed, so might its size have changed, so + * re-calculate and update it in sec_var. 
+ */ + sz = btf__resolve_size(linker->btf, glob_sym->underlying_btf_id); + if (sz < 0) { + pr_warn("global '%s': failed to resolve size of underlying type: %d\n", + name, (int)sz); + return -EINVAL; + } + dst_var->size = sz; + continue; + } + + sec_vars = libbpf_reallocarray(sec_vars, + dst_sec->sec_var_cnt + 1, + sizeof(*dst_sec->sec_vars)); + if (!sec_vars) + return -ENOMEM; + + dst_sec->sec_vars = sec_vars; + dst_sec->sec_var_cnt++; + + dst_var = &dst_sec->sec_vars[dst_sec->sec_var_cnt - 1]; + dst_var->type = obj->btf_type_map[src_var->type]; + dst_var->size = src_var->size; + dst_var->offset = src_sec->dst_off + src_var->offset; + + if (glob_sym) + glob_sym->var_idx = dst_sec->sec_var_cnt - 1; + } + } + + return 0; +} + +static void *add_btf_ext_rec(struct btf_ext_sec_data *ext_data, const void *src_rec) +{ + void *tmp; + + tmp = libbpf_reallocarray(ext_data->recs, ext_data->rec_cnt + 1, ext_data->rec_sz); + if (!tmp) + return NULL; + ext_data->recs = tmp; + + tmp += ext_data->rec_cnt * ext_data->rec_sz; + memcpy(tmp, src_rec, ext_data->rec_sz); + + ext_data->rec_cnt++; + + return tmp; +} + +static int linker_append_btf_ext(struct bpf_linker *linker, struct src_obj *obj) +{ + const struct btf_ext_info_sec *ext_sec; + const char *sec_name, *s; + struct src_sec *src_sec; + struct dst_sec *dst_sec; + int rec_sz, str_off, i; + + if (!obj->btf_ext) + return 0; + + rec_sz = obj->btf_ext->func_info.rec_size; + for_each_btf_ext_sec(&obj->btf_ext->func_info, ext_sec) { + struct bpf_func_info_min *src_rec, *dst_rec; + + sec_name = btf__name_by_offset(obj->btf, ext_sec->sec_name_off); + src_sec = find_src_sec_by_name(obj, sec_name); + if (!src_sec) { + pr_warn("can't find section '%s' referenced from .BTF.ext\n", sec_name); + return -EINVAL; + } + dst_sec = &linker->secs[src_sec->dst_id]; + + if (dst_sec->func_info.rec_sz == 0) + dst_sec->func_info.rec_sz = rec_sz; + if (dst_sec->func_info.rec_sz != rec_sz) { + pr_warn("incompatible .BTF.ext record sizes for section '%s'\n", sec_name); + return -EINVAL; + } + + for_each_btf_ext_rec(&obj->btf_ext->func_info, ext_sec, i, src_rec) { + dst_rec = add_btf_ext_rec(&dst_sec->func_info, src_rec); + if (!dst_rec) + return -ENOMEM; + + dst_rec->insn_off += src_sec->dst_off; + dst_rec->type_id = obj->btf_type_map[dst_rec->type_id]; + } + } + + rec_sz = obj->btf_ext->line_info.rec_size; + for_each_btf_ext_sec(&obj->btf_ext->line_info, ext_sec) { + struct bpf_line_info_min *src_rec, *dst_rec; + + sec_name = btf__name_by_offset(obj->btf, ext_sec->sec_name_off); + src_sec = find_src_sec_by_name(obj, sec_name); + if (!src_sec) { + pr_warn("can't find section '%s' referenced from .BTF.ext\n", sec_name); + return -EINVAL; + } + dst_sec = &linker->secs[src_sec->dst_id]; + + if (dst_sec->line_info.rec_sz == 0) + dst_sec->line_info.rec_sz = rec_sz; + if (dst_sec->line_info.rec_sz != rec_sz) { + pr_warn("incompatible .BTF.ext record sizes for section '%s'\n", sec_name); + return -EINVAL; + } + + for_each_btf_ext_rec(&obj->btf_ext->line_info, ext_sec, i, src_rec) { + dst_rec = add_btf_ext_rec(&dst_sec->line_info, src_rec); + if (!dst_rec) + return -ENOMEM; + + dst_rec->insn_off += src_sec->dst_off; + + s = btf__str_by_offset(obj->btf, src_rec->file_name_off); + str_off = btf__add_str(linker->btf, s); + if (str_off < 0) + return -ENOMEM; + dst_rec->file_name_off = str_off; + + s = btf__str_by_offset(obj->btf, src_rec->line_off); + str_off = btf__add_str(linker->btf, s); + if (str_off < 0) + return -ENOMEM; + dst_rec->line_off = str_off; + + /* dst_rec->line_col is fine 
*/ + } + } + + rec_sz = obj->btf_ext->core_relo_info.rec_size; + for_each_btf_ext_sec(&obj->btf_ext->core_relo_info, ext_sec) { + struct bpf_core_relo *src_rec, *dst_rec; + + sec_name = btf__name_by_offset(obj->btf, ext_sec->sec_name_off); + src_sec = find_src_sec_by_name(obj, sec_name); + if (!src_sec) { + pr_warn("can't find section '%s' referenced from .BTF.ext\n", sec_name); + return -EINVAL; + } + dst_sec = &linker->secs[src_sec->dst_id]; + + if (dst_sec->core_relo_info.rec_sz == 0) + dst_sec->core_relo_info.rec_sz = rec_sz; + if (dst_sec->core_relo_info.rec_sz != rec_sz) { + pr_warn("incompatible .BTF.ext record sizes for section '%s'\n", sec_name); + return -EINVAL; + } + + for_each_btf_ext_rec(&obj->btf_ext->core_relo_info, ext_sec, i, src_rec) { + dst_rec = add_btf_ext_rec(&dst_sec->core_relo_info, src_rec); + if (!dst_rec) + return -ENOMEM; + + dst_rec->insn_off += src_sec->dst_off; + dst_rec->type_id = obj->btf_type_map[dst_rec->type_id]; + + s = btf__str_by_offset(obj->btf, src_rec->access_str_off); + str_off = btf__add_str(linker->btf, s); + if (str_off < 0) + return -ENOMEM; + dst_rec->access_str_off = str_off; + + /* dst_rec->kind is fine */ + } + } + + return 0; +} + +int bpf_linker__finalize(struct bpf_linker *linker) +{ + struct dst_sec *sec; + size_t strs_sz; + const void *strs; + int err, i; + + if (!linker->elf) + return libbpf_err(-EINVAL); + + err = finalize_btf(linker); + if (err) + return libbpf_err(err); + + /* Finalize strings */ + strs_sz = strset__data_size(linker->strtab_strs); + strs = strset__data(linker->strtab_strs); + + sec = &linker->secs[linker->strtab_sec_idx]; + sec->data->d_align = 1; + sec->data->d_off = 0LL; + sec->data->d_buf = (void *)strs; + sec->data->d_type = ELF_T_BYTE; + sec->data->d_size = strs_sz; + sec->shdr->sh_size = strs_sz; + + for (i = 1; i < linker->sec_cnt; i++) { + sec = &linker->secs[i]; + + /* STRTAB is handled specially above */ + if (sec->sec_idx == linker->strtab_sec_idx) + continue; + + /* special ephemeral sections (.ksyms, .kconfig, etc) */ + if (!sec->scn) + continue; + + sec->data->d_buf = sec->raw_data; + } + + /* Finalize ELF layout */ + if (elf_update(linker->elf, ELF_C_NULL) < 0) { + err = -errno; + pr_warn_elf("failed to finalize ELF layout"); + return libbpf_err(err); + } + + /* Write out final ELF contents */ + if (elf_update(linker->elf, ELF_C_WRITE) < 0) { + err = -errno; + pr_warn_elf("failed to write ELF contents"); + return libbpf_err(err); + } + + elf_end(linker->elf); + close(linker->fd); + + linker->elf = NULL; + linker->fd = -1; + + return 0; +} + +static int emit_elf_data_sec(struct bpf_linker *linker, const char *sec_name, + size_t align, const void *raw_data, size_t raw_sz) +{ + Elf_Scn *scn; + Elf_Data *data; + Elf64_Shdr *shdr; + int name_off; + + name_off = strset__add_str(linker->strtab_strs, sec_name); + if (name_off < 0) + return name_off; + + scn = elf_newscn(linker->elf); + if (!scn) + return -ENOMEM; + data = elf_newdata(scn); + if (!data) + return -ENOMEM; + shdr = elf64_getshdr(scn); + if (!shdr) + return -EINVAL; + + shdr->sh_name = name_off; + shdr->sh_type = SHT_PROGBITS; + shdr->sh_flags = 0; + shdr->sh_size = raw_sz; + shdr->sh_link = 0; + shdr->sh_info = 0; + shdr->sh_addralign = align; + shdr->sh_entsize = 0; + + data->d_type = ELF_T_BYTE; + data->d_size = raw_sz; + data->d_buf = (void *)raw_data; + data->d_align = align; + data->d_off = 0; + + return 0; +} + +static int finalize_btf(struct bpf_linker *linker) +{ + LIBBPF_OPTS(btf_dedup_opts, opts); + struct btf *btf = linker->btf; + 
const void *raw_data; + int i, j, id, err; + __u32 raw_sz; + + /* bail out if no BTF data was produced */ + if (btf__type_cnt(linker->btf) == 1) + return 0; + + for (i = 1; i < linker->sec_cnt; i++) { + struct dst_sec *sec = &linker->secs[i]; + + if (!sec->has_btf) + continue; + + id = btf__add_datasec(btf, sec->sec_name, sec->sec_sz); + if (id < 0) { + pr_warn("failed to add consolidated BTF type for datasec '%s': %d\n", + sec->sec_name, id); + return id; + } + + for (j = 0; j < sec->sec_var_cnt; j++) { + struct btf_var_secinfo *vi = &sec->sec_vars[j]; + + if (btf__add_datasec_var_info(btf, vi->type, vi->offset, vi->size)) + return -EINVAL; + } + } + + err = finalize_btf_ext(linker); + if (err) { + pr_warn(".BTF.ext generation failed: %d\n", err); + return err; + } + + opts.btf_ext = linker->btf_ext; + err = btf__dedup(linker->btf, &opts); + if (err) { + pr_warn("BTF dedup failed: %d\n", err); + return err; + } + + /* Emit .BTF section */ + raw_data = btf__raw_data(linker->btf, &raw_sz); + if (!raw_data) + return -ENOMEM; + + err = emit_elf_data_sec(linker, BTF_ELF_SEC, 8, raw_data, raw_sz); + if (err) { + pr_warn("failed to write out .BTF ELF section: %d\n", err); + return err; + } + + /* Emit .BTF.ext section */ + if (linker->btf_ext) { + raw_data = btf_ext__get_raw_data(linker->btf_ext, &raw_sz); + if (!raw_data) + return -ENOMEM; + + err = emit_elf_data_sec(linker, BTF_EXT_ELF_SEC, 8, raw_data, raw_sz); + if (err) { + pr_warn("failed to write out .BTF.ext ELF section: %d\n", err); + return err; + } + } + + return 0; +} + +static int emit_btf_ext_data(struct bpf_linker *linker, void *output, + const char *sec_name, struct btf_ext_sec_data *sec_data) +{ + struct btf_ext_info_sec *sec_info; + void *cur = output; + int str_off; + size_t sz; + + if (!sec_data->rec_cnt) + return 0; + + str_off = btf__add_str(linker->btf, sec_name); + if (str_off < 0) + return -ENOMEM; + + sec_info = cur; + sec_info->sec_name_off = str_off; + sec_info->num_info = sec_data->rec_cnt; + cur += sizeof(struct btf_ext_info_sec); + + sz = sec_data->rec_cnt * sec_data->rec_sz; + memcpy(cur, sec_data->recs, sz); + cur += sz; + + return cur - output; +} + +static int finalize_btf_ext(struct bpf_linker *linker) +{ + size_t funcs_sz = 0, lines_sz = 0, core_relos_sz = 0, total_sz = 0; + size_t func_rec_sz = 0, line_rec_sz = 0, core_relo_rec_sz = 0; + struct btf_ext_header *hdr; + void *data, *cur; + int i, err, sz; + + /* validate that all sections have the same .BTF.ext record sizes + * and calculate total data size for each type of data (func info, + * line info, core relos) + */ + for (i = 1; i < linker->sec_cnt; i++) { + struct dst_sec *sec = &linker->secs[i]; + + if (sec->func_info.rec_cnt) { + if (func_rec_sz == 0) + func_rec_sz = sec->func_info.rec_sz; + if (func_rec_sz != sec->func_info.rec_sz) { + pr_warn("mismatch in func_info record size %zu != %u\n", + func_rec_sz, sec->func_info.rec_sz); + return -EINVAL; + } + + funcs_sz += sizeof(struct btf_ext_info_sec) + func_rec_sz * sec->func_info.rec_cnt; + } + if (sec->line_info.rec_cnt) { + if (line_rec_sz == 0) + line_rec_sz = sec->line_info.rec_sz; + if (line_rec_sz != sec->line_info.rec_sz) { + pr_warn("mismatch in line_info record size %zu != %u\n", + line_rec_sz, sec->line_info.rec_sz); + return -EINVAL; + } + + lines_sz += sizeof(struct btf_ext_info_sec) + line_rec_sz * sec->line_info.rec_cnt; + } + if (sec->core_relo_info.rec_cnt) { + if (core_relo_rec_sz == 0) + core_relo_rec_sz = sec->core_relo_info.rec_sz; + if (core_relo_rec_sz != 
sec->core_relo_info.rec_sz) { + pr_warn("mismatch in core_relo_info record size %zu != %u\n", + core_relo_rec_sz, sec->core_relo_info.rec_sz); + return -EINVAL; + } + + core_relos_sz += sizeof(struct btf_ext_info_sec) + core_relo_rec_sz * sec->core_relo_info.rec_cnt; + } + } + + if (!funcs_sz && !lines_sz && !core_relos_sz) + return 0; + + total_sz += sizeof(struct btf_ext_header); + if (funcs_sz) { + funcs_sz += sizeof(__u32); /* record size prefix */ + total_sz += funcs_sz; + } + if (lines_sz) { + lines_sz += sizeof(__u32); /* record size prefix */ + total_sz += lines_sz; + } + if (core_relos_sz) { + core_relos_sz += sizeof(__u32); /* record size prefix */ + total_sz += core_relos_sz; + } + + cur = data = calloc(1, total_sz); + if (!data) + return -ENOMEM; + + hdr = cur; + hdr->magic = BTF_MAGIC; + hdr->version = BTF_VERSION; + hdr->flags = 0; + hdr->hdr_len = sizeof(struct btf_ext_header); + cur += sizeof(struct btf_ext_header); + + /* All offsets are in bytes relative to the end of this header */ + hdr->func_info_off = 0; + hdr->func_info_len = funcs_sz; + hdr->line_info_off = funcs_sz; + hdr->line_info_len = lines_sz; + hdr->core_relo_off = funcs_sz + lines_sz; + hdr->core_relo_len = core_relos_sz; + + if (funcs_sz) { + *(__u32 *)cur = func_rec_sz; + cur += sizeof(__u32); + + for (i = 1; i < linker->sec_cnt; i++) { + struct dst_sec *sec = &linker->secs[i]; + + sz = emit_btf_ext_data(linker, cur, sec->sec_name, &sec->func_info); + if (sz < 0) { + err = sz; + goto out; + } + + cur += sz; + } + } + + if (lines_sz) { + *(__u32 *)cur = line_rec_sz; + cur += sizeof(__u32); + + for (i = 1; i < linker->sec_cnt; i++) { + struct dst_sec *sec = &linker->secs[i]; + + sz = emit_btf_ext_data(linker, cur, sec->sec_name, &sec->line_info); + if (sz < 0) { + err = sz; + goto out; + } + + cur += sz; + } + } + + if (core_relos_sz) { + *(__u32 *)cur = core_relo_rec_sz; + cur += sizeof(__u32); + + for (i = 1; i < linker->sec_cnt; i++) { + struct dst_sec *sec = &linker->secs[i]; + + sz = emit_btf_ext_data(linker, cur, sec->sec_name, &sec->core_relo_info); + if (sz < 0) { + err = sz; + goto out; + } + + cur += sz; + } + } + + linker->btf_ext = btf_ext__new(data, total_sz); + err = libbpf_get_error(linker->btf_ext); + if (err) { + linker->btf_ext = NULL; + pr_warn("failed to parse final .BTF.ext data: %d\n", err); + goto out; + } + +out: + free(data); + return err; +} diff --git a/pkg/plugin/lib/common/libbpf/_src/netlink.c b/pkg/plugin/lib/common/libbpf/_src/netlink.c new file mode 100644 index 000000000..cbc8967d5 --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/netlink.c @@ -0,0 +1,858 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +/* Copyright (c) 2018 Facebook */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bpf.h" +#include "libbpf.h" +#include "libbpf_internal.h" +#include "nlattr.h" + +#ifndef SOL_NETLINK +#define SOL_NETLINK 270 +#endif + +typedef int (*libbpf_dump_nlmsg_t)(void *cookie, void *msg, struct nlattr **tb); + +typedef int (*__dump_nlmsg_t)(struct nlmsghdr *nlmsg, libbpf_dump_nlmsg_t, + void *cookie); + +struct xdp_id_md { + int ifindex; + __u32 flags; + struct xdp_link_info info; +}; + +static int libbpf_netlink_open(__u32 *nl_pid) +{ + struct sockaddr_nl sa; + socklen_t addrlen; + int one = 1, ret; + int sock; + + memset(&sa, 0, sizeof(sa)); + sa.nl_family = AF_NETLINK; + + sock = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_ROUTE); + if (sock < 0) + return -errno; + + if 
(setsockopt(sock, SOL_NETLINK, NETLINK_EXT_ACK, + &one, sizeof(one)) < 0) { + pr_warn("Netlink error reporting not supported\n"); + } + + if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) { + ret = -errno; + goto cleanup; + } + + addrlen = sizeof(sa); + if (getsockname(sock, (struct sockaddr *)&sa, &addrlen) < 0) { + ret = -errno; + goto cleanup; + } + + if (addrlen != sizeof(sa)) { + ret = -LIBBPF_ERRNO__INTERNAL; + goto cleanup; + } + + *nl_pid = sa.nl_pid; + return sock; + +cleanup: + close(sock); + return ret; +} + +static void libbpf_netlink_close(int sock) +{ + close(sock); +} + +enum { + NL_CONT, + NL_NEXT, + NL_DONE, +}; + +static int netlink_recvmsg(int sock, struct msghdr *mhdr, int flags) +{ + int len; + + do { + len = recvmsg(sock, mhdr, flags); + } while (len < 0 && (errno == EINTR || errno == EAGAIN)); + + if (len < 0) + return -errno; + return len; +} + +static int alloc_iov(struct iovec *iov, int len) +{ + void *nbuf; + + nbuf = realloc(iov->iov_base, len); + if (!nbuf) + return -ENOMEM; + + iov->iov_base = nbuf; + iov->iov_len = len; + return 0; +} + +static int libbpf_netlink_recv(int sock, __u32 nl_pid, int seq, + __dump_nlmsg_t _fn, libbpf_dump_nlmsg_t fn, + void *cookie) +{ + struct iovec iov = {}; + struct msghdr mhdr = { + .msg_iov = &iov, + .msg_iovlen = 1, + }; + bool multipart = true; + struct nlmsgerr *err; + struct nlmsghdr *nh; + int len, ret; + + ret = alloc_iov(&iov, 4096); + if (ret) + goto done; + + while (multipart) { +start: + multipart = false; + len = netlink_recvmsg(sock, &mhdr, MSG_PEEK | MSG_TRUNC); + if (len < 0) { + ret = len; + goto done; + } + + if (len > iov.iov_len) { + ret = alloc_iov(&iov, len); + if (ret) + goto done; + } + + len = netlink_recvmsg(sock, &mhdr, 0); + if (len < 0) { + ret = len; + goto done; + } + + if (len == 0) + break; + + for (nh = (struct nlmsghdr *)iov.iov_base; NLMSG_OK(nh, len); + nh = NLMSG_NEXT(nh, len)) { + if (nh->nlmsg_pid != nl_pid) { + ret = -LIBBPF_ERRNO__WRNGPID; + goto done; + } + if (nh->nlmsg_seq != seq) { + ret = -LIBBPF_ERRNO__INVSEQ; + goto done; + } + if (nh->nlmsg_flags & NLM_F_MULTI) + multipart = true; + switch (nh->nlmsg_type) { + case NLMSG_ERROR: + err = (struct nlmsgerr *)NLMSG_DATA(nh); + if (!err->error) + continue; + ret = err->error; + libbpf_nla_dump_errormsg(nh); + goto done; + case NLMSG_DONE: + ret = 0; + goto done; + default: + break; + } + if (_fn) { + ret = _fn(nh, fn, cookie); + switch (ret) { + case NL_CONT: + break; + case NL_NEXT: + goto start; + case NL_DONE: + ret = 0; + goto done; + default: + goto done; + } + } + } + } + ret = 0; +done: + free(iov.iov_base); + return ret; +} + +static int libbpf_netlink_send_recv(struct libbpf_nla_req *req, + __dump_nlmsg_t parse_msg, + libbpf_dump_nlmsg_t parse_attr, + void *cookie) +{ + __u32 nl_pid = 0; + int sock, ret; + + sock = libbpf_netlink_open(&nl_pid); + if (sock < 0) + return sock; + + req->nh.nlmsg_pid = 0; + req->nh.nlmsg_seq = time(NULL); + + if (send(sock, req, req->nh.nlmsg_len, 0) < 0) { + ret = -errno; + goto out; + } + + ret = libbpf_netlink_recv(sock, nl_pid, req->nh.nlmsg_seq, + parse_msg, parse_attr, cookie); +out: + libbpf_netlink_close(sock); + return ret; +} + +static int __bpf_set_link_xdp_fd_replace(int ifindex, int fd, int old_fd, + __u32 flags) +{ + struct nlattr *nla; + int ret; + struct libbpf_nla_req req; + + memset(&req, 0, sizeof(req)); + req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)); + req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK; + req.nh.nlmsg_type = RTM_SETLINK; + req.ifinfo.ifi_family 
= AF_UNSPEC; + req.ifinfo.ifi_index = ifindex; + + nla = nlattr_begin_nested(&req, IFLA_XDP); + if (!nla) + return -EMSGSIZE; + ret = nlattr_add(&req, IFLA_XDP_FD, &fd, sizeof(fd)); + if (ret < 0) + return ret; + if (flags) { + ret = nlattr_add(&req, IFLA_XDP_FLAGS, &flags, sizeof(flags)); + if (ret < 0) + return ret; + } + if (flags & XDP_FLAGS_REPLACE) { + ret = nlattr_add(&req, IFLA_XDP_EXPECTED_FD, &old_fd, + sizeof(old_fd)); + if (ret < 0) + return ret; + } + nlattr_end_nested(&req, nla); + + return libbpf_netlink_send_recv(&req, NULL, NULL, NULL); +} + +int bpf_xdp_attach(int ifindex, int prog_fd, __u32 flags, const struct bpf_xdp_attach_opts *opts) +{ + int old_prog_fd, err; + + if (!OPTS_VALID(opts, bpf_xdp_attach_opts)) + return libbpf_err(-EINVAL); + + old_prog_fd = OPTS_GET(opts, old_prog_fd, 0); + if (old_prog_fd) + flags |= XDP_FLAGS_REPLACE; + else + old_prog_fd = -1; + + err = __bpf_set_link_xdp_fd_replace(ifindex, prog_fd, old_prog_fd, flags); + return libbpf_err(err); +} + +int bpf_xdp_detach(int ifindex, __u32 flags, const struct bpf_xdp_attach_opts *opts) +{ + return bpf_xdp_attach(ifindex, -1, flags, opts); +} + +int bpf_set_link_xdp_fd_opts(int ifindex, int fd, __u32 flags, + const struct bpf_xdp_set_link_opts *opts) +{ + int old_fd = -1, ret; + + if (!OPTS_VALID(opts, bpf_xdp_set_link_opts)) + return libbpf_err(-EINVAL); + + if (OPTS_HAS(opts, old_fd)) { + old_fd = OPTS_GET(opts, old_fd, -1); + flags |= XDP_FLAGS_REPLACE; + } + + ret = __bpf_set_link_xdp_fd_replace(ifindex, fd, old_fd, flags); + return libbpf_err(ret); +} + +int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags) +{ + int ret; + + ret = __bpf_set_link_xdp_fd_replace(ifindex, fd, 0, flags); + return libbpf_err(ret); +} + +static int __dump_link_nlmsg(struct nlmsghdr *nlh, + libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie) +{ + struct nlattr *tb[IFLA_MAX + 1], *attr; + struct ifinfomsg *ifi = NLMSG_DATA(nlh); + int len; + + len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)); + attr = (struct nlattr *) ((void *) ifi + NLMSG_ALIGN(sizeof(*ifi))); + + if (libbpf_nla_parse(tb, IFLA_MAX, attr, len, NULL) != 0) + return -LIBBPF_ERRNO__NLPARSE; + + return dump_link_nlmsg(cookie, ifi, tb); +} + +static int get_xdp_info(void *cookie, void *msg, struct nlattr **tb) +{ + struct nlattr *xdp_tb[IFLA_XDP_MAX + 1]; + struct xdp_id_md *xdp_id = cookie; + struct ifinfomsg *ifinfo = msg; + int ret; + + if (xdp_id->ifindex && xdp_id->ifindex != ifinfo->ifi_index) + return 0; + + if (!tb[IFLA_XDP]) + return 0; + + ret = libbpf_nla_parse_nested(xdp_tb, IFLA_XDP_MAX, tb[IFLA_XDP], NULL); + if (ret) + return ret; + + if (!xdp_tb[IFLA_XDP_ATTACHED]) + return 0; + + xdp_id->info.attach_mode = libbpf_nla_getattr_u8( + xdp_tb[IFLA_XDP_ATTACHED]); + + if (xdp_id->info.attach_mode == XDP_ATTACHED_NONE) + return 0; + + if (xdp_tb[IFLA_XDP_PROG_ID]) + xdp_id->info.prog_id = libbpf_nla_getattr_u32( + xdp_tb[IFLA_XDP_PROG_ID]); + + if (xdp_tb[IFLA_XDP_SKB_PROG_ID]) + xdp_id->info.skb_prog_id = libbpf_nla_getattr_u32( + xdp_tb[IFLA_XDP_SKB_PROG_ID]); + + if (xdp_tb[IFLA_XDP_DRV_PROG_ID]) + xdp_id->info.drv_prog_id = libbpf_nla_getattr_u32( + xdp_tb[IFLA_XDP_DRV_PROG_ID]); + + if (xdp_tb[IFLA_XDP_HW_PROG_ID]) + xdp_id->info.hw_prog_id = libbpf_nla_getattr_u32( + xdp_tb[IFLA_XDP_HW_PROG_ID]); + + return 0; +} + +int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts) +{ + struct libbpf_nla_req req = { + .nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)), + .nh.nlmsg_type = RTM_GETLINK, + .nh.nlmsg_flags = 
NLM_F_DUMP | NLM_F_REQUEST, + .ifinfo.ifi_family = AF_PACKET, + }; + struct xdp_id_md xdp_id = {}; + int err; + + if (!OPTS_VALID(opts, bpf_xdp_query_opts)) + return libbpf_err(-EINVAL); + + if (xdp_flags & ~XDP_FLAGS_MASK) + return libbpf_err(-EINVAL); + + /* Check whether the single {HW,DRV,SKB} mode is set */ + xdp_flags &= XDP_FLAGS_SKB_MODE | XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE; + if (xdp_flags & (xdp_flags - 1)) + return libbpf_err(-EINVAL); + + xdp_id.ifindex = ifindex; + xdp_id.flags = xdp_flags; + + err = libbpf_netlink_send_recv(&req, __dump_link_nlmsg, + get_xdp_info, &xdp_id); + if (err) + return libbpf_err(err); + + OPTS_SET(opts, prog_id, xdp_id.info.prog_id); + OPTS_SET(opts, drv_prog_id, xdp_id.info.drv_prog_id); + OPTS_SET(opts, hw_prog_id, xdp_id.info.hw_prog_id); + OPTS_SET(opts, skb_prog_id, xdp_id.info.skb_prog_id); + OPTS_SET(opts, attach_mode, xdp_id.info.attach_mode); + + return 0; +} + +int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info, + size_t info_size, __u32 flags) +{ + LIBBPF_OPTS(bpf_xdp_query_opts, opts); + size_t sz; + int err; + + if (!info_size) + return libbpf_err(-EINVAL); + + err = bpf_xdp_query(ifindex, flags, &opts); + if (err) + return libbpf_err(err); + + /* struct xdp_link_info field layout matches struct bpf_xdp_query_opts + * layout after sz field + */ + sz = min(info_size, offsetofend(struct xdp_link_info, attach_mode)); + memcpy(info, &opts.prog_id, sz); + memset((void *)info + sz, 0, info_size - sz); + + return 0; +} + +int bpf_xdp_query_id(int ifindex, int flags, __u32 *prog_id) +{ + LIBBPF_OPTS(bpf_xdp_query_opts, opts); + int ret; + + ret = bpf_xdp_query(ifindex, flags, &opts); + if (ret) + return libbpf_err(ret); + + flags &= XDP_FLAGS_MODES; + + if (opts.attach_mode != XDP_ATTACHED_MULTI && !flags) + *prog_id = opts.prog_id; + else if (flags & XDP_FLAGS_DRV_MODE) + *prog_id = opts.drv_prog_id; + else if (flags & XDP_FLAGS_HW_MODE) + *prog_id = opts.hw_prog_id; + else if (flags & XDP_FLAGS_SKB_MODE) + *prog_id = opts.skb_prog_id; + else + *prog_id = 0; + + return 0; +} + + +int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags) +{ + return bpf_xdp_query_id(ifindex, flags, prog_id); +} + +typedef int (*qdisc_config_t)(struct libbpf_nla_req *req); + +static int clsact_config(struct libbpf_nla_req *req) +{ + req->tc.tcm_parent = TC_H_CLSACT; + req->tc.tcm_handle = TC_H_MAKE(TC_H_CLSACT, 0); + + return nlattr_add(req, TCA_KIND, "clsact", sizeof("clsact")); +} + +static int attach_point_to_config(struct bpf_tc_hook *hook, + qdisc_config_t *config) +{ + switch (OPTS_GET(hook, attach_point, 0)) { + case BPF_TC_INGRESS: + case BPF_TC_EGRESS: + case BPF_TC_INGRESS | BPF_TC_EGRESS: + if (OPTS_GET(hook, parent, 0)) + return -EINVAL; + *config = &clsact_config; + return 0; + case BPF_TC_CUSTOM: + return -EOPNOTSUPP; + default: + return -EINVAL; + } +} + +static int tc_get_tcm_parent(enum bpf_tc_attach_point attach_point, + __u32 *parent) +{ + switch (attach_point) { + case BPF_TC_INGRESS: + case BPF_TC_EGRESS: + if (*parent) + return -EINVAL; + *parent = TC_H_MAKE(TC_H_CLSACT, + attach_point == BPF_TC_INGRESS ? 
+ TC_H_MIN_INGRESS : TC_H_MIN_EGRESS); + break; + case BPF_TC_CUSTOM: + if (!*parent) + return -EINVAL; + break; + default: + return -EINVAL; + } + return 0; +} + +static int tc_qdisc_modify(struct bpf_tc_hook *hook, int cmd, int flags) +{ + qdisc_config_t config; + int ret; + struct libbpf_nla_req req; + + ret = attach_point_to_config(hook, &config); + if (ret < 0) + return ret; + + memset(&req, 0, sizeof(req)); + req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)); + req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | flags; + req.nh.nlmsg_type = cmd; + req.tc.tcm_family = AF_UNSPEC; + req.tc.tcm_ifindex = OPTS_GET(hook, ifindex, 0); + + ret = config(&req); + if (ret < 0) + return ret; + + return libbpf_netlink_send_recv(&req, NULL, NULL, NULL); +} + +static int tc_qdisc_create_excl(struct bpf_tc_hook *hook) +{ + return tc_qdisc_modify(hook, RTM_NEWQDISC, NLM_F_CREATE | NLM_F_EXCL); +} + +static int tc_qdisc_delete(struct bpf_tc_hook *hook) +{ + return tc_qdisc_modify(hook, RTM_DELQDISC, 0); +} + +int bpf_tc_hook_create(struct bpf_tc_hook *hook) +{ + int ret; + + if (!hook || !OPTS_VALID(hook, bpf_tc_hook) || + OPTS_GET(hook, ifindex, 0) <= 0) + return libbpf_err(-EINVAL); + + ret = tc_qdisc_create_excl(hook); + return libbpf_err(ret); +} + +static int __bpf_tc_detach(const struct bpf_tc_hook *hook, + const struct bpf_tc_opts *opts, + const bool flush); + +int bpf_tc_hook_destroy(struct bpf_tc_hook *hook) +{ + if (!hook || !OPTS_VALID(hook, bpf_tc_hook) || + OPTS_GET(hook, ifindex, 0) <= 0) + return libbpf_err(-EINVAL); + + switch (OPTS_GET(hook, attach_point, 0)) { + case BPF_TC_INGRESS: + case BPF_TC_EGRESS: + return libbpf_err(__bpf_tc_detach(hook, NULL, true)); + case BPF_TC_INGRESS | BPF_TC_EGRESS: + return libbpf_err(tc_qdisc_delete(hook)); + case BPF_TC_CUSTOM: + return libbpf_err(-EOPNOTSUPP); + default: + return libbpf_err(-EINVAL); + } +} + +struct bpf_cb_ctx { + struct bpf_tc_opts *opts; + bool processed; +}; + +static int __get_tc_info(void *cookie, struct tcmsg *tc, struct nlattr **tb, + bool unicast) +{ + struct nlattr *tbb[TCA_BPF_MAX + 1]; + struct bpf_cb_ctx *info = cookie; + + if (!info || !info->opts) + return -EINVAL; + if (unicast && info->processed) + return -EINVAL; + if (!tb[TCA_OPTIONS]) + return NL_CONT; + + libbpf_nla_parse_nested(tbb, TCA_BPF_MAX, tb[TCA_OPTIONS], NULL); + if (!tbb[TCA_BPF_ID]) + return -EINVAL; + + OPTS_SET(info->opts, prog_id, libbpf_nla_getattr_u32(tbb[TCA_BPF_ID])); + OPTS_SET(info->opts, handle, tc->tcm_handle); + OPTS_SET(info->opts, priority, TC_H_MAJ(tc->tcm_info) >> 16); + + info->processed = true; + return unicast ? 
NL_NEXT : NL_DONE; +} + +static int get_tc_info(struct nlmsghdr *nh, libbpf_dump_nlmsg_t fn, + void *cookie) +{ + struct tcmsg *tc = NLMSG_DATA(nh); + struct nlattr *tb[TCA_MAX + 1]; + + libbpf_nla_parse(tb, TCA_MAX, + (struct nlattr *)((void *)tc + NLMSG_ALIGN(sizeof(*tc))), + NLMSG_PAYLOAD(nh, sizeof(*tc)), NULL); + if (!tb[TCA_KIND]) + return NL_CONT; + return __get_tc_info(cookie, tc, tb, nh->nlmsg_flags & NLM_F_ECHO); +} + +static int tc_add_fd_and_name(struct libbpf_nla_req *req, int fd) +{ + struct bpf_prog_info info = {}; + __u32 info_len = sizeof(info); + char name[256]; + int len, ret; + + ret = bpf_obj_get_info_by_fd(fd, &info, &info_len); + if (ret < 0) + return ret; + + ret = nlattr_add(req, TCA_BPF_FD, &fd, sizeof(fd)); + if (ret < 0) + return ret; + len = snprintf(name, sizeof(name), "%s:[%u]", info.name, info.id); + if (len < 0) + return -errno; + if (len >= sizeof(name)) + return -ENAMETOOLONG; + return nlattr_add(req, TCA_BPF_NAME, name, len + 1); +} + +int bpf_tc_attach(const struct bpf_tc_hook *hook, struct bpf_tc_opts *opts) +{ + __u32 protocol, bpf_flags, handle, priority, parent, prog_id, flags; + int ret, ifindex, attach_point, prog_fd; + struct bpf_cb_ctx info = {}; + struct libbpf_nla_req req; + struct nlattr *nla; + + if (!hook || !opts || + !OPTS_VALID(hook, bpf_tc_hook) || + !OPTS_VALID(opts, bpf_tc_opts)) + return libbpf_err(-EINVAL); + + ifindex = OPTS_GET(hook, ifindex, 0); + parent = OPTS_GET(hook, parent, 0); + attach_point = OPTS_GET(hook, attach_point, 0); + + handle = OPTS_GET(opts, handle, 0); + priority = OPTS_GET(opts, priority, 0); + prog_fd = OPTS_GET(opts, prog_fd, 0); + prog_id = OPTS_GET(opts, prog_id, 0); + flags = OPTS_GET(opts, flags, 0); + + if (ifindex <= 0 || !prog_fd || prog_id) + return libbpf_err(-EINVAL); + if (priority > UINT16_MAX) + return libbpf_err(-EINVAL); + if (flags & ~BPF_TC_F_REPLACE) + return libbpf_err(-EINVAL); + + flags = (flags & BPF_TC_F_REPLACE) ? 
NLM_F_REPLACE : NLM_F_EXCL; + protocol = ETH_P_ALL; + + memset(&req, 0, sizeof(req)); + req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)); + req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE | + NLM_F_ECHO | flags; + req.nh.nlmsg_type = RTM_NEWTFILTER; + req.tc.tcm_family = AF_UNSPEC; + req.tc.tcm_ifindex = ifindex; + req.tc.tcm_handle = handle; + req.tc.tcm_info = TC_H_MAKE(priority << 16, htons(protocol)); + + ret = tc_get_tcm_parent(attach_point, &parent); + if (ret < 0) + return libbpf_err(ret); + req.tc.tcm_parent = parent; + + ret = nlattr_add(&req, TCA_KIND, "bpf", sizeof("bpf")); + if (ret < 0) + return libbpf_err(ret); + nla = nlattr_begin_nested(&req, TCA_OPTIONS); + if (!nla) + return libbpf_err(-EMSGSIZE); + ret = tc_add_fd_and_name(&req, prog_fd); + if (ret < 0) + return libbpf_err(ret); + bpf_flags = TCA_BPF_FLAG_ACT_DIRECT; + ret = nlattr_add(&req, TCA_BPF_FLAGS, &bpf_flags, sizeof(bpf_flags)); + if (ret < 0) + return libbpf_err(ret); + nlattr_end_nested(&req, nla); + + info.opts = opts; + + ret = libbpf_netlink_send_recv(&req, get_tc_info, NULL, &info); + if (ret < 0) + return libbpf_err(ret); + if (!info.processed) + return libbpf_err(-ENOENT); + return ret; +} + +static int __bpf_tc_detach(const struct bpf_tc_hook *hook, + const struct bpf_tc_opts *opts, + const bool flush) +{ + __u32 protocol = 0, handle, priority, parent, prog_id, flags; + int ret, ifindex, attach_point, prog_fd; + struct libbpf_nla_req req; + + if (!hook || + !OPTS_VALID(hook, bpf_tc_hook) || + !OPTS_VALID(opts, bpf_tc_opts)) + return -EINVAL; + + ifindex = OPTS_GET(hook, ifindex, 0); + parent = OPTS_GET(hook, parent, 0); + attach_point = OPTS_GET(hook, attach_point, 0); + + handle = OPTS_GET(opts, handle, 0); + priority = OPTS_GET(opts, priority, 0); + prog_fd = OPTS_GET(opts, prog_fd, 0); + prog_id = OPTS_GET(opts, prog_id, 0); + flags = OPTS_GET(opts, flags, 0); + + if (ifindex <= 0 || flags || prog_fd || prog_id) + return -EINVAL; + if (priority > UINT16_MAX) + return -EINVAL; + if (!flush) { + if (!handle || !priority) + return -EINVAL; + protocol = ETH_P_ALL; + } else { + if (handle || priority) + return -EINVAL; + } + + memset(&req, 0, sizeof(req)); + req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)); + req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK; + req.nh.nlmsg_type = RTM_DELTFILTER; + req.tc.tcm_family = AF_UNSPEC; + req.tc.tcm_ifindex = ifindex; + if (!flush) { + req.tc.tcm_handle = handle; + req.tc.tcm_info = TC_H_MAKE(priority << 16, htons(protocol)); + } + + ret = tc_get_tcm_parent(attach_point, &parent); + if (ret < 0) + return ret; + req.tc.tcm_parent = parent; + + if (!flush) { + ret = nlattr_add(&req, TCA_KIND, "bpf", sizeof("bpf")); + if (ret < 0) + return ret; + } + + return libbpf_netlink_send_recv(&req, NULL, NULL, NULL); +} + +int bpf_tc_detach(const struct bpf_tc_hook *hook, + const struct bpf_tc_opts *opts) +{ + int ret; + + if (!opts) + return libbpf_err(-EINVAL); + + ret = __bpf_tc_detach(hook, opts, false); + return libbpf_err(ret); +} + +int bpf_tc_query(const struct bpf_tc_hook *hook, struct bpf_tc_opts *opts) +{ + __u32 protocol, handle, priority, parent, prog_id, flags; + int ret, ifindex, attach_point, prog_fd; + struct bpf_cb_ctx info = {}; + struct libbpf_nla_req req; + + if (!hook || !opts || + !OPTS_VALID(hook, bpf_tc_hook) || + !OPTS_VALID(opts, bpf_tc_opts)) + return libbpf_err(-EINVAL); + + ifindex = OPTS_GET(hook, ifindex, 0); + parent = OPTS_GET(hook, parent, 0); + attach_point = OPTS_GET(hook, attach_point, 0); + + handle = OPTS_GET(opts, 
handle, 0); + priority = OPTS_GET(opts, priority, 0); + prog_fd = OPTS_GET(opts, prog_fd, 0); + prog_id = OPTS_GET(opts, prog_id, 0); + flags = OPTS_GET(opts, flags, 0); + + if (ifindex <= 0 || flags || prog_fd || prog_id || + !handle || !priority) + return libbpf_err(-EINVAL); + if (priority > UINT16_MAX) + return libbpf_err(-EINVAL); + + protocol = ETH_P_ALL; + + memset(&req, 0, sizeof(req)); + req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)); + req.nh.nlmsg_flags = NLM_F_REQUEST; + req.nh.nlmsg_type = RTM_GETTFILTER; + req.tc.tcm_family = AF_UNSPEC; + req.tc.tcm_ifindex = ifindex; + req.tc.tcm_handle = handle; + req.tc.tcm_info = TC_H_MAKE(priority << 16, htons(protocol)); + + ret = tc_get_tcm_parent(attach_point, &parent); + if (ret < 0) + return libbpf_err(ret); + req.tc.tcm_parent = parent; + + ret = nlattr_add(&req, TCA_KIND, "bpf", sizeof("bpf")); + if (ret < 0) + return libbpf_err(ret); + + info.opts = opts; + + ret = libbpf_netlink_send_recv(&req, get_tc_info, NULL, &info); + if (ret < 0) + return libbpf_err(ret); + if (!info.processed) + return libbpf_err(-ENOENT); + return ret; +} diff --git a/pkg/plugin/lib/common/libbpf/_src/nlattr.c b/pkg/plugin/lib/common/libbpf/_src/nlattr.c new file mode 100644 index 000000000..f57e77a6e --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/nlattr.c @@ -0,0 +1,195 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * NETLINK Netlink attributes + * + * Copyright (c) 2003-2013 Thomas Graf + */ + +#include +#include +#include +#include +#include "nlattr.h" +#include "libbpf_internal.h" + +static uint16_t nla_attr_minlen[LIBBPF_NLA_TYPE_MAX+1] = { + [LIBBPF_NLA_U8] = sizeof(uint8_t), + [LIBBPF_NLA_U16] = sizeof(uint16_t), + [LIBBPF_NLA_U32] = sizeof(uint32_t), + [LIBBPF_NLA_U64] = sizeof(uint64_t), + [LIBBPF_NLA_STRING] = 1, + [LIBBPF_NLA_FLAG] = 0, +}; + +static struct nlattr *nla_next(const struct nlattr *nla, int *remaining) +{ + int totlen = NLA_ALIGN(nla->nla_len); + + *remaining -= totlen; + return (struct nlattr *)((void *)nla + totlen); +} + +static int nla_ok(const struct nlattr *nla, int remaining) +{ + return remaining >= sizeof(*nla) && + nla->nla_len >= sizeof(*nla) && + nla->nla_len <= remaining; +} + +static int nla_type(const struct nlattr *nla) +{ + return nla->nla_type & NLA_TYPE_MASK; +} + +static int validate_nla(struct nlattr *nla, int maxtype, + struct libbpf_nla_policy *policy) +{ + struct libbpf_nla_policy *pt; + unsigned int minlen = 0; + int type = nla_type(nla); + + if (type < 0 || type > maxtype) + return 0; + + pt = &policy[type]; + + if (pt->type > LIBBPF_NLA_TYPE_MAX) + return 0; + + if (pt->minlen) + minlen = pt->minlen; + else if (pt->type != LIBBPF_NLA_UNSPEC) + minlen = nla_attr_minlen[pt->type]; + + if (libbpf_nla_len(nla) < minlen) + return -1; + + if (pt->maxlen && libbpf_nla_len(nla) > pt->maxlen) + return -1; + + if (pt->type == LIBBPF_NLA_STRING) { + char *data = libbpf_nla_data(nla); + + if (data[libbpf_nla_len(nla) - 1] != '\0') + return -1; + } + + return 0; +} + +static inline int nlmsg_len(const struct nlmsghdr *nlh) +{ + return nlh->nlmsg_len - NLMSG_HDRLEN; +} + +/** + * Create attribute index based on a stream of attributes. + * @arg tb Index array to be filled (maxtype+1 elements). + * @arg maxtype Maximum attribute type expected and accepted. + * @arg head Head of attribute stream. + * @arg len Length of attribute stream. + * @arg policy Attribute validation policy. 
+ * + * Iterates over the stream of attributes and stores a pointer to each + * attribute in the index array using the attribute type as index to + * the array. Attribute with a type greater than the maximum type + * specified will be silently ignored in order to maintain backwards + * compatibility. If \a policy is not NULL, the attribute will be + * validated using the specified policy. + * + * @see nla_validate + * @return 0 on success or a negative error code. + */ +int libbpf_nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, + int len, struct libbpf_nla_policy *policy) +{ + struct nlattr *nla; + int rem, err; + + memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); + + libbpf_nla_for_each_attr(nla, head, len, rem) { + int type = nla_type(nla); + + if (type > maxtype) + continue; + + if (policy) { + err = validate_nla(nla, maxtype, policy); + if (err < 0) + goto errout; + } + + if (tb[type]) + pr_warn("Attribute of type %#x found multiple times in message, " + "previous attribute is being ignored.\n", type); + + tb[type] = nla; + } + + err = 0; +errout: + return err; +} + +/** + * Create attribute index based on nested attribute + * @arg tb Index array to be filled (maxtype+1 elements). + * @arg maxtype Maximum attribute type expected and accepted. + * @arg nla Nested Attribute. + * @arg policy Attribute validation policy. + * + * Feeds the stream of attributes nested into the specified attribute + * to libbpf_nla_parse(). + * + * @see libbpf_nla_parse + * @return 0 on success or a negative error code. + */ +int libbpf_nla_parse_nested(struct nlattr *tb[], int maxtype, + struct nlattr *nla, + struct libbpf_nla_policy *policy) +{ + return libbpf_nla_parse(tb, maxtype, libbpf_nla_data(nla), + libbpf_nla_len(nla), policy); +} + +/* dump netlink extended ack error message */ +int libbpf_nla_dump_errormsg(struct nlmsghdr *nlh) +{ + struct libbpf_nla_policy extack_policy[NLMSGERR_ATTR_MAX + 1] = { + [NLMSGERR_ATTR_MSG] = { .type = LIBBPF_NLA_STRING }, + [NLMSGERR_ATTR_OFFS] = { .type = LIBBPF_NLA_U32 }, + }; + struct nlattr *tb[NLMSGERR_ATTR_MAX + 1], *attr; + struct nlmsgerr *err; + char *errmsg = NULL; + int hlen, alen; + + /* no TLVs, nothing to do here */ + if (!(nlh->nlmsg_flags & NLM_F_ACK_TLVS)) + return 0; + + err = (struct nlmsgerr *)NLMSG_DATA(nlh); + hlen = sizeof(*err); + + /* if NLM_F_CAPPED is set then the inner err msg was capped */ + if (!(nlh->nlmsg_flags & NLM_F_CAPPED)) + hlen += nlmsg_len(&err->msg); + + attr = (struct nlattr *) ((void *) err + hlen); + alen = nlh->nlmsg_len - hlen; + + if (libbpf_nla_parse(tb, NLMSGERR_ATTR_MAX, attr, alen, + extack_policy) != 0) { + pr_warn("Failed to parse extended error attributes\n"); + return 0; + } + + if (tb[NLMSGERR_ATTR_MSG]) + errmsg = (char *) libbpf_nla_data(tb[NLMSGERR_ATTR_MSG]); + + pr_warn("Kernel error message: %s\n", errmsg); + + return 0; +} diff --git a/pkg/plugin/lib/common/libbpf/_src/nlattr.h b/pkg/plugin/lib/common/libbpf/_src/nlattr.h new file mode 100644 index 000000000..4d15ae2ff --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/nlattr.h @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ + +/* + * NETLINK Netlink attributes + * + * Copyright (c) 2003-2013 Thomas Graf + */ + +#ifndef __LIBBPF_NLATTR_H +#define __LIBBPF_NLATTR_H + +#include +#include +#include +#include +#include + +/* avoid multiple definition of netlink features */ +#define __LINUX_NETLINK_H + +/** + * Standard attribute types to specify validation policy + */ +enum { + LIBBPF_NLA_UNSPEC, /**< 
Unspecified type, binary data chunk */ + LIBBPF_NLA_U8, /**< 8 bit integer */ + LIBBPF_NLA_U16, /**< 16 bit integer */ + LIBBPF_NLA_U32, /**< 32 bit integer */ + LIBBPF_NLA_U64, /**< 64 bit integer */ + LIBBPF_NLA_STRING, /**< NUL terminated character string */ + LIBBPF_NLA_FLAG, /**< Flag */ + LIBBPF_NLA_MSECS, /**< Micro seconds (64bit) */ + LIBBPF_NLA_NESTED, /**< Nested attributes */ + __LIBBPF_NLA_TYPE_MAX, +}; + +#define LIBBPF_NLA_TYPE_MAX (__LIBBPF_NLA_TYPE_MAX - 1) + +/** + * @ingroup attr + * Attribute validation policy. + * + * See section @core_doc{core_attr_parse,Attribute Parsing} for more details. + */ +struct libbpf_nla_policy { + /** Type of attribute or LIBBPF_NLA_UNSPEC */ + uint16_t type; + + /** Minimal length of payload required */ + uint16_t minlen; + + /** Maximal length of payload allowed */ + uint16_t maxlen; +}; + +struct libbpf_nla_req { + struct nlmsghdr nh; + union { + struct ifinfomsg ifinfo; + struct tcmsg tc; + }; + char buf[128]; +}; + +/** + * @ingroup attr + * Iterate over a stream of attributes + * @arg pos loop counter, set to current attribute + * @arg head head of attribute stream + * @arg len length of attribute stream + * @arg rem initialized to len, holds bytes currently remaining in stream + */ +#define libbpf_nla_for_each_attr(pos, head, len, rem) \ + for (pos = head, rem = len; \ + nla_ok(pos, rem); \ + pos = nla_next(pos, &(rem))) + +/** + * libbpf_nla_data - head of payload + * @nla: netlink attribute + */ +static inline void *libbpf_nla_data(const struct nlattr *nla) +{ + return (void *)nla + NLA_HDRLEN; +} + +static inline uint8_t libbpf_nla_getattr_u8(const struct nlattr *nla) +{ + return *(uint8_t *)libbpf_nla_data(nla); +} + +static inline uint32_t libbpf_nla_getattr_u32(const struct nlattr *nla) +{ + return *(uint32_t *)libbpf_nla_data(nla); +} + +static inline const char *libbpf_nla_getattr_str(const struct nlattr *nla) +{ + return (const char *)libbpf_nla_data(nla); +} + +/** + * libbpf_nla_len - length of payload + * @nla: netlink attribute + */ +static inline int libbpf_nla_len(const struct nlattr *nla) +{ + return nla->nla_len - NLA_HDRLEN; +} + +int libbpf_nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, + int len, struct libbpf_nla_policy *policy); +int libbpf_nla_parse_nested(struct nlattr *tb[], int maxtype, + struct nlattr *nla, + struct libbpf_nla_policy *policy); + +int libbpf_nla_dump_errormsg(struct nlmsghdr *nlh); + +static inline struct nlattr *nla_data(struct nlattr *nla) +{ + return (struct nlattr *)((void *)nla + NLA_HDRLEN); +} + +static inline struct nlattr *req_tail(struct libbpf_nla_req *req) +{ + return (struct nlattr *)((void *)req + NLMSG_ALIGN(req->nh.nlmsg_len)); +} + +static inline int nlattr_add(struct libbpf_nla_req *req, int type, + const void *data, int len) +{ + struct nlattr *nla; + + if (NLMSG_ALIGN(req->nh.nlmsg_len) + NLA_ALIGN(NLA_HDRLEN + len) > sizeof(*req)) + return -EMSGSIZE; + if (!!data != !!len) + return -EINVAL; + + nla = req_tail(req); + nla->nla_type = type; + nla->nla_len = NLA_HDRLEN + len; + if (data) + memcpy(nla_data(nla), data, len); + req->nh.nlmsg_len = NLMSG_ALIGN(req->nh.nlmsg_len) + NLA_ALIGN(nla->nla_len); + return 0; +} + +static inline struct nlattr *nlattr_begin_nested(struct libbpf_nla_req *req, int type) +{ + struct nlattr *tail; + + tail = req_tail(req); + if (nlattr_add(req, type | NLA_F_NESTED, NULL, 0)) + return NULL; + return tail; +} + +static inline void nlattr_end_nested(struct libbpf_nla_req *req, + struct nlattr *tail) +{ + tail->nla_len = (void 
*)req_tail(req) - (void *)tail; +} + +#endif /* __LIBBPF_NLATTR_H */ diff --git a/pkg/plugin/lib/common/libbpf/_src/placeholder.go b/pkg/plugin/lib/common/libbpf/_src/placeholder.go new file mode 100644 index 000000000..990961df1 --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/placeholder.go @@ -0,0 +1,3 @@ +package libbpf //nolint:all + +// This file is a placeholder to make Go include this directory when vendoring. diff --git a/pkg/plugin/lib/common/libbpf/_src/relo_core.c b/pkg/plugin/lib/common/libbpf/_src/relo_core.c new file mode 100644 index 000000000..ba4453dfd --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/relo_core.c @@ -0,0 +1,1307 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +/* Copyright (c) 2019 Facebook */ + +#ifdef __KERNEL__ +#include +#include +#include +#include +#include "relo_core.h" + +static const char *btf_kind_str(const struct btf_type *t) +{ + return btf_type_str(t); +} + +static bool is_ldimm64_insn(struct bpf_insn *insn) +{ + return insn->code == (BPF_LD | BPF_IMM | BPF_DW); +} + +static const struct btf_type * +skip_mods_and_typedefs(const struct btf *btf, u32 id, u32 *res_id) +{ + return btf_type_skip_modifiers(btf, id, res_id); +} + +static const char *btf__name_by_offset(const struct btf *btf, u32 offset) +{ + return btf_name_by_offset(btf, offset); +} + +static s64 btf__resolve_size(const struct btf *btf, u32 type_id) +{ + const struct btf_type *t; + int size; + + t = btf_type_by_id(btf, type_id); + t = btf_resolve_size(btf, t, &size); + if (IS_ERR(t)) + return PTR_ERR(t); + return size; +} + +enum libbpf_print_level { + LIBBPF_WARN, + LIBBPF_INFO, + LIBBPF_DEBUG, +}; + +#undef pr_warn +#undef pr_info +#undef pr_debug +#define pr_warn(fmt, log, ...) bpf_log((void *)log, fmt, "", ##__VA_ARGS__) +#define pr_info(fmt, log, ...) bpf_log((void *)log, fmt, "", ##__VA_ARGS__) +#define pr_debug(fmt, log, ...) bpf_log((void *)log, fmt, "", ##__VA_ARGS__) +#define libbpf_print(level, fmt, ...) 
bpf_log((void *)prog_name, fmt, ##__VA_ARGS__) +#else +#include +#include +#include +#include +#include + +#include "libbpf.h" +#include "bpf.h" +#include "btf.h" +#include "str_error.h" +#include "libbpf_internal.h" +#endif + +static bool is_flex_arr(const struct btf *btf, + const struct bpf_core_accessor *acc, + const struct btf_array *arr) +{ + const struct btf_type *t; + + /* not a flexible array, if not inside a struct or has non-zero size */ + if (!acc->name || arr->nelems > 0) + return false; + + /* has to be the last member of enclosing struct */ + t = btf_type_by_id(btf, acc->type_id); + return acc->idx == btf_vlen(t) - 1; +} + +static const char *core_relo_kind_str(enum bpf_core_relo_kind kind) +{ + switch (kind) { + case BPF_CORE_FIELD_BYTE_OFFSET: return "byte_off"; + case BPF_CORE_FIELD_BYTE_SIZE: return "byte_sz"; + case BPF_CORE_FIELD_EXISTS: return "field_exists"; + case BPF_CORE_FIELD_SIGNED: return "signed"; + case BPF_CORE_FIELD_LSHIFT_U64: return "lshift_u64"; + case BPF_CORE_FIELD_RSHIFT_U64: return "rshift_u64"; + case BPF_CORE_TYPE_ID_LOCAL: return "local_type_id"; + case BPF_CORE_TYPE_ID_TARGET: return "target_type_id"; + case BPF_CORE_TYPE_EXISTS: return "type_exists"; + case BPF_CORE_TYPE_SIZE: return "type_size"; + case BPF_CORE_ENUMVAL_EXISTS: return "enumval_exists"; + case BPF_CORE_ENUMVAL_VALUE: return "enumval_value"; + default: return "unknown"; + } +} + +static bool core_relo_is_field_based(enum bpf_core_relo_kind kind) +{ + switch (kind) { + case BPF_CORE_FIELD_BYTE_OFFSET: + case BPF_CORE_FIELD_BYTE_SIZE: + case BPF_CORE_FIELD_EXISTS: + case BPF_CORE_FIELD_SIGNED: + case BPF_CORE_FIELD_LSHIFT_U64: + case BPF_CORE_FIELD_RSHIFT_U64: + return true; + default: + return false; + } +} + +static bool core_relo_is_type_based(enum bpf_core_relo_kind kind) +{ + switch (kind) { + case BPF_CORE_TYPE_ID_LOCAL: + case BPF_CORE_TYPE_ID_TARGET: + case BPF_CORE_TYPE_EXISTS: + case BPF_CORE_TYPE_SIZE: + return true; + default: + return false; + } +} + +static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind) +{ + switch (kind) { + case BPF_CORE_ENUMVAL_EXISTS: + case BPF_CORE_ENUMVAL_VALUE: + return true; + default: + return false; + } +} + +/* + * Turn bpf_core_relo into a low- and high-level spec representation, + * validating correctness along the way, as well as calculating resulting + * field bit offset, specified by accessor string. Low-level spec captures + * every single level of nestedness, including traversing anonymous + * struct/union members. High-level one only captures semantically meaningful + * "turning points": named fields and array indicies. + * E.g., for this case: + * + * struct sample { + * int __unimportant; + * struct { + * int __1; + * int __2; + * int a[7]; + * }; + * }; + * + * struct sample *s = ...; + * + * int x = &s->a[3]; // access string = '0:1:2:3' + * + * Low-level spec has 1:1 mapping with each element of access string (it's + * just a parsed access string representation): [0, 1, 2, 3]. + * + * High-level spec will capture only 3 points: + * - intial zero-index access by pointer (&s->... is the same as &s[0]...); + * - field 'a' access (corresponds to '2' in low-level spec); + * - array element #3 access (corresponds to '3' in low-level spec). + * + * Type-based relocations (TYPE_EXISTS/TYPE_SIZE, + * TYPE_ID_LOCAL/TYPE_ID_TARGET) don't capture any field information. Their + * spec and raw_spec are kept empty. 
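+ * (For example, a TYPE_EXISTS or TYPE_SIZE relocation still carries the
+ * access string "0"; bpf_core_parse_spec() below accepts only "0" for
+ * them and records no accessors.)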
+ * + * Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use access + * string to specify enumerator's value index that need to be relocated. + */ +int bpf_core_parse_spec(const char *prog_name, const struct btf *btf, + const struct bpf_core_relo *relo, + struct bpf_core_spec *spec) +{ + int access_idx, parsed_len, i; + struct bpf_core_accessor *acc; + const struct btf_type *t; + const char *name, *spec_str; + __u32 id; + __s64 sz; + + spec_str = btf__name_by_offset(btf, relo->access_str_off); + if (str_is_empty(spec_str) || *spec_str == ':') + return -EINVAL; + + memset(spec, 0, sizeof(*spec)); + spec->btf = btf; + spec->root_type_id = relo->type_id; + spec->relo_kind = relo->kind; + + /* type-based relocations don't have a field access string */ + if (core_relo_is_type_based(relo->kind)) { + if (strcmp(spec_str, "0")) + return -EINVAL; + return 0; + } + + /* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */ + while (*spec_str) { + if (*spec_str == ':') + ++spec_str; + if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1) + return -EINVAL; + if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN) + return -E2BIG; + spec_str += parsed_len; + spec->raw_spec[spec->raw_len++] = access_idx; + } + + if (spec->raw_len == 0) + return -EINVAL; + + t = skip_mods_and_typedefs(btf, relo->type_id, &id); + if (!t) + return -EINVAL; + + access_idx = spec->raw_spec[0]; + acc = &spec->spec[0]; + acc->type_id = id; + acc->idx = access_idx; + spec->len++; + + if (core_relo_is_enumval_based(relo->kind)) { + if (!btf_is_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t)) + return -EINVAL; + + /* record enumerator name in a first accessor */ + acc->name = btf__name_by_offset(btf, btf_enum(t)[access_idx].name_off); + return 0; + } + + if (!core_relo_is_field_based(relo->kind)) + return -EINVAL; + + sz = btf__resolve_size(btf, id); + if (sz < 0) + return sz; + spec->bit_offset = access_idx * sz * 8; + + for (i = 1; i < spec->raw_len; i++) { + t = skip_mods_and_typedefs(btf, id, &id); + if (!t) + return -EINVAL; + + access_idx = spec->raw_spec[i]; + acc = &spec->spec[spec->len]; + + if (btf_is_composite(t)) { + const struct btf_member *m; + __u32 bit_offset; + + if (access_idx >= btf_vlen(t)) + return -EINVAL; + + bit_offset = btf_member_bit_offset(t, access_idx); + spec->bit_offset += bit_offset; + + m = btf_members(t) + access_idx; + if (m->name_off) { + name = btf__name_by_offset(btf, m->name_off); + if (str_is_empty(name)) + return -EINVAL; + + acc->type_id = id; + acc->idx = access_idx; + acc->name = name; + spec->len++; + } + + id = m->type; + } else if (btf_is_array(t)) { + const struct btf_array *a = btf_array(t); + bool flex; + + t = skip_mods_and_typedefs(btf, a->type, &id); + if (!t) + return -EINVAL; + + flex = is_flex_arr(btf, acc - 1, a); + if (!flex && access_idx >= a->nelems) + return -EINVAL; + + spec->spec[spec->len].type_id = id; + spec->spec[spec->len].idx = access_idx; + spec->len++; + + sz = btf__resolve_size(btf, id); + if (sz < 0) + return sz; + spec->bit_offset += access_idx * sz * 8; + } else { + pr_warn("prog '%s': relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n", + prog_name, relo->type_id, spec_str, i, id, btf_kind_str(t)); + return -EINVAL; + } + } + + return 0; +} + +/* Check two types for compatibility for the purpose of field access + * relocation. 
const/volatile/restrict and typedefs are skipped to ensure we + * are relocating semantically compatible entities: + * - any two STRUCTs/UNIONs are compatible and can be mixed; + * - any two FWDs are compatible, if their names match (modulo flavor suffix); + * - any two PTRs are always compatible; + * - for ENUMs, names should be the same (ignoring flavor suffix) or at + * least one of enums should be anonymous; + * - for ENUMs, check sizes, names are ignored; + * - for INT, size and signedness are ignored; + * - any two FLOATs are always compatible; + * - for ARRAY, dimensionality is ignored, element types are checked for + * compatibility recursively; + * - everything else shouldn't be ever a target of relocation. + * These rules are not set in stone and probably will be adjusted as we get + * more experience with using BPF CO-RE relocations. + */ +static int bpf_core_fields_are_compat(const struct btf *local_btf, + __u32 local_id, + const struct btf *targ_btf, + __u32 targ_id) +{ + const struct btf_type *local_type, *targ_type; + +recur: + local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id); + targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id); + if (!local_type || !targ_type) + return -EINVAL; + + if (btf_is_composite(local_type) && btf_is_composite(targ_type)) + return 1; + if (btf_kind(local_type) != btf_kind(targ_type)) + return 0; + + switch (btf_kind(local_type)) { + case BTF_KIND_PTR: + case BTF_KIND_FLOAT: + return 1; + case BTF_KIND_FWD: + case BTF_KIND_ENUM: { + const char *local_name, *targ_name; + size_t local_len, targ_len; + + local_name = btf__name_by_offset(local_btf, + local_type->name_off); + targ_name = btf__name_by_offset(targ_btf, targ_type->name_off); + local_len = bpf_core_essential_name_len(local_name); + targ_len = bpf_core_essential_name_len(targ_name); + /* one of them is anonymous or both w/ same flavor-less names */ + return local_len == 0 || targ_len == 0 || + (local_len == targ_len && + strncmp(local_name, targ_name, local_len) == 0); + } + case BTF_KIND_INT: + /* just reject deprecated bitfield-like integers; all other + * integers are by default compatible between each other + */ + return btf_int_offset(local_type) == 0 && + btf_int_offset(targ_type) == 0; + case BTF_KIND_ARRAY: + local_id = btf_array(local_type)->type; + targ_id = btf_array(targ_type)->type; + goto recur; + default: + return 0; + } +} + +/* + * Given single high-level named field accessor in local type, find + * corresponding high-level accessor for a target type. Along the way, + * maintain low-level spec for target as well. Also keep updating target + * bit offset. + * + * Searching is performed through recursive exhaustive enumeration of all + * fields of a struct/union. If there are any anonymous (embedded) + * structs/unions, they are recursively searched as well. If field with + * desired name is found, check compatibility between local and target types, + * before returning result. + * + * 1 is returned, if field is found. + * 0 is returned if no compatible field is found. + * <0 is returned on error. 
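+ * For illustration: if the local accessor names field 'a' and the target
+ * struct only defines 'a' inside an anonymous inner struct, the search
+ * recurses into that anonymous member, extends the low-level target spec
+ * with its index, and returns 1 once 'a' is found and passes
+ * bpf_core_fields_are_compat().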
+ */ +static int bpf_core_match_member(const struct btf *local_btf, + const struct bpf_core_accessor *local_acc, + const struct btf *targ_btf, + __u32 targ_id, + struct bpf_core_spec *spec, + __u32 *next_targ_id) +{ + const struct btf_type *local_type, *targ_type; + const struct btf_member *local_member, *m; + const char *local_name, *targ_name; + __u32 local_id; + int i, n, found; + + targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id); + if (!targ_type) + return -EINVAL; + if (!btf_is_composite(targ_type)) + return 0; + + local_id = local_acc->type_id; + local_type = btf_type_by_id(local_btf, local_id); + local_member = btf_members(local_type) + local_acc->idx; + local_name = btf__name_by_offset(local_btf, local_member->name_off); + + n = btf_vlen(targ_type); + m = btf_members(targ_type); + for (i = 0; i < n; i++, m++) { + __u32 bit_offset; + + bit_offset = btf_member_bit_offset(targ_type, i); + + /* too deep struct/union/array nesting */ + if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN) + return -E2BIG; + + /* speculate this member will be the good one */ + spec->bit_offset += bit_offset; + spec->raw_spec[spec->raw_len++] = i; + + targ_name = btf__name_by_offset(targ_btf, m->name_off); + if (str_is_empty(targ_name)) { + /* embedded struct/union, we need to go deeper */ + found = bpf_core_match_member(local_btf, local_acc, + targ_btf, m->type, + spec, next_targ_id); + if (found) /* either found or error */ + return found; + } else if (strcmp(local_name, targ_name) == 0) { + /* matching named field */ + struct bpf_core_accessor *targ_acc; + + targ_acc = &spec->spec[spec->len++]; + targ_acc->type_id = targ_id; + targ_acc->idx = i; + targ_acc->name = targ_name; + + *next_targ_id = m->type; + found = bpf_core_fields_are_compat(local_btf, + local_member->type, + targ_btf, m->type); + if (!found) + spec->len--; /* pop accessor */ + return found; + } + /* member turned out not to be what we looked for */ + spec->bit_offset -= bit_offset; + spec->raw_len--; + } + + return 0; +} + +/* + * Try to match local spec to a target type and, if successful, produce full + * target spec (high-level, low-level + bit offset). 
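+ * Returns 1 on a successful match, 0 if this candidate does not match, and
+ * a negative error code on failure, mirroring bpf_core_match_member().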
+ */ +static int bpf_core_spec_match(struct bpf_core_spec *local_spec, + const struct btf *targ_btf, __u32 targ_id, + struct bpf_core_spec *targ_spec) +{ + const struct btf_type *targ_type; + const struct bpf_core_accessor *local_acc; + struct bpf_core_accessor *targ_acc; + int i, sz, matched; + + memset(targ_spec, 0, sizeof(*targ_spec)); + targ_spec->btf = targ_btf; + targ_spec->root_type_id = targ_id; + targ_spec->relo_kind = local_spec->relo_kind; + + if (core_relo_is_type_based(local_spec->relo_kind)) { + return bpf_core_types_are_compat(local_spec->btf, + local_spec->root_type_id, + targ_btf, targ_id); + } + + local_acc = &local_spec->spec[0]; + targ_acc = &targ_spec->spec[0]; + + if (core_relo_is_enumval_based(local_spec->relo_kind)) { + size_t local_essent_len, targ_essent_len; + const struct btf_enum *e; + const char *targ_name; + + /* has to resolve to an enum */ + targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id); + if (!btf_is_enum(targ_type)) + return 0; + + local_essent_len = bpf_core_essential_name_len(local_acc->name); + + for (i = 0, e = btf_enum(targ_type); i < btf_vlen(targ_type); i++, e++) { + targ_name = btf__name_by_offset(targ_spec->btf, e->name_off); + targ_essent_len = bpf_core_essential_name_len(targ_name); + if (targ_essent_len != local_essent_len) + continue; + if (strncmp(local_acc->name, targ_name, local_essent_len) == 0) { + targ_acc->type_id = targ_id; + targ_acc->idx = i; + targ_acc->name = targ_name; + targ_spec->len++; + targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx; + targ_spec->raw_len++; + return 1; + } + } + return 0; + } + + if (!core_relo_is_field_based(local_spec->relo_kind)) + return -EINVAL; + + for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) { + targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, + &targ_id); + if (!targ_type) + return -EINVAL; + + if (local_acc->name) { + matched = bpf_core_match_member(local_spec->btf, + local_acc, + targ_btf, targ_id, + targ_spec, &targ_id); + if (matched <= 0) + return matched; + } else { + /* for i=0, targ_id is already treated as array element + * type (because it's the original struct), for others + * we should find array element type first + */ + if (i > 0) { + const struct btf_array *a; + bool flex; + + if (!btf_is_array(targ_type)) + return 0; + + a = btf_array(targ_type); + flex = is_flex_arr(targ_btf, targ_acc - 1, a); + if (!flex && local_acc->idx >= a->nelems) + return 0; + if (!skip_mods_and_typedefs(targ_btf, a->type, + &targ_id)) + return -EINVAL; + } + + /* too deep struct/union/array nesting */ + if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN) + return -E2BIG; + + targ_acc->type_id = targ_id; + targ_acc->idx = local_acc->idx; + targ_acc->name = NULL; + targ_spec->len++; + targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx; + targ_spec->raw_len++; + + sz = btf__resolve_size(targ_btf, targ_id); + if (sz < 0) + return sz; + targ_spec->bit_offset += local_acc->idx * sz * 8; + } + } + + return 1; +} + +static int bpf_core_calc_field_relo(const char *prog_name, + const struct bpf_core_relo *relo, + const struct bpf_core_spec *spec, + __u32 *val, __u32 *field_sz, __u32 *type_id, + bool *validate) +{ + const struct bpf_core_accessor *acc; + const struct btf_type *t; + __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id; + const struct btf_member *m; + const struct btf_type *mt; + bool bitfield; + __s64 sz; + + *field_sz = 0; + + if (relo->kind == BPF_CORE_FIELD_EXISTS) { + *val = spec ? 
1 : 0; + return 0; + } + + if (!spec) + return -EUCLEAN; /* request instruction poisoning */ + + acc = &spec->spec[spec->len - 1]; + t = btf_type_by_id(spec->btf, acc->type_id); + + /* a[n] accessor needs special handling */ + if (!acc->name) { + if (relo->kind == BPF_CORE_FIELD_BYTE_OFFSET) { + *val = spec->bit_offset / 8; + /* remember field size for load/store mem size */ + sz = btf__resolve_size(spec->btf, acc->type_id); + if (sz < 0) + return -EINVAL; + *field_sz = sz; + *type_id = acc->type_id; + } else if (relo->kind == BPF_CORE_FIELD_BYTE_SIZE) { + sz = btf__resolve_size(spec->btf, acc->type_id); + if (sz < 0) + return -EINVAL; + *val = sz; + } else { + pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n", + prog_name, relo->kind, relo->insn_off / 8); + return -EINVAL; + } + if (validate) + *validate = true; + return 0; + } + + m = btf_members(t) + acc->idx; + mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id); + bit_off = spec->bit_offset; + bit_sz = btf_member_bitfield_size(t, acc->idx); + + bitfield = bit_sz > 0; + if (bitfield) { + byte_sz = mt->size; + byte_off = bit_off / 8 / byte_sz * byte_sz; + /* figure out smallest int size necessary for bitfield load */ + while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) { + if (byte_sz >= 8) { + /* bitfield can't be read with 64-bit read */ + pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n", + prog_name, relo->kind, relo->insn_off / 8); + return -E2BIG; + } + byte_sz *= 2; + byte_off = bit_off / 8 / byte_sz * byte_sz; + } + } else { + sz = btf__resolve_size(spec->btf, field_type_id); + if (sz < 0) + return -EINVAL; + byte_sz = sz; + byte_off = spec->bit_offset / 8; + bit_sz = byte_sz * 8; + } + + /* for bitfields, all the relocatable aspects are ambiguous and we + * might disagree with compiler, so turn off validation of expected + * value, except for signedness + */ + if (validate) + *validate = !bitfield; + + switch (relo->kind) { + case BPF_CORE_FIELD_BYTE_OFFSET: + *val = byte_off; + if (!bitfield) { + *field_sz = byte_sz; + *type_id = field_type_id; + } + break; + case BPF_CORE_FIELD_BYTE_SIZE: + *val = byte_sz; + break; + case BPF_CORE_FIELD_SIGNED: + /* enums will be assumed unsigned */ + *val = btf_is_enum(mt) || + (btf_int_encoding(mt) & BTF_INT_SIGNED); + if (validate) + *validate = true; /* signedness is never ambiguous */ + break; + case BPF_CORE_FIELD_LSHIFT_U64: +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + *val = 64 - (bit_off + bit_sz - byte_off * 8); +#else + *val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8); +#endif + break; + case BPF_CORE_FIELD_RSHIFT_U64: + *val = 64 - bit_sz; + if (validate) + *validate = true; /* right shift is never ambiguous */ + break; + case BPF_CORE_FIELD_EXISTS: + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo, + const struct bpf_core_spec *spec, + __u32 *val, bool *validate) +{ + __s64 sz; + + /* by default, always check expected value in bpf_insn */ + if (validate) + *validate = true; + + /* type-based relos return zero when target type is not found */ + if (!spec) { + *val = 0; + return 0; + } + + switch (relo->kind) { + case BPF_CORE_TYPE_ID_TARGET: + *val = spec->root_type_id; + /* type ID, embedded in bpf_insn, might change during linking, + * so enforcing it is pointless + */ + if (validate) + *validate = false; + break; + case BPF_CORE_TYPE_EXISTS: + *val = 1; + break; + case BPF_CORE_TYPE_SIZE: + sz = btf__resolve_size(spec->btf, 
spec->root_type_id); + if (sz < 0) + return -EINVAL; + *val = sz; + break; + case BPF_CORE_TYPE_ID_LOCAL: + /* BPF_CORE_TYPE_ID_LOCAL is handled specially and shouldn't get here */ + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo, + const struct bpf_core_spec *spec, + __u32 *val) +{ + const struct btf_type *t; + const struct btf_enum *e; + + switch (relo->kind) { + case BPF_CORE_ENUMVAL_EXISTS: + *val = spec ? 1 : 0; + break; + case BPF_CORE_ENUMVAL_VALUE: + if (!spec) + return -EUCLEAN; /* request instruction poisoning */ + t = btf_type_by_id(spec->btf, spec->spec[0].type_id); + e = btf_enum(t) + spec->spec[0].idx; + *val = e->val; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +/* Calculate original and target relocation values, given local and target + * specs and relocation kind. These values are calculated for each candidate. + * If there are multiple candidates, resulting values should all be consistent + * with each other. Otherwise, libbpf will refuse to proceed due to ambiguity. + * If instruction has to be poisoned, *poison will be set to true. + */ +static int bpf_core_calc_relo(const char *prog_name, + const struct bpf_core_relo *relo, + int relo_idx, + const struct bpf_core_spec *local_spec, + const struct bpf_core_spec *targ_spec, + struct bpf_core_relo_res *res) +{ + int err = -EOPNOTSUPP; + + res->orig_val = 0; + res->new_val = 0; + res->poison = false; + res->validate = true; + res->fail_memsz_adjust = false; + res->orig_sz = res->new_sz = 0; + res->orig_type_id = res->new_type_id = 0; + + if (core_relo_is_field_based(relo->kind)) { + err = bpf_core_calc_field_relo(prog_name, relo, local_spec, + &res->orig_val, &res->orig_sz, + &res->orig_type_id, &res->validate); + err = err ?: bpf_core_calc_field_relo(prog_name, relo, targ_spec, + &res->new_val, &res->new_sz, + &res->new_type_id, NULL); + if (err) + goto done; + /* Validate if it's safe to adjust load/store memory size. + * Adjustments are performed only if original and new memory + * sizes differ. + */ + res->fail_memsz_adjust = false; + if (res->orig_sz != res->new_sz) { + const struct btf_type *orig_t, *new_t; + + orig_t = btf_type_by_id(local_spec->btf, res->orig_type_id); + new_t = btf_type_by_id(targ_spec->btf, res->new_type_id); + + /* There are two use cases in which it's safe to + * adjust load/store's mem size: + * - reading a 32-bit kernel pointer, while on BPF + * size pointers are always 64-bit; in this case + * it's safe to "downsize" instruction size due to + * pointer being treated as unsigned integer with + * zero-extended upper 32-bits; + * - reading unsigned integers, again due to + * zero-extension is preserving the value correctly. + * + * In all other cases it's incorrect to attempt to + * load/store field because read value will be + * incorrect, so we poison relocated instruction. 
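+ * (E.g., an unsigned field that was u32 in the local BTF but grew to u64
+ * in the target kernel can be read with a size-adjusted load safely;
+ * signed integers are excluded because a size-adjusted load is not
+ * guaranteed to preserve their value.)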
+ */ + if (btf_is_ptr(orig_t) && btf_is_ptr(new_t)) + goto done; + if (btf_is_int(orig_t) && btf_is_int(new_t) && + btf_int_encoding(orig_t) != BTF_INT_SIGNED && + btf_int_encoding(new_t) != BTF_INT_SIGNED) + goto done; + + /* mark as invalid mem size adjustment, but this will + * only be checked for LDX/STX/ST insns + */ + res->fail_memsz_adjust = true; + } + } else if (core_relo_is_type_based(relo->kind)) { + err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val, &res->validate); + err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val, NULL); + } else if (core_relo_is_enumval_based(relo->kind)) { + err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val); + err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val); + } + +done: + if (err == -EUCLEAN) { + /* EUCLEAN is used to signal instruction poisoning request */ + res->poison = true; + err = 0; + } else if (err == -EOPNOTSUPP) { + /* EOPNOTSUPP means unknown/unsupported relocation */ + pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n", + prog_name, relo_idx, core_relo_kind_str(relo->kind), + relo->kind, relo->insn_off / 8); + } + + return err; +} + +/* + * Turn instruction for which CO_RE relocation failed into invalid one with + * distinct signature. + */ +static void bpf_core_poison_insn(const char *prog_name, int relo_idx, + int insn_idx, struct bpf_insn *insn) +{ + pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n", + prog_name, relo_idx, insn_idx); + insn->code = BPF_JMP | BPF_CALL; + insn->dst_reg = 0; + insn->src_reg = 0; + insn->off = 0; + /* if this instruction is reachable (not a dead code), + * verifier will complain with the following message: + * invalid func unknown#195896080 + */ + insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */ +} + +static int insn_bpf_size_to_bytes(struct bpf_insn *insn) +{ + switch (BPF_SIZE(insn->code)) { + case BPF_DW: return 8; + case BPF_W: return 4; + case BPF_H: return 2; + case BPF_B: return 1; + default: return -1; + } +} + +static int insn_bytes_to_bpf_size(__u32 sz) +{ + switch (sz) { + case 8: return BPF_DW; + case 4: return BPF_W; + case 2: return BPF_H; + case 1: return BPF_B; + default: return -1; + } +} + +/* + * Patch relocatable BPF instruction. + * + * Patched value is determined by relocation kind and target specification. + * For existence relocations target spec will be NULL if field/type is not found. + * Expected insn->imm value is determined using relocation kind and local + * spec, and is checked before patching instruction. If actual insn->imm value + * is wrong, bail out with error. + * + * Currently supported classes of BPF instruction are: + * 1. rX = (assignment with immediate operand); + * 2. rX += (arithmetic operations with immediate operand); + * 3. rX = (load with 64-bit immediate value); + * 4. rX = *(T *)(rY + ), where T is one of {u8, u16, u32, u64}; + * 5. *(T *)(rX + ) = rY, where T is one of {u8, u16, u32, u64}; + * 6. *(T *)(rX + ) = , where T is one of {u8, u16, u32, u64}. 
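+ * For example, a BPF_CORE_FIELD_BYTE_OFFSET relocation against an LDX
+ * instruction rewrites insn->off from the field offset seen at compile
+ * time to the offset found in the target kernel's BTF and, if the field
+ * size changed, also rewrites the BPF_SIZE bits of insn->code.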
+ */ +int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn, + int insn_idx, const struct bpf_core_relo *relo, + int relo_idx, const struct bpf_core_relo_res *res) +{ + __u32 orig_val, new_val; + __u8 class; + + class = BPF_CLASS(insn->code); + + if (res->poison) { +poison: + /* poison second part of ldimm64 to avoid confusing error from + * verifier about "unknown opcode 00" + */ + if (is_ldimm64_insn(insn)) + bpf_core_poison_insn(prog_name, relo_idx, insn_idx + 1, insn + 1); + bpf_core_poison_insn(prog_name, relo_idx, insn_idx, insn); + return 0; + } + + orig_val = res->orig_val; + new_val = res->new_val; + + switch (class) { + case BPF_ALU: + case BPF_ALU64: + if (BPF_SRC(insn->code) != BPF_K) + return -EINVAL; + if (res->validate && insn->imm != orig_val) { + pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n", + prog_name, relo_idx, + insn_idx, insn->imm, orig_val, new_val); + return -EINVAL; + } + orig_val = insn->imm; + insn->imm = new_val; + pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n", + prog_name, relo_idx, insn_idx, + orig_val, new_val); + break; + case BPF_LDX: + case BPF_ST: + case BPF_STX: + if (res->validate && insn->off != orig_val) { + pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %u -> %u\n", + prog_name, relo_idx, insn_idx, insn->off, orig_val, new_val); + return -EINVAL; + } + if (new_val > SHRT_MAX) { + pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %u\n", + prog_name, relo_idx, insn_idx, new_val); + return -ERANGE; + } + if (res->fail_memsz_adjust) { + pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) accesses field incorrectly. " + "Make sure you are accessing pointers, unsigned integers, or fields of matching type and size.\n", + prog_name, relo_idx, insn_idx); + goto poison; + } + + orig_val = insn->off; + insn->off = new_val; + pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n", + prog_name, relo_idx, insn_idx, orig_val, new_val); + + if (res->new_sz != res->orig_sz) { + int insn_bytes_sz, insn_bpf_sz; + + insn_bytes_sz = insn_bpf_size_to_bytes(insn); + if (insn_bytes_sz != res->orig_sz) { + pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) unexpected mem size: got %d, exp %u\n", + prog_name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz); + return -EINVAL; + } + + insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz); + if (insn_bpf_sz < 0) { + pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) invalid new mem size: %u\n", + prog_name, relo_idx, insn_idx, res->new_sz); + return -EINVAL; + } + + insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code); + pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n", + prog_name, relo_idx, insn_idx, res->orig_sz, res->new_sz); + } + break; + case BPF_LD: { + __u64 imm; + + if (!is_ldimm64_insn(insn) || + insn[0].src_reg != 0 || insn[0].off != 0 || + insn[1].code != 0 || insn[1].dst_reg != 0 || + insn[1].src_reg != 0 || insn[1].off != 0) { + pr_warn("prog '%s': relo #%d: insn #%d (LDIMM64) has unexpected form\n", + prog_name, relo_idx, insn_idx); + return -EINVAL; + } + + imm = insn[0].imm + ((__u64)insn[1].imm << 32); + if (res->validate && imm != orig_val) { + pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %u -> %u\n", + prog_name, relo_idx, + insn_idx, (unsigned long long)imm, + orig_val, new_val); + return -EINVAL; + } + + insn[0].imm = new_val; + insn[1].imm = 0; /* currently 
only 32-bit values are supported */ + pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %u\n", + prog_name, relo_idx, insn_idx, + (unsigned long long)imm, new_val); + break; + } + default: + pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:0x%x, src:0x%x, dst:0x%x, off:0x%x, imm:0x%x\n", + prog_name, relo_idx, insn_idx, insn->code, + insn->src_reg, insn->dst_reg, insn->off, insn->imm); + return -EINVAL; + } + + return 0; +} + +/* Output spec definition in the format: + * [] () + => @, + * where is a C-syntax view of recorded field access, e.g.: x.a[3].b + */ +int bpf_core_format_spec(char *buf, size_t buf_sz, const struct bpf_core_spec *spec) +{ + const struct btf_type *t; + const struct btf_enum *e; + const char *s; + __u32 type_id; + int i, len = 0; + +#define append_buf(fmt, args...) \ + ({ \ + int r; \ + r = snprintf(buf, buf_sz, fmt, ##args); \ + len += r; \ + if (r >= buf_sz) \ + r = buf_sz; \ + buf += r; \ + buf_sz -= r; \ + }) + + type_id = spec->root_type_id; + t = btf_type_by_id(spec->btf, type_id); + s = btf__name_by_offset(spec->btf, t->name_off); + + append_buf("<%s> [%u] %s %s", + core_relo_kind_str(spec->relo_kind), + type_id, btf_kind_str(t), str_is_empty(s) ? "" : s); + + if (core_relo_is_type_based(spec->relo_kind)) + return len; + + if (core_relo_is_enumval_based(spec->relo_kind)) { + t = skip_mods_and_typedefs(spec->btf, type_id, NULL); + e = btf_enum(t) + spec->raw_spec[0]; + s = btf__name_by_offset(spec->btf, e->name_off); + + append_buf("::%s = %u", s, e->val); + return len; + } + + if (core_relo_is_field_based(spec->relo_kind)) { + for (i = 0; i < spec->len; i++) { + if (spec->spec[i].name) + append_buf(".%s", spec->spec[i].name); + else if (i > 0 || spec->spec[i].idx > 0) + append_buf("[%u]", spec->spec[i].idx); + } + + append_buf(" ("); + for (i = 0; i < spec->raw_len; i++) + append_buf("%s%d", i == 0 ? "" : ":", spec->raw_spec[i]); + + if (spec->bit_offset % 8) + append_buf(" @ offset %u.%u)", spec->bit_offset / 8, spec->bit_offset % 8); + else + append_buf(" @ offset %u)", spec->bit_offset / 8); + return len; + } + + return len; +#undef append_buf +} + +/* + * Calculate CO-RE relocation target result. + * + * The outline and important points of the algorithm: + * 1. For given local type, find corresponding candidate target types. + * Candidate type is a type with the same "essential" name, ignoring + * everything after last triple underscore (___). E.g., `sample`, + * `sample___flavor_one`, `sample___flavor_another_one`, are all candidates + * for each other. Names with triple underscore are referred to as + * "flavors" and are useful, among other things, to allow to + * specify/support incompatible variations of the same kernel struct, which + * might differ between different kernel versions and/or build + * configurations. + * + * N.B. Struct "flavors" could be generated by bpftool's BTF-to-C + * converter, when deduplicated BTF of a kernel still contains more than + * one different types with the same name. In that case, ___2, ___3, etc + * are appended starting from second name conflict. But start flavors are + * also useful to be defined "locally", in BPF program, to extract same + * data from incompatible changes between different kernel + * versions/configurations. For instance, to handle field renames between + * kernel versions, one can use two flavors of the struct name with the + * same common name and use conditional relocations to extract that field, + * depending on target kernel version. + * 2. 
For each candidate type, try to match local specification to this + * candidate target type. Matching involves finding corresponding + * high-level spec accessors, meaning that all named fields should match, + * as well as all array accesses should be within the actual bounds. Also, + * types should be compatible (see bpf_core_fields_are_compat for details). + * 3. It is supported and expected that there might be multiple flavors + * matching the spec. As long as all the specs resolve to the same set of + * offsets across all candidates, there is no error. If there is any + * ambiguity, CO-RE relocation will fail. This is necessary to accomodate + * imprefection of BTF deduplication, which can cause slight duplication of + * the same BTF type, if some directly or indirectly referenced (by + * pointer) type gets resolved to different actual types in different + * object files. If such situation occurs, deduplicated BTF will end up + * with two (or more) structurally identical types, which differ only in + * types they refer to through pointer. This should be OK in most cases and + * is not an error. + * 4. Candidate types search is performed by linearly scanning through all + * types in target BTF. It is anticipated that this is overall more + * efficient memory-wise and not significantly worse (if not better) + * CPU-wise compared to prebuilding a map from all local type names to + * a list of candidate type names. It's also sped up by caching resolved + * list of matching candidates per each local "root" type ID, that has at + * least one bpf_core_relo associated with it. This list is shared + * between multiple relocations for the same type ID and is updated as some + * of the candidates are pruned due to structural incompatibility. + */ +int bpf_core_calc_relo_insn(const char *prog_name, + const struct bpf_core_relo *relo, + int relo_idx, + const struct btf *local_btf, + struct bpf_core_cand_list *cands, + struct bpf_core_spec *specs_scratch, + struct bpf_core_relo_res *targ_res) +{ + struct bpf_core_spec *local_spec = &specs_scratch[0]; + struct bpf_core_spec *cand_spec = &specs_scratch[1]; + struct bpf_core_spec *targ_spec = &specs_scratch[2]; + struct bpf_core_relo_res cand_res; + const struct btf_type *local_type; + const char *local_name; + __u32 local_id; + char spec_buf[256]; + int i, j, err; + + local_id = relo->type_id; + local_type = btf_type_by_id(local_btf, local_id); + local_name = btf__name_by_offset(local_btf, local_type->name_off); + if (!local_name) + return -EINVAL; + + err = bpf_core_parse_spec(prog_name, local_btf, relo, local_spec); + if (err) { + const char *spec_str; + + spec_str = btf__name_by_offset(local_btf, relo->access_str_off); + pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n", + prog_name, relo_idx, local_id, btf_kind_str(local_type), + str_is_empty(local_name) ? 
"" : local_name, + spec_str ?: "", err); + return -EINVAL; + } + + bpf_core_format_spec(spec_buf, sizeof(spec_buf), local_spec); + pr_debug("prog '%s': relo #%d: %s\n", prog_name, relo_idx, spec_buf); + + /* TYPE_ID_LOCAL relo is special and doesn't need candidate search */ + if (relo->kind == BPF_CORE_TYPE_ID_LOCAL) { + /* bpf_insn's imm value could get out of sync during linking */ + memset(targ_res, 0, sizeof(*targ_res)); + targ_res->validate = false; + targ_res->poison = false; + targ_res->orig_val = local_spec->root_type_id; + targ_res->new_val = local_spec->root_type_id; + return 0; + } + + /* libbpf doesn't support candidate search for anonymous types */ + if (str_is_empty(local_name)) { + pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n", + prog_name, relo_idx, core_relo_kind_str(relo->kind), relo->kind); + return -EOPNOTSUPP; + } + + for (i = 0, j = 0; i < cands->len; i++) { + err = bpf_core_spec_match(local_spec, cands->cands[i].btf, + cands->cands[i].id, cand_spec); + if (err < 0) { + bpf_core_format_spec(spec_buf, sizeof(spec_buf), cand_spec); + pr_warn("prog '%s': relo #%d: error matching candidate #%d %s: %d\n ", + prog_name, relo_idx, i, spec_buf, err); + return err; + } + + bpf_core_format_spec(spec_buf, sizeof(spec_buf), cand_spec); + pr_debug("prog '%s': relo #%d: %s candidate #%d %s\n", prog_name, + relo_idx, err == 0 ? "non-matching" : "matching", i, spec_buf); + + if (err == 0) + continue; + + err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, cand_spec, &cand_res); + if (err) + return err; + + if (j == 0) { + *targ_res = cand_res; + *targ_spec = *cand_spec; + } else if (cand_spec->bit_offset != targ_spec->bit_offset) { + /* if there are many field relo candidates, they + * should all resolve to the same bit offset + */ + pr_warn("prog '%s': relo #%d: field offset ambiguity: %u != %u\n", + prog_name, relo_idx, cand_spec->bit_offset, + targ_spec->bit_offset); + return -EINVAL; + } else if (cand_res.poison != targ_res->poison || + cand_res.new_val != targ_res->new_val) { + /* all candidates should result in the same relocation + * decision and value, otherwise it's dangerous to + * proceed due to ambiguity + */ + pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %u != %s %u\n", + prog_name, relo_idx, + cand_res.poison ? "failure" : "success", cand_res.new_val, + targ_res->poison ? "failure" : "success", targ_res->new_val); + return -EINVAL; + } + + cands->cands[j++] = cands->cands[i]; + } + + /* + * For BPF_CORE_FIELD_EXISTS relo or when used BPF program has field + * existence checks or kernel version/config checks, it's expected + * that we might not find any candidates. In this case, if field + * wasn't found in any candidate, the list of candidates shouldn't + * change at all, we'll just handle relocating appropriately, + * depending on relo's kind. + */ + if (j > 0) + cands->len = j; + + /* + * If no candidates were found, it might be both a programmer error, + * as well as expected case, depending whether instruction w/ + * relocation is guarded in some way that makes it unreachable (dead + * code) if relocation can't be resolved. This is handled in + * bpf_core_patch_insn() uniformly by replacing that instruction with + * BPF helper call insn (using invalid helper ID). If that instruction + * is indeed unreachable, then it will be ignored and eliminated by + * verifier. If it was an error, then verifier will complain and point + * to a specific instruction number in its log. 
+ */ + if (j == 0) { + pr_debug("prog '%s': relo #%d: no matching targets found\n", + prog_name, relo_idx); + + /* calculate single target relo result explicitly */ + err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, NULL, targ_res); + if (err) + return err; + } + + return 0; +} diff --git a/pkg/plugin/lib/common/libbpf/_src/relo_core.h b/pkg/plugin/lib/common/libbpf/_src/relo_core.h new file mode 100644 index 000000000..073039d8c --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/relo_core.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ +/* Copyright (c) 2019 Facebook */ + +#ifndef __RELO_CORE_H +#define __RELO_CORE_H + +#include + +struct bpf_core_cand { + const struct btf *btf; + __u32 id; +}; + +/* dynamically sized list of type IDs and its associated struct btf */ +struct bpf_core_cand_list { + struct bpf_core_cand *cands; + int len; +}; + +#define BPF_CORE_SPEC_MAX_LEN 64 + +/* represents BPF CO-RE field or array element accessor */ +struct bpf_core_accessor { + __u32 type_id; /* struct/union type or array element type */ + __u32 idx; /* field index or array index */ + const char *name; /* field name or NULL for array accessor */ +}; + +struct bpf_core_spec { + const struct btf *btf; + /* high-level spec: named fields and array indices only */ + struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN]; + /* original unresolved (no skip_mods_or_typedefs) root type ID */ + __u32 root_type_id; + /* CO-RE relocation kind */ + enum bpf_core_relo_kind relo_kind; + /* high-level spec length */ + int len; + /* raw, low-level spec: 1-to-1 with accessor spec string */ + int raw_spec[BPF_CORE_SPEC_MAX_LEN]; + /* raw spec length */ + int raw_len; + /* field bit offset represented by spec */ + __u32 bit_offset; +}; + +struct bpf_core_relo_res { + /* expected value in the instruction, unless validate == false */ + __u32 orig_val; + /* new value that needs to be patched up to */ + __u32 new_val; + /* relocation unsuccessful, poison instruction, but don't fail load */ + bool poison; + /* some relocations can't be validated against orig_val */ + bool validate; + /* for field byte offset relocations or the forms: + * *(T *)(rX + ) = rY + * rX = *(T *)(rY + ), + * we remember original and resolved field size to adjust direct + * memory loads of pointers and integers; this is necessary for 32-bit + * host kernel architectures, but also allows to automatically + * relocate fields that were resized from, e.g., u32 to u64, etc. 
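+ * bpf_core_patch_insn() consumes orig_sz/new_sz to rewrite the BPF_SIZE
+ * bits of LDX/ST/STX instructions whenever the two sizes differ.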
+ */ + bool fail_memsz_adjust; + __u32 orig_sz; + __u32 orig_type_id; + __u32 new_sz; + __u32 new_type_id; +}; + +int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id, + const struct btf *targ_btf, __u32 targ_id); + +size_t bpf_core_essential_name_len(const char *name); + +int bpf_core_calc_relo_insn(const char *prog_name, + const struct bpf_core_relo *relo, int relo_idx, + const struct btf *local_btf, + struct bpf_core_cand_list *cands, + struct bpf_core_spec *specs_scratch, + struct bpf_core_relo_res *targ_res); + +int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn, + int insn_idx, const struct bpf_core_relo *relo, + int relo_idx, const struct bpf_core_relo_res *res); + +int bpf_core_parse_spec(const char *prog_name, const struct btf *btf, + const struct bpf_core_relo *relo, + struct bpf_core_spec *spec); + +int bpf_core_format_spec(char *buf, size_t buf_sz, const struct bpf_core_spec *spec); + +#endif diff --git a/pkg/plugin/lib/common/libbpf/_src/ringbuf.c b/pkg/plugin/lib/common/libbpf/_src/ringbuf.c new file mode 100644 index 000000000..8bc117bcc --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/ringbuf.c @@ -0,0 +1,302 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +/* + * Ring buffer operations. + * + * Copyright (C) 2020 Facebook, Inc. + */ +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "libbpf.h" +#include "libbpf_internal.h" +#include "bpf.h" + +struct ring { + ring_buffer_sample_fn sample_cb; + void *ctx; + void *data; + unsigned long *consumer_pos; + unsigned long *producer_pos; + unsigned long mask; + int map_fd; +}; + +struct ring_buffer { + struct epoll_event *events; + struct ring *rings; + size_t page_size; + int epoll_fd; + int ring_cnt; +}; + +static void ringbuf_unmap_ring(struct ring_buffer *rb, struct ring *r) +{ + if (r->consumer_pos) { + munmap(r->consumer_pos, rb->page_size); + r->consumer_pos = NULL; + } + if (r->producer_pos) { + munmap(r->producer_pos, rb->page_size + 2 * (r->mask + 1)); + r->producer_pos = NULL; + } +} + +/* Add extra RINGBUF maps to this ring buffer manager */ +int ring_buffer__add(struct ring_buffer *rb, int map_fd, + ring_buffer_sample_fn sample_cb, void *ctx) +{ + struct bpf_map_info info; + __u32 len = sizeof(info); + struct epoll_event *e; + struct ring *r; + void *tmp; + int err; + + memset(&info, 0, sizeof(info)); + + err = bpf_obj_get_info_by_fd(map_fd, &info, &len); + if (err) { + err = -errno; + pr_warn("ringbuf: failed to get map info for fd=%d: %d\n", + map_fd, err); + return libbpf_err(err); + } + + if (info.type != BPF_MAP_TYPE_RINGBUF) { + pr_warn("ringbuf: map fd=%d is not BPF_MAP_TYPE_RINGBUF\n", + map_fd); + return libbpf_err(-EINVAL); + } + + tmp = libbpf_reallocarray(rb->rings, rb->ring_cnt + 1, sizeof(*rb->rings)); + if (!tmp) + return libbpf_err(-ENOMEM); + rb->rings = tmp; + + tmp = libbpf_reallocarray(rb->events, rb->ring_cnt + 1, sizeof(*rb->events)); + if (!tmp) + return libbpf_err(-ENOMEM); + rb->events = tmp; + + r = &rb->rings[rb->ring_cnt]; + memset(r, 0, sizeof(*r)); + + r->map_fd = map_fd; + r->sample_cb = sample_cb; + r->ctx = ctx; + r->mask = info.max_entries - 1; + + /* Map writable consumer page */ + tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED, + map_fd, 0); + if (tmp == MAP_FAILED) { + err = -errno; + pr_warn("ringbuf: failed to mmap consumer page for map fd=%d: %d\n", + map_fd, err); + return libbpf_err(err); + } + 
r->consumer_pos = tmp; + + /* Map read-only producer page and data pages. We map twice as big + * data size to allow simple reading of samples that wrap around the + * end of a ring buffer. See kernel implementation for details. + * */ + tmp = mmap(NULL, rb->page_size + 2 * info.max_entries, PROT_READ, + MAP_SHARED, map_fd, rb->page_size); + if (tmp == MAP_FAILED) { + err = -errno; + ringbuf_unmap_ring(rb, r); + pr_warn("ringbuf: failed to mmap data pages for map fd=%d: %d\n", + map_fd, err); + return libbpf_err(err); + } + r->producer_pos = tmp; + r->data = tmp + rb->page_size; + + e = &rb->events[rb->ring_cnt]; + memset(e, 0, sizeof(*e)); + + e->events = EPOLLIN; + e->data.fd = rb->ring_cnt; + if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, e) < 0) { + err = -errno; + ringbuf_unmap_ring(rb, r); + pr_warn("ringbuf: failed to epoll add map fd=%d: %d\n", + map_fd, err); + return libbpf_err(err); + } + + rb->ring_cnt++; + return 0; +} + +void ring_buffer__free(struct ring_buffer *rb) +{ + int i; + + if (!rb) + return; + + for (i = 0; i < rb->ring_cnt; ++i) + ringbuf_unmap_ring(rb, &rb->rings[i]); + if (rb->epoll_fd >= 0) + close(rb->epoll_fd); + + free(rb->events); + free(rb->rings); + free(rb); +} + +struct ring_buffer * +ring_buffer__new(int map_fd, ring_buffer_sample_fn sample_cb, void *ctx, + const struct ring_buffer_opts *opts) +{ + struct ring_buffer *rb; + int err; + + if (!OPTS_VALID(opts, ring_buffer_opts)) + return errno = EINVAL, NULL; + + rb = calloc(1, sizeof(*rb)); + if (!rb) + return errno = ENOMEM, NULL; + + rb->page_size = getpagesize(); + + rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC); + if (rb->epoll_fd < 0) { + err = -errno; + pr_warn("ringbuf: failed to create epoll instance: %d\n", err); + goto err_out; + } + + err = ring_buffer__add(rb, map_fd, sample_cb, ctx); + if (err) + goto err_out; + + return rb; + +err_out: + ring_buffer__free(rb); + return errno = -err, NULL; +} + +static inline int roundup_len(__u32 len) +{ + /* clear out top 2 bits (discard and busy, if set) */ + len <<= 2; + len >>= 2; + /* add length prefix */ + len += BPF_RINGBUF_HDR_SZ; + /* round up to 8 byte alignment */ + return (len + 7) / 8 * 8; +} + +static int64_t ringbuf_process_ring(struct ring* r) +{ + int *len_ptr, len, err; + /* 64-bit to avoid overflow in case of extreme application behavior */ + int64_t cnt = 0; + unsigned long cons_pos, prod_pos; + bool got_new_data; + void *sample; + + cons_pos = smp_load_acquire(r->consumer_pos); + do { + got_new_data = false; + prod_pos = smp_load_acquire(r->producer_pos); + while (cons_pos < prod_pos) { + len_ptr = r->data + (cons_pos & r->mask); + len = smp_load_acquire(len_ptr); + + /* sample not committed yet, bail out for now */ + if (len & BPF_RINGBUF_BUSY_BIT) + goto done; + + got_new_data = true; + cons_pos += roundup_len(len); + + if ((len & BPF_RINGBUF_DISCARD_BIT) == 0) { + sample = (void *)len_ptr + BPF_RINGBUF_HDR_SZ; + err = r->sample_cb(r->ctx, sample, len); + if (err < 0) { + /* update consumer pos and bail out */ + smp_store_release(r->consumer_pos, + cons_pos); + return err; + } + cnt++; + } + + smp_store_release(r->consumer_pos, cons_pos); + } + } while (got_new_data); +done: + return cnt; +} + +/* Consume available ring buffer(s) data without event polling. + * Returns number of records consumed across all registered ring buffers (or + * INT_MAX, whichever is less), or negative number if any of the callbacks + * return error. 
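+ *
+ * Illustrative lifecycle sketch (names are placeholders): create a manager
+ * for a BPF_MAP_TYPE_RINGBUF map and drain it, either by blocking in
+ * ring_buffer__poll() or by calling this function directly:
+ *
+ *	struct ring_buffer *rb = ring_buffer__new(map_fd, handle_event,
+ *						   NULL, NULL);
+ *	if (!rb)
+ *		return -errno;
+ *	while (!exiting)
+ *		ring_buffer__poll(rb, 100);
+ *	ring_buffer__free(rb);
+ *
+ * where handle_event is a user-supplied ring_buffer_sample_fn callback.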
+ */ +int ring_buffer__consume(struct ring_buffer *rb) +{ + int64_t err, res = 0; + int i; + + for (i = 0; i < rb->ring_cnt; i++) { + struct ring *ring = &rb->rings[i]; + + err = ringbuf_process_ring(ring); + if (err < 0) + return libbpf_err(err); + res += err; + } + if (res > INT_MAX) + return INT_MAX; + return res; +} + +/* Poll for available data and consume records, if any are available. + * Returns number of records consumed (or INT_MAX, whichever is less), or + * negative number, if any of the registered callbacks returned error. + */ +int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms) +{ + int i, cnt; + int64_t err, res = 0; + + cnt = epoll_wait(rb->epoll_fd, rb->events, rb->ring_cnt, timeout_ms); + if (cnt < 0) + return libbpf_err(-errno); + + for (i = 0; i < cnt; i++) { + __u32 ring_id = rb->events[i].data.fd; + struct ring *ring = &rb->rings[ring_id]; + + err = ringbuf_process_ring(ring); + if (err < 0) + return libbpf_err(err); + res += err; + } + if (res > INT_MAX) + return INT_MAX; + return res; +} + +/* Get an fd that can be used to sleep until data is available in the ring(s) */ +int ring_buffer__epoll_fd(const struct ring_buffer *rb) +{ + return rb->epoll_fd; +} diff --git a/pkg/plugin/lib/common/libbpf/_src/skel_internal.h b/pkg/plugin/lib/common/libbpf/_src/skel_internal.h new file mode 100644 index 000000000..bd6f4505e --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/skel_internal.h @@ -0,0 +1,349 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ +/* Copyright (c) 2021 Facebook */ +#ifndef __SKEL_INTERNAL_H +#define __SKEL_INTERNAL_H + +#ifdef __KERNEL__ +#include +#include +#include +#include +#include +#else +#include +#include +#include +#include +#include "bpf.h" +#endif + +#ifndef __NR_bpf +# if defined(__mips__) && defined(_ABIO32) +# define __NR_bpf 4355 +# elif defined(__mips__) && defined(_ABIN32) +# define __NR_bpf 6319 +# elif defined(__mips__) && defined(_ABI64) +# define __NR_bpf 5315 +# endif +#endif + +/* This file is a base header for auto-generated *.lskel.h files. + * Its contents will change and may become part of auto-generation in the future. + * + * The layout of bpf_[map|prog]_desc and bpf_loader_ctx is feature dependent + * and will change from one version of libbpf to another and features + * requested during loader program generation. 
+ */ +struct bpf_map_desc { + /* output of the loader prog */ + int map_fd; + /* input for the loader prog */ + __u32 max_entries; + __aligned_u64 initial_value; +}; +struct bpf_prog_desc { + int prog_fd; +}; + +enum { + BPF_SKEL_KERNEL = (1ULL << 0), +}; + +struct bpf_loader_ctx { + __u32 sz; + __u32 flags; + __u32 log_level; + __u32 log_size; + __u64 log_buf; +}; + +struct bpf_load_and_run_opts { + struct bpf_loader_ctx *ctx; + const void *data; + const void *insns; + __u32 data_sz; + __u32 insns_sz; + const char *errstr; +}; + +long bpf_sys_bpf(__u32 cmd, void *attr, __u32 attr_size); + +static inline int skel_sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, + unsigned int size) +{ +#ifdef __KERNEL__ + return bpf_sys_bpf(cmd, attr, size); +#else + return syscall(__NR_bpf, cmd, attr, size); +#endif +} + +#ifdef __KERNEL__ +static inline int close(int fd) +{ + return close_fd(fd); +} + +static inline void *skel_alloc(size_t size) +{ + struct bpf_loader_ctx *ctx = kzalloc(size, GFP_KERNEL); + + if (!ctx) + return NULL; + ctx->flags |= BPF_SKEL_KERNEL; + return ctx; +} + +static inline void skel_free(const void *p) +{ + kfree(p); +} + +/* skel->bss/rodata maps are populated the following way: + * + * For kernel use: + * skel_prep_map_data() allocates kernel memory that kernel module can directly access. + * Generated lskel stores the pointer in skel->rodata and in skel->maps.rodata.initial_value. + * The loader program will perform probe_read_kernel() from maps.rodata.initial_value. + * skel_finalize_map_data() sets skel->rodata to point to actual value in a bpf map and + * does maps.rodata.initial_value = ~0ULL to signal skel_free_map_data() that kvfree + * is not nessary. + * + * For user space: + * skel_prep_map_data() mmaps anon memory into skel->rodata that can be accessed directly. + * Generated lskel stores the pointer in skel->rodata and in skel->maps.rodata.initial_value. + * The loader program will perform copy_from_user() from maps.rodata.initial_value. + * skel_finalize_map_data() remaps bpf array map value from the kernel memory into + * skel->rodata address. + * + * The "bpftool gen skeleton -L" command generates lskel.h that is suitable for + * both kernel and user space. The generated loader program does + * either bpf_probe_read_kernel() or bpf_copy_from_user() from initial_value + * depending on bpf_loader_ctx->flags. + */ +static inline void skel_free_map_data(void *p, __u64 addr, size_t sz) +{ + if (addr != ~0ULL) + kvfree(p); + /* When addr == ~0ULL the 'p' points to + * ((struct bpf_array *)map)->value. See skel_finalize_map_data. + */ +} + +static inline void *skel_prep_map_data(const void *val, size_t mmap_sz, size_t val_sz) +{ + void *addr; + + addr = kvmalloc(val_sz, GFP_KERNEL); + if (!addr) + return NULL; + memcpy(addr, val, val_sz); + return addr; +} + +static inline void *skel_finalize_map_data(__u64 *init_val, size_t mmap_sz, int flags, int fd) +{ + struct bpf_map *map; + void *addr = NULL; + + kvfree((void *) (long) *init_val); + *init_val = ~0ULL; + + /* At this point bpf_load_and_run() finished without error and + * 'fd' is a valid bpf map FD. All sanity checks below should succeed. 
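+ * (bpf_map_get() takes a temporary reference on the array map; the
+ * extracted value pointer stays valid afterwards because the map FD itself
+ * remains open, as noted below.)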
+ */ + map = bpf_map_get(fd); + if (IS_ERR(map)) + return NULL; + if (map->map_type != BPF_MAP_TYPE_ARRAY) + goto out; + addr = ((struct bpf_array *)map)->value; + /* the addr stays valid, since FD is not closed */ +out: + bpf_map_put(map); + return addr; +} + +#else + +static inline void *skel_alloc(size_t size) +{ + return calloc(1, size); +} + +static inline void skel_free(void *p) +{ + free(p); +} + +static inline void skel_free_map_data(void *p, __u64 addr, size_t sz) +{ + munmap(p, sz); +} + +static inline void *skel_prep_map_data(const void *val, size_t mmap_sz, size_t val_sz) +{ + void *addr; + + addr = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + if (addr == (void *) -1) + return NULL; + memcpy(addr, val, val_sz); + return addr; +} + +static inline void *skel_finalize_map_data(__u64 *init_val, size_t mmap_sz, int flags, int fd) +{ + void *addr; + + addr = mmap((void *) (long) *init_val, mmap_sz, flags, MAP_SHARED | MAP_FIXED, fd, 0); + if (addr == (void *) -1) + return NULL; + return addr; +} +#endif + +static inline int skel_closenz(int fd) +{ + if (fd > 0) + return close(fd); + return -EINVAL; +} + +#ifndef offsetofend +#define offsetofend(TYPE, MEMBER) \ + (offsetof(TYPE, MEMBER) + sizeof((((TYPE *)0)->MEMBER))) +#endif + +static inline int skel_map_create(enum bpf_map_type map_type, + const char *map_name, + __u32 key_size, + __u32 value_size, + __u32 max_entries) +{ + const size_t attr_sz = offsetofend(union bpf_attr, map_extra); + union bpf_attr attr; + + memset(&attr, 0, attr_sz); + + attr.map_type = map_type; + strncpy(attr.map_name, map_name, sizeof(attr.map_name)); + attr.key_size = key_size; + attr.value_size = value_size; + attr.max_entries = max_entries; + + return skel_sys_bpf(BPF_MAP_CREATE, &attr, attr_sz); +} + +static inline int skel_map_update_elem(int fd, const void *key, + const void *value, __u64 flags) +{ + const size_t attr_sz = offsetofend(union bpf_attr, flags); + union bpf_attr attr; + + memset(&attr, 0, attr_sz); + attr.map_fd = fd; + attr.key = (long) key; + attr.value = (long) value; + attr.flags = flags; + + return skel_sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, attr_sz); +} + +static inline int skel_raw_tracepoint_open(const char *name, int prog_fd) +{ + const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint.prog_fd); + union bpf_attr attr; + + memset(&attr, 0, attr_sz); + attr.raw_tracepoint.name = (long) name; + attr.raw_tracepoint.prog_fd = prog_fd; + + return skel_sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, attr_sz); +} + +static inline int skel_link_create(int prog_fd, int target_fd, + enum bpf_attach_type attach_type) +{ + const size_t attr_sz = offsetofend(union bpf_attr, link_create.iter_info_len); + union bpf_attr attr; + + memset(&attr, 0, attr_sz); + attr.link_create.prog_fd = prog_fd; + attr.link_create.target_fd = target_fd; + attr.link_create.attach_type = attach_type; + + return skel_sys_bpf(BPF_LINK_CREATE, &attr, attr_sz); +} + +#ifdef __KERNEL__ +#define set_err +#else +#define set_err err = -errno +#endif + +static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts) +{ + int map_fd = -1, prog_fd = -1, key = 0, err; + union bpf_attr attr; + + err = map_fd = skel_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, opts->data_sz, 1); + if (map_fd < 0) { + opts->errstr = "failed to create loader map"; + set_err; + goto out; + } + + err = skel_map_update_elem(map_fd, &key, opts->data, 0); + if (err < 0) { + opts->errstr = "failed to update loader map"; + set_err; + goto out; + } + + 
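+	/* Load the loader program itself: a BPF_PROG_TYPE_SYSCALL program
+	 * whose instructions were generated alongside the lskel; fd_array
+	 * lets those instructions reference the loader map created above.
+	 */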
memset(&attr, 0, sizeof(attr)); + attr.prog_type = BPF_PROG_TYPE_SYSCALL; + attr.insns = (long) opts->insns; + attr.insn_cnt = opts->insns_sz / sizeof(struct bpf_insn); + attr.license = (long) "Dual BSD/GPL"; + memcpy(attr.prog_name, "__loader.prog", sizeof("__loader.prog")); + attr.fd_array = (long) &map_fd; + attr.log_level = opts->ctx->log_level; + attr.log_size = opts->ctx->log_size; + attr.log_buf = opts->ctx->log_buf; + attr.prog_flags = BPF_F_SLEEPABLE; + err = prog_fd = skel_sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); + if (prog_fd < 0) { + opts->errstr = "failed to load loader prog"; + set_err; + goto out; + } + + memset(&attr, 0, sizeof(attr)); + attr.test.prog_fd = prog_fd; + attr.test.ctx_in = (long) opts->ctx; + attr.test.ctx_size_in = opts->ctx->sz; + err = skel_sys_bpf(BPF_PROG_RUN, &attr, sizeof(attr)); + if (err < 0 || (int)attr.test.retval < 0) { + opts->errstr = "failed to execute loader prog"; + if (err < 0) { + set_err; + } else { + err = (int)attr.test.retval; +#ifndef __KERNEL__ + errno = -err; +#endif + } + goto out; + } + err = 0; +out: + if (map_fd >= 0) + close(map_fd); + if (prog_fd >= 0) + close(prog_fd); + return err; +} + +#endif diff --git a/pkg/plugin/lib/common/libbpf/_src/str_error.c b/pkg/plugin/lib/common/libbpf/_src/str_error.c new file mode 100644 index 000000000..146da0197 --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/str_error.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +#undef _GNU_SOURCE +#include +#include +#include "str_error.h" + +/* make sure libbpf doesn't use kernel-only integer typedefs */ +#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64 + +/* + * Wrapper to allow for building in non-GNU systems such as Alpine Linux's musl + * libc, while checking strerror_r() return to avoid having to check this in + * all places calling it. + */ +char *libbpf_strerror_r(int err, char *dst, int len) +{ + int ret = strerror_r(err < 0 ? 
-err : err, dst, len); + if (ret) + snprintf(dst, len, "ERROR: strerror_r(%d)=%d", err, ret); + return dst; +} diff --git a/pkg/plugin/lib/common/libbpf/_src/str_error.h b/pkg/plugin/lib/common/libbpf/_src/str_error.h new file mode 100644 index 000000000..a139334d5 --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/str_error.h @@ -0,0 +1,6 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ +#ifndef __LIBBPF_STR_ERROR_H +#define __LIBBPF_STR_ERROR_H + +char *libbpf_strerror_r(int err, char *dst, int len); +#endif /* __LIBBPF_STR_ERROR_H */ diff --git a/pkg/plugin/lib/common/libbpf/_src/strset.c b/pkg/plugin/lib/common/libbpf/_src/strset.c new file mode 100644 index 000000000..ea6553181 --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/strset.c @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +/* Copyright (c) 2021 Facebook */ +#include +#include +#include +#include +#include +#include "hashmap.h" +#include "libbpf_internal.h" +#include "strset.h" + +struct strset { + void *strs_data; + size_t strs_data_len; + size_t strs_data_cap; + size_t strs_data_max_len; + + /* lookup index for each unique string in strings set */ + struct hashmap *strs_hash; +}; + +static size_t strset_hash_fn(const void *key, void *ctx) +{ + const struct strset *s = ctx; + const char *str = s->strs_data + (long)key; + + return str_hash(str); +} + +static bool strset_equal_fn(const void *key1, const void *key2, void *ctx) +{ + const struct strset *s = ctx; + const char *str1 = s->strs_data + (long)key1; + const char *str2 = s->strs_data + (long)key2; + + return strcmp(str1, str2) == 0; +} + +struct strset *strset__new(size_t max_data_sz, const char *init_data, size_t init_data_sz) +{ + struct strset *set = calloc(1, sizeof(*set)); + struct hashmap *hash; + int err = -ENOMEM; + + if (!set) + return ERR_PTR(-ENOMEM); + + hash = hashmap__new(strset_hash_fn, strset_equal_fn, set); + if (IS_ERR(hash)) + goto err_out; + + set->strs_data_max_len = max_data_sz; + set->strs_hash = hash; + + if (init_data) { + long off; + + set->strs_data = malloc(init_data_sz); + if (!set->strs_data) + goto err_out; + + memcpy(set->strs_data, init_data, init_data_sz); + set->strs_data_len = init_data_sz; + set->strs_data_cap = init_data_sz; + + for (off = 0; off < set->strs_data_len; off += strlen(set->strs_data + off) + 1) { + /* hashmap__add() returns EEXIST if string with the same + * content already is in the hash map + */ + err = hashmap__add(hash, (void *)off, (void *)off); + if (err == -EEXIST) + continue; /* duplicate */ + if (err) + goto err_out; + } + } + + return set; +err_out: + strset__free(set); + return ERR_PTR(err); +} + +void strset__free(struct strset *set) +{ + if (IS_ERR_OR_NULL(set)) + return; + + hashmap__free(set->strs_hash); + free(set->strs_data); + free(set); +} + +size_t strset__data_size(const struct strset *set) +{ + return set->strs_data_len; +} + +const char *strset__data(const struct strset *set) +{ + return set->strs_data; +} + +static void *strset_add_str_mem(struct strset *set, size_t add_sz) +{ + return libbpf_add_mem(&set->strs_data, &set->strs_data_cap, 1, + set->strs_data_len, set->strs_data_max_len, add_sz); +} + +/* Find string offset that corresponds to a given string *s*. + * Returns: + * - >0 offset into string data, if string is found; + * - -ENOENT, if string is not in the string data; + * - <0, on any other error. 
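+ *
+ * A minimal usage sketch (the 64KB cap and the "foo" string are arbitrary):
+ *
+ *	struct strset *set = strset__new(64 * 1024, NULL, 0);
+ *	int off;
+ *
+ *	if (IS_ERR(set))
+ *		return PTR_ERR(set);
+ *	off = strset__find_str(set, "foo");
+ *	if (off == -ENOENT)
+ *		off = strset__add_str(set, "foo");
+ *	// on success, off is a stable offset of "foo" within strset__data(set)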
+ */ +int strset__find_str(struct strset *set, const char *s) +{ + long old_off, new_off, len; + void *p; + + /* see strset__add_str() for why we do this */ + len = strlen(s) + 1; + p = strset_add_str_mem(set, len); + if (!p) + return -ENOMEM; + + new_off = set->strs_data_len; + memcpy(p, s, len); + + if (hashmap__find(set->strs_hash, (void *)new_off, (void **)&old_off)) + return old_off; + + return -ENOENT; +} + +/* Add a string s to the string data. If the string already exists, return its + * offset within string data. + * Returns: + * - > 0 offset into string data, on success; + * - < 0, on error. + */ +int strset__add_str(struct strset *set, const char *s) +{ + long old_off, new_off, len; + void *p; + int err; + + /* Hashmap keys are always offsets within set->strs_data, so to even + * look up some string from the "outside", we need to first append it + * at the end, so that it can be addressed with an offset. Luckily, + * until set->strs_data_len is incremented, that string is just a piece + * of garbage for the rest of the code, so no harm, no foul. On the + * other hand, if the string is unique, it's already appended and + * ready to be used, only a simple set->strs_data_len increment away. + */ + len = strlen(s) + 1; + p = strset_add_str_mem(set, len); + if (!p) + return -ENOMEM; + + new_off = set->strs_data_len; + memcpy(p, s, len); + + /* Now attempt to add the string, but only if the string with the same + * contents doesn't exist already (HASHMAP_ADD strategy). If such + * string exists, we'll get its offset in old_off (that's old_key). + */ + err = hashmap__insert(set->strs_hash, (void *)new_off, (void *)new_off, + HASHMAP_ADD, (const void **)&old_off, NULL); + if (err == -EEXIST) + return old_off; /* duplicated string, return existing offset */ + if (err) + return err; + + set->strs_data_len += len; /* new unique string, adjust data length */ + return new_off; +} diff --git a/pkg/plugin/lib/common/libbpf/_src/strset.h b/pkg/plugin/lib/common/libbpf/_src/strset.h new file mode 100644 index 000000000..b6ddf77a8 --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/strset.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ + +/* Copyright (c) 2021 Facebook */ +#ifndef __LIBBPF_STRSET_H +#define __LIBBPF_STRSET_H + +#include +#include + +struct strset; + +struct strset *strset__new(size_t max_data_sz, const char *init_data, size_t init_data_sz); +void strset__free(struct strset *set); + +const char *strset__data(const struct strset *set); +size_t strset__data_size(const struct strset *set); + +int strset__find_str(struct strset *set, const char *s); +int strset__add_str(struct strset *set, const char *s); + +#endif /* __LIBBPF_STRSET_H */ diff --git a/pkg/plugin/lib/common/libbpf/_src/usdt.bpf.h b/pkg/plugin/lib/common/libbpf/_src/usdt.bpf.h new file mode 100644 index 000000000..4181fddb3 --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/usdt.bpf.h @@ -0,0 +1,259 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ +#ifndef __USDT_BPF_H__ +#define __USDT_BPF_H__ + +#include +#include +#include +#include + +/* Below types and maps are internal implementation details of libbpf's USDT + * support and are subjects to change. Also, bpf_usdt_xxx() API helpers should + * be considered an unstable API as well and might be adjusted based on user + * feedback from using libbpf's USDT support in production. 
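+ *
+ * For illustration, a typical BPF-side consumer looks roughly like this
+ * (the binary path, provider and probe names below are made up):
+ *
+ *	SEC("usdt/./my_app:my_provider:my_probe")
+ *	int BPF_USDT(handle_my_probe, int x, void *y)
+ *	{
+ *		bpf_printk("x=%d arg_cnt=%d", x, bpf_usdt_arg_cnt(ctx));
+ *		return 0;
+ *	}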
+ */ + +/* User can override BPF_USDT_MAX_SPEC_CNT to change default size of internal + * map that keeps track of USDT argument specifications. This might be + * necessary if there are a lot of USDT attachments. + */ +#ifndef BPF_USDT_MAX_SPEC_CNT +#define BPF_USDT_MAX_SPEC_CNT 256 +#endif +/* User can override BPF_USDT_MAX_IP_CNT to change default size of internal + * map that keeps track of IP (memory address) mapping to USDT argument + * specification. + * Note, if kernel supports BPF cookies, this map is not used and could be + * resized all the way to 1 to save a bit of memory. + */ +#ifndef BPF_USDT_MAX_IP_CNT +#define BPF_USDT_MAX_IP_CNT (4 * BPF_USDT_MAX_SPEC_CNT) +#endif +/* We use BPF CO-RE to detect support for BPF cookie from BPF side. This is + * the only dependency on CO-RE, so if it's undesirable, user can override + * BPF_USDT_HAS_BPF_COOKIE to specify whether to BPF cookie is supported or not. + */ +#ifndef BPF_USDT_HAS_BPF_COOKIE +#define BPF_USDT_HAS_BPF_COOKIE \ + bpf_core_enum_value_exists(enum bpf_func_id___usdt, BPF_FUNC_get_attach_cookie___usdt) +#endif + +enum __bpf_usdt_arg_type { + BPF_USDT_ARG_CONST, + BPF_USDT_ARG_REG, + BPF_USDT_ARG_REG_DEREF, +}; + +struct __bpf_usdt_arg_spec { + /* u64 scalar interpreted depending on arg_type, see below */ + __u64 val_off; + /* arg location case, see bpf_udst_arg() for details */ + enum __bpf_usdt_arg_type arg_type; + /* offset of referenced register within struct pt_regs */ + short reg_off; + /* whether arg should be interpreted as signed value */ + bool arg_signed; + /* number of bits that need to be cleared and, optionally, + * sign-extended to cast arguments that are 1, 2, or 4 bytes + * long into final 8-byte u64/s64 value returned to user + */ + char arg_bitshift; +}; + +/* should match USDT_MAX_ARG_CNT in usdt.c exactly */ +#define BPF_USDT_MAX_ARG_CNT 12 +struct __bpf_usdt_spec { + struct __bpf_usdt_arg_spec args[BPF_USDT_MAX_ARG_CNT]; + __u64 usdt_cookie; + short arg_cnt; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, BPF_USDT_MAX_SPEC_CNT); + __type(key, int); + __type(value, struct __bpf_usdt_spec); +} __bpf_usdt_specs SEC(".maps") __weak; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, BPF_USDT_MAX_IP_CNT); + __type(key, long); + __type(value, __u32); +} __bpf_usdt_ip_to_spec_id SEC(".maps") __weak; + +/* don't rely on user's BPF code to have latest definition of bpf_func_id */ +enum bpf_func_id___usdt { + BPF_FUNC_get_attach_cookie___usdt = 0xBAD, /* value doesn't matter */ +}; + +static __always_inline +int __bpf_usdt_spec_id(struct pt_regs *ctx) +{ + if (!BPF_USDT_HAS_BPF_COOKIE) { + long ip = PT_REGS_IP(ctx); + int *spec_id_ptr; + + spec_id_ptr = bpf_map_lookup_elem(&__bpf_usdt_ip_to_spec_id, &ip); + return spec_id_ptr ? *spec_id_ptr : -ESRCH; + } + + return bpf_get_attach_cookie(ctx); +} + +/* Return number of USDT arguments defined for currently traced USDT. */ +__weak __hidden +int bpf_usdt_arg_cnt(struct pt_regs *ctx) +{ + struct __bpf_usdt_spec *spec; + int spec_id; + + spec_id = __bpf_usdt_spec_id(ctx); + if (spec_id < 0) + return -ESRCH; + + spec = bpf_map_lookup_elem(&__bpf_usdt_specs, &spec_id); + if (!spec) + return -ESRCH; + + return spec->arg_cnt; +} + +/* Fetch USDT argument #*arg_num* (zero-indexed) and put its value into *res. + * Returns 0 on success; negative error, otherwise. + * On error *res is guaranteed to be set to zero. 
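+ *
+ * A minimal usage sketch from inside a usdt handler, dumping all arguments
+ * of the traced USDT:
+ *
+ *	long val;
+ *	int i, n = bpf_usdt_arg_cnt(ctx);
+ *
+ *	for (i = 0; i < n && i < BPF_USDT_MAX_ARG_CNT; i++) {
+ *		if (!bpf_usdt_arg(ctx, i, &val))
+ *			bpf_printk("arg #%d = %ld", i, val);
+ *	}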
+ */ +__weak __hidden +int bpf_usdt_arg(struct pt_regs *ctx, __u64 arg_num, long *res) +{ + struct __bpf_usdt_spec *spec; + struct __bpf_usdt_arg_spec *arg_spec; + unsigned long val; + int err, spec_id; + + *res = 0; + + spec_id = __bpf_usdt_spec_id(ctx); + if (spec_id < 0) + return -ESRCH; + + spec = bpf_map_lookup_elem(&__bpf_usdt_specs, &spec_id); + if (!spec) + return -ESRCH; + + if (arg_num >= BPF_USDT_MAX_ARG_CNT || arg_num >= spec->arg_cnt) + return -ENOENT; + + arg_spec = &spec->args[arg_num]; + switch (arg_spec->arg_type) { + case BPF_USDT_ARG_CONST: + /* Arg is just a constant ("-4@$-9" in USDT arg spec). + * value is recorded in arg_spec->val_off directly. + */ + val = arg_spec->val_off; + break; + case BPF_USDT_ARG_REG: + /* Arg is in a register (e.g, "8@%rax" in USDT arg spec), + * so we read the contents of that register directly from + * struct pt_regs. To keep things simple user-space parts + * record offsetof(struct pt_regs, ) in arg_spec->reg_off. + */ + err = bpf_probe_read_kernel(&val, sizeof(val), (void *)ctx + arg_spec->reg_off); + if (err) + return err; + break; + case BPF_USDT_ARG_REG_DEREF: + /* Arg is in memory addressed by register, plus some offset + * (e.g., "-4@-1204(%rbp)" in USDT arg spec). Register is + * identified like with BPF_USDT_ARG_REG case, and the offset + * is in arg_spec->val_off. We first fetch register contents + * from pt_regs, then do another user-space probe read to + * fetch argument value itself. + */ + err = bpf_probe_read_kernel(&val, sizeof(val), (void *)ctx + arg_spec->reg_off); + if (err) + return err; + err = bpf_probe_read_user(&val, sizeof(val), (void *)val + arg_spec->val_off); + if (err) + return err; +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + val >>= arg_spec->arg_bitshift; +#endif + break; + default: + return -EINVAL; + } + + /* cast arg from 1, 2, or 4 bytes to final 8 byte size clearing + * necessary upper arg_bitshift bits, with sign extension if argument + * is signed + */ + val <<= arg_spec->arg_bitshift; + if (arg_spec->arg_signed) + val = ((long)val) >> arg_spec->arg_bitshift; + else + val = val >> arg_spec->arg_bitshift; + *res = val; + return 0; +} + +/* Retrieve user-specified cookie value provided during attach as + * bpf_usdt_opts.usdt_cookie. This serves the same purpose as BPF cookie + * returned by bpf_get_attach_cookie(). Libbpf's support for USDT is itself + * utilizing BPF cookies internally, so user can't use BPF cookie directly + * for USDT programs and has to use bpf_usdt_cookie() API instead. + */ +__weak __hidden +long bpf_usdt_cookie(struct pt_regs *ctx) +{ + struct __bpf_usdt_spec *spec; + int spec_id; + + spec_id = __bpf_usdt_spec_id(ctx); + if (spec_id < 0) + return 0; + + spec = bpf_map_lookup_elem(&__bpf_usdt_specs, &spec_id); + if (!spec) + return 0; + + return spec->usdt_cookie; +} + +/* we rely on ___bpf_apply() and ___bpf_narg() macros already defined in bpf_tracing.h */ +#define ___bpf_usdt_args0() ctx +#define ___bpf_usdt_args1(x) ___bpf_usdt_args0(), ({ long _x; bpf_usdt_arg(ctx, 0, &_x); (void *)_x; }) +#define ___bpf_usdt_args2(x, args...) ___bpf_usdt_args1(args), ({ long _x; bpf_usdt_arg(ctx, 1, &_x); (void *)_x; }) +#define ___bpf_usdt_args3(x, args...) ___bpf_usdt_args2(args), ({ long _x; bpf_usdt_arg(ctx, 2, &_x); (void *)_x; }) +#define ___bpf_usdt_args4(x, args...) ___bpf_usdt_args3(args), ({ long _x; bpf_usdt_arg(ctx, 3, &_x); (void *)_x; }) +#define ___bpf_usdt_args5(x, args...) 
___bpf_usdt_args4(args), ({ long _x; bpf_usdt_arg(ctx, 4, &_x); (void *)_x; }) +#define ___bpf_usdt_args6(x, args...) ___bpf_usdt_args5(args), ({ long _x; bpf_usdt_arg(ctx, 5, &_x); (void *)_x; }) +#define ___bpf_usdt_args7(x, args...) ___bpf_usdt_args6(args), ({ long _x; bpf_usdt_arg(ctx, 6, &_x); (void *)_x; }) +#define ___bpf_usdt_args8(x, args...) ___bpf_usdt_args7(args), ({ long _x; bpf_usdt_arg(ctx, 7, &_x); (void *)_x; }) +#define ___bpf_usdt_args9(x, args...) ___bpf_usdt_args8(args), ({ long _x; bpf_usdt_arg(ctx, 8, &_x); (void *)_x; }) +#define ___bpf_usdt_args10(x, args...) ___bpf_usdt_args9(args), ({ long _x; bpf_usdt_arg(ctx, 9, &_x); (void *)_x; }) +#define ___bpf_usdt_args11(x, args...) ___bpf_usdt_args10(args), ({ long _x; bpf_usdt_arg(ctx, 10, &_x); (void *)_x; }) +#define ___bpf_usdt_args12(x, args...) ___bpf_usdt_args11(args), ({ long _x; bpf_usdt_arg(ctx, 11, &_x); (void *)_x; }) +#define ___bpf_usdt_args(args...) ___bpf_apply(___bpf_usdt_args, ___bpf_narg(args))(args) + +/* + * BPF_USDT serves the same purpose for USDT handlers as BPF_PROG for + * tp_btf/fentry/fexit BPF programs and BPF_KPROBE for kprobes. + * Original struct pt_regs * context is preserved as 'ctx' argument. + */ +#define BPF_USDT(name, args...) \ +name(struct pt_regs *ctx); \ +static __attribute__((always_inline)) typeof(name(0)) \ +____##name(struct pt_regs *ctx, ##args); \ +typeof(name(0)) name(struct pt_regs *ctx) \ +{ \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ + return ____##name(___bpf_usdt_args(args)); \ + _Pragma("GCC diagnostic pop") \ +} \ +static __attribute__((always_inline)) typeof(name(0)) \ +____##name(struct pt_regs *ctx, ##args) + +#endif /* __USDT_BPF_H__ */ diff --git a/pkg/plugin/lib/common/libbpf/_src/usdt.c b/pkg/plugin/lib/common/libbpf/_src/usdt.c new file mode 100644 index 000000000..f1c9339cf --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/usdt.c @@ -0,0 +1,1518 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* s8 will be marked as poison while it's a reg of riscv */ +#if defined(__riscv) +#define rv_s8 s8 +#endif + +#include "bpf.h" +#include "libbpf.h" +#include "libbpf_common.h" +#include "libbpf_internal.h" +#include "hashmap.h" + +/* libbpf's USDT support consists of BPF-side state/code and user-space + * state/code working together in concert. BPF-side parts are defined in + * usdt.bpf.h header library. User-space state is encapsulated by struct + * usdt_manager and all the supporting code centered around usdt_manager. + * + * usdt.bpf.h defines two BPF maps that usdt_manager expects: USDT spec map + * and IP-to-spec-ID map, which is auxiliary map necessary for kernels that + * don't support BPF cookie (see below). These two maps are implicitly + * embedded into user's end BPF object file when user's code included + * usdt.bpf.h. This means that libbpf doesn't do anything special to create + * these USDT support maps. They are created by normal libbpf logic of + * instantiating BPF maps when opening and loading BPF object. + * + * As such, libbpf is basically unaware of the need to do anything + * USDT-related until the very first call to bpf_program__attach_usdt(), which + * can be called by user explicitly or happen automatically during skeleton + * attach (or, equivalently, through generic bpf_program__attach() call). 
At + * this point, libbpf will instantiate and initialize struct usdt_manager and + * store it in bpf_object. USDT manager is per-BPF object construct, as each + * independent BPF object might or might not have USDT programs, and thus all + * the expected USDT-related state. There is no coordination between two + * bpf_object in parts of USDT attachment, they are oblivious of each other's + * existence and libbpf is just oblivious, dealing with bpf_object-specific + * USDT state. + * + * Quick crash course on USDTs. + * + * From user-space application's point of view, USDT is essentially just + * a slightly special function call that normally has zero overhead, unless it + * is being traced by some external entity (e.g, BPF-based tool). Here's how + * a typical application can trigger USDT probe: + * + * #include // provided by systemtap-sdt-devel package + * // folly also provide similar functionality in folly/tracing/StaticTracepoint.h + * + * STAP_PROBE3(my_usdt_provider, my_usdt_probe_name, 123, x, &y); + * + * USDT is identified by it's : pair of names. Each + * individual USDT has a fixed number of arguments (3 in the above example) + * and specifies values of each argument as if it was a function call. + * + * USDT call is actually not a function call, but is instead replaced by + * a single NOP instruction (thus zero overhead, effectively). But in addition + * to that, those USDT macros generate special SHT_NOTE ELF records in + * .note.stapsdt ELF section. Here's an example USDT definition as emitted by + * `readelf -n `: + * + * stapsdt 0x00000089 NT_STAPSDT (SystemTap probe descriptors) + * Provider: test + * Name: usdt12 + * Location: 0x0000000000549df3, Base: 0x00000000008effa4, Semaphore: 0x0000000000a4606e + * Arguments: -4@-1204(%rbp) -4@%edi -8@-1216(%rbp) -8@%r8 -4@$5 -8@%r9 8@%rdx 8@%r10 -4@$-9 -2@%cx -2@%ax -1@%sil + * + * In this case we have USDT test:usdt12 with 12 arguments. + * + * Location and base are offsets used to calculate absolute IP address of that + * NOP instruction that kernel can replace with an interrupt instruction to + * trigger instrumentation code (BPF program for all that we care about). + * + * Semaphore above is and optional feature. It records an address of a 2-byte + * refcount variable (normally in '.probes' ELF section) used for signaling if + * there is anything that is attached to USDT. This is useful for user + * applications if, for example, they need to prepare some arguments that are + * passed only to USDTs and preparation is expensive. By checking if USDT is + * "activated", an application can avoid paying those costs unnecessarily. + * Recent enough kernel has built-in support for automatically managing this + * refcount, which libbpf expects and relies on. If USDT is defined without + * associated semaphore, this value will be zero. See selftests for semaphore + * examples. + * + * Arguments is the most interesting part. This USDT specification string is + * providing information about all the USDT arguments and their locations. The + * part before @ sign defined byte size of the argument (1, 2, 4, or 8) and + * whether the argument is signed or unsigned (negative size means signed). + * The part after @ sign is assembly-like definition of argument location + * (see [0] for more details). 
Technically, assembler can provide some pretty + * advanced definitions, but libbpf is currently supporting three most common + * cases: + * 1) immediate constant, see 5th and 9th args above (-4@$5 and -4@-9); + * 2) register value, e.g., 8@%rdx, which means "unsigned 8-byte integer + * whose value is in register %rdx"; + * 3) memory dereference addressed by register, e.g., -4@-1204(%rbp), which + * specifies signed 32-bit integer stored at offset -1204 bytes from + * memory address stored in %rbp. + * + * [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation + * + * During attachment, libbpf parses all the relevant USDT specifications and + * prepares `struct usdt_spec` (USDT spec), which is then provided to BPF-side + * code through spec map. This allows BPF applications to quickly fetch the + * actual value at runtime using a simple BPF-side code. + * + * With basics out of the way, let's go over less immediately obvious aspects + * of supporting USDTs. + * + * First, there is no special USDT BPF program type. It is actually just + * a uprobe BPF program (which for kernel, at least currently, is just a kprobe + * program, so BPF_PROG_TYPE_KPROBE program type). With the only difference + * that uprobe is usually attached at the function entry, while USDT will + * normally will be somewhere inside the function. But it should always be + * pointing to NOP instruction, which makes such uprobes the fastest uprobe + * kind. + * + * Second, it's important to realize that such STAP_PROBEn(provider, name, ...) + * macro invocations can end up being inlined many-many times, depending on + * specifics of each individual user application. So single conceptual USDT + * (identified by provider:name pair of identifiers) is, generally speaking, + * multiple uprobe locations (USDT call sites) in different places in user + * application. Further, again due to inlining, each USDT call site might end + * up having the same argument #N be located in a different place. In one call + * site it could be a constant, in another will end up in a register, and in + * yet another could be some other register or even somewhere on the stack. + * + * As such, "attaching to USDT" means (in general case) attaching the same + * uprobe BPF program to multiple target locations in user application, each + * potentially having a completely different USDT spec associated with it. + * To wire all this up together libbpf allocates a unique integer spec ID for + * each unique USDT spec. Spec IDs are allocated as sequential small integers + * so that they can be used as keys in array BPF map (for performance reasons). + * Spec ID allocation and accounting is big part of what usdt_manager is + * about. This state has to be maintained per-BPF object and coordinate + * between different USDT attachments within the same BPF object. + * + * Spec ID is the key in spec BPF map, value is the actual USDT spec layed out + * as struct usdt_spec. Each invocation of BPF program at runtime needs to + * know its associated spec ID. It gets it either through BPF cookie, which + * libbpf sets to spec ID during attach time, or, if kernel is too old to + * support BPF cookie, through IP-to-spec-ID map that libbpf maintains in such + * case. The latter means that some modes of operation can't be supported + * without BPF cookie. Such mode is attaching to shared library "generically", + * without specifying target process. 
In such case, it's impossible to + * calculate absolute IP addresses for IP-to-spec-ID map, and thus such mode + * is not supported without BPF cookie support. + * + * Note that libbpf is using BPF cookie functionality for its own internal + * needs, so user itself can't rely on BPF cookie feature. To that end, libbpf + * provides conceptually equivalent USDT cookie support. It's still u64 + * user-provided value that can be associated with USDT attachment. Note that + * this will be the same value for all USDT call sites within the same single + * *logical* USDT attachment. This makes sense because to user attaching to + * USDT is a single BPF program triggered for singular USDT probe. The fact + * that this is done at multiple actual locations is a mostly hidden + * implementation details. This USDT cookie value can be fetched with + * bpf_usdt_cookie(ctx) API provided by usdt.bpf.h + * + * Lastly, while single USDT can have tons of USDT call sites, it doesn't + * necessarily have that many different USDT specs. It very well might be + * that 1000 USDT call sites only need 5 different USDT specs, because all the + * arguments are typically contained in a small set of registers or stack + * locations. As such, it's wasteful to allocate as many USDT spec IDs as + * there are USDT call sites. So libbpf tries to be frugal and performs + * on-the-fly deduplication during a single USDT attachment to only allocate + * the minimal required amount of unique USDT specs (and thus spec IDs). This + * is trivially achieved by using USDT spec string (Arguments string from USDT + * note) as a lookup key in a hashmap. USDT spec string uniquely defines + * everything about how to fetch USDT arguments, so two USDT call sites + * sharing USDT spec string can safely share the same USDT spec and spec ID. + * Note, this spec string deduplication is happening only during the same USDT + * attachment, so each USDT spec shares the same USDT cookie value. This is + * not generally true for other USDT attachments within the same BPF object, + * as even if USDT spec string is the same, USDT cookie value can be + * different. It was deemed excessive to try to deduplicate across independent + * USDT attachments by taking into account USDT spec string *and* USDT cookie + * value, which would complicated spec ID accounting significantly for little + * gain. 
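+ *
+ * For illustration, the user-space side of one logical USDT attachment boils
+ * down to roughly this (the path, provider/probe names and cookie value are
+ * made up):
+ *
+ *	LIBBPF_OPTS(bpf_usdt_opts, opts, .usdt_cookie = 0xcafe);
+ *	struct bpf_link *link;
+ *	long err;
+ *
+ *	link = bpf_program__attach_usdt(prog, -1, "/usr/bin/my_app",
+ *					"my_provider", "my_probe", &opts);
+ *	err = libbpf_get_error(link);
+ *	if (err)
+ *		return err;
+ *
+ * Passing pid == -1 means "any process"; spec parsing, spec ID allocation and
+ * per-call-site uprobe attachment all happen inside that one call.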
+ */ + +#define USDT_BASE_SEC ".stapsdt.base" +#define USDT_SEMA_SEC ".probes" +#define USDT_NOTE_SEC ".note.stapsdt" +#define USDT_NOTE_TYPE 3 +#define USDT_NOTE_NAME "stapsdt" + +/* should match exactly enum __bpf_usdt_arg_type from usdt.bpf.h */ +enum usdt_arg_type { + USDT_ARG_CONST, + USDT_ARG_REG, + USDT_ARG_REG_DEREF, +}; + +/* should match exactly struct __bpf_usdt_arg_spec from usdt.bpf.h */ +struct usdt_arg_spec { + __u64 val_off; + enum usdt_arg_type arg_type; + short reg_off; + bool arg_signed; + char arg_bitshift; +}; + +/* should match BPF_USDT_MAX_ARG_CNT in usdt.bpf.h */ +#define USDT_MAX_ARG_CNT 12 + +/* should match struct __bpf_usdt_spec from usdt.bpf.h */ +struct usdt_spec { + struct usdt_arg_spec args[USDT_MAX_ARG_CNT]; + __u64 usdt_cookie; + short arg_cnt; +}; + +struct usdt_note { + const char *provider; + const char *name; + /* USDT args specification string, e.g.: + * "-4@%esi -4@-24(%rbp) -4@%ecx 2@%ax 8@%rdx" + */ + const char *args; + long loc_addr; + long base_addr; + long sema_addr; +}; + +struct usdt_target { + long abs_ip; + long rel_ip; + long sema_off; + struct usdt_spec spec; + const char *spec_str; +}; + +struct usdt_manager { + struct bpf_map *specs_map; + struct bpf_map *ip_to_spec_id_map; + + int *free_spec_ids; + size_t free_spec_cnt; + size_t next_free_spec_id; + + bool has_bpf_cookie; + bool has_sema_refcnt; +}; + +struct usdt_manager *usdt_manager_new(struct bpf_object *obj) +{ + static const char *ref_ctr_sysfs_path = "/sys/bus/event_source/devices/uprobe/format/ref_ctr_offset"; + struct usdt_manager *man; + struct bpf_map *specs_map, *ip_to_spec_id_map; + + specs_map = bpf_object__find_map_by_name(obj, "__bpf_usdt_specs"); + ip_to_spec_id_map = bpf_object__find_map_by_name(obj, "__bpf_usdt_ip_to_spec_id"); + if (!specs_map || !ip_to_spec_id_map) { + pr_warn("usdt: failed to find USDT support BPF maps, did you forget to include bpf/usdt.bpf.h?\n"); + return ERR_PTR(-ESRCH); + } + + man = calloc(1, sizeof(*man)); + if (!man) + return ERR_PTR(-ENOMEM); + + man->specs_map = specs_map; + man->ip_to_spec_id_map = ip_to_spec_id_map; + + /* Detect if BPF cookie is supported for kprobes. + * We don't need IP-to-ID mapping if we can use BPF cookies. + * Added in: 7adfc6c9b315 ("bpf: Add bpf_get_attach_cookie() BPF helper to access bpf_cookie value") + */ + man->has_bpf_cookie = kernel_supports(obj, FEAT_BPF_COOKIE); + + /* Detect kernel support for automatic refcounting of USDT semaphore. + * If this is not supported, USDTs with semaphores will not be supported. 
+ * Added in: a6ca88b241d5 ("trace_uprobe: support reference counter in fd-based uprobe") + */ + man->has_sema_refcnt = access(ref_ctr_sysfs_path, F_OK) == 0; + + return man; +} + +void usdt_manager_free(struct usdt_manager *man) +{ + if (IS_ERR_OR_NULL(man)) + return; + + free(man->free_spec_ids); + free(man); +} + +static int sanity_check_usdt_elf(Elf *elf, const char *path) +{ + GElf_Ehdr ehdr; + int endianness; + + if (elf_kind(elf) != ELF_K_ELF) { + pr_warn("usdt: unrecognized ELF kind %d for '%s'\n", elf_kind(elf), path); + return -EBADF; + } + + switch (gelf_getclass(elf)) { + case ELFCLASS64: + if (sizeof(void *) != 8) { + pr_warn("usdt: attaching to 64-bit ELF binary '%s' is not supported\n", path); + return -EBADF; + } + break; + case ELFCLASS32: + if (sizeof(void *) != 4) { + pr_warn("usdt: attaching to 32-bit ELF binary '%s' is not supported\n", path); + return -EBADF; + } + break; + default: + pr_warn("usdt: unsupported ELF class for '%s'\n", path); + return -EBADF; + } + + if (!gelf_getehdr(elf, &ehdr)) + return -EINVAL; + + if (ehdr.e_type != ET_EXEC && ehdr.e_type != ET_DYN) { + pr_warn("usdt: unsupported type of ELF binary '%s' (%d), only ET_EXEC and ET_DYN are supported\n", + path, ehdr.e_type); + return -EBADF; + } + +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + endianness = ELFDATA2LSB; +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + endianness = ELFDATA2MSB; +#else +# error "Unrecognized __BYTE_ORDER__" +#endif + if (endianness != ehdr.e_ident[EI_DATA]) { + pr_warn("usdt: ELF endianness mismatch for '%s'\n", path); + return -EBADF; + } + + return 0; +} + +static int find_elf_sec_by_name(Elf *elf, const char *sec_name, GElf_Shdr *shdr, Elf_Scn **scn) +{ + Elf_Scn *sec = NULL; + size_t shstrndx; + + if (elf_getshdrstrndx(elf, &shstrndx)) + return -EINVAL; + + /* check if ELF is corrupted and avoid calling elf_strptr if yes */ + if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) + return -EINVAL; + + while ((sec = elf_nextscn(elf, sec)) != NULL) { + char *name; + + if (!gelf_getshdr(sec, shdr)) + return -EINVAL; + + name = elf_strptr(elf, shstrndx, shdr->sh_name); + if (name && strcmp(sec_name, name) == 0) { + *scn = sec; + return 0; + } + } + + return -ENOENT; +} + +struct elf_seg { + long start; + long end; + long offset; + bool is_exec; +}; + +static int cmp_elf_segs(const void *_a, const void *_b) +{ + const struct elf_seg *a = _a; + const struct elf_seg *b = _b; + + return a->start < b->start ? 
-1 : 1; +} + +static int parse_elf_segs(Elf *elf, const char *path, struct elf_seg **segs, size_t *seg_cnt) +{ + GElf_Phdr phdr; + size_t n; + int i, err; + struct elf_seg *seg; + void *tmp; + + *seg_cnt = 0; + + if (elf_getphdrnum(elf, &n)) { + err = -errno; + return err; + } + + for (i = 0; i < n; i++) { + if (!gelf_getphdr(elf, i, &phdr)) { + err = -errno; + return err; + } + + pr_debug("usdt: discovered PHDR #%d in '%s': vaddr 0x%lx memsz 0x%lx offset 0x%lx type 0x%lx flags 0x%lx\n", + i, path, (long)phdr.p_vaddr, (long)phdr.p_memsz, (long)phdr.p_offset, + (long)phdr.p_type, (long)phdr.p_flags); + if (phdr.p_type != PT_LOAD) + continue; + + tmp = libbpf_reallocarray(*segs, *seg_cnt + 1, sizeof(**segs)); + if (!tmp) + return -ENOMEM; + + *segs = tmp; + seg = *segs + *seg_cnt; + (*seg_cnt)++; + + seg->start = phdr.p_vaddr; + seg->end = phdr.p_vaddr + phdr.p_memsz; + seg->offset = phdr.p_offset; + seg->is_exec = phdr.p_flags & PF_X; + } + + if (*seg_cnt == 0) { + pr_warn("usdt: failed to find PT_LOAD program headers in '%s'\n", path); + return -ESRCH; + } + + qsort(*segs, *seg_cnt, sizeof(**segs), cmp_elf_segs); + return 0; +} + +static int parse_lib_segs(int pid, const char *lib_path, struct elf_seg **segs, size_t *seg_cnt) +{ + char path[PATH_MAX], line[PATH_MAX], mode[16]; + size_t seg_start, seg_end, seg_off; + struct elf_seg *seg; + int tmp_pid, i, err; + FILE *f; + + *seg_cnt = 0; + + /* Handle containerized binaries only accessible from + * /proc//root/. They will be reported as just / in + * /proc//maps. + */ + if (sscanf(lib_path, "/proc/%d/root%s", &tmp_pid, path) == 2 && pid == tmp_pid) + goto proceed; + + if (!realpath(lib_path, path)) { + pr_warn("usdt: failed to get absolute path of '%s' (err %d), using path as is...\n", + lib_path, -errno); + libbpf_strlcpy(path, lib_path, sizeof(path)); + } + +proceed: + sprintf(line, "/proc/%d/maps", pid); + f = fopen(line, "r"); + if (!f) { + err = -errno; + pr_warn("usdt: failed to open '%s' to get base addr of '%s': %d\n", + line, lib_path, err); + return err; + } + + /* We need to handle lines with no path at the end: + * + * 7f5c6f5d1000-7f5c6f5d3000 rw-p 001c7000 08:04 21238613 /usr/lib64/libc-2.17.so + * 7f5c6f5d3000-7f5c6f5d8000 rw-p 00000000 00:00 0 + * 7f5c6f5d8000-7f5c6f5d9000 r-xp 00000000 103:01 362990598 /data/users/andriin/linux/tools/bpf/usdt/libhello_usdt.so + */ + while (fscanf(f, "%zx-%zx %s %zx %*s %*d%[^\n]\n", + &seg_start, &seg_end, mode, &seg_off, line) == 5) { + void *tmp; + + /* to handle no path case (see above) we need to capture line + * without skipping any whitespaces. 
So we need to strip + * leading whitespaces manually here + */ + i = 0; + while (isblank(line[i])) + i++; + if (strcmp(line + i, path) != 0) + continue; + + pr_debug("usdt: discovered segment for lib '%s': addrs %zx-%zx mode %s offset %zx\n", + path, seg_start, seg_end, mode, seg_off); + + /* ignore non-executable sections for shared libs */ + if (mode[2] != 'x') + continue; + + tmp = libbpf_reallocarray(*segs, *seg_cnt + 1, sizeof(**segs)); + if (!tmp) { + err = -ENOMEM; + goto err_out; + } + + *segs = tmp; + seg = *segs + *seg_cnt; + *seg_cnt += 1; + + seg->start = seg_start; + seg->end = seg_end; + seg->offset = seg_off; + seg->is_exec = true; + } + + if (*seg_cnt == 0) { + pr_warn("usdt: failed to find '%s' (resolved to '%s') within PID %d memory mappings\n", + lib_path, path, pid); + err = -ESRCH; + goto err_out; + } + + qsort(*segs, *seg_cnt, sizeof(**segs), cmp_elf_segs); + err = 0; +err_out: + fclose(f); + return err; +} + +static struct elf_seg *find_elf_seg(struct elf_seg *segs, size_t seg_cnt, long addr, bool relative) +{ + struct elf_seg *seg; + int i; + + if (relative) { + /* for shared libraries, address is relative offset and thus + * should be fall within logical offset-based range of + * [offset_start, offset_end) + */ + for (i = 0, seg = segs; i < seg_cnt; i++, seg++) { + if (seg->offset <= addr && addr < seg->offset + (seg->end - seg->start)) + return seg; + } + } else { + /* for binaries, address is absolute and thus should be within + * absolute address range of [seg_start, seg_end) + */ + for (i = 0, seg = segs; i < seg_cnt; i++, seg++) { + if (seg->start <= addr && addr < seg->end) + return seg; + } + } + + return NULL; +} + +static int parse_usdt_note(Elf *elf, const char *path, long base_addr, + GElf_Nhdr *nhdr, const char *data, size_t name_off, size_t desc_off, + struct usdt_note *usdt_note); + +static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie); + +static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *path, pid_t pid, + const char *usdt_provider, const char *usdt_name, __u64 usdt_cookie, + struct usdt_target **out_targets, size_t *out_target_cnt) +{ + size_t off, name_off, desc_off, seg_cnt = 0, lib_seg_cnt = 0, target_cnt = 0; + struct elf_seg *segs = NULL, *lib_segs = NULL; + struct usdt_target *targets = NULL, *target; + long base_addr = 0; + Elf_Scn *notes_scn, *base_scn; + GElf_Shdr base_shdr, notes_shdr; + GElf_Ehdr ehdr; + GElf_Nhdr nhdr; + Elf_Data *data; + int err; + + *out_targets = NULL; + *out_target_cnt = 0; + + err = find_elf_sec_by_name(elf, USDT_NOTE_SEC, ¬es_shdr, ¬es_scn); + if (err) { + pr_warn("usdt: no USDT notes section (%s) found in '%s'\n", USDT_NOTE_SEC, path); + return err; + } + + if (notes_shdr.sh_type != SHT_NOTE || !gelf_getehdr(elf, &ehdr)) { + pr_warn("usdt: invalid USDT notes section (%s) in '%s'\n", USDT_NOTE_SEC, path); + return -EINVAL; + } + + err = parse_elf_segs(elf, path, &segs, &seg_cnt); + if (err) { + pr_warn("usdt: failed to process ELF program segments for '%s': %d\n", path, err); + goto err_out; + } + + /* .stapsdt.base ELF section is optional, but is used for prelink + * offset compensation (see a big comment further below) + */ + if (find_elf_sec_by_name(elf, USDT_BASE_SEC, &base_shdr, &base_scn) == 0) + base_addr = base_shdr.sh_addr; + + data = elf_getdata(notes_scn, 0); + off = 0; + while ((off = gelf_getnote(data, off, &nhdr, &name_off, &desc_off)) > 0) { + long usdt_abs_ip, usdt_rel_ip, usdt_sema_off = 0; + struct usdt_note note; + 
struct elf_seg *seg = NULL; + void *tmp; + + err = parse_usdt_note(elf, path, base_addr, &nhdr, + data->d_buf, name_off, desc_off, ¬e); + if (err) + goto err_out; + + if (strcmp(note.provider, usdt_provider) != 0 || strcmp(note.name, usdt_name) != 0) + continue; + + /* We need to compensate "prelink effect". See [0] for details, + * relevant parts quoted here: + * + * Each SDT probe also expands into a non-allocated ELF note. You can + * find this by looking at SHT_NOTE sections and decoding the format; + * see below for details. Because the note is non-allocated, it means + * there is no runtime cost, and also preserved in both stripped files + * and .debug files. + * + * However, this means that prelink won't adjust the note's contents + * for address offsets. Instead, this is done via the .stapsdt.base + * section. This is a special section that is added to the text. We + * will only ever have one of these sections in a final link and it + * will only ever be one byte long. Nothing about this section itself + * matters, we just use it as a marker to detect prelink address + * adjustments. + * + * Each probe note records the link-time address of the .stapsdt.base + * section alongside the probe PC address. The decoder compares the + * base address stored in the note with the .stapsdt.base section's + * sh_addr. Initially these are the same, but the section header will + * be adjusted by prelink. So the decoder applies the difference to + * the probe PC address to get the correct prelinked PC address; the + * same adjustment is applied to the semaphore address, if any. + * + * [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation + */ + usdt_rel_ip = usdt_abs_ip = note.loc_addr; + if (base_addr) { + usdt_abs_ip += base_addr - note.base_addr; + usdt_rel_ip += base_addr - note.base_addr; + } + + if (ehdr.e_type == ET_EXEC) { + /* When attaching uprobes (which what USDTs basically + * are) kernel expects a relative IP to be specified, + * so if we are attaching to an executable ELF binary + * (i.e., not a shared library), we need to calculate + * proper relative IP based on ELF's load address + */ + seg = find_elf_seg(segs, seg_cnt, usdt_abs_ip, false /* relative */); + if (!seg) { + err = -ESRCH; + pr_warn("usdt: failed to find ELF program segment for '%s:%s' in '%s' at IP 0x%lx\n", + usdt_provider, usdt_name, path, usdt_abs_ip); + goto err_out; + } + if (!seg->is_exec) { + err = -ESRCH; + pr_warn("usdt: matched ELF binary '%s' segment [0x%lx, 0x%lx) for '%s:%s' at IP 0x%lx is not executable\n", + path, seg->start, seg->end, usdt_provider, usdt_name, + usdt_abs_ip); + goto err_out; + } + + usdt_rel_ip = usdt_abs_ip - (seg->start - seg->offset); + } else if (!man->has_bpf_cookie) { /* ehdr.e_type == ET_DYN */ + /* If we don't have BPF cookie support but need to + * attach to a shared library, we'll need to know and + * record absolute addresses of attach points due to + * the need to lookup USDT spec by absolute IP of + * triggered uprobe. Doing this resolution is only + * possible when we have a specific PID of the process + * that's using specified shared library. BPF cookie + * removes the absolute address limitation as we don't + * need to do this lookup (we just use BPF cookie as + * an index of USDT spec), so for newer kernels with + * BPF cookie support libbpf supports USDT attachment + * to shared libraries with no PID filter. 
+ */ + if (pid < 0) { + pr_warn("usdt: attaching to shared libraries without specific PID is not supported on current kernel\n"); + err = -ENOTSUP; + goto err_out; + } + + /* lib_segs are lazily initialized only if necessary */ + if (lib_seg_cnt == 0) { + err = parse_lib_segs(pid, path, &lib_segs, &lib_seg_cnt); + if (err) { + pr_warn("usdt: failed to get memory segments in PID %d for shared library '%s': %d\n", + pid, path, err); + goto err_out; + } + } + + seg = find_elf_seg(lib_segs, lib_seg_cnt, usdt_rel_ip, true /* relative */); + if (!seg) { + err = -ESRCH; + pr_warn("usdt: failed to find shared lib memory segment for '%s:%s' in '%s' at relative IP 0x%lx\n", + usdt_provider, usdt_name, path, usdt_rel_ip); + goto err_out; + } + + usdt_abs_ip = seg->start + (usdt_rel_ip - seg->offset); + } + + pr_debug("usdt: probe for '%s:%s' in %s '%s': addr 0x%lx base 0x%lx (resolved abs_ip 0x%lx rel_ip 0x%lx) args '%s' in segment [0x%lx, 0x%lx) at offset 0x%lx\n", + usdt_provider, usdt_name, ehdr.e_type == ET_EXEC ? "exec" : "lib ", path, + note.loc_addr, note.base_addr, usdt_abs_ip, usdt_rel_ip, note.args, + seg ? seg->start : 0, seg ? seg->end : 0, seg ? seg->offset : 0); + + /* Adjust semaphore address to be a relative offset */ + if (note.sema_addr) { + if (!man->has_sema_refcnt) { + pr_warn("usdt: kernel doesn't support USDT semaphore refcounting for '%s:%s' in '%s'\n", + usdt_provider, usdt_name, path); + err = -ENOTSUP; + goto err_out; + } + + seg = find_elf_seg(segs, seg_cnt, note.sema_addr, false /* relative */); + if (!seg) { + err = -ESRCH; + pr_warn("usdt: failed to find ELF loadable segment with semaphore of '%s:%s' in '%s' at 0x%lx\n", + usdt_provider, usdt_name, path, note.sema_addr); + goto err_out; + } + if (seg->is_exec) { + err = -ESRCH; + pr_warn("usdt: matched ELF binary '%s' segment [0x%lx, 0x%lx] for semaphore of '%s:%s' at 0x%lx is executable\n", + path, seg->start, seg->end, usdt_provider, usdt_name, + note.sema_addr); + goto err_out; + } + + usdt_sema_off = note.sema_addr - (seg->start - seg->offset); + + pr_debug("usdt: sema for '%s:%s' in %s '%s': addr 0x%lx base 0x%lx (resolved 0x%lx) in segment [0x%lx, 0x%lx] at offset 0x%lx\n", + usdt_provider, usdt_name, ehdr.e_type == ET_EXEC ? 
"exec" : "lib ", + path, note.sema_addr, note.base_addr, usdt_sema_off, + seg->start, seg->end, seg->offset); + } + + /* Record adjusted addresses and offsets and parse USDT spec */ + tmp = libbpf_reallocarray(targets, target_cnt + 1, sizeof(*targets)); + if (!tmp) { + err = -ENOMEM; + goto err_out; + } + targets = tmp; + + target = &targets[target_cnt]; + memset(target, 0, sizeof(*target)); + + target->abs_ip = usdt_abs_ip; + target->rel_ip = usdt_rel_ip; + target->sema_off = usdt_sema_off; + + /* notes->args references strings from Elf itself, so they can + * be referenced safely until elf_end() call + */ + target->spec_str = note.args; + + err = parse_usdt_spec(&target->spec, ¬e, usdt_cookie); + if (err) + goto err_out; + + target_cnt++; + } + + *out_targets = targets; + *out_target_cnt = target_cnt; + err = target_cnt; + +err_out: + free(segs); + free(lib_segs); + if (err < 0) + free(targets); + return err; +} + +struct bpf_link_usdt { + struct bpf_link link; + + struct usdt_manager *usdt_man; + + size_t spec_cnt; + int *spec_ids; + + size_t uprobe_cnt; + struct { + long abs_ip; + struct bpf_link *link; + } *uprobes; +}; + +static int bpf_link_usdt_detach(struct bpf_link *link) +{ + struct bpf_link_usdt *usdt_link = container_of(link, struct bpf_link_usdt, link); + struct usdt_manager *man = usdt_link->usdt_man; + int i; + + for (i = 0; i < usdt_link->uprobe_cnt; i++) { + /* detach underlying uprobe link */ + bpf_link__destroy(usdt_link->uprobes[i].link); + /* there is no need to update specs map because it will be + * unconditionally overwritten on subsequent USDT attaches, + * but if BPF cookies are not used we need to remove entry + * from ip_to_spec_id map, otherwise we'll run into false + * conflicting IP errors + */ + if (!man->has_bpf_cookie) { + /* not much we can do about errors here */ + (void)bpf_map_delete_elem(bpf_map__fd(man->ip_to_spec_id_map), + &usdt_link->uprobes[i].abs_ip); + } + } + + /* try to return the list of previously used spec IDs to usdt_manager + * for future reuse for subsequent USDT attaches + */ + if (!man->free_spec_ids) { + /* if there were no free spec IDs yet, just transfer our IDs */ + man->free_spec_ids = usdt_link->spec_ids; + man->free_spec_cnt = usdt_link->spec_cnt; + usdt_link->spec_ids = NULL; + } else { + /* otherwise concat IDs */ + size_t new_cnt = man->free_spec_cnt + usdt_link->spec_cnt; + int *new_free_ids; + + new_free_ids = libbpf_reallocarray(man->free_spec_ids, new_cnt, + sizeof(*new_free_ids)); + /* If we couldn't resize free_spec_ids, we'll just leak + * a bunch of free IDs; this is very unlikely to happen and if + * system is so exhausted on memory, it's the least of user's + * concerns, probably. + * So just do our best here to return those IDs to usdt_manager. 
+ */ + if (new_free_ids) { + memcpy(new_free_ids + man->free_spec_cnt, usdt_link->spec_ids, + usdt_link->spec_cnt * sizeof(*usdt_link->spec_ids)); + man->free_spec_ids = new_free_ids; + man->free_spec_cnt = new_cnt; + } + } + + return 0; +} + +static void bpf_link_usdt_dealloc(struct bpf_link *link) +{ + struct bpf_link_usdt *usdt_link = container_of(link, struct bpf_link_usdt, link); + + free(usdt_link->spec_ids); + free(usdt_link->uprobes); + free(usdt_link); +} + +static size_t specs_hash_fn(const void *key, void *ctx) +{ + const char *s = key; + + return str_hash(s); +} + +static bool specs_equal_fn(const void *key1, const void *key2, void *ctx) +{ + const char *s1 = key1; + const char *s2 = key2; + + return strcmp(s1, s2) == 0; +} + +static int allocate_spec_id(struct usdt_manager *man, struct hashmap *specs_hash, + struct bpf_link_usdt *link, struct usdt_target *target, + int *spec_id, bool *is_new) +{ + void *tmp; + int err; + + /* check if we already allocated spec ID for this spec string */ + if (hashmap__find(specs_hash, target->spec_str, &tmp)) { + *spec_id = (long)tmp; + *is_new = false; + return 0; + } + + /* otherwise it's a new ID that needs to be set up in specs map and + * returned back to usdt_manager when USDT link is detached + */ + tmp = libbpf_reallocarray(link->spec_ids, link->spec_cnt + 1, sizeof(*link->spec_ids)); + if (!tmp) + return -ENOMEM; + link->spec_ids = tmp; + + /* get next free spec ID, giving preference to free list, if not empty */ + if (man->free_spec_cnt) { + *spec_id = man->free_spec_ids[man->free_spec_cnt - 1]; + + /* cache spec ID for current spec string for future lookups */ + err = hashmap__add(specs_hash, target->spec_str, (void *)(long)*spec_id); + if (err) + return err; + + man->free_spec_cnt--; + } else { + /* don't allocate spec ID bigger than what fits in specs map */ + if (man->next_free_spec_id >= bpf_map__max_entries(man->specs_map)) + return -E2BIG; + + *spec_id = man->next_free_spec_id; + + /* cache spec ID for current spec string for future lookups */ + err = hashmap__add(specs_hash, target->spec_str, (void *)(long)*spec_id); + if (err) + return err; + + man->next_free_spec_id++; + } + + /* remember new spec ID in the link for later return back to free list on detach */ + link->spec_ids[link->spec_cnt] = *spec_id; + link->spec_cnt++; + *is_new = true; + return 0; +} + +struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct bpf_program *prog, + pid_t pid, const char *path, + const char *usdt_provider, const char *usdt_name, + __u64 usdt_cookie) +{ + int i, fd, err, spec_map_fd, ip_map_fd; + LIBBPF_OPTS(bpf_uprobe_opts, opts); + struct hashmap *specs_hash = NULL; + struct bpf_link_usdt *link = NULL; + struct usdt_target *targets = NULL; + size_t target_cnt; + Elf *elf; + + spec_map_fd = bpf_map__fd(man->specs_map); + ip_map_fd = bpf_map__fd(man->ip_to_spec_id_map); + + /* TODO: perform path resolution similar to uprobe's */ + fd = open(path, O_RDONLY); + if (fd < 0) { + err = -errno; + pr_warn("usdt: failed to open ELF binary '%s': %d\n", path, err); + return libbpf_err_ptr(err); + } + + elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); + if (!elf) { + err = -EBADF; + pr_warn("usdt: failed to parse ELF binary '%s': %s\n", path, elf_errmsg(-1)); + goto err_out; + } + + err = sanity_check_usdt_elf(elf, path); + if (err) + goto err_out; + + /* normalize PID filter */ + if (pid < 0) + pid = -1; + else if (pid == 0) + pid = getpid(); + + /* discover USDT in given binary, optionally limiting + * activations to a given PID, 
if pid > 0 + */ + err = collect_usdt_targets(man, elf, path, pid, usdt_provider, usdt_name, + usdt_cookie, &targets, &target_cnt); + if (err <= 0) { + err = (err == 0) ? -ENOENT : err; + goto err_out; + } + + specs_hash = hashmap__new(specs_hash_fn, specs_equal_fn, NULL); + if (IS_ERR(specs_hash)) { + err = PTR_ERR(specs_hash); + goto err_out; + } + + link = calloc(1, sizeof(*link)); + if (!link) { + err = -ENOMEM; + goto err_out; + } + + link->usdt_man = man; + link->link.detach = &bpf_link_usdt_detach; + link->link.dealloc = &bpf_link_usdt_dealloc; + + link->uprobes = calloc(target_cnt, sizeof(*link->uprobes)); + if (!link->uprobes) { + err = -ENOMEM; + goto err_out; + } + + for (i = 0; i < target_cnt; i++) { + struct usdt_target *target = &targets[i]; + struct bpf_link *uprobe_link; + bool is_new; + int spec_id; + + /* Spec ID can be either reused or newly allocated. If it is + * newly allocated, we'll need to fill out spec map, otherwise + * entire spec should be valid and can be just used by a new + * uprobe. We reuse spec when USDT arg spec is identical. We + * also never share specs between two different USDT + * attachments ("links"), so all the reused specs already + * share USDT cookie value implicitly. + */ + err = allocate_spec_id(man, specs_hash, link, target, &spec_id, &is_new); + if (err) + goto err_out; + + if (is_new && bpf_map_update_elem(spec_map_fd, &spec_id, &target->spec, BPF_ANY)) { + err = -errno; + pr_warn("usdt: failed to set USDT spec #%d for '%s:%s' in '%s': %d\n", + spec_id, usdt_provider, usdt_name, path, err); + goto err_out; + } + if (!man->has_bpf_cookie && + bpf_map_update_elem(ip_map_fd, &target->abs_ip, &spec_id, BPF_NOEXIST)) { + err = -errno; + if (err == -EEXIST) { + pr_warn("usdt: IP collision detected for spec #%d for '%s:%s' in '%s'\n", + spec_id, usdt_provider, usdt_name, path); + } else { + pr_warn("usdt: failed to map IP 0x%lx to spec #%d for '%s:%s' in '%s': %d\n", + target->abs_ip, spec_id, usdt_provider, usdt_name, + path, err); + } + goto err_out; + } + + opts.ref_ctr_offset = target->sema_off; + opts.bpf_cookie = man->has_bpf_cookie ? spec_id : 0; + uprobe_link = bpf_program__attach_uprobe_opts(prog, pid, path, + target->rel_ip, &opts); + err = libbpf_get_error(uprobe_link); + if (err) { + pr_warn("usdt: failed to attach uprobe #%d for '%s:%s' in '%s': %d\n", + i, usdt_provider, usdt_name, path, err); + goto err_out; + } + + link->uprobes[i].link = uprobe_link; + link->uprobes[i].abs_ip = target->abs_ip; + link->uprobe_cnt++; + } + + free(targets); + hashmap__free(specs_hash); + elf_end(elf); + close(fd); + + return &link->link; + +err_out: + if (link) + bpf_link__destroy(&link->link); + free(targets); + hashmap__free(specs_hash); + if (elf) + elf_end(elf); + close(fd); + return libbpf_err_ptr(err); +} + +/* Parse out USDT ELF note from '.note.stapsdt' section. + * Logic inspired by perf's code. 
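+ *
+ * The note's description blob is three addresses followed by three
+ * zero-terminated strings, which is exactly what gets pulled apart below:
+ *
+ *	addrs[0]  probe location address
+ *	addrs[1]  link-time address of the .stapsdt.base section
+ *	addrs[2]  semaphore address (zero if the USDT has no semaphore)
+ *	"provider\0" "name\0" "args\0"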
+ */ +static int parse_usdt_note(Elf *elf, const char *path, long base_addr, + GElf_Nhdr *nhdr, const char *data, size_t name_off, size_t desc_off, + struct usdt_note *note) +{ + const char *provider, *name, *args; + long addrs[3]; + size_t len; + + /* sanity check USDT note name and type first */ + if (strncmp(data + name_off, USDT_NOTE_NAME, nhdr->n_namesz) != 0) + return -EINVAL; + if (nhdr->n_type != USDT_NOTE_TYPE) + return -EINVAL; + + /* sanity check USDT note contents ("description" in ELF terminology) */ + len = nhdr->n_descsz; + data = data + desc_off; + + /* +3 is the very minimum required to store three empty strings */ + if (len < sizeof(addrs) + 3) + return -EINVAL; + + /* get location, base, and semaphore addrs */ + memcpy(&addrs, data, sizeof(addrs)); + + /* parse string fields: provider, name, args */ + provider = data + sizeof(addrs); + + name = (const char *)memchr(provider, '\0', data + len - provider); + if (!name) /* non-zero-terminated provider */ + return -EINVAL; + name++; + if (name >= data + len || *name == '\0') /* missing or empty name */ + return -EINVAL; + + args = memchr(name, '\0', data + len - name); + if (!args) /* non-zero-terminated name */ + return -EINVAL; + ++args; + if (args >= data + len) /* missing arguments spec */ + return -EINVAL; + + note->provider = provider; + note->name = name; + if (*args == '\0' || *args == ':') + note->args = ""; + else + note->args = args; + note->loc_addr = addrs[0]; + note->base_addr = addrs[1]; + note->sema_addr = addrs[2]; + + return 0; +} + +static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg); + +static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie) +{ + const char *s; + int len; + + spec->usdt_cookie = usdt_cookie; + spec->arg_cnt = 0; + + s = note->args; + while (s[0]) { + if (spec->arg_cnt >= USDT_MAX_ARG_CNT) { + pr_warn("usdt: too many USDT arguments (> %d) for '%s:%s' with args spec '%s'\n", + USDT_MAX_ARG_CNT, note->provider, note->name, note->args); + return -E2BIG; + } + + len = parse_usdt_arg(s, spec->arg_cnt, &spec->args[spec->arg_cnt]); + if (len < 0) + return len; + + s += len; + spec->arg_cnt++; + } + + return 0; +} + +/* Architecture-specific logic for parsing USDT argument location specs */ + +#if defined(__x86_64__) || defined(__i386__) + +static int calc_pt_regs_off(const char *reg_name) +{ + static struct { + const char *names[4]; + size_t pt_regs_off; + } reg_map[] = { +#ifdef __x86_64__ +#define reg_off(reg64, reg32) offsetof(struct pt_regs, reg64) +#else +#define reg_off(reg64, reg32) offsetof(struct pt_regs, reg32) +#endif + { {"rip", "eip", "", ""}, reg_off(rip, eip) }, + { {"rax", "eax", "ax", "al"}, reg_off(rax, eax) }, + { {"rbx", "ebx", "bx", "bl"}, reg_off(rbx, ebx) }, + { {"rcx", "ecx", "cx", "cl"}, reg_off(rcx, ecx) }, + { {"rdx", "edx", "dx", "dl"}, reg_off(rdx, edx) }, + { {"rsi", "esi", "si", "sil"}, reg_off(rsi, esi) }, + { {"rdi", "edi", "di", "dil"}, reg_off(rdi, edi) }, + { {"rbp", "ebp", "bp", "bpl"}, reg_off(rbp, ebp) }, + { {"rsp", "esp", "sp", "spl"}, reg_off(rsp, esp) }, +#undef reg_off +#ifdef __x86_64__ + { {"r8", "r8d", "r8w", "r8b"}, offsetof(struct pt_regs, r8) }, + { {"r9", "r9d", "r9w", "r9b"}, offsetof(struct pt_regs, r9) }, + { {"r10", "r10d", "r10w", "r10b"}, offsetof(struct pt_regs, r10) }, + { {"r11", "r11d", "r11w", "r11b"}, offsetof(struct pt_regs, r11) }, + { {"r12", "r12d", "r12w", "r12b"}, offsetof(struct pt_regs, r12) }, + { {"r13", "r13d", "r13w", "r13b"}, offsetof(struct 
pt_regs, r13) }, + { {"r14", "r14d", "r14w", "r14b"}, offsetof(struct pt_regs, r14) }, + { {"r15", "r15d", "r15w", "r15b"}, offsetof(struct pt_regs, r15) }, +#endif + }; + int i, j; + + for (i = 0; i < ARRAY_SIZE(reg_map); i++) { + for (j = 0; j < ARRAY_SIZE(reg_map[i].names); j++) { + if (strcmp(reg_name, reg_map[i].names[j]) == 0) + return reg_map[i].pt_regs_off; + } + } + + pr_warn("usdt: unrecognized register '%s'\n", reg_name); + return -ENOENT; +} + +static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg) +{ + char *reg_name = NULL; + int arg_sz, len, reg_off; + long off; + + if (sscanf(arg_str, " %d @ %ld ( %%%m[^)] ) %n", &arg_sz, &off, ®_name, &len) == 3) { + /* Memory dereference case, e.g., -4@-20(%rbp) */ + arg->arg_type = USDT_ARG_REG_DEREF; + arg->val_off = off; + reg_off = calc_pt_regs_off(reg_name); + free(reg_name); + if (reg_off < 0) + return reg_off; + arg->reg_off = reg_off; + } else if (sscanf(arg_str, " %d @ %%%ms %n", &arg_sz, ®_name, &len) == 2) { + /* Register read case, e.g., -4@%eax */ + arg->arg_type = USDT_ARG_REG; + arg->val_off = 0; + + reg_off = calc_pt_regs_off(reg_name); + free(reg_name); + if (reg_off < 0) + return reg_off; + arg->reg_off = reg_off; + } else if (sscanf(arg_str, " %d @ $%ld %n", &arg_sz, &off, &len) == 2) { + /* Constant value case, e.g., 4@$71 */ + arg->arg_type = USDT_ARG_CONST; + arg->val_off = off; + arg->reg_off = 0; + } else { + pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str); + return -EINVAL; + } + + arg->arg_signed = arg_sz < 0; + if (arg_sz < 0) + arg_sz = -arg_sz; + + switch (arg_sz) { + case 1: case 2: case 4: case 8: + arg->arg_bitshift = 64 - arg_sz * 8; + break; + default: + pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n", + arg_num, arg_str, arg_sz); + return -EINVAL; + } + + return len; +} + +#elif defined(__s390x__) + +/* Do not support __s390__ for now, since user_pt_regs is broken with -m31. 
*/ + +static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg) +{ + unsigned int reg; + int arg_sz, len; + long off; + + if (sscanf(arg_str, " %d @ %ld ( %%r%u ) %n", &arg_sz, &off, ®, &len) == 3) { + /* Memory dereference case, e.g., -2@-28(%r15) */ + arg->arg_type = USDT_ARG_REG_DEREF; + arg->val_off = off; + if (reg > 15) { + pr_warn("usdt: unrecognized register '%%r%u'\n", reg); + return -EINVAL; + } + arg->reg_off = offsetof(user_pt_regs, gprs[reg]); + } else if (sscanf(arg_str, " %d @ %%r%u %n", &arg_sz, ®, &len) == 2) { + /* Register read case, e.g., -8@%r0 */ + arg->arg_type = USDT_ARG_REG; + arg->val_off = 0; + if (reg > 15) { + pr_warn("usdt: unrecognized register '%%r%u'\n", reg); + return -EINVAL; + } + arg->reg_off = offsetof(user_pt_regs, gprs[reg]); + } else if (sscanf(arg_str, " %d @ %ld %n", &arg_sz, &off, &len) == 2) { + /* Constant value case, e.g., 4@71 */ + arg->arg_type = USDT_ARG_CONST; + arg->val_off = off; + arg->reg_off = 0; + } else { + pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str); + return -EINVAL; + } + + arg->arg_signed = arg_sz < 0; + if (arg_sz < 0) + arg_sz = -arg_sz; + + switch (arg_sz) { + case 1: case 2: case 4: case 8: + arg->arg_bitshift = 64 - arg_sz * 8; + break; + default: + pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n", + arg_num, arg_str, arg_sz); + return -EINVAL; + } + + return len; +} + +#elif defined(__aarch64__) + +static int calc_pt_regs_off(const char *reg_name) +{ + int reg_num; + + if (sscanf(reg_name, "x%d", ®_num) == 1) { + if (reg_num >= 0 && reg_num < 31) + return offsetof(struct user_pt_regs, regs[reg_num]); + } else if (strcmp(reg_name, "sp") == 0) { + return offsetof(struct user_pt_regs, sp); + } + pr_warn("usdt: unrecognized register '%s'\n", reg_name); + return -ENOENT; +} + +static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg) +{ + char *reg_name = NULL; + int arg_sz, len, reg_off; + long off; + + if (sscanf(arg_str, " %d @ \[ %m[a-z0-9], %ld ] %n", &arg_sz, ®_name, &off, &len) == 3) { + /* Memory dereference case, e.g., -4@[sp, 96] */ + arg->arg_type = USDT_ARG_REG_DEREF; + arg->val_off = off; + reg_off = calc_pt_regs_off(reg_name); + free(reg_name); + if (reg_off < 0) + return reg_off; + arg->reg_off = reg_off; + } else if (sscanf(arg_str, " %d @ \[ %m[a-z0-9] ] %n", &arg_sz, ®_name, &len) == 2) { + /* Memory dereference case, e.g., -4@[sp] */ + arg->arg_type = USDT_ARG_REG_DEREF; + arg->val_off = 0; + reg_off = calc_pt_regs_off(reg_name); + free(reg_name); + if (reg_off < 0) + return reg_off; + arg->reg_off = reg_off; + } else if (sscanf(arg_str, " %d @ %ld %n", &arg_sz, &off, &len) == 2) { + /* Constant value case, e.g., 4@5 */ + arg->arg_type = USDT_ARG_CONST; + arg->val_off = off; + arg->reg_off = 0; + } else if (sscanf(arg_str, " %d @ %m[a-z0-9] %n", &arg_sz, ®_name, &len) == 2) { + /* Register read case, e.g., -8@x4 */ + arg->arg_type = USDT_ARG_REG; + arg->val_off = 0; + reg_off = calc_pt_regs_off(reg_name); + free(reg_name); + if (reg_off < 0) + return reg_off; + arg->reg_off = reg_off; + } else { + pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str); + return -EINVAL; + } + + arg->arg_signed = arg_sz < 0; + if (arg_sz < 0) + arg_sz = -arg_sz; + + switch (arg_sz) { + case 1: case 2: case 4: case 8: + arg->arg_bitshift = 64 - arg_sz * 8; + break; + default: + pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n", + arg_num, arg_str, arg_sz); + return -EINVAL; + } + + return len; +} + +#elif 
defined(__riscv) + +static int calc_pt_regs_off(const char *reg_name) +{ + static struct { + const char *name; + size_t pt_regs_off; + } reg_map[] = { + { "ra", offsetof(struct user_regs_struct, ra) }, + { "sp", offsetof(struct user_regs_struct, sp) }, + { "gp", offsetof(struct user_regs_struct, gp) }, + { "tp", offsetof(struct user_regs_struct, tp) }, + { "a0", offsetof(struct user_regs_struct, a0) }, + { "a1", offsetof(struct user_regs_struct, a1) }, + { "a2", offsetof(struct user_regs_struct, a2) }, + { "a3", offsetof(struct user_regs_struct, a3) }, + { "a4", offsetof(struct user_regs_struct, a4) }, + { "a5", offsetof(struct user_regs_struct, a5) }, + { "a6", offsetof(struct user_regs_struct, a6) }, + { "a7", offsetof(struct user_regs_struct, a7) }, + { "s0", offsetof(struct user_regs_struct, s0) }, + { "s1", offsetof(struct user_regs_struct, s1) }, + { "s2", offsetof(struct user_regs_struct, s2) }, + { "s3", offsetof(struct user_regs_struct, s3) }, + { "s4", offsetof(struct user_regs_struct, s4) }, + { "s5", offsetof(struct user_regs_struct, s5) }, + { "s6", offsetof(struct user_regs_struct, s6) }, + { "s7", offsetof(struct user_regs_struct, s7) }, + { "s8", offsetof(struct user_regs_struct, rv_s8) }, + { "s9", offsetof(struct user_regs_struct, s9) }, + { "s10", offsetof(struct user_regs_struct, s10) }, + { "s11", offsetof(struct user_regs_struct, s11) }, + { "t0", offsetof(struct user_regs_struct, t0) }, + { "t1", offsetof(struct user_regs_struct, t1) }, + { "t2", offsetof(struct user_regs_struct, t2) }, + { "t3", offsetof(struct user_regs_struct, t3) }, + { "t4", offsetof(struct user_regs_struct, t4) }, + { "t5", offsetof(struct user_regs_struct, t5) }, + { "t6", offsetof(struct user_regs_struct, t6) }, + }; + int i; + + for (i = 0; i < ARRAY_SIZE(reg_map); i++) { + if (strcmp(reg_name, reg_map[i].name) == 0) + return reg_map[i].pt_regs_off; + } + + pr_warn("usdt: unrecognized register '%s'\n", reg_name); + return -ENOENT; +} + +static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg) +{ + char *reg_name = NULL; + int arg_sz, len, reg_off; + long off; + + if (sscanf(arg_str, " %d @ %ld ( %m[a-z0-9] ) %n", &arg_sz, &off, ®_name, &len) == 3) { + /* Memory dereference case, e.g., -8@-88(s0) */ + arg->arg_type = USDT_ARG_REG_DEREF; + arg->val_off = off; + reg_off = calc_pt_regs_off(reg_name); + free(reg_name); + if (reg_off < 0) + return reg_off; + arg->reg_off = reg_off; + } else if (sscanf(arg_str, " %d @ %ld %n", &arg_sz, &off, &len) == 2) { + /* Constant value case, e.g., 4@5 */ + arg->arg_type = USDT_ARG_CONST; + arg->val_off = off; + arg->reg_off = 0; + } else if (sscanf(arg_str, " %d @ %m[a-z0-9] %n", &arg_sz, ®_name, &len) == 2) { + /* Register read case, e.g., -8@a1 */ + arg->arg_type = USDT_ARG_REG; + arg->val_off = 0; + reg_off = calc_pt_regs_off(reg_name); + free(reg_name); + if (reg_off < 0) + return reg_off; + arg->reg_off = reg_off; + } else { + pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str); + return -EINVAL; + } + + arg->arg_signed = arg_sz < 0; + if (arg_sz < 0) + arg_sz = -arg_sz; + + switch (arg_sz) { + case 1: case 2: case 4: case 8: + arg->arg_bitshift = 64 - arg_sz * 8; + break; + default: + pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n", + arg_num, arg_str, arg_sz); + return -EINVAL; + } + + return len; +} + +#else + +static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg) +{ + pr_warn("usdt: libbpf doesn't support USDTs on current architecture\n"); + return -ENOTSUP; +} 
+ +#endif diff --git a/pkg/plugin/lib/common/libbpf/_src/xsk.c b/pkg/plugin/lib/common/libbpf/_src/xsk.c new file mode 100644 index 000000000..af136f73b --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/xsk.c @@ -0,0 +1,1260 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * AF_XDP user-space access library. + * + * Copyright(c) 2018 - 2019 Intel Corporation. + * + * Author(s): Magnus Karlsson + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bpf.h" +#include "libbpf.h" +#include "libbpf_internal.h" +#include "xsk.h" + +/* entire xsk.h and xsk.c is going away in libbpf 1.0, so ignore all internal + * uses of deprecated APIs + */ +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + +#ifndef SOL_XDP + #define SOL_XDP 283 +#endif + +#ifndef AF_XDP + #define AF_XDP 44 +#endif + +#ifndef PF_XDP + #define PF_XDP AF_XDP +#endif + +enum xsk_prog { + XSK_PROG_FALLBACK, + XSK_PROG_REDIRECT_FLAGS, +}; + +struct xsk_umem { + struct xsk_ring_prod *fill_save; + struct xsk_ring_cons *comp_save; + char *umem_area; + struct xsk_umem_config config; + int fd; + int refcount; + struct list_head ctx_list; + bool rx_ring_setup_done; + bool tx_ring_setup_done; +}; + +struct xsk_ctx { + struct xsk_ring_prod *fill; + struct xsk_ring_cons *comp; + __u32 queue_id; + struct xsk_umem *umem; + int refcount; + int ifindex; + struct list_head list; + int prog_fd; + int link_fd; + int xsks_map_fd; + char ifname[IFNAMSIZ]; + bool has_bpf_link; +}; + +struct xsk_socket { + struct xsk_ring_cons *rx; + struct xsk_ring_prod *tx; + __u64 outstanding_tx; + struct xsk_ctx *ctx; + struct xsk_socket_config config; + int fd; +}; + +struct xsk_nl_info { + bool xdp_prog_attached; + int ifindex; + int fd; +}; + +/* Up until and including Linux 5.3 */ +struct xdp_ring_offset_v1 { + __u64 producer; + __u64 consumer; + __u64 desc; +}; + +/* Up until and including Linux 5.3 */ +struct xdp_mmap_offsets_v1 { + struct xdp_ring_offset_v1 rx; + struct xdp_ring_offset_v1 tx; + struct xdp_ring_offset_v1 fr; + struct xdp_ring_offset_v1 cr; +}; + +int xsk_umem__fd(const struct xsk_umem *umem) +{ + return umem ? umem->fd : -EINVAL; +} + +int xsk_socket__fd(const struct xsk_socket *xsk) +{ + return xsk ? 
xsk->fd : -EINVAL; +} + +static bool xsk_page_aligned(void *buffer) +{ + unsigned long addr = (unsigned long)buffer; + + return !(addr & (getpagesize() - 1)); +} + +static void xsk_set_umem_config(struct xsk_umem_config *cfg, + const struct xsk_umem_config *usr_cfg) +{ + if (!usr_cfg) { + cfg->fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS; + cfg->comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS; + cfg->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; + cfg->frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM; + cfg->flags = XSK_UMEM__DEFAULT_FLAGS; + return; + } + + cfg->fill_size = usr_cfg->fill_size; + cfg->comp_size = usr_cfg->comp_size; + cfg->frame_size = usr_cfg->frame_size; + cfg->frame_headroom = usr_cfg->frame_headroom; + cfg->flags = usr_cfg->flags; +} + +static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg, + const struct xsk_socket_config *usr_cfg) +{ + if (!usr_cfg) { + cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS; + cfg->tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS; + cfg->libbpf_flags = 0; + cfg->xdp_flags = 0; + cfg->bind_flags = 0; + return 0; + } + + if (usr_cfg->libbpf_flags & ~XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD) + return -EINVAL; + + cfg->rx_size = usr_cfg->rx_size; + cfg->tx_size = usr_cfg->tx_size; + cfg->libbpf_flags = usr_cfg->libbpf_flags; + cfg->xdp_flags = usr_cfg->xdp_flags; + cfg->bind_flags = usr_cfg->bind_flags; + + return 0; +} + +static void xsk_mmap_offsets_v1(struct xdp_mmap_offsets *off) +{ + struct xdp_mmap_offsets_v1 off_v1; + + /* getsockopt on a kernel <= 5.3 has no flags fields. + * Copy over the offsets to the correct places in the >=5.4 format + * and put the flags where they would have been on that kernel. + */ + memcpy(&off_v1, off, sizeof(off_v1)); + + off->rx.producer = off_v1.rx.producer; + off->rx.consumer = off_v1.rx.consumer; + off->rx.desc = off_v1.rx.desc; + off->rx.flags = off_v1.rx.consumer + sizeof(__u32); + + off->tx.producer = off_v1.tx.producer; + off->tx.consumer = off_v1.tx.consumer; + off->tx.desc = off_v1.tx.desc; + off->tx.flags = off_v1.tx.consumer + sizeof(__u32); + + off->fr.producer = off_v1.fr.producer; + off->fr.consumer = off_v1.fr.consumer; + off->fr.desc = off_v1.fr.desc; + off->fr.flags = off_v1.fr.consumer + sizeof(__u32); + + off->cr.producer = off_v1.cr.producer; + off->cr.consumer = off_v1.cr.consumer; + off->cr.desc = off_v1.cr.desc; + off->cr.flags = off_v1.cr.consumer + sizeof(__u32); +} + +static int xsk_get_mmap_offsets(int fd, struct xdp_mmap_offsets *off) +{ + socklen_t optlen; + int err; + + optlen = sizeof(*off); + err = getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, off, &optlen); + if (err) + return err; + + if (optlen == sizeof(*off)) + return 0; + + if (optlen == sizeof(struct xdp_mmap_offsets_v1)) { + xsk_mmap_offsets_v1(off); + return 0; + } + + return -EINVAL; +} + +static int xsk_create_umem_rings(struct xsk_umem *umem, int fd, + struct xsk_ring_prod *fill, + struct xsk_ring_cons *comp) +{ + struct xdp_mmap_offsets off; + void *map; + int err; + + err = setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, + &umem->config.fill_size, + sizeof(umem->config.fill_size)); + if (err) + return -errno; + + err = setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, + &umem->config.comp_size, + sizeof(umem->config.comp_size)); + if (err) + return -errno; + + err = xsk_get_mmap_offsets(fd, &off); + if (err) + return -errno; + + map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64), + PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, + XDP_UMEM_PGOFF_FILL_RING); + if (map == MAP_FAILED) + return 
-errno; + + fill->mask = umem->config.fill_size - 1; + fill->size = umem->config.fill_size; + fill->producer = map + off.fr.producer; + fill->consumer = map + off.fr.consumer; + fill->flags = map + off.fr.flags; + fill->ring = map + off.fr.desc; + fill->cached_cons = umem->config.fill_size; + + map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64), + PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, + XDP_UMEM_PGOFF_COMPLETION_RING); + if (map == MAP_FAILED) { + err = -errno; + goto out_mmap; + } + + comp->mask = umem->config.comp_size - 1; + comp->size = umem->config.comp_size; + comp->producer = map + off.cr.producer; + comp->consumer = map + off.cr.consumer; + comp->flags = map + off.cr.flags; + comp->ring = map + off.cr.desc; + + return 0; + +out_mmap: + munmap(map, off.fr.desc + umem->config.fill_size * sizeof(__u64)); + return err; +} + +DEFAULT_VERSION(xsk_umem__create_v0_0_4, xsk_umem__create, LIBBPF_0.0.4) +int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area, + __u64 size, struct xsk_ring_prod *fill, + struct xsk_ring_cons *comp, + const struct xsk_umem_config *usr_config) +{ + struct xdp_umem_reg mr; + struct xsk_umem *umem; + int err; + + if (!umem_area || !umem_ptr || !fill || !comp) + return -EFAULT; + if (!size && !xsk_page_aligned(umem_area)) + return -EINVAL; + + umem = calloc(1, sizeof(*umem)); + if (!umem) + return -ENOMEM; + + umem->fd = socket(AF_XDP, SOCK_RAW | SOCK_CLOEXEC, 0); + if (umem->fd < 0) { + err = -errno; + goto out_umem_alloc; + } + + umem->umem_area = umem_area; + INIT_LIST_HEAD(&umem->ctx_list); + xsk_set_umem_config(&umem->config, usr_config); + + memset(&mr, 0, sizeof(mr)); + mr.addr = (uintptr_t)umem_area; + mr.len = size; + mr.chunk_size = umem->config.frame_size; + mr.headroom = umem->config.frame_headroom; + mr.flags = umem->config.flags; + + err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)); + if (err) { + err = -errno; + goto out_socket; + } + + err = xsk_create_umem_rings(umem, umem->fd, fill, comp); + if (err) + goto out_socket; + + umem->fill_save = fill; + umem->comp_save = comp; + *umem_ptr = umem; + return 0; + +out_socket: + close(umem->fd); +out_umem_alloc: + free(umem); + return err; +} + +struct xsk_umem_config_v1 { + __u32 fill_size; + __u32 comp_size; + __u32 frame_size; + __u32 frame_headroom; +}; + +COMPAT_VERSION(xsk_umem__create_v0_0_2, xsk_umem__create, LIBBPF_0.0.2) +int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area, + __u64 size, struct xsk_ring_prod *fill, + struct xsk_ring_cons *comp, + const struct xsk_umem_config *usr_config) +{ + struct xsk_umem_config config; + + memcpy(&config, usr_config, sizeof(struct xsk_umem_config_v1)); + config.flags = 0; + + return xsk_umem__create_v0_0_4(umem_ptr, umem_area, size, fill, comp, + &config); +} + +static enum xsk_prog get_xsk_prog(void) +{ + enum xsk_prog detected = XSK_PROG_FALLBACK; + __u32 size_out, retval, duration; + char data_in = 0, data_out; + struct bpf_insn insns[] = { + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_3, XDP_PASS), + BPF_EMIT_CALL(BPF_FUNC_redirect_map), + BPF_EXIT_INSN(), + }; + int prog_fd, map_fd, ret, insn_cnt = ARRAY_SIZE(insns); + + map_fd = bpf_map_create(BPF_MAP_TYPE_XSKMAP, NULL, sizeof(int), sizeof(int), 1, NULL); + if (map_fd < 0) + return detected; + + insns[0].imm = map_fd; + + prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL); + if (prog_fd < 0) { + close(map_fd); + return detected; + } + + ret = 
bpf_prog_test_run(prog_fd, 0, &data_in, 1, &data_out, &size_out, &retval, &duration); + if (!ret && retval == XDP_PASS) + detected = XSK_PROG_REDIRECT_FLAGS; + close(prog_fd); + close(map_fd); + return detected; +} + +static int xsk_load_xdp_prog(struct xsk_socket *xsk) +{ + static const int log_buf_size = 16 * 1024; + struct xsk_ctx *ctx = xsk->ctx; + char log_buf[log_buf_size]; + int prog_fd; + + /* This is the fallback C-program: + * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx) + * { + * int ret, index = ctx->rx_queue_index; + * + * // A set entry here means that the correspnding queue_id + * // has an active AF_XDP socket bound to it. + * ret = bpf_redirect_map(&xsks_map, index, XDP_PASS); + * if (ret > 0) + * return ret; + * + * // Fallback for pre-5.3 kernels, not supporting default + * // action in the flags parameter. + * if (bpf_map_lookup_elem(&xsks_map, &index)) + * return bpf_redirect_map(&xsks_map, index, 0); + * return XDP_PASS; + * } + */ + struct bpf_insn prog[] = { + /* r2 = *(u32 *)(r1 + 16) */ + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 16), + /* *(u32 *)(r10 - 4) = r2 */ + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -4), + /* r1 = xskmap[] */ + BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd), + /* r3 = XDP_PASS */ + BPF_MOV64_IMM(BPF_REG_3, 2), + /* call bpf_redirect_map */ + BPF_EMIT_CALL(BPF_FUNC_redirect_map), + /* if w0 != 0 goto pc+13 */ + BPF_JMP32_IMM(BPF_JSGT, BPF_REG_0, 0, 13), + /* r2 = r10 */ + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + /* r2 += -4 */ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), + /* r1 = xskmap[] */ + BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd), + /* call bpf_map_lookup_elem */ + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + /* r1 = r0 */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + /* r0 = XDP_PASS */ + BPF_MOV64_IMM(BPF_REG_0, 2), + /* if r1 == 0 goto pc+5 */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 5), + /* r2 = *(u32 *)(r10 - 4) */ + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, -4), + /* r1 = xskmap[] */ + BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd), + /* r3 = 0 */ + BPF_MOV64_IMM(BPF_REG_3, 0), + /* call bpf_redirect_map */ + BPF_EMIT_CALL(BPF_FUNC_redirect_map), + /* The jumps are to this instruction */ + BPF_EXIT_INSN(), + }; + + /* This is the post-5.3 kernel C-program: + * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx) + * { + * return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, XDP_PASS); + * } + */ + struct bpf_insn prog_redirect_flags[] = { + /* r2 = *(u32 *)(r1 + 16) */ + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 16), + /* r1 = xskmap[] */ + BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd), + /* r3 = XDP_PASS */ + BPF_MOV64_IMM(BPF_REG_3, 2), + /* call bpf_redirect_map */ + BPF_EMIT_CALL(BPF_FUNC_redirect_map), + BPF_EXIT_INSN(), + }; + size_t insns_cnt[] = {ARRAY_SIZE(prog), + ARRAY_SIZE(prog_redirect_flags), + }; + struct bpf_insn *progs[] = {prog, prog_redirect_flags}; + enum xsk_prog option = get_xsk_prog(); + LIBBPF_OPTS(bpf_prog_load_opts, opts, + .log_buf = log_buf, + .log_size = log_buf_size, + ); + + prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "LGPL-2.1 or BSD-2-Clause", + progs[option], insns_cnt[option], &opts); + if (prog_fd < 0) { + pr_warn("BPF log buffer:\n%s", log_buf); + return prog_fd; + } + + ctx->prog_fd = prog_fd; + return 0; +} + +static int xsk_create_bpf_link(struct xsk_socket *xsk) +{ + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts); + struct xsk_ctx *ctx = xsk->ctx; + __u32 prog_id = 0; + int link_fd; + int err; + + err = bpf_get_link_xdp_id(ctx->ifindex, &prog_id, xsk->config.xdp_flags); + if (err) { + 
pr_warn("getting XDP prog id failed\n"); + return err; + } + + /* if there's a netlink-based XDP prog loaded on interface, bail out + * and ask user to do the removal by himself + */ + if (prog_id) { + pr_warn("Netlink-based XDP prog detected, please unload it in order to launch AF_XDP prog\n"); + return -EINVAL; + } + + opts.flags = xsk->config.xdp_flags & ~(XDP_FLAGS_UPDATE_IF_NOEXIST | XDP_FLAGS_REPLACE); + + link_fd = bpf_link_create(ctx->prog_fd, ctx->ifindex, BPF_XDP, &opts); + if (link_fd < 0) { + pr_warn("bpf_link_create failed: %s\n", strerror(errno)); + return link_fd; + } + + ctx->link_fd = link_fd; + return 0; +} + +static int xsk_get_max_queues(struct xsk_socket *xsk) +{ + struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS }; + struct xsk_ctx *ctx = xsk->ctx; + struct ifreq ifr = {}; + int fd, err, ret; + + fd = socket(AF_LOCAL, SOCK_DGRAM | SOCK_CLOEXEC, 0); + if (fd < 0) + return -errno; + + ifr.ifr_data = (void *)&channels; + libbpf_strlcpy(ifr.ifr_name, ctx->ifname, IFNAMSIZ); + err = ioctl(fd, SIOCETHTOOL, &ifr); + if (err && errno != EOPNOTSUPP) { + ret = -errno; + goto out; + } + + if (err) { + /* If the device says it has no channels, then all traffic + * is sent to a single stream, so max queues = 1. + */ + ret = 1; + } else { + /* Take the max of rx, tx, combined. Drivers return + * the number of channels in different ways. + */ + ret = max(channels.max_rx, channels.max_tx); + ret = max(ret, (int)channels.max_combined); + } + +out: + close(fd); + return ret; +} + +static int xsk_create_bpf_maps(struct xsk_socket *xsk) +{ + struct xsk_ctx *ctx = xsk->ctx; + int max_queues; + int fd; + + max_queues = xsk_get_max_queues(xsk); + if (max_queues < 0) + return max_queues; + + fd = bpf_map_create(BPF_MAP_TYPE_XSKMAP, "xsks_map", + sizeof(int), sizeof(int), max_queues, NULL); + if (fd < 0) + return fd; + + ctx->xsks_map_fd = fd; + + return 0; +} + +static void xsk_delete_bpf_maps(struct xsk_socket *xsk) +{ + struct xsk_ctx *ctx = xsk->ctx; + + bpf_map_delete_elem(ctx->xsks_map_fd, &ctx->queue_id); + close(ctx->xsks_map_fd); +} + +static int xsk_lookup_bpf_maps(struct xsk_socket *xsk) +{ + __u32 i, *map_ids, num_maps, prog_len = sizeof(struct bpf_prog_info); + __u32 map_len = sizeof(struct bpf_map_info); + struct bpf_prog_info prog_info = {}; + struct xsk_ctx *ctx = xsk->ctx; + struct bpf_map_info map_info; + int fd, err; + + err = bpf_obj_get_info_by_fd(ctx->prog_fd, &prog_info, &prog_len); + if (err) + return err; + + num_maps = prog_info.nr_map_ids; + + map_ids = calloc(prog_info.nr_map_ids, sizeof(*map_ids)); + if (!map_ids) + return -ENOMEM; + + memset(&prog_info, 0, prog_len); + prog_info.nr_map_ids = num_maps; + prog_info.map_ids = (__u64)(unsigned long)map_ids; + + err = bpf_obj_get_info_by_fd(ctx->prog_fd, &prog_info, &prog_len); + if (err) + goto out_map_ids; + + ctx->xsks_map_fd = -1; + + for (i = 0; i < prog_info.nr_map_ids; i++) { + fd = bpf_map_get_fd_by_id(map_ids[i]); + if (fd < 0) + continue; + + memset(&map_info, 0, map_len); + err = bpf_obj_get_info_by_fd(fd, &map_info, &map_len); + if (err) { + close(fd); + continue; + } + + if (!strncmp(map_info.name, "xsks_map", sizeof(map_info.name))) { + ctx->xsks_map_fd = fd; + break; + } + + close(fd); + } + + if (ctx->xsks_map_fd == -1) + err = -ENOENT; + +out_map_ids: + free(map_ids); + return err; +} + +static int xsk_set_bpf_maps(struct xsk_socket *xsk) +{ + struct xsk_ctx *ctx = xsk->ctx; + + return bpf_map_update_elem(ctx->xsks_map_fd, &ctx->queue_id, + &xsk->fd, 0); +} + +static int 
xsk_link_lookup(int ifindex, __u32 *prog_id, int *link_fd) +{ + struct bpf_link_info link_info; + __u32 link_len; + __u32 id = 0; + int err; + int fd; + + while (true) { + err = bpf_link_get_next_id(id, &id); + if (err) { + if (errno == ENOENT) { + err = 0; + break; + } + pr_warn("can't get next link: %s\n", strerror(errno)); + break; + } + + fd = bpf_link_get_fd_by_id(id); + if (fd < 0) { + if (errno == ENOENT) + continue; + pr_warn("can't get link by id (%u): %s\n", id, strerror(errno)); + err = -errno; + break; + } + + link_len = sizeof(struct bpf_link_info); + memset(&link_info, 0, link_len); + err = bpf_obj_get_info_by_fd(fd, &link_info, &link_len); + if (err) { + pr_warn("can't get link info: %s\n", strerror(errno)); + close(fd); + break; + } + if (link_info.type == BPF_LINK_TYPE_XDP) { + if (link_info.xdp.ifindex == ifindex) { + *link_fd = fd; + if (prog_id) + *prog_id = link_info.prog_id; + break; + } + } + close(fd); + } + + return err; +} + +static bool xsk_probe_bpf_link(void) +{ + LIBBPF_OPTS(bpf_link_create_opts, opts, .flags = XDP_FLAGS_SKB_MODE); + struct bpf_insn insns[2] = { + BPF_MOV64_IMM(BPF_REG_0, XDP_PASS), + BPF_EXIT_INSN() + }; + int prog_fd, link_fd = -1, insn_cnt = ARRAY_SIZE(insns); + int ifindex_lo = 1; + bool ret = false; + int err; + + err = xsk_link_lookup(ifindex_lo, NULL, &link_fd); + if (err) + return ret; + + if (link_fd >= 0) + return true; + + prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL); + if (prog_fd < 0) + return ret; + + link_fd = bpf_link_create(prog_fd, ifindex_lo, BPF_XDP, &opts); + close(prog_fd); + + if (link_fd >= 0) { + ret = true; + close(link_fd); + } + + return ret; +} + +static int xsk_create_xsk_struct(int ifindex, struct xsk_socket *xsk) +{ + char ifname[IFNAMSIZ]; + struct xsk_ctx *ctx; + char *interface; + + ctx = calloc(1, sizeof(*ctx)); + if (!ctx) + return -ENOMEM; + + interface = if_indextoname(ifindex, &ifname[0]); + if (!interface) { + free(ctx); + return -errno; + } + + ctx->ifindex = ifindex; + libbpf_strlcpy(ctx->ifname, ifname, IFNAMSIZ); + + xsk->ctx = ctx; + xsk->ctx->has_bpf_link = xsk_probe_bpf_link(); + + return 0; +} + +static int xsk_init_xdp_res(struct xsk_socket *xsk, + int *xsks_map_fd) +{ + struct xsk_ctx *ctx = xsk->ctx; + int err; + + err = xsk_create_bpf_maps(xsk); + if (err) + return err; + + err = xsk_load_xdp_prog(xsk); + if (err) + goto err_load_xdp_prog; + + if (ctx->has_bpf_link) + err = xsk_create_bpf_link(xsk); + else + err = bpf_set_link_xdp_fd(xsk->ctx->ifindex, ctx->prog_fd, + xsk->config.xdp_flags); + + if (err) + goto err_attach_xdp_prog; + + if (!xsk->rx) + return err; + + err = xsk_set_bpf_maps(xsk); + if (err) + goto err_set_bpf_maps; + + return err; + +err_set_bpf_maps: + if (ctx->has_bpf_link) + close(ctx->link_fd); + else + bpf_set_link_xdp_fd(ctx->ifindex, -1, 0); +err_attach_xdp_prog: + close(ctx->prog_fd); +err_load_xdp_prog: + xsk_delete_bpf_maps(xsk); + return err; +} + +static int xsk_lookup_xdp_res(struct xsk_socket *xsk, int *xsks_map_fd, int prog_id) +{ + struct xsk_ctx *ctx = xsk->ctx; + int err; + + ctx->prog_fd = bpf_prog_get_fd_by_id(prog_id); + if (ctx->prog_fd < 0) { + err = -errno; + goto err_prog_fd; + } + err = xsk_lookup_bpf_maps(xsk); + if (err) + goto err_lookup_maps; + + if (!xsk->rx) + return err; + + err = xsk_set_bpf_maps(xsk); + if (err) + goto err_set_maps; + + return err; + +err_set_maps: + close(ctx->xsks_map_fd); +err_lookup_maps: + close(ctx->prog_fd); +err_prog_fd: + if (ctx->has_bpf_link) + close(ctx->link_fd); + return err; 
+} + +static int __xsk_setup_xdp_prog(struct xsk_socket *_xdp, int *xsks_map_fd) +{ + struct xsk_socket *xsk = _xdp; + struct xsk_ctx *ctx = xsk->ctx; + __u32 prog_id = 0; + int err; + + if (ctx->has_bpf_link) + err = xsk_link_lookup(ctx->ifindex, &prog_id, &ctx->link_fd); + else + err = bpf_get_link_xdp_id(ctx->ifindex, &prog_id, xsk->config.xdp_flags); + + if (err) + return err; + + err = !prog_id ? xsk_init_xdp_res(xsk, xsks_map_fd) : + xsk_lookup_xdp_res(xsk, xsks_map_fd, prog_id); + + if (!err && xsks_map_fd) + *xsks_map_fd = ctx->xsks_map_fd; + + return err; +} + +static struct xsk_ctx *xsk_get_ctx(struct xsk_umem *umem, int ifindex, + __u32 queue_id) +{ + struct xsk_ctx *ctx; + + if (list_empty(&umem->ctx_list)) + return NULL; + + list_for_each_entry(ctx, &umem->ctx_list, list) { + if (ctx->ifindex == ifindex && ctx->queue_id == queue_id) { + ctx->refcount++; + return ctx; + } + } + + return NULL; +} + +static void xsk_put_ctx(struct xsk_ctx *ctx, bool unmap) +{ + struct xsk_umem *umem = ctx->umem; + struct xdp_mmap_offsets off; + int err; + + if (--ctx->refcount) + return; + + if (!unmap) + goto out_free; + + err = xsk_get_mmap_offsets(umem->fd, &off); + if (err) + goto out_free; + + munmap(ctx->fill->ring - off.fr.desc, off.fr.desc + umem->config.fill_size * + sizeof(__u64)); + munmap(ctx->comp->ring - off.cr.desc, off.cr.desc + umem->config.comp_size * + sizeof(__u64)); + +out_free: + list_del(&ctx->list); + free(ctx); +} + +static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk, + struct xsk_umem *umem, int ifindex, + const char *ifname, __u32 queue_id, + struct xsk_ring_prod *fill, + struct xsk_ring_cons *comp) +{ + struct xsk_ctx *ctx; + int err; + + ctx = calloc(1, sizeof(*ctx)); + if (!ctx) + return NULL; + + if (!umem->fill_save) { + err = xsk_create_umem_rings(umem, xsk->fd, fill, comp); + if (err) { + free(ctx); + return NULL; + } + } else if (umem->fill_save != fill || umem->comp_save != comp) { + /* Copy over rings to new structs. 
*/ + memcpy(fill, umem->fill_save, sizeof(*fill)); + memcpy(comp, umem->comp_save, sizeof(*comp)); + } + + ctx->ifindex = ifindex; + ctx->refcount = 1; + ctx->umem = umem; + ctx->queue_id = queue_id; + libbpf_strlcpy(ctx->ifname, ifname, IFNAMSIZ); + + ctx->fill = fill; + ctx->comp = comp; + list_add(&ctx->list, &umem->ctx_list); + return ctx; +} + +static void xsk_destroy_xsk_struct(struct xsk_socket *xsk) +{ + free(xsk->ctx); + free(xsk); +} + +int xsk_socket__update_xskmap(struct xsk_socket *xsk, int fd) +{ + xsk->ctx->xsks_map_fd = fd; + return xsk_set_bpf_maps(xsk); +} + +int xsk_setup_xdp_prog(int ifindex, int *xsks_map_fd) +{ + struct xsk_socket *xsk; + int res; + + xsk = calloc(1, sizeof(*xsk)); + if (!xsk) + return -ENOMEM; + + res = xsk_create_xsk_struct(ifindex, xsk); + if (res) { + free(xsk); + return -EINVAL; + } + + res = __xsk_setup_xdp_prog(xsk, xsks_map_fd); + + xsk_destroy_xsk_struct(xsk); + + return res; +} + +int xsk_socket__create_shared(struct xsk_socket **xsk_ptr, + const char *ifname, + __u32 queue_id, struct xsk_umem *umem, + struct xsk_ring_cons *rx, + struct xsk_ring_prod *tx, + struct xsk_ring_prod *fill, + struct xsk_ring_cons *comp, + const struct xsk_socket_config *usr_config) +{ + bool unmap, rx_setup_done = false, tx_setup_done = false; + void *rx_map = NULL, *tx_map = NULL; + struct sockaddr_xdp sxdp = {}; + struct xdp_mmap_offsets off; + struct xsk_socket *xsk; + struct xsk_ctx *ctx; + int err, ifindex; + + if (!umem || !xsk_ptr || !(rx || tx)) + return -EFAULT; + + unmap = umem->fill_save != fill; + + xsk = calloc(1, sizeof(*xsk)); + if (!xsk) + return -ENOMEM; + + err = xsk_set_xdp_socket_config(&xsk->config, usr_config); + if (err) + goto out_xsk_alloc; + + xsk->outstanding_tx = 0; + ifindex = if_nametoindex(ifname); + if (!ifindex) { + err = -errno; + goto out_xsk_alloc; + } + + if (umem->refcount++ > 0) { + xsk->fd = socket(AF_XDP, SOCK_RAW | SOCK_CLOEXEC, 0); + if (xsk->fd < 0) { + err = -errno; + goto out_xsk_alloc; + } + } else { + xsk->fd = umem->fd; + rx_setup_done = umem->rx_ring_setup_done; + tx_setup_done = umem->tx_ring_setup_done; + } + + ctx = xsk_get_ctx(umem, ifindex, queue_id); + if (!ctx) { + if (!fill || !comp) { + err = -EFAULT; + goto out_socket; + } + + ctx = xsk_create_ctx(xsk, umem, ifindex, ifname, queue_id, + fill, comp); + if (!ctx) { + err = -ENOMEM; + goto out_socket; + } + } + xsk->ctx = ctx; + xsk->ctx->has_bpf_link = xsk_probe_bpf_link(); + + if (rx && !rx_setup_done) { + err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING, + &xsk->config.rx_size, + sizeof(xsk->config.rx_size)); + if (err) { + err = -errno; + goto out_put_ctx; + } + if (xsk->fd == umem->fd) + umem->rx_ring_setup_done = true; + } + if (tx && !tx_setup_done) { + err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING, + &xsk->config.tx_size, + sizeof(xsk->config.tx_size)); + if (err) { + err = -errno; + goto out_put_ctx; + } + if (xsk->fd == umem->fd) + umem->tx_ring_setup_done = true; + } + + err = xsk_get_mmap_offsets(xsk->fd, &off); + if (err) { + err = -errno; + goto out_put_ctx; + } + + if (rx) { + rx_map = mmap(NULL, off.rx.desc + + xsk->config.rx_size * sizeof(struct xdp_desc), + PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, + xsk->fd, XDP_PGOFF_RX_RING); + if (rx_map == MAP_FAILED) { + err = -errno; + goto out_put_ctx; + } + + rx->mask = xsk->config.rx_size - 1; + rx->size = xsk->config.rx_size; + rx->producer = rx_map + off.rx.producer; + rx->consumer = rx_map + off.rx.consumer; + rx->flags = rx_map + off.rx.flags; + rx->ring = rx_map + off.rx.desc; + 
rx->cached_prod = *rx->producer; + rx->cached_cons = *rx->consumer; + } + xsk->rx = rx; + + if (tx) { + tx_map = mmap(NULL, off.tx.desc + + xsk->config.tx_size * sizeof(struct xdp_desc), + PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, + xsk->fd, XDP_PGOFF_TX_RING); + if (tx_map == MAP_FAILED) { + err = -errno; + goto out_mmap_rx; + } + + tx->mask = xsk->config.tx_size - 1; + tx->size = xsk->config.tx_size; + tx->producer = tx_map + off.tx.producer; + tx->consumer = tx_map + off.tx.consumer; + tx->flags = tx_map + off.tx.flags; + tx->ring = tx_map + off.tx.desc; + tx->cached_prod = *tx->producer; + /* cached_cons is r->size bigger than the real consumer pointer + * See xsk_prod_nb_free + */ + tx->cached_cons = *tx->consumer + xsk->config.tx_size; + } + xsk->tx = tx; + + sxdp.sxdp_family = PF_XDP; + sxdp.sxdp_ifindex = ctx->ifindex; + sxdp.sxdp_queue_id = ctx->queue_id; + if (umem->refcount > 1) { + sxdp.sxdp_flags |= XDP_SHARED_UMEM; + sxdp.sxdp_shared_umem_fd = umem->fd; + } else { + sxdp.sxdp_flags = xsk->config.bind_flags; + } + + err = bind(xsk->fd, (struct sockaddr *)&sxdp, sizeof(sxdp)); + if (err) { + err = -errno; + goto out_mmap_tx; + } + + ctx->prog_fd = -1; + + if (!(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) { + err = __xsk_setup_xdp_prog(xsk, NULL); + if (err) + goto out_mmap_tx; + } + + *xsk_ptr = xsk; + umem->fill_save = NULL; + umem->comp_save = NULL; + return 0; + +out_mmap_tx: + if (tx) + munmap(tx_map, off.tx.desc + + xsk->config.tx_size * sizeof(struct xdp_desc)); +out_mmap_rx: + if (rx) + munmap(rx_map, off.rx.desc + + xsk->config.rx_size * sizeof(struct xdp_desc)); +out_put_ctx: + xsk_put_ctx(ctx, unmap); +out_socket: + if (--umem->refcount) + close(xsk->fd); +out_xsk_alloc: + free(xsk); + return err; +} + +int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, + __u32 queue_id, struct xsk_umem *umem, + struct xsk_ring_cons *rx, struct xsk_ring_prod *tx, + const struct xsk_socket_config *usr_config) +{ + if (!umem) + return -EFAULT; + + return xsk_socket__create_shared(xsk_ptr, ifname, queue_id, umem, + rx, tx, umem->fill_save, + umem->comp_save, usr_config); +} + +int xsk_umem__delete(struct xsk_umem *umem) +{ + struct xdp_mmap_offsets off; + int err; + + if (!umem) + return 0; + + if (umem->refcount) + return -EBUSY; + + err = xsk_get_mmap_offsets(umem->fd, &off); + if (!err && umem->fill_save && umem->comp_save) { + munmap(umem->fill_save->ring - off.fr.desc, + off.fr.desc + umem->config.fill_size * sizeof(__u64)); + munmap(umem->comp_save->ring - off.cr.desc, + off.cr.desc + umem->config.comp_size * sizeof(__u64)); + } + + close(umem->fd); + free(umem); + + return 0; +} + +void xsk_socket__delete(struct xsk_socket *xsk) +{ + size_t desc_sz = sizeof(struct xdp_desc); + struct xdp_mmap_offsets off; + struct xsk_umem *umem; + struct xsk_ctx *ctx; + int err; + + if (!xsk) + return; + + ctx = xsk->ctx; + umem = ctx->umem; + if (ctx->prog_fd != -1) { + xsk_delete_bpf_maps(xsk); + close(ctx->prog_fd); + if (ctx->has_bpf_link) + close(ctx->link_fd); + } + + err = xsk_get_mmap_offsets(xsk->fd, &off); + if (!err) { + if (xsk->rx) { + munmap(xsk->rx->ring - off.rx.desc, + off.rx.desc + xsk->config.rx_size * desc_sz); + } + if (xsk->tx) { + munmap(xsk->tx->ring - off.tx.desc, + off.tx.desc + xsk->config.tx_size * desc_sz); + } + } + + xsk_put_ctx(ctx, true); + + umem->refcount--; + /* Do not close an fd that also has an associated umem connected + * to it. 
+ */ + if (xsk->fd != umem->fd) + close(xsk->fd); + free(xsk); +} diff --git a/pkg/plugin/lib/common/libbpf/_src/xsk.h b/pkg/plugin/lib/common/libbpf/_src/xsk.h new file mode 100644 index 000000000..64e9c57fd --- /dev/null +++ b/pkg/plugin/lib/common/libbpf/_src/xsk.h @@ -0,0 +1,336 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ + +/* + * AF_XDP user-space access library. + * + * Copyright (c) 2018 - 2019 Intel Corporation. + * Copyright (c) 2019 Facebook + * + * Author(s): Magnus Karlsson + */ + +#ifndef __LIBBPF_XSK_H +#define __LIBBPF_XSK_H + +#include +#include +#include +#include + +#include "libbpf.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* This whole API has been deprecated and moved to libxdp that can be found at + * https://github.com/xdp-project/xdp-tools. The APIs are exactly the same so + * it should just be linking with libxdp instead of libbpf for this set of + * functionality. If not, please submit a bug report on the aforementioned page. + */ + +/* Load-Acquire Store-Release barriers used by the XDP socket + * library. The following macros should *NOT* be considered part of + * the xsk.h API, and is subject to change anytime. + * + * LIBRARY INTERNAL + */ + +#define __XSK_READ_ONCE(x) (*(volatile typeof(x) *)&x) +#define __XSK_WRITE_ONCE(x, v) (*(volatile typeof(x) *)&x) = (v) + +#if defined(__i386__) || defined(__x86_64__) +# define libbpf_smp_store_release(p, v) \ + do { \ + asm volatile("" : : : "memory"); \ + __XSK_WRITE_ONCE(*p, v); \ + } while (0) +# define libbpf_smp_load_acquire(p) \ + ({ \ + typeof(*p) ___p1 = __XSK_READ_ONCE(*p); \ + asm volatile("" : : : "memory"); \ + ___p1; \ + }) +#elif defined(__aarch64__) +# define libbpf_smp_store_release(p, v) \ + asm volatile ("stlr %w1, %0" : "=Q" (*p) : "r" (v) : "memory") +# define libbpf_smp_load_acquire(p) \ + ({ \ + typeof(*p) ___p1; \ + asm volatile ("ldar %w0, %1" \ + : "=r" (___p1) : "Q" (*p) : "memory"); \ + ___p1; \ + }) +#elif defined(__riscv) +# define libbpf_smp_store_release(p, v) \ + do { \ + asm volatile ("fence rw,w" : : : "memory"); \ + __XSK_WRITE_ONCE(*p, v); \ + } while (0) +# define libbpf_smp_load_acquire(p) \ + ({ \ + typeof(*p) ___p1 = __XSK_READ_ONCE(*p); \ + asm volatile ("fence r,rw" : : : "memory"); \ + ___p1; \ + }) +#endif + +#ifndef libbpf_smp_store_release +#define libbpf_smp_store_release(p, v) \ + do { \ + __sync_synchronize(); \ + __XSK_WRITE_ONCE(*p, v); \ + } while (0) +#endif + +#ifndef libbpf_smp_load_acquire +#define libbpf_smp_load_acquire(p) \ + ({ \ + typeof(*p) ___p1 = __XSK_READ_ONCE(*p); \ + __sync_synchronize(); \ + ___p1; \ + }) +#endif + +/* LIBRARY INTERNAL -- END */ + +/* Do not access these members directly. Use the functions below. */ +#define DEFINE_XSK_RING(name) \ +struct name { \ + __u32 cached_prod; \ + __u32 cached_cons; \ + __u32 mask; \ + __u32 size; \ + __u32 *producer; \ + __u32 *consumer; \ + void *ring; \ + __u32 *flags; \ +} + +DEFINE_XSK_RING(xsk_ring_prod); +DEFINE_XSK_RING(xsk_ring_cons); + +/* For a detailed explanation on the memory barriers associated with the + * ring, please take a look at net/xdp/xsk_queue.h. 
+ */ + +struct xsk_umem; +struct xsk_socket; + +static inline __u64 *xsk_ring_prod__fill_addr(struct xsk_ring_prod *fill, + __u32 idx) +{ + __u64 *addrs = (__u64 *)fill->ring; + + return &addrs[idx & fill->mask]; +} + +static inline const __u64 * +xsk_ring_cons__comp_addr(const struct xsk_ring_cons *comp, __u32 idx) +{ + const __u64 *addrs = (const __u64 *)comp->ring; + + return &addrs[idx & comp->mask]; +} + +static inline struct xdp_desc *xsk_ring_prod__tx_desc(struct xsk_ring_prod *tx, + __u32 idx) +{ + struct xdp_desc *descs = (struct xdp_desc *)tx->ring; + + return &descs[idx & tx->mask]; +} + +static inline const struct xdp_desc * +xsk_ring_cons__rx_desc(const struct xsk_ring_cons *rx, __u32 idx) +{ + const struct xdp_desc *descs = (const struct xdp_desc *)rx->ring; + + return &descs[idx & rx->mask]; +} + +static inline int xsk_ring_prod__needs_wakeup(const struct xsk_ring_prod *r) +{ + return *r->flags & XDP_RING_NEED_WAKEUP; +} + +static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb) +{ + __u32 free_entries = r->cached_cons - r->cached_prod; + + if (free_entries >= nb) + return free_entries; + + /* Refresh the local tail pointer. + * cached_cons is r->size bigger than the real consumer pointer so + * that this addition can be avoided in the more frequently + * executed code that computs free_entries in the beginning of + * this function. Without this optimization it whould have been + * free_entries = r->cached_prod - r->cached_cons + r->size. + */ + r->cached_cons = libbpf_smp_load_acquire(r->consumer); + r->cached_cons += r->size; + + return r->cached_cons - r->cached_prod; +} + +static inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb) +{ + __u32 entries = r->cached_prod - r->cached_cons; + + if (entries == 0) { + r->cached_prod = libbpf_smp_load_acquire(r->producer); + entries = r->cached_prod - r->cached_cons; + } + + return (entries > nb) ? nb : entries; +} + +static inline __u32 xsk_ring_prod__reserve(struct xsk_ring_prod *prod, __u32 nb, __u32 *idx) +{ + if (xsk_prod_nb_free(prod, nb) < nb) + return 0; + + *idx = prod->cached_prod; + prod->cached_prod += nb; + + return nb; +} + +static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, __u32 nb) +{ + /* Make sure everything has been written to the ring before indicating + * this to the kernel by writing the producer pointer. + */ + libbpf_smp_store_release(prod->producer, *prod->producer + nb); +} + +static inline __u32 xsk_ring_cons__peek(struct xsk_ring_cons *cons, __u32 nb, __u32 *idx) +{ + __u32 entries = xsk_cons_nb_avail(cons, nb); + + if (entries > 0) { + *idx = cons->cached_cons; + cons->cached_cons += entries; + } + + return entries; +} + +static inline void xsk_ring_cons__cancel(struct xsk_ring_cons *cons, __u32 nb) +{ + cons->cached_cons -= nb; +} + +static inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, __u32 nb) +{ + /* Make sure data has been read before indicating we are done + * with the entries by updating the consumer pointer. 
+ */ + libbpf_smp_store_release(cons->consumer, *cons->consumer + nb); + +} + +static inline void *xsk_umem__get_data(void *umem_area, __u64 addr) +{ + return &((char *)umem_area)[addr]; +} + +static inline __u64 xsk_umem__extract_addr(__u64 addr) +{ + return addr & XSK_UNALIGNED_BUF_ADDR_MASK; +} + +static inline __u64 xsk_umem__extract_offset(__u64 addr) +{ + return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT; +} + +static inline __u64 xsk_umem__add_offset_to_addr(__u64 addr) +{ + return xsk_umem__extract_addr(addr) + xsk_umem__extract_offset(addr); +} + +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp") +int xsk_umem__fd(const struct xsk_umem *umem); +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp") +int xsk_socket__fd(const struct xsk_socket *xsk); + +#define XSK_RING_CONS__DEFAULT_NUM_DESCS 2048 +#define XSK_RING_PROD__DEFAULT_NUM_DESCS 2048 +#define XSK_UMEM__DEFAULT_FRAME_SHIFT 12 /* 4096 bytes */ +#define XSK_UMEM__DEFAULT_FRAME_SIZE (1 << XSK_UMEM__DEFAULT_FRAME_SHIFT) +#define XSK_UMEM__DEFAULT_FRAME_HEADROOM 0 +#define XSK_UMEM__DEFAULT_FLAGS 0 + +struct xsk_umem_config { + __u32 fill_size; + __u32 comp_size; + __u32 frame_size; + __u32 frame_headroom; + __u32 flags; +}; + +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp") +int xsk_setup_xdp_prog(int ifindex, int *xsks_map_fd); +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp") +int xsk_socket__update_xskmap(struct xsk_socket *xsk, int xsks_map_fd); + +/* Flags for the libbpf_flags field. */ +#define XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD (1 << 0) + +struct xsk_socket_config { + __u32 rx_size; + __u32 tx_size; + __u32 libbpf_flags; + __u32 xdp_flags; + __u16 bind_flags; +}; + +/* Set config to NULL to get the default configuration. */ +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp") +int xsk_umem__create(struct xsk_umem **umem, + void *umem_area, __u64 size, + struct xsk_ring_prod *fill, + struct xsk_ring_cons *comp, + const struct xsk_umem_config *config); +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp") +int xsk_umem__create_v0_0_2(struct xsk_umem **umem, + void *umem_area, __u64 size, + struct xsk_ring_prod *fill, + struct xsk_ring_cons *comp, + const struct xsk_umem_config *config); +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp") +int xsk_umem__create_v0_0_4(struct xsk_umem **umem, + void *umem_area, __u64 size, + struct xsk_ring_prod *fill, + struct xsk_ring_cons *comp, + const struct xsk_umem_config *config); +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp") +int xsk_socket__create(struct xsk_socket **xsk, + const char *ifname, __u32 queue_id, + struct xsk_umem *umem, + struct xsk_ring_cons *rx, + struct xsk_ring_prod *tx, + const struct xsk_socket_config *config); +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp") +int xsk_socket__create_shared(struct xsk_socket **xsk_ptr, + const char *ifname, + __u32 queue_id, struct xsk_umem *umem, + struct xsk_ring_cons *rx, + struct xsk_ring_prod *tx, + struct xsk_ring_prod *fill, + struct xsk_ring_cons *comp, + const struct xsk_socket_config *config); + +/* Returns 0 for success and -EBUSY if the umem is still in use. 
*/ +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp") +int xsk_umem__delete(struct xsk_umem *umem); +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp") +void xsk_socket__delete(struct xsk_socket *xsk); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* __LIBBPF_XSK_H */ diff --git a/pkg/plugin/linuxutil/Makefile b/pkg/plugin/linuxutil/Makefile new file mode 100644 index 000000000..7e90ff976 --- /dev/null +++ b/pkg/plugin/linuxutil/Makefile @@ -0,0 +1,11 @@ +REPO_ROOT = $(shell git rev-parse --show-toplevel) +TOOLS_BIN_DIR = $(REPO_ROOT)/hack/tools/bin +MOCKGEN = $(TOOLS_BIN_DIR)/mockgen + +.PHONY: generate + +generate: $(MOCKGEN) ## Generate mock clients + $(MOCKGEN) -source=$(REPO_ROOT)/pkg/plugin/linuxutil/types.go -copyright_file=$(REPO_ROOT)/pkg/lib/ignore_headers.txt -package=linuxutil > linuxutil_mock_generated.go + +$(MOCKGEN): + @make -C $(REPO_ROOT) $(MOCKGEN) diff --git a/pkg/plugin/linuxutil/ethtool_stats.go b/pkg/plugin/linuxutil/ethtool_stats.go new file mode 100644 index 000000000..32c42ea2f --- /dev/null +++ b/pkg/plugin/linuxutil/ethtool_stats.go @@ -0,0 +1,113 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package linuxutil + +import ( + "net" + "strings" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/metrics" + "github.com/safchain/ethtool" + "go.uber.org/zap" +) + +type EthtoolReader struct { + l *log.ZapLogger + opts *EthtoolOpts + data *EthtoolStats + ethHandle EthtoolInterface +} + +func NewEthtoolReader(opts *EthtoolOpts, ethHandle EthtoolInterface) *EthtoolReader { + if ethHandle == nil { + var err error + ethHandle, err = ethtool.NewEthtool() + if err != nil { + log.Logger().Error("Error while creating ethtool handle: %v\n", zap.Error(err)) + return nil + } + } + return &EthtoolReader{ + l: log.Logger().Named(string("EthtoolReader")), + opts: opts, + data: &EthtoolStats{}, + ethHandle: ethHandle, + } +} + +func (er *EthtoolReader) readAndUpdate() error { + if err := er.readInterfaceStats(); err != nil { + return err + } + + er.updateMetrics() + er.l.Debug("Done reading and updating interface stats") + + return nil +} + +func (er *EthtoolReader) readInterfaceStats() error { + // ethtool section + + ifaces, err := net.Interfaces() + if err != nil { + er.l.Error("Error while getting all interfaces: %v\n", zap.Error(err)) + return err + } + + defer er.ethHandle.Close() + + er.data = &EthtoolStats{ + stats: make(map[string]map[string]uint64), + } + + for _, i := range ifaces { + // exclude loopback interface and bridge network interface + if strings.Contains(i.Name, "lo") || strings.Contains(i.Name, "cbr0") { + continue + } + // Retrieve tx from eth0 + ifaceStats, err := er.ethHandle.Stats(i.Name) + if err != nil { + er.l.Error("Error while getting ethtool:", zap.String("ifacename", i.Name), zap.Error(err)) + continue + } + + er.data.stats[i.Name] = make(map[string]uint64) + tempMap := er.processStats(ifaceStats) + er.data.stats[i.Name] = tempMap + + er.l.Debug("Processed ethtool Stats ", zap.String("ifacename", i.Name)) + } + + return nil +} + +func (er *EthtoolReader) processStats(ifaceStats map[string]uint64) map[string]uint64 { + // process stats section + newStats := make(map[string]uint64) + for k, v := range ifaceStats { + if !er.opts.addZeroVal && v == 0 { + continue + } + + if er.opts.errOrDropKeysOnly && !strings.Contains(k, "err") && !strings.Contains(k, "drop") { + continue + } + + newStats[k] = v + } + + 
return newStats +} + +func (er *EthtoolReader) updateMetrics() { + // update metrics section + // retrive interfacename and statname from ethStats + for ifName, stats := range er.data.stats { + for statName, statVal := range stats { + metrics.InterfaceStats.WithLabelValues(ifName, statName).Set(float64(statVal)) + } + } +} diff --git a/pkg/plugin/linuxutil/ethtool_stats_test.go b/pkg/plugin/linuxutil/ethtool_stats_test.go new file mode 100644 index 000000000..b91cae69c --- /dev/null +++ b/pkg/plugin/linuxutil/ethtool_stats_test.go @@ -0,0 +1,123 @@ +package linuxutil + +import ( + "testing" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/metrics" + gomock "github.com/golang/mock/gomock" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/assert" +) + +var ( + MockGaugeVec *metrics.MockIGaugeVec + MockCounterVec *metrics.MockICounterVec +) + +func TestNewEthtool(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + opts := &EthtoolOpts{ + errOrDropKeysOnly: false, + addZeroVal: false, + } + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ethHandle := NewMockEthtoolInterface(ctrl) + ethReader := NewEthtoolReader(opts, ethHandle) + assert.NotNil(t, ethReader) +} + +func TestNewEthtoolWithNil(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + opts := &EthtoolOpts{ + errOrDropKeysOnly: false, + addZeroVal: false, + } + + ethReader := NewEthtoolReader(opts, nil) + assert.NotNil(t, ethReader) +} + +func TestReadInterfaceStats(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + l := log.Logger().Named("ethtool test").Sugar() + + tests := []struct { + name string + opts *EthtoolOpts + statsReturn map[string]uint64 + statErr error + result map[string]uint64 + wantErr bool + }{ + { + name: "test correct", + opts: &EthtoolOpts{ + errOrDropKeysOnly: false, + addZeroVal: false, + }, + statsReturn: map[string]uint64{ + "rx_packets": 1, + }, + statErr: nil, + result: map[string]uint64{ + "rx_packets": 1, + }, + wantErr: false, + }, + } + + for _, tt := range tests { + l.Infof("Running TestReadInterfaceStats %s", tt.name) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ethHandle := NewMockEthtoolInterface(ctrl) + ethReader := NewEthtoolReader(tt.opts, ethHandle) + assert.NotNil(t, ethReader) + + ethHandle.EXPECT().Stats(gomock.Any()).Return(tt.statsReturn, nil).AnyTimes() + ethHandle.EXPECT().Close().Times(1) + InitalizeMetricsForTesting(ctrl) + + testmetric := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "testmetric", + Help: "testmetric", + }) + + MockGaugeVec.EXPECT().WithLabelValues(gomock.Any()).Return(testmetric).AnyTimes() + + err := ethReader.readInterfaceStats() + assert.Nil(t, err) + + ethReader.updateMetrics() + } +} + +func InitalizeMetricsForTesting(ctrl *gomock.Controller) { + metricsLogger := log.Logger().Named("metrics") + metricsLogger.Info("Initializing metrics for testing") + + MockCounterVec = metrics.NewMockICounterVec(ctrl) + MockGaugeVec = metrics.NewMockIGaugeVec(ctrl) //nolint:typecheck + + metrics.DropCounter = MockGaugeVec + metrics.DropBytesCounter = MockGaugeVec + metrics.ForwardBytesCounter = MockGaugeVec + metrics.ForwardCounter = MockGaugeVec + metrics.NodeConnectivityStatusGauge = MockGaugeVec + metrics.NodeConnectivityLatencyGauge = MockGaugeVec + metrics.TCPStateGauge = MockGaugeVec + metrics.TCPConnectionRemoteGauge = MockGaugeVec + metrics.TCPConnectionStats = MockGaugeVec + metrics.TCPFlagCounters = MockGaugeVec + metrics.IPConnectionStats = 
MockGaugeVec + metrics.UDPConnectionStats = MockGaugeVec + metrics.UDPActiveSocketsCounter = MockGaugeVec + metrics.InterfaceStats = MockGaugeVec + metrics.PluginManagerFailedToReconcileCounter = MockCounterVec +} diff --git a/pkg/plugin/linuxutil/linuxutil.go b/pkg/plugin/linuxutil/linuxutil.go new file mode 100644 index 000000000..192cd2fc0 --- /dev/null +++ b/pkg/plugin/linuxutil/linuxutil.go @@ -0,0 +1,121 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package linuxutil + +import ( + "context" + "fmt" + "sync" + "time" + + kcfg "github.com/microsoft/retina/pkg/config" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/plugin/api" + hubblev1 "github.com/cilium/cilium/pkg/hubble/api/v1" + "github.com/safchain/ethtool" + "go.uber.org/zap" +) + +// New creates a linuxutil plugin. +func New(cfg *kcfg.Config) api.Plugin { + return &linuxUtil{ + cfg: cfg, + l: log.Logger().Named(string(Name)), + } +} + +func (lu *linuxUtil) Name() string { + return string(Name) +} + +func (lu *linuxUtil) Generate(ctx context.Context) error { + return nil +} + +func (lu *linuxUtil) Compile(ctx context.Context) error { + return nil +} + +func (lu *linuxUtil) Init() error { + lu.l.Info("Initializing linuxutil plugin...") + return nil +} + +func (lu *linuxUtil) Start(ctx context.Context) error { + lu.isRunning = true + return lu.run(ctx) +} + +func (lu *linuxUtil) SetupChannel(ch chan *hubblev1.Event) error { + lu.l.Warn("Plugin does not support SetupChannel", zap.String("plugin", string(Name))) + return nil +} + +func (lu *linuxUtil) run(ctx context.Context) error { + lu.l.Info("Running linuxutil plugin...") + ticker := time.NewTicker(lu.cfg.MetricsInterval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + lu.l.Info("Context is done, linuxutil will stop running") + return nil + case <-ticker.C: + opts := &NetstatOpts{ + CuratedKeys: false, + AddZeroVal: false, + ListenSock: false, + } + var wg sync.WaitGroup + + ns := &Netstat{} + nsReader := NewNetstatReader(opts, ns) + wg.Add(1) + go func() { + defer wg.Done() + err := nsReader.readAndUpdate() + if err != nil { + lu.l.Error("Reading netstat failed", zap.Error(err)) + } + }() + + ethtoolOpts := &EthtoolOpts{ + errOrDropKeysOnly: false, + addZeroVal: false, + } + + ethHandle, err := ethtool.NewEthtool() + if err != nil { + lu.l.Error("Error while creating ethHandle: %v\n", zap.Error(err)) + return err + } + + ethReader := NewEthtoolReader(ethtoolOpts, ethHandle) + if ethReader == nil { + lu.l.Error("Error while creating ethReader") + return fmt.Errorf("error while creating ethReader") + } + wg.Add(1) + go func() { + defer wg.Done() + err := ethReader.readAndUpdate() + if err != nil { + lu.l.Error("Reading ethTool failed", zap.Error(err)) + } + }() + + wg.Wait() + } + } +} + +func (lu *linuxUtil) Stop() error { + if !lu.isRunning { + return nil + } + lu.l.Info("Stopping linuxutil plugin...") + lu.isRunning = false + return nil +} diff --git a/pkg/plugin/linuxutil/linuxutil_mock_generated.go b/pkg/plugin/linuxutil/linuxutil_mock_generated.go new file mode 100644 index 000000000..e9d8ce794 --- /dev/null +++ b/pkg/plugin/linuxutil/linuxutil_mock_generated.go @@ -0,0 +1,120 @@ +// autogenerated +// +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Code generated by MockGen. DO NOT EDIT. +// Source: /home/enterprise/retina/pkg/plugin/linuxutil/types.go + +// Package linuxutil is a generated GoMock package. 
+package linuxutil + +import ( + reflect "reflect" + + netstat "github.com/cakturk/go-netstat/netstat" + gomock "github.com/golang/mock/gomock" +) + +// MockEthtoolInterface is a mock of EthtoolInterface interface. +type MockEthtoolInterface struct { + ctrl *gomock.Controller + recorder *MockEthtoolInterfaceMockRecorder +} + +// MockEthtoolInterfaceMockRecorder is the mock recorder for MockEthtoolInterface. +type MockEthtoolInterfaceMockRecorder struct { + mock *MockEthtoolInterface +} + +// NewMockEthtoolInterface creates a new mock instance. +func NewMockEthtoolInterface(ctrl *gomock.Controller) *MockEthtoolInterface { + mock := &MockEthtoolInterface{ctrl: ctrl} + mock.recorder = &MockEthtoolInterfaceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockEthtoolInterface) EXPECT() *MockEthtoolInterfaceMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockEthtoolInterface) Close() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Close") +} + +// Close indicates an expected call of Close. +func (mr *MockEthtoolInterfaceMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockEthtoolInterface)(nil).Close)) +} + +// Stats mocks base method. +func (m *MockEthtoolInterface) Stats(intf string) (map[string]uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Stats", intf) + ret0, _ := ret[0].(map[string]uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Stats indicates an expected call of Stats. +func (mr *MockEthtoolInterfaceMockRecorder) Stats(intf interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stats", reflect.TypeOf((*MockEthtoolInterface)(nil).Stats), intf) +} + +// MockNetstatInterface is a mock of NetstatInterface interface. +type MockNetstatInterface struct { + ctrl *gomock.Controller + recorder *MockNetstatInterfaceMockRecorder +} + +// MockNetstatInterfaceMockRecorder is the mock recorder for MockNetstatInterface. +type MockNetstatInterfaceMockRecorder struct { + mock *MockNetstatInterface +} + +// NewMockNetstatInterface creates a new mock instance. +func NewMockNetstatInterface(ctrl *gomock.Controller) *MockNetstatInterface { + mock := &MockNetstatInterface{ctrl: ctrl} + mock.recorder = &MockNetstatInterfaceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockNetstatInterface) EXPECT() *MockNetstatInterfaceMockRecorder { + return m.recorder +} + +// TCPSocks mocks base method. +func (m *MockNetstatInterface) TCPSocks(accept netstat.AcceptFn) ([]netstat.SockTabEntry, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TCPSocks", accept) + ret0, _ := ret[0].([]netstat.SockTabEntry) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// TCPSocks indicates an expected call of TCPSocks. +func (mr *MockNetstatInterfaceMockRecorder) TCPSocks(accept interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TCPSocks", reflect.TypeOf((*MockNetstatInterface)(nil).TCPSocks), accept) +} + +// UDPSocks mocks base method. 
+func (m *MockNetstatInterface) UDPSocks(accept netstat.AcceptFn) ([]netstat.SockTabEntry, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UDPSocks", accept) + ret0, _ := ret[0].([]netstat.SockTabEntry) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UDPSocks indicates an expected call of UDPSocks. +func (mr *MockNetstatInterfaceMockRecorder) UDPSocks(accept interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UDPSocks", reflect.TypeOf((*MockNetstatInterface)(nil).UDPSocks), accept) +} diff --git a/pkg/plugin/linuxutil/linuxutil_test.go b/pkg/plugin/linuxutil/linuxutil_test.go new file mode 100644 index 000000000..50a2dc6b8 --- /dev/null +++ b/pkg/plugin/linuxutil/linuxutil_test.go @@ -0,0 +1,77 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +//go:build unit +// +build unit + +package linuxutil + +import ( + "context" + "testing" + "time" + + kcfg "github.com/microsoft/retina/pkg/config" + + "github.com/microsoft/retina/pkg/log" + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" +) + +var ( + cfgPodLevelEnabled = &kcfg.Config{ + MetricsInterval: 1 * time.Second, + EnablePodLevel: true, + } + cfgPodLevelDisabled = &kcfg.Config{ + MetricsInterval: 1 * time.Second, + EnablePodLevel: false, + } +) + +func TestStop(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + p := &linuxUtil{ + cfg: cfgPodLevelEnabled, + l: log.Logger().Named(string(Name)), + } + err := p.Stop() + if err != nil { + t.Fatalf("Expected no error") + } + if p.isRunning { + t.Fatalf("Expected isRunning to be false") + } + + p.isRunning = true + err = p.Stop() + if err != nil { + t.Fatalf("Expected no error") + } + if p.isRunning { + t.Fatalf("Expected isRunning to be false") + } +} + +func TestShutdown(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + p := &linuxUtil{ + cfg: &kcfg.Config{ + MetricsInterval: 100 * time.Second, + EnablePodLevel: true, + }, + l: log.Logger().Named(string(Name)), + } + + ctx, cancel := context.WithCancel(context.Background()) + g, errctx := errgroup.WithContext(ctx) + + g.Go(func() error { + return p.Start(errctx) + }) + + time.Sleep(1 * time.Second) + cancel() + err := g.Wait() + require.NoError(t, err) +} diff --git a/pkg/plugin/linuxutil/netstat_stats.go b/pkg/plugin/linuxutil/netstat_stats.go new file mode 100644 index 000000000..f9c20c8e1 --- /dev/null +++ b/pkg/plugin/linuxutil/netstat_stats.go @@ -0,0 +1,236 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+package linuxutil + +import ( + "fmt" + "os" + "strconv" + "strings" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/metrics" + "github.com/microsoft/retina/pkg/utils" + "github.com/cakturk/go-netstat/netstat" + "go.uber.org/zap" +) + +const ( + pathNetNetstat = "/proc/net/netstat" + pathNetSnmp = "/proc/net/snmp" +) + +type NetstatReader struct { + l *log.ZapLogger + connStats *ConnectionStats + ifaceStats *IfaceStats + opts *NetstatOpts + ns NetstatInterface +} + +func NewNetstatReader(opts *NetstatOpts, ns NetstatInterface) *NetstatReader { + return &NetstatReader{ + l: log.Logger().Named(string("NetstatReader")), + opts: opts, + connStats: &ConnectionStats{}, + ifaceStats: &IfaceStats{}, + ns: ns, + } +} + +func (nr *NetstatReader) readAndUpdate() error { + if err := nr.readConnectionStats(pathNetNetstat); err != nil { + return err + } + + // Get Socket stats + if err := nr.readSockStats(); err != nil { + return err + } + + nr.updateMetrics() + nr.l.Debug("Done reading and updating connections stats") + + return nil +} + +func (nr *NetstatReader) readConnectionStats(path string) error { + // Read the contents of the file into a string + data, err := os.ReadFile(path) + if err != nil { + nr.l.Error("Error while reading netstat path file: \n", zap.Error(err)) + return err + } + + // Split the string into lines + lines := strings.Split(string(data), "\n") + + if len(lines) < 2 && len(lines)%2 != 0 { + return fmt.Errorf("invalid netstat file") + } + + for i := 0; i < len(lines); i += 2 { + fields1 := strings.Fields(lines[i]) + if len(fields1) < 2 { + continue + } + + fields2 := strings.Fields(lines[i+1]) + if len(fields2) < 2 { + continue + } + + if fields1[0] != fields2[0] { + continue + } + + if len(fields1) != len(fields2) { + continue + } + + if strings.HasPrefix(fields1[0], "TcpExt") && strings.HasPrefix(fields2[0], "TcpExt") { + nr.l.Debug("TcpExt found for netstat ") + nr.connStats.TcpExt = nr.processConnFields(fields1, fields2) + } else if strings.HasPrefix(fields1[0], "IpExt") && strings.HasPrefix(fields2[0], "IpExt") { + nr.l.Debug("IpExt found for netstat ") + nr.connStats.IpExt = nr.processConnFields(fields1, fields2) + } else if strings.HasPrefix(fields1[0], "MPTcpExt") && strings.HasPrefix(fields2[0], "MPTcpExt") { + nr.l.Debug("MPTcpExt found for netstat ") + nr.connStats.MPTcpExt = nr.processConnFields(fields1, fields2) + } else { + nr.l.Info("Unknown field found for netstat ", zap.Any("F1", fields1[0]), zap.Any("F2", fields2[0])) + continue + } + + } + + return nil +} + +func (nr *NetstatReader) processConnFields(f1, f2 []string) map[string]uint64 { + stats := make(map[string]uint64) + + for i := 1; i < len(f1); i++ { + num, err := strconv.ParseUint(f2[i], 10, 64) + if err != nil { + continue + } + + if _, ok := netstatCuratedKeys[f1[i]]; nr.opts.CuratedKeys && !ok { + continue + } + + if num == 0 && !nr.opts.AddZeroVal { + continue + } + + stats[f1[i]] = num + } + + return stats +} + +func (nr *NetstatReader) readSockStats() error { + filter := netstat.NoopFilter + if nr.opts.ListenSock { + filter = func(s *netstat.SockTabEntry) bool { + return s.State == netstat.Listen + } + } + + // UDP sockets + socks, err := nr.ns.UDPSocks(filter) + if err != nil { + nr.l.Error("netstat 1: \n", zap.Error(err)) + return err + } else { + sockStats := processSocks(socks) + nr.connStats.UdpSockets = *sockStats + } + + // TCP sockets + socks, err = nr.ns.TCPSocks(filter) + if err != nil { + nr.l.Error("netstat 2: \n", zap.Error(err)) + return err + } else { + 
sockStats := processSocks(socks) + nr.connStats.TcpSockets = *sockStats + } + + return nil +} + +func processSocks(socks []netstat.SockTabEntry) *SocketStats { + stats := &SocketStats{ + totalActiveSockets: len(socks), + socketByState: make(map[string]int), + socketByRemoteAddr: make(map[string]int), + } + + for _, e := range socks { + stats.socketByState[e.State.String()]++ + stats.socketByRemoteAddr[e.RemoteAddr.String()]++ + } + + return stats +} + +func (nr *NetstatReader) updateMetrics() { + if nr.connStats == nil { + nr.l.Info("No connection stats found") + return + } + // Adding TCP Connection Stats + for statName, val := range nr.connStats.TcpExt { + metrics.TCPConnectionStats.WithLabelValues(statName).Set(float64(val)) + } + + // Adding IP Stats + for statName, val := range nr.connStats.IpExt { + metrics.IPConnectionStats.WithLabelValues(statName).Set(float64(val)) + } + + // Adding MPTCP Stats + for statName, val := range nr.connStats.MPTcpExt { + metrics.TCPConnectionStats.WithLabelValues(statName).Set(float64(val)) + } + + // TCP COnnection State and remote addr metrics + for state, v := range nr.connStats.TcpSockets.socketByState { + metrics.TCPStateGauge.WithLabelValues(state).Set(float64(v)) + } + + for remoteAddr, v := range nr.connStats.TcpSockets.socketByRemoteAddr { + addr := "" + port := "" + splitAddr := strings.Split(remoteAddr, ":") + if len(splitAddr) == 2 { + addr = splitAddr[0] + port = splitAddr[1] + } else { + addr = remoteAddr + } + if !validateRemoteAddr(addr) { + continue + } + + metrics.TCPConnectionRemoteGauge.WithLabelValues(addr, port).Set(float64(v)) + } + + // UDP COnnection State metrics + metrics.UDPConnectionStats.WithLabelValues(utils.Active).Set(float64(nr.connStats.UdpSockets.totalActiveSockets)) +} + +func validateRemoteAddr(addr string) bool { + if addr == "" { + return false + } + + // ignore localhost addresses. + if strings.Contains(addr, "127.0.0") { + return false + } + + return true +} diff --git a/pkg/plugin/linuxutil/netstat_stats_test.go b/pkg/plugin/linuxutil/netstat_stats_test.go new file mode 100644 index 000000000..782b2033b --- /dev/null +++ b/pkg/plugin/linuxutil/netstat_stats_test.go @@ -0,0 +1,333 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
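readConnectionStats above depends on /proc/net/netstat presenting each stat group as a header line followed by a value line with the same prefix (the testdata files later in this diff show the real layout). A small self-contained sketch of that pairing, mirroring what processConnFields does with one TcpExt pair; the sample numbers are made up:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Header line names the counters, the next line carries their values.
	header := "TcpExt: SyncookiesSent SyncookiesRecv ListenDrops"
	values := "TcpExt: 0 12 3"

	f1 := strings.Fields(header)
	f2 := strings.Fields(values)

	stats := map[string]uint64{}
	for i := 1; i < len(f1) && i < len(f2); i++ {
		n, err := strconv.ParseUint(f2[i], 10, 64)
		if err != nil || n == 0 { // zero values are skipped unless AddZeroVal is set
			continue
		}
		stats[f1[i]] = n
	}
	fmt.Println(stats) // map[ListenDrops:3 SyncookiesRecv:12]
}
```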
+package linuxutil + +import ( + "fmt" + "net" + "testing" + + "github.com/microsoft/retina/pkg/log" + "github.com/cakturk/go-netstat/netstat" + gomock "github.com/golang/mock/gomock" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/assert" +) + +func TestNewNetstatReader(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + opts := &NetstatOpts{ + CuratedKeys: false, + AddZeroVal: false, + ListenSock: false, + } + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ns := NewMockNetstatInterface(ctrl) + nr := NewNetstatReader(opts, ns) + assert.NotNil(t, nr) +} + +func TestReadConnStats(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + opts := &NetstatOpts{ + CuratedKeys: false, + AddZeroVal: false, + ListenSock: false, + } + tests := []struct { + name string + filePath string + result *ConnectionStats + totalsCheck map[string]int + checkVals bool + wantErr bool + addValZero bool + }{ + { + name: "test correct", + filePath: "testdata/correct-netstat", + totalsCheck: map[string]int{ + "TcpExt": 55, + "IpExt": 5, + "MPTcpExt": 0, + }, + wantErr: false, + }, + { + name: "test correct with zero values", + filePath: "testdata/correct-netstat", + totalsCheck: map[string]int{ + "TcpExt": 123, + "IpExt": 18, + "MPTcpExt": 0, + }, + wantErr: false, + addValZero: true, + }, + { + name: "test some correct with zero values", + filePath: "testdata/somecorrect-netstat1", + result: &ConnectionStats{ + TcpExt: nil, + IpExt: map[string]uint64{ + "InNoRoutes": 18965, + "InTruncatedPkts": 0, + }, + MPTcpExt: map[string]uint64{ + "Test1": 10, + }, + }, + wantErr: false, + checkVals: true, + addValZero: true, + }, + { + name: "test some correct with nonzero", + filePath: "testdata/somecorrect-netstat1", + result: &ConnectionStats{ + TcpExt: nil, + IpExt: map[string]uint64{ + "InNoRoutes": 18965, + }, + MPTcpExt: map[string]uint64{ + "Test1": 10, + }, + }, + wantErr: false, + checkVals: true, + }, + { + name: "test wrong", + filePath: "testdata/wrong-netstat", + result: &ConnectionStats{ + TcpExt: map[string]uint64{}, + IpExt: map[string]uint64{}, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + if tt.addValZero { + opts.AddZeroVal = true + } else { + opts.AddZeroVal = false + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ns := NewMockNetstatInterface(ctrl) + nr := NewNetstatReader(opts, ns) + InitalizeMetricsForTesting(ctrl) + + testmetric := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "testmetric", + Help: "testmetric", + }) + + MockGaugeVec.EXPECT().WithLabelValues(gomock.Any()).Return(testmetric).AnyTimes() + + assert.NotNil(t, nr) + err := nr.readConnectionStats(tt.filePath) + if tt.wantErr { + assert.NotNil(t, err, "Expected error but got nil tetsname: %s", tt.name) + } else { + assert.Nil(t, err, "Expected nil but got err tetsname: %s", tt.name) + assert.NotNil(t, nr.connStats, "Expected data got nil tetsname: %s", tt.name) + if tt.checkVals { + assert.Equal(t, tt.result, nr.connStats, "Expected data got nil tetsname: %s", tt.name) + } else { + assert.Equal(t, len(nr.connStats.TcpExt), tt.totalsCheck["TcpExt"], "Read values are not equal to expected tetsname: %s", tt.name) + assert.Equal(t, len(nr.connStats.IpExt), tt.totalsCheck["IpExt"], "Read values are not equal to expected tetsname: %s", tt.name) + assert.Equal(t, len(nr.connStats.MPTcpExt), tt.totalsCheck["MPTcpExt"], "Read values are not equal to expected tetsname: %s", tt.name) + } + + nr.updateMetrics() + } + } +} + +func TestProcessSocks(t 
*testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + tests := []struct { + name string + data []netstat.SockTabEntry + result *SocketStats + wantErr bool + }{ + { + name: "localhost", + data: []netstat.SockTabEntry{ + { + LocalAddr: &netstat.SockAddr{ + IP: net.IPv4(127, 0, 0, 1).To4(), + Port: 80, + }, + RemoteAddr: &netstat.SockAddr{ + IP: net.IPv4(127, 0, 0, 1).To4(), + Port: 80, + }, + State: 1, + UID: 0, + }, + }, + result: &SocketStats{ + totalActiveSockets: 1, + socketByState: map[string]int{ + "ESTABLISHED": 1, + }, + socketByRemoteAddr: map[string]int{ + "127.0.0.1:80": 1, + }, + }, + wantErr: false, + }, + { + name: "localhost2", + data: []netstat.SockTabEntry{ + { + LocalAddr: &netstat.SockAddr{ + IP: net.IPv4(127, 0, 0, 1).To4(), + Port: 80, + }, + RemoteAddr: &netstat.SockAddr{ + IP: net.IPv4(127, 0, 0, 1).To4(), + Port: 80, + }, + State: 1, + }, + { + RemoteAddr: &netstat.SockAddr{ + IP: net.IPv4(127, 0, 0, 1).To4(), + Port: 443, + }, + State: 1, + }, + }, + result: &SocketStats{ + totalActiveSockets: 2, + socketByState: map[string]int{ + "ESTABLISHED": 2, + }, + socketByRemoteAddr: map[string]int{ + "127.0.0.1:80": 1, + "127.0.0.1:443": 1, + }, + }, + wantErr: false, + }, + { + name: "nil", + data: nil, + result: &SocketStats{ + totalActiveSockets: 0, + socketByState: map[string]int{}, + socketByRemoteAddr: map[string]int{}, + }, + wantErr: false, + }, + } + + for _, tt := range tests { + retData := processSocks(tt.data) + assert.NotNil(t, retData) + assert.Equal(t, tt.result.totalActiveSockets, retData.totalActiveSockets) + assert.Equal(t, tt.result.socketByState, retData.socketByState) + assert.Equal(t, tt.result.socketByRemoteAddr, retData.socketByRemoteAddr) + } +} + +func TestReadSockStatsError(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + opts := &NetstatOpts{ + CuratedKeys: false, + AddZeroVal: false, + ListenSock: false, + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ns := NewMockNetstatInterface(ctrl) + nr := NewNetstatReader(opts, ns) + InitalizeMetricsForTesting(ctrl) + + testmetric := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "testmetric", + Help: "testmetric", + }) + + MockGaugeVec.EXPECT().WithLabelValues(gomock.Any()).Return(testmetric).AnyTimes() + ns.EXPECT().UDPSocks(gomock.Any()).Return(nil, fmt.Errorf("Random error")).Times(1) + + assert.NotNil(t, nr) + err := nr.readSockStats() + assert.NotNil(t, err, "Expected error but got nil tetsname") +} + +func TestReadSockStats(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + opts := &NetstatOpts{ + CuratedKeys: false, + AddZeroVal: false, + ListenSock: false, + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ns := NewMockNetstatInterface(ctrl) + nr := NewNetstatReader(opts, ns) + InitalizeMetricsForTesting(ctrl) + + testmetric := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "testmetric", + Help: "testmetric", + }) + + MockGaugeVec.EXPECT().WithLabelValues(gomock.Any()).Return(testmetric).AnyTimes() + ns.EXPECT().UDPSocks(gomock.Any()).Return([]netstat.SockTabEntry{ + { + LocalAddr: &netstat.SockAddr{ + IP: net.IPv4(127, 0, 0, 1).To4(), + Port: 80, + }, + RemoteAddr: &netstat.SockAddr{ + IP: net.IPv4(127, 0, 0, 1).To4(), + Port: 80, + }, + State: 1, + UID: 0, + }, + }, nil).Times(1) + + ns.EXPECT().TCPSocks(gomock.Any()).Return([]netstat.SockTabEntry{ + { + LocalAddr: &netstat.SockAddr{ + IP: net.IPv4(127, 0, 0, 1).To4(), + Port: 80, + }, + RemoteAddr: &netstat.SockAddr{ + IP: net.IPv4(127, 0, 0, 1).To4(), + Port: 80, + }, + 
State: 1, + UID: 0, + }, + }, nil).Times(1) + + assert.NotNil(t, nr) + err := nr.readSockStats() + assert.Nil(t, err, "Expected nil but got err tetsname") + assert.NotNil(t, nr.connStats, "Expected data got nil tetsname") + assert.Equal(t, nr.connStats.UdpSockets.totalActiveSockets, 1, "Read values are not equal to expected tetsname") + assert.Equal(t, nr.connStats.UdpSockets.socketByState["ESTABLISHED"], 1, "Read values are not equal to expected tetsname") + + assert.Equal(t, nr.connStats.TcpSockets.totalActiveSockets, 1, "Read values are not equal to expected tetsname") + assert.Equal(t, nr.connStats.TcpSockets.socketByState["ESTABLISHED"], 1, "Read values are not equal to expected tetsname") + + nr.updateMetrics() +} diff --git a/pkg/plugin/linuxutil/testdata/correct-netstat b/pkg/plugin/linuxutil/testdata/correct-netstat new file mode 100644 index 000000000..df88b1329 --- /dev/null +++ b/pkg/plugin/linuxutil/testdata/correct-netstat @@ -0,0 +1,4 @@ +TcpExt: SyncookiesSent SyncookiesRecv SyncookiesFailed EmbryonicRsts PruneCalled RcvPruned OfoPruned OutOfWindowIcmps LockDroppedIcmps ArpFilter TW TWRecycled TWKilled PAWSActive PAWSEstab DelayedACKs DelayedACKLocked DelayedACKLost ListenOverflows ListenDrops TCPHPHits TCPPureAcks TCPHPAcks TCPRenoRecovery TCPSackRecovery TCPSACKReneging TCPSACKReorder TCPRenoReorder TCPTSReorder TCPFullUndo TCPPartialUndo TCPDSACKUndo TCPLossUndo TCPLostRetransmit TCPRenoFailures TCPSackFailures TCPLossFailures TCPFastRetrans TCPSlowStartRetrans TCPTimeouts TCPLossProbes TCPLossProbeRecovery TCPRenoRecoveryFail TCPSackRecoveryFail TCPRcvCollapsed TCPBacklogCoalesce TCPDSACKOldSent TCPDSACKOfoSent TCPDSACKRecv TCPDSACKOfoRecv TCPAbortOnData TCPAbortOnClose TCPAbortOnMemory TCPAbortOnTimeout TCPAbortOnLinger TCPAbortFailed TCPMemoryPressures TCPMemoryPressuresChrono TCPSACKDiscard TCPDSACKIgnoredOld TCPDSACKIgnoredNoUndo TCPSpuriousRTOs TCPMD5NotFound TCPMD5Unexpected TCPMD5Failure TCPSackShifted TCPSackMerged TCPSackShiftFallback TCPBacklogDrop PFMemallocDrop TCPMinTTLDrop TCPDeferAcceptDrop IPReversePathFilter TCPTimeWaitOverflow TCPReqQFullDoCookies TCPReqQFullDrop TCPRetransFail TCPRcvCoalesce TCPOFOQueue TCPOFODrop TCPOFOMerge TCPChallengeACK TCPSYNChallenge TCPFastOpenActive TCPFastOpenActiveFail TCPFastOpenPassive TCPFastOpenPassiveFail TCPFastOpenListenOverflow TCPFastOpenCookieReqd TCPFastOpenBlackhole TCPSpuriousRtxHostQueues BusyPollRxPackets TCPAutoCorking TCPFromZeroWindowAdv TCPToZeroWindowAdv TCPWantZeroWindowAdv TCPSynRetrans TCPOrigDataSent TCPHystartTrainDetect TCPHystartTrainCwnd TCPHystartDelayDetect TCPHystartDelayCwnd TCPACKSkippedSynRecv TCPACKSkippedPAWS TCPACKSkippedSeq TCPACKSkippedFinWait2 TCPACKSkippedTimeWait TCPACKSkippedChallenge TCPWinProbe TCPKeepAlive TCPMTUPFail TCPMTUPSuccess TCPDelivered TCPDeliveredCE TCPAckCompressed TCPZeroWindowDrop TCPRcvQDrop TCPWqueueTooBig TCPFastOpenPassiveAltKey TcpTimeoutRehash TcpDuplicateDataRehash TCPDSACKRecvSegs TCPDSACKIgnoredDubious +TcpExt: 0 0 0 0 0 0 0 0 0 0 1685 0 0 0 6 30138 9 167 0 0 2057563 96012 704069 0 21 0 401 0 1 5 1 1 4 508 0 4 0 21 0 2091 277 2 0 0 0 26677 171 1 58 0 1353 148 0 0 0 0 0 0 25 0 6 0 0 0 0 1 19 312 0 0 0 0 0 0 0 0 0 1400786 13254 0 1 1 1 0 0 0 0 0 0 0 1 0 2805 5 5 492 1338 883243 265 8029 3 257 0 0 1 0 0 0 0 1 0 0 883519 0 9082 0 0 0 0 2091 0 33 25 +IpExt: InNoRoutes InTruncatedPkts InMcastPkts OutMcastPkts InBcastPkts OutBcastPkts InOctets OutOctets InMcastOctets OutMcastOctets InBcastOctets OutBcastOctets InCsumErrors InNoECTPkts InECT1Pkts InECT0Pkts 
InCEPkts ReasmOverlaps +IpExt: 0 0 0 0 18965 0 7291961352 4187876377 0 0 885028 0 0 3859769 0 0 0 0 diff --git a/pkg/plugin/linuxutil/testdata/somecorrect-netstat1 b/pkg/plugin/linuxutil/testdata/somecorrect-netstat1 new file mode 100644 index 000000000..2ad878f6d --- /dev/null +++ b/pkg/plugin/linuxutil/testdata/somecorrect-netstat1 @@ -0,0 +1,10 @@ +TcpExt: SyncookiesSent +TcpExt: 12 0 0 0 0 0 0 0 +IpExt: InNoRoutes InTruncatedPkts +IpExt: 18965 0 +MPTcpExt: Test1 +MPTcpExt: 10 +Test +Test +Test2: 1 +Test2 diff --git a/pkg/plugin/linuxutil/testdata/wrong-netstat b/pkg/plugin/linuxutil/testdata/wrong-netstat new file mode 100644 index 000000000..919e3b52a --- /dev/null +++ b/pkg/plugin/linuxutil/testdata/wrong-netstat @@ -0,0 +1 @@ +Test: aa aa aa diff --git a/pkg/plugin/linuxutil/types.go b/pkg/plugin/linuxutil/types.go new file mode 100644 index 000000000..f50d0a0a8 --- /dev/null +++ b/pkg/plugin/linuxutil/types.go @@ -0,0 +1,121 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package linuxutil + +import ( + kcfg "github.com/microsoft/retina/pkg/config" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/plugin/api" + "github.com/cakturk/go-netstat/netstat" +) + +const ( + Name api.PluginName = "linuxutil" +) + +type linuxUtil struct { + cfg *kcfg.Config + l *log.ZapLogger + isRunning bool +} + +var netstatCuratedKeys = map[string]struct{}{ + "ListenDrops": {}, + "LockDroppedIcmps": {}, + "PFMemallocDrop": {}, + "TCPBacklogDrop": {}, + "TCPDeferAcceptDrop": {}, + "TCPMinTTLDrop": {}, + "TCPRcvQDrop": {}, + "TCPReqQFullDrop": {}, + "TCPZeroWindowDrop": {}, + "InCsumErrors": {}, + "DataCsumErr": {}, + "AddAddrDrop": {}, + "RmAddrDrop": {}, +} + +type ConnectionStats struct { + // https://github.com/ecki/net-tools/blob/master/statistics.c#L206 + TcpExt map[string]uint64 `json:"tcp_ext"` + IpExt map[string]uint64 `json:"ip_ext"` + MPTcpExt map[string]uint64 `json:"mptcp_ext"` + // Socket stats + UdpSockets SocketStats `json:"udp_sockets"` + TcpSockets SocketStats `json:"tcp_sockets"` +} + +type IfaceStats struct { + Name string + // Inbound stats + RxBytes uint64 + RxPackets uint64 + RxErrs uint64 + RxDrop uint64 + RxFIFO uint64 + RxFrame uint64 + RxCompressed uint64 + RxMulticast uint64 + // Outbound stats + TxBytes uint64 + TxPackets uint64 + TxErrs uint64 + TxDrop uint64 + TxFIFO uint64 + TxColls uint64 + TxCarrier uint64 + TxCompressed uint64 +} + +type SocketStats struct { + totalActiveSockets int + // count of sockets opened by state + socketByState map[string]int + // count of sockets opened by remote address + socketByRemoteAddr map[string]int +} + +type NetstatOpts struct { + // when true only includes curated list of keys + CuratedKeys bool + + // when true will include all keys with value 0 + AddZeroVal bool + + // get only listening sockets + ListenSock bool +} + +type EthtoolStats struct { + // Stats by interface name and stat name + stats map[string]map[string]uint64 +} + +type EthtoolOpts struct { + // when true will only include keys with err or drop in its name + errOrDropKeysOnly bool + + // when true will include all keys with value 0 + addZeroVal bool +} + +type EthtoolInterface interface { + Stats(intf string) (map[string]uint64, error) + Close() +} + +type NetstatInterface interface { + UDPSocks(accept netstat.AcceptFn) ([]netstat.SockTabEntry, error) + TCPSocks(accept netstat.AcceptFn) ([]netstat.SockTabEntry, error) +} + +type Netstat struct{} + +func (n *Netstat) UDPSocks(accept netstat.AcceptFn) 
([]netstat.SockTabEntry, error) { + return netstat.UDPSocks(accept) +} + +func (n *Netstat) TCPSocks(accept netstat.AcceptFn) ([]netstat.SockTabEntry, error) { + return netstat.TCPSocks(accept) +} diff --git a/pkg/plugin/mockplugin/mockplugin.go b/pkg/plugin/mockplugin/mockplugin.go new file mode 100644 index 000000000..f4bc3de7f --- /dev/null +++ b/pkg/plugin/mockplugin/mockplugin.go @@ -0,0 +1,78 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package mockplugin + +import ( + "context" + "fmt" + + kcfg "github.com/microsoft/retina/pkg/config" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/plugin/api" + hubblev1 "github.com/cilium/cilium/pkg/hubble/api/v1" +) + +const ( + Name api.PluginName = "mockplugin" +) + +const ( + initialize = iota + 1 + start + stop +) + +type MockPlugin struct { + cfg *kcfg.Config + state int + l *log.ZapLogger +} + +// New creates a mock plugin. +func New(cfg *kcfg.Config) api.Plugin { + return &MockPlugin{ + cfg: cfg, + } +} + +func (mp *MockPlugin) Name() string { + return "mockplugin" +} + +func (mp *MockPlugin) Generate(ctx context.Context) error { + return nil +} + +func (mp *MockPlugin) Compile(ctx context.Context) error { + return nil +} + +func (mp *MockPlugin) Init() error { + mp.state = initialize + return nil +} + +func (mp *MockPlugin) Start(ctx context.Context) error { + if mp.state != initialize { + return fmt.Errorf("plugin not initialized") + } + mp.state = start + return nil +} + +func (mp *MockPlugin) Stop() error { + if mp.state != start { + return nil + } + mp.state = stop + return nil +} + +func (mp *MockPlugin) SetupChannel(ch chan *hubblev1.Event) error { + return nil +} + +func NewPluginFn(l *log.ZapLogger) api.Plugin { + return &MockPlugin{l: l} +} diff --git a/pkg/plugin/packetforward/_cprog/packetforward.c b/pkg/plugin/packetforward/_cprog/packetforward.c new file mode 100644 index 000000000..24aa9a49d --- /dev/null +++ b/pkg/plugin/packetforward/_cprog/packetforward.c @@ -0,0 +1,57 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +#include "vmlinux.h" +#include "bpf_helpers.h" +#include "packetforward.h" + +char __license[] SEC("license") = "Dual MIT/GPL"; + +// Ref: https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/if_packet.h#L26 +#define PACKET_HOST 0 // Incomming packets +#define PACKET_OUTGOING 4 // Outgoing packets + +struct metric +{ + __u64 count; + __u64 bytes; +}; + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_HASH); + __uint(max_entries, 2); + __type(key, key_type); + __type(value, struct metric); +} packets SEC(".maps"); + +SEC("socket1") +int socket_filter(struct __sk_buff *skb) { + key_type mapKey; //0->incoming; 1->outgoing + + if (skb->pkt_type == PACKET_HOST) { + mapKey = INGRESS_KEY; + } else if (skb->pkt_type == PACKET_OUTGOING) { + mapKey = EGRESS_KEY; + } else { + // Ignore multicast/broadcast. + return 0; + } + + // Get the packet size (in bytes) including headers. + u64 packetSize = skb->len; + + struct metric *curMetric = bpf_map_lookup_elem(&packets, &mapKey); + if (!curMetric) { + // Per CPU hashmap, hence no race condition here. + struct metric initMetric; + initMetric.count = 1; + initMetric.bytes = packetSize; + bpf_map_update_elem(&packets, &mapKey, &initMetric, BPF_ANY); + } else { + // Atomic operation. 
+ curMetric->count++; + curMetric->bytes += packetSize; + } + + return 0; +} diff --git a/pkg/plugin/packetforward/_cprog/packetforward.h b/pkg/plugin/packetforward/_cprog/packetforward.h new file mode 100644 index 000000000..7ec809d80 --- /dev/null +++ b/pkg/plugin/packetforward/_cprog/packetforward.h @@ -0,0 +1,10 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +#include "vmlinux.h" + +typedef enum +{ + INGRESS_KEY = 0, + EGRESS_KEY, +} key_type; diff --git a/pkg/plugin/packetforward/_cprog/placeholder.go b/pkg/plugin/packetforward/_cprog/placeholder.go new file mode 100644 index 000000000..fd0225add --- /dev/null +++ b/pkg/plugin/packetforward/_cprog/placeholder.go @@ -0,0 +1,3 @@ +package cprog //nolint:all + +// This file is a placeholder to make Go include this directory when vendoring. diff --git a/pkg/plugin/packetforward/mocks/mock_types.go b/pkg/plugin/packetforward/mocks/mock_types.go new file mode 100644 index 000000000..d3fc6324a --- /dev/null +++ b/pkg/plugin/packetforward/mocks/mock_types.go @@ -0,0 +1,62 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/microsoft/retina/pkg/plugin/packetforward (interfaces: IMap) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockIMap is a mock of IMap interface. +type MockIMap struct { + ctrl *gomock.Controller + recorder *MockIMapMockRecorder +} + +// MockIMapMockRecorder is the mock recorder for MockIMap. +type MockIMapMockRecorder struct { + mock *MockIMap +} + +// NewMockIMap creates a new mock instance. +func NewMockIMap(ctrl *gomock.Controller) *MockIMap { + mock := &MockIMap{ctrl: ctrl} + mock.recorder = &MockIMapMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIMap) EXPECT() *MockIMapMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockIMap) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockIMapMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockIMap)(nil).Close)) +} + +// Lookup mocks base method. +func (m *MockIMap) Lookup(arg0, arg1 interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Lookup", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// Lookup indicates an expected call of Lookup. +func (mr *MockIMapMockRecorder) Lookup(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Lookup", reflect.TypeOf((*MockIMap)(nil).Lookup), arg0, arg1) +} diff --git a/pkg/plugin/packetforward/packetforward_bpfel_arm64.go b/pkg/plugin/packetforward/packetforward_bpfel_arm64.go new file mode 100644 index 000000000..ff0450657 --- /dev/null +++ b/pkg/plugin/packetforward/packetforward_bpfel_arm64.go @@ -0,0 +1,126 @@ +// Code generated by bpf2go; DO NOT EDIT. +//go:build arm64 + +package packetforward + +import ( + "bytes" + _ "embed" + "fmt" + "io" + + "github.com/cilium/ebpf" +) + +type packetforwardKeyType uint32 + +type packetforwardMetric struct { + Count uint64 + Bytes uint64 +} + +// loadPacketforward returns the embedded CollectionSpec for packetforward. 
+func loadPacketforward() (*ebpf.CollectionSpec, error) { + reader := bytes.NewReader(_PacketforwardBytes) + spec, err := ebpf.LoadCollectionSpecFromReader(reader) + if err != nil { + return nil, fmt.Errorf("can't load packetforward: %w", err) + } + + return spec, err +} + +// loadPacketforwardObjects loads packetforward and converts it into a struct. +// +// The following types are suitable as obj argument: +// +// *packetforwardObjects +// *packetforwardPrograms +// *packetforwardMaps +// +// See ebpf.CollectionSpec.LoadAndAssign documentation for details. +func loadPacketforwardObjects(obj interface{}, opts *ebpf.CollectionOptions) error { + spec, err := loadPacketforward() + if err != nil { + return err + } + + return spec.LoadAndAssign(obj, opts) +} + +// packetforwardSpecs contains maps and programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type packetforwardSpecs struct { + packetforwardProgramSpecs + packetforwardMapSpecs +} + +// packetforwardSpecs contains programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type packetforwardProgramSpecs struct { + SocketFilter *ebpf.ProgramSpec `ebpf:"socket_filter"` +} + +// packetforwardMapSpecs contains maps before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type packetforwardMapSpecs struct { + Packets *ebpf.MapSpec `ebpf:"packets"` +} + +// packetforwardObjects contains all objects after they have been loaded into the kernel. +// +// It can be passed to loadPacketforwardObjects or ebpf.CollectionSpec.LoadAndAssign. +type packetforwardObjects struct { + packetforwardPrograms + packetforwardMaps +} + +func (o *packetforwardObjects) Close() error { + return _PacketforwardClose( + &o.packetforwardPrograms, + &o.packetforwardMaps, + ) +} + +// packetforwardMaps contains all maps after they have been loaded into the kernel. +// +// It can be passed to loadPacketforwardObjects or ebpf.CollectionSpec.LoadAndAssign. +type packetforwardMaps struct { + Packets *ebpf.Map `ebpf:"packets"` +} + +func (m *packetforwardMaps) Close() error { + return _PacketforwardClose( + m.Packets, + ) +} + +// packetforwardPrograms contains all programs after they have been loaded into the kernel. +// +// It can be passed to loadPacketforwardObjects or ebpf.CollectionSpec.LoadAndAssign. +type packetforwardPrograms struct { + SocketFilter *ebpf.Program `ebpf:"socket_filter"` +} + +func (p *packetforwardPrograms) Close() error { + return _PacketforwardClose( + p.SocketFilter, + ) +} + +func _PacketforwardClose(closers ...io.Closer) error { + for _, closer := range closers { + if err := closer.Close(); err != nil { + return err + } + } + return nil +} + +// Do not access this directly. +// +//go:embed packetforward_bpfel_arm64.o +var _PacketforwardBytes []byte diff --git a/pkg/plugin/packetforward/packetforward_bpfel_arm64.o b/pkg/plugin/packetforward/packetforward_bpfel_arm64.o new file mode 100644 index 000000000..3fc3d90da Binary files /dev/null and b/pkg/plugin/packetforward/packetforward_bpfel_arm64.o differ diff --git a/pkg/plugin/packetforward/packetforward_bpfel_x86.go b/pkg/plugin/packetforward/packetforward_bpfel_x86.go new file mode 100644 index 000000000..9d4751502 --- /dev/null +++ b/pkg/plugin/packetforward/packetforward_bpfel_x86.go @@ -0,0 +1,126 @@ +// Code generated by bpf2go; DO NOT EDIT. 
+//go:build 386 || amd64 + +package packetforward + +import ( + "bytes" + _ "embed" + "fmt" + "io" + + "github.com/cilium/ebpf" +) + +type packetforwardKeyType uint32 + +type packetforwardMetric struct { + Count uint64 + Bytes uint64 +} + +// loadPacketforward returns the embedded CollectionSpec for packetforward. +func loadPacketforward() (*ebpf.CollectionSpec, error) { + reader := bytes.NewReader(_PacketforwardBytes) + spec, err := ebpf.LoadCollectionSpecFromReader(reader) + if err != nil { + return nil, fmt.Errorf("can't load packetforward: %w", err) + } + + return spec, err +} + +// loadPacketforwardObjects loads packetforward and converts it into a struct. +// +// The following types are suitable as obj argument: +// +// *packetforwardObjects +// *packetforwardPrograms +// *packetforwardMaps +// +// See ebpf.CollectionSpec.LoadAndAssign documentation for details. +func loadPacketforwardObjects(obj interface{}, opts *ebpf.CollectionOptions) error { + spec, err := loadPacketforward() + if err != nil { + return err + } + + return spec.LoadAndAssign(obj, opts) +} + +// packetforwardSpecs contains maps and programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type packetforwardSpecs struct { + packetforwardProgramSpecs + packetforwardMapSpecs +} + +// packetforwardSpecs contains programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type packetforwardProgramSpecs struct { + SocketFilter *ebpf.ProgramSpec `ebpf:"socket_filter"` +} + +// packetforwardMapSpecs contains maps before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type packetforwardMapSpecs struct { + Packets *ebpf.MapSpec `ebpf:"packets"` +} + +// packetforwardObjects contains all objects after they have been loaded into the kernel. +// +// It can be passed to loadPacketforwardObjects or ebpf.CollectionSpec.LoadAndAssign. +type packetforwardObjects struct { + packetforwardPrograms + packetforwardMaps +} + +func (o *packetforwardObjects) Close() error { + return _PacketforwardClose( + &o.packetforwardPrograms, + &o.packetforwardMaps, + ) +} + +// packetforwardMaps contains all maps after they have been loaded into the kernel. +// +// It can be passed to loadPacketforwardObjects or ebpf.CollectionSpec.LoadAndAssign. +type packetforwardMaps struct { + Packets *ebpf.Map `ebpf:"packets"` +} + +func (m *packetforwardMaps) Close() error { + return _PacketforwardClose( + m.Packets, + ) +} + +// packetforwardPrograms contains all programs after they have been loaded into the kernel. +// +// It can be passed to loadPacketforwardObjects or ebpf.CollectionSpec.LoadAndAssign. +type packetforwardPrograms struct { + SocketFilter *ebpf.Program `ebpf:"socket_filter"` +} + +func (p *packetforwardPrograms) Close() error { + return _PacketforwardClose( + p.SocketFilter, + ) +} + +func _PacketforwardClose(closers ...io.Closer) error { + for _, closer := range closers { + if err := closer.Close(); err != nil { + return err + } + } + return nil +} + +// Do not access this directly. 
+// +//go:embed packetforward_bpfel_x86.o +var _PacketforwardBytes []byte diff --git a/pkg/plugin/packetforward/packetforward_bpfel_x86.o b/pkg/plugin/packetforward/packetforward_bpfel_x86.o new file mode 100644 index 000000000..3fc3d90da Binary files /dev/null and b/pkg/plugin/packetforward/packetforward_bpfel_x86.o differ diff --git a/pkg/plugin/packetforward/packetforward_linux.go b/pkg/plugin/packetforward/packetforward_linux.go new file mode 100644 index 000000000..dda584158 --- /dev/null +++ b/pkg/plugin/packetforward/packetforward_linux.go @@ -0,0 +1,229 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package packetforward + +import ( + "context" + "errors" + "fmt" + "path" + "runtime" + "syscall" + "time" + + kcfg "github.com/microsoft/retina/pkg/config" + + hubblev1 "github.com/cilium/cilium/pkg/hubble/api/v1" + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/rlimit" + "github.com/microsoft/retina/pkg/loader" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/metrics" + "github.com/microsoft/retina/pkg/plugin/api" + "github.com/microsoft/retina/pkg/utils" + "go.uber.org/zap" + + _ "github.com/microsoft/retina/pkg/plugin/packetforward/_cprog" // nolint +) + +//go:generate go run github.com/cilium/ebpf/cmd/bpf2go@master -cc clang-14 -cflags "-g -O2 -Wall -D__TARGET_ARCH_${GOARCH} -Wall" -target ${GOARCH} -type metric packetforward ./_cprog/packetforward.c -- -I../lib/_${GOARCH} -I../lib/common/libbpf/_src + +// New creates a new packetforward plugin. +func New(cfg *kcfg.Config) api.Plugin { + return &packetForward{ + cfg: cfg, + l: log.Logger().Named(string(Name)), + } +} + +// Helper functions. + +// absPath returns the absolute path to the directory where this file resides. +func absPath() (string, error) { + _, filename, _, ok := runtime.Caller(0) + if !ok { + return "", errors.New("failed to determine current file path") + } + dir := path.Dir(filename) + return dir, nil +} + +func processMapValue(m IMap, key uint32) (uint64, uint64, error) { + /* Sample of values in hashmap m. + "values": [{ + "cpu": 0, + "value": { + "count": 2292, + "bytes": 202689 + } + },{ + "cpu": 1, + "value": { + "count": 1053, + "bytes": 89579 + } + }] + */ + var totalCount, totalBytes uint64 + var err error + values := []packetforwardMetric{} //nolint:typecheck + if err = m.Lookup(key, &values); err == nil { + for _, v := range values { + totalCount = totalCount + v.Count + totalBytes = totalBytes + v.Bytes + } + } + return totalCount, totalBytes, err +} + +func updateMetrics(data *PacketForwardData) { + // Add the packet count metrics. + metrics.ForwardCounter.WithLabelValues(ingressLabel).Set(float64(data.ingressCountTotal)) + metrics.ForwardCounter.WithLabelValues(egressLabel).Set(float64(data.egressCountTotal)) + + // Add the packet bytes metrics. + metrics.ForwardBytesCounter.WithLabelValues(ingressLabel).Set(float64(data.ingressBytesTotal)) + metrics.ForwardBytesCounter.WithLabelValues(egressLabel).Set(float64(data.egressBytesTotal)) +} + +// Plugin API implementation for packet forward. +// Ref: github.com/microsoft/retina/pkg/plugin/api + +func (p *packetForward) Name() string { + return string(Name) +} + +func (p *packetForward) Generate(ctx context.Context) error { + // Use this funtion to parse p and generate header files under cprog. 
+ // Example: https://github.com/anubhabMajumdar/Retina/blob/c4bc06e7f922124f92536ffb5312bada5c2dfe99/pkg/plugin/custom/packetforward/packetforward.go#L77 + p.l.Info("Packet forwarding metric header generated") + return nil +} + +func (p *packetForward) Compile(ctx context.Context) error { + // Get the absolute path to this file during runtime. + dir, err := absPath() + if err != nil { + return err + } + + arch := runtime.GOARCH + + bpfSourceFile := fmt.Sprintf("%s/%s/%s", dir, bpfSourceDir, bpfSourceFileName) + bpfOutputFile := fmt.Sprintf("%s/%s", dir, bpfObjectFileName) + + includeDir := fmt.Sprintf("-I%s/../lib/_%s", dir, arch) + libbpfDir := fmt.Sprintf("-I%s/../lib/common/libbpf/_src", dir) + + targetArch := "-D__TARGET_ARCH_x86" + if arch == "arm64" { + targetArch = "-D__TARGET_ARCH_arm64" + } + // Keep target as bpf, otherwise clang compilation yields bpf object that elf reader cannot load. + err = loader.CompileEbpf(ctx, "-target", "bpf", "-Wall", targetArch, "-g", "-O2", "-c", bpfSourceFile, "-o", bpfOutputFile, includeDir, libbpfDir) + if err != nil { + return err + } + p.l.Info("Packet forwarding metric compiled") + return nil +} + +func (p *packetForward) Init() error { + if err := rlimit.RemoveMemlock(); err != nil { + p.l.Error("RemoveMemLock failed:%w", zap.Error(err)) + return err + } + + // Get the absolute path to this file during runtime. + dir, err := absPath() + if err != nil { + return err + } + + bpfOutputFile := fmt.Sprintf("%s/%s", dir, bpfObjectFileName) + + objs := &packetforwardObjects{} //nolint:typecheck + spec, err := ebpf.LoadCollectionSpec(bpfOutputFile) + if err != nil { + p.l.Error("Error loading collection specs: %w", zap.Error(err)) + return err + } + + if err := spec.LoadAndAssign(objs, nil); err != nil { + p.l.Error("Error assigning specs: %w", zap.Error(err)) + return err + } + + p.hashmapData = objs.Packets + + // The steps to attach ebpf to socket is documented in cilium/ebpf + // https://github.com/cilium/ebpf/blob/master/example_sock_elf_test.go#L85. + // MIT license. 
+ p.sock, err = utils.OpenRawSocket(socketIndex) + if err != nil { + p.l.Error("Error opening socket %d: %w", zap.Int("eth", socketIndex), zap.Error(err)) + return err + } + + if err := syscall.SetsockoptInt(p.sock, syscall.SOL_SOCKET, PacketForwardSocketAttach, objs.SocketFilter.FD()); err != nil { + p.l.Error("Error attaching packet forward socket filter: %w", zap.Error(err)) + return err + } + + p.l.Info("Packet forwarding metric initialized") + return nil +} + +func (p *packetForward) Start(ctx context.Context) error { + p.l.Info("Start collecting packet forward metrics") + p.isRunning = true + return p.run(ctx) +} + +func (p *packetForward) Stop() error { + if !p.isRunning { + return nil + } + if p.hashmapData != nil { + p.hashmapData.Close() + } + if p.sock != 0 { + syscall.Close(p.sock) + } + p.l.Info("Exiting forwarding metrics") + p.isRunning = false + return nil +} + +func (p *packetForward) SetupChannel(ch chan *hubblev1.Event) error { + p.l.Warn("SetupChannel is not supported by plugin", zap.String("plugin", string(Name))) + return nil +} + +func (p *packetForward) run(ctx context.Context) error { + var err error + ticker := time.NewTicker(p.cfg.MetricsInterval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + p.l.Info("Context is done, packetforward will stop running") + return nil + case <-ticker.C: + data := &PacketForwardData{} + data.ingressCountTotal, data.ingressBytesTotal, err = processMapValue(p.hashmapData, ingressKey) + if err != nil { + p.l.Error("Error reading hash map", zap.Error(err)) + continue + } + data.egressCountTotal, data.egressBytesTotal, err = processMapValue(p.hashmapData, egressKey) + if err != nil { + p.l.Error("Error reading hash map", zap.Error(err)) + continue + } + p.l.Debug("Received PacketForward data", zap.String("Data", data.String())) + updateMetrics(data) + } + } +} diff --git a/pkg/plugin/packetforward/packetforward_linux_test.go b/pkg/plugin/packetforward/packetforward_linux_test.go new file mode 100644 index 000000000..8d4f141a4 --- /dev/null +++ b/pkg/plugin/packetforward/packetforward_linux_test.go @@ -0,0 +1,280 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
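Init above attaches the compiled socket filter to a raw socket with setsockopt; the constant PacketForwardSocketAttach (50) matches the Linux SO_ATTACH_BPF socket option value. A hedged sketch of that attach step in isolation, assuming the socket and the loaded program already exist:

```go
package main

import (
	"syscall"

	"github.com/cilium/ebpf"
)

// soAttachBPF mirrors PacketForwardSocketAttach; on Linux, SO_ATTACH_BPF is 50.
const soAttachBPF = 50

// attachSocketFilter hands the loaded program's FD to the kernel so the
// filter runs on every packet seen by sock.
func attachSocketFilter(sock int, prog *ebpf.Program) error {
	return syscall.SetsockoptInt(sock, syscall.SOL_SOCKET, soAttachBPF, prog.FD())
}

func main() {}
```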
+ +//go:build unit +// +build unit + +package packetforward + +import ( + "context" + "errors" + "fmt" + "os" + "testing" + "time" + + kcfg "github.com/microsoft/retina/pkg/config" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/metrics" + mocks "github.com/microsoft/retina/pkg/plugin/packetforward/mocks" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" +) + +var ( + cfgPodLevelEnabled = &kcfg.Config{ + MetricsInterval: 1 * time.Second, + EnablePodLevel: true, + } + cfgPodLevelDisabled = &kcfg.Config{ + MetricsInterval: 1 * time.Second, + EnablePodLevel: false, + } +) + +func TestProcessMapValue_NoErr(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + m := mocks.NewMockIMap(ctrl) + m.EXPECT().Lookup(uint32(0), gomock.Any()).Return(nil) + _, _, err := processMapValue(m, 0) + if err != nil { + t.Fatalf("Did not expect error %v", err) + } +} + +func TestProcessMapValue_ReturnErr(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + m := mocks.NewMockIMap(ctrl) + m.EXPECT().Lookup(uint32(0), gomock.Any()).Return(errors.New("Error")) + _, _, err := processMapValue(m, 0) + if err == nil { + t.Fatalf("Expected function to return error") + } +} + +func TestProcessMapValue_ReturnData(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + m := mocks.NewMockIMap(ctrl) + + v2 := []packetforwardMetric{ + {Count: 3, Bytes: 10}, + {Count: 7, Bytes: 5}, + } + m.EXPECT().Lookup(uint32(0), gomock.Any()).SetArg(1, v2).Return(nil) + totalCount, totalBytes, _ := processMapValue(m, 0) + + if totalCount != uint64(10) { + t.Fatalf("Expected 10, got %v", totalCount) + } + if totalBytes != uint64(15) { + t.Fatalf("Expected 15, got %v", totalBytes) + } +} + +func TestStop(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + p := &packetForward{ + cfg: cfgPodLevelEnabled, + l: log.Logger().Named(string(Name)), + } + err := p.Stop() + if err != nil { + t.Fatalf("Expected no error") + } + if p.isRunning { + t.Fatalf("Expected isRunning to be false") + } + + p.isRunning = true + err = p.Stop() + if err != nil { + t.Fatalf("Expected no error") + } + if p.isRunning { + t.Fatalf("Expected isRunning to be false") + } +} + +func TestStop_NonNilMap(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockedMap := mocks.NewMockIMap(ctrl) + mockedMap.EXPECT().Close().Return(errors.New("Error")).MinTimes(1) + p := &packetForward{ + l: log.Logger().Named(string(Name)), + hashmapData: mockedMap, + } + err := p.Stop() + if err != nil { + t.Fatalf("Expected no error") + } + if p.isRunning { + t.Fatalf("Expected isRunning to be false") + } + + p.isRunning = true + err = p.Stop() + if err != nil { + t.Fatalf("Expected no error") + } + if p.isRunning { + t.Fatalf("Expected isRunning to be false") + } +} + +func TestCompile(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + p := &packetForward{ + l: log.Logger().Named(string(Name)), + } + dir, _ := absPath() + expectedOutputFile := fmt.Sprintf("%s/%s", dir, bpfObjectFileName) + + err := os.Remove(expectedOutputFile) + if err != nil && !errors.Is(err, os.ErrNotExist) { + t.Fatalf("Expected no error. Error: %+v", err) + } + + err = p.Compile(context.Background()) + if err != nil { + t.Fatalf("Expected no error. 
Error: %+v", err) + } + if _, err := os.Stat(expectedOutputFile); errors.Is(err, os.ErrNotExist) { + t.Fatalf("File %+v doesn't exist", expectedOutputFile) + } +} + +func TestShutdown(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + p := &packetForward{ + cfg: &kcfg.Config{ + MetricsInterval: 100 * time.Second, + EnablePodLevel: true, + }, + l: log.Logger().Named(string(Name)), + } + + ctx, cancel := context.WithCancel(context.Background()) + g, errctx := errgroup.WithContext(ctx) + + g.Go(func() error { + return p.Start(errctx) + }) + + time.Sleep(1 * time.Second) + cancel() + err := g.Wait() + require.NoError(t, err) +} + +func TestRun(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + metrics.InitializeMetrics() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockedMap := mocks.NewMockIMap(ctrl) + mockedMap.EXPECT().Lookup(gomock.Any(), gomock.Any()).Return(nil).MinTimes(1) + + p := &packetForward{ + cfg: cfgPodLevelEnabled, + l: log.Logger().Named(string(Name)), + hashmapData: mockedMap, + } + + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + g, errctx := errgroup.WithContext(ctx) + + // Create a ticker with a short interval for testing purpose + ticker := time.NewTicker(1 * time.Second) + + g.Go(func() error { + return p.run(errctx) + }) + + time.Sleep(2 * time.Second) + cancel() + ticker.Stop() + + err := g.Wait() + require.NoError(t, err) +} + +func TestRun_ReturnError_Ingress(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + metrics.InitializeMetrics() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockedMap := mocks.NewMockIMap(ctrl) + mockedMap.EXPECT().Lookup(uint32(0), gomock.Any()).Return(errors.New("Error")).MinTimes(1) + + p := &packetForward{ + cfg: cfgPodLevelEnabled, + l: log.Logger().Named(string(Name)), + hashmapData: mockedMap, + } + + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + g, errctx := errgroup.WithContext(ctx) + + // Create a ticker with a short interval for testing purpose + ticker := time.NewTicker(1 * time.Second) + + g.Go(func() error { + return p.run(errctx) + }) + + time.Sleep(2 * time.Second) + cancel() + ticker.Stop() + + err := g.Wait() + require.NoError(t, err) +} + +func TestRun_ReturnError_Egress(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + metrics.InitializeMetrics() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockedMap := mocks.NewMockIMap(ctrl) + mockedMap.EXPECT().Lookup(uint32(0), gomock.Any()).Return(nil).MinTimes(1) + mockedMap.EXPECT().Lookup(uint32(1), gomock.Any()).Return(errors.New("Error")).MinTimes(1) + + p := &packetForward{ + cfg: cfgPodLevelEnabled, + l: log.Logger().Named(string(Name)), + hashmapData: mockedMap, + } + + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + g, errctx := errgroup.WithContext(ctx) + + // Create a ticker with a short interval for testing purpose + ticker := time.NewTicker(1 * time.Second) + + g.Go(func() error { + return p.run(errctx) + }) + + time.Sleep(2 * time.Second) + cancel() + ticker.Stop() + + err := g.Wait() + require.NoError(t, err) +} diff --git a/pkg/plugin/packetforward/types_linux.go b/pkg/plugin/packetforward/types_linux.go new file mode 100644 index 000000000..68aec21b3 --- /dev/null +++ b/pkg/plugin/packetforward/types_linux.go @@ -0,0 +1,55 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+package packetforward + +import ( + "fmt" + + kcfg "github.com/microsoft/retina/pkg/config" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/plugin/api" +) + +const ( + PacketForwardSocketAttach int = 50 + Name api.PluginName = "packetforward" + socketIndex int = 0 + ingressKey uint32 = 0 + egressKey uint32 = 1 + ingressLabel string = "ingress" + egressLabel string = "egress" + bpfObjectFileName string = "packetforward_bpf.o" + bpfSourceDir string = "_cprog" + bpfSourceFileName string = "packetforward.c" + dynamicHeaderFileName string = "dynamic.h" +) + +// Interface to https://pkg.go.dev/github.com/cilium/ebpf#Map. +// Added for unit tests. +// +//go:generate go run github.com/golang/mock/mockgen@v1.6.0 -destination=mocks/mock_types.go -package=mocks . IMap +type IMap interface { + Lookup(key, valueOut interface{}) error + Close() error +} + +type packetForward struct { + cfg *kcfg.Config + l *log.ZapLogger + hashmapData IMap + sock int + isRunning bool +} + +type PacketForwardData struct { + ingressBytesTotal uint64 + ingressCountTotal uint64 + egressBytesTotal uint64 + egressCountTotal uint64 +} + +func (p PacketForwardData) String() string { + return fmt.Sprintf("IngressBytes:%d IngressPackets:%d EgressBytes:%d EgressPackets:%d", + p.ingressBytesTotal, p.ingressCountTotal, p.egressBytesTotal, p.egressCountTotal) +} diff --git a/pkg/plugin/packetparser/_cprog/dynamic.h b/pkg/plugin/packetparser/_cprog/dynamic.h new file mode 100644 index 000000000..80abbd931 --- /dev/null +++ b/pkg/plugin/packetparser/_cprog/dynamic.h @@ -0,0 +1,2 @@ +// Place holder header file that will be replaced by the actual header file during runtime +// DO NOT DELETE diff --git a/pkg/plugin/packetparser/_cprog/packetparser.c b/pkg/plugin/packetparser/_cprog/packetparser.c new file mode 100644 index 000000000..7e72cd8fd --- /dev/null +++ b/pkg/plugin/packetparser/_cprog/packetparser.c @@ -0,0 +1,232 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +#include "vmlinux.h" +#include "bpf_helpers.h" +#include "bpf_endian.h" +#include "packetparser.h" +#include "retina_filter.c" +#include "dynamic.h" + +char __license[] SEC("license") = "Dual MIT/GPL"; + + +struct tcpmetadata { + __u32 seq; // TCP sequence number + __u32 ack_num; // TCP ack number + // TCP flags. + __u16 syn; + __u16 ack; + __u16 fin; + __u16 rst; + __u16 psh; + __u16 urg; + __u32 tsval; // TCP timestamp value + __u32 tsecr; // TCP timestamp echo reply +}; + +struct packet +{ + // 5 tuple. + __u32 src_ip; + __u32 dst_ip; + __u16 src_port; + __u16 dst_port; + __u8 proto; + struct tcpmetadata tcp_metadata; // TCP metadata + direction dir; // 0 -> INGRESS, 1 -> EGRESS + __u64 ts; // timestamp in nanoseconds + __u64 bytes; // packet size in bytes +}; + +struct +{ + __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); + __uint(max_entries, 16384); +} packetparser_events SEC(".maps"); + +// Define const variables to avoid warnings. +const struct packet *unused __attribute__((unused)); + +/* + * Full credit to authors of pping_kern.c for the parse_tcp_ts function. + * https://github.com/xdp-project/bpf-examples/blob/bc9df640cb9e5a541a7425ca2e66174ae22a18e3/pping/pping_kern.c#L316 + * + * Parses the TSval and TSecr values from the TCP options field. If sucessful + * the TSval and TSecr values will be stored at tsval and tsecr (in network + * byte order). 
+ * Returns 0 if sucessful and -1 on failure + */ +static int parse_tcp_ts(struct tcphdr *tcph, void *data_end, __u32 *tsval, + __u32 *tsecr) +{ + int len = tcph->doff << 2; + void *opt_end = (void *)tcph + len; + __u8 *pos = (__u8 *)(tcph + 1); //Current pos in TCP options + __u8 i, opt; + volatile __u8 + opt_size; // Seems to ensure it's always read of from stack as u8 + + if (tcph + 1 > data_end || len <= sizeof(struct tcphdr)) + return -1; +#pragma unroll //temporary solution until we can identify why the non-unrolled loop gets stuck in an infinite loop + for (i = 0; i < MAX_TCP_OPTIONS; i++) { + if (pos + 1 > opt_end || pos + 1 > data_end) + return -1; + + opt = *pos; + if (opt == 0) // Reached end of TCP options + return -1; + + if (opt == 1) { // TCP NOP option - advance one byte + pos++; + continue; + } + + // Option > 1, should have option size + if (pos + 2 > opt_end || pos + 2 > data_end) + return -1; + opt_size = *(pos + 1); + if (opt_size < 2) // Stop parsing options if opt_size has an invalid value + return -1; + + // Option-kind is TCP timestap (yey!) + if (opt == 8 && opt_size == 10) { + if (pos + 10 > opt_end || pos + 10 > data_end) + return -1; + *tsval = bpf_ntohl(*(__u32 *)(pos + 2)); + *tsecr = bpf_ntohl(*(__u32 *)(pos + 6)); + return 0; + } + + // Some other TCP option - advance option-length bytes + pos += opt_size; + } + return -1; +} + +// Function to parse the packet and send it to the perf buffer. +static void parse(struct __sk_buff *skb, direction d) +{ + struct packet p; + __builtin_memset(&p, 0, sizeof(p)); + + // Get current time in nanoseconds. + p.ts = bpf_ktime_get_ns(); + + p.dir = d; + p.bytes = skb->len; + + void *data_end = (void *)(unsigned long long)skb->data_end; + void *data = (void *)(unsigned long long)skb->data; + + // Check if the packet is not malformed. + struct ethhdr *eth = data; + if (data + sizeof(struct ethhdr) > data_end) + return; + + // Check that this is an IP packet. + if (bpf_ntohs(eth->h_proto) != ETH_P_IP) + return; + + // Check if the packet is not malformed. + struct iphdr *ip = data + sizeof(struct ethhdr); + if (data + sizeof(struct ethhdr) + sizeof(struct iphdr) > data_end) + return; + + p.src_ip = ip->saddr; + p.dst_ip = ip->daddr; + p.proto = ip->protocol; + + // Check if the packet is of interest. + #ifdef BYPASS_LOOKUP_IP_OF_INTEREST + #if BYPASS_LOOKUP_IP_OF_INTEREST == 0 + if (!lookup(p.src_ip) && !lookup(p.dst_ip)) + { + return; + } + #endif + #endif + // Get source and destination ports. + if (ip->protocol == IPPROTO_TCP) + { + struct tcphdr *tcp = data + sizeof(struct ethhdr) + sizeof(struct iphdr); + if (data + sizeof(struct ethhdr) + sizeof(struct iphdr) + sizeof(struct tcphdr) > data_end) + return; + + p.src_port = tcp->source; + p.dst_port = tcp->dest; + + // Get TCP metadata. + struct tcpmetadata tcp_metadata; + __builtin_memset(&tcp_metadata, 0, sizeof(tcp_metadata)); + + tcp_metadata.seq = tcp->seq; + tcp_metadata.ack_num = tcp->ack_seq; + tcp_metadata.syn = tcp->syn; + tcp_metadata.ack = tcp->ack; + tcp_metadata.fin = tcp->fin; + tcp_metadata.rst = tcp->rst; + tcp_metadata.psh = tcp->psh; + tcp_metadata.urg = tcp->urg; + + p.tcp_metadata = tcp_metadata; + + // Get TSval/TSecr from TCP header. 
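+ // TSval/TSecr come from the TCP timestamp option (kind 8). Userspace pairs a
+ // TSval observed on the way out of the node with the TSecr echoed on the way
+ // back to estimate round-trip latency, so a parse failure here is not fatal:
+ // the packet is still emitted, just without timestamp data.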
+ if (parse_tcp_ts(tcp, data_end, &tcp_metadata.tsval, &tcp_metadata.tsecr) == 0) + { + p.tcp_metadata = tcp_metadata; + } + } + else if (ip->protocol == IPPROTO_UDP) + { + struct udphdr *udp = data + sizeof(struct ethhdr) + sizeof(struct iphdr); + if (data + sizeof(struct ethhdr) + sizeof(struct iphdr) + sizeof(struct udphdr) > data_end) + return; + + p.src_port = udp->source; + p.dst_port = udp->dest; + } + else + { + return; + } + + bpf_perf_event_output(skb, &packetparser_events, BPF_F_CURRENT_CPU, &p, sizeof(p)); +} + +SEC("classifier_endpoint_ingress") +int endpoint_ingress_filter(struct __sk_buff *skb) +{ + // This is attached to the interface on the host side. + // So ingress on host is egress on endpoint and vice versa. + parse(skb, FROM_ENDPOINT); + // Always return 0 to allow packet to pass. + return 0; +} + +SEC("classifier_endpoint_egress") +int endpoint_egress_filter(struct __sk_buff *skb) +{ + // This is attached to the interface on the host side. + // So egress on host is ingress on endpoint and vice versa. + parse(skb, TO_ENDPOINT); + // Always return 0 to allow packet to pass. + return 0; +} + +SEC("classifier_host_ingress") +int host_ingress_filter(struct __sk_buff *skb) +{ + parse(skb, FROM_NETWORK); + // Always return 0 to allow packet to pass. + return 0; +} + +SEC("classifier_host_egress") +int host_egress_filter(struct __sk_buff *skb) +{ + parse(skb, TO_NETWORK); + // Always return 0 to allow packet to pass. + return 0; +} diff --git a/pkg/plugin/packetparser/_cprog/packetparser.h b/pkg/plugin/packetparser/_cprog/packetparser.h new file mode 100644 index 000000000..05223efad --- /dev/null +++ b/pkg/plugin/packetparser/_cprog/packetparser.h @@ -0,0 +1,13 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +#define ETH_P_IP 0x0800 +#define MAX_TCP_OPTIONS 10 + +typedef enum +{ + FROM_ENDPOINT = 0, + TO_ENDPOINT, + FROM_NETWORK, + TO_NETWORK, +} direction; diff --git a/pkg/plugin/packetparser/_cprog/placeholder.go b/pkg/plugin/packetparser/_cprog/placeholder.go new file mode 100644 index 000000000..fd0225add --- /dev/null +++ b/pkg/plugin/packetparser/_cprog/placeholder.go @@ -0,0 +1,3 @@ +package cprog //nolint:all + +// This file is a placeholder to make Go include this directory when vendoring. diff --git a/pkg/plugin/packetparser/mocks/mock_types.go b/pkg/plugin/packetparser/mocks/mock_types.go new file mode 100644 index 000000000..9d67c2f57 --- /dev/null +++ b/pkg/plugin/packetparser/mocks/mock_types.go @@ -0,0 +1,218 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: types_linux.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + reflect "reflect" + + perf "github.com/cilium/ebpf/perf" + tc "github.com/florianl/go-tc" + gomock "github.com/golang/mock/gomock" +) + +// MockIQdisc is a mock of IQdisc interface. +type MockIQdisc struct { + ctrl *gomock.Controller + recorder *MockIQdiscMockRecorder +} + +// MockIQdiscMockRecorder is the mock recorder for MockIQdisc. +type MockIQdiscMockRecorder struct { + mock *MockIQdisc +} + +// NewMockIQdisc creates a new mock instance. +func NewMockIQdisc(ctrl *gomock.Controller) *MockIQdisc { + mock := &MockIQdisc{ctrl: ctrl} + mock.recorder = &MockIQdiscMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIQdisc) EXPECT() *MockIQdiscMockRecorder { + return m.recorder +} + +// Add mocks base method. 
+func (m *MockIQdisc) Add(info *tc.Object) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Add", info) + ret0, _ := ret[0].(error) + return ret0 +} + +// Add indicates an expected call of Add. +func (mr *MockIQdiscMockRecorder) Add(info interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockIQdisc)(nil).Add), info) +} + +// Delete mocks base method. +func (m *MockIQdisc) Delete(info *tc.Object) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", info) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockIQdiscMockRecorder) Delete(info interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockIQdisc)(nil).Delete), info) +} + +// MockIFilter is a mock of IFilter interface. +type MockIFilter struct { + ctrl *gomock.Controller + recorder *MockIFilterMockRecorder +} + +// MockIFilterMockRecorder is the mock recorder for MockIFilter. +type MockIFilterMockRecorder struct { + mock *MockIFilter +} + +// NewMockIFilter creates a new mock instance. +func NewMockIFilter(ctrl *gomock.Controller) *MockIFilter { + mock := &MockIFilter{ctrl: ctrl} + mock.recorder = &MockIFilterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIFilter) EXPECT() *MockIFilterMockRecorder { + return m.recorder +} + +// Add mocks base method. +func (m *MockIFilter) Add(info *tc.Object) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Add", info) + ret0, _ := ret[0].(error) + return ret0 +} + +// Add indicates an expected call of Add. +func (mr *MockIFilterMockRecorder) Add(info interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockIFilter)(nil).Add), info) +} + +// MockITc is a mock of ITc interface. +type MockITc struct { + ctrl *gomock.Controller + recorder *MockITcMockRecorder +} + +// MockITcMockRecorder is the mock recorder for MockITc. +type MockITcMockRecorder struct { + mock *MockITc +} + +// NewMockITc creates a new mock instance. +func NewMockITc(ctrl *gomock.Controller) *MockITc { + mock := &MockITc{ctrl: ctrl} + mock.recorder = &MockITcMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockITc) EXPECT() *MockITcMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockITc) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockITcMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockITc)(nil).Close)) +} + +// Filter mocks base method. +func (m *MockITc) Filter() *tc.Filter { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Filter") + ret0, _ := ret[0].(*tc.Filter) + return ret0 +} + +// Filter indicates an expected call of Filter. +func (mr *MockITcMockRecorder) Filter() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Filter", reflect.TypeOf((*MockITc)(nil).Filter)) +} + +// Qdisc mocks base method. 
+func (m *MockITc) Qdisc() *tc.Qdisc { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Qdisc") + ret0, _ := ret[0].(*tc.Qdisc) + return ret0 +} + +// Qdisc indicates an expected call of Qdisc. +func (mr *MockITcMockRecorder) Qdisc() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Qdisc", reflect.TypeOf((*MockITc)(nil).Qdisc)) +} + +// MockIPerf is a mock of IPerf interface. +type MockIPerf struct { + ctrl *gomock.Controller + recorder *MockIPerfMockRecorder +} + +// MockIPerfMockRecorder is the mock recorder for MockIPerf. +type MockIPerfMockRecorder struct { + mock *MockIPerf +} + +// NewMockIPerf creates a new mock instance. +func NewMockIPerf(ctrl *gomock.Controller) *MockIPerf { + mock := &MockIPerf{ctrl: ctrl} + mock.recorder = &MockIPerfMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIPerf) EXPECT() *MockIPerfMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockIPerf) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockIPerfMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockIPerf)(nil).Close)) +} + +// Read mocks base method. +func (m *MockIPerf) Read() (perf.Record, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Read") + ret0, _ := ret[0].(perf.Record) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Read indicates an expected call of Read. +func (mr *MockIPerfMockRecorder) Read() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Read", reflect.TypeOf((*MockIPerf)(nil).Read)) +} diff --git a/pkg/plugin/packetparser/packetparser_bpf.o b/pkg/plugin/packetparser/packetparser_bpf.o new file mode 100644 index 000000000..b30cae1a7 Binary files /dev/null and b/pkg/plugin/packetparser/packetparser_bpf.o differ diff --git a/pkg/plugin/packetparser/packetparser_bpfel_arm64.go b/pkg/plugin/packetparser/packetparser_bpfel_arm64.go new file mode 100644 index 000000000..097b4dbf1 --- /dev/null +++ b/pkg/plugin/packetparser/packetparser_bpfel_arm64.go @@ -0,0 +1,160 @@ +// Code generated by bpf2go; DO NOT EDIT. +//go:build arm64 + +package packetparser + +import ( + "bytes" + _ "embed" + "fmt" + "io" + + "github.com/cilium/ebpf" +) + +type packetparserMapKey struct { + Prefixlen uint32 + Data uint32 +} + +type packetparserPacket struct { + SrcIp uint32 + DstIp uint32 + SrcPort uint16 + DstPort uint16 + Proto uint8 + _ [3]byte + TcpMetadata struct { + Seq uint32 + AckNum uint32 + Syn uint16 + Ack uint16 + Fin uint16 + Rst uint16 + Psh uint16 + Urg uint16 + Tsval uint32 + Tsecr uint32 + } + Dir uint32 + Ts uint64 + Bytes uint64 +} + +// loadPacketparser returns the embedded CollectionSpec for packetparser. +func loadPacketparser() (*ebpf.CollectionSpec, error) { + reader := bytes.NewReader(_PacketparserBytes) + spec, err := ebpf.LoadCollectionSpecFromReader(reader) + if err != nil { + return nil, fmt.Errorf("can't load packetparser: %w", err) + } + + return spec, err +} + +// loadPacketparserObjects loads packetparser and converts it into a struct. +// +// The following types are suitable as obj argument: +// +// *packetparserObjects +// *packetparserPrograms +// *packetparserMaps +// +// See ebpf.CollectionSpec.LoadAndAssign documentation for details. 
+func loadPacketparserObjects(obj interface{}, opts *ebpf.CollectionOptions) error { + spec, err := loadPacketparser() + if err != nil { + return err + } + + return spec.LoadAndAssign(obj, opts) +} + +// packetparserSpecs contains maps and programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type packetparserSpecs struct { + packetparserProgramSpecs + packetparserMapSpecs +} + +// packetparserSpecs contains programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type packetparserProgramSpecs struct { + EndpointEgressFilter *ebpf.ProgramSpec `ebpf:"endpoint_egress_filter"` + EndpointIngressFilter *ebpf.ProgramSpec `ebpf:"endpoint_ingress_filter"` + HostEgressFilter *ebpf.ProgramSpec `ebpf:"host_egress_filter"` + HostIngressFilter *ebpf.ProgramSpec `ebpf:"host_ingress_filter"` +} + +// packetparserMapSpecs contains maps before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type packetparserMapSpecs struct { + RetinaFilterMap *ebpf.MapSpec `ebpf:"retina_filter_map"` + PacketparserEvents *ebpf.MapSpec `ebpf:"packetparser_events"` +} + +// packetparserObjects contains all objects after they have been loaded into the kernel. +// +// It can be passed to loadPacketparserObjects or ebpf.CollectionSpec.LoadAndAssign. +type packetparserObjects struct { + packetparserPrograms + packetparserMaps +} + +func (o *packetparserObjects) Close() error { + return _PacketparserClose( + &o.packetparserPrograms, + &o.packetparserMaps, + ) +} + +// packetparserMaps contains all maps after they have been loaded into the kernel. +// +// It can be passed to loadPacketparserObjects or ebpf.CollectionSpec.LoadAndAssign. +type packetparserMaps struct { + RetinaFilterMap *ebpf.Map `ebpf:"retina_filter_map"` + PacketparserEvents *ebpf.Map `ebpf:"packetparser_events"` +} + +func (m *packetparserMaps) Close() error { + return _PacketparserClose( + m.RetinaFilterMap, + m.PacketparserEvents, + ) +} + +// packetparserPrograms contains all programs after they have been loaded into the kernel. +// +// It can be passed to loadPacketparserObjects or ebpf.CollectionSpec.LoadAndAssign. +type packetparserPrograms struct { + EndpointEgressFilter *ebpf.Program `ebpf:"endpoint_egress_filter"` + EndpointIngressFilter *ebpf.Program `ebpf:"endpoint_ingress_filter"` + HostEgressFilter *ebpf.Program `ebpf:"host_egress_filter"` + HostIngressFilter *ebpf.Program `ebpf:"host_ingress_filter"` +} + +func (p *packetparserPrograms) Close() error { + return _PacketparserClose( + p.EndpointEgressFilter, + p.EndpointIngressFilter, + p.HostEgressFilter, + p.HostIngressFilter, + ) +} + +func _PacketparserClose(closers ...io.Closer) error { + for _, closer := range closers { + if err := closer.Close(); err != nil { + return err + } + } + return nil +} + +// Do not access this directly. +// +//go:embed packetparser_bpfel_arm64.o +var _PacketparserBytes []byte diff --git a/pkg/plugin/packetparser/packetparser_bpfel_arm64.o b/pkg/plugin/packetparser/packetparser_bpfel_arm64.o new file mode 100644 index 000000000..4a7677652 Binary files /dev/null and b/pkg/plugin/packetparser/packetparser_bpfel_arm64.o differ diff --git a/pkg/plugin/packetparser/packetparser_bpfel_x86.go b/pkg/plugin/packetparser/packetparser_bpfel_x86.go new file mode 100644 index 000000000..0eae52b1a --- /dev/null +++ b/pkg/plugin/packetparser/packetparser_bpfel_x86.go @@ -0,0 +1,160 @@ +// Code generated by bpf2go; DO NOT EDIT. 
+//go:build 386 || amd64 + +package packetparser + +import ( + "bytes" + _ "embed" + "fmt" + "io" + + "github.com/cilium/ebpf" +) + +type packetparserMapKey struct { + Prefixlen uint32 + Data uint32 +} + +type packetparserPacket struct { + SrcIp uint32 + DstIp uint32 + SrcPort uint16 + DstPort uint16 + Proto uint8 + _ [3]byte + TcpMetadata struct { + Seq uint32 + AckNum uint32 + Syn uint16 + Ack uint16 + Fin uint16 + Rst uint16 + Psh uint16 + Urg uint16 + Tsval uint32 + Tsecr uint32 + } + Dir uint32 + Ts uint64 + Bytes uint64 +} + +// loadPacketparser returns the embedded CollectionSpec for packetparser. +func loadPacketparser() (*ebpf.CollectionSpec, error) { + reader := bytes.NewReader(_PacketparserBytes) + spec, err := ebpf.LoadCollectionSpecFromReader(reader) + if err != nil { + return nil, fmt.Errorf("can't load packetparser: %w", err) + } + + return spec, err +} + +// loadPacketparserObjects loads packetparser and converts it into a struct. +// +// The following types are suitable as obj argument: +// +// *packetparserObjects +// *packetparserPrograms +// *packetparserMaps +// +// See ebpf.CollectionSpec.LoadAndAssign documentation for details. +func loadPacketparserObjects(obj interface{}, opts *ebpf.CollectionOptions) error { + spec, err := loadPacketparser() + if err != nil { + return err + } + + return spec.LoadAndAssign(obj, opts) +} + +// packetparserSpecs contains maps and programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type packetparserSpecs struct { + packetparserProgramSpecs + packetparserMapSpecs +} + +// packetparserSpecs contains programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type packetparserProgramSpecs struct { + EndpointEgressFilter *ebpf.ProgramSpec `ebpf:"endpoint_egress_filter"` + EndpointIngressFilter *ebpf.ProgramSpec `ebpf:"endpoint_ingress_filter"` + HostEgressFilter *ebpf.ProgramSpec `ebpf:"host_egress_filter"` + HostIngressFilter *ebpf.ProgramSpec `ebpf:"host_ingress_filter"` +} + +// packetparserMapSpecs contains maps before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type packetparserMapSpecs struct { + RetinaFilterMap *ebpf.MapSpec `ebpf:"retina_filter_map"` + PacketparserEvents *ebpf.MapSpec `ebpf:"packetparser_events"` +} + +// packetparserObjects contains all objects after they have been loaded into the kernel. +// +// It can be passed to loadPacketparserObjects or ebpf.CollectionSpec.LoadAndAssign. +type packetparserObjects struct { + packetparserPrograms + packetparserMaps +} + +func (o *packetparserObjects) Close() error { + return _PacketparserClose( + &o.packetparserPrograms, + &o.packetparserMaps, + ) +} + +// packetparserMaps contains all maps after they have been loaded into the kernel. +// +// It can be passed to loadPacketparserObjects or ebpf.CollectionSpec.LoadAndAssign. +type packetparserMaps struct { + RetinaFilterMap *ebpf.Map `ebpf:"retina_filter_map"` + PacketparserEvents *ebpf.Map `ebpf:"packetparser_events"` +} + +func (m *packetparserMaps) Close() error { + return _PacketparserClose( + m.RetinaFilterMap, + m.PacketparserEvents, + ) +} + +// packetparserPrograms contains all programs after they have been loaded into the kernel. +// +// It can be passed to loadPacketparserObjects or ebpf.CollectionSpec.LoadAndAssign. 
+type packetparserPrograms struct { + EndpointEgressFilter *ebpf.Program `ebpf:"endpoint_egress_filter"` + EndpointIngressFilter *ebpf.Program `ebpf:"endpoint_ingress_filter"` + HostEgressFilter *ebpf.Program `ebpf:"host_egress_filter"` + HostIngressFilter *ebpf.Program `ebpf:"host_ingress_filter"` +} + +func (p *packetparserPrograms) Close() error { + return _PacketparserClose( + p.EndpointEgressFilter, + p.EndpointIngressFilter, + p.HostEgressFilter, + p.HostIngressFilter, + ) +} + +func _PacketparserClose(closers ...io.Closer) error { + for _, closer := range closers { + if err := closer.Close(); err != nil { + return err + } + } + return nil +} + +// Do not access this directly. +// +//go:embed packetparser_bpfel_x86.o +var _PacketparserBytes []byte diff --git a/pkg/plugin/packetparser/packetparser_bpfel_x86.o b/pkg/plugin/packetparser/packetparser_bpfel_x86.o new file mode 100644 index 000000000..4a7677652 Binary files /dev/null and b/pkg/plugin/packetparser/packetparser_bpfel_x86.o differ diff --git a/pkg/plugin/packetparser/packetparser_linux.go b/pkg/plugin/packetparser/packetparser_linux.go new file mode 100644 index 000000000..fbe1a00b1 --- /dev/null +++ b/pkg/plugin/packetparser/packetparser_linux.go @@ -0,0 +1,643 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package packetparser + +import ( + "context" + "errors" + "fmt" + "os" + "path" + "runtime" + "sync" + "unsafe" + + "github.com/cilium/cilium/api/v1/flow" + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/perf" + "github.com/cilium/ebpf/rlimit" + tc "github.com/florianl/go-tc" + helper "github.com/florianl/go-tc/core" + "github.com/microsoft/retina/pkg/common" + kcfg "github.com/microsoft/retina/pkg/config" + "github.com/microsoft/retina/pkg/enricher" + "github.com/microsoft/retina/pkg/loader" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/metrics" + "github.com/microsoft/retina/pkg/plugin/api" + plugincommon "github.com/microsoft/retina/pkg/plugin/common" + "github.com/microsoft/retina/pkg/plugin/constants" + "github.com/microsoft/retina/pkg/pubsub" + "github.com/microsoft/retina/pkg/utils" + "github.com/microsoft/retina/pkg/watchers/endpoint" + "github.com/vishvananda/netlink" + "go.uber.org/zap" + "golang.org/x/sys/unix" + + _ "github.com/microsoft/retina/pkg/plugin/lib/_amd64" // nolint + _ "github.com/microsoft/retina/pkg/plugin/lib/_arm64" // nolint + _ "github.com/microsoft/retina/pkg/plugin/lib/common/libbpf/_src" // nolint + _ "github.com/microsoft/retina/pkg/plugin/packetparser/_cprog" // nolint +) + +//go:generate go run github.com/cilium/ebpf/cmd/bpf2go@master -cc clang-14 -cflags "-g -O2 -Wall -D__TARGET_ARCH_${GOARCH} -Wall" -target ${GOARCH} -type packet packetparser ./_cprog/packetparser.c -- -I../lib/_${GOARCH} -I../lib/common/libbpf/_src -I../filter/_cprog/ + +// New creates a packetparser plugin. +func New(cfg *kcfg.Config) api.Plugin { + return &packetParser{ + cfg: cfg, + l: log.Logger().Named(string(Name)), + } +} + +func (p *packetParser) Name() string { + return string(Name) +} + +func (p *packetParser) Generate(ctx context.Context) error { + // Get absolute path to this file during runtime. 
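+ // The generated dynamic.h has to land in the _cprog directory next to this
+ // source file, so the path is derived from the caller's file location rather
+ // than from the process working directory.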
+ _, filename, _, ok := runtime.Caller(0) + if !ok { + return errors.New("unable to get absolute path to this file") + } + dir := path.Dir(filename) + dynamicHeaderPath := fmt.Sprintf("%s/%s/%s", dir, bpfSourceDir, dynamicHeaderFileName) + i := 0 + if p.cfg.BypassLookupIPOfInterest { + p.l.Logger.Info("Bypassing lookup IP of interest") + i = 1 + } + st := fmt.Sprintf("#define BYPASS_LOOKUP_IP_OF_INTEREST %d \n", i) + err := loader.WriteFile(ctx, dynamicHeaderPath, st) + if err != nil { + p.l.Error("Error writing dynamic header", zap.Error(err)) + return err + } + p.l.Info("PacketParser header generated at", zap.String("path", dynamicHeaderPath)) + return nil +} + +func (p *packetParser) Compile(ctx context.Context) error { + // Get the absolute path to this file during runtime. + dir, err := absPath() + if err != nil { + return err + } + + // Generate dynamic header file. + if err := p.Generate(ctx); err != nil { + p.l.Error("failed to generate PacketParser dynamic header:%w", zap.Error(err)) + } + bpfSourceFile := fmt.Sprintf("%s/%s/%s", dir, bpfSourceDir, bpfSourceFileName) + bpfOutputFile := fmt.Sprintf("%s/%s", dir, bpfObjectFileName) + arch := runtime.GOARCH + includeDir := fmt.Sprintf("-I%s/../lib/_%s", dir, arch) + filterDir := fmt.Sprintf("-I%s/../filter/_cprog/", dir) + libbpfDir := fmt.Sprintf("-I%s/../lib/common/libbpf/_src", dir) + targetArch := "-D__TARGET_ARCH_x86" + if arch == "arm64" { + targetArch = "-D__TARGET_ARCH_arm64" + } + // Keep target as bpf, otherwise clang compilation yields bpf object that elf reader cannot load. + err = loader.CompileEbpf(ctx, "-target", "bpf", "-Wall", targetArch, "-g", "-O2", "-c", bpfSourceFile, "-o", bpfOutputFile, includeDir, libbpfDir, filterDir) + if err != nil { + return err + } + p.l.Info("PacketParser metric compiled") + return nil +} + +func (p *packetParser) Init() error { + var err error + if !p.cfg.EnablePodLevel { + p.l.Warn("packet parser and latency plugin will not init because pod level is disabled") + return nil + } + + if err := rlimit.RemoveMemlock(); err != nil { + p.l.Error("RemoveMemLock failed:%w", zap.Error(err)) + return err + } + + // Get the absolute path to this file during runtime. + dir, err := absPath() + if err != nil { + return err + } + + bpfOutputFile := fmt.Sprintf("%s/%s", dir, bpfObjectFileName) + + objs := &packetparserObjects{} + spec, err := ebpf.LoadCollectionSpec(bpfOutputFile) + if err != nil { + return err + } + //nolint:typecheck + if err := spec.LoadAndAssign(objs, &ebpf.CollectionOptions{ //nolint:typecheck + Maps: ebpf.MapOptions{ + PinPath: constants.FilterMapPath, + }, + }); err != nil { //nolint:typecheck + p.l.Error("Error loading objects: %w", zap.Error(err)) + return err + } + p.objs = objs + + // Endpoint bpf programs. + p.endpointIngressInfo, err = p.objs.EndpointIngressFilter.Info() + if err != nil { + p.l.Error("Error getting ingress filter info", zap.Error(err)) + return err + } + p.endpointEgressInfo, err = p.objs.EndpointEgressFilter.Info() + if err != nil { + p.l.Error("Error getting egress filter info", zap.Error(err)) + return err + } + + // Host bpf programs. 
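+ // Program info is cached so createQdiscAndAttach can later attach each tc bpf
+ // filter with the program's name alongside its file descriptor.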
+ p.hostIngressInfo, err = p.objs.HostIngressFilter.Info() + if err != nil { + p.l.Error("Error getting ingress filter info", zap.Error(err)) + return err + } + p.hostEgressInfo, err = p.objs.HostEgressFilter.Info() + if err != nil { + p.l.Error("Error getting egress filter info", zap.Error(err)) + return err + } + + p.reader, err = plugincommon.NewPerfReader(p.l, objs.PacketparserEvents, perCPUBuffer, 1) + if err != nil { + p.l.Error("Error NewReader", zap.Error(err)) + return err + } + + p.tcMap = &sync.Map{} + p.interfaceLockMap = &sync.Map{} + + return nil +} + +func (p *packetParser) Start(ctx context.Context) error { + if !p.cfg.EnablePodLevel { + p.l.Warn("packet parser and latency plugin will not start because pod level is disabled") + return nil + } + + p.l.Info("Starting packet parser") + + p.l.Info("setting up enricher since pod level is enabled") + // Setup enricher. + p.enricher = enricher.Instance() + if p.enricher == nil { + p.l.Warn("Retina enricher is nil") + // return errors.New("enricher is nil") + } + + // Get Pubsub instance. + ps := pubsub.New() + + // Register callback. + // Every time a new endpoint is created, we will create a qdisc and attach it to the endpoint. + fn := pubsub.CallBackFunc(p.endpointWatcherCallbackFn) + // Check if callback is already registered. + if p.callbackID == "" { + p.callbackID = ps.Subscribe(common.PubSubEndpoints, &fn) + } + + // Attach to eth0 for latency. + eth0Link, err := utils.GetInterface(Eth0, Device) + // eth0Link, err := retinautils.GetInterface("azvb22061c2658", "veth") + if err != nil { + return err + } + p.l.Info("Attaching Packetparser to eth0", zap.Any("eth0Link", eth0Link.Attrs())) + p.createQdiscAndAttach(*eth0Link.Attrs(), Device) + + // Create the channel. + p.recordsChannel = make(chan perf.Record, buffer) + p.l.Debug("Created records channel") + + p.l.Info("Started packet parser") + return p.run(ctx) +} + +func (p *packetParser) Stop() error { + p.l.Info("Stopping packet parser") + + // Get pubsubs instance + ps := pubsub.New() + + // Stop perf reader. + if p.reader != nil { + if err := p.reader.Close(); err != nil { + p.l.Error("Error closing perf reader", zap.Error(err)) + } + } + p.l.Debug("Stopped perf reader") + + // Close the channel. The producer should have stopped by now. + // All consumers should have stopped by now. + if p.recordsChannel != nil { + close(p.recordsChannel) + p.l.Debug("Closed records channel") + } + + // Stop map and progs. + if p.objs != nil { + if err := p.objs.Close(); err != nil { + p.l.Error("Error closing objects", zap.Error(err)) + } + } + p.l.Debug("Stopped map/progs") + + // Unregister callback. + if p.callbackID != "" { + if err := ps.Unsubscribe(common.PubSubEndpoints, p.callbackID); err != nil { + p.l.Error("Error unregistering callback for packetParser", zap.Error(err)) + } + // Reset callback ID. + p.callbackID = "" + } + + if err := p.cleanAll(); err != nil { + p.l.Error("Error cleaning", zap.Error(err)) + return err + } + + p.l.Info("Stopped packet parser") + return nil +} + +func (p *packetParser) SetupChannel(ch chan *v1.Event) error { + p.externalChannel = ch + return nil +} + +// cleanAll is NOT thread safe. +// Not required for now. +func (p *packetParser) cleanAll() error { + // Delete tunnel and qdiscs. + if p.tcMap == nil { + return nil + } + + p.tcMap.Range(func(key, value interface{}) bool { + v := value.(*val) + p.clean(v.tcnl, v.tcIngressObj, v.tcEgressObj) + return true + }) + + // Reset map. 
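+ // The qdiscs and netlink handles were already released by clean() above;
+ // swapping in a fresh map just drops the now-stale entries.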
+ // It is OK to do this without a lock because + // cleanAll is only invoked from Stop(), and Stop can + // only be called from PluginManager (which is single threaded). + p.tcMap = &sync.Map{} + return nil +} + +func (p *packetParser) clean(tcnl ITc, tcIngressObj *tc.Object, tcEgressObj *tc.Object) { + // Warning, not error. Clean is best effort. + if tcnl != nil { + if err := getQdisc(tcnl).Delete(tcEgressObj); err != nil && !errors.Is(err, tc.ErrNoArg) { + p.l.Warn("could not delete egress qdisc", zap.Error(err)) + } + if err := getQdisc(tcnl).Delete(tcIngressObj); err != nil && !errors.Is(err, tc.ErrNoArg) { + p.l.Warn("could not delete ingress qdisc", zap.Error(err)) + } + if err := tcnl.Close(); err != nil { + p.l.Warn("could not close rtnetlink socket", zap.Error(err)) + } + } +} + +func (p *packetParser) endpointWatcherCallbackFn(obj interface{}) { + // Contract is that we will receive an endpoint event pointer. + event := obj.(*endpoint.EndpointEvent) + if event == nil { + return + } + + iface := event.Obj.(netlink.LinkAttrs) + + ifaceKey := ifaceToKey(iface) + lockMapVal, _ := p.interfaceLockMap.LoadOrStore(ifaceKey, &sync.Mutex{}) + mu := lockMapVal.(*sync.Mutex) + mu.Lock() + defer mu.Unlock() + + switch event.Type { + case endpoint.EndpointCreated: + p.l.Debug("Endpoint created", zap.String("name", iface.Name)) + p.createQdiscAndAttach(iface, Veth) + case endpoint.EndpointDeleted: + p.l.Debug("Endpoint deleted", zap.String("name", iface.Name)) + // Clean. + if v, ok := p.tcMap.Load(ifaceKey); ok { + tcMapVal := v.(*val) + p.clean(tcMapVal.tcnl, tcMapVal.tcIngressObj, tcMapVal.tcEgressObj) + // Delete from map. + p.tcMap.Delete(ifaceKey) + } + default: + // Unknown. + p.l.Debug("Unknown event", zap.String("type", event.Type.String())) + } +} + +// This does the following: +// 1. Create a tunnel interface. +// 2. Create a qdisc and attach it to the tunnel interface. +// 3. Attach ingress program to the endpoint interface. +// 4. Create a qdisc and attach it to the endpoint interface. +// 5. Attach egress program to the endpoint interface. +// Inspired by https://github.com/mauriciovasquezbernal/talks/blob/1f2080afe731949a033330c0adc290be8f3fc06d/2022-ebpf-training/2022-10-13/drop/main.go . +// Supported ifaceTypes - device and veth. +func (p *packetParser) createQdiscAndAttach(iface netlink.LinkAttrs, ifaceType string) { + p.l.Debug("Starting qdisc attachment", zap.String("interface", iface.Name)) + + // Create tunnel interface. + var ( + tcnl ITc + err error + ingressProgram, egressProgram *ebpf.Program + ingressInfo, egressInfo *ebpf.ProgramInfo + ) + + if ifaceType == "device" { + ingressProgram = p.objs.HostIngressFilter + egressProgram = p.objs.HostEgressFilter + + ingressInfo = p.hostIngressInfo + egressInfo = p.hostEgressInfo + } else if ifaceType == "veth" { + ingressProgram = p.objs.EndpointIngressFilter + egressProgram = p.objs.EndpointEgressFilter + + ingressInfo = p.endpointIngressInfo + egressInfo = p.endpointEgressInfo + } else { + p.l.Error("Unknown ifaceType", zap.String("ifaceType", ifaceType)) + return + } + + tcnl, err = tcOpen(&tc.Config{}) + if err != nil { + p.l.Error("could not open rtnetlink socket", zap.Int("NetNsID", iface.NetNsID), zap.Error(err)) + return + } + + var qdiscIngress, qdiscEgress *tc.Object + + // Create a qdisc of type clsact on the tunnel interface. + // We will attach the ingress bpf filter on this. 
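+ // clsact is a classification-only qdisc: it provides ingress and egress hook
+ // points for bpf filters without queueing packets, so adding it does not
+ // change how traffic is scheduled on the interface.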
+ qdiscIngress = &tc.Object{ + Msg: tc.Msg{ + Family: unix.AF_UNSPEC, + Ifindex: uint32(iface.Index), + Handle: helper.BuildHandle(0xFFFF, 0x0000), + Parent: tc.HandleIngress, + }, + Attribute: tc.Attribute{ + Kind: "clsact", + }, + } + // Install Qdisc on interface. + if err := getQdisc(tcnl).Add(qdiscIngress); err != nil && !errors.Is(err, os.ErrExist) { + p.l.Error("could not assign clsact ingress to ", zap.String("interface", iface.Name), zap.Error(err)) + p.clean(tcnl, qdiscIngress, qdiscEgress) + return + } + // Create a filter of type bpf on the tunnel interface. + filterIngress := tc.Object{ + Msg: tc.Msg{ + Family: unix.AF_UNSPEC, + Ifindex: uint32(iface.Index), + Handle: 0, + Parent: 0xFFFFFFF2, + Info: 0x10300, + }, + Attribute: tc.Attribute{ + Kind: "bpf", + BPF: &tc.Bpf{ + FD: utils.Uint32Ptr(uint32(getFD(ingressProgram))), + Name: utils.StringPtr(ingressInfo.Name), + Flags: utils.Uint32Ptr(0x1), + }, + }, + } + if err := getFilter(tcnl).Add(&filterIngress); err != nil && !errors.Is(err, os.ErrExist) { + p.l.Error("could not add bpf ingress to ", zap.String("interface", iface.Name), zap.Error(err)) + p.clean(tcnl, qdiscIngress, qdiscEgress) + return + } + + // Create a qdisc of type clsact on the endpoint interface. + qdiscEgress = &tc.Object{ + Msg: tc.Msg{ + Family: unix.AF_UNSPEC, + Ifindex: uint32(iface.Index), + Handle: helper.BuildHandle(0xFFFF, 0), + Parent: helper.BuildHandle(0xFFFF, 0xFFF1), + }, + Attribute: tc.Attribute{ + Kind: "clsact", + }, + } + + // Install Qdisc on interface. + if err := getQdisc(tcnl).Add(qdiscEgress); err != nil && !errors.Is(err, os.ErrExist) { + p.l.Error("could not assign clsact egress to ", zap.String("interface", iface.Name), zap.Error(err)) + p.clean(tcnl, qdiscIngress, qdiscEgress) + return + } + // Create a filter of type bpf on the endpoint interface. + filterEgress := tc.Object{ + Msg: tc.Msg{ + Family: unix.AF_UNSPEC, + Ifindex: uint32(iface.Index), + Handle: 1, + Info: TC_H_MAKE(1<<16, uint32(utils.HostToNetShort(0x0003))), + Parent: TC_H_MAKE(0xFFFFFFF1, 0xFFF3), + }, + Attribute: tc.Attribute{ + Kind: "bpf", + BPF: &tc.Bpf{ + FD: utils.Uint32Ptr(uint32(getFD(egressProgram))), + Name: utils.StringPtr(egressInfo.Name), + Flags: utils.Uint32Ptr(0x1), + }, + }, + } + if err := getFilter(tcnl).Add(&filterEgress); err != nil && !errors.Is(err, os.ErrExist) { + p.l.Error("could not add bpf egress to ", zap.String("interface", iface.Name), zap.Error(err)) + p.clean(tcnl, qdiscIngress, qdiscEgress) + return + } + + // Cache. + ifaceKey := ifaceToKey(iface) + ifaceVal := &val{tcnl: tcnl, tcIngressObj: qdiscIngress, tcEgressObj: qdiscEgress} + p.tcMap.Store(ifaceKey, ifaceVal) + + p.l.Debug("Successfully added bpf", zap.String("interface", iface.Name)) +} + +func (p *packetParser) run(ctx context.Context) error { + p.l.Info("Starting packet parser") + + // Start perf record handlers (consumers). + for i := 0; i < workers; i++ { + p.wg.Add(1) + go p.processRecord(ctx, i) + } + // Start events handler from perf array in kernel (producer). + // Don't add it to the wait group because we don't want to wait for it. + // The perf reader Read call blocks until there is data available in the perf buffer. + // That call is unblocked when Reader is closed. + go p.handleEvents(ctx) + + // Wait for the context to be done. + // This will block till all consumers exit. + p.wg.Wait() + p.l.Info("All workers have stopped") + + p.l.Info("Context is done, packet parser will stop running") + return nil +} + +// This is the data consumer. 
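+// It drains recordsChannel, decodes each raw perf sample into a
+// packetparserPacket, converts it to a flow, and hands the result to the
+// enricher and the external channel.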
+// There will more than one of these. +func (p *packetParser) processRecord(ctx context.Context, id int) { + defer p.wg.Done() + for { + select { + case <-ctx.Done(): + p.l.Info("Context is done, stopping Worker", zap.Int("worker_id", id)) + return + case record := <-p.recordsChannel: + p.l.Debug("Received record", + zap.Int("cpu", record.CPU), + zap.Uint64("lost_samples", record.LostSamples), + zap.Int("bytes_remaining", record.Remaining), + zap.Int("worker_id", id), + ) + + bpfEvent := (*packetparserPacket)(unsafe.Pointer(&record.RawSample[0])) //nolint:typecheck + + // Post processing of the bpfEvent. + // Anything after this is required only for Pod level metrics. + sourcePortShort := uint32(utils.HostToNetShort(bpfEvent.SrcPort)) + destinationPortShort := uint32(utils.HostToNetShort(bpfEvent.DstPort)) + + fl := utils.ToFlow( + int64(bpfEvent.Ts), + utils.Int2ip(bpfEvent.SrcIp).To4(), // Precautionary To4() call. + utils.Int2ip(bpfEvent.DstIp).To4(), // Precautionary To4() call. + sourcePortShort, + destinationPortShort, + bpfEvent.Proto, + bpfEvent.Dir, + flow.Verdict_FORWARDED, + 0, + ) + if fl == nil { + p.l.Warn("Could not convert bpfEvent to flow", zap.Any("bpfEvent", bpfEvent)) + continue + } + utils.AddPacketSize(fl, bpfEvent.Bytes) + // Add the TCP metadata to the flow. + tcpMetadata := bpfEvent.TcpMetadata + utils.AddTcpFlags(fl, tcpMetadata.Syn, tcpMetadata.Ack, tcpMetadata.Fin, tcpMetadata.Rst, tcpMetadata.Psh, tcpMetadata.Urg) + + // For packets originating from node, we use tsval as the tcpID. + // Packets coming back has the tsval echoed in tsecr. + if fl.TraceObservationPoint == flow.TraceObservationPoint_TO_NETWORK { + utils.AddTcpID(fl, uint64(tcpMetadata.Tsval)) + } else if fl.TraceObservationPoint == flow.TraceObservationPoint_FROM_NETWORK { + utils.AddTcpID(fl, uint64(tcpMetadata.Tsecr)) + } + + p.l.Debug("Received packet", zap.Any("flow", fl)) + + // Write the event to the enricher. + ev := &v1.Event{ + Event: fl, + Timestamp: fl.Time, + } + if p.enricher != nil { + p.enricher.Write(ev) + } + + // Write the event to the external channel. + if p.externalChannel != nil { + select { + case p.externalChannel <- ev: + default: + // Channel is full, drop the event. + // We shouldn't slow down the reader. + metrics.LostEventsCounter.WithLabelValues(utils.ExternalChannel, string(Name)).Inc() + } + } + } + } +} + +func (p *packetParser) handleEvents(ctx context.Context) { + for { + select { + case <-ctx.Done(): + p.l.Info("Context is done, stopping handleEvents") + return + default: + p.readData() + } + } +} + +// This is the data producer. +func (p *packetParser) readData() { + // Read call blocks until there is data available in the perf buffer. + // This is unblocked by the close call. + record, err := p.reader.Read() + if err != nil { + if errors.Is(err, perf.ErrClosed) { + p.l.Error("Perf array is empty") + // nothing to do, we're done + } else { + p.l.Error("Error reading perf array", zap.Error(err)) + } + return + } + + if record.LostSamples > 0 { + // p.l.Warn("Lostsamples", zap.Uint64("lost samples", record.LostSamples)) + metrics.LostEventsCounter.WithLabelValues(utils.Kernel, string(Name)).Add(float64(record.LostSamples)) + return + } + + select { + case p.recordsChannel <- record: + p.l.Debug("Sent record to channel", zap.Any("record", record)) + default: + // Channel is full, drop the record. + // We shouldn't slow down the perf array reader. 
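+ // Dropped records are still accounted for via LostEventsCounter with the
+ // buffered-channel label, so the loss is visible in metrics instead of
+ // back-pressuring the kernel-facing reader.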
+ // p.l.Warn("Channel is full, dropping record", zap.Any("lost samples", record)) + metrics.LostEventsCounter.WithLabelValues(utils.BufferedChannel, string(Name)).Inc() + } +} + +// Helper functions. + +// absPath returns the absolute path to the directory where this file resides. +func absPath() (string, error) { + _, filename, _, ok := runtime.Caller(0) + if !ok { + return "", errors.New("failed to determine current file path") + } + dir := path.Dir(filename) + return dir, nil +} diff --git a/pkg/plugin/packetparser/packetparser_test.go b/pkg/plugin/packetparser/packetparser_test.go new file mode 100644 index 000000000..8d0017138 --- /dev/null +++ b/pkg/plugin/packetparser/packetparser_test.go @@ -0,0 +1,474 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package packetparser + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "os" + "path" + "runtime" + "sync" + "testing" + "time" + + kcfg "github.com/microsoft/retina/pkg/config" + "github.com/prometheus/client_golang/prometheus" + + "github.com/microsoft/retina/pkg/enricher" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/metrics" + "github.com/microsoft/retina/pkg/plugin/packetparser/mocks" + "github.com/microsoft/retina/pkg/watchers/endpoint" + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/perf" + "github.com/florianl/go-tc" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/vishvananda/netlink" +) + +var ( + cfgPodLevelEnabled = &kcfg.Config{ + EnablePodLevel: true, + BypassLookupIPOfInterest: true, + } + cfgPodLevelDisabled = &kcfg.Config{ + EnablePodLevel: false, + } +) + +func TestCleanAll(t *testing.T) { + opts := log.GetDefaultLogOpts() + log.SetupZapLogger(opts) + + p := &packetParser{ + cfg: cfgPodLevelEnabled, + l: log.Logger().Named("test"), + } + assert.Nil(t, p.cleanAll()) + + p.tcMap = &sync.Map{} + assert.Nil(t, p.cleanAll()) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mtcnl := mocks.NewMockITc(ctrl) + mtcnl.EXPECT().Close().Return(nil).AnyTimes() + + mq := mocks.NewMockIQdisc(ctrl) + mq.EXPECT().Delete(gomock.Any()).Return(nil).AnyTimes() + + getQdisc = func(tcnl ITc) IQdisc { + return mq + } + + p.tcMap.Store(key{ + name: "test", + hardwareAddr: "test", + netNs: 1, + }, &val{ + tcnl: mtcnl, + tcIngressObj: &tc.Object{}, + tcEgressObj: &tc.Object{}, + }) + p.tcMap.Store(key{ + name: "test2", + hardwareAddr: "test2", + netNs: 2, + }, &val{ + tcnl: mtcnl, + tcIngressObj: &tc.Object{}, + tcEgressObj: &tc.Object{}, + }) + + assert.Nil(t, p.cleanAll()) + + keyCount := 0 + p.tcMap.Range(func(k, v interface{}) bool { + keyCount++ + return true + }) + assert.Equal(t, 0, keyCount) +} + +func TestClean(t *testing.T) { + opts := log.GetDefaultLogOpts() + log.SetupZapLogger(opts) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Test nil. + p := &packetParser{ + cfg: cfgPodLevelEnabled, + l: log.Logger().Named("test"), + } + p.clean(nil, nil, nil) // Should not panic. + + // Test tcnl calls. 
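+ // The package-level getQdisc hook is overridden so the test never opens a
+ // real rtnetlink socket; routing it through tcnl.Qdisc() lets the mock assert
+ // the expected call counts.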
+ mq := mocks.NewMockIQdisc(ctrl) + mq.EXPECT().Delete(gomock.Any()).Return(nil).Times(2) + + mtcnl := mocks.NewMockITc(ctrl) + mtcnl.EXPECT().Qdisc().Return(nil).Times(2) + mtcnl.EXPECT().Close().Return(nil).Times(1) + + getQdisc = func(tcnl ITc) IQdisc { + // Add this verify tcnl.Qdisc() is called twice + tcnl.Qdisc() + return mq + } + + p.clean(mtcnl, &tc.Object{}, &tc.Object{}) +} + +func TestCleanWithErrors(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + p := &packetParser{ + cfg: cfgPodLevelEnabled, + l: log.Logger().Named("test"), + } + + // Test we try delete qdiscs even if we get errors. + mq := mocks.NewMockIQdisc(ctrl) + mq.EXPECT().Delete(gomock.Any()).Return(errors.New("error")).Times(2) + + mtcnl := mocks.NewMockITc(ctrl) + mtcnl.EXPECT().Qdisc().Return(nil).AnyTimes() + mtcnl.EXPECT().Close().Return(nil).Times(1) + + getQdisc = func(tcnl ITc) IQdisc { + return mq + } + + p.clean(mtcnl, &tc.Object{}, &tc.Object{}) +} + +func TestEndpointWatcherCallbackFn_EndpointDeleted(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + p := &packetParser{ + cfg: cfgPodLevelEnabled, + l: log.Logger().Named("test"), + interfaceLockMap: &sync.Map{}, + } + p.tcMap = &sync.Map{} + linkAttr := netlink.LinkAttrs{ + Name: "test", + HardwareAddr: []byte("test"), + NetNsID: 1, + } + key := ifaceToKey(linkAttr) + p.tcMap.Store(key, &val{ + tcnl: nil, + tcIngressObj: &tc.Object{}, + tcEgressObj: &tc.Object{}, + }) + + // Create EndpointDeleted event. + e := &endpoint.EndpointEvent{ + Type: endpoint.EndpointDeleted, + Obj: linkAttr, + } + + p.endpointWatcherCallbackFn(e) + + _, ok := p.tcMap.Load(key) + assert.False(t, ok) +} + +func TestCreateQdiscAndAttach(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mfilter := mocks.NewMockIFilter(ctrl) + mfilter.EXPECT().Add(gomock.Any()).Return(nil).AnyTimes() + + mq := mocks.NewMockIQdisc(ctrl) + mq.EXPECT().Add(gomock.Any()).Return(nil).AnyTimes() + + mtcnl := mocks.NewMockITc(ctrl) + mtcnl.EXPECT().Qdisc().Return(nil).AnyTimes() + + getQdisc = func(tcnl ITc) IQdisc { + return mq + } + + getFilter = func(tcnl ITc) IFilter { + return mfilter + } + + tcOpen = func(c *tc.Config) (ITc, error) { + return mtcnl, nil + } + + getFD = func(e *ebpf.Program) int { + return 1 + } + + pObj := &packetparserObjects{} //nolint:typecheck + pObj.EndpointIngressFilter = &ebpf.Program{} + pObj.EndpointEgressFilter = &ebpf.Program{} + + p := &packetParser{ + cfg: cfgPodLevelEnabled, + l: log.Logger().Named("test"), + objs: pObj, + interfaceLockMap: &sync.Map{}, + endpointIngressInfo: &ebpf.ProgramInfo{ + Name: "ingress", + }, + endpointEgressInfo: &ebpf.ProgramInfo{ + Name: "egress", + }, + hostIngressInfo: &ebpf.ProgramInfo{ + Name: "ingress", + }, + hostEgressInfo: &ebpf.ProgramInfo{ + Name: "egress", + }, + tcMap: &sync.Map{}, + } + linkAttr := netlink.LinkAttrs{ + Name: "test", + HardwareAddr: []byte("test"), + NetNsID: 1, + } + // Test veth. + p.createQdiscAndAttach(linkAttr, Veth) + + key := ifaceToKey(linkAttr) + _, ok := p.tcMap.Load(key) + assert.True(t, ok) + + pObj.HostIngressFilter = &ebpf.Program{} + pObj.HostEgressFilter = &ebpf.Program{} + linkAttr2 := netlink.LinkAttrs{ + Name: "test2", + HardwareAddr: []byte("test2"), + NetNsID: 2, + } + // Test Device. 
+ p.createQdiscAndAttach(linkAttr2, Device) + + key = ifaceToKey(linkAttr2) + _, ok = p.tcMap.Load(key) + assert.True(t, ok) +} + +func TestReadData_Error(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mperf := mocks.NewMockIPerf(ctrl) + mperf.EXPECT().Read().Return(perf.Record{}, errors.New("error")).AnyTimes() + + menricher := enricher.NewMockEnricherInterface(ctrl) //nolint:typecheck + menricher.EXPECT().Write(gomock.Any()).Times(0) + + p := &packetParser{ + cfg: cfgPodLevelEnabled, + l: log.Logger().Named("test"), + reader: mperf, + } + p.readData() + + // Lost samples. + mperf.EXPECT().Read().Return(perf.Record{ + LostSamples: 1, + }, nil).AnyTimes() + p.readData() +} + +func TestReadDataPodLevelEnabled(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + bpfEvent := &packetparserPacket{ //nolint:typecheck + SrcIp: uint32(83886272), // 192.0.0.5 + DstIp: uint32(16777226), // 10.0.0.1 + Proto: uint8(6), // TCP + Dir: uint32(1), // TO Endpoint + SrcPort: uint16(80), + DstPort: uint16(443), + } + bytes, _ := json.Marshal(bpfEvent) + record := perf.Record{ + LostSamples: 0, + RawSample: bytes, + } + + mperf := mocks.NewMockIPerf(ctrl) + mperf.EXPECT().Read().Return(record, nil).MinTimes(1) + + menricher := enricher.NewMockEnricherInterface(ctrl) //nolint:typecheck + menricher.EXPECT().Write(gomock.Any()).MinTimes(1) + + p := &packetParser{ + cfg: cfgPodLevelEnabled, + l: log.Logger().Named("test"), + reader: mperf, + enricher: menricher, + recordsChannel: make(chan perf.Record, buffer), + } + + mICounterVec := metrics.NewMockICounterVec(ctrl) + mICounterVec.EXPECT().WithLabelValues(gomock.Any()).Return(prometheus.NewCounter(prometheus.CounterOpts{})).AnyTimes() + + metrics.LostEventsCounter = mICounterVec + + exCh := make(chan *v1.Event, 10) + p.SetupChannel(exCh) + + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + p.run(ctx) + + // Test we get the event. + select { + case <-exCh: + default: + t.Fatal("Expected event in external channel, got none") + } +} + +func TestStartPodLevelDisabled(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + p := &packetParser{ + cfg: cfgPodLevelDisabled, + l: log.Logger().Named("test"), + } + ctx := context.Background() + err := p.Start(ctx) + require.NoError(t, err) +} + +func TestInitPodLevelDisabled(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + p := &packetParser{ + cfg: cfgPodLevelDisabled, + l: log.Logger().Named("test"), + } + err := p.Init() + require.NoError(t, err) +} + +func TestPacketParseGenerate(t *testing.T) { + takeBackup() + defer restoreBackup() + + log.SetupZapLogger(log.GetDefaultLogOpts()) + // Get the directory of the current test file. + _, filename, _, ok := runtime.Caller(0) + if !ok { + t.Fatal("failed to determine test file path") + } + currDir := path.Dir(filename) + dynamicHeaderPath := fmt.Sprintf("%s/%s/%s", currDir, bpfSourceDir, dynamicHeaderFileName) + + // Instantiate the packetParser struct with a mocked logger and context. + p := &packetParser{ + cfg: cfgPodLevelEnabled, + l: log.Logger().Named(string(Name)), + } + ctx := context.Background() + + // Call the Generate function and check if it returns an error. 
+ if err := p.Generate(ctx); err != nil { + t.Fatalf("failed to generate PacketParser header: %v", err) + } + + // Verify that the dynamic header file was created in the expected location and contains the expected contents. + if _, err := os.Stat(dynamicHeaderPath); os.IsNotExist(err) { + t.Fatalf("dynamic header file does not exist: %v", err) + } + + expectedContents := "#define BYPASS_LOOKUP_IP_OF_INTEREST 1 \n" + actualContents, err := os.ReadFile(dynamicHeaderPath) + if err != nil { + t.Fatalf("failed to read dynamic header file: %v", err) + } + if string(actualContents) != expectedContents { + t.Errorf("unexpected dynamic header file contents: got %q, want %q", string(actualContents), expectedContents) + } +} + +func TestCompile(t *testing.T) { + takeBackup() + defer restoreBackup() + + log.SetupZapLogger(log.GetDefaultLogOpts()) + p := &packetParser{ + cfg: cfgPodLevelEnabled, + l: log.Logger().Named(string(Name)), + } + dir, _ := absPath() + expectedOutputFile := fmt.Sprintf("%s/%s", dir, bpfObjectFileName) + + err := os.Remove(expectedOutputFile) + if err != nil && !errors.Is(err, os.ErrNotExist) { + t.Fatalf("Expected no error. Error: %+v", err) + } + + err = p.Compile(context.Background()) + if err != nil { + t.Fatalf("Expected no error. Error: %+v", err) + } + if _, err := os.Stat(expectedOutputFile); errors.Is(err, os.ErrNotExist) { + t.Fatalf("File %+v doesn't exist", expectedOutputFile) + } +} + +// Helpers. +func takeBackup() { + // Get the directory of the current test file. + _, filename, _, ok := runtime.Caller(0) + if !ok { + panic("failed to determine test file path") + } + currDir := path.Dir(filename) + dynamicHeaderPath := fmt.Sprintf("%s/%s/%s", currDir, bpfSourceDir, dynamicHeaderFileName) + + // Rename the dynamic header file if it already exists. + if _, err := os.Stat(dynamicHeaderPath); err == nil { + if err := os.Rename(dynamicHeaderPath, fmt.Sprintf("%s.bak", dynamicHeaderPath)); err != nil { + panic(fmt.Sprintf("failed to rename existing dynamic header file: %v", err)) + } + } +} + +func restoreBackup() { + // Get the directory of the current test file. + _, filename, _, ok := runtime.Caller(0) + if !ok { + panic("failed to determine test file path") + } + currDir := path.Dir(filename) + dynamicHeaderPath := fmt.Sprintf("%s/%s/%s", currDir, bpfSourceDir, dynamicHeaderFileName) + + // Remove the dynamic header file generated during test. + os.RemoveAll(dynamicHeaderPath) + + // Restore the dynamic header file if it was renamed. + if _, err := os.Stat(fmt.Sprintf("%s.bak", dynamicHeaderPath)); err == nil { + if err := os.Rename(fmt.Sprintf("%s.bak", dynamicHeaderPath), dynamicHeaderPath); err != nil { + panic(fmt.Sprintf("failed to restore dynamic header file: %v", err)) + } + } +} diff --git a/pkg/plugin/packetparser/types_linux.go b/pkg/plugin/packetparser/types_linux.go new file mode 100644 index 000000000..8295d48ea --- /dev/null +++ b/pkg/plugin/packetparser/types_linux.go @@ -0,0 +1,124 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package packetparser + +import ( + "sync" + + kcfg "github.com/microsoft/retina/pkg/config" + + "github.com/microsoft/retina/pkg/enricher" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/plugin/api" + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/perf" + tc "github.com/florianl/go-tc" + "github.com/vishvananda/netlink" +) + +const ( + Name api.PluginName = "packetparser" + toEndpoint string = "toEndpoint" + fromEndpoint string = "fromEndpoint" + Veth string = "veth" + Device string = "device" + Eth0 string = "eth0" + workers int = 2 + buffer int = 10000 + bpfSourceDir string = "_cprog" + bpfSourceFileName string = "packetparser.c" + bpfObjectFileName string = "packetparser_bpf.o" + dynamicHeaderFileName string = "dynamic.h" +) + +var ( + getQdisc = func(tcnl ITc) IQdisc { + return tcnl.Qdisc() + } + getFilter = func(tcnl ITc) IFilter { + return tcnl.Filter() + } + tcOpen = func(config *tc.Config) (ITc, error) { + return tc.Open(config) + } + getFD = func(e *ebpf.Program) int { + return e.FD() + } + // Determined via testing on a large cluster. + // Actual buffer size will be 32 * pagesize. + perCPUBuffer = 32 +) + +type key struct { + name string + hardwareAddr string + netNs int +} + +//go:generate go run github.com/golang/mock/mockgen@v1.6.0 -source=types_linux.go -destination=mocks/mock_types.go -package=mocks + +// Define the interfaces. +type IQdisc interface { + Add(info *tc.Object) error + Delete(info *tc.Object) error +} + +type IFilter interface { + Add(info *tc.Object) error +} + +type ITc interface { + Qdisc() *tc.Qdisc + Filter() *tc.Filter + Close() error +} + +type IPerf interface { + Read() (perf.Record, error) + Close() error +} + +type val struct { + tcnl ITc + tcIngressObj *tc.Object + tcEgressObj *tc.Object +} + +type packetParser struct { + cfg *kcfg.Config + l *log.ZapLogger + callbackID string + objs *packetparserObjects //nolint:typecheck + // tcMap is a map of key to *val. + tcMap *sync.Map + reader IPerf + enricher enricher.EnricherInterface + // interfaceLockMap is a map of key to *sync.Mutex. + interfaceLockMap *sync.Map + endpointIngressInfo *ebpf.ProgramInfo + endpointEgressInfo *ebpf.ProgramInfo + hostIngressInfo *ebpf.ProgramInfo + hostEgressInfo *ebpf.ProgramInfo + wg sync.WaitGroup + recordsChannel chan perf.Record + externalChannel chan *v1.Event +} + +func ifaceToKey(iface netlink.LinkAttrs) key { + return key{ + name: iface.Name, + hardwareAddr: iface.HardwareAddr.String(), + netNs: iface.NetNsID, + } +} + +const ( + handleMajMask uint32 = 0xFFFF0000 + handleMinMask uint32 = 0x0000FFFF +) + +func TC_H_MAKE(maj, min uint32) uint32 { + return (((maj) & handleMajMask) | (min & handleMinMask)) +} diff --git a/pkg/plugin/registry/registry_linux.go b/pkg/plugin/registry/registry_linux.go new file mode 100644 index 000000000..87a3957c0 --- /dev/null +++ b/pkg/plugin/registry/registry_linux.go @@ -0,0 +1,31 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
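+// Package registry maps plugin names to their constructors so the plugin
+// manager can instantiate every plugin supported on this platform.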
+package registry + +import ( + kcfg "github.com/microsoft/retina/pkg/config" + + "github.com/microsoft/retina/pkg/plugin/api" + "github.com/microsoft/retina/pkg/plugin/dns" + "github.com/microsoft/retina/pkg/plugin/dropreason" + "github.com/microsoft/retina/pkg/plugin/linuxutil" + "github.com/microsoft/retina/pkg/plugin/mockplugin" + "github.com/microsoft/retina/pkg/plugin/packetforward" + "github.com/microsoft/retina/pkg/plugin/packetparser" + "github.com/microsoft/retina/pkg/plugin/tcpretrans" +) + +type NewPluginFn func(*kcfg.Config) api.Plugin + +var PluginHandler map[api.PluginName]NewPluginFn + +func RegisterPlugins() { + PluginHandler = make(map[api.PluginName]NewPluginFn, 500) + PluginHandler[dropreason.Name] = dropreason.New + PluginHandler[packetforward.Name] = packetforward.New + PluginHandler[linuxutil.Name] = linuxutil.New + PluginHandler[packetparser.Name] = packetparser.New + PluginHandler[dns.Name] = dns.New + PluginHandler[tcpretrans.Name] = tcpretrans.New + PluginHandler[mockplugin.Name] = mockplugin.New +} diff --git a/pkg/plugin/registry/registry_windows.go b/pkg/plugin/registry/registry_windows.go new file mode 100644 index 000000000..8923695dc --- /dev/null +++ b/pkg/plugin/registry/registry_windows.go @@ -0,0 +1,19 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package registry + +import ( + kcfg "github.com/microsoft/retina/pkg/config" + + "github.com/microsoft/retina/pkg/plugin/api" + "github.com/microsoft/retina/pkg/plugin/windows/hnsstats" +) + +type NewPluginFn func(*kcfg.Config) api.Plugin + +var PluginHandler map[api.PluginName]NewPluginFn + +func RegisterPlugins() { + PluginHandler = make(map[api.PluginName]NewPluginFn, 500) + PluginHandler[hnsstats.Name] = hnsstats.New +} diff --git a/pkg/plugin/tcpretrans/tcpretrans_linux.go b/pkg/plugin/tcpretrans/tcpretrans_linux.go new file mode 100644 index 000000000..9b5835e99 --- /dev/null +++ b/pkg/plugin/tcpretrans/tcpretrans_linux.go @@ -0,0 +1,162 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package tcpretrans + +import ( + "context" + "net" + "strings" + + kcfg "github.com/microsoft/retina/pkg/config" + "github.com/microsoft/retina/pkg/enricher" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/plugin/api" + "github.com/microsoft/retina/pkg/utils" + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" + "github.com/cilium/ebpf/rlimit" + gadgetcontext "github.com/inspektor-gadget/inspektor-gadget/pkg/gadget-context" + "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/tcpretrans/tracer" + "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/tcpretrans/types" + "github.com/pkg/errors" + "go.uber.org/zap" + "golang.org/x/sys/unix" +) + +func New(cfg *kcfg.Config) api.Plugin { + return &tcpretrans{ + cfg: cfg, + l: log.Logger().Named(string(Name)), + } +} + +func (t *tcpretrans) Name() string { + return string(Name) +} + +func (t *tcpretrans) Generate(ctx context.Context) error { + return nil +} + +func (t *tcpretrans) Compile(ctx context.Context) error { + return nil +} + +func (t *tcpretrans) Init() error { + if !t.cfg.EnablePodLevel { + t.l.Warn("tcpretrans will not init because pod level is disabled") + return nil + } + + if err := rlimit.RemoveMemlock(); err != nil { + t.l.Error("RemoveMemLock failed", zap.Error(err)) + return err + } + + // Create tracer. In this case no parameters are passed. 
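+ // The Inspektor Gadget tcpretrans tracer pushes each observed retransmission
+ // to eventHandler, which turns IPv4 events into flows with a RETRANSMISSION
+ // verdict and writes them to the enricher.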
+ t.tracer = &tracer.Tracer{} + t.tracer.SetEventHandler(t.eventHandler) + t.l.Info("Initialized tcpretrans plugin") + return nil +} + +func (t *tcpretrans) Start(ctx context.Context) error { + if !t.cfg.EnablePodLevel { + t.l.Warn("tcpretrans will not start because pod level is disabled") + return nil + } + t.enricher = enricher.Instance() + if t.enricher == nil { + t.l.Error("Failed to get enricher instance") + return errors.New("failed to get enricher instance") + } + t.gadgetCtx = gadgetcontext.New(ctx, "tcpretrans", nil, nil, nil, nil, nil, nil, nil, nil, 0, nil) + + err := t.tracer.Run(t.gadgetCtx) + if err != nil { + t.l.Error("Failed to run tracer", zap.Error(err)) + return err + } + t.l.Info("Started tcpretrans plugin") + return nil +} + +func (t *tcpretrans) Stop() error { + if !t.cfg.EnablePodLevel { + return nil + } + if t.gadgetCtx == nil { + t.l.Warn("tcpretrans plugin does not have a gadget context") + return nil + } + t.gadgetCtx.Cancel() + t.l.Info("Stopped tcpretrans plugin") + return nil +} + +func (t *tcpretrans) SetupChannel(ch chan *v1.Event) error { + t.l.Warn("SetupChannel is not supported by plugin", zap.String("plugin", string(Name))) + return nil +} + +func (t *tcpretrans) eventHandler(event *types.Event) { + if event == nil { + return + } + + if event.IPVersion != 4 { + return + } + + // TODO add metric here or add a enriched value + fl := utils.ToFlow( + int64(event.Timestamp), + net.ParseIP(event.SrcEndpoint.L3Endpoint.Addr).To4(), // Precautionary To4() call. + net.ParseIP(event.DstEndpoint.L3Endpoint.Addr).To4(), // Precautionary To4() call. + uint32(event.SrcEndpoint.Port), + uint32(event.DstEndpoint.Port), + unix.IPPROTO_TCP, // only TCP can have retransmissions + 0, // drop reason packet doesn't have a direction yet, so we set it to 0 + utils.Verdict_RETRANSMISSION, + 0, + ) + + if fl == nil { + t.l.Warn("Could not convert tracer Event to flow", zap.Any("tracer event", event)) + return + } + syn, ack, fin, rst, psh, urg := getTcpFlags(event.Tcpflags) + utils.AddTcpFlags(fl, syn, ack, fin, rst, psh, urg) + + // This is only for development purposes. + // Removing this makes logs way too chatter-y. + // dr.l.Debug("DropReason Packet Received", zap.Any("flow", fl), zap.Any("Raw Bpf Event", bpfEvent), zap.Uint32("drop type", bpfEvent.Key.DropType)) + + // Write the event to the enricher. + t.enricher.Write(&v1.Event{ + Event: fl, + Timestamp: fl.Time, + }) +} + +func getTcpFlags(flags string) (syn, ack, fin, rst, psh, urg uint16) { + // this limiter is used in IG to put all the flags together + syn, ack, fin, rst, psh, urg = 0, 0, 0, 0, 0, 0 + result := strings.Split(flags, "|") + for _, flag := range result { + switch flag { + case "SYN": + syn = 1 + case "ACK": + ack = 1 + case "FIN": + fin = 1 + case "RST": + rst = 1 + case "PSH": + psh = 1 + case "URG": + urg = 1 + } + } + return +} diff --git a/pkg/plugin/tcpretrans/types.go b/pkg/plugin/tcpretrans/types.go new file mode 100644 index 000000000..3d1d2dd61 --- /dev/null +++ b/pkg/plugin/tcpretrans/types.go @@ -0,0 +1,24 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+package tcpretrans + +import ( + kcfg "github.com/microsoft/retina/pkg/config" + "github.com/microsoft/retina/pkg/enricher" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/plugin/api" + gadgetcontext "github.com/inspektor-gadget/inspektor-gadget/pkg/gadget-context" + "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/tcpretrans/tracer" +) + +const ( + Name api.PluginName = "tcpretrans" +) + +type tcpretrans struct { + cfg *kcfg.Config + l *log.ZapLogger + tracer *tracer.Tracer + gadgetCtx *gadgetcontext.GadgetContext + enricher enricher.EnricherInterface +} diff --git a/pkg/plugin/windows/hnsstats/hnsstats_windows.go b/pkg/plugin/windows/hnsstats/hnsstats_windows.go new file mode 100644 index 000000000..d73a09262 --- /dev/null +++ b/pkg/plugin/windows/hnsstats/hnsstats_windows.go @@ -0,0 +1,205 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package hnsstats + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/Microsoft/hcsshim" + "github.com/Microsoft/hcsshim/hcn" + kcfg "github.com/microsoft/retina/pkg/config" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/metrics" + "github.com/microsoft/retina/pkg/plugin/api" + "github.com/microsoft/retina/pkg/utils" + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" + "go.uber.org/zap" +) + +const ( + initialize = iota + 1 + start + stop +) + +const ( + HCN_ENDPOINT_STATE_CREATED = iota + 1 + HCN_ENDPOINT_STATE_ATTACHED + HCN_ENDPOINT_STATE_ATTACHED_SHARING + HCN_ENDPOINT_STATE_DETACHED + HCN_ENDPOINT_STATE_DEGRADED + HCN_ENDPOINT_STATE_DESTROYED +) + +func (h *hnsstats) Name() string { + return string(Name) +} + +func (h *hnsstats) Generate(ctx context.Context) error { + return nil +} + +func (h *hnsstats) Compile(ctx context.Context) error { + return nil +} + +func (h *hnsstats) Init() error { + h.l.Info("Entered hnsstats Init...") + h.state = initialize + // Initialize Endpoint query used to filter healthy endpoints (vNIC) of Windows pods + h.endpointQuery = hcn.HostComputeQuery{ + SchemaVersion: hcn.SchemaVersion{ + Major: 2, + Minor: 0, + }, + Flags: hcn.HostComputeQueryFlagsNone, + } + // Filter out any endpoints that are not in "AttachedShared" State. All running Windows pods with networking must be in this state. 
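+	// The filter is expressed as a JSON-encoded map and attached to the HCN
+	// query, so hcn.ListEndpointsQuery only returns endpoints whose State is
+	// HCN_ENDPOINT_STATE_ATTACHED_SHARING (3).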
+ filterMap := map[string]uint16{"State": HCN_ENDPOINT_STATE_ATTACHED_SHARING} + filter, err := json.Marshal(filterMap) + if err != nil { + return err + } + h.endpointQuery.Filter = string(filter) + + h.l.Info("Exiting hnsstats Init...") + return nil +} + +func (h *hnsstats) SetupChannel(ch chan *v1.Event) error { + h.l.Warn("Plugin does not support SetupChannel", zap.String("plugin", string(Name))) + return nil +} + +func pullHnsStats(ctx context.Context, h *hnsstats) error { + ticker := time.NewTicker(h.cfg.MetricsInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + h.l.Error("hnsstats plugin canceling", zap.Error(ctx.Err())) + return h.Stop() + case <-ticker.C: + // Pull data from node + // Get local endpoints that are healthy + endpoints, err := hcn.ListEndpointsQuery(h.endpointQuery) + if err != nil { + h.l.Error("Getting endpoints failed", zap.Error(err)) + } + // Get VFP Ports + kv, err := getMacToPortGuidMap() + if err != nil { + h.l.Error("Getting Vswitch ports failed", zap.Error(err)) + } + for _, ep := range endpoints { + if len(ep.IpConfigurations) < 1 { + h.l.Info("Skipping endpoint without IPAddress", zap.String("EndpointID", ep.Id)) + continue + } + var mac string = ep.MacAddress + var ip string = ep.IpConfigurations[0].IpAddress + // var id string = ep.Id + if stats, err := hcsshim.GetHNSEndpointStats(ep.Id); err != nil { + h.l.Error("Getting endpoint stats failed for endpoint ID "+ep.Id, zap.Error(err)) + } else { + hnsStatsData := &HnsStatsData{hnscounters: stats, IPAddress: ip} + // h.l.Info(fmt.Sprintf("Fetched HNS endpoints stats for ID: %s, IP %s, MAC %s", id, ip, mac)) + // h.l.Info(hnsStatsData.String()) + // Get VFP port counters for matching port (MAC address of endpoint as the key) + portguid := kv[mac] + if countersRaw, err := getVfpPortCountersRaw(portguid); len(portguid) > 0 && err == nil { + if vfpcounters, err := parseVfpPortCounters(countersRaw); err == nil { + // Attach VFP port counters + hnsStatsData.vfpCounters = vfpcounters + // h.l.Info("Attached VFP port counters for Port " + portguid) + // h.l.Info(vfpcounters.String()) + } else { + h.l.Error("Unable to parse VFP port counters for Port "+portguid, zap.Error(err)) + } + } else { + h.l.Error("Unable to find VFP port counters for MAC "+mac, zap.Error(err)) + } + notifyHnsStats(h, hnsStatsData) + } + } + } + } +} + +func notifyHnsStats(h *hnsstats, stats *HnsStatsData) { + // hns signals + metrics.ForwardCounter.WithLabelValues(ingressLabel).Set(float64(stats.hnscounters.PacketsReceived)) + h.l.Debug(fmt.Sprintf("emitting label %s for value %v", PacketsReceived, stats.hnscounters.PacketsReceived)) + + metrics.ForwardCounter.WithLabelValues(egressLabel).Set(float64(stats.hnscounters.PacketsSent)) + h.l.Debug(fmt.Sprintf("emitting label %s for value %v", PacketsSent, stats.hnscounters.PacketsSent)) + + metrics.ForwardBytesCounter.WithLabelValues(egressLabel).Set(float64(stats.hnscounters.BytesSent)) + h.l.Debug(fmt.Sprintf("emitting label %s for value %v", BytesSent, stats.hnscounters.BytesSent)) + + metrics.ForwardBytesCounter.WithLabelValues(ingressLabel).Set(float64(stats.hnscounters.BytesReceived)) + h.l.Debug(fmt.Sprintf("emitting label %s for value %v", BytesReceived, stats.hnscounters.BytesReceived)) + + metrics.WindowsCounter.WithLabelValues(PacketsReceived).Set(float64(stats.hnscounters.PacketsReceived)) + metrics.WindowsCounter.WithLabelValues(PacketsSent).Set(float64(stats.hnscounters.PacketsSent)) + + metrics.DropCounter.WithLabelValues(utils.Endpoint, 
egressLabel).Set(float64(stats.hnscounters.DroppedPacketsOutgoing)) + metrics.DropCounter.WithLabelValues(utils.Endpoint, ingressLabel).Set(float64(stats.hnscounters.DroppedPacketsIncoming)) + + if stats.vfpCounters == nil { + h.l.Warn("will not record some metrics since VFP port counters failed to be set") + return + } + + metrics.DropCounter.WithLabelValues(utils.AclRule, ingressLabel).Set(float64(stats.vfpCounters.In.DropCounters.AclDropPacketCount)) + metrics.DropCounter.WithLabelValues(utils.AclRule, egressLabel).Set(float64(stats.vfpCounters.Out.DropCounters.AclDropPacketCount)) + + metrics.TCPConnectionStats.WithLabelValues(utils.ResetCount).Set(float64(stats.vfpCounters.In.TcpCounters.ConnectionCounters.ResetCount)) + metrics.TCPConnectionStats.WithLabelValues(utils.ClosedFin).Set(float64(stats.vfpCounters.In.TcpCounters.ConnectionCounters.ClosedFinCount)) + metrics.TCPConnectionStats.WithLabelValues(utils.ResetSyn).Set(float64(stats.vfpCounters.In.TcpCounters.ConnectionCounters.ResetSynCount)) + metrics.TCPConnectionStats.WithLabelValues(utils.TcpHalfOpenTimeouts).Set(float64(stats.vfpCounters.In.TcpCounters.ConnectionCounters.TcpHalfOpenTimeoutsCount)) + metrics.TCPConnectionStats.WithLabelValues(utils.Verified).Set(float64(stats.vfpCounters.In.TcpCounters.ConnectionCounters.VerifiedCount)) + metrics.TCPConnectionStats.WithLabelValues(utils.TimedOutCount).Set(float64(stats.vfpCounters.In.TcpCounters.ConnectionCounters.TimedOutCount)) + metrics.TCPConnectionStats.WithLabelValues(utils.TimeWaitExpiredCount).Set(float64(stats.vfpCounters.In.TcpCounters.ConnectionCounters.TimeWaitExpiredCount)) + + // TCP Flag counters + metrics.TCPFlagCounters.WithLabelValues(ingressLabel, utils.SYN).Set(float64(stats.vfpCounters.In.TcpCounters.PacketCounters.SynPacketCount)) + metrics.TCPFlagCounters.WithLabelValues(ingressLabel, utils.SYNACK).Set(float64(stats.vfpCounters.In.TcpCounters.PacketCounters.SynAckPacketCount)) + metrics.TCPFlagCounters.WithLabelValues(ingressLabel, utils.FIN).Set(float64(stats.vfpCounters.In.TcpCounters.PacketCounters.FinPacketCount)) + metrics.TCPFlagCounters.WithLabelValues(ingressLabel, utils.RST).Set(float64(stats.vfpCounters.In.TcpCounters.PacketCounters.RstPacketCount)) + + metrics.TCPFlagCounters.WithLabelValues(egressLabel, utils.SYN).Set(float64(stats.vfpCounters.Out.TcpCounters.PacketCounters.SynPacketCount)) + metrics.TCPFlagCounters.WithLabelValues(egressLabel, utils.SYNACK).Set(float64(stats.vfpCounters.Out.TcpCounters.PacketCounters.SynAckPacketCount)) + metrics.TCPFlagCounters.WithLabelValues(egressLabel, utils.FIN).Set(float64(stats.vfpCounters.Out.TcpCounters.PacketCounters.FinPacketCount)) + metrics.TCPFlagCounters.WithLabelValues(egressLabel, utils.RST).Set(float64(stats.vfpCounters.Out.TcpCounters.PacketCounters.RstPacketCount)) +} + +func (h *hnsstats) Start(ctx context.Context) error { + h.l.Info("Start hnsstats plugin...") + h.state = start + return pullHnsStats(ctx, h) +} + +func (d *hnsstats) Stop() error { + d.l.Info("Entered hnsstats Stop...") + if d.state != start { + d.l.Info("plugin not started") + return nil + } + d.l.Info("Stopped listening for hnsstats event...") + d.state = stop + d.l.Info("Exiting hnsstats Stop...") + return nil +} + +// New creates an hnsstats plugin. 
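+// It only wires up configuration and a named logger; the endpoint query used
+// by the polling loop is built in Init, and polling starts in Start.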
+func New(cfg *kcfg.Config) api.Plugin { + // Init logger + return &hnsstats{ + cfg: cfg, + l: log.Logger().Named(string(Name)), + } +} diff --git a/pkg/plugin/windows/hnsstats/hnsstats_windows_test.go b/pkg/plugin/windows/hnsstats/hnsstats_windows_test.go new file mode 100644 index 000000000..d46b2c1d4 --- /dev/null +++ b/pkg/plugin/windows/hnsstats/hnsstats_windows_test.go @@ -0,0 +1,37 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package hnsstats + +import ( + "context" + "testing" + "time" + + kcfg "github.com/microsoft/retina/pkg/config" + "github.com/microsoft/retina/pkg/log" + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" +) + +func TestShutdown(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + p := &hnsstats{ + cfg: &kcfg.Config{ + MetricsInterval: 100 * time.Second, + EnablePodLevel: true, + }, + l: log.Logger().Named(string(Name)), + } + p.Init() + ctx, cancel := context.WithCancel(context.Background()) + g, errctx := errgroup.WithContext(ctx) + + g.Go(func() error { + return p.Start(errctx) + }) + + time.Sleep(1 * time.Second) + cancel() + err := g.Wait() + require.NoError(t, err) +} diff --git a/pkg/plugin/windows/hnsstats/types_windows.go b/pkg/plugin/windows/hnsstats/types_windows.go new file mode 100644 index 000000000..f32dbf529 --- /dev/null +++ b/pkg/plugin/windows/hnsstats/types_windows.go @@ -0,0 +1,150 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package hnsstats + +import ( + "context" + "fmt" + "time" + + "github.com/Microsoft/hcsshim" + "github.com/Microsoft/hcsshim/hcn" + kcfg "github.com/microsoft/retina/pkg/config" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/plugin/api" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + + "go.opentelemetry.io/otel/trace" +) + +const ( + Name api.PluginName = "hnsstats" + HnsStatsEvent string = "hnsstatscount" + // From HNSStats API + PacketsReceived string = "win_packets_recv_count" + PacketsSent string = "win_packets_sent_count" + BytesSent string = "win_bytes_sent_count" + BytesReceived string = "win_bytes_recv_count" + DroppedPacketsIncoming string = "win_packets_recv_drop_count" + DroppedPacketsOutgoing string = "win_packets_sent_drop_count" + DummyPort string = "80" // Required arg in GetLocalEventAttributes + // From VFP TCP counters + // IN: + // VFP TCP Packet Counters IN + SynPacketCountIn string = "win_tcp_recv_syn_packet_count" + SynAckPacketCountIn string = "win_tcp_recv_syn_ack_packet_count" + FinPacketCountIn string = "win_tcp_recv_fin_packet_count" + RstPacketCountIn string = "win_tcp_recv_rst_packet_count" + // VFP DROP Counters IN + AclDropPacketCountIn string = "win_acl_recv_drop_packet_count" + // VFP TCP Connections Counters IN + VerifiedCountIn string = "win_tcp_recv_verified_count" + TimedOutCountIn string = "win_tcp_recv_timedout_count" + ResetCountIn string = "win_tcp_recv_rst_count" + ResetSynCountIn string = "win_tcp_recv_rst_syn_count" + ClosedFinCountIn string = "win_tcp_recv_closed_fin_count" + TcpHalfOpenTimeoutsCountIn string = "win_tcp_recv_half_open_timeout_count" + TimeWaitExpiredCountIn string = "win_tcp_recv_time_wait_expired_count" + // OUT: + // VFP TCP Packet Counters OUT + SynPacketCountOut string = "win_tcp_sent_syn_packet_count" + SynAckPacketCountOut string = "win_tcp_sent_syn_ack_packet_count" + FinPacketCountOut string = "win_tcp_sent_fin_packet_count" + RstPacketCountOut string = "win_tcp_sent_rst_packet_count" + // VFP 
DROP Counters Out + AclDropPacketCountOut string = "win_acl_sent_drop_packet_count" + // VFP TCP Connection Counters OUT + VerifiedCountOut string = "win_tcp_sent_verified_count" + TimedOutCountOut string = "win_tcp_sent_timedout_count" + ResetCountOut string = "win_tcp_sent_rst_count" + ResetSynCountOut string = "win_tcp_sent_rst_syn_count" + ClosedFinCountOut string = "win_tcp_sent_closed_fin_count" + TcpHalfOpenTimeoutsCountOut string = "win_tcp_sent_half_open_timeout_count" + TimeWaitExpiredCountOut string = "win_tcp_sent_time_wait_expired_count" + + // metrics direction + ingressLabel = "ingress" + egressLabel = "egress" +) + +type hnsstats struct { + cfg *kcfg.Config + interval time.Duration + state int + l *log.ZapLogger + endpointQuery hcn.HostComputeQuery +} + +type HnsStatsData struct { + hnscounters *hcsshim.HNSEndpointStats + IPAddress string + vfpCounters *VfpPortStatsData +} + +// handles event signals such as incrementing a metric counter +func (h *HnsStatsData) HandlePluginEventSignals(attr []attribute.KeyValue, m metric.Meter, t trace.Tracer) { + h.addHnsStatsEventCounters(&attr, m) + h.vfpCounters.addVfpStatsEventCounters(&attr, m) +} + +// not used at the moment, but persisting until everything is ported to metrics.* +// Adds HNS endpoint stats counters +func (h *HnsStatsData) addHnsStatsEventCounters(attr *[]attribute.KeyValue, m metric.Meter) { + updateCounter(PacketsReceived, attr, m, int64(h.hnscounters.PacketsReceived)) + updateCounter(PacketsSent, attr, m, int64(h.hnscounters.PacketsSent)) + updateCounter(BytesSent, attr, m, int64(h.hnscounters.BytesSent)) + updateCounter(BytesReceived, attr, m, int64(h.hnscounters.BytesReceived)) + updateCounter(DroppedPacketsIncoming, attr, m, int64(h.hnscounters.DroppedPacketsIncoming)) + updateCounter(DroppedPacketsOutgoing, attr, m, int64(h.hnscounters.DroppedPacketsOutgoing)) +} + +// not used at the moment, but persisting until everything is ported to metrics.* +// Adds VFP stats counters +func (v *VfpPortStatsData) addVfpStatsEventCounters(attr *[]attribute.KeyValue, m metric.Meter) { + // IN TCP Packet counters + updateCounter(SynPacketCountIn, attr, m, int64(v.In.TcpCounters.PacketCounters.SynPacketCount)) + updateCounter(SynAckPacketCountIn, attr, m, int64(v.In.TcpCounters.PacketCounters.SynAckPacketCount)) + updateCounter(FinPacketCountIn, attr, m, int64(v.In.TcpCounters.PacketCounters.FinPacketCount)) + updateCounter(RstPacketCountIn, attr, m, int64(v.In.TcpCounters.PacketCounters.RstPacketCount)) + // IN DROP counters: + updateCounter(AclDropPacketCountIn, attr, m, int64(v.In.DropCounters.AclDropPacketCount)) + // IN TCP Connection Counters + updateCounter(VerifiedCountIn, attr, m, int64(v.In.TcpCounters.ConnectionCounters.VerifiedCount)) + updateCounter(TimedOutCountIn, attr, m, int64(v.In.TcpCounters.ConnectionCounters.TimedOutCount)) + updateCounter(ResetCountIn, attr, m, int64(v.In.TcpCounters.ConnectionCounters.ResetCount)) + updateCounter(ResetSynCountIn, attr, m, int64(v.In.TcpCounters.ConnectionCounters.ResetSynCount)) + updateCounter(ClosedFinCountIn, attr, m, int64(v.In.TcpCounters.ConnectionCounters.ClosedFinCount)) + updateCounter(TcpHalfOpenTimeoutsCountIn, attr, m, int64(v.In.TcpCounters.ConnectionCounters.TcpHalfOpenTimeoutsCount)) + updateCounter(TimeWaitExpiredCountIn, attr, m, int64(v.In.TcpCounters.ConnectionCounters.TimeWaitExpiredCount)) + // OUT TCP Packet counters + updateCounter(SynPacketCountOut, attr, m, int64(v.Out.TcpCounters.PacketCounters.SynPacketCount)) + 
updateCounter(SynAckPacketCountOut, attr, m, int64(v.Out.TcpCounters.PacketCounters.SynAckPacketCount)) + updateCounter(FinPacketCountOut, attr, m, int64(v.Out.TcpCounters.PacketCounters.FinPacketCount)) + updateCounter(RstPacketCountOut, attr, m, int64(v.Out.TcpCounters.PacketCounters.RstPacketCount)) + // OUT DROP counters: + updateCounter(AclDropPacketCountOut, attr, m, int64(v.Out.DropCounters.AclDropPacketCount)) + // OUT TCP Connection Counters + updateCounter(VerifiedCountOut, attr, m, int64(v.Out.TcpCounters.ConnectionCounters.VerifiedCount)) + updateCounter(TimedOutCountOut, attr, m, int64(v.Out.TcpCounters.ConnectionCounters.TimedOutCount)) + updateCounter(ResetCountOut, attr, m, int64(v.Out.TcpCounters.ConnectionCounters.ResetCount)) + updateCounter(ResetSynCountOut, attr, m, int64(v.Out.TcpCounters.ConnectionCounters.ResetSynCount)) + updateCounter(ClosedFinCountOut, attr, m, int64(v.Out.TcpCounters.ConnectionCounters.ClosedFinCount)) + updateCounter(TcpHalfOpenTimeoutsCountOut, attr, m, int64(v.Out.TcpCounters.ConnectionCounters.TcpHalfOpenTimeoutsCount)) + updateCounter(TimeWaitExpiredCountOut, attr, m, int64(v.Out.TcpCounters.ConnectionCounters.TimeWaitExpiredCount)) +} + +func updateCounter(counterName string, attr *[]attribute.KeyValue, m metric.Meter, count int64) { + // metric + cnter, err := m.Int64Counter(counterName) + // Convert the attributes to metric AddOption. + opt := metric.WithAttributes(*attr...) + if err == nil { + cnter.Add(context.TODO(), count, opt) + } +} + +func (h *HnsStatsData) String() string { + return fmt.Sprintf("Endpoint ID: %s, Packets received: %d, Packets sent %d, Bytes sent %d, Bytes received %d", + h.hnscounters.EndpointID, h.hnscounters.PacketsReceived, h.hnscounters.PacketsSent, h.hnscounters.BytesSent, h.hnscounters.BytesReceived) +} diff --git a/pkg/plugin/windows/hnsstats/vfp_counters_windows.go b/pkg/plugin/windows/hnsstats/vfp_counters_windows.go new file mode 100644 index 000000000..565ea2ba9 --- /dev/null +++ b/pkg/plugin/windows/hnsstats/vfp_counters_windows.go @@ -0,0 +1,216 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
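+// This file shells out to vfpctrl to collect per-port VFP counters and parses
+// the raw "identifier: value" output into VfpPortStatsData (see
+// getVfpPortCountersRaw and parseVfpPortCounters).
+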
+package hnsstats + +import ( + "fmt" + "os/exec" + "strconv" + "strings" +) + +// IN/OUT Direction of vSwitch VFP port +type Direction int + +const ( + OUT Direction = iota + IN +) + +type VfpPortStatsData struct { + // IN counters + In VfpDirectedPortCounters + // OUT counters + Out VfpDirectedPortCounters +} + +type VfpDirectedPortCounters struct { + direction Direction + // Contains TCP-level counters + TcpCounters VfpTcpStats + // Contains generic drop counters + DropCounters VfpPacketDropStats +} + +type VfpTcpStats struct { + ConnectionCounters VfpTcpConnectionStats + PacketCounters VfpTcpPacketStats +} + +type VfpPacketDropStats struct { + AclDropPacketCount uint64 +} + +type VfpTcpPacketStats struct { + SynPacketCount uint64 + SynAckPacketCount uint64 + FinPacketCount uint64 + RstPacketCount uint64 +} + +type VfpTcpConnectionStats struct { + VerifiedCount uint64 + TimedOutCount uint64 + ResetCount uint64 + ResetSynCount uint64 + ClosedFinCount uint64 + TcpHalfOpenTimeoutsCount uint64 + TimeWaitExpiredCount uint64 +} + +// Attach TCP counters from vfpctrl /get-port-counters lines +func attachVfpCounter(stats *VfpDirectedPortCounters, identifier string, ucount uint64) { + // TCP Packet identifiers + synIdentifier := "SYNpackets" + synAckIdentifier := "SYN-ACKpackets" + finIdentifier := "FINpackets" + rstIdentifier := "RSTpackets" + // TCP conn identifiers + tcpConnVerifiedIdentifier := "TCPConnectionsVerified" + tcpConnTimedOutIdentifier := "TCPConnectionsTimedOut" + tcpResetIdentifier := "TCPConnectionsReset" + tcpResetSynIdentifier := "TCPConnectionsResetbySYN" + tcpConnClosedFinIdentifier := "TCPConnectionsClosedbyFIN" + tcpHalfOpenTimeoutIdentifier := "TCPHalfOpenTimeouts" + tcpConnExpiredTimeWaitIdentifier := "TCPConnectionsExpiredtoTimeWait" + // Drop Packet counters + dropAclPacketCounter := "DroppedACLpackets" + + switch identifier { + // TCP packet counters + case synIdentifier: + stats.TcpCounters.PacketCounters.SynPacketCount = ucount + case synAckIdentifier: + stats.TcpCounters.PacketCounters.SynAckPacketCount = ucount + case finIdentifier: + stats.TcpCounters.PacketCounters.FinPacketCount = ucount + case rstIdentifier: + stats.TcpCounters.PacketCounters.RstPacketCount = ucount + // TCP connection ucounters + case tcpConnVerifiedIdentifier: + stats.TcpCounters.ConnectionCounters.VerifiedCount = ucount + case tcpConnTimedOutIdentifier: + stats.TcpCounters.ConnectionCounters.TimedOutCount = ucount + case tcpResetIdentifier: + stats.TcpCounters.ConnectionCounters.ResetCount = ucount + case tcpResetSynIdentifier: + stats.TcpCounters.ConnectionCounters.ResetSynCount = ucount + case tcpConnClosedFinIdentifier: + stats.TcpCounters.ConnectionCounters.ClosedFinCount = ucount + case tcpHalfOpenTimeoutIdentifier: + stats.TcpCounters.ConnectionCounters.TcpHalfOpenTimeoutsCount = ucount + case tcpConnExpiredTimeWaitIdentifier: + stats.TcpCounters.ConnectionCounters.TimeWaitExpiredCount = ucount + case dropAclPacketCounter: + stats.DropCounters.AclDropPacketCount = ucount + } +} + +// This will extract all the VFP Port counters. +func parseVfpPortCounters(countersRaw string) (*VfpPortStatsData, error) { + delim := "\r\n" + portCounters := &VfpPortStatsData{} + DirectionInIdentifier := "Direction-IN" + + // Remove spaces + countersRaw = strings.ReplaceAll(countersRaw, " ", "") + // Split string into Direction OUT/IN. Out always comes first (direction = 0). 
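+	// Splitting on "Direction-IN" leaves index 0 holding the OUT section and
+	// index 1 the IN section, which matches the Direction enum (OUT = 0,
+	// IN = 1) used in the switch below.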
+ for direction, countersRaw := range strings.Split(countersRaw, DirectionInIdentifier) { + for _, line := range strings.Split(countersRaw, delim) { + // Identifier: Value + fields := strings.Split(line, ":") + // Skip redundant lines + if len(fields) != 2 { + continue + } + // Extract count and convert to uint64 + count, err := strconv.ParseInt(fields[1], 10, 64) + identifier := fields[0] + if err != nil { + return nil, err + } + var ucount uint64 = uint64(count) + // Populate VfpPort stats + switch Direction(direction) { + case OUT: + portCounters.Out.direction = OUT + attachVfpCounter(&portCounters.Out, identifier, ucount) + case IN: + portCounters.In.direction = IN + attachVfpCounter(&portCounters.In, identifier, ucount) + } + } + } + return portCounters, nil +} + +// getVfpPortCountersRaw will return the raw vfpctrl port counter output for the given port. +func getVfpPortCountersRaw(portGUID string) (string, error) { + vfpCmd := fmt.Sprintf("vfpctrl /port %s /get-port-counter", portGUID) + + cmd := exec.Command("cmd", "/c", vfpCmd) + out, err := cmd.Output() + + return string(out), err +} + +// TODO: Remove this once Resources.Allocators.EndpointPortGuid gets added to hcsshim Endpoint struct +// Lists all vSwitch ports +func listvPorts() ([]byte, error) { + return exec.Command("cmd", "/c", "vfpctrl /list-vmswitch-port").CombinedOutput() +} + +// TODO: Remove this once Resources.Allocators.EndpointPortGuid gets added to hcsshim Endpoint struct +// getMacToPortGuidMap returns a map storing MAC Address => VFP Port GUID mappings +func getMacToPortGuidMap() (kv map[string]string, err error) { + kv = make(map[string]string) + out, err := listvPorts() + if err != nil { + return kv, err + } + // Some very case-specific parsing + delim := "\r\n" + portList := string(out) + portList = strings.ReplaceAll(portList, " ", "") + for _, port := range strings.Split(portList, delim+delim) { + // Skip port if there is no Portname or MACAddress field. 
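+		// Each remaining block is a set of "key:value" lines; Portname and
+		// MACaddress are extracted below, and the MAC becomes the lookup key
+		// for the port GUID.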
+ if !strings.Contains(port, "Portname") || !strings.Contains(port, "MACaddress") { + continue + } + // Populate map[mac] => VFP port name guid + var mac string + var portName string + for _, line := range strings.Split(port, delim) { + fields := strings.Split(line, ":") + switch fields[0] { + case "Portname": + portName = fields[1] + case "MACaddress": + mac = fields[1] + } + } + kv[mac] = portName + } + return +} + +func (tcpCounters *VfpTcpStats) String() string { + return fmt.Sprintf( + "Packets:\n SYN %d,\n SYN-ACK %d,\n FIN %d,\n RST %d\n"+ + "TCP Connections:\n Verified %d,\n TimedOut %d,\n Reset %d,\n Reset-Syn %d,\n"+ + " ClosedFin %d,\n HalfOpenTimeout %d,\n TimeWaitExpired %d\n ", + tcpCounters.PacketCounters.SynPacketCount, tcpCounters.PacketCounters.SynAckPacketCount, + tcpCounters.PacketCounters.FinPacketCount, tcpCounters.PacketCounters.RstPacketCount, + tcpCounters.ConnectionCounters.VerifiedCount, tcpCounters.ConnectionCounters.TimedOutCount, + tcpCounters.ConnectionCounters.ResetCount, tcpCounters.ConnectionCounters.ResetSynCount, + tcpCounters.ConnectionCounters.ClosedFinCount, tcpCounters.ConnectionCounters.TcpHalfOpenTimeoutsCount, + tcpCounters.ConnectionCounters.TimeWaitExpiredCount) +} + +func (drops *VfpPacketDropStats) String() string { + return fmt.Sprintf("Drops:\n Dropped ACL packets %d\n", drops.AclDropPacketCount) +} + +func (v *VfpPortStatsData) String() string { + return fmt.Sprintf("\nDirection IN:\n %s%s\n", v.In.TcpCounters.String(), v.In.DropCounters.String()) + fmt.Sprintf("Direction OUT:\n %s%s\n", v.Out.TcpCounters.String(), v.Out.DropCounters.String()) +} diff --git a/pkg/pubsub/mock_pubsubinterface.go b/pkg/pubsub/mock_pubsubinterface.go new file mode 100644 index 000000000..d18843ffe --- /dev/null +++ b/pkg/pubsub/mock_pubsubinterface.go @@ -0,0 +1,80 @@ +// autogenerated +// +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/microsoft/retina/pkg/pubsub (interfaces: PubSubInterface) + +// Package pubsub is a generated GoMock package. +package pubsub + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockPubSubInterface is a mock of PubSubInterface interface. +type MockPubSubInterface struct { + ctrl *gomock.Controller + recorder *MockPubSubInterfaceMockRecorder +} + +// MockPubSubInterfaceMockRecorder is the mock recorder for MockPubSubInterface. +type MockPubSubInterfaceMockRecorder struct { + mock *MockPubSubInterface +} + +// NewMockPubSubInterface creates a new mock instance. +func NewMockPubSubInterface(ctrl *gomock.Controller) *MockPubSubInterface { + mock := &MockPubSubInterface{ctrl: ctrl} + mock.recorder = &MockPubSubInterfaceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockPubSubInterface) EXPECT() *MockPubSubInterfaceMockRecorder { + return m.recorder +} + +// Publish mocks base method. +func (m *MockPubSubInterface) Publish(arg0 PubSubTopic, arg1 interface{}) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Publish", arg0, arg1) +} + +// Publish indicates an expected call of Publish. +func (mr *MockPubSubInterfaceMockRecorder) Publish(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Publish", reflect.TypeOf((*MockPubSubInterface)(nil).Publish), arg0, arg1) +} + +// Subscribe mocks base method. 
+func (m *MockPubSubInterface) Subscribe(arg0 PubSubTopic, arg1 *CallBackFunc) string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Subscribe", arg0, arg1) + ret0, _ := ret[0].(string) + return ret0 +} + +// Subscribe indicates an expected call of Subscribe. +func (mr *MockPubSubInterfaceMockRecorder) Subscribe(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Subscribe", reflect.TypeOf((*MockPubSubInterface)(nil).Subscribe), arg0, arg1) +} + +// Unsubscribe mocks base method. +func (m *MockPubSubInterface) Unsubscribe(arg0 PubSubTopic, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Unsubscribe", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// Unsubscribe indicates an expected call of Unsubscribe. +func (mr *MockPubSubInterfaceMockRecorder) Unsubscribe(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unsubscribe", reflect.TypeOf((*MockPubSubInterface)(nil).Unsubscribe), arg0, arg1) +} diff --git a/pkg/pubsub/pubsub.go b/pkg/pubsub/pubsub.go new file mode 100644 index 000000000..70ddc4d86 --- /dev/null +++ b/pkg/pubsub/pubsub.go @@ -0,0 +1,113 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package pubsub + +import ( + "fmt" + "sync" + + "github.com/microsoft/retina/pkg/log" + "github.com/google/uuid" + "go.uber.org/zap" +) + +var ( + p *PubSub + once sync.Once +) + +type PubSub struct { + sync.RWMutex + // l is the logger. + l *log.ZapLogger + // topicToCallback is a map of topic to a map of callback function + topicToCallback map[PubSubTopic]map[string]*CallBackFunc +} + +// New returns a new instance of PubSub. +func New() *PubSub { + once.Do(func() { + p = &PubSub{ + l: log.Logger().Named(string("PubSub")), + topicToCallback: make(map[PubSubTopic]map[string]*CallBackFunc), + } + }) + + return p +} + +// Publish publishes the given message to the given topic, +// and calls all the callback functions subscribed to the topic. +func (p *PubSub) Publish(topic PubSubTopic, msg interface{}) { + p.RLock() + defer p.RUnlock() + + // If there are no callbacks for the given topic, return nil. + if _, ok := p.topicToCallback[topic]; !ok { + p.l.Debug("no callbacks for topic", zap.String("topic", string(topic))) + return + } + + // Run all callbacks in parallel using a wait group. + for uuid, callback := range p.topicToCallback[topic] { + callback := callback + p.l.Debug("running callback", zap.String("topic", string(topic)), zap.String("uuid", uuid)) + go func() { + (*callback)(msg) + }() + } +} + +// Subscribe subscribes to the given topic and calls the given callback function +// when a message is published to the topic, it returns a new uuid of the callback. +func (p *PubSub) Subscribe(topic PubSubTopic, callback *CallBackFunc) string { + p.Lock() + defer p.Unlock() + + // If the topic does not exist, create it. + if _, ok := p.topicToCallback[topic]; !ok { + p.topicToCallback[topic] = make(map[string]*CallBackFunc) + } + + // Generate a new uuid for the callback. + uuid := uuid.New().String() + // Add the callback to the topic. + p.topicToCallback[topic][uuid] = callback + p.l.Debug("subscribed to topic", zap.String("topic", string(topic)), zap.String("uuid", uuid)) + + return uuid +} + +// Unsubscribe unsubscribes from the given topic. 
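+// An empty uuid is rejected with an error; unknown topics or uuids are
+// treated as no-ops, and a topic entry is removed once its last callback is
+// unsubscribed.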
+func (p *PubSub) Unsubscribe(topic PubSubTopic, uuid string) error { + p.Lock() + defer p.Unlock() + + if uuid == "" { + return fmt.Errorf("uuid cannot be empty") + } + + // If the topic does not exist, return nil. + if _, ok := p.topicToCallback[topic]; !ok { + p.l.Debug("no callbacks for topic", zap.String("topic", string(topic))) + return nil + } + + // If the callback does not exist, return nil. + if _, ok := p.topicToCallback[topic][uuid]; !ok { + p.l.Debug("callback does not exist", zap.String("topic", string(topic)), zap.String("uuid", uuid)) + return nil + } + + // Delete the callback from the topic. + delete(p.topicToCallback[topic], uuid) + p.l.Debug("unsubscribed from topic", zap.String("topic", string(topic)), zap.String("uuid", uuid)) + + // Delete the topic if there are no callbacks left. + if len(p.topicToCallback[topic]) == 0 { + delete(p.topicToCallback, topic) + p.l.Debug("deleted topic", zap.String("topic", string(topic))) + } + + return nil +} diff --git a/pkg/pubsub/pubsub_test.go b/pkg/pubsub/pubsub_test.go new file mode 100644 index 000000000..b5b4181af --- /dev/null +++ b/pkg/pubsub/pubsub_test.go @@ -0,0 +1,114 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package pubsub + +import ( + "testing" + "time" + + "github.com/microsoft/retina/pkg/log" + "github.com/stretchr/testify/assert" +) + +const ( + until = 1 * time.Millisecond +) + +func TestNewPubSub(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + p := New() + assert.NotNil(t, p) +} + +func TestPublish(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + p := New() + p.Publish("topic", "msg") +} + +func TestSubscribe(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + p := New() + cb := CallBackFunc(func(msg interface{}) {}) + + uuid := p.Subscribe("topic", &cb) + assert.NotEmpty(t, uuid) +} + +func TestUnsubscribe(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + p := New() + cb := CallBackFunc(func(msg interface{}) {}) + + uuid := p.Subscribe("topic", &cb) + err := p.Unsubscribe("topic", uuid) + assert.NoError(t, err) +} + +func TestMultipleSubscribe(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + cb := CallBackFunc(func(msg interface{}) {}) + + p := New() + uuid1 := p.Subscribe("topic", &cb) + uuid2 := p.Subscribe("topic", &cb) + assert.NotEmpty(t, uuid1) + assert.NotEmpty(t, uuid2) +} + +func TestPubSub(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + ps := New() + + // Publisher 1 publishes a message to topic "topic1" + ps.Publish("topic1", "Hello from Publisher 1!") + + // Publisher 2 publishes a message to topic "topic2" + ps.Publish("topic2", "Hello from Publisher 2!") + + cb1 := CallBackFunc(func(msg interface{}) { + if msg.(string) != "Hello from Publisher 1!" { + t.Errorf("Expected 'Hello from Publisher 1!', got %s", msg) + } + }) + + cb2 := CallBackFunc(func(msg interface{}) { + if msg.(string) != "Hello from Publisher 2!" 
{ + t.Errorf("Expected 'Hello from Publisher 2!', got %s", msg) + } + }) + + // Subscriber 1 subscribes to topic "topic1" + subID1 := ps.Subscribe("topic1", &cb1) + defer func() { + // Unsubscribe Subscriber 1 + err := ps.Unsubscribe("topic1", subID1) + if err != nil { + t.Errorf("Failed to unsubscribe: %v", err) + } + }() + + // Subscriber 2 subscribes to topic "topic2" + subID2 := ps.Subscribe("topic2", &cb2) + defer func() { + // Unsubscribe Subscriber 2 + err := ps.Unsubscribe("topic2", subID2) + if err != nil { + t.Errorf("Failed to unsubscribe: %v", err) + } + }() + + // Publisher 1 publishes another message to topic "topic1" + ps.Publish("topic1", "Hello from Publisher 1!") + + // Publisher 2 publishes another message to topic "topic2" + ps.Publish("topic2", "Hello from Publisher 2!") + + err := ps.Unsubscribe("topic1", "randomid") + if err != nil { + t.Errorf("Failed to unsubscribe: %v", err) + } + + time.Sleep(until) +} diff --git a/pkg/pubsub/types.go b/pkg/pubsub/types.go new file mode 100644 index 000000000..81fba20be --- /dev/null +++ b/pkg/pubsub/types.go @@ -0,0 +1,20 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package pubsub + +type CallBackFunc func(interface{}) + +//go:generate go run github.com/golang/mock/mockgen@v1.6.0 -destination=mock_pubsubinterface.go -copyright_file=../lib/ignore_headers.txt -package=pubsub github.com/microsoft/retina/pkg/pubsub PubSubInterface + +// this file defines the interface a simple pubsub implementation should implement +type PubSubInterface interface { + // Publish publishes the given message to the given topic + Publish(topic PubSubTopic, msg interface{}) + // Subscribe subscribes to the given topic and calls the given callback function + // when a message is published to the topic + Subscribe(topic PubSubTopic, callback *CallBackFunc) string + // Unsubscribe unsubscribes from the given topic + Unsubscribe(topic PubSubTopic, uuid string) error +} + +type PubSubTopic string diff --git a/pkg/server/server.go b/pkg/server/server.go new file mode 100644 index 000000000..fc8dffdb1 --- /dev/null +++ b/pkg/server/server.go @@ -0,0 +1,95 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
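+// Package server serves the Prometheus /metrics endpoint and the pprof debug
+// handlers on a chi router, and shuts the HTTP server down gracefully when
+// the supplied context is cancelled. A minimal caller, as exercised by
+// server_test.go, does New(logger), SetupHandlers(), then Start(ctx, addr).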
+package server + +import ( + "context" + "net/http" + "net/http/pprof" + "time" + + "github.com/microsoft/retina/pkg/exporter" + "github.com/microsoft/retina/pkg/log" + "github.com/go-chi/chi/middleware" + "github.com/go-chi/chi/v5" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus/promhttp" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" +) + +type Server struct { + l *log.ZapLogger + mux *chi.Mux +} + +func New(logger *log.ZapLogger) *Server { + r := chi.NewRouter() + r.Use( + middleware.RequestID, + middleware.RealIP, + middleware.Recoverer, + middleware.Timeout(60*time.Second), + ) + + return &Server{ + l: logger, + mux: r, + } +} + +func (rt *Server) SetupHandlers() { + rt.l.Info("Setting up handlers") + rt.servePrometheusMetrics() + exporter.RegisterMetricsServeCallback(func() { + rt.servePrometheusMetrics() + }) + rt.mux.HandleFunc("/debug/pprof/", pprof.Index) + rt.mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) + rt.mux.HandleFunc("/debug/pprof/profile", pprof.Profile) + rt.mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + rt.mux.HandleFunc("/debug/pprof/trace", pprof.Trace) + rt.mux.Handle("/debug/pprof/heap", pprof.Handler("heap")) + rt.mux.Handle("/debug/pprof/block", pprof.Handler("block")) + rt.mux.Handle("/debug/pprof/goroutine", pprof.Handler("goroutine")) + rt.mux.Handle("/debug/pprof/allocs", pprof.Handler("allocs")) + rt.mux.Handle("/debug/pprof/threadcreate", pprof.Handler("threadcreate")) + rt.l.Info("Completed handler setup") +} + +func (rt *Server) servePrometheusMetrics() { + rt.mux.Get("/metrics", promhttp.HandlerFor(exporter.CombinedGatherer, promhttp.HandlerOpts{}).ServeHTTP) +} + +func (rt *Server) Start(ctx context.Context, addr string) error { + srv := &http.Server{Addr: addr, Handler: rt.mux} + g, gctx := errgroup.WithContext(context.Background()) + + g.Go(func() error { + rt.l.Info("starting HTTP server... 
on ", zap.String("addr", addr)) + if err := srv.ListenAndServe(); err != nil { + rt.l.Sugar().Infof("HTTP server stopped with err: %v", err) + return err + } + return nil + }) + + select { + case <-ctx.Done(): + rt.l.Info("gracefully shutting down HTTP server...") + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + err := srv.Shutdown(ctx) + if err != nil { + return errors.Wrapf(err, "failed to gracefully shutdown HTTP server") + } + + // wait for listenAndServe to return + <-gctx.Done() + + case <-gctx.Done(): + return errors.Wrapf(gctx.Err(), "failed to start HTTP server") + } + + return nil +} diff --git a/pkg/server/server_test.go b/pkg/server/server_test.go new file mode 100644 index 000000000..5a9b16aa4 --- /dev/null +++ b/pkg/server/server_test.go @@ -0,0 +1,56 @@ +package server + +import ( + "context" + "testing" + "time" + + "github.com/microsoft/retina/pkg/log" + + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" +) + +func TestServerShutdown(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + s := New(log.Logger().Named("http-server")) + s.SetupHandlers() + + ctx, cancel := context.WithCancel(context.Background()) + g, errctx := errgroup.WithContext(ctx) + + g.Go(func() error { + return s.Start(errctx, "localhost:10093") + }) + + // wait for server to start + time.Sleep(2 * time.Second) + cancel() + _ = g.Wait() + + // require.NoError(t, err) + // Ignoring the error check since this can cause transient errors in CI +} + +func TestServerStartOnUsedPort(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + s := New(log.Logger().Named("http-server")) + s.SetupHandlers() + + ctx, cancel := context.WithCancel(context.Background()) + g, errctx := errgroup.WithContext(ctx) + + g.Go(func() error { + return s.Start(errctx, "localhost:10093") + }) + + g.Go(func() error { + return s.Start(errctx, "localhost:10093") + }) + + time.Sleep(2 * time.Second) + cancel() + err := g.Wait() + + require.Error(t, err) +} diff --git a/pkg/store/dagger.go b/pkg/store/dagger.go new file mode 100644 index 000000000..2ce4657a5 --- /dev/null +++ b/pkg/store/dagger.go @@ -0,0 +1,525 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package store + +import ( + "errors" + "strings" + + "github.com/autom8ter/dagger" +) + +const ( + NodeXIDDelimiter = "." 
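+	// Node XIDs take the form "<namespace>.<name>" and edge XIDs join the two
+	// node XIDs with "->" (see generateNodeXID and generateEdgeXID).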
+ EdgeXIDDelimiter = "->" + + NodeXTypePod = "pod" + NodeXTypeSvc = "service" + // XXX: Not covered until we know how to represent this + NodeXTypeExt = "ext" + + EdgeXTypeConnection = "connection" + + ReservedAttributeNodeType = "node_type" + ReservedAttributeEdgeType = "edge_type" + ReservedAttributeName = "name" + ReservedAttributeNamespace = "namespace" + ReservedAttributeXID = "xid" +) + +type Dagger struct { + // logger *log.ZapLogger + graph *dagger.Graph +} + +// func NewDaggerStore(logger *log.ZapLogger) *Dagger { +func NewDaggerStore() *Dagger { + d := dagger.NewGraph() + graph := &Dagger{ + // logger: logger, + graph: d, + } + + return graph +} + +func (d *Dagger) AddNode(node Node) { + xtype := string(NodeXTypeFromString(node.objType)) + + if node.attributes == nil { + node.attributes = make(map[string]interface{}) + } + + node.attributes[ReservedAttributeNodeType] = xtype + node.attributes[ReservedAttributeName] = node.name + node.attributes[ReservedAttributeNamespace] = node.namespace + + _ = d.graph.SetNode(dagger.Path{ + XID: generateNodeXID(node.name, node.namespace), + XType: xtype, + }, node.attributes) +} + +func (d *Dagger) DelNode(node Node) { + d.graph.DelNode(dagger.Path{ + XID: generateNodeXID(node.name, node.namespace), + XType: NodeXTypeFromString(node.objType), + }) +} + +func (d *Dagger) GetNodeAttributes(node Node) (map[string]interface{}, error) { + n, ok := d.graph.GetNode(dagger.Path{ + XID: generateNodeXID(node.name, node.namespace), + XType: NodeXTypeFromString(node.objType), + }) + if !ok { + return nil, errors.New("node not found") + } + + return n.Attributes, nil +} + +func (d *Dagger) GetNodeAttribute(node Node, key string) (interface{}, error) { + n, ok := d.graph.GetNode(dagger.Path{ + XID: generateNodeXID(node.name, node.namespace), + XType: NodeXTypeFromString(node.objType), + }) + if !ok { + return nil, errors.New("node not found") + } + + v, ok := n.Attributes[key] + if !ok { + return nil, errors.New("attribute not found") + } + return v, nil +} + +func (d *Dagger) SetNodeAttributes(node Node, attributes map[string]interface{}) (map[string]interface{}, error) { + // TODO: Prevent overwriting reserved keys + n, ok := d.graph.GetNode(dagger.Path{ + XID: generateNodeXID(node.name, node.namespace), + XType: NodeXTypeFromString(node.objType), + }) + if !ok { + return nil, errors.New("node not found") + } + + for k, v := range attributes { + n.Attributes[k] = v + } + + _ = d.graph.SetNode(n.Path, n.Attributes) + + return n.Attributes, nil +} + +func (d *Dagger) SetNodeAttribute(node Node, key string, value interface{}) (map[string]interface{}, error) { + // TODO: Prevent overwriting reserved keys + n, ok := d.graph.GetNode(dagger.Path{ + XID: generateNodeXID(node.name, node.namespace), + XType: NodeXTypeFromString(node.objType), + }) + if !ok { + return nil, errors.New("node not found") + } + + if value == nil { + return nil, errors.New("value cannot be nil") + } + + n.Attributes[key] = value + + _ = d.graph.SetNode(n.Path, n.Attributes) + + return n.Attributes, nil +} + +func (d *Dagger) DelNodeAttribute(node Node, key string) (map[string]interface{}, error) { + n, ok := d.graph.GetNode(dagger.Path{ + XID: generateNodeXID(node.name, node.namespace), + XType: NodeXTypeFromString(node.objType), + }) + if !ok { + return nil, errors.New("node not found") + } + + delete(n.Attributes, key) + + _ = d.graph.SetNode(n.Path, n.Attributes) + + return n.Attributes, nil +} + +func (d *Dagger) AddEdge(edge Edge) error { + if edge.attributes == nil { + 
edge.attributes = make(map[string]interface{}) + } + + edge.attributes[ReservedAttributeEdgeType] = EdgeXTypeConnection + + fromXID := generateNodeXID(edge.from.name, edge.from.namespace) + edge.attributes["from_"+ReservedAttributeNodeType] = NodeXTypeFromString(edge.from.objType) + edge.attributes["from_"+ReservedAttributeName] = edge.from.name + edge.attributes["from_"+ReservedAttributeNamespace] = edge.from.namespace + edge.attributes["from_"+ReservedAttributeXID] = fromXID + + toXID := generateNodeXID(edge.to.name, edge.to.namespace) + edge.attributes["to_"+ReservedAttributeNodeType] = NodeXTypeFromString(edge.to.objType) + edge.attributes["to_"+ReservedAttributeName] = edge.to.name + edge.attributes["to_"+ReservedAttributeNamespace] = edge.to.namespace + edge.attributes["to_"+ReservedAttributeXID] = toXID + + if _, err := d.graph.SetEdge(dagger.Path{ + XID: fromXID, + XType: NodeXTypeFromString(edge.from.objType), + }, dagger.Path{ + XID: toXID, + XType: NodeXTypeFromString(edge.to.objType), + }, dagger.Node{ + Path: dagger.Path{ + XID: generateEdgeXID(fromXID, toXID), + XType: string(EdgeXTypeConnection), + }, + Attributes: edge.attributes, + }); err != nil { + return err + } + return nil +} + +func (d *Dagger) DelEdge(edge Edge) { + fromXID := generateNodeXID(edge.from.name, edge.from.namespace) + toXID := generateNodeXID(edge.to.name, edge.to.namespace) + + d.graph.DelEdge(dagger.Path{ + XID: generateEdgeXID(fromXID, toXID), + XType: string(EdgeXTypeConnection), + }) +} + +func (d *Dagger) GetEdgeNodes(edge Edge) (Node, Node, error) { + toXID := generateNodeXID(edge.to.name, edge.to.namespace) + fromXID := generateNodeXID(edge.from.name, edge.from.namespace) + + e, ok := d.graph.GetEdge(dagger.Path{ + XID: generateEdgeXID(fromXID, toXID), + XType: string(EdgeXTypeConnection), + }) + if !ok { + return Node{}, Node{}, errors.New("edge not found") + } + + from, ok := d.graph.GetNode(e.From) + if !ok { + return Node{}, Node{}, errors.New("from node not found") + } + + to, ok := d.graph.GetNode(e.To) + if !ok { + return Node{}, Node{}, errors.New("to node not found") + } + + fromNode := Node{ + objType: edge.from.objType, + name: edge.from.name, + namespace: edge.from.namespace, + attributes: from.Attributes, + } + toNode := Node{ + objType: edge.to.objType, + name: edge.to.name, + namespace: edge.to.namespace, + attributes: to.Attributes, + } + return fromNode, toNode, nil +} + +func (d *Dagger) GetEdgeAttributes(edge Edge) (map[string]interface{}, error) { + toXID := generateNodeXID(edge.to.name, edge.to.namespace) + fromXID := generateNodeXID(edge.from.name, edge.from.namespace) + + e, ok := d.graph.GetEdge(dagger.Path{ + XID: generateEdgeXID(fromXID, toXID), + XType: string(EdgeXTypeConnection), + }) + if !ok { + return nil, errors.New("edge not found") + } + return e.Attributes, nil +} + +func (d *Dagger) GetEdgeAttribute(edge Edge, key string) (interface{}, error) { + toXID := generateNodeXID(edge.to.name, edge.to.namespace) + fromXID := generateNodeXID(edge.from.name, edge.from.namespace) + + e, ok := d.graph.GetEdge(dagger.Path{ + XID: generateEdgeXID(fromXID, toXID), + XType: string(EdgeXTypeConnection), + }) + if !ok { + return nil, errors.New("edge not found") + } + + v, ok := e.Attributes[key] + if ok { + return v, nil + } + + return nil, errors.New("attribute not found") +} + +func (d *Dagger) SetEdgeAttributes(edge Edge, attributes map[string]interface{}) (map[string]interface{}, error) { + toXID := generateNodeXID(edge.to.name, edge.to.namespace) + fromXID := 
generateNodeXID(edge.from.name, edge.from.namespace) + + e, ok := d.graph.GetEdge(dagger.Path{ + XID: generateEdgeXID(fromXID, toXID), + XType: string(EdgeXTypeConnection), + }) + if !ok { + return nil, errors.New("edge not found") + } + + for k, v := range attributes { + e.Attributes[k] = v + } + + if _, err := d.graph.SetEdge( + dagger.Path{ + XID: fromXID, + XType: NodeXTypeFromString(edge.from.objType), + }, + dagger.Path{ + XID: toXID, + XType: NodeXTypeFromString(edge.to.objType), + }, + dagger.Node{ + Path: dagger.Path{ + XID: generateEdgeXID(fromXID, toXID), + XType: string(EdgeXTypeConnection), + }, + Attributes: e.Attributes, + }); err != nil { + return nil, err + } + + return e.Attributes, nil +} + +func (d *Dagger) SetEdgeAttribute(edge Edge, key string, value interface{}) (map[string]interface{}, error) { + toXID := generateNodeXID(edge.to.name, edge.to.namespace) + fromXID := generateNodeXID(edge.from.name, edge.from.namespace) + + e, ok := d.graph.GetEdge(dagger.Path{ + XID: generateEdgeXID(fromXID, toXID), + XType: string(EdgeXTypeConnection), + }) + if !ok { + return nil, errors.New("edge not found") + } + + e.Attributes[key] = value + + if _, err := d.graph.SetEdge( + dagger.Path{ + XID: fromXID, + XType: NodeXTypeFromString(edge.from.objType), + }, + dagger.Path{ + XID: toXID, + XType: NodeXTypeFromString(edge.to.objType), + }, + dagger.Node{ + Path: dagger.Path{ + XID: generateEdgeXID(fromXID, toXID), + XType: string(EdgeXTypeConnection), + }, + Attributes: e.Attributes, + }); err != nil { + return nil, err + } + + return e.Attributes, nil +} + +func (d *Dagger) DelEdgeAttribute(edge Edge, key string) (map[string]interface{}, error) { + toXID := generateNodeXID(edge.to.name, edge.to.namespace) + fromXID := generateNodeXID(edge.from.name, edge.from.namespace) + + e, ok := d.graph.GetEdge(dagger.Path{ + XID: generateEdgeXID(fromXID, toXID), + XType: string(EdgeXTypeConnection), + }) + if !ok { + return nil, errors.New("edge not found") + } + + delete(e.Attributes, key) + + if _, err := d.graph.SetEdge( + dagger.Path{ + XID: fromXID, + XType: NodeXTypeFromString(edge.from.objType), + }, + dagger.Path{ + XID: toXID, + XType: NodeXTypeFromString(edge.to.objType), + }, + dagger.Node{ + Path: dagger.Path{ + XID: generateEdgeXID(fromXID, toXID), + XType: string(EdgeXTypeConnection), + }, + Attributes: e.Attributes, + }); err != nil { + return nil, err + } + + return e.Attributes, nil +} + +func (d *Dagger) ListNodes() ([]Node, error) { + nodes := []Node{} + d.graph.RangeNodes(NodeXTypePod, func(n dagger.Node) bool { + name, namespace := nameNamespaceFromNodeXID(n.Path.XID) + nodes = append(nodes, Node{ + name: name, + namespace: namespace, + objType: NodeXTypeToString(n.Path.XType), + }) + return true + }) + + d.graph.RangeNodes(NodeXTypeSvc, func(n dagger.Node) bool { + name, namespace := nameNamespaceFromNodeXID(n.Path.XID) + nodes = append(nodes, Node{ + name: name, + namespace: namespace, + objType: NodeXTypeToString(n.Path.XType), + }) + return true + }) + + // XXX: Not covered until we know how to represent this + + // d.graph.RangeNodes(NodeXTypeExt, func(n dagger.Node) bool { + // name, namespace := nameNamespaceFromNodeXID(n.Path.XID) + // nodes = append(nodes, Node{ + // name: name, + // namespace: namespace, + // objType: NodeXTypeToString(n.Path.XType), + // }) + // return true + // }) + + return nodes, nil +} + +func (d *Dagger) ListNeighbors(node Node) ([]Node, error) { + fromXID := generateNodeXID(node.name, node.namespace) + + nodes := []Node{} + + 
d.graph.RangeEdgesFrom(EdgeXTypeConnection, dagger.Path{ + XID: fromXID, + XType: NodeXTypeFromString(node.objType), + }, func(e dagger.Edge) bool { + node, _ := d.graph.GetNode(e.To) + name, namespace := nameNamespaceFromNodeXID(node.Path.XID) + nodes = append(nodes, Node{ + name: name, + namespace: namespace, + objType: NodeXTypeToString(node.Path.XType), + }) + return true + }) + + return nodes, nil +} + +func (d *Dagger) ListOutboundEdges(node Node) ([]Edge, error) { + fromXID := generateNodeXID(node.name, node.namespace) + + edges := []Edge{} + + d.graph.RangeEdgesFrom(EdgeXTypeConnection, dagger.Path{ + XID: fromXID, + XType: NodeXTypeFromString(node.objType), + }, func(e dagger.Edge) bool { + toName, toNamespace := nameNamespaceFromNodeXID(e.To.XID) + edges = append(edges, Edge{ + from: node, + to: Node{ + name: toName, + namespace: toNamespace, + objType: NodeXTypeToString(e.To.XType), + }, + attributes: e.Attributes, + }) + return true + }) + + return edges, nil +} + +func (d *Dagger) ListInboundEdges(node Node) ([]Edge, error) { + toXID := generateNodeXID(node.name, node.namespace) + + edges := []Edge{} + + d.graph.RangeEdgesTo(EdgeXTypeConnection, dagger.Path{ + XID: toXID, + XType: NodeXTypeFromString(node.objType), + }, func(e dagger.Edge) bool { + fromName, fromNamespace := nameNamespaceFromNodeXID(e.From.XID) + edges = append(edges, Edge{ + to: node, + from: Node{ + name: fromName, + namespace: fromNamespace, + objType: NodeXTypeToString(e.From.XType), + }, + attributes: e.Attributes, + }) + return true + }) + + return edges, nil +} + +func generateNodeXID(name, namespace string) string { + return namespace + NodeXIDDelimiter + name +} + +func nameNamespaceFromNodeXID(nodeXID string) (string, string) { + parts := strings.Split(nodeXID, NodeXIDDelimiter) + return parts[1], parts[0] +} + +func NodeXTypeFromString(objType string) string { + switch strings.ToLower(objType) { + case "pod": + return NodeXTypePod + case "service": + return NodeXTypeSvc + default: + return NodeXTypeExt + } +} + +func NodeXTypeToString(xtype string) string { + switch strings.ToLower(xtype) { + case "pod": + return "pod" + case "service": + return "service" + default: + // XXX: Not covered until we know how to represent this + return NodeXTypeExt + } +} + +func generateEdgeXID(fromNodeXID, toNodeXID string) string { + return fromNodeXID + EdgeXIDDelimiter + toNodeXID +} diff --git a/pkg/store/dagger_test.go b/pkg/store/dagger_test.go new file mode 100644 index 000000000..bed472cb9 --- /dev/null +++ b/pkg/store/dagger_test.go @@ -0,0 +1,2169 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+//go:build unit +// +build unit + +package store + +import ( + "reflect" + "testing" +) + +func arrayEqualNode(x []Node, y []Node) bool { + for xItem := range x { + found := false + for yItem := range y { + if reflect.DeepEqual(xItem, yItem) { + found = true + break + } + } + + if !found { + return false + } + } + + return true +} + +func arrayEqualEdge(x []Edge, y []Edge) bool { + for xItem := range x { + found := false + for yItem := range y { + if reflect.DeepEqual(xItem, yItem) { + found = true + break + } + } + + if !found { + return false + } + } + + return true +} + +func TestDagger_GetNodeAttributes(t *testing.T) { + type fields struct { + d *Dagger + } + type args struct { + node Node + } + tests := []struct { + name string + fields fields + args args + want map[string]interface{} + wantErr bool + }{ + { + name: "additional attributes", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + d.AddNode(Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: map[string]interface{}{ + "test-key": "test-value", + }, + }) + return d + }(), + }, + args: args{ + node: Node{ + objType: "pod", + name: "test-pod", + namespace: "test-namespace", + }, + }, + want: map[string]interface{}{ + ReservedAttributeNodeType: "pod", + ReservedAttributeName: "test-pod", + ReservedAttributeNamespace: "test-namespace", + "test-key": "test-value", + }, + wantErr: false, + }, + { + name: "node not found", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + return d + }(), + }, + args: args{ + node: Node{ + objType: "pod", + name: "test-pod", + namespace: "test-namespace", + }, + }, + want: nil, + wantErr: true, + }, + { + name: "nil attributes", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + d.AddNode(Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + }) + return d + }(), + }, + args: args{ + node: Node{ + objType: "pod", + name: "test-pod", + namespace: "test-namespace", + }, + }, + want: map[string]interface{}{ + ReservedAttributeNodeType: "pod", + ReservedAttributeName: "test-pod", + ReservedAttributeNamespace: "test-namespace", + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.fields.d.GetNodeAttributes(tt.args.node) + if (err != nil) != tt.wantErr { + t.Errorf("Dagger.GetNodeAttributes() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Dagger.GetNodeAttributes() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestDagger_GetNodeAttribute(t *testing.T) { + type fields struct { + d *Dagger + } + type args struct { + node Node + key string + } + tests := []struct { + name string + fields fields + args args + want interface{} + wantErr bool + }{ + { + name: "attribute exists", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + d.AddNode(Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: map[string]interface{}{ + "test-key": "test-value", + }, + }) + return d + }(), + }, + args: args{ + node: Node{ + objType: "pod", + name: "test-pod", + namespace: "test-namespace", + }, + key: "test-key", + }, + want: "test-value", + wantErr: false, + }, + { + name: "attribute does not exist", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + d.AddNode(Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + }) + return d + }(), + }, + args: args{ + node: Node{ + objType: "pod", + name: 
"test-pod", + namespace: "test-namespace", + }, + key: "test-key", + }, + want: nil, + wantErr: true, + }, + { + name: "node not found", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + return d + }(), + }, + args: args{ + node: Node{ + objType: "pod", + name: "test-pod", + namespace: "test-namespace", + }, + key: "test-key", + }, + want: nil, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.fields.d.GetNodeAttribute(tt.args.node, tt.args.key) + if (err != nil) != tt.wantErr { + t.Errorf("Dagger.GetNodeAttribute() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Dagger.GetNodeAttribute() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestDagger_SetNodeAttributes(t *testing.T) { + type fields struct { + d *Dagger + } + type args struct { + node Node + attributes map[string]interface{} + } + tests := []struct { + name string + fields fields + args args + want map[string]interface{} + wantErr bool + }{ + { + name: "add multiple attributes", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + d.AddNode(Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + }) + return d + }(), + }, + args: args{ + node: Node{ + objType: "pod", + name: "test-pod", + namespace: "test-namespace", + }, + attributes: map[string]interface{}{ + "test-key": "test-value", + "test-key2": "test-value2", + }, + }, + want: map[string]interface{}{ + ReservedAttributeNodeType: "pod", + ReservedAttributeName: "test-pod", + ReservedAttributeNamespace: "test-namespace", + "test-key": "test-value", + "test-key2": "test-value2", + }, + wantErr: false, + }, + { + name: "node not found", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + return d + }(), + }, + args: args{ + node: Node{ + objType: "pod", + name: "test-pod", + namespace: "test-namespace", + }, + attributes: map[string]interface{}{}, + }, + want: nil, + wantErr: true, + }, + { + name: "nil attributes", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + d.AddNode(Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + }) + return d + }(), + }, + args: args{ + node: Node{ + objType: "pod", + name: "test-pod", + namespace: "test-namespace", + }, + attributes: nil, + }, + want: map[string]interface{}{ + ReservedAttributeNodeType: "pod", + ReservedAttributeName: "test-pod", + ReservedAttributeNamespace: "test-namespace", + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.fields.d.SetNodeAttributes(tt.args.node, tt.args.attributes) + if (err != nil) != tt.wantErr { + t.Errorf("Dagger.SetNodeAttributes() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Dagger.SetNodeAttributes() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestDagger_SetNodeAttribute(t *testing.T) { + type fields struct { + d *Dagger + } + type args struct { + node Node + key string + value interface{} + } + tests := []struct { + name string + fields fields + args args + want map[string]interface{} + wantErr bool + }{ + { + name: "add attribute", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + d.AddNode(Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + }) + return d + }(), + }, + args: args{ + node: Node{ + objType: "pod", + name: "test-pod", + namespace: "test-namespace", + }, + key: 
"test-key", + value: "test-value", + }, + want: map[string]interface{}{ + ReservedAttributeNodeType: "pod", + ReservedAttributeName: "test-pod", + ReservedAttributeNamespace: "test-namespace", + "test-key": "test-value", + }, + wantErr: false, + }, + { + name: "node not found", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + return d + }(), + }, + args: args{ + node: Node{ + objType: "pod", + name: "test-pod", + namespace: "test-namespace", + }, + key: "test-key", + value: "test-value", + }, + want: nil, + wantErr: true, + }, + { + name: "nil attribute value", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + d.AddNode(Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + }) + return d + }(), + }, + args: args{ + node: Node{ + objType: "pod", + name: "test-pod", + namespace: "test-namespace", + }, + key: "test-key", + value: nil, + }, + want: nil, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.fields.d.SetNodeAttribute(tt.args.node, tt.args.key, tt.args.value) + if (err != nil) != tt.wantErr { + t.Errorf("Dagger.SetNodeAttributes() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Dagger.SetNodeAttributes() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestDagger_DelNodeAttribute(t *testing.T) { + type fields struct { + d *Dagger + } + type args struct { + node Node + key string + } + tests := []struct { + name string + fields fields + args args + want map[string]interface{} + wantErr bool + }{ + { + name: "attribute exists", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + d.AddNode(Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: map[string]interface{}{ + "test-key": "test-value", + }, + }) + return d + }(), + }, + args: args{ + node: Node{ + objType: "pod", + name: "test-pod", + namespace: "test-namespace", + }, + key: "test-key", + }, + want: map[string]interface{}{ + ReservedAttributeNodeType: "pod", + ReservedAttributeName: "test-pod", + ReservedAttributeNamespace: "test-namespace", + }, + wantErr: false, + }, + { + name: "node not found", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + return d + }(), + }, + args: args{ + node: Node{ + objType: "pod", + name: "test-pod", + namespace: "test-namespace", + }, + key: "test-key", + }, + want: nil, + wantErr: true, + }, + { + name: "attribute does not exist", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + d.AddNode(Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + }) + return d + }(), + }, + args: args{ + node: Node{ + objType: "pod", + name: "test-pod", + namespace: "test-namespace", + }, + key: "test-key", + }, + want: map[string]interface{}{ + ReservedAttributeNodeType: "pod", + ReservedAttributeName: "test-pod", + ReservedAttributeNamespace: "test-namespace", + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.fields.d.DelNodeAttribute(tt.args.node, tt.args.key) + if (err != nil) != tt.wantErr { + t.Errorf("Dagger.SetNodeAttributes() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Dagger.SetNodeAttributes() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestDagger_AddEdge(t *testing.T) { + type fields struct { + d *Dagger + } + type args struct { + edge Edge + } + tests 
:= []struct { + name string + fields fields + args args + wantErr bool + }{ + { + name: "adds an edge to the graph", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + d.AddNode(Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + }) + d.AddNode(Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + }) + return d + }(), + }, + args: args{ + edge: Edge{ + from: Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + }, + to: Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + }, + attributes: nil, + }, + }, + wantErr: false, + }, + { + name: "to node not found", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + d.AddNode(Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + }) + return d + }(), + }, + args: args{ + edge: Edge{ + from: Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + }, + to: Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + }, + attributes: nil, + }, + }, + wantErr: true, + }, + { + name: "from node not found", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + d.AddNode(Node{ + name: "test-svx", + namespace: "test-namespace", + objType: "service", + attributes: nil, + }) + return d + }(), + }, + args: args{ + edge: Edge{ + from: Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + }, + to: Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + }, + attributes: nil, + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := tt.fields.d.AddEdge(tt.args.edge); (err != nil) != tt.wantErr { + t.Errorf("Dagger.AddEdge() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestDagger_GetEdgeNodes(t *testing.T) { + type fields struct { + d *Dagger + } + type args struct { + edge Edge + } + tests := []struct { + name string + fields fields + args args + want Node + want1 Node + wantErr bool + }{ + { + name: "gets the edge nodes", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + from := Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + } + to := Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + } + d.AddNode(from) + d.AddNode(to) + _ = d.AddEdge(Edge{ + from: from, + to: to, + attributes: map[string]interface{}{ + "test-key": "test-value", + }, + }) + return d + }(), + }, + args: args{ + edge: Edge{ + from: Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + }, + to: Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + }, + attributes: map[string]interface{}{ + "test-key": "test-value", + }, + }, + }, + want: Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: map[string]interface{}{ + ReservedAttributeNodeType: "pod", + ReservedAttributeName: "test-pod", + ReservedAttributeNamespace: "test-namespace", + }, + }, + want1: Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: map[string]interface{}{ + ReservedAttributeNodeType: "service", + ReservedAttributeName: "test-service", + ReservedAttributeNamespace: "test-namespace", + }, + }, + wantErr: false, + }, + { + name: "edge does not exist", + fields: fields{ + d: func() 
*Dagger { + d := NewDaggerStore() + from := Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + } + to := Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + } + d.AddNode(from) + d.AddNode(to) + return d + }(), + }, + args: args{ + edge: Edge{ + from: Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + }, + to: Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + }, + attributes: map[string]interface{}{ + "test-key": "test-value", + }, + }, + }, + want: Node{}, + want1: Node{}, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, got1, err := tt.fields.d.GetEdgeNodes(tt.args.edge) + if (err != nil) != tt.wantErr { + t.Errorf("Dagger.GetEdgeNodes() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Dagger.GetEdgeNodes() got = %v, want %v", got, tt.want) + } + if !reflect.DeepEqual(got1, tt.want1) { + t.Errorf("Dagger.GetEdgeNodes() got1 = %v, want %v", got1, tt.want1) + } + }) + } +} + +func TestDagger_GetEdgeAttribute(t *testing.T) { + type fields struct { + d *Dagger + } + type args struct { + edge Edge + key string + } + tests := []struct { + name string + fields fields + args args + want interface{} + wantErr bool + }{ + { + name: "attribute exists", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + from := Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + } + to := Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + } + d.AddNode(from) + d.AddNode(to) + d.AddEdge(Edge{ + from: from, + to: to, + attributes: map[string]interface{}{ + "test-key": "test-value", + }, + }) + return d + }(), + }, + args: args{ + edge: Edge{ + from: Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + }, + to: Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + }, + }, + key: "test-key", + }, + want: "test-value", + wantErr: false, + }, + { + name: "attribute does not exist", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + from := Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + } + to := Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + } + d.AddNode(from) + d.AddNode(to) + d.AddEdge(Edge{ + from: from, + to: to, + attributes: nil, + }) + return d + }(), + }, + args: args{ + edge: Edge{ + from: Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + }, + to: Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + }, + }, + key: "test-key", + }, + want: nil, + wantErr: true, + }, + { + name: "edge does not exist", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + from := Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + } + to := Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + } + d.AddNode(from) + d.AddNode(to) + return d + }(), + }, + args: args{ + edge: Edge{ + from: Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + }, + to: Node{ + name: "test-service", + namespace: "test-namespace", + 
objType: "service", + attributes: nil, + }, + }, + key: "test-key", + }, + want: nil, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.fields.d.GetEdgeAttribute(tt.args.edge, tt.args.key) + if (err != nil) != tt.wantErr { + t.Errorf("Dagger.GetEdgeAttribute() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Dagger.GetEdgeAttribute() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestDagger_GetEdgeAttributes(t *testing.T) { + type fields struct { + d *Dagger + } + type args struct { + edge Edge + } + tests := []struct { + name string + fields fields + args args + want map[string]interface{} + wantErr bool + }{ + { + name: "with additional attributes", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + from := Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + } + to := Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + } + d.AddNode(from) + d.AddNode(to) + d.AddEdge(Edge{ + from: from, + to: to, + attributes: map[string]interface{}{ + "test-key": "test-value", + }, + }) + return d + }(), + }, + args: args{ + edge: Edge{ + from: Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + }, + to: Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + }, + }, + }, + want: map[string]interface{}{ + ReservedAttributeEdgeType: EdgeXTypeConnection, + "from_" + ReservedAttributeNodeType: NodeXTypeFromString("pod"), + "to_" + ReservedAttributeNodeType: NodeXTypeFromString("service"), + "from_" + ReservedAttributeName: "test-pod", + "from_" + ReservedAttributeNamespace: "test-namespace", + "from_" + ReservedAttributeXID: generateNodeXID("test-pod", "test-namespace"), + "to_" + ReservedAttributeName: "test-service", + "to_" + ReservedAttributeNamespace: "test-namespace", + "to_" + ReservedAttributeXID: generateNodeXID("test-service", "test-namespace"), + "test-key": "test-value", + }, + wantErr: false, + }, + { + name: "with nil attributes", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + from := Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + } + to := Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + } + d.AddNode(from) + d.AddNode(to) + d.AddEdge(Edge{ + from: from, + to: to, + attributes: nil, + }) + return d + }(), + }, + args: args{ + edge: Edge{ + from: Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + }, + to: Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + }, + }, + }, + want: map[string]interface{}{ + ReservedAttributeEdgeType: EdgeXTypeConnection, + "from_" + ReservedAttributeNodeType: NodeXTypeFromString("pod"), + "to_" + ReservedAttributeNodeType: NodeXTypeFromString("service"), + "from_" + ReservedAttributeName: "test-pod", + "from_" + ReservedAttributeNamespace: "test-namespace", + "from_" + ReservedAttributeXID: generateNodeXID("test-pod", "test-namespace"), + "to_" + ReservedAttributeName: "test-service", + "to_" + ReservedAttributeNamespace: "test-namespace", + "to_" + ReservedAttributeXID: generateNodeXID("test-service", "test-namespace"), + }, + wantErr: false, + }, + { + name: "edge does not exist", + fields: fields{ + d: func() *Dagger { + d 
:= NewDaggerStore() + from := Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + } + to := Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + } + d.AddNode(from) + d.AddNode(to) + return d + }(), + }, + args: args{ + edge: Edge{ + from: Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + }, + to: Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + }, + }, + }, + want: nil, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.fields.d.GetEdgeAttributes(tt.args.edge) + if (err != nil) != tt.wantErr { + t.Errorf("Dagger.GetEdgeAttributes() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Dagger.GetEdgeAttributes() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestDagger_SetEdgeAttributes(t *testing.T) { + type fields struct { + d *Dagger + } + type args struct { + edge Edge + attributes map[string]interface{} + } + tests := []struct { + name string + fields fields + args args + want map[string]interface{} + wantErr bool + }{ + { + name: "add multiple attributes", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + from := Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + } + to := Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + } + d.AddNode(from) + d.AddNode(to) + d.AddEdge(Edge{ + from: from, + to: to, + attributes: nil, + }) + return d + }(), + }, + args: args{ + edge: Edge{ + from: Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + }, + to: Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + }, + }, + attributes: map[string]interface{}{ + "test-key": "test-value", + "test-key2": "test-value2", + }, + }, + want: map[string]interface{}{ + ReservedAttributeEdgeType: EdgeXTypeConnection, + "from_" + ReservedAttributeNodeType: NodeXTypeFromString("pod"), + "to_" + ReservedAttributeNodeType: NodeXTypeFromString("service"), + "from_" + ReservedAttributeName: "test-pod", + "from_" + ReservedAttributeNamespace: "test-namespace", + "from_" + ReservedAttributeXID: generateNodeXID("test-pod", "test-namespace"), + "to_" + ReservedAttributeName: "test-service", + "to_" + ReservedAttributeNamespace: "test-namespace", + "to_" + ReservedAttributeXID: generateNodeXID("test-service", "test-namespace"), + "test-key": "test-value", + "test-key2": "test-value2", + }, + wantErr: false, + }, + { + name: "edge does not exist", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + from := Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + } + to := Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + } + d.AddNode(from) + d.AddNode(to) + return d + }(), + }, + args: args{ + edge: Edge{ + from: Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + }, + to: Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + }, + }, + }, + want: nil, + wantErr: true, + }, + { + name: "add nil attributes", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + from := Node{ + name: "test-pod", + namespace: "test-namespace", 
+ objType: "pod", + attributes: nil, + } + to := Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + } + d.AddNode(from) + d.AddNode(to) + d.AddEdge(Edge{ + from: from, + to: to, + attributes: nil, + }) + return d + }(), + }, + args: args{ + edge: Edge{ + from: Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + }, + to: Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + }, + }, + attributes: nil, + }, + want: map[string]interface{}{ + ReservedAttributeEdgeType: EdgeXTypeConnection, + "from_" + ReservedAttributeNodeType: NodeXTypeFromString("pod"), + "to_" + ReservedAttributeNodeType: NodeXTypeFromString("service"), + "from_" + ReservedAttributeName: "test-pod", + "from_" + ReservedAttributeNamespace: "test-namespace", + "from_" + ReservedAttributeXID: generateNodeXID("test-pod", "test-namespace"), + "to_" + ReservedAttributeName: "test-service", + "to_" + ReservedAttributeNamespace: "test-namespace", + "to_" + ReservedAttributeXID: generateNodeXID("test-service", "test-namespace"), + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.fields.d.SetEdgeAttributes(tt.args.edge, tt.args.attributes) + if (err != nil) != tt.wantErr { + t.Errorf("Dagger.SetEdgeAttributes() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Dagger.SetEdgeAttributes() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestDagger_SetEdgeAttribute(t *testing.T) { + type fields struct { + d *Dagger + } + type args struct { + edge Edge + key string + value interface{} + } + tests := []struct { + name string + fields fields + args args + want map[string]interface{} + wantErr bool + }{ + { + name: "add new attributes", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + from := Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + } + to := Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + } + d.AddNode(from) + d.AddNode(to) + d.AddEdge(Edge{ + from: from, + to: to, + attributes: nil, + }) + return d + }(), + }, + args: args{ + edge: Edge{ + from: Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + }, + to: Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + }, + }, + key: "test-key", + value: "test-value", + }, + want: map[string]interface{}{ + ReservedAttributeEdgeType: EdgeXTypeConnection, + "from_" + ReservedAttributeNodeType: NodeXTypeFromString("pod"), + "to_" + ReservedAttributeNodeType: NodeXTypeFromString("service"), + "from_" + ReservedAttributeName: "test-pod", + "from_" + ReservedAttributeNamespace: "test-namespace", + "from_" + ReservedAttributeXID: generateNodeXID("test-pod", "test-namespace"), + "to_" + ReservedAttributeName: "test-service", + "to_" + ReservedAttributeNamespace: "test-namespace", + "to_" + ReservedAttributeXID: generateNodeXID("test-service", "test-namespace"), + "test-key": "test-value", + }, + wantErr: false, + }, + { + name: "edge does not exist", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + from := Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + } + to := Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + } + d.AddNode(from) + 
d.AddNode(to) + return d + }(), + }, + args: args{ + edge: Edge{ + from: Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + }, + to: Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + }, + }, + key: "test-key", + value: "test-value", + }, + want: nil, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.fields.d.SetEdgeAttribute(tt.args.edge, tt.args.key, tt.args.value) + if (err != nil) != tt.wantErr { + t.Errorf("Dagger.SetEdgeAttribute() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Dagger.SetEdgeAttribute() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestDagger_DelEdgeAttribute(t *testing.T) { + type fields struct { + d *Dagger + } + type args struct { + edge Edge + key string + } + tests := []struct { + name string + fields fields + args args + want map[string]interface{} + wantErr bool + }{ + { + name: "delete existing attributes", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + from := Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + } + to := Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + } + d.AddNode(from) + d.AddNode(to) + d.AddEdge(Edge{ + from: from, + to: to, + attributes: map[string]interface{}{ + "test-key": "test-value", + "test-key2": "test-value2", + }, + }) + return d + }(), + }, + args: args{ + edge: Edge{ + from: Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + }, + to: Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + }, + }, + key: "test-key", + }, + want: map[string]interface{}{ + ReservedAttributeEdgeType: EdgeXTypeConnection, + "from_" + ReservedAttributeNodeType: NodeXTypeFromString("pod"), + "to_" + ReservedAttributeNodeType: NodeXTypeFromString("service"), + "from_" + ReservedAttributeName: "test-pod", + "from_" + ReservedAttributeNamespace: "test-namespace", + "from_" + ReservedAttributeXID: generateNodeXID("test-pod", "test-namespace"), + "to_" + ReservedAttributeName: "test-service", + "to_" + ReservedAttributeNamespace: "test-namespace", + "to_" + ReservedAttributeXID: generateNodeXID("test-service", "test-namespace"), + "test-key2": "test-value2", + }, + wantErr: false, + }, + { + name: "delete missing attributes", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + from := Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + } + to := Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + } + d.AddNode(from) + d.AddNode(to) + d.AddEdge(Edge{ + from: from, + to: to, + attributes: map[string]interface{}{ + "test-key2": "test-value2", + }, + }) + return d + }(), + }, + args: args{ + edge: Edge{ + from: Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + }, + to: Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + }, + }, + key: "test-key", + }, + want: map[string]interface{}{ + ReservedAttributeEdgeType: EdgeXTypeConnection, + "from_" + ReservedAttributeNodeType: NodeXTypeFromString("pod"), + "to_" + ReservedAttributeNodeType: NodeXTypeFromString("service"), + "from_" + ReservedAttributeName: "test-pod", + "from_" + ReservedAttributeNamespace: 
"test-namespace", + "from_" + ReservedAttributeXID: generateNodeXID("test-pod", "test-namespace"), + "to_" + ReservedAttributeName: "test-service", + "to_" + ReservedAttributeNamespace: "test-namespace", + "to_" + ReservedAttributeXID: generateNodeXID("test-service", "test-namespace"), + "test-key2": "test-value2", + }, + wantErr: false, + }, + { + name: "edge does not exist", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + from := Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + } + to := Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + } + d.AddNode(from) + d.AddNode(to) + return d + }(), + }, + args: args{ + edge: Edge{ + from: Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + }, + to: Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + }, + }, + key: "test-key", + }, + want: nil, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.fields.d.DelEdgeAttribute(tt.args.edge, tt.args.key) + if (err != nil) != tt.wantErr { + t.Errorf("Dagger.DelEdgeAttribute() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Dagger.DelEdgeAttribute() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestDagger_ListOutboundEdges(t *testing.T) { + type fields struct { + d *Dagger + } + type args struct { + node Node + } + tests := []struct { + name string + fields fields + args args + want []Edge + wantErr bool + }{ + { + name: "list two outbound edges", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + from := Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + } + toSvc := Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + } + toPod := Node{ + name: "test-pod2", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + } + d.AddNode(from) + d.AddNode(toSvc) + d.AddNode(toPod) + + d.AddEdge(Edge{ + from: from, + to: toSvc, + attributes: map[string]interface{}{ + "test-key": "test-value", + }, + }) + d.AddEdge(Edge{ + from: from, + to: toPod, + attributes: map[string]interface{}{ + "test-key": "test-value", + }, + }) + return d + }(), + }, + args: args{ + node: Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + }, + }, + want: []Edge{ + { + from: Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + }, + to: Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + }, + attributes: map[string]interface{}{ + ReservedAttributeEdgeType: EdgeXTypeConnection, + "from_" + ReservedAttributeNodeType: NodeXTypeFromString("pod"), + "to_" + ReservedAttributeNodeType: NodeXTypeFromString("service"), + "from_" + ReservedAttributeName: "test-pod", + "from_" + ReservedAttributeNamespace: "test-namespace", + "from_" + ReservedAttributeXID: generateNodeXID("test-pod", "test-namespace"), + "to_" + ReservedAttributeName: "test-service", + "to_" + ReservedAttributeNamespace: "test-namespace", + "to_" + ReservedAttributeXID: generateNodeXID("test-service", "test-namespace"), + "test-key": "test-value", + }, + }, + { + from: Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + }, + to: Node{ + name: "test-pod2", + namespace: 
"test-namespace", + objType: "pod", + attributes: nil, + }, + attributes: map[string]interface{}{ + ReservedAttributeEdgeType: EdgeXTypeConnection, + "from_" + ReservedAttributeNodeType: NodeXTypeFromString("pod"), + "to_" + ReservedAttributeNodeType: NodeXTypeFromString("pod"), + "from_" + ReservedAttributeName: "test-pod", + "from_" + ReservedAttributeNamespace: "test-namespace", + "from_" + ReservedAttributeXID: generateNodeXID("test-pod", "test-namespace"), + "to_" + ReservedAttributeName: "test-pod2", + "to_" + ReservedAttributeNamespace: "test-namespace", + "to_" + ReservedAttributeXID: generateNodeXID("test-pod2", "test-namespace"), + "test-key": "test-value", + }, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.fields.d.ListOutboundEdges(tt.args.node) + if (err != nil) != tt.wantErr { + t.Errorf("Dagger.ListOutboundEdges() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !arrayEqualEdge(got, tt.want) { + t.Errorf("Dagger.ListOutboundEdges() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestDagger_ListInboundEdges(t *testing.T) { + type fields struct { + d *Dagger + } + type args struct { + node Node + } + tests := []struct { + name string + fields fields + args args + want []Edge + wantErr bool + }{ + { + name: "list two inbound edges", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + to := Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + } + fromSvc := Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + } + fromPod := Node{ + name: "test-pod2", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + } + d.AddNode(to) + d.AddNode(fromSvc) + d.AddNode(fromPod) + + d.AddEdge(Edge{ + from: fromSvc, + to: to, + attributes: map[string]interface{}{ + "test-key": "test-value", + }, + }) + d.AddEdge(Edge{ + from: fromPod, + to: to, + attributes: map[string]interface{}{ + "test-key": "test-value", + }, + }) + return d + }(), + }, + args: args{ + node: Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + }, + }, + want: []Edge{ + { + to: Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + }, + from: Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + }, + attributes: map[string]interface{}{ + ReservedAttributeEdgeType: EdgeXTypeConnection, + "to_" + ReservedAttributeNodeType: NodeXTypeFromString("pod"), + "from_" + ReservedAttributeNodeType: NodeXTypeFromString("service"), + "to_" + ReservedAttributeName: "test-pod", + "to_" + ReservedAttributeNamespace: "test-namespace", + "to_" + ReservedAttributeXID: generateNodeXID("test-pod", "test-namespace"), + "from_" + ReservedAttributeName: "test-service", + "from_" + ReservedAttributeNamespace: "test-namespace", + "from_" + ReservedAttributeXID: generateNodeXID("test-service", "test-namespace"), + "test-key": "test-value", + }, + }, + { + to: Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + }, + from: Node{ + name: "test-pod2", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + }, + attributes: map[string]interface{}{ + ReservedAttributeEdgeType: EdgeXTypeConnection, + "to_" + ReservedAttributeNodeType: NodeXTypeFromString("pod"), + "from_" + ReservedAttributeNodeType: NodeXTypeFromString("pod"), + "to_" + ReservedAttributeName: "test-pod", + 
"to_" + ReservedAttributeNamespace: "test-namespace", + "to_" + ReservedAttributeXID: generateNodeXID("test-pod", "test-namespace"), + "from_" + ReservedAttributeName: "test-pod2", + "from_" + ReservedAttributeNamespace: "test-namespace", + "from_" + ReservedAttributeXID: generateNodeXID("test-pod2", "test-namespace"), + "test-key": "test-value", + }, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.fields.d.ListInboundEdges(tt.args.node) + if (err != nil) != tt.wantErr { + t.Errorf("Dagger.ListInboundEdges() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !arrayEqualEdge(got, tt.want) { + t.Errorf("Dagger.ListInboundEdges() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestDagger_ListNodes(t *testing.T) { + type fields struct { + d *Dagger + } + tests := []struct { + name string + fields fields + want []Node + wantErr bool + }{ + { + name: "list all graph nodes", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + to := Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + } + fromSvc := Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + } + fromPod := Node{ + name: "test-pod2", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + } + d.AddNode(to) + d.AddNode(fromSvc) + d.AddNode(fromPod) + + d.AddEdge(Edge{ + from: fromSvc, + to: to, + attributes: map[string]interface{}{ + "test-key": "test-value", + }, + }) + d.AddEdge(Edge{ + from: fromPod, + to: to, + attributes: map[string]interface{}{ + "test-key": "test-value", + }, + }) + return d + }(), + }, + want: []Node{ + { + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + }, + { + name: "test-pod2", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + }, + { + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.fields.d.ListNodes() + if (err != nil) != tt.wantErr { + t.Errorf("Dagger.ListNodes() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !arrayEqualNode(got, tt.want) { + t.Errorf("Dagger.ListNodes() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestDagger_ListNeighbors(t *testing.T) { + type fields struct { + d *Dagger + } + type args struct { + node Node + } + tests := []struct { + name string + fields fields + args args + want []Node + wantErr bool + }{ + { + name: "list two neighbor nodes", + fields: fields{ + d: func() *Dagger { + d := NewDaggerStore() + from := Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + } + toSvc := Node{ + name: "test-service", + namespace: "test-namespace", + objType: "service", + attributes: nil, + } + toPod := Node{ + name: "test-pod2", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + } + d.AddNode(from) + d.AddNode(toSvc) + d.AddNode(toPod) + + d.AddEdge(Edge{ + from: from, + to: toSvc, + attributes: map[string]interface{}{ + "test-key": "test-value", + }, + }) + d.AddEdge(Edge{ + from: from, + to: toPod, + attributes: map[string]interface{}{ + "test-key": "test-value", + }, + }) + return d + }(), + }, + args: args{ + node: Node{ + name: "test-pod", + namespace: "test-namespace", + objType: "pod", + }, + }, + want: []Node{ + { + name: "test-service", + namespace: "test-namespace", + 
objType: "service", + attributes: nil, + }, + { + name: "test-pod2", + namespace: "test-namespace", + objType: "pod", + attributes: nil, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.fields.d.ListNeighbors(tt.args.node) + if (err != nil) != tt.wantErr { + t.Errorf("Dagger.ListNeighbors() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !arrayEqualNode(got, tt.want) { + t.Errorf("Dagger.ListNeighbors() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/store/graph.go b/pkg/store/graph.go new file mode 100644 index 000000000..6146ef722 --- /dev/null +++ b/pkg/store/graph.go @@ -0,0 +1,48 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package store + +type NodeOps interface { + AddNode(node Node) + DelNode(node Node) + GetNodeAttributes(node Node) (map[string]interface{}, error) + GetNodeAttribute(node Node, key string) (interface{}, error) + SetNodeAttributes(node Node, attributes map[string]interface{}) (map[string]interface{}, error) + SetNodeAttribute(node Node, key string, value interface{}) (map[string]interface{}, error) + DelNodeAttribute(node Node, key string) (map[string]interface{}, error) +} + +type EdgeOps interface { + AddEdge(edge Edge) error + DelEdge(edge Edge) + GetEdgeNodes(edge Edge) (Node, Node, error) + GetEdgeAttributes(edge Edge) (map[string]interface{}, error) + GetEdgeAttribute(edge Edge, key string) (interface{}, error) + SetEdgeAttributes(edge Edge, attributes map[string]interface{}) (map[string]interface{}, error) + SetEdgeAttribute(edge Edge, key string, value interface{}) (map[string]interface{}, error) + DelEdgeAttribute(edge Edge, key string) (map[string]interface{}, error) +} + +type GraphOps interface { + ListNodes() ([]Node, error) + ListNeighbors(node Node) ([]Node, error) + ListOutboundEdges(node Node) ([]Edge, error) + ListInboundEdges(node Node) ([]Edge, error) +} + +type GraphStorage interface { + NodeOps + EdgeOps + GraphOps +} + +type Node struct { + objType string + name, namespace string + attributes map[string]interface{} +} + +type Edge struct { + from, to Node + attributes map[string]interface{} +} diff --git a/pkg/telemetry/consts.go b/pkg/telemetry/consts.go new file mode 100644 index 000000000..d0e66eda4 --- /dev/null +++ b/pkg/telemetry/consts.go @@ -0,0 +1,16 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package telemetry + +const ( + // PropertyApiVersion is the property name of the telemetry item that contains the APIServer endpoint. + // apiserver is used to distinguish the telemetry items from different Kubernetes clusters and should be uniformed + // across all the telemetry items from the same cluster with the format http(s)://:. + // For example "https://retina-test-c4528d-zn0ugsna.hcp.southeastasia.azmk8s.io:443 + PropertyApiserver = "apiserver" +) + +const ( + EnvPodName = "POD_NAME" +) diff --git a/pkg/telemetry/heartbeat_linux.go b/pkg/telemetry/heartbeat_linux.go new file mode 100644 index 000000000..ea4f362d8 --- /dev/null +++ b/pkg/telemetry/heartbeat_linux.go @@ -0,0 +1,21 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
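graph.go splits the storage surface into NodeOps, EdgeOps and GraphOps and recomposes them as GraphStorage, so callers can be written against the interface rather than the concrete Dagger store. A hypothetical in-package helper, purely to show the intended seam (connectionCount is not part of this diff):

// connectionCount depends only on GraphStorage, so the Dagger store (or any
// future backend) can satisfy it without the caller changing.
func connectionCount(g GraphStorage, n Node) (int, error) {
	out, err := g.ListOutboundEdges(n)
	if err != nil {
		return 0, err
	}
	in, err := g.ListInboundEdges(n)
	if err != nil {
		return 0, err
	}
	return len(out) + len(in), nil
}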
+ +package telemetry + +import ( + "context" + "os/exec" + "strings" + + "github.com/pkg/errors" +) + +func KernelVersion(ctx context.Context) (string, error) { + cmd := exec.CommandContext(ctx, "uname", "-r") + output, err := cmd.CombinedOutput() + if err != nil { + return "", errors.Wrapf(err, "failed to get linux kernel version: %s", string(output)) + } + return strings.TrimSuffix(string(output), "\n"), nil +} diff --git a/pkg/telemetry/heartbeat_linux_test.go b/pkg/telemetry/heartbeat_linux_test.go new file mode 100644 index 000000000..04574de8a --- /dev/null +++ b/pkg/telemetry/heartbeat_linux_test.go @@ -0,0 +1,20 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package telemetry + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestLinuxPlatformKernelVersion(t *testing.T) { + InitAppInsights("", "") + ctx := context.TODO() + + str, err := KernelVersion(ctx) + require.NoError(t, err) + require.NotEmpty(t, str) +} diff --git a/pkg/telemetry/heartbeat_windows.go b/pkg/telemetry/heartbeat_windows.go new file mode 100644 index 000000000..452b193fe --- /dev/null +++ b/pkg/telemetry/heartbeat_windows.go @@ -0,0 +1,21 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package telemetry + +import ( + "context" + "os/exec" + "strings" + + "github.com/pkg/errors" +) + +func KernelVersion(ctx context.Context) (string, error) { + cmd := exec.CommandContext(ctx, ".\\Windows\\System32\\WindowsPowerShell\\v1.0\\powershell.exe", "$([Environment]::OSVersion).VersionString") + output, err := cmd.CombinedOutput() + if err != nil { + return "", errors.Wrapf(err, "failed to get windows kernel version: %s", string(output)) + } + return strings.TrimSuffix(string(output), "\r\n"), nil +} diff --git a/pkg/telemetry/heartbeat_windows_test.go b/pkg/telemetry/heartbeat_windows_test.go new file mode 100644 index 000000000..49808d7fb --- /dev/null +++ b/pkg/telemetry/heartbeat_windows_test.go @@ -0,0 +1,20 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package telemetry + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestWindowsGetKernelVersion(t *testing.T) { + InitAppInsights("") + ctx := context.TODO() + + str, err := KernelVersion(ctx) + require.NoError(t, err) + require.NotEmpty(t, str) +} diff --git a/pkg/telemetry/noop_telemetry.go b/pkg/telemetry/noop_telemetry.go new file mode 100644 index 000000000..8391af105 --- /dev/null +++ b/pkg/telemetry/noop_telemetry.go @@ -0,0 +1,36 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
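KernelVersion shells out to the platform tool (uname -r on Linux, PowerShell on Windows), so callers are expected to bound it with a context. A small usage sketch, assuming the package is imported at the retina module path used elsewhere in this diff:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/microsoft/retina/pkg/telemetry" // module path assumed
)

func main() {
	// Bound the exec call so a wedged uname/powershell cannot stall the caller.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	v, err := telemetry.KernelVersion(ctx)
	if err != nil {
		fmt.Println("kernel version unavailable:", err)
		return
	}
	fmt.Println("kernel:", v)
}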
+ +package telemetry + +import ( + "context" + "time" + + "github.com/microsoft/ApplicationInsights-Go/appinsights/contracts" +) + +func NewNoopTelemetry() *NoopTelemetry { + return &NoopTelemetry{} +} + +type NoopTelemetry struct{} + +func (m NoopTelemetry) StartPerf(functionName string) *PerformanceCounter { + return &PerformanceCounter{} +} + +func (m NoopTelemetry) StopPerf(counter *PerformanceCounter) { +} + +func (m NoopTelemetry) Heartbeat(ctx context.Context, heartbeatInterval time.Duration) { +} + +func (m NoopTelemetry) TrackEvent(name string, properties map[string]string) { +} + +func (m NoopTelemetry) TrackMetric(name string, value float64, properties map[string]string) { +} + +func (m NoopTelemetry) TrackTrace(name string, severity contracts.SeverityLevel, properties map[string]string) { +} diff --git a/pkg/telemetry/telemetry.go b/pkg/telemetry/telemetry.go new file mode 100644 index 000000000..0e2028ff7 --- /dev/null +++ b/pkg/telemetry/telemetry.go @@ -0,0 +1,249 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package telemetry + +import ( + "context" + "fmt" + "os" + "runtime" + "runtime/debug" + "sync" + "time" + + "github.com/azure/kappie/pkg/log" + "github.com/microsoft/ApplicationInsights-Go/appinsights" + "github.com/microsoft/ApplicationInsights-Go/appinsights/contracts" +) + +var ( + client appinsights.TelemetryClient + version string +) + +type Telemetry interface { + StartPerf(name string) *PerformanceCounter + StopPerf(counter *PerformanceCounter) + Heartbeat(ctx context.Context, heartbeatInterval time.Duration) + TrackEvent(name string, properties map[string]string) + TrackMetric(name string, value float64, properties map[string]string) + TrackTrace(name string, severity contracts.SeverityLevel, properties map[string]string) +} + +func InitAppInsights(appinsightsId, appVersion string) { + if client != nil { + fmt.Printf("appinsights client already initialized") + return + } + telemetryConfig := appinsights.NewTelemetryConfiguration(appinsightsId) + telemetryConfig.MaxBatchInterval = 1 * time.Second + client = appinsights.NewTelemetryClientFromConfig(telemetryConfig) + + // Set the app version + version = appVersion +} + +func ShutdownAppInsights() { + select { + case <-client.Channel().Close(5 * time.Second): + // Five second timeout for retries. + + // If we got here, then all telemetry was submitted + // successfully, and we can proceed to exiting. + case <-time.After(30 * time.Second): + // Thirty second absolute timeout. This covers any + // previous telemetry submission that may not have + // completed before Close was called. + + // There are a number of reasons we could have + // reached here. We gave it a go, but telemetry + // submission failed somewhere. Perhaps old events + // were still retrying, or perhaps we're throttled. + // Either way, we don't want to wait around for it + // to complete, so let's just exit. 
+ } +} + +type TelemetryClient struct { + sync.RWMutex + processName string + properties map[string]string +} + +func NewAppInsightsTelemetryClient(processName string, additionalproperties map[string]string) *TelemetryClient { + if client == nil { + fmt.Println("appinsights client not initialized") + } + + properties := GetEnvironmentProperties() + + for k, v := range additionalproperties { + properties[k] = v + } + + return &TelemetryClient{ + processName: processName, + properties: properties, + } +} + +// TrackPanic function sends the stacktrace and flushes logs only in a goroutine where its call is deferred. +// Panics in other goroutines will not be caught by this recover function. +func TrackPanic() { + if r := recover(); r != nil { + message := fmt.Sprintf("Panic caused by: %v , Stacktrace %s", r, string(debug.Stack())) + trace := appinsights.NewTraceTelemetry(message, appinsights.Critical) + trace.Properties = GetEnvironmentProperties() + trace.Properties["version"] = version + + // Create trace and track it + client.Track(trace) + + // Close zapai and flush logs + if logger := log.Logger(); logger != nil { + logger.Close() + time.Sleep(10 * time.Second) + } + + ShutdownAppInsights() + + // propagate panic so that the pod wil restart. + panic(r) + } +} + +func GetEnvironmentProperties() map[string]string { + hostname, err := os.Hostname() + if err != nil { + fmt.Printf("failed to get hostname with err %v", err) + } + + return map[string]string{ + "goversion": runtime.Version(), + "os": runtime.GOOS, + "arch": runtime.GOARCH, + "numcores": fmt.Sprintf("%d", runtime.NumCPU()), + "hostname": hostname, + "podname": os.Getenv(EnvPodName), + } +} + +func (t *TelemetryClient) trackWarning(err error, msg string) { + t.TrackTrace(err.Error(), contracts.Warning, GetEnvironmentProperties()) +} + +func (t *TelemetryClient) heartbeat(ctx context.Context) { + kernelVersion, err := KernelVersion(ctx) + if err != nil { + t.trackWarning(err, "failed to get kernel version") + } + + t.TrackEvent("heartbeat", map[string]string{"kernelversion": kernelVersion}) +} + +func (t *TelemetryClient) TrackEvent(name string, properties map[string]string) { + event := appinsights.NewEventTelemetry(name) + + if t.properties != nil { + t.RLock() + for k, v := range t.properties { + event.Properties[k] = v + } + + for k, v := range properties { + event.Properties[k] = v + } + + event.Properties["process"] = t.processName + t.RUnlock() + } + + client.Track(event) +} + +func (t *TelemetryClient) TrackMetric(metricname string, value float64, properties map[string]string) { + metric := appinsights.NewMetricTelemetry(metricname, value) + if t.properties != nil { + t.RLock() + for k, v := range t.properties { + metric.Properties[k] = v + } + + for k, v := range properties { + metric.Properties[k] = v + } + + metric.Properties["process"] = t.processName + t.RUnlock() + } + + client.Track(metric) +} + +func (t *TelemetryClient) TrackTrace(name string, severity contracts.SeverityLevel, properties map[string]string) { + trace := appinsights.NewTraceTelemetry(name, severity) + if t.properties != nil { + t.RLock() + for k, v := range t.properties { + trace.Properties[k] = v + } + + for k, v := range properties { + trace.Properties[k] = v + } + + trace.Properties["process"] = t.processName + t.RUnlock() + } + + client.Track(trace) +} + +func (t *TelemetryClient) TrackException(exception *appinsights.ExceptionTelemetry) { + if t.properties != nil { + t.RLock() + for k, v := range t.properties { + exception.Properties[k] = v + } + 
+ exception.Properties["process"] = t.processName + + t.RUnlock() + } + client.Track(exception) +} + +type PerformanceCounter struct { + functionName string + startTime time.Time +} + +// Used with start to measure the execution time of a function +// usage defer telemetry.StopPerf(telemetry.StartPerf("functionName")) +// start perf will be evaluated on defer line, and then stop perf will be evaluated on return +func (t *TelemetryClient) StartPerf(functionName string) *PerformanceCounter { + return &PerformanceCounter{ + functionName: functionName, + startTime: time.Now(), + } +} + +func (t *TelemetryClient) StopPerf(counter *PerformanceCounter) { + ms := float64(time.Since(counter.startTime).Milliseconds()) + t.TrackMetric(counter.functionName, ms, nil) +} + +func (t *TelemetryClient) Heartbeat(ctx context.Context, heartbeatInterval time.Duration) { + ticker := time.NewTicker(heartbeatInterval) // TODOL: make configurable + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + t.heartbeat(ctx) + } + } +} diff --git a/pkg/telemetry/telemetry_test.go b/pkg/telemetry/telemetry_test.go new file mode 100644 index 000000000..bebf960c2 --- /dev/null +++ b/pkg/telemetry/telemetry_test.go @@ -0,0 +1,132 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package telemetry + +import ( + "context" + "fmt" + "os" + "runtime" + "sync" + "testing" + + "github.com/stretchr/testify/require" +) + +func init() { + InitAppInsights("test", "test") +} + +func TestNewAppInsightsTelemetryClient(t *testing.T) { + require.NotPanics(t, func() { NewAppInsightsTelemetryClient("test", map[string]string{}) }) +} + +func TestGetEnvironmentProerties(t *testing.T) { + properties := GetEnvironmentProperties() + + hostname, err := os.Hostname() + if err != nil { + fmt.Printf("failed to get hostname with err %v", err) + } + require.NotEmpty(t, properties) + require.Exactly( + t, + map[string]string{ + "goversion": runtime.Version(), + "os": runtime.GOOS, + "arch": runtime.GOARCH, + "numcores": fmt.Sprintf("%d", runtime.NumCPU()), + "hostname": hostname, + "podname": os.Getenv("POD_NAME"), + }, + properties, + ) +} + +func TestNoopTelemetry(t *testing.T) { + telemetry := NewNoopTelemetry() + require.NotNil(t, telemetry) + require.Equal(t, &NoopTelemetry{}, telemetry) +} + +func TestNoopTelemetryStartPerf(t *testing.T) { + telemetry := NewNoopTelemetry() + require.NotNil(t, telemetry) + require.Equal(t, &NoopTelemetry{}, telemetry) + + perf := telemetry.StartPerf("test") + require.NotNil(t, perf) + require.Equal(t, &PerformanceCounter{}, perf) +} + +func TestHeartbeat(t *testing.T) { + InitAppInsights("test", "test") + type fields struct { + properties map[string]string + } + type args struct { + ctx context.Context + } + tests := []struct { + name string + fields fields + args args + }{ + { + name: "test heartbeat", + fields: fields{ + properties: map[string]string{ + "test": "test", + }, + }, + args: args{ + ctx: context.Background(), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tr := &TelemetryClient{ + RWMutex: sync.RWMutex{}, //nolint:copylocks + properties: tt.fields.properties, + } + tr.heartbeat(tt.args.ctx) + }) + } +} + +func TestTelemetryClient_StopPerf(t *testing.T) { + type fields struct { + properties map[string]string + } + type args struct { + counter *PerformanceCounter + } + tests := []struct { + name string + fields fields + args args + }{ + { + name: "test stop performance", + fields: fields{ + 
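The perf-counter and panic helpers are both meant to be wired up with defer, as the StartPerf comment above describes. A rough sketch of how a process entry point might combine them; the process name, instrumentation key and version strings are placeholders, not values from this diff:

package main

import (
	"context"
	"time"

	"github.com/microsoft/retina/pkg/telemetry" // module path assumed
)

func run(ctx context.Context, t telemetry.Telemetry) {
	// StartPerf is evaluated when the defer statement executes, StopPerf on
	// return, so run()'s elapsed time is reported as a metric named "run".
	defer t.StopPerf(t.StartPerf("run"))

	// ... work ...
}

func main() {
	telemetry.InitAppInsights("placeholder-ikey", "v0.0.0-dev") // placeholder values
	defer telemetry.TrackPanic()                                // only catches panics on this goroutine

	client := telemetry.NewAppInsightsTelemetryClient("example-process", map[string]string{"cluster": "dev"})
	go client.Heartbeat(context.Background(), 5*time.Minute)

	run(context.Background(), client)

	telemetry.ShutdownAppInsights() // flush telemetry on the normal exit path
}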
properties: map[string]string{ + "test": "test", + }, + }, + args: args{ + counter: &PerformanceCounter{}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tr := &TelemetryClient{ + RWMutex: sync.RWMutex{}, + properties: tt.fields.properties, + } + tr.StopPerf(tt.args.counter) + }) + } +} diff --git a/pkg/telemetry/utils.go b/pkg/telemetry/utils.go new file mode 100644 index 000000000..e5ce87993 --- /dev/null +++ b/pkg/telemetry/utils.go @@ -0,0 +1,21 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package telemetry + +import ( + kcfg "sigs.k8s.io/controller-runtime/pkg/client/config" +) + +// GetK8SApiserverURLFromKubeConfig returns apiserver URL from kubeconfig. +// The apiserver URL is expected to be publicly unique identifier of the Kubernetes cluster. +// In case the kubeconfig does not exists, this identifier can be obtained for pods from kube-system namespace. +// Kubelet will populate env KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT for Pods deployed in kube-system +// namespace, which can be used to generate apiserver URL from `GetConfig()`. +func GetK8SApiserverURLFromKubeConfig() (string, error) { + cfg, err := kcfg.GetConfig() + if err != nil { + return "", err + } + return cfg.Host, nil +} diff --git a/pkg/utils/attr_utils.go b/pkg/utils/attr_utils.go new file mode 100644 index 000000000..c0c47e957 --- /dev/null +++ b/pkg/utils/attr_utils.go @@ -0,0 +1,93 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package utils + +import ( + "go.opentelemetry.io/otel/attribute" +) + +const ( + unknown = "__uknown__" +) + +var ( + pluginKey = attribute.Key("plugin") + eventTypeKey = attribute.Key("event_type") + timestampKey = attribute.Key("timestamp") + + localIPKey = attribute.Key("local_ip") + localPortKey = attribute.Key("local_port") + localKindKey = attribute.Key("local_kind") + localNameKey = attribute.Key("local_name") + localNamespaceKey = attribute.Key("local_namespace") + localOwnerNameKey = attribute.Key("local_owner_name") + localOwnerKindKey = attribute.Key("local_owner_kind") + localNodeKey = attribute.Key("local_node") + + remoteIPKey = attribute.Key("remote_ip") + remotePortKey = attribute.Key("remote_port") + remoteKindKey = attribute.Key("remote_kind") + remoteNameKey = attribute.Key("remote_name") + remoteNamespaceKey = attribute.Key("remote_namespace") + remoteOwnerNameKey = attribute.Key("remote_owner_name") + remoteOwnerKindKey = attribute.Key("remote_owner_kind") + remoteNodeKey = attribute.Key("remote_node") + + // todo move to attributes pkg? 
+ Type = "type" + Reason = "reason" + Direction = "direction" + SourceNodeName = "source_node_name" + TargetNodeName = "target_node_name" + State = "state" + Address = "address" + Port = "port" + StatName = "statistic_name" + InterfaceName = "interface_name" + Flag = "flag" + Endpoint = "endpoint" + AclRule = "aclrule" + Active = "ACTIVE" + + // TCP Connection Statistic Names + ResetCount = "ResetCount" + ClosedFin = "ClosedFin" + ResetSyn = "ResetSyn" + TcpHalfOpenTimeouts = "TcpHalfOpenTimeouts" + Verified = "Verified" + TimedOutCount = "TimedOutCount" + TimeWaitExpiredCount = "TimeWaitExpiredCount" + + // Events types + Kernel = "kernel" + EnricherRing = "enricher_ring" + BufferedChannel = "buffered_channel" + ExternalChannel = "external_channel" + + // TCP Flags + SYN = "SYN" + SYNACK = "SYNACK" + ACK = "ACK" + FIN = "FIN" + RST = "RST" + PSH = "PSH" + ECE = "ECE" + CWR = "CWR" + NS = "NS" + URG = "URG" + + DataPlane = "dataplane" + Linux = "linux" + Windows = "windows" + + // DNS labels. + DNSLabels = []string{"return_code", "query_type", "query", "response", "num_response"} +) + +func GetPluginEventAttributes(attrs []attribute.KeyValue, pluginName, eventName, timestamp string) []attribute.KeyValue { + return append(attrs, + pluginKey.String(pluginName), + eventTypeKey.String(eventName), + timestampKey.String(timestamp), + ) +} diff --git a/pkg/utils/common.go b/pkg/utils/common.go new file mode 100644 index 000000000..aef4de75a --- /dev/null +++ b/pkg/utils/common.go @@ -0,0 +1,84 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package utils + +import ( + "encoding/json" + "fmt" + "math" + "net/http" + "time" +) + +const ( + contentTypeStr = "Content-Type" + defaultContentType = "application/json; charset=UTF-8" +) + +// TODO: Steven to add reverse lookup code here + +func DecodeRequestBody(request *http.Request, iface interface{}) (err error) { + if request.Body != nil { + err = json.NewDecoder(request.Body).Decode(&iface) + } else { + err = fmt.Errorf("nil request.body") + } + + return err +} + +func EncodeResponseBody(w http.ResponseWriter, iface interface{}) error { + w.Header().Set(contentTypeStr, defaultContentType) + w.WriteHeader(http.StatusOK) + return json.NewEncoder(w).Encode(iface) +} + +// Inspired by https://github.com/mauriciovasquezbernal/talks/blob/1f2080afe731949a033330c0adc290be8f3fc06d/2022-ebpf-training/2022-10-13/drop/main.go . +func Uint32Ptr(v uint32) *uint32 { + return &v +} + +func StringPtr(v string) *string { + return &v +} + +// Exponential backoff retry logic. +func Retry(f func() error, retry int) (err error) { + for i := 0; i < retry; i++ { + err = f() + if err == nil { + return nil + } + t := int64(math.Pow(2, float64(i))) + time.Sleep(time.Duration(t) * time.Second) + } + return err +} + +func CompareStringSlice(a, b []string) bool { + if len(a) != len(b) { + return false + } + + aMap := make(map[string]bool) + for _, v := range a { + aMap[v] = true + } + + bMap := make(map[string]bool) + for _, v := range b { + bMap[v] = true + } + + if len(aMap) != len(bMap) { + return false + } + + for k := range aMap { + if _, ok := bMap[k]; !ok { + return false + } + } + + return true +} diff --git a/pkg/utils/flow_utils.go b/pkg/utils/flow_utils.go new file mode 100644 index 000000000..e99e616b1 --- /dev/null +++ b/pkg/utils/flow_utils.go @@ -0,0 +1,273 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+package utils + +import ( + "net" + "time" + + "github.com/microsoft/retina/pkg/log" + "github.com/cilium/cilium/api/v1/flow" + "github.com/cilium/cilium/pkg/monitor/api" + "go.uber.org/zap" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/timestamppb" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +// Additional Verdicts to be used for flow objects +const ( + Verdict_RETRANSMISSION flow.Verdict = 15 + Verdict_DNS flow.Verdict = 16 + TypeUrl string = "retina.io" +) + +// ToFlow returns a flow.Flow object. +// This sets up a L3/L4 flow object. +// sourceIP, destIP are IPv4 addresses. +// sourcePort, destPort are TCP/UDP ports. +// proto is the protocol number. Ref: https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml . +// observationPoint is the observation point+direction of the flow. 0 is from n/w stack to container, 1 is from container to stack, +// 2 is from host to network and 3 is from network to host. +// ts is the timestamp in nanoseconds. +func ToFlow( + ts int64, + sourceIP, destIP net.IP, + sourcePort, destPort uint32, + proto uint8, + observationPoint uint32, + verdict flow.Verdict, + dropReason uint32, +) *flow.Flow { //nolint:typecheck + var ( + l4 *flow.Layer4 + checkpoint flow.TraceObservationPoint + direction flow.TrafficDirection + subeventtype int + ) + + l := log.Logger().Named("ToFlow") + + switch proto { + case 6: + l4 = &flow.Layer4{ + Protocol: &flow.Layer4_TCP{ + TCP: &flow.TCP{ + SourcePort: sourcePort, + DestinationPort: destPort, + }, + }, + } + case 17: + l4 = &flow.Layer4{ + Protocol: &flow.Layer4_UDP{ + UDP: &flow.UDP{ + SourcePort: sourcePort, + DestinationPort: destPort, + }, + }, + } + } + + // We are attaching the filters to the veth interface on the host side. + // So for HOST -> CONTAINER, egress of host veth is ingress of container. + // Hence, we need to swap the direction. + switch observationPoint { + case uint32(0): + checkpoint = flow.TraceObservationPoint_TO_STACK + direction = flow.TrafficDirection_EGRESS + subeventtype = int(api.TraceToStack) + case uint32(1): + checkpoint = flow.TraceObservationPoint_TO_ENDPOINT + direction = flow.TrafficDirection_INGRESS + subeventtype = int(api.TraceToLxc) + case uint32(2): + checkpoint = flow.TraceObservationPoint_FROM_NETWORK + direction = flow.TrafficDirection_INGRESS + subeventtype = int(api.TraceFromNetwork) + case uint32(3): + checkpoint = flow.TraceObservationPoint_TO_NETWORK + direction = flow.TrafficDirection_EGRESS + subeventtype = int(api.TraceToNetwork) + default: + checkpoint = flow.TraceObservationPoint_UNKNOWN_POINT + direction = flow.TrafficDirection_TRAFFIC_DIRECTION_UNKNOWN + } + + if verdict == 0 { + verdict = flow.Verdict_FORWARDED + } + + ext, _ := anypb.New(&RetinaMetadata{}) //nolint:typecheck + + f := &flow.Flow{ + Type: flow.FlowType_L3_L4, + EventType: &flow.CiliumEventType{ + Type: int32(api.MessageTypeTrace), + SubType: int32(subeventtype), + }, + IP: &flow.IP{ + Source: sourceIP.String(), + Destination: destIP.String(), + // We only support IPv4 for now. + IpVersion: flow.IPVersion_IPv4, + }, + L4: l4, + TraceObservationPoint: checkpoint, + TrafficDirection: direction, + Verdict: verdict, + DropReason: dropReason, + Extensions: ext, + IsReply: &wrapperspb.BoolValue{Value: false}, // Setting false by default as we don't have a better way to determine flow direction. 
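+		// Note: Extensions starts out as an empty RetinaMetadata; helpers later in this file
+		// (AddTcpID, AddDnsInfo, AddPacketSize) unmarshal it, update one field and re-marshal it.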
+ } + if t, err := decodeTime(ts); err == nil { + f.Time = t + } else { + l.Warn("Failed to get current time", zap.Error(err)) + } + return f +} + +func AddTcpFlags(f *flow.Flow, syn, ack, fin, rst, psh, urg uint16) { + if f.L4.GetTCP() == nil { + return + } + + f.L4.GetTCP().Flags = &flow.TCPFlags{ + SYN: syn == uint16(1), + ACK: ack == uint16(1), + FIN: fin == uint16(1), + RST: rst == uint16(1), + PSH: psh == uint16(1), + URG: urg == uint16(1), + } +} + +// Add TSval/TSecr to the flow as TCP ID. +// The TSval/TSecr works as ID for the flow. +// We will use this ID to calculate latency. +func AddTcpID(f *flow.Flow, id uint64) { + if f.L4 == nil || f.L4.GetTCP() == nil { + return + } + k := &RetinaMetadata{} //nolint:typecheck + f.Extensions.UnmarshalTo(k) //nolint:errcheck + k.TcpId = id + f.Extensions, _ = anypb.New(k) //nolint:errcheck +} + +func GetTcpID(f *flow.Flow) uint64 { + if f.L4 == nil || f.L4.GetTCP() == nil { + return 0 + } + k := &RetinaMetadata{} //nolint:typecheck + f.Extensions.UnmarshalTo(k) //nolint:errcheck + return k.TcpId +} + +func AddDnsInfo(f *flow.Flow, qType string, rCode uint32, query string, qTypes []string, numAnswers int, ips []string) { + if f == nil { + return + } + // Set type to L7. + f.Type = flow.FlowType_L7 + // Reset Eventtype if already set at L3/4 level. + f.EventType = &flow.CiliumEventType{ + Type: int32(api.MessageTypeNames[api.MessageTypeNameL7]), + } + l7 := flow.Layer7_Dns{ + Dns: &flow.DNS{ + Rcode: rCode, + Query: query, + Qtypes: qTypes, + Ips: ips, + }, + } + f.L7 = &flow.Layer7{ + Record: &l7, + } + k := &RetinaMetadata{} //nolint:typecheck + f.Extensions.UnmarshalTo(k) //nolint:errcheck + switch qType { + case "Q": + k.DnsType = DNSType_QUERY + f.L7.Type = flow.L7FlowType_REQUEST + case "R": + k.DnsType = DNSType_RESPONSE + f.L7.Type = flow.L7FlowType_RESPONSE + f.IsReply = &wrapperspb.BoolValue{Value: true} // we can definitely say that this is a reply + default: + k.DnsType = DNSType_UNKNOWN + f.L7.Type = flow.L7FlowType_UNKNOWN_L7_TYPE + } + k.NumResponses = uint32(numAnswers) + f.Extensions, _ = anypb.New(k) +} + +func GetDns(f *flow.Flow) (*flow.DNS, DNSType, uint32) { + if f == nil || f.L7 == nil || f.L7.GetDns() == nil { + return nil, DNSType_UNKNOWN, 0 + } + dns := f.L7.GetDns() + if f.Extensions == nil { + return dns, DNSType_UNKNOWN, 0 + } + k := &RetinaMetadata{} //nolint:typecheck + f.Extensions.UnmarshalTo(k) //nolint:errcheck + + return dns, k.DnsType, k.NumResponses +} + +// DNS Return code to string. 
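+// The mapping follows the standard DNS RCODE assignments (RFC 1035 / IANA): for example,
+// a response carrying Rcode 3 is reported as "NXDOMAIN", and unrecognized codes map to "".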
+func DnsRcodeToString(f *flow.Flow) string { + if f == nil || f.L7 == nil || f.L7.GetDns() == nil { + return "" + } + switch f.L7.GetDns().Rcode { + case 0: + return "NOERROR" + case 1: + return "FORMERR" + case 2: + return "SERVFAIL" + case 3: + return "NXDOMAIN" + case 4: + return "NOTIMP" + case 5: + return "REFUSED" + default: + return "" + } +} + +func AddPacketSize(f *flow.Flow, packetSize uint64) { + if f.Extensions == nil { + return + } + k := &RetinaMetadata{} //nolint:typecheck + f.Extensions.UnmarshalTo(k) //nolint:errcheck + k.Bytes = packetSize + f.Extensions, _ = anypb.New(k) +} + +func PacketSize(f *flow.Flow) uint64 { + if f.Extensions == nil { + return 0 + } + k := &RetinaMetadata{} //nolint:typecheck + f.Extensions.UnmarshalTo(k) //nolint:errcheck + return k.Bytes +} + +func decodeTime(nanoseconds int64) (pbTime *timestamppb.Timestamp, err error) { + goTime, err := time.Parse(time.RFC3339Nano, time.Unix(0, nanoseconds).Format(time.RFC3339Nano)) + if err != nil { + return nil, err + } + pbTime = timestamppb.New(goTime) + if err = pbTime.CheckValid(); err != nil { + return nil, err + } + return pbTime, nil +} diff --git a/pkg/utils/metadata.pb.go b/pkg/utils/metadata.pb.go new file mode 100644 index 000000000..6a531191a --- /dev/null +++ b/pkg/utils/metadata.pb.go @@ -0,0 +1,232 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.24.2 +// source: pkg/utils/metadata.proto + +package utils + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type DNSType int32 + +const ( + DNSType_UNKNOWN DNSType = 0 + DNSType_QUERY DNSType = 1 + DNSType_RESPONSE DNSType = 2 +) + +// Enum value maps for DNSType. +var ( + DNSType_name = map[int32]string{ + 0: "UNKNOWN", + 1: "QUERY", + 2: "RESPONSE", + } + DNSType_value = map[string]int32{ + "UNKNOWN": 0, + "QUERY": 1, + "RESPONSE": 2, + } +) + +func (x DNSType) Enum() *DNSType { + p := new(DNSType) + *p = x + return p +} + +func (x DNSType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (DNSType) Descriptor() protoreflect.EnumDescriptor { + return file_pkg_utils_metadata_proto_enumTypes[0].Descriptor() +} + +func (DNSType) Type() protoreflect.EnumType { + return &file_pkg_utils_metadata_proto_enumTypes[0] +} + +func (x DNSType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use DNSType.Descriptor instead. +func (DNSType) EnumDescriptor() ([]byte, []int) { + return file_pkg_utils_metadata_proto_rawDescGZIP(), []int{0} +} + +type RetinaMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Bytes uint64 `protobuf:"varint,1,opt,name=bytes,proto3" json:"bytes,omitempty"` + // DNS metadata. + DnsType DNSType `protobuf:"varint,2,opt,name=dns_type,json=dnsType,proto3,enum=utils.DNSType" json:"dns_type,omitempty"` + NumResponses uint32 `protobuf:"varint,3,opt,name=num_responses,json=numResponses,proto3" json:"num_responses,omitempty"` + // TCP ID. Either Tsval or Tsecr will be set. 
+ TcpId uint64 `protobuf:"varint,4,opt,name=tcp_id,json=tcpId,proto3" json:"tcp_id,omitempty"` +} + +func (x *RetinaMetadata) Reset() { + *x = RetinaMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_utils_metadata_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RetinaMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RetinaMetadata) ProtoMessage() {} + +func (x *RetinaMetadata) ProtoReflect() protoreflect.Message { + mi := &file_pkg_utils_metadata_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RetinaMetadata.ProtoReflect.Descriptor instead. +func (*RetinaMetadata) Descriptor() ([]byte, []int) { + return file_pkg_utils_metadata_proto_rawDescGZIP(), []int{0} +} + +func (x *RetinaMetadata) GetBytes() uint64 { + if x != nil { + return x.Bytes + } + return 0 +} + +func (x *RetinaMetadata) GetDnsType() DNSType { + if x != nil { + return x.DnsType + } + return DNSType_UNKNOWN +} + +func (x *RetinaMetadata) GetNumResponses() uint32 { + if x != nil { + return x.NumResponses + } + return 0 +} + +func (x *RetinaMetadata) GetTcpId() uint64 { + if x != nil { + return x.TcpId + } + return 0 +} + +var File_pkg_utils_metadata_proto protoreflect.FileDescriptor + +var file_pkg_utils_metadata_proto_rawDesc = []byte{ + 0x0a, 0x18, 0x70, 0x6b, 0x67, 0x2f, 0x75, 0x74, 0x69, 0x6c, 0x73, 0x2f, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x75, 0x74, 0x69, 0x6c, + 0x73, 0x22, 0x8d, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x74, 0x69, 0x6e, 0x61, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x08, 0x64, 0x6e, + 0x73, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x75, + 0x74, 0x69, 0x6c, 0x73, 0x2e, 0x44, 0x4e, 0x53, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x64, 0x6e, + 0x73, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6e, 0x75, + 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x12, 0x15, 0x0a, 0x06, 0x74, 0x63, + 0x70, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x74, 0x63, 0x70, 0x49, + 0x64, 0x2a, 0x2f, 0x0a, 0x07, 0x44, 0x4e, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x51, 0x55, 0x45, + 0x52, 0x59, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, + 0x10, 0x02, 0x42, 0x23, 0x5a, 0x21, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x61, 0x7a, 0x75, 0x72, 0x65, 0x2f, 0x6b, 0x61, 0x70, 0x70, 0x69, 0x65, 0x2f, 0x70, 0x6b, + 0x67, 0x2f, 0x75, 0x74, 0x69, 0x6c, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_pkg_utils_metadata_proto_rawDescOnce sync.Once + file_pkg_utils_metadata_proto_rawDescData = file_pkg_utils_metadata_proto_rawDesc +) + +func file_pkg_utils_metadata_proto_rawDescGZIP() []byte { + file_pkg_utils_metadata_proto_rawDescOnce.Do(func() { + file_pkg_utils_metadata_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_pkg_utils_metadata_proto_rawDescData) + }) + return file_pkg_utils_metadata_proto_rawDescData +} + +var file_pkg_utils_metadata_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_pkg_utils_metadata_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_pkg_utils_metadata_proto_goTypes = []interface{}{ + (DNSType)(0), // 0: utils.DNSType + (*RetinaMetadata)(nil), // 1: utils.RetinaMetadata +} +var file_pkg_utils_metadata_proto_depIdxs = []int32{ + 0, // 0: utils.RetinaMetadata.dns_type:type_name -> utils.DNSType + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_pkg_utils_metadata_proto_init() } +func file_pkg_utils_metadata_proto_init() { + if File_pkg_utils_metadata_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pkg_utils_metadata_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetinaMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pkg_utils_metadata_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pkg_utils_metadata_proto_goTypes, + DependencyIndexes: file_pkg_utils_metadata_proto_depIdxs, + EnumInfos: file_pkg_utils_metadata_proto_enumTypes, + MessageInfos: file_pkg_utils_metadata_proto_msgTypes, + }.Build() + File_pkg_utils_metadata_proto = out.File + file_pkg_utils_metadata_proto_rawDesc = nil + file_pkg_utils_metadata_proto_goTypes = nil + file_pkg_utils_metadata_proto_depIdxs = nil +} diff --git a/pkg/utils/metadata.proto b/pkg/utils/metadata.proto new file mode 100644 index 000000000..9b8226869 --- /dev/null +++ b/pkg/utils/metadata.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; +package utils; + +option go_package = "github.com/microsoft/retina/pkg/utils"; + +message RetinaMetadata { + uint64 bytes = 1; + + // DNS metadata. + DNSType dns_type = 2; + uint32 num_responses = 3; + + // TCP ID. Either Tsval or Tsecr will be set. + uint64 tcp_id = 4; +} + +enum DNSType { + UNKNOWN = 0; + QUERY = 1; + RESPONSE = 2; +} diff --git a/pkg/utils/metric_names.go b/pkg/utils/metric_names.go new file mode 100644 index 000000000..b672dd208 --- /dev/null +++ b/pkg/utils/metric_names.go @@ -0,0 +1,65 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+package utils + +/* Creating this UTILs package to avoid operator from importing filtermanager package + * + * This package will contain all the utility functions that are used by the operator + * needed to make metrics work + */ + +const ( + // Common Counters across os distributions (should these be asynch or synch) + // make sure IsMetric is updated if you add a new metric here + DropCountTotalName = "drop_count" + DropBytesTotalName = "drop_bytes" + ForwardCountTotalName = "forward_count" + ForwardBytesTotalName = "forward_bytes" + TcpStateGaugeName = "tcp_state" + TcpConnectionRemoteGaugeName = "tcp_connection_remote" + TcpConnectionStatsName = "tcp_connection_stats" + TcpFlagCounters = "tcp_flag_counters" + TcpRetransCount = "tcp_retransmission_count" + IpConnectionStatsName = "ip_connection_stats" + UdpConnectionStatsName = "udp_connection_stats" + UdpActiveSocketsCounterName = "udp_active_sockets" + InterfaceStatsName = "interface_stats" + DNSRequestCounterName = "dns_request_count" + DNSResponseCounterName = "dns_response_count" + NodeApiServerLatencyName = "node_apiserver_latency" + NodeApiServerTcpHandshakeLatencyName = "node_apiserver_handshake_latency" + NoResponseFromApiServerName = "node_apiserver_no_response" + + // Common Gauges across os distributions + NodeConnectivityStatusName = "node_connectivity_status" + NodeConnectivityLatencySecondsName = "node_connectivity_latency_seconds" +) + +// IsAdvancedMetric is a helper function to determine if a name is an advanced metric +func IsAdvancedMetric(name string) bool { + switch name { + case + DropCountTotalName, + DropBytesTotalName, + ForwardCountTotalName, + ForwardBytesTotalName, + NodeConnectivityStatusName, + NodeConnectivityLatencySecondsName, + TcpStateGaugeName, + TcpConnectionRemoteGaugeName, + TcpConnectionStatsName, + TcpFlagCounters, + TcpRetransCount, + IpConnectionStatsName, + UdpConnectionStatsName, + UdpActiveSocketsCounterName, + DNSRequestCounterName, + DNSResponseCounterName, + NodeApiServerLatencyName, + NodeApiServerTcpHandshakeLatencyName, + NoResponseFromApiServerName: + return true + default: + return false + } +} diff --git a/pkg/utils/utils_linux.go b/pkg/utils/utils_linux.go new file mode 100644 index 000000000..7f9d16f08 --- /dev/null +++ b/pkg/utils/utils_linux.go @@ -0,0 +1,111 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package utils + +import ( + "encoding/binary" + "fmt" + "net" + "syscall" + "unsafe" + + "github.com/pkg/errors" + "github.com/vishvananda/netlink" +) + +var showLink = netlink.LinkList + +// Both openRawSock and htons are available in +// https://github.com/cilium/ebpf/blob/master/example_sock_elf_test.go. +// MIT license. + +func OpenRawSocket(index int) (int, error) { + sock, err := syscall.Socket(syscall.AF_PACKET, syscall.SOCK_RAW|syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC, int(htons(syscall.ETH_P_ALL))) + if err != nil { + return 0, err + } + sll := syscall.SockaddrLinklayer{ + Ifindex: index, + Protocol: htons(syscall.ETH_P_ALL), + } + if err := syscall.Bind(sock, &sll); err != nil { + return 0, err + } + return sock, nil +} + +// htons converts the unsigned short integer hostshort from host byte order to network byte order. 
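+// For example, on a little-endian host htons(syscall.ETH_P_ALL), i.e. htons(0x0003),
+// returns 0x0300: the two bytes are swapped so the value is laid out in network byte order.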
+func htons(i uint16) uint16 { + b := make([]byte, 2) + binary.BigEndian.PutUint16(b, i) + return *(*uint16)(unsafe.Pointer(&b[0])) +} + +// https://gist.github.com/ammario/649d4c0da650162efd404af23e25b86b +func Int2ip(nn uint32) net.IP { + ip := make(net.IP, 4) + switch determineEndian() { + case binary.BigEndian: + binary.BigEndian.PutUint32(ip, nn) + default: + // default is little endian + binary.LittleEndian.PutUint32(ip, nn) + } + return ip +} + +func Ip2int(ip []byte) (res uint32, err error) { + if len(ip) == 16 { + return res, errors.New("IPv6 not supported") + } + switch determineEndian() { + case binary.BigEndian: + res = binary.BigEndian.Uint32(ip) + default: + // default is little endian. + res = binary.LittleEndian.Uint32(ip) + } + return res, nil +} + +// HostToNetShort converts a 16-bit integer from host to network byte order, aka "htons" +func HostToNetShort(i uint16) uint16 { + b := make([]byte, 2) + binary.LittleEndian.PutUint16(b, i) + return binary.BigEndian.Uint16(b) +} + +func determineEndian() binary.ByteOrder { + var endian binary.ByteOrder + buf := [2]byte{} + *(*uint16)(unsafe.Pointer(&buf[0])) = uint16(0xABCD) + + switch buf { + case [2]byte{0xCD, 0xAB}: + endian = binary.LittleEndian + case [2]byte{0xAB, 0xCD}: + endian = binary.BigEndian + default: + fmt.Println("Couldn't determine endianness") + } + return endian +} + +// Get all the veth interfaces. +// Similar to ip link show type veth +func GetInterface(name string, interfaceType string) (netlink.Link, error) { + links, err := showLink() + if err != nil { + return nil, err + } + + for _, link := range links { + // Ref for types: https://github.com/vishvananda/netlink/blob/ced5aaba43e3f25bb5f04860641d3e3dd04a8544/link.go#L367 + // Version of netlink tested - https://github.com/vishvananda/netlink/tree/v1.2.1-beta.2 + if link.Type() == interfaceType && link.Attrs().Name == name { + return link, nil + } + } + + return nil, fmt.Errorf("interface %s of type %s not found", name, interfaceType) +} diff --git a/pkg/utils/utils_linux_test.go b/pkg/utils/utils_linux_test.go new file mode 100644 index 000000000..7143f91b6 --- /dev/null +++ b/pkg/utils/utils_linux_test.go @@ -0,0 +1,93 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package utils + +import ( + "net" + "testing" + + "github.com/microsoft/retina/pkg/log" + "github.com/cilium/cilium/api/v1/flow" + "github.com/stretchr/testify/assert" +) + +func TestToFlow(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + ts := int64(1649748687588860) + f := ToFlow(ts, net.ParseIP("1.1.1.1").To4(), + net.ParseIP("2.2.2.2").To4(), + 443, 80, 6, uint32(1), flow.Verdict_FORWARDED, 0) + /* + expected ---> flow.Flow{ + IP: &flow.IP{ + Source: "1.1.1.1", + Destination: "2.2.2.2", + IpVersion: 1, + }, + L4: &flow.Layer4{ + Protocol: &flow.Layer4_TCP{ + TCP: &flow.TCP{ + SourcePort: 443, + DestinationPort: 80, + }, + }, + }, + TraceObservationPoint: flow.TraceObservationPoint_TO_ENDPOINT, + } + */ + assert.Equal(t, f.IP.Source, "1.1.1.1") + assert.Equal(t, f.IP.Destination, "2.2.2.2") + assert.Equal(t, f.IP.IpVersion, flow.IPVersion_IPv4) + assert.EqualValues(t, f.L4.Protocol.(*flow.Layer4_TCP).TCP.SourcePort, uint32(443)) + assert.EqualValues(t, f.L4.Protocol.(*flow.Layer4_TCP).TCP.DestinationPort, uint32(80)) + assert.NotNil(t, f.Time) + assert.NotNil(t, f.Extensions) + assert.Equal(t, f.Type, flow.FlowType_L3_L4) + + if !f.GetTime().IsValid() { + t.Errorf("Time is not valid") + } + assert.EqualValues(t, f.GetTime().AsTime().Format("Oct 1 15:04:05.000"), "Oct 1 02:15:48.687") + + expectedObsPoint := []flow.TraceObservationPoint{ + flow.TraceObservationPoint_TO_STACK, + flow.TraceObservationPoint_TO_ENDPOINT, + flow.TraceObservationPoint_FROM_NETWORK, + flow.TraceObservationPoint_TO_NETWORK, + flow.TraceObservationPoint_UNKNOWN_POINT, + } + expectedSubtype := []int32{3, 0, 10, 11, 0} + for idx, val := range []uint32{0, 1, 2, 3, 4} { + f = ToFlow(ts, net.ParseIP("1.1.1.1").To4(), + net.ParseIP("2.2.2.2").To4(), + 443, 80, 6, uint32(val), flow.Verdict_FORWARDED, 0) + assert.EqualValues(t, f.TraceObservationPoint, expectedObsPoint[idx]) + assert.EqualValues(t, f.GetEventType().GetSubType(), expectedSubtype[idx]) + } +} + +func TestAddPacketSize(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + ts := int64(1649748687588864) + f := ToFlow(ts, net.ParseIP("1.1.1.1").To4(), + net.ParseIP("2.2.2.2").To4(), + 443, 80, 6, uint32(1), flow.Verdict_FORWARDED, 0) + AddPacketSize(f, uint64(100)) + + res := PacketSize(f) + assert.EqualValues(t, res, uint64(100)) +} + +func TestTcpID(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + ts := int64(1649748687588864) + f := ToFlow(ts, net.ParseIP("1.1.1.1").To4(), + net.ParseIP("2.2.2.2").To4(), + 443, 80, 6, uint32(1), flow.Verdict_FORWARDED, 0) + AddTcpID(f, uint64(1234)) + assert.EqualValues(t, GetTcpID(f), uint64(1234)) +} diff --git a/pkg/watchers/apiserver/apiserver.go b/pkg/watchers/apiserver/apiserver.go new file mode 100644 index 000000000..ce6860442 --- /dev/null +++ b/pkg/watchers/apiserver/apiserver.go @@ -0,0 +1,241 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package apiserver + +import ( + "context" + "fmt" + "net" + "net/url" + "strings" + + "github.com/microsoft/retina/pkg/common" + cc "github.com/microsoft/retina/pkg/controllers/cache" + "github.com/microsoft/retina/pkg/log" + fm "github.com/microsoft/retina/pkg/managers/filtermanager" + "github.com/microsoft/retina/pkg/pubsub" + "go.uber.org/zap" + kcfg "sigs.k8s.io/controller-runtime/pkg/client/config" +) + +const ( + filterManagerRetries = 3 +) + +type ApiServerWatcher struct { + isRunning bool + l *log.ZapLogger + current cache + new cache + apiServerUrl string + hostResolver IHostResolver + filtermanager fm.IFilterManager +} + +var a *ApiServerWatcher + +// NewApiServerWatcher creates a new apiserver watcher. +func Watcher() *ApiServerWatcher { + if a == nil { + a = &ApiServerWatcher{ + isRunning: false, + l: log.Logger().Named("apiserver-watcher"), + current: make(cache), + apiServerUrl: getHostURL(), + hostResolver: net.DefaultResolver, + } + } + + return a +} + +func (a *ApiServerWatcher) Init(ctx context.Context) error { + if a.isRunning { + a.l.Info("apiserver watcher is already running") + return nil + } + + a.filtermanager = getFilterManager() + a.isRunning = true + return nil +} + +// Stop the apiserver watcher. +func (a *ApiServerWatcher) Stop(ctx context.Context) error { + if !a.isRunning { + a.l.Info("apiserver watcher is not running") + return nil + } + a.isRunning = false + return nil +} + +func (a *ApiServerWatcher) Refresh(ctx context.Context) error { + err := a.initNewCache(ctx) + if err != nil { + return err + } + // Compare the new ips with the old ones. + created, deleted := a.diffCache() + + // Publish the new ips. + createdIps := []net.IP{} + deletedIps := []net.IP{} + + for _, v := range created { + a.l.Info("New Apiserver ips:", zap.Any("ip", v)) + ip := net.ParseIP(v.(string)).To4() + createdIps = append(createdIps, ip) + } + + for _, v := range deleted { + a.l.Info("Deleted Apiserver ips:", zap.Any("ip", v)) + ip := net.ParseIP(v.(string)).To4() + deletedIps = append(deletedIps, ip) + } + + if len(createdIps) > 0 { + // Publish the new ips. + a.publish(createdIps, cc.EventTypeAddAPIServerIPs) + // Add ips to filter manager if any. + err := a.filtermanager.AddIPs(createdIps, "apiserver-watcher", fm.RequestMetadata{RuleID: "apiserver-watcher"}) + if err != nil { + a.l.Error("Failed to add ips to filter manager", zap.Error(err)) + } + } + + if len(deletedIps) > 0 { + // Publish the deleted ips. + a.publish(deletedIps, cc.EventTypeDeleteAPIServerIPs) + // Delete ips from filter manager if any. + err := a.filtermanager.DeleteIPs(deletedIps, "apiserver-watcher", fm.RequestMetadata{RuleID: "apiserver-watcher"}) + if err != nil { + a.l.Error("Failed to delete ips from filter manager", zap.Error(err)) + } + } + + // update the current cache and reset the new cache + a.current = a.new.deepcopy() + a.new = nil + + return nil +} + +func (a *ApiServerWatcher) initNewCache(ctx context.Context) error { + ips, err := a.getApiServerIPs(ctx) + if err != nil { + return err + } + + // Reset the new cache. 
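+	// The new cache is rebuilt from scratch on every call; Refresh then diffs it against
+	// a.current and swaps it in via deepcopy once events have been published.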
+ a.new = make(cache) + for _, ip := range ips { + a.new[ip] = struct{}{} + } + return nil +} + +func (a *ApiServerWatcher) diffCache() (created, deleted []interface{}) { + // check if there are new ips + for k := range a.new { + if _, ok := a.current[k]; !ok { + created = append(created, k) + } + } + + // check if there are deleted ips + for k := range a.current { + if _, ok := a.new[k]; !ok { + deleted = append(deleted, k) + } + } + return +} + +func (a *ApiServerWatcher) getApiServerIPs(ctx context.Context) ([]string, error) { + // Parse the URL + host, err := a.retrieveApiServerHostname() + if err != nil { + return nil, err + } + + // Get the ips for the host + ips, err := a.resolveIPs(ctx, host) + if err != nil { + return nil, err + } + + return ips, nil +} + +// parse url to extract hostname +func (a *ApiServerWatcher) retrieveApiServerHostname() (string, error) { + // Parse the URL + url, err := url.Parse(a.apiServerUrl) + if err != nil { + fmt.Println("Failed to parse URL:", err) + return "", err + } + + // Remove the scheme (http:// or https://) and port from the host + host := strings.TrimPrefix(url.Host, "www.") + colonIndex := strings.IndexByte(host, ':') + if colonIndex != -1 { + host = host[:colonIndex] + } + return host, nil +} + +// Resolve the list of ips for the given host +func (a *ApiServerWatcher) resolveIPs(ctx context.Context, host string) ([]string, error) { + hostIps, err := a.hostResolver.LookupHost(ctx, host) + if err != nil { + return nil, err + } + + if len(hostIps) == 0 { + a.l.Error("no ips found for host", zap.String("host", host)) + return nil, fmt.Errorf("no ips found for host %s", host) + } + + return hostIps, nil +} + +func (a *ApiServerWatcher) publish(netIPs []net.IP, eventType cc.EventType) { + if len(netIPs) == 0 { + return + } + + ipsToPublish := []string{} + for _, ip := range netIPs { + ipsToPublish = append(ipsToPublish, ip.String()) + } + ps := pubsub.New() + ps.Publish(common.PubSubAPIServer, + cc.NewCacheEvent( + eventType, + common.NewAPIServerObject(ipsToPublish), + ), + ) + a.l.Debug("Published event", zap.Any("eventType", eventType), zap.Any("netIPs", ipsToPublish)) +} + +// getHostURL returns the host url from the config. +func getHostURL() string { + config, err := kcfg.GetConfig() + if err != nil { + log.Logger().Error("failed to get config", zap.Error(err)) + return "" + } + return config.Host +} + +// Get FilterManager +func getFilterManager() *fm.FilterManager { + f, err := fm.Init(filterManagerRetries) + if err != nil { + a.l.Error("failed to init filter manager", zap.Error(err)) + } + return f +} diff --git a/pkg/watchers/apiserver/apiserver_test.go b/pkg/watchers/apiserver/apiserver_test.go new file mode 100644 index 000000000..bcd041eb6 --- /dev/null +++ b/pkg/watchers/apiserver/apiserver_test.go @@ -0,0 +1,165 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+// nolint + +package apiserver + +import ( + "context" + "errors" + "fmt" + "math/rand" + "testing" + "time" + + "github.com/microsoft/retina/pkg/log" + filtermanagermocks "github.com/microsoft/retina/pkg/managers/filtermanager" + "github.com/microsoft/retina/pkg/watchers/apiserver/mocks" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +func TestGetWatcher(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + a := Watcher() + assert.NotNil(t, a) + + a_again := Watcher() + assert.Equal(t, a, a_again, "Expected the same veth watcher instance") +} + +func TestAPIServerWatcherStop(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockedFilterManager := filtermanagermocks.NewMockIFilterManager(ctrl) + + // When apiserver is already stopped. + a := &ApiServerWatcher{ + isRunning: false, + l: log.Logger().Named("apiserver-watcher"), + filtermanager: mockedFilterManager, + } + err := a.Stop(ctx) + assert.NoError(t, err, "Expected no error when stopping a stopped apiserver watcher") + assert.Equal(t, false, a.isRunning, "Expected apiserver watcher to be stopped") + + // Start the watcher. + err = a.Init(ctx) + assert.NoError(t, err, "Expected no error when starting a stopped apiserver watcher") + + // Stop the watcher. + err = a.Stop(ctx) + assert.NoError(t, err, "Expected no error when stopping a running apiserver watcher") + assert.Equal(t, false, a.isRunning, "Expected apiserver watcher to be stopped") +} + +func TestRefresh(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + + mockedResolver := mocks.NewMockIHostResolver(ctrl) + mockedFilterManager := filtermanagermocks.NewMockIFilterManager(ctrl) + + a := &ApiServerWatcher{ + l: log.Logger().Named("apiserver-watcher"), + apiServerUrl: "https://kubernetes.default.svc.cluster.local:443", + hostResolver: mockedResolver, + filtermanager: mockedFilterManager, + } + + // Return 2 random IPs for the host everytime LookupHost is called. 
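+	// The addresses are hypothetical; because every lookup returns a fresh pair, the two
+	// Refresh calls below exercise both the "created" and "deleted" paths of diffCache.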
+ mockedResolver.EXPECT().LookupHost(gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, host string) ([]string, error) { + return []string{randomIP(), randomIP()}, nil + }).AnyTimes() + + mockedFilterManager.EXPECT().AddIPs(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + mockedFilterManager.EXPECT().DeleteIPs(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + + a.Refresh(ctx) + assert.NoError(t, a.Refresh(context.Background()), "Expected no error when refreshing the cache") +} + +func TestDiffCache(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockedResolver := mocks.NewMockIHostResolver(ctrl) + + old := make(map[string]struct{}) + new := make(map[string]struct{}) + + old["192.168.1.1"] = struct{}{} + old["192.168.1.2"] = struct{}{} + new["192.168.1.2"] = struct{}{} + new["192.168.1.3"] = struct{}{} + + a := &ApiServerWatcher{ + l: log.Logger().Named("apiserver-watcher"), + apiServerUrl: "https://kubernetes.default.svc.cluster.local:443", + hostResolver: mockedResolver, + current: old, + new: new, + } + + created, deleted := a.diffCache() + assert.Equal(t, 1, len(created), "Expected 1 created host") + assert.Equal(t, 1, len(deleted), "Expected 1 deleted host") +} + +func TestRefreshError(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + + mockedResolver := mocks.NewMockIHostResolver(ctrl) + + a := &ApiServerWatcher{ + l: log.Logger().Named("apiserver-watcher"), + apiServerUrl: "https://kubernetes.default.svc.cluster.local:443", + hostResolver: mockedResolver, + } + + mockedResolver.EXPECT().LookupHost(gomock.Any(), gomock.Any()).Return(nil, errors.New("Error")).AnyTimes() + + a.Refresh(ctx) + assert.Error(t, a.Refresh(context.Background()), "Expected error when refreshing the cache") +} + +func TestResolveIPEmpty(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + + mockedResolver := mocks.NewMockIHostResolver(ctrl) + + a := &ApiServerWatcher{ + l: log.Logger().Named("apiserver-watcher"), + apiServerUrl: "https://kubernetes.default.svc.cluster.local:443", + hostResolver: mockedResolver, + } + + mockedResolver.EXPECT().LookupHost(gomock.Any(), gomock.Any()).Return([]string{}, nil).AnyTimes() + + a.Refresh(ctx) + assert.Error(t, a.Refresh(context.Background()), "Expected error when refreshing the cache") +} + +func randomIP() string { + return fmt.Sprintf("%d.%d.%d.%d", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256)) +} diff --git a/pkg/watchers/apiserver/mocks/mock_types.go b/pkg/watchers/apiserver/mocks/mock_types.go new file mode 100644 index 000000000..04be8e191 --- /dev/null +++ b/pkg/watchers/apiserver/mocks/mock_types.go @@ -0,0 +1,50 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: types.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockIHostResolver is a mock of IHostResolver interface. +type MockIHostResolver struct { + ctrl *gomock.Controller + recorder *MockIHostResolverMockRecorder +} + +// MockIHostResolverMockRecorder is the mock recorder for MockIHostResolver. 
+type MockIHostResolverMockRecorder struct { + mock *MockIHostResolver +} + +// NewMockIHostResolver creates a new mock instance. +func NewMockIHostResolver(ctrl *gomock.Controller) *MockIHostResolver { + mock := &MockIHostResolver{ctrl: ctrl} + mock.recorder = &MockIHostResolverMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIHostResolver) EXPECT() *MockIHostResolverMockRecorder { + return m.recorder +} + +// LookupHost mocks base method. +func (m *MockIHostResolver) LookupHost(context context.Context, host string) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LookupHost", context, host) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LookupHost indicates an expected call of LookupHost. +func (mr *MockIHostResolverMockRecorder) LookupHost(context, host interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LookupHost", reflect.TypeOf((*MockIHostResolver)(nil).LookupHost), context, host) +} diff --git a/pkg/watchers/apiserver/types.go b/pkg/watchers/apiserver/types.go new file mode 100644 index 000000000..5ca5ca441 --- /dev/null +++ b/pkg/watchers/apiserver/types.go @@ -0,0 +1,22 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package apiserver + +import "context" + +//go:generate go run github.com/golang/mock/mockgen@v1.6.0 -source=types.go -destination=mocks/mock_types.go -package=mocks . +type IHostResolver interface { + LookupHost(context context.Context, host string) ([]string, error) +} + +// define cache as a set +type cache map[string]struct{} + +func (c cache) deepcopy() cache { + copy := make(cache) + for k, v := range c { + copy[k] = v + } + return copy +} diff --git a/pkg/watchers/endpoint/endpoint.go b/pkg/watchers/endpoint/endpoint.go new file mode 100644 index 000000000..1a90ea451 --- /dev/null +++ b/pkg/watchers/endpoint/endpoint.go @@ -0,0 +1,104 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package endpoint + +import ( + "context" + + "github.com/microsoft/retina/pkg/common" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/pubsub" + "go.uber.org/zap" +) + +type EndpointWatcher struct { + isRunning bool + l *log.ZapLogger + current cache + new cache + p pubsub.PubSubInterface +} + +var e *EndpointWatcher + +// NewEndpointWatcher creates a new endpoint watcher. +func Watcher() *EndpointWatcher { + if e == nil { + e = &EndpointWatcher{ + isRunning: false, + l: log.Logger().Named("endpoint-watcher"), + p: pubsub.New(), + current: make(cache), + } + } + + return e +} + +func (e *EndpointWatcher) Init(ctx context.Context) error { + if e.isRunning { + e.l.Info("endpoint watcher is already running") + return nil + } + e.isRunning = true + return nil +} + +func (e *EndpointWatcher) Stop(ctx context.Context) error { + if !e.isRunning { + e.l.Info("endpoint watcher is not running") + return nil + } + e.isRunning = false + return nil +} + +func (e *EndpointWatcher) Refresh(ctx context.Context) error { + // initNewCache is OS specific. + // Based on GOOS, will be implemented by either endpoint_linux, or + // endpoint_windows. + err := e.initNewCache() + if err != nil { + return err + } + + // Compare the new veths with the old ones. + created, deleted := e.diffCache() + + // Publish the new veths. 
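+	// Each created/deleted entry is wrapped in an EndpointEvent and published to
+	// subscribers of the common.PubSubEndpoints topic.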
+ for _, v := range created { + e.l.Debug("Endpoint created", zap.Any("veth", v)) + e.p.Publish(common.PubSubEndpoints, NewEndpointEvent(EndpointCreated, v)) + } + + // Publish the deleted veths. + for _, v := range deleted { + e.l.Debug("Endpoint deleted", zap.Any("veth", v)) + e.p.Publish(common.PubSubEndpoints, NewEndpointEvent(EndpointDeleted, v)) + } + + // Update the cache and reset the new cache. + e.current = e.new.deepcopy() + e.new = nil + + return nil +} + +// Function to differentiate between two caches. +func (e *EndpointWatcher) diffCache() (created, deleted []interface{}) { + // Check if there are any new veths. + for k, v := range e.new { + if _, ok := e.current[k]; !ok { + created = append(created, v) + } + } + + // Check if there are any deleted veths. + for k, v := range e.current { + if _, ok := e.new[k]; !ok { + deleted = append(deleted, v) + } + } + return +} diff --git a/pkg/watchers/endpoint/endpoint_linux.go b/pkg/watchers/endpoint/endpoint_linux.go new file mode 100644 index 000000000..40a601e1f --- /dev/null +++ b/pkg/watchers/endpoint/endpoint_linux.go @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package endpoint + +import ( + "github.com/vishvananda/netlink" +) + +var showLink = netlink.LinkList + +func (e *EndpointWatcher) initNewCache() error { + veths, err := listVeths() + if err != nil { + return err + } + + // Reset new cache. + e.new = make(cache) + for _, veth := range veths { + k := key{ + name: veth.Attrs().Name, + hardwareAddr: veth.Attrs().HardwareAddr.String(), + netNsID: veth.Attrs().NetNsID, + } + + e.new[k] = *veth.Attrs() + } + + return nil +} + +// Helper functions. + +// Get all the veth interfaces. +// Similar to ip link show type veth +func listVeths() ([]netlink.Link, error) { + links, err := showLink() + if err != nil { + return nil, err + } + + var veths []netlink.Link + for _, link := range links { + // Ref: https://github.com/vishvananda/netlink/blob/ced5aaba43e3f25bb5f04860641d3e3dd04a8544/link.go#L367 + // Unfortunately, there is no type/constant defined for "veth" in the netlink package. + // Version of netlink tested - https://github.com/vishvananda/netlink/tree/v1.2.1-beta.2 + if link.Type() == "veth" { + veths = append(veths, link) + } + } + + return veths, nil +} diff --git a/pkg/watchers/endpoint/endpoint_test.go b/pkg/watchers/endpoint/endpoint_test.go new file mode 100644 index 000000000..a9fbc439e --- /dev/null +++ b/pkg/watchers/endpoint/endpoint_test.go @@ -0,0 +1,228 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +// nolint + +package endpoint + +import ( + "context" + "errors" + "net" + "testing" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/pubsub" + "github.com/stretchr/testify/assert" + "github.com/vishvananda/netlink" +) + +func TestGetWatcher(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + v := Watcher() + assert.NotNil(t, v) + + v_again := Watcher() + assert.Equal(t, v, v_again, "Expected the same veth watcher instance") +} + +func TestEndpointWatcherStart(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + c := context.Background() + + // When veth is already running. + v := &EndpointWatcher{ + isRunning: true, + l: log.Logger().Named("veth-watcher"), + } + err := v.Init(c) + assert.NoError(t, err, "Expected no error when starting a running veth watcher") + assert.Equal(t, true, v.isRunning, "Expected veth watcher to be running") + + // When veth is not running. 
+ v.isRunning = false + err = v.Init(c) + assert.NoError(t, err, "Expected no error when starting a stopped veth watcher") + assert.Equal(t, true, v.isRunning, "Expected veth watcher to be running") + + // Stop the watcher. + err = v.Stop(c) + assert.NoError(t, err, "Expected no error when stopping a running veth watcher") + + // Restart the watcher. + err = v.Init(c) + assert.NoError(t, err, "Expected no error when starting a stopped veth watcher") + assert.Equal(t, true, v.isRunning, "Expected veth watcher to be running") + + // Stop the watcher. + err = v.Stop(c) + assert.NoError(t, err, "Expected no error when stopping a running veth watcher") +} + +func TestEndpointWatcherStop(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + c := context.Background() + + // When veth is already stopped. + v := &EndpointWatcher{ + isRunning: false, + l: log.Logger().Named("veth-watcher"), + } + err := v.Stop(c) + assert.NoError(t, err, "Expected no error when stopping a stopped veth watcher") + assert.Equal(t, false, v.isRunning, "Expected veth watcher to be stopped") + + // Start the watcher. + err = v.Init(c) + assert.NoError(t, err, "Expected no error when starting a stopped veth watcher") + + // Stop the watcher. + err = v.Stop(c) + assert.NoError(t, err, "Expected no error when stopping a running veth watcher") + assert.Equal(t, false, v.isRunning, "Expected veth watcher to be stopped") +} + +func TestRun(t *testing.T) { + showLink = func() ([]netlink.Link, error) { + return []netlink.Link{ + &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + Name: "veth0", + }, + }, + &netlink.Vxlan{ + LinkAttrs: netlink.LinkAttrs{ + Name: "eth0", + }, + }, + }, nil + } + + links, err := listVeths() + assert.NoError(t, err, "Expected no error when listing veths") + assert.Equal(t, 1, len(links), "Expected to find 1 veth") + assert.Equal(t, "veth0", links[0].Attrs().Name, "Expected to find veth0") +} + +func TestDiffCache(t *testing.T) { + old := cache{ + key{ + name: "veth0", + hardwareAddr: "00:00:00:00:00:00", + netNsID: 0, + }: netlink.LinkAttrs{ + Name: "veth0", + }, + } + new := cache{ + key{ + name: "veth1", + hardwareAddr: "00:00:00:00:00:FF", + netNsID: 1, + }: netlink.LinkAttrs{ + Name: "veth1", + }, + } + e := &EndpointWatcher{current: old, new: new} + c, d := e.diffCache() + assert.Equal(t, 1, len(c), "Expected to find 1 created veth") + assert.Equal(t, 1, len(d), "Expected to find 1 deleted veth") + assert.Equal(t, "veth1", c[0].(netlink.LinkAttrs).Name, "Expected to find veth1") + assert.Equal(t, "veth0", d[0].(netlink.LinkAttrs).Name, "Expected to find veth0") +} + +func TestRefreshAndCallback(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + c := context.Background() + + showLink = func() ([]netlink.Link, error) { + return []netlink.Link{ + &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + Name: "veth0", + HardwareAddr: func() net.HardwareAddr { + mac, _ := net.ParseMAC("00:00:00:00:00:00") + return mac + }(), + NetNsID: 0, + }, + }, + &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + Name: "veth1", + HardwareAddr: func() net.HardwareAddr { + mac, _ := net.ParseMAC("00:00:00:00:00:01") + return mac + }(), + NetNsID: 1, + }, + }, + }, nil + } + + cache := make(cache) + cache[key{ + name: "veth2", + hardwareAddr: "00:00:00:00:00:02", + netNsID: 2, + }] = &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + Name: "veth2", + }, + } + + v := &EndpointWatcher{ + isRunning: true, + current: cache, + l: log.Logger().Named("veth-watcher"), + p: pubsub.New(), + } + + // When cache is 
empty. + assert.Equal(t, 1, len(v.current), "Expected to find 0 veths") + + // Post refresh. + err := v.Refresh(c) + assert.NoError(t, err, "Expected no error when refreshing veth cache") + assert.Equal(t, 2, len(v.current), "Expected to find 2 veths") + assert.Equal(t, "veth0", v.current[key{ + name: "veth0", + hardwareAddr: "00:00:00:00:00:00", + netNsID: 0, + }].(netlink.LinkAttrs).Name, "Expected to find veth0") + assert.Equal(t, "veth1", v.current[key{ + name: "veth1", + hardwareAddr: "00:00:00:00:00:01", + netNsID: 1, + }].(netlink.LinkAttrs).Name, "Expected to find veth1") +} + +func TestRefreshError(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + c := context.Background() + + showLink = func() ([]netlink.Link, error) { + return nil, errors.New("error") + } + + v := &EndpointWatcher{ + isRunning: true, + current: make(cache), + l: log.Logger().Named("veth-watcher"), + p: pubsub.New(), + } + + err := v.Refresh(c) + assert.Error(t, err, "Expected an error when refreshing veth cache") +} + +func TestListVethsError(t *testing.T) { + log.SetupZapLogger(log.GetDefaultLogOpts()) + + showLink = func() ([]netlink.Link, error) { + return nil, errors.New("error") + } + + _, err := listVeths() + assert.Error(t, err, "Expected an error when listing veths") +} diff --git a/pkg/watchers/endpoint/endpoint_windows.go b/pkg/watchers/endpoint/endpoint_windows.go new file mode 100644 index 000000000..0a12ff969 --- /dev/null +++ b/pkg/watchers/endpoint/endpoint_windows.go @@ -0,0 +1,18 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* Template */ + +package endpoint + +import ( + "github.com/Microsoft/hcsshim/hcn" +) + +func (e *EndpointWatcher) initNewCache() error { + return nil +} + +func listVeths() ([]hcn.HostComputeEndpoint, error) { + return nil, nil +} diff --git a/pkg/watchers/endpoint/types.go b/pkg/watchers/endpoint/types.go new file mode 100644 index 000000000..693a4c79a --- /dev/null +++ b/pkg/watchers/endpoint/types.go @@ -0,0 +1,59 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package endpoint + +const ( + endpointCreated string = "endpoint_created" + endpointDeleted string = "endpoint_deleted" +) + +type key struct { + name string + hardwareAddr string + // Network namespace for linux. + // Compartment ID for windows. + netNsID int +} + +type cache map[key]interface{} + +type EndpointEvent struct { + // Type is the type of the event. + Type EventType + // Obj is the object that the event is about. 
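+	// On Linux this is a netlink.LinkAttrs value describing the veth (see endpoint_linux.go).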
+ Obj interface{} +} + +func NewEndpointEvent(t EventType, obj interface{}) *EndpointEvent { + return &EndpointEvent{ + Type: t, + Obj: obj, + } +} + +type EventType int + +const ( + EndpointCreated EventType = iota + EndpointDeleted +) + +func (e EventType) String() string { + switch e { + case EndpointCreated: + return endpointCreated + case EndpointDeleted: + return endpointDeleted + default: + return "unknown" + } +} + +func (c cache) deepcopy() cache { + copy := make(cache) + for k, v := range c { + copy[k] = v + } + return copy +} diff --git a/samples/capture/capture-specific-pod-on-a-node.yaml b/samples/capture/capture-specific-pod-on-a-node.yaml new file mode 100644 index 000000000..0380a1c00 --- /dev/null +++ b/samples/capture/capture-specific-pod-on-a-node.yaml @@ -0,0 +1,29 @@ +apiVersion: retina.io/v1alpha1 +kind: Capture +metadata: + name: capture-pod-blobupload +spec: + captureConfiguration: + captureOption: + duration: 10m + captureTarget: + nodeSelector: + matchExpressions: + - { + key: kubernetes.io/hostname, + operator: In, + values: [aks-nodepool1-11396069-vmss000001], + } + filters: + # Include packets from/to the following IP addresses or IP:Port pairs + include: + - 10.224.0.128:80 + - 10.224.0.129 + # Comment the following code out to exclude packets from/to IMDS server + # exclude: + # - 169.254.169.254 + outputConfiguration: + # the artifact will be copied to hostpath + # and uploaded to azure storage account + hostPath: "/tmp/retina" + blobUpload: blobsassecret diff --git a/samples/capture/nodeblobupload.yaml b/samples/capture/nodeblobupload.yaml new file mode 100644 index 000000000..a4a9e1748 --- /dev/null +++ b/samples/capture/nodeblobupload.yaml @@ -0,0 +1,23 @@ +apiVersion: retina.io/v1alpha1 +kind: Capture +metadata: + name: capture-node-blobupload +spec: + captureConfiguration: + captureOption: + duration: 10m + captureTarget: + nodeSelector: + matchExpressions: + - { + key: kubernetes.io/hostname, + operator: In, + values: + [ + aks-nodepool1-11396069-vmss000001, + aks-nodepool1-11396069-vmss000000, + ], + } + outputConfiguration: + hostPath: "/tmp/retina" + blobUpload: blobsassecret diff --git a/samples/capture/podblobupload.yaml b/samples/capture/podblobupload.yaml new file mode 100644 index 000000000..edd7e9dec --- /dev/null +++ b/samples/capture/podblobupload.yaml @@ -0,0 +1,20 @@ +apiVersion: retina.io/v1alpha1 +kind: Capture +metadata: + name: capture-pod-blobupload +spec: + captureConfiguration: + captureOption: + duration: 10m + captureTarget: + podSelector: + matchLabels: + k8s-app: cloud-node-manager + namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + outputConfiguration: + # the artifact will be copied to hostpath + # and uploaded to azure storage account + hostPath: "/tmp/retina" + blobUpload: blobsassecret diff --git a/samples/metricsconfiguration.yaml b/samples/metricsconfiguration.yaml new file mode 100644 index 000000000..b08bdb260 --- /dev/null +++ b/samples/metricsconfiguration.yaml @@ -0,0 +1,16 @@ +apiVersion: retina.io/v1alpha1 +kind: MetricsConfiguration +metadata: + name: sample-metricsconfig +spec: + contextOptions: + - metricName: requests_total + sourceLabels: + - source + additionalLabels: + - direction + namespaces: + include: + - default + exclude: + - kube-system diff --git a/scripts/coverage/README.md b/scripts/coverage/README.md new file mode 100644 index 000000000..fe67214e7 --- /dev/null +++ b/scripts/coverage/README.md @@ -0,0 +1,15 @@ +# Overview + +We use GITHUB_TOKEN to get the following: + 
+1. Workflow ID with the retina test yaml +2. Get the latest main branch successful workflow run with that id +3. Get the artifacts from that run and download them + +## Testing Locally + +1. Set your PAT from github developer settings as `GITHUB_TOKEN` env variable +https: //docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token#creating-a-personal-access-token-classic +2. Set a test PR number as `PULL_REQUEST_NUMBER` env variable +3. generate coverage.out for local branch with `go test -coverprofile=coverage.out ./...` +4. run `make retina-cc` on your local branch and not main. diff --git a/scripts/coverage/compare_cov.py b/scripts/coverage/compare_cov.py new file mode 100644 index 000000000..af4896651 --- /dev/null +++ b/scripts/coverage/compare_cov.py @@ -0,0 +1,292 @@ +import requests +import os +import json + +# Set up authentication credentials +token = os.environ.get("GITHUB_TOKEN") +headers = {"Authorization": f"Bearer {token}"} + +pr_num = os.environ.get("PULL_REQUEST_NUMBER") +print("PR number is", pr_num) +# Set repository information +owner = "azure" +repo = "retina" + +current_branch_file = "coverageexpanded.out" +main_branch_file = "maincoverageexpanded.out" + +if not pr_num: + print("No PR number found") + exit(0) + +if not token: + print("No token found") + exit(0) + + +def getAvgPerFile(given_dict): + for key in given_dict: + if key == "total": + given_dict[key] = float(given_dict[key]["(statements)"]) + continue + total = 0 + for subkey in given_dict[key]: + total += float(given_dict[key][subkey]) + avg = total / len(given_dict[key]) + given_dict[key] = round(avg, 2) + return given_dict + + +current_branch_lines = None +current_parsed_data = {} +# read the current branch coverage file +with open(current_branch_file, "r") as f: + current_branch_lines = f.readlines() + if current_branch_lines is None: + print("No coverage data found for current branch") + exit(1) + for line in current_branch_lines: + # Split the string into separate items + items = line.split("\t") + # Split each item into four parts and add them to a list + funcname = "" + for item in items: + if item == "": + continue + + if ":" in item: + parts = item.split(":") + path = parts[0] + if path not in current_parsed_data: + current_parsed_data[path] = {} + continue + if "%" in item: + parts = item.split("%") + percentage = parts[0] + current_parsed_data[path][funcname] = percentage + continue + funcname = item + +current_parsed_data = getAvgPerFile(current_parsed_data) + +main_branch_lines = None +main_parsed_data = {} +# read the main branch coverage file +with open(main_branch_file, "r") as f: + main_branch_lines = f.readlines() + if main_branch_lines is None: + print("No coverage data found for main branch") + exit(1) + for line in main_branch_lines: + # Split the string into separate items + items = line.split("\t") + # Split each item into four parts and add them to a list + funcname = "" + for item in items: + if item == "": + continue + + if ":" in item: + parts = item.split(":") + path = parts[0] + if path not in main_parsed_data: + main_parsed_data[path] = {} + continue + if "%" in item: + parts = item.split("%") + percentage = parts[0] + main_parsed_data[path][funcname] = percentage + continue + funcname = item + +new_main_parsed_data = getAvgPerFile(main_parsed_data) + +calculated_diff = { + "increased": {}, + "decreased": {}, + "added": {}, + "removed": {}, + "nochange": {}, + "total": {} +} + + +''' This one is to compare with symbols +def 
compare_dicts(main_dict, cur_dict): + """ + Recursively compare two dictionaries and print the differences. + """ + for key in set(main_dict.keys()) | set(cur_dict.keys()): + if key not in main_dict: + for subkey in main_dict[key]: + calculated_diff["added"][key] = subkey + elif key not in cur_dict: + for subkey in cur_dict[key]: + calculated_diff["removed"][key] = subkey + elif key == "total": + if float(main_dict[key]['(statements)']) < float(cur_dict[key]['(statements)']): + calculated_diff[key] = "increased" + elif float(main_dict[key]['(statements)']) > float(cur_dict[key]['(statements)']): + calculated_diff[key] = "decreased" + else: + calculated_diff[key] = "no change" + else: + for subkey in set(main_dict[key].keys()) | set(cur_dict[key].keys()): + if subkey not in main_dict[key]: + calculated_diff["added"][key] = subkey + elif subkey not in cur_dict[key]: + calculated_diff["removed"][key] = subkey + else: + if float(main_dict[key][subkey]) < float(cur_dict[key][subkey]): + calculated_diff["increased"][key] = subkey + elif float(main_dict[key][subkey]) > float(cur_dict[key][subkey]): + calculated_diff["decreased"][key] = subkey + else: + calculated_diff["nochange"][key] = subkey + + +compare_dicts(main_parsed_data, current_parsed_data) +''' + + +def compare_dicts(main_dict, cur_dict): + """ + Recursively compare two dictionaries and print the differences. + """ + for key in set(main_dict.keys()) | set(cur_dict.keys()): + if key not in main_dict: + calculated_diff["added"][key] = f"`0%` ... `{cur_dict[key]}%`" + elif key not in cur_dict: + calculated_diff["removed"][key] = f"`{main_dict[key]}%` ... `0%`" + elif key == "total": + if float(main_dict[key]) < float(cur_dict[key]): + calculated_diff[key] = "increased" + elif float(main_dict[key]) > float(cur_dict[key]): + calculated_diff[key] = "decreased" + else: + calculated_diff[key] = "no change" + else: + if float(main_dict[key]) < float(cur_dict[key]): + calculated_diff["increased"][ + key] = f"`{main_dict[key]}%` ... `{cur_dict[key]}%` (`{ round(cur_dict[key] - main_dict[key],2) }%`)" + elif float(main_dict[key]) > float(cur_dict[key]): + calculated_diff["decreased"][ + key] = f"`{main_dict[key]}%` ... `{cur_dict[key]}%` (`{ round(cur_dict[key] - main_dict[key],2)}%`)" + else: + calculated_diff["nochange"][key] = f"`{main_dict[key]}%` ... 
`{cur_dict[key]}%`" + + +compare_dicts(main_parsed_data, current_parsed_data) + +body_of_comment = "# Retina Code Coverage Report\n\n" + +if calculated_diff["total"] == "increased": + body_of_comment += "## Total coverage increased from `" + \ + str(main_parsed_data["total"]) + "%` to `" + \ + str(current_parsed_data["total"]) + \ + "%` :white_check_mark:\n\n" + +elif calculated_diff["total"] == "decreased": + body_of_comment += "## Total coverage decreased from `" + \ + str(main_parsed_data["total"]) + "%` to `" + \ + str(current_parsed_data["total"]) + \ + "%` :x:\n\n" +else: + body_of_comment += "## Total coverage no change\n\n" + + +if len(calculated_diff["increased"]) > 0: + body_of_comment += "Increased diff\n" + body_of_comment += "| Impacted Files | Coverage | |\n" + body_of_comment += "| --- | --- | --- |\n" + for key in calculated_diff["increased"]: + body_of_comment += "| " + key.replace("github.com/microsoft/retina/", "") + " | " + \ + calculated_diff["increased"][key] + " | :arrow_up: |\n" + body_of_comment += "\n" + +if len(calculated_diff["decreased"]) > 0: + body_of_comment += "Decreased diff \n" + body_of_comment += "| Impacted Files | Coverage | |\n" + body_of_comment += "| --- | --- | --- |\n" + for key in calculated_diff["decreased"]: + body_of_comment += "| " + key.replace("github.com/microsoft/retina/", "") + " | " + \ + calculated_diff["decreased"][key] + \ + " | :arrow_down: |\n" + body_of_comment += "\n" + + +print(body_of_comment) + +# check if the PR is raised by depandabot, if yes ignore posting a comment on the PR +issue_url = f"https://api.github.com/repos/{owner}/{repo}/issues/{pr_num}" +# Make the API call to get the comments on the pull request +issue_response = requests.get(issue_url, headers=headers) +if issue_response.status_code != 200: + print( + f"Failed to get PR with url {issue_url}", issue_response.content) + exit(1) + +# Parse the JSON response to get the comments as a list of Python dictionaries +issues_data = json.loads(issue_response.content) + +if issues_data["user"]["login"] == "dependabot[bot]": + print("PR raised by dependabot, ignoring posting a comment on the PR") + exit(0) + + +# Set the URL for the API call to get the comments on the pull request +comments_url = f"https://api.github.com/repos/{owner}/{repo}/issues/{pr_num}/comments" + +# Make the API call to get the comments on the pull request +comments_response = requests.get(comments_url, headers=headers) +if comments_response.status_code != 200: + print( + f"Failed to get comments of the PR with url {comments_url}", comments_response.content) + exit(1) + +# Parse the JSON response to get the comments as a list of Python dictionaries +comments_data = json.loads(comments_response.content) + +# Check if there is an existing comment that starts with "Coverage" +existing_comment = None +for comment in comments_data: + if comment["body"].startswith("# Retina Code Coverage Report"): + existing_comment = comment + break + +# If there is an existing comment, update it +if existing_comment: + # Set the URL for the API call to update the comment + update_url = f"https://api.github.com/repos/{owner}/{repo}/issues/comments/{existing_comment['id']}" + + # Set the payload for the API call to update the comment + payload = { + "body": body_of_comment + } + + # Make the API call to update the comment + update_response = requests.patch(update_url, headers=headers, json=payload) + if update_response.status_code != 200: + print( + f"Failed to update the comment with url {update_url}", 
update_response.content) + exit(1) + + # Print the response to confirm that the comment was updated successfully + print(update_response.content) + +# If there is no existing comment, add a new comment +else: + # Set the payload for the API call to add the new comment + payload = { + "body": body_of_comment + } + + # Make the API call to add the new comment + add_response = requests.post(comments_url, headers=headers, json=payload) + if add_response.status_code != 201: + print( + f"Failed to add the comment with url {comments_url}", add_response.content) + exit(1) + + # Print the response to confirm that the comment was added successfully + print(add_response.content) diff --git a/scripts/coverage/get_coverage.py b/scripts/coverage/get_coverage.py new file mode 100644 index 000000000..d82b304de --- /dev/null +++ b/scripts/coverage/get_coverage.py @@ -0,0 +1,74 @@ +import requests +import zipfile +import os + +# Set up authentication credentials +token = os.environ.get("GITHUB_TOKEN") +headers = {"Authorization": f"Bearer {token}"} + +pr_num = os.environ.get("PULL_REQUEST_NUMBER") +print("PR number is", pr_num) + +if not pr_num: + print("No PR number found") + exit(0) + +if not token: + print("No token found") + exit(0) + +# Set repository information +owner = "azure" +repo = "retina" + +ut_workflow_yaml = "retina-test.yaml" + +# Get the id of UT workflow +wf_url = f"https://api.github.com/repos/{owner}/{repo}/actions/workflows/{ut_workflow_yaml}" +response = requests.get(wf_url, headers=headers) +response.raise_for_status() +wf_id = response.json()["id"] + +# Get the latest completed workflow run on the main branch +runs_url = f"https://api.github.com/repos/{owner}/{repo}/actions/workflows/{wf_id}/runs" +params = {"branch": "main", "status": "completed", "per_page": 10} +response = requests.get(runs_url, headers=headers, params=params) +response.raise_for_status() +artifacts_url = response.json()["workflow_runs"][0]["artifacts_url"] + +# Create the main branch folder for coverage +folder_name = "mainbranchcoverage" +file_name = "coverage.out" +if not os.path.exists(folder_name): + os.makedirs(folder_name) + +for wf in response.json()["workflow_runs"]: + artifacts_url = wf["artifacts_url"] + # Get any artifacts named "coverage" for the specified workflow run + # artifacts_url = f"https://api.github.com/repos/{owner}/{repo}/actions/runs/{run_id}/artifacts" + response = requests.get(artifacts_url, headers=headers) + response.raise_for_status() + artifacts = response.json()["artifacts"] + found_files = False + + # Process the list of artifacts as needed + for artifact in artifacts: + if "coverage" in artifact["name"]: + print("Downloading artifacts from main branch", + artifact["name"], artifact["archive_download_url"]) + response = requests.get( + artifact["archive_download_url"], headers=headers) + response.raise_for_status() + with open("mainbranchcov.zip", "wb") as f: + f.write(response.content) + # Unzip the coverage file + + with zipfile.ZipFile("mainbranchcov.zip", 'r') as zip_ref: + zip_ref.extractall(folder_name) + + if os.path.isfile(f'./{folder_name}/{file_name}'): + found_files = True + break + + if found_files: + break diff --git a/scripts/create_aks_cluster.sh b/scripts/create_aks_cluster.sh new file mode 100644 index 000000000..eef129f62 --- /dev/null +++ b/scripts/create_aks_cluster.sh @@ -0,0 +1,82 @@ +set -o xtrace +set -e + +regions=(eastus eastus2 centralus southcentralus westcentralus northcentralus westus westus2) +rand_index=$((RANDOM % ${#regions[@]})) + +export 
LOCATION=${regions[$rand_index]} +export BASE_NAME=ret-$3 + +# upgrade az cli to the latest version +az upgrade -y + +# Select the oldest patch version in the latest minor version from AKS support version list. +K8S_VERSION=$(az aks get-versions -l $LOCATION -o json | jq '.values |sort_by(.version) | first.patchVersions ' | jq 'keys|.[0]' --sort-keys | tr -d '"') +echo "$K8S_VERSION is selected from region $LOCATION" + + +echo "creating AKS cluster with linux amd64 node pool" +echo "Image tag: $1" + +RESOURCE_GROUP=$BASE_NAME-$1-$2 +RESOURCE_GROUP=${RESOURCE_GROUP//./-} + +CLUSTER_NAME=$BASE_NAME-$1-$2 +CLUSTER_NAME=${CLUSTER_NAME//./-} + +# Update aks-preview to the latest version +az extension add --name aks-preview +az extension update --name aks-preview + +az provider register -n Microsoft.ContainerService + +az group create --name $RESOURCE_GROUP --location $LOCATION + +az aks create \ + --resource-group $RESOURCE_GROUP \ + --name $CLUSTER_NAME \ + --generate-ssh-keys \ + --kubernetes-version $K8S_VERSION \ + --network-plugin azure \ + --vm-set-type VirtualMachineScaleSets \ + --node-count 1 \ + --network-policy azure + + +sleep 60s + +# Function to add node pools to aks cluster previously create +function add_node_pool() { + local name=$1 + local vm_size=$2 + local os_type=$3 + local os_sku=$4 + + echo "Adding $name $os_type $os_sku node pool" + + if [ -n "$vm_size" ]; then + set +e # Ignore error on arm64 node pool + az aks nodepool add \ + --resource-group $RESOURCE_GROUP \ + --cluster-name $CLUSTER_NAME \ + --name $name \ + --node-count 1 \ + --os-type $os_type \ + --os-sku $os_sku \ + --node-vm-size $vm_size + set -e + else + az aks nodepool add \ + --resource-group $RESOURCE_GROUP \ + --cluster-name $CLUSTER_NAME \ + --name $name \ + --node-count 1 \ + --os-type $os_type \ + --os-sku $os_sku + fi +} + +add_node_pool mariner Standard_D4pds_v5 Linux AzureLinux +add_node_pool arm64 Standard_D4pds_v5 Linux Ubuntu +add_node_pool nwin22 "" Windows Windows2022 +add_node_pool nwin19 "" Windows Windows2019 diff --git a/scripts/create_retina_logs.sh b/scripts/create_retina_logs.sh new file mode 100644 index 000000000..baf349cad --- /dev/null +++ b/scripts/create_retina_logs.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +OUTPUT_DIR=$1 +TAG=$2 + +echo "Getting retina pods and nodes wide informations" + +echo "kubectl get pods -n kube-system -o wide -l app=retina" > $OUTPUT_DIR/retina_agents_pods_$TAG.txt +kubectl get pods -n kube-system -o wide -l app=retina >> $OUTPUT_DIR/retina_agents_pods_$TAG.txt + +echo "kubectl get nodes -o wide" > $OUTPUT_DIR/retina_cluster_nodes_$TAG.txt +kubectl get nodes -o wide >> $OUTPUT_DIR/retina_cluster_nodes_$TAG.txt + +# get retina pods +pods=($(kubectl get pods -n kube-system -l app=retina -o jsonpath='{.items[*].metadata.name}')) + +echo "Getting retina pods descriptions" +for pod in "${pods[@]}"; do + kubectl describe pod $pod -n kube-system > $OUTPUT_DIR/${pod}_description_$TAG.txt +done + +echo "Getting retina pods logs" +for pod in "${pods[@]}"; do + kubectl logs $pod -n kube-system > $OUTPUT_DIR/${pod}_logs_$TAG.log + #check if pod restarted and get previous logs + number_of_restarts=$(kubectl get pods $pod -n kube-system -o jsonpath='{.status.containerStatuses[0].restartCount}') + if [ $number_of_restarts -gt 0 ]; then + echo "$pod has restarted" + kubectl logs $pod -n kube-system --previous > $OUTPUT_DIR/${pod}_logs_previous_$TAG.log + fi +done diff --git a/scripts/install_retina.sh b/scripts/install_retina.sh new file mode 100644 index 000000000..8752748c1 
--- /dev/null +++ b/scripts/install_retina.sh @@ -0,0 +1,35 @@ +set -o xtrace +set -e + +echo "Install Retina on cluster using helm" + +helm install retina ./deploy/manifests/controller/helm/retina/\ + --namespace kube-system \ + --dependency-update \ + --values $5/values.yaml \ + --set image.repository=$1 \ + --set operator.repository=$2 \ + --set image.initRepository=$3 \ + --set image.tag=$4 \ + --set operator.tag=$4 \ + +sleep 20s + +kubectl get pods -n kube-system -o wide -l app=retina +# wait for retina pods to be ready +kubectl -n kube-system wait --for=condition=ready --timeout=600s pod -l app=retina # windows server 2019 is really slow +kubectl get pods -n kube-system -o wide -l app=retina +kubectl describe pods -n kube-system -l app=retina + +# Check if the profile is advanced. The operator isn't needed for basic profile. +if [ "$6" = "adv" ]; then + kubectl -n kube-system wait --for=condition=ready --timeout=600s pod -l app=retina-operator # windows server 2019 is really slow + kubectl get pods -n kube-system -o wide -l app=retina-operator + kubectl describe pods -n kube-system -l app=retina-operator +fi + +# This applies CRDs corresponding to the integration test profile. +# For example, when we are running the adv profile, we will apply test/profiles/advanced/crd/metrics_config_crd.yaml +if [ -d "$5/crd" ] && [ "$(ls -A $5/crd | grep .yaml)" ]; then + kubectl apply -f $5/crd/ +fi diff --git a/scripts/test_retina.sh b/scripts/test_retina.sh new file mode 100644 index 000000000..7272c8f29 --- /dev/null +++ b/scripts/test_retina.sh @@ -0,0 +1,72 @@ +set -o xtrace +set -e + +#create a test pod to check if retina metrics are available +if ! kubectl get pod retina-test -n kube-system >/dev/null 2>&1; then + kubectl run retina-test -n kube-system --image=alpine --overrides='{"apiVersion": "v1", "spec": {"nodeSelector": { "kubernetes.io/os": "linux", "kubernetes.io/arch": "amd64" }}}' --command sleep infinity +fi +kubectl -n kube-system wait --for=condition=ready --timeout=300s pod -l run=retina-test + +# wait for metrics to get generated +sleep 60s + +# Get the list of retina pods +kubectl get pods -n kube-system -o wide -l app=retina + +# Get the list of retina pods ips +ips=($(kubectl get pods -n kube-system -l app=retina -o jsonpath='{.items[*].status.podIP}')) + +# check if retina metrics are available in every pods +for ip in "${ips[@]}"; do + echo " + ============================================================ + =========Checking Metrics on Pod with Ip: $ip=============== + ============================================================ + " + result=$(kubectl exec pod/retina-test -n kube-system -- wget -qO- http://$ip:10093/metrics | grep "networkobservability_") + + if [ -z "$result" ]; then + echo "==============Retina metrics not available================" + exit 1 + else + echo "Retina metrics available" + fi + +done + +# wait for 3 minutes for logs to be generated +sleep 3m + +# Get all retina pods names +pods=($(kubectl get pods -n kube-system -l app=retina -o jsonpath='{.items[*].metadata.name}')) + +# Check running plugins in all pods +for pod in "${pods[@]}"; do + echo " + ============================================================ + ================Checking Errors for Pod: $pod=============== + ============================================================ + " + # get number of errors + number_of_errors=$(kubectl logs $pod -n kube-system | awk '{print $2}'| grep -w "error"| wc -l) + if [ $number_of_errors -gt 0 ]; then + echo " =========================Error 
Found===================================" + kubectl logs $pod -n kube-system | grep -w "error" + echo " =======================================================================" + exit 1 + fi + + echo "Checking restarts on pod: $pod" + # get number of restarts + number_of_restarts=$(kubectl get pods $pod -n kube-system -o jsonpath='{.status.containerStatuses[0].restartCount}') + + if [ $number_of_restarts -gt 0 ]; then + if [[ $pod == *"agent-win"* ]] && kubectl describe pod $pod -n kube-system | grep -q "SandboxChanged"; then + echo "============================= Warning! $pod has restarted because of SandboxChanged ==========================" + elif [[ $pod != *"agent-win"* ]]; then + echo "=============================$pod has restarted==========================" + kubectl logs $pod -n kube-system -p + exit 1 + fi + fi +done diff --git a/scripts/upgrade_retina.sh b/scripts/upgrade_retina.sh new file mode 100644 index 000000000..76fbc0b3d --- /dev/null +++ b/scripts/upgrade_retina.sh @@ -0,0 +1,22 @@ +set -o xtrace +set -e + +# upgrade helm with new value file +helm upgrade retina ./deploy/manifests/controller/helm/retina/\ + --namespace kube-system \ + --dependency-update \ + --values test/profiles/$5/values.yaml \ + --set image.repository=$1 \ + --set operator.repository=$2 \ + --set image.initRepository=$3 \ + --set image.tag=$4 \ + --set operator.tag=$4 \ + + sleep 30s + +# show configmap +kubectl get configmap -n kube-system retina-config -o yaml + +# wait for retina pods to be ready +kubectl -n kube-system wait --for=condition=ready --timeout=600s pod -l app=retina +kubectl get pods -n kube-system -o wide -l app=retina diff --git a/scripts/windows.ps1 b/scripts/windows.ps1 new file mode 100644 index 000000000..b18aa39e8 --- /dev/null +++ b/scripts/windows.ps1 @@ -0,0 +1,31 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. +# example usage: +# powershell.exe -command "& { . .\windows.ps1; controller-image }" +# Retry({retina-image $(tag)-windows-amd64}) + +function Retry([Action]$action) { + $attempts = 3 + $sleepInSeconds = 5 + do { + try { + $action.Invoke(); + break; + } + catch [Exception] { + Write-Host $_.Exception.Message + } + $attempts-- + if ($attempts -gt 0) { + sleep $sleepInSeconds + } + } while ($attempts -gt 0) +} + +function retina-image { + if ($null -eq $env:TAG) { $env:TAG = $args[0] } + docker build ` + -f .\Dockerfile.windows ` + -t acnpublic.azurecr.io/retina-agent:$env:TAG ` + . +} diff --git a/site/README.md b/site/README.md new file mode 100644 index 000000000..ff7cb7edb --- /dev/null +++ b/site/README.md @@ -0,0 +1,52 @@ +# Website + +This website is built using [Docusaurus 2](https://docusaurus.io/), a modern static website generator. + +## Development + +When adding a new doc, make sure to add it to /site/sidebars.js + +To test, run `make docs` to spin up local webserver and view changes with hot reload + +## Using Yarn + +### Installation + +Install `yarn` (e.g. for Ubuntu, try [this guide](https://www.linuxcapable.com/how-to-install-yarn-on-ubuntu-linux/#install-yarn-on-ubuntu-2204-or-2004-via-nodesource)). + +In this directory (*retina/site/*), run: +```bash +yarn +``` + +### Local Development + +```bash +yarn start +``` + +This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server. 
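+
+If the default port (3000) is already taken, or you need to reach the dev server from another machine or container, extra flags are forwarded to the underlying `docusaurus start` command. A minimal sketch (adjust port and host for your setup):
+
+```bash
+yarn start --port 3001 --host 0.0.0.0
+```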
+ +### Build + +```bash +yarn build +``` + +This command generates static content into the `build` directory and can be served using any static contents hosting service. + +## Deployment + +Using SSH: + +```bash +USE_SSH=true yarn deploy +``` + +Not using SSH: + +```bash +GIT_USER= yarn deploy +``` + +If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch. diff --git a/site/babel.config.js b/site/babel.config.js new file mode 100644 index 000000000..e00595dae --- /dev/null +++ b/site/babel.config.js @@ -0,0 +1,3 @@ +module.exports = { + presets: [require.resolve('@docusaurus/core/lib/babel/preset')], +}; diff --git a/site/docusaurus.config.js b/site/docusaurus.config.js new file mode 100644 index 000000000..efc2c08fb --- /dev/null +++ b/site/docusaurus.config.js @@ -0,0 +1,91 @@ +// @ts-check +// Note: type annotations allow type checking and IDEs autocompletion + +const lightCodeTheme = require('prism-react-renderer').themes.github; +const darkCodeTheme = require('prism-react-renderer').themes.dracula; + +/** @type {import('@docusaurus/types').Config} */ +const config = { + title: 'Retina', + tagline: 'REal-TIme Network Analytics', + favicon: 'img/favicon.ico', + + // Set the production url of your site here + url: 'https://docs.microsoft.com', + // Set the // pathname under which your site is served + // For GitHub pages deployment, it is often '//' + baseUrl: '/', + + // GitHub pages deployment config. + // If you aren't using GitHub pages, you don't need these. + organizationName: 'Azure', // Usually your GitHub org/user name. + projectName: 'Retina', // Usually your repo name. + + onBrokenLinks: 'throw', + onBrokenMarkdownLinks: 'warn', + + // Even if you don't use internalization, you can use this field to set useful + // metadata like html lang. For example, if your site is Chinese, you may want + // to replace "en" with "zh-Hans". + i18n: { + defaultLocale: 'en', + locales: ['en'], + }, + + presets: [ + [ + 'classic', + /** @type {import('@docusaurus/preset-classic').Options} */ + ({ + docs: { + sidebarPath: require.resolve('./sidebars.js'), + // Please change this to your repo. + // Remove this to remove the "edit this page" links. 
+ path: '../docs', + //routeBasePath: '../docs', + editUrl: + 'https://github.com/microsoft/retina/docs', + }, + theme: { + customCss: require.resolve('./src/css/custom.css'), + }, + }), + ], + ], + + themeConfig: + /** @type {import('@docusaurus/preset-classic').ThemeConfig} */ + ({ + // Replace with your project's social card + image: 'img/docusaurus-social-card.jpg', + navbar: { + title: 'Retina', + logo: { + alt: 'My Site Logo', + src: 'img/retina-logo.png', + }, + items: [ + { + type: 'doc', + docId: 'intro', + position: 'left', + label: 'Docs', + }, + { + href: 'https://github.com/microsoft/retina', + label: 'GitHub', + position: 'right', + }, + ], + }, + footer: { + style: 'dark', + }, + prism: { + theme: lightCodeTheme, + darkTheme: darkCodeTheme, + }, + }), +}; + +module.exports = config; diff --git a/site/package-lock.json b/site/package-lock.json new file mode 100644 index 000000000..060f2d21a --- /dev/null +++ b/site/package-lock.json @@ -0,0 +1,14661 @@ +{ + "name": "retina", + "version": "0.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "retina", + "version": "0.0.0", + "dependencies": { + "@docusaurus/core": "3.0.1", + "@docusaurus/preset-classic": "3.0.1", + "@mdx-js/react": "^3.0.0", + "clsx": "^2.0.0", + "prism-react-renderer": "^2.3.1", + "react": "^18.2.0", + "react-dom": "^18.2.0" + }, + "devDependencies": { + "@docusaurus/module-type-aliases": "3.0.1" + }, + "engines": { + "node": ">=16.14" + } + }, + "node_modules/@algolia/autocomplete-core": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-core/-/autocomplete-core-1.9.3.tgz", + "integrity": "sha512-009HdfugtGCdC4JdXUbVJClA0q0zh24yyePn+KUGk3rP7j8FEe/m5Yo/z65gn6nP/cM39PxpzqKrL7A6fP6PPw==", + "dependencies": { + "@algolia/autocomplete-plugin-algolia-insights": "1.9.3", + "@algolia/autocomplete-shared": "1.9.3" + } + }, + "node_modules/@algolia/autocomplete-plugin-algolia-insights": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-plugin-algolia-insights/-/autocomplete-plugin-algolia-insights-1.9.3.tgz", + "integrity": "sha512-a/yTUkcO/Vyy+JffmAnTWbr4/90cLzw+CC3bRbhnULr/EM0fGNvM13oQQ14f2moLMcVDyAx/leczLlAOovhSZg==", + "dependencies": { + "@algolia/autocomplete-shared": "1.9.3" + }, + "peerDependencies": { + "search-insights": ">= 1 < 3" + } + }, + "node_modules/@algolia/autocomplete-preset-algolia": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-preset-algolia/-/autocomplete-preset-algolia-1.9.3.tgz", + "integrity": "sha512-d4qlt6YmrLMYy95n5TB52wtNDr6EgAIPH81dvvvW8UmuWRgxEtY0NJiPwl/h95JtG2vmRM804M0DSwMCNZlzRA==", + "dependencies": { + "@algolia/autocomplete-shared": "1.9.3" + }, + "peerDependencies": { + "@algolia/client-search": ">= 4.9.1 < 6", + "algoliasearch": ">= 4.9.1 < 6" + } + }, + "node_modules/@algolia/autocomplete-shared": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-shared/-/autocomplete-shared-1.9.3.tgz", + "integrity": "sha512-Wnm9E4Ye6Rl6sTTqjoymD+l8DjSTHsHboVRYrKgEt8Q7UHm9nYbqhN/i0fhUYA3OAEH7WA8x3jfpnmJm3rKvaQ==", + "peerDependencies": { + "@algolia/client-search": ">= 4.9.1 < 6", + "algoliasearch": ">= 4.9.1 < 6" + } + }, + "node_modules/@algolia/cache-browser-local-storage": { + "version": "4.22.0", + "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.22.0.tgz", + "integrity": 
"sha512-uZ1uZMLDZb4qODLfTSNHxSi4fH9RdrQf7DXEzW01dS8XK7QFtFh29N5NGKa9S+Yudf1vUMIF+/RiL4i/J0pWlQ==", + "dependencies": { + "@algolia/cache-common": "4.22.0" + } + }, + "node_modules/@algolia/cache-common": { + "version": "4.22.0", + "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.22.0.tgz", + "integrity": "sha512-TPwUMlIGPN16eW67qamNQUmxNiGHg/WBqWcrOoCddhqNTqGDPVqmgfaM85LPbt24t3r1z0zEz/tdsmuq3Q6oaA==" + }, + "node_modules/@algolia/cache-in-memory": { + "version": "4.22.0", + "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.22.0.tgz", + "integrity": "sha512-kf4Cio9NpPjzp1+uXQgL4jsMDeck7MP89BYThSvXSjf2A6qV/0KeqQf90TL2ECS02ovLOBXkk98P7qVarM+zGA==", + "dependencies": { + "@algolia/cache-common": "4.22.0" + } + }, + "node_modules/@algolia/client-account": { + "version": "4.22.0", + "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.22.0.tgz", + "integrity": "sha512-Bjb5UXpWmJT+yGWiqAJL0prkENyEZTBzdC+N1vBuHjwIJcjLMjPB6j1hNBRbT12Lmwi55uzqeMIKS69w+0aPzA==", + "dependencies": { + "@algolia/client-common": "4.22.0", + "@algolia/client-search": "4.22.0", + "@algolia/transporter": "4.22.0" + } + }, + "node_modules/@algolia/client-analytics": { + "version": "4.22.0", + "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.22.0.tgz", + "integrity": "sha512-os2K+kHUcwwRa4ArFl5p/3YbF9lN3TLOPkbXXXxOvDpqFh62n9IRZuzfxpHxMPKAQS3Et1s0BkKavnNP02E9Hg==", + "dependencies": { + "@algolia/client-common": "4.22.0", + "@algolia/client-search": "4.22.0", + "@algolia/requester-common": "4.22.0", + "@algolia/transporter": "4.22.0" + } + }, + "node_modules/@algolia/client-common": { + "version": "4.22.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.22.0.tgz", + "integrity": "sha512-BlbkF4qXVWuwTmYxVWvqtatCR3lzXwxx628p1wj1Q7QP2+LsTmGt1DiUYRuy9jG7iMsnlExby6kRMOOlbhv2Ag==", + "dependencies": { + "@algolia/requester-common": "4.22.0", + "@algolia/transporter": "4.22.0" + } + }, + "node_modules/@algolia/client-personalization": { + "version": "4.22.0", + "resolved": "https://registry.npmjs.org/@algolia/client-personalization/-/client-personalization-4.22.0.tgz", + "integrity": "sha512-pEOftCxeBdG5pL97WngOBi9w5Vxr5KCV2j2D+xMVZH8MuU/JX7CglDSDDb0ffQWYqcUN+40Ry+xtXEYaGXTGow==", + "dependencies": { + "@algolia/client-common": "4.22.0", + "@algolia/requester-common": "4.22.0", + "@algolia/transporter": "4.22.0" + } + }, + "node_modules/@algolia/client-search": { + "version": "4.22.0", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.22.0.tgz", + "integrity": "sha512-bn4qQiIdRPBGCwsNuuqB8rdHhGKKWIij9OqidM1UkQxnSG8yzxHdb7CujM30pvp5EnV7jTqDZRbxacbjYVW20Q==", + "dependencies": { + "@algolia/client-common": "4.22.0", + "@algolia/requester-common": "4.22.0", + "@algolia/transporter": "4.22.0" + } + }, + "node_modules/@algolia/events": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@algolia/events/-/events-4.0.1.tgz", + "integrity": "sha512-FQzvOCgoFXAbf5Y6mYozw2aj5KCJoA3m4heImceldzPSMbdyS4atVjJzXKMsfX3wnZTFYwkkt8/z8UesLHlSBQ==" + }, + "node_modules/@algolia/logger-common": { + "version": "4.22.0", + "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.22.0.tgz", + "integrity": "sha512-HMUQTID0ucxNCXs5d1eBJ5q/HuKg8rFVE/vOiLaM4Abfeq1YnTtGV3+rFEhOPWhRQxNDd+YHa4q864IMc0zHpQ==" + }, + "node_modules/@algolia/logger-console": { + "version": "4.22.0", + "resolved": 
"https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.22.0.tgz", + "integrity": "sha512-7JKb6hgcY64H7CRm3u6DRAiiEVXMvCJV5gRE672QFOUgDxo4aiDpfU61g6Uzy8NKjlEzHMmgG4e2fklELmPXhQ==", + "dependencies": { + "@algolia/logger-common": "4.22.0" + } + }, + "node_modules/@algolia/requester-browser-xhr": { + "version": "4.22.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.22.0.tgz", + "integrity": "sha512-BHfv1h7P9/SyvcDJDaRuIwDu2yrDLlXlYmjvaLZTtPw6Ok/ZVhBR55JqW832XN/Fsl6k3LjdkYHHR7xnsa5Wvg==", + "dependencies": { + "@algolia/requester-common": "4.22.0" + } + }, + "node_modules/@algolia/requester-common": { + "version": "4.22.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.22.0.tgz", + "integrity": "sha512-Y9cEH/cKjIIZgzvI1aI0ARdtR/xRrOR13g5psCxkdhpgRN0Vcorx+zePhmAa4jdQNqexpxtkUdcKYugBzMZJgQ==" + }, + "node_modules/@algolia/requester-node-http": { + "version": "4.22.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.22.0.tgz", + "integrity": "sha512-8xHoGpxVhz3u2MYIieHIB6MsnX+vfd5PS4REgglejJ6lPigftRhTdBCToe6zbwq4p0anZXjjPDvNWMlgK2+xYA==", + "dependencies": { + "@algolia/requester-common": "4.22.0" + } + }, + "node_modules/@algolia/transporter": { + "version": "4.22.0", + "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.22.0.tgz", + "integrity": "sha512-ieO1k8x2o77GNvOoC+vAkFKppydQSVfbjM3YrSjLmgywiBejPTvU1R1nEvG59JIIUvtSLrZsLGPkd6vL14zopA==", + "dependencies": { + "@algolia/cache-common": "4.22.0", + "@algolia/logger-common": "4.22.0", + "@algolia/requester-common": "4.22.0" + } + }, + "node_modules/@ampproject/remapping": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz", + "integrity": "sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.0", + "@jridgewell/trace-mapping": "^0.3.9" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.23.5", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.23.5.tgz", + "integrity": "sha512-CgH3s1a96LipHCmSUmYFPwY7MNx8C3avkq7i4Wl3cfa662ldtUe4VM1TPXX70pfmrlWTb6jLqTYrZyT2ZTJBgA==", + "dependencies": { + "@babel/highlight": "^7.23.4", + "chalk": "^2.4.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/code-frame/node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": 
"sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/@babel/code-frame/node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" + }, + "node_modules/@babel/code-frame/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/@babel/code-frame/node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.23.5", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.23.5.tgz", + "integrity": "sha512-uU27kfDRlhfKl+w1U6vp16IuvSLtjAxdArVXPa9BvLkrr7CYIsxH5adpHObeAGY/41+syctUWOZ140a2Rvkgjw==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.23.6", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.23.6.tgz", + "integrity": "sha512-FxpRyGjrMJXh7X3wGLGhNDCRiwpWEF74sKjTLDJSG5Kyvow3QZaG0Adbqzi9ZrVjTWpsX+2cxWXD71NMg93kdw==", + "dependencies": { + "@ampproject/remapping": "^2.2.0", + "@babel/code-frame": "^7.23.5", + "@babel/generator": "^7.23.6", + "@babel/helper-compilation-targets": "^7.23.6", + "@babel/helper-module-transforms": "^7.23.3", + "@babel/helpers": "^7.23.6", + "@babel/parser": "^7.23.6", + "@babel/template": "^7.22.15", + "@babel/traverse": "^7.23.6", + "@babel/types": "^7.23.6", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/core/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/generator": { + "version": "7.23.6", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.6.tgz", + "integrity": "sha512-qrSfCYxYQB5owCmGLbl8XRpX1ytXlpueOb0N0UmQwA073KZxejgQTzAmJezxvpwQD9uGtK2shHdi55QT+MbjIw==", + "dependencies": { + "@babel/types": "^7.23.6", + "@jridgewell/gen-mapping": "^0.3.2", + "@jridgewell/trace-mapping": "^0.3.17", + "jsesc": "^2.5.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-annotate-as-pure": { + "version": "7.22.5", + "resolved": 
"https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.22.5.tgz", + "integrity": "sha512-LvBTxu8bQSQkcyKOU+a1btnNFQ1dMAd0R6PyW3arXes06F6QLWLIrd681bxRPIXlrMGR3XYnW9JyML7dP3qgxg==", + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-builder-binary-assignment-operator-visitor": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.22.15.tgz", + "integrity": "sha512-QkBXwGgaoC2GtGZRoma6kv7Szfv06khvhFav67ZExau2RaXzy8MpHSMO2PNoP2XtmQphJQRHFfg77Bq731Yizw==", + "dependencies": { + "@babel/types": "^7.22.15" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.23.6", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.23.6.tgz", + "integrity": "sha512-9JB548GZoQVmzrFgp8o7KxdgkTGm6xs9DW0o/Pim72UDjzr5ObUQ6ZzYPqA+g9OTS2bBQoctLJrky0RDCAWRgQ==", + "dependencies": { + "@babel/compat-data": "^7.23.5", + "@babel/helper-validator-option": "^7.23.5", + "browserslist": "^4.22.2", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-create-class-features-plugin": { + "version": "7.23.6", + "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.23.6.tgz", + "integrity": "sha512-cBXU1vZni/CpGF29iTu4YRbOZt3Wat6zCoMDxRF1MayiEc4URxOj31tT65HUM0CRpMowA3HCJaAOVOUnMf96cw==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-member-expression-to-functions": "^7.23.0", + "@babel/helper-optimise-call-expression": "^7.22.5", + "@babel/helper-replace-supers": "^7.22.20", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-create-class-features-plugin/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-create-regexp-features-plugin": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.22.15.tgz", + "integrity": "sha512-29FkPLFjn4TPEa3RE7GpW+qbE8tlsu3jntNYNfcGsc49LphF1PQIiD+vMZ1z1xVOKt+93khA9tc2JBs3kBjA7w==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "regexpu-core": "^5.3.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-create-regexp-features-plugin/node_modules/semver": { + "version": 
"6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-define-polyfill-provider": { + "version": "0.4.4", + "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.4.4.tgz", + "integrity": "sha512-QcJMILQCu2jm5TFPGA3lCpJJTeEP+mqeXooG/NZbg/h5FTFi6V0+99ahlRsW8/kRLyb24LZVCCiclDedhLKcBA==", + "dependencies": { + "@babel/helper-compilation-targets": "^7.22.6", + "@babel/helper-plugin-utils": "^7.22.5", + "debug": "^4.1.1", + "lodash.debounce": "^4.0.8", + "resolve": "^1.14.2" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/@babel/helper-environment-visitor": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz", + "integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-function-name": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz", + "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==", + "dependencies": { + "@babel/template": "^7.22.15", + "@babel/types": "^7.23.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-hoist-variables": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", + "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-member-expression-to-functions": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.23.0.tgz", + "integrity": "sha512-6gfrPwh7OuT6gZyJZvd6WbTfrqAo7vm4xCzAXOusKqq/vWdKXphTpj5klHKNmRUU6/QRGlBsyU9mAIPaWHlqJA==", + "dependencies": { + "@babel/types": "^7.23.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.15.tgz", + "integrity": "sha512-0pYVBnDKZO2fnSPCrgM/6WMc7eS20Fbok+0r88fp+YtWVLZrp4CkafFGIp+W0VKw4a22sgebPT99y+FDNMdP4w==", + "dependencies": { + "@babel/types": "^7.22.15" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.23.3.tgz", + "integrity": "sha512-7bBs4ED9OmswdfDzpz4MpWgSrV7FXlc3zIagvLFjS5H+Mk7Snr21vQ6QwrsoCGMfNC4e4LQPdoULEt4ykz0SRQ==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-module-imports": "^7.22.15", + "@babel/helper-simple-access": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/helper-validator-identifier": "^7.22.20" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + 
"node_modules/@babel/helper-optimise-call-expression": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.22.5.tgz", + "integrity": "sha512-HBwaojN0xFRx4yIvpwGqxiV2tUfl7401jlok564NgB9EHS1y6QT17FmKWm4ztqjeVdXLuC4fSvHc5ePpQjoTbw==", + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz", + "integrity": "sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-remap-async-to-generator": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.22.20.tgz", + "integrity": "sha512-pBGyV4uBqOns+0UvhsTO8qgl8hO89PmiDYv+/COyp1aeMcmfrfruz+/nCMFiYyFF/Knn0yfrC85ZzNFjembFTw==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-wrap-function": "^7.22.20" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-replace-supers": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.22.20.tgz", + "integrity": "sha512-qsW0In3dbwQUbK8kejJ4R7IHVGwHJlV6lpG6UA7a9hSa2YEiAib+N1T2kr6PEeUT+Fl7najmSOS6SmAwCHK6Tw==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-member-expression-to-functions": "^7.22.15", + "@babel/helper-optimise-call-expression": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-simple-access": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.22.5.tgz", + "integrity": "sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w==", + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-skip-transparent-expression-wrappers": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.22.5.tgz", + "integrity": "sha512-tK14r66JZKiC43p8Ki33yLBVJKlQDFoA8GYN67lWCDCqoL6EMMSuM9b+Iff2jHaM/RRFYl7K+iiru7hbRqNx8Q==", + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-split-export-declaration": { + "version": "7.22.6", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", + "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.23.4", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.23.4.tgz", + "integrity": "sha512-803gmbQdqwdf4olxrX4AJyFBV/RTr3rSmOj0rKwesmzlfhYNDEs+/iOcznzpNWlJlIlTJC2QfPFcHB6DlzdVLQ==", + "engines": { + "node": ">=6.9.0" + } + }, + 
"node_modules/@babel/helper-validator-identifier": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", + "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.23.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.23.5.tgz", + "integrity": "sha512-85ttAOMLsr53VgXkTbkx8oA6YTfT4q7/HzXSLEYmjcSTJPMPQtvq1BD79Byep5xMUYbGRzEpDsjUf3dyp54IKw==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-wrap-function": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.22.20.tgz", + "integrity": "sha512-pms/UwkOpnQe/PDAEdV/d7dVCoBbB+R4FvYoHGZz+4VPcg7RtYy2KP7S2lbuWM6FCSgob5wshfGESbC/hzNXZw==", + "dependencies": { + "@babel/helper-function-name": "^7.22.5", + "@babel/template": "^7.22.15", + "@babel/types": "^7.22.19" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.23.6", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.23.6.tgz", + "integrity": "sha512-wCfsbN4nBidDRhpDhvcKlzHWCTlgJYUUdSJfzXb2NuBssDSIjc3xcb+znA7l+zYsFljAcGM0aFkN40cR3lXiGA==", + "dependencies": { + "@babel/template": "^7.22.15", + "@babel/traverse": "^7.23.6", + "@babel/types": "^7.23.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/highlight": { + "version": "7.23.4", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.23.4.tgz", + "integrity": "sha512-acGdbYSfp2WheJoJm/EBBBLh/ID8KDc64ISZ9DYtBmC8/Q204PZJLHyzeB5qMzJ5trcOkybd78M4x2KWsUq++A==", + "dependencies": { + "@babel/helper-validator-identifier": "^7.22.20", + "chalk": "^2.4.2", + "js-tokens": "^4.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/highlight/node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/@babel/highlight/node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" + }, + "node_modules/@babel/highlight/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": 
"https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/@babel/highlight/node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/parser": { + "version": "7.23.6", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.6.tgz", + "integrity": "sha512-Z2uID7YJ7oNvAI20O9X0bblw7Qqs8Q2hFy0R9tAfnfLkp5MW0UH9eUvnDSnFwKZ0AvgS1ucqR4KzvVHgnke1VQ==", + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.23.3.tgz", + "integrity": "sha512-iRkKcCqb7iGnq9+3G6rZ+Ciz5VywC4XNRHe57lKM+jOeYAoR0lVqdeeDRfh0tQcTfw/+vBhHn926FmQhLtlFLQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.23.3.tgz", + "integrity": "sha512-WwlxbfMNdVEpQjZmK5mhm7oSwD3dS6eU+Iwsi4Knl9wAletWem7kaRsGOG+8UEbRyqxY4SS5zvtfXwX+jMxUwQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", + "@babel/plugin-transform-optional-chaining": "^7.23.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.13.0" + } + }, + "node_modules/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly/-/plugin-bugfix-v8-static-class-fields-redefine-readonly-7.23.3.tgz", + "integrity": "sha512-XaJak1qcityzrX0/IU5nKHb34VaibwP3saKqG6a/tppelgllOH13LUann4ZCIBcVOeE6H18K4Vx9QKkVww3z/w==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-proposal-private-property-in-object": { + "version": "7.21.0-placeholder-for-preset-env.2", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz", + "integrity": 
"sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==", + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-async-generators": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", + "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-properties": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", + "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.12.13" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-static-block": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", + "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-dynamic-import": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz", + "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-export-namespace-from": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz", + "integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.3" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-assertions": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.23.3.tgz", + "integrity": "sha512-lPgDSU+SJLK3xmFDTV2ZRQAiM7UuUjGidwBywFavObCiZc1BeAAcMtHJKUya92hPHO+at63JJPLygilZard8jw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-attributes": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.23.3.tgz", + "integrity": "sha512-pawnE0P9g10xgoP7yKr6CK63K2FMsTE+FZidZO/1PwRdzmAPVs+HS1mAURUsgaoxammTJvULUdIkEK0gOcU2tA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-meta": { + "version": "7.10.4", 
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", + "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-json-strings": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", + "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-jsx": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.23.3.tgz", + "integrity": "sha512-EB2MELswq55OHUoRZLGg/zC7QWUKfNLpE57m/S2yr1uEneIgsTgrSzXP3NXEsMkVn76OlaVVnzN+ugObuYGwhg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-logical-assignment-operators": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", + "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-numeric-separator": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", + "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-object-rest-spread": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", + "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-catch-binding": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", + "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": 
"^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-chaining": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", + "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-private-property-in-object": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", + "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-top-level-await": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", + "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-typescript": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.23.3.tgz", + "integrity": "sha512-9EiNjVJOMwCO+43TqoTrgQ8jMwcAd0sWyXi9RPfIsLTj4R2MADDDQXELhffaUx/uJv2AYcxBgPwH6j4TIA4ytQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-unicode-sets-regex": { + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz", + "integrity": "sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-arrow-functions": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.23.3.tgz", + "integrity": "sha512-NzQcQrzaQPkaEwoTm4Mhyl8jI1huEL/WWIEvudjTCMJ9aBZNpsJbMASx7EQECtQQPS/DcnFpo0FIh3LvEO9cxQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-async-generator-functions": { + "version": "7.23.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.23.4.tgz", + "integrity": "sha512-efdkfPhHYTtn0G6n2ddrESE91fgXxjlqLsnUtPWnJs4a4mZIbUaK7ffqKIIUKXSHwcDvaCVX6GXkaJJFqtX7jw==", + "dependencies": { + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-remap-async-to-generator": "^7.22.20", + 
"@babel/plugin-syntax-async-generators": "^7.8.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-async-to-generator": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.23.3.tgz", + "integrity": "sha512-A7LFsKi4U4fomjqXJlZg/u0ft/n8/7n7lpffUP/ZULx/DtV9SGlNKZolHH6PE8Xl1ngCc0M11OaeZptXVkfKSw==", + "dependencies": { + "@babel/helper-module-imports": "^7.22.15", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-remap-async-to-generator": "^7.22.20" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-block-scoped-functions": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.23.3.tgz", + "integrity": "sha512-vI+0sIaPIO6CNuM9Kk5VmXcMVRiOpDh7w2zZt9GXzmE/9KD70CUEVhvPR/etAeNK/FAEkhxQtXOzVF3EuRL41A==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-block-scoping": { + "version": "7.23.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.23.4.tgz", + "integrity": "sha512-0QqbP6B6HOh7/8iNR4CQU2Th/bbRtBp4KS9vcaZd1fZ0wSh5Fyssg0UCIHwxh+ka+pNDREbVLQnHCMHKZfPwfw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-class-properties": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.23.3.tgz", + "integrity": "sha512-uM+AN8yCIjDPccsKGlw271xjJtGii+xQIF/uMPS8H15L12jZTsLfF4o5vNO7d/oUguOyfdikHGc/yi9ge4SGIg==", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.22.15", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-class-static-block": { + "version": "7.23.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.23.4.tgz", + "integrity": "sha512-nsWu/1M+ggti1SOALj3hfx5FXzAY06fwPJsUZD4/A5e1bWi46VUIWtD+kOX6/IdhXGsXBWllLFDSnqSCdUNydQ==", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.22.15", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-class-static-block": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.12.0" + } + }, + "node_modules/@babel/plugin-transform-classes": { + "version": "7.23.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.23.5.tgz", + "integrity": "sha512-jvOTR4nicqYC9yzOHIhXG5emiFEOpappSJAl73SDSEDcybD+Puuze8Tnpb9p9qEyYup24tq891gkaygIFvWDqg==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-compilation-targets": "^7.22.15", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-optimise-call-expression": "^7.22.5", + "@babel/helper-plugin-utils": 
"^7.22.5", + "@babel/helper-replace-supers": "^7.22.20", + "@babel/helper-split-export-declaration": "^7.22.6", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-computed-properties": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.23.3.tgz", + "integrity": "sha512-dTj83UVTLw/+nbiHqQSFdwO9CbTtwq1DsDqm3CUEtDrZNET5rT5E6bIdTlOftDTDLMYxvxHNEYO4B9SLl8SLZw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/template": "^7.22.15" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-destructuring": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.23.3.tgz", + "integrity": "sha512-n225npDqjDIr967cMScVKHXJs7rout1q+tt50inyBCPkyZ8KxeI6d+GIbSBTT/w/9WdlWDOej3V9HE5Lgk57gw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-dotall-regex": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.23.3.tgz", + "integrity": "sha512-vgnFYDHAKzFaTVp+mneDsIEbnJ2Np/9ng9iviHw3P/KVcgONxpNULEW/51Z/BaFojG2GI2GwwXck5uV1+1NOYQ==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.22.15", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-duplicate-keys": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.23.3.tgz", + "integrity": "sha512-RrqQ+BQmU3Oyav3J+7/myfvRCq7Tbz+kKLLshUmMwNlDHExbGL7ARhajvoBJEvc+fCguPPu887N+3RRXBVKZUA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-dynamic-import": { + "version": "7.23.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.23.4.tgz", + "integrity": "sha512-V6jIbLhdJK86MaLh4Jpghi8ho5fGzt3imHOBu/x0jlBaPYqDoWz4RDXjmMOfnh+JWNaQleEAByZLV0QzBT4YQQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-dynamic-import": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-exponentiation-operator": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.23.3.tgz", + "integrity": "sha512-5fhCsl1odX96u7ILKHBj4/Y8vipoqwsJMh4csSA8qFfxrZDEA4Ssku2DyNvMJSmZNOEBT750LfFPbtrnTP90BQ==", + "dependencies": { + "@babel/helper-builder-binary-assignment-operator-visitor": "^7.22.15", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-export-namespace-from": { + "version": "7.23.4", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.23.4.tgz", + "integrity": "sha512-GzuSBcKkx62dGzZI1WVgTWvkkz84FZO5TC5T8dl/Tht/rAla6Dg/Mz9Yhypg+ezVACf/rgDuQt3kbWEv7LdUDQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-export-namespace-from": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-for-of": { + "version": "7.23.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.23.6.tgz", + "integrity": "sha512-aYH4ytZ0qSuBbpfhuofbg/e96oQ7U2w1Aw/UQmKT+1l39uEhUPoFS3fHevDc1G0OvewyDudfMKY1OulczHzWIw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-function-name": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.23.3.tgz", + "integrity": "sha512-I1QXp1LxIvt8yLaib49dRW5Okt7Q4oaxao6tFVKS/anCdEOMtYwWVKoiOA1p34GOWIZjUK0E+zCp7+l1pfQyiw==", + "dependencies": { + "@babel/helper-compilation-targets": "^7.22.15", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-json-strings": { + "version": "7.23.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.23.4.tgz", + "integrity": "sha512-81nTOqM1dMwZ/aRXQ59zVubN9wHGqk6UtqRK+/q+ciXmRy8fSolhGVvG09HHRGo4l6fr/c4ZhXUQH0uFW7PZbg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-json-strings": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-literals": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.23.3.tgz", + "integrity": "sha512-wZ0PIXRxnwZvl9AYpqNUxpZ5BiTGrYt7kueGQ+N5FiQ7RCOD4cm8iShd6S6ggfVIWaJf2EMk8eRzAh52RfP4rQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-logical-assignment-operators": { + "version": "7.23.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.23.4.tgz", + "integrity": "sha512-Mc/ALf1rmZTP4JKKEhUwiORU+vcfarFVLfcFiolKUo6sewoxSEgl36ak5t+4WamRsNr6nzjZXQjM35WsU+9vbg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-member-expression-literals": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.23.3.tgz", + "integrity": 
"sha512-sC3LdDBDi5x96LA+Ytekz2ZPk8i/Ck+DEuDbRAll5rknJ5XRTSaPKEYwomLcs1AA8wg9b3KjIQRsnApj+q51Ag==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-amd": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.23.3.tgz", + "integrity": "sha512-vJYQGxeKM4t8hYCKVBlZX/gtIY2I7mRGFNcm85sgXGMTBcoV3QdVtdpbcWEbzbfUIUZKwvgFT82mRvaQIebZzw==", + "dependencies": { + "@babel/helper-module-transforms": "^7.23.3", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-commonjs": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.23.3.tgz", + "integrity": "sha512-aVS0F65LKsdNOtcz6FRCpE4OgsP2OFnW46qNxNIX9h3wuzaNcSQsJysuMwqSibC98HPrf2vCgtxKNwS0DAlgcA==", + "dependencies": { + "@babel/helper-module-transforms": "^7.23.3", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-simple-access": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-systemjs": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.23.3.tgz", + "integrity": "sha512-ZxyKGTkF9xT9YJuKQRo19ewf3pXpopuYQd8cDXqNzc3mUNbOME0RKMoZxviQk74hwzfQsEe66dE92MaZbdHKNQ==", + "dependencies": { + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-module-transforms": "^7.23.3", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.20" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-umd": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.23.3.tgz", + "integrity": "sha512-zHsy9iXX2nIsCBFPud3jKn1IRPWg3Ing1qOZgeKV39m1ZgIdpJqvlWVeiHBZC6ITRG0MfskhYe9cLgntfSFPIg==", + "dependencies": { + "@babel/helper-module-transforms": "^7.23.3", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-named-capturing-groups-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.22.5.tgz", + "integrity": "sha512-YgLLKmS3aUBhHaxp5hi1WJTgOUb/NCuDHzGT9z9WTt3YG+CPRhJs6nprbStx6DnWM4dh6gt7SU3sZodbZ08adQ==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-new-target": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.23.3.tgz", + "integrity": "sha512-YJ3xKqtJMAT5/TIZnpAR3I+K+WaDowYbN3xyxI8zxx/Gsypwf9B9h0VB+1Nh6ACAAPRS5NSRje0uVv5i79HYGQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + 
"node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-nullish-coalescing-operator": { + "version": "7.23.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.23.4.tgz", + "integrity": "sha512-jHE9EVVqHKAQx+VePv5LLGHjmHSJR76vawFPTdlxR/LVJPfOEGxREQwQfjuZEOPTwG92X3LINSh3M40Rv4zpVA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-numeric-separator": { + "version": "7.23.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.23.4.tgz", + "integrity": "sha512-mps6auzgwjRrwKEZA05cOwuDc9FAzoyFS4ZsG/8F43bTLf/TgkJg7QXOrPO1JO599iA3qgK9MXdMGOEC8O1h6Q==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-numeric-separator": "^7.10.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-object-rest-spread": { + "version": "7.23.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.23.4.tgz", + "integrity": "sha512-9x9K1YyeQVw0iOXJlIzwm8ltobIIv7j2iLyP2jIhEbqPRQ7ScNgwQufU2I0Gq11VjyG4gI4yMXt2VFags+1N3g==", + "dependencies": { + "@babel/compat-data": "^7.23.3", + "@babel/helper-compilation-targets": "^7.22.15", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-transform-parameters": "^7.23.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-object-super": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.23.3.tgz", + "integrity": "sha512-BwQ8q0x2JG+3lxCVFohg+KbQM7plfpBwThdW9A6TMtWwLsbDA01Ek2Zb/AgDN39BiZsExm4qrXxjk+P1/fzGrA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-replace-supers": "^7.22.20" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-optional-catch-binding": { + "version": "7.23.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.23.4.tgz", + "integrity": "sha512-XIq8t0rJPHf6Wvmbn9nFxU6ao4c7WhghTR5WyV8SrJfUFzyxhCm4nhC+iAp3HFhbAKLfYpgzhJ6t4XCtVwqO5A==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-optional-chaining": { + "version": "7.23.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.23.4.tgz", + "integrity": "sha512-ZU8y5zWOfjM5vZ+asjgAPwDaBjJzgufjES89Rs4Lpq63O300R/kOz30WCLo6BxxX6QVEilwSlpClnG5cZaikTA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", + "@babel/plugin-syntax-optional-chaining": "^7.8.3" 
+ }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-parameters": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.23.3.tgz", + "integrity": "sha512-09lMt6UsUb3/34BbECKVbVwrT9bO6lILWln237z7sLaWnMsTi7Yc9fhX5DLpkJzAGfaReXI22wP41SZmnAA3Vw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-private-methods": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.23.3.tgz", + "integrity": "sha512-UzqRcRtWsDMTLrRWFvUBDwmw06tCQH9Rl1uAjfh6ijMSmGYQ+fpdB+cnqRC8EMh5tuuxSv0/TejGL+7vyj+50g==", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.22.15", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-private-property-in-object": { + "version": "7.23.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.23.4.tgz", + "integrity": "sha512-9G3K1YqTq3F4Vt88Djx1UZ79PDyj+yKRnUy7cZGSMe+a7jkwD259uKKuUzQlPkGam7R+8RJwh5z4xO27fA1o2A==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-create-class-features-plugin": "^7.22.15", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-property-literals": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.23.3.tgz", + "integrity": "sha512-jR3Jn3y7cZp4oEWPFAlRsSWjxKe4PZILGBSd4nis1TsC5qeSpb+nrtihJuDhNI7QHiVbUaiXa0X2RZY3/TI6Nw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-constant-elements": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.23.3.tgz", + "integrity": "sha512-zP0QKq/p6O42OL94udMgSfKXyse4RyJ0JqbQ34zDAONWjyrEsghYEyTSK5FIpmXmCpB55SHokL1cRRKHv8L2Qw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-display-name": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.23.3.tgz", + "integrity": "sha512-GnvhtVfA2OAtzdX58FJxU19rhoGeQzyVndw3GgtdECQvQFXPEZIOVULHVZGAYmOgmqjXpVpfocAbSjh99V/Fqw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx": { + "version": "7.23.4", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.23.4.tgz", + "integrity": "sha512-5xOpoPguCZCRbo/JeHlloSkTA8Bld1J/E1/kLfD1nsuiW1m8tduTA1ERCgIZokDflX/IBzKcqR3l7VlRgiIfHA==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-module-imports": "^7.22.15", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-jsx": "^7.23.3", + "@babel/types": "^7.23.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-development": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.22.5.tgz", + "integrity": "sha512-bDhuzwWMuInwCYeDeMzyi7TaBgRQei6DqxhbyniL7/VG4RSS7HtSL2QbY4eESy1KJqlWt8g3xeEBGPuo+XqC8A==", + "dependencies": { + "@babel/plugin-transform-react-jsx": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-pure-annotations": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.23.3.tgz", + "integrity": "sha512-qMFdSS+TUhB7Q/3HVPnEdYJDQIk57jkntAwSuz9xfSE4n+3I+vHYCli3HoHawN1Z3RfCz/y1zXA/JXjG6cVImQ==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-regenerator": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.23.3.tgz", + "integrity": "sha512-KP+75h0KghBMcVpuKisx3XTu9Ncut8Q8TuvGO4IhY+9D5DFEckQefOuIsB/gQ2tG71lCke4NMrtIPS8pOj18BQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "regenerator-transform": "^0.15.2" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-reserved-words": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.23.3.tgz", + "integrity": "sha512-QnNTazY54YqgGxwIexMZva9gqbPa15t/x9VS+0fsEFWplwVpXYZivtgl43Z1vMpc1bdPP2PP8siFeVcnFvA3Cg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-runtime": { + "version": "7.23.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.23.6.tgz", + "integrity": "sha512-kF1Zg62aPseQ11orDhFRw+aPG/eynNQtI+TyY+m33qJa2cJ5EEvza2P2BNTIA9E5MyqFABHEyY6CPHwgdy9aNg==", + "dependencies": { + "@babel/helper-module-imports": "^7.22.15", + "@babel/helper-plugin-utils": "^7.22.5", + "babel-plugin-polyfill-corejs2": "^0.4.6", + "babel-plugin-polyfill-corejs3": "^0.8.5", + "babel-plugin-polyfill-regenerator": "^0.5.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-runtime/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": 
"sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/plugin-transform-shorthand-properties": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.23.3.tgz", + "integrity": "sha512-ED2fgqZLmexWiN+YNFX26fx4gh5qHDhn1O2gvEhreLW2iI63Sqm4llRLCXALKrCnbN4Jy0VcMQZl/SAzqug/jg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-spread": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.23.3.tgz", + "integrity": "sha512-VvfVYlrlBVu+77xVTOAoxQ6mZbnIq5FM0aGBSFEcIh03qHf+zNqA4DC/3XMUozTg7bZV3e3mZQ0i13VB6v5yUg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-sticky-regex": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.23.3.tgz", + "integrity": "sha512-HZOyN9g+rtvnOU3Yh7kSxXrKbzgrm5X4GncPY1QOquu7epga5MxKHVpYu2hvQnry/H+JjckSYRb93iNfsioAGg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-template-literals": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.23.3.tgz", + "integrity": "sha512-Flok06AYNp7GV2oJPZZcP9vZdszev6vPBkHLwxwSpaIqx75wn6mUd3UFWsSsA0l8nXAKkyCmL/sR02m8RYGeHg==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-typeof-symbol": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.23.3.tgz", + "integrity": "sha512-4t15ViVnaFdrPC74be1gXBSMzXk3B4Us9lP7uLRQHTFpV5Dvt33pn+2MyyNxmN3VTTm3oTrZVMUmuw3oBnQ2oQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-typescript": { + "version": "7.23.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.23.6.tgz", + "integrity": "sha512-6cBG5mBvUu4VUD04OHKnYzbuHNP8huDsD3EDqqpIpsswTDoqHCjLoHb6+QgsV1WsT2nipRqCPgxD3LXnEO7XfA==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-create-class-features-plugin": "^7.23.6", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-typescript": "^7.23.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-escapes": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.23.3.tgz", + "integrity": 
"sha512-OMCUx/bU6ChE3r4+ZdylEqAjaQgHAgipgW8nsCfu5pGqDcFytVd91AwRvUJSBZDz0exPGgnjoqhgRYLRjFZc9Q==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-property-regex": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.23.3.tgz", + "integrity": "sha512-KcLIm+pDZkWZQAFJ9pdfmh89EwVfmNovFBcXko8szpBeF8z68kWIPeKlmSOkT9BXJxs2C0uk+5LxoxIv62MROA==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.22.15", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-regex": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.23.3.tgz", + "integrity": "sha512-wMHpNA4x2cIA32b/ci3AfwNgheiva2W0WUKWTK7vBHBhDKfPsc5cFGNWm69WBqpwd86u1qwZ9PWevKqm1A3yAw==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.22.15", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-sets-regex": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.23.3.tgz", + "integrity": "sha512-W7lliA/v9bNR83Qc3q1ip9CQMZ09CcHDbHfbLRDNuAhn1Mvkr1ZNF7hPmztMQvtTGVLJ9m8IZqWsTkXOml8dbw==", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.22.15", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/preset-env": { + "version": "7.23.6", + "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.23.6.tgz", + "integrity": "sha512-2XPn/BqKkZCpzYhUUNZ1ssXw7DcXfKQEjv/uXZUXgaebCMYmkEsfZ2yY+vv+xtXv50WmL5SGhyB6/xsWxIvvOQ==", + "dependencies": { + "@babel/compat-data": "^7.23.5", + "@babel/helper-compilation-targets": "^7.23.6", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-validator-option": "^7.23.5", + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.23.3", + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.23.3", + "@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly": "^7.23.3", + "@babel/plugin-proposal-private-property-in-object": "7.21.0-placeholder-for-preset-env.2", + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/plugin-syntax-class-properties": "^7.12.13", + "@babel/plugin-syntax-class-static-block": "^7.14.5", + "@babel/plugin-syntax-dynamic-import": "^7.8.3", + "@babel/plugin-syntax-export-namespace-from": "^7.8.3", + "@babel/plugin-syntax-import-assertions": "^7.23.3", + "@babel/plugin-syntax-import-attributes": "^7.23.3", + "@babel/plugin-syntax-import-meta": "^7.10.4", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.10.4", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + 
"@babel/plugin-syntax-optional-chaining": "^7.8.3", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5", + "@babel/plugin-syntax-top-level-await": "^7.14.5", + "@babel/plugin-syntax-unicode-sets-regex": "^7.18.6", + "@babel/plugin-transform-arrow-functions": "^7.23.3", + "@babel/plugin-transform-async-generator-functions": "^7.23.4", + "@babel/plugin-transform-async-to-generator": "^7.23.3", + "@babel/plugin-transform-block-scoped-functions": "^7.23.3", + "@babel/plugin-transform-block-scoping": "^7.23.4", + "@babel/plugin-transform-class-properties": "^7.23.3", + "@babel/plugin-transform-class-static-block": "^7.23.4", + "@babel/plugin-transform-classes": "^7.23.5", + "@babel/plugin-transform-computed-properties": "^7.23.3", + "@babel/plugin-transform-destructuring": "^7.23.3", + "@babel/plugin-transform-dotall-regex": "^7.23.3", + "@babel/plugin-transform-duplicate-keys": "^7.23.3", + "@babel/plugin-transform-dynamic-import": "^7.23.4", + "@babel/plugin-transform-exponentiation-operator": "^7.23.3", + "@babel/plugin-transform-export-namespace-from": "^7.23.4", + "@babel/plugin-transform-for-of": "^7.23.6", + "@babel/plugin-transform-function-name": "^7.23.3", + "@babel/plugin-transform-json-strings": "^7.23.4", + "@babel/plugin-transform-literals": "^7.23.3", + "@babel/plugin-transform-logical-assignment-operators": "^7.23.4", + "@babel/plugin-transform-member-expression-literals": "^7.23.3", + "@babel/plugin-transform-modules-amd": "^7.23.3", + "@babel/plugin-transform-modules-commonjs": "^7.23.3", + "@babel/plugin-transform-modules-systemjs": "^7.23.3", + "@babel/plugin-transform-modules-umd": "^7.23.3", + "@babel/plugin-transform-named-capturing-groups-regex": "^7.22.5", + "@babel/plugin-transform-new-target": "^7.23.3", + "@babel/plugin-transform-nullish-coalescing-operator": "^7.23.4", + "@babel/plugin-transform-numeric-separator": "^7.23.4", + "@babel/plugin-transform-object-rest-spread": "^7.23.4", + "@babel/plugin-transform-object-super": "^7.23.3", + "@babel/plugin-transform-optional-catch-binding": "^7.23.4", + "@babel/plugin-transform-optional-chaining": "^7.23.4", + "@babel/plugin-transform-parameters": "^7.23.3", + "@babel/plugin-transform-private-methods": "^7.23.3", + "@babel/plugin-transform-private-property-in-object": "^7.23.4", + "@babel/plugin-transform-property-literals": "^7.23.3", + "@babel/plugin-transform-regenerator": "^7.23.3", + "@babel/plugin-transform-reserved-words": "^7.23.3", + "@babel/plugin-transform-shorthand-properties": "^7.23.3", + "@babel/plugin-transform-spread": "^7.23.3", + "@babel/plugin-transform-sticky-regex": "^7.23.3", + "@babel/plugin-transform-template-literals": "^7.23.3", + "@babel/plugin-transform-typeof-symbol": "^7.23.3", + "@babel/plugin-transform-unicode-escapes": "^7.23.3", + "@babel/plugin-transform-unicode-property-regex": "^7.23.3", + "@babel/plugin-transform-unicode-regex": "^7.23.3", + "@babel/plugin-transform-unicode-sets-regex": "^7.23.3", + "@babel/preset-modules": "0.1.6-no-external-plugins", + "babel-plugin-polyfill-corejs2": "^0.4.6", + "babel-plugin-polyfill-corejs3": "^0.8.5", + "babel-plugin-polyfill-regenerator": "^0.5.3", + "core-js-compat": "^3.31.0", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/preset-env/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": 
"sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/preset-modules": { + "version": "0.1.6-no-external-plugins", + "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.6-no-external-plugins.tgz", + "integrity": "sha512-HrcgcIESLm9aIR842yhJ5RWan/gebQUJ6E/E5+rf0y9o6oj7w0Br+sWuL6kEQ/o/AdfvR1Je9jG18/gnpwjEyA==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@babel/types": "^7.4.4", + "esutils": "^2.0.2" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/@babel/preset-react": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/preset-react/-/preset-react-7.23.3.tgz", + "integrity": "sha512-tbkHOS9axH6Ysf2OUEqoSZ6T3Fa2SrNH6WTWSPBboxKzdxNc9qOICeLXkNG0ZEwbQ1HY8liwOce4aN/Ceyuq6w==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-validator-option": "^7.22.15", + "@babel/plugin-transform-react-display-name": "^7.23.3", + "@babel/plugin-transform-react-jsx": "^7.22.15", + "@babel/plugin-transform-react-jsx-development": "^7.22.5", + "@babel/plugin-transform-react-pure-annotations": "^7.23.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/preset-typescript": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.23.3.tgz", + "integrity": "sha512-17oIGVlqz6CchO9RFYn5U6ZpWRZIngayYCtrPRSgANSwC2V1Jb+iP74nVxzzXJte8b8BYxrL1yY96xfhTBrNNQ==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-validator-option": "^7.22.15", + "@babel/plugin-syntax-jsx": "^7.23.3", + "@babel/plugin-transform-modules-commonjs": "^7.23.3", + "@babel/plugin-transform-typescript": "^7.23.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/regjsgen": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/@babel/regjsgen/-/regjsgen-0.8.0.tgz", + "integrity": "sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA==" + }, + "node_modules/@babel/runtime": { + "version": "7.23.6", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.23.6.tgz", + "integrity": "sha512-zHd0eUrf5GZoOWVCXp6koAKQTfZV07eit6bGPmJgnZdnSAvvZee6zniW2XMF7Cmc4ISOOnPy3QaSiIJGJkVEDQ==", + "dependencies": { + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/runtime-corejs3": { + "version": "7.23.6", + "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.23.6.tgz", + "integrity": "sha512-Djs/ZTAnpyj0nyg7p1J6oiE/tZ9G2stqAFlLGZynrW+F3k2w2jGK2mLOBxzYIOcZYA89+c3d3wXKpYLcpwcU6w==", + "dependencies": { + "core-js-pure": "^3.30.2", + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/template": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz", + "integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==", + "dependencies": { + "@babel/code-frame": "^7.22.13", + "@babel/parser": "^7.22.15", + "@babel/types": "^7.22.15" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.23.6", + "resolved": 
"https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.6.tgz", + "integrity": "sha512-czastdK1e8YByZqezMPFiZ8ahwVMh/ESl9vPgvgdB9AmFMGP5jfpFax74AQgl5zj4XHzqeYAg2l8PuUeRS1MgQ==", + "dependencies": { + "@babel/code-frame": "^7.23.5", + "@babel/generator": "^7.23.6", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/parser": "^7.23.6", + "@babel/types": "^7.23.6", + "debug": "^4.3.1", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.23.6", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.6.tgz", + "integrity": "sha512-+uarb83brBzPKN38NX1MkB6vb6+mwvR6amUulqAE7ccQw1pEl+bCia9TbdG1lsnFP7lZySvUn37CHyXQdfTwzg==", + "dependencies": { + "@babel/helper-string-parser": "^7.23.4", + "@babel/helper-validator-identifier": "^7.22.20", + "to-fast-properties": "^2.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@colors/colors": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz", + "integrity": "sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==", + "optional": true, + "engines": { + "node": ">=0.1.90" + } + }, + "node_modules/@discoveryjs/json-ext": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/@discoveryjs/json-ext/-/json-ext-0.5.7.tgz", + "integrity": "sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw==", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/@docsearch/css": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docsearch/css/-/css-3.5.2.tgz", + "integrity": "sha512-SPiDHaWKQZpwR2siD0KQUwlStvIAnEyK6tAE2h2Wuoq8ue9skzhlyVQ1ddzOxX6khULnAALDiR/isSF3bnuciA==" + }, + "node_modules/@docsearch/react": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docsearch/react/-/react-3.5.2.tgz", + "integrity": "sha512-9Ahcrs5z2jq/DcAvYtvlqEBHImbm4YJI8M9y0x6Tqg598P40HTEkX7hsMcIuThI+hTFxRGZ9hll0Wygm2yEjng==", + "dependencies": { + "@algolia/autocomplete-core": "1.9.3", + "@algolia/autocomplete-preset-algolia": "1.9.3", + "@docsearch/css": "3.5.2", + "algoliasearch": "^4.19.1" + }, + "peerDependencies": { + "@types/react": ">= 16.8.0 < 19.0.0", + "react": ">= 16.8.0 < 19.0.0", + "react-dom": ">= 16.8.0 < 19.0.0", + "search-insights": ">= 1 < 3" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "react": { + "optional": true + }, + "react-dom": { + "optional": true + }, + "search-insights": { + "optional": true + } + } + }, + "node_modules/@docusaurus/core": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-3.0.1.tgz", + "integrity": "sha512-CXrLpOnW+dJdSv8M5FAJ3JBwXtL6mhUWxFA8aS0ozK6jBG/wgxERk5uvH28fCeFxOGbAT9v1e9dOMo1X2IEVhQ==", + "dependencies": { + "@babel/core": "^7.23.3", + "@babel/generator": "^7.23.3", + "@babel/plugin-syntax-dynamic-import": "^7.8.3", + "@babel/plugin-transform-runtime": "^7.22.9", + "@babel/preset-env": "^7.22.9", + "@babel/preset-react": "^7.22.5", + "@babel/preset-typescript": "^7.22.5", + "@babel/runtime": "^7.22.6", + "@babel/runtime-corejs3": "^7.22.6", + "@babel/traverse": "^7.22.8", + "@docusaurus/cssnano-preset": "3.0.1", + "@docusaurus/logger": "3.0.1", + "@docusaurus/mdx-loader": "3.0.1", + "@docusaurus/react-loadable": "5.5.2", + 
"@docusaurus/utils": "3.0.1", + "@docusaurus/utils-common": "3.0.1", + "@docusaurus/utils-validation": "3.0.1", + "@slorber/static-site-generator-webpack-plugin": "^4.0.7", + "@svgr/webpack": "^6.5.1", + "autoprefixer": "^10.4.14", + "babel-loader": "^9.1.3", + "babel-plugin-dynamic-import-node": "^2.3.3", + "boxen": "^6.2.1", + "chalk": "^4.1.2", + "chokidar": "^3.5.3", + "clean-css": "^5.3.2", + "cli-table3": "^0.6.3", + "combine-promises": "^1.1.0", + "commander": "^5.1.0", + "copy-webpack-plugin": "^11.0.0", + "core-js": "^3.31.1", + "css-loader": "^6.8.1", + "css-minimizer-webpack-plugin": "^4.2.2", + "cssnano": "^5.1.15", + "del": "^6.1.1", + "detect-port": "^1.5.1", + "escape-html": "^1.0.3", + "eta": "^2.2.0", + "file-loader": "^6.2.0", + "fs-extra": "^11.1.1", + "html-minifier-terser": "^7.2.0", + "html-tags": "^3.3.1", + "html-webpack-plugin": "^5.5.3", + "leven": "^3.1.0", + "lodash": "^4.17.21", + "mini-css-extract-plugin": "^2.7.6", + "postcss": "^8.4.26", + "postcss-loader": "^7.3.3", + "prompts": "^2.4.2", + "react-dev-utils": "^12.0.1", + "react-helmet-async": "^1.3.0", + "react-loadable": "npm:@docusaurus/react-loadable@5.5.2", + "react-loadable-ssr-addon-v5-slorber": "^1.0.1", + "react-router": "^5.3.4", + "react-router-config": "^5.1.1", + "react-router-dom": "^5.3.4", + "rtl-detect": "^1.0.4", + "semver": "^7.5.4", + "serve-handler": "^6.1.5", + "shelljs": "^0.8.5", + "terser-webpack-plugin": "^5.3.9", + "tslib": "^2.6.0", + "update-notifier": "^6.0.2", + "url-loader": "^4.1.1", + "webpack": "^5.88.1", + "webpack-bundle-analyzer": "^4.9.0", + "webpack-dev-server": "^4.15.1", + "webpack-merge": "^5.9.0", + "webpackbar": "^5.0.2" + }, + "bin": { + "docusaurus": "bin/docusaurus.mjs" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/cssnano-preset": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-3.0.1.tgz", + "integrity": "sha512-wjuXzkHMW+ig4BD6Ya1Yevx9UJadO4smNZCEljqBoQfIQrQskTswBs7lZ8InHP7mCt273a/y/rm36EZhqJhknQ==", + "dependencies": { + "cssnano-preset-advanced": "^5.3.10", + "postcss": "^8.4.26", + "postcss-sort-media-queries": "^4.4.1", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + } + }, + "node_modules/@docusaurus/logger": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-3.0.1.tgz", + "integrity": "sha512-I5L6Nk8OJzkVA91O2uftmo71LBSxe1vmOn9AMR6JRCzYeEBrqneWMH02AqMvjJ2NpMiviO+t0CyPjyYV7nxCWQ==", + "dependencies": { + "chalk": "^4.1.2", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + } + }, + "node_modules/@docusaurus/mdx-loader": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-3.0.1.tgz", + "integrity": "sha512-ldnTmvnvlrONUq45oKESrpy+lXtbnTcTsFkOTIDswe5xx5iWJjt6eSa0f99ZaWlnm24mlojcIGoUWNCS53qVlQ==", + "dependencies": { + "@babel/parser": "^7.22.7", + "@babel/traverse": "^7.22.8", + "@docusaurus/logger": "3.0.1", + "@docusaurus/utils": "3.0.1", + "@docusaurus/utils-validation": "3.0.1", + "@mdx-js/mdx": "^3.0.0", + "@slorber/remark-comment": "^1.0.0", + "escape-html": "^1.0.3", + "estree-util-value-to-estree": "^3.0.1", + "file-loader": "^6.2.0", + "fs-extra": "^11.1.1", + "image-size": "^1.0.2", + "mdast-util-mdx": "^3.0.0", + "mdast-util-to-string": "^4.0.0", + "rehype-raw": "^7.0.0", + "remark-directive": "^3.0.0", + "remark-emoji": "^4.0.0", + "remark-frontmatter": "^5.0.0", + 
"remark-gfm": "^4.0.0", + "stringify-object": "^3.3.0", + "tslib": "^2.6.0", + "unified": "^11.0.3", + "unist-util-visit": "^5.0.0", + "url-loader": "^4.1.1", + "vfile": "^6.0.1", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/module-type-aliases": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-3.0.1.tgz", + "integrity": "sha512-DEHpeqUDsLynl3AhQQiO7AbC7/z/lBra34jTcdYuvp9eGm01pfH1wTVq8YqWZq6Jyx0BgcVl/VJqtE9StRd9Ag==", + "dependencies": { + "@docusaurus/react-loadable": "5.5.2", + "@docusaurus/types": "3.0.1", + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router-config": "*", + "@types/react-router-dom": "*", + "react-helmet-async": "*", + "react-loadable": "npm:@docusaurus/react-loadable@5.5.2" + }, + "peerDependencies": { + "react": "*", + "react-dom": "*" + } + }, + "node_modules/@docusaurus/plugin-content-blog": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-3.0.1.tgz", + "integrity": "sha512-cLOvtvAyaMQFLI8vm4j26svg3ktxMPSXpuUJ7EERKoGbfpJSsgtowNHcRsaBVmfuCsRSk1HZ/yHBsUkTmHFEsg==", + "dependencies": { + "@docusaurus/core": "3.0.1", + "@docusaurus/logger": "3.0.1", + "@docusaurus/mdx-loader": "3.0.1", + "@docusaurus/types": "3.0.1", + "@docusaurus/utils": "3.0.1", + "@docusaurus/utils-common": "3.0.1", + "@docusaurus/utils-validation": "3.0.1", + "cheerio": "^1.0.0-rc.12", + "feed": "^4.2.2", + "fs-extra": "^11.1.1", + "lodash": "^4.17.21", + "reading-time": "^1.5.0", + "srcset": "^4.0.0", + "tslib": "^2.6.0", + "unist-util-visit": "^5.0.0", + "utility-types": "^3.10.0", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-content-docs": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-3.0.1.tgz", + "integrity": "sha512-dRfAOA5Ivo+sdzzJGXEu33yAtvGg8dlZkvt/NEJ7nwi1F2j4LEdsxtfX2GKeETB2fP6XoGNSQnFXqa2NYGrHFg==", + "dependencies": { + "@docusaurus/core": "3.0.1", + "@docusaurus/logger": "3.0.1", + "@docusaurus/mdx-loader": "3.0.1", + "@docusaurus/module-type-aliases": "3.0.1", + "@docusaurus/types": "3.0.1", + "@docusaurus/utils": "3.0.1", + "@docusaurus/utils-validation": "3.0.1", + "@types/react-router-config": "^5.0.7", + "combine-promises": "^1.1.0", + "fs-extra": "^11.1.1", + "js-yaml": "^4.1.0", + "lodash": "^4.17.21", + "tslib": "^2.6.0", + "utility-types": "^3.10.0", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-content-pages": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-3.0.1.tgz", + "integrity": "sha512-oP7PoYizKAXyEttcvVzfX3OoBIXEmXTMzCdfmC4oSwjG4SPcJsRge3mmI6O8jcZBgUPjIzXD21bVGWEE1iu8gg==", + "dependencies": { + "@docusaurus/core": "3.0.1", + "@docusaurus/mdx-loader": "3.0.1", + "@docusaurus/types": "3.0.1", + "@docusaurus/utils": "3.0.1", + "@docusaurus/utils-validation": "3.0.1", + "fs-extra": "^11.1.1", + "tslib": "^2.6.0", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + 
"node_modules/@docusaurus/plugin-debug": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-3.0.1.tgz", + "integrity": "sha512-09dxZMdATky4qdsZGzhzlUvvC+ilQ2hKbYF+wez+cM2mGo4qHbv8+qKXqxq0CQZyimwlAOWQLoSozIXU0g0i7g==", + "dependencies": { + "@docusaurus/core": "3.0.1", + "@docusaurus/types": "3.0.1", + "@docusaurus/utils": "3.0.1", + "fs-extra": "^11.1.1", + "react-json-view-lite": "^1.2.0", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-google-analytics": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-3.0.1.tgz", + "integrity": "sha512-jwseSz1E+g9rXQwDdr0ZdYNjn8leZBnKPjjQhMBEiwDoenL3JYFcNW0+p0sWoVF/f2z5t7HkKA+cYObrUh18gg==", + "dependencies": { + "@docusaurus/core": "3.0.1", + "@docusaurus/types": "3.0.1", + "@docusaurus/utils-validation": "3.0.1", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-google-gtag": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-3.0.1.tgz", + "integrity": "sha512-UFTDvXniAWrajsulKUJ1DB6qplui1BlKLQZjX4F7qS/qfJ+qkKqSkhJ/F4VuGQ2JYeZstYb+KaUzUzvaPK1aRQ==", + "dependencies": { + "@docusaurus/core": "3.0.1", + "@docusaurus/types": "3.0.1", + "@docusaurus/utils-validation": "3.0.1", + "@types/gtag.js": "^0.0.12", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-google-tag-manager": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-3.0.1.tgz", + "integrity": "sha512-IPFvuz83aFuheZcWpTlAdiiX1RqWIHM+OH8wS66JgwAKOiQMR3+nLywGjkLV4bp52x7nCnwhNk1rE85Cpy/CIw==", + "dependencies": { + "@docusaurus/core": "3.0.1", + "@docusaurus/types": "3.0.1", + "@docusaurus/utils-validation": "3.0.1", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/plugin-sitemap": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-3.0.1.tgz", + "integrity": "sha512-xARiWnjtVvoEniZudlCq5T9ifnhCu/GAZ5nA7XgyLfPcNpHQa241HZdsTlLtVcecEVVdllevBKOp7qknBBaMGw==", + "dependencies": { + "@docusaurus/core": "3.0.1", + "@docusaurus/logger": "3.0.1", + "@docusaurus/types": "3.0.1", + "@docusaurus/utils": "3.0.1", + "@docusaurus/utils-common": "3.0.1", + "@docusaurus/utils-validation": "3.0.1", + "fs-extra": "^11.1.1", + "sitemap": "^7.1.1", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/preset-classic": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-3.0.1.tgz", + "integrity": "sha512-il9m9xZKKjoXn6h0cRcdnt6wce0Pv1y5t4xk2Wx7zBGhKG1idu4IFHtikHlD0QPuZ9fizpXspXcTzjL5FXc1Gw==", + "dependencies": { + "@docusaurus/core": "3.0.1", + "@docusaurus/plugin-content-blog": "3.0.1", + "@docusaurus/plugin-content-docs": "3.0.1", + "@docusaurus/plugin-content-pages": "3.0.1", + "@docusaurus/plugin-debug": 
"3.0.1", + "@docusaurus/plugin-google-analytics": "3.0.1", + "@docusaurus/plugin-google-gtag": "3.0.1", + "@docusaurus/plugin-google-tag-manager": "3.0.1", + "@docusaurus/plugin-sitemap": "3.0.1", + "@docusaurus/theme-classic": "3.0.1", + "@docusaurus/theme-common": "3.0.1", + "@docusaurus/theme-search-algolia": "3.0.1", + "@docusaurus/types": "3.0.1" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/react-loadable": { + "version": "5.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz", + "integrity": "sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==", + "dependencies": { + "@types/react": "*", + "prop-types": "^15.6.2" + }, + "peerDependencies": { + "react": "*" + } + }, + "node_modules/@docusaurus/theme-classic": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-3.0.1.tgz", + "integrity": "sha512-XD1FRXaJiDlmYaiHHdm27PNhhPboUah9rqIH0lMpBt5kYtsGjJzhqa27KuZvHLzOP2OEpqd2+GZ5b6YPq7Q05Q==", + "dependencies": { + "@docusaurus/core": "3.0.1", + "@docusaurus/mdx-loader": "3.0.1", + "@docusaurus/module-type-aliases": "3.0.1", + "@docusaurus/plugin-content-blog": "3.0.1", + "@docusaurus/plugin-content-docs": "3.0.1", + "@docusaurus/plugin-content-pages": "3.0.1", + "@docusaurus/theme-common": "3.0.1", + "@docusaurus/theme-translations": "3.0.1", + "@docusaurus/types": "3.0.1", + "@docusaurus/utils": "3.0.1", + "@docusaurus/utils-common": "3.0.1", + "@docusaurus/utils-validation": "3.0.1", + "@mdx-js/react": "^3.0.0", + "clsx": "^2.0.0", + "copy-text-to-clipboard": "^3.2.0", + "infima": "0.2.0-alpha.43", + "lodash": "^4.17.21", + "nprogress": "^0.2.0", + "postcss": "^8.4.26", + "prism-react-renderer": "^2.3.0", + "prismjs": "^1.29.0", + "react-router-dom": "^5.3.4", + "rtlcss": "^4.1.0", + "tslib": "^2.6.0", + "utility-types": "^3.10.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/theme-common": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-3.0.1.tgz", + "integrity": "sha512-cr9TOWXuIOL0PUfuXv6L5lPlTgaphKP+22NdVBOYah5jSq5XAAulJTjfe+IfLsEG4L7lJttLbhW7LXDFSAI7Ag==", + "dependencies": { + "@docusaurus/mdx-loader": "3.0.1", + "@docusaurus/module-type-aliases": "3.0.1", + "@docusaurus/plugin-content-blog": "3.0.1", + "@docusaurus/plugin-content-docs": "3.0.1", + "@docusaurus/plugin-content-pages": "3.0.1", + "@docusaurus/utils": "3.0.1", + "@docusaurus/utils-common": "3.0.1", + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router-config": "*", + "clsx": "^2.0.0", + "parse-numeric-range": "^1.3.0", + "prism-react-renderer": "^2.3.0", + "tslib": "^2.6.0", + "utility-types": "^3.10.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/theme-search-algolia": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-3.0.1.tgz", + "integrity": "sha512-DDiPc0/xmKSEdwFkXNf1/vH1SzJPzuJBar8kMcBbDAZk/SAmo/4lf6GU2drou4Ae60lN2waix+jYWTWcJRahSA==", + "dependencies": { + "@docsearch/react": "^3.5.2", + "@docusaurus/core": "3.0.1", + "@docusaurus/logger": "3.0.1", + "@docusaurus/plugin-content-docs": "3.0.1", + 
"@docusaurus/theme-common": "3.0.1", + "@docusaurus/theme-translations": "3.0.1", + "@docusaurus/utils": "3.0.1", + "@docusaurus/utils-validation": "3.0.1", + "algoliasearch": "^4.18.0", + "algoliasearch-helper": "^3.13.3", + "clsx": "^2.0.0", + "eta": "^2.2.0", + "fs-extra": "^11.1.1", + "lodash": "^4.17.21", + "tslib": "^2.6.0", + "utility-types": "^3.10.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/theme-translations": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-3.0.1.tgz", + "integrity": "sha512-6UrbpzCTN6NIJnAtZ6Ne9492vmPVX+7Fsz4kmp+yor3KQwA1+MCzQP7ItDNkP38UmVLnvB/cYk/IvehCUqS3dg==", + "dependencies": { + "fs-extra": "^11.1.1", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + } + }, + "node_modules/@docusaurus/types": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-3.0.1.tgz", + "integrity": "sha512-plyX2iU1tcUsF46uQ01pAd4JhexR7n0iiQ5MSnBFX6M6NSJgDYdru/i1/YNPKOnQHBoXGLHv0dNT6OAlDWNjrg==", + "dependencies": { + "@types/history": "^4.7.11", + "@types/react": "*", + "commander": "^5.1.0", + "joi": "^17.9.2", + "react-helmet-async": "^1.3.0", + "utility-types": "^3.10.0", + "webpack": "^5.88.1", + "webpack-merge": "^5.9.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, + "node_modules/@docusaurus/utils": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-3.0.1.tgz", + "integrity": "sha512-TwZ33Am0q4IIbvjhUOs+zpjtD/mXNmLmEgeTGuRq01QzulLHuPhaBTTAC/DHu6kFx3wDgmgpAlaRuCHfTcXv8g==", + "dependencies": { + "@docusaurus/logger": "3.0.1", + "@svgr/webpack": "^6.5.1", + "escape-string-regexp": "^4.0.0", + "file-loader": "^6.2.0", + "fs-extra": "^11.1.1", + "github-slugger": "^1.5.0", + "globby": "^11.1.0", + "gray-matter": "^4.0.3", + "jiti": "^1.20.0", + "js-yaml": "^4.1.0", + "lodash": "^4.17.21", + "micromatch": "^4.0.5", + "resolve-pathname": "^3.0.0", + "shelljs": "^0.8.5", + "tslib": "^2.6.0", + "url-loader": "^4.1.1", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "@docusaurus/types": "*" + }, + "peerDependenciesMeta": { + "@docusaurus/types": { + "optional": true + } + } + }, + "node_modules/@docusaurus/utils-common": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-3.0.1.tgz", + "integrity": "sha512-W0AxD6w6T8g6bNro8nBRWf7PeZ/nn7geEWM335qHU2DDDjHuV4UZjgUGP1AQsdcSikPrlIqTJJbKzer1lRSlIg==", + "dependencies": { + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "@docusaurus/types": "*" + }, + "peerDependenciesMeta": { + "@docusaurus/types": { + "optional": true + } + } + }, + "node_modules/@docusaurus/utils-validation": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-3.0.1.tgz", + "integrity": "sha512-ujTnqSfyGQ7/4iZdB4RRuHKY/Nwm58IIb+41s5tCXOv/MBU2wGAjOHq3U+AEyJ8aKQcHbxvTKJaRchNHYUVUQg==", + "dependencies": { + "@docusaurus/logger": "3.0.1", + "@docusaurus/utils": "3.0.1", + "joi": "^17.9.2", + "js-yaml": "^4.1.0", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + } + }, + "node_modules/@hapi/hoek": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz", + "integrity": 
"sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ==" + }, + "node_modules/@hapi/topo": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@hapi/topo/-/topo-5.1.0.tgz", + "integrity": "sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg==", + "dependencies": { + "@hapi/hoek": "^9.0.0" + } + }, + "node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.3", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz", + "integrity": "sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==", + "dependencies": { + "@jridgewell/set-array": "^1.0.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.9" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.1.tgz", + "integrity": "sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/set-array": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz", + "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/source-map": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/source-map/-/source-map-0.3.5.tgz", + "integrity": "sha512-UTYAUj/wviwdsMfzoSJspJxbkH5o1snzwX0//0ENX1u/55kkZZkcTZP6u9bwKGkv+dkk9at4m1Cpt0uY80kcpQ==", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.0", + "@jridgewell/trace-mapping": "^0.3.9" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.4.15", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", + "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.20", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.20.tgz", + "integrity": "sha512-R8LcPeWZol2zR8mmH3JeKQ6QRCFb7XgUhV9ZlGhHLGyg4wpPiPZNQOOWhFZhxKw8u//yTbNGI42Bx/3paXEQ+Q==", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@leichtgewicht/ip-codec": { + "version": "2.0.4", + "resolved": 
"https://registry.npmjs.org/@leichtgewicht/ip-codec/-/ip-codec-2.0.4.tgz", + "integrity": "sha512-Hcv+nVC0kZnQ3tD9GVu5xSMR4VVYOteQIr/hwFPVEvPdlXqgGEuRjiheChHgdM+JyqdgNcmzZOX/tnl0JOiI7A==" + }, + "node_modules/@mdx-js/mdx": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@mdx-js/mdx/-/mdx-3.0.0.tgz", + "integrity": "sha512-Icm0TBKBLYqroYbNW3BPnzMGn+7mwpQOK310aZ7+fkCtiU3aqv2cdcX+nd0Ydo3wI5Rx8bX2Z2QmGb/XcAClCw==", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdx": "^2.0.0", + "collapse-white-space": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-build-jsx": "^3.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "estree-util-to-js": "^2.0.0", + "estree-walker": "^3.0.0", + "hast-util-to-estree": "^3.0.0", + "hast-util-to-jsx-runtime": "^2.0.0", + "markdown-extensions": "^2.0.0", + "periscopic": "^3.0.0", + "remark-mdx": "^3.0.0", + "remark-parse": "^11.0.0", + "remark-rehype": "^11.0.0", + "source-map": "^0.7.0", + "unified": "^11.0.0", + "unist-util-position-from-estree": "^2.0.0", + "unist-util-stringify-position": "^4.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/@mdx-js/react": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@mdx-js/react/-/react-3.0.0.tgz", + "integrity": "sha512-nDctevR9KyYFyV+m+/+S4cpzCWHqj+iHDHq3QrsWezcC+B17uZdIWgCguESUkwFhM3n/56KxWVE3V6EokrmONQ==", + "dependencies": { + "@types/mdx": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "peerDependencies": { + "@types/react": ">=16", + "react": ">=16" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@pnpm/config.env-replace": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@pnpm/config.env-replace/-/config.env-replace-1.1.0.tgz", + "integrity": "sha512-htyl8TWnKL7K/ESFa1oW2UB5lVDxuF5DpM7tBi6Hu2LNL3mWkIzNLG6N4zoCUP1lCKNxWy/3iu8mS8MvToGd6w==", + "engines": { + "node": ">=12.22.0" + } + }, + "node_modules/@pnpm/network.ca-file": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@pnpm/network.ca-file/-/network.ca-file-1.0.2.tgz", + "integrity": "sha512-YcPQ8a0jwYU9bTdJDpXjMi7Brhkr1mXsXrUJvjqM2mQDgkRiz8jFaQGOdaLxgjtUfQgZhKy/O3cG/YwmgKaxLA==", + "dependencies": { + "graceful-fs": "4.2.10" + }, + "engines": { + "node": ">=12.22.0" + } + }, + 
"node_modules/@pnpm/network.ca-file/node_modules/graceful-fs": { + "version": "4.2.10", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.10.tgz", + "integrity": "sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==" + }, + "node_modules/@pnpm/npm-conf": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/@pnpm/npm-conf/-/npm-conf-2.2.2.tgz", + "integrity": "sha512-UA91GwWPhFExt3IizW6bOeY/pQ0BkuNwKjk9iQW9KqxluGCrg4VenZ0/L+2Y0+ZOtme72EVvg6v0zo3AMQRCeA==", + "dependencies": { + "@pnpm/config.env-replace": "^1.1.0", + "@pnpm/network.ca-file": "^1.0.1", + "config-chain": "^1.1.11" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@polka/url": { + "version": "1.0.0-next.24", + "resolved": "https://registry.npmjs.org/@polka/url/-/url-1.0.0-next.24.tgz", + "integrity": "sha512-2LuNTFBIO0m7kKIQvvPHN6UE63VjpmL9rnEEaOOaiSPbZK+zUOYIzBAWcED+3XYzhYsd/0mD57VdxAEqqV52CQ==" + }, + "node_modules/@sideway/address": { + "version": "4.1.4", + "resolved": "https://registry.npmjs.org/@sideway/address/-/address-4.1.4.tgz", + "integrity": "sha512-7vwq+rOHVWjyXxVlR76Agnvhy8I9rpzjosTESvmhNeXOXdZZB15Fl+TI9x1SiHZH5Jv2wTGduSxFDIaq0m3DUw==", + "dependencies": { + "@hapi/hoek": "^9.0.0" + } + }, + "node_modules/@sideway/formula": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@sideway/formula/-/formula-3.0.1.tgz", + "integrity": "sha512-/poHZJJVjx3L+zVD6g9KgHfYnb443oi7wLu/XKojDviHy6HOEOA6z1Trk5aR1dGcmPenJEgb2sK2I80LeS3MIg==" + }, + "node_modules/@sideway/pinpoint": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@sideway/pinpoint/-/pinpoint-2.0.0.tgz", + "integrity": "sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ==" + }, + "node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==" + }, + "node_modules/@sindresorhus/is": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-4.6.0.tgz", + "integrity": "sha512-t09vSN3MdfsyCHoFcTRCH/iUtG7OJ0CsjzB8cjAmKc/va/kIgeDI/TxsigdncE/4be734m0cvIYwNaV4i2XqAw==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/is?sponsor=1" + } + }, + "node_modules/@slorber/remark-comment": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@slorber/remark-comment/-/remark-comment-1.0.0.tgz", + "integrity": "sha512-RCE24n7jsOj1M0UPvIQCHTe7fI0sFL4S2nwKVWwHyVr/wI/H8GosgsJGyhnsZoGFnD/P2hLf1mSbrrgSLN93NA==", + "dependencies": { + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.1.0", + "micromark-util-symbol": "^1.0.1" + } + }, + "node_modules/@slorber/static-site-generator-webpack-plugin": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/@slorber/static-site-generator-webpack-plugin/-/static-site-generator-webpack-plugin-4.0.7.tgz", + "integrity": "sha512-Ug7x6z5lwrz0WqdnNFOMYrDQNTPAprvHLSh6+/fmml3qUiz6l5eq+2MzLKWtn/q5K5NpSiFsZTP/fck/3vjSxA==", + "dependencies": { + "eval": "^0.1.8", + "p-map": "^4.0.0", + "webpack-sources": "^3.2.2" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/@svgr/babel-plugin-add-jsx-attribute": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-6.5.1.tgz", + "integrity": 
"sha512-9PYGcXrAxitycIjRmZB+Q0JaN07GZIWaTBIGQzfaZv+qr1n8X1XUEJ5rZ/vx6OVD9RRYlrNnXWExQXcmZeD/BQ==", + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-remove-jsx-attribute": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-8.0.0.tgz", + "integrity": "sha512-BcCkm/STipKvbCl6b7QFrMh/vx00vIP63k2eM66MfHJzPr6O2U0jYEViXkHJWqXqQYjdeA9cuCl5KWmlwjDvbA==", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-remove-jsx-empty-expression": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-8.0.0.tgz", + "integrity": "sha512-5BcGCBfBxB5+XSDSWnhTThfI9jcO5f0Ai2V24gZpG+wXF14BzwxxdDb4g6trdOux0rhibGs385BeFMSmxtS3uA==", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-replace-jsx-attribute-value": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-6.5.1.tgz", + "integrity": "sha512-8DPaVVE3fd5JKuIC29dqyMB54sA6mfgki2H2+swh+zNJoynC8pMPzOkidqHOSc6Wj032fhl8Z0TVn1GiPpAiJg==", + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-svg-dynamic-title": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-6.5.1.tgz", + "integrity": "sha512-FwOEi0Il72iAzlkaHrlemVurgSQRDFbk0OC8dSvD5fSBPHltNh7JtLsxmZUhjYBZo2PpcU/RJvvi6Q0l7O7ogw==", + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-svg-em-dimensions": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-6.5.1.tgz", + "integrity": "sha512-gWGsiwjb4tw+ITOJ86ndY/DZZ6cuXMNE/SjcDRg+HLuCmwpcjOktwRF9WgAiycTqJD/QXqL2f8IzE2Rzh7aVXA==", + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-transform-react-native-svg": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-6.5.1.tgz", + "integrity": "sha512-2jT3nTayyYP7kI6aGutkyfJ7UMGtuguD72OjeGLwVNyfPRBD8zQthlvL+fAbAKk5n9ZNcvFkp/b1lZ7VsYqVJg==", + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-transform-svg-component": { + "version": "6.5.1", + "resolved": 
"https://registry.npmjs.org/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-6.5.1.tgz", + "integrity": "sha512-a1p6LF5Jt33O3rZoVRBqdxL350oge54iZWHNI6LJB5tQ7EelvD/Mb1mfBiZNAan0dt4i3VArkFRjA4iObuNykQ==", + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-preset": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/babel-preset/-/babel-preset-6.5.1.tgz", + "integrity": "sha512-6127fvO/FF2oi5EzSQOAjo1LE3OtNVh11R+/8FXa+mHx1ptAaS4cknIjnUA7e6j6fwGGJ17NzaTJFUwOV2zwCw==", + "dependencies": { + "@svgr/babel-plugin-add-jsx-attribute": "^6.5.1", + "@svgr/babel-plugin-remove-jsx-attribute": "*", + "@svgr/babel-plugin-remove-jsx-empty-expression": "*", + "@svgr/babel-plugin-replace-jsx-attribute-value": "^6.5.1", + "@svgr/babel-plugin-svg-dynamic-title": "^6.5.1", + "@svgr/babel-plugin-svg-em-dimensions": "^6.5.1", + "@svgr/babel-plugin-transform-react-native-svg": "^6.5.1", + "@svgr/babel-plugin-transform-svg-component": "^6.5.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/core": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/core/-/core-6.5.1.tgz", + "integrity": "sha512-/xdLSWxK5QkqG524ONSjvg3V/FkNyCv538OIBdQqPNaAta3AsXj/Bd2FbvR87yMbXO2hFSWiAe/Q6IkVPDw+mw==", + "dependencies": { + "@babel/core": "^7.19.6", + "@svgr/babel-preset": "^6.5.1", + "@svgr/plugin-jsx": "^6.5.1", + "camelcase": "^6.2.0", + "cosmiconfig": "^7.0.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + } + }, + "node_modules/@svgr/hast-util-to-babel-ast": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-6.5.1.tgz", + "integrity": "sha512-1hnUxxjd83EAxbL4a0JDJoD3Dao3hmjvyvyEV8PzWmLK3B9m9NPlW7GKjFyoWE8nM7HnXzPcmmSyOW8yOddSXw==", + "dependencies": { + "@babel/types": "^7.20.0", + "entities": "^4.4.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + } + }, + "node_modules/@svgr/plugin-jsx": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/plugin-jsx/-/plugin-jsx-6.5.1.tgz", + "integrity": "sha512-+UdQxI3jgtSjCykNSlEMuy1jSRQlGC7pqBCPvkG/2dATdWo082zHTTK3uhnAju2/6XpE6B5mZ3z4Z8Ns01S8Gw==", + "dependencies": { + "@babel/core": "^7.19.6", + "@svgr/babel-preset": "^6.5.1", + "@svgr/hast-util-to-babel-ast": "^6.5.1", + "svg-parser": "^2.0.4" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@svgr/core": "^6.0.0" + } + }, + "node_modules/@svgr/plugin-svgo": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/plugin-svgo/-/plugin-svgo-6.5.1.tgz", + "integrity": "sha512-omvZKf8ixP9z6GWgwbtmP9qQMPX4ODXi+wzbVZgomNFsUIlHA1sf4fThdwTWSsZGgvGAG6yE+b/F5gWUkcZ/iQ==", + "dependencies": { + "cosmiconfig": "^7.0.1", + "deepmerge": "^4.2.2", + "svgo": "^2.8.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@svgr/core": "*" + } + }, + "node_modules/@svgr/webpack": { + 
"version": "6.5.1", + "resolved": "https://registry.npmjs.org/@svgr/webpack/-/webpack-6.5.1.tgz", + "integrity": "sha512-cQ/AsnBkXPkEK8cLbv4Dm7JGXq2XrumKnL1dRpJD9rIO2fTIlJI9a1uCciYG1F2aUsox/hJQyNGbt3soDxSRkA==", + "dependencies": { + "@babel/core": "^7.19.6", + "@babel/plugin-transform-react-constant-elements": "^7.18.12", + "@babel/preset-env": "^7.19.4", + "@babel/preset-react": "^7.18.6", + "@babel/preset-typescript": "^7.18.6", + "@svgr/core": "^6.5.1", + "@svgr/plugin-jsx": "^6.5.1", + "@svgr/plugin-svgo": "^6.5.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + } + }, + "node_modules/@szmarczak/http-timer": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-5.0.1.tgz", + "integrity": "sha512-+PmQX0PiAYPMeVYe237LJAYvOMYW1j2rH5YROyS3b4CTVJum34HfRvKvAzozHAQG0TnHNdUfY9nCeUyRAs//cw==", + "dependencies": { + "defer-to-connect": "^2.0.1" + }, + "engines": { + "node": ">=14.16" + } + }, + "node_modules/@trysound/sax": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/@trysound/sax/-/sax-0.2.0.tgz", + "integrity": "sha512-L7z9BgrNEcYyUYtF+HaEfiS5ebkh9jXqbszz7pC0hRBPaatV0XjSD3+eHrpqFemQfgwiFF0QPIarnIihIDn7OA==", + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/@types/acorn": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/@types/acorn/-/acorn-4.0.6.tgz", + "integrity": "sha512-veQTnWP+1D/xbxVrPC3zHnCZRjSrKfhbMUlEA43iMZLu7EsnTtkJklIuwrCPbOi8YkvDQAiW05VQQFvvz9oieQ==", + "dependencies": { + "@types/estree": "*" + } + }, + "node_modules/@types/body-parser": { + "version": "1.19.5", + "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.5.tgz", + "integrity": "sha512-fB3Zu92ucau0iQ0JMCFQE7b/dv8Ot07NI3KaZIkIUNXq82k4eBAqUaneXfleGY9JWskeS9y+u0nXMyspcuQrCg==", + "dependencies": { + "@types/connect": "*", + "@types/node": "*" + } + }, + "node_modules/@types/bonjour": { + "version": "3.5.13", + "resolved": "https://registry.npmjs.org/@types/bonjour/-/bonjour-3.5.13.tgz", + "integrity": "sha512-z9fJ5Im06zvUL548KvYNecEVlA7cVDkGUi6kZusb04mpyEFKCIZJvloCcmpmLaIahDpOQGHaHmG6imtPMmPXGQ==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/connect": { + "version": "3.4.38", + "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.38.tgz", + "integrity": "sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/connect-history-api-fallback": { + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.5.4.tgz", + "integrity": "sha512-n6Cr2xS1h4uAulPRdlw6Jl6s1oG8KrVilPN2yUITEs+K48EzMJJ3W1xy8K5eWuFvjp3R74AOIGSmp2UfBJ8HFw==", + "dependencies": { + "@types/express-serve-static-core": "*", + "@types/node": "*" + } + }, + "node_modules/@types/debug": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz", + "integrity": "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==", + "dependencies": { + "@types/ms": "*" + } + }, + "node_modules/@types/eslint": { + "version": "8.56.0", + "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-8.56.0.tgz", + "integrity": "sha512-FlsN0p4FhuYRjIxpbdXovvHQhtlG05O1GG/RNWvdAxTboR438IOTwmrY/vLA+Xfgg06BTkP045M3vpFwTMv1dg==", + "dependencies": { + "@types/estree": "*", + 
"@types/json-schema": "*" + } + }, + "node_modules/@types/eslint-scope": { + "version": "3.7.7", + "resolved": "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.7.tgz", + "integrity": "sha512-MzMFlSLBqNF2gcHWO0G1vP/YQyfvrxZ0bF+u7mzUdZ1/xK4A4sru+nraZz5i3iEIk1l1uyicaDVTB4QbbEkAYg==", + "dependencies": { + "@types/eslint": "*", + "@types/estree": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", + "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==" + }, + "node_modules/@types/estree-jsx": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@types/estree-jsx/-/estree-jsx-1.0.3.tgz", + "integrity": "sha512-pvQ+TKeRHeiUGRhvYwRrQ/ISnohKkSJR14fT2yqyZ4e9K5vqc7hrtY2Y1Dw0ZwAzQ6DQsxsaCUuSIIi8v0Cq6w==", + "dependencies": { + "@types/estree": "*" + } + }, + "node_modules/@types/express": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.21.tgz", + "integrity": "sha512-ejlPM315qwLpaQlQDTjPdsUFSc6ZsP4AN6AlWnogPjQ7CVi7PYF3YVz+CY3jE2pwYf7E/7HlDAN0rV2GxTG0HQ==", + "dependencies": { + "@types/body-parser": "*", + "@types/express-serve-static-core": "^4.17.33", + "@types/qs": "*", + "@types/serve-static": "*" + } + }, + "node_modules/@types/express-serve-static-core": { + "version": "4.17.41", + "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.17.41.tgz", + "integrity": "sha512-OaJ7XLaelTgrvlZD8/aa0vvvxZdUmlCn6MtWeB7TkiKW70BQLc9XEPpDLPdbo52ZhXUCrznlWdCHWxJWtdyajA==", + "dependencies": { + "@types/node": "*", + "@types/qs": "*", + "@types/range-parser": "*", + "@types/send": "*" + } + }, + "node_modules/@types/gtag.js": { + "version": "0.0.12", + "resolved": "https://registry.npmjs.org/@types/gtag.js/-/gtag.js-0.0.12.tgz", + "integrity": "sha512-YQV9bUsemkzG81Ea295/nF/5GijnD2Af7QhEofh7xu+kvCN6RdodgNwwGWXB5GMI3NoyvQo0odNctoH/qLMIpg==" + }, + "node_modules/@types/hast": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.3.tgz", + "integrity": "sha512-2fYGlaDy/qyLlhidX42wAH0KBi2TCjKMH8CHmBXgRlJ3Y+OXTiqsPQ6IWarZKwF1JoUcAJdPogv1d4b0COTpmQ==", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/history": { + "version": "4.7.11", + "resolved": "https://registry.npmjs.org/@types/history/-/history-4.7.11.tgz", + "integrity": "sha512-qjDJRrmvBMiTx+jyLxvLfJU7UznFuokDv4f3WRuriHKERccVpFU+8XMQUAbDzoiJCsmexxRExQeMwwCdamSKDA==" + }, + "node_modules/@types/html-minifier-terser": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/@types/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", + "integrity": "sha512-oh/6byDPnL1zeNXFrDXFLyZjkr1MsBG667IM792caf1L2UPOOMf65NFzjUH/ltyfwjAGfs1rsX1eftK0jC/KIg==" + }, + "node_modules/@types/http-cache-semantics": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@types/http-cache-semantics/-/http-cache-semantics-4.0.4.tgz", + "integrity": "sha512-1m0bIFVc7eJWyve9S0RnuRgcQqF/Xd5QsUZAZeQFr1Q3/p9JWoQQEqmVy+DPTNpGXwhgIetAoYF8JSc33q29QA==" + }, + "node_modules/@types/http-errors": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@types/http-errors/-/http-errors-2.0.4.tgz", + "integrity": "sha512-D0CFMMtydbJAegzOyHjtiKPLlvnm3iTZyZRSZoLq2mRhDdmLfIWOCYPfQJ4cu2erKghU++QvjcUjp/5h7hESpA==" + }, + "node_modules/@types/http-proxy": { + "version": "1.17.14", + "resolved": 
"https://registry.npmjs.org/@types/http-proxy/-/http-proxy-1.17.14.tgz", + "integrity": "sha512-SSrD0c1OQzlFX7pGu1eXxSEjemej64aaNPRhhVYUGqXh0BtldAAx37MG8btcumvpgKyZp1F5Gn3JkktdxiFv6w==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/istanbul-lib-coverage": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz", + "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==" + }, + "node_modules/@types/istanbul-lib-report": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz", + "integrity": "sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==", + "dependencies": { + "@types/istanbul-lib-coverage": "*" + } + }, + "node_modules/@types/istanbul-reports": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz", + "integrity": "sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==", + "dependencies": { + "@types/istanbul-lib-report": "*" + } + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==" + }, + "node_modules/@types/mdast": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.3.tgz", + "integrity": "sha512-LsjtqsyF+d2/yFOYaN22dHZI1Cpwkrj+g06G8+qtUKlhovPW89YhqSnfKtMbkgmEtYpH2gydRNULd6y8mciAFg==", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/mdx": { + "version": "2.0.10", + "resolved": "https://registry.npmjs.org/@types/mdx/-/mdx-2.0.10.tgz", + "integrity": "sha512-Rllzc5KHk0Al5/WANwgSPl1/CwjqCy+AZrGd78zuK+jO9aDM6ffblZ+zIjgPNAaEBmlO0RYDvLNh7wD0zKVgEg==" + }, + "node_modules/@types/mime": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.5.tgz", + "integrity": "sha512-/pyBZWSLD2n0dcHE3hq8s8ZvcETHtEuF+3E7XVt0Ig2nvsVQXdghHVcEkIWjy9A0wKfTn97a/PSDYohKIlnP/w==" + }, + "node_modules/@types/ms": { + "version": "0.7.34", + "resolved": "https://registry.npmjs.org/@types/ms/-/ms-0.7.34.tgz", + "integrity": "sha512-nG96G3Wp6acyAgJqGasjODb+acrI7KltPiRxzHPXnP3NgI28bpQDRv53olbqGXbfcgF5aiiHmO3xpwEpS5Ld9g==" + }, + "node_modules/@types/node": { + "version": "20.10.5", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.10.5.tgz", + "integrity": "sha512-nNPsNE65wjMxEKI93yOP+NPGGBJz/PoN3kZsVLee0XMiJolxSekEVD8wRwBUBqkwc7UWop0edW50yrCQW4CyRw==", + "dependencies": { + "undici-types": "~5.26.4" + } + }, + "node_modules/@types/node-forge": { + "version": "1.3.10", + "resolved": "https://registry.npmjs.org/@types/node-forge/-/node-forge-1.3.10.tgz", + "integrity": "sha512-y6PJDYN4xYBxwd22l+OVH35N+1fCYWiuC3aiP2SlXVE6Lo7SS+rSx9r89hLxrP4pn6n1lBGhHJ12pj3F3Mpttw==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/parse-json": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.2.tgz", + "integrity": "sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw==" + }, + "node_modules/@types/prismjs": { + "version": "1.26.3", + "resolved": "https://registry.npmjs.org/@types/prismjs/-/prismjs-1.26.3.tgz", + "integrity": 
"sha512-A0D0aTXvjlqJ5ZILMz3rNfDBOx9hHxLZYv2by47Sm/pqW35zzjusrZTryatjN/Rf8Us2gZrJD+KeHbUSTux1Cw==" + }, + "node_modules/@types/prop-types": { + "version": "15.7.11", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.11.tgz", + "integrity": "sha512-ga8y9v9uyeiLdpKddhxYQkxNDrfvuPrlFb0N1qnZZByvcElJaXthF1UhvCh9TLWJBEHeNtdnbysW7Y6Uq8CVng==" + }, + "node_modules/@types/qs": { + "version": "6.9.11", + "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.11.tgz", + "integrity": "sha512-oGk0gmhnEJK4Yyk+oI7EfXsLayXatCWPHary1MtcmbAifkobT9cM9yutG/hZKIseOU0MqbIwQ/u2nn/Gb+ltuQ==" + }, + "node_modules/@types/range-parser": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.7.tgz", + "integrity": "sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ==" + }, + "node_modules/@types/react": { + "version": "18.2.45", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.45.tgz", + "integrity": "sha512-TtAxCNrlrBp8GoeEp1npd5g+d/OejJHFxS3OWmrPBMFaVQMSN0OFySozJio5BHxTuTeug00AVXVAjfDSfk+lUg==", + "dependencies": { + "@types/prop-types": "*", + "@types/scheduler": "*", + "csstype": "^3.0.2" + } + }, + "node_modules/@types/react-router": { + "version": "5.1.20", + "resolved": "https://registry.npmjs.org/@types/react-router/-/react-router-5.1.20.tgz", + "integrity": "sha512-jGjmu/ZqS7FjSH6owMcD5qpq19+1RS9DeVRqfl1FeBMxTDQAGwlMWOcs52NDoXaNKyG3d1cYQFMs9rCrb88o9Q==", + "dependencies": { + "@types/history": "^4.7.11", + "@types/react": "*" + } + }, + "node_modules/@types/react-router-config": { + "version": "5.0.11", + "resolved": "https://registry.npmjs.org/@types/react-router-config/-/react-router-config-5.0.11.tgz", + "integrity": "sha512-WmSAg7WgqW7m4x8Mt4N6ZyKz0BubSj/2tVUMsAHp+Yd2AMwcSbeFq9WympT19p5heCFmF97R9eD5uUR/t4HEqw==", + "dependencies": { + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router": "^5.1.0" + } + }, + "node_modules/@types/react-router-dom": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/@types/react-router-dom/-/react-router-dom-5.3.3.tgz", + "integrity": "sha512-kpqnYK4wcdm5UaWI3fLcELopqLrHgLqNsdpHauzlQktfkHL3npOSwtj1Uz9oKBAzs7lFtVkV8j83voAz2D8fhw==", + "dependencies": { + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router": "*" + } + }, + "node_modules/@types/retry": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", + "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==" + }, + "node_modules/@types/sax": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/@types/sax/-/sax-1.2.7.tgz", + "integrity": "sha512-rO73L89PJxeYM3s3pPPjiPgVVcymqU490g0YO5n5By0k2Erzj6tay/4lr1CHAAU4JyOWd1rpQ8bCf6cZfHU96A==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/scheduler": { + "version": "0.16.8", + "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.8.tgz", + "integrity": "sha512-WZLiwShhwLRmeV6zH+GkbOFT6Z6VklCItrDioxUnv+u4Ll+8vKeFySoFyK/0ctcRpOmwAicELfmys1sDc/Rw+A==" + }, + "node_modules/@types/send": { + "version": "0.17.4", + "resolved": "https://registry.npmjs.org/@types/send/-/send-0.17.4.tgz", + "integrity": "sha512-x2EM6TJOybec7c52BX0ZspPodMsQUd5L6PRwOunVyVUhXiBSKf3AezDL8Dgvgt5o0UfKNfuA0eMLr2wLT4AiBA==", + "dependencies": { + "@types/mime": "^1", + "@types/node": "*" + } + }, + "node_modules/@types/serve-index": { + "version": "1.9.4", 
+ "resolved": "https://registry.npmjs.org/@types/serve-index/-/serve-index-1.9.4.tgz", + "integrity": "sha512-qLpGZ/c2fhSs5gnYsQxtDEq3Oy8SXPClIXkW5ghvAvsNuVSA8k+gCONcUCS/UjLEYvYps+e8uBtfgXgvhwfNug==", + "dependencies": { + "@types/express": "*" + } + }, + "node_modules/@types/serve-static": { + "version": "1.15.5", + "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.15.5.tgz", + "integrity": "sha512-PDRk21MnK70hja/YF8AHfC7yIsiQHn1rcXx7ijCFBX/k+XQJhQT/gw3xekXKJvx+5SXaMMS8oqQy09Mzvz2TuQ==", + "dependencies": { + "@types/http-errors": "*", + "@types/mime": "*", + "@types/node": "*" + } + }, + "node_modules/@types/sockjs": { + "version": "0.3.36", + "resolved": "https://registry.npmjs.org/@types/sockjs/-/sockjs-0.3.36.tgz", + "integrity": "sha512-MK9V6NzAS1+Ud7JV9lJLFqW85VbC9dq3LmwZCuBe4wBDgKC0Kj/jd8Xl+nSviU+Qc3+m7umHHyHg//2KSa0a0Q==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/unist": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.2.tgz", + "integrity": "sha512-dqId9J8K/vGi5Zr7oo212BGii5m3q5Hxlkwy3WpYuKPklmBEvsbMYYyLxAQpSffdLl/gdW0XUpKWFvYmyoWCoQ==" + }, + "node_modules/@types/ws": { + "version": "8.5.10", + "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.10.tgz", + "integrity": "sha512-vmQSUcfalpIq0R9q7uTo2lXs6eGIpt9wtnLdMv9LVpIjCA/+ufZRozlVoVelIYixx1ugCBKDhn89vnsEGOCx9A==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/yargs": { + "version": "17.0.32", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.32.tgz", + "integrity": "sha512-xQ67Yc/laOG5uMfX/093MRlGGCIBzZMarVa+gfNKJxWAIgykYpVGkBdbqEzGDDfCrVUj6Hiff4mTZ5BA6TmAog==", + "dependencies": { + "@types/yargs-parser": "*" + } + }, + "node_modules/@types/yargs-parser": { + "version": "21.0.3", + "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.3.tgz", + "integrity": "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==" + }, + "node_modules/@ungap/structured-clone": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", + "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==" + }, + "node_modules/@webassemblyjs/ast": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.11.6.tgz", + "integrity": "sha512-IN1xI7PwOvLPgjcf180gC1bqn3q/QaOCwYUahIOhbYUu8KA/3tw2RT/T0Gidi1l7Hhj5D/INhJxiICObqpMu4Q==", + "dependencies": { + "@webassemblyjs/helper-numbers": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6" + } + }, + "node_modules/@webassemblyjs/floating-point-hex-parser": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.6.tgz", + "integrity": "sha512-ejAj9hfRJ2XMsNHk/v6Fu2dGS+i4UaXBXGemOfQ/JfQ6mdQg/WXtwleQRLLS4OvfDhv8rYnVwH27YJLMyYsxhw==" + }, + "node_modules/@webassemblyjs/helper-api-error": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.6.tgz", + "integrity": "sha512-o0YkoP4pVu4rN8aTJgAyj9hC2Sv5UlkzCHhxqWj8butaLvnpdc2jOwh4ewE6CX0txSfLn/UYaV/pheS2Txg//Q==" + }, + "node_modules/@webassemblyjs/helper-buffer": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.11.6.tgz", + "integrity": 
"sha512-z3nFzdcp1mb8nEOFFk8DrYLpHvhKC3grJD2ardfKOzmbmJvEf/tPIqCY+sNcwZIY8ZD7IkB2l7/pqhUhqm7hLA==" + }, + "node_modules/@webassemblyjs/helper-numbers": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.6.tgz", + "integrity": "sha512-vUIhZ8LZoIWHBohiEObxVm6hwP034jwmc9kuq5GdHZH0wiLVLIPcMCdpJzG4C11cHoQ25TFIQj9kaVADVX7N3g==", + "dependencies": { + "@webassemblyjs/floating-point-hex-parser": "1.11.6", + "@webassemblyjs/helper-api-error": "1.11.6", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/helper-wasm-bytecode": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.6.tgz", + "integrity": "sha512-sFFHKwcmBprO9e7Icf0+gddyWYDViL8bpPjJJl0WHxCdETktXdmtWLGVzoHbqUcY4Be1LkNfwTmXOJUFZYSJdA==" + }, + "node_modules/@webassemblyjs/helper-wasm-section": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.11.6.tgz", + "integrity": "sha512-LPpZbSOwTpEC2cgn4hTydySy1Ke+XEu+ETXuoyvuyezHO3Kjdu90KK95Sh9xTbmjrCsUwvWwCOQQNta37VrS9g==", + "dependencies": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-buffer": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/wasm-gen": "1.11.6" + } + }, + "node_modules/@webassemblyjs/ieee754": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.11.6.tgz", + "integrity": "sha512-LM4p2csPNvbij6U1f19v6WR56QZ8JcHg3QIJTlSwzFcmx6WSORicYj6I63f9yU1kEUtrpG+kjkiIAkevHpDXrg==", + "dependencies": { + "@xtuc/ieee754": "^1.2.0" + } + }, + "node_modules/@webassemblyjs/leb128": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.11.6.tgz", + "integrity": "sha512-m7a0FhE67DQXgouf1tbN5XQcdWoNgaAuoULHIfGFIEVKA6tu/edls6XnIlkmS6FrXAquJRPni3ZZKjw6FSPjPQ==", + "dependencies": { + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/utf8": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.11.6.tgz", + "integrity": "sha512-vtXf2wTQ3+up9Zsg8sa2yWiQpzSsMyXj0qViVP6xKGCUT8p8YJ6HqI7l5eCnWx1T/FYdsv07HQs2wTFbbof/RA==" + }, + "node_modules/@webassemblyjs/wasm-edit": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.11.6.tgz", + "integrity": "sha512-Ybn2I6fnfIGuCR+Faaz7YcvtBKxvoLV3Lebn1tM4o/IAJzmi9AWYIPWpyBfU8cC+JxAO57bk4+zdsTjJR+VTOw==", + "dependencies": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-buffer": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/helper-wasm-section": "1.11.6", + "@webassemblyjs/wasm-gen": "1.11.6", + "@webassemblyjs/wasm-opt": "1.11.6", + "@webassemblyjs/wasm-parser": "1.11.6", + "@webassemblyjs/wast-printer": "1.11.6" + } + }, + "node_modules/@webassemblyjs/wasm-gen": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.11.6.tgz", + "integrity": "sha512-3XOqkZP/y6B4F0PBAXvI1/bky7GryoogUtfwExeP/v7Nzwo1QLcq5oQmpKlftZLbT+ERUOAZVQjuNVak6UXjPA==", + "dependencies": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/ieee754": "1.11.6", + "@webassemblyjs/leb128": "1.11.6", + "@webassemblyjs/utf8": "1.11.6" + } + }, + "node_modules/@webassemblyjs/wasm-opt": { + "version": "1.11.6", + "resolved": 
"https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.11.6.tgz", + "integrity": "sha512-cOrKuLRE7PCe6AsOVl7WasYf3wbSo4CeOk6PkrjS7g57MFfVUF9u6ysQBBODX0LdgSvQqRiGz3CXvIDKcPNy4g==", + "dependencies": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-buffer": "1.11.6", + "@webassemblyjs/wasm-gen": "1.11.6", + "@webassemblyjs/wasm-parser": "1.11.6" + } + }, + "node_modules/@webassemblyjs/wasm-parser": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.11.6.tgz", + "integrity": "sha512-6ZwPeGzMJM3Dqp3hCsLgESxBGtT/OeCvCZ4TA1JUPYgmhAx38tTPR9JaKy0S5H3evQpO/h2uWs2j6Yc/fjkpTQ==", + "dependencies": { + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-api-error": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/ieee754": "1.11.6", + "@webassemblyjs/leb128": "1.11.6", + "@webassemblyjs/utf8": "1.11.6" + } + }, + "node_modules/@webassemblyjs/wast-printer": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.11.6.tgz", + "integrity": "sha512-JM7AhRcE+yW2GWYaKeHL5vt4xqee5N2WcezptmgyhNS+ScggqcT1OtXykhAb13Sn5Yas0j2uv9tHgrjwvzAP4A==", + "dependencies": { + "@webassemblyjs/ast": "1.11.6", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@xtuc/ieee754": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz", + "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==" + }, + "node_modules/@xtuc/long": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", + "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==" + }, + "node_modules/accepts": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", + "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", + "dependencies": { + "mime-types": "~2.1.34", + "negotiator": "0.6.3" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/accepts/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/accepts/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/acorn": { + "version": "8.11.2", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.2.tgz", + "integrity": "sha512-nc0Axzp/0FILLEVsm4fNwLCwMttvhEI263QtVPQcbpfZZ3ts0hLsZGOpE6czNlid7CJ9MlyH8reXkpsf3YUY4w==", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-import-assertions": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/acorn-import-assertions/-/acorn-import-assertions-1.9.0.tgz", + "integrity": "sha512-cmMwop9x+8KFhxvKrKfPYmN6/pKTYYHBqLa0DfvVZcKMJWNyWLnaqND7dx/qn66R7ewM1UX5XMaDVP5wlVTaVA==", + "peerDependencies": { + "acorn": "^8" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": 
"https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/acorn-walk": { + "version": "8.3.1", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.1.tgz", + "integrity": "sha512-TgUZgYvqZprrl7YldZNoa9OciCAyZR+Ejm9eXzKCmjsF5IKp/wgQ7Z/ZpjpGTIUPwrHQIcYeI8qDh4PsEwxMbw==", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/address": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/address/-/address-1.2.2.tgz", + "integrity": "sha512-4B/qKCfeE/ODUaAUpSwfzazo5x29WD4r3vXiWsB7I2mSDAihwEqKO+g8GELZUQSSAo5e1XTYh3ZVfLyxBc12nA==", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/aggregate-error": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", + "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", + "dependencies": { + "clean-stack": "^2.0.0", + "indent-string": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ajv": { + "version": "8.12.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", + "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ajv-formats": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz", + "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", + "dependencies": { + "ajv": "^8.0.0" + }, + "peerDependencies": { + "ajv": "^8.0.0" + }, + "peerDependenciesMeta": { + "ajv": { + "optional": true + } + } + }, + "node_modules/ajv-keywords": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", + "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "dependencies": { + "fast-deep-equal": "^3.1.3" + }, + "peerDependencies": { + "ajv": "^8.8.2" + } + }, + "node_modules/algoliasearch": { + "version": "4.22.0", + "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.22.0.tgz", + "integrity": "sha512-gfceltjkwh7PxXwtkS8KVvdfK+TSNQAWUeNSxf4dA29qW5tf2EGwa8jkJujlT9jLm17cixMVoGNc+GJFO1Mxhg==", + "dependencies": { + "@algolia/cache-browser-local-storage": "4.22.0", + "@algolia/cache-common": "4.22.0", + "@algolia/cache-in-memory": "4.22.0", + "@algolia/client-account": "4.22.0", + "@algolia/client-analytics": "4.22.0", + "@algolia/client-common": "4.22.0", + "@algolia/client-personalization": "4.22.0", + "@algolia/client-search": "4.22.0", + "@algolia/logger-common": "4.22.0", + "@algolia/logger-console": "4.22.0", + "@algolia/requester-browser-xhr": "4.22.0", + "@algolia/requester-common": "4.22.0", + "@algolia/requester-node-http": "4.22.0", + "@algolia/transporter": "4.22.0" + } + }, + "node_modules/algoliasearch-helper": { + "version": "3.16.1", + "resolved": "https://registry.npmjs.org/algoliasearch-helper/-/algoliasearch-helper-3.16.1.tgz", + "integrity": 
"sha512-qxAHVjjmT7USVvrM8q6gZGaJlCK1fl4APfdAA7o8O6iXEc68G0xMNrzRkxoB/HmhhvyHnoteS/iMTiHiTcQQcg==", + "dependencies": { + "@algolia/events": "^4.0.1" + }, + "peerDependencies": { + "algoliasearch": ">= 3.1 < 6" + } + }, + "node_modules/ansi-align": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz", + "integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==", + "dependencies": { + "string-width": "^4.1.0" + } + }, + "node_modules/ansi-align/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/ansi-align/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-html-community": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/ansi-html-community/-/ansi-html-community-0.0.8.tgz", + "integrity": "sha512-1APHAyr3+PCamwNw3bXCPp4HFLONZt/yIH0sZp0/469KWNTEy+qN5jQ3GVX6DMZ1UXAi34yVwtTeaG/HpBuuzw==", + "engines": [ + "node >= 0.8.0" + ], + "bin": { + "ansi-html": "bin/ansi-html" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/arg": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", + "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==" + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" + }, + "node_modules/array-flatten": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-2.1.2.tgz", + "integrity": "sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ==" + }, + "node_modules/array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": 
"sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "engines": { + "node": ">=8" + } + }, + "node_modules/astring": { + "version": "1.8.6", + "resolved": "https://registry.npmjs.org/astring/-/astring-1.8.6.tgz", + "integrity": "sha512-ISvCdHdlTDlH5IpxQJIex7BWBywFWgjJSVdwst+/iQCoEYnyOaQ95+X1JGshuBjGp6nxKUy1jMgE3zPqN7fQdg==", + "bin": { + "astring": "bin/astring" + } + }, + "node_modules/at-least-node": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz", + "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==", + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/autoprefixer": { + "version": "10.4.16", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.16.tgz", + "integrity": "sha512-7vd3UC6xKp0HLfua5IjZlcXvGAGy7cBAXTg2lyQ/8WpNhd6SiZ8Be+xm3FyBSYJx5GKcpRCzBh7RH4/0dnY+uQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/autoprefixer" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "browserslist": "^4.21.10", + "caniuse-lite": "^1.0.30001538", + "fraction.js": "^4.3.6", + "normalize-range": "^0.1.2", + "picocolors": "^1.0.0", + "postcss-value-parser": "^4.2.0" + }, + "bin": { + "autoprefixer": "bin/autoprefixer" + }, + "engines": { + "node": "^10 || ^12 || >=14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/babel-loader": { + "version": "9.1.3", + "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-9.1.3.tgz", + "integrity": "sha512-xG3ST4DglodGf8qSwv0MdeWLhrDsw/32QMdTO5T1ZIp9gQur0HkCyFs7Awskr10JKXFXwpAhiCuYX5oGXnRGbw==", + "dependencies": { + "find-cache-dir": "^4.0.0", + "schema-utils": "^4.0.0" + }, + "engines": { + "node": ">= 14.15.0" + }, + "peerDependencies": { + "@babel/core": "^7.12.0", + "webpack": ">=5" + } + }, + "node_modules/babel-plugin-dynamic-import-node": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz", + "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==", + "dependencies": { + "object.assign": "^4.1.0" + } + }, + "node_modules/babel-plugin-polyfill-corejs2": { + "version": "0.4.7", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.7.tgz", + "integrity": "sha512-LidDk/tEGDfuHW2DWh/Hgo4rmnw3cduK6ZkOI1NPFceSK3n/yAGeOsNT7FLnSGHkXj3RHGSEVkN3FsCTY6w2CQ==", + "dependencies": { + "@babel/compat-data": "^7.22.6", + "@babel/helper-define-polyfill-provider": "^0.4.4", + "semver": "^6.3.1" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/babel-plugin-polyfill-corejs2/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/babel-plugin-polyfill-corejs3": { + "version": "0.8.7", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.8.7.tgz", + "integrity": 
"sha512-KyDvZYxAzkC0Aj2dAPyDzi2Ym15e5JKZSK+maI7NAwSqofvuFglbSsxE7wUOvTg9oFVnHMzVzBKcqEb4PJgtOA==", + "dependencies": { + "@babel/helper-define-polyfill-provider": "^0.4.4", + "core-js-compat": "^3.33.1" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/babel-plugin-polyfill-regenerator": { + "version": "0.5.4", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.5.4.tgz", + "integrity": "sha512-S/x2iOCvDaCASLYsOOgWOq4bCfKYVqvO/uxjkaYyZ3rVsVE3CeAI/c84NpyuBBymEgNvHgjEot3a9/Z/kXvqsg==", + "dependencies": { + "@babel/helper-define-polyfill-provider": "^0.4.4" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/bail": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz", + "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" + }, + "node_modules/batch": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz", + "integrity": "sha512-x+VAiMRL6UPkx+kudNvxTl6hB2XNNCG2r+7wixVfIYwu/2HKRXimwQyaumLjMveWvT2Hkd/cAJw+QBMfJ/EKVw==" + }, + "node_modules/big.js": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz", + "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==", + "engines": { + "node": "*" + } + }, + "node_modules/binary-extensions": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", + "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", + "engines": { + "node": ">=8" + } + }, + "node_modules/body-parser": { + "version": "1.20.1", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.1.tgz", + "integrity": "sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==", + "dependencies": { + "bytes": "3.1.2", + "content-type": "~1.0.4", + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "on-finished": "2.4.1", + "qs": "6.11.0", + "raw-body": "2.5.1", + "type-is": "~1.6.18", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/body-parser/node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/body-parser/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/body-parser/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": 
"sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/bonjour-service": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/bonjour-service/-/bonjour-service-1.1.1.tgz", + "integrity": "sha512-Z/5lQRMOG9k7W+FkeGTNjh7htqn/2LMnfOvBZ8pynNZCM9MwkQkI3zeI4oz09uWdcgmgHugVvBqxGg4VQJ5PCg==", + "dependencies": { + "array-flatten": "^2.1.2", + "dns-equal": "^1.0.0", + "fast-deep-equal": "^3.1.3", + "multicast-dns": "^7.2.5" + } + }, + "node_modules/boolbase": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", + "integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==" + }, + "node_modules/boxen": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/boxen/-/boxen-6.2.1.tgz", + "integrity": "sha512-H4PEsJXfFI/Pt8sjDWbHlQPx4zL/bvSQjcilJmaulGt5mLDorHOHpmdXAJcBcmru7PhYSp/cDMWRko4ZUMFkSw==", + "dependencies": { + "ansi-align": "^3.0.1", + "camelcase": "^6.2.0", + "chalk": "^4.1.2", + "cli-boxes": "^3.0.0", + "string-width": "^5.0.1", + "type-fest": "^2.5.0", + "widest-line": "^4.0.1", + "wrap-ansi": "^8.0.1" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dependencies": { + "fill-range": "^7.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.22.2", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.22.2.tgz", + "integrity": "sha512-0UgcrvQmBDvZHFGdYUehrCNIazki7/lUP3kkoi/r3YB2amZbFM9J43ZRkJTXBUZK4gmx56+Sqk9+Vs9mwZx9+A==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "caniuse-lite": "^1.0.30001565", + "electron-to-chromium": "^1.4.601", + "node-releases": "^2.0.14", + "update-browserslist-db": "^1.0.13" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==" + }, + "node_modules/bytes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", + "integrity": "sha512-pMhOfFDPiv9t5jjIXkHosWmkSyQbvsgEVNkz0ERHbuLh2T/7j4Mqqpz523Fe8MVY89KC6Sh/QfS2sM+SjgFDcw==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/cacheable-lookup": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/cacheable-lookup/-/cacheable-lookup-7.0.0.tgz", + "integrity": 
"sha512-+qJyx4xiKra8mZrcwhjMRMUhD5NR1R8esPkzIYxX96JiecFoxAXFuz/GpR3+ev4PE1WamHip78wV0vcmPQtp8w==", + "engines": { + "node": ">=14.16" + } + }, + "node_modules/cacheable-request": { + "version": "10.2.14", + "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-10.2.14.tgz", + "integrity": "sha512-zkDT5WAF4hSSoUgyfg5tFIxz8XQK+25W/TLVojJTMKBaxevLBBtLxgqguAuVQB8PVW79FVjHcU+GJ9tVbDZ9mQ==", + "dependencies": { + "@types/http-cache-semantics": "^4.0.2", + "get-stream": "^6.0.1", + "http-cache-semantics": "^4.1.1", + "keyv": "^4.5.3", + "mimic-response": "^4.0.0", + "normalize-url": "^8.0.0", + "responselike": "^3.0.0" + }, + "engines": { + "node": ">=14.16" + } + }, + "node_modules/cacheable-request/node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cacheable-request/node_modules/normalize-url": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-8.0.0.tgz", + "integrity": "sha512-uVFpKhj5MheNBJRTiMZ9pE/7hD1QTeEvugSJW/OmLzAp78PB5O6adfMNTvmfKhXBkvCzC+rqifWcVYpGFwTjnw==", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/call-bind": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.5.tgz", + "integrity": "sha512-C3nQxfFZxFRVoJoGKKI8y3MOEo129NQ+FgQ08iye+Mk4zNZZGdjfs06bVTr+DBSlA66Q2VEcMki/cUCP4SercQ==", + "dependencies": { + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.1", + "set-function-length": "^1.1.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/camel-case": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/camel-case/-/camel-case-4.1.2.tgz", + "integrity": "sha512-gxGWBrTT1JuMx6R+o5PTXMmUnhnVzLQ9SNutD4YqKtI6ap897t3tKECYla6gCWEkplXnlNybEkZg9GEGxKFCgw==", + "dependencies": { + "pascal-case": "^3.1.2", + "tslib": "^2.0.3" + } + }, + "node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/caniuse-api": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-3.0.0.tgz", + "integrity": "sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==", + "dependencies": { + "browserslist": "^4.0.0", + "caniuse-lite": "^1.0.0", + "lodash.memoize": "^4.1.2", + "lodash.uniq": "^4.5.0" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001571", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001571.tgz", + "integrity": "sha512-tYq/6MoXhdezDLFZuCO/TKboTzuQ/xR5cFdgXPfDtM7/kchBO3b4VWghE/OAi/DV7tTdhmLjZiZBZi1fA/GheQ==", + "funding": [ + { + "type": 
"opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ] + }, + "node_modules/ccount": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz", + "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/char-regex": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz", + "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==", + "engines": { + "node": ">=10" + } + }, + "node_modules/character-entities": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz", + "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-html4": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz", + "integrity": "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-legacy": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz", + "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-reference-invalid": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz", + "integrity": "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/cheerio": { + "version": "1.0.0-rc.12", + "resolved": "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.12.tgz", + "integrity": "sha512-VqR8m68vM46BNnuZ5NtnGBKIE/DfN0cRIzg9n40EIq9NOv90ayxLBXA8fXC5gquFRGJSTRqBq25Jt2ECLR431Q==", + "dependencies": { + "cheerio-select": "^2.1.0", + "dom-serializer": "^2.0.0", + "domhandler": "^5.0.3", + "domutils": "^3.0.1", + "htmlparser2": "^8.0.1", + "parse5": "^7.0.0", + "parse5-htmlparser2-tree-adapter": "^7.0.0" + }, + "engines": { + "node": ">= 6" + }, + "funding": { + "url": "https://github.com/cheeriojs/cheerio?sponsor=1" + } + }, + "node_modules/cheerio-select": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/cheerio-select/-/cheerio-select-2.1.0.tgz", + "integrity": 
"sha512-9v9kG0LvzrlcungtnJtpGNxY+fzECQKhK4EGJX2vByejiMX84MFNQw4UxPJl3bFbTMw+Dfs37XaIkCwTZfLh4g==", + "dependencies": { + "boolbase": "^1.0.0", + "css-select": "^5.1.0", + "css-what": "^6.1.0", + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3", + "domutils": "^3.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/chokidar": { + "version": "3.5.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz", + "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==", + "funding": [ + { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + ], + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chrome-trace-event": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz", + "integrity": "sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg==", + "engines": { + "node": ">=6.0" + } + }, + "node_modules/ci-info": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", + "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "engines": { + "node": ">=8" + } + }, + "node_modules/clean-css": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/clean-css/-/clean-css-5.3.3.tgz", + "integrity": "sha512-D5J+kHaVb/wKSFcyyV75uCn8fiY4sV38XJoe4CUyGQ+mOU/fMVYUdH1hJC+CJQ5uY3EnW27SbJYS4X8BiLrAFg==", + "dependencies": { + "source-map": "~0.6.0" + }, + "engines": { + "node": ">= 10.0" + } + }, + "node_modules/clean-css/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/clean-stack": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", + "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", + "engines": { + "node": ">=6" + } + }, + "node_modules/cli-boxes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-3.0.0.tgz", + "integrity": "sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cli-table3": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.3.tgz", + "integrity": "sha512-w5Jac5SykAeZJKntOxJCrm63Eg5/4dhMWIcuTbo9rpE+brgaSZo0RuNJZeOyMgsUdhDeojvgyQLmjI+K50ZGyg==", + "dependencies": { + "string-width": "^4.2.0" + }, + "engines": { + "node": "10.* || >= 12.*" + }, + "optionalDependencies": { + "@colors/colors": "1.5.0" + } + }, + "node_modules/cli-table3/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": 
"sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/cli-table3/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/clone-deep": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/clone-deep/-/clone-deep-4.0.1.tgz", + "integrity": "sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ==", + "dependencies": { + "is-plain-object": "^2.0.4", + "kind-of": "^6.0.2", + "shallow-clone": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/clone-deep/node_modules/is-plain-object": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", + "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "dependencies": { + "isobject": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/clsx": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.0.0.tgz", + "integrity": "sha512-rQ1+kcj+ttHG0MKVGBUXwayCCF1oh39BF5COIpRzuCEv8Mwjv0XucrI2ExNTOn9IlLifGClWQcU9BrZORvtw6Q==", + "engines": { + "node": ">=6" + } + }, + "node_modules/collapse-white-space": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/collapse-white-space/-/collapse-white-space-2.1.0.tgz", + "integrity": "sha512-loKTxY1zCOuG4j9f6EPnuyyYkf58RnhhWTvRoZEokgB+WbdXehfjFviyOVYkqzEWz1Q5kRiZdBYS5SwxbQYwzw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/colord": { + "version": "2.9.3", + "resolved": "https://registry.npmjs.org/colord/-/colord-2.9.3.tgz", + "integrity": "sha512-jeC1axXpnb0/2nn/Y1LPuLdgXBLH7aDcHu4KEKfqw3CUhX7ZpfBSlPKyqXE6btIgEzfWtrX3/tyBCaCvXvMkOw==" + }, + "node_modules/colorette": { + "version": "2.0.20", + "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz", + "integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==" + }, + "node_modules/combine-promises": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/combine-promises/-/combine-promises-1.2.0.tgz", + "integrity": "sha512-VcQB1ziGD0NXrhKxiwyNbCDmRzs/OShMs2GqW2DlU2A/Sd0nQxE1oWDAE5O0ygSx5mgQOn9eIFh7yKPgFRVkPQ==", + "engines": { + "node": ">=10" + } + }, + "node_modules/comma-separated-tokens": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz", + "integrity": 
"sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/commander": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-5.1.0.tgz", + "integrity": "sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg==", + "engines": { + "node": ">= 6" + } + }, + "node_modules/common-path-prefix": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/common-path-prefix/-/common-path-prefix-3.0.0.tgz", + "integrity": "sha512-QE33hToZseCH3jS0qN96O/bSh3kaw/h+Tq7ngyY9eWDUnTlTNUyqfqvCXioLe5Na5jFsL78ra/wuBU4iuEgd4w==" + }, + "node_modules/compressible": { + "version": "2.0.18", + "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz", + "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==", + "dependencies": { + "mime-db": ">= 1.43.0 < 2" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/compressible/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/compression": { + "version": "1.7.4", + "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.4.tgz", + "integrity": "sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ==", + "dependencies": { + "accepts": "~1.3.5", + "bytes": "3.0.0", + "compressible": "~2.0.16", + "debug": "2.6.9", + "on-headers": "~1.0.2", + "safe-buffer": "5.1.2", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/compression/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/compression/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/compression/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==" + }, + "node_modules/config-chain": { + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/config-chain/-/config-chain-1.1.13.tgz", + "integrity": "sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==", + "dependencies": { + "ini": "^1.3.4", + "proto-list": "~1.2.1" + } + }, + "node_modules/configstore": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/configstore/-/configstore-6.0.0.tgz", + "integrity": "sha512-cD31W1v3GqUlQvbBCGcXmd2Nj9SvLDOP1oQ0YFuLETufzSPaKp11rYBsSOm7rCsW3OnIRAFM3OxRhceaXNYHkA==", + "dependencies": { + 
"dot-prop": "^6.0.1", + "graceful-fs": "^4.2.6", + "unique-string": "^3.0.0", + "write-file-atomic": "^3.0.3", + "xdg-basedir": "^5.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/yeoman/configstore?sponsor=1" + } + }, + "node_modules/connect-history-api-fallback": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-2.0.0.tgz", + "integrity": "sha512-U73+6lQFmfiNPrYbXqr6kZ1i1wiRqXnp2nhMsINseWXO8lDau0LGEffJ8kQi4EjLZympVgRdvqjAgiZ1tgzDDA==", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/consola": { + "version": "2.15.3", + "resolved": "https://registry.npmjs.org/consola/-/consola-2.15.3.tgz", + "integrity": "sha512-9vAdYbHj6x2fLKC4+oPH0kFzY/orMZyG2Aj+kNylHxKGJ/Ed4dpNyAQYwJOdqO4zdM7XpVHmyejQDcQHrnuXbw==" + }, + "node_modules/content-disposition": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz", + "integrity": "sha512-kRGRZw3bLlFISDBgwTSA1TMBFN6J6GWDeubmDE3AF+3+yXL8hTWv8r5rkLbqYXY4RjPk/EzHnClI3zQf1cFmHA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/content-type": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==" + }, + "node_modules/cookie": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.5.0.tgz", + "integrity": "sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie-signature": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", + "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==" + }, + "node_modules/copy-text-to-clipboard": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/copy-text-to-clipboard/-/copy-text-to-clipboard-3.2.0.tgz", + "integrity": "sha512-RnJFp1XR/LOBDckxTib5Qjr/PMfkatD0MUCQgdpqS8MdKiNUzBjAQBEN6oUy+jW7LI93BBG3DtMB2KOOKpGs2Q==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/copy-webpack-plugin": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-11.0.0.tgz", + "integrity": "sha512-fX2MWpamkW0hZxMEg0+mYnA40LTosOSa5TqZ9GYIBzyJa9C3QUaMPSE2xAi/buNr8u89SfD9wHSQVBzrRa/SOQ==", + "dependencies": { + "fast-glob": "^3.2.11", + "glob-parent": "^6.0.1", + "globby": "^13.1.1", + "normalize-path": "^3.0.0", + "schema-utils": "^4.0.0", + "serialize-javascript": "^6.0.0" + }, + "engines": { + "node": ">= 14.15.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.1.0" + } + }, + "node_modules/copy-webpack-plugin/node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": 
"sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/copy-webpack-plugin/node_modules/globby": { + "version": "13.2.2", + "resolved": "https://registry.npmjs.org/globby/-/globby-13.2.2.tgz", + "integrity": "sha512-Y1zNGV+pzQdh7H39l9zgB4PJqjRNqydvdYCDG4HFXM4XuvSaQQlEc91IU1yALL8gUTDomgBAfz3XJdmUS+oo0w==", + "dependencies": { + "dir-glob": "^3.0.1", + "fast-glob": "^3.3.0", + "ignore": "^5.2.4", + "merge2": "^1.4.1", + "slash": "^4.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/copy-webpack-plugin/node_modules/slash": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz", + "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/core-js": { + "version": "3.34.0", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.34.0.tgz", + "integrity": "sha512-aDdvlDder8QmY91H88GzNi9EtQi2TjvQhpCX6B1v/dAZHU1AuLgHvRh54RiOerpEhEW46Tkf+vgAViB/CWC0ag==", + "hasInstallScript": true, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-js-compat": { + "version": "3.34.0", + "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.34.0.tgz", + "integrity": "sha512-4ZIyeNbW/Cn1wkMMDy+mvrRUxrwFNjKwbhCfQpDd+eLgYipDqp8oGFGtLmhh18EDPKA0g3VUBYOxQGGwvWLVpA==", + "dependencies": { + "browserslist": "^4.22.2" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-js-pure": { + "version": "3.34.0", + "resolved": "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.34.0.tgz", + "integrity": "sha512-pmhivkYXkymswFfbXsANmBAewXx86UBfmagP+w0wkK06kLsLlTK5oQmsURPivzMkIBQiYq2cjamcZExIwlFQIg==", + "hasInstallScript": true, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-util-is": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", + "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==" + }, + "node_modules/cosmiconfig": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-7.1.0.tgz", + "integrity": "sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA==", + "dependencies": { + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.2.1", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.10.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/crypto-random-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-4.0.0.tgz", + "integrity": 
"sha512-x8dy3RnvYdlUcPOjkEHqozhiwzKNSq7GcPuXFbnyMOCHxX8V3OgIg/pYuabl2sbUPfIJaeAQB7PMOK8DFIdoRA==", + "dependencies": { + "type-fest": "^1.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/crypto-random-string/node_modules/type-fest": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-1.4.0.tgz", + "integrity": "sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/css-declaration-sorter": { + "version": "6.4.1", + "resolved": "https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-6.4.1.tgz", + "integrity": "sha512-rtdthzxKuyq6IzqX6jEcIzQF/YqccluefyCYheovBOLhFT/drQA9zj/UbRAa9J7C0o6EG6u3E6g+vKkay7/k3g==", + "engines": { + "node": "^10 || ^12 || >=14" + }, + "peerDependencies": { + "postcss": "^8.0.9" + } + }, + "node_modules/css-loader": { + "version": "6.8.1", + "resolved": "https://registry.npmjs.org/css-loader/-/css-loader-6.8.1.tgz", + "integrity": "sha512-xDAXtEVGlD0gJ07iclwWVkLoZOpEvAWaSyf6W18S2pOC//K8+qUDIx8IIT3D+HjnmkJPQeesOPv5aiUaJsCM2g==", + "dependencies": { + "icss-utils": "^5.1.0", + "postcss": "^8.4.21", + "postcss-modules-extract-imports": "^3.0.0", + "postcss-modules-local-by-default": "^4.0.3", + "postcss-modules-scope": "^3.0.0", + "postcss-modules-values": "^4.0.0", + "postcss-value-parser": "^4.2.0", + "semver": "^7.3.8" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.0.0" + } + }, + "node_modules/css-minimizer-webpack-plugin": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/css-minimizer-webpack-plugin/-/css-minimizer-webpack-plugin-4.2.2.tgz", + "integrity": "sha512-s3Of/4jKfw1Hj9CxEO1E5oXhQAxlayuHO2y/ML+C6I9sQ7FdzfEV6QgMLN3vI+qFsjJGIAFLKtQK7t8BOXAIyA==", + "dependencies": { + "cssnano": "^5.1.8", + "jest-worker": "^29.1.2", + "postcss": "^8.4.17", + "schema-utils": "^4.0.0", + "serialize-javascript": "^6.0.0", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">= 14.15.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.0.0" + }, + "peerDependenciesMeta": { + "@parcel/css": { + "optional": true + }, + "@swc/css": { + "optional": true + }, + "clean-css": { + "optional": true + }, + "csso": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "lightningcss": { + "optional": true + } + } + }, + "node_modules/css-minimizer-webpack-plugin/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/css-select": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/css-select/-/css-select-5.1.0.tgz", + "integrity": "sha512-nwoRF1rvRRnnCqqY7updORDsuqKzqYJ28+oSMaJMMgOauh3fvwHqMS7EZpIPqK8GL+g9mKxF1vP/ZjSeNjEVHg==", + "dependencies": { + "boolbase": "^1.0.0", + "css-what": "^6.1.0", + "domhandler": "^5.0.2", + "domutils": "^3.0.1", + "nth-check": "^2.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/css-tree": { + 
"version": "1.1.3", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.1.3.tgz", + "integrity": "sha512-tRpdppF7TRazZrjJ6v3stzv93qxRcSsFmW6cX0Zm2NVKpxE1WV1HblnghVv9TreireHkqI/VDEsfolRF1p6y7Q==", + "dependencies": { + "mdn-data": "2.0.14", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/css-tree/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/css-what": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/css-what/-/css-what-6.1.0.tgz", + "integrity": "sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw==", + "engines": { + "node": ">= 6" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/cssnano": { + "version": "5.1.15", + "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-5.1.15.tgz", + "integrity": "sha512-j+BKgDcLDQA+eDifLx0EO4XSA56b7uut3BQFH+wbSaSTuGLuiyTa/wbRYthUXX8LC9mLg+WWKe8h+qJuwTAbHw==", + "dependencies": { + "cssnano-preset-default": "^5.2.14", + "lilconfig": "^2.0.3", + "yaml": "^1.10.2" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/cssnano" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/cssnano-preset-advanced": { + "version": "5.3.10", + "resolved": "https://registry.npmjs.org/cssnano-preset-advanced/-/cssnano-preset-advanced-5.3.10.tgz", + "integrity": "sha512-fnYJyCS9jgMU+cmHO1rPSPf9axbQyD7iUhLO5Df6O4G+fKIOMps+ZbU0PdGFejFBBZ3Pftf18fn1eG7MAPUSWQ==", + "dependencies": { + "autoprefixer": "^10.4.12", + "cssnano-preset-default": "^5.2.14", + "postcss-discard-unused": "^5.1.0", + "postcss-merge-idents": "^5.1.1", + "postcss-reduce-idents": "^5.2.0", + "postcss-zindex": "^5.1.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/cssnano-preset-default": { + "version": "5.2.14", + "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-5.2.14.tgz", + "integrity": "sha512-t0SFesj/ZV2OTylqQVOrFgEh5uanxbO6ZAdeCrNsUQ6fVuXwYTxJPNAGvGTxHbD68ldIJNec7PyYZDBrfDQ+6A==", + "dependencies": { + "css-declaration-sorter": "^6.3.1", + "cssnano-utils": "^3.1.0", + "postcss-calc": "^8.2.3", + "postcss-colormin": "^5.3.1", + "postcss-convert-values": "^5.1.3", + "postcss-discard-comments": "^5.1.2", + "postcss-discard-duplicates": "^5.1.0", + "postcss-discard-empty": "^5.1.1", + "postcss-discard-overridden": "^5.1.0", + "postcss-merge-longhand": "^5.1.7", + "postcss-merge-rules": "^5.1.4", + "postcss-minify-font-values": "^5.1.0", + "postcss-minify-gradients": "^5.1.1", + "postcss-minify-params": "^5.1.4", + "postcss-minify-selectors": "^5.2.1", + "postcss-normalize-charset": "^5.1.0", + "postcss-normalize-display-values": "^5.1.0", + "postcss-normalize-positions": "^5.1.1", + "postcss-normalize-repeat-style": "^5.1.1", + "postcss-normalize-string": "^5.1.0", 
+ "postcss-normalize-timing-functions": "^5.1.0", + "postcss-normalize-unicode": "^5.1.1", + "postcss-normalize-url": "^5.1.0", + "postcss-normalize-whitespace": "^5.1.1", + "postcss-ordered-values": "^5.1.3", + "postcss-reduce-initial": "^5.1.2", + "postcss-reduce-transforms": "^5.1.0", + "postcss-svgo": "^5.1.0", + "postcss-unique-selectors": "^5.1.1" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/cssnano-utils": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/cssnano-utils/-/cssnano-utils-3.1.0.tgz", + "integrity": "sha512-JQNR19/YZhz4psLX/rQ9M83e3z2Wf/HdJbryzte4a3NSuafyp9w/I4U+hx5C2S9g41qlstH7DEWnZaaj83OuEA==", + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/csso": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/csso/-/csso-4.2.0.tgz", + "integrity": "sha512-wvlcdIbf6pwKEk7vHj8/Bkc0B4ylXZruLvOgs9doS5eOsOpuodOV2zJChSpkp+pRpYQLQMeF04nr3Z68Sta9jA==", + "dependencies": { + "css-tree": "^1.1.2" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/csstype": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", + "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==" + }, + "node_modules/debounce": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/debounce/-/debounce-1.2.1.tgz", + "integrity": "sha512-XRRe6Glud4rd/ZGQfiV1ruXSfbvfJedlV9Y6zOlP+2K04vBYiJEte6stfFkCP03aMnY5tsipamumUjL14fofug==" + }, + "node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decode-named-character-reference": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.0.2.tgz", + "integrity": "sha512-O8x12RzrUF8xyVcY0KJowWsmaJxQbmy0/EtnNtHRpsOcT7dFk5W598coHqBVpmWo1oQQfsCqfCmkZN5DJrZVdg==", + "dependencies": { + "character-entities": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/decompress-response": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", + "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", + "dependencies": { + "mimic-response": "^3.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/decompress-response/node_modules/mimic-response": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", + "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/deep-extend": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", + "integrity": 
"sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/default-gateway": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/default-gateway/-/default-gateway-6.0.3.tgz", + "integrity": "sha512-fwSOJsbbNzZ/CUFpqFBqYfYNLj1NbMPm8MMCIzHjC83iSJRBEGmDUxU+WP661BaBQImeC2yHwXtz+P/O9o+XEg==", + "dependencies": { + "execa": "^5.0.0" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/defer-to-connect": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-2.0.1.tgz", + "integrity": "sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==", + "engines": { + "node": ">=10" + } + }, + "node_modules/define-data-property": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.1.tgz", + "integrity": "sha512-E7uGkTzkk1d0ByLeSc6ZsFS79Axg+m1P/VsgYsxHgiuc3tFSj+MjMIwe90FC4lOAZzNBdY7kkO2P2wKdsQ1vgQ==", + "dependencies": { + "get-intrinsic": "^1.2.1", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/define-lazy-prop": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", + "integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==", + "engines": { + "node": ">=8" + } + }, + "node_modules/define-properties": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz", + "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", + "dependencies": { + "define-data-property": "^1.0.1", + "has-property-descriptors": "^1.0.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/del": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/del/-/del-6.1.1.tgz", + "integrity": "sha512-ua8BhapfP0JUJKC/zV9yHHDW/rDoDxP4Zhn3AkA6/xT6gY7jYXJiaeyBZznYVujhZZET+UgcbZiQ7sN3WqcImg==", + "dependencies": { + "globby": "^11.0.1", + "graceful-fs": "^4.2.4", + "is-glob": "^4.0.1", + "is-path-cwd": "^2.2.0", + "is-path-inside": "^3.0.2", + "p-map": "^4.0.0", + "rimraf": "^3.0.2", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/depd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/destroy": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", + "integrity": 
"sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/detect-node": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz", + "integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==" + }, + "node_modules/detect-port": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/detect-port/-/detect-port-1.5.1.tgz", + "integrity": "sha512-aBzdj76lueB6uUst5iAs7+0H/oOjqI5D16XUWxlWMIMROhcM0rfsNVk93zTngq1dDNpoXRr++Sus7ETAExppAQ==", + "dependencies": { + "address": "^1.0.1", + "debug": "4" + }, + "bin": { + "detect": "bin/detect-port.js", + "detect-port": "bin/detect-port.js" + } + }, + "node_modules/detect-port-alt": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/detect-port-alt/-/detect-port-alt-1.1.6.tgz", + "integrity": "sha512-5tQykt+LqfJFBEYaDITx7S7cR7mJ/zQmLXZ2qt5w04ainYZw6tBf9dBunMjVeVOdYVRUzUOE4HkY5J7+uttb5Q==", + "dependencies": { + "address": "^1.0.1", + "debug": "^2.6.0" + }, + "bin": { + "detect": "bin/detect-port", + "detect-port": "bin/detect-port" + }, + "engines": { + "node": ">= 4.2.1" + } + }, + "node_modules/detect-port-alt/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/detect-port-alt/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/devlop": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz", + "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", + "dependencies": { + "dequal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/dns-equal": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/dns-equal/-/dns-equal-1.0.0.tgz", + "integrity": "sha512-z+paD6YUQsk+AbGCEM4PrOXSss5gd66QfcVBFTKR/HpFL9jCqikS94HYwKww6fQyO7IxrIIyUu+g0Ka9tUS2Cg==" + }, + "node_modules/dns-packet": { + "version": "5.6.1", + "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-5.6.1.tgz", + "integrity": "sha512-l4gcSouhcgIKRvyy99RNVOgxXiicE+2jZoNmaNmZ6JXiGajBOJAesk1OBlJuM5k2c+eudGdLxDqXuPCKIj6kpw==", + "dependencies": { + "@leichtgewicht/ip-codec": "^2.0.1" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/dom-converter": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/dom-converter/-/dom-converter-0.2.0.tgz", + "integrity": "sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA==", + "dependencies": { + "utila": "~0.4" + } + }, + "node_modules/dom-serializer": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz", + "integrity": "sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==", + "dependencies": { + "domelementtype": "^2.3.0", + "domhandler": "^5.0.2", + "entities": "^4.2.0" + }, + "funding": { + "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" + } + }, + "node_modules/domelementtype": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz", + "integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ] + }, + "node_modules/domhandler": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz", + "integrity": "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==", + "dependencies": { + "domelementtype": "^2.3.0" + }, + "engines": { + "node": ">= 4" + }, + "funding": { + "url": "https://github.com/fb55/domhandler?sponsor=1" + } + }, + "node_modules/domutils": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.1.0.tgz", + "integrity": "sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA==", + "dependencies": { + "dom-serializer": "^2.0.0", + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3" + }, + "funding": { + "url": "https://github.com/fb55/domutils?sponsor=1" + } + }, + "node_modules/dot-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/dot-case/-/dot-case-3.0.4.tgz", + "integrity": "sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==", + "dependencies": { + "no-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/dot-prop": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-6.0.1.tgz", + "integrity": "sha512-tE7ztYzXHIeyvc7N+hR3oi7FIbf/NIjVP9hmAt3yMXzrQ072/fpjGLx2GxNxGxUl5V73MEqYzioOMoVhGMJ5cA==", + "dependencies": { + "is-obj": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/dot-prop/node_modules/is-obj": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz", + "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==", + "engines": { + "node": ">=8" + } + }, + "node_modules/duplexer": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz", + "integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==" + }, + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==" + }, + "node_modules/ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==" + }, + "node_modules/electron-to-chromium": { + "version": "1.4.616", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.616.tgz", + "integrity": 
"sha512-1n7zWYh8eS0L9Uy+GskE0lkBUNK83cXTVJI0pU3mGprFsbfSdAc15VTFbo+A+Bq4pwstmL30AVcEU3Fo463lNg==" + }, + "node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==" + }, + "node_modules/emojilib": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/emojilib/-/emojilib-2.4.0.tgz", + "integrity": "sha512-5U0rVMU5Y2n2+ykNLQqMoqklN9ICBT/KsvC1Gz6vqHbz2AXXGkG+Pm5rMWk/8Vjrr/mY9985Hi8DYzn1F09Nyw==" + }, + "node_modules/emojis-list": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-3.0.0.tgz", + "integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/emoticon": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/emoticon/-/emoticon-4.0.1.tgz", + "integrity": "sha512-dqx7eA9YaqyvYtUhJwT4rC1HIp82j5ybS1/vQ42ur+jBe17dJMwZE4+gvL1XadSFfxaPFFGt3Xsw+Y8akThDlw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/enhanced-resolve": { + "version": "5.15.0", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.15.0.tgz", + "integrity": "sha512-LXYT42KJ7lpIKECr2mAXIaMldcNCh/7E0KBKOu4KSfkHmP+mZmSs+8V5gBAqisWBy0OO4W5Oyys0GO1Y8KtdKg==", + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/entities": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/error-ex": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", + "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/es-module-lexer": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.4.1.tgz", + "integrity": "sha512-cXLGjP0c4T3flZJKQSuziYoq7MlT+rnvfZjfp7h+I7K9BNX54kP9nyWvdbwjQ4u1iWbOL4u96fgeZLToQlZC7w==" + }, + "node_modules/escalade": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", + "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-goat": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-goat/-/escape-goat-4.0.0.tgz", + "integrity": "sha512-2Sd4ShcWxbx6OY1IHyla/CVNwvg7XwZVoXZHcSu9w9SReNP1EzzD5T8NWKIR38fIqEns9kDWKUQTXXAmlDrdPg==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/escape-html": { + "version": "1.0.3", + "resolved": 
"https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==" + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint-scope": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esrecurse/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", + "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estree-util-attach-comments": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/estree-util-attach-comments/-/estree-util-attach-comments-3.0.0.tgz", + "integrity": "sha512-cKUwm/HUcTDsYh/9FgnuFqpfquUbwIqwKM26BVCGDPVgvaCl/nDCCjUfiLlx6lsEZ3Z4RFxNbOQ60pkaEwFxGw==", + "dependencies": { + "@types/estree": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-build-jsx": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/estree-util-build-jsx/-/estree-util-build-jsx-3.0.1.tgz", + "integrity": "sha512-8U5eiL6BTrPxp/CHbs2yMgP8ftMhR5ww1eIKoWRMlqvltHF8fZn5LRDvTKuxD3DUn+shRbLGqXemcP51oFCsGQ==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "estree-walker": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-is-identifier-name": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-3.0.0.tgz", + "integrity": "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==", + "funding": { + "type": 
"opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-to-js": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/estree-util-to-js/-/estree-util-to-js-2.0.0.tgz", + "integrity": "sha512-WDF+xj5rRWmD5tj6bIqRi6CkLIXbbNQUcxQHzGysQzvHmdYG2G7p/Tf0J0gpxGgkeMZNTIjT/AoSvC9Xehcgdg==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "astring": "^1.8.0", + "source-map": "^0.7.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-value-to-estree": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/estree-util-value-to-estree/-/estree-util-value-to-estree-3.0.1.tgz", + "integrity": "sha512-b2tdzTurEIbwRh+mKrEcaWfu1wgb8J1hVsgREg7FFiecWwK/PhO8X0kyc+0bIcKNtD4sqxIdNoRy6/p/TvECEA==", + "dependencies": { + "@types/estree": "^1.0.0", + "is-plain-obj": "^4.0.0" + }, + "engines": { + "node": ">=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/remcohaszing" + } + }, + "node_modules/estree-util-visit": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/estree-util-visit/-/estree-util-visit-2.0.0.tgz", + "integrity": "sha512-m5KgiH85xAhhW8Wta0vShLcUvOsh3LLPI2YVwcbio1l7E09NTLL1EyMZFM1OyWowoH0skScNbhOPl4kcBgzTww==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/eta": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/eta/-/eta-2.2.0.tgz", + "integrity": "sha512-UVQ72Rqjy/ZKQalzV5dCCJP80GrmPrMxh6NlNf+erV6ObL0ZFkhCstWRawS85z3smdr3d2wXPsZEY7rDPfGd2g==", + "engines": { + "node": ">=6.0.0" + }, + "funding": { + "url": "https://github.com/eta-dev/eta?sponsor=1" + } + }, + "node_modules/etag": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", + "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/eval": { + "version": "0.1.8", + "resolved": "https://registry.npmjs.org/eval/-/eval-0.1.8.tgz", + "integrity": "sha512-EzV94NYKoO09GLXGjXj9JIlXijVck4ONSr5wiCWDvhsvj5jxSrzTmRU/9C1DyB6uToszLs8aifA6NQ7lEQdvFw==", + "dependencies": { + "@types/node": "*", + "require-like": ">= 0.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/eventemitter3": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", + "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==" + }, + "node_modules/events": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", + "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", + "engines": { + "node": 
">=0.8.x" + } + }, + "node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/execa/node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/express": { + "version": "4.18.2", + "resolved": "https://registry.npmjs.org/express/-/express-4.18.2.tgz", + "integrity": "sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==", + "dependencies": { + "accepts": "~1.3.8", + "array-flatten": "1.1.1", + "body-parser": "1.20.1", + "content-disposition": "0.5.4", + "content-type": "~1.0.4", + "cookie": "0.5.0", + "cookie-signature": "1.0.6", + "debug": "2.6.9", + "depd": "2.0.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "finalhandler": "1.2.0", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "merge-descriptors": "1.0.1", + "methods": "~1.1.2", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "path-to-regexp": "0.1.7", + "proxy-addr": "~2.0.7", + "qs": "6.11.0", + "range-parser": "~1.2.1", + "safe-buffer": "5.2.1", + "send": "0.18.0", + "serve-static": "1.15.0", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "type-is": "~1.6.18", + "utils-merge": "1.0.1", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.10.0" + } + }, + "node_modules/express/node_modules/array-flatten": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", + "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==" + }, + "node_modules/express/node_modules/content-disposition": { + "version": "0.5.4", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", + "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", + "dependencies": { + "safe-buffer": "5.2.1" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/express/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/express/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/express/node_modules/path-to-regexp": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", + "integrity": 
"sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==" + }, + "node_modules/express/node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" + }, + "node_modules/extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==", + "dependencies": { + "is-extendable": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" + }, + "node_modules/fast-glob": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", + "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" + }, + "node_modules/fast-url-parser": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/fast-url-parser/-/fast-url-parser-1.1.3.tgz", + "integrity": "sha512-5jOCVXADYNuRkKFzNJ0dCCewsZiYo0dz8QNYljkOpFC6r2U4OBmKtvm/Tsuh4w1YYdDqDb31a8TVhBJ2OJKdqQ==", + "dependencies": { + "punycode": "^1.3.2" + } + }, + "node_modules/fastq": { + "version": "1.16.0", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.16.0.tgz", + "integrity": "sha512-ifCoaXsDrsdkWTtiNJX5uzHDsrck5TzfKKDcuFFTIrrc/BS076qgEIfoIy1VeZqViznfKiysPYTh/QeHtnIsYA==", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fault": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/fault/-/fault-2.0.1.tgz", + "integrity": "sha512-WtySTkS4OKev5JtpHXnib4Gxiurzh5NCGvWrFaZ34m6JehfTUhKZvn9njTfw48t6JumVQOmrKqpmGcdwxnhqBQ==", + "dependencies": { + "format": "^0.2.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/faye-websocket": { + "version": "0.11.4", + "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.4.tgz", + "integrity": "sha512-CzbClwlXAuiRQAlUyfqPgvPoNKTckTPGfwZV4ZdAhVcP2lh9KUxJg2b5GkE7XbjKQ3YJnQ9z6D9ntLAlB+tP8g==", + "dependencies": { + "websocket-driver": ">=0.5.1" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/feed": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/feed/-/feed-4.2.2.tgz", + "integrity": 
"sha512-u5/sxGfiMfZNtJ3OvQpXcvotFpYkL0n9u9mM2vkui2nGo8b4wvDkJ8gAkYqbA8QpGyFCv3RK0Z+Iv+9veCS9bQ==", + "dependencies": { + "xml-js": "^1.6.11" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/file-loader": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/file-loader/-/file-loader-6.2.0.tgz", + "integrity": "sha512-qo3glqyTa61Ytg4u73GultjHGjdRyig3tG6lPtyX/jOEJvHif9uB0/OCI2Kif6ctF3caQTW2G5gym21oAsI4pw==", + "dependencies": { + "loader-utils": "^2.0.0", + "schema-utils": "^3.0.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.0.0 || ^5.0.0" + } + }, + "node_modules/file-loader/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/file-loader/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/file-loader/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/file-loader/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/filesize": { + "version": "8.0.7", + "resolved": "https://registry.npmjs.org/filesize/-/filesize-8.0.7.tgz", + "integrity": "sha512-pjmC+bkIF8XI7fWaH8KxHcZL3DPybs1roSKP4rKDvy20tAWwIObE4+JIseG2byfGKhud5ZnM4YSGKBz7Sh0ndQ==", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/finalhandler": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz", + "integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==", + "dependencies": { + "debug": "2.6.9", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "statuses": "2.0.1", + "unpipe": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/finalhandler/node_modules/debug": { + "version": 
"2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/finalhandler/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/find-cache-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-4.0.0.tgz", + "integrity": "sha512-9ZonPT4ZAK4a+1pUPVPZJapbi7O5qbbJPdYw/NOQWZZbVLdDTYM3A4R9z/DpAM08IDaFGsvPgiGZ82WEwUDWjg==", + "dependencies": { + "common-path-prefix": "^3.0.0", + "pkg-dir": "^7.0.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/find-up": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-6.3.0.tgz", + "integrity": "sha512-v2ZsoEuVHYy8ZIlYqwPe/39Cy+cFDzp4dXPaxNvkEuouymu+2Jbz0PxpKarJHYJTmv2HWT3O382qY8l4jMWthw==", + "dependencies": { + "locate-path": "^7.1.0", + "path-exists": "^5.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", + "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", + "bin": { + "flat": "cli.js" + } + }, + "node_modules/follow-redirects": { + "version": "1.15.5", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.5.tgz", + "integrity": "sha512-vSFWUON1B+yAw1VN4xMfxgn5fTUiaOzAJCKBwIIgT/+7CuGy9+r+5gITvP62j3RmaD5Ph65UaERdOSRGUzZtgw==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/fork-ts-checker-webpack-plugin": { + "version": "6.5.3", + "resolved": "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-6.5.3.tgz", + "integrity": "sha512-SbH/l9ikmMWycd5puHJKTkZJKddF4iRLyW3DeZ08HTI7NGyLS38MXd/KGgeWumQO7YNQbW2u/NtPT2YowbPaGQ==", + "dependencies": { + "@babel/code-frame": "^7.8.3", + "@types/json-schema": "^7.0.5", + "chalk": "^4.1.0", + "chokidar": "^3.4.2", + "cosmiconfig": "^6.0.0", + "deepmerge": "^4.2.2", + "fs-extra": "^9.0.0", + "glob": "^7.1.6", + "memfs": "^3.1.2", + "minimatch": "^3.0.4", + "schema-utils": "2.7.0", + "semver": "^7.3.2", + "tapable": "^1.0.0" + }, + "engines": { + "node": ">=10", + "yarn": ">=1.0.0" + }, + "peerDependencies": { + "eslint": ">= 6", + "typescript": ">= 2.7", + "vue-template-compiler": "*", + "webpack": ">= 4" + }, + "peerDependenciesMeta": { + "eslint": { + "optional": true + }, + "vue-template-compiler": { + "optional": true + } + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + 
"type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/cosmiconfig": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-6.0.0.tgz", + "integrity": "sha512-xb3ZL6+L8b9JLLCx3ZdoZy4+2ECphCMo2PwqgP1tlfVq6M6YReyzBJtvWWtbDSpNr9hn96pkCiZqUcFEc+54Qg==", + "dependencies": { + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.1.0", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.7.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/fs-extra": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", + "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", + "dependencies": { + "at-least-node": "^1.0.0", + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/schema-utils": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz", + "integrity": "sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==", + "dependencies": { + "@types/json-schema": "^7.0.4", + "ajv": "^6.12.2", + "ajv-keywords": "^3.4.1" + }, + "engines": { + "node": ">= 8.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/tapable": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz", + "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/form-data-encoder": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-4.0.2.tgz", + "integrity": "sha512-KQVhvhK8ZkWzxKxOr56CPulAhH3dobtuQ4+hNQ+HekH/Wp5gSOafqRAeTphQUJAIk0GBvHZgJ2ZGRWd5kphMuw==", + "engines": { + "node": ">= 18" + } + }, + "node_modules/format": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/format/-/format-0.2.2.tgz", + "integrity": "sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww==", + "engines": { + "node": ">=0.4.x" + } + }, + "node_modules/forwarded": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", + "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fraction.js": { + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.7.tgz", 
+ "integrity": "sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==", + "engines": { + "node": "*" + }, + "funding": { + "type": "patreon", + "url": "https://github.com/sponsors/rawify" + } + }, + "node_modules/fresh": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", + "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fs-extra": { + "version": "11.2.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.2.0.tgz", + "integrity": "sha512-PmDi3uwK5nFuXh7XDTlVnS17xJS7vW36is2+w3xcv8SVxiB4NyATf4ctkVY5bkSjX0Y4nbvZCq1/EjtEyr9ktw==", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=14.14" + } + }, + "node_modules/fs-monkey": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.0.5.tgz", + "integrity": "sha512-8uMbBjrhzW76TYgEV27Y5E//W2f/lTFmx78P2w19FZSxarhI/798APGQyuGCwmkNxgwGRhrLfvWyLBvNtuOmew==" + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==" + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-intrinsic": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.2.tgz", + "integrity": "sha512-0gSo4ml/0j98Y3lngkFEot/zhiCeWsbYIlZ+uZOVgzLyLaUw7wxUL+nCTP0XJvJg1AXulJRI3UJi8GsbDuxdGA==", + "dependencies": { + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-own-enumerable-property-symbols": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz", + "integrity": "sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g==" + }, + "node_modules/get-stream": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-8.0.1.tgz", + "integrity": "sha512-VaUJspBffn/LMCJVoMvSAdmscJyS1auj5Zulnn5UoYcY531UWmdwhRWkcGKnGU93m5HSXP9LP2usOryrBtQowA==", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + 
"node_modules/github-slugger": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-1.5.0.tgz", + "integrity": "sha512-wIh+gKBI9Nshz2o46B0B3f5k/W+WI9ZAv6y5Dn5WJ5SK1t0TnDimB4WE5rmTD05ZAIn8HALCZVmCsvj0w0v0lw==" + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/glob-to-regexp": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", + "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==" + }, + "node_modules/global-dirs": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-3.0.1.tgz", + "integrity": "sha512-NBcGGFbBA9s1VzD41QXDG+3++t9Mn5t1FpLdhESY6oKY4gYTFpX4wO3sqGUa0Srjtbfj3szX0RnemmrVRUdULA==", + "dependencies": { + "ini": "2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/global-dirs/node_modules/ini": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ini/-/ini-2.0.0.tgz", + "integrity": "sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==", + "engines": { + "node": ">=10" + } + }, + "node_modules/global-modules": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-2.0.0.tgz", + "integrity": "sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==", + "dependencies": { + "global-prefix": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/global-prefix": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-3.0.0.tgz", + "integrity": "sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg==", + "dependencies": { + "ini": "^1.3.5", + "kind-of": "^6.0.2", + "which": "^1.3.1" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/global-prefix/node_modules/which": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", + "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "which": "bin/which" + } + }, + "node_modules/globals": { + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": 
"sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/gopd": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", + "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==", + "dependencies": { + "get-intrinsic": "^1.1.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/got": { + "version": "14.0.0", + "resolved": "https://registry.npmjs.org/got/-/got-14.0.0.tgz", + "integrity": "sha512-X01vTgaX9SwaMq5DfImvS+3GMQFFs5HtrrlS9CuzUSzkxAf/tWGEyynuI+Qy7BjciMczZGjyVSmawYbP4eYhYA==", + "dependencies": { + "@sindresorhus/is": "^6.1.0", + "@szmarczak/http-timer": "^5.0.1", + "cacheable-lookup": "^7.0.0", + "cacheable-request": "^10.2.14", + "decompress-response": "^6.0.0", + "form-data-encoder": "^4.0.2", + "get-stream": "^8.0.1", + "http2-wrapper": "^2.2.1", + "lowercase-keys": "^3.0.0", + "p-cancelable": "^4.0.1", + "responselike": "^3.0.0" + }, + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sindresorhus/got?sponsor=1" + } + }, + "node_modules/got/node_modules/@sindresorhus/is": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-6.1.0.tgz", + "integrity": "sha512-BuvU07zq3tQ/2SIgBsEuxKYDyDjC0n7Zir52bpHy2xnBbW81+po43aLFPLbeV3HRAheFbGud1qgcqSYfhtHMAg==", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sindresorhus/is?sponsor=1" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==" + }, + "node_modules/gray-matter": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.3.tgz", + "integrity": "sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==", + "dependencies": { + "js-yaml": "^3.13.1", + "kind-of": "^6.0.2", + "section-matter": "^1.0.0", + "strip-bom-string": "^1.0.0" + }, + "engines": { + "node": ">=6.0" + } + }, + "node_modules/gray-matter/node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/gray-matter/node_modules/js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/gzip-size": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/gzip-size/-/gzip-size-6.0.0.tgz", + "integrity": "sha512-ax7ZYomf6jqPTQ4+XCpUGyXKHk5WweS+e05MBO4/y3WJ5RkmPXNKvX+bx1behVILVwr6JSQvZAku021CHPXG3Q==", + "dependencies": { + "duplexer": "^0.1.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/handle-thing": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz", + "integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg==" + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-property-descriptors": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.1.tgz", + "integrity": "sha512-VsX8eaIewvas0xnvinAe9bw4WfIeODpGYikiWYLH+dma0Jw6KHYqWiWfhQlgOVK8D6PvjubK5Uc4P0iIhIcNVg==", + "dependencies": { + "get-intrinsic": "^1.2.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz", + "integrity": "sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-yarn": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-yarn/-/has-yarn-3.0.0.tgz", + "integrity": "sha512-IrsVwUHhEULx3R8f/aA8AHuEzAorplsab/v8HBzEiIukwq5i/EC+xmOW+HfP1OaDP+2JkgT1yILHN2O3UFIbcA==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/hasown": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.0.tgz", + "integrity": "sha512-vUptKVTpIJhcczKBbgnS+RtcuYMB8+oNzPK2/Hp3hanz8JmpATdmmgLgSaadVREkDm+e2giHwY3ZRkyjSIDDFA==", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/hast-util-from-parse5": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-8.0.1.tgz", + "integrity": "sha512-Er/Iixbc7IEa7r/XLtuG52zoqn/b3Xng/w6aZQ0xGVxzhw5xUFxcRqdPzP6yFi/4HBYRaifaI5fQ1RH8n0ZeOQ==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "devlop": "^1.0.0", + "hastscript": "^8.0.0", + "property-information": "^6.0.0", + "vfile": "^6.0.0", + "vfile-location": "^5.0.0", + "web-namespaces": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-parse-selector": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-4.0.0.tgz", + "integrity": "sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A==", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-raw": { + "version": "9.0.1", + "resolved": 
"https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-9.0.1.tgz", + "integrity": "sha512-5m1gmba658Q+lO5uqL5YNGQWeh1MYWZbZmWrM5lncdcuiXuo5E2HT/CIOp0rLF8ksfSwiCVJ3twlgVRyTGThGA==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "@ungap/structured-clone": "^1.0.0", + "hast-util-from-parse5": "^8.0.0", + "hast-util-to-parse5": "^8.0.0", + "html-void-elements": "^3.0.0", + "mdast-util-to-hast": "^13.0.0", + "parse5": "^7.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-estree": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/hast-util-to-estree/-/hast-util-to-estree-3.1.0.tgz", + "integrity": "sha512-lfX5g6hqVh9kjS/B9E2gSkvHH4SZNiQFiqWS0x9fENzEl+8W12RqdRxX6d/Cwxi30tPQs3bIO+aolQJNp1bIyw==", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-attach-comments": "^3.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-object": "^0.4.0", + "unist-util-position": "^5.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-jsx-runtime": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.0.tgz", + "integrity": "sha512-H/y0+IWPdsLLS738P8tDnrQ8Z+dj12zQQ6WC11TIM21C8WFVoIxcqWXf2H3hiTVZjF1AWqoimGwrTWecWrnmRQ==", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-object": "^1.0.0", + "unist-util-position": "^5.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-jsx-runtime/node_modules/inline-style-parser": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.2.tgz", + "integrity": "sha512-EcKzdTHVe8wFVOGEYXiW9WmJXPjqi1T+234YpJr98RiFYKHV3cdy1+3mkTE+KHTHxFFLH51SfaGOoUdW+v7ViQ==" + }, + "node_modules/hast-util-to-jsx-runtime/node_modules/style-to-object": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.5.tgz", + "integrity": "sha512-rDRwHtoDD3UMMrmZ6BzOW0naTjMsVZLIjsGleSKS/0Oz+cgCfAPRspaqJuE8rDzpKha/nEvnM0IF4seEAZUTKQ==", + "dependencies": { + "inline-style-parser": "0.2.2" + } + }, + "node_modules/hast-util-to-parse5": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/hast-util-to-parse5/-/hast-util-to-parse5-8.0.0.tgz", + "integrity": "sha512-3KKrV5ZVI8if87DVSi1vDeByYrkGzg4mEfeu4alwgmmIeARiBLKCZS2uw5Gb6nU9x9Yufyj3iudm6i7nl52PFw==", + "dependencies": { + "@types/hast": "^3.0.0", + 
"comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-whitespace": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz", + "integrity": "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hastscript": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-8.0.0.tgz", + "integrity": "sha512-dMOtzCEd3ABUeSIISmrETiKuyydk1w0pa+gE/uormcTpSYuaNJPbX1NU3JLyscSLjwAQM8bWMhhIlnCqnRvDTw==", + "dependencies": { + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "hast-util-parse-selector": "^4.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "bin": { + "he": "bin/he" + } + }, + "node_modules/history": { + "version": "4.10.1", + "resolved": "https://registry.npmjs.org/history/-/history-4.10.1.tgz", + "integrity": "sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==", + "dependencies": { + "@babel/runtime": "^7.1.2", + "loose-envify": "^1.2.0", + "resolve-pathname": "^3.0.0", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0", + "value-equal": "^1.0.1" + } + }, + "node_modules/hoist-non-react-statics": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz", + "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==", + "dependencies": { + "react-is": "^16.7.0" + } + }, + "node_modules/hpack.js": { + "version": "2.1.6", + "resolved": "https://registry.npmjs.org/hpack.js/-/hpack.js-2.1.6.tgz", + "integrity": "sha512-zJxVehUdMGIKsRaNt7apO2Gqp0BdqW5yaiGHXXmbpvxgBYVZnAql+BJb4RO5ad2MgpbZKn5G6nMnegrH1FcNYQ==", + "dependencies": { + "inherits": "^2.0.1", + "obuf": "^1.0.0", + "readable-stream": "^2.0.1", + "wbuf": "^1.1.0" + } + }, + "node_modules/hpack.js/node_modules/isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" + }, + "node_modules/hpack.js/node_modules/readable-stream": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", + "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/hpack.js/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": 
"https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "node_modules/hpack.js/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/html-entities": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-2.4.0.tgz", + "integrity": "sha512-igBTJcNNNhvZFRtm8uA6xMY6xYleeDwn3PeBCkDz7tHttv4F2hsDI2aPgNERWzvRcNYHNT3ymRaQzllmXj4YsQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/mdevils" + }, + { + "type": "patreon", + "url": "https://patreon.com/mdevils" + } + ] + }, + "node_modules/html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==" + }, + "node_modules/html-minifier-terser": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/html-minifier-terser/-/html-minifier-terser-7.2.0.tgz", + "integrity": "sha512-tXgn3QfqPIpGl9o+K5tpcj3/MN4SfLtsx2GWwBC3SSd0tXQGyF3gsSqad8loJgKZGM3ZxbYDd5yhiBIdWpmvLA==", + "dependencies": { + "camel-case": "^4.1.2", + "clean-css": "~5.3.2", + "commander": "^10.0.0", + "entities": "^4.4.0", + "param-case": "^3.0.4", + "relateurl": "^0.2.7", + "terser": "^5.15.1" + }, + "bin": { + "html-minifier-terser": "cli.js" + }, + "engines": { + "node": "^14.13.1 || >=16.0.0" + } + }, + "node_modules/html-minifier-terser/node_modules/commander": { + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-10.0.1.tgz", + "integrity": "sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==", + "engines": { + "node": ">=14" + } + }, + "node_modules/html-tags": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.3.1.tgz", + "integrity": "sha512-ztqyC3kLto0e9WbNp0aeP+M3kTt+nbaIveGmUxAtZa+8iFgKLUOD4YKM5j+f3QD89bra7UeumolZHKuOXnTmeQ==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/html-void-elements": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/html-void-elements/-/html-void-elements-3.0.0.tgz", + "integrity": "sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/html-webpack-plugin": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/html-webpack-plugin/-/html-webpack-plugin-5.6.0.tgz", + "integrity": "sha512-iwaY4wzbe48AfKLZ/Cc8k0L+FKG6oSNRaZ8x5A/T/IVDGyXcbHncM9TdDa93wn0FsSm82FhTKW7f3vS61thXAw==", + "dependencies": { + "@types/html-minifier-terser": "^6.0.0", + "html-minifier-terser": "^6.0.2", + "lodash": "^4.17.21", + "pretty-error": "^4.0.0", + "tapable": "^2.0.0" + }, + "engines": { + "node": ">=10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/html-webpack-plugin" + }, + "peerDependencies": { + "@rspack/core": "0.x || 1.x", + "webpack": "^5.20.0" + }, + "peerDependenciesMeta": { + "@rspack/core": { + "optional": 
true + }, + "webpack": { + "optional": true + } + } + }, + "node_modules/html-webpack-plugin/node_modules/commander": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", + "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==", + "engines": { + "node": ">= 12" + } + }, + "node_modules/html-webpack-plugin/node_modules/html-minifier-terser": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", + "integrity": "sha512-YXxSlJBZTP7RS3tWnQw74ooKa6L9b9i9QYXY21eUEvhZ3u9XLfv6OnFsQq6RxkhHygsaUMvYsZRV5rU/OVNZxw==", + "dependencies": { + "camel-case": "^4.1.2", + "clean-css": "^5.2.2", + "commander": "^8.3.0", + "he": "^1.2.0", + "param-case": "^3.0.4", + "relateurl": "^0.2.7", + "terser": "^5.10.0" + }, + "bin": { + "html-minifier-terser": "cli.js" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/htmlparser2": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-8.0.2.tgz", + "integrity": "sha512-GYdjWKDkbRLkZ5geuHs5NY1puJ+PXwP7+fHPRz06Eirsb9ugf6d8kkXav6ADhcODhFFPMIXyxkxSuMf3D6NCFA==", + "funding": [ + "https://github.com/fb55/htmlparser2?sponsor=1", + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ], + "dependencies": { + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3", + "domutils": "^3.0.1", + "entities": "^4.4.0" + } + }, + "node_modules/http-cache-semantics": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz", + "integrity": "sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==" + }, + "node_modules/http-deceiver": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz", + "integrity": "sha512-LmpOGxTfbpgtGVxJrj5k7asXHCgNZp5nLfp+hWc8QQRqtb7fUy6kRY3BO1h9ddF6yIPYUARgxGOwB42DnxIaNw==" + }, + "node_modules/http-errors": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", + "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", + "dependencies": { + "depd": "2.0.0", + "inherits": "2.0.4", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "toidentifier": "1.0.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/http-parser-js": { + "version": "0.5.8", + "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.8.tgz", + "integrity": "sha512-SGeBX54F94Wgu5RH3X5jsDtf4eHyRogWX1XGT3b4HuW3tQPM4AaBzoUji/4AAJNXCEOWZ5O0DgZmJw1947gD5Q==" + }, + "node_modules/http-proxy": { + "version": "1.18.1", + "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz", + "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==", + "dependencies": { + "eventemitter3": "^4.0.0", + "follow-redirects": "^1.0.0", + "requires-port": "^1.0.0" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/http-proxy-middleware": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz", + "integrity": "sha512-ya/UeJ6HVBYxrgYotAZo1KvPWlgB48kUJLDePFeneHsVujFaW5WNj2NgWCAE//B1Dl02BIfYlpNgBy8Kf8Rjmw==", + "dependencies": { + "@types/http-proxy": "^1.17.8", + "http-proxy": "^1.18.1", + "is-glob": "^4.0.1", + "is-plain-obj": "^3.0.0", + "micromatch": 
"^4.0.2" + }, + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "@types/express": "^4.17.13" + }, + "peerDependenciesMeta": { + "@types/express": { + "optional": true + } + } + }, + "node_modules/http-proxy-middleware/node_modules/is-plain-obj": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-3.0.0.tgz", + "integrity": "sha512-gwsOE28k+23GP1B6vFl1oVh/WOzmawBrKwo5Ev6wMKzPkaXaCDIQKzLnvsA42DRlbVTWorkgTKIviAKCWkfUwA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/http2-wrapper": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/http2-wrapper/-/http2-wrapper-2.2.1.tgz", + "integrity": "sha512-V5nVw1PAOgfI3Lmeaj2Exmeg7fenjhRUgz1lPSezy1CuhPYbgQtbQj4jZfEAEMlaL+vupsvhjqCyjzob0yxsmQ==", + "dependencies": { + "quick-lru": "^5.1.1", + "resolve-alpn": "^1.2.0" + }, + "engines": { + "node": ">=10.19.0" + } + }, + "node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/icss-utils": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/icss-utils/-/icss-utils-5.1.0.tgz", + "integrity": "sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==", + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/ignore": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.0.tgz", + "integrity": "sha512-g7dmpshy+gD7mh88OC9NwSGTKoc3kyLAZQRU1mt53Aw/vnvfXnbC+F/7F7QoYVKbV+KNvJx8wArewKy1vXMtlg==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/image-size": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/image-size/-/image-size-1.0.2.tgz", + "integrity": "sha512-xfOoWjceHntRb3qFCrh5ZFORYH8XCdYpASltMhZ/Q0KZiOwjdE/Yl2QCiWdwD+lygV5bMCvauzgu5PxBX/Yerg==", + "dependencies": { + "queue": "6.0.2" + }, + "bin": { + "image-size": "bin/image-size.js" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/immer": { + "version": "9.0.21", + "resolved": "https://registry.npmjs.org/immer/-/immer-9.0.21.tgz", + "integrity": "sha512-bc4NBHqOqSfRW7POMkHd51LvClaeMXpm8dx0e8oE2GORbq5aRK7Bxl4FyzVLdGtLmvLKL7BTDBG5ACQm4HWjTA==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/immer" + } + }, + "node_modules/import-fresh": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", + "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/import-lazy": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-4.0.0.tgz", + 
"integrity": "sha512-rKtvo6a868b5Hu3heneU+L4yEQ4jYKLtjpnPeUdK7h0yzXGmyBTypknlkCvHFBqfX9YlorEiMM6Dnq/5atfHkw==", + "engines": { + "node": ">=8" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/infima": { + "version": "0.2.0-alpha.43", + "resolved": "https://registry.npmjs.org/infima/-/infima-0.2.0-alpha.43.tgz", + "integrity": "sha512-2uw57LvUqW0rK/SWYnd/2rRfxNA5DDNOh33jxF7fy46VWoNhGxiUQyVZHbBMjQ33mQem0cjdDVwgWVAmlRfgyQ==", + "engines": { + "node": ">=12" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "node_modules/ini": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==" + }, + "node_modules/inline-style-parser": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.1.1.tgz", + "integrity": "sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q==" + }, + "node_modules/interpret": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.4.0.tgz", + "integrity": "sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/invariant": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", + "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", + "dependencies": { + "loose-envify": "^1.0.0" + } + }, + "node_modules/ipaddr.js": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.1.0.tgz", + "integrity": "sha512-LlbxQ7xKzfBusov6UMi4MFpEg0m+mAm9xyNGEduwXMEDuf4WfzB/RZwMVYEd7IKGvh4IUkEXYxtAVu9T3OelJQ==", + "engines": { + "node": ">= 10" + } + }, + "node_modules/is-alphabetical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz", + "integrity": "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-alphanumerical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz", + "integrity": 
"sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==", + "dependencies": { + "is-alphabetical": "^2.0.0", + "is-decimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==" + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-ci": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-3.0.1.tgz", + "integrity": "sha512-ZYvCgrefwqoQ6yTyYUbQu64HsITZ3NfKX1lzaEYdkTDcfKzzCI/wthRRYKkdjHKFVgNiXKAKm65Zo1pk2as/QQ==", + "dependencies": { + "ci-info": "^3.2.0" + }, + "bin": { + "is-ci": "bin.js" + } + }, + "node_modules/is-core-module": { + "version": "2.13.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.1.tgz", + "integrity": "sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==", + "dependencies": { + "hasown": "^2.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-decimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz", + "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-docker": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", + "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", + "bin": { + "is-docker": "cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-extendable": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", + "integrity": "sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + 
"node_modules/is-hexadecimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz", + "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-installed-globally": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.4.0.tgz", + "integrity": "sha512-iwGqO3J21aaSkC7jWnHP/difazwS7SFeIqxv6wEtLU8Y5KlzFTjyqcSIT0d8s4+dDhKytsk9PJZ2BkS5eZwQRQ==", + "dependencies": { + "global-dirs": "^3.0.0", + "is-path-inside": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-npm": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-6.0.0.tgz", + "integrity": "sha512-JEjxbSmtPSt1c8XTkVrlujcXdKV1/tvuQ7GwKcAlyiVLeYFQ2VHat8xfrDJsIkhCdF/tZ7CiIR3sy141c6+gPQ==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-obj": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-1.0.1.tgz", + "integrity": "sha512-l4RyHgRqGN4Y3+9JHVrNqO+tN0rV5My76uW5/nuO4K1b6vw5G8d/cmFjP9tRfEsdhZNt0IFdZuK/c2Vr4Nb+Qg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-path-cwd": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/is-path-cwd/-/is-path-cwd-2.2.0.tgz", + "integrity": "sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-plain-obj": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", + "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-plain-object": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz", + "integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-reference": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/is-reference/-/is-reference-3.0.2.tgz", + "integrity": "sha512-v3rht/LgVcsdZa3O2Nqs+NMowLOxeOm7Ay9+/ARQ2F+qEoANRcqrjAZKGN0v8ymUetZGgkp26LTnGT7H0Qo9Pg==", + "dependencies": { + "@types/estree": "*" + } + }, + "node_modules/is-regexp": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-regexp/-/is-regexp-1.0.0.tgz", + "integrity": "sha512-7zjFAPO4/gwyQAAgRRmqeEeyIICSdmCqa3tsVHMdBzaXXRiqopZL4Cyghg/XulGWrtABTpbnYYzzIRffLkP4oA==", 
+ "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-root": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-root/-/is-root-2.1.0.tgz", + "integrity": "sha512-AGOriNp96vNBd3HtU+RzFEc75FfR5ymiYv8E553I71SCeXBiMsVDUtdio1OEFvrPyLIQ9tVR5RxXIFe5PUFjMg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-typedarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", + "integrity": "sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==" + }, + "node_modules/is-wsl": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", + "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", + "dependencies": { + "is-docker": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-yarn-global": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/is-yarn-global/-/is-yarn-global-0.4.1.tgz", + "integrity": "sha512-/kppl+R+LO5VmhYSEWARUFjodS25D68gvj8W7z0I7OWhUla5xWu8KL6CtB2V0R6yqhnRgbcaREMr4EEM6htLPQ==", + "engines": { + "node": ">=12" + } + }, + "node_modules/isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==" + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" + }, + "node_modules/isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz", + "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==", + "dependencies": { + "@types/node": "*", + "jest-util": "^29.7.0", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dependencies": { + 
"has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/jiti": { + "version": "1.21.0", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.0.tgz", + "integrity": "sha512-gFqAIbuKyyso/3G2qhiO2OM6shY6EPP/R0+mkDbyspxKazh8BXDC5FiFsUjlczgdNz/vfra0da2y+aHrusLG/Q==", + "bin": { + "jiti": "bin/jiti.js" + } + }, + "node_modules/joi": { + "version": "17.11.0", + "resolved": "https://registry.npmjs.org/joi/-/joi-17.11.0.tgz", + "integrity": "sha512-NgB+lZLNoqISVy1rZocE9PZI36bL/77ie924Ri43yEvi9GUUMPeyVIr8KdFTMUlby1p0PBYMk9spIxEUQYqrJQ==", + "dependencies": { + "@hapi/hoek": "^9.0.0", + "@hapi/topo": "^5.0.0", + "@sideway/address": "^4.1.3", + "@sideway/formula": "^3.0.1", + "@sideway/pinpoint": "^2.0.0" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsesc": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", + "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==" + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==" + }, + "node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/jsonfile": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", + "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/kind-of": { + "version": "6.0.3", + 
"resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "engines": { + "node": ">=6" + } + }, + "node_modules/latest-version": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-7.0.0.tgz", + "integrity": "sha512-KvNT4XqAMzdcL6ka6Tl3i2lYeFDgXNCuIX+xNx6ZMVR1dFq+idXd9FLKNMOIx0t9mJ9/HudyX4oZWXZQ0UJHeg==", + "dependencies": { + "package-json": "^8.1.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/launch-editor": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/launch-editor/-/launch-editor-2.6.1.tgz", + "integrity": "sha512-eB/uXmFVpY4zezmGp5XtU21kwo7GBbKB+EQ+UZeWtGb9yAM5xt/Evk+lYH3eRNAtId+ej4u7TYPFZ07w4s7rRw==", + "dependencies": { + "picocolors": "^1.0.0", + "shell-quote": "^1.8.1" + } + }, + "node_modules/leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "engines": { + "node": ">=6" + } + }, + "node_modules/lilconfig": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz", + "integrity": "sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==", + "engines": { + "node": ">=10" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==" + }, + "node_modules/loader-runner": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.0.tgz", + "integrity": "sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==", + "engines": { + "node": ">=6.11.5" + } + }, + "node_modules/loader-utils": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.4.tgz", + "integrity": "sha512-xXqpXoINfFhgua9xiqD8fPFHgkoq1mmmpE92WlDbm9rNRd/EbRb+Gqf908T2DMfuHjjJlksiK2RbHVOdD/MqSw==", + "dependencies": { + "big.js": "^5.2.2", + "emojis-list": "^3.0.0", + "json5": "^2.1.2" + }, + "engines": { + "node": ">=8.9.0" + } + }, + "node_modules/locate-path": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-7.2.0.tgz", + "integrity": "sha512-gvVijfZvn7R+2qyPX8mAuKcFGDf6Nc61GdvGafQsHL0sBIxfKzA+usWn4GFC/bk+QdwPUD4kWFJLhElipq+0VA==", + "dependencies": { + "p-locate": "^6.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + }, + "node_modules/lodash.debounce": { + "version": "4.0.8", + "resolved": 
"https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", + "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==" + }, + "node_modules/lodash.memoize": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", + "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==" + }, + "node_modules/lodash.uniq": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz", + "integrity": "sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ==" + }, + "node_modules/longest-streak": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz", + "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lower-case": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/lower-case/-/lower-case-2.0.2.tgz", + "integrity": "sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==", + "dependencies": { + "tslib": "^2.0.3" + } + }, + "node_modules/lowercase-keys": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-3.0.0.tgz", + "integrity": "sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/markdown-extensions": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/markdown-extensions/-/markdown-extensions-2.0.0.tgz", + "integrity": "sha512-o5vL7aDWatOTX8LzaS1WMoaoxIiLRQJuIKKe2wAw6IeULDHaqbiqiggmx+pKvZDb1Sj+pE46Sn1T7lCqfFtg1Q==", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/markdown-table": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.3.tgz", + "integrity": "sha512-Z1NL3Tb1M9wH4XESsCDEksWoKTdlUafKc4pt0GRwjUyXaCFZ+dc3g2erqB6zm3szA2IUSi7VnPI+o/9jnxh9hw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/mdast-util-directive": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-directive/-/mdast-util-directive-3.0.0.tgz", + "integrity": "sha512-JUpYOqKI4mM3sZcNxmF/ox04XYFFkNwr0CFlrQIkCwbvH0xzMCqkMqAde9wRd80VAhaUrwFwKm2nxretdT1h7Q==", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + 
"mdast-util-to-markdown": "^2.0.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-find-and-replace": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.1.tgz", + "integrity": "sha512-SG21kZHGC3XRTSUhtofZkBzZTJNM5ecCi0SK2IMKmSXR8vO3peL+kb1O0z7Zl83jKtutG4k5Wv/W7V3/YHvzPA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "escape-string-regexp": "^5.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-find-and-replace/node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mdast-util-from-markdown": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.0.tgz", + "integrity": "sha512-n7MTOr/z+8NAX/wmhhDji8O3bRvPTV/U0oTCaZJkjhPSKTPhS3xufVhKGF8s1pJ7Ox4QgoIU7KHseh09S+9rTA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark": "^4.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-from-markdown/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/mdast-util-frontmatter": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-frontmatter/-/mdast-util-frontmatter-2.0.1.tgz", + "integrity": "sha512-LRqI9+wdgC25P0URIJY9vwocIzCcksduHQ9OF2joxQoyTNVduwLAFUzjoopuRJbJAReaKrNQKAZKL3uCMugWJA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "escape-string-regexp": "^5.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "micromark-extension-frontmatter": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-frontmatter/node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "engines": { + "node": 
">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mdast-util-gfm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-3.0.0.tgz", + "integrity": "sha512-dgQEX5Amaq+DuUqf26jJqSK9qgixgd6rYDHAv4aTBuA92cTknZlKpPfa86Z/s8Dj8xsAQpFfBmPUHWJBWqS4Bw==", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-gfm-autolink-literal": "^2.0.0", + "mdast-util-gfm-footnote": "^2.0.0", + "mdast-util-gfm-strikethrough": "^2.0.0", + "mdast-util-gfm-table": "^2.0.0", + "mdast-util-gfm-task-list-item": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-autolink-literal": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.0.tgz", + "integrity": "sha512-FyzMsduZZHSc3i0Px3PQcBT4WJY/X/RCtEJKuybiC6sjPqLv7h1yqAkmILZtuxMSsUyaLUWNp71+vQH2zqp5cg==", + "dependencies": { + "@types/mdast": "^4.0.0", + "ccount": "^2.0.0", + "devlop": "^1.0.0", + "mdast-util-find-and-replace": "^3.0.0", + "micromark-util-character": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-autolink-literal/node_modules/micromark-util-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.0.1.tgz", + "integrity": "sha512-3wgnrmEAJ4T+mGXAUfMvMAbxU9RDG43XmGce4j6CwPtVxB3vfwXSZ6KhFwDzZ3mZHhmPimMAXg71veiBGzeAZw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/mdast-util-gfm-autolink-literal/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/mdast-util-gfm-footnote": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.0.0.tgz", + "integrity": "sha512-5jOT2boTSVkMnQ7LTrd6n/18kqwjmuYqo7JUPe+tRCY6O7dAuTFMtTPauYYrMPpox9hlN0uOx/FL8XvEfG9/mQ==", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-strikethrough": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-2.0.0.tgz", + "integrity": "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-table": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-2.0.0.tgz", + "integrity": "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "markdown-table": "^3.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-task-list-item": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-2.0.0.tgz", + "integrity": "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-mdx/-/mdast-util-mdx-3.0.0.tgz", + "integrity": "sha512-JfbYLAW7XnYTTbUsmpu0kdBUVe+yKVJZBItEjwyYJiDJuZ9w4eeaqks4HQO+R7objWgS2ymV60GYpI14Ug554w==", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-expression": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.0.tgz", + "integrity": "sha512-fGCu8eWdKUKNu5mohVGkhBXCXGnOTLuFqOvGMvdikr+J1w7lDJgxThOKpwRWzzbyXAU2hhSwsmssOY4yTokluw==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-jsx": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.0.0.tgz", + "integrity": "sha512-XZuPPzQNBPAlaqsTTgRrcJnyFbSOBovSadFgbFu8SnuNgm+6Bdx1K+IWoitsmj6Lq6MNtI+ytOqwN70n//NaBA==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "ccount": "^2.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-remove-position": "^5.0.0", + "unist-util-stringify-position": "^4.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdxjs-esm": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz", + "integrity": "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + 
"mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-phrasing": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.0.0.tgz", + "integrity": "sha512-xadSsJayQIucJ9n053dfQwVu1kuXg7jCTdYsMK8rqzKZh52nLfSH/k0sAxE0u+pj/zKZX+o5wB+ML5mRayOxFA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-hast": { + "version": "13.0.2", + "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.0.2.tgz", + "integrity": "sha512-U5I+500EOOw9e3ZrclN3Is3fRpw8c19SMyNZlZ2IS+7vLsNzb2Om11VpIVOR+/0137GhZsFEF6YiKD5+0Hr2Og==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@ungap/structured-clone": "^1.0.0", + "devlop": "^1.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "trim-lines": "^3.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-markdown": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.0.tgz", + "integrity": "sha512-SR2VnIEdVNCJbP6y7kVTJgPLifdr8WEU440fQec7qHoHOUz/oJ2jmNRqdDQ3rbiStOXb2mCDGTuwsK5OPUgYlQ==", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "longest-streak": "^3.0.0", + "mdast-util-phrasing": "^4.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark-util-decode-string": "^2.0.0", + "unist-util-visit": "^5.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz", + "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==", + "dependencies": { + "@types/mdast": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdn-data": { + "version": "2.0.14", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.14.tgz", + "integrity": "sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow==" + }, + "node_modules/media-typer": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/memfs": { + "version": "3.5.3", + "resolved": "https://registry.npmjs.org/memfs/-/memfs-3.5.3.tgz", + "integrity": "sha512-UERzLsxzllchadvbPs5aolHh65ISpKpM+ccLbOJ8/vvpBKmAWf+la7dXFy7Mr0ySHbdHrFv5kGFCUHHe6GFEmw==", + "dependencies": { + "fs-monkey": "^1.0.4" + }, + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/merge-descriptors": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", + "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==" + }, + "node_modules/merge-stream": { + "version": "2.0.0", + 
"resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==" + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/methods": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", + "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/micromark": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/micromark/-/micromark-4.0.0.tgz", + "integrity": "sha512-o/sd0nMof8kYff+TqcDx3VSrgBTcZpSvYcAHIfHhv5VAuNmisCxjhx6YmxS8PFEpb9z5WKWKPdzf0jM23ro3RQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "@types/debug": "^4.0.0", + "debug": "^4.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.0.tgz", + "integrity": "sha512-jThOz/pVmAYUtkroV3D5c1osFXAMv9e0ypGDOIZuCeAe91/sD6BoE2Sjzt30yuXtwOYUmySOhMas/PVyh02itA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-destination": "^2.0.0", + "micromark-factory-label": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-factory-title": "^2.0.0", + "micromark-factory-whitespace": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-html-tag-name": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" 
+ } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark/node_modules/micromark-util-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.0.1.tgz", + "integrity": "sha512-3wgnrmEAJ4T+mGXAUfMvMAbxU9RDG43XmGce4j6CwPtVxB3vfwXSZ6KhFwDzZ3mZHhmPimMAXg71veiBGzeAZw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-directive": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-directive/-/micromark-extension-directive-3.0.0.tgz", + "integrity": "sha512-61OI07qpQrERc+0wEysLHMvoiO3s2R56x5u7glHq2Yqq6EHbH4dW25G9GfDdGCDYqA21KE6DWgNSzxSwHc2hSg==", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-factory-whitespace": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "parse-entities": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-directive/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-directive/node_modules/micromark-util-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.0.1.tgz", + "integrity": "sha512-3wgnrmEAJ4T+mGXAUfMvMAbxU9RDG43XmGce4j6CwPtVxB3vfwXSZ6KhFwDzZ3mZHhmPimMAXg71veiBGzeAZw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-directive/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": 
"https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-frontmatter": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-frontmatter/-/micromark-extension-frontmatter-2.0.0.tgz", + "integrity": "sha512-C4AkuM3dA58cgZha7zVnuVxBhDsbttIMiytjgsM2XbHAB2faRVaHRle40558FBN+DJcrLNCoqG5mlrpdU4cRtg==", + "dependencies": { + "fault": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-frontmatter/node_modules/micromark-util-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.0.1.tgz", + "integrity": "sha512-3wgnrmEAJ4T+mGXAUfMvMAbxU9RDG43XmGce4j6CwPtVxB3vfwXSZ6KhFwDzZ3mZHhmPimMAXg71veiBGzeAZw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-frontmatter/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-gfm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm/-/micromark-extension-gfm-3.0.0.tgz", + "integrity": "sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==", + "dependencies": { + "micromark-extension-gfm-autolink-literal": "^2.0.0", + "micromark-extension-gfm-footnote": "^2.0.0", + "micromark-extension-gfm-strikethrough": "^2.0.0", + "micromark-extension-gfm-table": "^2.0.0", + "micromark-extension-gfm-tagfilter": "^2.0.0", + "micromark-extension-gfm-task-list-item": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.0.0.tgz", + "integrity": "sha512-rTHfnpt/Q7dEAK1Y5ii0W8bhfJlVJFnJMHIPisfPK3gpVNuOP0VnRl96+YJ3RYWV/P4gFeQoGKNlT3RhuvpqAg==", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal/node_modules/micromark-util-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.0.1.tgz", + "integrity": 
"sha512-3wgnrmEAJ4T+mGXAUfMvMAbxU9RDG43XmGce4j6CwPtVxB3vfwXSZ6KhFwDzZ3mZHhmPimMAXg71veiBGzeAZw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-gfm-footnote": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.0.0.tgz", + "integrity": "sha512-6Rzu0CYRKDv3BfLAUnZsSlzx3ak6HAoI85KTiijuKIz5UxZxbUI+pD6oHgw+6UtQuiRwnGRhzMmPRv4smcz0fg==", + "dependencies": { + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-footnote/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-footnote/node_modules/micromark-util-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.0.1.tgz", + "integrity": "sha512-3wgnrmEAJ4T+mGXAUfMvMAbxU9RDG43XmGce4j6CwPtVxB3vfwXSZ6KhFwDzZ3mZHhmPimMAXg71veiBGzeAZw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-footnote/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-gfm-strikethrough": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-2.0.0.tgz", + "integrity": "sha512-c3BR1ClMp5fxxmwP6AoOY2fXO9U8uFMKs4ADD66ahLTNcwzSCyRVU4k7LPV5Nxo/VJiR4TdzxRQY2v3qIUceCw==", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-strikethrough/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-gfm-table": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.0.0.tgz", + "integrity": "sha512-PoHlhypg1ItIucOaHmKE8fbin3vTLpDOUg8KAr8gRCF1MOZI9Nquq2i/44wFvviM4WuxJzc3demT8Y3dkfvYrw==", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-table/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-table/node_modules/micromark-util-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.0.1.tgz", + "integrity": "sha512-3wgnrmEAJ4T+mGXAUfMvMAbxU9RDG43XmGce4j6CwPtVxB3vfwXSZ6KhFwDzZ3mZHhmPimMAXg71veiBGzeAZw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-table/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-gfm-tagfilter": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-2.0.0.tgz", + "integrity": "sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==", + "dependencies": { + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-task-list-item": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-2.0.1.tgz", + "integrity": "sha512-cY5PzGcnULaN5O7T+cOzfMoHjBW7j+T9D2sucA5d/KbsBTPcYdebm9zUd9zzdgJGCwahV+/W78Z3nbulBYVbTw==", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-task-list-item/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-task-list-item/node_modules/micromark-util-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.0.1.tgz", + "integrity": "sha512-3wgnrmEAJ4T+mGXAUfMvMAbxU9RDG43XmGce4j6CwPtVxB3vfwXSZ6KhFwDzZ3mZHhmPimMAXg71veiBGzeAZw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-task-list-item/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-mdx-expression": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdx-expression/-/micromark-extension-mdx-expression-3.0.0.tgz", + "integrity": "sha512-sI0nwhUDz97xyzqJAbHQhp5TfaxEvZZZ2JDqUo+7NvyIYG6BZ5CPPqj2ogUoPJlmXHBnyZUzISg9+oUmU6tUjQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-mdx-expression": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + 
"micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-expression/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-expression/node_modules/micromark-util-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.0.1.tgz", + "integrity": "sha512-3wgnrmEAJ4T+mGXAUfMvMAbxU9RDG43XmGce4j6CwPtVxB3vfwXSZ6KhFwDzZ3mZHhmPimMAXg71veiBGzeAZw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-expression/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-mdx-jsx": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdx-jsx/-/micromark-extension-mdx-jsx-3.0.0.tgz", + "integrity": "sha512-uvhhss8OGuzR4/N17L1JwvmJIpPhAd8oByMawEKx6NVdBCbesjH4t+vjEp3ZXft9DwvlKSD07fCeI44/N0Vf2w==", + "dependencies": { + "@types/acorn": "^4.0.0", + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "micromark-factory-mdx-expression": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdx-jsx/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-jsx/node_modules/micromark-util-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.0.1.tgz", + "integrity": 
"sha512-3wgnrmEAJ4T+mGXAUfMvMAbxU9RDG43XmGce4j6CwPtVxB3vfwXSZ6KhFwDzZ3mZHhmPimMAXg71veiBGzeAZw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-jsx/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-extension-mdx-md": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdx-md/-/micromark-extension-mdx-md-2.0.0.tgz", + "integrity": "sha512-EpAiszsB3blw4Rpba7xTOUptcFeBFi+6PY8VnJ2hhimH+vCQDirWgsMpz7w1XcZE7LVrSAUGb9VJpG9ghlYvYQ==", + "dependencies": { + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdxjs": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdxjs/-/micromark-extension-mdxjs-3.0.0.tgz", + "integrity": "sha512-A873fJfhnJ2siZyUrJ31l34Uqwy4xIFmvPY1oj+Ean5PHcPBYzEsvqvWGaWcfEIr11O5Dlw3p2y0tZWpKHDejQ==", + "dependencies": { + "acorn": "^8.0.0", + "acorn-jsx": "^5.0.0", + "micromark-extension-mdx-expression": "^3.0.0", + "micromark-extension-mdx-jsx": "^3.0.0", + "micromark-extension-mdx-md": "^2.0.0", + "micromark-extension-mdxjs-esm": "^3.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdxjs-esm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdxjs-esm/-/micromark-extension-mdxjs-esm-3.0.0.tgz", + "integrity": "sha512-DJFl4ZqkErRpq/dAPyeWp15tGrcrrJho1hKK5uBS70BCtfrIFg81sqcTVu3Ta+KD1Tk5vAtBNElWxtAa+m8K9A==", + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-position-from-estree": "^2.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdxjs-esm/node_modules/micromark-util-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.0.1.tgz", + "integrity": "sha512-3wgnrmEAJ4T+mGXAUfMvMAbxU9RDG43XmGce4j6CwPtVxB3vfwXSZ6KhFwDzZ3mZHhmPimMAXg71veiBGzeAZw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdxjs-esm/node_modules/micromark-util-symbol": { + "version": "2.0.0", + 
"resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-factory-destination": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.0.tgz", + "integrity": "sha512-j9DGrQLm/Uhl2tCzcbLhy5kXsgkHUrjJHg4fFAeoMRwJmJerT9aw4FEhIbZStWN8A3qMwOp1uzHr4UL8AInxtA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-destination/node_modules/micromark-util-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.0.1.tgz", + "integrity": "sha512-3wgnrmEAJ4T+mGXAUfMvMAbxU9RDG43XmGce4j6CwPtVxB3vfwXSZ6KhFwDzZ3mZHhmPimMAXg71veiBGzeAZw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-destination/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-factory-label": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-2.0.0.tgz", + "integrity": "sha512-RR3i96ohZGde//4WSe/dJsxOX6vxIg9TimLAS3i4EhBAFx8Sm5SmqVfR8E87DPSR31nEAjZfbt91OMZWcNgdZw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-label/node_modules/micromark-util-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.0.1.tgz", + "integrity": "sha512-3wgnrmEAJ4T+mGXAUfMvMAbxU9RDG43XmGce4j6CwPtVxB3vfwXSZ6KhFwDzZ3mZHhmPimMAXg71veiBGzeAZw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-label/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-factory-mdx-expression": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-mdx-expression/-/micromark-factory-mdx-expression-2.0.1.tgz", + "integrity": "sha512-F0ccWIUHRLRrYp5TC9ZYXmZo+p2AM13ggbsW4T0b5CRKP8KHVRB8t4pwtBgTxtjRmwrK0Irwm7vs2JOZabHZfg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-position-from-estree": "^2.0.0", + "vfile-message": "^4.0.0" + } + }, + "node_modules/micromark-factory-mdx-expression/node_modules/micromark-util-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.0.1.tgz", + "integrity": "sha512-3wgnrmEAJ4T+mGXAUfMvMAbxU9RDG43XmGce4j6CwPtVxB3vfwXSZ6KhFwDzZ3mZHhmPimMAXg71veiBGzeAZw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-mdx-expression/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-factory-space": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-1.1.0.tgz", + "integrity": "sha512-cRzEj7c0OL4Mw2v6nwzttyOZe8XY/Z8G0rzmWQZTBi/jjwyw/U4uqKtUORXQrR5bAZZnbTI/feRV/R7hc4jQYQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-factory-space/node_modules/micromark-util-types": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-1.1.0.tgz", + "integrity": "sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-factory-title": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-2.0.0.tgz", + "integrity": "sha512-jY8CSxmpWLOxS+t8W+FG3Xigc0RDQA9bKMY/EwILvsesiRniiVMejYTE4wumNc2f4UbAa4WsHqe3J1QS1sli+A==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title/node_modules/micromark-util-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.0.1.tgz", + "integrity": "sha512-3wgnrmEAJ4T+mGXAUfMvMAbxU9RDG43XmGce4j6CwPtVxB3vfwXSZ6KhFwDzZ3mZHhmPimMAXg71veiBGzeAZw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-factory-whitespace": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.0.tgz", + "integrity": "sha512-28kbwaBjc5yAI1XadbdPYHX/eDnqaUFVikLwrO7FDnKG7lpgxnvk/XGRhX/PN0mOZ+dBSZ+LgunHS+6tYQAzhA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + 
"micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace/node_modules/micromark-util-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.0.1.tgz", + "integrity": "sha512-3wgnrmEAJ4T+mGXAUfMvMAbxU9RDG43XmGce4j6CwPtVxB3vfwXSZ6KhFwDzZ3mZHhmPimMAXg71veiBGzeAZw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-character": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-1.2.0.tgz", + "integrity": "sha512-lXraTwcX3yH/vMDaFWCQJP1uIszLVebzUa3ZHdrgxr7KEU/9mL4mVgCpGbyhvNLNlauROiNUq7WN5u7ndbY6xg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-util-character/node_modules/micromark-util-types": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-1.1.0.tgz", + "integrity": "sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-chunked": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-2.0.0.tgz", + "integrity": "sha512-anK8SWmNphkXdaKgz5hJvGa7l00qmcaUQoMYsBwDlSKFKjc6gjGXPDw3FNL3Nbwq5L8gE+RCbGqTw49FK5Qyvg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-chunked/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-classify-character": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-2.0.0.tgz", + "integrity": 
"sha512-S0ze2R9GH+fu41FA7pbSqNWObo/kzwf8rN/+IGlW/4tC6oACOs8B++bh+i9bVyNnwCcuksbFwsBme5OCKXCwIw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-classify-character/node_modules/micromark-util-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.0.1.tgz", + "integrity": "sha512-3wgnrmEAJ4T+mGXAUfMvMAbxU9RDG43XmGce4j6CwPtVxB3vfwXSZ6KhFwDzZ3mZHhmPimMAXg71veiBGzeAZw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-classify-character/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-combine-extensions": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.0.tgz", + "integrity": "sha512-vZZio48k7ON0fVS3CUgFatWHoKbbLTK/rT7pzpJ4Bjp5JjkZeasRfrS9wsBdDJK2cJLHMckXZdzPSSr1B8a4oQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-chunked": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-numeric-character-reference": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.1.tgz", + "integrity": "sha512-bmkNc7z8Wn6kgjZmVHOX3SowGmVdhYS7yBpMnuMnPzDq/6xwVA604DuOXMZTO1lvq01g+Adfa0pE2UKGlxL1XQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-numeric-character-reference/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-decode-string": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-2.0.0.tgz", + "integrity": 
"sha512-r4Sc6leeUTn3P6gk20aFMj2ntPwn6qpDZqWvYmAG6NgvFTIlj4WtrAudLi65qYoaGdXYViXYw2pkmn7QnIFasA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-string/node_modules/micromark-util-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.0.1.tgz", + "integrity": "sha512-3wgnrmEAJ4T+mGXAUfMvMAbxU9RDG43XmGce4j6CwPtVxB3vfwXSZ6KhFwDzZ3mZHhmPimMAXg71veiBGzeAZw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-string/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-encode": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.0.tgz", + "integrity": "sha512-pS+ROfCXAGLWCOc8egcBvT0kf27GoWMqtdarNfDcjb6YLuV5cM3ioG45Ys2qOVqeqSbjaKg72vU+Wby3eddPsA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-events-to-acorn": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-events-to-acorn/-/micromark-util-events-to-acorn-2.0.2.tgz", + "integrity": "sha512-Fk+xmBrOv9QZnEDguL9OI9/NQQp6Hz4FuQ4YmCb/5V7+9eAh1s6AYSvL20kHkD67YIg7EpE54TiSlcsf3vyZgA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "@types/acorn": "^4.0.0", + "@types/estree": "^1.0.0", + "@types/unist": "^3.0.0", + "devlop": "^1.0.0", + "estree-util-visit": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "vfile-message": "^4.0.0" + } + }, + "node_modules/micromark-util-events-to-acorn/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-html-tag-name": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.0.tgz", + "integrity": "sha512-xNn4Pqkj2puRhKdKTm8t1YHC/BAjx6CEwRFXntTaRf/x16aqka6ouVoutm+QdkISTlT7e2zU7U4ZdlDLJd2Mcw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-normalize-identifier": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.0.tgz", + "integrity": "sha512-2xhYT0sfo85FMrUPtHcPo2rrp1lwbDEEzpx7jiH2xXJLqBuy4H0GgXk5ToU8IEwoROtXuL8ND0ttVa4rNqYK3w==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-normalize-identifier/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-resolve-all": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.0.tgz", + "integrity": "sha512-6KU6qO7DZ7GJkaCgwBNtplXCvGkJToU86ybBAUdavvgsCiG8lSSvYxr9MhwmQ+udpzywHsl4RpGJsYWG1pDOcA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.0.tgz", + "integrity": "sha512-WhYv5UEcZrbAtlsnPuChHUAsu/iBPOVaEVsntLBIdpibO0ddy8OzavZz3iL2xVvBZOpolujSliP65Kq0/7KIYw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri/node_modules/micromark-util-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.0.1.tgz", + "integrity": "sha512-3wgnrmEAJ4T+mGXAUfMvMAbxU9RDG43XmGce4j6CwPtVxB3vfwXSZ6KhFwDzZ3mZHhmPimMAXg71veiBGzeAZw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": 
"sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-subtokenize": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.0.0.tgz", + "integrity": "sha512-vc93L1t+gpR3p8jxeVdaYlbV2jTYteDje19rNSS/H5dlhxUYll5Fy6vJ2cDwP8RnsXi818yGty1ayP55y3W6fg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-subtokenize/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-symbol": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-1.1.0.tgz", + "integrity": "sha512-uEjpEYY6KMs1g7QfJ2eX1SQEV+ZT4rUD3UcF6l57acZvLNK7PBZL+ty82Z1qhK1/yXIY4bdx04FKMgR0g4IAag==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-types": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.0.tgz", + "integrity": "sha512-oNh6S2WMHWRZrmutsRmDDfkzKtxF+bc2VxLC9dvtrDIRFln627VsFP6fLMgTryGDljgLPjkrzQSDcPrjPyDJ5w==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark/node_modules/micromark-factory-space": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz", + "integrity": "sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark/node_modules/micromark-util-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.0.1.tgz", + "integrity": "sha512-3wgnrmEAJ4T+mGXAUfMvMAbxU9RDG43XmGce4j6CwPtVxB3vfwXSZ6KhFwDzZ3mZHhmPimMAXg71veiBGzeAZw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + 
"micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromatch": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", + "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "dependencies": { + "braces": "^3.0.2", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mime": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/mime-db": { + "version": "1.33.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz", + "integrity": "sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.18", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz", + "integrity": "sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==", + "dependencies": { + "mime-db": "~1.33.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/mimic-response": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-4.0.0.tgz", + "integrity": "sha512-e5ISH9xMYU0DzrT+jl8q2ze9D6eWBto+I8CNpe+VI+K2J/F/k3PdkdTdz4wvGVH4NTpo+NRYTVIuMQEMMcsLqg==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mini-css-extract-plugin": { + "version": "2.7.6", + "resolved": "https://registry.npmjs.org/mini-css-extract-plugin/-/mini-css-extract-plugin-2.7.6.tgz", + "integrity": "sha512-Qk7HcgaPkGG6eD77mLvZS1nmxlao3j+9PkrT9Uc7HAE1id3F41+DdBRYRYkbyfNRGzm8/YWtzhw7nVPmwhqTQw==", + "dependencies": { + "schema-utils": "^4.0.0" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.0.0" + } + }, + "node_modules/minimalistic-assert": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", + "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==" + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + 
"dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/mrmime": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mrmime/-/mrmime-2.0.0.tgz", + "integrity": "sha512-eu38+hdgojoyq63s+yTpN4XMBdt5l8HhMhc4VKLO9KM5caLIBvUm4thi7fFaxyTmCKeNnXZ5pAlBwCUnhA09uw==", + "engines": { + "node": ">=10" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "node_modules/multicast-dns": { + "version": "7.2.5", + "resolved": "https://registry.npmjs.org/multicast-dns/-/multicast-dns-7.2.5.tgz", + "integrity": "sha512-2eznPJP8z2BFLX50tf0LuODrpINqP1RVIm/CObbTcBRITQgmC/TjcREF1NeTBzIcR5XO/ukWo+YHOjBbFwIupg==", + "dependencies": { + "dns-packet": "^5.2.2", + "thunky": "^1.0.2" + }, + "bin": { + "multicast-dns": "cli.js" + } + }, + "node_modules/nanoid": { + "version": "3.3.7", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", + "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/negotiator": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/neo-async": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", + "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==" + }, + "node_modules/no-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/no-case/-/no-case-3.0.4.tgz", + "integrity": "sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==", + "dependencies": { + "lower-case": "^2.0.2", + "tslib": "^2.0.3" + } + }, + "node_modules/node-emoji": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/node-emoji/-/node-emoji-2.1.3.tgz", + "integrity": "sha512-E2WEOVsgs7O16zsURJ/eH8BqhF029wGpEOnv7Urwdo2wmQanOACwJQh0devF9D9RhoZru0+9JXIS0dBXIAz+lA==", + "dependencies": { + "@sindresorhus/is": "^4.6.0", + "char-regex": "^1.0.2", + "emojilib": "^2.4.0", + "skin-tone": "^2.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/node-forge": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-1.3.1.tgz", + "integrity": "sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==", + "engines": { + "node": ">= 6.13.0" + } + }, + "node_modules/node-releases": { + "version": "2.0.14", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.14.tgz", + "integrity": "sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw==" + }, + 
"node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/normalize-range": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz", + "integrity": "sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/normalize-url": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-6.1.0.tgz", + "integrity": "sha512-DlL+XwOy3NxAQ8xuC0okPgK46iuVNAK01YN7RueYBqqFeGsBjV9XmCAzAdgt+667bCl5kPh9EqKKDwnaPG1I7A==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/nprogress": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/nprogress/-/nprogress-0.2.0.tgz", + "integrity": "sha512-I19aIingLgR1fmhftnbWWO3dXc0hSxqHQHQb3H8m+K3TnEn/iSeTZZOyvKXWqQESMwuUVnatlCnZdLBZZt2VSA==" + }, + "node_modules/nth-check": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz", + "integrity": "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==", + "dependencies": { + "boolbase": "^1.0.0" + }, + "funding": { + "url": "https://github.com/fb55/nth-check?sponsor=1" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-inspect": { + "version": "1.13.1", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz", + "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object-keys": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.assign": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.5.tgz", + "integrity": "sha512-byy+U7gp+FVwmyzKPYhW2h5l3crpmGsxl7X2s8y43IgxvG4g3QZ6CffDtsNQy1WsmZpQbO+ybo0AlW7TY6DcBQ==", + "dependencies": { + "call-bind": "^1.0.5", + "define-properties": "^1.2.1", + "has-symbols": "^1.0.3", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/obuf": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz", + "integrity": 
"sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==" + }, + "node_modules/on-finished": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", + "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "dependencies": { + "ee-first": "1.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/on-headers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz", + "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/open": { + "version": "8.4.2", + "resolved": "https://registry.npmjs.org/open/-/open-8.4.2.tgz", + "integrity": "sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==", + "dependencies": { + "define-lazy-prop": "^2.0.0", + "is-docker": "^2.1.1", + "is-wsl": "^2.2.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/opener": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/opener/-/opener-1.5.2.tgz", + "integrity": "sha512-ur5UIdyw5Y7yEj9wLzhqXiy6GZ3Mwx0yGI+5sMn2r0N0v3cKJvUmFH5yPP+WXh9e0xfyzyJX95D8l088DNFj7A==", + "bin": { + "opener": "bin/opener-bin.js" + } + }, + "node_modules/p-cancelable": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-4.0.1.tgz", + "integrity": "sha512-wBowNApzd45EIKdO1LaU+LrMBwAcjfPaYtVzV3lmfM3gf8Z4CHZsiIqlM8TZZ8okYvh5A1cP6gTfCRQtwUpaUg==", + "engines": { + "node": ">=14.16" + } + }, + "node_modules/p-limit": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-4.0.0.tgz", + "integrity": "sha512-5b0R4txpzjPWVw/cXXUResoD4hb6U/x9BH08L7nw+GN1sezDzPdxeRvpc9c433fZhBan/wusjbCsqwqm4EIBIQ==", + "dependencies": { + "yocto-queue": "^1.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-6.0.0.tgz", + "integrity": "sha512-wPrq66Llhl7/4AGC6I+cqxT07LhXvWL08LNXz1fENOw0Ap4sRZZ/gZpTTJ5jpurzzzfS2W/Ge9BY3LgLjCShcw==", + "dependencies": { + "p-limit": "^4.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-map": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", + "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", + "dependencies": { + "aggregate-error": "^3.0.0" 
+ }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-retry": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz", + "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==", + "dependencies": { + "@types/retry": "0.12.0", + "retry": "^0.13.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/package-json": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/package-json/-/package-json-8.1.1.tgz", + "integrity": "sha512-cbH9IAIJHNj9uXi196JVsRlt7cHKak6u/e6AkL/bkRelZ7rlL3X1YKxsZwa36xipOEKAsdtmaG6aAJoM1fx2zA==", + "dependencies": { + "got": "^12.1.0", + "registry-auth-token": "^5.0.1", + "registry-url": "^6.0.0", + "semver": "^7.3.7" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/param-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/param-case/-/param-case-3.0.4.tgz", + "integrity": "sha512-RXlj7zCYokReqWpOPH9oYivUzLYZ5vAPIfEmCTNViosC78F8F0H9y7T7gG2M39ymgutxF5gcFEsyZQSph9Bp3A==", + "dependencies": { + "dot-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-entities": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-4.0.1.tgz", + "integrity": "sha512-SWzvYcSJh4d/SGLIOQfZ/CoNv6BTlI6YEQ7Nj82oDVnRpwe/Z/F1EMx42x3JAOwGBlCjeCH0BRJQbQ/opHL17w==", + "dependencies": { + "@types/unist": "^2.0.0", + "character-entities": "^2.0.0", + "character-entities-legacy": "^3.0.0", + "character-reference-invalid": "^2.0.0", + "decode-named-character-reference": "^1.0.0", + "is-alphanumerical": "^2.0.0", + "is-decimal": "^2.0.0", + "is-hexadecimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/parse-entities/node_modules/@types/unist": { + "version": "2.0.10", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.10.tgz", + "integrity": "sha512-IfYcSBWE3hLpBg8+X2SEa8LVkJdJEkT2Ese2aaLs3ptGdVtABxndrMaxuFlQ1qdFf9Q5rDvDpxI3WwgvKFAsQA==" + }, + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parse-numeric-range": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/parse-numeric-range/-/parse-numeric-range-1.3.0.tgz", + "integrity": 
"sha512-twN+njEipszzlMJd4ONUYgSfZPDxgHhT9Ahed5uTigpQn90FggW4SA/AIPq/6a149fTbE9qBEcSwE3FAEp6wQQ==" + }, + "node_modules/parse5": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.1.2.tgz", + "integrity": "sha512-Czj1WaSVpaoj0wbhMzLmWD69anp2WH7FXMB9n1Sy8/ZFF9jolSQVMu1Ij5WIyGmcBmhk7EOndpO4mIpihVqAXw==", + "dependencies": { + "entities": "^4.4.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, + "node_modules/parse5-htmlparser2-tree-adapter": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-7.0.0.tgz", + "integrity": "sha512-B77tOZrqqfUfnVcOrUvfdLbz4pu4RopLD/4vmu3HUPswwTA8OH0EMW9BlWR2B0RCoiZRAHEUu7IxeP1Pd1UU+g==", + "dependencies": { + "domhandler": "^5.0.2", + "parse5": "^7.0.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, + "node_modules/parseurl": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", + "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/pascal-case": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/pascal-case/-/pascal-case-3.1.2.tgz", + "integrity": "sha512-uWlGT3YSnK9x3BQJaOdcZwrnV6hPpd8jFH1/ucpiLRPh/2zCVJKS19E4GvYHvaCcACn3foXZ0cLB9Wrx1KGe5g==", + "dependencies": { + "no-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/path-exists": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-5.0.0.tgz", + "integrity": "sha512-RjhtfwJOxzcFmNOi6ltcbcu4Iu+FL3zEj83dk4kAS+fVpTxXLO1b38RvJgT/0QwvV/L3aY9TAnyv0EOqW4GoMQ==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-is-inside": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz", + "integrity": "sha512-DUWJr3+ULp4zXmol/SZkFf3JGsS9/SIv+Y3Rt93/UjPpDpklB5f1er4O3POIbUuUJ3FXgqte2Q7SrU6zAqwk8w==" + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" + }, + "node_modules/path-to-regexp": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz", + "integrity": "sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==", + "dependencies": { + "isarray": "0.0.1" + } + }, + "node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "engines": { + "node": ">=8" + } + }, + 
"node_modules/periscopic": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/periscopic/-/periscopic-3.1.0.tgz", + "integrity": "sha512-vKiQ8RRtkl9P+r/+oefh25C3fhybptkHKCZSPlcXiJux2tJF55GnEj3BVn4A5gKfq9NWWXXrxkHBwVPUfH0opw==", + "dependencies": { + "@types/estree": "^1.0.0", + "estree-walker": "^3.0.0", + "is-reference": "^3.0.0" + } + }, + "node_modules/picocolors": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", + "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pkg-dir": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-7.0.0.tgz", + "integrity": "sha512-Ie9z/WINcxxLp27BKOCHGde4ITq9UklYKDzVo1nhk5sqGEXU3FpkwP5GM2voTGJkGd9B3Otl+Q4uwSOeSUtOBA==", + "dependencies": { + "find-up": "^6.3.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pkg-up": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/pkg-up/-/pkg-up-3.1.0.tgz", + "integrity": "sha512-nDywThFk1i4BQK4twPQ6TA4RT8bDY96yeuCVBWL3ePARCiEKDRSrNGbFIgUJpLp+XeIR65v8ra7WuJOFUBtkMA==", + "dependencies": { + "find-up": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-up/node_modules/find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "dependencies": { + "locate-path": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/pkg-up/node_modules/locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "dependencies": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/pkg-up/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pkg-up/node_modules/p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "dependencies": { + "p-limit": "^2.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/pkg-up/node_modules/path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==", + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss": { + "version": "8.4.32", + "resolved": 
"https://registry.npmjs.org/postcss/-/postcss-8.4.32.tgz", + "integrity": "sha512-D/kj5JNu6oo2EIy+XL/26JEDTlIbB8hw85G8StOE6L74RQAVVP5rej6wxCNqyMbR4RkPfqvezVbPw81Ngd6Kcw==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "nanoid": "^3.3.7", + "picocolors": "^1.0.0", + "source-map-js": "^1.0.2" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-calc": { + "version": "8.2.4", + "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-8.2.4.tgz", + "integrity": "sha512-SmWMSJmB8MRnnULldx0lQIyhSNvuDl9HfrZkaqqE/WHAhToYsAvDq+yAsA/kIyINDszOp3Rh0GFoNuH5Ypsm3Q==", + "dependencies": { + "postcss-selector-parser": "^6.0.9", + "postcss-value-parser": "^4.2.0" + }, + "peerDependencies": { + "postcss": "^8.2.2" + } + }, + "node_modules/postcss-colormin": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-5.3.1.tgz", + "integrity": "sha512-UsWQG0AqTFQmpBegeLLc1+c3jIqBNB0zlDGRWR+dQ3pRKJL1oeMzyqmH3o2PIfn9MBdNrVPWhDbT769LxCTLJQ==", + "dependencies": { + "browserslist": "^4.21.4", + "caniuse-api": "^3.0.0", + "colord": "^2.9.1", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-convert-values": { + "version": "5.1.3", + "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-5.1.3.tgz", + "integrity": "sha512-82pC1xkJZtcJEfiLw6UXnXVXScgtBrjlO5CBmuDQc+dlb88ZYheFsjTn40+zBVi3DkfF7iezO0nJUPLcJK3pvA==", + "dependencies": { + "browserslist": "^4.21.4", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-discard-comments": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-5.1.2.tgz", + "integrity": "sha512-+L8208OVbHVF2UQf1iDmRcbdjJkuBF6IS29yBDSiWUIzpYaAhtNl6JYnYm12FnkeCwQqF5LeklOu6rAqgfBZqQ==", + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-discard-duplicates": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-5.1.0.tgz", + "integrity": "sha512-zmX3IoSI2aoenxHV6C7plngHWWhUOV3sP1T8y2ifzxzbtnuhk1EdPwm0S1bIUNaJ2eNbWeGLEwzw8huPD67aQw==", + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-discard-empty": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-5.1.1.tgz", + "integrity": "sha512-zPz4WljiSuLWsI0ir4Mcnr4qQQ5e1Ukc3i7UfE2XcrwKK2LIPIqE5jxMRxO6GbI3cv//ztXDsXwEWT3BHOGh3A==", + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-discard-overridden": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-5.1.0.tgz", + "integrity": "sha512-21nOL7RqWR1kasIVdKs8HNqQJhFxLsyRfAnUDm4Fe4t4mCWL9OJiHvlHPjcd8zc5Myu89b/7wZDnOSjFgeWRtw==", + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": 
"^8.2.15" + } + }, + "node_modules/postcss-discard-unused": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-discard-unused/-/postcss-discard-unused-5.1.0.tgz", + "integrity": "sha512-KwLWymI9hbwXmJa0dkrzpRbSJEh0vVUd7r8t0yOGPcfKzyJJxFM8kLyC5Ev9avji6nY95pOp1W6HqIrfT+0VGw==", + "dependencies": { + "postcss-selector-parser": "^6.0.5" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-loader": { + "version": "7.3.3", + "resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-7.3.3.tgz", + "integrity": "sha512-YgO/yhtevGO/vJePCQmTxiaEwER94LABZN0ZMT4A0vsak9TpO+RvKRs7EmJ8peIlB9xfXCsS7M8LjqncsUZ5HA==", + "dependencies": { + "cosmiconfig": "^8.2.0", + "jiti": "^1.18.2", + "semver": "^7.3.8" + }, + "engines": { + "node": ">= 14.15.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "postcss": "^7.0.0 || ^8.0.1", + "webpack": "^5.0.0" + } + }, + "node_modules/postcss-loader/node_modules/cosmiconfig": { + "version": "8.3.6", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.3.6.tgz", + "integrity": "sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==", + "dependencies": { + "import-fresh": "^3.3.0", + "js-yaml": "^4.1.0", + "parse-json": "^5.2.0", + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/d-fischer" + }, + "peerDependencies": { + "typescript": ">=4.9.5" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/postcss-merge-idents": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-merge-idents/-/postcss-merge-idents-5.1.1.tgz", + "integrity": "sha512-pCijL1TREiCoog5nQp7wUe+TUonA2tC2sQ54UGeMmryK3UFGIYKqDyjnqd6RcuI4znFn9hWSLNN8xKE/vWcUQw==", + "dependencies": { + "cssnano-utils": "^3.1.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-merge-longhand": { + "version": "5.1.7", + "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-5.1.7.tgz", + "integrity": "sha512-YCI9gZB+PLNskrK0BB3/2OzPnGhPkBEwmwhfYk1ilBHYVAZB7/tkTHFBAnCrvBBOmeYyMYw3DMjT55SyxMBzjQ==", + "dependencies": { + "postcss-value-parser": "^4.2.0", + "stylehacks": "^5.1.1" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-merge-rules": { + "version": "5.1.4", + "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-5.1.4.tgz", + "integrity": "sha512-0R2IuYpgU93y9lhVbO/OylTtKMVcHb67zjWIfCiKR9rWL3GUk1677LAqD/BcHizukdZEjT8Ru3oHRoAYoJy44g==", + "dependencies": { + "browserslist": "^4.21.4", + "caniuse-api": "^3.0.0", + "cssnano-utils": "^3.1.0", + "postcss-selector-parser": "^6.0.5" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-minify-font-values": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-5.1.0.tgz", + "integrity": "sha512-el3mYTgx13ZAPPirSVsHqFzl+BBBDrXvbySvPGFnQcTI4iNslrPaFq4muTkLZmKlGk4gyFAYUBMH30+HurREyA==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || 
^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-minify-gradients": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-5.1.1.tgz", + "integrity": "sha512-VGvXMTpCEo4qHTNSa9A0a3D+dxGFZCYwR6Jokk+/3oB6flu2/PnPXAh2x7x52EkY5xlIHLm+Le8tJxe/7TNhzw==", + "dependencies": { + "colord": "^2.9.1", + "cssnano-utils": "^3.1.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-minify-params": { + "version": "5.1.4", + "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-5.1.4.tgz", + "integrity": "sha512-+mePA3MgdmVmv6g+30rn57USjOGSAyuxUmkfiWpzalZ8aiBkdPYjXWtHuwJGm1v5Ojy0Z0LaSYhHaLJQB0P8Jw==", + "dependencies": { + "browserslist": "^4.21.4", + "cssnano-utils": "^3.1.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-minify-selectors": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-5.2.1.tgz", + "integrity": "sha512-nPJu7OjZJTsVUmPdm2TcaiohIwxP+v8ha9NehQ2ye9szv4orirRU3SDdtUmKH+10nzn0bAyOXZ0UEr7OpvLehg==", + "dependencies": { + "postcss-selector-parser": "^6.0.5" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-modules-extract-imports": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/postcss-modules-extract-imports/-/postcss-modules-extract-imports-3.0.0.tgz", + "integrity": "sha512-bdHleFnP3kZ4NYDhuGlVK+CMrQ/pqUm8bx/oGL93K6gVwiclvX5x0n76fYMKuIGKzlABOy13zsvqjb0f92TEXw==", + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-local-by-default": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.0.3.tgz", + "integrity": "sha512-2/u2zraspoACtrbFRnTijMiQtb4GW4BvatjaG/bCjYQo8kLTdevCUlwuBHx2sCnSyrI3x3qj4ZK1j5LQBgzmwA==", + "dependencies": { + "icss-utils": "^5.0.0", + "postcss-selector-parser": "^6.0.2", + "postcss-value-parser": "^4.1.0" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-scope": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/postcss-modules-scope/-/postcss-modules-scope-3.1.0.tgz", + "integrity": "sha512-SaIbK8XW+MZbd0xHPf7kdfA/3eOt7vxJ72IRecn3EzuZVLr1r0orzf0MX/pN8m+NMDoo6X/SQd8oeKqGZd8PXg==", + "dependencies": { + "postcss-selector-parser": "^6.0.4" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-values": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/postcss-modules-values/-/postcss-modules-values-4.0.0.tgz", + "integrity": "sha512-RDxHkAiEGI78gS2ofyvCsu7iycRv7oqw5xMWn9iMoR0N/7mf9D50ecQqUo5BZ9Zh2vH4bCUR/ktCqbB9m8vJjQ==", + "dependencies": { + "icss-utils": "^5.0.0" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-normalize-charset": { + "version": "5.1.0", + "resolved": 
"https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-5.1.0.tgz", + "integrity": "sha512-mSgUJ+pd/ldRGVx26p2wz9dNZ7ji6Pn8VWBajMXFf8jk7vUoSrZ2lt/wZR7DtlZYKesmZI680qjr2CeFF2fbUg==", + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-display-values": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-normalize-display-values/-/postcss-normalize-display-values-5.1.0.tgz", + "integrity": "sha512-WP4KIM4o2dazQXWmFaqMmcvsKmhdINFblgSeRgn8BJ6vxaMyaJkwAzpPpuvSIoG/rmX3M+IrRZEz2H0glrQNEA==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-positions": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-normalize-positions/-/postcss-normalize-positions-5.1.1.tgz", + "integrity": "sha512-6UpCb0G4eofTCQLFVuI3EVNZzBNPiIKcA1AKVka+31fTVySphr3VUgAIULBhxZkKgwLImhzMR2Bw1ORK+37INg==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-repeat-style": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-5.1.1.tgz", + "integrity": "sha512-mFpLspGWkQtBcWIRFLmewo8aC3ImN2i/J3v8YCFUwDnPu3Xz4rLohDO26lGjwNsQxB3YF0KKRwspGzE2JEuS0g==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-string": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-normalize-string/-/postcss-normalize-string-5.1.0.tgz", + "integrity": "sha512-oYiIJOf4T9T1N4i+abeIc7Vgm/xPCGih4bZz5Nm0/ARVJ7K6xrDlLwvwqOydvyL3RHNf8qZk6vo3aatiw/go3w==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-timing-functions": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-5.1.0.tgz", + "integrity": "sha512-DOEkzJ4SAXv5xkHl0Wa9cZLF3WCBhF3o1SKVxKQAa+0pYKlueTpCgvkFAHfk+Y64ezX9+nITGrDZeVGgITJXjg==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-unicode": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-5.1.1.tgz", + "integrity": "sha512-qnCL5jzkNUmKVhZoENp1mJiGNPcsJCs1aaRmURmeJGES23Z/ajaln+EPTD+rBeNkSryI+2WTdW+lwcVdOikrpA==", + "dependencies": { + "browserslist": "^4.21.4", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-url": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-5.1.0.tgz", + "integrity": "sha512-5upGeDO+PVthOxSmds43ZeMeZfKH+/DKgGRD7TElkkyS46JXAUhMzIKiCa7BabPeIy3AQcTkXwVVN7DbqsiCew==", + "dependencies": { + "normalize-url": "^6.0.1", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + 
"node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-normalize-whitespace": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-normalize-whitespace/-/postcss-normalize-whitespace-5.1.1.tgz", + "integrity": "sha512-83ZJ4t3NUDETIHTa3uEg6asWjSBYL5EdkVB0sDncx9ERzOKBVJIUeDO9RyA9Zwtig8El1d79HBp0JEi8wvGQnA==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-ordered-values": { + "version": "5.1.3", + "resolved": "https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-5.1.3.tgz", + "integrity": "sha512-9UO79VUhPwEkzbb3RNpqqghc6lcYej1aveQteWY+4POIwlqkYE21HKWaLDF6lWNuqCobEAyTovVhtI32Rbv2RQ==", + "dependencies": { + "cssnano-utils": "^3.1.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-reduce-idents": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/postcss-reduce-idents/-/postcss-reduce-idents-5.2.0.tgz", + "integrity": "sha512-BTrLjICoSB6gxbc58D5mdBK8OhXRDqud/zodYfdSi52qvDHdMwk+9kB9xsM8yJThH/sZU5A6QVSmMmaN001gIg==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-reduce-initial": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-5.1.2.tgz", + "integrity": "sha512-dE/y2XRaqAi6OvjzD22pjTUQ8eOfc6m/natGHgKFBK9DxFmIm69YmaRVQrGgFlEfc1HePIurY0TmDeROK05rIg==", + "dependencies": { + "browserslist": "^4.21.4", + "caniuse-api": "^3.0.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-reduce-transforms": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-5.1.0.tgz", + "integrity": "sha512-2fbdbmgir5AvpW9RLtdONx1QoYG2/EtqpNQbFASDlixBbAYuTcJ0dECwlqNqH7VbaUnEnh8SrxOe2sRIn24XyQ==", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.0.13", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.13.tgz", + "integrity": "sha512-EaV1Gl4mUEV4ddhDnv/xtj7sxwrwxdetHdWUGnT4VJQf+4d05v6lHYZr8N573k5Z0BViss7BDhfWtKS3+sfAqQ==", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-sort-media-queries": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/postcss-sort-media-queries/-/postcss-sort-media-queries-4.4.1.tgz", + "integrity": "sha512-QDESFzDDGKgpiIh4GYXsSy6sek2yAwQx1JASl5AxBtU1Lq2JfKBljIPNdil989NcSKRQX1ToiaKphImtBuhXWw==", + "dependencies": { + "sort-css-media-queries": "2.1.0" + }, + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "postcss": "^8.4.16" + } + }, + "node_modules/postcss-svgo": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-5.1.0.tgz", + "integrity": "sha512-D75KsH1zm5ZrHyxPakAxJWtkyXew5qwS70v56exwvw542d9CRtTo78K0WeFxZB4G7JXKKMbEZtZayTGdIky/eA==", + "dependencies": { + 
"postcss-value-parser": "^4.2.0", + "svgo": "^2.7.0" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-unique-selectors": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-5.1.1.tgz", + "integrity": "sha512-5JiODlELrz8L2HwxfPnhOWZYWDxVHWL83ufOv84NrcgipI7TaeRsatAhK4Tr2/ZiYldpK/wBvw5BD3qfaK96GA==", + "dependencies": { + "postcss-selector-parser": "^6.0.5" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/postcss-value-parser": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", + "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==" + }, + "node_modules/postcss-zindex": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-zindex/-/postcss-zindex-5.1.0.tgz", + "integrity": "sha512-fgFMf0OtVSBR1va1JNHYgMxYk73yhn/qb4uQDq1DLGYolz8gHCyr/sesEuGUaYs58E3ZJRcpoGuPVoB7Meiq9A==", + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/pretty-error": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/pretty-error/-/pretty-error-4.0.0.tgz", + "integrity": "sha512-AoJ5YMAcXKYxKhuJGdcvse+Voc6v1RgnsR3nWcYU7q4t6z0Q6T86sv5Zq8VIRbOWWFpvdGE83LtdSMNd+6Y0xw==", + "dependencies": { + "lodash": "^4.17.20", + "renderkid": "^3.0.0" + } + }, + "node_modules/pretty-time": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/pretty-time/-/pretty-time-1.1.0.tgz", + "integrity": "sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/prism-react-renderer": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/prism-react-renderer/-/prism-react-renderer-2.3.1.tgz", + "integrity": "sha512-Rdf+HzBLR7KYjzpJ1rSoxT9ioO85nZngQEoFIhL07XhtJHlCU3SOz0GJ6+qvMyQe0Se+BV3qpe6Yd/NmQF5Juw==", + "dependencies": { + "@types/prismjs": "^1.26.0", + "clsx": "^2.0.0" + }, + "peerDependencies": { + "react": ">=16.0.0" + } + }, + "node_modules/prismjs": { + "version": "1.29.0", + "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.29.0.tgz", + "integrity": "sha512-Kx/1w86q/epKcmte75LNrEoT+lX8pBpavuAbvJWRXar7Hz8jrtF+e3vY751p0R8H9HdArwaCTNDDzHg/ScJK1Q==", + "engines": { + "node": ">=6" + } + }, + "node_modules/process-nextick-args": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" + }, + "node_modules/prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/prop-types": { + "version": "15.8.1", + "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", + "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", + "dependencies": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.13.1" + } + }, 
+ "node_modules/property-information": { + "version": "6.4.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-6.4.0.tgz", + "integrity": "sha512-9t5qARVofg2xQqKtytzt+lZ4d1Qvj8t5B8fEwXK6qOfgRLgH/b13QlgEyDh033NOS31nXeFbYv7CLUDG1CeifQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/proto-list": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/proto-list/-/proto-list-1.2.4.tgz", + "integrity": "sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA==" + }, + "node_modules/proxy-addr": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", + "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", + "dependencies": { + "forwarded": "0.2.0", + "ipaddr.js": "1.9.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/proxy-addr/node_modules/ipaddr.js": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", + "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/punycode": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", + "integrity": "sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ==" + }, + "node_modules/pupa": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/pupa/-/pupa-3.1.0.tgz", + "integrity": "sha512-FLpr4flz5xZTSJxSeaheeMKN/EDzMdK7b8PTOC6a5PYFKTucWbdqjgqaEyH0shFiSJrVB1+Qqi4Tk19ccU6Aug==", + "dependencies": { + "escape-goat": "^4.0.0" + }, + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/qs": { + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz", + "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==", + "dependencies": { + "side-channel": "^1.0.4" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/queue": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/queue/-/queue-6.0.2.tgz", + "integrity": "sha512-iHZWu+q3IdFZFX36ro/lKBkSvfkztY5Y7HMiPlOUjhupPcG2JMfst2KKEpu5XndviX/3UhFbRngUPNKtgvtZiA==", + "dependencies": { + "inherits": "~2.0.3" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/quick-lru": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz", + "integrity": "sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": 
"https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/range-parser": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz", + "integrity": "sha512-kA5WQoNVo4t9lNx2kQNFCxKeBl5IbbSNBl1M/tLkw9WCn+hxNBAW5Qh8gdhs63CJnhjJ2zQWFoqPJP2sK1AV5A==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/raw-body": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.1.tgz", + "integrity": "sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==", + "dependencies": { + "bytes": "3.1.2", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/raw-body/node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/rc": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", + "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", + "dependencies": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + }, + "bin": { + "rc": "cli.js" + } + }, + "node_modules/rc/node_modules/strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react": { + "version": "18.2.0", + "resolved": "https://registry.npmjs.org/react/-/react-18.2.0.tgz", + "integrity": "sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ==", + "dependencies": { + "loose-envify": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dev-utils": { + "version": "12.0.1", + "resolved": "https://registry.npmjs.org/react-dev-utils/-/react-dev-utils-12.0.1.tgz", + "integrity": "sha512-84Ivxmr17KjUupyqzFode6xKhjwuEJDROWKJy/BthkL7Wn6NJ8h4WE6k/exAv6ImS+0oZLRRW5j/aINMHyeGeQ==", + "dependencies": { + "@babel/code-frame": "^7.16.0", + "address": "^1.1.2", + "browserslist": "^4.18.1", + "chalk": "^4.1.2", + "cross-spawn": "^7.0.3", + "detect-port-alt": "^1.1.6", + "escape-string-regexp": "^4.0.0", + "filesize": "^8.0.6", + "find-up": "^5.0.0", + "fork-ts-checker-webpack-plugin": "^6.5.0", + "global-modules": "^2.0.0", + "globby": "^11.0.4", + "gzip-size": "^6.0.0", + "immer": "^9.0.7", + "is-root": "^2.1.0", + "loader-utils": "^3.2.0", + "open": "^8.4.0", + "pkg-up": "^3.1.0", + "prompts": "^2.4.2", + "react-error-overlay": "^6.0.11", + "recursive-readdir": "^2.2.2", + "shell-quote": "^1.7.3", + "strip-ansi": "^6.0.1", + "text-table": "^0.2.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/react-dev-utils/node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dependencies": { + 
"locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react-dev-utils/node_modules/loader-utils": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-3.2.1.tgz", + "integrity": "sha512-ZvFw1KWS3GVyYBYb7qkmRM/WwL2TQQBxgCK62rlvm4WpVQ23Nb4tYjApUlfjrEGvOs7KHEsmyUn75OHZrJMWPw==", + "engines": { + "node": ">= 12.13.0" + } + }, + "node_modules/react-dev-utils/node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react-dev-utils/node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react-dev-utils/node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react-dev-utils/node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "engines": { + "node": ">=8" + } + }, + "node_modules/react-dev-utils/node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/react-dom": { + "version": "18.2.0", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.2.0.tgz", + "integrity": "sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==", + "dependencies": { + "loose-envify": "^1.1.0", + "scheduler": "^0.23.0" + }, + "peerDependencies": { + "react": "^18.2.0" + } + }, + "node_modules/react-error-overlay": { + "version": "6.0.11", + "resolved": "https://registry.npmjs.org/react-error-overlay/-/react-error-overlay-6.0.11.tgz", + "integrity": "sha512-/6UZ2qgEyH2aqzYZgQPxEnz33NJ2gNsnHA2o5+o4wW9bLM/JYQitNP9xPhsXwC08hMMovfGe/8retsdDsczPRg==" + }, + "node_modules/react-fast-compare": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/react-fast-compare/-/react-fast-compare-3.2.2.tgz", + "integrity": "sha512-nsO+KSNgo1SbJqJEYRE9ERzo7YtYbou/OqjSQKxV7jcKox7+usiUVZOAC+XnDOABXggQTno0Y1CpVnuWEc1boQ==" + }, + "node_modules/react-helmet-async": { + "version": "1.3.0", + "resolved": 
"https://registry.npmjs.org/react-helmet-async/-/react-helmet-async-1.3.0.tgz", + "integrity": "sha512-9jZ57/dAn9t3q6hneQS0wukqC2ENOBgMNVEhb/ZG9ZSxUetzVIw4iAmEU38IaVg3QGYauQPhSeUTuIUtFglWpg==", + "dependencies": { + "@babel/runtime": "^7.12.5", + "invariant": "^2.2.4", + "prop-types": "^15.7.2", + "react-fast-compare": "^3.2.0", + "shallowequal": "^1.1.0" + }, + "peerDependencies": { + "react": "^16.6.0 || ^17.0.0 || ^18.0.0", + "react-dom": "^16.6.0 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/react-is": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==" + }, + "node_modules/react-json-view-lite": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/react-json-view-lite/-/react-json-view-lite-1.2.1.tgz", + "integrity": "sha512-Itc0g86fytOmKZoIoJyGgvNqohWSbh3NXIKNgH6W6FT9PC1ck4xas1tT3Rr/b3UlFXyA9Jjaw9QSXdZy2JwGMQ==", + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "react": "^16.13.1 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/react-loadable": { + "name": "@docusaurus/react-loadable", + "version": "5.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz", + "integrity": "sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==", + "dependencies": { + "@types/react": "*", + "prop-types": "^15.6.2" + }, + "peerDependencies": { + "react": "*" + } + }, + "node_modules/react-loadable-ssr-addon-v5-slorber": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/react-loadable-ssr-addon-v5-slorber/-/react-loadable-ssr-addon-v5-slorber-1.0.1.tgz", + "integrity": "sha512-lq3Lyw1lGku8zUEJPDxsNm1AfYHBrO9Y1+olAYwpUJ2IGFBskM0DMKok97A6LWUpHm+o7IvQBOWu9MLenp9Z+A==", + "dependencies": { + "@babel/runtime": "^7.10.3" + }, + "engines": { + "node": ">=10.13.0" + }, + "peerDependencies": { + "react-loadable": "*", + "webpack": ">=4.41.1 || 5.x" + } + }, + "node_modules/react-router": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-5.3.4.tgz", + "integrity": "sha512-Ys9K+ppnJah3QuaRiLxk+jDWOR1MekYQrlytiXxC1RyfbdsZkS5pvKAzCCr031xHixZwpnsYNT5xysdFHQaYsA==", + "dependencies": { + "@babel/runtime": "^7.12.13", + "history": "^4.9.0", + "hoist-non-react-statics": "^3.1.0", + "loose-envify": "^1.3.1", + "path-to-regexp": "^1.7.0", + "prop-types": "^15.6.2", + "react-is": "^16.6.0", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0" + }, + "peerDependencies": { + "react": ">=15" + } + }, + "node_modules/react-router-config": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/react-router-config/-/react-router-config-5.1.1.tgz", + "integrity": "sha512-DuanZjaD8mQp1ppHjgnnUnyOlqYXZVjnov/JzFhjLEwd3Z4dYjMSnqrEzzGThH47vpCOqPPwJM2FtthLeJ8Pbg==", + "dependencies": { + "@babel/runtime": "^7.1.2" + }, + "peerDependencies": { + "react": ">=15", + "react-router": ">=5" + } + }, + "node_modules/react-router-dom": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-5.3.4.tgz", + "integrity": "sha512-m4EqFMHv/Ih4kpcBCONHbkT68KoAeHN4p3lAGoNryfHi0dMy0kCzEZakiKRsvg5wHZ/JLrLW8o8KomWiz/qbYQ==", + "dependencies": { + "@babel/runtime": "^7.12.13", + "history": "^4.9.0", + "loose-envify": "^1.3.1", + "prop-types": "^15.6.2", + "react-router": "5.3.4", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0" + }, + "peerDependencies": 
{ + "react": ">=15" + } + }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/reading-time": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/reading-time/-/reading-time-1.5.0.tgz", + "integrity": "sha512-onYyVhBNr4CmAxFsKS7bz+uTLRakypIe4R+5A824vBSkQy/hB3fZepoVEf8OVAxzLvK+H/jm9TzpI3ETSm64Kg==" + }, + "node_modules/rechoir": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.6.2.tgz", + "integrity": "sha512-HFM8rkZ+i3zrV+4LQjwQ0W+ez98pApMGM3HUrN04j3CqzPOzl9nmP15Y8YXNm8QHGv/eacOVEjqhmWpkRV0NAw==", + "dependencies": { + "resolve": "^1.1.6" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/recursive-readdir": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/recursive-readdir/-/recursive-readdir-2.2.3.tgz", + "integrity": "sha512-8HrF5ZsXk5FAH9dgsx3BlUer73nIhuj+9OrQwEbLTPOBzGkL1lsFCR01am+v+0m2Cmbs1nP12hLDl5FA7EszKA==", + "dependencies": { + "minimatch": "^3.0.5" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/regenerate": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz", + "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==" + }, + "node_modules/regenerate-unicode-properties": { + "version": "10.1.1", + "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.1.1.tgz", + "integrity": "sha512-X007RyZLsCJVVrjgEFVpLUTZwyOZk3oiL75ZcuYjlIWd6rNJtOjkBwQc5AsRrpbKVkxN6sklw/k/9m2jJYOf8Q==", + "dependencies": { + "regenerate": "^1.4.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/regenerator-runtime": { + "version": "0.14.1", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", + "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==" + }, + "node_modules/regenerator-transform": { + "version": "0.15.2", + "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.15.2.tgz", + "integrity": "sha512-hfMp2BoF0qOk3uc5V20ALGDS2ddjQaLrdl7xrGXvAIow7qeWRM2VA2HuCHkUKk9slq3VwEwLNK3DFBqDfPGYtg==", + "dependencies": { + "@babel/runtime": "^7.8.4" + } + }, + "node_modules/regexpu-core": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-5.3.2.tgz", + "integrity": "sha512-RAM5FlZz+Lhmo7db9L298p2vHP5ZywrVXmVXpmAD9GuL5MPH6t9ROw1iA/wfHkQ76Qe7AaPF0nGuim96/IrQMQ==", + "dependencies": { + "@babel/regjsgen": "^0.8.0", + "regenerate": "^1.4.2", + "regenerate-unicode-properties": "^10.1.0", + "regjsparser": "^0.9.1", + "unicode-match-property-ecmascript": "^2.0.0", + "unicode-match-property-value-ecmascript": "^2.1.0" + }, + "engines": { + "node": ">=4" + } + }, + 
"node_modules/registry-auth-token": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-5.0.2.tgz", + "integrity": "sha512-o/3ikDxtXaA59BmZuZrJZDJv8NMDGSj+6j6XaeBmHw8eY1i1qd9+6H+LjVvQXx3HN6aRCGa1cUdJ9RaJZUugnQ==", + "dependencies": { + "@pnpm/npm-conf": "^2.1.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/registry-url": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/registry-url/-/registry-url-6.0.1.tgz", + "integrity": "sha512-+crtS5QjFRqFCoQmvGduwYWEBng99ZvmFvF+cUJkGYF1L1BfU8C6Zp9T7f5vPAwyLkUExpvK+ANVZmGU49qi4Q==", + "dependencies": { + "rc": "1.2.8" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/regjsparser": { + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.9.1.tgz", + "integrity": "sha512-dQUtn90WanSNl+7mQKcXAgZxvUe7Z0SqXlgzv0za4LwiUhyzBC58yQO3liFoUgu8GiJVInAhJjkj1N0EtQ5nkQ==", + "dependencies": { + "jsesc": "~0.5.0" + }, + "bin": { + "regjsparser": "bin/parser" + } + }, + "node_modules/regjsparser/node_modules/jsesc": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", + "integrity": "sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==", + "bin": { + "jsesc": "bin/jsesc" + } + }, + "node_modules/rehype-raw": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/rehype-raw/-/rehype-raw-7.0.0.tgz", + "integrity": "sha512-/aE8hCfKlQeA8LmyeyQvQF3eBiLRGNlfBJEvWH7ivp9sBqs7TNqBL5X3v157rM4IFETqDnIOO+z5M/biZbo9Ww==", + "dependencies": { + "@types/hast": "^3.0.0", + "hast-util-raw": "^9.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/relateurl": { + "version": "0.2.7", + "resolved": "https://registry.npmjs.org/relateurl/-/relateurl-0.2.7.tgz", + "integrity": "sha512-G08Dxvm4iDN3MLM0EsP62EDV9IuhXPR6blNz6Utcp7zyV3tr4HVNINt6MpaRWbxoOHT3Q7YN2P+jaHX8vUbgog==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/remark-directive": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/remark-directive/-/remark-directive-3.0.0.tgz", + "integrity": "sha512-l1UyWJ6Eg1VPU7Hm/9tt0zKtReJQNOA4+iDMAxTyZNWnJnFlbS/7zhiel/rogTLQ2vMYwDzSJa4BiVNqGlqIMA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-directive": "^3.0.0", + "micromark-extension-directive": "^3.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-emoji": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/remark-emoji/-/remark-emoji-4.0.1.tgz", + "integrity": "sha512-fHdvsTR1dHkWKev9eNyhTo4EFwbUvJ8ka9SgeWkMPYFX4WoI7ViVBms3PjlQYgw5TLvNQso3GUB/b/8t3yo+dg==", + "dependencies": { + "@types/mdast": "^4.0.2", + "emoticon": "^4.0.1", + "mdast-util-find-and-replace": "^3.0.1", + "node-emoji": "^2.1.0", + "unified": "^11.0.4" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + } + }, + "node_modules/remark-frontmatter": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/remark-frontmatter/-/remark-frontmatter-5.0.0.tgz", + "integrity": "sha512-XTFYvNASMe5iPN0719nPrdItC9aU0ssC4v14mH1BCi1u0n1gAocqcujWUrByftZTbLhRtiKRyjYTSIOcr69UVQ==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-frontmatter": "^2.0.0", + "micromark-extension-frontmatter": "^2.0.0", + 
"unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-gfm": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/remark-gfm/-/remark-gfm-4.0.0.tgz", + "integrity": "sha512-U92vJgBPkbw4Zfu/IiW2oTZLSL3Zpv+uI7My2eq8JxKgqraFdU8YUGicEJCEgSbeaG+QDFqIcwwfMTOEelPxuA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-gfm": "^3.0.0", + "micromark-extension-gfm": "^3.0.0", + "remark-parse": "^11.0.0", + "remark-stringify": "^11.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-mdx": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/remark-mdx/-/remark-mdx-3.0.0.tgz", + "integrity": "sha512-O7yfjuC6ra3NHPbRVxfflafAj3LTwx3b73aBvkEFU5z4PsD6FD4vrqJAkE5iNGLz71GdjXfgRqm3SQ0h0VuE7g==", + "dependencies": { + "mdast-util-mdx": "^3.0.0", + "micromark-extension-mdxjs": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-parse": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-11.0.0.tgz", + "integrity": "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-rehype": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-11.0.0.tgz", + "integrity": "sha512-vx8x2MDMcxuE4lBmQ46zYUDfcFMmvg80WYX+UNLeG6ixjdCCLcw1lrgAukwBTuOFsS78eoAedHGn9sNM0w7TPw==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "mdast-util-to-hast": "^13.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-stringify": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-11.0.0.tgz", + "integrity": "sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-to-markdown": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/renderkid": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/renderkid/-/renderkid-3.0.0.tgz", + "integrity": "sha512-q/7VIQA8lmM1hF+jn+sFSPWGlMkSAeNYcPLmDQx2zzuiDfaLrOmumR8iaUKlenFgh0XRPIUeSPlH3A+AW3Z5pg==", + "dependencies": { + "css-select": "^4.1.3", + "dom-converter": "^0.2.0", + "htmlparser2": "^6.1.0", + "lodash": "^4.17.21", + "strip-ansi": "^6.0.1" + } + }, + "node_modules/renderkid/node_modules/css-select": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/css-select/-/css-select-4.3.0.tgz", + "integrity": "sha512-wPpOYtnsVontu2mODhA19JrqWxNsfdatRKd64kmpRbQgh1KtItko5sTnEpPdpSaJszTOhEMlF/RPz28qj4HqhQ==", + "dependencies": { + "boolbase": "^1.0.0", + "css-what": "^6.0.1", + "domhandler": "^4.3.1", + "domutils": "^2.8.0", + "nth-check": "^2.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/renderkid/node_modules/dom-serializer": { + 
"version": "1.4.1", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.4.1.tgz", + "integrity": "sha512-VHwB3KfrcOOkelEG2ZOfxqLZdfkil8PtJi4P8N2MMXucZq2yLp75ClViUlOVwyoHEDjYU433Aq+5zWP61+RGag==", + "dependencies": { + "domelementtype": "^2.0.1", + "domhandler": "^4.2.0", + "entities": "^2.0.0" + }, + "funding": { + "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/domhandler": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.3.1.tgz", + "integrity": "sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==", + "dependencies": { + "domelementtype": "^2.2.0" + }, + "engines": { + "node": ">= 4" + }, + "funding": { + "url": "https://github.com/fb55/domhandler?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/domutils": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz", + "integrity": "sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==", + "dependencies": { + "dom-serializer": "^1.0.1", + "domelementtype": "^2.2.0", + "domhandler": "^4.2.0" + }, + "funding": { + "url": "https://github.com/fb55/domutils?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/entities": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", + "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==", + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/htmlparser2": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-6.1.0.tgz", + "integrity": "sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A==", + "funding": [ + "https://github.com/fb55/htmlparser2?sponsor=1", + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ], + "dependencies": { + "domelementtype": "^2.0.1", + "domhandler": "^4.0.0", + "domutils": "^2.5.2", + "entities": "^2.0.0" + } + }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-like": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/require-like/-/require-like-0.1.2.tgz", + "integrity": "sha512-oyrU88skkMtDdauHDuKVrgR+zuItqr6/c//FXzvmxRGMexSDc6hNvJInGW3LL46n+8b50RykrvwSUIIQH2LQ5A==", + "engines": { + "node": "*" + } + }, + "node_modules/requires-port": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", + "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==" + }, + "node_modules/resolve": { + "version": "1.22.8", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz", + "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==", + "dependencies": { + "is-core-module": "^2.13.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + 
"node_modules/resolve-alpn": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/resolve-alpn/-/resolve-alpn-1.2.1.tgz", + "integrity": "sha512-0a1F4l73/ZFZOakJnQ3FvkJ2+gSTQWz/r2KE5OdDY0TxPm5h4GkqkWWfM47T7HsbnOtcJVEF4epCVy6u7Q3K+g==" + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "engines": { + "node": ">=4" + } + }, + "node_modules/resolve-pathname": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/resolve-pathname/-/resolve-pathname-3.0.0.tgz", + "integrity": "sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng==" + }, + "node_modules/responselike": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/responselike/-/responselike-3.0.0.tgz", + "integrity": "sha512-40yHxbNcl2+rzXvZuVkrYohathsSJlMTXKryG5y8uciHv1+xDLHQpgjG64JUO9nrEq2jGLH6IZ8BcZyw3wrweg==", + "dependencies": { + "lowercase-keys": "^3.0.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/retry": { + "version": "0.13.1", + "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", + "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", + "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/rtl-detect": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/rtl-detect/-/rtl-detect-1.1.2.tgz", + "integrity": "sha512-PGMBq03+TTG/p/cRB7HCLKJ1MgDIi07+QU1faSjiYRfmY5UsAttV9Hs08jDAHVwcOwmVLcSJkpwyfXszVjWfIQ==" + }, + "node_modules/rtlcss": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/rtlcss/-/rtlcss-4.1.1.tgz", + "integrity": "sha512-/oVHgBtnPNcggP2aVXQjSy6N1mMAfHg4GSag0QtZBlD5bdDgAHwr4pydqJGd+SUCu9260+Pjqbjwtvu7EMH1KQ==", + "dependencies": { + "escalade": "^3.1.1", + "picocolors": "^1.0.0", + "postcss": "^8.4.21", + "strip-json-comments": "^3.1.1" + }, + "bin": { + "rtlcss": "bin/rtlcss.js" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": 
"https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" + }, + "node_modules/sax": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/sax/-/sax-1.3.0.tgz", + "integrity": "sha512-0s+oAmw9zLl1V1cS9BtZN7JAd0cW5e0QH4W3LWEK6a4LaLEA2OTpGYWDY+6XasBLtz6wkm3u1xRw95mRuJ59WA==" + }, + "node_modules/scheduler": { + "version": "0.23.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.0.tgz", + "integrity": "sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==", + "dependencies": { + "loose-envify": "^1.1.0" + } + }, + "node_modules/schema-utils": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.2.0.tgz", + "integrity": "sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw==", + "dependencies": { + "@types/json-schema": "^7.0.9", + "ajv": "^8.9.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.1.0" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/search-insights": { + "version": "2.13.0", + "resolved": "https://registry.npmjs.org/search-insights/-/search-insights-2.13.0.tgz", + "integrity": "sha512-Orrsjf9trHHxFRuo9/rzm0KIWmgzE8RMlZMzuhZOJ01Rnz3D0YBAe+V6473t6/H6c7irs6Lt48brULAiRWb3Vw==", + "peer": true + }, + "node_modules/section-matter": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz", + "integrity": "sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==", + "dependencies": { + "extend-shallow": "^2.0.1", + "kind-of": "^6.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/select-hose": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz", + "integrity": "sha512-mEugaLK+YfkijB4fx0e6kImuJdCIt2LxCRcbEYPqRGCs4F2ogyfZU5IAZRdjCP8JPq2AtdNoC/Dux63d9Kiryg==" + }, + "node_modules/selfsigned": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-2.4.1.tgz", + "integrity": "sha512-th5B4L2U+eGLq1TVh7zNRGBapioSORUeymIydxgFpwww9d2qyKvtuPU2jJuHvYAwwqi2Y596QBL3eEqcPEYL8Q==", + "dependencies": { + "@types/node-forge": "^1.3.0", + "node-forge": "^1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semver": { + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", + "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semver-diff": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-4.0.0.tgz", + "integrity": 
"sha512-0Ju4+6A8iOnpL/Thra7dZsSlOHYAHIeMxfhWQRI1/VLcT3WDBZKKtQt/QkBOsiIN9ZpuvHE6cGZ0x4glCMmfiA==", + "dependencies": { + "semver": "^7.3.5" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/semver/node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semver/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" + }, + "node_modules/send": { + "version": "0.18.0", + "resolved": "https://registry.npmjs.org/send/-/send-0.18.0.tgz", + "integrity": "sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==", + "dependencies": { + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "mime": "1.6.0", + "ms": "2.1.3", + "on-finished": "2.4.1", + "range-parser": "~1.2.1", + "statuses": "2.0.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/send/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/send/node_modules/debug/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/send/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" + }, + "node_modules/send/node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serialize-javascript": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.1.tgz", + "integrity": "sha512-owoXEFjWRllis8/M1Q+Cw5k8ZH40e3zhp/ovX+Xr/vi1qj6QesbyXXViFbpNvWvPNAD62SutwEXavefrLJWj7w==", + "dependencies": { + "randombytes": "^2.1.0" + } + }, + "node_modules/serve-handler": { + "version": "6.1.5", + "resolved": "https://registry.npmjs.org/serve-handler/-/serve-handler-6.1.5.tgz", + "integrity": "sha512-ijPFle6Hwe8zfmBxJdE+5fta53fdIY0lHISJvuikXB3VYFafRjMRpOffSPvCYsbKyBA7pvy9oYr/BT1O3EArlg==", + "dependencies": { + "bytes": "3.0.0", + "content-disposition": "0.5.2", + "fast-url-parser": "1.1.3", + "mime-types": "2.1.18", + "minimatch": "3.1.2", + "path-is-inside": "1.0.2", + "path-to-regexp": "2.2.1", + "range-parser": "1.2.0" + } + }, + "node_modules/serve-handler/node_modules/path-to-regexp": { + "version": "2.2.1", + "resolved": 
"https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.2.1.tgz", + "integrity": "sha512-gu9bD6Ta5bwGrrU8muHzVOBFFREpp2iRkVfhBJahwJ6p6Xw20SjT0MxLnwkjOibQmGSYhiUnf2FLe7k+jcFmGQ==" + }, + "node_modules/serve-index": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/serve-index/-/serve-index-1.9.1.tgz", + "integrity": "sha512-pXHfKNP4qujrtteMrSBb0rc8HJ9Ms/GrXwcUtUtD5s4ewDJI8bT3Cz2zTVRMKtri49pLx2e0Ya8ziP5Ya2pZZw==", + "dependencies": { + "accepts": "~1.3.4", + "batch": "0.6.1", + "debug": "2.6.9", + "escape-html": "~1.0.3", + "http-errors": "~1.6.2", + "mime-types": "~2.1.17", + "parseurl": "~1.3.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/serve-index/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/serve-index/node_modules/depd": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", + "integrity": "sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-index/node_modules/http-errors": { + "version": "1.6.3", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz", + "integrity": "sha512-lks+lVC8dgGyh97jxvxeYTWQFvh4uw4yC12gVl63Cg30sjPX4wuGcdkICVXDAESr6OJGjqGA8Iz5mkeN6zlD7A==", + "dependencies": { + "depd": "~1.1.2", + "inherits": "2.0.3", + "setprototypeof": "1.1.0", + "statuses": ">= 1.4.0 < 2" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-index/node_modules/inherits": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", + "integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==" + }, + "node_modules/serve-index/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/serve-index/node_modules/setprototypeof": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz", + "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==" + }, + "node_modules/serve-index/node_modules/statuses": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz", + "integrity": "sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-static": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz", + "integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==", + "dependencies": { + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "parseurl": "~1.3.3", + "send": "0.18.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/set-function-length": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.1.1.tgz", + "integrity": "sha512-VoaqjbBJKiWtg4yRcKBQ7g7wnGnLV3M8oLvVWwOk2PdYY6PEFegR1vezXR0tw6fZGF9csVakIRjrJiy2veSBFQ==", + "dependencies": { + 
"define-data-property": "^1.1.1", + "get-intrinsic": "^1.2.1", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/setprototypeof": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==" + }, + "node_modules/shallow-clone": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/shallow-clone/-/shallow-clone-3.0.1.tgz", + "integrity": "sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==", + "dependencies": { + "kind-of": "^6.0.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shallowequal": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz", + "integrity": "sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ==" + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "engines": { + "node": ">=8" + } + }, + "node_modules/shell-quote": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.8.1.tgz", + "integrity": "sha512-6j1W9l1iAs/4xYBI1SYOVZyFcCis9b4KCLQ8fgAGG07QvzaRLVVRQvAy85yNmmZSjYjg4MWh4gNvlPujU/5LpA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/shelljs": { + "version": "0.8.5", + "resolved": "https://registry.npmjs.org/shelljs/-/shelljs-0.8.5.tgz", + "integrity": "sha512-TiwcRcrkhHvbrZbnRcFYMLl30Dfov3HKqzp5tO5b4pt6G/SezKcYhmDg15zXVBswHmctSAQKznqNW2LO5tTDow==", + "dependencies": { + "glob": "^7.0.0", + "interpret": "^1.0.0", + "rechoir": "^0.6.2" + }, + "bin": { + "shjs": "bin/shjs" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/side-channel": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", + "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "dependencies": { + "call-bind": "^1.0.0", + "get-intrinsic": "^1.0.2", + "object-inspect": "^1.9.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==" + }, + "node_modules/sirv": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/sirv/-/sirv-2.0.4.tgz", + "integrity": "sha512-94Bdh3cC2PKrbgSOUqTiGPWVZeSiXfKOVZNJniWoqrWrRkB1CJzBU3NEbiTsPcYy1lDsANA/THzS+9WBiy5nfQ==", + "dependencies": { + "@polka/url": "^1.0.0-next.24", + "mrmime": "^2.0.0", + "totalist": "^3.0.0" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": 
"https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==" + }, + "node_modules/sitemap": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/sitemap/-/sitemap-7.1.1.tgz", + "integrity": "sha512-mK3aFtjz4VdJN0igpIJrinf3EO8U8mxOPsTBzSsy06UtjZQJ3YY3o3Xa7zSc5nMqcMrRwlChHZ18Kxg0caiPBg==", + "dependencies": { + "@types/node": "^17.0.5", + "@types/sax": "^1.2.1", + "arg": "^5.0.0", + "sax": "^1.2.4" + }, + "bin": { + "sitemap": "dist/cli.js" + }, + "engines": { + "node": ">=12.0.0", + "npm": ">=5.6.0" + } + }, + "node_modules/sitemap/node_modules/@types/node": { + "version": "17.0.45", + "resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.45.tgz", + "integrity": "sha512-w+tIMs3rq2afQdsPJlODhoUEKzFP1ayaoyl1CcnwtIlsVe7K7bA1NGm4s3PraqTLlXnbIN84zuBlxBWo1u9BLw==" + }, + "node_modules/skin-tone": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/skin-tone/-/skin-tone-2.0.0.tgz", + "integrity": "sha512-kUMbT1oBJCpgrnKoSr0o6wPtvRWT9W9UKvGLwfJYO2WuahZRHOpEyL1ckyMGgMWh0UdpmaoFqKKD29WTomNEGA==", + "dependencies": { + "unicode-emoji-modifier-base": "^1.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "engines": { + "node": ">=8" + } + }, + "node_modules/sockjs": { + "version": "0.3.24", + "resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.24.tgz", + "integrity": "sha512-GJgLTZ7vYb/JtPSSZ10hsOYIvEYsjbNU+zPdIHcUaWVNUEPivzxku31865sSSud0Da0W4lEeOPlmw93zLQchuQ==", + "dependencies": { + "faye-websocket": "^0.11.3", + "uuid": "^8.3.2", + "websocket-driver": "^0.7.4" + } + }, + "node_modules/sort-css-media-queries": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/sort-css-media-queries/-/sort-css-media-queries-2.1.0.tgz", + "integrity": "sha512-IeWvo8NkNiY2vVYdPa27MCQiR0MN0M80johAYFVxWWXQ44KU84WNxjslwBHmc/7ZL2ccwkM7/e6S5aiKZXm7jA==", + "engines": { + "node": ">= 6.3.0" + } + }, + "node_modules/source-map": { + "version": "0.7.4", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.4.tgz", + "integrity": "sha512-l3BikUxvPOcn5E74dZiq5BGsTb5yEwhaTSzccU6t4sDOH8NWJCstKO5QT2CvtFoK6F0saL7p9xHAqHOlCPJygA==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/source-map-js": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz", + "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-support": { + "version": "0.5.21", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", + "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/source-map-support/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/space-separated-tokens": { + "version": "2.0.2", + "resolved": 
"https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz", + "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/spdy": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.2.tgz", + "integrity": "sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA==", + "dependencies": { + "debug": "^4.1.0", + "handle-thing": "^2.0.0", + "http-deceiver": "^1.2.7", + "select-hose": "^2.0.0", + "spdy-transport": "^3.0.0" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/spdy-transport": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/spdy-transport/-/spdy-transport-3.0.0.tgz", + "integrity": "sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==", + "dependencies": { + "debug": "^4.1.0", + "detect-node": "^2.0.4", + "hpack.js": "^2.1.6", + "obuf": "^1.1.2", + "readable-stream": "^3.0.6", + "wbuf": "^1.7.3" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==" + }, + "node_modules/srcset": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/srcset/-/srcset-4.0.0.tgz", + "integrity": "sha512-wvLeHgcVHKO8Sc/H/5lkGreJQVeYMm9rlmt8PuR1xE31rIuXhuzznUUqAt8MqLhB3MqJdFzlNAfpcWnxiFUcPw==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/stable": { + "version": "0.1.8", + "resolved": "https://registry.npmjs.org/stable/-/stable-0.1.8.tgz", + "integrity": "sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w==", + "deprecated": "Modern JS already guarantees Array#sort() is a stable sort, so this library is deprecated. 
See the compatibility table on MDN: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/sort#browser_compatibility" + }, + "node_modules/statuses": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", + "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/std-env": { + "version": "3.7.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.7.0.tgz", + "integrity": "sha512-JPbdCEQLj1w5GilpiHAx3qJvFndqybBysA3qUOnznweH4QbNYUsW/ea8QzSrnh0vNsezMMw5bcVool8lM0gwzg==" + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/string-width/node_modules/ansi-regex": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", + "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/string-width/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/stringify-entities": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.3.tgz", + "integrity": "sha512-BP9nNHMhhfcMbiuQKCqMjhDP5yBCAxsPu4pHFFzJ6Alo9dZgY4VLDPutXqIjpRiMoKdp7Av85Gr73Q5uH9k7+g==", + "dependencies": { + "character-entities-html4": "^2.0.0", + "character-entities-legacy": "^3.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/stringify-object": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/stringify-object/-/stringify-object-3.3.0.tgz", + "integrity": "sha512-rHqiFh1elqCQ9WPLIC8I0Q/g/wj5J1eMkyoiD6eoQApWHP0FtlK7rqnhmabL5VUY9JQCcqwwvlOaSuutekgyrw==", + "dependencies": { + "get-own-enumerable-property-symbols": "^3.0.0", + "is-obj": "^1.0.1", + "is-regexp": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom-string": { + 
"version": "1.0.0", + "resolved": "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz", + "integrity": "sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/style-to-object": { + "version": "0.4.4", + "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-0.4.4.tgz", + "integrity": "sha512-HYNoHZa2GorYNyqiCaBgsxvcJIn7OHq6inEga+E6Ke3m5JkoqpQbnFssk4jwe+K7AhGa2fcha4wSOf1Kn01dMg==", + "dependencies": { + "inline-style-parser": "0.1.1" + } + }, + "node_modules/stylehacks": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-5.1.1.tgz", + "integrity": "sha512-sBpcd5Hx7G6seo7b1LkpttvTz7ikD0LlH5RmdcBNb6fFR0Fl7LQwHDFr300q4cwUqi+IYrFGmsIHieMBfnN/Bw==", + "dependencies": { + "browserslist": "^4.21.4", + "postcss-selector-parser": "^6.0.4" + }, + "engines": { + "node": "^10 || ^12 || >=14.0" + }, + "peerDependencies": { + "postcss": "^8.2.15" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/svg-parser": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/svg-parser/-/svg-parser-2.0.4.tgz", + "integrity": "sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ==" + }, + "node_modules/svgo": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/svgo/-/svgo-2.8.0.tgz", + "integrity": "sha512-+N/Q9kV1+F+UeWYoSiULYo4xYSDQlTgb+ayMobAXPwMnLvop7oxKMo9OzIrX5x3eS4L4f2UHhc9axXwY8DpChg==", + "dependencies": { + "@trysound/sax": "0.2.0", + "commander": "^7.2.0", + "css-select": "^4.1.3", + "css-tree": "^1.1.3", + "csso": "^4.2.0", + "picocolors": "^1.0.0", + "stable": "^0.1.8" + }, + "bin": { + "svgo": "bin/svgo" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/svgo/node_modules/commander": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", + "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", + "engines": { + "node": ">= 10" + } + }, + 
"node_modules/svgo/node_modules/css-select": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/css-select/-/css-select-4.3.0.tgz", + "integrity": "sha512-wPpOYtnsVontu2mODhA19JrqWxNsfdatRKd64kmpRbQgh1KtItko5sTnEpPdpSaJszTOhEMlF/RPz28qj4HqhQ==", + "dependencies": { + "boolbase": "^1.0.0", + "css-what": "^6.0.1", + "domhandler": "^4.3.1", + "domutils": "^2.8.0", + "nth-check": "^2.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/svgo/node_modules/dom-serializer": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.4.1.tgz", + "integrity": "sha512-VHwB3KfrcOOkelEG2ZOfxqLZdfkil8PtJi4P8N2MMXucZq2yLp75ClViUlOVwyoHEDjYU433Aq+5zWP61+RGag==", + "dependencies": { + "domelementtype": "^2.0.1", + "domhandler": "^4.2.0", + "entities": "^2.0.0" + }, + "funding": { + "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" + } + }, + "node_modules/svgo/node_modules/domhandler": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.3.1.tgz", + "integrity": "sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==", + "dependencies": { + "domelementtype": "^2.2.0" + }, + "engines": { + "node": ">= 4" + }, + "funding": { + "url": "https://github.com/fb55/domhandler?sponsor=1" + } + }, + "node_modules/svgo/node_modules/domutils": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz", + "integrity": "sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==", + "dependencies": { + "dom-serializer": "^1.0.1", + "domelementtype": "^2.2.0", + "domhandler": "^4.2.0" + }, + "funding": { + "url": "https://github.com/fb55/domutils?sponsor=1" + } + }, + "node_modules/svgo/node_modules/entities": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", + "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==", + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/tapable": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz", + "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/terser": { + "version": "5.26.0", + "resolved": "https://registry.npmjs.org/terser/-/terser-5.26.0.tgz", + "integrity": "sha512-dytTGoE2oHgbNV9nTzgBEPaqAWvcJNl66VZ0BkJqlvp71IjO8CxdBx/ykCNb47cLnCmCvRZ6ZR0tLkqvZCdVBQ==", + "dependencies": { + "@jridgewell/source-map": "^0.3.3", + "acorn": "^8.8.2", + "commander": "^2.20.0", + "source-map-support": "~0.5.20" + }, + "bin": { + "terser": "bin/terser" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/terser-webpack-plugin": { + "version": "5.3.9", + "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.9.tgz", + "integrity": "sha512-ZuXsqE07EcggTWQjXUj+Aot/OMcD0bMKGgF63f7UxYcu5/AJF53aIpK1YoP5xR9l6s/Hy2b+t1AM0bLNPRuhwA==", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.17", + "jest-worker": "^27.4.5", + "schema-utils": "^3.1.1", + "serialize-javascript": "^6.0.1", + "terser": "^5.16.8" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.1.0" + }, + 
"peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "uglify-js": { + "optional": true + } + } + }, + "node_modules/terser-webpack-plugin/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/terser-webpack-plugin/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/terser-webpack-plugin/node_modules/jest-worker": { + "version": "27.5.1", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz", + "integrity": "sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==", + "dependencies": { + "@types/node": "*", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": ">= 10.13.0" + } + }, + "node_modules/terser-webpack-plugin/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/terser-webpack-plugin/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/terser-webpack-plugin/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/terser/node_modules/commander": { + "version": "2.20.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" + }, + "node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==" + }, + "node_modules/thunky": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz", + "integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==" + }, + "node_modules/tiny-invariant": { + "version": 
"1.3.1", + "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.1.tgz", + "integrity": "sha512-AD5ih2NlSssTCwsMznbvwMZpJ1cbhkGd2uueNxzv2jDlEeZdU04JQfRnggJQ8DrcVBGjAsCKwFBbDlVNtEMlzw==" + }, + "node_modules/tiny-warning": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz", + "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==" + }, + "node_modules/to-fast-properties": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", + "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==", + "engines": { + "node": ">=4" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/toidentifier": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", + "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/totalist": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/totalist/-/totalist-3.0.1.tgz", + "integrity": "sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/trim-lines": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz", + "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/trough": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/trough/-/trough-2.1.0.tgz", + "integrity": "sha512-AqTiAOLcj85xS7vQ8QkAV41hPDIJ71XJB4RCUrzo/1GM2CQwhkJGaf9Hgr7BOugMRpgGUrqRg/DrBDl4H40+8g==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/tslib": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", + "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" + }, + "node_modules/type-fest": { + "version": "2.19.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz", + "integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==", + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/type-is": { + "version": "1.6.18", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", + "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", + "dependencies": { + "media-typer": "0.3.0", + "mime-types": "~2.1.24" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/type-is/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": 
"sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/type-is/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/typedarray-to-buffer": { + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", + "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==", + "dependencies": { + "is-typedarray": "^1.0.0" + } + }, + "node_modules/typescript": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.3.3.tgz", + "integrity": "sha512-pXWcraxM0uxAS+tN0AG/BF2TyqmHO014Z070UsJ+pFvYuRSq8KH8DmWpnbXe0pEPDHXZV3FcAbJkijJ5oNEnWw==", + "peer": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" + }, + "node_modules/unicode-canonical-property-names-ecmascript": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz", + "integrity": "sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ==", + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-emoji-modifier-base": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unicode-emoji-modifier-base/-/unicode-emoji-modifier-base-1.0.0.tgz", + "integrity": "sha512-yLSH4py7oFH3oG/9K+XWrz1pSi3dfUrWEnInbxMfArOfc1+33BlGPQtLsOYwvdMy11AwUBetYuaRxSPqgkq+8g==", + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-match-property-ecmascript": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz", + "integrity": "sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==", + "dependencies": { + "unicode-canonical-property-names-ecmascript": "^2.0.0", + "unicode-property-aliases-ecmascript": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-match-property-value-ecmascript": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.1.0.tgz", + "integrity": "sha512-qxkjQt6qjg/mYscYMC0XKRn3Rh0wFPlfxB0xkt9CfyTvpX1Ra0+rAmdX2QyAobptSEvuy4RtpPRui6XkV+8wjA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-property-aliases-ecmascript": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz", + "integrity": "sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==", + "engines": { + "node": ">=4" + } + }, + "node_modules/unified": { + "version": "11.0.4", + "resolved": "https://registry.npmjs.org/unified/-/unified-11.0.4.tgz", + "integrity": 
"sha512-apMPnyLjAX+ty4OrNap7yumyVAMlKx5IWU2wlzzUdYJO9A8f1p9m/gywF/GM2ZDFcjQPrx59Mc90KwmxsoklxQ==", + "dependencies": { + "@types/unist": "^3.0.0", + "bail": "^2.0.0", + "devlop": "^1.0.0", + "extend": "^3.0.0", + "is-plain-obj": "^4.0.0", + "trough": "^2.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unique-string": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-3.0.0.tgz", + "integrity": "sha512-VGXBUVwxKMBUznyffQweQABPRRW1vHZAbadFZud4pLFAqRGvv/96vafgjWFqzourzr8YonlQiPgH0YCJfawoGQ==", + "dependencies": { + "crypto-random-string": "^4.0.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/unist-util-is": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.0.tgz", + "integrity": "sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz", + "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position-from-estree": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unist-util-position-from-estree/-/unist-util-position-from-estree-2.0.0.tgz", + "integrity": "sha512-KaFVRjoqLyF6YXCbVLNad/eS4+OfPQQn2yOd7zF/h5T/CSL2v8NpN6a5TPvtbXthAGw5nG+PuTtq+DdIZr+cRQ==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-remove-position": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-remove-position/-/unist-util-remove-position-5.0.0.tgz", + "integrity": "sha512-Hp5Kh3wLxv0PHj9m2yZhhLt58KzPtEYKQQ4yxfYFEO7EvHwzyDYnduhHnY1mDxoqr7VUwVuHXk9RXKIiYS1N8Q==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-visit": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-stringify-position": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz", + "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.0.0.tgz", + "integrity": "sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit-parents": { + 
"version": "6.0.1", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.1.tgz", + "integrity": "sha512-L/PqWzfTP9lzzEa6CKs0k2nARxTdZduw3zyh8d2NVBnsyvHjSX4TWse388YrrQKbvI8w20fGjGlhgT96WwKykw==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.0.13", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.13.tgz", + "integrity": "sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "escalade": "^3.1.1", + "picocolors": "^1.0.0" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/update-notifier": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-6.0.2.tgz", + "integrity": "sha512-EDxhTEVPZZRLWYcJ4ZXjGFN0oP7qYvbXWzEgRm/Yql4dHX5wDbvh89YHP6PK1lzZJYrMtXUuZZz8XGK+U6U1og==", + "dependencies": { + "boxen": "^7.0.0", + "chalk": "^5.0.1", + "configstore": "^6.0.0", + "has-yarn": "^3.0.0", + "import-lazy": "^4.0.0", + "is-ci": "^3.0.1", + "is-installed-globally": "^0.4.0", + "is-npm": "^6.0.0", + "is-yarn-global": "^0.4.0", + "latest-version": "^7.0.0", + "pupa": "^3.1.0", + "semver": "^7.3.7", + "semver-diff": "^4.0.0", + "xdg-basedir": "^5.1.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/yeoman/update-notifier?sponsor=1" + } + }, + "node_modules/update-notifier/node_modules/boxen": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/boxen/-/boxen-7.1.1.tgz", + "integrity": "sha512-2hCgjEmP8YLWQ130n2FerGv7rYpfBmnmp9Uy2Le1vge6X3gZIfSmEzP5QTDElFxcvVcXlEn8Aq6MU/PZygIOog==", + "dependencies": { + "ansi-align": "^3.0.1", + "camelcase": "^7.0.1", + "chalk": "^5.2.0", + "cli-boxes": "^3.0.0", + "string-width": "^5.1.2", + "type-fest": "^2.13.0", + "widest-line": "^4.0.1", + "wrap-ansi": "^8.1.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/update-notifier/node_modules/camelcase": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-7.0.1.tgz", + "integrity": "sha512-xlx1yCK2Oc1APsPXDL2LdlNP6+uu8OCDdhOBSVT279M/S+y75O30C2VuD8T2ogdePBBl7PfPF4504tnLgX3zfw==", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/update-notifier/node_modules/chalk": { + 
"version": "5.3.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.3.0.tgz", + "integrity": "sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w==", + "engines": { + "node": "^12.17.0 || ^14.13 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/uri-js/node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/url-loader": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/url-loader/-/url-loader-4.1.1.tgz", + "integrity": "sha512-3BTV812+AVHHOJQO8O5MkWgZ5aosP7GnROJwvzLS9hWDj00lZ6Z0wNak423Lp9PBZN05N+Jk/N5Si8jRAlGyWA==", + "dependencies": { + "loader-utils": "^2.0.0", + "mime-types": "^2.1.27", + "schema-utils": "^3.0.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "file-loader": "*", + "webpack": "^4.0.0 || ^5.0.0" + }, + "peerDependenciesMeta": { + "file-loader": { + "optional": true + } + } + }, + "node_modules/url-loader/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/url-loader/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/url-loader/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/url-loader/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/url-loader/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/url-loader/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": 
"sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" + }, + "node_modules/utila": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/utila/-/utila-0.4.0.tgz", + "integrity": "sha512-Z0DbgELS9/L/75wZbro8xAnT50pBVFQZ+hUEueGDU5FN51YSCYM+jdxsfCiHjwNP/4LCDD0i/graKpeBnOXKRA==" + }, + "node_modules/utility-types": { + "version": "3.10.0", + "resolved": "https://registry.npmjs.org/utility-types/-/utility-types-3.10.0.tgz", + "integrity": "sha512-O11mqxmi7wMKCo6HKFt5AhO4BwY3VV68YU07tgxfz8zJTIxr4BpsezN49Ffwy9j3ZpwwJp4fkRwjRzq3uWE6Rg==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/utils-merge": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", + "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/value-equal": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/value-equal/-/value-equal-1.0.1.tgz", + "integrity": "sha512-NOJ6JZCAWr0zlxZt+xqCHNTEKOsrks2HQd4MqhP1qy4z1SkbEP467eNx6TgDKXMvUOb+OENfJCZwM+16n7fRfw==" + }, + "node_modules/vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/vfile": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.1.tgz", + "integrity": "sha512-1bYqc7pt6NIADBJ98UiG0Bn/CHIVOoZ/IyEkqIruLg0mE1BKzkOXY2D6CSqQIcKqgadppE5lrxgWXJmXd7zZJw==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-stringify-position": "^4.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-location": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-5.0.2.tgz", + "integrity": "sha512-NXPYyxyBSH7zB5U6+3uDdd6Nybz6o6/od9rk8bp9H8GR3L+cm/fC0uUTbqBmUTnMCUDslAGBOIKNfvvb+gGlDg==", + "dependencies": { + "@types/unist": "^3.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-message": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-4.0.2.tgz", + "integrity": "sha512-jRDZ1IMLttGj41KcZvlrYAaI3CfqpLpfpf+Mfig13viT6NKvRzWZ+lXz0Y5D60w6uJIBAOGq9mSHf0gktF0duw==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + 
}, + "node_modules/watchpack": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.0.tgz", + "integrity": "sha512-Lcvm7MGST/4fup+ifyKi2hjyIAwcdI4HRgtvTpIUxBRhB+RFtUh8XtDOxUfctVCnhVi+QQj49i91OyvzkJl6cg==", + "dependencies": { + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.1.2" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/wbuf": { + "version": "1.7.3", + "resolved": "https://registry.npmjs.org/wbuf/-/wbuf-1.7.3.tgz", + "integrity": "sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA==", + "dependencies": { + "minimalistic-assert": "^1.0.0" + } + }, + "node_modules/web-namespaces": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/web-namespaces/-/web-namespaces-2.0.1.tgz", + "integrity": "sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/webpack": { + "version": "5.89.0", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.89.0.tgz", + "integrity": "sha512-qyfIC10pOr70V+jkmud8tMfajraGCZMBWJtrmuBymQKCrLTRejBI8STDp1MCyZu/QTdZSeacCQYpYNQVOzX5kw==", + "dependencies": { + "@types/eslint-scope": "^3.7.3", + "@types/estree": "^1.0.0", + "@webassemblyjs/ast": "^1.11.5", + "@webassemblyjs/wasm-edit": "^1.11.5", + "@webassemblyjs/wasm-parser": "^1.11.5", + "acorn": "^8.7.1", + "acorn-import-assertions": "^1.9.0", + "browserslist": "^4.14.5", + "chrome-trace-event": "^1.0.2", + "enhanced-resolve": "^5.15.0", + "es-module-lexer": "^1.2.1", + "eslint-scope": "5.1.1", + "events": "^3.2.0", + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.2.9", + "json-parse-even-better-errors": "^2.3.1", + "loader-runner": "^4.2.0", + "mime-types": "^2.1.27", + "neo-async": "^2.6.2", + "schema-utils": "^3.2.0", + "tapable": "^2.1.1", + "terser-webpack-plugin": "^5.3.7", + "watchpack": "^2.4.0", + "webpack-sources": "^3.2.3" + }, + "bin": { + "webpack": "bin/webpack.js" + }, + "engines": { + "node": ">=10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependenciesMeta": { + "webpack-cli": { + "optional": true + } + } + }, + "node_modules/webpack-bundle-analyzer": { + "version": "4.10.1", + "resolved": "https://registry.npmjs.org/webpack-bundle-analyzer/-/webpack-bundle-analyzer-4.10.1.tgz", + "integrity": "sha512-s3P7pgexgT/HTUSYgxJyn28A+99mmLq4HsJepMPzu0R8ImJc52QNqaFYW1Z2z2uIb1/J3eYgaAWVpaC+v/1aAQ==", + "dependencies": { + "@discoveryjs/json-ext": "0.5.7", + "acorn": "^8.0.4", + "acorn-walk": "^8.0.0", + "commander": "^7.2.0", + "debounce": "^1.2.1", + "escape-string-regexp": "^4.0.0", + "gzip-size": "^6.0.0", + "html-escaper": "^2.0.2", + "is-plain-object": "^5.0.0", + "opener": "^1.5.2", + "picocolors": "^1.0.0", + "sirv": "^2.0.3", + "ws": "^7.3.1" + }, + "bin": { + "webpack-bundle-analyzer": "lib/bin/analyzer.js" + }, + "engines": { + "node": ">= 10.13.0" + } + }, + "node_modules/webpack-bundle-analyzer/node_modules/commander": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", + "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", + "engines": { + "node": ">= 10" + } + }, + "node_modules/webpack-dev-middleware": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.3.tgz", + "integrity": 
"sha512-hj5CYrY0bZLB+eTO+x/j67Pkrquiy7kWepMHmUMoPsmcUaeEnQJqFzHJOyxgWlq746/wUuA64p9ta34Kyb01pA==", + "dependencies": { + "colorette": "^2.0.10", + "memfs": "^3.4.3", + "mime-types": "^2.1.31", + "range-parser": "^1.2.1", + "schema-utils": "^4.0.0" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.0.0 || ^5.0.0" + } + }, + "node_modules/webpack-dev-middleware/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack-dev-middleware/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack-dev-middleware/node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack-dev-server": { + "version": "4.15.1", + "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.15.1.tgz", + "integrity": "sha512-5hbAst3h3C3L8w6W4P96L5vaV0PxSmJhxZvWKYIdgxOQm8pNZ5dEOmmSLBVpP85ReeyRt6AS1QJNyo/oFFPeVA==", + "dependencies": { + "@types/bonjour": "^3.5.9", + "@types/connect-history-api-fallback": "^1.3.5", + "@types/express": "^4.17.13", + "@types/serve-index": "^1.9.1", + "@types/serve-static": "^1.13.10", + "@types/sockjs": "^0.3.33", + "@types/ws": "^8.5.5", + "ansi-html-community": "^0.0.8", + "bonjour-service": "^1.0.11", + "chokidar": "^3.5.3", + "colorette": "^2.0.10", + "compression": "^1.7.4", + "connect-history-api-fallback": "^2.0.0", + "default-gateway": "^6.0.3", + "express": "^4.17.3", + "graceful-fs": "^4.2.6", + "html-entities": "^2.3.2", + "http-proxy-middleware": "^2.0.3", + "ipaddr.js": "^2.0.1", + "launch-editor": "^2.6.0", + "open": "^8.0.9", + "p-retry": "^4.5.0", + "rimraf": "^3.0.2", + "schema-utils": "^4.0.0", + "selfsigned": "^2.1.1", + "serve-index": "^1.9.1", + "sockjs": "^0.3.24", + "spdy": "^4.0.2", + "webpack-dev-middleware": "^5.3.1", + "ws": "^8.13.0" + }, + "bin": { + "webpack-dev-server": "bin/webpack-dev-server.js" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.37.0 || ^5.0.0" + }, + "peerDependenciesMeta": { + "webpack": { + "optional": true + }, + "webpack-cli": { + "optional": true + } + } + }, + "node_modules/webpack-dev-server/node_modules/ws": { + "version": "8.15.1", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.15.1.tgz", + "integrity": "sha512-W5OZiCjXEmk0yZ66ZN82beM5Sz7l7coYxpRkzS+p9PP+ToQry8szKh+61eNktr7EA9DOwvFGhfC605jDHbP6QQ==", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + 
"node_modules/webpack-merge": { + "version": "5.10.0", + "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-5.10.0.tgz", + "integrity": "sha512-+4zXKdx7UnO+1jaN4l2lHVD+mFvnlZQP/6ljaJVb4SZiwIKeUnrT5l0gkT8z+n4hKpC+jpOv6O9R+gLtag7pSA==", + "dependencies": { + "clone-deep": "^4.0.1", + "flat": "^5.0.2", + "wildcard": "^2.0.0" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/webpack-sources": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.2.3.tgz", + "integrity": "sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==", + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/webpack/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/webpack/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/webpack/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/webpack/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/webpackbar": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/webpackbar/-/webpackbar-5.0.2.tgz", + "integrity": "sha512-BmFJo7veBDgQzfWXl/wwYXr/VFus0614qZ8i9znqcl9fnEdiVkdbi0TedLQ6xAK92HZHDJ0QmyQ0fmuZPAgCYQ==", + "dependencies": { + "chalk": "^4.1.0", + "consola": "^2.15.3", + "pretty-time": "^1.1.0", + "std-env": "^3.0.1" + }, + "engines": { + "node": ">=12" + }, + "peerDependencies": { + "webpack": "3 || 4 || 5" + } + }, + "node_modules/websocket-driver": { + "version": "0.7.4", + "resolved": 
"https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz", + "integrity": "sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==", + "dependencies": { + "http-parser-js": ">=0.5.1", + "safe-buffer": ">=5.1.0", + "websocket-extensions": ">=0.1.1" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/websocket-extensions": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.4.tgz", + "integrity": "sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/widest-line": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-4.0.1.tgz", + "integrity": "sha512-o0cyEG0e8GPzT4iGHphIOh0cJOV8fivsXxddQasHPHfoZf1ZexrfeA21w2NaEN1RHE+fXlfISmOE8R9N3u3Qig==", + "dependencies": { + "string-width": "^5.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/wildcard": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/wildcard/-/wildcard-2.0.1.tgz", + "integrity": "sha512-CC1bOL87PIWSBhDcTrdeLo6eGT7mCFtrg0uIJtqJUFyK+eJnzl8A1niH56uu7KMa5XFrtiV+AQuHO3n7DsHnLQ==" + }, + "node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-regex": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", + "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-styles": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": 
"sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" + }, + "node_modules/write-file-atomic": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz", + "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==", + "dependencies": { + "imurmurhash": "^0.1.4", + "is-typedarray": "^1.0.0", + "signal-exit": "^3.0.2", + "typedarray-to-buffer": "^3.1.5" + } + }, + "node_modules/ws": { + "version": "7.5.9", + "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.9.tgz", + "integrity": "sha512-F+P9Jil7UiSKSkppIiD94dN07AwvFixvLIj1Og1Rl9GGMuNipJnV9JzjD6XuqmAeiswGvUmNLjr5cFuXwNS77Q==", + "engines": { + "node": ">=8.3.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": "^5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/xdg-basedir": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-5.1.0.tgz", + "integrity": "sha512-GCPAHLvrIH13+c0SuacwvRYj2SxJXQ4kaVTT5xgL3kPrz56XxkF21IGhjSE1+W0aw7gpBWRGXLCPnPby6lSpmQ==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/xml-js": { + "version": "1.6.11", + "resolved": "https://registry.npmjs.org/xml-js/-/xml-js-1.6.11.tgz", + "integrity": "sha512-7rVi2KMfwfWFl+GpPg6m80IVMWXLRjO+PxTq7V2CDhoGak0wzYzFgUY2m4XJ47OGdXd8eLE8EmwfAmdjw7lC1g==", + "dependencies": { + "sax": "^1.2.4" + }, + "bin": { + "xml-js": "bin/cli.js" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==" + }, + "node_modules/yaml": { + "version": "1.10.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", + "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", + "engines": { + "node": ">= 6" + } + }, + "node_modules/yocto-queue": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.0.0.tgz", + "integrity": "sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==", + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/zwitch": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz", + "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + } + } +} diff --git a/site/package.json b/site/package.json new file mode 100644 index 000000000..4f5998f35 --- /dev/null +++ b/site/package.json @@ -0,0 +1,47 @@ +{ + "name": "retina", + "version": "0.0.0", + "private": true, + "overrides": { + "trim": ">0.0.3", + "got": ">11.8.5" + }, + "scripts": { + "docusaurus": "docusaurus", + "start": "docusaurus start", + "build": "docusaurus build", + "swizzle": "docusaurus swizzle", + "deploy": "docusaurus deploy", + "clear": "docusaurus clear", + "serve": "docusaurus serve", + "write-translations": "docusaurus write-translations", + "write-heading-ids": "docusaurus write-heading-ids" + }, + "dependencies": { + "@docusaurus/core": "3.0.1", 
+ "@docusaurus/preset-classic": "3.0.1", + "@mdx-js/react": "^3.0.0", + "clsx": "^2.0.0", + "prism-react-renderer": "^2.3.1", + "react": "^18.2.0", + "react-dom": "^18.2.0" + }, + "devDependencies": { + "@docusaurus/module-type-aliases": "3.0.1" + }, + "browserslist": { + "production": [ + ">0.5%", + "not dead", + "not op_mini all" + ], + "development": [ + "last 1 chrome version", + "last 1 firefox version", + "last 1 safari version" + ] + }, + "engines": { + "node": ">=16.14" + } +} diff --git a/site/sidebars.js b/site/sidebars.js new file mode 100644 index 000000000..a8b334ee0 --- /dev/null +++ b/site/sidebars.js @@ -0,0 +1,88 @@ +/** + * Creating a sidebar enables you to: + - create an ordered group of docs + - render a sidebar for each doc of that group + - provide next/previous navigation + + The sidebars can be generated from the filesystem, or explicitly defined here. + + Create as many sidebars as you want. + */ + +// @ts-check + +/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ +const sidebars = { + // By default, Docusaurus generates a sidebar from the docs folder structure + //tutorialSidebar: [{type: 'autogenerated', dirName: '.'}], + + // But you can create a sidebar manually + + tutorialSidebar: [ + 'intro', + { + type: 'category', + label: 'Quick Installation', + items: [ + 'installation/setup', + 'installation/prometheus-unmanaged', + 'installation/prometheus-azure-managed', + 'installation/cli', + 'installation/config', + ], + }, + { + type: 'category', + label: 'Metrics', + items: [ + 'metrics/modes', + 'metrics/basic', + 'metrics/advanced', + 'metrics/configuration', + 'metrics/annotations', + { + type: 'category', + label: 'Plugins', + items: [ + 'metrics/plugins/packetforward', + 'metrics/plugins/dropreason', + 'metrics/plugins/linuxutil', + 'metrics/plugins/dns', + 'metrics/plugins/hnsstats', + 'metrics/plugins/packetparser', + 'metrics/plugins/tcpretrans', + ], + }, + ], + }, + { + type: 'category', + label: 'Captures', + items: [ + 'captures/readme', + 'captures/cli', + ], + }, + { + type: 'category', + label: 'CRDs', + items: [ + 'CRDs/Capture', + 'CRDs/RetinaEndpoint', + 'CRDs/MetricsConfiguration', + ], + }, + { + type: 'category', + label: 'Troubleshooting Guides', + items: [ + 'troubleshooting/capture', + 'troubleshooting/basic-metrics', + ], + }, + 'contributing/readme', + ], + +}; + +module.exports = sidebars; diff --git a/site/src/css/custom.css b/site/src/css/custom.css new file mode 100644 index 000000000..2bc6a4cfd --- /dev/null +++ b/site/src/css/custom.css @@ -0,0 +1,30 @@ +/** + * Any CSS included here will be global. The classic template + * bundles Infima by default. Infima is a CSS framework designed to + * work well for content-centric websites. + */ + +/* You can override the default Infima variables here. */ +:root { + --ifm-color-primary: #2e8555; + --ifm-color-primary-dark: #29784c; + --ifm-color-primary-darker: #277148; + --ifm-color-primary-darkest: #205d3b; + --ifm-color-primary-light: #33925d; + --ifm-color-primary-lighter: #359962; + --ifm-color-primary-lightest: #3cad6e; + --ifm-code-font-size: 95%; + --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.1); +} + +/* For readability concerns, you should choose a lighter palette in dark mode. 
*/ +[data-theme='dark'] { + --ifm-color-primary: #25c2a0; + --ifm-color-primary-dark: #21af90; + --ifm-color-primary-darker: #1fa588; + --ifm-color-primary-darkest: #1a8870; + --ifm-color-primary-light: #29d5b0; + --ifm-color-primary-lighter: #32d8b4; + --ifm-color-primary-lightest: #4fddbf; + --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.3); +} diff --git a/site/src/pages/index.js b/site/src/pages/index.js new file mode 100644 index 000000000..575bf5d08 --- /dev/null +++ b/site/src/pages/index.js @@ -0,0 +1,48 @@ +import React from 'react'; +import clsx from 'clsx'; +import Link from '@docusaurus/Link'; +import useDocusaurusContext from '@docusaurus/useDocusaurusContext'; +import Layout from '@theme/Layout'; +import featuresImage from '@site/static/img/retina-features.png'; + +import styles from './index.module.css'; + +function HomepageHeader() { + const {siteConfig} = useDocusaurusContext(); + return ( +
+
+

{siteConfig.title}

+

{siteConfig.tagline}

+
+ + Get Started + +
+
+
+ ); +} + +export default function Home() { + const {siteConfig} = useDocusaurusContext(); + return ( + + +
+
+
What is Retina?
+
+
Retina is a cloud- and vendor-agnostic Kubernetes Network Observability platform which helps customers with enterprise-grade DevOps, SecOps, and compliance use cases.
+
It is designed to cater to cluster network administrators, cluster security administrators, and DevOps engineers by providing a centralized platform for monitoring application and network health and security. Retina is capable of collecting telemetry, exporting it to multiple destinations (such as Prometheus, Azure Monitor, and other vendors), and visualizing the data in a variety of ways (like Grafana, Azure Monitor, Azure Log Analytics, etc.).
+
+
+ {"Retina +
+
+ ); +} diff --git a/site/src/pages/index.module.css b/site/src/pages/index.module.css new file mode 100644 index 000000000..546f77552 --- /dev/null +++ b/site/src/pages/index.module.css @@ -0,0 +1,50 @@ +/** + * CSS files with the .module.css suffix will be treated as CSS modules + * and scoped locally. + */ + +.heroBanner { + padding: 4rem 0; + text-align: center; + position: relative; + overflow: hidden; +} + +@media screen and (max-width: 996px) { + .heroBanner { + padding: 2rem; + } +} + +.buttons { + display: flex; + align-items: center; + justify-content: center; +} + +.columns { + display: flex; + align-items: center; + padding: 4rem 1.5rem; +} + +.column1 { + flex: none; + width: 33.3333%; + font-weight: 600; + font-size: 3rem; +} + +.column2 { + flex-grow: 2; + font-size: 1.25rem; +} + +.paragraph1 { + margin: 0 0 20px +} + +.features { + display: block; + margin: 0 auto; +} \ No newline at end of file diff --git a/site/src/pages/markdown-page.md b/site/src/pages/markdown-page.md new file mode 100644 index 000000000..c14765afc --- /dev/null +++ b/site/src/pages/markdown-page.md @@ -0,0 +1,7 @@ +--- +title: Markdown page example +--- + +## Markdown page example + +You don't need React to write simple standalone pages. diff --git a/site/start-dev.sh b/site/start-dev.sh new file mode 100755 index 000000000..97e8c4ff1 --- /dev/null +++ b/site/start-dev.sh @@ -0,0 +1,2 @@ +npm install --prefix site +npm start --prefix site -- --host 0.0.0.0 diff --git a/site/static/.nojekyll b/site/static/.nojekyll new file mode 100644 index 000000000..e69de29bb diff --git a/site/static/img/android-chrome-192x192.png b/site/static/img/android-chrome-192x192.png new file mode 100644 index 000000000..459f2ce8b Binary files /dev/null and b/site/static/img/android-chrome-192x192.png differ diff --git a/site/static/img/android-chrome-512x512.png b/site/static/img/android-chrome-512x512.png new file mode 100644 index 000000000..981159a43 Binary files /dev/null and b/site/static/img/android-chrome-512x512.png differ diff --git a/site/static/img/apple-touch-icon.png b/site/static/img/apple-touch-icon.png new file mode 100644 index 000000000..cea75caff Binary files /dev/null and b/site/static/img/apple-touch-icon.png differ diff --git a/site/static/img/favicon-16x16.png b/site/static/img/favicon-16x16.png new file mode 100644 index 000000000..d3ecb385a Binary files /dev/null and b/site/static/img/favicon-16x16.png differ diff --git a/site/static/img/favicon-32x32.png b/site/static/img/favicon-32x32.png new file mode 100644 index 000000000..38ab1e934 Binary files /dev/null and b/site/static/img/favicon-32x32.png differ diff --git a/site/static/img/favicon.ico b/site/static/img/favicon.ico new file mode 100644 index 000000000..1b8849178 Binary files /dev/null and b/site/static/img/favicon.ico differ diff --git a/site/static/img/logo.svg b/site/static/img/logo.svg new file mode 100644 index 000000000..ad9d11a4a --- /dev/null +++ b/site/static/img/logo.svg @@ -0,0 +1 @@ + diff --git a/site/static/img/networking.svg b/site/static/img/networking.svg new file mode 100755 index 000000000..951712d61 --- /dev/null +++ b/site/static/img/networking.svg @@ -0,0 +1,43 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/site/static/img/retina-features.png b/site/static/img/retina-features.png new file mode 100644 index 000000000..805a5f777 Binary files /dev/null and b/site/static/img/retina-features.png differ diff --git a/site/static/img/retina-logo.png b/site/static/img/retina-logo.png new 
file mode 100644 index 000000000..e753850f8 Binary files /dev/null and b/site/static/img/retina-logo.png differ diff --git a/site/static/img/site.webmanifest b/site/static/img/site.webmanifest new file mode 100644 index 000000000..1dd911238 --- /dev/null +++ b/site/static/img/site.webmanifest @@ -0,0 +1 @@ +{"name":"","short_name":"","icons":[{"src":"/android-chrome-192x192.png","sizes":"192x192","type":"image/png"},{"src":"/android-chrome-512x512.png","sizes":"512x512","type":"image/png"}],"theme_color":"#ffffff","background_color":"#ffffff","display":"standalone"} diff --git a/test/enricher/enricher b/test/enricher/enricher new file mode 100644 index 000000000..9ca628a76 Binary files /dev/null and b/test/enricher/enricher differ diff --git a/test/enricher/main.go b/test/enricher/main.go new file mode 100644 index 000000000..2a4c8bd43 --- /dev/null +++ b/test/enricher/main.go @@ -0,0 +1,56 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +// nolint + +package main + +import ( + "context" + + "github.com/microsoft/retina/pkg/controllers/cache" + "github.com/microsoft/retina/pkg/enricher" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/pubsub" + "github.com/cilium/cilium/api/v1/flow" + v1 "github.com/cilium/cilium/pkg/hubble/api/v1" + "go.uber.org/zap" + "google.golang.org/protobuf/types/known/timestamppb" +) + +func main() { + opts := log.GetDefaultLogOpts() + opts.Level = "debug" + log.SetupZapLogger(opts) + l := log.Logger().Named("test-enricher") + + ctx := context.Background() + c := cache.New(pubsub.New()) + + e := enricher.New(ctx, c) + + e.Run() + + for i := 0; i < 10; i++ { + addEvent(e) + } + + oreader := e.ExportReader() + + for i := 0; i < 10; i++ { + l.Info("Receiving event") + ev := oreader.NextFollow(ctx) + + l.Info("Received event", zap.Any("event", ev)) + } +} + +func addEvent(e *enricher.Enricher) { + l := log.Logger().Named("addev") + ev := &v1.Event{ + Timestamp: timestamppb.Now(), + Event: &flow.Flow{}, + } + + l.Info("Adding event", zap.Any("event", ev)) + e.Write(ev) +} diff --git a/test/goldpinger/cluster-role-binding.yaml b/test/goldpinger/cluster-role-binding.yaml new file mode 100644 index 000000000..e18b186a1 --- /dev/null +++ b/test/goldpinger/cluster-role-binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: goldpinger-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: goldpinger-clusterrole +subjects: + - kind: ServiceAccount + name: goldpinger-serviceaccount + namespace: default diff --git a/test/goldpinger/cluster-role.yaml b/test/goldpinger/cluster-role.yaml new file mode 100644 index 000000000..1e73bc59b --- /dev/null +++ b/test/goldpinger/cluster-role.yaml @@ -0,0 +1,11 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: goldpinger-clusterrole +rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - list diff --git a/test/goldpinger/daemonset.yaml b/test/goldpinger/daemonset.yaml new file mode 100644 index 000000000..895409a03 --- /dev/null +++ b/test/goldpinger/daemonset.yaml @@ -0,0 +1,173 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: goldpinger-host + namespace: default +spec: + selector: + matchLabels: + app: goldpinger + type: goldpinger-host + template: + metadata: + labels: + app: goldpinger + type: goldpinger-host + spec: + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + hostNetwork: 
true + serviceAccount: "goldpinger-serviceaccount" + securityContext: + runAsNonRoot: true + runAsUser: 1000 + fsGroup: 2000 + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + #- arm64 + #- ppc64le + #- s390x + - key: kubernetes.io/os + operator: In + values: + - linux + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - goldpinger + topologyKey: "kubernetes.io/hostname" + containers: + - name: goldpinger-vm + env: + - name: HOST + value: "0.0.0.0" + - name: PORT + value: "7070" + - name: PING_TIMEOUT + value: "10s" + - name: CHECK_TIMEOUT + value: "20s" + - name: CHECK_ALL_TIMEOUT + value: "20s" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + # - name: HOSTS_TO_RESOLVE + # value: "1.1.1.1 8.8.8.8 www.bing.com" + image: acnpublic.azurecr.io/goldpinger:v2.0.2 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + ports: + - containerPort: 7070 + name: http + readinessProbe: + httpGet: + path: /healthz + port: 7070 + initialDelaySeconds: 5 + periodSeconds: 5 + livenessProbe: + httpGet: + path: /healthz + port: 7070 + initialDelaySeconds: 5 + periodSeconds: 5 +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: goldpinger-windows + namespace: default +spec: + selector: + matchLabels: + app: goldpinger + type: goldpinger-windows + template: + metadata: + labels: + app: goldpinger + type: goldpinger-windows + spec: + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + serviceAccount: "goldpinger-serviceaccount" + + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + #- arm64 + #- ppc64le + #- s390x + - key: kubernetes.io/os + operator: In + values: + - windows + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - goldpinger + topologyKey: "kubernetes.io/hostname" + containers: + - name: goldpinger-vm + env: + - name: HOST + value: "0.0.0.0" + - name: PORT + value: "7070" + - name: PING_TIMEOUT + value: "10s" + - name: CHECK_TIMEOUT + value: "20s" + - name: CHECK_ALL_TIMEOUT + value: "20s" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + # - name: HOSTS_TO_RESOLVE + # value: "1.1.1.1 8.8.8.8 www.bing.com" + image: "acnpublic.azurecr.io/goldpinger-win:v2.0.2" + command: ["\\goldpinger.exe"] + ports: + - containerPort: 7070 + name: http diff --git a/test/goldpinger/deployment.yaml b/test/goldpinger/deployment.yaml new file mode 100644 index 000000000..151b459cc --- /dev/null +++ b/test/goldpinger/deployment.yaml @@ -0,0 +1,94 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: goldpinger-pod + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + app: goldpinger + type: goldpinger-pod + template: + metadata: + labels: + app: goldpinger + type: goldpinger-pod + spec: + serviceAccount: "goldpinger-serviceaccount" + securityContext: + runAsNonRoot: true + runAsUser: 1000 + fsGroup: 2000 + 
affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + #- arm64 + #- ppc64le + #- s390x + - key: kubernetes.io/os + operator: In + values: + - linux + + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - goldpinger + topologyKey: "kubernetes.io/hostname" + containers: + - name: goldpinger + env: + - name: HOST + value: "0.0.0.0" + - name: PORT + value: "7070" + - name: PING_TIMEOUT + value: "10s" + - name: CHECK_TIMEOUT + value: "20s" + - name: CHECK_ALL_TIMEOUT + value: "20s" + - name: DNS_TARGETS_TIMEOUT + value: "10s" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: HOSTS_TO_RESOLVE + value: "1.1.1.1 8.8.8.8 www.bing.com" + image: "mcr.microsoft.com/aks/e2e/bloomberg-goldpinger:v3.7.0" + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + ports: + - containerPort: 7070 + name: http + readinessProbe: + httpGet: + path: /healthz + port: 7070 + initialDelaySeconds: 5 + periodSeconds: 5 + livenessProbe: + httpGet: + path: /healthz + port: 7070 + initialDelaySeconds: 5 + periodSeconds: 5 diff --git a/test/goldpinger/goldpinger-deny-all.yaml b/test/goldpinger/goldpinger-deny-all.yaml new file mode 100755 index 000000000..eb011b256 --- /dev/null +++ b/test/goldpinger/goldpinger-deny-all.yaml @@ -0,0 +1,12 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: goldpinger-deny-all + namespace: default +spec: + podSelector: + matchLabels: + app: goldpinger + policyTypes: + - Egress +status: {} diff --git a/test/goldpinger/service-account.yaml b/test/goldpinger/service-account.yaml new file mode 100644 index 000000000..7e418b1eb --- /dev/null +++ b/test/goldpinger/service-account.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: goldpinger-serviceaccount + namespace: default diff --git a/test/goldpinger/service.yaml b/test/goldpinger/service.yaml new file mode 100644 index 000000000..1e4154e6c --- /dev/null +++ b/test/goldpinger/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: goldpinger + namespace: default + labels: + app: goldpinger +spec: + type: NodePort + ports: + - port: 8080 + nodePort: 30080 + name: http + selector: + app: goldpinger diff --git a/test/image/.dockerignore b/test/image/.dockerignore new file mode 100644 index 000000000..0f046820f --- /dev/null +++ b/test/image/.dockerignore @@ -0,0 +1,4 @@ +# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file +# Ignore build and test binaries. 
+bin/ +testbin/ diff --git a/test/image/Dockerfile b/test/image/Dockerfile new file mode 100644 index 000000000..e251ba61b --- /dev/null +++ b/test/image/Dockerfile @@ -0,0 +1,28 @@ +# build stage +FROM mcr.microsoft.com/oss/go/microsoft/golang:1.21 AS builder + +RUN apt-get update &&\ + apt-get -y install lsb-release wget software-properties-common gnupg file git make + +RUN wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - +RUN add-apt-repository "deb http://apt.llvm.org/bullseye/ llvm-toolchain-bullseye-14 main" +RUN apt-get update + +RUN apt-get install -y clang-14 lldb-14 lld-14 clangd-14 man-db +RUN apt-get install -y bpftool libbpf-dev + +RUN ln -s /usr/bin/clang-14 /usr/bin/clang + +COPY . /go/src/github.com/microsoft/retina +WORKDIR /go/src/github.com/microsoft/retina + +# RUN go mod edit -module retina +# RUN make all generate +#RUN go generate ./... +RUN make retina-ut +#RUN go test -covermode count -coverprofile /home/runner/work/_temp/go.cov -coverpkg ./... ./... + +RUN cat coverage.out + +FROM scratch AS artifacts +COPY --from=builder /go/src/github.com/microsoft/retina/coverage.out /coverage.out
diff --git a/test/integration/Dockerfile.integration b/test/integration/Dockerfile.integration new file mode 100644 index 000000000..7a0e22c1c --- /dev/null +++ b/test/integration/Dockerfile.integration @@ -0,0 +1,20 @@ +FROM mcr.microsoft.com/oss/go/microsoft/golang:1.21 AS integration +LABEL Name=retina-integration Version=0.0.1 + +RUN apt-get update &&\ + apt-get -y install lsb-release wget software-properties-common gnupg file git make ca-certificates curl vim + +RUN curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg +RUN echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /' | tee /etc/apt/sources.list.d/kubernetes.list +RUN apt-get update && apt-get install -y kubectl sudo + +COPY . /go/src/github.com/microsoft/retina +WORKDIR /go/src/github.com/microsoft/retina +RUN curl -LO https://github.com/kvaps/kubectl-node-shell/raw/master/kubectl-node_shell && \ + chmod +x ./kubectl-node_shell && \ + mv ./kubectl-node_shell /usr/local/bin/kubectl-node_shell + +ENV BUILD_LOCALLY=true +RUN make install-kubectl-retina + +ENTRYPOINT [ "make", "retina-integration" ]
diff --git a/test/integration/README.md b/test/integration/README.md new file mode 100644 index 000000000..970cdf42c --- /dev/null +++ b/test/integration/README.md @@ -0,0 +1,24 @@ +# Integration Tests + +## Framework + Structure + +We use Ginkgo to run and structure Retina's integration tests. The Makefile provides targets to run these tests and to build test images. The integration tests run on an AKS cluster, which can be created with +`Retina/scripts/create_aks_cluster.sh`. +When testing advanced metrics, we should check whether enablePodLevel is set. For an example, see ./plugins/dropreason/dropreason_suite_test.go, which checks for that environment variable before adding a new test entry for advanced metrics. + +The Azure pipeline was updated to accommodate these changes. The pipeline creates a cluster for each profile, installs Retina with ENABLE_POD_LEVEL=true when the profile is "adv", and then runs the integration tests.
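+
+To run the suite locally, here is a minimal sketch (the `KUBECONFIG` path is an assumption; the make targets are the ones used by `Dockerfile.integration` above):
+
+```bash
+# Point kubectl at the test cluster, e.g. one created by scripts/create_aks_cluster.sh.
+export KUBECONFIG=~/.kube/config
+
+# Install the kubectl-retina CLI used by the capture tests, then run the Ginkgo suites.
+make install-kubectl-retina
+make retina-integration
+```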
+ +/capture - integration tests related to the capture command +/common - common tools we use in our integration tests +/fixtures - static files used by the integration tests, such as CRD configurations +/plugins - integration tests for our eBPF programs + +[This is the PR with the integration test and pipeline changes](https://github.com/microsoft/retina/pull/562/files) + +## Expectations + +Each suite should: + +* be idempotent +* not change cluster state (e.g. not delete retina-agent) +* not interfere with other tests
diff --git a/test/integration/annotations/annotation_suite_test.go b/test/integration/annotations/annotation_suite_test.go new file mode 100644 index 000000000..4d458512d --- /dev/null +++ b/test/integration/annotations/annotation_suite_test.go @@ -0,0 +1,89 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +//go:build integration +// +build integration + +package annotations + +import ( + "testing" + + "github.com/microsoft/retina/pkg/log" + common "github.com/microsoft/retina/test/integration/common" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +const ( + annotation_test_prefix = "test-forward-annotation-metrics-" +) + +var ( + namespace string + l *log.ZapLogger + ictx *common.IntegrationContext + k *common.KubeManager +) + +var expectedAnnotationTestMetrics = common.NewModelLocalCtxForwardMetrics( + "egress", + "", + annotation_test_prefix, + "client", + "", + "", +) + +func TestAnnotation(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Annotations integration tests") +} + +var _ = BeforeSuite(func() { + ictx = common.Init() + k = common.NewKubeManager(ictx.Logger(), ictx.Clientset) + ictx.Logger().Info("Before Suite setup complete") +}) + +var _ = AfterSuite(func() { + ictx.DumpAgentLogs() + k.CleanupNamespaces() + ictx.Logger().Info("After Suite cleanup complete") +}) + +var _ = Describe("Annotation Workflow Integration Tests", func() { + var pre uint64 + var ac *AnnotationContext + + BeforeEach(func() { + if !ictx.ValidateTestMode(common.LocalCtxMode) { + Skip("Skipping annotation workflow test due to test mode") + } + l = ictx.Logger().Named("annotation-workflow") + ac = InitAnnotationContext(ictx, k, expectedAnnotationTestMetrics.WithLabels(common.Count), annotation_test_prefix) + pre = ac.GetTotalMetrics() + }) + + Context("Without annotations", func() { + It("should validate that metrics are not emitted", func() { + ac.CheckMetricsWithoutAnnotations(pre) + }) + }) + + Context("With both pod and namespace annotations", func() { + It("should validate metric emissions", func() { + ac.CheckMetricsWithAnnotations(pre) + }) + + It("should ensure independence between pod and namespace annotations", func() { + ac.CheckAnnotationIndependence(pre) + }) + }) + + Context("With specific pod annotation", func() { + It("should validate that metrics are confined to its node", func() { + ac.CheckPodMetricsOnNode() + }) + }) +}) diff --git a/test/integration/annotations/annotations.go b/test/integration/annotations/annotations.go new file mode 100644 index 000000000..5714d7923 --- /dev/null +++ b/test/integration/annotations/annotations.go @@ -0,0 +1,293 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license.
+ +//go:build integration +// +build integration + +package annotations + +import ( + "strconv" + "strings" + "time" + + kcommon "github.com/microsoft/retina/pkg/common" + "github.com/microsoft/retina/pkg/log" + common "github.com/microsoft/retina/test/integration/common" + . "github.com/onsi/gomega" + "go.uber.org/zap" +) + +var a *AnnotationContext + +type AnnotationContext struct { + l *log.ZapLogger + Ictx *common.IntegrationContext + KubeManager *common.KubeManager + Pods []*common.ModelPod + Namespace *common.ModelNamespace + Metrics common.MetricWithLabels + Nodes []common.ModelNode + annotation map[string]string +} + +// create init function to initialize the annotation context +func InitAnnotationContext(ictx *common.IntegrationContext, k *common.KubeManager, annotationMetrics common.MetricWithLabels, namespace_prefix string) *AnnotationContext { + if a != nil { + return a + } + + a = &AnnotationContext{ + Ictx: ictx, + KubeManager: k, + Metrics: annotationMetrics, + l: log.Logger().Named("annotation-tests"), + } + + // Choose two linux nodes + nodes, err := ictx.GetTwoDistinctNodes("linux") + if err != nil { + panic(err.Error()) + } + + a.l.Info("Nodes chosen", zap.String("node1", nodes[0].Name), zap.String("node2", nodes[1].Name)) + + clientPod := common.NewModelPod("client", common.NewModelNode(nodes[0])).WithContainer(80, common.HTTP) + serverPod := common.NewModelPod("server", common.NewModelNode(nodes[1])).WithContainer(80, common.HTTP) + + a.Pods = []*common.ModelPod{clientPod, serverPod} + a.Nodes = []common.ModelNode{common.NewModelNode(nodes[0]), common.NewModelNode(nodes[1])} + + // Create unique namespace. This is to make sure namespace deletion is not + namespace := namespace_prefix + strconv.FormatInt(time.Now().Unix(), 10) + + twoPodsInSameNamespace := common.NewModelNamespace(namespace, clientPod, serverPod) + a.Namespace = twoPodsInSameNamespace + + a.annotation = map[string]string{ + kcommon.RetinaPodAnnotation: kcommon.RetinaPodAnnotationValue, + } + + a.l = log.Logger().Named("annotation-tests") + + err = k.InitializeClusterFromModel(twoPodsInSameNamespace) + if err != nil { + panic(err.Error()) + } + + return a +} + +// Check that no metrics are emitted when no annotations are present. +func (ac *AnnotationContext) CheckMetricsWithoutAnnotations(pre uint64) { + ac.performProbe(false) + post := ac.GetTotalMetrics() + + Expect(pre).To(Equal(post), "No annotations were present, so pre and post annotation values should be equal") + ac.l.Info("CheckMetricsWithoutAnnotations completed successfully") +} + +// Check that metrics are emitted when annotations are present. 
+func (ac *AnnotationContext) CheckMetricsWithAnnotations(pre uint64) { + ac.annotatePods() + // TODO - uncomment this once we have a fix for namespace annotation removal issue https://github.com/microsoft/retina/issues/667 + // ac.annotateNamespace() + ac.performProbe(false) + post := ac.GetTotalMetrics() + + Expect(post).To( + BeNumerically(">", pre), + "annotation exists, so post annotation count should be greater than pre annotation count") + ac.l.Info("CheckMetricsWithAnnotations completed successfully") +} + +// Check that removing the annotation from a pod doesn't affect namespace metrics, and vice versa +func (ac *AnnotationContext) CheckAnnotationIndependence(pre uint64) { + annotatedns := ac.KubeManager.GetAnnotatedNamespaces(ac.annotation) + ac.l.Info("Annotated namespaces before removal", zap.Any("annotatedns", annotatedns)) + + annotatedPods := ac.KubeManager.GetAnnotatedPods(ac.Namespace.BaseName, ac.annotation) + ac.l.Info("Annotated pods before removal", zap.Any("annotatedPods", annotatedPods)) + + ac.removeNamespaceAnnotations() + ac.performProbe(false) + post := ac.GetTotalMetrics() + + // post annotation count should be greater than pre annotation count, because pod annotation still exists + Expect(post).To( + BeNumerically(">", pre), + "Pod annotation still exists, so post annotation count should be greater than pre annotation count") + + ac.removePodAnnotations() + + annotatedns = ac.KubeManager.GetAnnotatedNamespaces(ac.annotation) + ac.l.Info("Annotated namespaces after removal", zap.Any("annotatedns", annotatedns)) + + annotatedPods = ac.KubeManager.GetAnnotatedPods(ac.Namespace.BaseName, ac.annotation) + ac.l.Info("Annotated pods after removal", zap.Any("annotatedPods", annotatedPods)) + + pre = ac.GetTotalMetrics() // update pre count + + ac.performProbe(false) + post = ac.GetTotalMetrics() + + // post annotation count should be equal to pre annotation count, because pod annotation was removed + Expect(pre).To(Equal(post), "All annotations were removed, so pre and post annotation values should be equal") + + // Annotate both namespace and pods then remove pod annotation + ac.annotateNamespace() + ac.annotatePods() + ac.removePodAnnotations() + + pre = ac.GetTotalMetrics() // update pre count + ac.performProbe(false) + post = ac.GetTotalMetrics() + + Expect(post).To( + BeNumerically(">", pre), + "Namespace annotation still exists, so post annotation count should be greater than pre annotation count") + ac.l.Info("CheckAnnotationIndependence completed successfully") +} + +// Check metrics of an annotated pod are only on the node it's running on. +func (ac *AnnotationContext) CheckPodMetricsOnNode() { + if len(ac.Pods) != 2 || len(ac.Nodes) != 2 { + panic("This test requires exactly 2 pods and 2 nodes") + } + + ac.removePodAnnotations() + ac.removeNamespaceAnnotations() + + for i, pod := range ac.Pods { + // if metric is contains drop reason, reverse the network policy + if strings.Contains(ac.Metrics.Metric, "drop") { + ac.ReverseNetworkPolicy() + } + // Annotate the pod and get initial metrics. + err := ac.KubeManager.AnnotatePod(pod.Name, ac.Namespace.BaseName, ac.annotation) + Expect(err).To(BeNil()) + + initialMetrics := ac.getNodeMetrics(ac.Nodes[i], pod.Name) + + // Perform a probe and get updated metrics. 
+ ac.performProbe(i == 1) + updatedMetrics := ac.getNodeMetrics(ac.Nodes[i], pod.Name) + + Expect(updatedMetrics).To( + BeNumerically(">", initialMetrics), + "Metrics not emitted for annotated pod %s on node %s", pod.Name, ac.Nodes[i].HostName) + + // Ensure the other node metrics remain the same. + otherNodeInitialMetrics := ac.getNodeMetrics(ac.Nodes[1-i], pod.Name) + ac.performProbe(i == 1) + otherNodeUpdatedMetrics := ac.getNodeMetrics(ac.Nodes[1-i], pod.Name) + + Expect(otherNodeInitialMetrics).To( + Equal(otherNodeUpdatedMetrics), + "Metrics emitted for non annotated pod %s on a different node %s", pod.Name, ac.Nodes[1-i].HostName) + + // Cleanup: remove the annotation from the current pod. + err = ac.KubeManager.RemovePodAnnotations(pod.Name, ac.Namespace.BaseName, ac.annotation) + Expect(err).To(BeNil()) + + } + ac.l.Info("CheckPodMetricsOnNode completed successfully") +} + +// Fetch and parse metrics for a specific node. +func (ac *AnnotationContext) getNodeMetrics(node common.ModelNode, podName string) uint64 { + updatedLabels := ac.updatePodNameLabel(podName) + + mp := ac.fetchMetricParser(node.HostName) + return ac.parseMetricsValue(updatedLabels, true, mp) +} + +// FetchMetricParser fetches the metrics from the node and returns the MetricParser object +func (ac *AnnotationContext) fetchMetricParser(nodeName string) *common.MetricParser { + body, err := ac.Ictx.FetchMetrics(nodeName) + Expect(err).To(BeNil()) + mp := &common.MetricParser{ + Body: body, + } + return mp +} + +// Fetched the count for a given label +func (ac *AnnotationContext) parseMetricsValue(metricLabels common.MetricWithLabels, isAdvancedMode bool, mp *common.MetricParser) uint64 { + val, err := mp.Parse(metricLabels, isAdvancedMode) + Expect(err).To(BeNil()) + ac.l.Info("drop packet", zap.Any("labels", metricLabels), zap.Uint64("value", val)) + return val +} + +// Common function to get the total metrics from all nodes +func (ac *AnnotationContext) GetTotalMetrics() uint64 { + total := uint64(0) + for _, node := range ac.Nodes { + mp := ac.fetchMetricParser(node.HostName) + total += ac.parseMetricsValue(ac.Metrics, true, mp) + } + return total +} + +// Common function to perform a probe +// Perform a probe from client to server if reversed is false, else from server to client +func (ac *AnnotationContext) performProbe(reversed bool) { + clientPod := ac.Pods[0] + serverPod := ac.Pods[1] + + if reversed { + clientPod, serverPod = serverPod, clientPod + } + + ac.KubeManager.ProbeRepeatedlyKind(ac.Namespace.BaseName, clientPod.Name, ac.Namespace.BaseName, serverPod.Name, common.HTTP, 80, 10) +} + +// Common function to annotate pods +func (ac *AnnotationContext) annotatePods() { + for _, pod := range ac.Pods { + err := ac.KubeManager.AnnotatePod(pod.Name, ac.Namespace.BaseName, ac.annotation) + Expect(err).To(BeNil()) + } +} + +// Common function to annotate namespace +func (ac *AnnotationContext) annotateNamespace() { + err := ac.KubeManager.AnnotateNamespace(ac.Namespace.BaseName, ac.annotation) + Expect(err).To(BeNil()) +} + +// Common function to remove pod annotations +func (ac *AnnotationContext) removePodAnnotations() { + for _, pod := range ac.Pods { + err := ac.KubeManager.RemovePodAnnotations(pod.Name, ac.Namespace.BaseName, ac.annotation) + Expect(err).To(BeNil()) + } +} + +// Common function to remove namespace annotations +func (ac *AnnotationContext) removeNamespaceAnnotations() { + err := ac.KubeManager.RemoveNamespaceAnnotations(ac.Namespace.BaseName, ac.annotation) + Expect(err).To(BeNil()) +} + 
+// Common function to update pod name label +func (ac *AnnotationContext) updatePodNameLabel(podName string) common.MetricWithLabels { + metricWithLabels := ac.Metrics + // Look for the "podname" label and modify its value + for i := 0; i < len(metricWithLabels.Labels); i += 2 { + if metricWithLabels.Labels[i] == "podname" && i+1 < len(metricWithLabels.Labels) { + metricWithLabels.Labels[i+1] = podName + break + } + } + return metricWithLabels +} + +// Common function to reverse network policy from server to client +func (ac *AnnotationContext) ReverseNetworkPolicy() { + err := ac.KubeManager.RemoveNetworkPolicy(ac.Namespace.BaseName, "deny-server-ingress") + Expect(err).To(BeNil()) + ac.KubeManager.ApplyNetworkDropPolicy(ac.Namespace.BaseName, "deny-client-ingress", common.PodLabelKey, "client") + Expect(err).To(BeNil()) +} diff --git a/test/integration/capture/capture_suite_test.go b/test/integration/capture/capture_suite_test.go new file mode 100644 index 000000000..b72514c64 --- /dev/null +++ b/test/integration/capture/capture_suite_test.go @@ -0,0 +1,615 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +//go:build integration +// +build integration + +package capture + +import ( + "context" + "fmt" + "os" + "os/exec" + "regexp" + "strconv" + "strings" + "testing" + "time" + + "github.com/microsoft/retina/pkg/log" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + utilrand "k8s.io/apimachinery/pkg/util/rand" + + captureConstants "github.com/microsoft/retina/pkg/capture/constants" + "github.com/microsoft/retina/pkg/label" + common "github.com/microsoft/retina/test/integration/common" +) + +const ( + podRunningCheckDuration = 2 * time.Minute + podRunningCheckInterval = time.Second + + defaultDurationStr = "1m" +) + +var ( + nodes []corev1.Node + + captureName string + namespace string + + unsetRetinaAgentEnv bool + + ictx *common.IntegrationContext + l *log.ZapLogger +) + +func TestCapture(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Capture integration tests") +} + +var _ = Describe("Capture flag tests", func() { + BeforeSuite(func() { + ictx = common.Init() + l = ictx.Logger().Named("capture-test") + }) + + BeforeEach(func() { + nodes = append(ictx.Nodes.Items, ictx.ControlPlaneNodes.Items...) + Expect(len(nodes)).NotTo(Equal(0)) + + // TODO(mainred): we currently build and load only mono-arch images to kind cluster, while by default Retina + // capture requires multi-arch images when the debug mode is enabled. + // As a workaround, we set RETINA_AGENT_IMAGE to mono-arch image for now when RETINA_AGENT_IMAGE env is not set + // by user. 
+ if retinaAgentImagePreset := os.Getenv("RETINA_AGENT_IMAGE"); len(retinaAgentImagePreset) == 0 { + retinaVersion := assertExecCommandOutput("kubectl", "retina", "version") + retinaAgentImage := fmt.Sprintf("acnpublic.azurecr.io/retina-agent:%s", strings.TrimSpace(retinaVersion)) + os.Setenv("RETINA_AGENT_IMAGE", retinaAgentImage) + unsetRetinaAgentEnv = true + } + }) + + AfterEach(func() { + if unsetRetinaAgentEnv { + os.Unsetenv("RETINA_AGENT_IMAGE") + } + + if CurrentGinkgoTestDescription().Failed { + fetchAndPrintDebugInfo(namespace) + } + + deleteCapture(captureName, namespace) + deleteNamespace(namespace) + }) + + It("is running a capture only on one worker node (node-selector)", func() { + // Create unique namespace for this test + namespace = createNamespace("test-nodeselector") + + // Create capture on nodes with specific labels. + nodeSelector := fmt.Sprintf("kubernetes.io/hostname=%s", ictx.Nodes.Items[0].Name) + out := assertExecCommandOutput("kubectl", "retina", "capture", "create", fmt.Sprintf("--node-selectors=%s", nodeSelector), fmt.Sprintf("--namespace=%s", namespace), fmt.Sprintf("--duration=%s", defaultDurationStr), "--host-path=/mnt/test", "--debug") + captureName = getCaptureName(out) + + // Wait for all capture jobs to start running a pod before checking for captures + waitUntilCapturePodsStart(captureName, namespace) + + for _, node := range nodes { + capturePodList, err := ictx.Clientset.CoreV1().Pods(namespace).List(context.TODO(), v1.ListOptions{FieldSelector: "spec.nodeName=" + node.Name, LabelSelector: fmt.Sprintf("%s=%s", label.AppLabel, captureConstants.CaptureAppname)}) + Expect(err).ToNot(HaveOccurred()) + + // Make sure there's only one capture on capture nodes, and none on non-capture nodes + nodeSelectorMap, _ := labels.ConvertSelectorToLabelsMap(nodeSelector) + nodeSelected := containsMap(nodeSelectorMap, node.Labels) + expectedCaptureCount := 0 + if nodeSelected { + expectedCaptureCount = 1 + } + Expect(len(capturePodList.Items)).Should(Equal(expectedCaptureCount)) + } + }) + + It("capture should only be on nodes that have selected pods that are in selected namespaces (pod- and namespace-selector)", func() { + // Create unique namespace for this test + namespace = createNamespace("test-podselector") + + // Create pod + jobName := "testjob" + _ = assertExecCommandOutput("kubectl", "create", "job", jobName, "--image=hello-world", fmt.Sprintf("--namespace=%s", namespace)) + Eventually(func() bool { + job, err := ictx.Clientset.BatchV1().Jobs(namespace).Get(context.TODO(), jobName, v1.GetOptions{}) + if err != nil { + return false + } + return job.Status.Active == 1 + }, podRunningCheckDuration, podRunningCheckInterval).Should(BeTrue()) + + // Create capture on nodes with selected pods in selected namespaces + podSelector := "job-name=" + jobName + namespaceSelector := "kubernetes.io/metadata.name=" + namespace + out := assertExecCommandOutput("kubectl", "retina", "capture", "create", fmt.Sprintf("--pod-selectors=%s", podSelector), fmt.Sprintf("--namespace-selectors=%s", namespaceSelector), fmt.Sprintf("--namespace=%s", namespace), fmt.Sprintf("--duration=%s", defaultDurationStr), "--host-path=/mnt/test", "--debug") + captureName = getCaptureName(out) + + // Wait for all capture jobs to start running a pod before checking for captures + waitUntilCapturePodsStart(captureName, namespace) + + for _, node := range nodes { + capturePodList, err := ictx.Clientset.CoreV1().Pods(namespace).List(context.TODO(), v1.ListOptions{FieldSelector: "spec.nodeName=" + 
node.Name, LabelSelector: fmt.Sprintf("%s=%s", label.AppLabel, captureConstants.CaptureAppname)}) + Expect(err).ToNot(HaveOccurred()) + + podList, err := ictx.Clientset.CoreV1().Pods(namespace).List(context.TODO(), v1.ListOptions{FieldSelector: "spec.nodeName=" + node.Name}) + Expect(err).ToNot(HaveOccurred()) + + podSelectorMap, _ := labels.ConvertSelectorToLabelsMap(podSelector) + namespaceSelectorMap, _ := labels.ConvertSelectorToLabelsMap(namespaceSelector) + selectedPods := 0 + for _, pod := range podList.Items { + namespaceObj, err := ictx.Clientset.CoreV1().Namespaces().Get(context.TODO(), pod.Namespace, v1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + + if containsMap(podSelectorMap, pod.Labels) && containsMap(namespaceSelectorMap, namespaceObj.Labels) { + selectedPods++ + } + } + + // Make sure there's only one capture on nodes that have selected pods, and no captures on nodes without selected pods + Expect(len(capturePodList.Items)).Should(Equal(selectedPods)) + } + }) + + It("should verify the capture occurred in the specified namespace (namespace)", func() { + // Create unique namespace for this test + namespace = createNamespace("test-namespace") + + // Create capture on nodes with specific labels. + nodeSelector := fmt.Sprintf("kubernetes.io/hostname=%s", ictx.Nodes.Items[0].Name) + out := assertExecCommandOutput("kubectl", "retina", "capture", "create", fmt.Sprintf("--node-selectors=%s", nodeSelector), fmt.Sprintf("--namespace=%s", namespace), "--host-path=/mnt/test", "--debug") + captureName = getCaptureName(out) + + // get capture from list in namespace + out = assertExecCommandOutput("bash", "-c", fmt.Sprintf("kubectl retina capture list --namespace %s |grep -v NAMESPACE | awk '{print $2}'", namespace)) + capture := regexp.MustCompile("retina-capture-[a-zA-Z0-9]+").FindString(out) + Expect(capture).NotTo(BeEmpty()) + }) + + It("is running a capture for the specified duration (duration)", func() { + // Create unique namespace for this test + namespace = createNamespace("test-duration") + + testDuration := "30s" + // Create capture on nodes with specific labels. + nodeSelector := fmt.Sprintf("kubernetes.io/hostname=%s", ictx.Nodes.Items[0].Name) + out := assertExecCommandOutput("bash", "-c", fmt.Sprintf("kubectl retina capture create --node-selectors=%s --namespace=%s --duration=%s --host-path=/mnt/test --debug", nodeSelector, namespace, testDuration)) + captureName = getCaptureName(out) + + // Wait for all capture jobs to start running a pod before checking for captures + waitUntilCapturePodsStart(captureName, namespace) + + capturePods, err := ictx.Clientset.CoreV1().Pods(namespace).List(context.TODO(), v1.ListOptions{LabelSelector: "capture-name=" + captureName}) + Expect(err).ToNot(HaveOccurred()) + Expect(capturePods.Items).NotTo(BeEmpty()) + captureContainerExists := false + for _, container := range capturePods.Items[0].Spec.Containers { + if container.Name == captureConstants.CaptureAppname { + captureContainerExists = true + for _, envVar := range container.Env { + if envVar.Name == captureConstants.CaptureDurationEnvKey { + Expect(strings.TrimSpace(envVar.Value)).To(Equal(testDuration)) + } + } + } + } + Expect(captureContainerExists).To(Equal(true)) + }) + + It("should wait until the capture is completed when 'no-wait' is false (no-wait)", func() { + // Create unique namespace for this test + namespace = createNamespace("test-no-wait") + + // Create capture on nodes with specific labels. 
+ nodeSelector := fmt.Sprintf("kubernetes.io/hostname=%s", ictx.Nodes.Items[0].Name) + cmd := exec.Command("kubectl", "retina", "capture", "create", fmt.Sprintf("--node-selectors=%s", nodeSelector), fmt.Sprintf("--namespace=%s", namespace), fmt.Sprintf("--duration=%s", defaultDurationStr), "--no-wait=false", "--host-path=/mnt/test", "--debug") + err := cmd.Start() // Run command asynchronously + Expect(err).ToNot(HaveOccurred()) + + // Obtain capture name to delete after test. + // Starting the command does not instantly create the capture, so we need to wait until it is created. + var listCaptureOutput string + Eventually(func() bool { + listCaptureOutput = assertExecCommandOutput("bash", "-c", fmt.Sprintf("kubectl retina capture list --namespace %s |grep -v NAMESPACE | awk '{print $2}'", namespace)) + return strings.Contains(listCaptureOutput, "retina-capture") + }, 10*time.Second, time.Second).Should(BeTrue()) + captureName = getCaptureName(listCaptureOutput) + + exited := cmd.ProcessState != nil && cmd.ProcessState.Exited() + Expect(exited).To(Equal(false)) + + err = cmd.Wait() + Expect(err).ToNot(HaveOccurred()) + exited = cmd.ProcessState != nil && cmd.ProcessState.Exited() + Expect(exited).To(Equal(true)) + }) + + It("should have a max file size (max-size)", func() { + fileSizeMB := 2 + hostPath := "/mnt/test" + namespace = createNamespace("test-max-size") + + // Create capture with determined filesize + nodeName := ictx.Nodes.Items[0].Name + nodeSelector := fmt.Sprintf("kubernetes.io/hostname=%s", nodeName) + // Give a big enough duration to not let duration decide when to stop the capture. + cmd := exec.Command("bash", "-c", fmt.Sprintf("kubectl retina capture create --node-selectors=%s --namespace=%s --duration=10m --max-size=%d --host-path=%s --debug", nodeSelector, namespace, fileSizeMB, hostPath)) + err := cmd.Start() + Expect(err).ToNot(HaveOccurred()) + + // Obtain capture name to delete after test. + // Starting the command does not instantly create the capture, so we need to wait until it is created. 
+ var listCaptureOutput string + Eventually(func() bool { + listCaptureOutput = assertExecCommandOutput("bash", "-c", fmt.Sprintf("kubectl retina capture list --namespace %s |grep -v NAMESPACE | awk '{print $2}'", namespace)) + return strings.Contains(listCaptureOutput, "retina-capture") + }, 10*time.Second, time.Second).Should(BeTrue()) + captureName = getCaptureName(listCaptureOutput) + + capturePod := corev1.Pod{} + // Make sure all pods are completed + Eventually(func() bool { + capturePods, err := ictx.Clientset.CoreV1().Pods(namespace).List(context.TODO(), v1.ListOptions{LabelSelector: "capture-name=" + captureName}) + // In case there's no pods created + if len(capturePods.Items) == 0 { + return false + } + if err != nil { + return false + } + for _, capturePod := range capturePods.Items { + if capturePod.Status.Phase != corev1.PodSucceeded { + return false + } + } + capturePod = capturePods.Items[0] + return true + }, 10*time.Minute, podRunningCheckInterval).Should(BeTrue()) + + // copy capture file to local + err, targetFilePath := copyCaptureFileToLocal(capturePod) + Expect(err).ToNot(HaveOccurred()) + + // get size of pcap file and compare to expected size + out := assertExecCommandOutput("bash", "-c", fmt.Sprintf("tar -ztvf %s | grep pcap | awk '{print $3}'", targetFilePath)) + sizeString := strings.Trim(out, "\n") + Expect(sizeString).NotTo(BeEmpty()) + size, _ := strconv.Atoi(sizeString) + // Small max file size may bring inaccuracy, while max big file size can be too slow to finish. + // We allow the actual file size to be at most 2 times of the expected size when we pick a small max file size. + Expect(size).To(BeNumerically(">", fileSizeMB*1000000/2)) + Expect(size).To(BeNumerically("<=", fileSizeMB*1000000*2)) + }) + + It("is running a capture only on nodes specified by name (node-names)", func() { + // Create unique namespace for this test + namespace = createNamespace("test-nodenames") + + // create list of worker nodes + workerNodes := make([]corev1.Node, 0) + expectedCapturesOnNode := make(map[string]int) + for _, node := range nodes { + expectedCapturesOnNode[node.Name] = 0 + if _, exist := node.Labels["node-role.kubernetes.io/control-plane"]; !exist { + workerNodes = append(workerNodes, node) + } + } + + // sample of worker nodes + nodeNames := "" + for i := 0; i < len(workerNodes)/2; i++ { + expectedCapturesOnNode[workerNodes[i].Name] = 1 + nodeNames += "," + workerNodes[i].Name + } + nodeNames = strings.Trim(nodeNames, ",") + + // Create capture on nodes with specific labels. 
+ out := assertExecCommandOutput("kubectl", "retina", "capture", "create", fmt.Sprintf("--node-names=%s", nodeNames), fmt.Sprintf("--namespace=%s", namespace), fmt.Sprintf("--duration=%s", defaultDurationStr), "--host-path=/mnt/test", "--debug") + captureName = getCaptureName(out) + + // Wait for all capture jobs to start running a pod before checking for captures + waitUntilCapturePodsStart(captureName, namespace) + + for _, node := range nodes { + capturePodList, err := ictx.Clientset.CoreV1().Pods(namespace).List(context.TODO(), v1.ListOptions{FieldSelector: "spec.nodeName=" + node.Name, LabelSelector: fmt.Sprintf("%s=%s", label.AppLabel, captureConstants.CaptureAppname)}) + Expect(err).ToNot(HaveOccurred()) + Expect(len(capturePodList.Items)).Should(Equal(expectedCapturesOnNode[node.Name])) + } + }) + + It("is running a capture for the specified packet size (packet-size)", func() { + // Create unique namespace for this test + namespace = createNamespace("test-packet-size") + + // Create capture on nodes with specific labels. + nodeSelector := fmt.Sprintf("kubernetes.io/hostname=%s", ictx.Nodes.Items[0].Name) + out := assertExecCommandOutput("bash", "-c", fmt.Sprintf("kubectl retina capture create --node-selectors=%s --namespace=%s --duration=%s --packet-size=96 --host-path=/mnt/test --debug", nodeSelector, namespace, defaultDurationStr)) + + // Obtain capture name to delete after test. + captureName = getCaptureName(out) + + // Wait for all capture jobs to start running a pod before checking for captures + waitUntilCapturePodsStart(captureName, namespace) + capturePods, err := ictx.Clientset.CoreV1().Pods(namespace).List(context.TODO(), v1.ListOptions{LabelSelector: "capture-name=" + captureName}) + Expect(err).ToNot(HaveOccurred()) + + captureContainerExists := false + for _, capturePod := range capturePods.Items { + for _, container := range capturePod.Spec.Containers { + if container.Name == captureConstants.CaptureAppname { + captureContainerExists = true + for _, envVar := range container.Env { + if envVar.Name == captureConstants.PacketSizeEnvKey { + Expect(strings.TrimSpace(envVar.Value)).To(Equal("96")) + } + } + } + } + } + + Expect(captureContainerExists).To(Equal(true)) + }) + + It("should not contain metadata (include-metadata)", func() { + hostPath := "/mnt/test" + namespace = createNamespace("test-metadata") + + // Create capture with metadata flag off + nodeName := ictx.Nodes.Items[0].Name + nodeSelector := fmt.Sprintf("kubernetes.io/hostname=%s", nodeName) + _ = assertExecCommandOutput("bash", "-c", fmt.Sprintf("kubectl retina capture create --include-metadata=false --node-selectors=%s --namespace=%s --duration=%s --host-path=%s --debug", nodeSelector, namespace, defaultDurationStr, hostPath)) + + // Obtain capture name to delete after test. 
+ out := assertExecCommandOutput("bash", "-c", fmt.Sprintf("kubectl retina capture list --namespace %s | awk '{print $2}'", namespace)) + captureName = getCaptureName(out) + + capturePod := corev1.Pod{} + // Make sure all pods are completed + Eventually(func() bool { + capturePods, err := ictx.Clientset.CoreV1().Pods(namespace).List(context.TODO(), v1.ListOptions{LabelSelector: "capture-name=" + captureName}) + // In case there's no pods created + if len(capturePods.Items) == 0 { + return false + } + if err != nil { + return false + } + for _, capturePod := range capturePods.Items { + if capturePod.Status.Phase != corev1.PodSucceeded { + return false + } + } + capturePod = capturePods.Items[0] + return true + }, 10*time.Minute, podRunningCheckInterval).Should(BeTrue()) + + // copy capture file to local + err, targetFilePath := copyCaptureFileToLocal(capturePod) + Expect(err).ToNot(HaveOccurred()) + + out = assertExecCommandOutput("bash", "-c", fmt.Sprintf("tar -ztvf %s ", targetFilePath)) + // checks if ip-resources.txt (a metadata file) was extracted to the folder + Expect(strings.Contains(out, "ip-resources.txt")).To(Equal(false)) + }) + + It("should return with prompt and does not create capture jobs when the job number limit reaches", func() { + // Create unique namespace for this test + namespace = createNamespace("test-nodenames") + + // create list of worker nodes + workerNodes := make([]corev1.Node, 0) + for _, node := range nodes { + expectedCapturesOnNode[node.Name] = 0 + if _, exist := node.Labels["node-role.kubernetes.io/control-plane"]; !exist { + workerNodes = append(workerNodes, node) + } + } + + jobNumLimit := 1 + + // sample of worker nodes + if len(workerNodes) < jobNumLimit+1 { + Skip("Too few worker nodes to run job limit test") + } + nodeNames := "" + for i := 0; i < jobNumLimit+1; i++ { + nodeNames += "," + workerNodes[i].Name + } + nodeNames = strings.Trim(nodeNames, ",") + + command := exec.Command("kubectl", "retina", "capture", "create", fmt.Sprintf("--job-num-limit=%d", jobNumLimit), fmt.Sprintf("--node-names=%s", nodeNames), fmt.Sprintf("--namespace=%s", namespace), fmt.Sprintf("--duration=%s", defaultDurationStr), "--host-path=/mnt/test", "--debug") + out, err := command.CombinedOutput() + Expect(err).ToNot(HaveOccurred()) + captureJobNumExceedLimitError := CaptureJobNumExceedLimitError{ + CurrentNum: jobNumLimit + 1, + Limit: jobNumLimit, + } + Expect(string(out)).To(Equal(captureJobNumExceedLimitError.Error())) + }) +}) + +func bashCommand(command string) string { + return assertExecCommandOutput("bash", "-c", command) +} + +// waitUntilCapturePodsStart waits until capture pods start running +func waitUntilCapturePodsStart(captureName, namespace string) { + // Make sure all pods are running + Eventually(func() bool { + capturePods, err := ictx.Clientset.CoreV1().Pods(namespace).List(context.TODO(), v1.ListOptions{LabelSelector: "capture-name=" + captureName}) + // In case there's no pods created + if len(capturePods.Items) == 0 { + return false + } + if err != nil { + return false + } + for _, capturePod := range capturePods.Items { + if capturePod.Status.Phase != corev1.PodRunning { + return false + } + } + return true + }, podRunningCheckDuration, podRunningCheckInterval).Should(BeTrue()) +} + +func getCaptureName(output string) string { + re := regexp.MustCompile("retina-capture-[a-zA-Z0-9]+") + captureName := re.FindStringSubmatch(string(output))[0] + Expect(captureName).ToNot(BeEmpty()) + return captureName +} + +func deleteCapture(captureName string, 
namespace string) { + _ = assertExecCommandOutput("kubectl", "retina", "capture", "delete", "--name", captureName, "--namespace", namespace) +} + +func createNamespace(namespace string) string { + // Create unique namespace for each test + namespaceUID := fmt.Sprintf("%s-%s", namespace, utilrand.String(5)) + _ = assertExecCommandOutput("kubectl", "create", "namespace", namespaceUID) + return namespaceUID +} + +func deleteNamespace(namespace string) { + _ = assertExecCommandOutput("kubectl", "delete", "namespace", namespace) +} + +func containsMap(subset map[string]string, superset map[string]string) bool { + for k, v := range subset { + if supersetValue, ok := superset[k]; !ok || supersetValue != v { + return false + } + } + return true +} + +func fetchAndPrintDebugInfo(namespace string) { + l.Info("Printing debug info for Capture", zap.String("namespace", namespace)) + pods, err := ictx.Clientset.CoreV1().Pods(namespace).List(context.TODO(), v1.ListOptions{}) + if err != nil { + l.Info("Failed to list pods", zap.String("namespace", namespace), zap.Error(err)) + return + } + for _, pod := range pods.Items { + l.Info("Printing logs for pod", zap.String("namespace", pod.Namespace), zap.String("pod", pod.Name)) + logs, err := ictx.Clientset.CoreV1().Pods(namespace).GetLogs(pod.Name, &corev1.PodLogOptions{}).Do(context.TODO()).Raw() + if err != nil { + l.Error("Failed to get logs for pod", zap.String("namespace", pod.Namespace), zap.String("pod", pod.Name), zap.Error(err)) + } else { + l.Info("Printing log", zap.String("log", string(logs))) + } + + l.Info("Printing events for pod", zap.String("namespace", pod.Namespace), zap.String("pod", pod.Name)) + podEvents, err := ictx.Clientset.CoreV1().Events(namespace).List(context.TODO(), v1.ListOptions{ + FieldSelector: fmt.Sprintf("involvedObject.name=%s", pod.Name), + }) + if err != nil { + l.Error("Failed to get events for pod", zap.String("namespace", pod.Namespace), zap.String("pod", pod.Name), zap.Error(err)) + } else { + for _, podEvent := range podEvents.Items { + l.Info("Pod event", zap.String("Type", podEvent.Type), zap.String("Reason", podEvent.Reason), zap.String("Message", podEvent.Message)) + } + } + } +} + +// execCommandOutput prints the error message if the error is not nil and asserts the error is nil. +func assertExecCommandOutput(name string, arg ...string) string { + command := exec.Command(name, arg...) 
+ out, err := command.CombinedOutput() + if err != nil { + l.Error("command failed", zap.String("command", command.String()), zap.String("output", string(out)), zap.Error(err)) + } + Expect(err).ToNot(HaveOccurred()) + return string(out) +} + +func copyCaptureFileToLocal(capturePod corev1.Pod) (error, string) { + // get capture file from the Capture pod log + logs, err := ictx.Clientset.CoreV1().Pods(capturePod.Namespace).GetLogs(capturePod.Name, &corev1.PodLogOptions{}).Do(context.TODO()).Raw() + if err != nil { + l.Error("Failed to get logs for pod", zap.String("namespace", capturePod.Namespace), zap.String("pod", capturePod.Name), zap.Error(err)) + return err, "" + } + captureName := capturePod.Labels[label.CaptureNameLabel] + // regular expression to get the name of the zip file, starting with retina-capture- and end wth .tar.gz + re := regexp.MustCompile(captureName + `.*\.tar\.gz`) + zipFileName := strings.TrimLeft(re.FindString(string(logs)), "\n") + if zipFileName == "" { + return fmt.Errorf("failed to get zip file name from pod log"), "" + } + + hostPath := capturePod.Spec.Containers[0].VolumeMounts[0].MountPath + // copy folder from k8s node to local target folder + podName := fmt.Sprintf("copy-file-from-node-capture-%s", captureName) + if _, err := ictx.Clientset.CoreV1().Pods(capturePod.Namespace).Create(context.TODO(), &corev1.Pod{ + ObjectMeta: v1.ObjectMeta{ + Name: podName, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "alpine", + Image: "mcr.microsoft.com/mirror/docker/library/alpine:3.16", + Command: []string{"sleep", "infinity"}, + VolumeMounts: []corev1.VolumeMount{{ + Name: "host-path", + MountPath: hostPath, + }}, + }, + }, + NodeSelector: map[string]string{ + "kubernetes.io/os": "linux", + }, + Volumes: []corev1.Volume{{ + Name: "host-path", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: hostPath, + }, + }, + }}, + Affinity: &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: corev1.LabelHostname, + Operator: corev1.NodeSelectorOpIn, + Values: []string{capturePod.Spec.NodeName}, + }, + }, + }, + }, + }, + }, + }, + }, + }, v1.CreateOptions{}); err != nil { + return err, "" + } + + if err := ictx.WaitForPodReady(namespace, podName); err != nil { + return err, "" + } + + targetFilePath := fmt.Sprintf("/tmp/%s.tar.gz", captureName) + assertExecCommandOutput("bash", "-c", fmt.Sprintf("kubectl cp %s/%s:%s/%s %s", namespace, podName, hostPath, zipFileName, targetFilePath)) + return nil, targetFilePath +} diff --git a/test/integration/common/kubemanager.go b/test/integration/common/kubemanager.go new file mode 100644 index 000000000..1e29b1f86 --- /dev/null +++ b/test/integration/common/kubemanager.go @@ -0,0 +1,631 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+//go:build integration +// +build integration + +// KubeManager emulates the KubeManager in upstream NetPol e2e: +// https://github.com/kubernetes/kubernetes/blob/master/test/e2e/network/netpol/kubemanager.go + +package integration + +import ( + "context" + "errors" + "fmt" + "net" + "net/http" + "os" + "os/exec" + "strconv" + "strings" + "sync" + "time" + + "github.com/microsoft/retina/pkg/log" + "go.uber.org/zap" + "gopkg.in/yaml.v2" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + nwv1 "k8s.io/api/networking/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubectl/pkg/scheme" + netutils "k8s.io/utils/net" +) + +const probeTimeoutSeconds = 3 + +var ( + ErrUnsupportedProtocol = errors.New("unsupported protocol") + ErrNoServiceIP = errors.New("could not find service IP for given Pod") +) + +// TestPod represents an actual running pod. For each Pod defined by the model, +// there will be a corresponding TestPod. TestPod includes some runtime info +// (namespace name, service IP) which is not available in the model. +type TestPod struct { + Namespace string + Name string + Containers []string + ServiceIP string +} + +type KubeManager struct { + l *log.ZapLogger + clientSet clientset.Interface + crdClient apiextensionsv1beta1.ApiextensionsV1beta1Client + namespaceNames []string + allPods []TestPod +} + +func NewKubeManager(l *log.ZapLogger, clientSet clientset.Interface) *KubeManager { + return &KubeManager{ + l: l.Named("kube-manager"), + clientSet: clientSet, + } +} + +func (k *KubeManager) InitializeClusterFromModel(namespaces ...*ModelNamespace) error { + var createdPods []*v1.Pod + for _, ns := range namespaces { + err := k.createNamespace(ns.BaseName) + if err != nil { + return err + } + + k.namespaceNames = append(k.namespaceNames, ns.BaseName) + + for _, pod := range ns.Pods { + k.l.Info(fmt.Sprintf("creating pod %s/%s with matching service", ns.BaseName, pod.Name), zap.Any("pod", pod)) + + kubePod, err := k.createPod(pod.KubePod(ns.BaseName)) + if err != nil { + return err + } + + createdPods = append(createdPods, kubePod) + svc, err := k.createService(pod.Service(ns.BaseName)) + if err != nil { + return err + } + if netutils.ParseIPSloppy(svc.Spec.ClusterIP) == nil { + return fmt.Errorf("empty IP address found for service %s/%s", svc.Namespace, svc.Name) + } + + k.allPods = append(k.allPods, TestPod{ + Namespace: kubePod.Namespace, + Name: kubePod.Name, + ServiceIP: svc.Spec.ClusterIP, + }) + } + } + + for _, createdPod := range createdPods { + err := k.waitForPodReady(createdPod) + if err != nil { + return fmt.Errorf("unable to wait for pod %s/%s. err: %w", createdPod.Namespace, createdPod.Name, err) + } + } + + for _, ns := range namespaces { + k.logOutputOfKubectlCommand("get", "pod", "-n", ns.BaseName, "-o", "wide", "--show-labels") + k.logOutputOfKubectlCommand("get", "svc", "-n", ns.BaseName, "-o", "wide", "--show-labels") + } + + return nil +} + +// CleanupNamespaces will cleanup all created objects. +// This should be called after done with testing. 
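+// (In these suites CleanupNamespaces is invoked from the Ginkgo AfterSuite hooks.)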
+func (k *KubeManager) CleanupNamespaces() { + for _, ns := range k.namespaceNames { + if err := k.clientSet.CoreV1().Namespaces().Delete(context.TODO(), ns, metav1.DeleteOptions{}); err != nil { + k.l.Error("failed to delete namespace", zap.String("ns", ns), zap.String("err", err.Error())) + } + } +} + +// Probe execs into a pod and probes another pod once. +func (k *KubeManager) Probe(fromNs, fromPod, toNs, toPod string, proto ModelProtocol, toPort int) error { + return k.ProbeRepeatedly(fromNs, fromPod, toNs, toPod, proto, toPort, 0) +} + +// ProbeRepeatedlyKind will make several kubectl exec calls (one per probe) until secondsToProbe has passed +func (k *KubeManager) ProbeRepeatedlyKind(fromNs, fromPod, toNs, toPod string, proto ModelProtocol, toPort, secondsToProbe int) { + wg := sync.WaitGroup{} + wg.Add(1) + finish := make(chan struct{}, 1) + go func() { + ticker := time.NewTicker(1 * time.Millisecond) + defer ticker.Stop() + round := 1 + for { + select { + case <-ticker.C: + k.l.Info(fmt.Sprintf("probing round %5d", round)) + round++ + k.ProbeRepeatedly(fromNs, fromPod, toNs, toPod, proto, toPort, 0) + case <-finish: + k.l.Info("finished probing") + wg.Done() + return + } + } + }() + + time.Sleep(time.Duration(secondsToProbe) * time.Second) + finish <- struct{}{} + wg.Wait() +} + +// ProbeRepeatedly execs into a pod once. +// If secondsToProbe <= 0 seconds, then this method will tell the Pod to probe once. +// Otherwise, it will tell the Pod to run a loop, probing until secondsToProbe has passed (loop does not work in Kind). +func (k *KubeManager) ProbeRepeatedly(fromNs, fromPod, toNs, toPod string, proto ModelProtocol, toPort, secondsToProbe int) error { + var toIP string + for _, pod := range k.allPods { + if pod.Namespace == toNs && pod.Name == toPod { + toIP = pod.ServiceIP + break + } + } + if toIP == "" { + return ErrNoServiceIP + } + + var cmd []string + agnhostTimeout := fmt.Sprintf("--timeout=%ds", probeTimeoutSeconds) + switch proto { + case TCP: + cmd = []string{"/agnhost", "connect", net.JoinHostPort(toIP, fmt.Sprint(toPort)), agnhostTimeout, "--protocol=tcp"} + case UDP: + cmd = []string{"/agnhost", "connect", net.JoinHostPort(toIP, fmt.Sprint(toPort)), agnhostTimeout, "--protocol=udp"} + case HTTP: + cmd = []string{"curl", net.JoinHostPort(toIP, fmt.Sprint(toPort)), "--connect-timeout", fmt.Sprint(probeTimeoutSeconds)} + default: + k.l.Error("probing protocol not supported", zap.Any("proto", proto)) + return ErrUnsupportedProtocol + } + + // works on AKS but not in Kind + if secondsToProbe >= 1 { + // break out of infinite for-loop once enough seconds have passed + loop := fmt.Sprintf("'start=`date +%%s` ; while : ; do %s ; curr=`date +%%s` ; if [[ $(( curr - start )) -ge %d ]]; then break ; fi ; done'", + strings.Join(cmd, " "), secondsToProbe) + cmd = []string{"bash", "-c", loop} + } + + cmd = append([]string{"kubectl", "exec", fromPod, "-n", fromNs, "--"}, cmd...) + + k.l.Info("Starting probe", zap.String("fromNs", fromNs), zap.String("fromPod", fromPod), zap.String("toNs", toNs), zap.String("toPod", toPod), zap.String("toIP", toIP)) + k.l.Info(fmt.Sprintf("executing command: %s", strings.Join(cmd, " "))) + + output, err := exec.Command(cmd[0], cmd[1:]...).CombinedOutput() + k.l.Info(string(output)) + if err != nil { + return fmt.Errorf("seems like we failed to run the probe command. err: %w", err) + } + return nil +} + +// Perform nslookup from a pod to a domain. 
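+// Example (editor's sketch, mirroring the DNS suite later in this change): k.PerformNslookup(namespace, "dns-test-pod", "kubernetes.default")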
+func (k *KubeManager) PerformNslookup(fromNs, fromPod, domain string) error { + // construct nslookup command + cmd := []string{"nslookup", domain} + cmd = append([]string{"kubectl", "exec", fromPod, "-n", fromNs, "--"}, cmd...) + + k.l.Info("Starting nslookup", zap.String("fromNs", fromNs), zap.String("fromPod", fromPod), zap.String("domain", domain)) + k.l.Info(fmt.Sprintf("executing command: %s", strings.Join(cmd, " "))) + + output, err := exec.Command(cmd[0], cmd[1:]...).CombinedOutput() + k.l.Info(string(output)) + if err != nil { + return fmt.Errorf("seems like we failed to run the nslookup command. err: %w", err) + } + return nil +} + +func (k *KubeManager) createNamespace(name string) error { + labels := map[string]string{ + "e2e": "true", + } + + namespaceObj := &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: labels, + }, + Status: v1.NamespaceStatus{}, + } + + // retry a few times + return wait.PollImmediateWithContext(context.TODO(), 1*time.Minute, 30*time.Second, func(ctx context.Context) (bool, error) { + _, err := k.clientSet.CoreV1().Namespaces().Create(ctx, namespaceObj, metav1.CreateOptions{}) + if err != nil { + k.l.Error("unable to create namespace", zap.String("err", err.Error())) + return false, nil + } + return true, nil + }) +} + +// createService is a convenience function for service setup. +func (k *KubeManager) createService(service *v1.Service) (*v1.Service, error) { + ns := service.Namespace + name := service.Name + + k.l.Info(fmt.Sprintf("creating svc %s/%s", ns, name)) + createdService, err := k.clientSet.CoreV1().Services(ns).Create(context.TODO(), service, metav1.CreateOptions{}) + if err != nil { + return nil, fmt.Errorf("unable to create service %s/%s. err: %w", ns, name, err) + } + return createdService, nil +} + +// createPod is a convenience function for pod setup. +func (k *KubeManager) createPod(pod *v1.Pod) (*v1.Pod, error) { + ns := pod.Namespace + k.l.Info(fmt.Sprintf("creating pod %s/%s", ns, pod.Name)) + + createdPod, err := k.clientSet.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + if err != nil { + return nil, fmt.Errorf("unable to create pod %s/%s. err: %w", ns, pod.Name, err) + } + return createdPod, nil +} + +func (k *KubeManager) waitForPodReady(pod *v1.Pod) error { + ctx, cancelFn := context.WithTimeout(context.Background(), 60*time.Second) + defer cancelFn() + + for { + select { + case <-ctx.Done(): + k.logOutputOfKubectlCommand("get", "pods", "-n", pod.Namespace, "-o", "wide", "--show-labels") + + k.logOutputOfKubectlCommand("get", "node", "-o", "wide", "--show-labels") + + k.logOutputOfKubectlCommand("describe", "pod", "-n", pod.Namespace, pod.Name) + + return fmt.Errorf("timed out waiting for pod %s to be ready", pod.Name) + default: + pod, err := k.clientSet.CoreV1().Pods(pod.Namespace).Get(context.Background(), pod.Name, metav1.GetOptions{}) + if err != nil { + // Maybe intermittent error, retry. 
+ continue + } + if pod.Status.Phase == corev1.PodRunning { + return nil + } + time.Sleep(1 * time.Second) + } + } +} + +func (k *KubeManager) logOutputOfKubectlCommand(args ...string) { + k.l.Info(fmt.Sprintf("running command: kubectl %s", strings.Join(args, " "))) + out, err := exec.Command("kubectl", args...).CombinedOutput() + if err != nil { + k.l.Error("failed to run kubectl command", zap.String("err", err.Error())) + return + } + + k.l.Info(string(out)) +} + +func (km *KubeManager) RestartDaemonset(namespace string, name string) error { + km.l.Info("Restarting daemonset", zap.String("namespace", namespace), zap.String("name", name)) + execCmd := []string{"kubectl", "rollout", "restart", "daemonset", name, "-n", namespace} + cmd := exec.Command(execCmd[0], execCmd[1:]...) + out, err := cmd.Output() + if err != nil { + km.l.Error("Failed to restart daemonset", zap.Error(err), zap.String("output", string(out))) + return err + } + // sleep for a while to let the daemonset restart. + time.Sleep(5 * time.Second) + return nil +} + +// func to check daemonset is ready +func (km *KubeManager) WaitForDaemonsetReady(namespace string, name string) error { + km.l.Info("Waiting for daemonset to be ready", zap.String("namespace", namespace), zap.String("name", name)) + ctx, cancelFn := context.WithTimeout(context.Background(), timeout) + defer cancelFn() + + for { + select { + case <-ctx.Done(): + return fmt.Errorf("timed out waiting for daemonset %s to be ready", name) + default: + ds, err := km.clientSet.AppsV1().DaemonSets(namespace).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + // Maybe intermittent error, retry. + time.Sleep(1 * time.Second) + continue + } + // waits for daemonset pods to be running + if ds.Status.DesiredNumberScheduled == ds.Status.NumberReady && ds.Status.DesiredNumberScheduled == ds.Status.NumberAvailable { + km.l.Info("Daemonset is ready", zap.String("namespace", namespace), zap.String("name", name)) + return nil + } + time.Sleep(1 * time.Second) + } + } +} + +// Update the configmap to enable pod level metrics. +func (km *KubeManager) UpdateRetinaConfigMap(kv map[string]string, configmap string) error { + km.l.Info("Updating configmap", zap.String("name", configmap), zap.Any("new key values", kv)) + cm, err := km.clientSet.CoreV1().ConfigMaps("kube-system").Get(context.Background(), configmap, metav1.GetOptions{}) + if err != nil { + km.l.Error("Failed to get configmap", zap.Error(err)) + return err + } + cfgstring := strings.ReplaceAll(cm.Data["config.yaml"], "\\n", "\n") + var data map[string]interface{} + if err := yaml.Unmarshal([]byte(cfgstring), &data); err != nil { + km.l.Error("Failed to unmarshal configmap", zap.Error(err), zap.Any("kcf", data)) + return err + } + for k, v := range kv { + data[k] = v + } + kcfgBytes, err := yaml.Marshal(data) + if err != nil { + km.l.Error("Failed to marshal configmap", zap.Error(err)) + return err + } + cm.Data["config.yaml"] = string(kcfgBytes) + _, err = km.clientSet.CoreV1().ConfigMaps("kube-system").Update(context.Background(), cm, metav1.UpdateOptions{}) + if err != nil { + km.l.Error("Failed to update configmap", zap.Error(err)) + return err + } + return nil +} + +// SetPodLevel sets the pod level metrics to enabled/disabled. +// It updates the configmap cmName and restarts the daemonset dsName in namespace. 
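+// Example (editor's sketch): km.SetPodLevel(true, "retina-config", "kube-system", "retina-agent")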
+// dsName should be retina-agent or retina-agent-win +func (km *KubeManager) SetPodLevel(enabled bool, cmName, namespace, dsName string) error { + km.l.Info("Setting enablePodLevel", zap.Bool("enabled", enabled), zap.String("configmap", cmName)) + kv := map[string]string{ + "enablePodLevel": strconv.FormatBool(enabled), + } + err := km.UpdateRetinaConfigMap(kv, cmName) + if err != nil { + km.l.Error("Failed to update configmap", zap.Error(err)) + return err + } + err = km.RestartDaemonset(namespace, dsName) + if err != nil { + return err + } + err = km.WaitForDaemonsetReady(namespace, dsName) + if err != nil { + return err + } + return nil +} + +func (km *KubeManager) CreateCRDFromFile(path string) error { + crdYaml, err := os.ReadFile(path) + if err != nil { + km.l.Error("Failed to read crd yaml", zap.Error(err)) + return err + } + + // Decode the CRD manifest into an Unstructured object + decoder := scheme.Codecs.UniversalDeserializer() + obj, _, err := decoder.Decode(crdYaml, nil, nil) + if err != nil { + return err + } + crd, ok := obj.(*v1beta1.CustomResourceDefinition) + if !ok { + km.l.Error("Failed to convert CRD fixture to CRD", zap.Error(err)) + return err + } + + _, err = km.crdClient.CustomResourceDefinitions().Create(context.Background(), crd, metav1.CreateOptions{}) + if err != nil { + if statusError, isStatus := err.(*apierrors.StatusError); isStatus { + if statusError.ErrStatus.Code == http.StatusConflict { + km.l.Info("CRD already exists") + return nil + } + } + km.l.Error("Failed to create crd", zap.Error(err)) + return err + } + + return nil +} + +// annotate namespace +// To annotate with retina observe annotations, use common.RetinaObserveAnnotations and common.RetinaObserveAnnotationValue +func (km *KubeManager) AnnotateNamespace(ns string, annotations map[string]string) error { + // uses client-go to annotate a namespace with the name ns + km.l.Info("Annotating namespace", zap.String("namespace", ns)) + namespace, err := km.clientSet.CoreV1().Namespaces().Get(context.Background(), ns, metav1.GetOptions{}) + if err != nil { + km.l.Error("Failed to get namespace", zap.Error(err)) + return err + } + if namespace.Annotations == nil { + namespace.Annotations = make(map[string]string) + } + for k, v := range annotations { + namespace.Annotations[k] = v + } + _, err = km.clientSet.CoreV1().Namespaces().Update(context.Background(), namespace, metav1.UpdateOptions{}) + if err != nil { + km.l.Error("Failed to update namespace", zap.Error(err)) + return err + } + return nil +} + +// annotate pod in a specific namespace with retina observe +func (km *KubeManager) AnnotatePod(pod, namespace string, annotations map[string]string) error { + km.l.Info("Annotating pod", zap.String("pod", pod)) + podObj, err := km.clientSet.CoreV1().Pods(namespace).Get(context.Background(), pod, metav1.GetOptions{}) + if err != nil { + km.l.Error("Failed to get pod", zap.Error(err)) + return err + } + if podObj.Annotations == nil { + podObj.Annotations = make(map[string]string) + } + for k, v := range annotations { + podObj.Annotations[k] = v + } + _, err = km.clientSet.CoreV1().Pods(namespace).Update(context.Background(), podObj, metav1.UpdateOptions{}) + if err != nil { + km.l.Error("Failed to update pod", zap.Error(err)) + return err + } + return nil +} + +// get namespaces that are annotated with annotations +func (km *KubeManager) GetAnnotatedNamespaces(annotations map[string]string) []*corev1.Namespace { + km.l.Info("Getting annotated namespaces", zap.Any("annotations", annotations)) + // if no 
annotations are provided, return nil
+	if len(annotations) == 0 {
+		km.l.Warn("No annotations provided")
+		return nil
+	}
+	namespaces, err := km.clientSet.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{})
+	if err != nil {
+		km.l.Error("Failed to get namespaces", zap.Error(err))
+		return nil
+	}
+
+	var annotatedNamespaces []*corev1.Namespace
+	for _, ns := range namespaces.Items {
+		ns := ns // copy the loop variable so the pointer below does not alias it (pre-Go 1.22 semantics)
+		matches := true
+		for k, v := range annotations {
+			if nsValue, exists := ns.Annotations[k]; !exists || nsValue != v {
+				matches = false
+				break
+			}
+		}
+		if matches {
+			annotatedNamespaces = append(annotatedNamespaces, &ns)
+		}
+	}
+
+	return annotatedNamespaces
+}
+
+// GetAnnotatedPods returns the pods in a namespace whose annotations contain all the given key/value pairs.
+func (km *KubeManager) GetAnnotatedPods(namespace string, annotations map[string]string) []*corev1.Pod {
+	km.l.Info("Getting annotated pods", zap.String("namespace", namespace), zap.Any("annotations", annotations))
+	pods, err := km.clientSet.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{})
+	if err != nil {
+		km.l.Error("Failed to get pods", zap.Error(err))
+		return nil
+	}
+
+	var annotatedPods []*corev1.Pod
+	for _, pod := range pods.Items {
+		pod := pod // copy the loop variable so the pointer below does not alias it (pre-Go 1.22 semantics)
+		matches := true
+		for k, v := range annotations {
+			if val, ok := pod.Annotations[k]; !ok || val != v {
+				matches = false
+				break
+			}
+		}
+		if matches {
+			annotatedPods = append(annotatedPods, &pod)
+		}
+	}
+	return annotatedPods
+}
+
+func (km *KubeManager) RemovePodAnnotations(pod, namespace string, annotations map[string]string) error {
+	km.l.Info("Removing pod annotations", zap.String("pod", pod), zap.String("namespace", namespace), zap.Any("annotations", annotations))
+	podObj, err := km.clientSet.CoreV1().Pods(namespace).Get(context.Background(), pod, metav1.GetOptions{})
+	if err != nil {
+		km.l.Error("Failed to get pod", zap.Error(err))
+		return err
+	}
+	for k := range annotations {
+		delete(podObj.Annotations, k)
+	}
+	_, err = km.clientSet.CoreV1().Pods(namespace).Update(context.Background(), podObj, metav1.UpdateOptions{})
+	if err != nil {
+		km.l.Error("Failed to update pod", zap.Error(err))
+		return err
+	}
+	return nil
+}
+
+func (km *KubeManager) RemoveNamespaceAnnotations(namespace string, annotations map[string]string) error {
+	km.l.Info("Removing namespace annotations", zap.String("namespace", namespace), zap.Any("annotations", annotations))
+	namespaceObj, err := km.clientSet.CoreV1().Namespaces().Get(context.Background(), namespace, metav1.GetOptions{})
+	if err != nil {
+		km.l.Error("Failed to get namespace", zap.Error(err))
+		return err
+	}
+	for k := range annotations {
+		delete(namespaceObj.Annotations, k)
+	}
+	_, err = km.clientSet.CoreV1().Namespaces().Update(context.Background(), namespaceObj, metav1.UpdateOptions{})
+	if err != nil {
+		km.l.Error("Failed to update namespace", zap.Error(err))
+		return err
+	}
+	return nil
+}
+
+// ApplyNetworkDropPolicy applies a deny-all-ingress network policy to pods matching the given label in the given namespace.
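+// Example (editor's sketch, as used by the dropreason suite): km.ApplyNetworkDropPolicy(ns, "deny-server-ingress", "pod", "server")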
+func (km *KubeManager) ApplyNetworkDropPolicy(namespace, policyName, podLabelKey, podLabelValue string) error { + _, err := km.clientSet.NetworkingV1().NetworkPolicies(namespace).Create(context.Background(), &nwv1.NetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: policyName, + Namespace: namespace, + }, + Spec: nwv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + podLabelKey: podLabelValue, + }, + }, + Ingress: []nwv1.NetworkPolicyIngressRule{}, + }, + }, metav1.CreateOptions{}) + if err != nil { + // Check if error is because the policy already exists + if k8serrors.IsAlreadyExists(err) { + km.l.Warn("Network policy already exists", zap.String("policyName", policyName)) + return nil + } + + km.l.Error("Failed to create network policy", zap.Error(err)) + return err + } + + km.l.Info("Network policy is created", zap.String("policyName", policyName)) + return nil +} + +// RemoveNetworkPolicy removes the network policy with the given name in the given namespace. +func (km *KubeManager) RemoveNetworkPolicy(namespace, policyName string) error { + err := km.clientSet.NetworkingV1().NetworkPolicies(namespace).Delete(context.Background(), policyName, metav1.DeleteOptions{}) + if err != nil { + km.l.Error("Failed to delete network policy", zap.Error(err)) + return err + } + + km.l.Info("Network policy is deleted", zap.String("policyName", policyName)) + return nil +} diff --git a/test/integration/common/metrics_model.go b/test/integration/common/metrics_model.go new file mode 100644 index 000000000..2e52b2efc --- /dev/null +++ b/test/integration/common/metrics_model.go @@ -0,0 +1,309 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +//go:build integration +// +build integration + +package integration + +import ( + prom_exporter "github.com/microsoft/retina/pkg/exporter" +) + +type MetricType int + +const ( + // dropreason and forward metrics + Count MetricType = iota + Bytes + // dns metrics + Request + Response + // linuxutil metrics + IPMetricType + TCPMetricType + UDPMetricType +) + +// Prefix for all metrics +var RetinaPrefix = prom_exporter.RetinaNamespace + "_" + +// Drop related metrics +var ( + DropReasonCountMetricName = RetinaPrefix + "drop_count" + DropReasonBytesMetricName = RetinaPrefix + "drop_bytes" + AdvancedDropReasonCountMetricName = RetinaPrefix + "adv_drop_count" + AdvancedDropReasonBytesMetricName = RetinaPrefix + "adv_drop_bytes" +) + +// Forward related metrics +var ( + ForwardCountMetricName = RetinaPrefix + "forward_count" + ForwardBytesMetricName = RetinaPrefix + "forward_bytes" + AdvancedForwardCountMetricName = RetinaPrefix + "adv_forward_count" + AdvancedForwardBytesMetricName = RetinaPrefix + "adv_forward_bytes" +) + +// DNS related metrics +var ( + DNSRequestMetricName = RetinaPrefix + "adv_dns_request_count" + DNSResponseMetricName = RetinaPrefix + "adv_dns_response_count" +) + +// Linuxutil related metrics +var ( + LinuxUtilInterfaceStatsMetricName = RetinaPrefix + "interface_stats" + LinuxUtilIpConnectionStatsMetricName = RetinaPrefix + "ip_connection_stats" + LinuxUtilTcpConnectionRemoteMetricName = RetinaPrefix + "tcp_connection_remote" + LinuxUtilTcpConnectionStatsMetricName = RetinaPrefix + "tcp_connection_stats" + LinuxUtilTcpStateMetricName = RetinaPrefix + "tcp_state" + LinuxUtilUdpConnectionStatsMetricName = RetinaPrefix + "udp_connection_stats" +) + +// Other advanced metrics +var ( + AdvancedTcpFlagsCountMetricName = RetinaPrefix + "adv_tcpflags_count" +) + +type BaseObj 
struct { + Ip string + Namespace string + PodName string + WorkloadKind string + WorkloadName string +} + +type ModelBasicDropReasonMetrics struct { + Direction string + Reason string +} + +// NewModelBasicDropReasonMetrics creates a ModelBasicDropReasonMetrics +func NewModelBasicDropReasonMetrics(direction, reason string) *ModelBasicDropReasonMetrics { + return &ModelBasicDropReasonMetrics{ + Direction: direction, + Reason: reason, + } +} + +// Given a ModelBasicDropReasonMetrics, convert it to list of labels +func (m *ModelBasicDropReasonMetrics) WithLabels(metricType MetricType) MetricWithLabels { + var metricName string + switch metricType { + case Count: + metricName = DropReasonCountMetricName + case Bytes: + metricName = DropReasonBytesMetricName + } + return MetricWithLabels{ + Metric: metricName, + Labels: []string{ + "direction", m.Direction, + "reason", m.Reason, + }, + } +} + +type ModelLocalCtxDropReasonMetrics struct { + ModelBasicDropReasonMetrics + BaseObj +} + +func NewModelLocalCtxDropReasonMetrics(direction, ip, namespace, podname, reason, workloadKind, workloadName string) *ModelLocalCtxDropReasonMetrics { + return &ModelLocalCtxDropReasonMetrics{ + ModelBasicDropReasonMetrics: *NewModelBasicDropReasonMetrics(direction, reason), + BaseObj: BaseObj{ + Ip: ip, + Namespace: namespace, + PodName: podname, + WorkloadKind: workloadKind, + WorkloadName: workloadName, + }, + } +} + +// Given a ModelLocalCtxDropReasonMetrics, convert it to list of labels +func (m *ModelLocalCtxDropReasonMetrics) WithLabels(metricType MetricType) MetricWithLabels { + labels := m.ModelBasicDropReasonMetrics.WithLabels(metricType).Labels + labels = append(labels, []string{ + "ip", m.BaseObj.Ip, + "namespace", m.BaseObj.Namespace, + "podname", m.BaseObj.PodName, + "workloadKind", m.BaseObj.WorkloadKind, + "workloadName", m.BaseObj.WorkloadName, + }...) 
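+	// The local-context variant is exported under the advanced metric names (adv_drop_count / adv_drop_bytes) rather than the basic drop_count / drop_bytes.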
+ + var metricName string + switch metricType { + case Count: + metricName = AdvancedDropReasonCountMetricName + case Bytes: + metricName = AdvancedDropReasonBytesMetricName + } + + return MetricWithLabels{ + Metric: metricName, + Labels: labels, + } +} + +type ModelTcpFlagsMetrics struct { + Flag string + BaseObj +} + +func NewModelTcpFlagsMetrics(flag, ip, namespace, podname, workloadKind, workloadName string) *ModelTcpFlagsMetrics { + return &ModelTcpFlagsMetrics{ + Flag: flag, + BaseObj: BaseObj{ + Ip: ip, + Namespace: namespace, + PodName: podname, + WorkloadKind: workloadKind, + WorkloadName: workloadName, + }, + } +} + +// Given a ModelTcpFlagsMetrics, convert it to list of labels +func (m *ModelTcpFlagsMetrics) WithLabels() MetricWithLabels { + return MetricWithLabels{ + Metric: AdvancedTcpFlagsCountMetricName, + Labels: []string{ + "flag", m.Flag, + "ip", m.BaseObj.Ip, + "namespace", m.BaseObj.Namespace, + "podname", m.BaseObj.PodName, + "workloadKind", m.BaseObj.WorkloadKind, + "workloadName", m.BaseObj.WorkloadName, + }, + } +} + +type ModelBasicForwardMetrics struct { + Direction string +} + +func NewModelBasicForwardMetrics(direction string) *ModelBasicForwardMetrics { + return &ModelBasicForwardMetrics{ + Direction: direction, + } +} + +// Given a ModelBasicForwardCountMetrics, convert it to list of labels +func (m *ModelBasicForwardMetrics) WithLabels(metricType MetricType) MetricWithLabels { + var metricName string + switch metricType { + case Count: + metricName = ForwardCountMetricName + case Bytes: + metricName = ForwardBytesMetricName + } + + return MetricWithLabels{ + Metric: metricName, + Labels: []string{ + "direction", m.Direction, + }, + } +} + +type ModelLocalCtxForwardMetrics struct { + ModelBasicForwardMetrics + BaseObj +} + +func NewModelLocalCtxForwardMetrics(direction, ip, namespace, podname, workloadKind, workloadName string) *ModelLocalCtxForwardMetrics { + return &ModelLocalCtxForwardMetrics{ + ModelBasicForwardMetrics: *NewModelBasicForwardMetrics(direction), + BaseObj: BaseObj{ + Ip: ip, + Namespace: namespace, + PodName: podname, + WorkloadKind: workloadKind, + WorkloadName: workloadName, + }, + } +} + +// Given a ModelLocalCtxForwardMetrics, convert it to list of labels +func (m *ModelLocalCtxForwardMetrics) WithLabels(metricType MetricType) MetricWithLabels { + labels := m.ModelBasicForwardMetrics.WithLabels(metricType).Labels + labels = append(labels, []string{ + "ip", m.BaseObj.Ip, + "namespace", m.BaseObj.Namespace, + "podname", m.BaseObj.PodName, + "workloadKind", m.BaseObj.WorkloadKind, + "workloadName", m.BaseObj.WorkloadName, + }...) 
+ + var metricName string + switch metricType { + case Count: + metricName = AdvancedForwardCountMetricName + case Bytes: + metricName = AdvancedForwardBytesMetricName + } + + return MetricWithLabels{ + Metric: metricName, + Labels: labels, + } +} + +type ModelDnsCountMetrics struct { + NumResponse string + Query string + QueryType string + Response string + ReturnCode string + BaseObj +} + +func NewModelDnsCountMetrics(numResponse, query, queryType, response, returnCode, ip, namespace, podname, workloadKind, workloadName string) *ModelDnsCountMetrics { + return &ModelDnsCountMetrics{ + NumResponse: numResponse, + Query: query, + QueryType: queryType, + Response: response, + ReturnCode: returnCode, + BaseObj: BaseObj{ + Ip: ip, + Namespace: namespace, + PodName: podname, + WorkloadKind: workloadKind, + WorkloadName: workloadName, + }, + } +} + +func (m *ModelDnsCountMetrics) WithLabels(metricType MetricType) MetricWithLabels { + var metricName string + switch metricType { + case Request: + metricName = DNSRequestMetricName + case Response: + metricName = DNSResponseMetricName + } + + return MetricWithLabels{ + Metric: metricName, + Labels: []string{ + "num_response", m.NumResponse, + "query", m.Query, + "query_type", m.QueryType, + "response", m.Response, + "return_code", m.ReturnCode, + "ip", m.BaseObj.Ip, + "namespace", m.BaseObj.Namespace, + "podname", m.BaseObj.PodName, + "workloadKind", m.BaseObj.WorkloadKind, + "workloadName", m.BaseObj.WorkloadName, + }, + } +} + +type MetricWithLabels struct { + Metric string + Labels []string +} diff --git a/test/integration/common/model.go b/test/integration/common/model.go new file mode 100644 index 000000000..b47b532bd --- /dev/null +++ b/test/integration/common/model.go @@ -0,0 +1,233 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +//go:build integration +// +build integration + +// this file provides the Namespace and Pod structs that emulate those in upstream NetPol e2e: +// https://github.com/kubernetes/kubernetes/blob/master/test/e2e/network/netpol/model.go + +package integration + +import ( + "fmt" + "strings" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + PodLabelKey = "pod" + + agnHostImage = "mcr.microsoft.com/aks/e2e/k8s-agnhost:2.39" +) + +type ModelNode struct { + OS string + Arch string + HostName string +} + +func NewModelNode(n v1.Node) ModelNode { + return ModelNode{ + OS: n.Labels["kubernetes.io/os"], + Arch: n.Labels["kubernetes.io/arch"], + HostName: n.Labels["kubernetes.io/hostname"], + } +} + +var ( + LinuxAmd64 = ModelNode{ + OS: "linux", + Arch: "amd64", + } + + LinuxArm64 = ModelNode{ + OS: "linux", + Arch: "arm64", + } + + LinuxAnyArch = ModelNode{ + OS: "linux", + Arch: "", + } + + // TODO windows node types +) + +type ModelProtocol string + +const ( + TCP ModelProtocol = "TCP" + UDP ModelProtocol = "UDP" + HTTP ModelProtocol = "HTTP" +) + +func (p ModelProtocol) ToV1Proto() v1.Protocol { + if p == UDP { + return v1.ProtocolUDP + } + return v1.ProtocolTCP +} + +// ModelNamespace is an abstract representation i.e. ignores kube implementation details +type ModelNamespace struct { + BaseName string + Pods []*ModelPod +} + +// NewModelNamespace creates a Namespace with the given Pods +func NewModelNamespace(name string, pods ...*ModelPod) *ModelNamespace { + return &ModelNamespace{ + BaseName: name, + Pods: pods, + } +} + +// ModelPod is an abstract representation i.e. 
ignores kube implementation details +type ModelPod struct { + Name string + Node ModelNode + Containers []*ModelContainer +} + +// NewModelPod can be chained with WithContainer +func NewModelPod(name string, n ModelNode) *ModelPod { + return &ModelPod{ + Name: name, + Node: n, + } +} + +// WithContainer appends the specified container to the Pod and returns the same object +func (p *ModelPod) WithContainer(port int32, proto ModelProtocol) *ModelPod { + p.Containers = append(p.Containers, &ModelContainer{ + Port: port, + Protocol: proto, + }) + return p +} + +// ContainerSpecs builds kubernetes container specs for the pod +func (p *ModelPod) ContainerSpecs() []v1.Container { + var containers []v1.Container + for _, cont := range p.Containers { + containers = append(containers, cont.Spec()) + } + return containers +} + +// Labels returns the default labels that should be placed on a pod/deployment +// in order for it to be uniquely selectable by label selectors +func (p *ModelPod) Labels() map[string]string { + return map[string]string{ + PodLabelKey: p.Name, + } +} + +// KubePod returns the kube pod (will add label selectors for windows if needed). +func (p *ModelPod) KubePod(namespace string) *v1.Pod { + zero := int64(0) + + kPod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: p.Name, + Labels: p.Labels(), + Namespace: namespace, + }, + Spec: v1.PodSpec{ + TerminationGracePeriodSeconds: &zero, + Containers: p.ContainerSpecs(), + }, + } + + kPod.Spec.NodeSelector = map[string]string{ + "kubernetes.io/os": p.Node.OS, + } + + if p.Node.Arch != "" { + kPod.Spec.NodeSelector["kubernetes.io/arch"] = p.Node.Arch + } + if p.Node.HostName != "" { + kPod.Spec.NodeSelector["kubernetes.io/hostname"] = p.Node.HostName + } + + return kPod +} + +// QualifiedServiceAddress returns the address that can be used to access the service +func (p *ModelPod) QualifiedServiceAddress(namespace string, dnsDomain string) string { + return fmt.Sprintf("%s.%s.svc.%s", p.ServiceName(namespace), namespace, dnsDomain) +} + +// ServiceName returns the unqualified service name +func (p *ModelPod) ServiceName(namespace string) string { + return fmt.Sprintf("s-%s-%s", namespace, p.Name) +} + +// Service returns a kube service spec +func (p *ModelPod) Service(namespace string) *v1.Service { + service := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: p.ServiceName(namespace), + Namespace: namespace, + }, + Spec: v1.ServiceSpec{ + Selector: p.Labels(), + }, + } + + for _, container := range p.Containers { + service.Spec.Ports = append(service.Spec.Ports, v1.ServicePort{ + Name: fmt.Sprintf("service-port-%s-%d", strings.ToLower(string(container.Protocol)), container.Port), + Protocol: container.Protocol.ToV1Proto(), + Port: container.Port, + }) + } + return service +} + +// ModelContainer is an abstract representation i.e. 
ignores kube implementation details +type ModelContainer struct { + Port int32 + Protocol ModelProtocol +} + +// Name returns the container name +func (c *ModelContainer) Name() string { + return fmt.Sprintf("cont-%d-%s", c.Port, strings.ToLower(string(c.Protocol))) +} + +// PortName returns the container port name +func (c *ModelContainer) PortName() string { + return fmt.Sprintf("serve-%d-%s", c.Port, strings.ToLower(string(c.Protocol))) +} + +// Spec returns the kube container spec +func (c *ModelContainer) Spec() v1.Container { + var cmd []string + switch c.Protocol { + case TCP: + cmd = []string{"/agnhost", "serve-hostname", "--tcp", "--http=false", "--port", fmt.Sprintf("%d", c.Port)} + case UDP: + cmd = []string{"/agnhost", "serve-hostname", "--udp", "--http=false", "--port", fmt.Sprintf("%d", c.Port)} + case HTTP: + cmd = []string{"/agnhost", "serve-hostname", "--http=true", "--port", fmt.Sprintf("%d", c.Port)} + } + + return v1.Container{ + Name: c.Name(), + ImagePullPolicy: v1.PullIfNotPresent, + Image: agnHostImage, + Command: cmd, + Env: []v1.EnvVar{}, + SecurityContext: &v1.SecurityContext{}, + Ports: []v1.ContainerPort{ + { + ContainerPort: c.Port, + Name: c.PortName(), + Protocol: c.Protocol.ToV1Proto(), + }, + }, + } +} diff --git a/test/integration/common/types.go b/test/integration/common/types.go new file mode 100644 index 000000000..2914b419e --- /dev/null +++ b/test/integration/common/types.go @@ -0,0 +1,416 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +//go:build integration +// +build integration + +package integration + +import ( + "context" + "fmt" + "math/big" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "time" + + config "github.com/microsoft/retina/pkg/config" + "github.com/microsoft/retina/pkg/log" + "go.uber.org/zap" + "gopkg.in/yaml.v2" + corev1 "k8s.io/api/core/v1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/util/homedir" +) + +const ( + jumpboxName = "retina-jumpbox" + jumpboxNs = "default" + timeout = 60 * time.Second + BasicMode = "basic" + LocalCtxMode = "localctx" + RemoteCtxMode = "remoteCtx" +) + +var c *IntegrationContext + +type IntegrationContext struct { + l *log.ZapLogger + kubeconfig string + config *rest.Config + + jumpbox *corev1.Pod + // Map agents to node name. + nodeToAgent map[string]corev1.Pod + + Clientset *kubernetes.Clientset + CrdClient *apiextensionsv1beta1.ApiextensionsV1beta1Client + Nodes *corev1.NodeList + RetinaAgents *corev1.PodList + ControlPlaneNodes *corev1.NodeList + ConfigMap config.Config + backoffStrategy wait.Backoff + LogPath string + enablePodLevel bool +} + +func Init() *IntegrationContext { + if c != nil { + return c + } + var err error + + c = &IntegrationContext{ + LogPath: "/tmp/retina-integration-logs", + enablePodLevel: false, + } + opts := log.GetDefaultLogOpts() + opts.File = true + opts.FileName = filepath.Join(c.LogPath, "retina.log") + log.SetupZapLogger(opts) + + // Set backoff strategy. + c.backoffStrategy = wait.Backoff{ + Duration: 10 * time.Millisecond, + Jitter: 0, + Factor: 2, + Steps: 5, + } + + c.l = log.Logger().Named("integration-test") + // Delete log folder if exists. 
+ err = os.RemoveAll(c.LogPath) + if err != nil { + panic(err.Error()) + } + err = os.Mkdir(c.LogPath, 0o755) + if err != nil && !strings.Contains(err.Error(), "file exists") { + panic(err.Error()) + } + c.l.Info("Log path", zap.String("path", c.LogPath)) + c.kubeconfig = filepath.Join(homedir.HomeDir(), ".kube", "config") + if c.kubeconfig == "" { + panic("kubeconfig not set under $HOME/.kube/config") + } + c.l.Info("Using kubeconfig", zap.String("path", c.kubeconfig)) + c.config, err = clientcmd.BuildConfigFromFlags("", c.kubeconfig) + if err != nil { + panic(err.Error()) + } + c.Clientset, err = kubernetes.NewForConfig(c.config) + if err != nil { + panic(err.Error()) + } + c.CrdClient, err = apiextensionsv1beta1.NewForConfig(c.config) + c.Nodes, err = c.Clientset.CoreV1().Nodes().List(context.TODO(), v1.ListOptions{ + LabelSelector: "!node-role.kubernetes.io/control-plane", + }) + if err != nil { + panic(err.Error()) + } + if len(c.Nodes.Items) == 0 { + panic("No agent nodes found") + } + c.ControlPlaneNodes, err = c.Clientset.CoreV1().Nodes().List(context.TODO(), v1.ListOptions{ + LabelSelector: "node-role.kubernetes.io/control-plane", + }) + if err != nil { + panic(err.Error()) + } + c.RetinaAgents, err = c.Clientset.CoreV1().Pods("kube-system").List(context.TODO(), v1.ListOptions{ + LabelSelector: "app=retina", + }) + if err != nil { + panic(err.Error()) + } + if len(c.RetinaAgents.Items) == 0 { + panic("No retina agents found") + } + // Map agents to nodes. + c.nodeToAgent = map[string]corev1.Pod{} + for _, agent := range c.RetinaAgents.Items { + for _, node := range c.Nodes.Items { + // Split agent Node name by / to get the node name. + agentNodeName := strings.Split(agent.Spec.NodeName, "/")[0] + if agentNodeName == node.Name { + c.nodeToAgent[node.Name] = agent + c.l.Info("Mapped agent to node", zap.String("agent", agent.Name), zap.String("node", node.Name)) + break + } + } + } + c.jumpbox, err = c.Clientset.CoreV1().Pods("default").Create(context.TODO(), &corev1.Pod{ + ObjectMeta: v1.ObjectMeta{ + Name: "retina-jumpbox", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "alpine", + Image: "mcr.microsoft.com/mirror/docker/library/alpine:3.16", + Command: []string{"sleep", "infinity"}, + }, + }, + NodeSelector: map[string]string{ + "kubernetes.io/os": "linux", + "kubernetes.io/arch": "amd64", + }, + }, + }, v1.CreateOptions{}) + if err != nil && !strings.Contains(err.Error(), "already exists") { + panic(err.Error()) + } + // Wait for jumpbox to be ready. + err = c.WaitForPodReady(jumpboxNs, jumpboxName) + if err != nil { + panic(err.Error()) + } + + enablePodLevel, err := strconv.ParseBool(os.Getenv("ENABLE_POD_LEVEL")) + c.enablePodLevel = enablePodLevel + c.l.Info("ENABLE_POD_LEVEL set", zap.Bool("ENABLE_POD_LEVEL", enablePodLevel)) + if err != nil { + c.l.Warn("ENABLE_POD_LEVEL error parsing flag") + } + + configMap, err := c.GetConfigMap("retina-config") + c.ConfigMap = configMap + if err != nil { + c.l.Warn("Failed to parse config map", zap.Error(err)) + } + + c.l.Info("Remote context", zap.Bool("remoteContext", c.ConfigMap.RemoteContext)) + c.l.Info("Local context", zap.Bool("localContext", c.ConfigMap.EnableAnnotations)) + return c +} + +func (ictx *IntegrationContext) IsEnablePodLevel() bool { + return ictx.enablePodLevel +} + +func (ictx *IntegrationContext) Logger() *log.ZapLogger { + return ictx.l +} + +func (ictx *IntegrationContext) FetchMetrics(node string) (string, error) { + // Get agent IP. 
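+	// The scrape happens from inside the cluster: the jumpbox pod runs wget against the agent pod's metrics endpoint on port 10093.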
+ ictx.l.Info("Fetching metrics", zap.String("node", node)) + retinaAgentIp := ictx.nodeToAgent[node].Status.PodIP + metricsUrl := fmt.Sprint("http://", retinaAgentIp, ":10093/metrics") + cmd := exec.Command("kubectl", "exec", jumpboxName, "-n", jumpboxNs, "--", "wget", "-qO-", metricsUrl) + out, err := cmd.Output() + if err != nil { + ictx.l.Error("Failed to fetch metrics", zap.Error(err), zap.String("output", string(out))) + return "", err + } + return string(out), nil +} + +func (ictx *IntegrationContext) WaitForPodReady(namespace string, name string) error { + ctx, cancelFn := context.WithTimeout(context.Background(), timeout) + defer cancelFn() + + for { + select { + case <-ctx.Done(): + return fmt.Errorf("timed out waiting for pod %s to be ready", name) + default: + pod, err := ictx.Clientset.CoreV1().Pods(namespace).Get(context.Background(), name, v1.GetOptions{}) + if err != nil { + // Maybe intermittent error, retry. + continue + } + if pod.Status.Phase == corev1.PodRunning { + return nil + } + time.Sleep(1 * time.Second) + } + } +} + +func (ictx *IntegrationContext) GetNode(os string, arch string) (corev1.Node, error) { + for _, node := range ictx.Nodes.Items { + if strings.Contains(node.Labels["kubernetes.io/os"], os) && strings.Contains(node.Labels["kubernetes.io/arch"], arch) { + return node, nil + } + } + return corev1.Node{}, fmt.Errorf("node not found for os %s and arch %s", os, arch) +} + +// Get two distinct nodes by OS +func (ictx *IntegrationContext) GetTwoDistinctNodes(os string) ([]corev1.Node, error) { + nodes := []corev1.Node{} + for _, node := range ictx.Nodes.Items { + if strings.Contains(node.Labels["kubernetes.io/os"], os) { + nodes = append(nodes, node) + } + if len(nodes) == 2 { + break + } + } + if len(nodes) < 2 { + return nil, fmt.Errorf("less than two nodes found for os %s", os) + } + return nodes, nil +} + +func (ictx *IntegrationContext) DumpAgentLogs() { + agentLogPath := filepath.Join(ictx.LogPath, "agent") + err := os.Mkdir(agentLogPath, 0o755) + if err != nil && !strings.Contains(err.Error(), "file exists") { + ictx.l.Error("Failed to create agent log folder", zap.Error(err)) + return + } + for _, agent := range ictx.RetinaAgents.Items { + out := ictx.DumpPodLogs(&agent) + agentLogFile := filepath.Join(agentLogPath, agent.Name+".log") + err = os.WriteFile(agentLogFile, out, 0o644) + if err != nil { + ictx.l.Error("Failed to write agent log file", zap.Error(err)) + continue + } + } +} + +func (ictx *IntegrationContext) DumpAllPodLogs(ns string) { + pods, err := ictx.Clientset.CoreV1().Pods(ns).List(context.Background(), v1.ListOptions{}) + if err != nil { + ictx.l.Error("Failed to list pods", zap.Error(err)) + return + } + podLogPath := filepath.Join(ictx.LogPath, ns) + err = os.Mkdir(podLogPath, 0o755) + if err != nil && !strings.Contains(err.Error(), "file exists") { + ictx.l.Error("Failed to create pod log folder", zap.Error(err)) + return + } + for _, pod := range pods.Items { + out := ictx.DumpPodLogs(&pod) + podLogFile := filepath.Join(podLogPath, pod.Name+".log") + err = os.WriteFile(podLogFile, out, 0o644) + if err != nil { + ictx.l.Error("Failed to write pod log file", zap.Error(err)) + continue + } + } +} + +// Get a value of a config map key +func (ictx *IntegrationContext) GetConfigMap(cmName string) (config.Config, error) { + ictx.l.Info("Getting config map", zap.String("name", cmName)) + var cm *corev1.ConfigMap + operation := func() (bool, error) { + var clientErr error + cm, clientErr = 
ictx.Clientset.CoreV1().ConfigMaps("kube-system").Get(context.Background(), cmName, metav1.GetOptions{}) + if clientErr != nil { + ictx.l.Warn("Failed to get configmap", zap.Error(clientErr)) + return false, clientErr + } + return true, nil + } + + err := wait.ExponentialBackoff(c.backoffStrategy, operation) + if err != nil { + ictx.l.Error("Failed to get configmap after all retries", zap.Error(err)) + } + + cfgstring := strings.ReplaceAll(cm.Data["config.yaml"], "\\n", "\n") + var data config.Config + if err := yaml.Unmarshal([]byte(cfgstring), &data); err != nil { + ictx.l.Error("Failed to unmarshal configmap", zap.Error(err), zap.Any("kcf", data)) + return data, err + } + return data, nil +} + +func (ictx *IntegrationContext) DumpPodLogs(pod *corev1.Pod) []byte { + execCmd := []string{"kubectl", "logs", pod.Name, "-n", pod.Namespace} + cmd := exec.Command(execCmd[0], execCmd[1:]...) + out, err := cmd.Output() + if err != nil { + ictx.l.Error("Failed to get pod logs", zap.Error(err)) + return []byte{} + } + return out +} + +// ValidateTestMode checks if the test mode is enabled. +func (ictx *IntegrationContext) ValidateTestMode(testMode string) bool { + if testMode == LocalCtxMode && !ictx.ConfigMap.EnableAnnotations { + ictx.l.Info("Skipping test", zap.String("reason", "Local context not enabled")) + return false + } + if testMode == RemoteCtxMode && !ictx.ConfigMap.RemoteContext { + ictx.l.Info("Skipping test", zap.String("reason", "Remote context not enabled")) + return false + } + return true +} + +type MetricParser struct { + // Body is the output from the metrics endpoint of Retina agent. + Body string +} + +// Parses metrics given labels. If parseAll is set to true, +// it will sum the values of all metrics that match the labels. +func (mp *MetricParser) Parse(metricLabels MetricWithLabels, parseAll bool) (uint64, error) { + // Append metric name to labels. + labels := metricLabels.Labels + labels = append(labels, metricLabels.Metric) + + result := uint64(0) + lines := strings.Split(mp.Body, "\n") + for _, line := range lines { + if mp.LineMatchesLabels(line, labels) { + values := strings.Split(line, " ") + flt, _, err := big.ParseFloat(values[1], 10, 0, big.ToNearestEven) + if err != nil { + return 0, err + } + val, _ := flt.Uint64() + if !parseAll { + return val, nil + } + result += val + } + } + return result, nil +} + +// Extracts lines that match all labels. +func (mp *MetricParser) ExtractMetricLines(metricLabels MetricWithLabels) []string { + // Append metric name to labels. + labels := metricLabels.Labels + labels = append(labels, metricLabels.Metric) + + lines := strings.Split(mp.Body, "\n") + result := []string{} + for _, line := range lines { + if mp.LineMatchesLabels(line, labels) { + result = append(result, line) + } + } + return result +} + +// Checks if a line matches all labels. +func (mp *MetricParser) LineMatchesLabels(line string, labels []string) bool { + if strings.HasPrefix(line, "#") { + return false + } + for _, label := range labels { + if label != "" && !strings.Contains(line, label) { + return false + } + } + return true +} diff --git a/test/integration/plugins/dns/dns_suite_test.go b/test/integration/plugins/dns/dns_suite_test.go new file mode 100644 index 000000000..5b9d2d419 --- /dev/null +++ b/test/integration/plugins/dns/dns_suite_test.go @@ -0,0 +1,181 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+//go:build integration +// +build integration + +package dns + +import ( + "strconv" + "testing" + "time" + + kcommon "github.com/microsoft/retina/pkg/common" + "github.com/microsoft/retina/pkg/log" + common "github.com/microsoft/retina/test/integration/common" + "go.uber.org/zap" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/extensions/table" + . "github.com/onsi/gomega" +) + +const ( + namespacePrefix = "test-dns-metrics-" + dnsTestPod = "dns-test-pod" + domainName = "kubernetes.default" +) + +var ( + namespace string + l *log.ZapLogger + ictx *common.IntegrationContext + k *common.KubeManager +) + +var ( + expectedDnsRequestMetrics = common.NewModelDnsCountMetrics( + "", + domainName, + "A", + "", + "", + "", + namespacePrefix, + dnsTestPod, + "", + "", + ) + + expectedDnsResponseMetrics = common.NewModelDnsCountMetrics( + "", + domainName, + "A", + "", + "NOERROR", + "", + namespacePrefix, + dnsTestPod, + "", + "", + ) + // coreDnsResponseMetrics = common.NewModelDnsCountMetrics( + +) + +func TestDns(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "DNS integration tests") +} + +var _ = BeforeSuite(func() { + ictx = common.Init() + k = common.NewKubeManager(ictx.Logger(), ictx.Clientset) + ictx.Logger().Info("Before Suite setup complete") +}) + +var _ = AfterSuite(func() { + ictx.DumpAgentLogs() + k.CleanupNamespaces() + ictx.Logger().Info("After Suite cleanup complete") +}) + +var _ = Describe("DNS integration tests", func() { + BeforeEach(func() { + // Create unique namespace. This is to make sure namespace deletion is not + // interfering with other tests. + namespace = namespacePrefix + strconv.FormatInt(time.Now().Unix(), 10) + l = ictx.Logger().Named("dns-metrics") + }) + + AfterEach(func() { + go ictx.DumpAllPodLogs(namespace) + }) + + DescribeTable("dns count should increase for nodes", + testDnsMetrics, + getMetricsEntries()..., + ) +}) + +func getMetricsEntries() []TableEntry { + tableEntries := []TableEntry{ + Entry("[ADVANCED_METRICS_LOCAL_CTX] dns count", + common.LocalCtxMode, + expectedDnsRequestMetrics.WithLabels(common.Request), expectedDnsResponseMetrics.WithLabels(common.Response)), + } + return tableEntries +} + +// TestDnsMetrics tests the dns metrics by sending nslookup requests to kubernetes.default +// NsLookup send A requests and is expecting to receive NOERROR response from the dns server. +func testDnsMetrics(testMode string, requestLabels, responseLabels common.MetricWithLabels) { + l.Info("starting testDnsMetrics") + + // Skip test if test mode is not enabled. + if !ictx.ValidateTestMode(testMode) { + return + } + + // Choose a linux/arch node. + node, err := ictx.GetNode("linux", "") + if err != nil { + l.Warn("No linux/amd64 node, skipping test") + return + } + l.Info("Node chosen", zap.String("node", node.Name)) + // Use the node choosen to deploy server/client. + n := common.NewModelNode(node) + + err = k.InitializeClusterFromModel(common.NewModelNamespace(namespace, + common.NewModelPod(dnsTestPod, n).WithContainer(80, common.HTTP), + )) + Expect(err).To(BeNil()) + + // Annotate namespace in localctx mode. + if testMode == common.LocalCtxMode { + // annotate pod + annotation := map[string]string{ + kcommon.RetinaPodAnnotation: kcommon.RetinaPodAnnotationValue, + } + + err = k.AnnotateNamespace(namespace, annotation) + Expect(err).To(BeNil()) + + } + // Collect current state of metrics. 
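+	// The request/response counters recorded here are re-read after the nslookup and are expected to strictly increase.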
+ mp := fetchMetricParser(node.Name) + + metricOldRequest := parseMetricsValue(requestLabels, mp) + metricOldResponse := parseMetricsValue(responseLabels, mp) + + // Send nslookup requests to bing.com + err = k.PerformNslookup(namespace, dnsTestPod, domainName) + Expect(err).To(BeNil()) + + // Collect metrics again. + mp = fetchMetricParser(node.Name) + metricNewRequest := parseMetricsValue(requestLabels, mp) + metricNewResponse := parseMetricsValue(responseLabels, mp) + + Expect(metricNewRequest).Should(BeNumerically(">", metricOldRequest)) + Expect(metricNewResponse).Should(BeNumerically(">", metricOldResponse)) +} + +// FetchMetricParser fetches the metrics from the node and returns the MetricParser object +func fetchMetricParser(nodeName string) *common.MetricParser { + body, err := ictx.FetchMetrics(nodeName) + Expect(err).To(BeNil()) + mp := &common.MetricParser{ + Body: body, + } + return mp +} + +// Fetched the count for a given label +func parseMetricsValue(metricLabels common.MetricWithLabels, mp *common.MetricParser) uint64 { + val, err := mp.Parse(metricLabels, true) + Expect(err).To(BeNil()) + l.Info("dns packet", zap.Any("labels", metricLabels), zap.Uint64("value", val)) + return val +} diff --git a/test/integration/plugins/dropreason/dropreason_suite_test.go b/test/integration/plugins/dropreason/dropreason_suite_test.go new file mode 100644 index 000000000..238a61ba4 --- /dev/null +++ b/test/integration/plugins/dropreason/dropreason_suite_test.go @@ -0,0 +1,206 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +//go:build integration +// +build integration + +package dropreason + +import ( + "strconv" + "testing" + "time" + + kcommon "github.com/microsoft/retina/pkg/common" + "github.com/microsoft/retina/pkg/log" + common "github.com/microsoft/retina/test/integration/common" + "go.uber.org/zap" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/extensions/table" + . "github.com/onsi/gomega" +) + +const ( + namespace_prefix = "test-drops-metrics-" + annotation_test_prefix = "test-drops-annotation-metrics-" +) + +var ( + namespace string + l *log.ZapLogger + ictx *common.IntegrationContext + k *common.KubeManager +) + +var ( + expectedBasicDropMetrics = common.NewModelBasicDropReasonMetrics( + "unknown", + "IPTABLE_RULE_DROP", + ) + expectedTcpFlagsMetrics = common.NewModelTcpFlagsMetrics( + "SYN", + "", + namespace_prefix, + "client", + "", + "", + ) + expectedLocalContextDropReasonMetrics = common.NewModelLocalCtxDropReasonMetrics( + "egress", + "", + namespace_prefix, + "client", + "IPTABLE_RULE_DROP", + "", + "", + ) +) + +func TestDrops(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Drop metrics and annotations integration tests") +} + +var _ = BeforeSuite(func() { + ictx = common.Init() + k = common.NewKubeManager(ictx.Logger(), ictx.Clientset) + ictx.Logger().Info("Before Suite setup complete") +}) + +var _ = AfterSuite(func() { + ictx.DumpAgentLogs() + k.CleanupNamespaces() + ictx.Logger().Info("After Suite cleanup complete") +}) + +var _ = Describe("Drop metrics integration tests", func() { + BeforeEach(func() { + // Create unique namespace. This is to make sure namespace deletion is not + // interfering with other tests. 
+ namespace = namespace_prefix + strconv.FormatInt(time.Now().Unix(), 10) + l = ictx.Logger().Named("drop-metrics") + }) + + AfterEach(func() { + go ictx.DumpAllPodLogs(namespace) + }) + + DescribeTable("dropped packet count should increase for nodes", + testLinuxPacketDrop, + getAdvancedMetricsEntries()..., + ) +}) + +func getAdvancedMetricsEntries() []TableEntry { + tableEntries := []TableEntry{ + Entry( + "[BASIC_METRICS] drop linux/amd64", + common.BasicMode, common.LinuxAmd64.OS, common.LinuxAmd64.Arch, + expectedBasicDropMetrics.WithLabels(common.Count), expectedBasicDropMetrics.WithLabels(common.Bytes)), + Entry("[BASIC_METRICS] drop linux/arm64", + common.BasicMode, common.LinuxArm64.OS, common.LinuxArm64.Arch, + expectedBasicDropMetrics.WithLabels(common.Count), expectedBasicDropMetrics.WithLabels(common.Bytes)), + Entry("[ADVANCED_METRICS_LOCAL_CTX] drop linux/amd64", + common.LocalCtxMode, common.LinuxAmd64.OS, common.LinuxAmd64.Arch, + expectedLocalContextDropReasonMetrics.WithLabels(common.Count), expectedLocalContextDropReasonMetrics.WithLabels(common.Bytes)), + Entry("[ADVANCED_METRICS_LOCAL_CTX] drop linux/arm64", common.LocalCtxMode, common.LinuxArm64.OS, common.LinuxArm64.Arch, + expectedLocalContextDropReasonMetrics.WithLabels(common.Count), expectedLocalContextDropReasonMetrics.WithLabels(common.Bytes)), + } + return tableEntries +} + +func testLinuxPacketDrop(testMode, os, arch string, countLabels, bytesLabels common.MetricWithLabels) { + l.Info("starting testLinuxPacketDrop", zap.Any("OS", os), zap.Any("Arch", arch)) + + // Skip test if test mode is not enabled. + if !ictx.ValidateTestMode(testMode) { + return + } + + // set isAdvancedMode to true for advanced metrics + isAdvancedMode := testMode != common.BasicMode + + // Choose a linux/arch node. + node, err := ictx.GetNode(os, arch) + if err != nil { + l.Warn("No node of given os-arch, skipping test", zap.Any("OS", os), zap.Any("Arch", arch)) + return + } + l.Info("Node chosen", zap.String("node", node.Name)) + // Use the node choosen to deploy server/client. + n := common.NewModelNode(node) + + err = k.InitializeClusterFromModel(common.NewModelNamespace(namespace, + common.NewModelPod("server", n).WithContainer(80, common.HTTP), + common.NewModelPod("client", n).WithContainer(80, common.TCP), + )) + Expect(err).To(BeNil()) + + // Collect current state of metrics. + mp := fetchMetricParser(node.Name) + + // Annotate namespace in local context + if testMode == common.LocalCtxMode { + // annotate namespace + annotation := map[string]string{ + kcommon.RetinaPodAnnotation: kcommon.RetinaPodAnnotationValue, + } + + // Annotate client pod + err = k.AnnotatePod("client", namespace, annotation) + Expect(err).To(BeNil()) + + // Annotate server + err = k.AnnotatePod("server", namespace, annotation) + Expect(err).To(BeNil()) + + // Annotate namespace + err = k.AnnotateNamespace(namespace, annotation) + Expect(err).To(BeNil()) + + // collect metrics again + mp = fetchMetricParser(node.Name) + + } + metricOldCount := parseMetricsValue(countLabels, isAdvancedMode, mp) + metricOldBytes := parseMetricsValue(bytesLabels, isAdvancedMode, mp) + oldTcpFlagsCount := parseMetricsValue(expectedTcpFlagsMetrics.WithLabels(), isAdvancedMode, mp) + + // Apply Network Policy to drop traffic to nginx pod. + err = k.ApplyNetworkDropPolicy(namespace, "deny-server-ingress", common.PodLabelKey, "server") + Expect(err).To(BeNil()) + + k.ProbeRepeatedlyKind(namespace, "client", namespace, "server", common.HTTP, 80, 20) + + // Collect metrics again. 
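+	// With the deny-server-ingress policy applied and the probes sent, drop count and bytes (plus SYN tcpflags in advanced mode) should strictly exceed the earlier snapshot.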
+ mp = fetchMetricParser(node.Name) + metricNewCount := parseMetricsValue(countLabels, isAdvancedMode, mp) + metricNewBytes := parseMetricsValue(bytesLabels, isAdvancedMode, mp) + + Expect(metricNewCount).Should(BeNumerically(">", metricOldCount)) + Expect(metricNewBytes).Should(BeNumerically(">", metricOldBytes)) + + // Check tcpflags metrics for advanced metrics + if isAdvancedMode { + newTcpFlagsCount := parseMetricsValue(expectedTcpFlagsMetrics.WithLabels(), isAdvancedMode, mp) + Expect(newTcpFlagsCount).Should(BeNumerically(">", oldTcpFlagsCount)) + } +} + +// FetchMetricParser fetches the metrics from the node and returns the MetricParser object +func fetchMetricParser(nodeName string) *common.MetricParser { + body, err := ictx.FetchMetrics(nodeName) + Expect(err).To(BeNil()) + mp := &common.MetricParser{ + Body: body, + } + return mp +} + +// Fetched the count for a given label +func parseMetricsValue(metricLabels common.MetricWithLabels, isAdvancedMode bool, mp *common.MetricParser) uint64 { + val, err := mp.Parse(metricLabels, isAdvancedMode) + Expect(err).To(BeNil()) + l.Info("drop packet", zap.Any("labels", metricLabels), zap.Uint64("value", val)) + return val +} diff --git a/test/integration/plugins/forward/forward_suite_test.go b/test/integration/plugins/forward/forward_suite_test.go new file mode 100644 index 000000000..a69ccbc8a --- /dev/null +++ b/test/integration/plugins/forward/forward_suite_test.go @@ -0,0 +1,259 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +//go:build integration +// +build integration + +package forward + +import ( + "strconv" + "strings" + "testing" + "time" + + kcommon "github.com/microsoft/retina/pkg/common" + "github.com/microsoft/retina/pkg/log" + common "github.com/microsoft/retina/test/integration/common" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/extensions/table" + . "github.com/onsi/gomega" +) + +const ( + namespace_prefix = "test-forward-metrics-" +) + +var ( + namespace string + l *log.ZapLogger + ictx *common.IntegrationContext + k *common.KubeManager +) + +var ( + expectedBasicForwardMetricIngress = common.NewModelBasicForwardMetrics("ingress") + expectedBasicForwardMetricEgress = common.NewModelBasicForwardMetrics("egress") + expectedLocalCtxForwardMetricIngress = common.NewModelLocalCtxForwardMetrics( + "ingress", + "", + namespace_prefix, + "server", + "", + "", + ) + expectedLocalCtxForwardMetricEgress = common.NewModelLocalCtxForwardMetrics( + "egress", + "", + namespace_prefix, + "client", + "", + "", + ) + + expectedTcpFlagsMetrics = common.NewModelTcpFlagsMetrics( + "SYN", + "", + namespace_prefix, + "client", + "", + "", + ) +) + +func TestForward(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Forward metrics integration tests") +} + +var _ = BeforeSuite(func() { + ictx = common.Init() + k = common.NewKubeManager(ictx.Logger(), ictx.Clientset) + ictx.Logger().Info("Before Suite setup complete") +}) + +var _ = AfterSuite(func() { + ictx.DumpAgentLogs() + k.CleanupNamespaces() + ictx.Logger().Info("After Suite cleanup complete") +}) + +var _ = Describe("Forward metrics integration tests", func() { + BeforeEach(func() { + // Create unique namespace. This is to make sure namespace deletion is not + // interfering with other tests. 
+ namespace = namespace_prefix + strconv.FormatInt(time.Now().Unix(), 10) + l = ictx.Logger().Named("forward-metrics") + }) + + AfterEach(func() { + go ictx.DumpAllPodLogs(namespace) + }) + + DescribeTable("Packet count should spike", + testLinuxPacketForward, + getEntries()..., + ) +}) + +func getEntries() []TableEntry { + tableEntries := []TableEntry{ + Entry("[BASIC_METRICS] packet forward linux", + common.BasicMode, + expectedBasicForwardMetricIngress.WithLabels(common.Count), expectedBasicForwardMetricIngress.WithLabels(common.Bytes), + expectedBasicForwardMetricEgress.WithLabels(common.Count), expectedBasicForwardMetricEgress.WithLabels(common.Bytes)), + Entry("[ADVANCED_METRICS_LOCAL_CTX] packet forward count linux with local context", + common.LocalCtxMode, + expectedLocalCtxForwardMetricIngress.WithLabels(common.Count), expectedLocalCtxForwardMetricIngress.WithLabels(common.Bytes), + expectedLocalCtxForwardMetricEgress.WithLabels(common.Count), expectedLocalCtxForwardMetricEgress.WithLabels(common.Bytes)), + } + return tableEntries +} + +func testLinuxPacketForward(testMode string, ingressCountLabels, ingressByteLabels, egressCountLabels, egressByteLabels common.MetricWithLabels) { + // check valid advanced metrics scenario + if !ictx.ValidateTestMode(testMode) { + return + } + + // check if advanced metrics is enabled + isAdvancedMode := testMode != common.BasicMode + + // Get two linux nodes. + nodes := []corev1.Node{} + metrics := map[string]map[string][][2]uint64{} + for _, node := range ictx.Nodes.Items { + if strings.Contains(node.Labels["kubernetes.io/os"], "linux") { + nodes = append(nodes, node) + metrics[node.Name] = map[string][][2]uint64{} + metrics[node.Name]["ingress"] = [][2]uint64{} + metrics[node.Name]["egress"] = [][2]uint64{} + } + if len(nodes) == 2 { + break + } + } + if len(nodes) < 2 { + l.Warn("Less than two linux nodes, skipping test") + return + } + l.Info("Nodes chosen", zap.String("node1", nodes[0].Name), zap.String("node2", nodes[1].Name)) + + f := func(checkTcpFlags, isAnnotated bool) { + for _, node := range nodes { + body, err := ictx.FetchMetrics(node.Name) + Expect(err).To(BeNil()) + mp := &common.MetricParser{ + Body: body, + } + // check which node and modify the podname label accordingly + if isAdvancedMode { + ingressMetrics := expectedLocalCtxForwardMetricIngress + egressMetrics := expectedLocalCtxForwardMetricEgress + if node.Name == nodes[0].Name { + expectedLocalCtxForwardMetricIngress.BaseObj.PodName = "server" + expectedLocalCtxForwardMetricEgress.BaseObj.PodName = "server" + expectedTcpFlagsMetrics.BaseObj.PodName = "server" + + ingressMetrics = expectedLocalCtxForwardMetricIngress + egressMetrics = expectedLocalCtxForwardMetricEgress + } else { + expectedLocalCtxForwardMetricIngress.BaseObj.PodName = "client" + expectedLocalCtxForwardMetricEgress.BaseObj.PodName = "client" + expectedTcpFlagsMetrics.BaseObj.PodName = "client" + + ingressMetrics = expectedLocalCtxForwardMetricIngress + egressMetrics = expectedLocalCtxForwardMetricEgress + } + // Obtain labels + ingressCountLabels = ingressMetrics.WithLabels(common.Count) + ingressByteLabels = ingressMetrics.WithLabels(common.Bytes) + egressCountLabels = egressMetrics.WithLabels(common.Count) + egressByteLabels = egressMetrics.WithLabels(common.Bytes) + } + + // Parse ingress and egress metrics for count and bytes. 
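+				// Each call to f appends one (count, bytes) snapshot per node and direction: index 0 is the initial state, 1 the baseline after settling, 2 after the probes.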
+ for _, direction := range []string{"ingress", "egress"} { + countLabels := ingressCountLabels + bytesLabels := ingressByteLabels + if direction == "egress" { + countLabels = egressCountLabels + bytesLabels = egressByteLabels + } + + countVal, err := mp.Parse(countLabels, isAdvancedMode) + Expect(err).To(BeNil()) + l.Info("Forward packet count / "+direction, zap.Any("labels", countLabels), zap.Uint64("count", countVal)) + + bytesVal, err := mp.Parse(bytesLabels, isAdvancedMode) + Expect(err).To(BeNil()) + l.Info("Forward packet bytes / "+direction, zap.Any("labels", bytesLabels), zap.Uint64("bytes", bytesVal)) + + metrics[node.Name][direction] = append(metrics[node.Name][direction], [2]uint64{countVal, bytesVal}) + } + + // check for tcpflags metrics + if isAdvancedMode && checkTcpFlags { + lines := mp.ExtractMetricLines(expectedTcpFlagsMetrics.WithLabels()) + Expect(lines).NotTo(BeEmpty(), "tcpflags metrics are empty") + } + + } + } + + oneNsTwoPods := common.NewModelNamespace(namespace, + common.NewModelPod("server", common.NewModelNode(nodes[0])).WithContainer(80, common.HTTP).WithContainer(81, common.TCP).WithContainer(82, common.UDP), + common.NewModelPod("client", common.NewModelNode(nodes[1])).WithContainer(80, common.TCP)) + + err := k.InitializeClusterFromModel(oneNsTwoPods) + Expect(err).To(BeNil()) + + // let networking stabilize after creating Pods + time.Sleep(30 * time.Second) + + // Collect current state of metrics. + f(false, false) + + // Annotate namespace in local context + if testMode == common.LocalCtxMode { + annotation := map[string]string{ + kcommon.RetinaPodAnnotation: kcommon.RetinaPodAnnotationValue, + } + err = k.AnnotateNamespace(namespace, annotation) + Expect(err).To(BeNil()) + } + + seconds := 30 + time.Sleep(time.Duration(seconds) * time.Second) + + // Collect metrics again. This will be the baseline. + f(false, true) + + k.ProbeRepeatedlyKind(namespace, "client", namespace, "server", common.HTTP, 80, seconds) + + // Collect metrics again. + f(true, true) + + // Compare metrics. + for _, node := range nodes { + for _, dir := range []string{"ingress", "egress"} { + baselineCount := metrics[node.Name][dir][1][0] - metrics[node.Name][dir][0][0] + spikeCount := metrics[node.Name][dir][2][0] - metrics[node.Name][dir][1][0] + l.Info("Metrics count", zap.String("node", node.Name), zap.String("dir", dir), zap.Uint64("baseline", baselineCount), zap.Uint64("spike", spikeCount)) + Expect(spikeCount).Should(BeNumerically(">", baselineCount)) + + // For node level, the probes do not significanly increase the bytes sent or received. + // Hence, we are not checking for difference between baseline and spike. 
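+			// Basic mode compares raw node-level byte snapshots; advanced (pod-level) mode compares deltas between consecutive snapshots.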
+ baselineBytes := metrics[node.Name][dir][1][1] + spikeBytes := metrics[node.Name][dir][2][1] + if testMode != common.BasicMode { + baselineBytes = metrics[node.Name][dir][1][1] - metrics[node.Name][dir][0][1] + spikeBytes = metrics[node.Name][dir][2][1] - metrics[node.Name][dir][1][1] + } + l.Info("Metrics bytes", zap.String("node", node.Name), zap.String("dir", dir), zap.Uint64("baseline", baselineBytes), zap.Uint64("spike", spikeBytes)) + Expect(spikeBytes).Should(BeNumerically(">", baselineBytes)) + } + } +} diff --git a/test/kind/kind.yaml b/test/kind/kind.yaml new file mode 100644 index 000000000..133192aaa --- /dev/null +++ b/test/kind/kind.yaml @@ -0,0 +1,11 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +networking: + apiServerAddress: "0.0.0.0" +nodes: + - role: control-plane + image: kindest/node:v1.25.3 + - role: worker + image: kindest/node:v1.25.3 + - role: worker + image: kindest/node:v1.25.3 diff --git a/test/managers/filtermanager/main.go b/test/managers/filtermanager/main.go new file mode 100644 index 000000000..7de2a9fd8 --- /dev/null +++ b/test/managers/filtermanager/main.go @@ -0,0 +1,82 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +// nolint + +package main + +import ( + "bufio" + "context" + "fmt" + "net" + "os" + "strings" + "time" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/managers/filtermanager" + "github.com/microsoft/retina/pkg/managers/watchermanager" + "github.com/microsoft/retina/pkg/metrics" + "github.com/microsoft/retina/pkg/watchers/apiserver" + "github.com/microsoft/retina/pkg/watchers/endpoint" + "go.uber.org/zap" +) + +func main() { + reader := bufio.NewReader(os.Stdin) + fmt.Print("Enter comma-delimited ips for filter manager: ") + input, _ := reader.ReadString('\n') + + opts := log.GetDefaultLogOpts() + opts.Level = "debug" + log.SetupZapLogger(opts) + l := log.Logger().Named("test-packetparser") + + metrics.InitializeMetrics() + + ctx := context.Background() + + // watcher manager + wm := watchermanager.NewWatcherManager() + wm.Watchers = []watchermanager.IWatcher{endpoint.Watcher(), apiserver.Watcher()} + + err := wm.Start(ctx) + if err != nil { + l.Error("Failed to start endpoint watcher", zap.Error(err)) + panic(err) + } + defer func() { + if err := wm.Stop(ctx); err != nil { + l.Error("Failed to stop endpoint watcher", zap.Error(err)) + } + }() + + // Filtermanager. + f, err := filtermanager.Init(5) + if err != nil { + l.Error("Failed to start Filtermanager", zap.Error(err)) + panic(err) + } + defer func() { + if err := f.Stop(); err != nil { + l.Error("Failed to stop Filtermanager", zap.Error(err)) + } + }() + + ipsStr := strings.Split(input, ",") + var ips []net.IP + for _, ipStr := range ipsStr { + ip := net.ParseIP(strings.TrimSpace(ipStr)) + if ip != nil { + ips = append(ips, ip) + } + } + // Add IPs to filtermanager. + err = f.AddIPs(ips, "packetparser-test", filtermanager.RequestMetadata{RuleID: "test"}) + if err != nil { + l.Error("AddIPs failed", zap.Error(err)) + return + } + + time.Sleep(30 * time.Second) +} diff --git a/test/plugin/dns/main.go b/test/plugin/dns/main.go new file mode 100644 index 000000000..59cb24f50 --- /dev/null +++ b/test/plugin/dns/main.go @@ -0,0 +1,91 @@ +// Copyright 2022 The Inspektor Gadget authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//nolint:typecheck +package main + +import ( + "context" + "time" + + "github.com/microsoft/retina/pkg/config" + "github.com/microsoft/retina/pkg/controllers/cache" + "github.com/microsoft/retina/pkg/enricher" + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/metrics" + "github.com/microsoft/retina/pkg/plugin/dns" + "github.com/microsoft/retina/pkg/pubsub" + "go.uber.org/zap" +) + +func main() { + opts := log.GetDefaultLogOpts() + opts.Level = "debug" + log.SetupZapLogger(opts) + l := log.Logger().Named("test-dns") + + metrics.InitializeMetrics() + ctx := context.Background() + + tt := dns.New(&config.Config{ + EnablePodLevel: true, + }) + + err := tt.Stop() + if err != nil { + l.Error("Failed to stop dns plugin", zap.Error(err)) + return + } + + ctxTimeout, cancel := context.WithTimeout(ctx, time.Second*30) + defer cancel() + + c := cache.New(pubsub.New()) + e := enricher.New(ctx, c) + e.Run() + + err = tt.Generate(ctxTimeout) + if err != nil { + l.Error("Failed to generate the plugin specific header files", zap.Error(err)) + return + } + + err = tt.Compile(ctxTimeout) + if err != nil { + l.Error("Failed to compile the ebpf to generate bpf object", zap.Error(err)) + return + } + + err = tt.Init() + if err != nil { + l.Error("Failed to initialize plugin specific objects", zap.Error(err)) + return + } + + err = tt.Start(ctx) + if err != nil { + l.Error("Failed to start dns plugin", zap.Error(err)) + return + } + l.Info("Started dns") + + defer func() { + if err := tt.Stop(); err != nil { + l.Error("Failed to stop dns plugin", zap.Error(err)) + } + }() + + for range ctxTimeout.Done() { + } +} diff --git a/test/plugin/dropreason/main.go b/test/plugin/dropreason/main.go new file mode 100644 index 000000000..a9f406f66 --- /dev/null +++ b/test/plugin/dropreason/main.go @@ -0,0 +1,102 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package main + +import ( + "context" + "net" + "time" + + kcfg "github.com/microsoft/retina/pkg/config" + "github.com/microsoft/retina/pkg/managers/filtermanager" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/metrics" + "github.com/microsoft/retina/pkg/plugin/dropreason" + + "go.uber.org/zap" +) + +func main() { + opts := log.GetDefaultLogOpts() + opts.Level = "debug" + log.SetupZapLogger(opts) + l := log.Logger().Named("test-dropreason") + + metrics.InitializeMetrics() + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + cfg := &kcfg.Config{ + MetricsInterval: 1 * time.Second, + EnablePodLevel: true, + } + + // Filtermanager. + f, err := filtermanager.Init(3) + if err != nil { + l.Error("Start filtermanager failed", zap.Error(err)) + return + } + defer func() { + if err := f.Stop(); err != nil { + l.Error("Stop filtermanager failed", zap.Error(err)) + } + }() + + // Add IPs to filtermanager. 
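+	// Sample IPs for manual testing; swap in addresses present in your own cluster when running this harness.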
+ // ipsToAdd := []string{"10.224.0.106", "10.224.0.101"} + // ipsToAdd := []string{"20.69.116.85", "10.224.0.6"} + ipsToAdd := []string{"20.69.116.85"} + ips := []net.IP{} + for _, ip := range ipsToAdd { + x := net.ParseIP(ip).To4() + if x == nil { + l.Fatal("Invalid IP address", zap.String("ip", ip)) + } + ips = append(ips, x) + } + err = f.AddIPs(ips, "packetparser-test", filtermanager.RequestMetadata{RuleID: "test"}) + if err != nil { + l.Error("AddIPs failed", zap.Error(err)) + return + } + + tt := dropreason.New(cfg) + + err = tt.Stop() + if err != nil { + l.Error("Stop failed:%v", zap.Error(err)) + return + } + + err = tt.Generate(ctx) + if err != nil { + l.Error("Generate failed:%v", zap.Error(err)) + return + } + + err = tt.Compile(ctx) + if err != nil { + l.Error("Compile failed:%v", zap.Error(err)) + return + } + + err = tt.Init() + if err != nil { + l.Error("Init failed:%v", zap.Error(err)) + return + } + + err = tt.Start(ctx) + if err != nil { + l.Error("Start failed:%v", zap.Error(err)) + return + } + l.Info("Started dropreason") + + defer tt.Stop() + for range ctx.Done() { + } +} diff --git a/test/plugin/linuxutil/main.go b/test/plugin/linuxutil/main.go new file mode 100644 index 000000000..c46dcf04a --- /dev/null +++ b/test/plugin/linuxutil/main.go @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +package main + +import ( + "context" + "time" + + kcfg "github.com/microsoft/retina/pkg/config" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/metrics" + "github.com/microsoft/retina/pkg/plugin/linuxutil" + + "go.uber.org/zap" +) + +func main() { + log.SetupZapLogger(log.GetDefaultLogOpts()) + l := log.Logger().Named("test-linuxutil") + + metrics.InitializeMetrics() + + cfg := &kcfg.Config{ + MetricsInterval: 1 * time.Second, + EnablePodLevel: true, + } + tt := linuxutil.New(cfg) + err := tt.Init() + if err != nil { + l.Error("Init failed:%v", zap.Error(err)) + return + } + ctx := context.Background() + err = tt.Start(ctx) + if err != nil { + l.Error("start failed:%v", zap.Error(err)) + return + } + l.Info("started linuxutil logger") + + defer func() { + err := tt.Stop() + if err != nil { + l.Error("stop failed:%v", zap.Error(err)) + } + }() + + for range ctx.Done() { + } +} diff --git a/test/plugin/packetforward/main.go b/test/plugin/packetforward/main.go new file mode 100644 index 000000000..7ace2e357 --- /dev/null +++ b/test/plugin/packetforward/main.go @@ -0,0 +1,75 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+// nolint + +package main + +import ( + "context" + "time" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/plugin/packetforward" + + kcfg "github.com/microsoft/retina/pkg/config" + + "github.com/microsoft/retina/pkg/metrics" + "go.uber.org/zap" +) + +func main() { + log.SetupZapLogger(log.GetDefaultLogOpts()) + l := log.Logger().Named("test-packetforward") + + metrics.InitializeMetrics() + + ctx := context.Background() + + cfg := &kcfg.Config{ + MetricsInterval: 1 * time.Second, + EnablePodLevel: true, + } + tt := packetforward.New(cfg) + + err := tt.Stop() + if err != nil { + l.Error("Failed to stop packetforward plugin", zap.Error(err)) + return + } + + ctxTimeout, cancel := context.WithTimeout(ctx, time.Second*10) + defer cancel() + err = tt.Generate(ctxTimeout) + if err != nil { + l.Error("Failed to generate the plugin specific header files", zap.Error(err)) + return + } + + err = tt.Compile(ctxTimeout) + if err != nil { + l.Error("Failed to compile the ebpf to generate bpf object", zap.Error(err)) + return + } + + err = tt.Init() + if err != nil { + l.Error("Failed to initialize plugin specific objects", zap.Error(err)) + return + } + + err = tt.Start(ctx) + if err != nil { + l.Error("Failed to start packetforward plugin", zap.Error(err)) + return + } + l.Info("Started packetforward") + + defer func() { + if err := tt.Stop(); err != nil { + l.Error("Failed to stop packetforward plugin", zap.Error(err)) + } + }() + + for range ctx.Done() { + } +} diff --git a/test/plugin/packetparser/deployment.yaml b/test/plugin/packetparser/deployment.yaml new file mode 100644 index 000000000..1b194ad02 --- /dev/null +++ b/test/plugin/packetparser/deployment.yaml @@ -0,0 +1,28 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + namespace: default + labels: + app: nginx +spec: + replicas: 1 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + # image: nginx:1.14.2 + image: mcr.microsoft.com/mirror/docker/library/nginx:1.23 + # image: mcr.microsoft.com/mirror/docker/library/ubuntu:20.04 + # command: ["/bin/bash", "-c", "while true; do echo hello; sleep 10;done"] + ports: + - containerPort: 80 + nodeSelector: + kubernetes.io/hostname: aks-nodepool1-22492810-vmss000000 + # kubernetes.io/hostname: aks-arm64-36093013-vmss000000 diff --git a/test/plugin/packetparser/main.go b/test/plugin/packetparser/main.go new file mode 100644 index 000000000..96d09787c --- /dev/null +++ b/test/plugin/packetparser/main.go @@ -0,0 +1,149 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+// nolint + +package main + +import ( + "context" + "net" + "time" + + kcfg "github.com/microsoft/retina/pkg/config" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/managers/filtermanager" + "github.com/microsoft/retina/pkg/managers/watchermanager" + "github.com/microsoft/retina/pkg/plugin/api" + "github.com/microsoft/retina/pkg/plugin/packetparser" + "github.com/microsoft/retina/pkg/watchers/endpoint" + "go.uber.org/zap" + + "github.com/microsoft/retina/pkg/metrics" +) + +var tt api.Plugin + +// func test(l *log.ZapLogger) { +// m := &dto.Metric{} +// metrics.NodeApiServerTcpHandshakeLatencyMs.Write(m) + +// h := m.Histogram.GetBucket() +// var last uint64 +// last = 0 +// for _, b := range h { +// l.Info("Hist", zap.Any("Bucket", b.UpperBound), zap.Any("Count", *b.CumulativeCount-last)) +// last = *b.CumulativeCount +// } +// } + +func main() { + opts := log.GetDefaultLogOpts() + opts.Level = "debug" + log.SetupZapLogger(opts) + l := log.Logger().Named("test-packetparser") + + metrics.InitializeMetrics() + + // test(l) + // return + + metrics.InitializeMetrics() + + ctxTimeout, cancel := context.WithTimeout(context.Background(), 30*time.Second) + + // watcher manager + wm := watchermanager.NewWatcherManager() + wm.Watchers = []watchermanager.IWatcher{endpoint.Watcher()} + + err := wm.Start(ctxTimeout) + if err != nil { + panic(err) + } + + defer func() { + if err := wm.Stop(ctxTimeout); err != nil { + l.Error("Stop endpoint watcher failed", zap.Error(err)) + } + }() + // Filtermanager. + f, err := filtermanager.Init(3) + if err != nil { + l.Error("Start filtermanager failed", zap.Error(err)) + return + } + defer func() { + if err := f.Stop(); err != nil { + l.Error("Stop filtermanager failed", zap.Error(err)) + } + }() + + // Add IPs to filtermanager. + // ipsToAdd := []string{"10.224.0.106", "10.224.0.101"} + // ipsToAdd := []string{"20.69.116.85", "10.224.0.6"} + ipsToAdd := []string{"20.69.116.85"} + ips := []net.IP{} + for _, ip := range ipsToAdd { + x := net.ParseIP(ip).To4() + if x == nil { + l.Fatal("Invalid IP address", zap.String("ip", ip)) + } + ips = append(ips, x) + } + err = f.AddIPs(ips, "packetparser-test", filtermanager.RequestMetadata{RuleID: "test"}) + if err != nil { + l.Error("AddIPs failed", zap.Error(err)) + return + } + + // time.Sleep(30 * time.Second) + // f.DeleteIPs(ips, "packetparser-test") + // return + + // Start packetparser plugin. 
+ cfg := &kcfg.Config{ + MetricsInterval: 1 * time.Second, + EnablePodLevel: true, + } + tt = packetparser.New(cfg) + if err = tt.Stop(); err != nil { + l.Error("Stop packetparser plugin failed", zap.Error(err)) + return + } + + defer cancel() + err = tt.Generate(ctxTimeout) + if err != nil { + l.Error("Generate failed", zap.Error(err)) + return + } + + err = tt.Compile(ctxTimeout) + if err != nil { + l.Error("Compile failed", zap.Error(err)) + return + } + + err = tt.Init() + if err != nil { + l.Error("Init failed", zap.Error(err)) + return + } + + err = tt.Start(ctxTimeout) + if err != nil { + l.Error("Start failed", zap.Error(err)) + return + } + l.Info("Started packetparser") + + for range ctxTimeout.Done() { + l.Info("packetparser is running") + time.Sleep(1 * time.Second) + } + + tt.Stop() + l.Info("Stopping packetparser") + + // test(l) +} diff --git a/test/profiles/advanced/crd/metrics_config_crd.yaml b/test/profiles/advanced/crd/metrics_config_crd.yaml new file mode 100644 index 000000000..318c6a8ff --- /dev/null +++ b/test/profiles/advanced/crd/metrics_config_crd.yaml @@ -0,0 +1,23 @@ +apiVersion: retina.io/v1alpha1 +kind: MetricsConfiguration +metadata: + name: metricsconfigcrd +spec: + contextOptions: + - metricName: drop_count + sourceLabels: + - ip + - pod + - port + additionalLabels: + - direction + - metricName: forward_count + sourceLabels: + - ip + - pod + - port + additionalLabels: + - direction + namespaces: + exclude: + - kube-system diff --git a/test/profiles/advanced/values.yaml b/test/profiles/advanced/values.yaml new file mode 100644 index 000000000..60785021f --- /dev/null +++ b/test/profiles/advanced/values.yaml @@ -0,0 +1,6 @@ +enablePodLevel: true +remoteContext: true +logLevel: info +operator: + enabled: true +enabledPlugin_linux: '["dropreason","packetforward","packetparser"]' diff --git a/test/profiles/basic/crd/.gitkeep b/test/profiles/basic/crd/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/test/profiles/basic/values.yaml b/test/profiles/basic/values.yaml new file mode 100644 index 000000000..571181f5b --- /dev/null +++ b/test/profiles/basic/values.yaml @@ -0,0 +1,4 @@ +enablePodLevel: false +operator: + enabled: false +# Plugins diff --git a/test/profiles/localctx/crd/.gitkeep b/test/profiles/localctx/crd/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/test/profiles/localctx/values.yaml b/test/profiles/localctx/values.yaml new file mode 100644 index 000000000..c9c8448b9 --- /dev/null +++ b/test/profiles/localctx/values.yaml @@ -0,0 +1,8 @@ +operator: + enabled: false +# Plugins will default to deploy/manifests/controller/helm/retina/values.yaml +# TODO add all plugins that we want to test here for local context. +remoteContext: false +enablePodLevel: true +enableAnnotations: true +enabledPlugin_linux: '["dropreason","packetforward","packetparser", "dns"]' diff --git a/test/scale/README.md b/test/scale/README.md new file mode 100644 index 000000000..c49c69efc --- /dev/null +++ b/test/scale/README.md @@ -0,0 +1,15 @@ +# Retina Scale Testing + +* example-*.yaml files are used to generate resources for the cluster using `./generate-yamls.sh`. This generates cluster role bindings, deployments, network polcies, and service accounts. + +* `create-all.sh` will create all the resources for the cluster. + +* `cpu-and-mem.sh` is the script used to track the cpu and mem of retina pods, and node metrics and output the results as a csv in `./results/`. 
+ +* `pprof.sh` will get the pprof for retina agents which have mem > 100 Mb and will output the results in `./results/$pod`. + +* `restarts.sh` will get the previous logs for retina agents which have restarted and will output the results in `./results`. + +* `scrape-metrics.sh` is currently not used, but can be used/modified to go through each retina pod and get logs or metrics. + +* `az aks nodepool scale --name --cluster-name --resource-group --node-count 1000` was used to scale to 1k nodes for testing. diff --git a/test/scale/cpu-and-mem.sh b/test/scale/cpu-and-mem.sh new file mode 100755 index 000000000..651321b6c --- /dev/null +++ b/test/scale/cpu-and-mem.sh @@ -0,0 +1,27 @@ +#!/bin/bash +sleepSeconds=65 + +echo "time,pod,cpu,mem" > cpu-and-mem-pod-results.csv +echo "time,node,cpu,cpuPercent,mem,memPercent" > cpu-and-mem-node-results.csv +while true; do + currentTime=`date -u` + echo "running k top pod" + lines=`kubectl top pod -n kube-system | grep retina | awk '{$1=$1;print}' | tr ' ' ',' | tr -d 'm' | tr -d 'Mi'` + for line in $lines; do + echo "$currentTime,$line" >> cpu-and-mem-pod-results.csv + done + + currentTime=`date -u` + echo "running k top node" + lines=`kubectl top node | grep -v NAME | awk '{$1=$1;print}' | tr ' ' ',' | tr -d 'm' | tr -d 'Mi' | tr -d '%'` + for line in $lines; do + echo "$currentTime,$line" >> cpu-and-mem-node-results.csv + done + + echo `date -u` >> cpu-and-mem-running-pods.out + kubectl get pod -n kube-system | grep retina >> cpu-and-mem-running-pods.out + echo " " >> cpu-and-mem-running-pods.out + + echo "sleeping $sleepSeconds seconds" + sleep $sleepSeconds +done diff --git a/test/scale/create-all.sh b/test/scale/create-all.sh new file mode 100755 index 000000000..0b0cb0002 --- /dev/null +++ b/test/scale/create-all.sh @@ -0,0 +1,6 @@ +#!/bin/bash +./create-ns.sh +./create-sa.sh +./create-crb.sh +./create-netpols.sh +./create-deployments.sh diff --git a/test/scale/create-crb.sh b/test/scale/create-crb.sh new file mode 100755 index 000000000..7dd16f2d3 --- /dev/null +++ b/test/scale/create-crb.sh @@ -0,0 +1,18 @@ +#!/bin/bash +set -e +numNamespaces=`kubectl get ns | grep test-ns- | wc -l` +numClusterrolebindings=`ls clusterrolebindings/ | wc -l` +startTime=`date -u` + +echo "creating $numClusterrolebindings clusterrolebindings" +for (( i=1; i<=$numNamespaces; i++ )); do + kubectl -n test-ns-$i apply -f clusterrolebindings/ +done + +endTime=`date -u` +echo +echo "DONE. All $numClusterrolebindings clusterrolebindings are created" +elapsedTime=$(( $(date -d "$endTime" '+%s') - $(date -d "$startTime" '+%s') )) +echo "Elapsed time: $(( elapsedTime / 60 )) minutes $(( elapsedTime % 60 )) seconds" +echo "start time: $startTime" +echo "end time: $endTime" diff --git a/test/scale/create-deployments.sh b/test/scale/create-deployments.sh new file mode 100755 index 000000000..13bac2b24 --- /dev/null +++ b/test/scale/create-deployments.sh @@ -0,0 +1,35 @@ +#!/bin/bash +numNamespaces=1 + +set -e +numDeployments=`ls deployments/ | wc -l` +desiredNumPods=$(( numDeployments * numNamespaces )) + +startTime=`date -u` +summary="$desiredNumPods deployments. 
$numDeployments deployments in $numNamespaces namespaces" +echo "creating $summary" +for (( i=1; i<=$numNamespaces; i++ )); do + kubectl apply -n test-ns-$i -f deployments/ +done + +echo +echo "start time: $startTime" +echo "created $summary" +echo "waiting for $desiredNumPods deployments to come up" +while true; do + numPods=`kubectl get pod -A | grep test-ns- | grep Running | wc -l` + endTime=`date -u` + if [[ $numPods == $desiredNumPods ]]; then + break + fi + elapsedTime=$(( $(date -d "$endTime" '+%s') - $(date -d "$startTime" '+%s') )) + ## 250 should be a variable/calculated. goldpinger pods have 5 replicas per deployment + echo "$numPods pods up, want 250. Elapsed time: $(( elapsedTime / 60 )) minutes $(( elapsedTime % 60 )) seconds" + sleep 15 +done +echo +echo "DONE. All $desiredNumPods pods are running" +elapsedTime=$(( $(date -d "$endTime" '+%s') - $(date -d "$startTime" '+%s') )) +echo "Elapsed time: $(( elapsedTime / 60 )) minutes $(( elapsedTime % 60 )) seconds" +echo "start time: $startTime" +echo "end time: $endTime" diff --git a/test/scale/create-netpols.sh b/test/scale/create-netpols.sh new file mode 100755 index 000000000..8f391eef6 --- /dev/null +++ b/test/scale/create-netpols.sh @@ -0,0 +1,19 @@ +#!/bin/bash +set -e +numNamespaces=`kubectl get ns | grep test-ns- | wc -l` +numPolicies=`ls netpols/ | wc -l` +totalNetPols=$(( numPolicies * numNamespaces )) +startTime=`date -u` + +echo "creating $totalNetPols network policies, $numPolicies in each of $numNamespaces namespaces" +for (( i=1; i<=$numNamespaces; i++ )); do + kubectl -n test-ns-$i apply -f netpols/ +done + +endTime=`date -u` +echo +echo "DONE. All $totalNetPols policies are created" +elapsedTime=$(( $(date -d "$endTime" '+%s') - $(date -d "$startTime" '+%s') )) +echo "Elapsed time: $(( elapsedTime / 60 )) minutes $(( elapsedTime % 60 )) seconds" +echo "start time: $startTime" +echo "end time: $endTime" diff --git a/test/scale/create-ns.sh b/test/scale/create-ns.sh new file mode 100755 index 000000000..1ee649d21 --- /dev/null +++ b/test/scale/create-ns.sh @@ -0,0 +1,11 @@ +#!/bin/bash +numNamespaces=1 + +set -e + +startTime=`date -u` +summary="creating $numNamespaces namespaces" +echo "$summary" +for (( i=1; i<=$numNamespaces; i++ )); do + kubectl create namespace test-ns-$i +done diff --git a/test/scale/create-sa.sh b/test/scale/create-sa.sh new file mode 100755 index 000000000..92be6b0f2 --- /dev/null +++ b/test/scale/create-sa.sh @@ -0,0 +1,16 @@ +#!/bin/bash +set -e +numNamespaces=`kubectl get ns | grep test-ns- | wc -l` +numServiceAccounts=`ls serviceaccounts/ | wc -l` +startTime=`date -u` + +echo "creating $numServiceAccounts service accounts" +kubectl apply -f serviceaccounts/ + +endTime=`date -u` +echo +echo "DONE. 
All $numServiceAccounts service accounts are created" +elapsedTime=$(( $(date -d "$endTime" '+%s') - $(date -d "$startTime" '+%s') )) +echo "Elapsed time: $(( elapsedTime / 60 )) minutes $(( elapsedTime % 60 )) seconds" +echo "start time: $startTime" +echo "end time: $endTime" diff --git a/test/scale/example-clusterrolebinding.yaml b/test/scale/example-clusterrolebinding.yaml new file mode 100644 index 000000000..b1f9f0129 --- /dev/null +++ b/test/scale/example-clusterrolebinding.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: view +subjects: + - kind: ServiceAccount + name: goldpinger-serviceaccount + namespace: labelReplaceNamespace diff --git a/test/scale/example-deployment.yaml b/test/scale/example-deployment.yaml new file mode 100644 index 000000000..ae0c30f3a --- /dev/null +++ b/test/scale/example-deployment.yaml @@ -0,0 +1,71 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nameReplace + namespace: labelReplaceNamespace + labels: + app: labelReplace +spec: + replicas: 5 + selector: + matchLabels: + app: labelReplace + template: + metadata: + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8080' + labels: + app: labelReplace + spec: + serviceAccount: goldpinger-serviceaccount + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + securityContext: + runAsNonRoot: true + runAsUser: 1000 + fsGroup: 2000 + containers: + - name: goldpinger + env: + - name: HOST + value: "0.0.0.0" + - name: PORT + value: "8080" + # injecting real hostname will make for easier to understand graphs/metrics + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # podIP is used to select a randomized subset of nodes to ping. 
+ - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + image: "mcr.microsoft.com/aks/e2e/bloomberg-goldpinger:v3.7.0" + imagePullPolicy: Always + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + resources: + limits: + memory: 80Mi + requests: + cpu: 1m + memory: 40Mi + ports: + - containerPort: 8080 + name: http + readinessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 5 + livenessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 5 diff --git a/test/scale/example-netpol.yaml b/test/scale/example-netpol.yaml new file mode 100644 index 000000000..18efd7202 --- /dev/null +++ b/test/scale/example-netpol.yaml @@ -0,0 +1,27 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: nameReplace +spec: + podSelector: + matchLabels: + app: labelReplace1 + policyTypes: + - Ingress + - Egress + ingress: + - from: + - podSelector: + matchLabels: + app: labelReplace2 + ports: + - protocol: TCP + port: 8080 + egress: + - to: + - podSelector: + matchLabels: + app: labelReplace3 + ports: + - protocol: TCP + port: 8080 diff --git a/test/scale/example-serviceaccount.yaml b/test/scale/example-serviceaccount.yaml new file mode 100644 index 000000000..3845e7058 --- /dev/null +++ b/test/scale/example-serviceaccount.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: goldpinger-serviceaccount + namespace: labelReplaceNamespace diff --git a/test/scale/generate-yamls.sh b/test/scale/generate-yamls.sh new file mode 100755 index 000000000..92619c7d2 --- /dev/null +++ b/test/scale/generate-yamls.sh @@ -0,0 +1,53 @@ +#!/bin/bash +numDeployments=50 +numNetpolsPerDeployment=1 +numNamespaces=1 + +echo "creating $numDeployments deployments and $numNetpolsPerDeployment netpols per deployment" + +mkdir -p deployments +mkdir -p netpols +mkdir -p serviceaccounts +mkdir -p clusterrolebindings +rm deployments/deployment*.yaml +rm netpols/netpol*.yaml +rm serviceaccounts/serviceaccount*.yaml +rm clusterrolebindings/clusterrolebinding*.yaml + +for (( k=1; k<=$numNamespaces; k++ )); do + ## generate clusterrolebinding + toFile=clusterrolebindings/clusterrolebinding-$k.yaml + sed "s/nameReplace/test-clusterrolebinding-$k/g" example-clusterrolebinding.yaml > $toFile + sed -i "s/labelReplaceNamespace/test-ns-$k/g" $toFile + sed -i "s/labelReplace/label-$i/g" $toFile + + ## generate service accounts + toFile=serviceaccounts/serviceaccount-$i.yaml + sed "s/nameReplace/test-other-$i/g" example-serviceaccount.yaml > $toFile + sed -i "s/labelReplaceNamespace/test-ns-$k/g" $toFile + sed -i "s/labelReplace/label-$i/g" $toFile +done + +for (( i=1; i<=$numDeployments; i++ )); do + ## generate all netpols for this deployment + for (( j=1; j<=$numNetpolsPerDeployment; j++ )); do + toFile=netpols/netpol-$i-$j.yaml + sed "s/nameReplace/test-netpol-$i-$j/g" example-netpol.yaml > $toFile + sed -i "s/labelReplace1/label-$i/g" $toFile + z=$(( i + (j-1)*2 )) + plus1=$(( z % numDeployments + 1)) + sed -i "s/labelReplace2/label-$plus1/g" $toFile + plus2=$(( (z+1) % numDeployments + 1)) + sed -i "s/labelReplace3/label-$plus2/g" $toFile + done + + for (( k=1; k<=$numNamespaces; k++ )); do + ## generate deployment yaml + toFile=deployments/deployment-$i.yaml + sed "s/nameReplace/test-deployment-$i/g" example-deployment.yaml > $toFile + sed -i "s/labelReplaceNamespace/test-ns-$k/g" $toFile + sed -i "s/labelReplace/label-$i/g" $toFile + done +done + +echo "done" diff --git 
a/test/scale/pprof.sh b/test/scale/pprof.sh new file mode 100755 index 000000000..955bfa11f --- /dev/null +++ b/test/scale/pprof.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +## Script to get high mem usage pods and get retina pprof outputs + +## Memory limit before we get the pprof of the running pod +limit=100 +## Time duration to collect the cpu profile +profsec=60 +## Time duration to collect the traces profile +tracesec=60 +echo "Getting pprof output of pods above mem: $limit" +lines=`kubectl top pod -n kube-system | grep retina | awk '{$1=$1;print}' | tr ' ' ',' | tr -d 'm'` +for line in $lines; do + IFS=',' + read -ra values <<< "$line" + pod="${values[0]}" + mem=`echo ${values[2]} | tr -d 'Mi'` + if [ $mem -gt $limit ]; then + echo "Getting pprof for $pod with mem $mem" + kubectl exec -it -n kube-system $pod -- bash -c "apt update && apt install -y curl" + kubectl exec -it -n kube-system $pod -- bash -c "curl localhost:10093/debug/pprof/trace?seconds=$tracesec -o trace.out && curl localhost:10093/debug/pprof/profile?seconds=$profsec -o profile.out && curl localhost:10093/debug/pprof/heap -o heap.out" + kubectl cp -n kube-system $pod:/heap.out ./results/$pod/heap.out + kubectl cp -n kube-system $pod:/trace.out ./results/$pod/trace.out + kubectl cp -n kube-system $pod:/profile.out ./results/$pod/profile.out + fi + IFS=' ' +done diff --git a/test/scale/restarts.sh b/test/scale/restarts.sh new file mode 100755 index 000000000..962497170 --- /dev/null +++ b/test/scale/restarts.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +## Script to get the previous logs of pods that have restarted on the cluster + +echo "pod,restarts,error" > pod-restarts.out +lines=`kubectl get pod -n kube-system | awk '{$1=$1;print}' | grep retina | tr ' ' ','` +for line in $lines; do + IFS=',' + read -ra values <<< "$line" + pod="${values[0]}" + restarts="${values[3]}" + echo "Getting previous logs for $pod with $restarts restarts" + if [ $restarts -gt 0 ]; then + err=`kubectl logs -n kube-system $pod --previous` + echo "$pod,$restarts,$err" >> ./results/pod-restarts.out + fi + IFS=' ' +done diff --git a/test/scale/scale-deployments.sh b/test/scale/scale-deployments.sh new file mode 100755 index 000000000..c79075fd7 --- /dev/null +++ b/test/scale/scale-deployments.sh @@ -0,0 +1,39 @@ +#!/bin/bash +numReplicas=100 +# 50 deployments * 100 replicas = 5000 pods + +set -e +numNamespaces=`kubectl get ns | grep test-ns- | wc -l` +numDeployments=`ls deployments/ | wc -l` +desiredNumPods=$(( numDeployments * numNamespaces * numReplicas )) + +startTime=`date -u` +summary="$numDeployments deployments per $numNamespaces namespaces to $numReplicas replicas each. Total of $desiredNumPods pods" +echo "scaling $summary" +for (( i=1; i<=$numNamespaces; i++ )); do + for (( j=1; j<=$numDeployments; j++ )); do + kubectl scale -n test-ns-$i deployment/test-deployment-$j --replicas=$numReplicas + done +done + +## the rest is copied from create-deployments.sh +echo +echo "start time: $startTime" +echo "scaled $summary" +echo "waiting for $desiredNumPods pods to come up" +while true; do + numPods=`kubectl get pod -A | grep test-ns- | grep Running | wc -l` + endTime=`date -u` + if [[ $numPods == $desiredNumPods ]]; then + break + fi + elapsedTime=$(( $(date -d "$endTime" '+%s') - $(date -d "$startTime" '+%s') )) + echo "$numPods pods up, want $desiredNumPods. Elapsed time: $(( elapsedTime / 60 )) minutes $(( elapsedTime % 60 )) seconds" + sleep 15 +done +echo +echo "DONE. 
All $desiredNumPods pods are running" +elapsedTime=$(( $(date -d "$endTime" '+%s') - $(date -d "$startTime" '+%s') )) +echo "Elapsed time: $(( elapsedTime / 60 )) minutes $(( elapsedTime % 60 )) seconds" +echo "start time: $startTime" +echo "end time: $endTime" diff --git a/test/scale/scrape-metrics.sh b/test/scale/scrape-metrics.sh new file mode 100755 index 000000000..767855b17 --- /dev/null +++ b/test/scale/scrape-metrics.sh @@ -0,0 +1,41 @@ +#!/bin/bash +numPods=10 +sleepSeconds=$((60*3)) +csvFile=latencies.csv +tmpFile=scrape-temp.txt + +echo "logs/ folders. Will fail if it exists" +set -e +mkdir logs +if [ -f $csvFile ]; then + echo "File $csvFile already exists." + exit 1 +fi + +# shuf generates random permutations +retinaPods=`kubectl get pod -n kube-system | grep retina | grep Running | awk '{print $1}' | shuf -n $numPods` +echo "OBSERVING THESE PODS" +echo $retinaPods +echo + +for pod in $retinaPods; do + echo "capturing log for $pod in background" + kubectl logs -f $pod -n kube-system > logs/$pod.txt & +done + +for pod in $retinaPods; do + echo "installing curl on $pod" + kubectl exec -n kube-system $pod -- bash -c "apt update && apt install -y curl" +done + +round=1 +while true; do + echo "scraping node metrics" + for pod in $retinaPods; do + time=`date -u` + # exec in pod, curl localhost:10093/metrics and grab metrics + done + echo "finished scraping node metrics for round $round" + round=$((round+1)) + sleep $sleepSeconds +done diff --git a/test/testcrds/metrics-config-goldpinger.yaml b/test/testcrds/metrics-config-goldpinger.yaml new file mode 100644 index 000000000..ec1eb8894 --- /dev/null +++ b/test/testcrds/metrics-config-goldpinger.yaml @@ -0,0 +1,33 @@ +apiVersion: retina.io/v1alpha1 +kind: MetricsConfiguration +metadata: + name: metricsconfigurationaaaaa +spec: + namespaces: + include: + - default + contextOptions: + - metricName: forward_count + sourceLabels: + - podName + - ip + destinationLabels: + - podName + - ip + - workload + - metricName: drop_count + sourceLabels: + - podName + - ip + destinationLabels: + - podName + - ip + - workload + - metricName: tcp_retransmission_count + sourceLabels: + - podName + - ip + destinationLabels: + - podName + - ip + - workload diff --git a/test/trafficgen/agnhost.yaml b/test/trafficgen/agnhost.yaml new file mode 100644 index 000000000..901f2989b --- /dev/null +++ b/test/trafficgen/agnhost.yaml @@ -0,0 +1,47 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + app: agnhost + agnhost: a + name: agnhost-a + namespace: kube-system +spec: + nodeName: aks-nodepool1-12809644-vmss000000 + containers: + - name: a1 + image: k8s.gcr.io/e2e-test-images/agnhost:2.36 + command: ["/agnhost"] + args: ["serve-hostname", "--http", "--port", "80"] +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + app: agnhost + agnhost: b + name: agnhost-b + namespace: kube-system +spec: + nodeName: aks-nodepool1-12809644-vmss000001 + containers: + - name: b1 + image: k8s.gcr.io/e2e-test-images/agnhost:2.36 + command: ["/agnhost"] + args: ["serve-hostname", "--http", "--port", "80"] +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + app: agnhost + agnhost: c + name: agnhost-c + namespace: kube-system +spec: + nodeName: aks-nodepool1-12809644-vmss000002 + containers: + - name: c1 + image: k8s.gcr.io/e2e-test-images/agnhost:2.36 + command: ["/agnhost"] + args: ["serve-hostname", "--http", "--port", "80"] diff --git a/test/trafficgen/deny.yaml b/test/trafficgen/deny.yaml new file mode 100644 index 000000000..2d13743b1 --- /dev/null +++ 
b/test/trafficgen/deny.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: default-deny-ingress + namespace: default +spec: + podSelector: + matchLabels: + # agnhost: b + server: bad + policyTypes: + - Ingress diff --git a/test/trafficgen/kapinger.yaml b/test/trafficgen/kapinger.yaml new file mode 100644 index 000000000..c7393c91f --- /dev/null +++ b/test/trafficgen/kapinger.yaml @@ -0,0 +1,135 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kapinger-sa + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kapinger-role + namespace: default +rules: + - apiGroups: [""] + resources: ["services", "pods"] + verbs: ["get", "list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kapinger-rolebinding + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kapinger-role +subjects: + - kind: ServiceAccount + name: kapinger-sa + namespace: default +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kapinger-good + namespace: default +spec: + replicas: 10 + selector: + matchLabels: + app: kapinger + template: + metadata: + labels: + app: kapinger + server: good + spec: + serviceAccountName: kapinger-sa + containers: + - name: kapinger + image: acnpublic.azurecr.io/kapinger:latest + resources: + limits: + memory: 20Mi + requests: + memory: 20Mi + env: + - name: TARGET_TYPE + value: "service" + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: HTTP_PORT + value: "8080" + - name: TCP_PORT + value: "8085" + - name: UDP_PORT + value: "8086" + ports: + - containerPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kapinger-bad + namespace: default +spec: + replicas: 5 + selector: + matchLabels: + app: kapinger + template: + metadata: + labels: + app: kapinger + server: bad + spec: + serviceAccountName: kapinger-sa + containers: + - name: kapinger + image: acnpublic.azurecr.io/kapinger:latest + resources: + limits: + memory: 20Mi + requests: + memory: 20Mi + env: + - name: TARGET_TYPE + value: "service" + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: HTTP_PORT + value: "8080" + - name: TCP_PORT + value: "8085" + - name: UDP_PORT + value: "8086" + ports: + - containerPort: 8080 +--- +apiVersion: v1 +kind: Service +metadata: + name: kapinger-service + namespace: default + labels: + app: kapinger +spec: + selector: + app: kapinger + ports: + - protocol: TCP + port: 8080 + targetPort: 8080 + type: ClusterIP diff --git a/test/utsummary/main.go b/test/utsummary/main.go new file mode 100644 index 000000000..3faff6044 --- /dev/null +++ b/test/utsummary/main.go @@ -0,0 +1,144 @@ +// Copyright 2019 The Go Cloud Development Kit Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Summarizes the output of go test. +// Run like so: +// +// go test -json ./... | test-summary +package main + +import ( + "bufio" + "encoding/json" + "flag" + "fmt" + "io" + "log" + "os" + "path/filepath" + "strings" + "time" +) + +var ( + progress = flag.Bool("progress", false, "display test progress") + verbose = flag.Bool("verbose", false, "display all test output") +) + +// TestEvent is copied from "go doc test2json". +type TestEvent struct { + Time time.Time // encodes as an RFC3339-format string + Action string + Package string + Test string + Elapsed float64 // seconds + Output string +} + +func main() { + flag.Parse() + s, fails, err := run(os.Stdin) + if err != nil { + log.Fatal(err) + } + fmt.Println(s) + if fails { + os.Exit(1) + } +} + +func run(r io.Reader) (msg string, failures bool, err error) { + counts := map[string]int{} + scanner := bufio.NewScanner(bufio.NewReader(r)) + + // Collects tests that failed. + var failedTests []string + + // Stores output produced by each test. + testOutputs := map[string][]string{} + + start := time.Now() + for scanner.Scan() { + // When the build fails, go test -json doesn't emit a valid JSON value, only + // a line of output starting with FAIL. Report a more reasonable error in + // this case. + if strings.HasPrefix(scanner.Text(), "FAIL") { + return "", true, fmt.Errorf("No test output: %q", scanner.Text()) + } + + var event TestEvent + if err := json.Unmarshal(scanner.Bytes(), &event); err != nil { + return "", false, fmt.Errorf("%q: %v", scanner.Text(), err) + } + testpath := filepath.Join(event.Package, event.Test) + + // The Test field, if non-empty, specifies the test, example, or benchmark + // function that caused the event. Events for the overall package test do + // not set Test. + if event.Action == "fail" && event.Test != "" { + failedTests = append(failedTests, testpath) + } + + if event.Action == "output" { + if *verbose { + fmt.Print(event.Output) + } + testOutputs[testpath] = append(testOutputs[testpath], event.Output) + } + + // We don't want to count package passes/fails because these don't + // represent specific tests being run. However, skips of an entire package + // are not duplicated with individual test skips. + if event.Test != "" || event.Action == "skip" { + counts[event.Action]++ + } + + // For failed tests, print all the output we collected for them before + // the "fail" event. + if event.Action == "fail" { + fmt.Println(strings.Join(testOutputs[testpath], "")) + } + + if *progress { + // Only print progress for fail events for packages and tests, or + // pass events for packages only (not individual tests, since this is + // too noisy). 
+ if event.Action == "fail" || (event.Test == "" && event.Action == "pass") { + fmt.Printf("%s %s (%.2fs)\n", event.Action, testpath, event.Elapsed) + } + } + } + if err := scanner.Err(); err != nil { + return "", false, err + } + p := counts["pass"] + f := counts["fail"] + s := counts["skip"] + + summary := fmt.Sprintf("ran %d; passed %d; failed %d; skipped %d (in %.1f sec)", p+f+s, p, f, s, time.Since(start).Seconds()) + if len(failedTests) > 0 { + var sb strings.Builder + sb.WriteString("Failures (reporting up to 10):\n") + for i := 0; i < len(failedTests) && i < 10; i++ { + fmt.Fprintf(&sb, " %s\n", failedTests[i]) + } + if len(failedTests) > 10 { + sb.WriteString(" ...\n") + } + sb.WriteString(summary) + summary = sb.String() + } + + return summary, f > 0, nil +} diff --git a/test/watchers/apiserver/main.go b/test/watchers/apiserver/main.go new file mode 100644 index 000000000..4703e1292 --- /dev/null +++ b/test/watchers/apiserver/main.go @@ -0,0 +1,55 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +// nolint + +package main + +import ( + "context" + "time" + + "github.com/microsoft/retina/pkg/log" + "github.com/microsoft/retina/pkg/managers/filtermanager" + "github.com/microsoft/retina/pkg/managers/watchermanager" + "github.com/microsoft/retina/pkg/watchers/apiserver" + "go.uber.org/zap" +) + +func main() { + opts := log.GetDefaultLogOpts() + opts.Level = "debug" + log.SetupZapLogger(opts) + l := log.Logger().Named("test-apiserver") + + ctx := context.Background() + + // Filtermanager. + f, err := filtermanager.Init(5) + if err != nil { + l.Error("Failed to start Filtermanager", zap.Error(err)) + panic(err) + } + defer func() { + if err := f.Stop(); err != nil { + l.Error("Failed to stop Filtermanager", zap.Error(err)) + } + }() + // watcher manager + wm := watchermanager.NewWatcherManager() + wm.Watchers = []watchermanager.IWatcher{apiserver.Watcher()} + + // apiserver watcher. + err = wm.Start(ctx) + if err != nil { + l.Error("Failed to start watcher manager", zap.Error(err)) + panic(err) + } + + // Sleep 1 minute. + time.Sleep(60 * time.Second) + + err = wm.Stop(ctx) + if err != nil { + panic(err) + } +} diff --git a/test/watchers/veth/deployment.yaml b/test/watchers/veth/deployment.yaml new file mode 100644 index 000000000..9ea6b4a46 --- /dev/null +++ b/test/watchers/veth/deployment.yaml @@ -0,0 +1,26 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + namespace: default + labels: + app: nginx +spec: + replicas: 0 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + # image: nginx:1.14.2 + image: mcr.microsoft.com/mirror/docker/library/nginx:1.23 + ports: + - containerPort: 80 + nodeSelector: + # kubernetes.io/hostname: aks-nodepool1-29238948-vmss000000 + # kubernetes.io/hostname: aks-arm64-36093013-vmss000000 diff --git a/test/watchers/veth/main.go b/test/watchers/veth/main.go new file mode 100644 index 000000000..50a00c1c0 --- /dev/null +++ b/test/watchers/veth/main.go @@ -0,0 +1,42 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+// nolint

+package main
+
+import (
+	"context"
+	"time"
+
+	"github.com/microsoft/retina/pkg/log"
+	"github.com/microsoft/retina/pkg/managers/watchermanager"
+	"github.com/microsoft/retina/pkg/watchers/endpoint"
+	"go.uber.org/zap"
+)
+
+func main() {
+	opts := log.GetDefaultLogOpts()
+	opts.Level = "debug"
+	log.SetupZapLogger(opts)
+	l := log.Logger().Named("test-veth")
+
+	ctx := context.Background()
+
+	// watcher manager
+	wm := watchermanager.NewWatcherManager()
+	wm.Watchers = []watchermanager.IWatcher{endpoint.Watcher()}
+
+	err := wm.Start(ctx)
+	if err != nil {
+		panic(err)
+	}
+
+	// Sleep 1 minute.
+	time.Sleep(60 * time.Second)
+
+	err = wm.Stop(ctx)
+	if err != nil {
+		l.Error("Failed to stop watcher manager", zap.Error(err))
+		panic(err)
+	}
+}
diff --git a/windows/kubeconfigtemplate.yaml b/windows/kubeconfigtemplate.yaml
new file mode 100644
index 000000000..49e6c8ad5
--- /dev/null
+++ b/windows/kubeconfigtemplate.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Config
+clusters:
+- name: kubernetes
+  cluster:
+    certificate-authority-data:
+
+contexts:
+- name: azure-retina-windows@kubernetes
+  context:
+    cluster: kubernetes
+    namespace: kube-system
+    user: azure-retina-windows
+current-context: azure-retina-windows@kubernetes
+users:
+- name: azure-retina-windows
+  user:
+    token:
diff --git a/windows/manifests/node-selector-patch.yaml b/windows/manifests/node-selector-patch.yaml
new file mode 100644
index 000000000..2db79a8c6
--- /dev/null
+++ b/windows/manifests/node-selector-patch.yaml
@@ -0,0 +1,5 @@
+spec:
+  template:
+    spec:
+      nodeSelector:
+        kubernetes.io/os: linux
diff --git a/windows/manifests/windows.yaml b/windows/manifests/windows.yaml
new file mode 100644
index 000000000..80e3c8098
--- /dev/null
+++ b/windows/manifests/windows.yaml
@@ -0,0 +1,70 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  labels:
+    app: retina
+  name: retina-win
+  namespace: kube-system
+  annotations:
+    prometheus.io/port: "10093"
+    prometheus.io/scrape: "true"
+spec:
+  selector:
+    matchLabels:
+      app: retina
+  template:
+    metadata:
+      labels:
+        app: retina
+      name: retina
+      namespace: retina
+    spec:
+      serviceAccountName: retina-agent
+      tolerations:
+      - key: node-role.kubernetes.io/control-plane
+        operator: Exists
+        effect: NoSchedule
+      - key: node-role.kubernetes.io/master
+        operator: Exists
+        effect: NoSchedule
+      securityContext:
+        windowsOptions:
+          hostProcess: true
+          runAsUserName: "NT AUTHORITY\\SYSTEM"
+        runAsNonRoot: false
+      hostNetwork: true
+      containers:
+      - name: retinawin
+        image: mcr.microsoft.com/containernetworking/retina-agent:v0.0.6
+        ports:
+        - containerPort: 10093
+        command:
+        - powershell.exe
+        - -command
+        - .\setkubeconfigpath.ps1; ./controller.exe --config ./retina/config.yaml --kubeconfig ./kubeconfig
+        # .\setkubeconfigpath.ps1; Start-Sleep -s 1000
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - name: retina-config-win
+          mountPath: retina
+      nodeSelector:
+        kubernetes.io/os: windows
+      volumes:
+      - name: retina-config-win
+        configMap:
+          name: retina-config-win
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: retina-config-win
+  namespace: kube-system
+data:
+  config.yaml: |-
+    apiServer:
+      host: "0.0.0.0"
+      port: 10093
+    logLevel: info
+    enabledPlugin: ["hnsstats"]
+    metricsInterval: 10
diff --git a/windows/readme.md b/windows/readme.md
new file mode 100644
index 000000000..40343ccfd
--- /dev/null
+++ b/windows/readme.md
@@ -0,0 +1,28 @@
+# Windows plugins
+
+1. Cordon all Windows nodes. This is needed so the Helm install in step 2 is not blocked by pods pending on Windows nodes, until the nodeSelector patch from step 4 is applied.
+2. Install the Linux Retina Helm chart.
+
+   `helm install retina ./deploy/manifests/controller/helm/retina/ --namespace kube-system`
+
+3. Uncordon the Windows nodes.
+
+4. Apply the nodeSelector patch to the Prometheus components so they are only scheduled on Linux nodes.
+
+   ```bash
+   kubectl patch ds/retina-prometheus-node-exporter -n kube-system --patch "$(cat ./windows/manifests/node-selector-patch.yaml)"
+   kubectl patch deployment/retina-kube-prometheus-sta-operator -n kube-system --patch "$(cat ./windows/manifests/node-selector-patch.yaml)"
+   kubectl patch deployment/retina-kube-state-metrics -n kube-system --patch "$(cat ./windows/manifests/node-selector-patch.yaml)"
+   kubectl patch statefulset/prometheus-retina-kube-prometheus-sta-prometheus -n kube-system --patch "$(cat ./windows/manifests/node-selector-patch.yaml)"
+   kubectl patch statefulset/alertmanager-retina-kube-prometheus-sta-alertmanager -n kube-system --patch "$(cat ./windows/manifests/node-selector-patch.yaml)"
+   ```
+
+5. Build and push the Retina Windows image (don't forget to `az acr login` first).
+
+   `make retina-image-win-push`
+
+6. Modify `./windows/manifests/windows.yaml` to use the image tag you just pushed.
+
+7. Apply the Windows manifest.
+
+   `kubectl apply -f ./windows/manifests/windows.yaml`
diff --git a/windows/setkubeconfigpath.ps1 b/windows/setkubeconfigpath.ps1
new file mode 100644
index 000000000..38ec3440b
--- /dev/null
+++ b/windows/setkubeconfigpath.ps1
@@ -0,0 +1,11 @@
+# Pull the API server endpoint from the kubeconfig on the host (c:\k\config) and combine it with the
+# service account token and CA cert mounted into the container to build a kubeconfig for the clientset.
+$cpEndpoint = Get-Content C:\k\config | ForEach-Object -Process {if($_.Contains("server:")) {$_.Trim().Split()[1]}}
+$token = Get-Content -Path $env:CONTAINER_SANDBOX_MOUNT_POINT\var\run\secrets\kubernetes.io\serviceaccount\token
+$ca = Get-Content -Raw -Path $env:CONTAINER_SANDBOX_MOUNT_POINT\var\run\secrets\kubernetes.io\serviceaccount\ca.crt
+$caBase64 = [System.Convert]::ToBase64String([System.Text.Encoding]::UTF8.GetBytes($ca))
+$server = "server: $cpEndpoint"
+(Get-Content $env:CONTAINER_SANDBOX_MOUNT_POINT\kubeconfigtemplate.yaml).
+    replace("", $caBase64).
+    replace("", $server.Trim()).
+    replace("", $token) | Set-Content $env:CONTAINER_SANDBOX_MOUNT_POINT\kubeconfig -Force
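+# The resulting kubeconfig is written to the sandbox mount root and is consumed by controller.exe
+# (see the --kubeconfig flag in windows/manifests/windows.yaml).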